From af1a5f77cd8b2e1a8af1ad603be801655a804fca Mon Sep 17 00:00:00 2001
From: Tim Daly
Date: Sat, 2 Sep 2017 22:14:14 -0400
Subject: [PATCH] books/bookvolbib add Computer Algebra - Proof references
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Goal: Proving Axiom Correct
\index{Broy, Manfred}
\begin{chunk}{axiom.bib}
@article{Broy88,
author = "Broy, Manfred",
title = "Equational Specification of Partial Higher-order Algebras",
journal = "Theoretical Computer Science",
volume = "57",
number = "1",
year = "1988",
pages = "3-45",
abstract =
"The theory of algebraic abstract types specified by positive
conditional formulas formed of equations and a definedness predicate
is outlined and extended to hierarchical types with ``nonstrict''
operations, partial and even infinite objects. Its model theory is
based on the concept of partial interpretations. Deduction rules are
given, too. Models of types are studied where all explicit equations
have solutions. The inclusion of higher-order types, i.e., types
comprising higher-order functions leads to an algebraic (``equational'')
specification of algebras including sorts with ``infinite'' objects and
higher-order functions (``functionals'').",
paper = "Broy88.pdf"
}
\end{chunk}
\index{Maguire, Camm}
\index{Schelter, William}
\begin{chunk}{axiom.bib}
@misc{Magu17,
author = "Maguire, Camm and Schelter, William",
title = "Gnu Common Lisp",
link = "\url{https://savannah.gnu.org/projects/gcl}"
}
\end{chunk}
\index{Bernays, Paul}
\begin{chunk}{axiom.bib}
@book{Bern91,
author = "Bernays, Paul",
title = "Axiomatic Set Theory",
publisher = "Dover",
year = "1991"
}
\end{chunk}
\index{Fraenkel, A.A.}
\index{Bar-Hillel, Y.}
\index{Levy, A.}
\begin{chunk}{axiom.bib}
@book{Fran73,
author = "Fraenkel, A.A. and Bar-Hillel, Y. and Levy, A.",
title = "Foundations of Set Theory",
publisher = "Elsevier Science",
year = "1973",
isbn = "978-0720422702"
}
\end{chunk}
\index{Bradford, Russell J.}
\begin{chunk}{axiom.bib}
@misc{Brad92a,
author = "Bradford, Russell J.",
title = "C78: Computer Algebra Course Lecture Notes",
institution = "Univ. of Bath",
year = "1992"
}
\end{chunk}
\index{Cohn, P.M.}
\begin{chunk}{axiom.bib}
@book{Cohn91,
author = "Cohn, P.M.",
title = "Algebra (2nd Ed.)",
publisher = "Wiley",
isbn = "978-0471101697",
year = "1991",
paper = "Cohn91.pdf"
}
\end{chunk}
\index{Kaliszyk, Cezary}
\index{Wiedijk, Freek}
\begin{chunk}{axiom.bib}
@inproceedings{Kali07,
author = "Kaliszyk, Cezary and Wiedijk, Freek",
title = "Certified Computer Algebra on Top of an Interactive Theorem
Prover",
booktitle = "Towards Mechanized Mathematical Assistants",
pages = "94-105",
year = "2007",
abstract =
"We present a prototype of a computer algebra system that is built on
top of a proof assistant, HOL Light. This architecture guarantees that
one can be certain that the system will make no mistakes. All
expressions in the system will have precise semantics, and the proof
assistant will check the correctness of all simplifications according
to this semantics. The system actually proves each simplification
performed by the computer algebra system.
Although our system is built on top of a proof assistant, we designed
the user interface to be very close in spirit to the interface of
systems like Maple and Mathematica. The system, therefore, allows the
user to easily probe the underlying automation of the proof assistant
for strengths and weaknesses with respect to the automation of
mainstream computer algebra systems. The system that we present is a
prototype, but can be straightforwardly scaled up to a practical
computer algebra system.",
paper = "Kali07.pdf"
}
\end{chunk}
\index{Lobachev, Oleg}
\index{Loogen, Rita}
\begin{chunk}{axiom.bib}
@article{Loba08,
author = "Lobachev, Oleg and Loogen, Rita",
title = "Towards an Implementation of a Computer Algebra System in a
Functional Language",
journal = "LNAI",
volume = "5144",
pages = "141-208",
year = "2008",
publisher = "Springer-Verlag",
abstract =
"This paper discusses the pros and cons of using a functional language
for implementing a computer algebra system. The contributions of the
paper are twofold. Firstly, we discuss some language-centered design
aspects of a computer algebra system -- the ``language unity''
concept. Secondly, we provide an implementation of a fast polynomial
multiplication algorithm, which is one of the core elements of a
computer algebra system. The goal of the paper is to test the
feasibility of an implementation of (some elements of) a computer
algebra system in a modern functional language.",
paper = "Loba08.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Butler, Greg}
\begin{chunk}{axiom.bib}
@inproceedings{Butl96,
author = "Butler, Greg",
title = "Software Architectures for Computer Algebra: A Case Study",
booktitle = "DISCO '96",
pages = "277-286",
year = "1996",
abstract =
"The architectures of the existing computer algebra systems have not
been discussed sufficiently in the literature. Instead, the focus has
been on the design of the related programming language, or the design
of a few key data structures.
We address this deficiency with a case study of the architecture of
Cayley. Our aim is twofold: to capture this knowledge before the total
passing of a system now made obsolete by Magma; and to encourage
others to describe the architecture of the computer algebra systems
with which they are familiar.
The long-term goal is a better understanding of how to construct
computer algebra systems in the future.",
paper = "Butl96.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Mosses, Peter D.}
\begin{chunk}{axiom.bib}
@article{Moss93,
author = "Mosses, Peter D.",
title = "The Use of Sorts in Algebraic Specification",
journal = "Lecture Notes in Computer Science",
volume = "655",
pages = "66-91",
year = "1993",
abstract =
"Algebraic specification frameworks exploit a variety of sort
disciplines. The treatment of sorts has a considerable influence on
the ease with which such features as partiality and polymorphism can
be specified. This survey gives an accessible overview of various
frameworks, focusing on their sort disciplines and assessing their
strengths and weaknesses for practical applications. Familiarity with
the basic notions of algebraic specification is assumed.",
paper = "Moss93.pdf"
}
\end{chunk}
\index{Harrison, John}
\index{Th\'ery, Laurent}
\begin{chunk}{axiom.bib}
@inproceedings{Harr94,
author = "Harrison, John and Th{\'e}ry, Laurent",
title = "Extending the HOL Theorem Prover with a Computer Algebra System
to Reason about the Reals",
booktitle = "Proc. Higher Order Logic Theorem Proving",
year = "1994",
publisher = "Springer",
pages = "174-184",
isbn = "978-3-540-48346-5",
abstract =
"In this paper we describe an environment for reasoning about the
reals which combines the rigour of a theorem prover with the power of
a computer algebra system.",
paper = "Harr94.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Medina-Bulo, Inmaculada}
\index{Lozano-Palomo, F.}
\index{Alonso-Jimenez, J.A.}
\index{Ruiz-Reina, J.L.}
\begin{chunk}{axiom.bib}
@article{Medi04,
author = "Medina-Bulo, Inmaculada and Lozano-Palomo, F. and
Alonso-Jimenez, J.A. and Ruiz-Reina, J.L.",
title = "Verified Computer Algebra in ACL2",
journal = "LNAI",
volume = "3249",
year = "2004",
pages = "171-184",
abstract =
"In this paper, we present the formal verification of a Common Lisp
implementation of Buchberger’s algorithm for computing Gröbner bases
of polynomial ideals. This work is carried out in the ACL2 system and
shows how verified Computer Algebra can be achieved in an executable
logic.",
paper = "Medi04.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@article{Dave08,
author = "Davenport, James H.",
title = "Effective Set Membership in Computer Algebra and Beyond",
journal = "LNAI",
volume = "5144",
pages = "266-269",
year = "2008",
abstract =
"In previous work, we showed the importance of distinguishing ``I know
that $X \ne Y$'' from ``I don't know that $X = Y$''. In this paper we
look at effective set membership, starting with Groebner bases, where
the issues are well-expressed in algebra systems, and going on to
integration and other questions of `computer calculus'.
In particular, we claim that a better recognition of the role of set
membership would clarify some features of computer algebra systems,
such as `what does an integral mean as output'.",
paper = "Dave08.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@book{Dave81,
author = "Davenport, James H.",
title = "On the Integration of Algebraic Functions",
publisher = "Springer-Verlag",
series = "Lecture Notes in Computer Science 102",
isbn = "0-387-10290-6",
year = "1981",
abstract =
"This work is concerned with the following question: ``{\sl When is an
algebraic function integrable?}''. We can state this question in
another form which makes clearer our interpretation of integration:
``If we are given an algebraic function, when can we find an
expression in terms of algebraics, logarithms and exponentials whose
derivative is the given function, and what is that expression?''.
This question can be looked at purely mathematically, as a question in
decidability theory, but our interest in this question is more
practical and springs from the requirements of computer algebra. Thus
our goal is ``{\sl Write a program which, when given an algebraic
function, will produce an expression for its integral in terms of
algebraics, exponentials and logarithms, or will prove that there is
no such expression}''.",
paper = "Dave81.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@article{Dave81c,
author = "Davenport, James H.",
title = "Algebraic Computations",
publisher = "Springer-Verlag",
journal = "Lecture Notes in Computer Science 102",
pages = "14-29",
isbn = "0-387-10290-6",
year = "1981",
abstract =
"Algebraic relationships between variables and expressions are very
common in computer algebra. Not only do they often occur explicitly,
in forms like $sqrt(x^2+1)$, but well known difficulties such as
$sin(x)^2+cos(x)^2=1$ (Stoutemyer, 1977) can be expressed in this
form. Nevertheless it is difficult to compute with regard to these
relationships. This chapter discusses the problem of such computing,
and then enters the area of algebraic geometry, which is a natural
outgrowth of attempts to perform such computations as readily as one
computes without them.",
paper = "Dave81c.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@article{Dave81d,
author = "Davenport, James H.",
title = "Coates' Algorithm",
publisher = "Springer-Verlag",
journal = "Lecture Notes in Computer Science 102",
pages = "30-48",
isbn = "0-387-10290-6",
year = "1981",
abstract =
"In this chapter, we consider the problem of finding a function with a
certain set of poles. That this problem is non-trivial in the case of
algebraic functions (although it is trivial in the case of rational
functions) can be seen from the fact that such functions need not
always exist. For example, on the curve defined by $\sqrt{x^3+1}$,
there is no function with a zero of order 1 at one place lying over
the point $X=0$ and a pole of order 1 at infinity and no other poles
or zeros, but there is one with divisor 3 times that (i.e. the divisor
has order 3). On the curve defined by $Y^2=x^3-3X^2+X+1$, there are no
functions with a zero on one place lying over $X=0$ and a pole at the
other, both having the same order, and no other zeros or poles.",
paper = "Dave81d.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@article{Dave81e,
author = "Davenport, James H.",
title = "Risch's Theorem",
publisher = "Springer-Verlag",
journal = "Lecture Notes in Computer Science 102",
pages = "49-63",
isbn = "0-387-10290-6",
year = "1981",
abstract =
"This chapter describes an underlying body of theory to the area of
finding (or proving non-existent) the elementary integrals of
algebraic functions, where a function is {\sl algebraic} if it can be
generated from the variable of integration and constants by the
arithmetic operations and the taking of roots of equations (the theory
does not require that these roots should be expressible in terms of
radicals), possibly with nesting. By {\sl elementary} we mean
generated from the variable of integration and constants by the
arithmetic operations and the taking of roots, exponentials and
logarithms, possibly with nesting.",
paper = "Dave81e.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@article{Dave81f,
author = "Davenport, James H.",
title = "The Problem of Torsion Divisors",
publisher = "Springer-Verlag",
journal = "Lecture Notes in Computer Science 102",
pages = "64-75",
isbn = "0-387-10290-6",
year = "1981",
abstract =
"This chapter and the next three are concerned with the theory and
practice of the FIND-ORDER procedure, which, as we saw in the last
chapter, is a necessary part of our integration algorithm, and which
turns out to be the mathematically most difficult. This chapter will
outline the general nature of the problem, with special reference to
the simplest non-trivial case, viz. problems involving the square root
of one cubic or quartic and involving no constants other than the
rationals.",
paper = "Dave81f.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@article{Dave81g,
author = "Davenport, James H.",
title = "Gauss-Manin Operators",
publisher = "Springer-Verlag",
journal = "Lecture Notes in Computer Science 102",
pages = "76-91",
isbn = "0-387-10290-6",
year = "1981",
abstract =
"This chapter is devoted to the case of integrands which contain a
transcendental parameter apart from the variable of integration, so
that we can consider our problem to be the integration of a function
in $\{K(x,y) | F(u,x,y) = 0\}$, where $K$ is an algebraic extension of
$k(u)$ for some field $k$ and $u$ transcendental over it. We shall
use this notation, with $u$ being the independent transcendental, as
we shall use the prefix operator $D$ to denote differentiation with
respect to $u$, and the suffix $\prime$ to denote differentiation with
respect to $x$. This case is often more tractable than the case when
there is no such transcendental, for integration with respect to $x$
and differentiation with respect to $u$ commute, so that if $G(u,x,y)$
is integrable, then so is $DG(u,x,y)$, $D^2G(u,x,y)$ and so on.",
paper = "Dave81g.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@article{Dave81h,
author = "Davenport, James H.",
title = "Elliptic Integrals Concluded",
publisher = "Springer-Verlag",
journal = "Lecture Notes in Computer Science 102",
pages = "92-105",
isbn = "0-387-10290-6",
year = "1981",
abstract =
"The previous chapter (including the algorithm FIND\_ORDER\_MANIN)
completely solved the problem of torsion divisors over ground fields
containing a transcendental. We are therefore left with the case of
ground fields all of whose elements are algebraic over the rationals,
and this is the problem we will consider in this chapter (for elliptic
curves) and the next. Furthermore, any particular definition of a
curve and of a divisor can only involve a finite number of algebraics,
so we can restrict our attention to fields which are generated from
the rationals by extending with a finite number of algebraics, i.e.
{\sl algebraic number fields}. Before we can explore the torsion
divisor problem over them, we first need to know more about their
structure and possible computer representations, and this we discuss
in the next section, amplifying the discussion of general algebraic
expression in Chapter 2.",
paper = "Dave81h.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@article{Dave81i,
author = "Davenport, James H.",
title = "Curves over Algebraic Number Fields",
publisher = "Springer-Verlag",
journal = "Lecture Notes in Computer Science 102",
pages = "106-118",
isbn = "0-387-10290-6",
year = "1981",
abstract =
"The case of curves of arbitrary genus is much more difficult than the
case of curves of genus 1, and there are no well-developed algorithms
for this case. I have not been able to code any significant program to
deal with this case because of the large number of subsidiary
algorithms for which I do not have programs, though such programs have
been written elsewhere, or can readily be written. Presented here,
therefore, are the outlines of techniques which will enable one to
bound the torsion of curves of arbitrary genus over algebraic number
fields.",
paper = "Dave81i.pdf"
}
\end{chunk}
\index{Lutzen, Jesper}
\begin{chunk}{axiom.bib}
@book{Lutz90,
author = "Lutzen, Jesper",
title = "Joseph Liouville. 1809-1882: Master of Pure and Applied
Mathematics",
publisher = "Springer",
year = "1990",
paper = "Lutz90.pdf"
}
\end{chunk}
\index{Lutzen, Jesper}
\begin{chunk}{axiom.bib}
@misc{Lutz90a,
author = "Lutzen, Jesper",
title = "Integration in Finite Terms",
publisher = "Springer",
year = "1990",
comment = "Chapter IX",
paper = "Lutz90a.pdf"
}
\end{chunk}
\index{Ueberberg, Johannes}
\begin{chunk}{axiom.bib}
@article{Uebe94,
author = "Ueberberg, Johannes",
title = "Interactive Theorem Proving and Computer Algebra",
journal = "Lecture Notes in Computer Science",
volume = "958",
year = "1994",
abstract =
"Interactive Theorem Proving, ITP for short, is a new approach for the
use of current computer algebra systems to support mathematicians in
proving theorems. ITP grew out of a more general project -- called
Symbolic Incidence Geometry -- which is concerned with the problem of
the systematic use of the computer in incidence geometry.",
paper = "Uebe94.pdf"
}
\end{chunk}
\index{Jolly, Raphael}
\begin{chunk}{axiom.bib}
@article{Joll13,
author = "Jolly, Raphael",
title = "Categories as Type Classes in the Scala Algebra System",
journal = "LNCS",
pages = "209-218",
year = "2013",
abstract =
"A characterization of the categorical view of computer algebra is
proposed. Some requirements on the ability for abstraction that
programming languages must have in order to allow a categorical
approach is given. Object-oriented inheritance is presented as a
suitable abstraction scheme and exemplified by the Java Algebra
System. Type classes are then introduced as an alternative abstraction
scheme and shown to be eventually better suited for modeling
categories. Pro and cons of the two approaches are discussed and a
hybrid solution is exhibited.",
paper = "Joll13.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Davenport, James}
\begin{chunk}{axiom.bib}
@misc{Dave99,
author = "Davenport, James",
title = "A Small OpenMath Type System",
year = "1999",
link = "\url{https://www.openmath.org/standard/sts.pdf}",
paper = "Dave99.pdf"
}
\end{chunk}
\index{Davenport, James H.}
\begin{chunk}{axiom.bib}
@techreport{Dave92e,
author = "Davenport, James H.",
title = "The AXIOM System",
type = "technical report",
institution = "Numerical Algorithms Group, Oxford, U.K.",
number = "TR5/92",
year = "1992",
keywords = "axiomref"
}
\end{chunk}
\index{Kerber, Manfred}
\index{Kohlhase, Michael}
\index{Sorge, Volker}
\begin{chunk}{axiom.bib}
@article{Kerb96,
author = "Kerber, Manfred and Kohlhase, Michael and Sorge, Volker",
title = "Integrating Computer Algebra with Proof Planning",
journal = "Lecture Notes in Computer Science",
volume = "1128",
pages = "204-215",
year = "1996",
abstract =
"Mechanised reasoning systems and computer algebra systems have
apparently different objectives. Their integration is, however,
highly desirable, since in many formal proofs both of the two
different tasks, proving and calculating, have to be performed. In
the context of producing reliable proofs, the question how to ensure
correctness when integrating a computer algebra system into a
mechanised reasoning system is crucial. In this contribution, we
discuss the correctness problems that arise from such an integration
and advocate an approach in which the calculations of the computer
algebra system are checked at the calculus level of the mechanised
reasoning system. We present an implementation which achieves this
by adding a verbose mode to the computer algebra system which produces
high-level protocol information that can be processed by an interface
to derive proof plans. Such a proof plan in turn can be expanded to
proofs at different levels of abstraction, so the approach is
well-suited for producing a high-level verbalised explication as well
as for a low-level (machine checkable) calculus-level proof.",
paper = "Kerb96.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Aladjev, Victor}
\begin{chunk}{axiom.bib}
@article{Alad03,
author = "Aladjev, Victor",
title = "Computer Algebra System Maple: A New Software Library",
journal = "LNCS",
year = "2003",
pages = "711-717",
abstract =
"The paper represents Maple library containing more than 400 procedures
expanding possibilities of the Maple package of releases 6,7 and 8.
The library is structurally organized similarly to the main Maple
library. The process of the library installing is simple enough as a
result of which the above library will be logically linked with the
main Maple library, supporting access to software located in it
equally with standard Maple software. The demo library is delivered
free of charge at request to addresses mentioned above.",
paper = "Alad03.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Jackson, Paul}
\begin{chunk}{axiom.bib}
@article{Jack94,
author = "Jackson, Paul",
title = "Exploring Abstract Algebra in Constructive Type Theory",
abstract =
"I describe my implementation of computational abstract algebra in
the Nuprl system. I focus on my development of multivariate
polynomials. I show how I use Nuprl's expressive type theory to define
classes of free abelian monoids and free monoid algebras. These
classes are combined to create a class of all implementations of
polynomials. I discuss the issues of subtyping and computational
content that came up in designing the class definitions. I give
examples of relevant theory developments, tactics and proofs. I
consider how Nuprl could act as an algebraic 'oracle' for a computer
algebra system and the relevance of this work for abstract functional
programming.",
paper = "Jack94.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{van Hulzen, J.A.}
\begin{chunk}{axiom.bib}
@article{Hulz82,
author = "van Hulzen, J.A.",
title = "Computer Algebra Systems Viewed by a Notorious User",
journal = "LNCS",
volume = "144",
pages = "166-180",
year = "1982",
abstract =
"Are design and use of computer algebra systems disjoint or
complementary activities? Raising and answering this question are
equally controversial, since a clear distinction between language
features and library facilities is hard to make. Instead of even
attempting to answer this rather academic question it is argued why it
is reasonable to raise related questions: Is SMP a paradox? Is it
realistic to neglect inaccurate input data? Is a very high level
programming language instrumental for equal opportunity employment in
scientific research?",
paper = "Hulz82.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Caviness, Bob}
\begin{chunk}{axiom.bib}
@article{Cavi85,
author = "Caviness, Bob",
title = "Computer Algebra: Past and Future",
journal = "LNCS",
volume = "203",
pages = "1-18",
paper = "Cavi85.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Voevodsky, Vladimir}
\index{Benedikt, Ahrens}
\index{Grayson, Daniel}
\begin{chunk}{axiom.bib}
@misc{Voev17,
author = "Voevodsky, Vladimir and Benedikt, Ahrens and Grayson, Daniel",
title = "UniMath: Univalent Mathematics",
link = "\url{https://github.com/UniMath/UniMath}",
year = "2017"
}
\end{chunk}
\index{Mahboubi, Assia}
\begin{chunk}{axiom.bib}
@inproceedings{Mahb05,
author = "Mahboubi, Assia",
title = "Programming and certifying the CAD algorithm inside the coq system",
year = "2005",
booktitle = "Mathematics, Algorithms, Proofs, volume 05021 of Dagstuhl
Seminar Proceedings, Schloss Dagstuhl (2005)",
abstract =
"A. Tarski has shown in 1975 that one can perform quantifier
elimination in the theory of real closed fields. The introduction of
the Cylindrical Algebraic Decomposition (CAD) method has later allowed
to design rather feasible algorithms. Our aim is to program a
reflectional decision procedure for the Coq system, using the CAD, to
decide whether a (possibly multivariate) system of polynomial
inequalities with rational coefficients has a solution or not. We have
therefore implemented various computer algebra tools like gcd
computations, subresultant polynomial or Bernstein polynomials.",
paper = "Mahb05.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Dunstan, M.N.}
\index{Gottliebsen, H.}
\index{Kelsey, T.W.}
\index{Martin, U.}
\begin{chunk}{axiom.bib}
@inproceedings{Duns01,
author = "Dunstan, M.N. and Gottliebsen, H. and Kelsey, T.W. and
Martin, U.",
title = "A Maple-PVS Interface",
booktitle = "Proc. Calculemus, 2001",
year = "2001"
}
\end{chunk}
\index{Komendantsky, V.}
\index{Konovalov, A.}
\index{Linton, S.A.}
\begin{chunk}{axiom.bib}
@article{Kome12,
author = "Komendantsky, V. and Konovalov, A. and Linton, S.A.",
title = "Interfacing Coq + SSReflect with GAP",
journal = "Electronic Notes in Theoretical Computer Science",
volume = "295",
number = "19",
pages = "17-28",
abstract =
"We report on an extendable implementation of the communication
interface connecting Coq proof assistant to the computational algebra
system GAP using the Symbolic Computation Software Composability
Protocol (SCSCP). It allows Coq to issue OpenMath requests to a local
or remote GAP instances and represent server responses as Coq terms.",
paper = "Kome12.pdf"
}
\end{chunk}
\index{Mayero, Micaela}
\index{Delahaye, David}
\begin{chunk}{axiom.bib}
@misc{Maye17,
author = "Mayero, Micaela and Delahaye, David",
title = "A Maple Mode for Coq",
year = "2017",
link =
"\url{http://www.lix.polytechnique.fr/coq/V8.2pl1/contribs/MapleMode.html}",
abstract =
"This contribution is an interface between Coq and Maple. In
particular, this exports the functions simplify/factor/expand/normal
giving the corresponding tactics Simplify/Factor/Expand/Normal. The
manipulations carried out by these tactics are proved thanks to the
tactic Field. These tactics can be also used as functions by means of
the Eval ... In command. "
}
\end{chunk}
\index{Konovalov, Alexander}
\index{Linton, Steve}
\begin{chunk}{axiom.bib}
@misc{Kono17,
author = "Konovalov, Alexander and Linton, Steve",
title = "Symbolic Computation Software Composability Protocol",
version = "2.2.2",
year = "2017",
link = "\url{https://gap-packages.github.io/scscp/}",
paper = "Kono17.pdf"
}
\end{chunk}
\index{Linton, S.}
\index{Sebastiani, R.}
\begin{chunk}{axiom.bib}
@article{Lint02,
author = "Linton, S. and Sebastiani, R.",
title = "Editorial: The Integration of Automated Reasoning and Computer
Algebra Systems",
journal = "J. Symbolic Computation",
volume = "34",
pages = "239-239",
year = "2002",
paper = "Lint02.pdf"
}
\end{chunk}
\index{Khan, Muhammad Taimoor}
\begin{chunk}{axiom.bib}
@phdthesis{Khan14,
author = "Khan, Muhammad Taimoor",
title = "Formal Specification and Verification of Computer Algebra
Software",
school = "Johannes Kepler University, Linz",
year = "2014",
link =
"\url{http://www.risc.jku.at/publications/download/risc_4981/main.pdf}",
abstract =
"In this thesis, we present a novel framework for the formal
specification and verification of computer algebra programs and its
application to a non-trivial computer algebra package. The programs
are written in the language MiniMaple which is a substantial subset of
the language of the commercial computer algebra system Maple. The
main goal of the thesis is the application of light-weight formal
methods to MiniMaple programs (annotated with types and behavioral
specifications) for finding internal inconsistencies and violations of
methods preconditions by employing static program analysis. This task
is more complex for a computer algebra language like Maple than for
conventional programming languages, as Maple supports non-standard
types of objects and also requires abstract data types to model
algebraic concepts and notions.
As a starting point, we have defined
and formalized a syntax, semantics, type system and specification
language for MiniMaple. For verification, we automatically translate
the (types and specification) annotated MiniMaple program into a
behaviorally equivalent program in the intermediate language Why3ML of
the verification tool Why3; from the translated program, Why3
generates verification conditions whose correctness can be proved by
various automated and interactive theorem provers (e.g. Z3 and Coq).
Furthermore, we have defined a denotational semantics of MiniMaple and
its specification language and proved the soundness of the translation
with respect to the operational semantics of Why3ML. Finally, we
discuss the application of our verification framework to the Maple
package DifferenceDifferential developed at our institute to compute
bivariate difference-differential dimension polynomials using relative
Groebner bases.",
paper = "Khan14.pdf",
keywords = "axiomref"
}
\end{chunk}
\index{Homann, Karsten}
\index{Calmet, Jacques}
\begin{chunk}{axiom.bib}
@article{Homa05,
author = "Homann, Karsten and Calmet, Jacques",
title = "Structures for Symbolic Mathematical Reasoning and Computation",
journal = "LNCS",
volume = "1128",
year = "2005",
pages = "216-227",
abstract =
"Recent research towards integrating symbolic mathematical reasoning
and computation has led to prototypes of interfaces and
environments. This paper introduces computation theories and
structures to represent mathematical objects and applications of
algorithms occurring in algorithmic services. The composition of
reasoning and computation theories and structures provide a formal
framework for the specification of symbolic mathematical problem
solving by cooperation of algorithms and theorems.",
paper = "Homa05.pdf"
}
\end{chunk}
\index{Avigad, Jeremy}
\begin{chunk}{axiom.bib}
@misc{Avig12,
author = "Avigad, Jeremy",
title = "Interactive Theorem Proving, Automated Reasoning, and
Mathematical Computation",
year = "2012",
comment = "slides",
paper = "Avig12.pdf"
}
\end{chunk}
\index{Avigad, Jeremy}
\begin{chunk}{axiom.bib}
@misc{Avig14a,
author = "Avigad, Jeremy",
title = "Formal Verification, Interactive Theorem Proving, and
Automated Reasoning",
year = "2014",
comment = "slides",
paper = "Avig14a.pdf"
}
\end{chunk}
\index{Avigad, Jeremy}
\begin{chunk}{axiom.bib}
@misc{Avig16a,
author = "Avigad, Jeremy",
title = "Interactive Theorem Proving, Automated Reasoning, and Dynamical
Systems",
year = "2016",
comment = "slides",
paper = "Avig16a.pdf"
}
\end{chunk}
\index{Avigad, Jeremy}
\begin{chunk}{axiom.bib}
@misc{Avig17a,
author = "Avigad, Jeremy",
title = "Formal Methods in Mathematics and the Lean Theorem Prover",
year = "2017",
comment = "slides",
paper = "Avig17a.pdf"
}
\end{chunk}
\index{Harrison, John}
\begin{chunk}{axiom.bib}
@inproceedings{Harr07,
author = "Harrison, John",
title = "A Short Survey of Automated Reasoning",
booktitle = "Proc. 2nd Int. Conf. on Algebraic Biology",
pages = "334-349",
year = "2007",
publisher = "Springer-Verlag",
isbn = "978-3-540-73432-1",
abstract =
"This paper surveys the field of automated reasoning, giving some
historical background and outlining a few of the main current research
themes. We particularly emphasize the points of contact and the
contrasts with computer algebra. We finish with a discussion of the
main applications so far.",
paper = "Harr07.pdf"
}
\end{chunk}
\index{Schreiner, Wolfgang}
\index{Danielczyk-Landerl, Werner}
\index{Marin, Mircea}
\index{St\"ocher, Wolfgang}
\begin{chunk}{axiom.bib}
@inproceedings{Schr00,
author = "Schreiner, Wolfgang and Danielczyk-Landerl, Werner and
Marin, Mircea and St\"ocher, Wolfgang",
title = "A Generic Programming Environment for High-Performance
Mathematical Libraries",
year = "2000",
pages = "256-268",
abstract =
"We report on a programming environment for the development of
generic mathematical libraries based on functors (parameterized
modules) that have rigorously specified but very abstract
interfaces. We focus on the combination of the functor-based
programming style with software engineering principles in large
development projects. The generated target code is highly efficient
and can be easily embedded into foreign application environments.",
paper = "Schr00.pdf"
}
\end{chunk}
\index{Fevre, Stephane}
\index{Wang, Dongming}
\begin{chunk}{axiom.bib}
@article{Fevr98,
author = "Fevre, Stephane and Wang, Dongming",
title = "Proving Geometric Theorems using Clifford Algebra and Rewrite
Rules",
journal = "LNCS",
volume = "1421",
year = "1998",
pages = "17-32",
abstract =
"We consider geometric theorems that can be stated constructively by
introducing points, while each newly introduced point may be
represented in terms of the previously constructed points using
Clifford algebraic operators. To prove a concrete theorem, one first
substitutes the expressions of the dependent points into the
conclusion Clifford polynomial to obtain an expression that involves
only the free points and parameters. A term-rewriting system is
developed that can simplify such an expression to 0, and thus prove
the theorem. A large class of theorems can be proved effectively in
this coordinate-free manner. This paper describes the method in
detail and reports on our preliminary experiments.",
paper = "Fevr98.pdf"
}
\end{chunk}
---
books/axiom.bib | 30831 +++++++++++++++++++++++++++++++++++++++
books/bookvolbib.pamphlet | 1321 +-
changelog | 2 +
patch | 1021 +-
src/axiom-website/patches.html | 2 +
5 files changed, 33048 insertions(+), 129 deletions(-)
create mode 100644 books/axiom.bib
diff --git a/books/axiom.bib b/books/axiom.bib
new file mode 100644
index 0000000..debf098
--- /dev/null
+++ b/books/axiom.bib
@@ -0,0 +1,30831 @@
+@book{Book00,
+ author = "Axiom Authors",
+ title = "Volume 0: Axiom Jenks and Sutor",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol0.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Jenks:2003:AVS"
+}
+
+@book{Book01,
+ author = "Axiom Authors",
+ title = "Volume 1: Axiom Tutorial",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol1.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@book{Book02,
+ author = "Axiom Authors",
+ title = "Volume 2: Axiom Users Guide",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol2.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Daly:2005:AVAb"
+}
+
+@book{Book03,
+ author = "Axiom Authors",
+ title = "Volume 3: Axiom Programmers Guide",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol3.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Daly:2005:AVAc"
+}
+
+@book{Book04,
+ author = "Axiom Authors",
+ title = "Volume 4: Axiom Developers Guide",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol4.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Daly:2005:AVAd"
+}
+
+@book{Book05,
+ author = "Axiom Authors",
+ title = "Volume 5: Axiom Interpreter",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol5.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Daly:2003:AVA",
+}
+
+@book{Book06,
+ author = "Axiom Authors",
+ title = "Volume 6: Axiom Command",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol6.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Daly:2005:AVAe"
+}
+
+@book{Book07,
+ author = "Axiom Authors",
+ title = "Volume 7: Axiom Hyperdoc",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol7.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Daly:2005:AVAj"
+}
+
+@book{Book71,
+ author = "Axiom Authors",
+ title = "Volume 7.1: Axiom Hyperdoc Pages",
+ publisher = "Axiom Project",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol7.1.pdf}",
+ year = "2016"
+}
+
+@book{Book08,
+ author = "Axiom Authors",
+ title = "Volume 8: Axiom Graphics",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol8.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Daly:2005:AVAf"
+}
+
+@book{Book81,
+ author = "Axiom Authors",
+ title = "Volume 8.1: Axiom Gallery",
+ publisher = "Axiom Project",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol8.1.pdf}",
+ year = "2016"
+}
+
+@book{Book09,
+ author = "Axiom Authors",
+ title = "Volume 9: Axiom Compiler",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol9.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Daly:2005:AVAg"
+}
+
+@book{Book91,
+ author = "Axiom Authors",
+ title = "Volume 9.1: Axiom Compiler Details",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol9.1.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@book{Book10,
+ author = "Axiom Authors",
+ title = "Volume 10: Axiom Algebra: Implementation",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol10.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Daly:2005:AVAh"
+}
+
+@book{Book101,
+ author = "Axiom Authors",
+ title = "Volume 10.1: Axiom Algebra: Theory",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol10.1.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@book{Book102,
+ author = "Axiom Authors",
+ title = "Volume 10.2: Axiom Algebra: Categories",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol10.2.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@book{Book103,
+ author = "Axiom Authors",
+ title = "Volume 10.3: Axiom Algebra: Domains",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol10.3.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@book{Book104,
+ author = "Axiom Authors",
+ title = "Volume 10.4: Axiom Algebra: Packages",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol10.4.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@book{Book105,
+ author = "Axiom Authors",
+ title = "Volume 10.5: Axiom Algebra: Numerics",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol10.5.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@book{Book11,
+ author = "Axiom Authors",
+ title = "Volume 11: Axiom Browser",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol11.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Portes:2007:AVA"
+}
+
+@book{Book12,
+ author = "Axiom Authors",
+ title = "Volume 12: Axiom Crystal",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol12.pdf}",
+ publisher = "Axiom Project",
+ year = "2016",
+ keywords = "axiomref",
+ beebe = "Daly:2005:AVAi"
+}
+
+@book{Book13,
+ author = "Axiom Authors",
+ title = "Volume 13: Proving Axiom Correct",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol13.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@book{Book14,
+ author = "Axiom Authors",
+ title = "Volume 14: Algorithms",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvol14.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@book{Bookbib,
+ author = "Axiom Authors",
+ title = "Volume Bibliography: Axiom Literature Citations",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvolbib.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@book{Bookbug,
+ author = "Axiom Authors",
+ title = "Volume BugList: Axiom Bugs",
+ link = "\url{http://axiom-developer.org/axiom-website/bookvolbug.pdf}",
+ publisher = "Axiom Project",
+ year = "2016"
+}
+
+@inproceedings{Abda86,
+ author = "Abdali, S. Kamal and Cherry, Guy W. and Soiffer, Neil",
+ title = "A Smalltalk System for Algebraic Manipulation",
+ booktitle = "OOPSLA 86",
+ pages = "277-293",
+ year = "1986",
+ abstract =
+ "This paper describes the design of an algebra system Views
+ implemented in Smalltalk. Views contains facilities for dynamic
+ creation and manipulation of computational domains, for viewing these
+ domains as various categories such as groups, rings, or fields, and
+ for expressing algorithms generically at the level of categories. The
+ design of Views has resulted in the addition of some new abstractions
+ to Smalltalk that are quite useful in their own right. Parameterized
+ classes provide a means for run-time creation of new classes that
+ exhibit generally very similar behavior, differing only in minor ways
+ that can be described by different instantiations of certain
+ parameters. Categories allow the abstraction of the common behavior of
+ classes that derives from the class objects and operations satisfying
+ certain laws independently of the implementation of those objects and
+ operations. Views allow the run-time association of classes with
+ categories (and of categories with other categories), facilitating the
+ use of code written for categories with quite different
+ interpretations of operations. Together, categories and views provide
+ an additional mechanism for code sharing that is richer than both
+ single and multiple inheritance. The paper gives algebraic as well as
+ non-algebraic examples of the above-mentioned features.",
+ paper = "Abda86.pdf",
+ keywords = "axiomref"
+}
+
+@book{Ahox86,
+ author = "Aho, Alfred V. and Sethi, Ravi and Ullman, Jeffrey D.",
+ title = "Compilers: Principles, Techniques, and Tools",
+ year = "1986",
+ publisher = "Addison-Wesley",
+ isbn = "978-0201100884"
+}
+
+@misc{Bake16b,
+ author = "Baker, Martin",
+ title = "add coerce from PermutationGroup to GroupPresentation",
+ link = "\url{https://groups.google.com/forum/?hl=en\#!topic/fricas-devel/EtLwgd2dWNU}",
+ year = "2016"
+}
+
+@misc{Bake17,
+ author = "Baker, Martin",
+ title = "Finite Group Implementation",
+ link = "\url{http://www.euclideanspace.com/prog/scratchpad/mycode/discrete/finiteGroup/}",
+ year = "2017"
+}
+
+@book{Bare84,
+ author = "Barendregt, H. P.",
+ title = "The Lambda Calculus: Its Syntax and Semantics",
+ publisher = "Elsevier Science",
+ year = "1984"
+}
+
+@techreport{Baum95,
+ author = "Baumgartner, Gerald and Stansifer, Ryan D.",
+ title = "A Proposal to Study Type Systems for Computer Algebra",
+ type = "technical report",
+ institution = "RISC-LINZ",
+ number = "90-07.0",
+ year = "1995",
+ abstract =
+ "It is widely recognized that programming languages should offer
+ features to help structure programs. To achieve this goal, languages
+ like Ada, Modula-2, object-oriented languages, and functional
+ languages have been developed. The structuring techniques available
+ so far (like modules, classes, parametric polymorphism) are still not
+ enough or not appropriate for some application areas. In symbolic
+ computation, in particular computer algebra, several problems occur
+ that are difficult to handle with any existing programming
+ language. Indeed, nearly all available computer algebra systems suffer
+ from the fact that the underlying programming language imposes too
+ many restrictions.
+
+ We propose to develop a language that combines the essential features
+ from functional languages, object-oriented languages, and computer
+ algebra systems in a semantically clean manner. Although intended for
+ use in symbolic computation, this language should prove interesting as
+ a general purpose programming language. The main innovation will be
+ the application of sophisticated type systems to the needs of computer
+ algebra systems. We will demonstrate the capabilities of the language
+ by using it to implement a small computer algebra library. This
+ implementation will be compared against a straightforward Lisp
+ implementation and against existing computer algebra systems. Our
+ development should have an impact both on the programming languages
+ world and on the computer algebra world.",
+ paper = "Baum95.pdf"
+}
+
+@techreport{Berg92,
+ author = "Berger, Emery",
+ title = "FP + OOP = Haskell",
+ institution = "University of Texas",
+ number = "TR-92-30",
+ year = "1992",
+ abstract =
+ "The programming language Haskell adds object-oriented functionality
+ (using a concept known as type classes) to a pure functional
+ programming framework. This paper describes these extensions and
+ analyzes its accomplishments as well as some problems."
+}
+
+@book{Birt80,
+ author = "Birtwistle, Graham M.",
+ title = "Simula Begin",
+ year = "1980",
+ publisher = "Chartwell-Bratt",
+ isbn = "9780862380090"
+}
+
+@inproceedings{Brea89,
+ author = "Breazu-Tannen, Val and Coquand, Thierry and Gunter, Carl A. and
+ Scedrov, Andre",
+ title = "Inheritance and Explicit Coercion",
+ booktitle = "Logic in Computer Science",
+ year = "1989",
+ isbn = "0-8186-1954-6",
+ abstract =
+ "A method is presented for providing semantic interpretations for
+ languages which feature inheritance in the framework of statically
+ checked, rich type disciplines. The approach is illustrated by an
+ extension of the language Fun of L. Cardelli and P. Wegner (1985),
+ which is interpreted via a translation into an extended polymorphic
+ lambda calculus. The approach interprets inheritances in Fun as
+ coercion functions already definable in the target of the
+ translation. Existing techniques in the theory of semantic domains can
+ then be used to interpret the extended polymorphic lambda calculus,
+ thus providing many models for the original language. The method
+ allows the simultaneous modeling of parametric polymorphism, recursive
+ types, and inheritance, which has been regarded as problematic because
+ of the seemingly contradictory characteristics of inheritance and type
+ recursion on higher types. The main difficulty in providing
+ interpretations for explicit type disciplines featuring inheritance is
+ identified. Since interpretations follow the type-checking
+ derivations, coherence theorems are required, and the authors prove
+ them for their semantic method.",
+ paper = "Brea89.pdf"
+}
+
+@article{Brea91,
+ author = "Breazu-Tannen, Val and Coquand, Thierry and Gunter, Carl A. and
+ Scedrov, Andre",
+ title = "Inheritance as Implicit Coercion",
+ journal = "Information and Computation",
+ volume = "93",
+ number = "1",
+ year = "1991",
+ pages = "172-221",
+ abstract =
+ "We present a method for providing semantic interpretations for
+ languages with a type system featuring inheritance polymorphism. Our
+ approach is illustrated on an extension of the language Fun of
+ Cardelli and Wegner, which we interpret via a translation into an
+ extended polymorphic lambda calculus. Our goal is to interpret
+ inheritances in Fun via coercion functions which are definable in the
+ target of the translation. Existing techniques in the theory of
+ semantic domains can be then used to interpret the extended
+ polymorphic lambda calculus, thus providing many models for the
+ original language. This technique makes it possible to model a rich
+ type discipline which includes parametric polymorphism and recursive
+ types as well as inheritance. A central difficulty in providing
+ interpretations for explicit type disciplines featuring inheritance in
+ the sense discussed in this paper arises from the fact that programs
+ can type-check in more than one way. Since interpretations follow the
+ type-checking derivations, coherence theorems are required: that is,
+ one must prove that the meaning of a program does not depend on the
+ way it was type-checked. Proofs of such theorems for our proposed
+ interpretation are the basic technical results of this
+ paper. Interestingly, proving coherence in the presence of recursive
+ types, variants, and abstract types forced us to reexamine fundamental
+ equational properties that arise in proof theory (in the form of
+ commutative reductions) and domain theory (in the form of strict
+ vs. non-strict functions).",
+ paper = "Brea91.pdf"
+}
+
+@inproceedings{Bruc93,
+ author = "Bruce, Kim B.",
+ title = "Safe type checking in a statically-typed object-oriented
+ programming language",
+ booktitle = "POPL 93",
+ year = "1993",
+ isbn = "0-89791-560-7",
+ pages = "285-298",
+ abstract =
+ " In this paper we introduce a statically-typed, functional,
+ object-oriented programming language, TOOPL, which supports classes,
+ objects, methods, instance variable, subtypes, and inheritance. It has
+ proved to be surprisingly difficult to design statically-typed
+ object-oriented languages which are nearly as expressive as Smalltalk
+ and yet have no holes in their typing systems. A particular problem
+ with statically type checking object-oriented languages is determining
+ whether a method provided in a superclass will continue to type check
+ when inherited in a subclass. This program is solved in our language
+ by providing type checking rules which guarantee that a method which
+ type checks as part of a class will type check correctly in all legal
+ subclasses in which it is inherited. This feature enables library
+ providers to provide only the interfaces of classes with executables
+ and still allow users to safely create subclasses. The design of TOOPL
+ has been guided by an analysis of the semantics of the language, which
+ is given in terms of a sufficiently rich model of the F-bounded
+ second-order lambda calculus. This semantics supported the language
+ design by providing a means of proving that the type-checking rules
+ for the language are sound, ensuring that well-typed terms produce
+ objects of the appropriate type. In particular, in a well-typed
+ program it is impossible to send a message to an object which lacks a
+ corresponding method.",
+ paper = "Bruc93.pdf"
+}
+
+@inproceedings{Brea89a,
+ author = "Breazu-Tannen, Val and Gallier, Jean",
+ title = "Polymorphic Rewriting Conserves Algebraic Strong Normalization
+ and Confluence",
+ booktitle = "Automata, Languages and Programming",
+ pages = "137-150",
+ year = "1989",
+ abstract =
+ "We study combinations of many-sorted algebraic term rewriting systems
+ and polymorphic lambda term rewriting. Algebraic and lambda terms are
+ mixed by adding the symbols of the algebraic signature to the
+ polymorphic lambda calculus, as higher-order constants.
+
+ We show that if a many-sorted algebraic rewrite system R is strongly
+ normalizing (terminating, noetherian), then
+ $R+\beta+\eta+type-\beta+type-\eta$ rewriting of mixed terms is also
+ strongly normalizing. We obtain this result using a technique which
+ generalizes Girard's ``{candidats de r\'eductibilit\'e}'', introduced in
+ the original proof of strong normalization for the polymorphic lambda
+ calculus.
+
+ We also show that if a many-sorted algebraic rewrite system $R$ has
+ the Church-Rosser property (is confluent), then
+ $R+\beta+type-\beta+type-\eta$ rewriting of mixed terms has the
+ Church-Rosser property too. Combining the two results, we conclude
+ that if $R$ is canonical (complete) on algebraic terms, then
+ $R+\beta+type-\beta+type-\eta$ is canonical on mixed terms.
+
+ $\eta$ reduction does not commute with algebraic reduction, in general.
+ However, using long $\eta$-normal forms, we show that if $R$ is canonical
+ then $R+\beta+type-\beta+type-\eta$ convertibility is still decidable.",
+ paper = "Brea89a.pdf"
+}
+
+@book{Buch82,
+ author = "Buchberger, Bruno and Collins, George Edwin and Loos, Rudiger",
+ title = "Computer Algebra: Symbolic and Algebraic Computation",
+ publisher = "Springer",
+ year = "1982",
+ isbn = "978-3-211-81684-4",
+ paper = "Buch82.pdf"
+}
+
+@techreport{Buch93,
+ author = "Buchberger, Bruno and Collins, George E. and Encarnacion, Mark J.
+ and Hong, Hoon and Johnson, Jeremy R. and Krandick, Werner and
+ Loos, Rudiger and Mandache, Ana M. and Neubacher, Andreas and
+ Vielhaber, Herbert",
+ title = "SACLIB 1.1 User's Guide",
+ year = "1993",
+ institution = "Kurt Godel Institute",
+ abstract =
+ "This paper lists most of the algorithms provided by SACLIB and shows
+ how to call them from C. There is also a brief explanation of the
+ inner workings of the list processing and garbage collection
+ facilities of SACLIB",
+ paper = "Buch93.pdf"
+}
+
+@article{Buen91,
+ author = "Buendgen, R. and Hagel, G. and Loos, R. and Seitz, S. and
+ Simon, G. and Stuebner, R. and Weber, A.",
+ title = "SAC-2 in ALDES -- Ein Werkzeug f\"ur die Algorithmenforschung",
+ journal = "MathPAD 1",
+ volume = "3",
+ year = "1991",
+ pages = "33-37"
+}
+
+@book{Bund93,
+ author = "Bundgen, Reinhard",
+ title = "The ReDuX System Documentation",
+ year = "1993",
+ publisher = "WSI"
+}
+
+@inproceedings{Bund93a,
+ author = "Bundgen, Reinhard",
+ title = {Reduce the Redex $\rightarrow$ ReDuX},
+ booktitle = "Proc. Rewriting Techniques and Applications 93",
+ year = "1993",
+ pages = "446-450",
+ publisher = "Springer-Verlag",
+ isbn = "3-540-56868-9"
+}
+
+@inproceedings{Butl90,
+ author = "Butler, Greg and Cannon, John",
+ title = "The Design of Cayley -- A Language for Modern Algebra",
+ booktitle = "DISCO 1990",
+ year = "1990",
+ pages = "10-19",
+ abstract =
+ "Established practice in the domain of modern algebra has shaped the
+ design of Cayley. The design has also been responsive to the needs of
+ its users. The requirements of the users include consistency with
+ common mathematical notation; appropriate data types such as sets,
+ sequences, mappings, algebraic structures and elements; efficiency;
+ extensibility; power of in-built functions and procedures for known
+ algorithms; and access to common examples of algebraic structures. We
+ discuss these influences on the design of Cayley's user language.",
+ paper = "Butl90.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Cant16,
+ author = "Cantrill, Bryan",
+ title = "Oral Tradition in Software Engineering",
+ link = "\url{https://www.youtube.com/watch?v=4PaWFYm0kEw}",
+ year = "2016"
+}
+
+@article{Card85,
+ author = "Cardelli, Luca and Wegner, Peter",
+ title = "On Understanding Types, Data Abstraction, and Polymorphism",
+ journal = "ACM Computing Surveys",
+ volume = "17",
+ number = "4",
+ year = "1985",
+ pages = "471-523",
+ abstract =
+ "Our objective is to understand the notion of type in programming
+ languages, present a model of typed, polymorphic programming languages
+ that reflects recent research in type theory, and examine the
+ relevance of recent research to the design of practical programming
+ languages.
+
+ Object-oriented languages provide both a framework and a
+ motivation for exploring the interaction among the concepts of type,
+ data abstraction, and polymorphism, since they extend the notion of
+ type to data abstraction and since type inheritance is an important
+ form of polymorphism. We develop a $\lambda$-calculus-based model for type
+ systems that allows us to explore these interactions in a simple
+ setting, unencumbered by complexities of production programming
+ languages.
+
+ The evolution of languages from untyped universes to
+ monomorphic and then polymorphic type systems is reviewed. Mechanisms
+ for polymorphism such as overloading, coercion, subtyping, and
+ parameterization are examined. A unifying framework for polymorphic
+ type systems is developed in terms of the typed $\lambda$-calculus
+ augmented to include binding of types by quantification as well as
+ binding of values by abstraction.
+
+ The typed $\lambda$-calculus is
+ augmented by universal quantification to model generic functions with
+ type parameters, existential quantification and packaging (information
+ hiding) to model abstract data types, and bounded quantification to
+ model subtypes and type inheritance. In this way we obtain a simple
+ and precise characterization of a powerful type system that includes
+ abstract data types, parametric polymorphism, and multiple inheritance
+ in a single consistent framework. The mechanisms for type checking for
+ the augmented $\lambda$-calculus are discussed.
+
+ The augmented typed
+ $\lambda$-calculus is used as a programming language for a variety of
+ illustrative examples. We christen this language Fun because fun
+ instead of $\lambda$ is the functional abstraction keyword and because it
+ is pleasant to deal with.
+
+ Fun is mathematically simple and can serve
+ as a basis for the design and implementation of real programming
+ languages with type facilities that are more powerful and expressive
+ than those of existing programming languages. In particular, it
+ provides a basis for the design of strongly typed object-oriented
+ languages",
+ paper = "Card85.pdf"
+}
+
+@inproceedings{Card86,
+ author = "Cardelli, Luca",
+ title = "Typechecking Dependent Types and Subtypes",
+ link =
+ "\url{http://lucacardelli.name/Papers/Dependent%20Typechecking.US.pdf}",
+ booktitle = "Foundations of Logic and Functional Programming",
+ year = "1986",
+ journal = "LNCS",
+ volume = "523",
+ pages = "45-57",
+ paper = "Card86.pdf"
+}
+
+@article{Card88,
+ author = "Cardelli, Luca",
+ title = "A Semantics of Multiple Inheritance",
+ journal = "Information and Computation",
+ volume = "76",
+ number = "2-3",
+ year = "1988",
+ pages = "138-164",
+ paper = "Card88.pdf"
+}
+
+@article{Card91,
+ author = "Cardelli, Luca and Longo, Giuseppe",
+ title = "A Semantic Basis for Quest",
+ journal = "J. of Functional Programming",
+ volume = "1",
+ number = "4",
+ pages = "417-458",
+ year = "1991",
+ abstract =
+ "Quest is a programming language based on impredicative type
+ quantifiers and subtyping within a three-level structure of kinds,
+ types and type operators, and values.
+
+ The semantics of Quest is rather challenging. In particular,
+ difficulties arise when we try to model simultaneously features such
+ as contravariant function spaces, record types, subtyping, recursive
+ types and fixpoints.
+
+ In this paper we describe in detail the type inference rules for
+ Quest, and give them meaning using a partial equivalence relation
+ model of types. Subtyping is interpreted as in previous work by Bruce
+ and Longo (1989), but the interpretation of some aspects – namely
+ subsumption, power kinds, and record subtyping – is novel. The latter
+ is based on a new encoding of record types.
+
+ We concentrate on modelling quantifiers and subtyping; recursion is
+ the subject of current work.",
+ paper = "Card91.pdf"
+}
+
+@book{Chan90,
+ author = "Chang, C.C. and Keisler, H. Jerome",
+ title = "Model Theory",
+ publisher = "North Holland",
+ year = "1990",
+ comment = "Studies in Logic and the Foundations of Mathematics",
+ volume = "73",
+ abstract =
+ "Since the second edition of this book (1977), Model Theory has
+ changed radically, and is now concerned with fields such as
+ classification (or stability) theory, nonstandard analysis,
+ model-theoretic algebra, recursive model theory, abstract model
+ theory, and model theories for a host of nonfirst order logics. Model
+ theoretic methods have also had a major impact on set theory,
+ recursion theory, and proof theory.
+
+ This new edition has been updated to take account of these changes,
+ while preserving its usefulness as a first textbook in model
+ theory. Whole new sections have been added, as well as new exercises
+ and references. A number of updates, improvements and corrections have
+ been made to the main text"
+}
+
+@book{Char91,
+ author = "Char, Bruce and Geddes, Keith O. and Gonnet, Gaston H. and
+ Leong, Benton and Monagan, Michael B. and Watt, Stephen M.",
+ title = "Maple V Language Reference Manual",
+ publisher = "Springer",
+ year = "1991",
+ isbn = "978-0-387-94124-0"
+}
+
+@book{Char91a,
+ author = "Char, Bruce and Geddes, Keith O. and Gonnet, Gaston H. and
+ Leong, Benton and Monagan, Michael B. and Watt, Stephen M.",
+ title = "Maple V Library Reference Manual",
+ publisher = "Springer",
+ year = "1991",
+ isbn = "978-1-4757-2133-1",
+ abstract =
+ "The design and implementation of the Maple system is an on-going
+ project of the Symbolic Computation Group at the University of
+ Waterloo in Ontario, Canada. This manual corresponds with version V
+ (roman numeral five) of the Maple system. The on-line help subsystem
+ can be invoked from within a Maple session to view documentation on
+ specific topics. In particular, the command ?updates points the user
+ to documentation updates for each new version of Maple. The Maple
+ project was first conceived in the autumn of 1980, growing out of
+ discussions on the state of symbolic computation at the University of
+ Waterloo. The authors wish to acknowledge many fruitful discussions
+ with colleagues at the University of Waterloo, particularly Morven
+ Gentleman, Michael Malcolm, and Frank Tompa. It was recognized in
+ these discussions that none of the locally-available systems for
+ symbolic computation provided the facilities that should be expected
+ for symbolic computation in modern computing environments. We
+ concluded that since the basic design decisions for the then-current
+ symbolic systems such as ALTRAN, CAMAL, REDUCE, and MACSYMA were based
+ on 1960's computing technology, it would be wise to design a new
+ system ``from scratch''. Thus we could take advantage of the software
+ engineering technology which had become available in recent years, as
+ well as drawing from the lessons of experience. Maple's basic features
+ (elementary data structures, Input/output, arithmetic with numbers,
+ and elementary simplification) are coded in a systems programming
+ language for efficiency."
+}
+
+@inproceedings{Chen92,
+ author = "Chen, Kung and Hudak, Paul and Odersky, Martin",
+ title = "Parametric Type Classes",
+ booktitle = "Proc. ACM Conf. on LISP and Functional Programming",
+ year = "1992",
+ pages = "170-181",
+ abstract =
+ "We propose a generalization to Haskell's type classes where a class
+ can have type parameters besides the placeholder variable. We show
+ that this generalization is essential to represent container classes
+ with overloaded data constructor and selector operations. We also show
+ that the resulting type system has principal types and present
+ unification and type reconstruction algorithms.",
+ paper = "Chen92.pdf"
+}
+
+@techreport{Coll90,
+ author = "Collins, George E. and Loos, Rudiger",
+ title = "Specification and Index of SAC-2 Algorithms",
+ institution = "Univ. of Tubingen",
+ type = "technical report",
+ year = "1990",
+ number = "WSI-90-4"
+}
+
+@inproceedings{Como90,
+ author = "Comon, Hubert",
+ title = "Equational Formulas in Order-sorted Algebras",
+ booktitle = "ICALP 90. Automata, Languages and Programming",
+ year = "1990",
+ pages = "674-688",
+ abstract =
+ "We propose a set of transformation rules for first order formulas
+ whose atoms are either equations between terms or ``sort constraints''
+ $t \in s$ where $s$ is a regular tree language (or a sort in the algebraic
+ specification community). This set of rules is proved to be correct,
+ terminating and complete. This shows in particular that the first
+ order theory of any rational tree language is decidable, extending the
+ results of [Mal71,CL89,Mah88]. We also show how to apply our results
+ to automatic inductive proofs in equational theories."
+}
+
+@article{Como91,
+ author = "Comon, Hubert and Lugiez, D. and Schnoebelen, Ph.",
+ title = "A Rewrite-based Type Discipline for a Subset of Computer Algebra",
+ journal = "J. Symbolic Computation",
+ volume = "11",
+ number = "4",
+ year = "1991",
+ pages = "349-368",
+ abstract =
+ "This paper is concerned with the type structure of a system including
+ polymorphism, type properties and subtypes. This type system
+ originates from computer algebra but it is not intended to be the
+ solution of all type problems in this area.
+
+ Types (or sets of types) are denoted by terms in some order-sorted
+ algebra. We consider a rewrite relation in this algebra, which is
+ intended to express subtyping. The relations between the semantics and
+ the axiomatization are investigated. It is shown that the problem of
+ type inference is undecidable but a narrowing strategy for
+ semi-decision procedures is described and studied.",
+ paper = "Como91.pdf"
+}
+
+@article{Cool92,
+ author = "Coolsaet, Kris",
+ title = "A Quick Introduction to the Programming Language MIKE",
+ journal = "Sigplan Notices",
+ volume = "27",
+ number = "6",
+ year = "1992",
+ pages = "37-48",
+ abstract =
+ "MIKE is a new programming language developed by the author as a base
+ language for the development of algebraic and symbolic algorithms. It
+ is a structured programming language with a MODULA-2-like syntax
+ supporting special features such as transparent dynamic memory
+ management, discriminated union types, operator overloading, data
+ abstraction and parametrized types. This text gives an overview of the
+ main features of the language as of version 2.0."
+}
+
+@inproceedings{Dama82,
+ author = "Damas, Luis and Milner, Robin",
+ title = "Principal Type-schemes for Functional Programs",
+ booktitle = "POPL 82",
+ pages = "207-212",
+ year = "1982",
+ isbn = "0-89798-065-6",
+ paper = "Dama82.pdf"
+}
+
+@book{Davi94,
+ author = "Davis, Martin D. and Sigal, Ron and Weyuker, Elaine J.",
+ title = "Computability, Complexity, and Languages: Fundamentals of
+ Theoretical Computer Science",
+ publisher = "Academic Press",
+ year = "1994",
+ isbn = "978-0122063824"
+}
+
+@techreport{Ders89,
+ author = "Dershowitz, Nachum and Jouannaud, Jean-Pierre",
+ title = "Rewrite Systems",
+ year = "1989",
+ number = "478",
+ institution = "Laboratoire de Recherche en Informatique",
+ link = "\url{http://www.cs.tau.ac.il/~nachum/papers/survey-draft.pdf}",
+ paper = "Ders89.pdf"
+}
+
+@book{Ehri85,
+ author = "Ehrig, Hartmut and Mahr, Bernd",
+ title = "Fundamentals of Algebraic Specification 1: Equations and
+ Initial Semantics",
+ publisher = "Springer Verlag",
+ year = "1985",
+ isbn = "978-0387137186"
+}
+
+@inproceedings{Elli89,
+ author = "Elliott, Conal M.",
+ title = "Higher-order Unification with Dependent Function Types",
+ booktitle = "Rewriting Techniques and Applications",
+ year = "1989",
+ pages = "121-136",
+ abstract =
+ "Roughly fifteen years ago, Huet developed a complete semidecision
+ algorithm for unification in the simply typed $\lambda$-calculus
+ ($\lambda_\rightarrow$). In spite of the undecidability of this
+ problem, his algorithm is quite usable in practice. Since then, many
+ important applications have come about in such areas as theorem
+ proving, type inference, program transformation, and machine learning.
+
+ Another development is the discovery that by enriching
+ $\lambda_\rightarrow$ to include {\sl dependent function types},
+ the resulting calculus ($\lambda_\Pi$) forms the basis of a very
+ elegant and expressive Logical Framework, encompassing the syntax,
+ rules, and proofs for a wide class of logics.
+
+ This paper presents an algorithm in the spirit of Huet's, for
+ unification in $\lambda_\Pi$. This algorithm gives us the best
+ of both worlds: the automation previously possible in
+ $\lambda_\rightarrow$ and the greatly enriched expressive power of
+ $\lambda_\Pi$. It can be used to considerable advantage in many
+ of the current applications of Huet's algorithm, and has important
+ new applications as well. These include automated and semi-automated
+ theorem proving in encoded logics, and automatic type inference in a
+ variety of encoded languages."
+}
+
+@article{Farm90,
+ author = "Farmer, William M.",
+ title = "A Partial Functions Version of Church's Simple Theory of Types",
+ journal = "The Journal of Symbolic Logic",
+ volume = "55",
+ number = "3",
+ year = "1990",
+ pages = "1269-1291",
+ abstract =
+ "Church's simple theory of types is a system of higher-order logic in
+ which functions are assumed to be total. We present in this paper a
+ version of Church's system called PF in which functions may be
+ partial. The semantics of PF, which is based on Henkin's
+ general-models semantics, allows terms to be nondenoting but requires
+ formulas to always denote a standard truth value. We prove that PF is
+ complete with respect to its semantics. The reasoning mechanism in PF
+ for partial functions corresponds closely to mathematical practice,
+ and the formulation of PF adheres tightly to the framework of
+ Church's system.",
+ paper = "Farm90.pdf"
+}
+
+@article{Faxe02,
+ author = "Faxen, Karl-Filip",
+ title = "A Static Semantics for Haskell",
+ year = "2002",
+ journal = "J. Functional Programming",
+ volume = "12",
+ number = "4-5",
+ pages = "295-357",
+ abstract =
+ "This paper gives a static semantics for Haskell 98, a non-strict
+ purely functional programming language. The semantics formally
+ specifies nearly all the details of the Haskell 98 type system, including the
+ resolution of overloading, kind inference (including defaulting) and
+ polymorphic recursion, the only major omission being a proper
+ treatment of ambiguous overloading and its resolution. Overloading is
+ translated into explicit dictionary passing, as in all current
+ implementations of Haskell. The target language of this translation is
+ a variant of the Girard-Reynolds polymorphic lambda calculus featuring
+ higher order polymorphism and explicit type abstraction and
+ application in the term language. Translated programs can thus still
+ be type checked, although the implicit version of this system is
+ impredicative. A surprising result of this formalization effort is that
+ the monomorphism restriction, when rendered in a system of inference
+ rules, compromises the principal type property.",
+ paper = "Faxe02.pdf"
+}
+
+@misc{Fija17,
+ author = "Fijalkow, Nathanael",
+ title =
+ "Computing using the generators of a group: the Schreier-Sims' algorithm",
+ year = "2017",
+ link = "\url{https://www.cs.ox.ac.uk/blogs/nathanael-fijalkow/2016/01/27/computing-using-the-generators-of-a-group/}"
+}
+
+@phdthesis{Fode83,
+ author = "Foderaro, John K.",
+ title = "The Design of a Language for Algebraic Computation Systems",
+ school = "U.C. Berkeley, EECS Dept.",
+ year = "1983",
+ link = "\url{http://digitalassets.lib.berkeley.edu/techreports/ucb/text/CSD-83-160.pdf}",
+ abstract =
+ "This thesis describes the design of a language to support a
+ mathematics-oriented symbolic algebra system. The language, which we
+ have named NEWSPEAK, permits the complex interrelations of
+ mathematical types, such as rings, fields and polynomials to be
+ described. Functions can be written over the most general type that
+ has the required operations and properties and then inherited by
+ subtypes. All function calls are generic, with most function
+ resolution done at compile time. Newspeak is type-safe, yet permits
+ runtime creation of types.",
+ paper = "Fode83.pdf",
+ keywords = "axiomref"
+}
+
+@book{Frey90,
+ author = "Freyd, Peter J. and Scedrov, Andre",
+ title = "Categories, Allegories",
+ publisher = "Elsevier Science",
+ year = "1990",
+ isbn = "0-444-70368-3"
+}
+
+@inproceedings{Frue91,
+ author = "Fruehwirth, Thom and Shapiro, Ehud and Vardi, Moshe Y. and
+ Yardeni, Eyal",
+ title = "Logic programs as types for logic programs",
+ booktitle = "Proc. Sixth Annual IEEE Symp. on Logic in Comp. Sci.",
+ publisher = "IEEE",
+ pages = "300-309",
+ year = "1991",
+ abstract =
+ "Type checking can be extremely useful to the program development process.
+ Of particular interest are descriptive type systems, which let the
+ programmer write programs without having to define or mention types.
+ We consider here optimistic type systems for logic programs. In such
+ systems types are conservative approximations to the success set of the
+ program predicates. We propose the use of logic programs to describe
+ types. We argue that this approach unifies the denotational and
+ operational approaches to descriptive type systems and is simpler
+ and more natural than previous approaches. We focus on the use of
+ unary-predicate programs to describe types. We identify a proper class
+ of unary-predicate programs and show that it is expressive enough to
+ express several notions of types. We use an analogy with 2-way automata
+ and a correspondence with alternating algorithms to obtain a complexity
+ characterization of type inference and type checking. This
+ characterization was facilitated by the use of logic programs to
+ represent types.",
+ paper = "Frue91.pdf"
+}
+
+@article{Fuhx89,
+ author = "Fuh, You-Chin and Mishra, Prateek",
+ title = "Polymorphic Subtype Inference -- Closing the Theory-Practice Gap",
+ journal = "Lecture Notes in Computer Science",
+ volume = "352",
+ year = "1989",
+ pages = "167-183",
+ paper = "Fuhx89.pdf"
+}
+
+@article{Fuhx90,
+ author = "Fuh, You-Chin",
+ title = "Type Inference with Subtypes",
+ journal = "Theoretical Computer Science",
+ volume = "73",
+ number = "2",
+ year = "1990",
+ pages = "155-175",
+ abstract =
+ "We extend polymorphic type inference with a very general notion of
+ subtype based on the concept of type transformation. This paper
+ describes the following results. We prove the existence of (i)
+ principal type property and (ii) syntactic completeness of the
+ type-checker, for type inference with subtypes. This result is
+ developed with only minimal assumptions on the underlying theory of
+ subtypes. As a consequence, it can be used as the basis for type
+ inference with a broad class of subtype theories. For a particular
+ “structural” theory of subtypes, those engendered by inclusions
+ between type constants only, we show that principal types are
+ compactly expressible. This suggests that type inference for the
+ structured theory of subtypes is feasible. We describe algorithms
+ necessary for such a system. The main algorithm we develop is called
+ MATCH, an extension to the classical unification algorithm. A proof of
+ correctness for MATCH is given.",
+ paper = "Fuhx90.pdf"
+}
+
+@misc{GAPx17,
+ author = "The GAP Group",
+ title = "GAP - Reference Manual",
+ year = "2017",
+ link = "\url{https://www.gap-system.org/Manuals/doc/ref/manual.pdf}"
+}
+
+@phdthesis{Gira72,
+ author = "Girard, Jean-Yves",
+ title = {Interpr\'etation fonctionnelle et \'elimination des coupures de
+ l'arithm\'etique d'ordre sup\'erieur},
+ school = {Universit\'e Paris VII},
+ year = "1972"
+}
+
+@book{Gira89,
+ author = "Girard, Jean-Yves",
+ title = "Proofs and Types",
+ publisher = "Cambridge University Press",
+ year = "1989"
+}
+
+@misc{Gode58,
+ author = "Godel, Kurt",
+ title = {\"Uber eine bisher noch nicht benutzte Erweiterung des Finiten
+ Standpunktes},
+ journal = "Dialectica 12",
+ year = "1958",
+ pages = "280-287"
+}
+
+@techreport{Gogu89,
+ author = "Goguen, Joseph and Meseguer, Jose",
+ title = "Order-sorted Algebra I : Equational Deduction for Multiple
+ Inheritance, Overloading, Exceptions, and Partial Operations",
+ type = "technical report",
+ institution = "SRI International",
+ year = "1989",
+ number = "SRIR 89-10"
+}
+
+@article{Gogu92,
+ author = "Goguen, Joseph and Meseguer, Jose",
+ title = "Order-sorted Algebra I : Equational Deduction for Multiple
+ Inheritance, Overloading, Exceptions, and Partial Operations",
+ journal = "Theoretical Computer Science",
+ volume = "105",
+ number = "2",
+ year = "1992",
+ pages = "217-273",
+ abstract =
+ "This paper generalizes many-sorted algebra (MSA) to order-sorted
+ algebra (OSA) by allowing a partial ordering relation on the set of
+ sorts. This supports abstract data types with multiple inheritance (in
+ roughly the sense of object-oriented programming), several forms of
+ polymorphism and overloading, partial operations (as total on
+ equationally defined subsorts), exception handling, and an operational
+ semantics based on term rewriting. We give the basic algebraic
+ constructions for OSA, including quotient, image, product and term
+ algebra, and we prove their basic properties, including quotient,
+ homomorphism, and initiality theorems. The paper's major mathematical
+ results include a notion of OSA deduction, a completeness theorem for
+ it, and an OSA Birkhoff variety theorem. We also develop conditional
+ OSA, including initiality, completeness, and McKinsey-Malcev
+ quasivariety theorems, and we reduce OSA to (conditional) MSA, which
+ allows lifting many known MSA results to OSA. Retracts, which
+ intuitively are left inverses to subsort inclusions, provide
+ relatively inexpensive run-time error handling. We show that it is
+ safe to add retracts to any OSA signature, in the sense that it gives
+ rise to a conservative extension. A final section compares and
+ contrasts many different approaches to OSA. This paper also includes
+ several examples demonstrating the flexibility and applicability of
+ OSA, including some standard benchmarks like stack and list, as well
+ as a much more substantial example, the number hierarchy from the
+ naturals up to the quaternions.",
+ paper = "Gogu92.pdf"
+}
+
+@book{Gold83,
+ author = "Goldberg, Adele and Robson, David",
+ title = "Smalltalk-80: The Language and Its Implementation",
+ publisher = "Addison-Wesley",
+ year = "1983"
+}
+
+@article{Goll90,
+ author = "Gollan, H. and Grabmeier, J.",
+ title = "Algorithms in Representation Theory and their
+ Realization in the Computer Algebra System Scratchpad",
+ journal = "Bayreuther Mathematische Schriften",
+ volume = "33",
+ year = "1990",
+ pages = "1-23",
+ algebra = "\newline\refto{package REP1 RepresentationPackage1}",
+ keywords = "axiomref"
+}
+
+@article{Gons71,
+ author = "Gonshor, H.",
+ title = "Contributions to Genetic Algebras",
+ journal = "Proc. Edinburgh Mathematical Society (Series 2)",
+ volume = "17",
+ number = "4",
+ month = "December",
+ year = "1971",
+ issn = "1464-3839",
+ pages = "289--298",
+ doi = "10.1017/S0013091500009548",
+ link = "\url{http://journals.cambridge.org/article_S0013091500009548}",
+ algebra = "\newline\refto{domain ALGSC AlgebraGivenByStructuralConstants}",
+ abstract =
+ "Etherington introduced certain algebraic methods into the study of
+ population genetics. It was noted that algebras arising in genetic
+ systems tend to have certain abstract properties and that these can be
+ used to give elegant proofs of some classical stability theorems in
+ population genetics."
+}
+
+@misc{Gowe17,
+ author = "Gowers, Timothy",
+ title = "Group actions II: the orbit-stabilizer theorem",
+ year = "2017",
+ link = "\url{https://gowers.wordpress.com/2011/11/09/group-actions-ii-the-orbit-stabilizer-theorem/}"
+}
+
+@article{Grab87,
+ author = "Grabmeier, Johannes and Kerber, Adalbert",
+ title = "The Evaluation of Irreducible Polynomial
+ Representations of the General Linear Groups
+ and of the Unitary Groups over Fields of
+ Characteristic 0",
+ journal = "Acta Applicandae Mathematica",
+ volume = "8",
+ year = "1987",
+ pages = "271-291",
+ algebra = "\newline\refto{package REP1 RepresentationPackage1}",
+ abstract =
+ "We describe an efficient method for the computer evaluation of the
+ ordinary irreducible polynomial representations of general linear
+ groups using an integral form of the ordinary irreducible
+ representations of symmetric groups. In order to do this, we first
+ give an algebraic explanation of D. E. Littlewood's modification of
+ I. Schur's construction. Then we derive a formula for the entries of
+ the representing matrices which is much more concise and adapted to
+ the effective use of computer calculations. Finally, we describe how
+ one obtains — using this time an orthogonal form of the ordinary
+ irreducible representations of symmetric groups — a version which
+ yields a unitary representation when it is restricted to the unitary
+ subgroup. In this way we adapt D. B. Hunter's results which heavily
+ rely on Littlewood's methods, and boson polynomials come into the play
+ so that we also meet the needs of applications to physics.",
+ keywords = "axiomref"
+}
+
+@book{Grie78,
+ author = "Gries, David",
+ title = "Programming Methodology",
+ publisher = "Springer-Verlag",
+ year = "1978"
+}
+
+@book{Grae79,
+ author = "Graetzer, George",
+ title = "Universal Algebra",
+ publisher = "Springer",
+ isbn = "978-0-387-77486-2",
+ year = "1979",
+ paper = "Grae79.pdf"
+}
+
+@article{Harp93,
+ author = "Harper, Robert and Honsell, Furio and Plotkin, Gordon",
+ title = "A Framework for Defining Logics",
+ journal = "J. ACM",
+ volume = "40",
+ number = "1",
+ year = "1993",
+ pages = "143-184",
+ abstract =
+ "The Edinburgh Logical Framework (LF) provides a means to define (or
+ present) logics. It is based on a general treatment of syntax, rules,
+ and proofs by means of a typed $\lambda$-calculus with dependent
+ types. Syntax is treated in a style similar to, but more general than,
+ Martin-Lof's system of arities. The treatment of rules and proofs
+ focuses on his notion of a judgment. Logics are represented in LF via
+ a new principle, the judgments as types principle, whereby each
+ judgment is identified with the type of its proofs. This allows for a
+ smooth treatment of discharge and variable occurrence conditions and
+ leads to a uniform treatment of rules and proofs whereby rules are
+ viewed as proofs of higher-order judgments and proof checking is
+ reduced to type checking. The practical benefit of our treatment of
+ formal systems is that logic-independent tools, such as proof editors
+ and proof checkers, can be constructed.",
+ paper = "Harp93.pdf"
+}
+
+@article{Hind69,
+ author = "Hindley, R.",
+ title = "The Principal Type-Scheme of an Object in Combinatory Logic",
+ journal = "Trans. AMS",
+ volume = "146",
+ year = "1969",
+ pages = "29-60",
+ paper = "Hind69.pdf"
+}
+
+@article{Hodg95,
+ author = "Hodges, Wilfrid",
+ title = "The Meaning of Specifications I: Domains and Initial Models",
+ journal = "Theoretical Computer Science",
+ volume = "192",
+ issue = "1",
+ year = "1995",
+ pages = "67-89",
+ abstract =
+ "This is the first of a short series of papers intended to provide one
+ common semantics for several different types of specification
+ language, in order to allow comparison and translations. The
+ underlying idea is that a specification describes the behaviour of a
+ system, depending on parameters. We can represent this behaviour as a
+ functor which acts on structures representing the parameters, and
+ which yields a structure representing the behaviour. We characterise
+ in domain-theoretic terms the class of functors which could in
+ principle be specified and implemented; briefly, they are the functors
+ which preserve directed colimits and whose restriction to finitely
+ presented structures is recursively enumerable. We also characterise
+ those functors which allow specification by initial semantics in
+ universal Horn classes with finite vocabulary; these functors consist
+ of a free functor (i.e. left adjoint of a forgetful functor) followed
+ by a forgetful functor. The main result is that these two classes of
+ functor are the same up to natural isomorphism.",
+ paper = "Hodg95.pdf"
+}
+
+@techreport{Howe87,
+ author = "Howe, Douglas J.",
+ title = "The Computational Behaviour of Girard's Paradox",
+ institution = "Cornell University",
+ year = "1987",
+ link = "\url{https://ecommons.cornell.edu/handle/1813/6660}",
+ number = "TR 87-820",
+ abstract =
+ "In their paper ``Type'' Is Not a Type, Meyer and Reinhold argued that
+ serious pathologies can result when a type of all types is added to a
+ programing language with dependent types. Central to their argument is
+ the claim that by following the proof of Girard's paradox it is
+ possible to construct in their calculus $\lambda^{\tau \tau}$ a term
+ having a fixed-point property. Because of the tremendous amount of
+ formal detail involved, they were unable to establish this claim. We
+ have made use of the Nuprl proof development system in constructing a
+ formal proof of Girard's paradox and analysing the resulting term. We
+ can show that the term does not have the desired fixed-point property,
+ but does have a weaker form of it that is sufficient to establish some
+ of the results of Meyer and Reinhold. We believe that the method used
+ here is in itself of some interest, representing a new kind of
+ application of a computer to a problem in symbolic logic."
+}
+
+@article{Huda92,
+ author = "Hudak, Paul and Jones, Simon Peyton and Wadler, Philip and
+ Boutel, Brian and Fairbairn, Jon and Fasel, Joseph and
+ Guzman, Maria M. and Hammond, Kevin and Hughes, John and
+ Johnsson, Thomas and Kieburtz, Dick and Nikhil, Rishiyur and
+ Partain, Will and Peterson, John",
+ title = "Report on the Programming Language Haskell, a non-strict
+ functional language version 1.2",
+ journal = "ACM SIGPLAN Notices",
+ volume = "27",
+ number = "5",
+ year = "1992",
+ pages = "1-164",
+ abstract =
+ "Some half dozen persons have written technically on combinatory
+ logic, and most of these, including ourselves, have published
+ something erroneous. Since some of our fellow sinners are among the
+ most careful and competent logicians on the contemporary scene, we
+ regard this as evidence that the subject is refractory. Thus fullness
+ of exposition is necessary for accuracy; and excessive condensation
+ would be false economy here, even more than it is ordinarily."
+}
+
+@misc{Huda99,
+ author = "Hudak, Paul and Peterson, John and Fasel, Joseph H.",
+ title = "A Gentle Introduction to Haskell 98",
+ year = "1999",
+ link = "\url{https://www.haskell.org/tutorial/haskell-98-tutorial.pdf}",
+ paper = "Huda99.pdf"
+}
+
+@book{Huet91,
+ author = "Huet, Gerard and Plotkin, G.",
+ title = "Logical Frameworks",
+ publisher = "Cambridge University",
+ year = "1991"
+}
+
+@article{Jame81,
+ author = "James, G. and Kerber, A.",
+ title = "The Representation Theory of the Symmetric Group",
+ journal = "Encycl. of Math. and its Appl.",
+ volume = "16",
+ algebra = "\newline\refto{package REP1 RepresentationPackage1}",
+ publisher = "Cambr. Univ. Press",
+ year = "1981"
+}
+
+@book{Jone87,
+ author = "Jones, Simon Peyton",
+ title = "The Implementation of Functional Programming Languages",
+ publisher = "Simon and Schuster",
+ year = "1987",
+ isbn = "0-13-453333-X",
+ paper = "Jone87.pdf"
+}
+
+@book{Joua90,
+ author = "Jouannaud, Jean-Pierre and Kirchner, Claude",
+ title = "Solving Equations in Abstract Algebras: A Rule-based Survey of
+ Unification",
+ year = "1990",
+ publisher = "Universite de Paris-Sud"
+}
+
+@inproceedings{Joua91,
+ author = "Jouannaud, Jean Pierre and Okada, Mitsuhiro",
+ title = "A Computation Model for Executable Higher-order Algebraic
+ Specification Languages",
+ booktitle = "Symposium on Logic in Computer Science",
+ pages = "350-361",
+ isbn = "081862230X",
+ year = "1991",
+ abstract =
+ "The combination of (polymorphically) typed lambda-calculi with
+ first-order as well as higher-order rewrite rules is considered. The
+ need of such a combination for exploiting the benefits of
+ algebraically defined data types within functional programming is
+ demonstrated. A general modularity result, which allows as particular
+ cases primitive recursive functionals of higher types, transfinite
+ recursion of higher types, and inheritance for all types, is
+ proved. The class of languages considered is first defined, and it is
+ shown how to reduce the Church-Rosser and termination (also called
+ strong normalization) properties of an algebraic functional language
+ to a so-called principal lemma whose proof depends on the property to
+ be proved and on the language considered. The proof of the principal
+ lemma is then sketched for various languages. The results allows
+ higher order rules defining the higher-order constants by a certain
+ generalization of primitive recursion. A prototype of such primitive
+ recursive definitions is provided by the definition of the map
+ function for lists.",
+ paper = "Joua91.pdf"
+}
+
+@article{Kaes92,
+ author = "Kaes, Stefan",
+ title = "Type Inference in the Presence of Overloading, Subtyping, and
+ Recursive Types",
+ journal = "LISP Pointers",
+ volume = "V",
+ number = "1",
+ pages = "193-204",
+ year = "1992",
+ paper = "Kaes92.pdf"
+}
+
+@incollection{Kalt83a,
+ author = "Kaltofen, E.",
+ title = "Factorization of Polynomials",
+ booktitle = "Computer Algebra - Symbolic and Algebraic Computation",
+ publisher = "ACM",
+ pages = "95-113",
+ year = "1983",
+ abstract =
+ "Algorithms for factoring polynomials in one or more variables over
+ various coefficient domains are discussed. Special emphasis is given
+ to finite fields, the integers, or algebraic extensions of the
+ rationals, and to multivariate polynomials with integral coefficients.
+ In particular, various squarefree decomposition algorithms and Hensel
+ lifting techniques are analyzed. An attempt is made to establish a
+ complete historic trace for today’s methods. The exponential worst
+ case complexity nature of these algorithms receives attention.",
+ paper = "Kalt83a.pdf"
+}
+
+@techreport{Kane90,
+ author = "Kanellakis, Paris C. and Mairson, Harry G. and Mitchell, John C.",
+ title = "Unification and ML Type Reconstruction",
+ link = "\url{ftp://ftp.cs.brown.edu/pub/techreports/90/cs90-26.pdf}",
+ institution = "Brown University",
+ year = "1990",
+ number = "CS-90-26",
+ abstract =
+ "We study the complexity of type reconstruction for a core fragment of
+ ML with lambda abstraction, function application, and the polymorphic
+ {\bf let} declaration. We derive exponential upper and lower bounds on
+ recognizing the typable core ML expressions. Our primary technical
+ tool is unification of succinctly represented type expressions. After
+ observing that core ML expressions, of size $n$, can be typed in
+ DTIME($s^n$), we exhibit two different families of programs whose
+ principal types grow exponentially. We show how to exploit the
+ expressiveness of the {\bf let}-polymorphism in these constructions to
+ derive lower bounds on deciding typability: one leads naturally to
+ NP-hardness and the other to DTIME($2^{n^k}$)-hardness for each integer
+ $k\ge 1$. Our generic simulation of any exponential time Turing
+ Machine by ML type reconstruction may be viewed as a nonstandard way
+ of computing with types. Our worst-case lower bounds stand in contrast
+ to practical experience, which suggests that commonly used algorithms
+ for type reconstruction do not slow compilation substantially.",
+ paper = "Kane90.pdf"
+}
+
+@inproceedings{Kfou88,
+ author = "Kfoury, A.J. and Tiuryn, J. and Utzyczyn, P.",
+ title = "A Proper Extension of ML with an Effective Type-Assignment",
+ booktitle = "POPL 88",
+ year = "1988",
+ pages = "58-69",
+ abstract =
+ "We extend the functional language ML by allowing the recursive calls
+ to a function F on the right-hand side of its definition to be at
+ different types, all generic instances of the (derived) type of F on
+ the left-hand side of its definition. The original definition of ML
+ does not allow this feature. This extension does not produce new types
+ beyond the usual universal polymorphic types of ML and satisfies the
+ properties already enjoyed by ML: the principal-type property and the
+ effective type-assignment property.",
+ paper = "Kfou88.pdf"
+}
+
+@article{Kfou93,
+ author = "Kfoury, A. J. and Tiuryn, J. and Urzyczyn, P.",
+ title = "The Undecidability of the Semi-unification Problem",
+ journal = "Information and Computation",
+ volume = "102",
+ number = "1",
+ year = "1993",
+ pages = "83-101",
+ abstract =
+ "The Semi-Unification Problem (SUP) is a natural generalization of
+ both first-order unification and matching. The problem arises in
+ various branches of computer science and logic. Although several
+ special cases of SUP are known to be decidable, the problem in general
+ has been open for several years. We show that SUP in general is
+ undecidable, by reducing what we call the ``boundedness problem'' of
+ Turing machines to SUP. The undecidability of this boundedness problem
+ is established by a technique developed in the mid-1960s to prove
+ related results about Turing machines.",
+ paper = "Kfou93.pdf"
+}
+
+@inproceedings{Kife91,
+ author = "Kifer, Michael and Wu, James",
+ title = "A First-order Theory of Types and Polymorphism in Logic
+ Programming",
+ booktitle = "Proc Sixth Annual IEEE Symp. on Logic in Comp. Sci.",
+ year = "1991",
+ pages = "310-321",
+ abstract =
+ "A logic called typed predicate calculus (TPC) that gives declarative
+ meaning to logic programs with type declarations and type inference is
+ introduced. The proper interaction between parametric and inclusion
+ varieties of polymorphism is achieved through a construct called type
+ dependency, which is analogous to implication types but yields more
+ natural and succinct specifications. Unlike other proposals where
+ typing has extra-logical status, in TPC the notion of type-correctness
+ has precise model-theoretic meaning that is independent of any
+ specific type-checking or type-inference procedure. Moreover, many
+ different approaches to typing that were proposed in the past can be
+ studied and compared within the framework of TPC. Another novel
+ feature of TPC is its reflexivity with respect to type declarations;
+ in TPC, these declarations can be queried the same way as any other
+ data. Type reflexivity is useful for browsing knowledge bases and,
+ potentially, for debugging logic programs.",
+ paper = "Kife91.pdf"
+}
+
+@book{Kirk89,
+ author = "Kirkerud, Bjorn",
+ title = "Object-Oriented Programming With Simula",
+ year = "1989",
+ series = "International Computer Science Series",
+ publisher = "Addison-Wesley"
+}
+
+@techreport{Klop90,
+ author = "Klop, J. W.",
+ title = "Term Rewriting Systems",
+ institution = "Stichting Mathematisch Centrum",
+ year = "1990",
+ number = "CS-R9073",
+ abstract =
+ "Term Rewriting Systems play an important role in various areas, such
+ as abstract data type specifications, implementations of functional
+ programming languages and automated deduction. In this chapter we
+ introduce several of the basic concepts and facts for
+ TRSs. Specifically, we discuss Abstract Reduction Systems; general
+ Term Rewriting Systems including an account of Knuth-Bendix completion
+ and (E- )unification; orthogonal TRSs and reduction strategies;
+ strongly sequential orthogonal TRS. Finally some extended rewrite
+ formates are introduced: Conditional TRSs and Combinatory Reduction
+ Systems. The emphasis throughout the paper is on providing information
+ of a syntactic nature."
+}
+
+@book{Kowa63,
+ author = "Kowalsky, Hans Joachim",
+ title = "Linear Algebra",
+ year = "1963",
+ publisher = "Walter de Gruyter",
+ comment = "(German)"
+}
+
+@book{Lang05,
+ author = "Lang, Serge",
+ title = "Algebra",
+ publisher = "Springer",
+ year = "2005",
+ series = "Graduate Texts in Mathematics",
+ isbn = "978-0387953854"
+}
+
+@InCollection{Laue82,
+ author = "Lauer, M.",
+ title = "Computing by Homomorphic Images",
+ booktitle = "Computer Algebra: Symbolic and Algebraic Computation",
+ pages = "139-168",
+ year = "1982",
+ publisher = "Springer",
+ isbn = "978-3-211-81684-4",
+ abstract =
+ "After explaining the general technique of Computing by homomorphic
+ images, the Chinese remainder algorithm and the Hensel lifting
+ construction are treated extensively. Chinese remaindering is first
+ presented in an abstract setting. Then the specialization to Euclidean
+ domains, in particular $\mathbb{Z}$, $\mathbb{K}[y]$, and
+ $\mathbb{Z}[y_1,\ldots,y_n]$ is considered. For both techniques,
+ Chinese remaindering as well as the lifting algorithms, a complete
+ computational example is presented and the most frequent application
+ is discussed."
+}
+
+@inproceedings{Leis87,
+ author = "Leiss, Hans",
+ title = "On Type Inference for Object-Oriented Programming Languages",
+ booktitle = "Int. Workshop on Computer Science Logic",
+ year = "1987",
+ pages = "151-172",
+ abstract =
+ "We present a type inference calculus for object-oriented programming
+ languages. Explicit polymorphic types, subtypes and multiple
+ inheritance are allowed. Class types are obtained by selection from
+ record types, but not considered subtypes of record types. The subtype
+ relation for class types reflects the (mathematically clean)
+ properties of subclass relations in object-oriented programming to a
+ better extent than previous systems did.
+
+ Based on Mitchells models for type inference, a semantics for types is
+ given where types are sets of values in a model of type-free lambda
+ calculus. For the sublanguage without type quantifiers and subtype
+ relation, automatic type inference is possible by extending Milners
+ algorithm W to deal with a polymorphic fixed-point rule."
+}
+
+@article{Limo92,
+ author = "Limongelli, C. and Temperini, M.",
+ title = "Abstract Specification of Structures and Methods in Symbolic
+ Mathematical Computation",
+ journal = "Theoretical Computer Science",
+ volume = "104",
+ year = "1992",
+ pages = "89-107",
+ abstract =
+ "This paper describes a methodology based on the object-oriented
+ programming paradigm, to support the design and implementation of a
+ symbolic computation system. The requirements of the system are
+ related to the specification and treatment of mathematical
+ structures. This treatment is considered from both the numerical and
+ the symbolic points of view. The resulting programming system should
+ be able to support the formal definition of mathematical data
+ structures and methods at their highest level of abstraction, to
+ perform computations on instances created from such definitions, and
+ to handle abstract data structures through the manipulation of their
+ logical properties. Particular consideration is given to the
+ correctness aspects. Some examples of convenient application of the
+ proposed design methodology are presented.",
+ paper = "Limo92.pdf"
+}
+
+@inproceedings{Linc92,
+ author = "Lincoln, Patrick and Mitchell, John C.",
+ title = "Algorithmic Aspects of Type Inference with Subtypes",
+ booktitle = "POPL 92",
+ pages = "293-304",
+ year = "1992",
+ abstract =
+ "We study the complexity of type inference for programming languages
+ with subtypes. There are three language variations that effect the
+ problem: (i) basic functions may have polymorphic or more limited
+ types, (ii) the subtype hierarchy may be fixed or vary as a result of
+ subtype declarations within a program, and (iii) the subtype hierarchy
+ may be an arbitrary partial order or may have a more restricted form,
+ such as a tree or lattice. The naive algorithm for inferring a most
+ general polymorphic type, under variable subtype hypotheses, requires
+ deterministic exponential time. If we fix the subtype ordering, this
+ upper bound grows to nondeterministic exponential time. We show that
+ it is NP-hard to decide whether a lambda term has a type with respect
+ to a fixed subtype hierarchy (involving only atomic type names). This
+ lower bound applies to monomorphic or polymorphic languages. We give
+ PSPACE upper bounds for deciding polymorphic typability if the subtype
+ hierarchy has a lattice structure or the subtype hierarchy varies
+ arbitrarily. We also give a polynomial time algorithm for the limited
+ case where there are no function constants and the type hierarchy
+ is either variable or any fixed lattice.",
+ paper = "Linc92.pdf"
+}
+
+@inproceedings{Lint10,
+ author = "Linton, S. and Hammond, K. and Konovalov, A. and Al Zain, A.D.
+ and Trinder, P. and Horn, P.",
+ title = "Easy Composition of Symbolic Computation Software: A New Lingua
+ Franca for Symbolic Computation",
+ booktitle = "Proc. ISSAC 2010",
+ publisher = "ACM",
+ year = "2010",
+ pages = "339-346",
+ abstract =
+ "We present the results of the first four years of the European
+ research project SCIEnce (www.symbolic-computation.org),
+ which aims to provide key infrastructure for symbolic computation
+ research. A primary outcome of the project is that we have developed
+ a new way of combining computer algebra systems using the Symbolic
+ Computation Software Composability Protocol (SCSCP), in which both
+ protocol messages and data are encoded in the OpenMath format. We
+ describe SCSCP middleware and APIs, outline some implementations for
+ various Computer Algebra Systems (CAS), and show how SCSCP-compliant
+ components may be combined to solve scientific problems that can not
+ be solved within a single CAS, or may be organised into a system for
+ distributed parallel computations.",
+ paper = "Lint10.pdf"
+}
+
+@article{Loos72,
+ author = "Loos, Rudiger",
+ title = "Algebraic Algorithm Descriptions as Programs",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "23",
+ year = "1972",
+ pages = "16-24",
+ abstract =
+ "We propose methods for writing algebraic programs in an algebraic
+ notation. We discuss the advantages of this approach and a specific
+ example",
+ paper = "Loos72.pdf"
+}
+
+@article{Loos76,
+ author = "Loos, Rudiger",
+ title = "The Algorithm Description Language (ALDES) (report)",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "10",
+ number = "1",
+ year = "1976",
+ pages = "14-38",
+ abstract =
+ "ALDES is a formalization of the method to describe algorithms used in
+ Knuth's books. The largest documentation of algebraic algorithms,
+ Collins' SAC system for Computer Algebra, is written in this
+ language. In contrast to PASCAL it provides automatic storage
+ deallocation. Compared to LISP equal emphasis was placed on efficiency
+ of arithmetic, list processing, and array handling. To allow the
+ programmer full control of efficiency all mechanisms of the system are
+ accessible to him. Currently ALDES is available as a preprocessor to
+ ANSI Fortran, using no additional primitives.",
+ paper = "Loos76.pdf"
+}
+
+@article{Loos74,
+ author = "Loos, Ruediger G. K.",
+ title = "Toward a Formal Implementation of Computer Algebra",
+ journal = "SIGSAM",
+ volume = "8",
+ number = "3",
+ pages = "9-16",
+ year = "1974",
+ abstract =
+ "We consider in this paper the task of synthesizing an algebraic
+ system. Today the task is significantly simpler than in the pioneer
+ days of symbol manipulation, mainly because of the work done by the
+ pioneers in our area, but also because of the progress in other areas
+ of Computer Science. There is now a considerable collection of
+ algebraic algorithms at hand and a much better understanding of data
+ structures and programming constructs than only a few years ago.",
+ paper = "Loos74.pdf",
+ keywords = "axiomref"
+}
+
+@book{Loos92,
+ author = "Loos, Rudiger and Collins, George E.",
+ title = "Revised Report on the Algorithm Language ALDES",
+ publisher = "Institut fur Informatik",
+ year = "1992"
+}
+
+@book{Macl91,
+ author = "MacLane, Saunders",
+ title = "Categories for the Working Mathematician",
+ publisher = "Springer",
+ year = "1991",
+ isbn = "0-387-98403-8",
+ link = "\url{http://www.maths.ed.ac.uk/~aar/papers/maclanecat.pdf}",
+ paper = "Macl91.pdf"
+}
+
+@book{Macl92,
+ author = "MacLane, Saunders",
+ title = "Sheaves in Geometry and Logic: A First Introduction to Topos
+ Theory",
+ year = "1992",
+ isbn = "978-0-387-97710-2",
+ publisher = "Springer"
+}
+
+@book{Mane76,
+ author = "Manes, Ernest G.",
+ title = "Algebraic Theories",
+ publisher = "Springer",
+ year = "1976",
+ series = "Graduate Texts in Mathematics",
+ isbn = "978-1-4612-9860-1"
+}
+
+@book{Marc77,
+ author = "Marcus, Daniel A.",
+ title = "Number Fields",
+ publisher = "Springer",
+ year = "1977",
+ isbn = "978-0387902791"
+}
+
+@inproceedings{Meye86,
+ author = "Meyer, Albert R. and Reinhold, Mark B.",
+ title = "Type is not a type",
+ booktitle = "POPL 86",
+ pages = "287-295",
+ year = "1986",
+ abstract =
+ "A function has a dependent type when the type of its result
+ depends upon the value of its argument. Dependent types originated in
+ the type theory of intuitionistic mathematics and have reappeared
+ independently in programming languages such as CLU, Pebble, and
+ Russell. Some of these languages make the assumption that there exists
+ a type-of-all-types which is its own type as well as the type
+ of all other types. Girard proved that this approach is inconsistent
+ from the perspective of intuitionistic logic. We apply Girard's
+ techniques to establish that the type-of-all-types assumption creates
+ serious pathologies from a programming perspective: a system using
+ this assumption is inherently not normalizing, term equality is
+ undecidable, and the resulting theory fails to be a conservative
+ extension of the theory of the underlying base types. The failure of
+ conservative extension means that classical reasoning about programs
+ in such a system is not sound.",
+}
+
+@book{Meye88,
+ author = "Meyer, Bertrand",
+ title = "Object-Oriented Software Construction",
+ year = "1988",
+ publisher = "Prentice Hall",
+ link = "\url{https://sophia.javeriana.edu.co/~cbustaca/docencia/POO-2016-01/documentos/Object%20Oriented%20Software%20Construction-Meyer.pdf}",
+ paper = "Meye88.pdf"
+}
+
+@book{Miln90,
+ author = "Milner, Robin and Tofte, Mads and Harper, Robert",
+ title = "The Definition of Standard ML",
+ publisher = "Lab for Foundations of Computer Science, Univ. Edinburgh",
+ link = "\url{http://sml-family.org/sml90-defn.pdf}",
+ year = "1990",
+ paper = "Miln90.pdf"
+}
+
+@book{Miln91,
+ author = "Milner, Robin and Tofte, Mads",
+ title = "Commentary on Standard ML",
+ publisher = "Lab for Foundations of Computer Science, Univ. Edinburgh",
+ link = "\url{https://pdfs.semanticscholar.org/d199/16cbbda01c06b6eafa0756416e8b6f15ff44.pdf}",
+ year = "1991",
+ paper = "Miln91.pdf"
+}
+
+@article{Mitc91,
+ author = "Mitchell, John C.",
+ title = "Type Inference with Simple Subtypes",
+ journal = "J. of Functional Programming",
+ volume = "1",
+ number = "3",
+ year = "1991",
+ pages = "245-285",
+ abstract =
+ "Subtyping appears in a variety of programming languages, in the form
+ of the ‘automatic coercion’ of integers to reals, Pascal subranges,
+ and subtypes arising from class hierarchies in languages with
+ inheritance. A general framework based on untyped lambda calculus
+ provides a simple semantic model of subtyping and is used to
+ demonstrate that an extension of Curry's type inference rules are
+ semantically complete. An algorithm G for computing the most general
+ typing associated with any given expression, and a restricted,
+ optimized algorithm GA using only atomic subtyping hypotheses are
+ developed. Both algorithms may be extended to insert type conversion
+ functions at compile time or allow polymorphic function declarations
+ as in ML.",
+ paper = "Mitc91.pdf"
+}
+
+@InCollection{Mitc91a,
+ author = "Mitchell, John C.",
+ title = "Type Systems for Programming Languages",
+ booktitle = "Handbook of Theoretical Computer Science (Vol B.)",
+ pages = "365-458",
+ year = "1991",
+ publisher = "MIT Press",
+ isbn = "0-444-88074-7"
+}
+
+@book{Monk76,
+ author = "Monk, J. Donald",
+ title = "Mathematical Logic",
+ publisher = "Springer",
+ year = "1976",
+ isbn = "978-1-4684-9452-5"
+}
+
+@incollection{Nada92,
+ author = "Nadathur, Gopalan and Pfenning, Frank",
+ title = "The Type System of a Higher-Order Logic Programming Language",
+ booktitle = "Types in Logic Programming",
+ isbn = "9780262161312",
+ publisher = "MIT Press",
+ year = "1992"
+}
+
+@inproceedings{Nipk91,
+ author = "Nipkow, Tobias and Snelting, Gregor",
+ title = "Type Classes and Overloading Resolution via Order-Sorted
+ Unification",
+ booktitle = "Proc 5th ACM Conf. Functional Prog. Lang. and Comp. Arch.",
+ year = "1991",
+ publisher = "Springer",
+ journal = "LNCS",
+ volume = "523",
+ pages = "1-14",
+ abstract =
+ "We present a type inference algorithm for a Haskell-like language
+ based on order-sorted unification. The language features polymorphism,
+ overloading, type classes and multiple inheritance. Class and instance
+ declarations give rise to an order-sorted algebra of types. Type
+ inference essentially reduces to the Hindley/Milner algorithm where
+ unification takes place in this order-sorted algebra of types. The
+ theory of order-sorted unification provides simple sufficient
+ conditions which ensure the existence of principal types. The
+ semantics of the language is given by a translation into ordinary
+ lambda-calculus. We prove the correctness of our type inference
+ algorithm with respect to this semantics.",
+ paper = "Nipk91.pdf"
+}
+
+@book{Odif92,
+ author = "Odifreddi, Piergiorgio",
+ title = "Classical Recursion Theory: The Theory of Functions and Sets of
+ Natural Numbers",
+ publisher = "Elsevier",
+ year = "1992"
+}
+
+@article{Pate78,
+ author = "Paterson, M. S.",
+ title = "Linear Unification",
+ journal = "J. Computer and System Sciences",
+ volume = "16",
+ number = "2",
+ year = "1978",
+ pages = "158-167",
+ abstract =
+ "A unification algorithm is described which tests a set of expressions
+ for unifiability and which requires time and space which are only linear
+ in the size of the input",
+ paper = "Pate78.pdf"
+}
+
+@inproceedings{Pfen91,
+ author = "Pfenning, Frank",
+ title = "Logic Programming in the LF Logical Framework",
+ booktitle = "Proc. First Workshop on Logical Frameworks",
+ year = "1991",
+ paper = "Pfen91.pdf"
+}
+
+@inproceedings{Pfen91a,
+ author = "Pfenning, Frank",
+ title = "Unification and Anti-Unification in the Calculus of Constructions",
+ booktitle = "Logic in Computer Science 91",
+ year = "1991",
+ pages = "74-85",
+ abstract =
+ "We present algorithms for unification and anti- unification in the
+ Calculus of Constructions, where occurrences of free variables (the
+ variables subject to instantiation) are restricted to higher-order
+ patterns, a notion investigated for the simply-typed $\lambda$-calculus
+ by Miller. Most general unifiers and least common anti-instances are
+ shown to exist and are unique up to a simple equivalence. The
+ unification algorithm is used for logic program execution and type and
+ term reconstruction in the current implementation of Elf and has
+ shown itself to be practical. The main application of the
+ anti-unification algorithm we have in mind is that of proof
+ generalization.",
+ paper = "Pfen91a.pdf"
+}
+
+@book{Pfen92,
+ author = "Pfenning, Frank",
+ title = "Types in Logic Programming",
+ isbn = "9780262161312",
+ publisher = "MIT Press",
+ year = "1992",
+ abstract =
+ "Types play an increasingly important role in logic programming, in
+ language design as well as language implementation. We present
+ various views of types, their connection, and their role within the
+ logic programming paradigm.
+
+ Among the basic views of types we find
+ the so-called descriptive systems, where types describe properties of
+ untyped logic programs, and prescriptive systems, where types are
+ essential to the meaning of programs. A typical application of
+ descriptive types is the approximation of the meaning of a logic
+ program as a subset of the Herbrand universe on which a predicate
+ might be true. The value of prescriptive systems lies primarily in
+ program development, for example, through early detection of errors
+ in programs which manifest themselves as type inconsistencies, or as
+ added documentation for the intended and legal use of predicates.
+
+ Central topics within these views are the problems of type inference
+ and type reconstruction, respectively. Type inference is a form of
+ analysis of untyped logic programs, while type reconstruction attempts
+ to fill in some omitted type information in typed logic programs and
+ generalizes the problem of type checking. Even though analogous
+ problems arise in functional programming, algorithms addressing these
+ problems are quite different in our setting.
+
+ Among the specific forms of types we discuss are simple types,
+ recursive types, polymorphic types, and dependent types. We also
+ briefly touch upon subtypes and inheritance, and the role of types
+ in module systems for logic programming languages."
+}
+
+@phdthesis{Pier91,
+ author = "Pierce, Benjamin C.",
+ title = "Programming with Intersection Types and Bounded Polymorphism",
+ school = "Carnegie Mellon University",
+ year = "1991",
+ comment = "CMU-CS-91-205",
+ abstract =
+ "Intersection types and bounded quantification are complementary
+ mechanisms for extending the expressive power of statically typed
+ programming languages. They begin with a common framework: a simple,
+ typed language with higher-order functions and a notion of subtyping.
+ Intersection types extend this framework by giving every pair of types
+ $\sigma$ and $\tau$ a greatest lower bound, $\sigma \land \tau$,
+ corresponding intuitively to the intersection of the sets of values
+ described by $\sigma$ and $\tau$. Bounded quantification extends the
+ basic framework along a different axis by adding polymorphic functions
+ that operate uniformly on all the subtypes of a given type. This thesis
+ unifies and extends prior work on intersection types and bounded
+ quantification, previously studied only in isolation, by investigating
+ theoretical and practical aspects of a typed $\lambda$-calculus
+ incorporating both.
+
+ The practical utility of this calculus, called $F_\land$ is
+ established by examples showing, for instance, that it allows a rich
+ form of ``coherent overloading'' and supports an analog of abstract
+ interpretation during typechecking; for example, the addition function
+ is given a type showing that it maps pairs of positive inputs to a
+ positive result, pairs of zero inputs to a zero result, etc. More
+ familiar programming examples are presented in terms of an extension
+ of Forsythe (an Algol-like language with intersection types),
+ demonstrating how parametric polymorphism can be used to simplify and
+ generalize Forsythe's design. We discuss the novel programming and
+ debugging styles that arise in $F_\land$.
+
+ We prove the correctness of a simple semi-decision procedure for the
+ subtype relation and the partial correctness of an algorithm for
+ synthesizing minimal types of $F_\land$ terms. Our main tool in this
+ analysis is a notion of ``canonical types,'' which allows proofs to be
+ factored so that intersections are handled separately from the other
+ type constructors.
+
+ A pair of negative results illustrates some subtle complexities of
+ $F_\land$. First, the subtype relation of $F_\land$ is shown to be
+ undecidable; in fact, even the subtype relation of pure second-order
+ bounded quantification is undecidable, a surprising result in its own
+ right. Second, the failure of an important technical property of the
+ subtype relation -- the existence of least upper bounds -- indicates
+ that typed semantic models of $F_\land$ will be more difficult to
+ construct and analyze than the known typed models of intersection
+ types. We propose, for future study, some simpler fragments of
+ $F_\land$ that share most of its essential features, while recovering
+ decidability and least upper bounds.
+
+ We study the semantics of $F_\land$ from several points of view. An
+ untyped model based on partial equivalence relations demonstrates the
+ consistency of the typing rules and provides a simple interpolation
+ for programs, where ``$\sigma$ is a subtype of $\tau$'' is read as
+ ``$\sigma$ is a subset of $\tau$.'' More refined models can be
+ obtained using a translation from $F_\land$ into the pure polymorphic
+ $\lambda$-calculus; in these models, ``$\sigma$ is a subtype of
+ $\tau$'' is interpreted by an explicit coercion function from $\sigma$
+ to $\tau$. The nonexistence of least upper bounds shows up here in
+ the failure of known techniques for proving the coherence of the
+ translation semantics. Finally, an equational theory of equivalences
+ between $F_\land$ terms is presented and its soundness for both styles
+ of model is verified.",
+ paper = "Pier91.pdf"
+}
+
+@techreport{Pier91a,
+ author = "Pierce, Benjamin C.",
+ title = "Bounded Quantification is Undecidable",
+ year = "1991",
+ number = "CMU-CS-91-161",
+ institution = "Carnegie Mellon University",
+ link = "\url{http://repository.cmu.edu/cgi/viewcontent.cgi?article=3059}",
+ abstract =
+ "$F_\le$ is a typed $\lambda$-calculus with subtyping and bounded
+ second-order polymorphism. First introduced by Cardelli and Wegner, it
+ has been widely studied as a core calculus for type systems with
+ subtyping.
+
+ Curien and Ghelli proved the partial correctness of a recursive
+ procedure for computing minimal types of $F_\le$ terms and showed
+ that the termination of this procedure is equivalent to the
+ termination of its major component, a procedure for checking the
+ subtype relation between $F_\le$ types. Ghelli later claimed that
+ this procedure is also guaranteed to terminate, but the discovery of a
+ subtle bug in his proof led him recently to observe that, in fact,
+ there are inputs on which the subtyping procedure diverges. This
+ reopens the question of the decidability of subtyping and hence of
+ typechecking.
+
+ This question is settled here in the negative, using a reduction from
+ the halting problem for two-counter Turing machines to show that the
+ subtype relation of $F_\le$ is undecidable.",
+ paper = "Pier91a.pdf"
+}
+
+@misc{Poiz85,
+ author = "Poizat, B.",
+ title = {Cours de Th\'eorie des Mod\`eles},
+ comment = {Nur al-Mantiq wal-Ma'rifah, Villeurbanne, France},
+ year = "1985"
+}
+
+@InCollection{Rect89,
+ author = "Rector, D. L.",
+ title = "Semantics in Algebraic Computation",
+ booktitle = "Computers and Mathematics",
+ publisher = "Springer-Verlag",
+ year = "1989",
+ pages = "299-307",
+ isbn = "0-387-97019-3",
+ abstract =
+ "I am interested in symbolic computation for theoretical research in
+ algebraic topology. Most algebraic computations in topology are hand
+ calculations; that is, they can be accomplished by the researcher in
+ times ranging from hours to weeks, and they are aimed at discovering
+ general patterns rather than producing specific formulas understood in
+ advance. Furthermore, the range of algebraic constructs used in such
+ calculations is very wide.",
+ keywords = "axiomref"
+}
+
+@article{Reed97,
+ author = "Reed, Mary Lynn",
+ title = "Algebraic Structure of Genetic Inheritance",
+ journal = "Bulletin of the American Mathematical Society",
+ year = "1997",
+ volume = "34",
+ number = "2",
+ month = "April",
+ pages = "107--130",
+ algebra = "\newline\refto{domain ALGSC AlgebraGivenByStructuralConstants}",
+ link="\url{http://www.ams.org/bull/1997-34-02/S0273-0979-97-00712-X/S0273-0979-97-00712-X.pdf}",
+ abstract =
+ "In this paper we will explore the nonassociative algebraic structure
+ that naturally occurs as genetic information gets passed down through
+ the generations. While modern understanding of genetic inheritance
+ initiated with the theories of Charles Darwin, it was the Augustinian
+ monk Gregor Mendel who began to uncover the mathematical nature of the
+ subject. In fact, the symbolism Mendel used to describe his first
+ results (e.g. see his 1866 paper {\sl Experiments in
+ Plant-Hybridization} is quite algebraically suggestive. Seventy four
+ years later, I.M.H. Etherington introduced the formal language of
+ abstract algebra to the study of genetics in his series of seminal
+ papers. In this paper we will discuss the concepts of genetics that
+ suggest the underlying algebraic structure of inheritance, and we will
+ give a brief overview of the algebras which arise in genetics and some
+ of their basic properties and relationships. With the popularity of
+ biologically motivated mathematics continuing to rise, we offer this
+ survey article as another example of the breadth of mathematics that
+ has biological significance. The most comprehensive reference for the
+ mathematical research done in this area (through 1980) is
+ Worz-Busekros.",
+ paper = "Reed97.pdf"
+}
+
+@inproceedings{Remy89,
+ author = "Remy, Didier",
+ title = "Typechecking Records and Variants in a Natural Extension of ML",
+ booktitle = "POPL 89",
+ isbn = "978-0-89791-294-5",
+ year = "1989",
+ publisher = "ACM",
+ link = "\url{https://www.cs.cmu.edu/~aldrich/courses/819/row.pdf}",
+ abstract =
+ "We describe an extension of ML with records where inheritance is
+ given by ML generic polymorphism. All common operations on records but
+ concatenation are supported, in particular, the free extension of
+ records. Other operations such as renaming of fields are added. The
+ solution relies on an extension of ML, where the language of types is
+ sorted and considered modulo equations, and on a record extension of
+ types. The solution is simple and modular and the type inference
+ algorithm is efficient in practice.",
+ paper = "Remy89.pdf"
+}
+
+@inproceedings{Reyn74,
+ author = "Reynolds, John C.",
+ title = "Towards a Theory of Type Structure",
+ booktitle = "Colloquim on Programming",
+ year = "1974",
+ pages = "9-11",
+ paper = "Reyn74.pdf"
+}
+
+@inproceedings{Reyn80,
+ author = "Reynolds, John C.",
+ title = "Using Category Theory to Design Implicit Conversions and
+ Generic Operators",
+ booktitle = "Lecture Notes in Computer Science",
+ year = "1980",
+ abstract =
+ "A generalization of many-sorted algebras, called category-sorted
+ algebras, is defined and applied to the language-design problem of
+ avoiding anomalies in the interaction of implicit conversions and
+ generic operators. The definition of a simple imperative language
+ (without any binding mechanisms) is used as an example.",
+ paper = "Reyn80.pdf"
+}
+
+@inproceedings{Reyn84,
+ author = "Reynolds, John C.",
+ title = "Polymorphism is not Set-theoretic",
+ booktitle = "Proc Semantics of Data Types",
+ pages = "145-156",
+ year = "1984",
+ link = "\url{https://hal.inria.fr/inria-00076261/document}",
+ abstract =
+ "The polymorphic, or second-order, typed lambda calculus is an
+ extension of the typed lambda calculus in which polymorphic functions
+ can be defined. In this paper we show that the standard set-theoretic model of
+ the ordinary typed lambda calculus cannot be extended to model this
+ language extension.",
+ paper = "Reyn84.pdf"
+}
+
+@inproceedings{Reyn91,
+ author = "Reynolds, John C.",
+ title = "The Coherence of Languages with Intersection Types",
+ booktitle = "TACS 91",
+ year = "1991",
+ abstract =
+ "When a programming language has a sufficiently rich type structure,
+ there can be more than one proof of the same typing judgement;
+ potentially this can lead to semantic ambiguity since the semantics of
+ a typed language is a function of such proofs. When no such ambiguity
+ arises, we say that the language is coherent. In this paper we prove
+ the coherence of a class of lambda-calculus-based languages that use
+ the intersection type discipline, including both a purely functional
+ programming language and the Algol-like programming language Forsythe.",
+ paper = "Reyn91.pdf"
+}
+
+@book{Robi96,
+ author = "Robinson, Derek J. S.",
+ title = "A Course in the Theory of Groups",
+ year = "1996",
+ series = "Graduate Texts in Mathematics",
+ isbn = "978-1-4612-6443-9",
+ publisher = "Springer"
+}
+
+@misc{Roll1691,
+ author = "Rolle, Michel",
+ title = "Rolle's Theorem",
+ year = "1691",
+ link = "\url{https://en.wikipedia.org/wiki/Rolle%27s\_theorem}",
+ abstract =
+ "If a real-valued function $f$ is continuous on a proper closed interval
+ $[a,b]$, differentiable on the open interval $(a,b)$, and $f(a)=f(b)$,
+ then there exists at least one $c$ in the open interval $(a,b)$ such
+ that $f^\prime(c)=0$."
+}
+
+@book{Ryde88,
+ author = "Rydeheard, D. E. and Burstall, R. M.",
+ title = "Computational Category Theory",
+ publisher = "Prentice Hall",
+ year = "1988",
+ isbn = "978-0131627369"
+}
+
+@book{Schm89,
+ author = "Schmidt-Schauss, M.",
+ title = "Computational Aspects of an Order-Sorted Logic with Term
+ Declarations",
+ publisher = "Springer",
+ isbn = "978-3-540-51705-4",
+ year = "1989"
+}
+
+@misc{Scho24,
+ author = "Schoenfinkel, M.",
+ title = "{\"U}ber die Bausteine der mathematischen Logik",
+ year = "1924",
+ pages = "305-316"
+}
+
+@book{Schu72,
+ author = "Schubert, Horst",
+ title = "Categories",
+ publisher = "Springer-Verlag",
+ year = "1972"
+}
+
+@article{Siek89,
+ author = "Siekmann, Jorg H.",
+ title = "Unification Theory",
+ journal = "Journal of Symbolic Computation",
+ volume = "7",
+ number = "3-4",
+ year = "1989",
+ pages = "207-274",
+ abstract =
+ "Most knowledge based systems in artificial intelligence (AI), with a
+ commitment to asymbolic representation, support one basic operation:
+ ``matching of descriptions''. This operation, called unification in work
+ on deduction, is the ``addition-and-multiplication'' of AI-systems and
+ is consequently often supported by special purpose hardware or by a
+ fast instruction set on most AI-machines. Unification theory provides
+ the formal framework for investigations into the properties of this
+ operation. This article surveys what is presently known in unification
+ theory and records its early history.",
+ paper = "Siek89.pdf"
+}
+
+@article{Sims71,
+ author = "Sims, Charles",
+ title = "Determining the Conjugacy Classes of a Permutation Group",
+ journal = "Computers in Algebra and Number Theory, SIAM-AMS Proc.",
+ volume = "4",
+ publisher = "American Math. Soc.",
+ year = "1971",
+ pages = "191--195",
+ algebra = "\newline\refto{domain PERMGRP PermutationGroup}"
+}
+
+@article{Smol88,
+ author = "Smolka, G.",
+ title = "Logic Programming with Polymorphically Order-sorted Types",
+ journal = "Lecture Notes in Computer Science",
+ volume = "343",
+ pages = "53-70",
+ year = "1988"
+}
+
+@InCollection{Smol89,
+ author = "Smolka, G. and Nutt, W. and Goguen, J. and Meseguer, J.",
+ title = "Order-sorted Equational Computation",
+ booktitle = "Resolution of Equations in Algebra Structures (Vol 2)",
+ publisher = "Academic Press",
+ pages = "297-367",
+ year = "1989"
+}
+
+@phdthesis{Smol89a,
+ author = "Smolka, G.",
+ title = "Logic Programming over Polymorphically Order-Sorted Types",
+ school = "Fachbereich Informatik, Universitat Kaiserslautern",
+ year = "1989",
+ paper = "Smol89a.pdf"
+}
+
+@misc{Stac17,
+ author = "StackExchange",
+ title = "How do Gap generate the elements in permutation groups",
+ year = "2017",
+ link = "\url{http://math.stackexchange.com/questions/1705277/how-do-gap-generate-the-elements-in-permutation-groups}"
+}
+
+@inproceedings{Stan88,
+ author = "Stansifer, R.",
+ title = "Type Inference with Subtypes",
+ booktitle = "POPL 88",
+ pages = "88-97",
+ year = "1988",
+ abstract =
+ "We give an algorithm for type inference in a language with functions,
+ records, and variant records. A similar language was studied by
+ Cardelli who gave a type checking algorithm. This language is
+ interesting because it captures aspects of object-oriented programming
+ using subtype polymorphism. We give a type system for deriving types
+ of expressions in the language and prove the type inference algorithm
+ is sound, i.e., it returns a type derivable from the proof system. We
+ also prove that the type the algorithm finds is a ``principal'' type,
+ i.e., one which characterizes all others. The approach taken here is
+ due to Milner for universal polymorphism. The result is a synthesis of
+ subtype polymorphism and universal polymorphism.",
+ paper = "Stan88.pdf"
+}
+
+@article{Stra00,
+ author = "Strachey, Christopher",
+ title = "Fundamental Concepts in Programming Languages",
+ journal = "Higher-Order and Symbolic Computation",
+ volume = "13",
+ number = "1-2",
+ pages = "11-49",
+ year = "2000",
+ abstract =
+ "This paper forms the substance of a course of lectures given at the
+ International Summer School in Computer Programming at Copenhagen in
+ August, 1967. The lectures were originally given from notes and the
+ paper was written after the course was finished. In spite of this, and
+ only partly because of the shortage of time, the paper still retains
+ many of the shortcomings of a lecture course. The chief of these are
+ an uncertainty of aim—it is never quite clear what sort of audience
+ there will be for such lectures—and an associated switching from
+ formal to informal modes of presentation which may well be less
+ acceptable in print than it is natural in the lecture room. For these
+ (and other) faults, I apologise to the reader.
+
+ There are numerous references throughout the course to CPL [1–3]. This
+ is a programming language which has been under development since 1962
+ at Cambridge and London and Oxford. It has served as a vehicle for
+ research into both programming languages and the design of
+ compilers. Partial implementations exist at Cambridge and London. The
+ language is still evolving so that there is no definitive manual
+ available yet. We hope to reach another resting point in its evolution
+ quite soon and to produce a compiler and reference manuals for this
+ version. The compiler will probably be written in such a way that it
+ is relatively easy to transfer it to another machine, and in the first
+ instance we hope to establish it on three or four machines more or
+ less at the same time.
+
+ The lack of a precise formulation for CPL should not cause much
+ difficulty in this course, as we are primarily concerned with the
+ ideas and concepts involved rather than with their precise
+ representation in a programming language.",
+ paper = "Stra00.pdf"
+}
+
+@book{Stro95,
+ author = "Stroustrup, Bjarne",
+ title = "The C++ Programming Language (2nd Edition)",
+ publisher = "Addison-Wesley",
+ year = "1995",
+ isbn = "0-201-53992-6"
+}
+
+@article{Stur1829,
+ author = "Sturm, Jacques Charles Francois",
+ title = {M\'emoire sur la r\'esolution des \'equations num\'eriques},
+ journal = {Bulletin des Sciences de F\'erussac},
+ year = "1829",
+ volume = "11",
+ pages = "419-425",
+ link = "\url{https://en.wikipedia.org/wiki/Sturm%27s\_theorem}",
+ abstract =
+ "Let $p_0,\ldots,p_m$ be the Sturm chain of a square free polynomial $p$,
+ and let $\sigma{\eta}$ denote the number of sign changes (ignoring zeros)
+ in the sequence $p_0(\eta),p_1(\eta),p_2(\eta),\ldots,p_m(\eta)$.
+ Sturms' theorem states that for two real numbers $a < b$, the number of
+ distinct roots of $p$ in the half-open interval $(a,b]$ is
+ $\sigma(a)-\sigma(b)$.
+
+ A Sturm chain is a finite sequence of polynomials $p_0,p_1,\ldots,p_m$
+ of decreasing degree with these following properties:
+ \begin{itemize}
+ \item $p_0=p$ is square free (no square factors, i.e. no repeated roots)
+ \item if $p(\eta)=0$, then $sign(p_1(\eta))=sign(p^\prime(\eta))$
+ \item if $p_i(\eta)=0$ for $0 < i < m$, then
+   $sign(p_{i-1}(\eta)) = -sign(p_{i+1}(\eta))$
+ \end{itemize}",
+ paper = "Stur1829.pdf"
+}
+
+@misc{Dave17,
+ comment = "NOTE(review): author/title/year were lost when this entry was
+ accidentally merged with Stur1829; reconstruct the header from Dave17.pdf",
+ abstract =
+ "... In this paper, we abstract the core SRA algorithm to arbitrary
+ finite fields and present three instantiations of our general algorithm,
+ one of which is novel and makes use of a series of isogenies derived
+ from elliptic curves with sufficiently smooth order.",
+ paper = "Dave17.pdf"
+}
+
+@InCollection{Diaz97,
+ author = "Diaz, A. and Kaltofen, E. and Pan, V.",
+ title = "Algebraic Algorithms",
+ booktitle = "The Computer Science and Engineering Handbook",
+ publisher = "CRC Press",
+ year = "1997",
+ editor = "A. B. Tucker",
+ pages = "226--248",
+ address = "Boca Raton, Florida",
+ chapter = "10",
+ keywords = "survey",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/97/DKP97.ps.gz}",
+ ref = "00965",
+ abstract = "
+ The title's subject is the algorithmic approach to algebra: arithmetic
+ with numbers, polynomials, matrices, differential polynomials, such as
+ $y^{\prime\prime} + (1/2 + x^4/4)y$, truncated series,
+ and algebraic sets, i.e.,
+ quantified expressions such as $\exists x \in {\bf R}: x^4+p\cdot x+q=0$,
+ which describes a subset of the two-dimensional space with
+ coordinates $p$ and $q$ for which the given quartic equation has a
+ real root. Algorithms that manipulate such objects are the backbone
+ of modern symbolic mathematics software such as the Maple and
+ Mathematica systems, to name but two among many useful systems. This
+ chapter restricts itself to algorithms in four areas: linear matrix
+ algebra, root finding of univariate polynomials, solution of systems
+ of nonlinear algebraic equations, and polynomial factorization.",
+ paper = "Diaz97.ps"
+}
+
+@InCollection{Diaz99,
+ author = "Diaz, A. and Emiris, I. and Kaltofen, E. and Pan, V.",
+ title = "Algebraic Algorithms",
+ booktitle = "Algorithms \& Theory of Computation Handbook",
+ publisher = "CRC Press",
+ year = "1999",
+ editor = "M. J. Atallah",
+ address = "Boca Raton, Florida",
+ pages = "16.1--16.27",
+ isbn = "0-8493-2649-4",
+ keywords = "survey",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/99/DEKP99.ps.gz}",
+ abstract = "
+ The title's subject is the algorithmic approach to algebra: arithmetic
+ with numbers, polynomials, matrices, differential polynomials, such as
+ $y^n+(1/2+x^4/4)y$, truncated series, and algebraic sets, i.e.,
+ quantified expressions such as $\exists x \in {\bf R}: x^4+p\cdot x+q=0$,
+ which describes a subset of the two-dimensional space with
+ coordinates $p$ and $q$ for which the given quartic equation has a
+ real root. Algorithms that manipulate such objects are the backbone
+ of modern symbolic mathematics software such as the Maple and
+ Mathematica systems, to name but two among many useful systems. This
+ chapter restricts itself to algorithms in four areas: linear algebra,
+ root finding for univariate polynomials, solution of systems of
+ nonlinear algebraic equations, and polynomial factorization (see
+ section 5 on some pointers to the vast further material on algebraic
+ algorithms and section 2.2 and [Pan 1993] on further applications to
+ the graph and combinatorial computations).",
+ paper = "Diaz99.ps"
+}
+
+@misc{Doye93,
+ author = "Doye, Nicolas James",
+ title = "The Implementation of Various Algorithms for Permutation Groups
+ in the Computer Algebra System: AXIOM",
+ year = "1993",
+ comment = "M.Sc thesis, University of Bath",
+ link = "\url{https://static.worldofnic.org/cdn/ps/research/msc.ps}",
+ paper = "Doye93.pdf",
+ keywords = "axiomref"
+}
+
+@inproceedings{Fate92a,
+ author = "Fateman, Richard",
+ title = "Honest Plotting, Global Extrema, and Interval Arithmetic",
+ booktitle = "ISSAC 92",
+ year = "1992",
+ pages = "216-223",
+ isbn = "0-89791-489-9",
+ abstract =
+ "A computer program to honestly plot curves y = f(x) must locate
+ maxima and minima in the domain of the graph. To do so it may have to
+ solve a classic problem in computation – global optimization.
+ Reducing an easy problem to a hard one is usually not an advantage,
+ but in fact there is a route to solving both problems if the function
+ can be evaluated using interval arithmetic. Since some computer
+ algebra systems supply a version of interval arithmetic, it seems we
+ have the ingredients for a solution.
+
+ In this paper we address a particular problem how to compute and
+ display ``honest'' graphs of 2-D mathematical curves. By
+ ``honest'' we mean that no significant features (such as the
+ location of poles, the values at maxima or minima, or the behavior
+ of a curve at asymptotes) are misrepresented. By ``mathematical'' we
+ mean curves like those generally needed in scientific disciplines
+ where functions are represented by composition of common
+ mathematical operations: rational operations ($+, -, *, /$),
+ exponential and log, trigonometric functions as well as continuous
+ and differentiable functions from applied mathematics.",
+ paper = "Fate92a.pdf"
+}
+
+@misc{Fate00b,
+ author = "Fateman, Richard",
+ title = "The (finite field) Fast Fourier Transform",
+ year = "2000",
+ link =
+ "\url{https://people.eecs.berkeley.edu/~fateman/282/readings/fftnotes.pdf}",
+ abstract =
+ "There are numerous directions from which one can approach the subject
+ of the fast Fourier Transform (FFT). It can be explained via numerous
+ connections to convolution, signal processing, and various other
+ properties and applications of the algorithm. We (along with
+ Geddes/Czapor/Labahn) take a rather simple view from the algebraic
+ manipulation standpoint. As will be apparent shortly, we relate the
+ FFT to the evaluation of a polynomial. We also consider it of interest
+ primarily as an algorithm in a discrete (finite) computation structure
+ rather than over the complex numbers.",
+ paper = "Fate00b.pdf"
+}
+
+@misc{Fate00c,
+ author = "Fateman, Richard",
+ title = "Additional Notes on Polynomial GCDs, Hensel construction",
+ year = "2000",
+ link =
+ "\url{https://people.eecs.berkeley.edu/~fateman/282/readings/hensel.pdf}",
+ paper = "Fate00c.pdf"
+}
+
+@article{Gent76,
+ author = "Gentleman, W. M. and Johnson, S. C.",
+ title = "Analysis of Algorithms, A Case Study: Determinants of Matrices
+ With Polynomial Entries",
+ journal = "ACM Transactions on Mathematical Software",
+ volume = "2",
+ number = "3",
+ year = "1976",
+ pages = "232-241",
+ link =
+ "\url{https://people.eecs.berkeley.edu/~fateman/282/readings/gentleman.pdf}",
+ abstract =
+ "The problem of computing the determinant of a matrix of polynomials
+ is considered; two algorithms (expansion by minors and expansion by
+ Gaussian elimination) are compared; and each is examined under two models
+ for polynomial computation (dense univariate and totally sparse). The
+ results, while interesting in themselves, also serve to display two
+ points: (1) Asymptotic results are sometimes misleading for noninfinite
+ (e.g. practical) problems. (2) Models of computation are by
+ definition simplifications of reality: algorithmic analysis should be
+ carried out under several distinct computational models and should be
+ supported by empirical data.",
+ paper = "Gent76.pdf"
+}
+
+@book{Hard64,
+ author = "Hardy, G. and Littlewood, J.E. and Polya, G.",
+ title = "Inequalities",
+ publisher = "Cambridge University Press",
+ year = "1964"
+}
+
+@InCollection{Kalt87a,
+ author = "Kaltofen, E.",
+ editor = "J. F. Traub",
+ title = "Computer algebra algorithms",
+ booktitle = "Annual Review in Computer Science",
+ pages = "91--118",
+ publisher = "Annual Reviews Inc.",
+ year = "1987",
+ volume = "2",
+ address = "Palo Alto, California",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/87/Ka87_annrev.pdf}",
+ abstract = "
+ The origins of the discipline of computer algebra can be found in
+ Isaac Newton's {\sl Universal Arithmetic} (1728) [130], where methods
+ for manipulating universal mathematical expressions (i.e.
+ formulas containing symbolic indeterminates) and algorithms for
+ solving equations built with these expressions are systematically
+ discussed. One can interpret the mission of computer algebra as the
+ construction of computer systems that enable scientific or engineering
+ users, for instance, to carry out mathematical manipulation
+ automatically. Indeed, systems with this goal already exist, among
+ them {MACSYMA}, {MAPLE}, {muMATH}, {REDUCE}, {SAC/2}, {SCRATCHPAD/II},
+ and {SMP}. These systems carry out scientific computing tasks, whose
+ results are distinguished from numerical computing in two principal
+ aspects.",
+ paper = "Kalt87a.pdf",
+ keywords = "survey,axiomref"
+}
+
+@article{Kemp81,
+ author = "Kempelmann, Helmut",
+ title = "Recursive Algorithm for the Fast Calculation of the Limit of
+ Derivatives at Points of Indeterminateness",
+ journal = "ACM SIGSAM",
+ volume = "15",
+ number = "4",
+ year = "1981",
+ pages = "10-11",
+ abstract =
+ "It is a common method in probability and queueing theory to gain the
+ $n$-th moment $E[x^n]$ of a random variable $X$
+ with density function $f_x(x)$
+ by the $n$-th derivative of the corresponding Laplace transform $L(s)$ at
+ the point $s = 0$
+ \[E[x^n] = (-1)^n\cdot L^{(n)}(0)\]
+ Quite often we encounter indetermined
+ expressions of the form $0/0$ which normally are treated by the rule of
+ L'Hospital. This is a time and memory consuming task requiring
+ greatest common divisor cancellations. This paper presents an
+ algorithm that calculates only those derivatives of numerator and
+ denominator which do not equal zero when taking the limit /1/. The
+ algorithm has been implemented in REDUCE /2/. It is simpler and more
+ efficient than that one proposed by /3/.",
+ paper = "Kemp81.pdf"
+}
+
+@article{Loos72a,
+ author = "Loos, Rudiger",
+ title = "Analytic Treatment of Three Similar Fredholm Integral Equations
+ of the second kind with REDUCE 2",
+ journal = "ACM SIGSAM",
+ volume = "21",
+ year = "1972",
+ pages = "32-40"
+}
+
+@article{Norf82,
+ author = "Norfolk, Timothy S.",
+ title = "Symbolic Computation of Residues at Poles and Essential
+ Singularities",
+ journal = "ACM SIGSAM",
+ volume = "16",
+ number = "1",
+ year = "1982",
+ pages = "17-23",
+ abstract =
+ "Although most books on the theory of complex variables include a
+ classification of the types of isolated singularities, and the
+ applications of residue theory, very few concern themselves with
+ methods of computing residues. In this paper we derive some results on
+ the calculation of residues at poles, and some special classes of
+ essential singularities, with a view to implementing an algorithm in
+ the VAXIMA computer algebra system.",
+ paper = "Norf82.pdf"
+}
+
+@article{Plat09,
+ author = "Platzer, Andre and Quesel, Jan-David and Rummer, Philipp",
+ title = "Real World Verification",
+ journal = "LNCS",
+ volume = "5663",
+ year = "2009",
+ pages = "495-501",
+ link = "\url{http://www.cs.cmu.edu/~aplatzer/pub/rwv.pdf}",
+ abstract =
+ "Scalable handling of real arithmetic is a crucial part of the
+ verification of hybrid systems, mathematical algorithms, and mixed
+ analog/digital circuits. Despite substantial advances in verification
+ technology, complexity issues with classical decision procedures are
+ still a major obstacle for formal verification of real-world
+ applications, e.g., in automotive and avionic industries. To identify
+ strengths and weaknesses, we examine state of the art symbolic
+ techniques and implementations for the universal fragment of
+ real-closed fields: approaches based on quantifier elimination,
+ Groebner Bases, and semidefinite programming for the
+ Positivstellensatz. Within a uniform context of the verification tool
+ KeYmaera, we compare these approaches qualitatively and quantitatively
+ on verification benchmarks from hybrid systems, textbook algorithms,
+ and on geometric problems. Finally, we introduce a new decision
+ procedure combining Groebner Bases and semidefinite programming for
+ the real Nullstellensatz that outperforms the individual approaches on
+ an interesting set of problems.",
+ paper = "Plat09.pdf"
+}
+
+@misc{Schr17,
+ author = "Wikipedia",
+ title = "Schreier-Sims algorithm",
+ year = "2017",
+ link =
+ "\url{https://en.wikipedia.org/wiki/Schreier\%E2%80%93Sims\_algorithm}",
+ abstract =
+ "The Schreier-Sims algorithm is an algorithm in computational group
+ theory named after mathematicians Otto Schreier and Charles Sims. Once
+ performed, it allows a linear time computation of the order of a
+ finite permutation group, group membership test (is a given permutation
+ contained in a group?), and many other tasks. The algorithm was
+ introduced by Sims in 1970, based on Schreier's subgroup lemma. The
+ timing was subsequently improved by Donald Knuth in 1991. Later, an
+ even faster randomized version of the algorithm was developed.
+
+ The algorithm is an efficient method of computing a base and strong
+ generating set (BSGS) of a permutation group. In particular, an SGS
+ determines the order of a group and makes it easy to test membership
+ in the group. Since the SGS is critical for many algorithms in
+ computational group theory, computer algebra systems typically rely on
+ the Schreier-Sims algorithm for efficient calculations in groups."
+}
+
+@book{Somm01,
+ author = "Sommer, Gerald",
+ title = "Geometric Computing with Clifford Algebras",
+ year = "2001",
+ publisher = "Springer",
+ isbn = "3-540-41198-4"
+}
+
+@misc{Wang90a,
+ author = "Wang, Dongming",
+ title = "Some Notes on Algebraic Methods for Geometry Theorem Proving",
+ link = "\url{http://www-polsys.lip6.fr/~wang/papers/GTPnote.ps.gz}",
+ year = "1990",
+ abstract =
+ "A new geometry theorem prover which provides the first complete
+ implementation of Wu's method and includes several Groebner bases
+ based methods is reported. This prover has been used to prove a number
+ of non-trivial geometry theorems including several {\sl large} ones
+ with less space and time cost than using the existing provers. The
+ author presents a new technique by introducing the notion of {\sl
+ normal ascending set}. This technique yields in some sense {\sl
+ simpler} non-degenerate conditions for Wu's method and allows one to
+ prove geometry theorems using characteristic sets but Groeber bases
+ type reduction. Parallel variants of Wu's method are discussed; an
+ implementation of the parallelized version of his algorithm utilizing
+ workstation networks has also been included in our prover. Timing
+ statistics for a set of typical examples is given.",
+ paper = "Wang90a.pdf"
+}
+
+@inproceedings{Wang92,
+ author = "Wang, Dongming",
+ title = "A Method for Factorizing Multivariate Polynomials over Successive
+ Algebraic Extension Fields",
+ booktitle = "Mathematics and Mathematics-Mechanization (2001)",
+ pages = "138-172",
+ institution = "Johannes Kepler University",
+ link = "\url{http://www-polsys.lip6.fr/~wang/papers/Factor.ps.gz}",
+ year = "1992",
+ abstract =
+ "We present a method for factorizing multivariate polynomials over
+ algebraic fields obtained from successive extensions of the rational
+ number field. The basic idea underlying this method is the reduction
+ of polynomial factorization over algebraic extension fields to the
+ factorization over the rational number field via linear transformation
+ and the computation of characteristic sets with respect to a proper
+ variable ordering. The factors over the algebraic extension fields are
+ finally determined via GCD (greatest common divisor) computations. We
+ have implemented this method in the Maple system. Preliminary
+ experiments show that it is rather efficient. We give timing
+ statistics in Maple 4.3 on 40 test examples which were partly taken
+ from the literature and partly randomly generated. For all those
+ examples to which Maple built-in algorithm is applicable, our
+ algorithm is always faster.",
+ paper = "Wang92.pdf"
+}
+
+@InProceedings{Kalt96b,
+ author = "Kaltofen, E.",
+ title = "Blocked iterative sparse linear system solvers for finite fields",
+ booktitle = "Proc. Symp. Parallel Comput. Solving Large Scale Irregular
+ Applic. (Stratagem '96)",
+ editor = "C. Roucairol",
+ publisher = "INRIA",
+ address = "Sophia Antipolis, France",
+ pages = "91--95",
+ year = "1996",
+ keywords = "survey",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/96/Ka96_stratagem.ps.gz}",
+ abstract = "
+ The problem of solving a large sparse or structured system of linear
+ equations in the symbolic context, for instance when the coefficients
+ lie in a finite field, has arisen in several applications. A famous
+ example are the linear systems of ${\bf F}_2$, the field with 2
+ elements, that arise in sieve based integer factoring algorithms. For
+ example, for the factorization of the RSA-130 challenge number several
+ column dependencies of a $3504823\times 3516502$ matrix with an
+ average of $39.4$ non-zero entries per column needed to be computed
+ [10]. A second example is the Berlekamp polynomial factorization
+ algorithm [6]. In that example, the matrix is not explicitly
+ constructed, but instead a fast algorithm for performing the matrix
+ times vector product is used. Further examples for such ``black box
+ matrices'' arise in the power series solution of algebraic or
+ differential equations by undetermined coefficients. The arising
+ linear systems for the coefficients usually have a distinct structure
+ that allows a fast coefficient matrix times vector product.",
+ paper = "Kalt96b.ps"
+}
+
+@Article{Kalt04,
+ author = "Kaltofen, E. and Villard, G.",
+ title = "Computing the sign or the value of the determinant of an integer
+ matrix, a complexity survey",
+ journal = "J. Computational Applied Math.",
+ volume = "162",
+ number = "1",
+ month = "January",
+ pages = "133--146",
+ year = "2004",
+ keywords = "survey",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/02/KaVi02.pdf}",
+ abstract = "
+ Computation of the sign of the determinant of a matrix and the
+ determinant itself is a challenge for both numerical and exact
+ methods. We survey the complexity of existing methods to solve these
+ problems when the input is an $n\times n$ matrix $A$ with integer
+ entries. We study the bit-complexities of the algorithms
+ asymptotically in $n$ and the norm $A$. Existing approaches rely on
+ numerical approximate computations, on exact computations, or on both
+ types of arithmetic in combination.",
+ paper = "Kalt04.pdf"
+}
+
+@Article{Kalt00,
+ author = "Kaltofen, E.",
+ title = "Challenges of Symbolic Computation My Favorite Open Problems",
+ journal = "Journal of Symbolic Computation",
+ volume = "29",
+ number = "6",
+ pages = "891--919",
+ year = "2000",
+ keywords = "survey",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/2K/Ka2K.pdf}",
+ abstract = "
+ The success of the symbolic mathematical computation discipline is
+ striking. The theoretical advances have been continuous and significant:
+ Gr{\"o}bner bases, the Risch integration algorithm, integer lattice
+ basis reduction, hypergeometric summation algorithms, etc. From the
+ beginning in the early 60s, it has been the tradition of our discipline
+ to create software that makes our ideas readily available to
+ scientists, engineers, and educators: {SAC-1}, {Reduce}, {Macsyma}, etc.
+ The commercial viability of our system products is proven by Maple and
+ Mathematica.
+
+ Today's user communities of symbolic computation systems are diverse:
+ educators, engineers, stock market analysts, etc. The mathematics and
+ computer science in the design and implementation of our algorithms are
+ sophisticated. The research challenges in symbolic computation at the
+ close of the 20th century are formidable.
+
+ I state my favorite eight open problems in symbolic computations. They
+ range from problems in symbolic /numeric computing, symbolic algorithm
+ synthesis, to system component construction. I have worked on seven of
+ my problems and borrowed one from George Collins. I present background
+ to each of my problems and a clear-cut test that evaluates whether a
+ proposed attack has solved one of my problems. An additional ninth
+ open problem by Rob Corless and David Jeffrey on complex function
+ semantics is given in an appendix.",
+ paper = "Kalt00.pdf"
+}
+
+@InCollection{Kalt93a,
+ author = "Kaltofen, E.",
+ editor = "J. Reif",
+ title = "Dynamic parallel evaluation of computation {DAG}s",
+ booktitle = "Synthesis of Parallel Algorithms",
+ pages = "723--758",
+ publisher = "Morgan Kaufmann Publ.",
+ year = "1993",
+ address = "San Mateo, California",
+ keywords = "survey",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/93/Ka93_synthesis.ps.gz}",
+ abstract = "
+ One generic parallel evaluation scheme for algebraic objects, that of
+ evaluating algebraic computation trees or formulas, is presented by
+ Miller in a preceding chapter of this book. However, there are basic
+ algebraic functions for which the tree model of computation seems not
+ sufficient to allow an efficient -- even sequential -- decision-free
+ algebraic computation. The formula model essentially restricts the use
+ of an intermediate result to a single place, because the parse tree
+ nodes have fan-out one. If an intermediate result participates in the
+ computations of several further nodes, in the tree model it must be
+ recomputed anew for each of these nodes. It is a small formal change
+ to allow node values to propagate to more than one node deeper level
+ of the computation. Thus we obtain the {\sl algebraic circuit model},
+ which is equivalent to the {\sl straight-line program model}.",
+ paper = "Kalt93a.ps"
+}
+
+@misc{Corl93,
+ author = "Corless, R. M. and Gonnet, G. H. and Hare, D. E. G. and
+ Jeffrey, D. J. and Knuth, D. E.",
+ title = "On the Lambert W Function",
+ year = "1993",
+ link = "\url{http://cs.uwaterloo.ca/research/tr/1993/03/W.pdf}",
+ abstract =
+ "The Lambert W function is defined to be the multivalued inverse of
+ the function $w \rightarrow we^w$. It has many applications in pure
+ and applied mathematics, some of which are briefly described here. We
+ present a new discussion of the complex branches of $W$, an asymptotic
+ expansion valid for all branches, an efficient numerical procedure for
+ evaluating the function to arbitrary precision, and a method for the
+ symbolic integration of expressions containing $W$.",
+ paper = "Corl93.pdf"
+}
+
+@InProceedings{Hutt10,
+ author = "Hutton, Sharon E. and Kaltofen, Erich L. and Zhi, Lihong",
+ title = "Computing the radius of positive semidefiniteness of a
+ multivariate real polynomial via a dual of {Seidenberg}'s method",
+ year = "2010",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'10",
+ pages = "227--234",
+ month = "July",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/10/HKZ10.pdf}",
+ abstract = "
+ We give a stability criterion for real polynomial inequalities with
+ floating point or inexact scalars by estimating from below or
+ computing the radius of semidefiniteness. That radius is the maximum
+ deformation of the polynomial coefficient vector measured in a weighted
+ Euclidean vector norm within which the inequality remains true. A
+ large radius means that the inequalities may be considered numerically
+ valid.
+
+ The radius of positive (or negative) semidefiniteness is the distance
+ to the nearest polynomial with a real root, which has been thoroughly
+ studied before. We solve this problem by parameterized Lagrangian
+ multipliers and Karush-Kuhn-Tucker conditions. Our algorithms can
+ compute the radius for several simultaneous inequalities including
+ possibly additional linear coefficient constraints. Our distance
+ measure is the weighted Euclidean coefficient norm, but we also
+ discuss several formulas for the weighted infinity and 1-norms.
+
+ The computation of the nearest polynomial with a real root can be
+ interpreted as a dual of Seidenberg's method that decides if a real
+ hypersurface contains a real point. Sums-of-squares rational lower
+ bound certificates for the radius of semidefiniteness provide a new
+ approach to solving Seidenberg's problem, especially when the
+ coefficients are numeric. They also offer a surprising alternative
+ sum-of-squares proof for those polynomials that themselves cannot be
+ represented by a polynomial sum-of-squares but that have a positive
+ distance to the nearest indefinite polynomial.",
+ paper = "Hutt10.pdf"
+}
+
+@InProceedings{Kalt06,
+ author = "Kaltofen, Erich and Zhi, Lihong",
+ title = "Hybrid Symbolic-Numeric Computation",
+ year = "2006",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'06",
+ pages = "7",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/06/KaZhi06.pdf}",
+ abstract = "
+ Several standard problems in symbolic computation, such as greatest
+ common divisors and factorization of polynomials, sparse
+ interpolation, or computing solutions to overdetermined systems of
+ polynomial equations have non-trivial solutions only if the input
+ coefficients satisfy certain algebraic constraints. Errors in the
+ coefficients due to floating point round-off or through physical
+ measurement thus render the exact symbolic algorithms unusable. By
+ symbolic-numeric methods one computes minimal deformations of the
+ coefficients that yield non-trivial results. We will present hybrid
+ algorithms and benchmark computations based on Gauss-Newton
+ optimization, singular value decomposition (SVD) and
+ structure-preserving total least squares (STLS) fitting for several of
+ the above problems.
+
+ A significant body of results to solve those ``approximate computer
+ algebra'' problems has been discovered in the past 10 years. In the
+ Computer Algebra Handbook the section on ``Hybrid Methods'' concludes
+ as follows [2]: ``The challenge of hybrid symbolic-numeric algorithms
+ is to explore the effects of imprecision, discontinuity, and
+ algorithmic complexity by applying mathematical optimization,
+ perturbation theory, and inexact arithmetic and other tools in order
+ to solve mathematical problems that today are not solvable by
+ numerical or symbolic methods alone.'' The focus of our tutorial is
+ on how to formulate several approximate symbolic computation problems
+ as numerical problems in linear algebra and optimization and on
+ software that realizes their solutions.",
+ paper = "Kalt06.pdf"
+}
+
+@misc{Hjor10,
+ author = "Hjorth-Jensen, Morten",
+ title = "Computational Physics",
+ year = "2010",
+ link = "\url{http://depts.washington.edu/ph506/Hjorth-JensenLectures2010.pdf}",
+ paper = "Hjor10.pdf"
+}
+
+@InProceedings{Kalt09,
+ author = "Kaltofen, Erich and Yang, Zhengfeng and Zhi, Lihong",
+ title = "A Proof of the {Monotone Column Permanent (MCP) Conjecture} for
+ Dimension 4 via Sums-Of-Squares of Rational Functions",
+ year = "2009",
+ booktitle = "Proc. 2009 Internat. Workshop on Symbolic-Numeric Comput.",
+ pages = "65--69",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/09/KYZ09.pdf}",
+ abstract = "
+ For a proof of the monotone column permanent (MCP) conjecture for
+ dimension 4 it is sufficient to show that 4 polynomials, which come
+ from the permanents of real matrices, are nonnegative for all real
+ values of the variables, where the degrees and the number of the
+ variables of these polynomials are all 8. Here we apply a hybrid
+ symbolic-numerical algorithm for certifying that these polynomials can
+ be written as an exact fraction of two polynomial sums-of-squares
+ (SOS) with rational coefficients.",
+ paper = "Kalt09.pdf"
+}
+
+@Article{Kalt12,
+ author = "Kaltofen, Erich L. and Li, Bin and Yang, Zhengfeng and
+ Zhi, Lihong",
+ title = "Exact Certification in Global Polynomial Optimization
+ Via Sums-Of-Squares of Rational Functions
+ with Rational Coefficients",
+ year = "2012",
+ month = "January",
+ journal = "Journal of Symbolic Computation",
+ volume = "47",
+ number = "1",
+ pages = "1--15",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/09/KLYZ09.pdf}",
+ abstract = "
+ We present a hybrid symbolic-numeric algorithm for certifying a
+ polynomial or rational function with rational coefficients to be
+ non-negative for all real values of the variables by computing a
+ representation for it as a fraction of two polynomial sum-of-squares
+ (SOS) with rational coefficients. Our new approach turns the earlier
+ methods by Peyrl and Parrilo at SNC'07 and ours at ISSAC'08 both
+ based on polynomial SOS, which do not always exist, into a universal
+ algorithm for all inputs via Artin's theorem.
+
+ Furthermore, we scrutinize the all-important process of converting the
+ numerical SOS numerators and denominators produced by block
+ semidefinite programming into an exact rational identity. We improve
+ on our own Newton iteration-based high precision refinement algorithm
+ by compressing the initial Gram matrices and by deploying rational
+ vector recovery aside from orthogonal projection. We successfully
+ demonstrate our algorithm on 1. various exceptional SOS problems with
+ necessary polynomial denominators from the literature and on 2. very
+ large (thousands of variables) SOS lower bound certificates for Rump's
+ model problem (up to $n=18$, factor degree $=17$).",
+ paper = "Kalt12.pdf"
+}
+
+@InProceedings{Kalt08b,
+ author = "Kaltofen, Erich and Li, Bin and Yang, Zhengfeng and Zhi, Lihong",
+ title = "Exact Certification of Global Optimality of Approximate
+ Factorizations Via Rationalizing Sums-Of-Squares
+ with Floating Point Scalars",
+ year = "2008",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'08",
+ pages = "155--163",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/08/KLYZ08.pdf}",
+ abstract = "
+ We generalize the technique by Peyrl and Parrilo [Proc. SNC 2007] to
+ computing lower bound certificates for several well-known
+ factorization problems in hybrid symbolic-numeric computation. The
+ idea is to transform a numerical sum-of-squares (SOS) representation
+ of a positive polynomial into an exact rational identity. Our
+ algorithms successfully certify accurate rational lower bounds near
+ the irrational global optima for benchmark approximate polynomial
+ greatest common divisors and multivariate polynomial irreducibility
+ radii from the literature, and factor coefficient bounds in the
+ setting of a model problem by Rump (up to $n=14$, factor degree $=13$).
+
+ The numeric SOSes produced by the current fixed precision
+ semi-definite programming (SDP) packages (SeDuMi, SOSTOOLS, YALMIP)
+ are usually too coarse to allow successful projection to exact SOSes
+ via Maple 11's exact linear algebra. Therefore, before projection we
+ refine the SOSes by rank-preserving Newton iteration. For smaller
+ problems the starting SOSes for Newton can be guessed without SDP
+ (``SDP-free SOS''), but for larger inputs we additionally appeal to
+ sparsity techniques in our SDP formulation.",
+ paper = "Kalt08b.pdf"
+}
+
+@InProceedings{Kalt06b,
+ author = "Kaltofen, Erich and Yang, Zhengfeng and Zhi, Lihong",
+ title = "Approximate greatest common divisors of several polynomials
+ with linearly constrained coefficients and singular polynomials",
+ year = "2006",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'06",
+ pages = "169--176",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/06/KYZ06.pdf}",
+ abstract = "
+ We consider the problem of computing minimal real or complex
+ deformations to the coefficients in a list of relatively prime real or
+ complex multivariate polynomials such that the deformed polynomials
+ have a greatest common divisor (GCD) of at least a given degree $k$. In
+ addition, we restrict the deformed coefficients by a given set of
+ linear constraints, thus introducing the {\sl linearly constrained
+ approximate GCD} problem. We present an algorithm based on a version
+ of the structured total least norm (STLN) method and demonstrate, on a
+ diverse set of benchmark polynomials, that the algorithm in practice
+ computes globally minimal approximations. As an application of the
+ linearly constrained approximate GCD problem, we present an STLN-based
+ method that computes for a real or complex polynomial the nearest real
+ or complex polynomial that has
+ a root of multiplicity at least $k$. We demonstrate that the
+ algorithm in practice computes, on the benchmark polynomials given in
+ the literature, the known globally optimal nearest singular
+ polynomials. Our algorithms can handle, via randomized
+ preconditioning, the difficult case when the nearest solution to a
+ list of real input polynomials actually has non-real complex
+ coefficients.",
+ paper = "Kalt06b.pdf"
+}
+
+@InCollection{Kalt05,
+ author = "Kaltofen, Erich and Yang, Zhengfeng and Zhi, Lihong",
+ title = "Structured Low Rank Approximation of a {Sylvester} Matrix",
+ booktitle = "Symbolic-Numeric Computation",
+ publisher = "Springer",
+ pages = "69--83",
+ year = "2005",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/05/KYZ05.pdf}",
+ abstract = "
+ The task of determining the approximate greatest common divisor (GCD)
+ of univariate polynomials with inexact coefficients can be formulated
+ as computing for a given Sylvester matrix a new Sylvester matrix of
+ lower rank whose entries are near the corresponding entries of that
+ input matrix. We solve the approximate GCD problem by a new method
+ based on structured total least norm (STLN) algorithms, in our case
+ for matrices with Sylvester structure. We present iterative algorithms
+ that compute an approximate GCD and that can certify an approximate
+ $\epsilon$-GCD when a tolerance $\epsilon$ is given on input. Each
+ single iteration is carried out with a number of floating point
+ operations that is of cubic order in the input degrees. We also
+ demonstrate the practical performance of our algorithms on a diverse
+ set of univariate pairs of polynomials.",
+ paper = "Kalt05.pdf"
+}
+
+@InProceedings{Kalt03a,
+ author = "Kaltofen, Erich and May, John",
+ title = "On Approximate Irreducibility of Polynomials in Several Variables",
+ year = "2003",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'03",
+ pages = "161--168",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/03/KM03.pdf}",
+ abstract = "
+ We study the problem of bounding all factorizable polynomials away
+ from a polynomial that is absolutely irreducible. Such separation
+ bounds are useful for testing whether a numerical polynomial is
+ absolutely irreducible, given a certain tolerance on its coefficients.
+ Using an absolute irreducibility criterion due to Ruppert, we are able
+ to find useful separation bounds, in several norms, for bivariate
+ polynomials. We also use Ruppert's criterion to derive new, more
+ effective Noether forms for polynomials of arbitrarily many
+ variables. These forms lead to small separation bounds for polynomials
+ of arbitrarily many variables.",
+ paper = "Kalt03a.pdf"
+}
+
+@InProceedings{Gao04a,
+ author = "Gao, Shuhong and Kaltofen, Erich and May, John P. and
+ Yang, Zhengfeng and Zhi, Lihong",
+ title = "Approximate factorization of multivariate polynomials via
+ differential equations",
+ year = "2004",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'04",
+ pages = "167--174",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/04/GKMYZ04.pdf}",
+ abstract = "
+ The input to our algorithm is a multivariate polynomial, whose complex
+ rational coefficients are considered imprecise with an unknown error
+ that causes $f$ to be irreducible over the complex numbers {\bf C}.
+ We seek to perturb the coefficients by a small quantity such that the
+ resulting polynomial factors over {\bf C}. Ideally, one would like to
+ minimize the perturbation in some selected distance measure, but no
+ efficient algorithm for that is known. We give a numerical
+ multivariate greatest common divisor algorithm and use it on a
+ numerical variant of algorithms by W. M. Ruppert and S. Gao. Our
+ numerical factorizer makes repeated use of singular value
+ decompositions. We demonstrate on a significant body of experimental
+ data that our algorithm is practical and can find factorizable
+ polynomials within a distance that is about the same in relative
+ magnitude as the input error, even when the relative error in the
+ input is substantial ($10^{-3}$).",
+ paper = "Gao04a.pdf"
+}
+
+@Article{Kalt08,
+ author = "Kaltofen, Erich and May, John and Yang, Zhengfeng and Zhi, Lihong",
+ title = "Approximate Factorization of Multivariate Polynomials Using
+ Singular Value Decomposition",
+ year = "2008",
+ journal = "Journal of Symbolic Computation",
+ volume = "43",
+ number = "5",
+ pages = "359--376",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/07/KMYZ07.pdf}",
+ paper = "Kalt08.pdf"
+}
+
+@InProceedings{Hitz99,
+ author = "Hitz, M.A. and Kaltofen, E. and Lakshman, Y.N.",
+ title = "Efficient Algorithms for Computing the Nearest Polynomial
+ With A Real Root and Related Problems",
+ booktitle = "Proc. 1999 Internat. Symp. Symbolic Algebraic Comput.",
+ pages = "205--212",
+ year = "1999",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/99/HKL99.pdf}",
+ paper = "Hitz99.pdf"
+}
+
+@InProceedings{Hitz98,
+ author = "Hitz, M. A. and Kaltofen, E.",
+ title = "Efficient Algorithms for Computing the Nearest Polynomial
+ with Constrained Roots",
+ booktitle = "Proc. 1998 Internat. Symp. Symbolic Algebraic Comput.",
+ year = "1998",
+ pages = "236--243",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/98/HiKa98.pdf}",
+ paper = "Hitz98.pdf"
+}
+
+@InProceedings{Diaz91,
+ author = "Diaz, A. and Kaltofen, E. and Schmitz, K. and Valente, T.",
+ title = "{DSC}: A System for Distributed Symbolic Computation",
+ booktitle = "Proc. 1991 Internat. Symp. Symbolic Algebraic Comput.",
+ pages = "323--332",
+ year = "1991",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/91/DKSV91.pdf}",
+ paper = "Diaz91.pdf"
+}
+
+@InProceedings{Chan94,
+ author = "Chan, K.C. and Diaz, A. and Kaltofen, E.",
+ editor = "R. J. Lopez",
+ title = "A distributed approach to problem solving in Maple",
+ booktitle = "Maple V: Mathematics and its Application",
+ pages = "13--21",
+ publisher = {Birkh\"auser},
+ year = "1994",
+ series = "Proceedings of the Maple Summer Workshop and Symposium (MSWS'94)",
+ address = "Boston",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/94/CDK94.ps.gz}",
+ paper = "Chan94.ps"
+}
+
+@InProceedings{Duma02,
+ author = "Dumas, J.-G. and Gautier, T. and Giesbrecht, M. and Giorgi, P.
+ and Hovinen, B. and Kaltofen, E. and Saunders, B.D. and
+ Turner, W.J. and Villard, G.",
+ title = "{LinBox}: A Generic Library for Exact Linear Algebra",
+ booktitle = "Proc. First Internat. Congress Math. Software ICMS 2002,
+ Beijing, China",
+ pages = "40--50",
+ year = "2002",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/02/Detal02.pdf}",
+ paper = "Duma02.pdf"
+}
+
+@inproceedings{Einw95,
+ author = "Einwohner, T. and Fateman, Richard J.",
+ title = "Searching Techniques for Integral Tables",
+ booktitle = "ISSAC 95",
+ year = "1995",
+ pages = "133-139",
+ abstract =
+ "We describe the design of data structures and a computer program for
+ storing a table of symbolic indefinite or definite integrals and
+ retrieving user-requested integrals on demand. Typical times are so
+ short that a preliminary look-up attempt prior to any algorithmic
+ integration approach seems justified. In one such test for a table
+ with around 700 entries, matches were found requiring an average of
+ 2.8 milliseconds per request, on a Hewlett Packard 9000/712
+ workstation.",
+ paper = "Einw95.pdf"
+}
+
+@InProceedings{Kalt05a,
+ author = "Kaltofen, Erich and Morozov, Dmitriy and Yuhasz, George",
+ title = "Generic Matrix Multiplication and Memory Management in {LinBox}",
+ year = "2005",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'05",
+ pages = "216--223",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/05/KMY05.pdf}",
+ paper = "Kalt05a.pdf"
+}
+
+@InProceedings{Diaz98,
+ author = "Diaz, A. and Kaltofen, E.",
+ title = "{FoxBox}, a System for Manipulating Symbolic Objects in Black Box
+ Representation",
+ booktitle = "Proc. 1998 Internat. Symp. Symbolic Algebraic Comput.",
+ publisher = "ACM Press",
+ year = "1998",
+ pages = "30--37",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/98/DiKa98.pdf}",
+ abstract =
+ "The FOXBOX system puts in practice the black box representation of
+ symbolic objects and provides algorithms for performing the symbolic
+ calculus with such representations. Black box objects are stored as
+ functions. For instance: a black box polynomial is a procedure that
+ takes values for the variables as input and evaluates the polynomial
+ at that given point. FOXBOX can compute the greatest common divisor
+ and factorize polynomials in black box representation, producing as
+ output new black boxes. It also can compute the standard sparse
+ distributed representation of a black box polynomial, for example, one
+ which was computed for an irreducible factor. We establish that the
+ black box representation of objects can push the size of symbolic
+ expressions far beyond what standard data structures could handle
+ before.
+
+ Furthermore, FOXBOX demonstrates the generic program design
+ methodology. The FOXBOX system is written in C++. C++ template
+ arguments provide for abstract domain types. Currently, FOXBOX can be
+ compiled with SACLIB 1.1, Gnu-MP 1.0, and NTL 2.0 as its underlying
+ field and polynomial arithmetic. Multiple arithmetic plugins can be
+ used in the same computation. FOXBOX provides an MPI compliant
+ distribution mechanism that allows for parallel and distributed
+ execution of FOXBOX programs. Finally, FOXBOX plugs into a
+ server/client-style Maple application interface.",
+ paper = "Diaz98.pdf"
+}
+
+@InProceedings{Diaz93,
+ author = "Diaz, A. and Kaltofen, E. and Lobo, A. and Valente, T.",
+ editor = "A. Miola",
+ title = "Process scheduling in {DSC} and the large sparse linear
+ systems challenge",
+ booktitle = "Proc. DISCO '93",
+ series = "Lect. Notes Comput. Sci.",
+ pages = "66--80",
+ year = "1993",
+ volume = "722",
+ publisher = "Springer-Verlag",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/93/DHKLV93.pdf}",
+ paper = "Diaz93.pdf"
+}
+
+@Article{Diaz95a,
+ author = "Diaz, A. and Hitz, M. and Kaltofen, E. and Lobo, A. and
+ Valente, T.",
+ title = "Process scheduling in {DSC} and the large sparse linear
+ systems challenge",
+ journal = "Journal of Symbolic Computation",
+ year = "1995",
+ volume = "19",
+ number = "1--3",
+ pages = "269--282",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/95/DHKLV95.pdf}",
+ paper = "Diaz95a.pdf"
+}
+
+@Article{Free88,
+ author = "Freeman, T.S. and Imirzian, G. and Kaltofen, E. and
+ Yagati, Lakshman",
+ title = "DAGWOOD: A system for manipulating polynomials given by
+ straight-line programs",
+ journal = "ACM Trans. Math. Software",
+ year = "1988",
+ volume = "14",
+ number = "3",
+ pages = "218--240",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/88/FIKY88.pdf}",
+ paper = "Free88.pdf"
+}
+
+@InCollection{Kalt10a,
+ author = "Kaltofen, Erich L.",
+ title = "The ``{Seven} {Dwarfs}'' of Symbolic Computation",
+ booktitle = "Numeric and Symbolic Scientific Computing
+ Progress and Prospects",
+ publisher = "Springer",
+ pages = "95--104",
+ year = "2010",
+ keywords = "survey",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/10/Ka10_7dwarfs.pdf}",
+ paper = "Kalt10a.pdf"
+}
+
+@inproceedings{Bro86,
+ author = "Bronstein, Manuel",
+ title = "Gsolve: a faster algorithm for solving systems of algebraic
+ equations",
+ booktitle = "Proc of 5th ACM SYMSAC",
+ year = "1986",
+ pages = "247-249",
+ isbn = "0-89791-199-7",
+ abstract = "
+ We apply the elimination property of Gr{\"o}bner bases with respect to
+ pure lexicographic ordering to solve systems of algebraic equations.
+ We suggest reasons for this approach to be faster than the resultant
+ technique, and give examples and timings that show that it is indeed
+ faster and more correct than MACSYMA's solve."
+}
+
+@inproceedings{Corl95,
+ author = "Corless, Robert M. and Gianni, Patrizia M. and Trager, Barry M.
+ and Watt, Stephen M.",
+ title = "The Singular Value Decomposition for Polynomial Systems",
+ booktitle = "ISSAC 95",
+ year = "1995",
+ pages = "195-207",
+ publisher = "ACM",
+ abstract =
+ "This paper introduces singular value decomposition (SVD) algorithms
+ for some standard polynomial computations, in the case where the
+ coefficients are inexact or imperfectly known. We first give an
+ algorithm for computing univariate GCD's which gives {\sl exact}
+ results for interesting {\sl nearby} problems, and give efficient
+ algorithms for computing precisely how nearby. We generalize this to
+ multivariate GCD computations. Next, we adapt Lazard's $u$-resultant
+ algorithm for the solution of overdetermined systems of polynomial
+ equations to the inexact-coefficient case. We also briefly discuss an
+ application of the modified Lazard's method to the location of
+ singular points on approximately known projections of algebraic curves.",
+ paper = "Corl95.pdf",
+ keywords = "axiomref",
+}
+
+@article{Lixx10,
+ author = "Li, Xiaoliang and Mou, Chenqi and Wang, Dongming",
+ title = "Decomposing polynomial sets into simple sets over finite fields:
+ The zero-dimensional case",
+ comment = "Provides clear polynomial algorithms",
+ journal = "Computers and Mathematics with Applications",
+ volume = "60",
+ pages = "2983-2997",
+ year = "2010",
+ abstract =
+ "This paper presents algorithms for decomposing any zero-dimensional
+ polynomial set into simple sets over an arbitrary finite field, with
+ an associated ideal or zero decomposition. As a key ingredient of
+ these algorithms, we generalize the squarefree decomposition approach
+ for univariate polynomials over a finite field to that over the field
+ product determined by a simple set. As a subprocedure of the
+ generalized squarefree decomposition approach, a method is proposed to
+ extract the $p$th root of any element in the field
+ product. Experiments with a preliminary implementation show the
+ effectiveness of our algorithms.",
+ paper = "Lixx10.pdf"
+}
+
+@book{Acto70,
+ author = "Acton, F.S.",
+ title = "Numerical Methods that (Usually) Work",
+ year = "1970",
+ publisher = "Harper and Row",
+ address = "New York, USA"
+}
+
+@book{Acto96,
+ author = "Acton, F.S.",
+ title = "Real Computing Made Real: Preventing Errors in Scientific
+ and Engineering Calculations",
+ year = "1996",
+ publisher = "Princeton University Press",
+ address = "Princeton, N.J. USA",
+ isbn = "0-691-03663-2"
+}
+
+@techreport{Ahre15,
+ author = "Ahrens, Peter and Nguyen, Hong Diep and Demmel, James",
+ title = "Efficient Reproducible Floating Point Summation and BLAS",
+ institution = "University of California, Berkeley",
+ year = "2015",
+ month = "December",
+ type = "technical report",
+ number = "229",
+ link = "\url{http://www.eecs.berkeley.edu/Pubs/TechRpts/2015/EECS-2015-229.pdf}",
+ abstract =
+ "We define reproducibility to mean getting bitwise identical results
+ from multiple runs of the same program, perhaps with different
+ hardware resources or other changes that should ideally not change the
+ answer. Many users depend on reproducibility for debugging or
+ correctness. However, dynamic scheduling of parallel computing
+ resources, combined with nonassociativity of floating point addition,
+ makes attaining reproducibility a challenge even for simple operations
+ like summing a vector of numbers, or more complicated operations like
+ Basic Linear Algebra Subprograms (BLAS). We describe an algorithm that
+ computes a reproducible sum of floating point numbers independent of
+ the order of summation. The algorithm depends only on a subset of the
+ IEEE Floating Point Standard 754-2008. It is communication-optimal, in
+ the sense that it does just one pass over the data in the sequential
+ case, or one reduction operation in the parallel case, requiring an
+ ``accumulator'' represented by just 6 floating point words (more can
+ be used if higher precision is desired). The arithmetic cost with a
+ 6-word accumulator is $7n$ floating point additions to sum $n$ words,
+ and (in IEEE double precision) the final error bound can be up to
+ $10^8$ times smaller than the error bound for conventional
+ summation. We describe the basic summation algorithm, the software
+ infrastructure used to build reproducible BLAS (ReproBLAS), and
+ performance results. For example, when computing the dot product of
+ 4096 double precision floating point numbers, we get a $4x$ slowdown
+ compared to Intel Math Kernel Library (MKL) running on an Intel Core
+ i7-2600 CPU operating at 3.4 GHz and 256 KB L2 Cache.",
+ paper = "Ahre15.pdf"
+}
+
+@article{Alef00,
+ author = "Alefeld, G. and Mayer, G.",
+ title = "Interval analysis: Theory and applications",
+ journal = "J. Comput. Appl. Math.",
+ volume = "121",
+ pages = "421-464",
+ year = "2000"
+}
+
+@article{Anda94,
+ author = "Anda, A.A. and Park, H.",
+ title = "Fast plane rotations with dynamic scaling",
+ journal = "SIAM J. Matrix Anal. Appl.",
+ volume = "15",
+ year = "1994",
+ pages = "162-174"
+}
+
+@phdthesis{Anda95,
+ author = "Anda, Andrew Allen",
+ title = "Self-Scaling Fast Plane Rotation Algorithms",
+ school = "University of Minnesota",
+ year = "1995",
+ month = "February",
+ abstract =
+ "A suite of {\sl self-scaling} fast circular plane rotation algorithms
+ is developed which obviates the monitoring and periodic rescaling
+ necessitated by the standard set of fast plane rotation
+ algorithms. Self-Scaling fast rotations dynamically preserve the
+ normed scaling of the diagonal factor matrix whereas standard fast
+ rotations exhibit divergent scaling. Variations on standard fast
+ rotation matrices are developed and algorithms which implement them
+ are offered. Self-Scaling fast rotations are shown to be essentially
+ as accurate as slow rotations and at least as efficient as standard
+ fast rotations. Computational experimental results utilizing the
+ Cray-2 illustrate the effectively stable scaling exhibited by
+ self-scaling fast rotations. Jacobi-class algorithms with one-sided
+ alterations are developed for the algebraic eigenvalue decomposition
+ using self-scaling fast plane rotations and one-sided
+ modifications. The new algorithms are shown to be both accurate and
+ efficient on both vector and parallel architectures. The utility is
+ described of applying fast plane rotations towards the rank-one update
+ and downdate of least squares factorizations. The equivalence is
+ illuminated of LINPACK, hyperbolic rotation, and fast negatively
+ weighted plane rotation downdating. Algorithms are presented which
+ apply self-scaling fast plane rotations to the QR factorization for
+ stiff least squares problems. Both fast and standard Givens
+ rotation-based algorithms are shown to produce accurate results
+ regardless of row sorting even with extremely heavily row weighted
+ matrices. Such matrices emanate, e.g. from equality constrainted least
+ squares problems solved via the weighting method. The necessity of
+ column sorting is emphasized. Numerical tests expose the Householder
+ QR factorization algorithm to be sensitive to row sorting and it
+ yields less accurate results for greater weights. Additionally, the
+ modified Gram-Schmidt algorithm is shown to be sensitive to row
+ sorting to a notably significant but lesser degree. Self-Scaling fast
+ plane rotation algorithms, having competitive computational
+ complexities, must therefore be the method of choice for the QR
+ factorization of stiff matrices. Timing results on the Cray 2 [XY]M/P,
+ and C90, of rotations both in and out of a matrix factorization
+ context are presented. Architectural features that can best exploit
+ the advantageous features of the new fast rotations are subsequently
+ discussed.",
+ paper = "Anda95.pdf"
+}
+
+@misc{LAPA99,
+ author = "Anderson, E. and others",
+ title = "{LAPACK} Users' Guide, Third Edition",
+ year = "1999",
+ month = "August",
+ link = "\url{http://www.netlib.org/lapack/lug/}"
+}
+
+@book{Ande99,
+ author = "Anderson, E. and Bai, Z. and Bischof, C. and Blackford, S. and
+ Demmel, J. and Dongarra, J. J. and DuCroz, J. and Greenbaum, A.
+ and Hammarling, S. and McKenney, A. and Sorensen, D. C.",
+ title = "LAPACK Users' Guide",
+ publisher = "SIAM",
+ year = "1999",
+ isbn = "0-89871-447-8",
+ link = "\url{http://www.netlib.org/lapack/lug/}"
+}
+
+@misc{Baud12,
+ author = "Baudin, Michael and Smith, Robert L.",
+ title = "A Robust Complex Division in Scilab",
+ link = "\url{http://arxiv.org/pdf/1210.4539.pdf}",
+ year = "2012",
+ month = "October",
+ abstract =
+ "The most widely used algorithm for floating point complex division,
+ known as Smith's method, may fail more often than expected. This
+ document presents two improved complex division algorithms. We present
+ a proof of robustness of the first improved algorithm. Numerical
+ simulations show that this algorithm performs well in practice and is
+ significantly more robust than other known implementations. By
+ combining additional scaling methods with this first algorithm, we
+ were able to create a second algorithm, which rarely fails.",
+ paper = "Baud12.pdf"
+}
+
+@article{Bind02,
+ author = "Bindel, D. and Demmel, J. and Kahan, W. and Marques, O.",
+ title = "On computing Givens rotations reliably and efficiently",
+ journal = "ACM Trans. Math. Software",
+ volume = "28",
+ pages = "206-238",
+ year = "2002"
+}
+
+@article{Blac97,
+ author = "Blackford, L. S. and Cleary, A. and Demmel, J. and Dhillon, I.
+ and Dongarra, J. J. and Hammarling, S. and Petitet, A. and
+ Ren, H. and Stanley, K. and Whaley, R. C.",
+ title = "Practical experience in the numerical dangers of heterogeneous
+ computing",
+ journal = "ACM Trans. Math. Software",
+ volume = "23",
+ pages = "133-147",
+ year = "1997"
+}
+
+@techreport{MMEF96,
+ author = "Boisvert, Ronald F. and Pozo, Roldan and Remington, Karin A.",
+ title = "The Matrix Market Exchange Formats: Initial Design",
+ year = "1996",
+ month = "December",
+ institution = "National Institute of Standards and Technology",
+ type = "Technical Report",
+ link = "\url{http://math.nist.gov/MatrixMarket/reports/MMformat.ps}",
+ abstract =
+ "We propose elementary ASCII exchange formats for matrices. Specific
+ instances of the format are defined for dense and sparse matrices with
+ real, complex, integer and pattern entries, with special cases for
+ symmetric, skew-symmetric and Hermitian matrices. Sparse matrices are
+ represented in a coordinate storage format. The overall file structure
+ is designed to allow future definition of other specialized matrix
+ formats, as well as for objects other than matrices.",
+ paper = "MMEF96.pdf"
+}
+
+@article{Bran97,
+ author = "Brankin, R. W. and Gladwell, I.",
+ title = "rksuite\_90: Fortran 90 software for ordinary differential
+ equation initial-value problems",
+ journal = "ACM Trans. Math. Software",
+ volume = "23",
+ pages = "402-415",
+ year = "1997"
+}
+
+@techreport{Bran92,
+ author = "Brankin, R. W. and Gladwell, I. and Shampine, L. F.",
+ title = "RKSUITE: A suite of {Runge-Kutta} codes for the initial value
+ problem for ODEs",
+ year = "1992",
+ institution = "Southern Methodist University, Dept of Math.",
+ number = "Softreport 92-S1",
+ type = "Technical Report"
+}
+
+@article{Bran93,
+ author = "Brankin, R. W. and Gladwell, I. and Shampine, L. F.",
+ title = "RKSUITE: A suite of explicit {Runge-Kutta} codes",
+ journal = "Contributions to Numerical Mathematics",
+ pages = "41-53",
+ publisher = "World Scientific",
+ year = "1993"
+}
+
+@book{Brit92,
+ author = "Britton, J. L.",
+ title = "Collected Works of A. M. Turing: Pure Mathematics",
+ publisher = "North-Holland",
+ year = "1992",
+ isbn = "0-444-88059-3"
+}
+
+@misc{Bron99,
+ author = "Bronstein, Manuel",
+ title = "Fast Deterministic Computation of Determinants of Dense Matrices",
+ link = "\url{http://www-sop.inria.fr/cafe/Manuel.Bronstein/publications/mb_papers.html}",
+ abstract = "
+ In this paper we consider deterministic computation of the exact
+ determinant of a dense matrix $M$ of integers. We present a new
+ algorithm with worst case complexity
+ \[O(n^4(log n+ log \verb?||M||?)+n^3 log^2 \verb?||M||?)\],
+ where $n$ is the dimension of the matrix
+ and \verb?||M||? is a bound on the entries in $M$, but with
+ average expected complexity
+ \[O(n^4+n^3(log n + log \verb?||M||?)^2)\],
+ assuming some plausible properties about the distribution of $M$.
+ We will also describe a practical version of the algorithm and include
+ timing data to compare this algorithm with existing ones. Our result
+ does not depend on ``fast'' integer or matrix techniques.",
+ paper = "Bron99.pdf"
+}
+
+@misc{Broo11,
+ author = "Brookes, Mike",
+ title = "The Matrix Reference Manual",
+ year = "2011",
+ link = "\url{http://www.ee.ic.ac.uk/hp/staff/dmb/matrix/intro.html}"
+
+}
+
+@misc{Chel06,
+ author = "Chellappa, Srinivas and Franchetti, Franz and P{\"u}schel, Markus",
+ title = "How To Write Fast Numerical Code: A Small Introduction",
+ year = "2006",
+ institution = "Carnegie Mellon University",
+ link = "\url{https://users.ece.cmu.edu/~franzf/papers/gttse07.pdf}",
+ algebra = "\newline\refto{package BLAS1 BlasLevelOne}",
+ abstract =
+ "The complexity of modern computing platforms has made it extremely
+ difficult to write numerical code that achieves the best possible
+ performance. Straightforward implementations based on algorithms that
+ minimize the operations count often fall short in performance by at
+ least one order of magnitude. This tutorial introduces the reader to a
+ set of general techniques to improve the performance of numerical
+ code, focusing on optimizations for the computer's memory hierarchy.
+ Further, program generators are discussed as a way to reduce the
+ implementation and optimization effort. Two running examples are
+ used to demonstrate these techniques: matrix-matrix multiplication
+ and the discrete Fourier transform.",
+ paper = "Chel06.pdf"
+}
+
+@book{Chai96,
+ author = "Chaitin-Chatelin, F. and Fraysse, V.",
+ title = "Lectures on Finite Precision Computations",
+ publisher = "SIAM",
+ year = "1996",
+ isbn = "0-89871-358-7"
+}
+
+@article{Chan83,
+ author = "Chan, T. F. and Golub, G. H. and LeVeque, R. J.",
+ title = "Algorithms for computing the sample variance: Analysis and
+ recommendations",
+ journal = "The American Statistician",
+ volume = "37",
+ pages = "242-247",
+ year = "1983"
+}
+
+@article{Cool03,
+ author = "Cools, R. and Haegemans, A.",
+ title = "Algorithm 824: CUBPACK: A package for automatic cubature;
+ framework description",
+ journal = "ACM Trans. Math. Software",
+ volume = "29",
+ pages = "287-296",
+ year = "2003"
+}
+
+@techreport{Coxx00,
+ author = "Cox, M. G. and Dainton, M. P. and Harris, P. M.",
+ title = "Testing spreadsheets and other packages used in metrology:
+ Testing functions for the calculation of standard deviation",
+ year = "2000",
+ institution = "National Physical Lab, Teddington, Middlesex UK",
+ type = "Technical Report",
+ number = "NPL Report CMSC07/00"
+}
+
+@article{Davi11,
+ author = "Davis, Timothy A. and Hu, Yifan",
+ title = "The University of Florida Sparse Matrix Collection",
+ journal = "ACM Trans. on Math. Software",
+ volume = "38",
+ number = "1",
+ year = "2011",
+ month = "November",
+ link = "\url{http://yifanhu.net/PUB/matrices.pdf}",
+ abstract =
+ "We describe the University of Florida Sparse Matrix Collection, a large
+ and actively growing set of sparse matrices that arise in real
+ applications. The Collection is widely used by the numerical linear
+ algebra community for the development and performance evaluation of
+ sparse matrix algorithms. It allows for robust and repeatable
+ experiments: robust because performance results with artificially
+ generated matrices can be misleading, and repeatable because matrices
+ are curated and made publicly available in many formats. Its matrices
+ cover a wide spectrum of domains, including those arising from
+ problems with underlying 2D or 3D geometry (as structural engineering,
+ computational fluid dynamics, model reduction, electromagnetics,
+ semiconductor devices, thermodynamics, materials, acoustics, computer
+ graphics/vision, robotics/kinematics, and other discretizations) and
+ those that typically do not have such geometry (optimization, circuit
+ simulation, economic and financial modeling, theoretical and quantum
+ chemistry, chemical process simulation, mathematics and statistics,
+ power networks, and other networks and graphs). We provide software
+ for accessing and managing the Collection, from MATLAB, Mathematica,
+ Fortran, and C, as well as an online search capability. Graph
+ visualization of the matrices is provided, and a new multilevel
+ coarsening scheme is proposed to facilitate this task.",
+ paper = "Davi11.pdf"
+}
+
+@techreport{Davi16,
+ author = "Davis, Timothy and Rajamanickam, Sivasankaran and
+ Sid-Lakhdar, Wissam M.",
+ title = "A survey of direct methods for sparse linear systems",
+ year = "2016",
+ month = "April",
+ institution = "Texas A\&M University",
+ type = "Technical Report",
+ link = "\url{http://faculty.cse.tamu.edu/davis/publications_files/survey_tech_report.pdf}",
+ abstract =
+ "Wilkinson defined a sparse matrix as one with enough zeros that it
+ pays to take advantage of them. This informal yet practical definition
+ captures the essence of the goal of direct methods for solving sparse
+ matrix problems. They exploit the sparsity of a matrix to solve
+ problems economically: much faster and using far less memory than if
+ all the entries of a matrix were stored and took part in explicit
+ computations. These methods form the backbone of a wide range of
+ problems in computational science. A glimpse of the breadth of
+ applications relying on sparse solvers can be seen in the origins of
+ matrices in published matrix benchmark collections. The goal of this
+ survey article is to impart a working knowledge of the underlying
+ theory and practice of sparse direct methods for solving linear
+ systems and least-squares problems, and to provide an overview of the
+ algorithms, data structures, and software available to solve these
+ problems, so that the reader can both understand the methods and know
+ how best to use them.",
+ paper = "Davi16.pdf"
+}
+
+@techreport{Demm88,
+ author = "Demmel, James and Kahan, W.",
+ title = "Computing Small Singular Values of Bidiagonal Matrices with
+ Guaranteed High Relative Accuracy",
+ year = "1988",
+ institution = "New York University",
+ type = "Technical Report",
+ number = "326",
+ abstract =
+ "Computing the singular values of a bidiagonal matrix is the final
+ phase of the standard algorithm for the singular value decomposition
+ of a general matrix. We present a new algorithm which computes all the
+ singular values of a bidiagonal matrix to high relative accuracy
+ independent of their magnitudes. In contrast, the standard algorithm
+ for bidiagonal matrices may compute small singular values with no
+ relative accuracy at all. Numerical experiments show that the new
+ algorithm is comparable in speed to the standard algorithm, and
+ frequently faster. We also show how to accurately compute tiny
+ eigenvalues of some classes of symmetric tridiagonal matrices using
+ the same technique.",
+ paper = "Demm88.pdf"
+}
+
+@techreport{Demm05,
+ author = "Demmel, James and Hida, Yozo and Kahan, W. and Li, Xiaoye S.
+ and Mukherjee, Soni and Riedy, E. Jason",
+ title = "Error Bounds from Extra Precise Iterative Refinement",
+ year = "2005",
+ institution = "University of California, Berkeley",
+ type = "Technical Report",
+ number = "165",
+ link = "\url{http://www.netlib.org/lapack/lawnspdf/lawn165.pdf}",
+ abstract =
+ "We present the design and testing of an algorithm for iterative
+ refinement of the solution of linear equations, where the residual is
+ computed with extra precision. This algorithm was originally proposed
+ in the 1960s as a means to compute very accurate solutions to all but
+ the most ill-conditioned linear systems of equations. However two
+ obstacles have until now prevented its adoption in standard subroutine
+ libraries like LAPACK: (1) There was no standard way to access the
+ higher precision arithmetic needed to compute residuals, and (2) it
+ was unclear how to compute a reliable error bound for the computed
+ solution. The completion of the new BLAS Technical Forum Standard has
+ recently removed the first obstacle. To overcome the second obstacle,
+ we show how a single application of iterative refinement can be used
+ to compute an error bound in any norm at small cost, and use this to
+ compute both an error bound in the usual infinity norm, and a
+ componentwise relative error bound.
+
+ We report extensive test results on over 6.2 million matrices of
+ dimension 5, 10, 100, and 1000. As long as a normwise
+ (resp. componentwise) condition number computed by the algorithm is
+ less than $1/\max(10,\sqrt{n})\epsilon_w$, the computed normwise
+ (resp. componentwise) error bound is at most
+ $2\max(10,\sqrt{n})\epsilon_w$, and indeed bounds the true error. Here,
+ $n$ is the matrix dimension and $\epsilon_w$ is a single precision
+ roundoff error. For worse conditioned problems, we get similarly small
+ correct error bounds in over 89.4\% of cases.",
+ paper = "Demm05.pdf"
+}
+
+@techreport{Demm08,
+ author = "Demmel, James and Hoemmen, Mark and Hida, Yozo
+ and Riedy, E. Jason",
+ title = "Non-Negative Diagonals and High Performance on Low-Profile
+ Matrices from Householder QR",
+ year = "2008",
+ institution = "University of California, Berkeley",
+ type = "Technical Report",
+ number = "203",
+ link = "\url{http://www.netlib.org/lapack/lawnspdf/lawn203.pdf}",
+ abstract =
+ "The Householder reflections used in LAPACK's QR factorization leave
+ positive and negative real entries along R's diagonal. This is
+ sufficient for most applications of QR factorizations, but a few
+ require that R have a non-negative diagonal. This note provides a new
+ Householder generation routine to produce a non-negative
+ diagonal. Additionally, we find that scanning for trailing zeros in
+ the generated reflections leads to large performance improvements when
+ applying reflections with many trailing zeros. Factoring low-profile
+ matrices, those with non-zero entries mostly near the diagonal (e.g
+ band matrices), now requires far fewer operations. For example, QR
+ factorization of matrices with profile width $b$ that are stored
+ densely in an $n \times n$ matrix improves from $O(n^3)$
+ to $O(n^2+nb^2)$.",
+ paper = "Demm08.pdf"
+}
+
+@article{Demm90,
+ author = "Demmel, James and Kahan, W.",
+ title = "Accurate Singular Values of Bidiagonal Matrices",
+ journal = "SIAM J. Sci. Stat. Comput.",
+ volume = "11",
+ number = "5",
+ pages = "873-912",
+ year = "1990",
+ link = "\url{http://www.netlib.org/lapack/lawnspdf/lawn03.pdf}",
+ abstract =
+ "Computing the singular values of a bidiagonal matrix is the final
+ phase of the standard algorithm for the singular value decomposition
+ of a general matrix. We present a new algorithm which computes all the
+ singular values of a bidiagonal matrix to high relative accuracy
+ independent of their magnitudes. In contrast, the standard algorithm
+ for bidiagonal matrices may compute small singular values with no
+ relative accuracy at all. Numerical experiments show that the new
+ algorithm is comparable in speed to the standard algorithm, and
+ frequently faster.",
+ paper = "Demm90.pdf"
+}
+
+@article{Demm92,
+ author = "Demmel, James and Veselic, Kresimir",
+ title = "Jacobi's Method is More Accurate than QR",
+ journal = "SIAM J. Matrix Anal. and Appl",
+ volume = "13",
+ number = "4",
+ year = "1992",
+ pages = "1204-1245",
+ link = "\url{http://www.netlib.org/lapack/lawnspdf/lawn15.pdf}",
+ abstract =
+ "We show that Jacobi's method (with a proper stopping criterion)
+ computes small eigenvalues of symmetric positive definite matrices
+ with a uniformly better relative accuracy bound than QR, divide and
+ conquer, traditional bisection, or any algorithm which first involves
+ tridiagonalizing the matrix. In fact, modulo an assumption based on
+ extensive numerical tests, we show that Jacobi's method is optimally
+ accurate in the following sense: if the matrix is such that small
+ relative errors in its entries cause small relative errors in its
+ eigenvalues, Jacobi will compute them with nearly this accuracy. In
+ other words, as long as the initial matrix has small relative errors
+ in each component, even using infinite precision will not improve on
+ Jacobi (modulo factors of dimensionality). We also show the
+ eigenvectors are computed more accurately by Jacobi than previously
+ thought possible. We prove similar results for using one-sided Jacobi
+ for the singular value decomposition of a general matrix.",
+ paper = "Demm92.pdf"
+}
+
+@article{Rijk98,
+ author = "de Rijk, P.P.",
+ title = "A One-sided Jacobi Algorithm for Computing the Singular Value
+ Decomposition on a vector computer",
+ journal = "SIAM J. Sci. Stat. Comput.",
+ volume = "10",
+ number = "2",
+ month = "March",
+ year = "1989",
+ pages = "359-371",
+ abstract =
+ "An old algorithm for computing the singular value decomposition,
+ which was first mentioned by Hestenes, has gained renewed interest
+ because of its properties of parallelism and vectorizability. Some
+ computational modifications are given and a comparison with the
+ well-known Golub-Reinsch algorithm is made. Comparative experiments on
+ a CYBER 205 are reported."
+}
+
+@phdthesis{Dhil97,
+ author = "Dhillon, Inderjit Singh",
+ title = "A New $O(n^2)$ Algorithm for the Symmetric Tridiagonal
+ Eigenvalue/Eigenvector Problem",
+ school = "University of California, Berkeley",
+ year = "1997",
+ link = "\url{http://www.eecs.berkeley.edu/Pubs/TechRpts/1997/CSD-97-971.pdf}",
+ abstract =
+ "Computing the eigenvalues and orthogonal eigenvectors of an $n\times n$
+ symmetric tridiagonal matrix is an important task that arises while
+ solving any symmetric eigenproblem. All practical software requires
+ $O(n^3)$ time to compute all the eigenvectors and ensure their
+ orthogonality when eigenvalues are close. In the first part of this
+ thesis we review earlier work and show how some existing
+ implementations of inverse iteration can fail in surprising ways.
+
+ The main contribution of this thesis is a new $O(n^2)$, easily
+ parallelizable algorithm for solving the tridiagonal
+ eigenproblem. Three main advances lead to our new algorithm. A
+ tridiagonal matrix is traditionally represented by its diagonal and
+ off-diagonal elements. Our most important advance is in recognizing
+ that its bidiagonal factors are ``better'' for computational
+ purposes. The use of bidiagonals enables us to invoke a relative
+ criterion to judge when eigenvalues are ``close''. The second advance
+ comes with using multiple bidiagonal factorizations in order to
+ compute different eigenvectors independently of each other. Thirdly,
+ we use carefully chosen dqds-like transformations as inner loops to
+ compute eigenpairs that are highly accurate and ``faithful'' to the
+ various bidiagonal representations. Orthogonality of the eigenvectors
+ is a consequence of this accuracy. Only $O(n)$ work per eigenpair is
+ needed by our new algorithm.
+
+ Conventional wisdom is that there is usually a trade-off between speed
+ and accuracy in numerical procedures, i.e., higher accuracy can be
+ achieved only at the expense of greater computing time. An interesting
+ aspect of our work is that increased accuracy in the eigenvalues and
+ eigenvectors obviates the need for explicit orthogonalization and
+ leads to greater speed.
+
+ We present timing and accuracy results comparing a computer
+ implementation of our new algorithm with four existing EISPACK and
+ LAPACK software routines. Our test-bed contains a variety of
+ tridiagonal matrices, some coming from quantum chemistry
+ applications. The numerical results demonstrate the superiority of
+ our new algorithm. For example, on a matrix of order 966 that occurs in
+ the modeling of a biphenyl molecule our method is about 10 times
+ faster than LAPACK's inverse iteration on a serial IBM RS/6000
+ processor and nearly 100 times faster on a 128 processor IBM SP2
+ parallel machine.",
+ paper = "Dhil97.pdf"
+}
+
+@article{Dhil04a,
+ author = "Dhillon, Inderjit S. and Parlett, Beresford N.",
+ title = "Multiple representations to compute orthogonal eigenvectors
+ of symmetric tridiagonal matrices",
+ journal = "Linear Algebra and its Applications",
+ volume = "387",
+ number ="1",
+ pages = "1-28",
+ year = "2004",
+ month = "August",
+ abstract =
+ "In this paper we present an $O(nk)$ procedure, Algorithm $MR^3$, for
+ computing $k$ eigenvectors of an $n\times n$ symmetric tridiagonal
+ matrix $T$. A salient feature of the algorithm is that a number of
+ different $LDL^t$ products ($L$ unit lower triangular, $D$ diagonal)
+ are computed. In exact arithmetic each $LDL^t$ is a factorization of a
+ translate of $T$. We call the various $LDL^t$ products
+ {\sl representations} (of $T$) and, roughly speaking, there is a
+ representation for each cluster of close eigenvalues. The unfolding of
+ the algorithm, for each matrix, is well described by a
+ {\sl representation tree}. We present the tree and use it to show that if
+ each representation satisfies three prescribed conditions then the
+ computed eigenvectors are orthogonal to working accuracy and have
+ small residual norms with respect to the original matrix $T$.",
+ paper = "Dhil04a.pdf"
+}
+
+@article{Dhil04,
+ author = "Dhillon, Inderjit S. and Parlett, Beresford N.",
+ title = "Orthogonal Eigenvectors and Relative Gaps",
+ journal = "SIAM Journal on Matrix Analysis and Applications",
+ volume = "25",
+ year = "2004",
+ abstract =
+ "Let $LDL^t$ be the triangular factorization of a real symmetric
+ $n\times n$ tridiagonal matrix so that $L$ is a unit lower bidiagonal
+ matrix, $D$ is diagonal. Let $(\lambda,\nu)$ be an eigenpair,
+ $\lambda \ne 0$, with the property that both $\lambda$ and $\nu$ are
+ determined to high relative accuracy by the parameters in $L$ and $D$.
+ Suppose also that the relative gap between $\lambda$ and its nearest
+ neighbor $\mu$ in the spectrum exceeds $1/n$: $n|\lambda-\mu| > |\lambda|$.
+
+ This paper presents a new $O(n)$ algorithm and a proof that, in the
+ presence of round-off errors, the algorithm computes an approximate
+ eigenvector $\hat{\nu}$ that is accurate to working precision
+ $|\sin \angle(\nu,\hat{\nu})| = O(n\epsilon)$, where $\epsilon$ is the
+ round-off unit. It follows that $\hat{\nu}$ is numerically orthogonal to
+ all the other eigenvectors. This result forms part of a program to
+ compute numerically orthogonal eigenvectors without resorting to the
+ Gram-Schmidt process.
+
+ The contents of this paper provide a high-level description and
+ theoretical justification for LAPACK (version 3.0) subroutine DLAR1V.",
+ paper = "Dhil04.pdf"
+}
+
+@article{Dods83,
+ author = "Dodson, D. S.",
+ title = "Corrigendum: Remark on 'Algorithm 539: Basic Linear Algebra
+ Subroutines for FORTRAN usage",
+ journal = "ACM Trans. Math. Software",
+ volume = "9",
+ pages = "140",
+ year = "1983"
+}
+
+@article{Dods82,
+ author = "Dodson, D. S. and Grimes, R. G.",
+ title = "Remark on algorithm 539: Basic Linear Algebra Subprograms for
+ Fortran usage",
+ journal = "ACM Trans. Math. Software",
+ volume = "8",
+ pages = "403-404",
+ year = "1982"
+}
+
+@article{Dong88,
+ author = "Dongarra, J. J. and DuCroz, J. and Hammarling, S. and
+ Hanson, R. J.",
+ title = "An extended set of FORTRAN Basic Linear Algebra Subprograms",
+ journal = "ACM Trans. Math. Software",
+ volume = "14",
+ pages = "1-32",
+ year = "1988"
+}
+
+@article{Dong88a,
+ author = "Dongarra, J. J. and DuCroz, J. and Hammarling, S. and
+ Hanson, R. J.",
+ title = "Corrigenda: 'An extended set of FORTRAN Basic Linear Algebra
+ Subprograms",
+ journal = "ACM Trans. Math. Software",
+ volume = "14",
+ pages = "399",
+ year = "1988"
+}
+
+@article{Dong90,
+ author = "Dongarra, J. and DuCroz, J. and Duff, I. S. and Hammarling, S.",
+ title = "A set of Level 3 Basic Linear Algebra Subprograms",
+ journal = "ACM Trans. Math. Software",
+ volume = "16",
+ pages = "1-28",
+ year = "1990"
+}
+
+@article{Drma97,
+ author = "Drmac, Zlatko",
+ title = "Implementation of Jacobi Rotations for Accurate Singular Value
+ Computation in Floating Point Arithmetic",
+ journal = "SIAM Journal on Scientific Computing",
+ volume = "18",
+ number = "4",
+ year = "1997",
+ month = "July",
+ pages = "1200-1222",
+ abstract =
+ "In this paper we consider how to compute the singular value
+ decomposition (SVD) $A = U\Sigma V^{\tau}$ of
+ $A=[a_1,a_2] \in R^{m\times 2}$
+ accurately in floating point arithmetic. It is shown
+ how to compute the Jacobi rotation $V$ (the right singular vector
+ matrix) and how to compute $AV=U\Sigma$ even if the floating point
+ representation of $V$ is the identity matrix. In the case
+ underflow can produce the identity matrix as
+ the floating point value of $V$ even for $a_1$, $a_2$ that are far from
+ being mutually orthogonal. This can cause loss of accuracy and failure
+ of convergence of the floating point implementation of the Jacobi
+ method for computing the SVD. The modified Jacobi method recommended
+ in this paper can be implemented as a reliable and highly accurate
+ procedure for computing the SVD of general real matrices whenever the
+ exact singular values do not exceed the underflow or overflow limits."
+}
+
+@article{Drma08a,
+ author = "Drmac, Zlatko and Veselic, Kresimir",
+ title = "New fast and accurate Jacobi SVD algorithm I",
+ journal = "SIAM J. Matrix Anal. Appl.",
+ volume = "35",
+ number = "2",
+ year = "2008",
+ pages = "1322-1342",
+ comment = "LAPACK Working note 169",
+ link = "\url{http://www.netlib.org/lapack/lawnspdf/lawn169.pdf}",
+ abstract =
+ "This paper is the result of contrived efforts to break the barrier
+ between numerical accuracy and run time efficiency in computing the
+ fundamental decomposition of numerical linear algebra - the singular
+ value decomposition (SVD) of a general dense matrix. It is an
+ unfortunate fact that the numerically most accurate one-sided Jacobi
+ SVD algorithm is several times slower than generally less accurate
+ bidiagonalization based methods such as the QR or the divide and
+ conquer algorithm. Despite its sound numerical qualities, the Jacobi
+ SVD is not included in the state of the art matrix computation
+ libraries and it is even considered obsolete by some leading
+ researchers. Our quest for a highly accurate and efficient SVD
+ algorithm has led us to a new, superior variant of the Jacobi
+ algorithm. The new algorithm has inherited all good high accuracy
+ properties, and it outperforms not only the best implementations of
+ the one-sided Jacobi algorithm but also the QR algorithm. Moreover,
+ it seems that the potential of the new approach is yet to be fully
+ exploited.",
+ paper = "Drma08a.pdf"
+}
+
+@article{Drma08b,
+ author = "Drmac, Zlatko and Veselic, Kresimir",
+ title = "New fast and accurate Jacobi SVD algorithm II",
+ journal = "SIAM J. Matrix Anal. Appl.",
+ volume = "35",
+ number = "2",
+ year = "2008",
+ pages = "1343-1362",
+ comment = "LAPACK Working note 170",
+ link = "\url{http://www.netlib.org/lapack/lawnspdf/lawn170.pdf}",
+ abstract =
+ "This paper presents new implementation of one-sided Jacobi SVD for
+ triangular matrices and its use as the core routine in a new
+ preconditioned Jacobi SVD algorithm, recently proposed by the
+ authors. New pivot strategy exploits the triangular form and uses the
+ fact that the input triangular matrix is the result of rank revealing
+ QR factorization. If used in the preconditioned Jacobi SVD algorithm,
+ described in the first part of this report, it delivers superior
+ performance leading to the currently fastest method for computing SVD
+ decomposition with high relative accuracy. Furthermore, the efficiency
+ of the new algorithm is comparable to the less accurate
+ bidiagonalization based methods. The paper also discusses underflow
+ issues in floating point implementation, and shows how to use
+ perturbation theory to fix the imperfectness of machine arithmetic on
+ some systems.",
+ paper = "Drma08b.pdf"
+}
+
+@article{Drma08c,
+ author = "Drmac, Zlatko and Bujanovic, Zvonimir",
+ title = "On the failure of rank-revealing QR factorization software -
+ a case study.",
+ journal = "ACM Trans. Math. Softw.",
+ volume = "35",
+ number = "2",
+ year = "2008",
+ pages = "1-28",
+ comment = "LAPACK Working note 176",
+ link = "\url{http://www.netlib.org/lapack/lawnspdf/lawn176.pdf}",
+ abstract =
+ "This note reports an unexpected and rather erratic behavior of the
+ LAPACK software implementation of the QR factorization with
+ Businger-Golub column pivoting. It is shown that, due to finite
+ precision arithmetic, software implementation of the factorization can
+ catastrophically fail to produce triangular factor with the structure
+ characteristic to the Businger-Golub pivot strategy. The failure of
+ current {\sl state of the art} software, and proposed alternative
+ implementations are analyzed in detail.",
+ paper = "Drma08c.pdf"
+}
+
+@article{Dubr83,
+ author = "Dubrulle, A. A.",
+ title = "A class of numerical methods for the computation of Pythagorean
+ sums",
+ journal = "IBM J. Res. Develop.",
+ volume = "27",
+ number = "6",
+ pages = "582-589",
+ year = "1983"
+}
+
+@book{Eina05,
+ author = "Einarsson, B.",
+ title = "Accuracy and Reliability in Scientific Computing",
+ publisher = "SIAM",
+ year = "2005",
+ isbn = "0-89871-584-9",
+ link = "\url{http://www.nsc.liu.se/wg25/book/}"
+}
+
+@article{Elmr00,
+ author = "Elmroth, E. and Gustavson, F. G.",
+ title = "Applying recursion to serial and parallel QR factorization leads
+ to better performance",
+ journal = "IBM Journal of Research and Development",
+ volume = "44",
+ number = "4",
+ month = "July",
+ year = "2000",
+ pages = "605--624",
+ doi = "10.1.1.33.1820",
+ link = "\url{http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.33.1820}",
+ abstract =
+ "We present new recursive serial and parallel algorithms for QR
+ factorization of an $m$ by $n$ matrix. They improve performance. The
+ recursion leads to an automatic variable blocking, and it also
+ replaces a Level 2 part in a standard algorithm with Level 3
+ operations. However, there are significant additional costs for
+ creating and performing the updates, which prohibit the efficient use
+ of the recursion for large $n$. We present a quantitative analysis of
+ these extra costs. This analysis leads us to introduce a hybrid
+ recursive algorithm that outperforms the LAPACK algorithm DGEQRF by
+ about 20\% for large square matrices up to almost a factor of 3 for
+ tall thin matrices. Uniprocessor performance results are presented for
+ two IBM RS/6000 SP nodes -- a 120-MHz IBM POWER2 node and one
+ processor of a four-way 332-MHz IBM PowerPC 604e SMP node. The hybrid
+ recursive algorithm reaches more than 90\% of the theoretical peak
+ performance of the POWER2 node. Compared to standard block algorithms,
+ the recursive approach also shows a significant advantage in the
+ automatic tuning obtained from its automatic variable blocking. A
+ successful parallel implementation on a four-way 332-MHz IBM PPC604e
+ SMP node based on dynamic load balancing is presented. For two, three,
+ and four processors it shows speedups of up to 1.97, 2.99, and 3.97."
+}
+
+@misc{Fate13,
+ author = "Fateman, Richard J.",
+ title = "Interval Arithmetic, Extended Numbers and Computer Algebra Systems",
+ year = "2013",
+ link = "\url{http://www.cs.berkeley.edu/~fateman/papers/interval.pdf}",
+ abstract =
+ "Many ambitious computer algebra systems were initially designed in a
+ flush of enthusiasm, with the goal of automating any symbolic
+ mathematical manipulation ``correctly''. Historically, this approach
+ results in programs that implicitly used certain identities to
+ simplify expressions. These identities, which very likely seemed
+ universally true to the programmers in the heat of writing the CAS
+ (and often were true in well-known abstract algebraic domains) later
+ needed re-examination when such systems were extended for dealing with
+ kinds of objects unanticipated in the original design. These new
+ objects are generally introduced to the CAS by extending
+ ``generically'' the arithmetic of other operations. For example,
+ approximate floats do not have the mathematical properties of exact
+ integers or rationals. Complex numbers may strain a system designed
+ for real-valued variables. In the situation examined here, we consider
+ two categories of ``extended'' numbers: $\infty$ and {\sl undefined},
+ and real intervals. We comment on issues raised by these two
+ troublesome notions, how their introduction into a computer algebra
+ system may require a (sometimes painful) reconsideration and redesign
+ of parts of the program, and how they are related. An alternative
+ (followed most notably by the Axiom system) is to essentially envision
+ a ``meta'' CAS defined in terms of categories and inheritance with
+ only the most fundamental built-in concepts; from these one can build
+ many variants of specific CAS features. This approach is appealing but
+ can fail to accommodate extensions that violate some mathematical
+ tenets in the cause of practicality.",
+ paper = "Fate13.pdf",
+ keywords = "axiomref"
+}
+
+@techreport{Fern92,
+ author = "Fernando, K. Vince and Parlett, Beresford N.",
+ title = "Accurate Singular Values and Differential qd Algorithms",
+ year = "1992",
+ institution = "University of California, Berkeley",
+ type = "Technical Report",
+ number = "PAM-554",
+ link = "\url{http://www.dtic.mil/dtic/tr/fulltext/u2/a256582.pdf}",
+ abstract =
+ "We have discovered a new implementation of the qd algorithm that has
+ a far wider domain of stability than Rutishauser's version. Our
+ algorithm was developed from an examination of the LR-Cholesky
+ transformation and can be adapted to parallel computation in stark
+ contrast to traditional qd. Our algorithm also yields useful a
+ posteriori upper and lower bounds on the smallest singular value of a
+ bidiagonal matrix.
+
+ The zero-shift bidiagonal QR of Demmel and Kahan computes the smallest
+ singular values to maximal relative accuracy and the others to maximal
+ absolute accuracy with little or no degradation in efficiency when
+ compared with the LINPACK code. Our algorithm obtains maximal relative
+ accuracy for all the singular values and runs at least four times
+ faster than the LINPACK code.",
+ paper = "Fern92.pdf"
+}
+
+@article{Fors70,
+ author = "Forsythe, G. E.",
+ title = "Pitfalls in computation, or why a math book isn't enough",
+ journal = "Amer. Math. Monthly",
+ volume = "77",
+ pages = "931-956",
+ year = "1970"
+}
+
+@incollection{Fors69,
+ author = "Forsythe, G. E.",
+ title = "What is a satisfactory quadratic equation solver",
+ booktitle = "Constructive Aspects of the Fundamental Theorem of Algebra",
+ pages = "53-61",
+ publisher = "Wiley",
+ year = "1969"
+}
+
+@article{Foxx71,
+ author = "Fox, L.",
+ title = "How to get meaningless answers in scientific computations (and
+ what to do about it)",
+ journal = "IMA Bulletin",
+ volume = "7",
+ pages = "296-302",
+ year = "1971"
+}
+
+@article{Gent74,
+ author = "Gentleman, W. Morven and Marovich, Scott B.",
+ title = "More on algorithms that reveal properties of floating point
+ arithmetic units.",
+ journal = "Comm. of the ACM",
+ year = "1974",
+ month = "April",
+ volume = "17",
+ number = "5",
+ pages = "276-277",
+ abstract =
+ "In the interests of producing portable mathematical software, it is
+ highly desirable for a program to be able directly to obtain
+ fundamental properties of the environment in which it is to run. The
+ installer would then not be obliged to change appropriate magic
+ constants in the source code, and the user would not have to provide
+ information he may very well not understand. Until the standard
+ definitions of programming languages are changed to require builtin
+ functions that provide this information, we will have to resort to
+ writing routines that discover it."
+}
+
+@techreport{Give54,
+ author = "Givens, W.",
+ title = "Numerical computation of the characteristic values of a real
+ symmetric matrix",
+ year = "1954",
+ institution = "Oak Ridge National Laboratory",
+ type = "Technical Report",
+ number = "ORNL-1574"
+}
+
+@article{Golu65,
+ author = "Golub, G.H.",
+ title = "Numerical methods for solving linear least squares problems",
+ journal = "Numer. Math.",
+ volume = "7",
+ pages = "206-216",
+ year = "1965"
+}
+
+@book{Golu89,
+ author = "Golub, Gene H. and Van Loan, Charles F.",
+ title = "Matrix Computations",
+ publisher = "Johns Hopkins University Press",
+ year = "1989",
+ isbn = "0-8018-3772-3"
+}
+
+@book{Golu96,
+ author = "Golub, Gene H. and Van Loan, Charles F.",
+ title = "Matrix Computations",
+ publisher = "Johns Hopkins University Press",
+ isbn = "978-0-8018-5414-9",
+ year = "1996"
+}
+
+@article{Hamm85,
+ author = "Hammarling, S.",
+ title = "The Singular Value Decomposition in Multivariate Statistics",
+ journal = "ACM Signum Newsletter",
+ volume = "20",
+ number = "3",
+ pages = "2--25",
+ year = "1985"
+}
+
+@incollection{Hamm05,
+ author = "Hammarling, Sven",
+ title = "An Introduction to the Quality of Computed Solutions",
+ booktitle = "Accuracy and Reliability in Scientific Computing",
+ year = "2005",
+ publisher = "SIAM",
+ pages = "43-76",
+ link = "\url{http://eprints.ma.man.ac.uk/101/}",
+ paper = "Hamm05.pdf"
+}
+
+@mastersthesis{Harg02,
+ author = "Hargreaves, G.",
+ title = "Interval analysis in MATLAB",
+ school = "University of Manchester, Dept. of Mathematics",
+ year = "2002"
+}
+
+@book{High05,
+ author = "Higham, D. J. and Higham, N. J.",
+ title = "MATLAB Guide",
+ publisher = "SIAM",
+ year = "2005",
+ isbn = "0-89871-578-4"
+}
+
+@article{High88,
+ author = "Higham, Nicholas J.",
+ title = "FORTRAN codes for estimating the one-norm of a real or complex
+ matrix, with applications to condition estimation",
+ journal = "ACM Trans. Math. Soft",
+ volume = "14",
+ number = "4",
+ pages = "381-396",
+ year = "1988"
+}
+
+@misc{High98,
+ author = "Higham, Nicholas J.",
+ title = "Can you 'count' on your computer?",
+ link = "\url{http://www.maths.man.ac.uk/~higham/talks/}",
+ year = "1998"
+}
+
+@book{High02,
+ author = "Higham, Nicholas J.",
+ title = "Accuracy and stability of numerical algorithms",
+ publisher = "SIAM",
+ isbn = "0-89871-521-0",
+ year = "2002"
+}
+
+@article{High86,
+ author = "Higham, Nicholas J.",
+ title = "Efficient Algorithms for Computing the Condition Number of a
+ Tridiagonal Matrix",
+ journal = "SIAM J. Sci. Stat. Comput.",
+ volume = "7",
+ number = "1",
+ year = "1986",
+ month = "January",
+ abstract =
+ "Let $A$ be a tridiagonal matrix of order $n$. We show that it is
+ possible to compute $||A^{-1}||_{\infty}$ and hence
+ ${\rm cond}_{\infty}(A)$ in $O(n)$ operations. Several algorithms
+ which perform this task are given and their numerical properties are
+ investigated.
+
+ If $A$ is also positive definite then $||A^{-1}||_{\infty}$ can be
+ computed as the norm of the solution to a positive definite
+ tridiagonal linear system whose coefficient matrix is closely related
+ to $A$. We show how this computation can be carried out in parallel
+ with the solution of a linear system $Ax=b$. In particular we describe
+ some simple modifications to the LINPACK routine SPTSL which enable
+ this routine to compute ${\rm cond}_1(A)$, efficiently, in addition to
+ solving $Ax=b$.",
+ paper = "High86.pdf"
+}
+
+@misc{IEEE85,
+ author = "IEEE",
+ title = "ANSI/IEEE Standard for Binary Floating Point Arithmetic:
+ Std 754-1985",
+ publisher = "IEEE Press",
+ year = "1985"
+}
+
+@misc{IEEE87,
+ author = "IEEE",
+ title = "ANSI/IEEE Standard for Radix Independent Floating Point Arithmetic:
+ Std 854-1987",
+ publisher = "IEEE Press",
+ year = "1987"
+}
+
+@book{Isaa94,
+ author = "Isaacson, E. and Keller, H. B.",
+ title = "Analysis of Numerical Methods",
+ publisher = "Dover",
+ year = "1994",
+ isbn = "0-486-68029-0"
+}
+
+@article{Kags89,
+ author = "Kagstrom, Bo and Westin, L.",
+ title = "Generalized Schur Methods with Condition Estimators for Solving
+ the Generalized Sylvester Equation",
+ journal = "IEEE Transactions on Automatic Control",
+ volume = "34",
+ number = "7",
+ year = "1989",
+ month = "July",
+ pages = "745-751",
+ abstract =
+ "Stable algorithms are presented for solving the generalized Sylvester
+ equation. They are based on orthogonal equivalence transformations of
+ the original problem. Perturbation theory and rounding error analysis
+ are included. Condition estimators (${\rm Dif}^{-1}$-estimators) are
+ developed which when substituted into derived error bounds give
+ accuracy estimates of a computed solution. Results from numerical
+ experiments on well-conditioned and ill-conditioned problems are
+ reported."
+}
+
+@incollection{Kags93a,
+ author = "Kagstrom, B.",
+ title = "A Direct Method for Reordering Eigenvalues in the
+ Generalized Real Schur Form of a Regular Matrix Pair (A, B)",
+ year = "1993",
+ pages = "195-218",
+ booktitle = "Linear Algebra for Large Scale and Real-Time Applications",
+ volume = "232",
+ series = "NATO ASI Series",
+ isbn = "978-90-481-4246-0",
+ abstract =
+ "A direct orthogonal equivalence transformation method for reordering
+ the eigenvalues along the diagonal in the generalized real Schur form
+ of a regular matrix pair $(A,B)$ is presented. Each swap of two
+ adjacent eigenvalues (real, or complex conjugate pairs) involves
+ solving a generalized Sylvester equation and the construction of two
+ orthogonal transformation matrices from certain eigenspaces associated
+ with the corresponding diagonal blocks. An error analysis of the
+ direct reordering method is presented. Results from numerical
+ experiments on well-conditioned as well as ill-conditioned problems
+ illustrate the stability and the accuracy of the method. Finally, a
+ direct reordering algorithm with controlled backward error is described."
+}
+
+@techreport{Kags93,
+ author = "Kagstrom, Bo and Poromaa, Peter",
+ title = "LAPACK-Style Algorithms and Software for Solving the Generalized
+ Sylvester Equation and Estimating the Separation between
+ Regular Matrix Pairs",
+ year = "1993",
+ month = "December",
+ link = "\url{http://www.netlib.org/lapack/lawnspdf/lawn75.pdf}",
+ comment = "LAPACK Working Note 75",
+ institution = "NETLIB",
+ abstract =
+ "Level 3 algorithms for solving the generalized Sylvester equation
+ $(AR-LB,DR-LE)=(C,F)$ and the transposed analogue
+ $(A^TU+D^TV,-UB^T-VE^T)=(C,F)$ are presented. These blocked algorithms
+ permit reuse of data in complex memory hierarchies of current advanced
+ computer architectures. The separation of two regular matrix pairs
+ $(A,D)$ and $(B,E)$, Dif[$(A,D)$,$(B,E)$], is defined in terms of the
+ generalized Sylvester operator $(AR-LB,DR-LE)$. Robust, efficient and
+ reliable Dif-estimators are presented. The basic problem is to find a
+ lower bound on Dif$^{-1}$, which can be done by solving generalized
+ Sylvester equations in triangular form. Frobenius norm-based and one
+ norm based Dif estimators are described and evaluated. These estimates
+ lead to computable error bounds for the generalized Sylvester
+ equation. The one-norm-based estimator makes the condition estimation
+ uniform with LAPACK. Fortran 77 software that implements our
+ algorithms for solving generalized Sylvester equations, and for
+ computing error bounds and Dif-estimators are presented. Computational
+ experiments that illustrate the accuracy, efficiency and reliability
+ of our software are also described.",
+ paper = "Kags93.pdf"
+}
+
+@misc{Kags94,
+ author = "Kagstrom, Bo and Poromaa, Peter",
+ title = "Computing Eigenspaces with Specified Eigenvalues of a Regular
+ Matrix Pair (A, B) and Condition Estimation: Theory,
+ Algorithms and Software",
+ year = "1994",
+ month = "April",
+ link = "\url{http://www.netlib.org/lapack/lawns/lawn87.ps}",
+ abstract =
+ "Theory, algorithms and LAPACK style software for computing a pair of
+ deflating subspaces with specified eigenvalues of a regular matrix
+ pair $(A,B)$ and error bounds for computed quantities (eigenvalues and
+ eigenspaces) are presented. The {\sl reordering} of specified
+ eigenvalues is performed with a direct orthogonal transformation
+ method with guaranteed numerical stability. Each swap of two adjacent
+ diagonal blocks in the real generalized Schur form, where at least one
+ of them corresponds to a complex conjugate pair of eigenvalues,
+ involves solving a generalized Sylvester equation and the construction
+ of two orthogonal transformation matrices from certain eigenspaces
+ associated with the diagonal blocks. The swapping of two $1\times 1$
+ blocks is performed using orthogonal (unitary) Givens rotations. The
+ {\sl error bounds} are based on estimates of condition numbers for
+ eigenvalues and eigenspaces. The software computes reciprocal values
+ of a condition number for an individual eigenvalue (or a cluster of
+ eigenvalues), a condition number for an eigenvector (or eigenspace),
+ and spectral projectors onto a selected cluster. By computing
+ reciprocal values we avoid overflow. Changes in eigenvectors and
+ eigenspaces are measured by their change in angle. The condition
+ numbers yield both {\sl asymptotic} and {\sl global} error bounds. The
+ asymptotic bounds are only accurate for small perturbations $(E,F)$ of
+ $(A,B)$, while the global bounds work for all $||(E,F)||$ up to a
+ certain bound, whose size is determined by the conditioning of the
+ problem. It is also shown how these upper bounds can be
+ estimated. Fortran 77 {\sl software} that implements our algorithms
+ for reordering eigenvalues, computing (left and right) deflating
+ subspaces with specified eigenvalues and condition number estimation
+ are presented. Computational experiments that illustrate the accuracy,
+ efficiency and reliability of our software are also described.",
+ paper = "Kags94.pdf"
+}
+
+@article{Kags94b,
+ author = "Kagstrom, Bo",
+ title = "A Perturbation Analysis of the Generalized Sylvester Equation
+ $(AR-LB,DR-LE)=(C,F)$",
+ journal = "SIAM J. Matrix Anal. and Appl.",
+ volume = "15",
+ number = "4",
+ pages = "1045-1060",
+ year = "1994",
+ abstract =
+ "Perturbation and error bounds for the generalized Sylvester equation
+ $(AR-LB,DR-LE)=(C,F)$ are presented. An explicit expression for the
+ normwise relative backward error associated with an approximate
+ solution of the generalized Sylvester equation is derived and
+ conditions when it can be much greater than the relative residual are
+ given. This analysis is applicable to any method that solves the
+ generalized Sylvester equation. A condition number that reflects the
+ structure of the problem and a normwise forward error bound based on
+ ${\rm Dif}^{-1}[(A,D),B,E)]$ and the residual are derived. The
+ structure-preserving condition number can be arbitrarily smaller than
+ a ${\rm Dif}^{-1}$-based condition number. The normwise error bound
+ can be evaluated robustly and at moderate cost by using a reliable
+ ${\rm Dif}^{-1}$ estimator. A componentwise LAPACK-style forward error
+ bound that can be stronger than the normwise error bound is
+ presented. A componentwise approximate error bound that can be
+ evaluated to a much lower cost is also proposed. Finally, some
+ computational experiments that validate and evaluate the perturbation
+ and error bounds are presented."
+}
+
+@techreport{Kaha66,
+ author = "Kahan, W.",
+ title = "Accurate Eigenvalues of a Symmetric Tri-Diagonal Matrix",
+ year = "1966",
+ month = "July",
+ institution = "Stanford University",
+ type = "Technical Report",
+ number = "CS41",
+ abstract =
+ "Having established tight bounds for the quotient of two different
+ lub-norms of the same tri-diagonal matrix J, the author observes that
+ these bounds could be of use in an error-analysis provided a suitable
+ algorithm were found. Such an algorithm is exhibited, and its errors
+ are thoroughly accounted for, including the effects of scaling,
+ over/underflow and roundoff. A typical result is that, on a computer
+ using rounded floating point binary arithmetic, the biggest eigenvalue
+ of J can be computed easily to within 2.5 units in its last place, and
+ the smaller eigenvalues will suffer absolute errors which are no
+ larger. These results are somewhat stronger than had been known before.",
+ paper = "Kaha66.pdf"
+}
+
+@misc{Kels00,
+ author = "Kelsey, Tom",
+ title = "Exact Numerical Computation via Symbolic Computation",
+ link = "\url{http://tom.host.cs.st-andrews.ac.uk/pub/ccapaper.pdf}",
+ year = "2000",
+ abstract = "
+ We provide a method for converting any symbolic algebraic expression
+ that can be converted into a floating point number into an exact
+ numeric representation. We use this method to demonstrate a suite of
+ procedures for the representation of, and arithmetic over, exact real
+ numbers in the Maple computer algebra system. Exact reals are
+ represented by potentially infinite lists of binary digits, and
+ interpreted as sums of negative powers of the golden ratio.",
+ paper = "Kels00.pdf"
+}
+
+@article{Knus98,
+ author = {Kn{\"u}sel, L.},
+ title = "On the accuracy of statistical distributions in Microsoft
+ Excel 97",
+ journal = "Comput. Statist. Data Anal.",
+ volume = "26",
+ pages = "375-377",
+ year = "1998"
+}
+
+@misc{Kohl14,
+ author = {K{\"o}hler, Martin and Saak, Jens},
+ title = "On BLAS Level-3 Implementations of Common Solvers for
+ (Quasi-)Triangular Generalized Lyapunov Equations",
+ year = "2014",
+ link = "\url{http://slicot.org/objects/software/reports/SLWN2014_1.pdf}",
+ note = "SLICOT Working Note 2014-1",
+ abstract =
+ "The solutions of Lyapunov and generalized Lyapunov equations are a
+ key player in many applications in systems and control theory. Their
+ stable numerical computation, when the full solution is sought, is
+ considered solved since the seminal work of Bartels and Stewart. A
+ number of variants of their algorithm have been proposed, but none of
+ them goes beyond BLAS level-2 style implementation. On modern
+ computers, however, the formulation of BLAS level-3 type
+ implementations is crucial to enable optimal usage of cache
+ hierarchies and modern block scheduling methods based on directed
+ acyclic graphs describing the interdependence of single block
+ computations. Our contribution closes this gap by a transformation of
+ the aforementioned level-2 variants to level-3 versions and a
+ comparison on a standard multicore machine.",
+ paper = "Kohl14.pdf"
+}
+
+@misc{Krei05,
+ author = "Kreinovich, V.",
+ title = "Interval computations",
+ year = "2005",
+ link = "\url{http://www.cs.utep.edu/interval-comp/}"
+}
+
+@article{Kuki72a,
+ author = "Kuki, Hirondo",
+ title = "Complex Gamma Function with Error Control",
+ year = "1972",
+ publisher = "ACM",
+ journal = "Communications of the ACM",
+ volume = "15",
+ number = "4",
+ pages = "262-267",
+ abstract =
+ "An algorithm to compute the gamma function and the loggamma function
+ of a complex variable is presented. The standard algorithm is modified
+ in several respects to insure the continuity of the function value and
+ to reduce accumulation of round-off errors. In addition to computation
+ of function values, this algorithm includes an object-time estimation
+ of round-off errors. Experimental data with regard to the effectiveness
+ of this error control are presented. A Fortran program for the algorithm
+ appears in the algorithms section of this issue."
+}
+
+@article{Kuki72b,
+ author = "Kuki, Hirondo",
+ title = "Algorithm 421: Complex Gamma Function with Error Control",
+ year = "1972",
+ publisher = "ACM",
+ journal = "Communications of the ACM",
+ volume = "15",
+ number = "4",
+ pages = "271-272",
+ abstract =
+ "This Fortran program computes either the gamma function or the
+ loggamma function of a complex variable in double precision.
+ In addition, it provides an error estimate of the computed answer.
+ The calling sequences are:
+ \verb|CALL CDLGAM (X, W, E, 0)|
+ for the loggamma, and
+ \verb|CALL CDLGAM (X, W, E, 1)|
+ for the gamma, where Z is the double precision argument, W is the
+ answer of the same type, and E is a single precision real variable.
+ Before the call, the value of E is an estimate of the error in Z,
+ and after the call, it is an estimate of the error in W."
+}
+
+@book{Laws74,
+ author = "Lawson, C. L. and Hanson, R. J.",
+ title = "Solving Least Squares Problems",
+ publisher = "Prentice-Hall",
+ year = "1974"
+}
+
+@book{Laws95,
+ author = "Lawson, C. L. and Hanson, R. J.",
+ title = "Solving Least Squares Problems",
+ publisher = "SIAM",
+ isbn = "0-89871-356-0",
+ year = "1995"
+}
+
+@article{Livn04,
+ author = "Livne, Oren E. and Golub, Gene H.",
+ title = "Scaling by Binormalization",
+ journal = "Numerical Algorithms",
+ volume = "35",
+ number = "1",
+ pages = "97-120",
+ year = "2004",
+ month = "January",
+ abstract =
+ "We present an iterative algorithm (BIN) for scaling all the rows and
+ columns of a real symmetric matrix to unit 2-norm. We study the
+ theoretical convergence properties and its relation to optimal
+ conditioning. Numerical experiments show that BIN requires 2-4
+ matrix-vector multiplications to obtain an adequate scaling, and in
+ many cases significantly reduces the condition number, more than other
+ scaling algorithms. We present generalizations to complex,
+ non-symmetric and rectangular matrices.",
+ paper = "Livn04.pdf"
+}
+
+@article{Malc72,
+ author = "Malcolm, Michael A.",
+ title = "Algorithms to reveal properties of floating-point arithmetic",
+ journal = "Comms of the ACM",
+ volume = "15",
+ year = "1972",
+ pages = "949-951",
+ link = "\url{http://www.dtic.mil/dtic/tr/fulltext/u2/727104.pdf}",
+ abstract =
+ "Two algorithms are presented in the form of Fortran subroutines. Each
+ subroutine computes the radix and number of digits of the floating
+ point numbers and whether rounding or chopping is done by the machine
+ on which it is run. The methods are shown to work on any ``reasonable''
+ floating-point computer.",
+ paper = "Malc72.pdf"
+}
+
+@article{Marq06,
+ author = "Marques, Osni A. and Reidy, Jason and Vomel, Christof",
+ title = "Benefits of IEEE-754 Features in Modern Symmetric Tridiagonal
+ Eigensolvers",
+ journal = "SIAM Journal on Scientific Computing",
+ volume = "28",
+ number = "5",
+ year = "2006",
+ link = "\url{http://www.netlib.org/lapack/lawnspdf/lawn172.pdf}",
+ abstract =
+ "Bisection is one of the most common methods used to compute the
+ eigenvalues of symmetric tridiagonal matrices. Bisection relies on the
+ {\sl Sturm count}: for a given shift $\sigma$, the number of negative
+ pivots in the factorization $T-\sigma{}I=LDL^T$ equals the number of
+ eigenvalues of $T$ that are smaller than $\sigma$. In IEEE-754
+ arithmetic, the value $\infty$ permits the computation to continue
+ past a zero pivot, producing a correct Sturm count when $T$ is
+ unreduced. Demmel and Li showed in the 90s that using $\infty$ rather
+ than testing for zero pivots within the loop could improve performance
+ significantly on certain architectures.
+
+ When eigenvalues are to be computed to high relative accuracy, it is
+ often preferable to work with $LDL^T$ factorizations instead of the
+ original tridiagonal $T$, see for example the MRRR algorithm. In these
+ cases, the Sturm count has to be computed from $LDL^T$. The
+ differential stationary and progressive qds algorithms are the method
+ of choice.
+
+ While it seems trivial to replace $T$ by $LDL^T$, in reality these
+ algorithms are more complicated: in IEEE-754 arithmetic, a zero pivot
+ produces an overflow, followed by an invalid exception (NaN), that
+ renders the Sturm count incorrect.
+
+ We present alternative, safe formulations that are guaranteed to
+ produce the correct result.
+
+ Benchmarking these algorithms on a variety of platforms shows that the
+ original formulation without tests is always faster provided no
+ exception occurs. The transforms see speed-ups of up to 2.6x over the
+ careful formulation.
+
+ Tests on industrial matrices show that encountering exceptions in
+ practice is rare. This leads to the following design: First, compute
+ the Sturm count by the fast but unsafe algorithm. Then, if an
+ exception occurred, recompute the count by a safe, slower alternative.
+
+ The new Sturm count algorithms improve the speed of bisection by up to
+ 2x on our test matrices. Furthermore, unlike the traditional
+ tiny-pivot substitutions, proper use of IEEE-754 features provides a
+ careful formulation that imposed no input range restrictions.",
+ paper = "Marq06.pdf"
+}
+
+@article{Mart68,
+ author = "Martin, R. S. and Wilkinson, J. H.",
+ title = "Similarity reduction of a general matrix to Hessenberg form",
+ journal = "Numer. Math.",
+ volume = "12",
+ pages = "349-368",
+ year = "1968"
+}
+
+@misc{Math05,
+ author = "MathWorks",
+ title = "MATLAB",
+ publisher = "The Mathworks, Inc.",
+ link = "\url{http://www.mathworks.com}"
+}
+
+@article{Mccu02,
+ author = "McCullough, B. D. and Wilson, B.",
+ title = "On the accuracy of statistical procedures in Microsoft Excel
+ 2000 and Excel XP",
+ journal = "Comput. Statist. Data Anal.",
+ volume = "40",
+ pages = "713-721",
+ year = "2002"
+}
+
+@article{Mccu99,
+ author = "McCullough, B. D. and Wilson, B.",
+ title = "On the accuracy of statistical procedures in Microsoft Excel 97",
+ journal = "Comput. Statist. Data Anal.",
+ volume = "31",
+ pages = "27-37",
+ year = "1999"
+}
+
+@book{Metc96,
+ author = "Metcalf, M. and Reid, J. K.",
+ title = "Fortran 90/95 Explained",
+ publisher = "Oxford University Press",
+ year = "1996"
+}
+
+@book{Metc04,
+ author = "Metcalf, M. and Reid, J. K. and Cohen, M.",
+ title = "Fortran 95/2003 Explained",
+ publisher = "Oxford University Press",
+ year = "2004",
+ isbn = "0-19-852693-8"
+}
+
+@article{Mole83,
+ author = "Moler, C. and Morrison, D.",
+ title = "Replacing square roots by Pythagorean sums",
+ journal = "IBM J. Res. Develop.",
+ volume = "27",
+ number = "6",
+ pages = "577-581",
+ year = "1983"
+}
+
+@article{Mole71,
+ author = "Moler, C.B. and Stewart, G.W.",
+ title = "An Algorithm for Generalized Matrix Eigenvalue Problems",
+ journal = "SIAM J. Numer. Anal",
+ volume = "10",
+ year = "1973",
+ pages = "241--256",
+ abstract =
+ "A new method, called the QZ algorithm, is presented for the solution
+ of the matrix eigenvalue problem $Ax=\lambda{}Bx$ with general square
+ matrices $A$ and $B$. Particular attention is paid to the degeneracies
+ which result when $B$ is singular. No inversions of $B$ or its
+ submatrices are used. The algorithm is a generalization of the QR
+ algorithm, and reduces to it when $B=I$. A Fortran program and some
+ illustrative examples are included."
+}
+
+@book{Moon93,
+ author = "Moonen, Marc S. and Golub, Gene H. and De Moor, Bart L.R.",
+ title = "Linear Algebra and Large Scale and Real-Time Applications",
+ year = "1993",
+ publisher = "NATO ASI Series",
+ isbn = "978-904814246-0"
+}
+
+@book{Moor79,
+ author = "Moore, R. E.",
+ title = "Methods and Applications of Interval Analysis",
+ publisher = "SIAM",
+ year = "1979"
+}
+
+@misc{NAGa05,
+ author = "Numerical Algorithms Group",
+ title = "The NAG Library",
+ link = "\url{http://www.nag.co.uk/numeric}",
+ year = "2005"
+}
+
+@misc{NAGb05,
+ author = "Numerical Algorithms Group",
+ title = "The NAG Fortran Library Manual",
+ link = "\url{http://www.nag.co.uk/numeric/fl/manual/html/FLlibrarymanual.asp}",
+ year = "2005"
+}
+
+@book{Over01,
+ author = "Overton, M. L.",
+ title = "Numerical Computing with IEEE Floating Point Arithmetic",
+ publisher = "SIAM",
+ year = "2001",
+ isbn = "0-89871-482-6"
+}
+
+@book{Pies83,
+ author = {Piessens, R. and de Doncker-Kapenga, E. and {\"U}berhuber, C. W.
+ and Kahaner, D. K.},
+ title = "QUADPACK - A Subroutine Package for Automatic Integration",
+ publisher = "Springer-Verlag",
+ year = "1983"
+}
+
+@misc{Pete12,
+ author = "Petersen, Kaare Brandt and Pedersen, Michael Syskind",
+ title = "The Matrix Cookbook",
+ link = "\url{http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3274/pdf/imm3274.pdf}",
+ year = "2012",
+ month = "November"
+}
+
+@article{Prie04,
+ author = "Priest, D. M.",
+ title = "Efficient scaling for complex division",
+ journal = "ACM Trans. Math. Software",
+ volume = "30",
+ pages = "389-401",
+ year = "2004"
+}
+
+@InProceedings{Rump99,
+ author = "Rump, S. M.",
+ title = "INTLAB - INTerval LABoratory",
+ booktitle = "Developments in Reliable Computing",
+ pages = "77-104",
+ publisher = "Kluwer Academic",
+ year = "1999"
+}
+
+@InProceedings{Sham92,
+ author = "Shampine, L. F. and Gladwell, I.",
+ title = "The next generation of {Runge-Kutta} codes",
+ booktitle = "Computational Ordinary Differential Equations",
+ pages = "145-164",
+ publisher = "Oxford University Press",
+ year = "1992"
+}
+
+@article{Smit62,
+ author = "Smith, R. L.",
+ title = "Algorithm 116: Complex division",
+ journal = "Communs. Ass. comput. Mach.",
+ volume = "5",
+ pages = "435",
+ year = "1962"
+}
+
+@book{Stew98,
+ author = "Stewart, G. W.",
+ title = "Matrix Algorithms: Basic Decompositions, volume I",
+ publisher = "SIAM",
+ year = "1998",
+ isbn = "0-89871-414-1"
+}
+
+@article{Stew85,
+ author = "Stewart, G. W.",
+ title = "A note on complex division",
+ journal = "ACM Trans. Math. Software",
+ volume = "11",
+ pages = "238-241",
+ year = "1985"
+}
+
+@book{Stew90,
+ author = "Stewart, G. W. and Sun, J.",
+ title = "Matrix Perturbation Theory",
+ publisher = "Academic Press",
+ year = "1990"
+}
+
+@article{Stou07,
+ author = "Stoutemyer, David R.",
+ title = "Useful Computations Need Useful Numbers",
+ year = "2007",
+ publisher = "ACM",
+ journal = "Communications in Computer Algebra",
+ volume = "41",
+ number = "3",
+ abstract =
+ "Most of us have taken the exact rational and approximate numbers in
+ our computer algebra systems for granted for a long time, not thinking
+ to ask if they could be significantly better. With exact rational
+ arithmetic and adjustable-precision floating-point arithmetic to
+ precision limited only by the total computer memory or our patience,
+ what more could we want for such numbers? It turns out that there is
+ much that can be done that permits us to obtain exact results more
+ often, more intelligible results, approximate results guaranteed to
+ have requested error bounds, and recovery of exact results from
+ approximate ones."
+}
+
+@article{Sutt13,
+ author = "Sutton, Brian D.",
+ title = "Computing the Complete CS Decomposition",
+ journal = "Numerical Algorithms",
+ volume = "50",
+ pages = "33-65",
+ year = "2013",
+ month = "February",
+ link = "\url{http://arxiv.org/pdf/0707.1838v3.pdf}",
+ abstract =
+ "An algorithm is developed to compute the complete CS decomposition
+ (CSD) of a partitioned unitary matrix. Although the existence of the
+ CSD has been recognized since 1977, prior algorithms compute only a
+ reduced version (the 2-by-1 CSD) that is equivalent to two
+ simultaneous singular value decompositions. The algorithm presented
+ here computes the complete 2-by-2 CSD, which requires the simultaneous
+ diagonalization of all four blocks of a unitary matrix partitioned
+ into a 2-by-2 block structure. The algorithm appears to be the only
+ fully specified algorithm available. The computation occurs in two
+ phases. In the first phase, the unitary matrix is reduced to
+ bidiagonal block form, as described by Sutton and Edelman. In the
+ second phase, the blocks are simultaneously diagonalized using
+ techniques from bidiagonal SVD algorithms of Golub, Kahan, and
+ Demmel. The algorithm has a number of desirable numerical features.",
+ paper = "Sutt13.pdf"
+}
+
+@article{Turi48,
+ author = "Turing, A. M.",
+ title = "Rounding-off errors in matrix processes",
+ journal = "Q. J. Mech. Appl. Math.",
+ volume = "1",
+ pages = "287-308",
+ year = "1948"
+}
+
+@article{Vign93,
+ author = "Vignes, J.",
+ title = "A stochastic arithmetic for reliable scientific computation",
+ journal = "Math. and Comp. in Sim.",
+ volume = "25",
+ pages = "233-261",
+ year = "1993"
+}
+
+@article{Ward81,
+ author = "Ward, Robert C.",
+ title = "Balancing the generalized eigenvalue problem",
+ journal = "SIAM J. Sci. and Stat. Comput.",
+ volume = "2",
+ number = "2",
+ year = "1981",
+ pages = "141-152",
+ abstract =
+ "An algorithm is presented for balancing the $A$ and $B$ matrices
+ prior to computing the eigensystem of the generalized eigenvalue
+ problem $Ax=\lambda Bx$. The three-step algorithm is specifically
+ designed to precede the $QZ$-type algorithms, but improved performance
+ is expected from most eigensystem solvers. Permutations and two-sided
+ diagonal transformations are applied to $A$ and $B$ to produce
+ matrices with certain desirable properties. Test cases are presented
+ to illustrate the improved accuracy of the computed eigenvalues."
+}
+
+@book{Wilk63,
+ author = "Wilkinson, J. H.",
+ title = "Rounding Errors in Algebraic Processes",
+ publisher = "HMSO",
+ series = "Notes on Applied Science, No. 32",
+ year = "1963"
+}
+
+@book{Wilk65,
+ author = "Wilkinson, J. H.",
+ title = "The Algebraic Eigenvalue Problem",
+ publisher = "Oxford University Press",
+ year = "1965"
+}
+
+@InProceedings{Wilk84,
+ author = "Wilkinson, J. H.",
+ title = "The perfidious polynomial",
+ booktitle = "Studies in Numerical Analysis",
+ volume = "24",
+ chapter = "1",
+ pages = "1-28",
+ year = "1984"
+}
+
+@article{Wilk86,
+ author = "Wilkinson, J. H.",
+ title = "Error analysis revisited",
+ journal = "IMA Bulletin",
+ volume = "22",
+ pages = "192-200",
+ year = "1986"
+}
+
+@article{Wilk61,
+ author = "Wilkinson, J. H.",
+ title = "Error analysis of direct methods of matrix inversion",
+ journal = "J. ACM",
+ volume = "8",
+ pages = "281-330",
+ year = "1961"
+}
+
+@article{Wilk85,
+ author = "Wilkinson, J. H.",
+ title = "The state of the art in error analysis",
+ journal = "NAG Newsletter",
+ volume = "2/85",
+ pages = "5-28",
+ year = "1985"
+}
+
+@article{Wilk60,
+ author = "Wilkinson, J. H.",
+ title = "Error analysis of floating-point computation",
+ journal = "Numer. Math.",
+ volume = "2",
+ pages = "319-340",
+ year = "1960"
+}
+
+@book{Wilk71,
+ author = "Wilkinson, J. H.",
+ title = "Handbook for Automatic Computation, V2, Linear Algebra",
+ publisher = "Springer-Verlag",
+ year = "1971"
+}
+
+@inproceedings{Badd94,
+ author = "Baddoura, Jamil",
+ title = "A Conjecture On Integration in Finite Terms with Elementary
+ Functions and Polylogarithms",
+ booktitle = "ISSAC 94",
+ year = "1994",
+ pages = "158-162",
+ isbn = "0-89791-638-7",
+ abstract =
+ "In this abstract, we report on a conjecture that gives the form of an
+ integral if it can be expressed using elementary functions and
+ polylogarithms. The conjecture is proved by the author in the cases of
+ the dilogarithm and the trilogarithm [3] and consists of a
+ generalization of Liouville's theorem on integration in finite terms
+ with elementary functions. Those last structure theorems, for the
+ dilogarithm and the trilogarithm, are the first case of structure
+ theorems where logarithms can appear with non-constant
+ coefficients. In order to prove the conjecture for higher
+ polylogarithms we need to find the functional identities, for the
+ polylogarithms that we are using, that characterize all the possible
+ algebraic relations among the considered polylogarithms of functions
+ that are built up from the rational functions by taking the considered
+ polylogarithms, exponentials, logarithms and algebraics. The task of
+ finding those functional identities seems to be a difficult one and is
+ an unsolved problem for the most part to this date.",
+ paper = "Badd94.pdf",
+}
+
+@article{Badd06,
+ author = "Baddoura, Jamil",
+ title = "Integration in Finite Terms with Elementary Functions and
+ Dilogarithms",
+ journal = "J. Symbolic Computation",
+ volume = "41",
+ number = "8",
+ year = "2006",
+ pages = "909-942",
+ abstract =
+ "In this paper, we report on a new theorem that generalizes
+ Liouville's theorem on integration in finite terms. The new theorem
+ allows dilogarithms to occur in the integral in addition to
+ transcendental elementary functions. The proof is based on two
+ identities for the dilogarithm, that characterize all the possible
+ algebraic relations among dilogarithms of functions that are built up
+ from the rational functions by taking transcendental exponentials,
+ dilogarithms, and logarithms. This means that we assume the integral
+ lies in a transcendental tower.",
+ paper = "Badd06.pdf"
+}
+
+@article{Barn89,
+ author = "Barnett, Michael P.",
+ title = "Using Partial Fraction Formulas to Sum some slowly convergent
+ series analytically for molecular integral calculations",
+ journal = "ACM SIGSAM",
+ volume = "23",
+ number = "3",
+ year = "1989",
+ abstract =
+ "Two sets of rational expressions, needed for quantum chemical
+ calculations, have been constructed by mechanical application of
+ partial fraction and polynomial operations on a CYBER 205. The
+ algorithms were coded in FORTRAN, using simple array manipulation. The
+ results suggest extensions that could be tackled with general
+ algebraic manipulation programs.",
+ paper = "Barn89.pdf"
+}
+
+@inproceedings{Bert94,
+ author = "Bertrand, Laurent",
+ title = "On the Implementation of a new Algorithm for the Computation
+ of Hyperelliptic Integrals",
+ booktitle = "ISSAC 94",
+ isbn = "0-89791-638-7",
+ pages = "211-215",
+ year = "1994",
+ abstract =
+ "In this paper, we present an implementation in Maple of a new
+ algorithm for the algebraic function integration problem in the
+ particular case of hyperelliptic integrals. This algorithm is based
+ on the general algorithm of Trager [9] and on the arithmetic in the
+ Jacobian of hyperelliptic curves of Cantor [2].",
+ paper = "Bert94.pdf"
+}
+
+@article{Clar89,
+ author = "Clarkson, M.",
+ title = "MACSYMA's inverse Laplace transform",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "23",
+ number = "1",
+ year = "1989",
+ pages = "33-38",
+ abstract =
+ "The inverse Laplace transform capability of MACSYMA has been improved
+ and extended. It has been extended to evaluate certain limits, sums,
+ derivatives and integrals of Laplace transforms. It also takes
+ advantage of the inverse Laplace transform convolution theorem, and
+ can deal with a wider range of symbolic parameters.",
+ paper = "Clar89.pdf"
+}
+
+@misc{Corl05,
+ author = "Corless, Robert M. and Jeffrey, David J. and Watt, Stephen M.
+ and Bradford, Russell and Davenport, James H.",
+ title = "Reasoning about the elementary functions of complex analysis",
+ link = "\url{http://www.csd.uwo.ca/~watt/pub/reprints/2002-amai-reasoning.pdf}",
+ abstract = "
+ There are many problems with the simplification of elementary
+ functions, particularly over the complex plane. Systems tend to make
+ ``howlers'' or not to simplify enough. In this paper we outline the
+ ``unwinding number'' approach to such problems, and show how it can be
+ used to prevent errors and to systematise such simplification, even
+ though we have not yet reduced the simplification process to a
+ complete algorithm. The unsolved problems are probably more amenable
+ to the techniques of artificial intelligence and theorem proving than
+ the original problem of complex-variable analysis.",
+ paper = "Corl05.pdf"
+}
+
+@book{Erde56,
+ author = {Erd\'elyi, A.},
+ title = "Asymptotic Expansions",
+ year = "1956",
+ isbn = "978-0-486-15505-0",
+ publisher = "Dover Publications"
+}
+
+@article{Nort80,
+ author = "Norton, Lewis M.",
+ title = "A Note about Laplace Transform Tables for Computer use",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "14",
+ number = "2",
+ year = "1980",
+ pages = "30-31",
+ abstract =
+ "The purpose of this note is to give another illustration of the fact
+ that the best way for a human being to represent or process
+ information is not necessarily the best way for a computer. The
+ example concerns the use of a table of inverse Laplace transforms
+ within a program, written in the REDUCE language [1] for symbolic
+ algebraic manipulation, which solves linear ordinary differential
+ equations with constant coefficients using Laplace transform
+ methods. (See [2] for discussion of an earlier program which solved
+ such equations.)",
+ paper = "Nort80.pdf"
+}
+
+@article{Piqu89,
+ author = "Piquette, J. C.",
+ title = "Special Function Integration",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "23",
+ number = "2",
+ year = "1989",
+ pages = "11-21",
+ abstract =
+ "This article describes a method by which the integration capabilities
+ of symbolic-mathematics computer programs can be extended to include
+ integrals that contain special functions. A summary of the theory that
+ forms the basis of the method is given in Appendix A. A few integrals
+ that have been evaluated using the method are presented in Appendix
+ B. A more thorough development and explanation of the method is given
+ in Piquette, in review (b)."
+}
+
+@techreport{Segl98,
+ author = "Segletes, S.B.",
+ title = "A compact analytical fit to the exponential integral $E_1(x)$",
+ year = "1998",
+ institution = "U.S. Army Ballistic Research Laboratory,
+ Aberdeen Proving Ground, MD",
+ type = "Technical Report",
+ number = "ARL-TR-1758",
+ algebra = "\newline\refto{package DFSFUN DoubleFloatSpecialFunctions}",
+ abstract = "
+ A four-parameter fit is developed for the class of integrals known as
+ the exponential integral (real branch). Unlike other fits that are
+ piecewise in nature, the current fit to the exponential integral is
+ valid over the complete domain of the function (compact) and is
+ everywhere accurate to within $\pm 0.0052\%$ when evaluating the first
+ exponential integral, $E_1$. To achieve this result, a methodology
+ that makes use of analytically known limiting behaviors at either
+ extreme of the domain is employed. Because the fit accurately captures
+ limiting behaviors of the $E_1$ function, more accuracy is retained
+ when the fit is used as part of the scheme to evaluate higher-order
+ exponential integrals, $E_n$, as compared with the use of brute-force
+ fits to $E_1$, which fail to accurately model limiting
+ behaviors. Furthermore, because the fit is compact, no special
+ accommodations are required (as in the case of spliced piecewise fits)
+ to smooth the value, slope, and higher derivatives in the transition
+ region between two piecewise domains. The general methodology employed
+ to develop this fit is outlined, since it may be used for other
+ problems as well.",
+ paper = "Segl98.pdf"
+}
+
+@techreport{Se09,
+ author = "Segletes, S.B.",
+ title = "Improved fits for $E_1(x)$ {\sl vis-\`a-vis} those presented
+ in ARL-TR-1758",
+ type = "Technical Report",
+ number = "ARL-TR-1758",
+ institution ="U.S. Army Ballistic Research Laboratory,
+ Aberdeen Proving Ground, MD",
+ year = "1998",
+ month = "September",
+ abstract = "
+ This is a writeup detailing the more accurate fits to $E_1(x)$,
+ relative to those presented in ARL-TR-1758. My actual fits are to
+ \[F1 =[x\ exp(x) E_1(x)]\] which spans a functional range from 0 to 1.
+ The best accuracy I have been yet able to achieve, defined by limiting
+ the value of \[[(F1)_{fit} - F1]/F1\] over the domain, is
+ approximately 3.1E-07 with a 12-parameter fit, which unfortunately
+ isn't quite to 32-bit floating-point accuracy. Nonetheless, the fit
+ is not a piecewise fit, but rather a single continuous function over
+ the domain of nonnegative x, which avoids some of the problems
+ associated with piecewise domain splicing.",
+ paper = "Se09.pdf"
+}
+
+@InProceedings{Kalt99a,
+ author = "Kaltofen, E. and Monagan, M.",
+ title = "On the Genericity of the Modular Polynomial {GCD} Algorithm",
+ booktitle = "Proc. 1999 Internat. Symp. Symbolic Algebraic Comput.",
+ year = "1999",
+ pages = "59--66",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/99/KaMo99.pdf}",
+ paper = "Kalt99a.pdf"
+}
+
+@book{Knut71,
+ author = "Knuth, Donald",
+ title = "The Art of Computer Programming Vol. 2 (Seminumerical Algorithms)",
+ year = "1971",
+ publisher = "Addison-Wesley"
+}
+
+@article{Ma90,
+ author = "Ma, Keju and {von zur Gathen}, Joachim",
+ title =
+ "Analysis of Euclidean Algorithms for Polynomials over Finite Fields",
+ journal = "J. Symbolic Computation",
+ year = "1990",
+ volume = "9",
+ pages = "429-455",
+ link = "\url{http://www.researchgate.net/publication/220161718_Analysis_of_Euclidean_Algorithms_for_Polynomials_over_Finite_Fields/file/60b7d52b326a1058e4.pdf}",
+ abstract = "
+ This paper analyzes the Euclidean algorithm and some variants of it
+ for computing the greatest common divisor of two univariate polynomials
+ over a finite field. The minimum, maximum, and average number of
+ arithmetic operations both on polynomials and in the ground field
+ are derived.",
+ paper = "Ma90.pdf"
+}
+
+@inproceedings{Hoei04,
+ author = "van Hoeij, Mark and Monagan, Michael",
+ title = "Algorithms for Polynomial GCD Computation over Algebraic
+ Function Fields",
+ booktitle = "Proc. ISSAC 04",
+ year = "2004",
+ isbn = "1-58113-827-X",
+ link = "\url{http://www.cecm.sfu.ca/personal/mmonagan/papers/AFGCD.pdf}",
+ abstract =
+ "Let $L$ be an algebraic function field in $k \ge 0$ parameters
+ $t_1,\ldots,t_k$. Let $f_1$, $f_2$ be non-zero polynomials in
+ $L[x]$. We give two algorithms for computing their gcd. The first, a
+ modular GCD algorithm, is an extension of the modular GCD algorithm
+ of Brown for {\bf Z}$[x_1,\ldots,x_n]$ and Encarnacion for {\bf
+ Q}$(\alpha)[x]$ to function fields. The second, a fraction-free
+ algorithm, is a modification of the Moreno Maza and Rioboo algorithm
+ for computing gcds over triangular sets. The modification reduces
+ coefficient growth in $L$ to be linear. We give an empirical
+ comparison of the two algorithms using implementations in Maple.",
+ paper = "Hoei04.pdf"
+}
+
+@misc{Meij91,
+ author = "Meijer, Erik and Fokkinga, Maarten and Paterson, Ross",
+ title = "Functional Programming with Bananas, Lenses, Envelopes and
+ Barbed Wire",
+ link = "\url{http://eprints.eemcs.utwente.nl/7281/01/db-utwente-40501F46.pdf}",
+ abstract = "
+ We develop a calculus for lazy functional programming based on
+ recursion operators associated with data type definitions. For these
+ operators we derive various algebraic laws that are useful in deriving
+ and manipulating programs. We shall show that all example functions in
+ Bird and Wadler's ``Introduction to Functional Programming'' can be
+ expressed using these operators.",
+ paper = "Meij91.pdf"
+}
+
+@misc{Yous04,
+ author = "Youssef, Saul",
+ title = "Prospects for Category Theory in Aldor",
+ year = "2004",
+ link = "\url{http://axiom-wiki.newsynthesis.org/public/refs/Youssef-ProspectsForCategoryTheoryInAldor.pdf}",
+ abstract =
+ "Ways of incorporating category theory constructions and results into
+ the Aldor language are discussed. The main features of Aldor which
+ make this possible are identified, examples of categorical
+ constructions are provided and a suggestion is made for a foundation
+ for rigorous results.",
+ paper = "Yous04.pdf"
+}
+
+@book{Acze13,
+ author = "Aczel, Peter and others",
+ title = "Homotopy Type Theory: Univalent Foundations of Mathematics",
+ publisher = "Institute for Advanced Study",
+ year = "2013",
+ link = "\url{https://hott.github.io/book/nightly/hott-letter-1075-g3c53219.pdf}",
+ paper = "Acze13.pdf"
+}
+
+@InProceedings{Adam01,
+ author = "Adams, Andrew A. and Dunstan, Martin and Gottliebsen, Hanne and
+ Kelsey, Tom and Martin, Ursula and Owre, Sam",
+ title = "Computer algebra meets automated theorem proving: Integrating
+ Maple and PVS",
+ booktitle = "Theorem proving in higher order logics",
+ series = "TPHOLs 2001",
+ year = "2001",
+ location = "Edinburgh, Scotland",
+ pages = "27-42",
+ abstract =
+ "We describe an interface between version 6 of the Maple computer
+ algebra system with the PVS automated theorem prover. The interface is
+ designed to allow Maple users access to the robust and checkable proof
+ environment of PVS. We also extend this environment by the provision
+ of a library of proof strategies for use in real analysis. We
+ demonstrate examples using the interface and the real analysis
+ library. These examples provide proofs which are both illustrative and
+ applicable to genuine symbolic computation problems.",
+ paper = "Adam01.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Ager88,
+ author = "Ager, Tryg A. and Ravaglia, R.A. and Dooley, Sam",
+ title = "Representation of Inference in Computer Algebra Systems with
+ Applications to Intelligent Tutoring",
+ year = "1988",
+ abstract =
+ "Presently computer algebra systems share with calculators the
+ property that a sequence of computations is not a unified
+ computational sequence, thereby allowing fallacies to occur. We argue
+ that if computer algebra systems operate in a framework of strict
+ mathematical proof, fallacies are eliminated. We show that this is
+ possible in a working interactive system REQD. We explain why
+ computational algebra, done under the strict constraints of proof, is
+ relevant to uses of computer algebra systems in instruction."
+}
+
+@phdthesis{Aker93,
+ author = "Akers, Robert Lawrence",
+ title = "Strong Static Type Checking for Functional Common Lisp",
+ school = "University of Texas at Austin",
+ year = "1995",
+ link = "\url{ftp://www.cs.utexas.edu/pub/boyer/cli-reports/096.pdf}",
+ abstract =
+ "This thesis presents a type system which supports the strong static
+ type checking of programs developed in an applicative subset of the
+ Common Lisp language. The Lisp subset is augmented with a guard
+ construct for function definitions, which allows type restrictions to
+ be placed on the arguments. Guards support the analysis and safe use
+ of partial functions, like CAR, which are well-defined only for
+ arguments of a certain type.
+
+ A language of type descriptors characterizes the type
+ domain. Descriptors are composed into function signatures which
+ characterize the guard and which map various combinations of actual
+ parameter types to possible result types. From a base of signatures
+ for a small collection of primitive functions, the system infers
+ signatures for newly submitted functions.
+
+ The system includes a type inference algorithm which handles
+ constructs beyond the constraints of ML-style systems. Most notable
+ are the free use of CONS to construct objects of undeclared type and
+ the use of IF forms whose two result components have unrelated types,
+ resulting in ad hoc polymorphism. Accordingly, the type descriptor
+ language accommodates disjunction, unrestricted CONS, recursive type
+ forms, and ad hoc polymorphic function signatures. As with traditional
+ type inference systems, unification is a central algorithm, but the
+ richness of our type language complicates many component algorithms,
+ including unification. Special treatment is given to recognizer
+ functions, which are predicates determining whether an object is of a
+ particular type. Type inference in this setting is undecidable, so the
+ algorithm is heuristic and is not complete.
+
+ The semantics of the system are in terms of a function which
+ determines whether values satisfy descriptors with respect to a
+ binding of type variables. The soundness of each signature emitted by
+ the system is validated by a signature checker, whose properties are
+ formally specified with respect to the formal semantics and proven to
+ hold. The checker algorithm is substantially simpler than the
+ inference algorithm, as it need not perform operations such as
+ discovering closed recursive forms. Thus, its proof is both more
+ straightforward to construct and easier to validate than a direct
+ proof of the inference algorithm would be.",
+ paper = "Aker93.pdf"
+}
+
+@misc{Avig14,
+ author = "Avigad, Jeremy",
+ title = "LEAN proof of GCD",
+ link =
+ "\url{http://github.com/leanprover/lean2/blob/master/library/data/nat/gcd.lean}",
+ year = "2014"
+}
+
+@misc{Avig16,
+ author = "Avigad, Jeremy",
+ title = "LEAN github repository",
+ link = "\url{http://github.com/leanprover}",
+ year = "2016"
+}
+
+@misc{Avig17,
+ author = "Avigad, Jeremy and de Moura, Leonardo and Kong, Soonho",
+ title = "Theorem Proving in Lean",
+ year = "2017",
+ link = "\url{https://leanprover.github.io/tutorial/tutorial.pdf}",
+ paper = "Avig17.pdf"
+}
+
+@inproceedings{Ball95,
+ author = "Ballarin, Clemens and Homann, Karsten and Calmet, Jacques",
+ title = "Theorems and Algorithms: An Interface between Isabelle and Maple",
+ booktitle = "ISSAC 95",
+ year = "1995",
+ pages = "150-157",
+ publisher = "ACM",
+ link = "\url{https://pdfs.semanticscholar.org/077e/606f92b4095637e624a9efc942c5c63c4bc2.pdf}",
+ abstract =
+ "Solving sophisticated mathematical problems often requires algebraic
+ algorithms {\sl and} theorems. However, there are no environments
+ integrating theorem provers and computer algebra systems which
+ consistently provide the inference capabilities of the first and the
+ powerful arithmetic of the latter systems.
+
+ As an example for such a mechanized mathematics environment we describe
+ a prototype implementation of an interface between Isabelle and Maple.
+ It is achieved by extending the simplifier of Isabelle through the
+ introduction of a new class of simplification rules called evaluation
+ rules in order to make selected operations of Maple available, and
+ without any modification to the computer algebra system. Additionally,
+ we specify syntax translations for the concrete syntax of Maple
+ which enables the communication between both systems illustrated by
+ some examples that can be solved by theorems and algorithms.",
+ paper = "Ball95.pdf"
+
+}
+
+@misc{Ball98,
+ author = "Ballarin, Clemens and Paulson, Lawrence C.",
+ title = "Reasoning about Coding Theory: The Benefits We Get from
+ Computer Algebra",
+ year = "1998",
+ link = "\url{http://www21.in.tum.de/~ballarin/publications/aisc98.pdf}",
+ abstract =
+ "The use of computer algebra is usually considered beneficial for
+ mechanised reasoning in mathematical domains. We present a case study,
+ in the application domain of coding theory, that supports this claim:
+ the mechanised proof depends on non-trivial algorithms from computer
+ algebra and increase the reasoning power of the theorem prover. The
+ unsoundness of computer algebra systems is a major problem in
+ interfacing them to theorem provers. Our approach to obtaining a sound
+ overall system is not blanket distrust but based on the distinction
+ between algorithms we call sound and {\sl ad hoc} respectively. This
+ distinction is blurred in most computer algebra systems. Our
+ experimental interface therefore uses a computer algebra library. It
+ is based on theorem templates, which provide formal specifications for
+ the algorithms.",
+ paper = "Ball98.pdf"
+}
+
+@article{Ball99,
+ author = "Ballarin, Clemens and Paulson, Lawrence C.",
+ title = "A Pragmatic Approach to Extending Provers by Computer Algebra --
+ with Applications to Coding Theory",
+ journal = "Fundam. Inform.",
+ volume = "39",
+ number = "1-2",
+ pages = "1-20",
+ year = "1999",
+ link = "\url{http://www.cl.cam.ac.uk/~lp15/papers/Isabelle/coding.pdf}",
+ abstract = "
+ The use of computer algebra is usually considered beneficial for
+ mechanised reasoning in mathematical domains. We present a case study,
+ in the application domain of coding theory, that supports this claim:
+ the mechanised proofs depend on non-trivial algorithms from computer
+ algebra and increase the reasoning power of the theorem prover.
+
+ The unsoundness of computer algebra systems is a major problem in
+ interfacing them to theorem provers. Our approach to obtaining a sound
+ overall system is not blanket distrust but based on the distinction
+ between algorithms we call sound and {\sl ad hoc} respectively. This
+ distinction is blurred in most computer algebra systems. Our
+ experimental interface therefore uses a computer algebra library. It
+ is based on formal specifications for the algorithms, and links the
+ computer algebra library Sumit to the prover Isabelle.
+
+ We give details of the interface, the use of the computer algebra
+ system on the tactic-level of Isabelle and its integration into proof
+ procedures.",
+ paper = "Ball99.pdf",
+ keywords = "axiomref"
+}
+
+@article{Bare01,
+ author = "Barendregt, Henk and Cohen, Arjeh M.",
+ title = "Electronic Communication of Mathematics and the Interaction
+ of Computer Algebra Systems and Proof Assistants",
+ journal = "J. Symbolic Computation",
+ volume = "32",
+ pages = "3-22",
+ year = "2001",
+ abstract =
+ "Present day computer algebra systems (CASs) and proof assistants
+ (PAs) are specialized programs that help humans with mathematical
+ computations and deductions. Although several such systems are
+ impressive, they all have certain limitations. In most CASs side
+ conditions that are essential for the truth of an equality are not
+ formulated; moreover there are bugs. The PAs have a limited power for
+ computing and hence also for assistance with proofs. Almost all
+ examples of both categories are stand alone special purpose systems
+ and therefore they cannot communicate with each other.
+
+ We will argue that the present state of the art in logic is such that
+ there is a natural formal language, independent of the special purpose
+ application in question, by which these systems can communicate
+ mathematical statements. In this way their individual power will be
+ enhanced.
+
+ Statements received at one particular location from other sites fall
+ into two categories: with or without the qualification ``evidently
+ impeccable'', a notion that is methodologically precise and
+ sound. For statements having this quality assessment the evidence may
+ come from the other site or from the local site itself, but in both
+ cases it is verified locally. In cases where there is no evidence of
+ impeccability one has to rely on cross checking. There is a trade-off
+ between these two kinds of statements: for impeccability one has to
+ pay the price of obtaining less power.
+
+ Some examples of communication forms are given that show how the
+ participants benefit",
+ paper = "Bare01.pdf"
+}
+
+@article{Baue16,
+ author = "Bauer, Andrej",
+ title = "Five Stages of Accepting Constructive Mathematics",
+ year = "2016",
+ journal = "Bull. of the American Mathematical Society",
+ link = "\url{http://dx.doi.org/10.1090/bull/1556}",
+ abstract =
+ "On the odd day, a mathematician might wonder what constructive
+ mathematics is all about. They may have heard arguments in favor of
+ constructivism but are not at all convinced by them, and in any case
+ they may care little about philosophy. A typical introductory text
+ about constructivism spends a great deal of time explaining the
+ principles and contains only trivial mathematics, while advanced
+ constructive texts are impenetrable, like all unfamiliar mathematics.
+ How then can a mathematician find out what constructive mathematics
+ feels like? What new and relevant ideas does constructive mathematics
+ have to offer, if any? I shall attempt to answer these questions.",
+ paper = "Baue16.pdf"
+}
+
+@techreport{Baum90,
+ author = "Baumgartner, Gerald and Stansifer, Ryan",
+ title = "A Proposal to Study Type Systems for Computer Algebra",
+ institution = "RISC",
+ year = "1990",
+ type = "technical report",
+ number = "90-07.0",
+ abstract =
+ "It is widely recognized that programming languages should offer
+ features to help structure programs. To achieve this goal, languages
+ like ADA, MODULA-2, object-oriented languages, and functional
+ languages have been developed. The structuring techniques available so
+ far (like modules, classes, parametric polymorphism) are still not
+ enough or not appropriate for some application areas. In symbolic
+ computation, in particular computer algebra, several problems occur
+ that are difficult to handle with any existing programming language.
+ Indeed, nearly all available computer algebra systems suffer from the
+ fact that the underlying programming language imposes too many
+ restrictions.
+
+ We propose to develop a language that combines the essential features
+ from functional language, object-oriented languages, and computer
+ algebra systems in a semantically clean manner. Although intended for
+ use in symbolic computation, this language should prove interesting as
+ a general purpose programming language. The main innovation will be
+ the application of sophisticated type systems to the needs of computer
+ algebra systems. We will demonstrate the capabilities of the language
+ by using it to implement a small computer algebra library. This
+ implementation will be compared against a straightforward Lisp
+ implementation and against existing computer algebra systems. Our
+ development should have an impact both on the programming languages
+ world and on the computer algebra world.",
+ paper = "Baum90.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Beesxx,
+ author = "Beeson, Michael",
+ title = "Automatic Generation of Epsilon-Delta Proofs of Continuity",
+ link = "\url{http://www.michaelbeeson.com/research/papers/aisc.pdf}",
+ abstract =
+ "As part of a project on automatic generation of proofs involving both
+ logic and computation, we have automated the production of some proofs
+ involving epsilon-delta arguments. These proofs involve two or three
+ quantifiers on the logical side, and on the computational side, they
+ involve algebra, trigonometry, and some calculus. At the border of
+ logic and computation, they involve several types of arguments
+ involving inequalities, including transitivity chaining and several
+ types of bounding arguments, in which bounds are sought that do not
+ depend on certain variables. Control mechanisms have been developed
+ for intermixing logical deduction steps with computational steps and
+ with inequality reasoning. Problems discussed here as examples involve
+ the continuity and uniform continuity of various specific functions.",
+ paper = "Beesxx.pdf"
+}
+
+@article{Benk03,
+ author = "Benke, Marcin and Dybjer, Peter and Jansson, Patrik",
+ title = "Universes for generic programs and proofs in dependent type
+ theory",
+ journal = "Nordic Journal of Computing",
+ volume = "10",
+ year = "2003",
+ pages = "265-269",
+ link = "\url{http://www.cse.chalmers.se/~peterd/papers/generic.html}",
+ abstract =
+ "We show how to write generic programs and proofs in {Martin-L\"of}
+ type theory. To this end we consider several extensions of
+ {Martin-L\"of}'s logical framework for dependent types. Each extension
+ has a universe of codes (signatures) for inductively defined sets with
+ generic formation, introduction, elimination, and equality
+ rules. These extensions are modeled on Dybjer and Setzer's finitely
+ axiomatized theories of inductive-recursive definitions, which also
+ have universes of codes for sets, and generic formation,
+ introduction, elimination, and equality rules. Here we consider
+ several smaller universes of interest for generic programming and
+ universal algebra. We formalize one-sorted and many-sorted term
+ algebras, as well as iterated, generalized, parameterized, and indexed
+ inductive definitions. We also show how to extend the techniques of
+ generic programming to these universes. Furthermore, we give generic
+ proofs of reflexivity and substitutivity of a generic equality
+ test. Most of the definitions in the paper have been implemented using
+ the proof assistant Alfa for dependent type theory.",
+ paper = "Benk03.pdf"
+}
+
+@book{Bert04,
+ author = {Bertot, Yves and Cast\'eran, Pierre},
+ title = "Interactive Theorem Proving and Program Development",
+ publisher = "Springer",
+ year = "2004",
+ isbn = "3-540-20854-2",
+ abstract = "
+ Coq is an interactive proof assistant for the development of
+ mathematical theories and formally certified software. It is based on
+ a theory called the calculus of inductive constructions, a variant of
+ type theory.
+
+ This book provides a pragmatic introduction to the development of
+ proofs and certified programs using Coq. With its large collection of
+ examples and exercises it is an invaluable tool for researchers,
+ students, and engineers interested in formal methods and the
+ development of zero-fault software."
+}
+
+@misc{Bold07,
+ author = "Boldo, Sylvie and Filliatre, Jean-Christophe",
+ title = "Formal Verification of Floating-Point programs",
+ link = "\url{http://www-lipn.univ-paris13.fr/CerPAN/files/ARITH.pdf}",
+ paper = "Bold07.pdf"
+}
+
+@misc{Bold07a,
+ author = "Boldo, Sylvie and Filliatre, Jean-Christophe",
+ title = "Formal Verification of Floating-Point programs",
+ link = "\url{http://www.lri.fr/~filliatr/ftp/publis/caduceus-floats.pdf}",
+ abstract =
+ "This paper introduces a methodology to perform formal verification of
+ floating-point C programs. It extends an existing tool for
+ verification of C programs, Caduceus, with new annotations for
+ specific floating-point arithmetic. The Caduceus first-order logic
+ model for C programs is extended accordingly. Then verification
+ conditions are obtained in the usual way and can be discharged
+ interactively with the Coqa proof assistant, using an existing Coq
+ formalization of floating-point arithmetic. This methodology is
+ already implemented and has been successfully applied to several short
+ floating-point programs, which are presented in this paper.",
+ paper = "Bold07a.pdf"
+}
+
+@article{Bold11,
+ author = "Boldo, Sylvie and Marche, Claude",
+ title = "Formal verification of numerical programs: from C annotated
+ programs to mechanical proofs",
+ year = "2011",
+ publisher = "Springer",
+ journal = "Mathematics in Computer Science",
+ volume = "5",
+ pages = "377-393",
+ link = "\url{https://hal.archives-ouvertes.fr/hal-00777605/document}",
+ abstract =
+ "Numerical programs may require a high level of guarantee. This can be
+ achieved by applying formal methods, such as machine-checked proofs.
+ But these tools handle mathematical theorems while we are interested
+ in C code, in which numerical computations are performed using
+ floating-point arithmetic, whereas proof tools typically handle exact
+ real arithmetic. To achieve this high level of confidence on C programs,
+ we use a chain of tools: Frama-C, its Jessie plugin, Why and provers
+ among Coq, Gappa, Alt-Ergo, CVC3 and Z3. This approach requires the C
+ program to be annotated; each function must be precisely specified, and
+ we prove the correctness of the program by proving both that it meets its
+ specifications and that no runtime error may occur. The purpose of this
+ paper is to illustrate, on various examples, the features of this
+ approach.",
+ paper = "Bold11.pdf"
+}
+
+@misc{Bold16,
+ author = "Boldo, Sylvie",
+ title = "Formal verification of numerical analysis programs",
+ year = "2016",
+ link = "\url{https://www.youtube.com/watch?v=7MDwpwD6Ts4}"
+}
+
+@inproceedings{Boni07,
+ author = "Bonichon, R. and Delahaye, D. and Doligez, D.",
+ title = "Zenon: An Extensible Automated Theorem Prover Producing
+ Checkable Proofs",
+ booktitle = "LPAR 2007",
+ year = "2007",
+ link = "\url{http://zenon.inria.fr/zenlpar07.pdf}",
+ abstract =
+ "We present Zenon, an automated theorem prover for first order
+ classical logic (with equality), based on the tableau method. Zenon is
+ intended to be the dedicated prover of the Focal environment, an
+ object-oriented algebraic specification and proof system, which is
+ able to produce OCaml code for execution and Coq code for
+ certification. Zenon can directly generate Coq proofs (proof scripts
+ or proof terms), which can be reinserted in the Coq specifications
+ produced by Focal. Zenon can also be extended, which makes specific
+ (and possibly local) automation possible in Focal.",
+ paper = "Boni07.pdf"
+}
+
+@misc{Boul00,
+ author = "Boulme, S. and Hardin, T. and Rioboo, R.",
+ title = "Polymorphic Data Types, Objects, Modules and Functors:
+ is it too much?",
+ link = "\url{ftp://ftp.lip6.fr/lip6/reports/2000/lip6.2000.014.ps.gz}",
+ abstract = "
+ Abstraction is a powerful tool for developers and it is offered by
+ numerous features such as polymorphism, classes, modules, and
+ functors, $\ldots$ A working programmer may be confused by this
+ abundance. We develop a computer algebra library which is being
+ certified. Reporting this experience made with a language (Ocaml)
+ offering all these features, we argue that they are all needed
+ together. We compare several ways of using classes to represent
+ algebraic concepts, trying to follow as close as possible mathematical
+ specification. Then we show how to combine classes and modules to
+ produce code having very strong typing properties. Currently, this
+ library is made of one hundred units of functional code and behaves
+ faster than analogous ones such as Axiom.",
+ paper = "Boul00.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Boul99,
+ author = "Boulme, S. and Hardin, T. and Hirschkoff, D. and Rioboo, Renaud",
+ title = "On the way to certify Computer Algebra Systems",
+ booktitle = "Systems for integrated computation and deduction",
+ series = "Calculemus 99",
+ year = "1999",
+ publisher = "Elsevier",
+ location = "Trento, Italy",
+ pages = "11-12",
+ abstract =
+ "The FOC project aims at supporting, within a coherent software system,
+ the entire process of mathematical computation, starting with proved
+ theories, ending with certified implementations of algorithms. In this
+ paper, we explain our design requirements for the implementation,
+ using polynomials as a running example. Indeed, proving correctness of
+ implementations depends heavily on the way this design allows
+ mathematical properties to be truly handled at the programming level.
+
+ The FOC project, started at the fall of 1997, is aimed to build a
+ programming environment for the development of certified symbolic
+ computation. The working languages are Coq and Ocaml. In this paper,
+ we present first the motivations of the project. We then explain why
+ and how our concern for proving properties of programs has led us to
+ certain implementation choices in Ocaml. This way, the sources express
+ exactly the mathematical dependencies between different structures.
+ This may ease the achievement of proofs.",
+ paper = "Boul99.pdf",
+ keywords = "axiomref"
+}
+
+@techreport{Boul94,
+ author = "Boulton, Richard John",
+ title = "Efficiency in a fully-expansive Theorem Prover",
+ year = "1994",
+ type = "technical report",
+ number = "UCAM-CL-TR-337",
+ institution = "University of Cambridge",
+ abstract =
+ "The HOL system is a fully-expansive theorem prover: Proofs generated
+ in the system are composed of applications of the primitive inference
+ rules of the underlying logic. This has two main advantages. First,
+ the soundness of the system depends only on the implementations of the
+ primitive rules. Second, users can be given the freedom to write their
+ own proof procedures without the risk of making the system unsound. A
+ full functional programming language is provided for this purpose. The
+ disadvantage with the approach is that performance is
+ compromised. This is partly due to the inherent cost of fully
+ expanding a proof but, as demonstrated in this thesis, much of the
+ observed inefficiency is due to the way the derived proof procedures
+ are written. This thesis seeks to identify sources of non-inherent
+ inefficiency in the HOL system and proposes some general-purpose and
+ some specialised techniques for eliminating it. One area that seems to
+ be particularly amenable to optimisation is equational reasoning. This
+ is significant because equational reasoning constitutes large portions
+ of many proofs. A number of techniques are proposed that transparently
+ optimise equational reasoning. Existing programs in the HOL system
+ require little or no modification to work faster. The other major
+ contribution of this thesis is a framework in which part of the
+ computation involved in HOL proofs can be postponed. This enables
+ users to make better use of their time. The technique exploits a form
+ of lazy evaluation. The critical feature is the separation of the code
+ that generates the structure of a theorem from the code that justifies
+ it logically. Delaying the justification allows some non-local
+ optimisations to be performed in equational reasoning. None of the
+ techniques sacrifice the security of the fully-expansive approach. A
+ decision procedure for a subset of the theory of linear arithmetic is
+ used to illustrate many of the techniques. Decision procedures for
+ this theory are commonplace in theorem provers due to the importance
+ of arithmetic reasoning. The techniques described in the thesis have
+ been implemented and execution times are given. The implementation of
+ the arithmetic procedure is a major contribution in itself. For the
+ first time, users of the HOL system are able to prove many arithmetic
+ lemmas automatically in a practical amount of time (typically a second
+ or two). The applicability of the techniques to other fully-expansive
+ theorem provers and possible extensions of the ideas are considered."
+}
+
+@misc{Bove08,
+ author = "Bove, Ana and Dybjer, Peter",
+ title = "Dependent Types at Work",
+ year = "2008",
+ comment = "Lecture notes from LerNET Summer School, Piriapolis",
+ link =
+ "\url{http://www.cse.chalmers.se/~peterd/papers/DependentTypesAtWork.pdf}",
+ abstract =
+ "In these lecture notes we give an introduction to functional
+ programming with dependent types. We use the dependently typed
+ programming language Agda which is an extension of {Martin-L\"of} type
+ theory. First we show how to do simply typed functional programming in
+ the style of Haskell and ML. Some differences between Agda's type
+ system and the Hindley-Milner type system of Haskell and ML are also
+ discussed. Then we show how to use dependent types for programming and
+ we explain the basic ideas behind type-checking dependent types. We go
+ on to explain the Curry-Howard identification of propositions and
+ types. This is what makes Agda a programming logic and not only a
+ programming language. According to Curry-Howard, we identify programs
+ and proofs, something which is possible only by requiring that all
+ programs terminate. However, at the end of these notes we present a
+ method for encoding partial and general recursive functions as total
+ functions using dependent types.",
+ paper = "Bove08.pdf"
+}
+
+@book{Brad07,
+ author = "Bradley, Aaron R. and Manna, Zohar",
+ title = "The Calculus of Computation",
+ year = "2007",
+ publisher = "Springer",
+ isbn = "978-3-540-74112-1"
+}
+
+@article{Bres93,
+ author = "Bressoud, David",
+ title = "Review of The problems of mathematics",
+ journal = "Math. Intell.",
+ volume = "15",
+ number = "4",
+ year = "1993",
+ pages = "71-73"
+}
+
+@article{Brow12,
+ author = "Brown, Christopher W.",
+ title = "Fast simplifications for Tarski formulas based on monomial
+ inequalities",
+ year = "2012",
+ journal = "Journal of Symbolic Computation",
+ volume = "47",
+ pages = "859-882",
+ abstract =
+ "We define the ``combinatorial part'' of a Tarski formula in which
+ equalities and inequalities are in factored or partially-factored
+ form. The combinatorial part of a formula contains only
+ ``monomial inequalities'', which are sign conditions on monomials. We
+ give efficient algorithms for answering some basic questions about
+ conjunctions of monomial inequalities and prove the
+ NP-Completeness/Hardness of some others. By simplifying the
+ combinatorial part of a Tarski formula, and mapping the simplified
+ combinatorial part back to a Tarski formula, we obtain non-trivial
+ simplifications without algebraic operations.",
+ paper = "Brow12.pdf"
+}
+
+@article{Broy88,
+ author = "Broy, Manfried",
+ title = "Equational Specification of Partial Higher-order Algebras",
+ journal = "Theoretical Computer Science",
+ volume = "57",
+ number = "1",
+ year = "1988",
+ pages = "3-45",
+ abstract =
+ "The theory of algebraic abstract types specified by positive
+ conditional formulas formed of equations and a definedness predicate
+ is outlined and extended to hierarchical types with ``nonstrict''
+ operations, partial and even infinite objects. Its model theory is
+ based on the concept of partial interpretations. Deduction rules are
+ given, too. Models of types are studied where all explicit equations
+ have solutions. The inclusion of higher-order types, i.e., types
+ comprising higher-order functions leads to an algebraic (``equational'')
+ specification of algebras including sorts with ``infinite'' objects and
+ higher-order functions (``functionals'').",
+ paper = "Broy88.pdf"
+}
+
+@incollection{Buch97,
+ author = "Buchberger, Bruno",
+ title = "Mathematica: doing mathematics by computer?",
+ booktitle = "Advances in the design of symbolic computation systems",
+ year = "1997",
+ publisher = "Springer-Verlag",
+ pages = "2-20",
+ isbn = "978-3-211-82844-1",
+ paper = "Buch97.pdf"
+
+@article{Bulo04,
+ author = {Medina-Bulo, I. and Palomo-Lozano, F. and Alonso-Jim\'enez, J.A.
+ and Ruiz-Reina, J.L.},
+ title = "Verified Computer Algebra in ACL2",
+ journal = "ASIC 2004, LNAI 3249",
+ year = "2004",
+ pages = "171-184",
+ abstract = "In this paper, we present the formal verification of a
+ Common Lisp implementation of Buchberger's algorithm for computing
+ Groebner bases of polynomial ideals. This work is carried out in the
+ ACL2 system and shows how verified Computer Algebra can be achieved
+ in an executable logic.",
+ paper = "Bulo04.pdf"
+}
+
+@article{Bulo10,
+ author = "Medina-Bulo, I. and Palomo-Lozano, F. and Alonso-Jim\'enez, J.A.
+ and Ruiz-Reina, J.L.",
+ title = "A verified Common Lisp implementation of Buchberger's algorithm
+ in ACL2",
+ journal = "Journal of Symbolic Computation",
+ year = "2010",
+ pages = "96-123",
+ abstract = "In this article, we present the formal verification of a
+ Common Lisp implementation of Buchberger's algorithm for computing
+ Groebner bases of polynomial ideals. This work is carried out in ACL2,
+ a system which provides an integrated environment where programming
+ (in a pure functional subset of Common Lisp) and formal verification
+ of programs, with the assistance of a theorem prover, are possible. Our
+ implementation is written in a real programming language and it is
+ directly executable within the ACL2 system or any compliant Common Lisp
+ system. We provide here snippets of real verified code, discuss the
+ formalization details in depth, and present quantitative data about
+ the proof effort.",
+ paper = "Bulo10.pdf"
+}
+
+@misc{Byrd17,
+ author = "Byrd, William",
+ title = "The Most Beautiful Program Ever Written",
+ link = "\url{https://www.youtube.com/watch?v=OyfBQmvr2Hc}",
+ comment = "See miniKanren and Barliman (program synthesis with proof)"
+}
+
+@misc{Cast16,
+ author = "Casteran, Pierre and Sozeau, Matthieu",
+ title = "A Gentle Introduction to Type Classes and Relations in Coq",
+ year = "2016",
+ link = "\url{http://www.labri.fr/perso/casteran/CoqArt/TypeClassesTut/typeclassestut.pdf}",
+ paper = "Cast16.pdf"
+}
+
+@misc{Care11a,
+ author = "Carette, Jacques and Farmer, William M. and Jeremic, Filip and
+ Maccio, Vincent and O'Connor, Russell and Tran, Quang M.",
+ title = "The MathScheme Library: Some Preliminary Experiments",
+ year = "2011",
+ link = "\url{https://arxiv.org/pdf/1106.1862.pdf}",
+ abstract =
+ "We present some of the experiments we have performed to best test our
+ design for a library for MathScheme, the mechanized mathematics
+ software system we are building. We wish for our library design to use
+ and reflect, as much as possible, the mathematical structure present
+ in the objects which populate the library.",
+ paper = "Care11a.pdf",
+ keywords = "axiomref"
+}
+
+@article{Chli10,
+ author = "Chlipala, Adam",
+ title = "An Introduction to Programming and Proving with Dependent Types
+ in Coq",
+ journal = "Journal of Formalized Reasoning",
+ volume = "3",
+ number = "2",
+ pages = "1-93",
+ year = "2010",
+ paper = "Chli10.pdf"
+}
+
+@misc{Chli14,
+ author = "Chlipala, Adam and Braibant, Thomas and Cuellar, Santiago and
+ Delaware, Benjamin and Gross, Jason and Malecha, Gregory and
+ Clement,-Claudel, Pit and Wang, Peng",
+ title = "Bedrock: A Software Development Ecosystem Inside a Proof Assistant",
+ year = "2014",
+ link = "\url{https://www.youtube.com/watch?v=BSyrp-iYBMo}",
+ abstract =
+ "The benefits of formal correctness proofs for software are clear
+ intuitively, but the high human costs of proof construction have
+ generally been viewed as prohibitive. To support that integration, we
+ need to rethink the familiar programming toolchains. The new world
+ needn't be all about doing prodigious extra work to achieve the virtue
+ of correct programs; formal methods also suggest new programming
+ approaches that better support abstraction and modularity than do
+ coarser-grained specification styles like normal static types. This
+ talk overviews Bedrock, a framework for certified programming inside
+ of the Coq proof assistant. Bedrock programs are implemented,
+ specified, verified, and compiled inside of Coq. A single program may
+ be divided into modules with formal interfaces, written in different
+ programming languages and verified with different proof styles. The
+ common foundation is an assembly language with an operational
+ semantics (serving as the trusted code base) and a semantic module
+ system (orchestrating linking of code and proofs across source
+ languages). A few different programming styles have been connected to
+ the shared foundation, including a C-like language with an ``array of
+ bytes'' memory model, higher-level more C++-like languages with ``array
+ of abstract data types'' memory models, a domain-specific language for
+ XML processing, standard Coq functional programs, and even declarative
+ specifications that are refined automatically into assembly code with
+ correctness proofs. The talk will present Bedrock's shared foundation
+ and sketch the pieces that go into refining declarative specifications
+ into closed assembly programs, covering joint work with Thomas
+ Braibant, Santiago Cuellar, Benjamin Delaware, Jason Gross, Gregory
+ Malecha, Clement Pit-Claudel, and Peng Wang."
+
+}
+
+@book{Chli15,
+ author = "Chlipala, Adam",
+ title = "Certified Programming with Dependent Types",
+ year = "2015",
+ link = "\url{http://adam.chlipala.net/cpdt/cpdt.pdf}",
+ publisher = "MIT Press",
+ isbn = "9780262026659",
+ paper = "Chli15.pdf"
+}
+
+@misc{Clar91,
+ author = "Clarke, Edmund and Zhao, Xudong",
+ title = "Analytica -- A Theorem Prover in Mathematica",
+ year = "1991",
+ link = "\url{http://www.cs.cmu.edu/~emc/papers/Conference%20Papers/Analytica%20A%20Theorem%20Prover%20in%20Mathematica.pdf}",
+ paper = "Clar91.pdf"
+}
+
+@article{Como88,
+ author = "Comon, H. and Lugiez, D. and Schnoebelen, P.H.",
+ title = "A Rewrite-based Type Discipline for a Subset of Computer Algebra",
+ journal = "J. Symbolic Computation",
+ year = "1991",
+ volume = "11",
+ pages = "349-368",
+ abstract =
+ "This paper is concerned with the type structure of a system including
+ polymorphism, type properties, and subtypes. This type system
+ originates from computer algebra but it is not intended to be the
+ solution of all type problems in this area.
+
+ Types (or sets of types) are denoted by terms in some order-sorted
+ algebra. We consider a rewrite relation in this algebra, which is
+ intended to express subtyping. The relations between the semantics and
+ the axiomatization are investigated. It is shown that the problem of
+ type inference is undecidable but that a narrowing strategy for
+ semi-decision procedures is described and studied.",
+ paper = "Como88.pdf",
+ keywords = "axiomref"
+}
+
+@techreport{Coqu86,
+ author = {Coquand, Thierry and Huet, G\'erard},
+ title = "The Calculus of Constructions",
+ year = "1986",
+ institution = "INRIA Centre de Rocquencourt",
+ number = "530",
+ link = "\url{https://hal.inria.fr/inria-00076024/document}",
+ abstract =
+ "The Calculus of Constructions is a higher-order formalism for
+ constructive proofs in natural deduction style. Every proof is a
+ $\lambda$-expression, typed with propositions of the underlying
+ logic. By removing types we get a pure $\lambda$-expression,
+ expressing its associated algorithm. Computing this
+ $\lambda$-expression corresponds roughly to cut-elimination. It is our
+ thesis that (as already advocated by Martin-Lof) the Curry-Howard
+ correspondence between propositions and types is a powerful paradigm
+ for Computer Science. In the case of Constructions, we obtain the
+ notion of a very high-level functional programming language, with
+ complex polymorphism well-suited for modules specification. The notion
+ of type encompasses the usual notion of data type, but allows as well
+ arbitrarily complex algorithmic specifications. We develop the basic
+ theory of a Calculus of Constructions, and prove a strong
+ normalization theorem showing that all computations terminate.
+ Finally, we suggest various extensions to stronger calculi.",
+ paper = "Coqu86.pdf"
+}
+
+@article{Coqu96,
+ author = "Coquand, Thierry and Dybjer, Peter",
+ title = "Intuitionistic Model Constructions and Normalization Proofs",
+ journal = "Mathematical Structures in Computer Science",
+ volume = "7",
+ pages = "75-94",
+ year = "1996",
+ link = "\url{http://www.cse.chalmers.se/~peterd/papers/Glueing.ps}",
+ abstract =
+ "The traditional notions of {\sl strong} and {\sl weak normalization}
+ refer to properties of a binary {\sl reduction relation}. In this
+ paper we explore an alternative approach to normalization, where we
+ bypass the reduction relation and instead focus on the
+ {\sl normalization function}, that is, the function which maps a term into
+ its normal form.
+
+ Such a normalization function can be constructed by building an
+ appropriate model and a function ``quote'' which inverts the
+ interpretation function. The normalization function is then obtained
+ by composing the quote function with the interpretation function. We
+ also discuss how to get a simple proof of the property that
+ constructors are one-to-one, which usually is obtained as a corollary
+ of Church-Rosser and normalization in the traditional sense.
+
+ We illustrate this approach by showing how a glueing model (closely
+ related to the glueing construction used in category theory) gives
+ rise to a normalization algorithm for a combinatory formulation of
+ Godel System T. We then show how the method extends in a
+ straightforward way when we add cartesian products and disjoint unions
+ (full intuitionistic propositional logic under a Curry-Howard
+ interpretation) and transfinite inductive types such as the Brouwer
+ ordinals.",
+ paper = "Coqu96.pdf"
+}
+
+@misc{Coqu16,
+ author = {Coquand, Thierry and Huet, G\'erard and Paulin, Christine},
+ title = "The COQ Proof Assistant",
+ year = "2016",
+ link = "\url{https://coq.inria.fr}"
+}
+
+@misc{COQR16,
+ author = {Coquand, Thierry and Huet, G\'erard and Paulin, Christine},
+ title = "The COQ Proof Assistant Reference Manual",
+ year = "2016",
+ link="\url{https://coq.inria.fr/distrib/current/files/Reference-Manual.pdf}",
+ paper = "COQR16.pdf"
+}
+
+@misc{Coqu16a,
+ author = {Coquand, Thierry and Huet, G\'erard and Paulin, Christine},
+ title = "COQ Proof Assistant Library Coq.ZArith.Znumtheory",
+ year = "2016",
+ link = "\url{https://coq.inria.fr/library/Coq.ZArith.Znumtheory.html}"
+}
+
+@misc{COQnat,
+ author = "COQ Proof Assistant",
+ title = "{Library} {Coq}.{Init}.{Nat}",
+ link = "\url{https://coq.inria.fr/library/Coq.Init.Nat.html}",
+ abstract = "Peano natural numbers, definitions of operations",
+ year = "2017"
+}
+
+@misc{Cons98,
+ author = "Constable, Robert L. and Jackson, Paul B.",
+ title = "Towards Integrated Systems for Symbolic Algebra and Formal
+ Constructive Mathematics",
+ link = "\url{http://www.nuprl.org/documents/Constable/towardsintegrated.pdf}",
+ year = "1998",
+ abstract =
+ "The purpose of this paper is to report on our efforts to give a
+ formal account of some of the algebra used in Computer Algebra Systems
+ (CAS). In particular, we look at the concepts used in the so called
+ 3rd generation algebra systems, such as Axiom[4] and Weyl[9]. It is
+ our claim that the Nuprl proof development system is especially well
+ suited to support this kind of mathematics.",
+ paper = "Cons98.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Dani06,
+ author = "Danielsson, Nils Anders and Hughes, John and Jansson, Patrik and
+ Gibbons, Jeremy",
+ title = "Fast and Loose Reasoning is Morally Correct",
+ booktitle = "Proc. of ACM POPL '06",
+ series = "POPL '06",
+ year = "2006",
+ location = "Charleston, South Carolina",
+ abstract =
+ "Functional programmers often reason about programs as if they were
+ written in a total language, expecting the results to carry over to
+ non-total (partial) languages. We justify such reasoning.
+
+ Two languages are defined, one total and one partial, with identical
+ syntax. The semantics of the partial language includes partial and
+ infinite values, and all types are lifted, including the function
+ spaces. A partial equivalence relation (PER) is then defined, the
+ domain of which is the total subset of the partial language. For types
+ not containing function spaces the PER relates equal values, and
+ functions are related if they map related values to related values.
+
+ It is proved that if two closed terms have the same semantics in the
+ total language, then they have related semantics in the partial
+ language. It is also shown that the PER gives rise to a bicartesian
+ closed category which can be used to reason about values in the domain
+ of the relation.",
+ paper = "Dani06.pdf",
+ keywords = "axiomref"
+}
+
+@article{Dave08,
+ author = "Davenport, James H.",
+ title = "Effective Set Membership in Computer Algebra and Beyond",
+ journal = "LNAI",
+ volume = "5144",
+ pages = "266-269",
+ year = "2008",
+ abstract =
+ "In previous work, we showed the importance of distinguishing ``I know
+ that $X \ne Y$'' from ``I don't know that $X = Y$''. In this paper we
+ look at effective set membership, starting with Groebner bases, where
+ the issues are well-expressed in algebra systems, and going on to
+ integration and other questions of `computer calculus'.
+
+ In particular, we claim that a better recognition of the role of set
+ membership would clarify some features of computer algebra systems,
+ such as `what does an integral mean as output'.",
+ paper = "Dave08.pdf"
+}
+
+@misc{Dave17a,
+ author = "Davenport, James",
+ title = "Computer Algebra and Formal Proof",
+ year = "2017",
+ comment = "BPR presentation, Cambridge, England",
+ video = "Dave17a.mp4",
+ paper = "Dave17a.pdf"
+}
+
+@book{Dijk76,
+ author = "Dijkstra, Edsger",
+ title = "A Discipline of Programming",
+ publisher = "Prentice-Hall",
+ year = "1976",
+ isbn = "0-13-215871-X"
+}
+
+@article{Demi79,
+ author = "DeMillo, Richard A. and Lipton, Richard J. and Perlis, Alan J.",
+ title = "Social Processes and Proofs of Theorems and Programs",
+ journal = "Communications of the ACM",
+ volume = "22",
+ number = "5",
+ year = "1979",
+ pages = "271-280",
+ abstract =
+ "It is argued that formal verifications of programs, no matter how
+ obtained, will not play the same key role in the development of
+ computer science and software engineering as proofs do in mathematics.
+ Furthermore the absence of continuity, the inevitability of change, and
+ the complexity of specification of significantly many real programs
+ make the formal verification process difficult to justify and manage.
+ It is felt that ease of formal verification should not dominate program
+ language design.",
+ paper = "Demi79.pdf"
+}
+
+@inproceedings{Dybj90,
+ author = "Dybjer, Peter",
+ title = {Inductive Sets and Families in Martin-L\"of's Type Theory and
+ Their Set-Theoretic Semantics},
+ booktitle = "Proc. First Workshop on Logical Frameworks",
+ year = "1990",
+ link =
+ "\url{http://www.cse.chalmers.se/~peterd/papers/Setsem\_Inductive.pdf}",
+ abstract =
+ "{Martin-L\"of}'s type theory is presented in several steps. The kernel
+ is a dependently typed $\lambda$-calculus. Then there are schemata for
+ inductive sets and families of sets and for primitive recursive functions
+ and families of functions. Finally, there are set formers (generic
+ polymorphism) and universes. At each step syntax, inference rules, and
+ set-theoretic semantics are given",
+ paper = "Dybj90.pdf"
+}
+
+@article{Dybj03,
+ author = "Dybjer, Peter and Setzer, Anton",
+ title = "Induction-recursion and initial algebras",
+ journal = "Annals of Pure and Applied Logic",
+ volume = "124",
+ year = "2003",
+ pages = "1-47",
+ abstract =
+ "Induction-recursion is a powerful definition method in intuitionistic
+ type theory. It extends (generalized) inductive definitions and allows us
+ to define all standard sets of Martin-{L\"of} type theory as well as a
+ large collection of commonly occurring inductive data structures. It also
+ includes a variety of universes which are constructive analogues of
+ inaccessibles and other large cardinals below the first Mahlo cardinal.
+ In this article we give a new compact formalization of inductive-recursive
+ definitions by modeling them as initial algebras in slice categories. We
+ give generic formation, introduction, elimination, and equality rules
+ generalizing the usual rules of type theory. Moreover, we prove that the
+ elimination and equality rules are equivalent to the principle of the
+ existence of initial algebras for certain endofunctors. We also show the
+ equivalence of the current formulation with the formulation of
+ induction-recursion as a reflection principle given in Dybjer and
+ Setzer (Lecture Notes in Comput. Sci. 2183 (2001) 93). Finally we discuss
+ two type-theoretic analogues of Mahlo cardinals in set theory: an external
+ Mahlo universe which is defined by induction-recursion and captured by our
+ formalization, and an internal Mahlo universe, which goes beyond induction-
+ recursion. We show that the external Mahlo universe, and therefore also
+ the theory of inductive-recursive definitions, have proof-theoretical
+ strength of at least Rathjen's theory KPM.",
+ paper = "Dybj03.pdf"
+}
+
+@misc{Dolz97,
+ author = "Dolzmann, Andreas and Sturm, Thomas",
+ title = "Guarded Expressions in Practice",
+ link = "\url{http://redlog.dolzmann.de/papers/pdf/MIP-9702.pdf}",
+ year = "1997",
+ abstract =
+ "Computer algebra systems typically drop some degenerate cases when
+ evaluating expressions, e.g. $x/x$ becomes 1 dropping the case
+ $x=0$. We claim that it is feasible in practice to compute also the
+ degenerate cases yielding {\sl guarded expressions}. We work over real
+ closed fields but our ideas about handling guarded expressions can be
+ easily transferred to other situations. Using formulas as guards
+ provides a powerful tool for heuristically reducing the combinatorial
+ explosion of cases: equivalent, redundant, tautological, and
+ contradictive cases can be detected by simplification and quantifier
+ elimination. Our approach allows to simplify the expressions on the
+ basis of simplification knowledge on the logical side. The method
+ described in this paper is implemented in the REDUCE package GUARDIAN,
+ which is freely available on the WWW.",
+ paper = "Dolz97.pdf"
+}
+
+@inbook{Dosr11,
+ author = "Dos Reis, Gabriel and Matthews, David and Li, Yue",
+ title = "Retargeting OpenAxiom to Poly/ML: Towards an Integrated Proof
+ Assistants and Computer Algebra System Framework",
+ booktitle = "Calculemus",
+ pages = "15-29",
+ year = "2011",
+ publisher = "Springer",
+ isbn = "978-3-642-22673-1",
+ link = "\url{http://paradise.caltech.edu/~yli/paper/oa-polyml.pdf}",
+ abstract = "
+ This paper presents an ongoing effort to integrate the Axiom family of
+ computer algebra systems with Poly/ML-based proof assistants in the
+ same framework. A long term goal is to make a large set of efficient
+ implementations of algebraic algorithms available to popular proof
+ assistants, and also to bring the power of mechanized formal
+ verification to a family of strongly typed computer algebra systems at
+ a modest cost. Our approach is based on retargeting the code generator
+ of the OpenAxiom compiler to the Poly/ML abstract machine.",
+ paper = "Dosr11.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Duns98,
+ author = "Dunstan, Martin and Kelsey, Tom and Linton, Steve and
+ Martin, Ursula",
+ title = "Lightweight Formal Methods For Computer Algebra Systems",
+ publisher = "ACM Press",
+ booktitle = "Proc. ISSAC 1998",
+ year = "1998",
+ location = "Rostock, Germany",
+ pages = "80-87",
+ link = "\url{http://www.cs.st-andrews.ac.uk/~tom/pub/issac98.pdf}",
+ abstract =
+ "Demonstrates the use of formal methods tools to provide a semantics
+ for the type hierarchy of the Axiom computer algebra system, and a
+ methodology for Aldor program analysis and verification. There are
+ examples of abstract specifications of Axiom primitives.",
+ paper = "Duns98.pdf",
+ keywords = "axiomref"
+}
+
+@phdthesis{Duns99a,
+ author = "Dunstan, Martin N.",
+ title = "Larch/Aldor - A Larch BISL for AXIOM and Aldor",
+ school = "University of St. Andrews",
+ year = "1999",
+ abstract = "
+ In this thesis we investigate the use of lightweight formal methods
+ and verification conditions (VCs) to help improve the reliability of
+ components constructed within a computer algebra system. We follow the
+ Larch approach to formal methods and have designed a new behavioural
+ interface specification language (BISL) for use with Aldor: the
+ compiled extension language of Axiom and a fully-featured programming
+ language in its own right. We describe our idea of lightweight formal
+ methods, present a design for a lightweight verification condition
+ generator and review our implementation of a prototype verification
+ condition generator for Larch/Aldor.",
+ paper = "Duns99a.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Duns99,
+ author = "Dunstan, Martin and Kelsey, Tom and Martin, Ursula and
+ Linton, Steve A.",
+ title = "Formal Methods for Extensions to CAS",
+ booktitle = "Proc. of FME'99",
+ series = "FME'99",
+ location = "Toulouse, France",
+ year = "1999",
+ pages = "1758-1777",
+ link = "\url{http://tom.host.cs.st-andrews.ac.uk/pub/fm99.ps}",
+ abstract =
+ "We demonstrate the use of formal methods tools to provide a semantics
+ for the type hierarchy of the AXIOM computer algebra system, and a
+ methodology for Aldor program analysis and verification. We give a
+ case study of abstract specifications of AXIOM primitives, and provide
+ an interface between these abstractions and Aldor code.",
+ paper = "Duns99.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Fate02a,
+ author = "Fateman, Richard J.",
+ title = "Symbolic Execution Merges Construction, Debugging and Proving",
+ link = "\url{http://www.cs.berkeley.edu/~fateman/papers/symex.pdf}",
+ year = "2002",
+ abstract =
+ "There is naturally an interest in any technology which promises to
+ assist us in producing correct programs. Some efforts attempt to
+ insure correct programs by making their construction simpler. Some
+ efforts are oriented toward increasing the effectiveness of testing to
+ make the programs appear to perform as required. Other efforts are
+ directed to prove the correctness of the resulting program. Symbolic
+ execution, in which symbols instead of numbers are used in what
+ appears to be a numerical program, is an old but to-date still not
+ widely-used technique. It has been available in various forms for
+ decades from the computer algebra community. Symbolic execution has
+ the potential to assist in all these phases: construction, debugging,
+ and proof. We describe how this might work specifically with regard to
+ our own recent experience in the construction of correct linear
+ algebra programs for structured matrices and LU factorization. We show
+ how developing these programs with a computer algebra system, and then
+ converting incrementally to use more efficient forms. Frequent symbolic
+ execution of the algorithms, equivalent to testing over infinite test
+ sets, aids in debugging, while strengthening beliefs that the correctness
+ of results is an algebraic truth rather than an accident.",
+ paper = "Fate02a.pdf"
+}
+
+@inproceedings{Fate03a,
+ author = "Fateman, Richard J.",
+ title = "High-level proofs of mathematical programs using automatic
+ differentiation, simplification, and some common sense",
+ booktitle = "Proc. ISSAC 2003",
+ pages = "88-94",
+ year = "2003",
+ isbn = "1-58113-641-2",
+ abstract =
+ "One problem in applying elementary methods to prove correctness of
+ interesting scientific programs is the large discrepancy in level of
+ discourse between low-level proof methods and the logic of scientific
+ calculation, especially that used in a complex numerical program. The
+ justification of an algorithm typically relies on algebra or analysis,
+ but the correctness of the program requires that the arithmetic
+ expressions are written correctly and that iterations converge to
+ correct values in spite of truncation of infinite processes or series
+ and the commission of numerical roundoff errors. We hope to help
+ bridge this gap by showing how we can, in some cases, state a
+ high-level requirement and by using a computer algebra system (CAS)
+ demonstrate that a program satisfies that requirement. A CAS can
+ contribute program manipulation, partial evaluation, simplification or
+ other algorithmic methods. A novelty here is that we add to the usual
+ list of techniques automatic differentiation, a method already widely
+ used in optimization contexts where algorithms are differentiated. We
+ sketch a proof of a numerical program to compute sine, and display a
+ related approach to a version of a Bessel function algorithm for J0(x)
+ based on a recurrence.",
+ paper = "Fate03a.pdf"
+}
+
+@article{Frad08,
+ author = "Frade, Maria Joao",
+ title = "Calculus of Inductive Construction. Software Formal Verification",
+ year = "2008",
+ link = "\url{http://www4.di.uminho.pt/~jno/mfes/0809/SFV-CIC.pdf}",
+ journal = "MFES",
+ paper = "Frad08.pdf"
+}
+
+@misc{Freg1891,
+ author = "Frege, Gottlob",
+ title = "Function and Concept",
+ year = "1891",
+ link = "\url{http://fitelson.org/proseminar/frege_fac.pdf}",
+ paper = "Frege.pdf"
+}
+
+@book{Frie08,
+ author = "Friedman, Daniel P. and Wand, Mitchell",
+ title = "Essentials of Programming Languages",
+ publisher = "MIT Press",
+ year = "2008",
+ isbn = "978-0-262-06279-4"
+}
+
+@misc{Gent35,
+ author = "Gentzen, Gerhard",
+ title = "Investigations into Logical Deduction",
+ year = "1935",
+ pages = "68-131",
+ paper = "Gent35.pdf"
+}
+
+@article{Gent64,
+ author = "Gentzen, Gerhard",
+ title = "Investigations into Logical Deduction",
+ journal = "American Philosophical Quarterly",
+ volume = "1",
+ number = "4",
+ year = "1964",
+ pages = "288-306",
+ paper = "Gent64.pdf"
+}
+
+@article{Gent65,
+ author = "Gentzen, Gerhard",
+ title = "Investigations into Logical Deduction: II",
+ journal = "American Philosophical Quarterly",
+ volume = "2",
+ number = "3",
+ year = "1965",
+ pages = "204-218",
+ paper = "Gent65.pdf"
+}
+
+@misc{Geuv92,
+ author = "Geuvers, Herman",
+ title = "The Calculus of Constructions and Higher Order Logic",
+ year = "1992",
+ link = "\url{http://www.cs.ru.nl/~herman/PUBS/CC\_CHiso.pdf}",
+ abstract =
+ "The Calculus of Constructions (CC) is a typed lambda calculus for
+ higher order intuitionistic logic: proofs of the higher order logic
+ are interpreted as lambda terms and formulas as types. It is also the
+ union of Girard's system $F_\omega$, a higher order typed lambda
+ calculus, and a first order dependent typed lambda calculus in the
+ style of de Bruijn's Automath or Martin-Lof's intuitionistic theory
+ of types. Using the impredicative coding of data types in $F_\omega$,
+ the Calculus of Constructions thus becomes a higher order language for
+ the typing of functional programs. We shall introduce and try to
+ explain CC by exploiting especially the first point of view, by
+ introducing a typed lambda calculus that faithfully represents higher
+ order predicate logic (so for this system the Curry-Howard
+ 'formulas-as-types isomorphism' is really an isomorphism.) Then we
+ discuss some propositions that are provable in CC but not in the
+ higher order logic, showing that the formulas-as-types embedding of
+ higher order predicate logic into CC is not an isomorphism. It is our
+ intention that this chapter can be read without any specialist
+ knowledge of higher order logic or higher order typed lambda calculi.",
+ paper = "Geuv92.pdf"
+}
+
+@article{Geuv02,
+ author = "Geuvers, Herman and Pollack, Randy and Wiedijk, Freek and
+ Zwanenburg, Jan",
+ title = "A Constructive Algebraic Hierarchy in Coq",
+ year = "2002",
+ journal = "Journal of Symbolic Computation",
+ abstract =
+ "We describe a framework of algebraic structures in the proof assistant
+ Coq. We have developed this framework as part of the FTA project in
+ Nijmegen, in which a constructive proof of the Fundamental Theorem of
+ Algebra has been formalized in Coq.
+
+ The algebraic hierarchy that is described here is both abstract and
+ structured. Structures like groups and rings are part of it in an
+ abstract way, defining e.g. a ring as a tuple consisting of a group, a
+ binary operation and a constant that together satisfy the properties
+ of a ring. In this way, a ring automatically inherits the group
+ properties of the additive subgroup. The algebraic hierarchy is
+ formalized in Coq by applying a combination of labeled record types
+ and coercions. In the labeled record types of Coq, one can use
+ {\sl dependent types}: the type of one label may depend on another
+ label. This allows to give a type to a dependent-typed tuple like
+ $\langle A, f, a \rangle$, where $A$ is a set, $f$ an operation on $A$
+ and $a$ an element of $A$. Coercions are functions that are used
+ implicitly (they are inferred by the type checker) and allow, for
+ example, to use the structure $\mathcal{A} := \langle A, f, a \rangle$
+ as a synonym for the carrier set $A$, as is often done in mathematical
+ practice. Apart from the inheritance and reuse of properties, the
+ algebraic hierarchy has proven very useful for reusing notations.",
+ paper = "Geuv02.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Gime16,
+ author = "Gimenez, Eduardo and Casteran, Pierre",
+ title = "A Tutorial on [Co-]Inductive Types in Coq",
+ year = "2016",
+ link = "\url{https://coq.inria.fr/distrib/current/files/RecTutorial.pdf}",
+ abstract =
+ "This document is an introduction to the definition and use of
+ inductive and co-inductive types in the {\sl Coq} proof environment.
+ It explains how types like natural numbers and infinite streams are
+ defined in {\sl Coq}, and the kind of proof techniques that can be
+ used to reason about them (case analysis, induction, inversion of
+ predicates, co-induction, etc.) Each technique is illustrated
+ through an executable and self-contained {\sl Coq} script.",
+ paper = "Gime16.pdf"
+}
+
+@misc{Gord96,
+ author = "Gordon, Mike",
+ title = "From LCF to HOL: a short history",
+ year = "1996",
+ link = "\url{http://www.cl.cam.ac.uk/~mjcg/papers/HolHistory.pdf}",
+ paper = "Gord96.pdf"
+}
+
+@article{Gogu06,
+ author = "Goguen, Healfdene and McBride, Conor and McKinna, James",
+ title = "Eliminating Dependent Pattern Matching",
+ year = "2006",
+ journal = "Lecture Notes in Computer Science",
+ volume = "4060",
+ pages = "521-540",
+ link = "\url{http://cs.ru.nl/~james/RESEARCH/goguen2006.pdf}",
+ abstract =
+ "This paper gives a reduction-preserving translation from Coquand's
+ {\sl dependent pattern matching} into a traditional type theory
+ with universes, inductive types and relations and the axiom K. This
+ translation serves as a proof of termination for structurally
+ recursive pattern matching programs, provides an implementable
+ compilation technique in the style of functional programming languages,
+ and demonstrates the equivalence with a more easily understood type
+ theory.",
+ paper = "Gogu06.pdf"
+}
+
+@article{Gott05,
+ author = "Gottliebsen, Hanne and Kelsey, Tom and Martin, Ursula",
+ title = "Hidden verification for computational mathematics",
+ journal = "Journal of Symbolic Computation",
+ volume = "39",
+ number = "5",
+ pages = "539-567",
+ year = "2005",
+ link =
+ "\url{http://www.sciencedirect.com/science/article/pii/S0747717105000295}",
+ abstract =
+ "We present hidden verification as a means to make the power of
+ computational logic available to users of computer algebra systems
+ while shielding them from its complexity. We have implemented in PVS a
+ library of facts about elementary and transcendental functions, and
+ automatic procedures to attempt proofs of continuity, convergence and
+ differentiability for functions in this class. These are called
+ directly from Maple by a simple pipe-lined interface. Hence we are
+ able to support the analysis of differential equations in Maple by
+ direct calls to PVS for: result refinement and verification, discharge
+ of verification conditions, harnesses to ensure more reliable
+ differential equation solvers, and verifiable look-up tables.",
+ paper = "Gott05.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Grab17,
+ author = {Grabm\"uller, Martin},
+ title = "Algorithm W",
+ year = "2017",
+ link = "\url{https://github.com/mgrabmueller/AlgorithmW}",
+ abstract =
+ "In this paper we develop a complete implementation of Algorithm W for
+ Hindley-Milner polymorphic type inference in Haskell",
+ paper = "Grab17.pdf"
+}
+
+@misc{Gran11,
+ author = "Grant, Ian",
+ title = "The Hindley-Milner Type Inference Algorithm",
+ year = "2011",
+ link = "\url{http://steshaw.org/hm/hindley-milner.pdf}",
+ abstract =
+ "The Hindley-Milner algorithm is described and an implementation in
+ Standard ML is presented.",
+ paper = "Gran11.pdf"
+}
+
+@book{Grie81,
+ author = "Gries, David",
+ title = "The Science of Programming",
+ publisher = "Springer-Verlag",
+ year = "1981",
+ isbn = "0-387-90641-X"
+}
+
+@misc{Hard13,
+ author = "Hardin, David S. and McClurg, Jedidiah R. and Davis, Jennifer A.",
+ title = "Creating Formally Verified Components for Layered Assurance
+ with an LLVM to ACL2 Translator",
+ link = "\url{http://www.jrmcclurg.com/papers/law\_2013\_paper.pdf}",
+ abstract =
+ "This paper describes an effort to create a library of formally
+ verified software component models from code that has been compiled
+ using the Low-Level Virtual Machine (LLVM) intermediate form. The idea
+ is to build a translator from LLVM to the applicative subset of Common
+ Lisp accepted by the ACL2 theorem prover. They perform verification of
+ the component model using ACL2's automated reasoning capabilities.",
+ paper = "Hard13.pdf"
+}
+
+@misc{Hard14,
+ author = "Hardin, David S. and Davis, Jennifer A. and Greve, David A. and
+ McClurg, Jedidiah R.",
+ title = "Development of a Translator from LLVM to ACL2",
+ link = "\url{http://arxiv.org/pdf/1406.1566}",
+ abstract = "
+ In our current work a library of formally verified software components
+ is to be created, and assembled, using the Low-Level Virtual Machine
+ (LLVM) intermediate form, into subsystems whose top-level assurance
+ relies on the assurance of the individual components. We have thus
+ undertaken a project to build a translator from LLVM to the
+ applicative subset of Common Lisp accepted by the ACL2 theorem
+ prover. Our translator produces executable ACL2 formal models,
+ allowing us to both prove theorems about the translated models as well
+ as validate those models by testing. The resulting models can be
+ translated and certified without user intervention, even for code with
+ loops, thanks to the use of the def::ung macro which allows us to
+ defer the question of termination. Initial measurements of concrete
+ execution for translated LLVM functions indicate that performance is
+ nearly 2.4 million LLVM instructions per second on a typical laptop
+ computer. In this paper we overview the translation process and
+ illustrate the translator's capabilities by way of a concrete example,
+ including both a functional correctness theorem as well as a
+ validation test for that example.",
+ paper = "Hard14.pdf"
+}
+
+@misc{Harp13,
+ author = "Harper, Robert",
+ title = "15.819 Homotopy Type Theory Course",
+ link = "\url{http://www.cs.cmu.edu/~rwh/courses/hott}",
+ year = "2013"
+}
+
+@inproceedings{Harr94,
+ author = "Harrison, John and Thery, Laurent",
+ title = "Extending the HOL Theorem Prover with a Computer Algebra System
+ to Reason about the Reals",
+ booktitle = "Proc. Higher Order Logic Theorem Proving",
+ year = "1994",
+ publisher = "Springer",
+ pages = "174-184",
+ isbn = "978-3-540-48346-5",
+ abstract =
+ "In this paper we describe an environment for reasoning about the
+ reals which combines the rigour of a theorem prover with the power of
+ a computer algebra system.",
+ paper = "Harr94.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Heer02,
+ author = "Heeren, Bastiaan and Hage, Jurriaan and Swierstra, Doaitse",
+ title = "Generalizing Hindley-Milner Type Inference Algorithms",
+ year = "2002",
+ link = "\url{https://pdfs.semanticscholar.org/8983/233b3dff2c5b94efb31235f62bddc22dc899.pdf}",
+ abstract =
+ "Type inferencing according to the standard algorithms $W$ and $M$
+ often yields uninformative error messages. Many times, this is a
+ consequence of a bias inherent in the algorithms. The method
+ developed here is to first collect constraints from the program, and
+ to solve these afterwards, possibly under the influence of a
+ heuristic. We show the soundness and completeness of our algorithm.
+ The algorithms $W$ and $M$ turn out to be deterministic instances of our
+ method, giving the correctness for $W$ and $M$ with respect to the
+ Hindley-Milner typing rules for free. We also show that our algorithm
+ is more flexible, because it naturally allows the generation of
+ multiple messages",
+ paper = "Heer02.pdf"
+}
+
+@article{Hoar69,
+ author = "Hoare, C. A. R.",
+ title = "An Axiomatic Basis for Computer Programming",
+ journal = "CACM",
+ volume = "12",
+ number = "10",
+ pages = "576-580",
+ year = "1969",
+ link = "\url{https://www.cs.cmu.edu/~crary/819-f09/Hoare69.pdf}",
+ abstract =
+ "In this paper an attempt is made to explore the logical foundations
+ of computer programming by use of techniques which were first applied
+ in the study of geometry and have later been extended to other branches
+ of mathematics. This involves the elucidation of sets of axioms and
+ rules of inference which can be used in proofs of the properties of
+ computer programs. Examples are given of such axioms and rules, and
+ a formal proof of a simple theorem is displayed. Finally, it is argued
+ that important advantages, both theoretical and practical, may follow
+ from a pursuance of these topics",
+ paper = "Hoar69.pdf"
+}
+
+@misc{Howa80,
+ author = "Howard, W. A.",
+ title = "The Formulae-as-Types Notion of Construction",
+ link = "\url{http://lecomte.al.free.fr/ressources/PARIS8_LSL/Howard80.pdf}",
+ year = "1980",
+ abstract =
+ "The following consists of notes which were privately circulated in
+ 1969. Since they have been referred to a few times in the literature,
+ it seems worth while to publish them. They have been rearranged for
+ easier reading, and some inessential corrections have been made.
+
+ The ultimate goal was to develop a notion of construction suitable for
+ the interpretation of intuitionistic mathematics. The notion of
+ construction developed in the notes is certainly too crude for that,
+ so the use of the word {\sl construction} is not very appropriate.
+ However, the terminology has been kept in order to preserve the
+ original title and also to preserve the character of the notes. The
+ title has a second defect; namely, the {\sl type} should be regarded
+ as a abstract object whereas a {\sl formula} is the name of a type.",
+ paper = "Howa80.pdf"
+}
+
+@inproceedings{Huet87,
+ author = {Huet, G\'erard},
+ title = "Induction Principles Formalized in the Calculus of Constructions",
+ booktitle = "TAPSOFT 87",
+ publisher = "Springer-Verlag",
+ series ="LNCS 249",
+ year = "1987",
+ pages = "276-286",
+ abstract =
+ "The Calculus of Constructions is a higher-order formalism for writing
+ constructive proofs in a natural deduction style, inspired from work
+ by de Bruijn, Girard, Martin-Lof, and Scott. The calculus and its
+ syntactic theory were presented in Coquand's thesis, and an
+ implementation by the author was used to mechanically verify a
+ substantial number of proofs demonstrating the power of expression of
+ the formalism. The Calculus of Constructions is proposed as a
+ foundation for the design of programming environments where programs
+ are developed consistently with formal specifications. The current
+ paper shows how to define inductive concepts in the calculus.
+
+ A very general induction schema is obtained by postulating all
+ elements of the type of interest to belong to the standard
+ interpretation associated with a predicate map. This is similar to the
+ treatment of D. Park, but the power of expression of the formalism
+ permits a very direct treatment, in a language that is formalized
+ enough to be actually implemented on a computer. Special instances of
+ the induction schema specialize to Noetherian induction and Structural
+ induction over any algebraic type. Computational Induction is treated
+ in an axiomatization of Domain Theory in Constructions. It is argued
+ that the resulting principle is more powerful than LCF's, since the
+ restriction on admissibility is expressible in the object language.",
+ paper = "Huet87.pdf"
+}
+
+@misc{Heut16,
+ author = {Huet, G\'erard and Kahn, Gilles and Paulin-Mohring, Christine},
+ title = "The COQ Proof Assistant. A Tutorial",
+ year = "2016",
+ link = "\url{https://coq.inria.fr/distrib/current/files/Tutorial.pdf}",
+ paper = "Heut16.pdf"
+}
+
+@article{Jack94,
+ author = "Jackson, Paul",
+ title = "Exploring Abstract Algebra in Constructive Type Theory",
+ year = "1994",
+ abstract =
+ "I describe my implementation of computational abstract algebra in
+ the Nuprl system. I focus on my development of multivariate
+ polynomials. I show how I use Nuprl's expressive type theory to define
+ classes of free abelian monoids and free monoid algebras. These
+ classes are combined to create a class of all implementations of
+ polynomials. I discuss the issues of subtyping and computational
+ content that came up in designing the class definitions. I give
+ examples of relevant theory developments, tactics and proofs. I
+ consider how Nuprl could act as an algebraic 'oracle' for a computer
+ algebra system and the relevance of this work for abstract functional
+ programming.",
+ paper = "Jack94.pdf",
+ keywords = "axiomref"
+}
+
+@phdthesis{Jack95,
+ author = "Jackson, Paul Bernard",
+ title = "Enhancing the NUPRL Proof Development System and Applying it to
+ Computational Abstract Algebra",
+ school = "Cornell University",
+ year = "1995",
+ month = "1",
+ abstract = "
+ This thesis describes substantial enhancements that were made to the
+ software tools in the Nuprl system that are used to interactively
+ guide the production of formal proofs. Over 20,000 lines of code were
+ written for these tools. Also, a corpus of formal mathematics was
+ created that consists of roughly 500 definitions and 1300
+ theorems. Much of this material is of a foundational nature and
+ supports all current work in Nuprl. This thesis concentrates on
+ describing the half of this corpus that is concerned with abstract
+ algebra and that covers topics central to the mathematics of the
+ computations carried out by computer algebra systems.
+
+ The new proof tools include those that solve linear arithmetic
+ problems, those that apply the properties of order relations, those
+ that carry out inductive proof to support recursive definitions, and
+ those that do sophisticated rewriting. The rewrite tools allow
+ rewriting with relations of differing strengths and take care of
+ selecting and applying appropriate congruence lemmas automatically.
+ The rewrite relations can be order relations as well as equivalence
+ relations. If they are order relations, appropriate monotonicity
+ lemmas are selected.
+
+ These proof tools were heavily used throughout the work on
+ computational algebra. Many examples are given that illustrate their
+ operation and demonstrate their effectiveness.
+
+ The foundation for algebra introduced classes of monoids, groups, rings
+ and modules, and included theories of order relations and
+ permutations. Work on finite sets and multisets illustrates how a
+ quotienting operation hides details of datatypes when reasoning about
+ functional programs. Theories of summation operators were developed
+ that drew indices from integer ranges, lists and multisets, and that
+ summed over all the classes mentioned above. Elementary factorization
+ theory was developed that characterized when cancellation monoids are
+ factorial. An abstract data type for the operations of multivariate
+ polynomial arithmetic was defined and the correctness of an
+ implementation of these operations was verified. The implementation is
+ similar to those found in current computer algebra systems.
+
+ This work was all done in Nuprl's constructive type theory. The thesis
+ discusses the appropriateness of this foundation, and the extent to
+ which the work relied on it.",
+ paper = "Jack95.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Juds16,
+ author = "Judson, Thomas W.",
+ title = "Abstract Algebra: Theory and Applications",
+ link = "\url{http://abstract.ups.edu/download/aata-20160809-sage-7.3.pdf}",
+ year = "2016"
+}
+
+@inproceedings{Kali07,
+ author = "Kaliszyk, Cezary and Wiedijk, Freek",
+ title = "Certified Computer Algebra on Top of an Interactive Theorem
+ Prover",
+ booktitle = "Towards Mechanized Mathematical Assistants",
+ pages = "94-105",
+ year = "2007",
+ abstract =
+ "We present a prototype of a computer algebra system that is built on
+ top of a proof assistant, HOL Light. This architecture guarantees that
+ one can be certain that the system will make no mistakes. All
+ expressions in the system will have precise semantics, and the proof
+ assistant will check the correctness of all simplifications according
+ to this semantics. The system actually proves each simplification
+ performed by the computer algebra system.
+
+ Although our system is built on top of a proof assistant, we designed
+ the user interface to be very close in spirit to the interface of
+ systems like Maple and Mathematica. The system, therefore, allows the
+ user to easily probe the underlying automation of the proof assistant
+ for strengths and weaknesses with respect to the automation of
+ mainstream computer algebra systems. The system that we present is a
+ prototype, but can be straightforwardly scaled up to a practical
+ computer algebra system.",
+ paper = "Kali07.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Kauf98,
+ author = "Kaufmann, Matt and Moore, J Strother",
+ title = "A Precise Description of the ACL2 Logic",
+ year = "1998",
+ link = "\url{www.cs.utexas.edu/users/moore/publications/km97a.pdf}",
+ abstract = "The ACL2 logic is a first-order, essentially quantifier-free
+ logic of total recursive functions providing mathematical induction
+ and several extension principles, including symbol package definition
+ and recursive function definition. In this document we describe the
+ logic more precisely.",
+ paper = "km97a.pdf"
+}
+
+@misc{Kenn13,
+ author = "Kennedy, Andrew and Benton, Nick and Jensen, Jonas B. and
+ Dagand, Pierre-Evariste",
+ title = "Coq: The world's best macro assembler?",
+ year = "2013",
+ link = "\url{http://research.microsoft.com/en-us/um/people/nick/coqasm.pdf}",
+ abstract =
+ "We describe a Coq formalization of a subset of the x86
+ architecture. One emphasis of the model is brevity: using dependent
+ types, type classes and notation we give the x86 semantics a makeover
+ that counters its reputation for baroqueness. We model bits, bytes,
+ and memory concretely using functions that can be computed inside Coq
+ itself; concrete representations are mapped across to mathematical
+ objects in the SSREFLECT library (naturals, and integers modulo $2^n$)
+ to prove theorems. Finally, we use notation to support conventional
+ assembly code syntax inside Coq, including lexically-scoped
+ labels. Ordinary Coq definitions serve as a powerful ``macro'' feature
+ for everything from simple conditionals and loops to stack-allocated
+ local variables and procedures with parameters. Assembly code can be
+ assembled within Coq, producing a sequence of hex bytes. The assembler
+ enjoys a correctness theorem relating machine code in memory to a
+ separation-logic formula suitable for program verification.",
+ paper = "Kenn13.pdf"
+}
+
+@article{Kerb96,
+ author = "Kerber, Manfred and Kohlhase, Michael and Sorge, Volker",
+ title = "Integrating Computer Algebra with Proof Planning",
+ journal = "Lecture Notes in Computer Science",
+ volume = "1128",
+ pages = "204-215",
+ year = "1996",
+ abstract =
+ "Mechanised reasoning systems and computer algebra systems have
+ apparently different objectives. Their integration is, however,
+ highly desirable, since in many formal proofs both of the two
+ different tasks, proving and calculating, have to be performed. In
+ the context of producing reliable proofs, the question how to ensure
+ correctness when integrating a computer algebra system into a
+ mechanised reasoning system is crucial. In this contribution, we
+ discuss the correctness problems that arise from such an integration
+ and advocate an approach in which the calculations of the computer
+ algebra system are checked at the calculus level of the mechanised
+ reasoning system. We present an implementation which achieves this
+ by adding a verbose mode to the computer algebra system which produces
+ high-level protocol information that can be processed by an interface
+ to derive proof plans. Such a proof plan in turn can be expanded to
+ proofs at different levels of abstraction, so the approach is
+ well-suited for producing a high-level verbalised explication as well
+ as for a low-level (machine checkable) calculus-level proof.",
+ paper = "Kerb96.pdf",
+ keywords = "axiomref"
+}
+
+@article{Kome11,
+ author = "Komendantsky, Vladimir and Konovalov, Alexander and Linton, Steve",
+ title = "View of Computer Algebra Data from Coq",
+ journal = "Lecture Notes in Computer Science",
+ volume = "6824",
+ pages = "74-80",
+ year = "2011",
+ publisher = "Springer-Verlag",
+ abstract =
+ "Data representation is an important aspect of software composition.
+ It is often the case that different software components are
+ programmed to represent data in the ways which are the most
+ appropriate for their problem domains. Sometimes, converting data from
+ one representation to another is a non-trivial task. This is the
+ case with computer algebra systems and type-theory based interactive
+ theorem provers such as Coq. We provide some custom instrumentation
+ inside Coq to support a computer algebra system (CAS) communication
+ protocol known as SCSCP. We describe general aspects of viewing
+ OpenMath terms produced by a CAS in the calculus of Coq, as well as
+ viewing pure Coq terms in a simpler type system that is behind OpenMath.",
+ paper = "Kome11.pdf"
+}
+
+@phdthesis{Kreb15,
+ author = "Krebbers, Robbert Jan",
+ title = "The C standard formalized in Coq",
+ school = "Radboud University",
+ year = "2015",
+ paper = "Kreb15.pdf",
+ link = "\url{http://robbertkrebbers.nl/thesis.html}"
+}
+
+@misc{Kreb15a,
+ author = "Krebbers, Robbert and Wiedijk, Freek",
+ title = "A Typed C11 Semantics for Interactive Theorem Proving",
+ year = "2015",
+ link = "\url{http://robbertkrebbers.nl/research/articles/interpreter.pdf}",
+ abstract =
+ "We present a semantics of a significant fragment of the C programming
+ language as described by the C11 standard. It consists of a small step
+ semantics of a core language, which uses a structured memory model to
+ capture subtleties of C11, such as strict-aliasing restrictions
+ related to unions, that have not yet been addressed by others. The
+ semantics of actual C programs is defined by translation into this
+ core language. We have an explicit type system for the core language,
+ and prove type preservation and progress, as well as type correctness
+ of the translation.
+
+ Due to unspecified order of evaluation, our operational semantics is
+ non-deterministic. To explore all defined and undefined behaviors, we
+ present an executable semantics that computes a stream of finite sets
+ of reachable states. It is proved sound and complete with respect to
+ the operational semantics.
+
+ Both the translation into the core language and the executable
+ semantics are defined as Coq programs. Extraction to OCaml is used to
+ obtain a C interpreter to run and test the semantics on actual C
+ programs. All proofs are fully formalized in Coq.",
+ paper = "Kreb15a.pdf"
+}
+
+@misc{Kreb17,
+ author = "Krebbers, Robbert Jan",
+ title = {The CH$_2$O formalization of ISO C11},
+ year = "2017",
+ link = "\url{http://robbertkrebbers.nl/research/ch2o/}"
+}
+
+@article{Kupe14,
+ author = "Kuper, Jan and Wester, Rinse",
+ title = "N Queens on an FPGA: Mathematics, Programming, or Both?",
+ year = "2014",
+ link = "\url{http://doc.utwente.nl/94663/1/NQueensOnFPGA.pdf}",
+ publisher = "Open Channel Publishing Ltd",
+ journal = "Communicating Process Architectures 2014",
+ abstract =
+ "This paper presents a design methodology for deriving an FPGA
+ implementation directly from a mathematical specification, thus
+ avoiding the switch in semantic perspective as is present in widely
+ applied methods which include an imperative implementation as an
+ intermediate step.
+
+ The first step in the method presented in this paper is to transform a
+ mathematical specification into a Haskell program. The next step is to
+ make repetition structures explicit by higher order functions, and
+ after that rewrite the specification in the form of a Mealy
+ Machine. Finally, adaptations have to be made in order to comply to
+ the fixed nature of hardware. The result is then given to
+ C$\lambda$aSH, a compiler which generates synthesizable VHDL from the
+ resulting Haskell code. An advantage of the approach presented here is
+ that in all phases of the process the design can be directly simulated
+ by executing the defining code in a standard Haskell environment.
+
+ To illustrate the design process, the N queens problem is chosen as a
+ running example."
+}
+
+@article{Kell12,
+ author = "Keller, Chantal and Lasson, Marc",
+ title = "The Refined Calculus of Inductive Construction: Parametricity and
+ Abstraction",
+ journal = "arXiv",
+ year = "2012",
+ link = "\url{http://arxiv.org/pdf/1211.6341v1.pdf}",
+ abstract =
+ "We present a refinement of the Calculus of Inductive Constructions in
+ which one can easily define a notion of relational parametricity. It
+ provides a new way to automate proofs in an interactive theorem prover
+ like Coq.",
+ paper = "Kell12.pdf"
+}
+
+@book{Lamp02,
+ author = "Lamport, Leslie",
+ title = "Specifying Systems",
+ year = "2002",
+ link = "\url{http://research.microsoft.com/en-us/um/people/lamport/tla/book-02-08-08.pdf}",
+ publisher = "Addison-Wesley",
+ isbn = "0-321-14306-X",
+ paper = "Lamp02.pdf"
+}
+
+@misc{Lamp13,
+ author = "Lamport, Leslie",
+ title = "Errata to Specifying Systems",
+ year = "2013",
+ link = "\url{http://research.microsoft.com/en-us/um/people/lamport/tla/errata-1.pdf}",
+ publisher = "Microsoft",
+ abstract = "
+ These are all the errors and omissions to the first printing (July
+ 2002) of the book {\sl Specifying Systems} reported as of 29 October
+ 2013. Positions in the book are indicated by page and line number,
+ where the top line of a page is number 1 and the bottom line is number
+ $-1$. A running head and a page number are not considered to be lines,
+ but all other lines are. Please report any additional errors to the
+ author, whose email address is posted on {\tt http://lamport.org}. The
+ first person to report an error will be acknowledged in any revised
+ edition.",
+ paper = "Lamp13.pdf"
+}
+
+@misc{Lamp14,
+ author = "Lamport, Leslie",
+ title = "How to Write a $21^{st}$ Century Proof",
+ year = "2014",
+ link = "\url{http://research.microsoft.com/en-us/um/people/lamport/pubs/paper.pdf}",
+ publisher = "Microsoft",
+ abstract = "
+ A method of writing proofs is described that makes it harder to prove
+ things that are not true. The method, based on hierarchical
+ structuring, is simple and practical. The author's twenty years of
+ experience writing such proofs is discussed.",
+ paper = "Lamp14.pdf"
+}
+
+@misc{Lamp14a,
+ author = "Lamport, Leslie",
+ title = "Talk: How to Write a $21^{st}$ Century Proof",
+ year = "2014",
+ link = "\url{http://hits.mediasite.com/mediasite/Play/29d825439b3c49f088d35555426fbdf81d}",
+ comment = "2nd Heidelberg Laureate Forum Lecture Tuesday Sep 23, 2014"
+}
+
+@misc{Lamp16,
+ author = "Lamport, Leslie",
+ title = "TLA+ Proof System",
+ year = "2016",
+ link = "\url{https://tla.msr-inria.inria.fr/tlaps/content/Documentation/Tutorial/The_example.html}",
+ abstract = "Demonstration of Euclid Algorithm Proof in TLA+"
+}
+
+@article{Mahb06,
+ author = "Mahboubi, Assia",
+ title = "Proving Formally the Implementation of an Efficient gcd
+ Algorithm for Polynomials",
+ journal = "Lecture Notes in Computer Science",
+ volume = "4130",
+ year = "2006",
+ pages = "438-452",
+ abstract = "
+ We describe here a formal proof in the Coq system of the structure
+ theorem for subresultants which allows to prove formally the
+ correctness of our implementation of the subresultants algorithm.
+ Up to our knowledge it is the first mechanized proof of this result.",
+ paper = "Mahb06.pdf"
+}
+
+@book{Mahb16,
+ author = "Mahboubi, Assia and Tassi, Enrico and Bertot, Yves and
+ Gonthier, Georges",
+ title = "Mathematical Components",
+ year = "2016",
+ publisher = "math-comp.github.io/mcb",
+ link = "\url{https://math-comp.github.io/mcb/book.pdf}",
+ abstract =
+ "{\sl Mathematical Components} is the name of a library of formalized
+ mathematics for the COQ system. It covers a variety of topics, from the
+ theory of basic data structures (e.g. numbers, lists, finite sets) to
+ advanced results in various flavors of algebra. This library
+ constitutes the infrastructure for the machine-checked proofs of the
+ Four Color Theorem and the Odd Order Theorem.
+
+ The reason of existence of this book is to break down the barriers to
+ entry. While there are several books around covering the usage of the
+ COQ system and the theory it is based on, the Mathematical Components
+ library is built in an unconventional way. As a consequence, this book
+ provides a non-standard presentation of COQ, putting upfront the
+ formalization choices and the proof style that are the pillars of the
+ library.
+
+ This book targets two classes of public. On one hand, newcomers, even
+ the more mathematically inclined ones, find a soft introduction to the
+ programming language of COQ, Gallina, and the Ssreflect proof
+ language. On the other hand accustomed COQ users find a substantial
+ account of the formalization style that made the Mathematical
+ Components library possible.
+
+ By no means does this book pretend to be a complete description of COQ
+ or Ssreflect: both tools already come with a comprehensive user
+ manual. In the course of the book, the reader is nevertheless invited
+ to experiment with a large library of formalized concepts and she is
+ given as soon as possible sufficient tools to prove non-trivial
+ mathematical results by reusing parts of the library. By the end of
+ the first part, the reader has learnt how to prove formally the
+ infinitude of prime numbers, or the correctness of the Euclidean
+ division algorithm, in a few lines of proof text.",
+ paper = "Mahb16.pdf"
+}
+
+@misc{Mart80,
+ author = {Martin-L\"of, Per},
+ title = "Intuitionistic Type Theory",
+ link = "\url{http://archive-pml.github.io/martin-lof/pdfs/Bibliopolis-Book-retypeset-1984.pdf}",
+ year = "1980",
+ paper = "Mart80.pdf"
+}
+
+@inproceedings{Mart85,
+ author = {Martin-L\"of, Per},
+ title = "Constructive Mathematics and Computer Programming",
+ booktitle = "Proc Royal Soc. of London on Math. Logic and Programming Lang.",
+ link = "\url{http://www.cs.tufts.edu/~nr/cs257/archive/per-martin-lof/constructive-math.pdf}",
+ year = "1985",
+ isbn = "0-13-561465-1",
+ pages = "168-184",
+ publisher = "Prentice-Hall",
+ paper = "Mart85.pdf"
+}
+
+@article{Mart96,
+ author = {Martin-L\"of, Per},
+ title = "On the Meaning of the Logical Constants and the Justifications
+ of the Logical Laws",
+ year = "1996",
+ journal = "Nordic Journal of Philosophical Logic",
+ volume = "1",
+ number = "1",
+ pages = "11-60",
+ abstract =
+ "The following three lectures were given in the form of a short course
+ at the meeting Teoria della Dimostrazione e Filosofia della Logica,
+ organized in Siena, 6-9 April 1983, by the Scuola di Specializzazione
+ in Logica Matematica of the Universit\`a degli Studi di Siena. I am
+ very grateful to Giovanni Sambin and Aldo Ursini of that school, not
+ only for recording the lectures on tape, but, above all, for
+ transcribing the tapes produced by the recorder: no machine could have
+ done that work. This written version of the lectures is based on their
+ transcription. The changes that I have been forced to make have mostly
+ been of a stylistic nature, except at one point. In the second
+ lecture, as I actually gave it, the order of conceptual priority
+ between the notions of proof and immediate inference was wrong. Since
+ I discovered my mistake later the same month as the meeting was held,
+ I thought it better to let the written text diverge from the oral
+ presentation rather than possibly confusing others by letting the
+ mistake remain. The oral origin of these lectures is the source of the
+ many redundancies of the written text. It is also my sole excuse for
+ the lack of detailed references.",
+ paper = "Mart96.pdf"
+}
+
+@misc{Mart97,
+ author = "Martin, Ursula and Shand, D",
+ title = "Investigating some Embedded Verification Techniques for
+ Computer Algebra Systems",
+ link =
+ "\url{http://www.risc.jku.at/conferences/Theorema/papers/shand.ps.gz}",
+ abstract = "
+ This paper reports some preliminary ideas on a collaborative project
+ between St. Andrews University in the UK and NAG Ltd. The project aims
+ to use embedded verification techniques to improve the reliability and
+ mathematical soundness of computer algebra systems. We give some
+ history of attempts to integrate computer algebra systems and
+ automated theorem provers and discuss possible advantages and
+ disadvantages of these approaches. We also discuss some possible case
+ studies.",
+ paper = "Mart97.ps"
+}
+
+@book{Maso86,
+ author = "Mason, Ian A.",
+ title = "The Semantics of Destructive Lisp",
+ publisher = "Center for the Study of Language and Information",
+ year = "1986",
+ isbn = "0-937073-06-7",
+ abstract = "
+ Our basic premise is that the ability to construct and modify programs
+ will not improve without a new and comprehensive look at the entire
+ programming process. Past theoretical research, say, in the logic of
+ programs, has tended to focus on methods for reasoning about
+ individual programs; little has been done, it seems to us, to develop
+ a sound understanding of the process of programming -- the process by
+ which programs evolve in concept and in practice. At present, we lack
+ the means to describe the techniques of program construction and
+ improvement in ways that properly link verification, documentation and
+ adaptability."
+}
+
+@article{Mcbr06,
+ author = "McBride, Conor and Goguen, Healfdene and McKinna, James",
+ title = "A Few Constructions on Constructors",
+ journal = "Lecture Notes in Computer Science",
+ volume = "3839",
+ pages = "186-200",
+ year = "2006",
+ link = "\url{http://www.strictlypositive.org/concon.ps.gz}",
+ abstract =
+ "We present four constructions for standard equipment which can be
+ generated for every inductive datatype: case analysis, structural
+ recursion, no confusion, acyclicity. Our constructions follow a
+ two-level approach -- they require less work than the standard
+ techniques which inspired them. Moreover, given a suitably
+ heterogeneous notion of equality, they extend without difficulty to
+ inductive families of datatypes. These constructions are vital
+ components of the translation from dependently typed programs in
+ pattern matching style to the equivalent programs expressed in terms
+ of induction principles and as such play a crucial behind-the-scenes
+ role in Epigram.",
+ paper = "Mcbr06.pdf"
+}
+
+@article{Mcca78,
+ author = "McCarthy, John",
+ title = "A Micro-Manual for Lisp -- Not The Whole Truth",
+ journal = "ACM SIGPLAN Notices",
+ volume = "13",
+ number = "8",
+ year = "1978",
+ paper = "Mcca78.pdf"
+}
+
+@article{Medi04,
+ author = "Medina-Bulo, Inmaculada and Lozano-Palomo, F. and
+ Alonso-Jimenez, J.A. and Ruiz-Reina, J.L.",
+ title = "Verified Computer Algebra in ACL2",
+ journal = "LNAI",
+ volume = "3249",
+ year = "2004",
+ pages = "171-184",
+ abstract =
+ "In this paper, we present the formal verification of a Common Lisp
+ implementation of Buchberger’s algorithm for computing Gröbner bases
+ of polynomial ideals. This work is carried out in the Acl2 system and
+ shows how verified Computer Algebra can be achieved in an executable
+ logic.",
+ paper = "Medi04.pdf"
+}
+
+@article{Melq12,
+ author = "Melquiond, Guillaume",
+ title = "Floating-point arithmetic in the Coq system",
+ journal = "Information and Computation",
+ volume = "216",
+ pages = "14-23",
+ year = "2012",
+ link = "\url{https://www.lri.fr/~melquion/doc/08-mc8-article.pdf}",
+ abstract =
+ "The process of proving some mathematical theorems can be greatly
+ reduced by relying on numerically-intensive computations with a
+ certified arithmetic. This article presents a formalization of
+ floating-point arithmetic that makes it possible to efficiently
+ compute inside the proofs of the Coq system. This certified library is
+ a multi-radix and multi-precision implementation free from underflow
+ and overflow. It provides the basic arithmetic operators and a few
+ elementary functions.",
+ paper = "Melq12.pdf"
+}
+
+@inproceedings{Mesh01,
+ author = "Meshveliani, Sergei D.",
+ title = "Computer Algebra with Haskell: Applying
+ Functional-Categorical-`Lazy' Programming",
+ booktitle = "Computer Algebra and its Application to Physics",
+ year = "2001",
+ pages = "203-211",
+ link =
+ "\url{compalg.jinr.ru/Confs/CAAP\_2001/Final/proceedings/proceed.pdf}",
+ abstract =
+ "We give an outline of a computer algebra program written in a
+ functional language Haskell and implementing certain piece of
+ commutative algebra",
+ paper = "Mesh01.pdf",
+ keywords = "axiomref"
+
+}
+
+@misc{Mesh10,
+ author = "Meshveliani, Sergei D.",
+ title = "Haskell and computer algebra",
+ link = "\url{http://www.botik.ru/pub/local/Mechveliani/basAlgPropos/haskellinCA2.pdf.zip}",
+ year = "2010",
+ abstract =
+ "We consider the ways to program mathematics in the Haskell language.
+ To start a discussion, we pretend to propose certain basic algebra
+ library BAL for Haskell. We also mention several desirable language
+ features. Algebraic additions in BAL are divided into the 'ordinary'
+ and 'advanced'. Standard algebraic classes are reorganized to make
+ them mathematically meaningful. For the 'advanced' part a sample
+ argument approach is introduced -- as certain alternative for the
+ dependent type language extension. The library is implemented in the
+ existing Haskell, by 'hiding' a certain part of the existing Prelude.",
+ paper = "Mesh10.pdf",
+ keywords = "axiomref"
+}
+
+@article{Mesh14,
+ author = "Meshveliani, Sergei D.",
+ title = "On dependent types and intuitionism in programming mathematics",
+ journal = "Program systems: theory and applications",
+ year = "2014",
+ volume = "5",
+ number = "3(21)",
+ pages = "27-50",
+ comment = "(In Russian)",
+ link = "\url{http://psta.psiras.ru/read/psta2014_3_27-50.pdf}",
+ abstract =
+ "It is discussed a practical possibility of a provable programming
+ of mathematics basing of the approach of intuitionism, a language
+ with dependent types, proof-carrying code. This approach is
+ illustrated with examples. The discourse bases on the experience
+ of implementing in the {\tt Agda} language of a certain small
+ algebraic library including the arithmetic of a residue domain
+ $R/(b)$ for an arbitrary Euclidean ring R. (In Russian)",
+ paper = "Mesh14.pdf",
+ keywords = "axiomref"
+}
+
+@article{Mesh15,
+ author = "Meshveliani, Sergei D.",
+ title = "Programming basic computer algebra in a language with
+ dependent types",
+ journal = "Program systems: theory and applications",
+ year = "2015",
+ volume = "6",
+ number = "4(27)",
+ pages = "313-340",
+ comment = "(In Russian)",
+ link = "\url{http://psta.psiras.ru/read/psta2015_4_313-340.pdf}",
+ abstract =
+ "It is described the experience in provable programming of certain
+ classical categories of computational algebra (``group'', ``ring'',
+ and so on) basing on the approach of intuitionism, a language with
+ dependent types, forming of machine-checked proofs. There are detected
+ the related problems, and are described certain additional possibilities
+ given by the approach. The {\tt Agda} functional language is used as an
+ instrument. This paper is a continuation for the introductory paper
+ published in this journal in 2014. (In Russian)",
+ paper = "Mesh15.pdf"
+}
+
+@book{Mesh16,
+ author = "Meshveliani, Sergei D.",
+ title = "DoCon -- A Provable Algebraic Domain Constructor",
+ link =
+ "\url{http://www.botik.ru/pub/local/Mechveliani/docon-A/0.04/manual.pdf}",
+ publisher = "User Manual, Version 0.04",
+ year = "2016",
+ abstract =
+ "This book is about 1) a manual on the DoCon-A program library, 2) a book
+ explaining how to program algebra in a purely functional language with
+ {\sl dependent types} (specifically, in {\tt Agda}), with providing
+ machine-checked proofs, and following constructive mathematics.
+
+ The above point of proofs means that a program not only implements an
+ algorithm, but explains to the compiler the needed mathematical notions
+ and provides the needed proofs in the form of type expressions and
+ functions. And the compiler (more precisely, type checker) is able to
+ verify these proofs statically (before running), and to prepare the
+ algorithm for running.",
+ paper = "Mesh16.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Mesh16a,
+ author = "Meshveliani, Sergei D.",
+ title = "Provable programming of algebra: particular points, examples",
+ link = "\url{http://www.botik.ru/pub/local/Mechveliani/provProgExam.zip}",
+ year = "2016",
+ abstract =
+ "It is discussed an experience in provable programming of a computer
+ algebra library with using a purely functional language with dependent
+ types ({\tt Agda}). There are given several examples illustrating
+ particular points of implementing the approach of constructive
+ mathematics.",
+ paper = "Mesh16a.pdf"
+
+}
+
+@techreport{Miln72,
+ author = "Milner, Robert",
+ title = "Logic for Computable Functions: Description of a Machine
+ Implementation",
+ year = "1972",
+ institution = "Stanford Artificial Intelligence Project",
+ number = "STAN-CS-72-288",
+ link =
+ "\url{http://i.stanford.edu/pub/cstr/reports/cs/tr/72/288/CS-TR-72-288.pdf}",
+ abstract =
+ "LCF is based on a logic by Dana Scott, proposed by him at Oxford in the
+ fall of 1969, for reasoning about computable functions.",
+ paper = "Miln72.pdf"
+}
+
+@article{Miln78,
+ author = "Milner, Robin",
+ title = "A Theory of Type Polymorphism in Programming",
+ year = "1978",
+ journal = "Journal of Computer and System Sciences",
+ volume = "17",
+ pages = "348-375",
+ link = "\url{https://courses.engr.illinois.edu/cs421/sp2013/project/milner-polymorphism.pdf}",
+ abstract =
+ "The aim of this work is largely a practical one. A widely employed
+ style of programming, particularly in structure-processing languages
+ which impose no discipline of types, entails defining procedures which
+ work well on objects of a wide variety. We present a formal type
+ discipline for such polymorphic procedures in the context of a simple
+ programming language, and a compile time type-checking algorithm $W$
+ which enforces the discipline. A Semantic Soundness Theorem (based on
+ a formal semantics for the language) states that well-type programs
+ cannot “go wrong” and a Syntactic Soundness Theorem states that if $W$
+ accepts a program then it is well typed. We also discuss extending
+ these results to richer languages; a type-checking algorithm based on
+ $W$ is in fact already implemented and working, for the metalanguage ML
+ in the Edinburgh LCF system.",
+ paper = "Miln78.pdf"
+}
+
+@misc{Mohr14,
+ author = "Paulin-Mohring, Christine",
+ title = "Introduction to the Calculus of Inductive Constructions",
+ year = "2014",
+ link = "\url{https://hal.inria.fr/hal-01094195/file/CIC.pdf}",
+ paper = "Mohr14.pdf"
+}
+
+@mastersthesis{Mort10,
+ author = {M\"ortberg, Anders},
+ title = "Constructive Algebra in Functional Programming and Type Theory",
+ school = "University of Gothenburg, Department of Computer Science",
+ year = "2010",
+ month = "5",
+ file = "Mort10.pdf",
+ abstract =
+ "This thesis considers abstract algebra from a constructive point
+ of view. The central concept of study is coherent rings -- algebraic
+ structures in which it is possible to solve homogeneous systems of
+ linear equations. Three different algebraic theories are considered;
+ Bezout domains, Prufer domains and polynomial rings. The first two
+ of these are non-Noetherian analogues of classical notions. The
+ polynomial rings are presented from a constructive point of view with a
+ treatment of Groebner bases. The goal of the thesis is to study the
+ proofs that these theories are coherent and explore how the proofs can
+ be implemented in functional programming and type theory."
+}
+
+@misc{Mour16,
+ author = "de Moura, Leonardo and Avigad, Jeremy and Kong, Soonho and
+ Roux, Cody",
+ title = "Elaboration in Dependent Type Theory",
+ year = "2016",
+ link = "\url{http://leodemoura.github.io/files/elaboration.pdf}",
+ abstract =
+ "We describe the elaboration algorithm that is used in {\sl Lean}, a
+ new interactive theorem prover based on dependent type theory. To be
+ practical, interactive theorem provers must provide mechanisms to
+ resolve ambiguities and infer implicit type information, thereby
+ supporting convenient input of expressions and proofs. Lean's
+ elaborator supports higher-order unification, ad-hoc overloading,
+ insertion of coercions, type class inference, the use of tactics, and
+ the computational reduction of terms. The interactions between these
+ components are subtle and complex, and Lean's elaborator has been
+ carefully designed to balance efficiency and usability.",
+ paper = "Mour16.pdf"
+}
+
+@book{Nipk14,
+ author = "Nipkow, Tobias and Klein, Gerwin",
+ title = "Concrete Semantics",
+ isbn = "978-3-319-10542-0",
+ publisher = "Springer",
+ year = "2014"
+}
+
+@article{Nguy16,
+ author = "Nguyen, Phuc C. and Tobin-Hochstadt, Sam and van Horn, David",
+ title = "Higher-order symbolic execution for contract verification and
+ refutation",
+ journal = "arXiv",
+ link = "\url{http://arxiv.org/pdf/1507.04817v2.pdf}",
+ year = "2016",
+ month = "February",
+ abstract =
+ "We present a new approach to automated reasoning about higher-order
+ programs by endowing symbolic execution with a notion of higher-order,
+ symbolic values.
+
+ To validate our approach, we use it to develop and evaluate a system
+ for verifying and refuting behavioral software contracts of components
+ in a functional language, which we call {\sl soft contract
+ verification}. In doing so, we discover a mutually beneficial relation
+ between behavioral contracts and higher-order symbolic
+ execution. Contracts aid symbolic execution by providing a rich
+ language of specifications that can serve as the basis of symbolic
+ higher-order values; the theory of blame enables modular verification
+ and leads to the theorem that {\sl verified components can't be
+ blamed}; and the run-time monitoring of contracts enables {\sl soft}
+ verification whereby verified and unverified components can safely
+ interact and verification is not an all-or-nothing
+ proposition. Conversely, symbolic execution aids contracts by
+ providing compile-time verification which increases assurance and
+ enables optimizations; automated test-case generation for contracts
+ with counter-examples; and engendering a virtuous cycle between
+ verification and the gradual spread of contracts.
+
+ Our system uses higher-order symbolic execution, leveraging contracts
+ as a source of symbolic values including unknown behavioral values,
+ and employs an updatable heap of contract invariants to reason about
+ flow-sensitive facts. Whenever a contract is refuted, it reports a
+ concrete {\sl counterexample} reproducing the error, which may involve
+ solving for an unknown function. The approach is able to analyze
+ first-class contracts, recursive data structures, unknown functions,
+ and control-flow-sensitive refinement of values, which are all
+ idiomatic in dynamic languages. It makes effective use of an
+ off-the-shelf solver to decide problems without heavy encodings. Our
+ counterexample search is sound and relatively complete with respect to
+ a first-order solver for base type values. Therefore, it can form the
+ basis of automated verification and bug-finding tools for higher-order
+ programs. The approach is competitive with a wide range of existing
+ tools -- including type systems, flow analyzers, and model checkers --
+ on their own benchmarks. We have built a tool which analyzes programs
+ written in Racket, and report on its effectiveness in verifying and
+ refuting contracts.",
+ paper = "Nguy16.pdf"
+}
+
+@inproceedings{Pare93,
+ author = "Parent, Catherine",
+ title = "Developing Certified Programs in the System Coq: The Program
+ Tactic",
+ booktitle = "Proc. Int. Workshop on Types for Proofs and Programs",
+ publisher = "Springer-Verlag",
+ isbn = "3-540-58085-9",
+ pages = "291-312",
+ year = "1993",
+ abstract =
+ "The system {\sl Coq} is an environment for proof development based on
+ the Calculus of Constructions extended by inductive definitions. The
+ specification of a program can be represented by a logical formula and
+ the program itself can be extracted from the constructive proof of the
+ specification. In this paper, we look at the possibility of inverting
+ the specification and a program, builds the logical condition to be
+ verified in order to obtain a correctness proof of the program. We
+ build a proof of the specification from the program from which the
+ program can be extracted. Since some information cannot automatically
+ be inferred, we show how to annotate the program by specifying some of
+ its parts in order to guide the search for the proof.",
+ paper = "Pare93.ps"
+}
+
+@techreport{Pare94,
+ author = "Parent, Catherine",
+ title = "Synthesizing proofs from programs in the Calculus of Inductive
+ Constructions",
+ year = "1994",
+ institution = {Ecole Normale Sup\'erieure de Lyon},
+ abstract =
+ "In type theory, a proof can be represented as a typed $\lambda$-term.
+ There exist methods to mark logical parts in proofs and extract their
+ algorithmic contents. The result is a correct program with respect to
+ a specification. This paper focuses on the inverse problem: how to
+ generate a proof from its specification. The framework is the Calculus
+ of Inductive Constructions. A notion of coherence is introduced between
+ a specification and a program containing types but no logical proofs.
+ This notion is based on the definition of an extraction function called
+ the weak extraction. Such a program can give a method to reconstruct a
+ set of logical properties needed to have a proof of the initial
+ specification. This can be seen either as a method of proving programs
+ or as a method of synthetically describing proofs.",
+ paper = "Pare94.pdf"
+}
+
+@misc{Pare96,
+ author = "Parent-Vigouroux, Catherine",
+ title = "Natural proofs versus programs optimization in the
+ Calculus of Inductive Constructions",
+ year = "1996",
+ abstract =
+ "This paper presents how to automatically prove that an 'optimized'
+ program is correct with respect to a set of given properties that is a
+ specification. Proofs of specifications contain logical and
+ computational parts. Programs can be seen as computational parts of
+ proofs. They can thus be extracted from proofs and be certified to be
+ correct. The inverse problem can be solved: it is possible to
+ reconstruct proof obligations from a program and its specification.
+ The framework is a type theory where a proof can be represented as a
+ typed $\lambda$-term and, particularly, the Calculus of Inductive
+ Constructions. This paper shows how programs can be simplified in
+ order to be written in a much closer way to the ML one's. Indeed,
+ proofs structures are often much more heavy than program structures.
+ The problem is consequently to consider natural programs (in a ML sense)
+ and see how to retrieve natural structures of proofs from them.",
+ paper = "Pare96.pdf"
+}
+
+@article{Pare97,
+ author = "Parent-Vigouroux, Catherine",
+ title = "Verifying programs in the Calculus of Inductive Constructions",
+ year = "1997",
+ journal = "Formal Aspects of Computing",
+ volume = "9",
+ number = "5",
+ pages = "484-517",
+ abstract =
+ "This paper deals with a particular approach to the verification of
+ functional programs. A specification of a program can be represented
+ by a logical formula. In a constructive framework, developing a program
+ then corresponds to proving this formula. Given a specification and a
+ program, we focus on reconstructing a proof of the specification whose
+ algorithmic contents corresponds to the given program. The best we can
+ hope is to generate proof obligations on atomic parts of the program
+ corresponding to logical properties to be verified. First, this paper
+ studies a weak extraction of a program from a proof that keeps track
+ of intermediate specifications. From such a program, we prove the
+ determinism of retrieving proof obligations. Then, heuristic methods
+ are proposed for retrieving the proof from a natural program containing
+ only partial annotations. Finally, the implementation of this method as
+ a tactic of the {\sl Coq} proof assistant is presented.",
+ paper = "Pare97.pdf"
+}
+
+@article{Pela14,
+ author = "Pelayo, Alvaro and Warren, Michael A.",
+ title = "Homotopy Type Theory and Voevodsky's Univalent Foundations",
+ journal = "Bulletin of the American Mathematical Society",
+ volume = "51",
+ number = "4",
+ year = "2014",
+ pages = "597-648",
+ link = "\url{https://arxiv.org/pdf/1210.5658.pdf}",
+ abstract =
+ "Recent discoveries have been made connecting abstract homotopy
+ theory and the field of type theory from logic and theoretical computer
+ science. This has given rise to a new field, which has been christened
+ {\sl homotopy type theory}. In this direction, Vladimir Voevodsky
+ observed that it is possible to model type theory using simplicial sets
+ and that this model satisfies an additional property, called the
+ {\sl Univalence Axiom}, which has a number of striking consequences.
+ He has subsequently advocated a program, which he calls {\sl univalent
+ foundations}, of developing mathematics in the setting of type theory
+ with the Univalence Axiom and possibly other additional axioms motivated
+ by the simplicial set model. Because type theory possesses good computational
+ properties, this program can be carried out in a computer proof assistant.
+ In this paper we give an introduction to homotopy type theory in
+ Voevodsky's setting, paying attention to both theoretical and practical
+ issues. In particular, the paper serves as an introduction to both the
+ general ideas of homotopy type theory as well as to some of the concrete
+ details of Voevodsky's work using the well-known proof assistant Coq.
+ The paper is written for a general audience of mathematicians with basic
+ knowledge of algebraic topology; the paper does not assume any
+ preliminary knowledge of type theory, logic, or computer science. Because
+ a defining characteristic of Voevodsky's program is that the Coq code has
+ fundamental mathematical content, and many of the mathematical concepts
+ which are efficiently captured in the code cannot be explained in
+ standard mathematical English without a lengthy detour through type theory,
+ the later sections of this paper (beginning with Section 3) make use of
+ code; however, all notions are introduced from the beginning and in a
+ self-contained fashion.",
+ paper = "Pela14.pdf"
+}
+
+@techreport{Pfen89,
+ author = "Pfenning, Frank and Paulin-Mohring, Christine",
+ title = "Inductively Defined Types in the Calculus of Constructions",
+ institution = "Carnegie-Mellon University",
+ year = "1989",
+ number = "CMU-CS-89-209",
+ link = "\url{http://repository.cmu.edu/cgi/viewcontent.cgi?article=2907&context=compsci}",
+ abstract =
+ "We define the notion of an {\sl inductively defined type} in the
+ Calculus of Constructions and show how inductively defined types can
+ be represented by closed types. We show that all primitive recursive
+ functional over these inductively defined types are also
+ representable. This generalizes work by Bohm and Berarducci on
+ synthesis of functions on term algebras in the second-order
+ polymorphic $\lambda$-calculus ($F_2$). We give several applications
+ of this generalization, including a representation of $F_2$-programs
+ in $F_3$, along with a definition of functions {\bf reify}, {\bf
+ reflect}, and {\bf eval} for $F_2$ in $F_3$. We also show how to
+ define induction over inductively defined types and sketch some
+ results that show that the extension of the Calculus of Construction
+ by induction principles does not alter the set of functions in its
+ computational fragment, $F_\omega$. This is because a proof by
+ induction can be {\bf realized} by primitive recursion, which is
+ already definable in $F_\omega$.",
+ paper = "Pfen89.pdf"
+}
+
+@incollection{Pfen92,
+ author = "Pfenning, Frank",
+ title = "Dependent Types in Logic Programming",
+ booktitle = "Types in Logic Programming",
+ isbn = "9780262161312",
+ publisher = "MIT Press",
+ year = "1992"
+}
+
+@book{Pier00,
+ author = "Pierce, Benjamin C.",
+ title = "Type Systems for Programming Languages",
+ year = "2000",
+ publisher = "MIT Press",
+ link = "\url{http://ropas.snu.ac.kr/~kwang/S20/pierce\_book.pdf}",
+ paper = "Pier00.pdf"
+}
+
+@misc{Pier15,
+ author = {Pierce, Benjamin C. and Casinghino, Chris and Gaboardi, Marco and
+ Greenberg, Michael and Hritcu, Catalin and Sj\"oberg, Vilhelm and
+ Yorgey, Brent},
+ title = "Software Foundations",
+ year = "2015",
+ file = "Pier15.tgz",
+ abstract =
+ "This electronic book is a course on Software Foundations, the
+ mathematical underpinnings of reliable software. Topics include basic
+ concepts of logic, computer-assisted theorem proving, the Coq proof
+ assistant, functional programming, operational semantics, Hoare logic,
+ and static type systems. The exposition is intended for a broad range
+ of readers, from advanced undergraduates to PhD students and
+ researchers. No specific background in logic or programming languages
+ is assumed, though a degree of mathematical maturity will be helpful.
+
+ The principal novelty of the course is that it is one hundred per cent
+ formalized and machine-checked: the entire text is literally a script
+ for Coq. It is intended to be read alongside an interactive session
+ with Coq. All the details in the text are fully formalized in Coq, and
+ the exercises are designed to be worked using Coq.
+
+ The files are organized into a sequence of core chapters, covering
+ about one semester's worth of material and organized into a coherent
+ linear narrative, plus a number of appendices covering additional
+ topics. All the core chapters are suitable for both upper-level
+ undergraduate and graduate students."
+}
+
+@article{Plot77,
+ author = "Plotkin, G.D.",
+ title = "LCF Considered as a Programming Language",
+ journal = "Theoretical Computer Science",
+ volume = "5",
+ year = "1977",
+ pages = "223-255",
+ link = "\url{http://homepages.inf.ed.ac.uk/gdp/publications/LCF.pdf}",
+ abstract =
+ "The paper studies connections between denotational and operational
+ semantics for a simple programming language based on LCF. It begins
+ with the connection between the behaviour of a program and its
+ denotation. It turns out that a program denotes $\bot$ in any of
+ several possible semantics iff it does not terminate. From this it
+ follows that if two terms have the same denotation in one of these
+ semantics, they have the same behaviour in all contexts. The converse
+ fails for all the semantics. If, however, the language is extended to
+ allow certain parallel facilities behavioural equivalence does coincide
+ with denotational equivalence in one of the semantics considered, which
+ may therefore be called ``fully abstract''. Next a connection is given
+ which actually determines the semantics up to isomorphism from the
+ behaviour alone. Conversely, by allowing further parallel facilities,
+ every r.e. element of the fully abstract semantics becomes definable,
+ thus characterising the programming language, up to interdefinability,
+ from the set of r.e. elements of the domains of the semantics.",
+ paper = "Plot77.pdf"
+}
+
+@misc{Poll98,
+ author = "Poll, Erik and Thompson, Simon",
+ title = "Adding the axioms to Axiom. Toward a system of automated
+ reasoning in Aldor",
+ year = "1998",
+ link = "\url{http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.7.1457}",
+ abstract =
+ "This paper examines the proposal of using the type system of Axiom to
+ represent a logic, and thus to use the constructions of Axiom to
+ handle the logic and represent proofs and propositions, in the same
+ way as is done in theorem provers based on type theory such as Nuprl
+ or Coq.
+
+ The paper shows an interesting way to decorate Axiom with pre- and
+ post-conditions.",
+ paper = "Poll98.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Poll99,
+ author = "Poll, Erik",
+ title = "The Type System of Axiom",
+ year = "1999",
+ link = "\url{http://www.cs.ru.nl/E.Poll/talks/axiom.pdf}",
+ abstract =
+ "This is a slide deck from a talk on the correspondence between
+ Axiom/Aldor types and Logic.",
+ paper = "Poll99.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Poll99a,
+ author = "Poll, Erik and Thompson, Simon",
+ title = "The Type System of Aldor",
+ link = "\url{http://www.cs.kent.ac.uk/pubs/1999/874/content.ps}",
+ abstract =
+ "This paper gives a formal description of -- at least a part of --
+ the type system of Aldor, the extension language of the Axiom.
+ In the process of doing this a critique of the design of the system
+ emerges.",
+ paper = "Poll99a.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Poll00,
+ author = "Poll, Erik and Thompson, Simon",
+ title = "Integrating Computer Algebra and Reasoning through the Type
+ System of Aldor",
+ year = "2000",
+ abstract =
+ "A number of combinations of reasoning and computer algebra systems
+ have been proposed; in this paper we describe another, namely a way to
+ incorporate a logic in the computer algebra system Axiom. We examine
+ the type system of Aldor -- the Axiom Library Compiler -- and show
+ that with some modifications we can use the dependent types of the
+ system to model a logic, under the Curry-Howard isomorphism. We give
+ a number of example applications of the logic we construct and explain
+ a prototype implementation of a modified type-checking system written
+ in Haskell.",
+ paper = "Poll00.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Poll00a,
+ author = "Poll, Erik and Thompson, Simon",
+ title = "Integrating Computer Algebra and Reasoning through the Type
+ System of Aldor",
+ booktitle = "Frontiers of Combining Systems",
+ series = "Lecture Notes in Artificial Intelligence",
+ year = "2000",
+ isbn = "3-540-67281-8",
+ location = "Nancy, France",
+ pages = "136-150",
+ keywords = "axiomref"
+}
+
+@article{Prev02,
+ author = "Prevosto, Virgile and Doligez, Damien",
+ title = "Algorithms and proofs inheritance in the FOC language",
+ journal = "J. Autom. Reasoning",
+ volume = "29",
+ number = "3-4",
+ year = "2002",
+ pages = "337-363",
+ abstract =
+ "In this paper, we present the FOC language, dedicated to the
+ development of certified computer algebra libraries (that is sets of
+ programs). These libraries are based on a hierarchy of implementations
+ of mathematical structures. After presenting the core set of features
+ of our language, we describe the static analyses, which reject
+ inconsistent programs. We then show how we translate FOC definitions
+ into OCAML and COQ, our target languages for the computational part
+ and the proof checking, respectively.",
+ paper = "Prev02.pdf",
+ keywords = "axiomref"
+}
+
+@InCollection{Rect89,
+ author = "Rector, D. L.",
+ title = "Semantics in Algebraic Computation",
+ booktitle = "Computers and Mathematics",
+ publisher = "Springer-Verlag",
+ year = "1989",
+ pages = "299-307",
+ isbn = "0-387-97019-3",
+ keywords = "axiomref"
+}
+
+@misc{Robe15,
+ author = "Roberts, Siobhan",
+ title = "In Mathematics, Mistakes Aren't What They Used To Be",
+ year = 2015,
+ link = "\url{http://nautil.us/issue/24/error/In-mathematics-mistakes-arent-what-they-used-to-be}"
+}
+
+@article{Rudn01,
+ author = "Rudnicki, Piotr and Schwarzweller, Christoph and
+ Trybulec, Andrzej",
+ title = "Commutative algebra in the Mizar system",
+ journal = "J. Symb. Comput.",
+ volume = "32",
+ number = "1-2",
+ pages = "143-169",
+ year = "2001",
+ link = "\url{https://inf.ug.edu.pl/~schwarzw/papers/jsc01.pdf}",
+ abstract =
+ "We report on the development of algebra in the Mizar system. This
+ includes the construction of formal multivariate power series and
+ polynomials as well as the definition of ideals up to a proof of the
+ Hilbert basis theorem. We present how the algebraic structures are
+ handled and how we inherited the past developments from the Mizar
+ Mathematical Library (MML). The MML evolves and past contributions are
+ revised and generalized. Our work on formal power series caused a
+ number of such revisions. It seems that revising past developments
+ with an intent to generalize them is a necessity when building a
+ database of formalized mathematics. This poses a question: how much
+ generalization is best?",
+ paper = "Rudn01.pdf",
+ keywords = "axiomref"
+}
+
+@phdthesis{Schw97,
+ author = "Schwarzweller, Christoph",
+ title = "MIZAR verification of generic algebraic algorithms",
+ school = "University of Tubingen",
+ year = "1997",
+ abstract =
+ "Although generic programming finds more and more attention --
+ nowadays generic programming languages as well as generic libraries
+ exist -- there are hardly approaches for the verification of generic
+ algorithms or generic libraries. This thesis deals with generic
+ algorithms in the field of computer algebra. We propose the Mizar
+ system as a theorem prover capable of verifying generic algorithms on
+ an appropriate abstract level. The main advantage of the MIZAR theorem
+ prover is its special input language that enables textbook style
+ presentation of proofs. For generic versions of Brown/Henrici addition
+ and of Euclidean's algorithm we give complete correctness proofs
+ written in the MIZAR language.
+
+ Moreover, we do not only prove algorithms correct in the usual
+ sense. In addition we show how to check, using the MIZAR system, that
+ a generic algebraic algorithm is correctly instantiated with a
+ particular domain. Answering this question that especially arises if
+ one wants to implement generic programming languages, in the field of
+ computer algebra requires nontrivial mathematical knowledge.
+
+ To build a verification system using the MIZAR theorem prover, we also
+ implemented a generator which almost automatically computes for a
+ given algorithm a set of theorems that imply the correctness of this
+ algorithm.",
+ paper = "Schw97.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Seli13,
+ author = "Selinger, Peter",
+ title = "Lecture Notes on the Lambda Calculus",
+ year = "2013",
+ link = "\url{https://www.irif.fr/~mellies/mpri/mpri-ens/biblio/Selinger-Lambda-Calculus-Notes.pdf}",
+ abstract =
+ "This is a set of lecture notes that developed out of courses on the
+ lambda calculus that I taught at the University of Ottawa in 2001 and
+ at Dalhousie University in 2007 and 2013. Topics covered in these
+ notes include the untyped lambda calculus, the Church-Rosser theorem,
+ combinatory algebras, the simply-typed lambda calculus, the
+ Curry-Howard isomorphism, weak and strong normalization, polymorphism,
+ type inference, denotational semantics, complete partial orders, and
+ the language PCF.",
+ paper = "Seli13.pdf"
+}
+
+@book{Simm00,
+ author = "Simmons, Harold",
+ title = "Derivation and Computation: Taking the Curry-Howard correspondence
+ seriously",
+ year = "2000",
+ publisher = "Cambridge University Press",
+ isbn = "0-521-77173-0"
+}
+
+@article{Soze08,
+ author = "Sozeau, Mattieu and Oury, Nicolas",
+ title = "First-Class Type Classes",
+ journal = "Lecture Notes in Computer Science",
+ volume = "5170",
+ publisher = "Springer",
+ year = "2008",
+ pages = "278-293",
+ link = "\url{https://www.irif.fr/~sozeau/research/publications/First-Class\_Type\_Classes.pdf}",
+ abstract =
+ "Type Classes have met a large success in Haskell and Isabelle, as a
+ solution for sharing notations by overloading and for specifying with
+ abstract structures by quantification on contexts. However, both
+ systems are limited by second-class implementations of these
+ constructs, and these limitations are only overcome by ad-hoc
+ extensions to the respective systems. We propose an embedding of type
+ classes into a dependent type theory that is first-class and supports
+ some of the most popular extensions right away. The implementation is
+ correspondingly cheap, general, and integrates well inside the system,
+ as we have experimented in Coq. We show how it can be used to help
+ structured programming and proving by way of examples.",
+ paper = "Soze08.pdf"
+}
+
+@inproceedings{Soze12,
+ author = "Sozeau, Mattieu",
+ title = "Coq with Classes",
+ booktitle = "JFLA 2012",
+ link = "\url{https://www.irif.fr/~sozeau/research/publications/Coq\_with\_Classes-JFLA-040212.pdf}",
+ year = "2012",
+ paper = "Soze12.pdf"
+}
+
+@misc{Stac17a,
+ author = "cstheory.stackexchange.com",
+ title = "Why does Coq have Prop?",
+ link = "\url{http://cstheory.stackexchange.com/questions/21836/why-does-coq-have-prop/21878\#21878}",
+ year = "2017",
+}
+
+@misc{Ster17,
+ author = "Sterling, Jonathan and Harper, Robert",
+ title = "Algebraic Foundations of Proof Refinement",
+ link = "\url{http://www.cs.cmu.edu/~rwh/papers/afpr/afpr.pdf}",
+ year = "2017",
+ abstract =
+ "We contribute a general apparatus for {\sl dependent} tactic-based
+ proof refinement in the LCF tradition, in which the statements of
+ subgoals may express a dependency on the proofs of other subgoals;
+ this form of dependency is extremely useful and can serve as an
+ {\sl algorithmic} alternative to extensions of LCF based on non-local
+ instantiation of schematic variables. Additionally, we introduce a
+ novel behavioral distinction between {\sl refinement rules} and
+ {\sl tactics} based on naturality. Our framework, called Dependent
+ LCF, is already deployed in the nascent RedPRL proof assistant for
+ computational cubical type theory.",
+ paper = "Ster17.pdf"
+}
+
+@article{Ther01,
+ author = "Th\'ery, Laurent",
+ title = "A Machine-Checked Implementation of Buchberger's Algorithm",
+ journal = "Journal of Automated Reasoning",
+ volume = "26",
+ year = "2001",
+ pages = "107-137",
+ abstract = "We present an implementation of Buchberger's algorithm that
+ has been proved correct within the proof assistant Coq. The
+ implementation contains the basic algorithm plus two standard
+ optimizations.",
+ paper = "Ther01.pdf"
+}
+
+@article{Troe97,
+ author = "Troelstra, A.S.",
+ title = "From constructivism to computer science",
+ journal = "Theoretical Computer Science",
+ volume = "211",
+ year = "1999",
+ pages = "232-252",
+ abstract =
+ "My field is mathematical logic, with a special interest in
+ constructivism, and I would not dare to call myself a computer
+ scientist. But some computer scientists regard my work as a
+ contribution to their field; and in this text I shall try to explain
+ how this is possible, by taking a look at the history of ideas.
+
+ I want to describe how two interrelated ideas, connected with the
+ constructivistic trend in the foundations of mathematics, developed
+ within mathematical logic and ultimately diffused into computer
+ science.
+
+ It will be seen that this development has not been a quite
+ straightforward one. In the history of ideas it often looks as if a
+ certain idea has to be discovered several times, by different people,
+ before it really enters into the ``consciousness'' of science.",
+ paper = "Troe99.pdf"
+}
+
+@misc{Tros13,
+ author = "Trostle, Anne",
+ title = "An Algorithm for the Greatest Common Divisor",
+ link = "\url{http://www.nuprl.org/MathLibrary/gcd/}",
+ year = "2013"
+}
+
+@inproceedings{Wadl88,
+ author = "Wadler, Philip and Blott, Stephen",
+ title = "How to make ad-hoc polymorphism less ad hoc",
+ booktitle = "Proc 16th ACM SIGPLAN-SIGACT Symp. on Princ. of Prog. Lang",
+ isbn = "0-89791-294-2",
+ pages = "60-76",
+ year = "1988",
+ link = "\url{http://202.3.77.10/users/karkare/courses/2010/cs653/Papers/ad-hoc-polymorphism.pdf}",
+ abstract =
+ "This paper presents {\sl type classes}, a new approach to {\sl ad-hoc}
+ polymorphism. Type classes permit overloading of arithmetic operators
+ such as multiplication, and generalise the ``eqtype variables'' of
+ Standard ML. Type classes extend the Hindley/Milner polymorphic type
+ system, and provide a new approach to issues that arise in object-oriented
+ programming, bounded type quantification, and abstract data types. This
+ paper provides an informal introduction to type classes, and defines them
+ formally by means of type inference rules.",
+ paper = "Wadl88.pdf"
+}
+
+@misc{Wadl14,
+ author = "Wadler, Philip",
+ title = "Propositions as Types",
+ year = "2014",
+ link = "\url{http://homepages.inf.ed.ac.uk/wadler/papers/propositions-as-types/propositions-as-types.pdf}",
+ paper = "Wadl14.pdf"
+}
+
+@misc{Warn17,
+ author = "Warner, Evan",
+ title = "Splash Talk: The Foundational Crisis of Mathematics",
+ year = "2017",
+ link = "\url{web.stanford.edu/~ebwarner/SplashTalk.pdf}",
+ abstract =
+ "This class will cover some of the mathematics, history, and
+ philosophy of the so-called {\sl foundational crisis in mathematics}.
+ Broadly speaking, mathematics in the late nineteenth and early
+ twentieth centuries was marked by an increased awareness of
+ ``foundational issues,'' prompted by a number of problems in the
+ practice of mathematics that had accumulated over the years. We will
+ discuss a few examples of some of these problems, and then discuss the
+ three major schools of thought that emerged to deal with them and
+ provide a coherent philosophical and methodological underpinning for
+ mathematics.",
+ paper = "Warn17.pdf"
+}
+
+@misc{Wijn68,
+ author = "van Wijngaarden, A. and Mailloux, B.J. and Peck, J.E.L. and
+ Koster, C.H.A. and Sintzoff, M. and Lindsey, C.H. and
+ Meertens, L.G.T. and Fisker, R.G.",
+ title = "Revised Report on the Algorithmic Language ALGOL 68",
+ link = "\url{http://www.eah-jena.de/~kleine/history/languages/algol68-revisedreport.pdf}",
+ year = "1968",
+ paper = "Wijn68.pdf"
+}
+
+@misc{Wiki17,
+ author = "Wikipedia",
+ title = "Calculus of constructions",
+ year = "2017",
+ link = "\url{https://en.wikipedia.org/wiki/Calculus\_of\_constructions}"
+}
+
+@misc{WikiED,
+ author = "Wikipedia",
+ title = "Euclidean Domain",
+ year = "2017",
+ link = "\url{https://en.wikipedia.org/wiki/Euclidean\_domain}"
+}
+
+@book{Wins93,
+ author = "Winskel, Glynn",
+ title = "The Formal Semantics of Programming Languages",
+ isbn = "978-0262731034",
+ year = "1993",
+ publisher = "MIT"
+}
+
+@book{Yasu71,
+ author = "Yasuhara, Ann",
+ title = "Recursive Function Theory and Logic",
+ year = "1971",
+ publisher = "Academic Press",
+ isbn = "0-12-768950-8"
+}
+
+@misc{Boeh86,
+ author = "Boehm, Hans-J. and Cartwright, Robert and Riggle, Mark and
+ O'Donnell, Michael J.",
+ title = "Exact Real Arithmetic: A Case Study in Higher Order Programming",
+ year = "1986",
+ link = "\url{http://dev.acm.org/pubs/citations/proceedings/lfp/319838/p162-boehm}",
+ abstract =
+ "Two methods for implementing {\sl exact} real arithmetic are explored
+ One method is based on formulating real numbers as functions that map
+ rational tolerances to rational approximations. This approach, which
+ was developed by constructive mathematicians as a concrete
+ formalization of the real numbers, has led to a surprisingly
+ successful implementation. The second method formulates real numbers
+ as potentially infinite sequences of digits, evaluated on demand.
+ This approach has frequently been advocated by proponents of lazy
+ functional languages in the computer science community. Ironically,
+ it leads to much less satisfactory implementations. We discuss the
+ theoretical problems involved in both methods, give algorithms for the
+ basic arithmetic operations, and give an empirical comparison of the
+ two techniques. We conclude with some general observations about the
+ lazy evaluation paradigm and its implementation.",
+ paper = "Boeh86.pdf"
+}
+
+@misc{Gust16,
+ author = "Gustafson, John",
+ title = "A Radical Approach to Computation with Real Numbers",
+ link = "\url{http://www.johngustafson.net/presentations/Multicore2016-JLG.pdf}",
+ ppt = "Gust16.pptx",
+ abstract =
+ "If we are willing to give up compatibility with IEEE 754 floats and
+ design a number format with goals appropriate to 2016, we can achieve
+ several goals simultaneously: Extremely high energy efficiency and
+ information-per-bit, no penalty for decimal operations instead of
+ binary, rigorous bounds on answers without the overly pessimistic
+ bounds produced by interval methods, and unprecedented high speed up
+ to some precision. This approach extends the ideas of unum arithmetic
+ introduced two years ago by breaking completely from the IEEE
+ float-type format, resulting in fixed bit size values, fixed execution
+ time, no exception values or 'gradual underflow' issues, no wasted bit
+ patterns, and no redundant representations (like 'negative zero'). As
+ an example of the power of this format, a difficult 12-dimensional
+ nonlinear robotic kinematics problem that has defied solvers to date
+ is quickly solvable with absolute bounds. Also unlike interval
+ methods, it becomes possible to operate on arbitrary disconnected
+ subsets of the real number line with the same speed as operating on a
+ simple bound.",
+ paper = "Gust16.pdf",
+}
+
+@book{Gust16a,
+ author = "Gustafson, John",
+ title = "The End of Error: Unum Computing",
+ publisher = "Chapman and Hall / CRC Computational Series",
+ year = "2016",
+ isbn = "978-1482239867"
+}
+
+@article{Gust16b,
+ author = "Gustafson, John",
+ title = "Unums 2.0 An Interview with John L. Gustafson",
+ publisher = "ACM",
+ journal = "Ubiquity",
+ year = "2016",
+ paper = "Gust16b.pdf"
+}
+
+@incollection{Lamb06,
+ author = "Lambov, Branimir",
+ title = "Interval Arithmetic Using SSE-2",
+ booktitle = "Lecture Notes in Computer Science",
+ publisher = "Springer-Verlag",
+ year = "2006",
+ isbn = "978-3-540-85520-0",
+ pages = "102-113"
+}
+
+@book{Hamm62,
+ author = "Hamming, R W.",
+ title = "Numerical Methods for Scientists and Engineers",
+ publisher = "Dover",
+ year = "1973",
+ isbn = "0-486-65241-6"
+}
+
+@article{Nord62,
+ author = "Nordsieck, Arnold",
+ title = "On Numerical Integration of Ordinary Differential Equations",
+ journal = "Mathematics of Computations",
+ volume = "XVI",
+ year = "1962",
+ pages = "22-49",
+ abstract =
+ "A reliable efficient general-purpose method for automatic digital
+ computer integration of systems of ordinary differential equations is
+ described. The method operates with the current values of the higher
+ derivatives of a polynomial approximating the solution. It is
+ thoroughly stable under all circumstances, incorporates automatic
+ starting and automatic choice and revision of elementary interval
+ size, approximately minimizes the amount of computation for a
+ specified accuracy of solution, and applies to any system of
+ differential equations with derivatives continuous or piecewise
+ continuous with finite jumps. ILLIAC library subroutine F7, University
+ of Illinois Digital Computer Laboratory, is a digital computer program
+ applying this method."
+}
+
+@misc{Kama15,
+ author = "Kamareddine, Fairouz and Wells, Joe and Zengler, Christoph and
+ Barendregt, Henk",
+ title = "Computerising Mathematical Text",
+ year = "2015",
+ abstract =
+ "Mathematical texts can be computerised in many ways that capture
+ differing amounts of the mathematical meaning. At one end, there is
+ document imaging, which captures the arrangement of black marks on
+ paper, while at the other end there are proof assistants (e.g. Mizar,
+ Isabelle, Coq, etc.), which capture the full mathematical meaning and
+ have proofs expressed in a formal foundation of mathematics. In
+ between, there are computer typesetting systems (e.g. Latex and
+ Presentation MathML) and semantically oriented systems (e.g. Content
+ MathML, OpenMath, OMDoc, etc.). In this paper we advocate a style of
+ computerisation of mathematical texts which is flexible enough to
+ connect the different approaches to computerisation, which allows
+ various degrees of formalisation, and which is compatible with
+ different logical frameworks (e.g. set theory, category theory, type
+ theory, etc.) and proof systems. The basic idea is to allow a
+ man-machine collaboration which weaves human input with machine
+ computation at every step in the way. We propose that the huge step from
+ informal mathematics to fully formalised mathematics be divided into
+ smaller steps, each of which is a fully developed method in which
+ human input is minimal."
+}
+
+@misc{Leeu94a,
+ author = {van Leeuwen, Andr\'e M.A.},
+ title = "Representation of mathematical objects in interactive books",
+ abstract = "
+ We present a model for the representation of mathematical objects in
+ structured electronic documents, in a way that allows for interaction
+ with applications such as computer algebra systems and proof checkers.
+ Using a representation that reflects only the intrinsic information of
+ an object, and storing application-dependent information in so-called
+ {\sl application descriptions}, it is shown how the translation from
+ the internal to an external representation and {\sl vice versa} can be
+ achieved. Hereby a formalisation of the concept of {\sl context} is
+ introduced. The proposed scheme allows for a high degree of
+ application integration, e.g., parallel evaluation of subexpressions
+ (by different computer algebra systems), or a proof checker using a
+ computer algebra system to verify an equation involving a symbolic
+ computation.",
+ paper = "Leeu94a.pdf"
+}
+
+@InProceedings{Kalt84,
+ author = "Kaltofen, E.",
+ title = "A Note on the {Risch} Differential Equation",
+ booktitle = "Proc. EUROSAM '84",
+ pages = "359--366",
+ year = "1984",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/84/Ka84_risch.ps.gz}",
+ paper = "Kalt84.ps"
+}
+
+@misc{Abra01,
+ author = "Abramov, Sergei and Bronstein, Manuel",
+ title = "On Solutions of Linear Functional Systems",
+ year = "2001",
+ link = "\url{http://www-sop.inria.fr/cafe/Manuel.Bronstein/publications/mb_papers.html}",
+ algebra =
+ "\newline\refto{category OREPCAT UnivariateSkewPolynomialCategory}
+ \newline\refto{category LODOCAT LinearOrdinaryDifferentialOperatorCategory}
+ \newline\refto{domain AUTOMOR Automorphism}
+ \newline\refto{domain ORESUP SparseUnivariateSkewPolynomial}
+ \newline\refto{domain OREUP UnivariateSkewPolynomial}
+ \newline\refto{domain LODO LinearOrdinaryDifferentialOperator}
+ \newline\refto{domain LODO1 LinearOrdinaryDifferentialOperator1}
+ \newline\refto{domain LODO2 LinearOrdinaryDifferentialOperator2}
+ \newline\refto{package APPLYORE ApplyUnivariateSkewPolynomial}
+ \newline\refto{package OREPCTO UnivariateSkewPolynomialCategoryOps}
+ \newline\refto{package LODOF LinearOrdinaryDifferentialOperatorFactorizer}
+ \newline\refto{package LODOOPS LinearOrdinaryDifferentialOperatorsOps}",
+ abstract = "
+ We describe a new direct algorithm for transforming a linear system of
+ recurrences into an equivalent one with nonsingular leading or
+ trailing matrix. Our algorithm, which is an improvement to the EG
+ elimination method, uses only elementary linear algebra operations
+ (ranks, kernels, and determinants) to produce an equation satisfied by
+ the degrees of the solutions with finite support. As a consequence, we
+ can bound and compute the polynomial and rational solutions of very
+ general linear functional systems such as systems of differential or
+ ($q$)-difference equations.",
+ paper = "Abra01.pdf"
+}
+
+@inproceedings{Bron96b,
+ author = "Bronstein, Manuel",
+ title = "On the Factorization of Linear Ordinary Differential Operators",
+ booktitle = "Mathematics and Computers in Simulation",
+ volume = "42",
+ pages = "387-389",
+ year = "1996",
+ abstract =
+ "After reviewing the arithmetic of linear ordinary differential
+ operators, we describe the current status of the factorisation
+ algorithm, specially with respect to factoring over non-algebraically
+ closed constant fields. We also describe recent results from Singer
+ and Ulmer that reduce determining the differential Galois group of an
+ operator to factoring.",
+ paper = "Bron96b.pdf"
+}
+
+@article{Bron96a,
+ author = "Bronstein, Manuel and Petkovsek, Marko",
+ title = "An introduction to pseudo-linear algebra",
+ journal = "Theoretical Computer Science",
+ volume = "157",
+ pages = "3-33",
+ year = "1996",
+ link = "\url{http://www-sop.inria.fr/cafe/Manuel.Bronstein/publications/mb_papers.html}",
+ algebra = "\newline\refto{category LORER LeftOreRing}",
+ abstract =
+ "Pseudo-linear algebra is the study of common properties of linear
+ differential and difference operators. We introduce in this paper its
+ basic objects (pseudo-derivations, skew polynomials, and pseudo-linear
+ operators) and describe several recent algorithms on them, which, when
+ applied in the differential and difference cases, yield algorithms for
+ uncoupling and solving systems of linear differential and difference
+ equations in closed form.",
+ paper = "Bron96a.pdf"
+}
+
+@inproceedings{Bron01,
+ author = "Bronstein, Manuel",
+ title = "Computer Algebra Algorithms for Linear Ordinary Differential
+ and Difference equations",
+ link = "\url{http://www-sop.inria.fr/cafe/Manuel.Bronstein/publications/ecm3.pdf}",
+ booktitle = "European Congress of Mathematics",
+ series = "Progress in Mathematics",
+ volume = "202",
+ year = "2001",
+ pages = "105-119",
+ abstract = "
+ Galois theory has now produced algorithms for solving linear ordinary
+ differential and difference equations in closed form. In addition,
+ recent algorithmic advances have made those algorithms effective and
+ implementable in computer algebra systems. After introducing the
+ relevant parts of the theory, we describe the latest algorithms for
+ solving such equations.",
+ paper = "Bron01.pdf"
+}
+
+@inproceedings{Bron02,
+ author = "Bronstein, Manuel and Lafaille, S\'ebastien",
+ title = "Solutions of linear ordinary differential equations in terms
+ of special functions",
+ booktitle = "Proc. ISSAC '02",
+ publisher = "ACM Press",
+ pages = "23-28",
+ year = "2002",
+ isbn = "1-58113-484-3",
+ link = "\url{http://www-sop.inria.fr/cafe/Manuel.Bronstein/publications/issac2002.pdf}",
+ url2 = "http://xena.hunter.cuny.edu/ksda/papers/bronstein2.pdf",
+ paper2 = "Bron02x.pdf",
+ abstract =
+ "We describe a new algorithm for computing special function solutions
+ of the form $y(x) = m(x)F(\eta(x))$ of second order linear ordinary
+ differential equations, where $m(x)$ is an arbitrary Liouvillian
+ function, $\eta(x)$ is an arbitrary rational function, and $F$
+ satisfies a given second order linear ordinary differential
+ equation. Our algorithm, which is based on finding an appropriate
+ point transformation between the equation defining $F$ and the one to
+ solve, is able to find all rational transformations for a large class
+ of functions $F$, in particular (but not only) the $_0F_1$ and $_1F_1$
+ special functions of mathematical physics, such as Airy, Bessel,
+ Kummer and Whittaker functions. It is also able to identify the values
+ of the parameters entering those special functions, and can be
+ generalized to equations of higher order.",
+ paper = "Bron02.pdf"
+}
+
+
+@article{Dave86,
+ author = "Davenport, James H.",
+ title = "The Risch Differential Equation Problem",
+ year = "1986",
+ journal = "SIAM J. COMPUT.",
+ volume = "15",
+ number = "4",
+ comment = "Technical Report 83-4, Dept. Comp. Sci, Univ. Delaware",
+ abstract = "
+ We propose a new algorithm, similar to Hermite's method for the
+ integration of rational functions, for the resolution of Risch
+ differential equations in closed form, or proving that they have no
+ resolution. By requiring more of the presentation of our differential
+ fields (in particular that the exponentials be weakly normalized), we
+ can avoid the introduction of arbitrary constants which have to be
+ solved for later.
+
+ We also define a class of fields known as exponentially reduced, and
+ show that solutions of Risch differential equations which arise from
+ integrating in these fields satisfy the ``natural'' degree constraints
+ in their main variables, and we conjecture (after Risch and Norman)
+ that this is true in all variables.",
+ paper = "Dave86.pdf"
+}
+
+@inproceedings{Cavi76,
+ author = "Caviness, Bob F. and Fateman, Richard J.",
+ title = "Simplification of Radical Expressions",
+ booktitle = "Proc. 1976 SYMSAC",
+ pages = "329-338",
+ year = "1976",
+ abstract =
+ "In this paper we discuss the problem of simplifying unnested radical
+ expressions. We describe an algorithm implemented in MACSYMA that
+ simplifies radical expressions and then follow this description with
+ a formal treatment of the problem. Theoretical computing times for some
+ of the algorithms are briefly discussed as is related work of other
+ authors",
+ paper = "Cavi76.pdf",
+ keywords = "axiomref"
+}
+
+@article{Land93,
+ author = "Landau, Susan",
+ title = "How to Tangle with a Nested Radical",
+ institution = "University of Massachusetts",
+ journal = "The Mathematical Intelligencer",
+ year = "1993",
+ paper = "Land93.pdf"
+}
+
+@article{Shac90,
+ author = "Shackell, John",
+ title = "Growth Estimates for Exp-Log Functions",
+ journal = "J. Symbolic Computation",
+ volume = "10",
+ year = "1990",
+ pages = "611-632",
+ abstract =
+ "Exp-log functions are those obtained from the constant 1 and the
+ variable X by means of arithmetic operations and the function symbols
+ exp() and log(). This paper gives an explicit algorithm for
+ determining eventual dominance of these functions modulo an oracle for
+ deciding zero equivalence of constant terms. This also provides
+ another proof that the dominance problem for exp-log functions is
+ Turing-reducible to the identity problem for constant terms."
+}
+
+@article{Stou76,
+ author = "Stoutemyer, David R.",
+ title = "Automatic Simplification for the Absolute-value Function and its
+ Relatives",
+ journal = "ACM SIGSAM",
+ volume = "10",
+ number = "4",
+ year = "1976",
+ pages = "48-49",
+ abstract =
+ "Computer symbolic mathematics has made impressive progress for the
+ automatic simplification of rational expressions, algebraic
+ expressions, and elementary transcendental expressions. However,
+ existing computer-algebra systems tend to provide little or no
+ simplification for the absolute-value function or for its relatives
+ such as the signum, unit ramp, unit step, max, min, modulo, and Dirac
+ delta functions. Although these functions lack certain desireable
+ properties that are helpful for canonical simplification, there are
+ opportunities for some ad hoc simplification. Moreover, a perusal of
+ most mathematics, engineering, and scientific journals or texts
+ reveals that these functions are too prevalent to be ignored. This
+ article describes specific simplification rules implemented in a
+ program that supplements the built-in rules for the MACSYMA ABS and
+ SIGNUM functions.",
+ paper = "Stou76.pdf"
+}
+
+@misc{Bronxxa,
+ author = "Bronstein, Manuel",
+ title = "Symbolic Integration: towards Practical Algorithms",
+ abstract =
+ "After reviewing the Risch algorithm for the integration of elementary
+ functions and the underlying theory, we describe the successive
+ improvements in the field, and the current ``rational'' approach to
+ symbolic integration. We describe how a technique discovered by
+ Hermite a century ago can be efficiently applied to rational,
+ algebraic, elementary transcendental and mixed elementary functions."
+}
+
+@article{Bron88,
+ author = "Bronstein, Manuel",
+ title = "The Transcendental Risch Differential Equation",
+ journal = "J. Symbolic Computation",
+ volume = "9",
+ year = "1988",
+ pages = "49-60",
+ abstract =
+ "We present a new rational algorithm for solving Risch differential
+ equations in towers of transcendental elementary extensions. In
+ contrast to a recent algorithm of Davenport we do not require a
+ progressive reduction of the denominators involved, but use weak
+ normality to obtain a formula for the denominator of a possible
+ solution. Implementation timings show this approach to be faster than
+ a Hermite-like reduction.",
+ paper = "Bron88.pdf",
+ keywords = "axiomref"
+}
+
+@article{Bron90a,
+ author = "Bronstein, Manuel",
+ title = "Integration of Elementary Functions",
+ journal = "J. Symbolic Computation",
+ volume = "9",
+ pages = "117-173",
+ year = "1990",
+ abstract =
+ "We extend a recent algorithm of Trager to a decision procedure for the
+ indefinite integration of elementary functions. We can express the
+ integral as an elementary function or prove that it is not
+ elementary. We show that if the problem of integration in finite terms
+ is solvable on a given elementary function field $k$, then it is
+ solvable in any algebraic extension of $k(\theta)$, where $\theta$ is
+ a logarithm or exponential of an element of $k$. Our proof considers
+ an element of such an extension field to be an algebraic function of
+ one variable over $k$.
+
+ In his algorithm for the integration of algebraic functions, Trager
+ describes a Hermite-type reduction to reduce the problem to an
+ integrand with only simple finite poles on the associated Riemann
+ surface. We generalize that technique to curves over liouvillian
+ ground fields, and use it to simplify our integrands. Once the
+ multiple finite poles have been removed, we use the Puiseux expansions
+ of the integrand at infinity and a generalization of the residues to
+ compute the integral. We also generalize a result of Rothstein that
+ gives us a necessary condition for elementary integrability, and
+ provide examples of its use.",
+ paper = "Bron90a.pdf"
+}
+
+@article{Bron90c,
+ author = "Bronstein, Manuel",
+ title = "On the integration of elementary functions",
+ journal = "Journal of Symbolic Computation",
+ volume = "9",
+ number = "2",
+ pages = "117-173",
+ year = "1990",
+ month = "February"
+}
+
+@inproceedings{Bron93,
+ author = "Bronstein, Manuel and Salvy, Bruno",
+ title = "Full partial fraction decomposition of rational functions",
+ booktitle = "Proc. ISSAC 1993",
+ year = "1993",
+ pages = "157-160",
+ isbn = "0-89791-604-2",
+ link = "\url{http://www.acm.org/pubs/citations/proceedings/issac/164081/}",
+ algebra = "\newline\refto{domain FPARFRAC FullPartialFractionExpansion}",
+ abstract =
+ "We describe a rational algorithm that computes the full partial
+ fraction expansion of a rational function over the algebraic closure
+ of its field of definition. The algorithm uses only gcd operations
+ over the initial field but the resulting decomposition is expressed
+ with linear denominators. We give examples from its Axiom and Maple
+ implementations.",
+ paper = "Bron93.pdf",
+ keywords = "axiomref",
+ beebe = "Bronstein:1993:FPF"
+}
+
+@book{Bron97,
+ author = "Bronstein, Manuel",
+ title = "Symbolic Integration I--Transcendental Functions",
+ publisher = "Springer, Heidelberg",
+ year = "1997",
+ isbn = "3-540-21493-3",
+ link = "\url{http://evil-wire.org/arrrXiv/Mathematics/Bronstein,_Symbolic_Integration_I,1997.pdf}",
+ paper = "Bron97.pdf"
+}
+
+@article{Bron06,
+ author = "Bronstein, M.",
+ title = "Parallel integration",
+ journal = "Programming and Computer Software",
+ year = "2006",
+ issn = "0361-7688",
+ volume = "32",
+ number = "1",
+ doi = "10.1134/S0361768806010075",
+ link = "\url{http://dx.doi.org/10.1134/S0361768806010075}",
+ publisher = "Nauka/Interperiodica",
+ pages = "59-60",
+ abstract = "
+ Parallel integration is an alternative method for symbolic
+ integration. While also based on Liouville's theorem, it handles all
+ the generators of the differential field containing the integrand ``in
+ parallel'', i.e. all at once rather than considering only the topmost
+ one in a recursive fashion. Although it still contains heuristic
+ aspects, its ease of implementation, speed, high rate of success, and
+ ability to integrate functions that cannot be handled by the Risch
+ algorithm make it an attractive alternative.",
+ paper = "Bron06.pdf"
+}
+
+@article{Bron07,
+ author = "Bronstein, Manuel",
+ title = "Structure theorems for parallel integration",
+ journal = "Journal of Symbolic Computation",
+ volume = "42",
+ number = "7",
+ pages = "757-769",
+ year = "2007",
+ month = "July",
+ abstract = "
+ We introduce structure theorems that refine Liouville's Theorem on
+ integration in closed form for general derivations on multivariate
+ rational function fields. By predicting the arguments of the new
+ logarithms that can appear in integrals, as well as the denominator of
+ the rational part, those theorems provide theoretical backing for the
+ Risch-Norman integration method. They also generalize its applicability
+ to non-monomial extensions, for example the Lambert W function.",
+ paper = "Bron07.pdf"
+}
+
+@article{Cher85,
+ author = "Cherry, G.W.",
+ title = "Integration in Finite Terms with Special Functions:
+ The Error Function",
+ journal = "J. Symbolic Computation",
+ year = "1985",
+ volume = "1",
+ pages = "283-302",
+ abstract =
+ "A decision procedure for integrating a class of transcendental
+ elementary functions in terms of elementary functions and error
+ functions is described. The procedure consists of three mutually
+ exclusive cases. In the first two cases a generalised procedure for
+ completing squares is used to limit the error functions which can
+ appear in the integral to a finite number. This reduces the problem
+ to the solution of a differential equation and we use a result of
+ Risch (1969) to solve it. The third case can be reduced to the
+ determination of what we have termed $\sum$-decompositions. The result
+ presented here is the key procedure to a more general algorithm which
+ is described fully in Cherry (1983).",
+ paper = "Cher85.pdf"
+}
+
+@article{Coll69,
+ author = "Collins, George E.",
+ title = "Algorithmic Approaches to Symbolic Integration and Simplification",
+ journal = "ACM SIGSAM",
+ volume = "12",
+ year = "1969",
+ pages = "5016",
+ abstract =
+ "This panel session followed the format announced by SIGSAM Chairman
+ Carl Engelman in the announcement published in SIGSAM Bulletin No. 10
+ (October 1968). Carl gave a brief (five or ten minutes) introduction
+ to the subject and introduced Professor Joel Moses (M. I. T.). Joel
+ presented an excellent exposition of the recent research
+ accomplishments of the other panel members, synthesizing their work
+ into a single large comprehensible picture. His presentation was
+ greatly enhanced by a series of 27 carefully prepared slides
+ containing critical examples and basic formulas, and was certainly the
+ feature of the show. A panel discussion followed, with some audience
+ participation. Panel members were Dr. W. S. Brown (Bell Telephone
+ Laboratories), Professor B. F. Caviness (Duke University), Dr. Daniel
+ Richardson and Dr. R. H. Risch (IBM).",
+ paper = "Coll69.pdf"
+}
+
+@book{Dave81,
+ author = "Davenport, James H.",
+ title = "On the Integration of Algebraic Functions",
+ publisher = "Springer-Verlag",
+ series = "Lecture Notes in Computer Science 102",
+ isbn = "0-387-10290-6",
+ year = "1981",
+ abstract =
+ "This work is concerned with the following question: ``{\sl When is an
+ algebraic function integrable?}''. We can state this question in
+ another form which makes clearer our interpretation of integration:
+ ``If we are given an algebraic function, when can we find an
+ expression in terms of algebraics, logarithms and exponentials whose
+ derivative is the given function, and what is that expression?''.
+
+ This question can be looked at purely mathematically, as a question in
+ decidability theory, but our interest in this question is more
+ practical and springs from the requirements of computer algebra. Thus
+ our goal is ``{\sl Write a program which, when given an algebraic
+ function, will produce an expression for its integral in terms of
+ algebraics, exponentials and logarithms, or will prove that there is
+ no such expression}''.",
+ paper = "Dave81.pdf"
+}
+
+@article{Dave81c,
+ author = "Davenport, James H.",
+ title = "Algebraic Computations",
+ publisher = "Springer-Verlag",
+ journal = "Lecture Notes in Computer Science 102",
+ pages = "14-29",
+ isbn = "0-387-10290-6",
+ year = "1981",
+ abstract =
+ "Algebraic relationships between variables and expressions are very
+ common in computer algebra. Not only do they often occur explicitly,
+ in forms like $sqrt(x^2+1)$, but well known difficulties such as
+ $sin(x)^2+cos(x)^2=1$ (Stoutemyer, 1977) can be expressed in this
+ form. Nevertheless it is difficult to compute with regard to these
+ relationships. This chapter discusses the problem of such computing,
+ and then enters the area of algebraic geometry, which is a natural
+ outgrowth of attempts to perform such computations as readily as one
+ computes without them.",
+ paper = "Dave81c.pdf"
+}
+
+@article{Dave81d,
+ author = "Davenport, James H.",
+ title = "Coates' Algorithm",
+ publisher = "Springer-Verlag",
+ journal = "Lecture Notes in Computer Science 102",
+ pages = "30-48",
+ isbn = "0-387-10290-6",
+ year = "1981",
+ abstract =
+ "In this chapter, we consider the problem of finding a function with a
+ certain set of poles. That this problem is non-trivial in the case of
+ algebraic functions (although it is trivial in the case of rational
+ functions) can be seen from the fact that such functions need not
+ always exist. For example, on the curve defined by $\sqrt{x^3+1}$,
+ there is no function with a zero of order 1 at one place lying over
+ the point $X=0$ and a pole of order 1 at infinity and no other poles
+ or zeros, but there is one with divisor 3 times that (i.e. the divisor
+ has order 3). On the curve defined by $Y^2=x^3-3X^2+X+1$, there are no
+ functions with a zero on one place lying over $X=0$ and a pole at the
+ other, both having the same order, and no other zeros or poles.",
+ paper = "Dave81d.pdf"
+}
+
+@article{Dave81e,
+ author = "Davenport, James H.",
+ title = "Risch's Theorem",
+ publisher = "Springer-Verlag",
+ journal = "Lecture Notes in Computer Science 102",
+ pages = "49-63",
+ isbn = "0-387-10290-6",
+ year = "1981",
+ abstract =
+ "This chapter describes an underlying body of theory to the area of
+ finding (or proving non-existent) the elementary integrals of
+ algebraic functions, where a function is {\sl algebraic} if it can be
+ generated from the variable of integration and constants by the
+ arithmetic operations and the taking of roots of equations (the theory
+ does not require that these roots should be expressible in terms of
+ radicals), possibly with nesting. By {\sl elementary} we mean
+ generated from the variable of integration and constants by the
+ arithmetic operations and the taking of roots, exponentials and
+ logarithms, possibly with nesting.",
+ paper = "Dave81e.pdf"
+}
+
+@article{Dave81f,
+ author = "Davenport, James H.",
+ title = "The Problem of Torsion Divisors",
+ publisher = "Springer-Verlag",
+ journal = "Lecture Notes in Computer Science 102",
+ pages = "64-75",
+ isbn = "0-387-10290-6",
+ year = "1981",
+ abstract =
+ "This chapter and the next three are concerned with the theory and
+ practice of the FIND-ORDER procedure, which, as we saw in the last
+ chapter, is a necessary part of our integration algorithm, and which
+ turns out to be the mathematically most difficult. This chapter will
+ outline the general nature of the problem, with special reference to
+ the simplest non-trivial case, viz. problems involving the square root
+ of one cubic or quartic and involving no constants other than the
+ rationals.",
+ paper = "Dave81f.pdf"
+}
+
+@article{Dave81g,
+ author = "Davenport, James H.",
+ title = "Gauss-Manin Operators",
+ publisher = "Springer-Verlag",
+ journal = "Lecture Notes in Computer Science 102",
+ pages = "76-91",
+ isbn = "0-387-10290-6",
+ year = "1981",
+ abstract =
+ "This chapter is devoted to the case of integrands which contain a
+ transcendental parameter apart from the variable of integration, so
+ that we can consider our problem to be the integration of a function
+ in $\{K(x,y) | F(u,x,y) = 0\}$, where $K$ is an algebraic extension of
+ $k(u)$ for some field $k$ and $u$ transcendental over it. We shall
+ use this notation, with $u$ being the independent transcendental, as
+ we shall use the prefix operator $D$ to denote differentiation with
+ respect to $u$, and the suffix $\prime$ to denote differentiation with
+ respect to $x$. This case is often more tractable than the case when
+ there is no such transcendental, for integration with respect to $x$
+ and differentiation with respect to $u$ commute, so that if $G(u,x,y)$
+ is integrable, then so is $DG(u,x,y)$, $D^2G(u,x,y)$ and so on.",
+ paper = "Dave81g.pdf"
+}
+
+@article{Dave81h,
+ author = "Davenport, James H.",
+ title = "Elliptic Integrals Concluded",
+ publisher = "Springer-Verlag",
+ journal = "Lecture Notes in Computer Science 102",
+ pages = "92-105",
+ isbn = "0-387-10290-6",
+ year = "1981",
+ abstract =
+ "The previous chapter (including the algorithm FIND\_ORDER\_MANIN)
+ completely solved the problem of torsion divisors over ground fields
+ containing a transcendental. We are therefore left with the case of
+ ground fields all of whose elements are algebraic over the rationals,
+ and this is the problem we will consider in this chapter (for elliptic
+ curves) and the next. Furthermore, any particular definition of a
+ curve and of a divisor can only involve a finite number of algebraics,
+ so we can restrict our attention to fields which are generated from
+ the rationals by extending with a finite number of algebraics, i.e.
+ {\sl algebraic number fields}. Before we can explore the torsion
+ divisor problem over them, we first need to know more about their
+ structure and possible computer representations, and this we discuss
+ in the next section, amplifying the discussion of general algebraic
+ expression in Chapter 2.",
+ paper = "Dave81h.pdf"
+}
+
+@article{Dave81i,
+ author = "Davenport, James H.",
+ title = "Curves over Algebraic Number Fields",
+ publisher = "Springer-Verlag",
+ journal = "Lecture Notes in Computer Science 102",
+ pages = "106-118",
+ isbn = "0-387-10290-6",
+ year = "1981",
+ abstract =
+ "The case of curves of arbitrary genus is much more difficult than the
+ case of curves of genus 1, and there are no well-developed algorithms
+ for this case. I have not been able to code any significant program to
+ deal with this case because of the large number of subsidiary
+ algorithms for which I do not have programs, though such programs have
+ been written elsewhere, or can readily be written. Presented here,
+ therefore, are the outlines of techniques which will enable one to
+ bound the torsion of curves of arbitrary genus over algebraic number
+ fields.",
+ paper = "Dave81i.pdf"
+}
+
+@article{Dave79c,
+ author = "Davenport, James H.",
+ title = "Algorithms for the Integration of Algebraic Functions",
+ journal = "Lecture Notes in Computer Science",
+ volume = "72",
+ pages = "415-425",
+ year = "1979",
+ abstract = "
+ The problem of finding elementary integrals of algebraic functions has
+ long been recognized as difficult, and has sometimes been thought
+ insoluble. Risch stated a theorem characterising the integrands with
+ elementary integrals, and we can use the language of algebraic
+ geometry and the techniques of Davenport to yield an algorithm that will
+ always produce the integral if it exists. We explain the difficulty in
+ the way of extending this algorithm, and outline some ways of solving
+ it. Using work of Manin we are able to solve the problem in all cases
+ where the algebraic expressions depend on a parameter as well as on
+ the variable of integration.",
+ paper = "Dave79c.pdf"
+}
+
+@article{Dave82a,
+ author = "Davenport, James H.",
+ title = "The Parallel Risch Algorithm (I)",
+ journal = "Lecture Notes in Computer Science",
+ volume = "144",
+ pages = "144-157",
+ year = "1982",
+ abstract =
+ "In this paper we review the so-called ``parallel Risch'' algorithm for
+ the integration of transcendental functions, and explain what the
+ problems with it are. We prove a positive result in the case of
+ logarithmic integrands.",
+ paper = "Dave82a.pdf"
+}
+
+@article{Dave85b,
+ author = "Davenport, James H. and Trager, Barry M.",
+ title = "The Parallel Risch Algorithm (II)",
+ journal = "ACM TOMS",
+ volume = "11",
+ number = "4",
+ pages = "356-362",
+ year = "1985",
+ abstract =
+ "It is proved that, under the usual restrictions, the denominator of
+ the integral of a purely logarithmic function is the expected one,
+ that is, all factors of the denominator of the integrand have their
+ multiplicity decreased by one. Furthermore, it is determined which new
+ logarithms may appear in the integration.",
+ paper = "Dave85b.pdf"
+}
+
+@article{Dave82b,
+ author = "Davenport, James H.",
+ title = "The Parallel Risch Algorithm (III): use of tangents",
+ journal = "ACM SIGSAM",
+ volume = "16",
+ number = "3",
+ pages = "3-6",
+ year = "1982",
+ abstract =
+ "In this note, we look at the extension to the parallel Risch
+ algorithm (see, e.g., the papers by Norman and Moore [1977], Norman and
+ Davenport [1979], Fitch [1981] or Davenport [1982] for a description
+ of the basic algorithm) which represents trigonometric functions in
+ terms of tangents, rather than in terms of complex exponentials.",
+ paper = "Dave82b.pdf"
+}
+
+@inproceedings{Gedd89,
+ author = "Geddes, K. O. and Stefanus, L. Y.",
+ title = "On the Risch-Norman Integration Method and Its Implementation
+ in MAPLE",
+ booktitle = "Proc. of the ACM-SIGSAM 1989 Int. Symp. on Symbolic and
+ Algebraic Computation",
+ series = "ISSAC '89",
+ year = "1989",
+ isbn = "0-89791-325-6",
+ location = "Portland, Oregon, USA",
+ pages = "212--217",
+ numpages = "6",
+ link = "\url{http://doi.acm.org/10.1145/74540.74567}",
+ doi = "10.1145/74540.74567",
+ acmid = "74567",
+ publisher = "ACM",
+ address = "New York, NY, USA",
+ abstract = "
+ Unlike the Recursive Risch Algorithm for the integration of
+ transcendental elementary functions, the Risch-Norman Method processes
+ the tower of field extensions directly in one step. In addition to
+ logarithmic and exponential field extensions, this method can handle
+ extensions in terms of tangents. Consequently, it allows trigonometric
+ functions to be treated without converting them to complex exponential
+ form. We review this method and describe its implementation in
+ MAPLE. A heuristic enhancement to this method is also presented.",
+ paper = "Gedd89.pdf"
+}
+
+@incollection{Grad80,
+ author = "Gradshteyn, I.S. and Ryzhik, I.M.",
+ title = "Definite Integrals of Elementary Functions",
+ booktitle = "Table of Integrals, Series, and Products",
+ publisher = "Academic Press",
+ year = "1980",
+ comment = "Chapter 3-4"
+}
+
+@article{Hebi15,
+ author = "Hebisch, Waldemar",
+ title = "Integration in terms of exponential integrals and incomplete
+ gamma functions",
+ year = "2015",
+ journal = "ACM Communications in Computer Algebra",
+ volume = "49",
+ number = "3",
+ pages = "98-100",
+ abstract =
+ "Indefinite integration means that given $f$ in some set we want to
+ find $g$ from possibly larger set such that $f = g^\prime$. When $f$
+ and $g$ are required to be elementary functions due to work of among
+ others Risch, Rothstein, Trager, Bronstein (see [1] for references)
+ integration problem is now solved at least in theory. In his thesis
+ Cherry gave algorithm to integrate transcendental elementary functions
+ in terms of exponential integrals. In [2] he gave algorithm to
+ integrate transcendental elementary functions in so called reduced
+ fields in terms of error functions. Knowles [3] and [4] extended this
+ allowing also liouvillian integrands and weakened restrictions on the
+ field containing integrands. We extend previous results allowing
+ incomplete gamma function $\Gamma(a, x)$ with rational $a$. Also, our
+ theory can handle algebraic extensions and is complete jointly (and
+ not only separately for Ei and erf). In purely transcendental case our
+ method should be more efficient and easier to implement than [2]. In
+ fact, it seems that no system currently implements algorithm from [2],
+ while partial implementation of our method in FriCAS works well enough
+ to be turned on by default. With our approach non-reduced case from
+ [2] can be handled easily. We hope that other classes of special
+ functions can be handled in a similar way, in particular irrational
+ case of incomplete gamma function and polylogarithms (however
+ polylogarithms raise tricky theoretical questions)."
+}
+
+@misc{Herm1872,
+ author = "Hermite, E.",
+ title = "Sur l'int\'{e}gration des fractions rationelles",
+ journal = "Nouvelles Annales de Math\'{e}matiques",
+ volume = "11",
+ pages = "145-148",
+ year = "1872"
+}
+
+@misc{Kaha90,
+ author = "Kahan, William",
+ title = "The Persistence of Irrationals in Some Integrals",
+ year = "1990",
+ abstract =
+ "Computer algebra systems are expected to simplify formulas they
+ obtain for symbolic integrals whenever they can, and often they
+ succeed. However, the formulas so obtained may then produce incorrect
+ results for symbolic definite integrals."
+}
+
+@TechReport{Kalt84b,
+ author = "Kaltofen, E.",
+ title = "The Algebraic Theory of Integration",
+ institution = "RPI",
+ address = "Dept. Comput. Sci., Troy, New York",
+ year = "1984",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/84/Ka84_integration.pdf}",
+ paper = "Kalt84b.pdf"
+}
+
+@article{Kano76,
+ author = "Kanoui, Henry",
+ title = "Some Aspects of Symbolic Integration via Predicate Logic
+ Programming",
+ journal = "ACM SIGSAM",
+ volume = "10",
+ number = "4",
+ year = "1976",
+ pages = "29-42",
+ abstract =
+ "During the past years, various algebraic manipulations systems have
+ been described in the literature. Most of them are implemented via
+ ``classic'' programming languages like Fortran, Lisp, PL1 ... We propose
+ an alternative approach: the use of Predicate Logic as a programming
+ language.",
+ paper = "Kano76.pdf"
+}
+
+@article{Kasp80,
+ author = "Kasper, Toni",
+ title = "Integration in Finite Terms: The Liouville Theory",
+ journal = "ACM SIGSAM",
+ volume = "14",
+ number = "4",
+ year = "1980",
+ pages = "2-8",
+ abstract =
+ "The search for elementary antiderivatives leads from classical
+ analysis through modern algebra to contemporary research in computer
+ algorithms.",
+ paper = "Kasp80.pdf"
+}
+
+@article{Know93,
+ author = "Knowles, Paul",
+ title = "Integration of a Class of Transcendental Liouvillian Functions
+ with Error-Functions, Part I",
+ journal = "Journal of Symbolic Computation",
+ volume = "13",
+ number = "5",
+ pages = "525-543",
+ year = "1993",
+ abstract =
+ "This paper gives a decision-procedure for the symbolic integration of
+ a certain class of transcendental Liouvillian functions in terms of
+ elementary functions and error-functions. An example illustrating the
+ use of the decision-procedure is given.",
+ paper = "Know93.pdf"
+}
+
+@article{Know93a,
+ author = "Knowles, Paul",
+ title = "Integration of a Class of Transcendental Liouvillian Functions
+ with Error-Functions, Part II",
+ journal = "Journal of Symbolic Computation",
+ volume = "16",
+ number = "3",
+ year = "1993",
+ pages = "227-239",
+ abstract =
+ "This paper extends the decision procedure for the symbolic
+ integration of a certain class of transcendental Liouvillian functions
+ in terms of elementary functions and error-functions given in Knowles
+ (1992) to allow a much larger class of integrands. Examples
+ illustrating the use of the decision procedure are given.",
+ paper = "Know93a.pdf"
+}
+
+@article{Krag09,
+ author = "Kragler, R.",
+ title = "On Mathematica Program for Poor Man's Integrator Algorithm",
+ journal = "Programming and Computer Software",
+ volume = "35",
+ number = "2",
+ pages = "63-78",
+ year = "2009",
+ issn = "0361-7688",
+ abstract = "
+ In this paper by means of computer experiment we study advantages and
+ disadvantages of the heuristical method of ``parallel integrator''. For
+ this purpose we describe and use implementation of the method in
+ Mathematica. In some cases we compare this implementation with the original
+ one in Maple.",
+ paper = "Krag09.pdf"
+}
+
+@article{Liou1833a,
+ author = "Liouville, Joseph",
+ title = "Premier m\'{e}moire sur la d\'{e}termination des int\'{e}grales
+ dont la valeur est alg\'{e}brique",
+ journal = "Journal de l'Ecole Polytechnique",
+ volume = "14",
+ pages = "124-128",
+ year = "1833"
+}
+
+@article{Liou1833b,
+ author = "Liouville, Joseph",
+ title = "Second m\'{e}moire sur la d\'{e}termination des int\'{e}grales
+ dont la valeur est alg\'{e}brique",
+ journal = "Journal de l'Ecole Polytechnique",
+ volume = "14",
+ pages = "149-193",
+ year = "1833"
+}
+
+@article{Lope99,
+ author = {L\'opez, Jos\'e L.},
+ title = "Asymptotic expansions of integrals: The term-by-term integration
+ method",
+ year = "1999",
+ journal = "Journal of Computational and Applied Mathematics",
+ volume = "102",
+ pages = "181-194",
+ abstract =
+ "The classical term-by-term integration technique used for obtaining
+ asymptotic expansions of integrals requires the integrand to have an
+ uniform asymptotic expansion in the integration variable. A
+ modification of this method is presented in which the uniformity
+ conditions provides the term-by-term integration technique a large
+ range of applicability. As a consequence of this generality, Watson's
+ lemma and the integration by parts technique applied to Laplace's and
+ a special family of Fourier's transforms become corollaries of the
+ term-by-term integration method.",
+ paper = "Lope99.pdf"
+}
+
+@book{Munr53,
+ author = "Munroe, M.E.",
+ title = "Introduction to Measure and Integration",
+ publisher = "Addison-Wesley",
+ year = "1953"
+}
+
+@article{Ngxx74,
+ author = "Ng, Edward W.",
+ title = "Symbolic Integration of a Class of Algebraic Functions",
+ journal = "ACM SIGSAM",
+ volume = "8",
+ number = "3",
+ year = "1974",
+ pages = "99-102",
+ abstract =
+ "In this presentation we describe the outline of an algorithmic
+ approach to handle a class of algebraic integrands. (It is important
+ to stress that for an extended abstract of the present form, we can at
+ best convey the flavor of the approach, with numerous details
+ missing.) We shall label this approach Carlson's algorithm because it
+ is based on a series of analyses rendered by Carlson and his
+ associates in the last ten years (Refs. 2, 3, 4, 8, and 12). The class
+ of integrands is of the form $r(x,y)$, where $y^2$ is a polynomial in $x$,
+ and $r$ a rational function in $x$ and $y$. This is the type of integrand
+ that classically led to the study of elliptic integrals. At first
+ glance this is a rather restricted class of algebraic functions. But
+ in fact many trigonometric and hyperbolic integrands reduce to this
+ form. The richness of this class of integrands is exemplified by a
+ recently published handbook of 3000 integral formulas (Ref. 1). Our
+ proposed approach will cover fifty to seventy percent of the items in
+ the handbook. Furthermore the non-classical approach we shall describe
+ holds great promise of developing to the case where definite integrals
+ can be evaluated in terms of a host of other well-known functions
+ (e.g., Bessel and Legendre).",
+ paper = "Ngxx74.pdf"
+}
+
+@techreport{Ngxx77,
+ author = "Ng, Edward W.",
+ title = "Observations on Approximate Integrations",
+ year = "1977",
+ paper = "Ngxx77.pdf"
+}
+
+@inproceedings{Norm90,
+ author = "Norman, Arthur C.",
+ title = "A Critical-Pair/Completion based Integration Algorithm",
+ booktitle = "ISSAC 90",
+ pages = "201-205",
+ year = "1990",
+ isbn = "0-201-54892-5",
+ abstract =
+ "In 1976 Risch [1] proposed a scheme for finding the integrals of
+ forms built up out of transcendental functions that viewed general
+ functions as rational forms in a suitable differential field and
+ represented the polynomial parts of those forms in a distributed
+ rather than recursive way. By using a data representation where all
+ variables were (more or less) equally important this new method seemed
+ to side-step some of the complications that had appeared in his
+ previous scheme [2] where various side-constraints had to be
+ propagated between the levels present in a tower of separate
+ extensions of differential fields, otherwise seen as levels in
+ recursive data structures.
+
+ An initial implementation of the method was
+ prepared in the context of the SCRATCHPAD/1 algebra system and
+ demonstrated at the 1976 SYMSAC meeting at Yorktown Heights, a
+ subsequent version for Reduce [3][5] came after that, and made it
+ possible to try the method on a large range of integrals. These
+ practical studies showed up some problems with the method and its
+ implementation.
+
+ The presentation given here re-expresses the 1976
+ Risch method in terms of rewrite rules, and thus exposes the major
+ problem it suffers from as a manifestation of the fact that in certain
+ circumstances the set of rewrites generated is not confluent. This
+ difficulty is then attacked using a critical-pair/completion (CPC)
+ approach. For very many integrands it is then easy to see that the
+ initial set of rewrites used in the early implementations [1] and [3]
+ do not need any extension, and this fact explains the high level of
+ competence of the programs involved despite their shaky theoretical
+ foundations. For a further large collection of problems even a simple
+ CPC scheme converges rapidly; when the techniques presented here are
+ applied to the REDUCE integration test suite in all applicable cases a
+ short computation succeeds in completing the set of rewrites and hence
+ gives a secure basis for testing for integrability.
+
+ This paper describes the implementation of the CPC process and
+ discusses current limitations to and possible future extended
+ applications of it.",
+ paper = "Norm90.pdf",
+ keywords = "axiomref"
+}
+
+@article{Renb82,
+ author = "Renbao, Zhong",
+ title = "An Algorithm for Avoiding Complex Numbers in Rational Function
+ Integration",
+ journal = "ACM SIGSAM",
+ volume = "16",
+ number = "3",
+ pages = "30-32",
+ year = "1982",
+ abstract =
+ "Given a proper rational function $A(x)/B(x)$ where $A(x)$ and $B(x)$
+ both are in $R[x]$ with $gcd(A(x), B(x))= 1$, $B(x)$ monic and
+ $deg(A(x)) < deg(B(x))$, from the Hermite algorithm for rational
+ function integration in [3], we obtain
+ \[\int{\frac{A(x)}{B(x)}~dx} = S(x)+\int{\frac{T(x)}{B^*(x)}~dx}\]
+ where $S(x)$ is a rational function
+ which is called the rational part of the integral of $A(x)/B(x)$ in
+ eq. (1), $B^*(x)$ is the greatest square-free factor of $B(x)$, and
+ $T(x)$ is in $R[x]$ with $deg(T(x)) < deg(B^*(x))$. The integral of
+ $T(x)/B^*(x)$ is called the transcendental part of the integral of
+ $A(x)/B(x)$ in eq. (1).",
+ paper = "Renb82.pdf"
+}
+
+@techreport{Risc68,
+ author = "Risch, Robert",
+ title = "On the integration of elementary functions which are built up
+ using algebraic operations",
+ type = "Research Report",
+ number = "SP-2801/002/00",
+ institution = "System Development Corporation, Santa Monica, CA, USA",
+ year = "1968"
+}
+
+@techreport{Risc69a,
+ author = "Risch, Robert",
+ title = "Further results on elementary functions",
+ type = "Research Report",
+ number = "RC-2042",
+ institution = "IBM Research, Yorktown Heights, NY, USA",
+ year = "1969"
+}
+
+@article{Risc69b,
+ author = "Risch, Robert",
+ title = "The problem of integration in finite terms",
+ journal = "Transactions of the American Mathematical Society",
+ volume = "139",
+ year = "1969",
+ pages = "167-189",
+ abstract = "This paper deals with the problem of telling whether a
+ given elementary function, in the sense of analysis, has an elementary
+ indefinite integral.",
+ paper = "Risc69b.pdf"
+}
+
+@article{Risc70,
+ author = "Risch, Robert",
+ title = "The Solution of the Problem of Integration in Finite Terms",
+ journal = "Bull. AMS",
+ year = "1970",
+ issn = "0002-9904",
+ volume = "76",
+ number = "3",
+ pages = "605-609",
+ abstract = "
+ The problem of integration in finite terms asks for an algorithm for
+ deciding whether an elementary function has an elementary indefinite
+ integral and for finding the integral if it does. ``Elementary'' is
+ used here to denote those functions built up from the rational
+ functions using only exponentiation, logarithms, trigonometric,
+ inverse trigonometric and algebraic operations. This vaguely worded
+ question has several precise, but inequivalent formulations. The
+ writer has devised an algorithm which solves the classical problem of
+ Liouville. A complete account is planned for a future publication. The
+ present note is intended to indicate some of the ideas and techniques
+ involved.",
+ paper = "Risc70.pdf"
+}
+
+@article{Risc79,
+ author = "Risch, Robert",
+ title = "Algebraic properties of the elementary functions of analysis",
+ journal = "American Journal of Mathematics",
+ volume = "101",
+ pages = "743-759",
+ year = "1979"
+}
+
+@article{Rose72,
+ author = "Rosenlicht, Maxwell",
+ title = "Integration in finite terms",
+ journal = "American Mathematical Monthly",
+ year = "1972",
+ volume = "79",
+ pages = "963-972",
+ paper = "Rose72.pdf"
+}
+
+@article{Ro76a,
+ author = "Rothstein, Michael and Caviness, Bob F.",
+ title = "A structure theorem for exponential and primitive functions:
+ a preliminary report",
+ journal = "ACM Sigsam Bulletin",
+ volume = "10",
+ number = "4",
+ year = "1976",
+ abstract =
+ "In this paper a generalization of the Risch Structure Theorem is reported.
+ The generalization applies to fields $F(t_1,\ldots,t_n)$ where $F$
+ is a differential field (in our applications $F$ will be a finitely
+ generated extension of $Q$, the field of rational numbers) and each $t_i$
+ is either algebraic over $F_{i-1}=F(t_1,\ldots,t_{i-1})$, is an
+ exponential of an element in $F_{i-1}$, or is an integral of an element
+ in $F_{i-1}$. If $t_i$ is an integral and can be expressed using
+ logarithms, it must be so expressed for the generalized structure
+ theorem to apply.",
+ paper = "Ro76a.pdf"
+}
+
+@article{Roth77,
+ author = "Rothstein, Michael",
+ title = "A new algorithm for the integration of exponential and
+ logarithmic functions",
+ journal = "Proceedings of the 1977 MACSYMA Users Conference",
+ year = "1977",
+ pages = "263-274",
+ publisher = "NASA Pub CP-2012"
+}
+
+@article{Scho89,
+ author = "Schou, Wayne C. and Broughan, Kevin A.",
+ title = "The Risch Algorithms of MACSYMA and SENAC",
+ journal = "ACM SIGSAM",
+ volume = "23",
+ number = "3",
+ year = "1989",
+ abstract =
+ "The purpose of this paper is to report on a computer implementation
+ of the Risch algorithm for the symbolic integration of rational
+ functions containing nested exponential and logarithms. For the class
+ of transcendental functions, the Risch algorithm [4] represents a
+ practical method for symbolic integration. Because the Risch algorithm
+ describes a decision procedure for transcendental integration it is an
+ ideal final step in an integration package. Although the decision
+ characteristic cannot be fully realised in a computer system, because
+ of major algebraic problems such as factorisation, zero-equivalence
+ and simplification, the potential advantages are considerable.",
+ paper = "Scho89.pdf"
+}
+
+@article{Sing85,
+ author = "Singer, Michael F. and Saunders, B. David and Caviness, Bob F.",
+ title = "An extension of Liouville's theorem on integration in finite terms",
+ journal = "SIAM J. of Comp.",
+ volume = "14",
+ pages = "965-990",
+ year = "1985",
+ link = "\url{http://www4.ncsu.edu/~singer/papers/singer_saunders_caviness.pdf}",
+ abstract =
+ "In Part 1 of this paper, we give an extension of Liouville's Theorem
+ and give a number of examples which show that integration with special
+ functions involves some phenomena that do not occur in integration
+ with the elementary functions alone. Our main result generalizes
+ Liouville's Theorem by allowing, in addition to the elementary
+ functions, special functions such as the error function, Fresnel
+ integrals and the logarithmic integral (but not the dilogarithm or
+ exponential integral) to appear in the integral of an elementary
+ function. The basic conclusion is that these functions, if they
+ appear, appear linearly. We give an algorithm which decides if an
+ elementary function, built up using only exponential functions and
+ rational operations has an integral which can be expressed in terms of
+ elementary functions and error functions.",
+ paper = "Sing85.pdf"
+}
+
+@article{Smit83,
+ author = "Smith, Paul and Sterling, Leon",
+ title = "Of Integration by Man and Machine",
+ journal = "ACM SIGSAM",
+ volume = "17",
+ number = "3-4",
+ year = "1983",
+ abstract =
+ "We describe a symbolic integration problem arising from an
+ application in engineering. A solution is given and compared with the
+ solution generated by the REDUCE integration package running at
+ Cambridge. Nontrivial symbol manipulation, particularly
+ simplification, is necessary to reconcile the answers.",
+ paper = "Smit83.pdf"
+}
+
+@misc{Temmxx,
+ author = "Temme, N.M.",
+ title = "Uniform Asymptotic Expansions of Integrals",
+ abstract =
+ "The purpose of the paper is to give an account of several aspects of
+ uniform asymptotic expansions of integrals. We give examples of
+ standard forms, the role of critical points and methods to construct
+ the expansions."
+}
+
+@article{Temm95,
+ author = "Temme, N.M.",
+ title = "Uniform asymptotic expansions of integrals: a selection of
+ problems",
+ journal = "Journal of Computational and Applied Mathematics",
+ volume = "65",
+ number = "1-3",
+ year = "1995",
+ pages = "395-417",
+ abstract =
+ "On the occasion of the conference we mention examples of Stieltjes'
+ work on asymptotics of special functions. The remaining part of the
+ paper gives a selection of asymptotic methods for integrals, in
+ particular on uniform approximations. We discuss several “standard”
+ problems and examples, in which known special functions (error
+ functions, Airy functions, Bessel functions, etc.) are needed to
+ construct uniform approximations. Finally, we discuss the recent
+ interest and new insights in the Stokes phenomenon. An extensive
+ bibliography on uniform asymptotic methods for integrals is given,
+ together with references to recent papers on the Stokes phenomenon for
+ integrals and related topics.",
+ paper = "Temm95.pdf"
+}
+
+@mastersthesis{Tere09,
+ author = "Terelius, Bjorn",
+ title = "Symbolic Integration",
+ school = "Royal Institute of Technology",
+ address = "Stockholm, Sweden",
+ year = "2009",
+ abstract =
+ "Symbolic integration is the problem of expressing an indefinite integral
+ $\int{f}$ of a given function $f$ as a finite combination $g$ of elementary
+ functions, or more generally, to determine whether a certain class of
+ functions contains an element $g$ such that $g^\prime = f$.
+
+ In the first part of this thesis, we compare different algorithms for
+ symbolic integration. Specifically, we review the integration rules
+ taught in calculus courses and how they can be used systematically to
+ create a reasonable, but somewhat limited, integration method. Then we
+ present the differential algebra required to prove the transcendental
+ cases of Risch's algorithm. Risch's algorithm decides if the integral
+ of an elementary function is elementary and if so computes it. The
+ presentation is mostly self-contained and, we hope, simpler than
+ previous descriptions of the algorithm. Finally, we describe
+ Risch-Norman's algorithm which, although it is not a decision
+ procedure, works well in practice and is considerably simpler than the
+ full Risch algorithm.
+
+ In the second part of this thesis, we briefly discuss an
+ implementation of a computer algebra system and some of the
+ experiences it has given us. We also demonstrate an implementation of
+ the rule-based approach and how it can be used, not only to compute
+ integrals, but also to generate readable derivations of the results.",
+ paper = "Tere09.pdf"
+}
+
+@article{Trag76,
+ author = "Trager, Barry",
+ title = "Algebraic factoring and rational function integration",
+ journal = "Proceedings of SYMSAC'76",
+ year = "1976",
+ pages = "219-226",
+ abstract = "
+ This paper presents a new, simple, and efficient algorithm for
+ factoring polynomials in several variables over an algebraic number
+ field. The algorithm is then used iteratively to construct the
+ splitting field of a polynomial over the integers. Finally the
+ factorization and splitting field algorithms are applied to the
+ problem of determining the transcendental part of the integral of a
+ rational function. In particular, a constructive procedure is given
+ for finding a least degree extension field in which the integral can
+ be expressed.",
+ paper = "Trag76.pdf"
+}
+
+@phdthesis{Trag84,
+ author = "Trager, Barry",
+ title = "Integration of Algebraic Functions",
+ school = "MIT",
+ year = "1984",
+ link = "\url{http://www.dm.unipi.it/pages/gianni/public_html/Alg-Comp/thesis.pdf}",
+ abstract = "
+ We show how the ``rational'' approach for integrating algebraic
+ functions can be extended to handle elementary functions. The
+ resulting algorithm is a practical decision procedure for determining
+ whether a given elementary function has an elementary antiderivative,
+ and for computing it if it exists.",
+ paper = "Trag84.pdf"
+}
+
+@phdthesis{Wang71,
+ author = "Wang, Paul S.",
+ title = "Evaluation of Definite Integrals by Symbolic Manipulation",
+ school = "MIT",
+ year = "1971",
+ link =
+ "\url{http://publications.csail.mit.edu/lcs/pubs/pdf/MIT-LCS-TR-092.pdf}",
+ comment = "MIT/LCS/TR-92",
+ abstract =
+ "A heuristic computer program for the evaluation of real definite
+ integrals of elementary functions is described. This program, called
+ WANDERER, (WANg's DEfinite integRal EvaluatoR), evaluates many proper
+ and improper integrals. The improper integrals may have a finite or
+ infinite range of integration. Evaluation by contour integration and
+ residue theory is among the methods used. A program called DELIMITER
+ (DEfinitive LIMIT EvaluatoR) is used for the limit computations needed
+ in evaluating some definite integrals. DELIMITER is a heuristic
+ program written for computing limits of real or complex analytic
+ functions. For real functions of a real variable, one-sided as well
+ as two-sided limits can be computed. WANDERER and DELIMITER have
+ been implemented in the MACSYMA system, a symbolic and algebraic
+ manipulation system being developed at Project MAC, MIT. A typical
+ problem in applied mathematics, namely asymptotic analysis of a
+ definite integral, is solved using MACSYMA to demonstrate the
+ usefulness of such a system and the facilities provided by WANDERER.",
+ paper = "Wang71.pdf"
+}
+
+@misc{Dele06,
+ author = "Delenclos, Jonathon and Leroy, Andr\'e",
+ title = "Noncommutative Symmetric functions and $W$-polynomials",
+ link = "\url{http://arxiv.org/pdf/math/0606614.pdf}",
+ algebra = "\newline\refto{category LORER LeftOreRing}",
+ abstract = "
+ Let $K$, $S$, $D$ be a division ring, an endomorphism and a
+ $S$-derivation of $K$, respectively. In this setting we introduce
+ generalized noncommutative symmetric functions and obtain Vi\'ete
+ formula and decompositions of different operators. $W$-polynomials
+ show up naturally, their connections with $P$-independency. Vandermonde
+ and Wronskian matrices are briefly studied. The different linear
+ factorizations of $W$-polynomials are analysed. Connections between
+ the existence of LLCM (least left common multiples) of monic linear
+ polynomials with coefficients in a ring and the left duo property are
+ established at the end of the paper.",
+ paper = "Dele06.pdf"
+}
+
+@mastersthesis{Bohl08,
+ author = "Bohler, Per Reidar",
+ title = "Special number field sieve",
+ school = "Norwegian University of Science and Technology",
+ year = "2008",
+ link = "\url{http://www.diva-portal.org/smash/get/diva2:348611/FULLTEXT01.pdf}",
+ abstract =
+ "Integer factorization is a problem not yet solved for arbitrary integers.
+ Huge integers are therefore widely used for encrypting, e.g. in the RSA
+ encryption scheme. The special number field sieve holds the current
+ factorization record for factoring the number $2^{1039}+1$. The
+ algorithms depends on arithmetic in an algebraic number fields and is
+ a further development from the quadratic sieve factoring algorithm.
+ We therefore present the quadratic sieve first. Then the special number
+ field is described. The key concepts is evaluated one by one.
+ Everything is illustrated with the corresponding parts of an example
+ factorization. The running time of the special number field sieve is
+ then evaluated and compared against that of the quadratic sieve. The
+ special number field sieve only applies to integers of a special form,
+ but a generalization has been made, the general number field sieve. It
+ is slower but all estimates suggests it is asymptotically faster than
+ all other existing general purpose algorithms.",
+ paper = "Bohl08.pdf"
+}
+
+@misc{Case16,
+ author = "Case, Michael",
+ title = "A Beginner's Guide to the General Number Field Sieve",
+ year = "2016",
+ link = "\url{http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.219.2389}",
+ paper = "Case16.pdf"
+}
+
+@misc{Hill11,
+ author = "Hill, Joshua E.",
+ title = "The Number Field Sieve: An Extended Abstract",
+ year = "2011",
+ link = "\url{http://www.untruth.org/~josh/math/NFS.pdf}",
+ paper = "Hill11.pdf"
+}
+
+@InProceedings{Kalt89d,
+ author = "Kaltofen, E. and Valente, T. and Yui, N.",
+ title = "An improved {Las Vegas} primality test",
+ booktitle = "Proc. 1989 Internat. Symp. Symbolic Algebraic Comput.",
+ pages = "26--33",
+ year = "1989",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/89/KVY89.pdf}",
+ paper = "Kalt89d.pdf"
+}
+
+@InCollection{Kalt91b,
+ author = "Kaltofen, E. and Yui, N.",
+ editor = "D. V. Chudnovsky and G. V. Chudnovsky and H. Cohn and
+ M. B. Nathanson",
+ title = "Explicit construction of {Hilbert} class fields of imaginary
+ quadratic fields by integer lattice reduction",
+ booktitle = "Number Theory New York Seminar 1989--1990",
+ pages = "150--202",
+ publisher = "Springer-Verlag",
+ year = "1991",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/91/KaYui91.pdf}",
+ paper = "Kalt91b.pdf"
+}
+
+@InProceedings{Kalt84a,
+ author = "Kaltofen, E. and Yui, N.",
+ title = "Explicit construction of the {Hilbert} class field of imaginary
+ quadratic fields with class number 7 and 11",
+ booktitle = "Proc. EUROSAM '84",
+ pages = "310--320",
+ year = "1984",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/84/KaYui84_eurosam.ps.gz}",
+ paper = "Kalt84a.ps"
+}
+
+@article{Pome94,
+ author = "Pomerance, Carl",
+ title = "The Number Field Sieve",
+ journal = "Proc. Symposia in Applied Mathematics",
+ volume = "48",
+ year = "1994",
+ abstract =
+ "The most exciting recent development in the integer factorization
+ problem is the number field sieve. It has had some spectacular successes
+ with integers in certain special forms, most notably the factorization in
+ 1990 of the 155 decimal digit number $2^{512}+1$. For arbitrary hard
+ integers, it now appears to threaten the quadratic sieve as the algorithm
+ of choice. In this paper the number field sieve, and the ideas behind it,
+ are described",
+ paper = "Pome94.pdf"
+}
+
+@misc{Sho08,
+ author = "Shoup, Victor",
+ title = "A Computational Introduction to Number Theory",
+ link = "\url{http://shoup.net/ntb/ntb-v2.pdf}",
+ paper = "Sho08.pdf"
+}
+
+@InProceedings{Kalt07a,
+ author = "Kaltofen, Erich and Yang, Zhengfeng and Zhi, Lihong",
+ title = "On probabilistic analysis of randomization in hybrid
+ symbolic-numeric algorithms",
+ year = "2007",
+ booktitle = "Proc. 2007 Internat. Workshop on Symbolic-Numeric Comput.",
+ pages = "11--17",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/07/KYZ07.pdf}",
+ paper = "Kalt07a.pdf"
+}
+
+@InProceedings{Kalt07b,
+ author = "Kaltofen, Erich and Yang, Zhengfeng",
+ title = "On Exact and Approximate Interpolation of Sparse
+ Rational Functions",
+ year = "2007",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'07",
+ pages = "203--210",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/07/KaYa07.pdf}",
+ paper = "Kalt07b.pdf"
+}
+
+@Article{Gies03,
+ author = "Giesbrecht, Mark and Kaltofen, Erich and Lee, Wen-shin",
+ title = "Algorithms for Computing Sparsest Shifts of Polynomials in
+ Power, {Chebychev}, and {Pochhammer} Bases",
+ year = "2003",
+ journal = "Journal of Symbolic Computation",
+ volume = "36",
+ number = "3--4",
+ pages = "401--424",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/03/GKL03.pdf}",
+ paper = "Gies03.pdf"
+}
+
+@InProceedings{Gies02,
+ author = "Giesbrecht, Mark and Kaltofen, Erich and Lee, Wen-shin",
+ title = "Algorithms for Computing the Sparsest Shifts for Polynomials via the
+ {Berlekamp}/{Massey} Algorithm",
+ booktitle = "Proc. 2002 Internat. Symp. Symbolic Algebraic Comput.",
+ pages = "101--108",
+ year = "2002",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/02/GKL02.pdf}",
+ paper = "Gies02.pdf"
+}
+
+@Article{Kalt03b,
+ author = "Kaltofen, Erich and Lee, Wen-shin",
+ title = "Early Termination in Sparse Interpolation Algorithms",
+ year = "2003",
+ journal = "Journal of Symbolic Computation",
+ volume = "36",
+ number = "3--4",
+ pages = "365--400",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/03/KL03.pdf}",
+ paper = "Kalt03b.pdf"
+}
+
+@InProceedings{Kalt00a,
+ author = "Kaltofen, E. and Lee, W.-s. and Lobo, A.A.",
+ title = "Early termination in {Ben-Or/Tiwari} sparse interpolation
+ and a hybrid of {Zippel}'s algorithm",
+ booktitle = "Proc. 2000 Internat. Symp. Symbolic Algebraic Comput.",
+ pages = "192--201",
+ year = "2000",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/2K/KLL2K.pdf}",
+ paper = "Kalt00a.pdf"
+}
+
+@InProceedings{Kalt10b,
+ author = "Kaltofen, Erich L.",
+ title = "Fifteen years after {DSC} and {WLSS2} {What} parallel
+ computations {I} do today [{Invited} Lecture at {PASCO} 2010]",
+ year = "2010",
+ booktitle = "Proc. 2010 Internat. Workshop on Parallel Symbolic Comput.",
+ pages = "10--17",
+ month = "July",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/10/Ka10_pasco.pdf}",
+ paper = "Kalt10b.pdf"
+}
+
+@InProceedings{Kalt90,
+ author = "Kaltofen, E. and Lakshman, Y.N. and Wiley, J.M.",
+ editor = "S. Watanabe and M. Nagata",
+ title = "Modular rational sparse multivariate polynomial interpolation",
+ booktitle = "Proc. 1990 Internat. Symp. Symbolic Algebraic Comput.",
+ pages = "135--139",
+ publisher = "ACM Press",
+ year = "1990",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/90/KLW90.pdf}",
+ paper = "Kalt90.pdf"
+}
+
+@InProceedings{Kalt88a,
+ author = "Kaltofen, E. and Yagati, Lakshman",
+ title = "Improved sparse multivariate polynomial interpolation algorithms",
+ booktitle = "Symbolic Algebraic Comput. Internat. Symp. ISSAC '88 Proc.",
+ pages = "467--474",
+ year = "1988",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/88/KaLa88.pdf}",
+ paper = "Kalt88a.pdf"
+}
+
+@InCollection{Gren11,
+ author = "Grenet, Bruno and Kaltofen, Erich L. and Koiran, Pascal
+ and Portier, Natacha",
+ title = "Symmetric Determinantal Representation of Formulas and Weakly
+ Skew Circuits",
+ booktitle = "Randomization, Relaxation, and Complexity in Polynomial
+ Equation Solving",
+ year = "2011",
+ editor = "Leonid Gurvits and Philippe P\'{e}bay and J. Maurice Rojas
+ and David Thompson",
+ pages = "61--96",
+ publisher = "American Mathematical Society",
+ address = "Providence, Rhode Island, USA",
+ isbn = "978-0-8218-5228-6",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/10/GKKP10.pdf}",
+ paper = "Gren11.pdf"
+}
+
+@InProceedings{Kalt08a,
+ author = "Kaltofen, Erich and Koiran, Pascal",
+ title = "Expressing a Fraction of Two Determinants as a Determinant",
+ year = "2008",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'08",
+ pages = "141--146",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/08/KaKoi08.pdf}",
+ paper = "Kalt08a.pdf"
+}
+
+@Article{Hitz95,
+ author = "Hitz, M.A. and Kaltofen, E.",
+ title = "Integer division in residue number systems",
+ journal = "IEEE Trans. Computers",
+ year = "1995",
+ volume = "44",
+ number = "8",
+ pages = "983--989",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/95/HiKa95.pdf}",
+ paper = "Hitz95.pdf"
+}
+
+@InProceedings{Kalt92a,
+ author = "Kaltofen, E.",
+ title = "On computing determinants of matrices without divisions",
+ booktitle = "Proc. 1992 Internat. Symp. Symbolic Algebraic Comput.",
+ pages = "342--349",
+ year = "1992",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/92/Ka92_issac.pdf}",
+ paper = "Kalt92a.pdf"
+}
+
+@Article{Cant91,
+ author = "Cantor, D.G. and Kaltofen, E.",
+ title = "On fast multiplication of polynomials over arbitrary algebras",
+ journal = "Acta Inform.",
+ year = "1991",
+ volume = "28",
+ number = "7",
+ pages = "693--701",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/91/CaKa91.pdf}",
+ paper = "Cant91.pdf"
+}
+
+@Article{Kalt88b,
+ author = "Kaltofen, E.",
+ title = "Greatest common divisors of polynomials given by
+ straight-line programs",
+ journal = "J. ACM",
+ year = "1988",
+ volume = "35",
+ number = "1",
+ pages = "231--264",
+ link =
+ "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/88/Ka88_jacm.pdf}",
+ abstract =
+ "Algorithms on multivariate polynomials represented by straight-line
+ programs are developed. First, it is shown that most algebraic
+ algorithms can be probabilistically applied to data that are given by
+ a straight-line computation. Testing such rational numeric data for
+ zero, for instance, is facilitated by random evaluations modulo random
+ prime numbers. Then, auxiliary algorithms that determine the
+ coefficients of a multivariate polynomial in a single variable are
+ constructed. The first main result is an algorithm that produces the
+ greatest common divisor of the input polynomials, all in straight-line
+ representation. The second result shows how to find a straight-line
+ program for the reduced numerator and denominator from one for the
+ corresponding rational function. Both the algorithm for that
+ construction and the greatest common divisor algorithm are in random
+ polynomial time for the usual coefficient fields and output a
+ straight-line program, which with controllably high probability
+ correctly determines the requested answer. The running times are
+ polynomial functions in the binary input size, the input degrees as
+ unary numbers, and the logarithm of the inverse of the failure
+ probability. The algorithm for straight-line programs for the
+ numerators and denominators of rational functions implies that every
+ degree-bounded rational function can be computed fast in parallel,
+ that is, in polynomial size and polylogarithmic depth.",
+ paper = "Kalt88b.pdf"
+}
+
+@InProceedings{Bern97a,
+ author = "Bernardin, Laurent and Monagan, Michael B.",
+ title = "Efficient multivariate factorization over finite fields",
+ booktitle = "Applied algebra, algebraic algorithms and error-correcting
+ codes",
+ series = "AAECC-12",
+ year = "1997",
+ location = "Toulouse, France",
+ publisher = "Springer",
+ pages = "15-28",
+ link = "\url{http://www.cecm.sfu.ca/~monaganm/papers/AAECC.pdf}",
+ abstract =
+ "We describe the Maple implementation of multivariate factorization
+ over general finite fields. Our first implementation is available in
+ Maple V Release 3. We give selected details of the algorithms and show
+ several ideas that were used to improve its efficiency. Most of the
+ improvements presented here are incorporated in Maple V Release 4. In
+ particular, we show that we needed a general tool for implementing
+ computations in GF$(p^k)[x_1,x_2,\cdots,x_v]$. We also needed an
+ efficient implementation of our algorithms in $\mathbb{Z}_p[y][x]$
+ because any multivariate factorization may depend on several bivariate
+ factorizations. The efficiency of our implementation is illustrated by
+ the ability to factor bivariate polynomials with over a million
+ monomials over a small prime field.",
+ paper = "Bern97a.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Diaz95,
+ author = "Diaz, A. and Kaltofen, E.",
+ title = "On computing greatest common divisors with polynomials given by
+ black boxes for their evaluation",
+ booktitle = "Proc. 1995 Internat. Symp. Symbolic Algebraic Comput.",
+ pages = "232--239",
+ year = "1995",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/95/DiKa95.ps.gz}",
+ paper = "Diaz95.ps"
+}
+
+@article{Gath01,
+ author = "von zur Gathen, Joachim and Panario, Daniel",
+ title = "Factoring Polynomials Over Finite Fields: A Survey",
+ journal = "J. Symbolic Computation",
+ year = "2001",
+ volume = "31",
+ pages = "3-17",
+ link = "\url{http://people.csail.mit.edu/dmoshdov/courses/codes/poly-factorization.pdf}",
+ keywords = "survey",
+ abstract =
+ "This survey reviews several algorithms for the factorization of
+ univariate polynomials over finite fields. We emphasize the main ideas
+ of the methods and provide an up-to-date bibliography of the problem.
+ This paper gives algorithms for {\sl squarefree factorization},
+ {\sl distinct-degree factorization}, and {\sl equal-degree factorization}.
+ The first and second algorithms are deterministic, the third is
+ probabilistic.",
+ paper = "Gath01.pdf"
+}
+
+@article{Gian88,
+ author = "Gianni, Patrizia and Trager, Barry and Zacharias, Gail",
+ title = "Groebner Bases and Primary Decomposition of Polynomial Ideals",
+ journal = "J. Symbolic Computation",
+ volume = "6",
+ pages = "149-167",
+ year = "1988",
+ link = "\url{http://www.sciencedirect.com/science/article/pii/S0747717188800403/pdf?md5=40c29b67947035884904fd4597ddf710&pid=1-s2.0-S0747717188800403-main.pdf}",
+ algebra = "\newline\refto{package IDECOMP IdealDecompositionPackage}",
+ paper = "Gian88.pdf"
+}
+
+@article{Gian96,
+ author = "Gianni, P. and Trager, B.",
+ title = "Square-free algorithms in positive characteristic",
+ journal =
+ "J. of Applicable Algebra in Engineering, Communication and Computing",
+ volume = "7",
+ pages = "1-14",
+ year = "1996"
+}
+
+@PhdThesis{Kalt82,
+ author = "Kaltofen, E.",
+ title = "On the complexity of factoring polynomials with integer
+ coefficients",
+ school = "RPI",
+ address = "Troy, N. Y.",
+ year = "1982",
+ month = "December",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/82/Ka82_thesis.pdf}",
+ paper = "Kalt82.pdf"
+}
+
+@Article{Gath85,
+ author = "{von zur Gathen}, Joachim and Kaltofen, E.",
+ title = "Factoring sparse multivariate polynomials",
+ journal = "J. Comput. System Sci.",
+ year = "1985",
+ volume = "31",
+ pages = "265--287",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/85/GaKa85_mathcomp.ps.gz}",
+ paper = "Gath85.ps"
+}
+
+@Article{Gath85b,
+ author = "{von zur Gathen}, Joachim and Kaltofen, E.",
+ title = "Polynomial-Time Factorization of Multivariate Polynomials over
+ Finite Fields",
+ journal = "Math. Comput.",
+ year = "1985",
+ volume = "45",
+ pages = "251-261",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/85/GaKa85_mathcomp.ps.gz}",
+ paper = "Gath85.ps",
+ abstract =
+ "We present a probabilistic algorithm that finds the irreducible
+ factors of a bivariate polynomial with coefficients from a finite
+ field in time polynomial in the input size, i.e. in the degree of the
+ polynomial and $log$(cardinality of field). The algorithm generalizes
+ to multivariate polynomials and has polynomial running time for
+ densely encoded inputs. Also a deterministic version of the algorithm
+ is discussed whose running time is polynomial in the degree of the
+ input polynomial and the size of the field."
+}
+
+@InCollection{Kalt11c,
+ author = "Kaltofen, Erich and Lecerf, Gr{\'e}goire",
+ title = "Section 11.5. {Factorization} of multivariate polynomials",
+ booktitle = "Handbook of Finite Fields",
+ publisher = "Springer",
+ pages = "382--392",
+ year = "2011",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/11/KL11.pdf}",
+ paper = "Kalt11c.pdf"
+}
+
+@InProceedings{Kalt05b,
+ author = "Kaltofen, Erich and Koiran, Pascal",
+ title = "On the complexity of factoring bivariate supersparse
+ (lacunary) polynomials",
+ year = "2005",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'05",
+ pages = "208--215",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/05/KaKoi05.pdf}",
+ paper = "Kalt05b.pdf"
+}
+
+@InProceedings{Kalt06a,
+ author = "Kaltofen, Erich and Koiran, Pascal",
+ title = "Finding Small Degree Factors of Multivariate Supersparse
+ (Lacunary) Polynomials Over Algebraic Number Fields",
+ year = "2006",
+ booktitle = "Internat. Symp. Symbolic Algebraic Comput. ISSAC'06",
+ pages = "162--168",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/06/KaKoi06.pdf}",
+ paper = "Kalt06a.pdf"
+}
+
+@InProceedings{Kalt97a,
+ author = "Kaltofen, E. and Shoup, V.",
+ title = "Fast polynomial factorization over high algebraic extensions of
+ finite fields",
+ booktitle = "Proc. 1997 Internat. Symp. Symbolic Algebraic Comput.",
+ year = "1997",
+ pages = "184--188",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/97/KaSh97.pdf}",
+ paper = "Kalt97a.pdf"
+}
+
+@Article{Kalt98,
+ author = "Kaltofen, E. and Shoup, V.",
+ title = "Subquadratic-time factoring of polynomials over finite fields",
+ journal = "Math. Comput.",
+ month = "July",
+ year = "1998",
+ volume = "67",
+ number = "223",
+ pages = "1179--1197",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/98/KaSh98.pdf}",
+ paper = "Kalt98.pdf"
+}
+
+@InProceedings{Kalt95a,
+ author = "Kaltofen, E. and Shoup, V.",
+ title = "Subquadratic-time factoring of polynomials over finite fields",
+ booktitle = "Proc. 27th Annual ACM Symp. Theory Comput.",
+ year = "1995",
+ publisher = "ACM Press",
+ address = "New York, N.Y.",
+ pages = "398--406",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/95/KaSh95.ps.gz}",
+ paper = "Kalt95a.ps"
+}
+
+@InProceedings{Kalt88,
+ author = "Kaltofen, E. and Trager, B.",
+ title = "Computing with polynomials given by black boxes for their
+ evaluations: Greatest common divisors, factorization, separation of
+ numerators and denominators",
+ booktitle = "Proc. 29th Annual Symp. Foundations of Comp. Sci.",
+ pages = "296--305",
+ year = "1988",
+ organization = "IEEE",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/88/focs88.ps.gz}",
+ paper = "Kalt88.ps"
+}
+
+@InProceedings{Kalt85b,
+ author = "Kaltofen, E.",
+ title = "Computing with polynomials given by straight-line programs {II};
+ sparse factorization",
+ booktitle = "Proc. 26th Annual Symp. Foundations of Comp. Sci.",
+ year = "1985",
+ pages = "451--458",
+ organization = "IEEE",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/85/Ka85_focs.ps.gz}",
+ paper = "Kalt85b.ps"
+}
+
+@InProceedings{Kalt86,
+ author = "Kaltofen, E.",
+ title = "Uniform closure properties of p-computable functions",
+ booktitle = "Proc. 18th Annual ACM Symp. Theory Comput.",
+ year = "1986",
+ pages = "330--337",
+ organization = "ACM",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/86/Ka86_stoc.pdf}",
+ paper = "Kalt86.pdf"
+}
+
+@InProceedings{Kalt87b,
+ author = "Kaltofen, E.",
+ title = "Single-factor Hensel lifting and its application to the
+ straight-line complexity of certain polynomials",
+ booktitle = "Proc. 19th Annual ACM Symp. Theory Comput.",
+ year = "1987",
+ pages = "443--452",
+ organization = "ACM",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/87/Ka87_stoc.pdf}",
+ paper = "Kalt87b.pdf"
+}
+
+@InCollection{Kalt89,
+ author = "Kaltofen, E.",
+ editor = "S. Micali",
+ title = "Factorization of polynomials given by straight-line programs",
+ booktitle = "Randomness and Computation",
+ pages = "375--412",
+ publisher = "JAI Press Inc.",
+ year = "1989",
+ volume = "5",
+ series = "Advances in Computing Research",
+ address = "Greenwich, Connecticut",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/89/Ka89_slpfac.pdf}",
+ paper = "Kalt89.pdf"
+}
+
+@Article{Gao04,
+ author = "Gao, Shuhong and Kaltofen, E. and Lauder, A.",
+ title = "Deterministic distinct degree factorization for polynomials
+ over finite fields",
+ year = "2004",
+ journal = "Journal of Symbolic Computation",
+ volume = "38",
+ number = "6",
+ pages = "1461--1470",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/01/GKL01.pdf}",
+ paper = "Gao04.pdf"
+}
+
+@Article{Kalt87c,
+ author = "Kaltofen, E.",
+ title = "Deterministic irreducibility testing of polynomials over
+ large finite fields",
+ journal = "Journal of Symbolic Computation",
+ year = "1987",
+ volume = "4",
+ pages = "77--82",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/87/Ka87_jsc.ps.gz}",
+ paper = "Kalt87c.ps"
+}
+
+@Article{Kalt95b,
+ author = "Kaltofen, E.",
+ title = "Effective {Noether} irreducibility forms and applications",
+ journal = "J. Comput. System Sci.",
+ year = "1995",
+ volume = "50",
+ number = "2",
+ pages = "274--295",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/95/Ka95_jcss.pdf}",
+ paper = "Kalt95b.pdf"
+}
+
+@Article{Kalt85a,
+ author = "Kaltofen, E.",
+ title = "Fast parallel absolute irreducibility testing",
+ journal = "Journal of Symbolic Computation",
+ year = "1985",
+ volume = "1",
+ number = "1",
+ pages = "57--67",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/85/Ka85_jsc.pdf}",
+ paper = "Kalt85a.pdf"
+}
+
+@Article{Gath85a,
+ author = "{von zur Gathen}, Joachim and Kaltofen, E.",
+ title = "Factoring multivariate polynomials over finite fields",
+ journal = "Math. Comput.",
+ year = "1985",
+ volume = "45",
+ pages = "251--261",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/85/GaKa85_mathcomp.ps.gz}",
+ paper = "Gath85a.ps"
+}
+
+@Article{Kalt85e,
+ author = "Kaltofen, E.",
+ title = "Polynomial-time reductions from multivariate to bi- and univariate
+ integral polynomial factorization",
+ journal = "{SIAM} J. Comput.",
+ year = "1985",
+ volume = "14",
+ number = "2",
+ pages = "469--489",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/85/Ka85_sicomp.pdf}",
+ paper = "Kalt85e.pdf"
+}
+
+@InProceedings{Kalt82a,
+ author = "Kaltofen, E.",
+ title = "A polynomial-time reduction from bivariate to univariate
+ integral polynomial factorization",
+ booktitle = "Proc. 23rd Annual Symp. Foundations of Comp. Sci.",
+ year = "1982",
+ pages = "57--64",
+ organization = "IEEE",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/82/Ka82_focs.pdf}",
+ paper = "Kalt82a.pdf"
+}
+
+@InProceedings{Kalt03,
+ author = "Kaltofen, Erich",
+ title = "Polynomial Factorization: a Success Story",
+ year = "2003",
+ booktitle = "Symbolic Algebraic Comput. Internat. Symp. ISSAC 2003 Proc.",
+ pages = "3--4",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/03/Ka03.pdf}",
+ keywords = "survey",
+ paper = "Kalt03.pdf"
+}
+
+@InProceedings{Kalt92b,
+ author = "Kaltofen, E.",
+ title = "Polynomial factorization 1987-1991",
+ booktitle = "Proc. LATIN '92",
+ editor = "I. Simon",
+ series = "Lect. Notes Comput. Sci.",
+ volume = "583",
+ pages = "294--313",
+ publisher = "Springer-Verlag",
+ year = "1992",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/92/Ka92_latin.pdf}",
+ keywords = "survey",
+ paper = "Kalt92b.pdf"
+}
+
+@InCollection{Kalt90c,
+ author = "Kaltofen, E.",
+ editor = "D. V. Chudnovsky and R. D. Jenks",
+ title = "Polynomial Factorization 1982-1986",
+ booktitle = "Computers in Mathematics",
+ pages = "285--309",
+ publisher = "Marcel Dekker, Inc.",
+ year = "1990",
+ volume = "125",
+ series = "Lecture Notes in Pure and Applied Mathematics",
+ address = "New York, N. Y.",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/90/Ka90_survey.ps.gz}",
+ keywords = "survey",
+ paper = "Kalt90c.ps"
+}
+
+@InCollection{Kalt82b,
+ author = "Kaltofen, E.",
+ title = "Polynomial factorization",
+ editor = "B. Buchberger and G. Collins and R. Loos",
+ booktitle = "Computer Algebra",
+ edition = "2",
+ pages = "95--113",
+ publisher = "Springer-Verlag",
+ year = "1982",
+ link = "\url{http://www.math.ncsu.edu/~kaltofen/bibliography/82/Ka82_survey.ps.gz}",
+ keywords = "survey",
+ paper = "Kalt82b.ps"
+}
+
+@phdthesis{Sale04,
+ author = "Salem, Fatima Khaled Abu",
+ title = "Factorisation Algorithms for Univariate and Bivariate Polynomials
+ over Finite Fields",
+ school = "Merton College, University of Oxford",
+ year = "2004",
+ link = "\url{http://www.cs.aub.edu.lb/fa21/Dissertations/My\_thesis.pdf}",
+ abstract =
+ "In this thesis we address algorithms for polynomial factorisation
+ over finite fields. In the univariate case, we study a recent
+ algorithm due to Niederreiter where the factorisation problem is
+ reduced to solving a linear system over the finite field in question,
+ and the solutions are used to produce the complete factorisation of
+ the polynomials into irreducibles. We develop a new algorithm for
+ solving the linear system using sparse Gaussian elimination with the
+ Markowitz ordering strategy, and conjecture that the Niederreiter
+ linear system is not only initially sparse, but also preserves its
+ sparsity throughout the Gaussian elimination phase. We develop a new
+ bulk synchronous parallel (BSP) algorithm based on the approach of
+ Gottfert for extracting the factors of a polynomial using a basis of
+ the Niederreiter solution set of $\mathbb{F}_2$. We improve upon the
+ complexity and performance of the original algorithm, and produce
+ binary univariate factorisations of trinomials up to degree 400000.
+
+ We present a new approach to multivariate polynomial factorisation
+ which incorporates ideas from polyhedral geometry, and generalises
+ Hensel lifting. The contribution is an algorithm for factoring
+ bivariate polynomials via polytopes which is able to exploit to some
+ extent the sparsity of polynomials. We further show that the polytope
+ method can be made sensitive to the number of nonzero terms of the
+ input polynomial. We describe a sparse adaptation of the polytope
+ method over finite fields of prime order which requires fewer bit
+ operations and memory references for polynomials which are known to be
+ the product of two sparse factors. Using this method, and to the best
+ of our knowledge, we achieve a world record in binary bivariate
+ factorisation of a sparse polynomial of degree 20000. We develop a BSP
+ variant of the absolute irreducibility testing via polytopes given in
+ [45], producing a more memory and run time efficient method that can
+ provide wider ranges of applicability. We achieve absolute
+ irreducibility testing of a bivariate and trivariate polynomial of
+ degree 30000, and of multivariate polynomials with up to 3000
+ variables.",
+ paper = "Sale04.pdf"
+}
+
+@InProceedings{Shou91,
+ author = "Shoup, Victor",
+ title = "A Fast Deterministic Algorithm for Factoring Polynomials over
+ Finite Fields of Small Characteristic",
+ booktitle = "Proc. ISSAC 1991",
+ series = "ISSAC 1991",
+ year = "1991",
+ pages = "14-21",
+ link = "\url{http://www.shoup.net/papers/quadfactor.pdf}",
+ abstract =
+ "We present a new algorithm for factoring polynomials over finite
+ fields. Our algorithm is deterministic, and its running time is
+ ``almost'' quadratic when the characteristic is a small fixed
+ prime. As such, our algorithm is asymptotically faster than previously
+ known deterministic algorithms for factoring polynomials over finite
+ fields of small characteristic.",
+ paper = "Shou91.pdf"
+}
+
+@inproceedings{Trev91,
+ author = "Trevisan, Vilmar and Wang, Paul",
+ title = "Practical factorization of univariate polynomials over
+ finite fields",
+ booktitle = "Proc. ISSAC 1991",
+ series = "ISSAC '91",
+ publisher = "ACM",
+ isbn = "0-89791-437-6",
+ pages = "22-31",
+ year = "1991",
+ link = "\url{http://lib.org/by/\_djvu\_Papers/Computer\_algebra/Algebraic\%20numbers}",
+ abstract =
+ "Research presented here is part of an effort to establish
+ state-of-the-art factoring routines for polynomials. The foundation of
+ such algorithms lies in the efficient factorization over a finite
+ field $GF(p^k)$. The Cantor-Zassenhaus algorithm together with
+ innovative ideas suggested by others is compared with the Berlekamp
+ algorithm. The studies led us to design a hybrid algorithm that
+ combine the strengths of the different approaches. The algorithms are
+ also implemented and machine timings are obtained to measure the
+ performance of these algorithms.",
+ paper = "Trev91.djvu"
+}
+
+@article{Beau03,
+ author = "Beaumont, James and Bradford, Russell and Davenport, James H.",
+ title = "Better simplification of elementary functions through power series",
+ journal = "2003 International Symposium on Symbolic and Algebraic Computation",
+ series = "ISSAC'03",
+ year = "2003",
+ month = "August",
+ abstract = "
+ In [5], we introduced an algorithm for deciding whether a proposed
+ simplification of elementary functions was correct in the presence of
+ branch cuts. This algorithm used multivalued function simplification
+ followed by verification that the branches were consistent.
+
+ In [14] an algorithm was presented for zero-testing functions defined
+ by ordinary differential equations, in terms of their power series.
+
+ The purpose of the current paper is to investigate merging the two
+ techniques. In particular, we will show an explicit reduction to the
+ constant problem [16].",
+ paper = "Beau03.pdf"
+}
+
+@article{Brad02,
+ author = "Bradford, Russell and Corless, Robert M. and Davenport, James H. and
+ Jeffrey, David J. and Watt, Stephen M.",
+ title = "Reasoning about the Elementary Functions of Complex Analysis",
+ journal = "Annals of Mathematics and Artificial Intelligence",
+ year = "2002",
+ issn = "1012-2443",
+ volume = "36",
+ number = "3",
+ doi = "10.1023/A:1016007415899",
+ link = "\url{http://dx.doi.org/10.1023/A%3A1016007415899}",
+ publisher = "Kluwer Academic Publishers",
+ keywords = "elementary functions; branch cuts; complex identities",
+ pages = "303-318",
+ abstract =
+ "There are many problems with the simplification of elementary
+ functions, particularly over the complex plane, though not
+ exclusively. Systems tend to make ``howlers'' or not to simplify
+ enough. In this paper we outline the ``unwinding number'' approach to
+ such problems, and show how it can be used to prevent errors and to
+ systematise such simplification, even though we have not yet reduced
+ the simplification process to a complete algorithm. The unsolved
+ problems are probably more amenable to the techniques of artificial
+ intelligence and theorem proving than the original problem of complex
+ variable analysis.",
+ paper = "Brad02.pdf"
+}
+
+@inproceedings{Chyz11,
+ author = "Chyzak, Frederic and Davenport, James H. and
+ Koutschan, Christoph and Salvy, Bruno",
+ title = "On Kahan's Rules for Determining Branch Cuts",
+ booktitle = "Proc. 13th Int. Symp. on Symbolic and Numeric Algorithms
+ for Scientific Computing",
+ year = "2011",
+ isbn = "978-1-4673-0207-4",
+ location = "Timisoara",
+ pages = "47-51",
+ doi = "10.1109/SYNASC.2011.51",
+ acmid = "258794",
+ publisher = "IEEE",
+ abstract =
+ "In computer algebra there are different ways of approaching the
+ mathematical concept of functions, one of which is by defining them as
+ solutions of differential equations. We compare different such
+ approaches and discuss the occurring problems. The main focus is on
+ the question of determining possible branch cuts. We explore the
+ extent to which the treatment of branch cuts can be rendered (more)
+ algorithmic, by adapting Kahan's rules to the differential equation
+ setting.",
+ paper1 = "Chyz11a.pdf",
+ paper = "Chyz11.pdf"
+}
+
+@article{Dave10,
+ author = "Davenport, James",
+ title = {The Challenges of Multivalued "Functions"},
+ journal = "Lecture Notes in Computer Science",
+ volume = "6167",
+ year = "2010",
+ pages = "1-12",
+ abstract = "
+ Although, formally, mathematics is clear that a function is a
+ single-valued object, mathematical practice is looser, particularly
+ with n-th roots and various inverse functions. In this paper, we point
+ out some of the looseness, and ask what the implications are, both for
+ Artificial Intelligence and Symbolic Computation, of these practices.
+ In doing so, we look at the steps necessary to convert existing tests
+ into
+ \begin{itemize}
+ \item (a) rigorous statements
+ \item (b) rigorously proved statements
+ \end{itemize}
+ In particular we ask whether there might be a constant ``de Bruijn factor''
+ [18] as we make these texts more formal, and conclude that the answer
+ depends greatly on the interpretation being placed on the symbols.",
+ paper = "Dave10.pdf"
+}
+
+@article{Phis11,
+ author = "Phisanbut, Nalina and Bradford, Russell J. and
+ Davenport, James H.",
+ title = "Geometry of branch cuts",
+ journal = "ACM Communications in Computer Algebra",
+ volume = "44",
+ number = "3-4",
+ pages = "132-135",
+ year = "2011",
+ abstract =
+ "'Simplification' is a key concept in Computer Algebra. But many
+ simplification rules, such as $\sqrt{x}\sqrt{y} \rightarrow \sqrt{xy}$
+ are not universally valid, due to the fact that many elementary
+ functions are multi-valued. Hence a key question is ``Is this
+ simplification correct?'', which involves algorithmic analysis of the
+ branch cuts involved. In this paper, we look at variable ordering and
+ pre-conditioning as supporting technologies for this analysis.",
+ paper = "Phis11.pdf"
+}
+
+@article{Dave12,
+ author = "Davenport, James H. and Bradford, Russell and England, Matthew
+ and Wilson, David",
+ title = "Program Verification in the presence of complex numbers, functions
+ with branch cuts etc",
+ journal = "14th Int. Symp. on Symbolic and Numeric Algorithms for
+ Scientific Computing",
+ year = "2012",
+ series = "SYNASC'12",
+ pages = "83-88",
+ publisher = "IEEE",
+ abstract = "
+ In considering the reliability of numerical programs, it is normal to
+ ``limit our study to the semantics dealing with numerical precision''.
+ On the other hand, there is a great deal of work on the reliability of
+ programs that essentially ignores the numerics. The thesis of this
+ paper is that there is a class of problems that fall between the two,
+ which could be described as ``does the low-level arithmetic implement
+ the high-level mathematics''. Many of these problems arise because
+ mathematics, particularly the mathematics of the complex numbers, is
+ more difficult than expected; for example the complex function log is
+ not continuous, writing down a program to compute an inverse function
+ is more complicated than just solving an equation, and many algebraic
+ simplification rules are not universally valid.
+
+ The good news is that these problems are theoretically capable of
+ being solved, and are practically close to being solved, but not yet
+ solved, in several real-world examples. However, there is still a long
+ way to go before implementations match the theoretical possibilities.",
+ paper = "Dave12.pdf"
+}
+
+@article{Engl13,
+ author = "England, M. and Bradford, R. and Davenport, J. H. and
+ Wilson, D.",
+ title = "Understanding Branch Cuts of Expressions",
+ journal = "Intelligent Computer Mathematics",
+ year = "2013",
+ series = " LNCS 7961",
+ publisher = "Springer, Berlin",
+ pages = "136-151",
+ isbn = "9783642393198",
+ abstract = "
+ We assume some standard choices for the branch cuts of a group of
+ functions and consider the problem of then calculating the branch cuts
+ of expressions involving those functions. Typical examples include the
+ addition formulae for inverse trigonometric functions. Understanding
+ these cuts is essential for working with the single-valued
+ counterparts, the common approach to encoding multi-valued functions
+ in computer algebra systems. While the defining choices are usually
+ simple (typically portions of either the real or imaginary axes) the
+ cuts induced by the expression may be surprisingly complicated. We
+ have made explicit and implemented techniques for calculating the cuts
+ in the computer algebra programme Maple. We discuss the issues raised,
+ classifying the different cuts produced. The techniques have been
+ gathered in the BranchCuts package, along with tools for visualising
+ the cuts. The package is included in Maple 17 as part of the
+ FunctionAdvisor tool.",
+ paper = "Engl13.pdf"
+}
+
+@article{Jeff04,
+ author = "Jeffrey, D. J. and Norman, A. C.",
+ title = "Not Seeing the Roots for the Branches: Multivalued Functions in
+ Computer Algebra",
+ journal = "SIGSAM Bull.",
+ issue_date = "September 2004",
+ volume = "38",
+ number = "3",
+ month = "September",
+ year = "2004",
+ issn = "0163-5824",
+ pages = "57--66",
+ numpages = "10",
+ link = "\url{http://doi.acm.org/10.1145/1040034.1040036}",
+ doi = "10.1145/1040034.1040036",
+ acmid = "1040036",
+ publisher = "ACM",
+ address = "New York, NY, USA",
+ abstract = "
+ We discuss the multiple definitions of multivalued functions and their
+ suitability for computer algebra systems. We focus the discussion by
+ taking one specific problem and considering how it is solved using
+ different definitions. Our example problem is the classical one of
+ calculating the roots of a cubic polynomial from the Cardano formulae,
+ which contains fractional powers. We show that some definitions of
+ these functions result in formulae that are correct only in the sense
+ that they give candidates for solutions; these candidates must then be
+ tested. Formulae that are based on single-valued functions, in
+ contrast, are efficient and direct.",
+ paper = "Jeff04.pdf"
+}
+
+@inproceedings{Kaha86,
+ author = "Kahan, W.",
+ title = "Branch cuts for complex elementary functions",
+ booktitle = "The State of the Art in Numerical Analysis",
+ year = "1986",
+ month = "April",
+ editor = "Powell, M.J.D and Iserles, A.",
+ publisher = "Oxford University Press",
+ paper1 = "Kaha86a.pdf",
+ paper = "Kaha86.pdf"
+}
+
+@article{Rich96,
+ author = "Rich, Albert D. and Jeffrey, David J.",
+ title = "Function Evaluation on Branch Cuts",
+ journal = "SIGSAM Bull.",
+ issue_date = "June 1996",
+ volume = "30",
+ number = "2",
+ month = "June",
+ year = "1996",
+ issn = "0163-5824",
+ pages = "25--27",
+ numpages = "3",
+ link = "\url{http://doi.acm.org/10.1145/235699.235704}",
+ doi = "10.1145/235699.235704",
+ acmid = "235704",
+ publisher = "ACM",
+ address = "New York, NY, USA",
+ abstract = "
+ Once it is decided that a CAS will evaluate multivalued functions on
+ their principal branches, questions arise concerning the branch
+ definitions. The first questions concern the standardization of the
+ positions of the branch cuts. These questions have largely been
+ resolved between the various algebra systems and the numerical
+ libraries, although not completely. In contrast to the computer
+ systems, many mathematical textbooks are much further behind: for
+ example, many popular textbooks still specify that the argument of a
+ complex number lies between 0 and $2\pi$. We do not intend to discuss
+ these first questions here, however. Once the positions of the branch
+ cuts have been fixed, a second set of questions arises concerning the
+ evaluation of functions on their branch cuts."
+}
+
+@article{Patt96,
+ author = "Patton, Charles M.",
+ title = "A Representation of Branch-cut Information",
+ journal = "SIGSAM Bull.",
+ issue_date = "June 1996",
+ volume = "30",
+ number = "2",
+ month = "June",
+ year = "1996",
+ issn = "0163-5824",
+ pages = "21--24",
+ numpages = "4",
+ link = "\url{http://doi.acm.org/10.1145/235699.235703}",
+ doi = "10.1145/235699.235703",
+ acmid = "235703",
+ publisher = "ACM",
+ address = "New York, NY, USA",
+ abstract = "
+ Handling (possibly) multi-valued functions is a problem in all current
+ computer algebra systems. The problem is not an issue of technology.
+ Its solution, however, is tied to a uniform handling of the issues by
+ the mathematics community.",
+ paper = "Patt96.pdf"
+}
+
+@article{Squi91,
+ author = "Squire, Jon S.",
+ title = "Rationale for the Proposed Standard for a Generic Package of
+ Complex Elementary Functions",
+ journal = "Ada Lett.",
+ issue_date = "Fall 1991",
+ volume = "XI",
+ number = "7",
+ month = "September",
+ year = "1991",
+ issn = "1094-3641",
+ pages = "166--179",
+ numpages = "14",
+ link = "\url{http://doi.acm.org/10.1145/123533.123545}",
+ doi = "10.1145/123533.123545",
+ acmid = "123545",
+ publisher = "ACM",
+ address = "New York, NY, USA",
+ abstract = "
+ This document provides the background on decisions that were made
+ during the development of the specification for Generic Complex
+ Elementary functions. It also provides some information that was used to
+ develop error bounds, range, domain and definitions of complex
+ elementary functions.",
+ paper = "Squi91.pdf"
+}
+
+@article{Squi91a,
+ author = "Squire, Jon S.",
+ title = "Proposed Standard for a Generic Package of Complex
+ Elementary Functions",
+ journal = "Ada Lett.",
+ issue_date = "Fall 1991",
+ volume = "XI",
+ number = "7",
+ month = "September",
+ year = "1991",
+ issn = "1094-3641",
+ pages = "140--165",
+ numpages = "26",
+ link = "\url{http://doi.acm.org/10.1145/123533.123544}",
+ doi = "10.1145/123533.123544",
+ acmid = "123544",
+ publisher = "ACM",
+ address = "New York, NY, USA",
+ abstract = "
+ This document defines the specification of a generic package of
+ complex elementary functions called Generic Complex Elementary
+ Functions. It does not provide the body of the package."
+}
+
+@misc{Unkn15,
+ author = "Unknown",
+ title = "Branches of Functions",
+ link = "\url{http://scipp.ucsc.edu/~haber/ph116A/ComplexFunBranchTheory.pdf}",
+ paper = "Unkn15.pdf"
+}
+
+@article{Bern97,
+ author = "Bernardin, Laurent",
+ title = "On square-free factorization of multivariate polynomials over a
+ finite field",
+ journal = "Theoretical Computer Science",
+ volume = "187",
+ number = "1-2",
+ year = "1997",
+ month = "November",
+ pages = "105-116",
+ abstract = "
+ In this paper we present a new deterministic algorithm for computing
+ the square-free decomposition of multivariate polynomials with
+ coefficients from a finite field.
+
+ Our algorithm is based on Yun's square-free factorization algorithm
+ for characteristic 0. The new algorithm is more efficient than
+ existing, deterministic algorithms based on Musser's squarefree
+ algorithm
+
+ We will show that the modular approach presented by Yun has no
+ significant performance advantage over our algorithm. The new
+ algorithm is also simpler to implement and it can rely on any existing
+ GCD algorithm without having to worry about choosing ``good'' evaluation
+ points.
+
+ To demonstrate this, we present some timings using implementations in
+ Maple (Char et al. 1991), where the new algorithm is used for Release
+ 4 onwards, and Axiom (Jenks and Sutor, 1992) which is the only system
+ known to the author to use an implementation of Yun's modular
+ algorithm mentioned above.",
+ paper = "Bern97.pdf",
+ keywords = "axiomref"
+}
+
+@article{Chez07,
+ author = "Ch\'eze, Guillaume and Lecerf, Gr\'egoire",
+ title = "Lifting and recombination techniques for absolute factorization",
+ journal = "Journal of Complexity",
+ volume = "23",
+ number = "3",
+ year = "2007",
+ month = "June",
+ pages = "380-420",
+ abstract = "
+ In the vein of recent algorithmic advances in polynomial factorization
+ based on lifting and recombination techniques, we present new faster
+ algorithms for computing the absolute factorization of a bivariate
+ polynomial. The running time of our probabilistic algorithm is less
+ than quadratic in the dense size of the polynomial to be factored.",
+ paper = "Chez07.pdf"
+}
+
+@article{Lece07,
+ author = "Lecerf, Gr\'egoire",
+ title = "Improved dense multivariate polynomial factorization algorithms",
+ journal = "Journal of Symbolic Computation",
+ volume = "42",
+ number = "4",
+ year = "2007",
+ month = "April",
+ pages = "477-494",
+ abstract = "
+ We present new deterministic and probabilistic algorithms that reduce
+ the factorization of dense polynomials from several variables to one
+ variable. The deterministic algorithm runs in sub-quadratic time in
+ the dense size of the input polynomial, and the probabilistic
+ algorithm is softly optimal when the number of variables is at least
+ three. We also investigate the reduction from several to two variables
+ and improve the quantitative versions of Bertini's irreducibility
+ theorem.",
+ paper = "Lece07.pdf"
+}
+
+@article{Wang77,
+ author = "Wang, Paul S.",
+ title = "An efficient squarefree decomposition algorithm",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "11",
+ number = "2",
+ year = "1977",
+ month = "May",
+ pages = "4-6",
+ abstract = "
+ The concept of polynomial squarefree decomposition is an important one
+ in algebraic computation. The squarefree decomposition process has
+ many uses in computer symbolic computation. A recent survey by D. Yun
+ [3] describes many useful algorithms for this purpose. All of these
+ methods depend on computing the greatest common divisor (gcd) of the
+ polynomial to be decomposed and its first derivative (with respect to
+ some variable). In the multivariate case, this gcd computation is
+ non-trivial and dominates the cost for the squarefree decomposition.",
+ paper = "Wang77.pdf"
+}
+
+@article{Wang79,
+ author = "Wang, Paul S. and Trager, Barry M.",
+ title = "New Algorithms for Polynomial Square-Free Decomposition
+ over the Integers",
+ journal = "SIAM Journal on Computing",
+ volume = "8",
+ number = "3",
+ year = "1979",
+ publisher = "Society for Industrial and Applied Mathematics",
+ issn = "00975397",
+ abstract = "
+ Previously known algorithms for polynomial square-free decomposition
+ rely on greatest common divisor (gcd) computations over the same
+ coefficient domain where the decomposition is to be performed. In
+ particular, gcd of the given polynomial and its first derivative (with
+ respect to some variable) is obtained to begin with. Application of
+ modular homomorphism and $p$-adic construction (multivariate case) or
+ the Chinese remainder algorithm (univariate case) results in new
+ square-free decomposition algorithms which, generally speaking, take
+ less time than a single gcd between the given polynomial and its first
+ derivative. The key idea is to obtain one or several ``correct''
+ homomorphic images of the desired square-free decomposition
+ first. This provides information as to how many different square-free
+ factors there are, their multiplicities and their homomorphic
+ images. Since the multiplicities are known, only the square-free
+ factors need to be constructed. Thus, these new algorithms are
+ relatively insensitive to the multiplicities of the square-free factors.",
+ paper = "Wang79.pdf"
+}
+
+@inproceedings{Yun76,
+ author = "Yun, D.Y.Y.",
+ title = "On square-free decomposition algorithms",
+ booktitle = "Proceedings of SYMSAC'76",
+ year = "1976",
+ keywords = "survey",
+ pages = "26-35"
+}
+
+@article{Abra71,
+ author = "Abramov, S.A.",
+ title = "On the summation of rational functions",
+ year = "1971",
+ journal = "USSR Computational Mathematics and Mathematical Physics",
+ volume = "11",
+ number = "4",
+ pages = "324--330",
+ abstract = "
+ An algorithm is given for solving the following problem: let
+ $F(x_1,\ldots,x_n)$ be a rational function of the variables
+ $x_i$ with rational (real or complex) coefficients; to see if
+ there exists a rational function $G(v,w,x_2,\ldots,x_n)$ with
+ coefficients from the same field, such that
+ \[\sum_{x_1=v}^w{F(x_1,\ldots,x_n)} = G(v,w,x_2,\ldots,x_n)\]
+ for all integral values of $v \le w$. If $G$ exists, to obtain it.
+ Realization of the algorithm in the LISP language is discussed.",
+ paper = "Abra71.pdf"
+}
+
+@article{Gosp78,
+ author = "Gosper, R. William",
+ title = "Decision procedure for indefinite hypergeometric summation",
+ year = "1978",
+ journal = "Proc. Natl. Acad. Sci. USA",
+ volume = "75",
+ number = "1",
+ pages = "40--42",
+ month = "January",
+ abstract = "
+ Given a summand $a_n$, we seek the ``indefinite sum'' $S(n)$
+ determined (within an additive constant) by
+ \[\sum_{n=1}^m{a_n} = S(m)-S(0)\]
+ or, equivalently, by
+ \[a_n=S(n)-S(n-1)\]
+ An algorithm is exhibited which, given $a_n$, finds those $S(n)$
+ with the property
+ \[\displaystyle\frac{S(n)}{S(n-1)}=\textrm{a rational function of n}\]
+ With this algorithm, we can determine, for example, the three
+ identities
+ \[\displaystyle\sum_{n=1}^m{
+ \frac{\displaystyle\prod_{j=1}^{n-1}{bj^2+cj+d}}
+ {\displaystyle\prod_{j=1}^n{bj^2+cj+e}}=
+ \frac{1-{\displaystyle\prod_{j=1}^m{\frac{bj^2+cj+d}{bj^2+cj+e}}}}{e-d}}\]
+ \[\displaystyle\sum_{n=1}^m{
+ \frac{\displaystyle\prod_{j=1}^{n-1}{aj^3+bj^2+cj+d}}
+ {\displaystyle\prod_{j=1}^n{aj^3+bj^2+cj+e}}=
+ \frac{1-{\displaystyle\prod_{j=1}^m{
+ \frac{aj^3+bj^2+cj+d}{aj^3+bj^2+cj+e}}}}{e-d}}\]
+ \[\displaystyle\sum_{n=1}^m{
+ \displaystyle\frac{\displaystyle\prod_{j=1}^{n-1}{bj^2+cj+d}}
+ {\displaystyle\prod_{j=1}^{n+1}{bj^2+cj+e}}=
+ \displaystyle\frac{
+ \displaystyle\frac{2b}{e-d}-
+ \displaystyle\frac{3b+c+d-e}{b+c+e}-
+ \left(
+ \displaystyle\frac{2b}{e-d}-\frac{b(2m+3)+c+d-e}{b(m+1)^2+c(m+1)+e}
+ \right)
+ \displaystyle\prod_{j=1}^m{\frac{bj^2+cj+d}{bj^2+cj+e}}}
+ {b^2-c^2+d^2+e^2+2bd-2de+2eb}}\]",
+ paper = "Gosp78.pdf"
+}
+
+@article{Karr81,
+ author = "Karr, Michael",
+ title = "Summation in Finite Terms",
+ journal = "Journal Association for Computing Machinery",
+ year = "1981",
+ volume = "28",
+ number = "2",
+ month = "April",
+ issn = "0004-5411",
+ pages = "305--350",
+ link = "\url{http://doi.acm.org/10.1145/322248.322255}",
+ publisher = "ACM",
+ abstract = "
+ Results which allow either the computation of symbolic solutions to
+ first-order linear difference equations or the determination that
+ solutions of a certain form do not exist are presented. Starting with
+ a field of constants, larger fields may be constructed by the formal
+ adjunction of symbols which behave like solutions to first-order
+ linear equations (with a few restrictions). It is in these extension
+ fields that the difference equations may be posed and in which the
+ solutions are requested. The principal application of these results is
+ in finding formulas for a broad class of finite sums or in showing the
+ nonexistence of such a formula.",
+ paper = "Karr81.pdf"
+}
+
+@book{Lafo82,
+ author = "Lafon, J.C.",
+ title = "Summation in Finite Terms",
+ year = "1982",
+ publisher = "Springer-Verlag",
+ isbn = "3-211-81776-X",
+ pages = "71--77",
+ abstract = "
+ A survey on algorithms for summation in finite terms is given. After a
+ precise definition of the problem the cases of polynomial and rational
+ summands are treated. The main concern of this paper is a description
+ of Gosper's algorithm, which is applicable for a wide class of
+ summands. Karr's theory of extension difference fields and some
+ heuristic techniques are touched on briefly.",
+ keywords = "axiomref,survey"
+}
+
+@article{Abra85,
+ author = "Abramov, S.A.",
+ title = "Separation of variables in rational functions",
+ year = "1985",
+ journal = "USSR Computational Mathematics and Mathematical Physics",
+ volume = "25",
+ number = "5",
+ pages = "99--102",
+ abstract =
+ "The problem of expanding a rational function of several variables into
+ terms with separable variables is formulated. An algorithm for solving
+ this problem is given. Programs which implement this algorithm can
+ occur in sets of algebraic alphabetical transformations on a computer
+ and can be used to reduce the multiplicity of sums and integrals of
+ rational functions for investigating differential equations with
+ rational right-hand sides etc.",
+ paper = "Abra85.pdf"
+}
+
+@Article{Karr85,
+ author = "Karr, Michael",
+ title = "Theory of Summation in Finite Terms",
+ year = "1985",
+ journal = "Journal of Symbolic Computation",
+ volume = "1",
+ number = "3",
+ month = "September",
+ pages = "303-315",
+ abstract = "
+ This paper discusses some of the mathematical aspects of an algorithm
+ for finding formulas for finite sums. The results presented here
+ concern a property of difference fields which show that the algorithm
+ does not divide by zero, and an analogue to Liouville's theorem on
+ elementary integrals.",
+ paper = "Karr85.pdf"
+}
+
+@book{Koep98,
+ author = "Koepf, Wolfram",
+ title = "Hypergeometric Summation",
+ publisher = "Springer",
+ year = "1998",
+ isbn = "978-1-4471-6464-7",
+ abstract = "
+ Modern algorithmic techniques for summation, most of which were
+ introduced in the 1990s, are developed here and carefully implemented
+ in the computer algebra system Maple.
+
+ The algorithms of Fasenmyer, Gosper, Zeilberger, Petkovsek and van
+ Hoeij for hypergeometric summation and recurrence equations, efficient
+ multivariate summation as well as q-analogues of the above algorithms
+ are covered. Similar algorithms concerning differential equations are
+ considered. An equivalent theory of hyperexponential integration due
+ to Almkvist and Zeilberger completes the book.
+
+ The combination of these results gives orthogonal polynomials and
+ (hypergeometric and q-hypergeometric) special functions a solid
+ algorithmic foundation. Hence, many examples from this very active
+ field are given.
+
+ The materials covered are suitable for an introductory course on
+ algorithmic summation and will appeal to students and researchers
+ alike.",
+ paper = "Koep98.pdf"
+}
+
+@article{Liso93,
+ author = "Lisonek, Petr and Paule, Peter and Strehl, Volker",
+ title = "Improvement of the Degree Setting in Gosper's Algorithm",
+ journal = "J. Symbolic Computation",
+ volume = "16",
+ year = "1993",
+ pages = "243-258",
+ link =
+ "\url{http://www.sciencedirect.com/science/article/pii/S0747717183710436}",
+ abstract =
+ "A detailed study of the degree setting for Gosper's algorithm for
+ indefinite hypergeometric summation is presented. In particular, we
+ discriminate between rational and proper hypergeometric input. As a
+ result, the critical degree bound can be improved in the former case.",
+ paper = "Liso93.pdf"
+}
+
+@article{Manx93,
+ author = "Man, Yiu-Kwong",
+ title = "On Computing Closed Forms for Indefinite Summations",
+ journal = "J. Symbolic Computation",
+ volume = "16",
+ pages = "335-376",
+ year = "1993",
+ link =
+ "\url{http://www.sciencedirect.com/science/article/pii/S0747717183710539}",
+ abstract =
+ "A decision procedure for finding closed forms for indefinite
+ summation of polynomials, rational functions, quasipolynomials and
+ quasirational functions is presented. It is also extended to deal with
+ some non-hypergeometric sums with rational inputs, which are not
+ summable by means of Gosper's algorithm. Discussion of its
+ implementation, analysis of degree bounds and some illustrative
+ examples are included.",
+ paper = "Manx93.pdf"
+}
+
+@article{Paul95,
+ author = "Paule, Peter",
+ title = "Greatest Factorial Factorization and Symbolic Summation",
+ journal = "Journal of Symbolic Computation",
+ year = "1995",
+ volume = "20",
+ pages = "235-268",
+ link =
+ "\url{http://www.sciencedirect.com/science/article/pii/S0747717185710498}",
+ abstract =
+ "The greatest factorial factorization (GFF) of a polynomial provides
+ an analogue to square-free factorization but with respect to integer
+ shifts instead to multiplicities. We illustrate the fundamental role
+ of that concept in the context of symbolic summation. Besides a
+ detailed discussion of the basic GFF notions we present a new approach
+ to the indefinite rational summation problem as well as to Gosper's
+ algorithm for summing hypergeometric sequences.",
+ paper = "Paul95.pdf"
+}
+
+@article{Petk92,
+ author = "Petkovsek, Marko",
+ title = "Hypergeometric solutions of linear recurrences with
+ polynomial coefficients",
+ journal = "J. Symbolic Computation",
+ volume = "14",
+ pages = "243-264",
+ year = "1992",
+ link =
+ "\url{http://www.sciencedirect.com/science/article/pii/0747717192900386}",
+ abstract =
+ "We describe algorithm Hyper which can be used to find all
+ hypergeometric solutions of linear recurrences with polynomial
+ coefficients.",
+ paper = "Petk92.pdf"
+}
+
+@InProceedings{Schn00,
+ author = "Schneider, Carsten",
+ title = "An implementation of Karr's summation algorithm in Mathematica",
+ year = "2000",
+ booktitle = "S\'eminaire Lotharingien de Combinatoire",
+ volume = "S43b",
+ pages = "1-10",
+ abstract = "
+ Implementations of the celebrated Gosper algorithm (1978) for
+ indefinite summation are available on almost any computer algebra
+ platform. We report here about an implementation of an algorithm by
+ Karr, the most general indefinite summation algorithm known. Karr's
+ algorithm is, in a sense, the summation counterpart of Risch's
+ algorithm for indefinite integration. This is the first implementation
+ of this algorithm in a major computer algebra system. Our version
+ contains new extensions to handle also definite summation problems. In
+ addition we provide a feature to find automatically appropriate
+ difference field extensions in which a closed form for the summation
+ problem exists. These new aspects are illustrated by a variety of
+ examples.",
+ paper = "Schn00.pdf"
+}
+
+@phdthesis{Schn01,
+ author = "Schneider, Carsten",
+ title = "Symbolic Summation in Difference Fields",
+ school = "RISC Research Institute for Symbolic Computation",
+ year = "2001",
+ link = "\url{http://www.risc.jku.at/publications/download/risc_3017/SymbSumTHESIS.pdf}",
+ abstract =
+ "There are implementations of the celebrated Gosper algorithm (1978) on
+ almost any computer algebra platform. Within my PhD thesis work I
+ implemented Karr's Summation Algorithm (1981) based on difference
+ field theory in the Mathematica system. Karr's algorithm is, in a
+ sense, the summation counterpart of Risch's algorithm for indefinite
+ integration. Besides Karr's algorithm which allows us to find closed
+ forms for a big class of multisums, we developed new extensions to
+ handle also definite summation problems. More precisely we are able to
+ apply creative telescoping in a very general difference field setting
+ and are capable of solving linear recurrences in its context.
+
+ Besides this we find significant new insights in symbolic summation by
+ rephrasing the summation problems in the general difference field
+ setting. In particular, we designed algorithms for finding appropriate
+ difference field extensions to solve problems in symbolic summation.
+ For instance we deal with the problem to find all nested sum
+ extensions which provide us with additional solutions for a given
+ linear recurrence of any order. Furthermore we find appropriate sum
+ extensions, if they exist, to simplify nested sums to simpler nested
+ sum expressions. Moreover we are able to interpret creative
+ telescoping as a special case of sum extensions in an indefinite
+ summation problem. In particular we are able to determine sum
+ extensions, in case of existence, to reduce the order of a recurrence
+ for a definite summation problem.",
+ paper = "Schn01.pdf"
+}
+
+@phdthesis{Scho95,
+ author = "Schorn, Markus",
+ title = "Contributions to Symbolic Summation",
+ school = "Johannes Kepler University, RISC",
+ year = "1995",
+ link = "\url{http://www.risc.jku.at/publications/download/risc_2246/diplom.pdf}",
+ paper = "Scho95.pdf"
+}
+
+@inproceedings{Gerh03,
+ author = "Gerhard, J. and Giesbrecht, M. and Storjohann, A. and Zima, E.V.",
+ title = "Shiftless decomposition and polynomial-time rational summation",
+ booktitle = "Proceedings of ISSAC'03",
+ year = "2003",
+ pages = "119--126",
+ abstract = "
+ New algorithms are presented for computing the dispersion set of two
+ polynomials over {\bf Q} and for {\sl shiftless} factorization. Together
+ with a summability criterion by Abramov, these are applied to get a
+ polynomial-time algorithm for indefinite rational summation, using a
+ sparse representation of the output.",
+ paper = "Gerh03.pdf"
+}
+
+@article{Schn05,
+ author = "Schneider, Carsten",
+ title = "A new Sigma approach to multi-summation",
+ year = "2005",
+ journal = "Advances in Applied Mathematics",
+ volume = "34",
+ number = "4",
+ pages = "740--767",
+ abstract = "
+ We present a general algorithmic framework that allows not only to
+ deal with summation problems over summands being rational expressions
+ in indefinite nested sums and products (Karr, 1981), but also over
+ $\delta$-finite and holonomic summand expressions that are given by a
+ linear recurrence. This approach implies new computer algebra tools
+ implemented in Sigma to solve multi-summation problems efficiently.
+ For instance, the extended Sigma package has been applied successfully
+ to provide a computer-assisted proof of Stembridge's TSPP Theorem.",
+ paper = "Schn05.pdf"
+}
+
+@article{Kaue08a,
+ author = "Kauers, Manuel and Schneider, Carsten",
+ title = "Indefinite summation with unspecified summands",
+ year = "2006",
+ journal = "Discrete Mathematics",
+ volume = "306",
+ number = "17",
+ pages = "2073--2083",
+ abstract = "
+ We provide a new algorithm for indefinite nested summation which is
+ applicable to summands involving unspecified sequences $x(n)$. More
+ than that, we show how to extend Karr's algorithm to a general
+ summation framework by which additional types of summand expressions
+ can be handled. Our treatment of unspecified sequences can be seen as
+ a first illustrative application of this approach.",
+ paper = "Kaue08a.pdf"
+}
+
+@article{Kaue07,
+ author = "Kauers, Manuel",
+ title = "Summation algorithms for Stirling number identities",
+ year = "2007",
+ journal = "Journal of Symbolic Computation",
+ volume = "42",
+ number = "10",
+ month = "October",
+ pages = "948--970",
+ abstract =
+ "We consider a class of sequences defined by triangular recurrence
+ equations. This class contains Stirling numbers and Eulerian numbers
+ of both kinds, and hypergeometric multiples of those. We give a
+ sufficient criterion for sums over such sequences to obey a recurrence
+ equation, and present algorithms for computing such recurrence
+ equations efficiently. Our algorithms can be used for verifying many
+ known summation identities on Stirling numbers instantly, and also for
+ discovering new identities.",
+ paper = "Kaue07.pdf"
+}
+
+@InProceedings{Schn07,
+ author = "Schneider, Carsten",
+ title = "Symbolic Summation Assists Combinatorics",
+ year = "2007",
+ booktitle = "S\'eminaire Lotharingien de Combinatoire",
+ volume = "56",
+ article = "B56b",
+ abstract = "
+ We present symbolic summation tools in the context of difference
+ fields that help scientists in practical problem solving. Throughout
+ this article we present multi-sum examples which are related to
+ combinatorial problems.",
+ paper = "Schn07.pdf"
+}
+
+@article{Schn08,
+ author = "Schneider, Carsten",
+ title = "A refined difference field theory for symbolic summation",
+ year = "2008",
+ journal = "Journal of Symbolic Computation",
+ volume = "43",
+ number = "9",
+ pages = "611--644",
+ abstract = "
+ In this article we present a refined summation theory based on Karr's
+ difference field approach. The resulting algorithms find sum
+ representations with optimal nested depth. For instance, the
+ algorithms have been applied successfully to evaluate Feynman
+ integrals from Perturbative Quantum Field Theory.",
+ paper = "Schn08.pdf"
+}
+
+@article{Schn09,
+ author = "Schneider, Carsten",
+ title = "Structural theorems for symbolic summation",
+ journal = "Proc. AAECC-2010",
+ year = "2010",
+ volume = "21",
+ pages = "1--32",
+ abstract = "
+ Starting with Karr's structural theorem for summation - the discrete
+ version of Liouville's structural theorem for integration - we work
+ out crucial properties of the underlying difference fields. This leads
+ to new and constructive structural theorems for symbolic summation.
+ E.g., these results can be applied for harmonic sums which arise
+ frequently in particle physics.",
+ paper = "Schn09.pdf"
+}
+
+@article{Eroc10,
+ author = {Er\"ocal, Bur\c{c}in},
+ title = "Summation in Finite Terms Using Sage",
+ journal = "ACM Commun. Comput. Algebra",
+ volume = "44",
+ number = "3/4",
+ month = "January",
+ year = "2011",
+ issn = "1932-2240",
+ pages = "190--193",
+ link = "\url{http://doi.acm.org/10.1145/1940475.1940517}",
+ publisher = "ACM",
+ abstract = "
+ The summation analogue of the Risch integration algorithm developed by
+ Karr uses towers of difference fields to model nested indefinite sums
+ and products, as the Risch algorithm uses towers of differential
+ fields to model the so called {\sl elementary functions}. The
+ algorithmic machinery developed by Karr, and later generalized and
+ extended, allows one to find solutions of first order difference
+ equations over such towers of difference fields, in turn simplifying
+ expressions involving sums and products.
+
+ We present an implementation of this machinery in the open source
+ computer algebra system Sage. Due to the nature of open source
+ software, this allows direct experimentation with the algorithms and
+ structures involved while taking advantage of the state of the art
+ primitives provided by Sage. Even though these methods are used behind
+ the scenes in the summation package Sigma and they were previously
+ implemented, this is the first open source implementation.",
+ paper = "Eroc10.pdf"
+}
+
+@phdthesis{Eroc11,
+ author = {Er\"ocal, Bur\c{c}in},
+ title = "Algebraic Extensions for Symbolic Summation",
+ school = "RISC Research Institute for Symbolic Computation",
+ year = "2011",
+ link = "\url{http://www.risc.jku.at/publications/download/risc_4320/erocal_thesis.pdf}",
+ abstract =
+ "The main result of this thesis is an effective method to extend Karr's
+ symbolic summation framework to algebraic extensions. These arise, for
+ example, when working with expressions involving $(-1)^n$. An
+ implementation of this method, including a modernised version of
+ Karr's algorithm is presented.
+
+ Karr's algorithm is the summation analogue of the Risch algorithm for
+ indefinite integration. In the summation case, towers of specialized
+ difference fields called $\prod\sum$-fields are used to model nested
+ sums and products. This is similar to the way elementary functions
+ involving nested logarithms and exponentials are represented in
+ differential fields in the integration case.
+
+ In contrast to the integration framework, only transcendental
+ extensions are allowed in Karr's construction. Algebraic extensions of
+ $\prod\sum$-fields can even be rings with zero divisors. Karr's
+ methods rely heavily on the ability to solve first-order linear
+ difference equations and they are no longer applicable over these
+ rings.
+
+ Based on Bronstein's formulation of a method used by Singer for the
+ solution of differential equations over algebraic extensions, we
+ transform a first-order linear equation over an algebraic extension to
+ a system of first-order equations over a purely transcendental
+ extension field. However, this domain is not necessarily a
+ $\prod\sum$-field. Using a structure theorem by Singer and van der
+ Put, we reduce this system to a single first-order equation over a
+ $\prod\sum$-field, which can be solved by Karr's algorithm. We also
+ describe how to construct towers of difference ring extensions on an
+ algebraic extension, where the same reduction methods can be used.
+
+ A common bottleneck for symbolic summation algorithms is the
+ computation of nullspaces of matrices over rational function
+ fields. We present a fast algorithm for matrices over $\mathbb{Q}(x)$
+ which uses fast arithmetic at the hardware level with calls to BLAS
+ subroutines after modular reduction. This part is joint work with Arne
+ Storjohann.",
+ paper = "Eroc11.pdf"
+}
+
+@article{Poly11,
+ author = "Polyakov, S.P.",
+ title = "Indefinite summation of rational functions with factorization
+ of denominators",
+ year = "2011",
+ month = "November",
+ journal = "Programming and Computer Software",
+ volume = "37",
+ number = "6",
+ pages = "322--325",
+ abstract = "
+ A computer algebra algorithm for indefinite summation of rational
+ functions based on complete factorization of denominators is
+ proposed. For a given $f$, the algorithm finds two rational functions
+ $g$, $r$ such that $f=g(x+1)-g(x)+r$ and the degree of the denominator
+ of $r$ is minimal. A modification of the algorithm is also proposed
+ that additionally minimizes the degree of the denominator of
+ $g$. Computational complexity of the algorithms without regard to
+ denominator factorization is shown to be $O(m^2)$, where $m$ is the
+ degree of the denominator of $f$.",
+ paper = "Poly11.pdf"
+}
+
+@article{Schn13,
+ author = "Schneider, Carsten",
+ title =
+ "Fast Algorithms for Refined Parameterized Telescoping in Difference Fields",
+ journal = "CoRR",
+ year = "2013",
+ volume = "abs/1307.7887",
+ keywords = "survey",
+ abstract = "
+ Parameterized telescoping (including telescoping and creative
+ telescoping) and refined versions of it play a central role in the
+ research area of symbolic summation. In 1981 Karr introduced
+ $\prod\sum$-fields, a general class of difference fields, that enables
+ one to consider this problem for indefinite nested sums and products
+ covering as special cases, e.g., the (q-)hypergeometric case and their
+ mixed versions. This survey article presents the available algorithms
+ in the framework of $\prod\sum$-extensions and elaborates new results
+ concerning efficiency.",
+ paper = "Schn13.pdf"
+}
+
+@article{Zima13,
+ author = "Zima, Eugene V.",
+ title = "Accelerating Indefinite Summation: Simple Classes of Summands",
+ journal = "Mathematics in Computer Science",
+ year = "2013",
+ month = "December",
+ volume = "7",
+ number = "4",
+ pages = "455--472",
+ abstract = "
+ We present the history of indefinite summation starting with classics
+ (Newton, Montmort, Taylor, Stirling, Euler, Boole, Jordan) followed by
+ modern classics (Abramov, Gosper, Karr) to the current implementation
+ in computer algebra system Maple. Along with historical presentation
+ we describe several ``acceleration techniques'' of algorithms for
+ indefinite summation which offer not only theoretical but also
+ practical improvements in running time. Implementations of these
+ algorithms in Maple are compared to standard Maple summation tools.",
+ paper = "Zima13.pdf"
+}
+
+@misc{Schn14,
+ author = "Schneider, Carsten",
+ title = "A Difference Ring Theory for Symbolic Summation",
+ year = "2014",
+ abstract = "
+ A summation framework is developed that enhances Karr's difference
+ field approach. It covers not only indefinite nested sums and products
+ in terms of transcendental extensions, but it can treat, e.g., nested
+ products defined over roots of unity. The theory of the so-called
+ $R\prod\sum*$-extensions is supplemented by algorithms that support the
+ construction of such difference rings automatically and that assist in
+ the task to tackle symbolic summation problems. Algorithms are
+ presented that solve parameterized telescoping equations, and more
+ generally parameterized first-order difference equations, in the given
+ difference ring. As a consequence, one obtains algorithms for the
+ summation paradigms of telescoping and Zeilberger's creative
+ telescoping. With this difference ring theory one obtains a rigorous
+ summation machinery that has been applied to numerous challenging
+ problems coming, e.g., from combinatorics and particle physics.",
+ paper = "Schn14.pdf"
+}
+
+@phdthesis{Vazq14,
+ author = "Vazquez-Trejo, Javier",
+ title = "Symbolic Summation in Difference Fields",
+ year = "2014",
+ school = "Carnegie-Mellon University",
+ abstract = "
+ We seek to understand a general method for finding a closed form for a
+ given sum that acts as its antidifference in the same way that an
+ integral has an antiderivative. Once an antidifference is found, then
+ given the limits of the sum, it suffices to evaluate the
+ antidifference at the given limits. Several algorithms (by Karr and
+ Schneider) exist to find antidifferences, but the papers describing
+ these algorithms leave out several of the key proofs needed to
+ implement the algorithms. We attempt to fill in these gaps and find
+ that many of the steps to solve difference equations rely on being
+ able to solve two problems: the equivalence problem and the homogeneous
+ group membership problem. Solving these two problems is essential to
+ finding the polynomial degree bounds and denominator bounds for
+ solutions of difference equations. We study Karr and Schneider's
+ treatment of these problems and elaborate on the unproven parts of
+ their work. Section 1 provides background material; section 2 provides
+ motivation and previous work; Section 3 provides an outline of Karr's
+ Algorithm; section 4 examines the Equivalence Problem, and section 5
+ examines the Homogeneous Group Membership Problem. Section 6 presents
+ some proofs for the denominator and polynomial bounds used in solving
+ difference equations, and Section 7 gives some directions for future
+ work.",
+ paper = "Vazq14.pdf"
+}
+
+@book{Petk97,
+ author = "Petkov\v{s}ek, Marko and Wilf, Herbert S. and
+ Zeilberger, Doron",
+ title = "A=B",
+ publisher = "A.K. Peters, Ltd",
+ year = "1997",
+ paper = "Petk97.pdf"
+}
+
+@misc{Temm14,
+ author = "Temme, N.M.",
+ title = "Bernoulli Polynomials Old and New",
+ abstract =
+ "We consider two problems on generalized Bernoulli polynomials
+ $B_n^u(z)$. One is connected with defining functions instead of
+ polynomials by making the degree $n$ of the polynomial a complex
+ variable. In the second problem we are concerned with the asymptotic
+ behaviour of $B_n^u(z)$ when the degree $n$ tends to infinity.",
+ paper = "Temm14.pdf"
+}
+
+@book{Cart06,
+ author = {Cartan, Henri},
+ title = {Differential Forms},
+ year = "2006",
+ location = {Mineola, N.Y},
+ edition = {Auflage: Tra},
+ isbn = {9780486450100},
+ pagetotal = {166},
+ publisher = {Dover Pubn Inc},
+ date = {2006-05-26}
+}
+
+@book{Flan03,
+ author = "Flanders, Harley",
+ title = "Differential Forms with Applications to the Physical Sciences",
+ year = "2003",
+ location = "Mineola, N.Y",
+ isbn = "9780486661698",
+ pagetotal = "240",
+ publisher = "Dover Pubn Inc",
+ date = "2003-03-28",
+ algebra = "\newline\refto{domain DERHAM DeRhamComplex}"
+}
+
+@book{Whit12,
+ author = {Whitney, Hassler},
+ title =
+ {Geometric Integration Theory: Princeton Mathematical Series, No. 21},
+ year = "2012",
+ isbn = {9781258346386},
+ shorttitle = {Geometric Integration Theory},
+ pagetotal = {402},
+ publisher = {Literary Licensing, {LLC}},
+ date = {2012-05-01}
+}
+
+@book{Fede13,
+ author = {Federer, Herbert},
+ title = {Geometric Measure Theory},
+ year = "2013",
+ location = {Berlin ; New York},
+ edition = {Reprint of the 1st ed. Berlin, Heidelberg, New York 1969},
+ isbn = {9783540606567},
+ pagetotal = {700},
+ publisher = {Springer},
+ date = {2013-10-04},
+ abstract =
+ "This book is a major treatise in mathematics and is essential in the
+ working library of the modern analyst. (Bulletin of the London
+ Mathematical Society)"
+}
+
+@book{Abra93,
+ author = "Abraham, Ralph and Marsden, Jerrold E. and Ratiu, Tudor",
+ title = "Manifolds, Tensor Analysis, and Applications",
+ year = "1993",
+ location = "New York",
+ edition = "2nd Corrected ed. 1988. Corr. 2nd printing 1993",
+ isbn = "9780387967905",
+ pagetotal = "656",
+ publisher = "Springer",
+ date = "1993-08-26",
+ abstract = "
+ The purpose of this book is to provide core material in nonlinear
+ analysis for mathematicians, physicists, engineers, and mathematical
+ biologists. The main goal is to provide a working knowledge of
+ manifolds, dynamical systems, tensors, and differential forms. Some
+ applications to Hamiltonian mechanics, fluid mechanics,
+ electromagnetism, plasma dynamics and control theory are given using
+ both invariant and index notation. The prerequisites required are
+ solid undergraduate courses in linear algebra and advanced calculus."
+}
+
+@book{Lamb97a,
+ author = {Lambe, L. A. and Radford, D. E.},
+ title = {Introduction to the Quantum Yang-Baxter Equation and
+ Quantum Groups: An Algebraic Approach},
+ year = "1997",
+ location = {Dordrecht ; Boston},
+ edition = {Auflage: 1997},
+ isbn = {9780792347217},
+ shorttitle = {Introduction to the Quantum Yang-Baxter Equation and
+ Quantum Groups},
+ abstract = {
+ Chapter 1 The algebraic prerequisites for the book are covered here
+ and in the appendix. This chapter should be used as reference material
+ and should be consulted as needed. A systematic treatment of algebras,
+ coalgebras, bialgebras, Hopf algebras, and represen tations of these
+ objects to the extent needed for the book is given. The material here
+ not specifically cited can be found for the most part in [Sweedler,
+ 1969] in one form or another, with a few exceptions. A great deal of
+ emphasis is placed on the coalgebra which is the dual of n x n
+ matrices over a field. This is the most basic example of a coalgebra
+ for our purposes and is at the heart of most algebraic constructions
+ described in this book. We have found pointed bialgebras useful in
+ connection with solving the quantum Yang-Baxter equation. For this
+ reason we develop their theory in some detail. The class of examples
+ described in Chapter 6 in connection with the quantum double consists
+ of pointed Hopf algebras. We note the quantized enveloping algebras
+ described Hopf algebras. Thus for many reasons pointed bialgebras are
+ elsewhere are pointed of fundamental interest in the study of the
+ quantum Yang-Baxter equation and objects quantum groups.},
+ pagetotal = {300},
+ publisher = {Springer},
+ date = {1997-10-31}
+}
+
+@misc{Paga16,
+ author = "Pagani, Kurt",
+ title = "SurfaceComplex",
+ year = "2016",
+ link = "\url{https://groups.google.com/forum/\#!topic/fricas-devel/FRDGVFsoAKw}",
+ abstract =
+ "This manual describes the FriCAS domains {\bf CellMap} and
+ {\bf SurfaceComplex}. These domains provide methods to compute various
+ differential geometric properties of so-called $p$-surfaces in
+ $\mathbb{R}^n$, a notion which is used by Walter Rudin in his famous
+ {\sl Principles of Mathematical Analysis}.",
+ paper = "Paga16.pdf"
+}
+
+@misc{Paga16a,
+ author = "Pagani, Kurt",
+ title = "DifferentialGeometry1",
+ year = "2016",
+ link = "\url{https://groups.google.com/forum/\#!topic/fricas-devel/FRDGVFsoAKw}",
+ abstract =
+ "This manual describes the FriCAS package {\bf DifferentialGeometry1}.
+ This package combines differential forms and cell mappings to provide
+ methods which compute {\bf pull backs}, {\bf integrals} as well as some
+ other quantities of differential forms living on a surface complex.",
+ paper = "Paga16a.pdf"
+}
+
+@misc{Paga16b,
+ author = "Pagani, Kurt",
+ title = "DifferentialForms",
+ year = "2016",
+ link = "\url{https://groups.google.com/forum/\#!topic/fricas-devel/FRDGVFsoAKw}",
+ abstract = "Reference manual for the package {\bf {\tt DifferentialForms}}",
+ paper = "Paga16b.pdf"
+}
+
+@misc{Whee12,
+ author = "Wheeler, James T.",
+ title = "Differential Forms",
+ year = "2012",
+ month = "September",
+ link = "\url{http://www.physics.usu.edu/Wheeler/ClassicalMechanics/CMDifferentialForms.pdf}",
+ paper = "Whee12.pdf"
+}
+
+@inproceedings{Anai00,
+ author = "Anai, Hirokazu and Weispfenning, Volker",
+ title = "Deciding linear-trigonometric problems",
+ booktitle = "Proc ISSAC'00",
+ publisher = "ACM",
+ isbn = "1-58113-218-2",
+ year = "2000",
+ pages = "14-22",
+ abstract =
+ "In this paper, we present a decision procedure for certain
+ linear-trigonometric problems for the reals and integers formalized in
+ a suitable first-order language. The inputs are restricted to
+ formulas, where all but one of the quantified variables occur linearly
+ and at most one occurs both linearly and in a specific trigonometric
+ function. Moreover we may allow in addition the integer-part operation
+ in formulas. Besides ordinary quantifiers, we allow also counting
+ quantifiers. Furthermore we also determine the qualitative structure
+ of the connected components of the satisfaction set of the mixed
+ linear-trigonometric variable. We also consider the decision of these
+ problems in subfields of the real algebraic numbers.",
+ paper = "Anai00.pdf"
+}
+
+@phdthesis{Arno81,
+ author = {Arnon, Dennis Soul\'e},
+ title = "Algorithms for the Geometry of Semi-algebraic Sets",
+ school = "University of Wisconsin-Madison",
+ year = "1981",
+ abstract =
+ "Let A be a set of polynomials in r variables with integer
+ coefficients. An $A$-invariant cylindrical algebraic decomposition
+ (cad) of $r$-dimensional Euclidean space (G. Collins, Lect. Notes
+ Comp. Sci., 33, Springer-Verlag, 1975, pp 134-183) is a certain
+ cellular decomposition of $r$-space, such that each cell is a
+ semi-algebraic set, the polynomials of $A$ are sign-invariant on
+ each cell, and the cells are arranged into cylinders. The cad
+ algorithm given by Collins provides, among other applications,
+ the fastest known decision procedure for real closed fields, a
+ cellular decomposition algorithm for semi-algebraic sets, and a
+ method of solving nonlinear (polynomial) optimization problems
+ exactly. The time-consuming calculations with real algebraic
+ numbers required by the algorithm have been an obstacle to its
+ implementation and use. The major contribution of this thesis
+ is a new version of the cad algorithm for $r \le 3$, in which
+ one works with maximal connected $A$-invariant collections of
+ cells, in such a way as to often avoid the most time-consuming
+ algebraic number calculations. Essential to this new cad
+ algorithm is an algorithm we present for determination of
+ adjacencies among the cells of a cad. Computer programs for
+ the cad and adjacency algorithms have been written, providing
+ the first complete implementation of a cad algorithm. Empirical
+ data obtained from application of these programs are presented
+ and analyzed."
+}
+
+@techreport{Arno82,
+ author = "Arnon, Dennis S. and Collins, George E. and McCallum, Scott",
+ title = "Cylindrical Algebraic Decomposition I: The Basic Algorithm",
+ year = "1982",
+ institution = "Purdue University",
+ type = "Technical Report",
+ number = "82-427A",
+ link = "\url{https://pdfs.semanticscholar.org/7643/4b54250f05ebf0dcc27c33b7dc250419fb94.pdf}",
+ abstract =
+ "Given a set of r-variate integral polynomials, a {\sl cylindrical
+ algebraic decomposition (cad)} of euclidean r-space $E^r$ partitions
+ $E^r$ into connected subsets compatible with the zeros of the
+ polynomials. Collins gave a
+ cad construction algorithm in 1975, as part of a quantifier elimination
+ procedure for real closed fields. The algorithm has subsequently found
+ diverse applications (optimization, curve display); new applications
+ have been proposed (term rewriting systems, motion planning). In the
+ present two-part paper, we give an algorithm for determining the pairs
+ of adjacent cells in a cad of $E^2$. This capability is often needed
+ in applications. In Part I we describe the essential features of the
+ r-space cad algorithm, to provide a framework for the adjacency algorithm
+ in Part II.",
+ paper = "Arno82.pdf"
+}
+
+@article{Arno84,
+ author = "Arnon, Dennis S. and Collins, George E. and McCallum, Scott",
+ title = "Cylindrical Algebraic Decomposition II: An Adjacency Algorithm
+ for the Plane",
+ year = "1984",
+ journal = "SIAM J. Comput.",
+ volume = "13",
+ number = "4",
+ pages = "878-889",
+ abstract =
+ "Given a set of r-variate integral polynomials, a {\sl cylindrical
+ algebraic decomposition (cad)} of euclidean r-space $E^r$ partitions
+ $E^r$ into connected subsets compatible with the zeros of the
+ polynomials. Each subset is a {\sl cell}. Informally, two cells of
+ a cad are {\sl adjacent} if they touch each other; formally, they are
+ adjacent if their union is connected. In applications of cad's one
+ often wishes to know the adjacent pairs of cells. Previous algorithms
+ for cad construction (such as that given in Part I of this paper) have
+ not actually determined them. We give here in Part II an algorithm
+ which determines the pairs of adjacent cells as it constructs a cad
+ of $E^2$.",
+ paper = "Arno84.pdf"
+}
+
+@article{Arno88,
+ author = "Arnon, D.S. and Mignotte, M.",
+ title = "On Mechanical Quantifier Elimination for Elementary Algebra
+ and Geometry",
+ journal = "J. Symbolic Computation",
+ volume = "5",
+ pages = "237-259",
+ year = "1988",
+ abstract = "
+ We give solutions to two problems of elementary algebra and geometry:
+ (1) find conditions on real numbers $p$, $q$, and $r$ so that the
+ polynomial function $f(x)=x^4+px^2+qx+r$ is nonnegative for all real
+ $x$ and (2) find conditions on real numbers $a$, $b$, and $c$ so that
+ the ellipse $\frac{(x-c)^2}{a^2}+\frac{y^2}{b^2}-1=0$ lies inside the
+ unit circle $y^2+x^2-1=0$. Our solutions are obtained by following the
+ basic outline of the method of quantifier elimination by cylindrical
+ algebraic decomposition (Collins, 1975), but we have developed, and
+ have been considerably aided by, modified versions of certain of its
+ steps. We have found three equally simple but not obviously equivalent
+ solutions for the first problem, illustrating the difficulty of
+ obtaining unique ``simplest'' solutions to quantifier elimination
+ problems of elementary algebra and geometry.",
+ paper = "Arno88.pdf"
+
+}
+
+@misc{Arno88a,
+ author = "Arnon, Dennis and Buchberger, Bruno",
+ title = "Algorithms in Real Algebraic Geometry",
+ publisher = "Academic Press",
+ year = "1988",
+ journal = "Journal of Symbolic Computation"
+}
+
+@article{Arno88b,
+ author = "Arnon, Dennis S. and Collins, George E. and McCallum, Scott",
+ title = "An Adjacency algorithm for cylindrical algebraic decompositions
+ of three-dimensional space",
+ journal = "J. Symbolic Computation",
+ volume = "5",
+ number = "1-2",
+ pages = "163-187",
+ year = "1988",
+ abstract =
+ "Let $A \subset \mathbb{Z}[x_1,\ldots,x_r]$
+ be a finite set. An {\sl A-invariant cylindrical
+ algebraic decomposition (cad)} is a certain partition of $r$-dimensional
+ euclidean space $E^r$ into semi-algebraic cells such that the value of
+ each $A_i \in A$ has constant sign (positive, negative, or zero)
+ throughout each cell. Two cells are adjacent if their union is
+ connected. We give an algorithm that determines the adjacent pairs
+ of cells as it constructs a cad of $E^3$. The general technique
+ employed for $E^3$ adjacency determination is ``projection'' into $E^2$,
+ followed by application of an existing $E^2$ adjacency algorithm
+ (Arnon, Collins, McCallum, 1984). Our algorithm has the following
+ properties: (1) it requires no coordinate changes, and (2) in any
+ cad of $E^1$, $E^2$, or $E^3$ that it builds, the boundary of each cell
+ is a (disjoint) union of lower-dimensional cells.",
+ paper = "Arno88b.pdf"
+}
+
+@article{Arno88c,
+ author = "Arnon, Dennis S.",
+ title = "A bibliography of quantifier elimination for real closed fields",
+ journal = "J. of Symbolic Computation",
+ volume = "5",
+ number = "1-2",
+ pages = "267-274",
+ year = "1988",
+ link =
+ "\url{http://www.sciencedirect.com/science/article/pii/S0747717188800166}",
+ abstract =
+ "A basic collection of literature relating to algorithmic quantifier
+ elimination for real closed fields is assembled",
+ paper = "Arno88c.pdf"
+}
+
+@misc{Bake90,
+ author = "Baker, Henry G.",
+ title = "The Nimble Type Inferencer for Common Lisp-84",
+ link = "\url{http://home.pipeline.com/~hbaker1/TInference.html}",
+ year = "1990",
+ abstract =
+ "We describe a framework and an algorithm for doing type inference
+ analysis on programs written in full Common Lisp-84 (Common Lisp
+ without the CLOS object-oriented extensions). The objective of type
+ inference is to determine tight lattice upper bounds on the range of
+ runtime data types for Common Lisp program variables and
+ temporaries. Depending upon the lattice used, type inference can also
+ provide range analysis information for numeric variables. This lattice
+ upper bound information can be used by an optimizing compiler to
+ choose more restrictive, and hence more efficient, representations for
+ these program variables. Our analysis also produces tighter control
+ flow information, which can be used to eliminate redundant tests which
+ result in dead code. The overall goal of type inference is to
+ mechanically extract from Common Lisp programs the same degree of
+ representation information that is usually provided by the programmer
+ in traditional strongly-typed languages. In this way, we can provide
+ some classes of Common Lisp programs execution time efficiency
+ expected only for more strongly-typed compiled languages.
+
+ The Nimble type inference system follows the traditional
+ lattice/algebraic data flow techniques [Kaplan80], rather than the
+ logical/theorem-proving unification techniques of ML [Milner78]. It
+ can handle polymorphic variables and functions in a natural way, and
+ provides for ``case-based'' analysis that is quite similar to that used
+ intuitively by programmers. Additionally, this inference system can
+ deduce the termination of some simple loops, thus providing
+ surprisingly tight upper lattice bounds for many loop variables.
+
+ By using a higher resolution lattice, more precise typing of primitive
+ functions, polymorphic types and case analysis, the Nimble type
+ inference algorithm can often produce sharper bounds than
+ unification-based type inference techniques. At the present time,
+ however, our treatment of higher-order data structures and functions
+ is not as elegant as that of the unification techniques."
+}
+
+@book{Basu06,
+ author = "Basu, Saugata and Pollack, Richard and
+ Roy, Marie-Francoise",
+ title = "Algorithms in Real Algebraic Geometry",
+ publisher = "Springer",
+ year = "2006",
+ isbn = "978-3-540-33098-1"
+}
+
+@article{Beau07,
+ author = "Beaumont, James C. and Bradford, Russell J. and
+ Davenport, James H. and Phisanbut, Nalina",
+ title = "Testing elementary function identities using CAD",
+ journal = "Applicable Algebra in Engineering, Communication and Computing",
+ year = "2007",
+ volume = "18",
+ number = "6",
+ issn = "0938-1279",
+ publisher = "Springer-Verlag",
+ pages = "513-543",
+ abstract = "
+ One of the problems with manipulating function identities in computer
+ algebra systems is that they often involve functions which are
+ multivalued, whilst most users tend to work with single-valued
+ functions. The problem is that many well-known identities may no
+ longer be true everywhere in the complex plane when working with their
+ single-valued counterparts. Conversely, we cannot ignore them, since
+ in particular contexts they may be valid. We investigate the
+ practicality of a method to verify such identities by means of an
+ experiment; this is based on a set of test examples which one might
+ realistically meet in practice. Essentially, the method works as
+ follows. We decompose the complex plane via means of cylindrical
+ algebraic decomposition into regions with respect to the branch cuts
+ of the functions. We then test the identity numerically at a sample
+ point in the region. The latter step is facilitated by the notion of
+ the {\sl adherence} of a branch cut, which was previously introduced
+ by the authors. In addition to presenting the results of the
+ experiment, we explain how adherence relates to the proposal of
+ {\sl signed zeros} by W. Kahan, and develop this idea further in order to
+ allow us to cover previously untreatable cases. Finally, we discuss
+ other ways to improve upon our general methodology as well as topics
+ for future research.",
+ paper = "Beau07.pdf"
+}
+
+@article{Beno86,
+ author = "Ben-Or, Michael and Kozen, Dexter and Reif, John",
+ title = "The complexity of elementary algebra and geometry",
+ journal = "J. Computer and System Sciences",
+ volume = "32",
+ number = "2",
+ year = "1986",
+ pages = "251-264",
+ abstract =
+ "The theory of real closed fields can be decided in exponential space
+ or parallel exponential time. In fixed dimension, the theory can be
+ decided in NC.",
+ paper = "Beno86.pdf"
+}
+
+@misc{Brad14,
+ author = "Bradford, Russell and Chen, Changbo and Davenport, James H. and
+ England, Matthew and Maza, Marc Moreno and Wilson, David",
+ title = "Truth Table Invariant Cylindrical Algebraic Decomposition by
+ Regular Chains",
+ link = "\url{https://arxiv.org/pdf/1401.6310.pdf}",
+ year = "2014",
+ abstract =
+ "A new algorithm to compute cylindrical algebraic decompositions
+ (CADs) is presented, building on two recent advances. Firstly, the
+ output is truth table invariant (a TTICAD) meaning given formulae have
+ constant truth value on each cell of the decomposition. Secondly, the
+ computation uses regular chains theory to first build a cylindrical
+ decomposition of complex space (CCD) incrementally by polynomial.
+ Significant modification of the regular chains technology was used to
+ achieve the more sophisticated invariance criteria. Experimental
+ results on an implementation in the {\tt RegularChains} Library for Maple
+ verify that combining these advances gives an algorithm superior to
+ its individual components and competitive with the state of the art.",
+ paper = "Brad14.pdf"
+}
+
+@misc{Brad15,
+ author = "Bradford, Russell and Davenport, James H. and England, Matthew and
+ McCallum, Scott",
+ title = "Truth Table Invariant Cylindrical Algebraic Decomposition",
+ link = "\url{https://arxiv.org/pdf/1401.0645.pdf}",
+ year = "2015",
+ abstract =
+ "When using cylindrical algebraic decomposition (CAD) to solve a
+ problem with respect to a set of polynomials, it is likely not the
+ signs of those polynomials that are of paramount importance but rather
+ the truth values of certain quantifier free formulae involving
+ them. This observation motivates our article and definition of a Truth
+ Table Invariant CAD (TTICAD). In ISSAC 2013 the current authors
+ presented an algorithm that can efficiently and directly construct a
+ TTICAD for a list of formulae in which each has an equational
+ constraint. This was achieved by generalising McCallum's theory of
+ reduced projection operators. In this paper we present an extended
+ version of our theory which can be applied to an arbitrary list of
+ formulae, achieving savings if at least one has an equational
+ constraint. We also explain how the theory of reduced projection
+ operators can allow for further improvements to the lifting phase of
+ CAD algorithms, even in the context of a single equational constraint.
+ The algorithm is implemented fully in Maple and we present both
+ promising results from experimentation and a complexity analysis
+ showing the benefits of our contributions.",
+ paper = "Brad15.pdf"
+}
+
+@phdthesis{Brow99,
+ author = "Brown, Christopher W.",
+ title = "Solution Formula Construction for Truth Invariant CADs",
+ school = "University of Delaware",
+ year = "1999",
+ website = "http://www.usna.edu/CS/qepcadweb/B/impl/Implementation.html",
+ link = "\url{http://www.usna.edu/Users/cs/wcbrown/research/thesis.ps.gz}",
+ abstract =
+ "The CAD-based quantifier elimination algorithm takes a formula from
+ the elementary theory of real closed fields as input, and constructs a
+ CAD of the space of the formula's unquantified variables. This
+ decomposition is truth invariant with respect to the input formula,
+ meaning that the formula is either identically true or identically
+ false in each cell of the decomposition. The method determines the
+ truth of the input formula for each cell of the CAD, and then uses the
+ CAD to construct a solution formula -- a quantifier free formula that
+ is equivalent to the input formula. This final phase of the algorithm,
+ the solution formula construction phase, is the focus of this thesis.
+
+ An optimal solution formula construction algorithm would be {\sl
+ complete} -- i.e. applicable to any truth-invariant CAD, would be {\sl
+ efficient}, and would produce {\sl simple} solution formulas. Prior to
+ this thesis, no method was available with even two of these three
+ properties. Several algorithms are presented, all addressing problems
+ related to solution formula construction. In combination, these
+ provide an efficient and complete method for constructing solution
+ formulas that are simple in a variety of ways.
+
+ Algorithms presented in this thesis have been implemented using the
+ SACLIB library, and integrated into QEPCAD, a SACLIB-based
+ implementation of quantifier elimination by CAD. Example computations
+ based on these implementations are discussed.",
+ paper = "Brow99.pdf"
+}
+
+@misc{Brow01,
+ author="Brown, Christopher W.",
+ title="The McCallum projection, lifting, and order-invariance",
+ year="2001",
+ link = "\url{http://www.usna.edu/Users/cs/wcbrown/research/MOTS2001.1.ps.gz}",
+ abstract =
+ "The McCallum Projection for Cylindrical Algebraic Decomposition (CAD)
+ produces a smaller projection factor set than previous projections,
+ however it does not always produce a sign-invariant CAD for the set of
+ input polynomials. Problems may arise when a ($k+1$)-level projection
+ factor vanishes identically over a k-level cell. According to
+ McCallum's paper, when this happens (and $k+1$ is not the highest
+ level in the CAD) we do not know whether the projection is valid,
+ i.e. whether or not a sign-invariant CAD for the set of input
+ polynomials will be produced when lifting is performed in the usual
+ way. When the $k$-level cell in question has dimension 0, McCallum
+ suggests a modification of the lifting method that will ensure the
+ validity of his projection, although to my knowledge this has never
+ been implemented.
+
+ In this paper we give easily computable criteria that often allow us
+ to conclude that McCallum's projection is valid even though a
+ projection factor vanishes identically over a cell. We also improve on
+ McCallum's modified lifting method.
+
+ We've incorporated the ideas contained in the paper into QEPCAD, the
+ most complete implementation of CAD. When McCallum's projection is
+ invalid because of a projection factor not being order-invariant over a
+ region on which it vanishes identically, at least a warning message
+ ought to be issued. Currently, QEPCAD may print warning messages that
+ are not needed, and may fail to print warning messages when they are
+ needed. Our implementation in QEPCAD ensures that warning messages are
+ printed when needed, and reduces the number of times warning messages
+ are printed when not needed. Neither McCallum's modified lifting
+ method nor our improvement of it have been implemented in QEPCAD. The
+ design of the system would make implementing such a feature quite
+ difficult.",
+ paper = "Brow01.pdf"
+}
+
+@article{Brow01a,
+ author = "Brown, Christopher W.",
+ title = "Simple CAD Construction and its Applications",
+ journal = "J. Symbolic Computation",
+ year = "2001",
+ volume = "31",
+ pages = "521-547",
+ abstract =
+ "This paper presents a method for the simplification of truth-invariant
+ cylindrical algebraic decompositions (CADs). Examples are given that
+ demonstrate the usefulness of the method in speeding up the solution
+ formula construction phase of the CAD-based quantifier elimination
+ algorithm. Applications of the method to the construction of
+ truth-invariant CADs for very large quantifier-free formulas and
+ quantifier elimination of non-prenex formulas are also discussed.",
+ paper = "Brow01a.pdf"
+}
+
+@misc{Brow02,
+ author = "Brown, Christopher W.",
+ title = "QEPCAD B -- A program for computing with semi-algebraic sets
+ using CADs",
+ year = "2002",
+ abstract =
+ "This report introduces QEPCAD B, a program for computing with real
+ algebraic sets using cylindrical algebraic decomposition (CAD). QEPCAD
+ B both extends and improves upon the QEPCAD system for quantifier
+ elimination by partial cylindrical algebraic decomposition written by
+ Hoon Hong in the early 1990s. This paper briefly discusses some of the
+ improvements in the implementation of CAD and quantifier elimination
+ via CAD, and provides somewhat more detail on extensions to the system
+ that go beyond quantifier elimination. The author is responsible for
+ most of the extended features of QEPCAD B, but improvements to the
+ basic CAD implementation and to the SACLIB library on which QEPCAD is
+ based are the results of many people's work.",
+ paper = "Brow02.pdf"
+}
+
+@article{Brow11,
+ author = "Brown, Christopher W.",
+ title = "Fast simplifications for Tarski formulas based on monomial
+ inequalities",
+ year = "2011",
+ journal = "Journal of Symbolic Computation",
+ volume = "47",
+ pages = "859-882",
+ abstract =
+ "We define the ``combinatorial part'' of a Tarski formula in which
+ equalities and inequalities are in factored or partially factored
+ form. The combinatorial part of a formula contains only ``monomial
+ inequalities'', which are sign conditions on monomials. We give
+ efficient algorithms for answering some basic questions about
+ conjunctions of monomial inequalities and prove the
+ NP-Completness/Hardness of some others. By simplifying the
+ combinatorial part back to a Tarski formula, we obtain non-trivial
+ simplifications without algebraic operations.",
+ paper = "Brow11.pdf"
+}
+
+@inproceedings{Cann87,
+ author = "Canny, John",
+ title = "A new algebraic method of robot motion planning and real geometry",
+ booktitle = "IEEE Symp. on Foundations of Comp. Sci.",
+ pages = "39-48",
+ year = "1987",
+ abstract =
+ "We present an algorithm which solves the findpath or generalized
+ movers' problem in single exponential sequential time. This is the
+ first algorithm for the problem whose sequential time bound is less
+ than double exponential. In fact, the combinatorial exponent of the
+ algorithm is equal to the number of degrees of freedom, making it
+ worst-case optimal, and equaling or improving the time bounds of many
+ special purpose algorithms. The algorithm accepts a formula for a
+ semi-algebraic set S describing the set of free configurations and
+ produces a one-dimensional skeleton or ``roadmap'' of the set, which is
+ connected within each connected component of S. Additional points may
+ be linked to the roadmap in linear time. Our method draws from results
+ of singularity theory, and in particular makes use of the notion of
+ stratified sets as an efficient alternative to cell decomposition. We
+ introduce an algebraic tool called the multivariate resultant which
+ gives a necessary and sufficient condition for a system of homogeneous
+ polynomials to have a solution, and show that it can be computed in
+ polynomial parallel time. Among the consequences of this result are
+ new methods for quantifier elimination and an improved gap theorem for
+ the absolute value of roots of a system of polynomials.",
+ paper = "Cann87.pdf"
+}
+
+@inproceedings{Cann88,
+ author = "Canny, John",
+ title = "Some algebraic and geometric computations in PSPACE",
+ booktitle = "Proc 20th ACM Symp. on the theory of computing",
+ pages = "460-467",
+ year = "1988",
+ isbn = "0-89791-264-0",
+ link = "\url{http://digitalassets.lib.berkeley.edu/techreports/ucb/text/CSD-88-439.pdf}",
+ abstract =
+ "We give a PSPACE algorithm for determining the signs of multivariate
+ polynomials at the common zeros of a system of polynomial
+ equations. One of the consequences of this result is that the
+ ``Generalized Movers' Problem'' in robotics drops from EXPTIME into
+ PSPACE, and is therefore PSPACE-complete by a previous hardness result
+ [Rei]. We also show that the existential theory of the real numbers
+ can be decided in PSPACE. Other geometric problems that also drop into
+ PSPACE include the 3-d Euclidean Shortest Path Problem, and the ``2-d
+ Asteroid Avoidance Problem'' described in [RS]. Our method combines the
+ theorem of the primitive element from classical algebra with a
+ symbolic polynomial evaluation lemma from [BKR]. A decision problem
+ involving several algebraic numbers is reduced to a problem involving
+ a single algebraic number or primitive element, which rationally
+ generates all the given algebraic numbers.",
+ paper = "Cann88.pdf"
+}
+
+@article{Cann93,
+ author = "Canny, John",
+ title = "Improved algorithms for sign and existential quantifier
+ elimination",
+ journal = "The Computer Journal",
+ volume = "36",
+ pages = "409-418",
+ year = "1993",
+ abstract =
+ "Recently there has been a lot of activity in algorithms that work
+ over real closed fields, and that perform such calculations as
+ quantifier elimination or computing connected components of
+ semi-algebraic sets. A cornerstone of this work is a symbolic sign
+ determination algorithm due to Ben-Or, Kozen and Reif. In this
+ paper we describe a new sign determination method based on the earlier
+ algorithm, but with two advantages: (i) It is faster in the univariate
+ case, and (ii) In the general case, it allows purely symbolic
+ quantifier elimination in pseudo-polynomial time. By purely symbolic,
+ we mean that it is possible to eliminate a quantified variable from a
+ system of polynomials no matter what the coefficient values are. The
+ previous methods required the coefficients to be themselves
+ polynomials in other variables. Our new method allows transcendental
+ functions or derivatives to appear in the coefficients.
+
+ Another corollary of the new sign-determination algorithm is a very
+ simple, practical algorithm for deciding existentially-quantified
+ formulae of the theory of the reals. We present an algorithm that has
+ a bit complexity of $n^{k+1}d^{O(k)}(c log n)^{1+\epsilon}$ randomized, or
+ $n^{k+1}d^{O(k^2)}c^{1+\epsilon}$ deterministic, for any
+ $\epsilon>0$, where $n$ is the number
+ of polynomial constraints in the defining formula, $k$ is the number of
+ variables, $d$ is a bound on the degree, $c$ bounds the bit length of the
+ coefficient. The algorithm makes no general position assumptions, and
+ its constants are much smaller than other recent quantifier
+ elimination methods.",
+ paper = "Cann93.pdf"
+}
+
+@book{Cavi98,
+ author = "Caviness, B. F. and Johnson, J. R.",
+ title = "Quantifier Elimination and Cylindrical Algebraic Decomposition",
+ publisher = "Springer",
+ year = "1998",
+ isbn = "3-211-82794-3",
+ keywords = "axiomref"
+}
+
+@misc{Chen10,
+ author = "Chen, Changbo and Davenport, James H. and May, John P. and
+ Maza, Marc Moreno and Xia, Bican and Xiao, Rong",
+ title = "Triangular Decomposition of Semi-algebraic Systems",
+ year = "2010",
+ link = "\url{https://arxiv.org/pdf/1002.4784.pdf}",
+ abstract =
+ "Regular chains and triangular decompositions are fundamental and
+ well-developed tools for describing the complex solutions of
+ polynomial systems. This paper proposes adaptations of these tools
+ focusing on solutions of the real analogue: semi-algebraic systems.
+
+ We show that any such system can be decomposed into finitely many
+ regular semi-algebraic systems. We propose two specifications of such
+ a decomposition and present corresponding algorithms. Under some
+ assumptions, one type of decomposition can be computed in singly
+ exponential time w.r.t. the number of variables. We implement our
+ algorithms and the experimental results illustrate their
+ effectiveness.",
+ paper = "Chen10.pdf"
+}
+
+@misc{Chen12,
+ author = "Chen, Changbo and Maza, Marc Moreno",
+ title = "An Incremental Algorithm for Computing Cylindrical Algebraic
+ Decompositions",
+ link = "\url{https://arxiv.org/pdf/1210.5543.pdf}",
+ year = "2012",
+ abstract =
+ "In this paper, we propose an incremental algorithm for computing
+ cylindrical algebraic decompositions. The algorithm consists of two
+ parts: computing a complex cylindrical tree and refining this complex
+ tree into a cylindrical tree in real space. The incrementality comes
+ from the first part of the algorithm, where a complex cylindrical tree
+ is constructed by refining a previous complex cylindrical tree with a
+ polynomial constraint. We have implemented our algorithm in Maple. The
+ experimentation shows that the proposed algorithm outperforms existing
+ ones for many examples taken from the literature",
+ paper = "Chen12.pdf"
+}
+
+@article{Coll71,
+ author = "Collins, George E.",
+ title = "The Calculation of Multivariate Polynomial Resultants",
+ journal = "ACM SYMSAC",
+ volume = "18",
+ number = "4",
+ year = "1971",
+ pages = "515-532",
+ abstract =
+ "An efficient algorithm is presented for the exact calculation of
+ resultants of multivariate polynomials with integer coefficients.
+ The algorithm applies modular homomorphisms and the Chinese remainder
+ theorem, evaluation homomorphisms and interpolation, in reducing
+ the problem to resultant calculation for univariate polynomials
+ over GF(p), whereupon a polynomial remainder sequence algorithm is used.
+
+ The computing time of the algorithm is analyzed theoretically as a
+ function of the degrees and coefficient sizes of its inputs. As a very
+ special case, it is shown that when all degrees are equal and the
+ coefficient size is fixed, its computing time is approximately
+ proportional to $\lambda^{2r+1}$, where $\lambda$ is the common
+ degree and $r$ is the number of variables.
+
+ Empirically observed computing times of the algorithm are tabulated
+ for a large number of examples, and other algorithms are compared.
+ Potential application of the algorithm to the solution of systems of
+ polynomial equations is discussed.",
+ paper = "Coll71.pdf"
+}
+
+@article{Coll75,
+ author = "Collins, George E.",
+ title = "Quantifier Elimination for Real Closed Fields by
+ Cylindrical Algebraic Decomposition",
+ year = "1975",
+ journal = "Lecture Notes in Computer Science",
+ volume = "33",
+ pages = "134-183",
+ abstract =
+ "I. Introduction. Tarski in 1948, published a quantifier
+ elimination method for the elementary theory of real closed fields
+ (which he had discovered in 1930). As noted by Tarski, any quantifier
+ elimination method for this theory also provides a decision method,
+ which enables one to decide whether any sentence of the theory is
+ true or false. Since many important and difficult mathematical
+ problems can be expressed in this theory, any computationally
+ feasible quantifier elimination algorithm would be of utmost
+ significance.
+
+ However, it became apparent that Tarski's method required too much
+ computation to be practical except for quite trivial problems.
+ Seidenberg in 1954, described another method which he thought
+ would be more efficient. A third method was published by Cohen in
+ 1969. Some significant improvements of Tarski's method have been
+ made by W. Boge, which are described in a thesis by Holthusen
+
+ This paper describes a completely new method which I discovered in
+ February 1973. This method was presented in a seminar at Stanford
+ University in March 1973 and in abstract form at a symposium at
+ Carnegie-Mellon University in May 1973. In August 1974 a full
+ presentation of the method was delivered at the EUROSAM 74 Conference
+ in Stockholm, and a preliminary version of the present paper was
+ published in the proceedings of that conference.",
+ paper = "Coll75.pdf"
+}
+
+@article{Coll91,
+ author = "Collins, George E. and Hong, Hoon",
+ title = "Partial Cylindrical Algebraic Decomposition for Quantifier
+ Elimination",
+ journal = "J. Symbolic Computation",
+ year = "1991",
+ volume = "12",
+ pages = "299-328",
+ abstract =
+ "The Cylindrical Algebraic Decomposition method (CAD) decomposes $R^r$
+ into regions over which given polynomials have constant signs. An
+ important application of CAD is quantifier elimination in elementary
+ algebra and geometry. In this paper we present a method which
+ intermingles CAD construction with truth evaluation so that parts of
+ the CAD are constructed only as needed to further truth evaluation and
+ aborts CAD construction as soon as no more truth evaluation is needed.
+ The truth evaluation utilizes in an essential way any quantifiers
+ which are present and additionally takes account of atomic formulas
+ from which some variables are absent. Preliminary observations show
+ that the new method is always more efficient than the original, and
+ often significantly more efficient.",
+ paper = "Coll91.pdf"
+}
+
+@inproceedings{Coll98,
+ author = "Collins, George E.",
+ title = "Quantifier Elimination by Cylindrical Algebraic Decomposition --
+ Twenty Years of Progress",
+ booktitle = "Quantifier Elimination and Cylindrical Algebraic
+ Decomposition",
+ isbn = "3-211-82794-3",
+ year = "1998",
+ pages = "8-23",
+ abstract =
+ "The CAD (cylindrical algebraic decomposition) method and its
+ application to QE (quantifier elimination) for ERA (elementary real
+ algebra) was announced by the author in 1973 at Carnegie Mellon
+ University (Collins 1973b). In the twenty years since then several
+ very important improvements have been made to the method which,
+ together with a very large increase in available computational power,
+ have made it possible to solve in seconds or minutes some interesting
+ problems. In the following we survey these improvements and present
+ some of these problems with their solutions."
+}
+
+@article{Coll02,
+ author = "Collins, George E. and Johnson, Jeremy R. and Krandick, Werner",
+ title = "Interval arithmetic in cylindrical algebraic decomposition",
+ journal = "J. Symbolic Computation",
+ volume = "34",
+ number = "2",
+ pages = "145-157",
+ year = "2002",
+ publisher = "Elsevier",
+ abstract =
+ "Cylindrical algebraic decomposition requires many very time consuming
+ operations, including resultant computation, polynomial factorization,
+ algebraic polynomial gcd computation and polynomial real root
+ isolation. We show how the time for algebraic polynomial real root
+ isolation can be greatly reduced by using interval arithmetic instead
+ of exact computation. This substantially reduces the overall time for
+ cylindrical algebraic decomposition.",
+ paper = "Coll02.pdf"
+}
+
+@techreport{Dave85a,
+ author = "Davenport, J.H.",
+ title = "Computer Algebra for Cylindrical Algebraic Decomposition",
+ institution = "NADA Kth Stockholm / Bath Computer Science",
+ link = "\url{http://staff.bath.ac.uk/masjhd/TRITA.pdf}",
+ type = "technical report",
+ number = "88-10",
+ year = "1985",
+ abstract =
+ "This report describes techniques for resolving systems of polynomial
+ equations and inequalities. The general technique is {\sl cylindrical
+ algebraic decomposition}, which decomposes space into a number of
+ regions, on each of which the equations and inequalities have the
+ same sign. Most of the report is spent describing the algebraic and
+ algorithmic pre-requisites (resultants, algebraic numbers, Sturm
+ sequences, etc.), and then describing the method, first in two
+ dimensions and then in an arbitrary number of dimensions.",
+ paper = "Dave85a.pdf"
+}
+
+@techreport{Dolz97a,
+ author = "Dolzmann, Andreas and Sturm, Thomas and
+ Weispfenning, Volker",
+ title = "Real Quantifier Elimination in Practice",
+ type = "technical report",
+ institution = "University of Passau",
+ number = "MIP-9720",
+ year = "1997",
+ abstract =
+ "We give a survey of three implemented real quantifier elimination
+ methods: partial cylindrical algebraic decomposition, virtual
+ substitution of test terms, and a combination of Groebner basis
+ computations with multivariate real root counting. We examine the
+ scope of these implementations for applications in various fields of
+ science, engineering, and economics",
+ paper = "Dolz97a.pdf"
+}
+
+@inproceedings{Dolz04,
+ author = "Dolzmann, Andreas and Seidl, Andreas and Sturm, Thomas",
+ title = "Efficient projection orders for CAD",
+ booktitle = "Proc. ISSAC'04",
+ year = "2004",
+ pages = "111-118",
+ publisher = "ACM",
+ isbn = "1-58113-827-X",
+ abstract =
+ "We introduce an efficient algorithm for determining a suitable
+ projection order for performing cylindrical algebraic
+ decomposition. Our algorithm is motivated by a statistical analysis of
+ comprehensive test set computations. This analysis introduces several
+ measures on both the projection sets and the entire computation, which
+ turn out to be highly correlated. The statistical data also shows that
+ the orders generated by our algorithm are significantly close to optimal.",
+ paper = "Dolz04.pdf"
+}
+
+@misc{Engl16,
+ author = "England, Matthew and Davenport, James H.",
+ title = "Experience with Heuristics, Benchmarks and Standards for
+ Cylindrical Algebraic Decomposition",
+ link = "\url{https://arxiv.org/pdf/1609.09269.pdf}",
+ abstract =
+ "In the paper which inspired the $SC^2$ project, [E. Abraham,
+ Building Bridges between Symbolic Computation and Satisfiability
+ Checking , Proc. ISSAC ’15, pp. 1–6, ACM, 2015] the author identified
+ the use of sophisticated heuristics as a technique that the
+ Satisfiability Checking community excels in and from which it is
+ likely the Symbolic Computation community could learn and prosper. To
+ start this learning process we summarise our experience with heuristic
+ development for the computer algebra algorithm Cylindrical Algebraic
+ Decomposition. We also propose and discuss standards and benchmarks as
+ another area where Symbolic Computation could prosper from
+ Satisfiability Checking expertise, noting that these have been
+ identified as initial actions for the new $SC^2$ community in the CSA
+ project, as described in [E. Abraham et al., $SC^2$: {\sl Satisfiability
+ Checking meets Symbolic Computation (Project Paper)}, Intelligent
+ Computer Mathematics (LNCS 9761), pp. 28–43, Springer, 2015].",
+ paper = "Engl16.pdf"
+}
+
+@InProceedings{Emir04,
+ author = "Emiris, Ioannis Z. and Tsigaridas, Elias P.",
+ title = "Comparing real algebraic numbers of small degree",
+ booktitle = "12th annual European symposium",
+ series = "ESA 2004",
+ year = "2004",
+ isbn = "3-540-23025-4",
+ location = "Bergen, Norway",
+ pages = "652-663",
+ link = "\url{http://www-polsys.lip6.fr/~elias/files//et-esa-04.pdf}",
+ algebra = "\newline\refto{domain RECLOS RealClosure}",
+ abstract =
+ "We study polynomials of degree up to 4 over the rationals or a
+ computable real subfield. Our motivation comes from the need to
+ evaluate predicates in nonlinear computational geometry efficiently
+ and exactly. We show a new method to compare real algebraic numbers by
+ precomputing generalized Sturm sequences, thus avoiding iterative
+ methods; the method, moreover handles all degenerate cases. Our first
+ contribution is the determination of rational isolating points, as
+ functions of the coefficients, between any pair of real roots. Our
+ second contribution is to exploit invariants and Bezoutian
+ subexpressions in writing the sequences, in order to reduce bit
+ complexity. The degree of the tested quantities in the input
+ coefficients is optimal for degree up to 3, and for degree 4 in
+ certain cases. Our methods readily apply to real solving of pairs of
+ quadratic equations, and to sign determination of polynomials over
+ algebraic numbers of degree up to 4. Our third contribution is an
+ implementation in a new module of library SYNAPS v2.1. It improves
+ significantly upon the efficiency of certain publicly available
+ implementations: Rioboo’s approach on AXIOM, the package of
+ Guibas-Karavelas-Russel, and CORE v1.6, MAPLE v9, and SYNAPS
+ v2.0. Some existing limited tests had shown that it is faster than
+ commercial library LEDA v4.5 for quadratic algebraic numbers.",
+ paper = "Emir04.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Engl14,
+ author = "England, Matthew and Wilson, David and Bradford, Russell and
+ Davenport, James H.",
+ title = "Using the Regular Chains Library to build cylindrical algebraic
+ decompositions by projecting and lifting",
+ link = "\url{https://arxiv.org/pdf/1405.6090.pdf}",
+ year = "2014",
+ abstract =
+ "Cylindrical algebraic decomposition (CAD) is an important tool, both
+ for quantifier elimination over the reals and a range of other
+ applications. Traditionally, a CAD is built through a process of
+ projection and lifting to move the problem within Euclidean spaces of
+ changing dimension. Recently, an alternative approach which first
+ decomposes complex space using triangular decomposition before
+ refining to real space has been introduced and implemented within the
+ Regular-Chains Library of Maple. We here describe a freely available
+ package ProjectionCAD which utilises the routines within the
+ RegularChains Library to build CADs by projection and lifting. We
+ detail how the projection and lifting algorithms were modified to
+ allow this, discuss the motivation and survey the functionality of the
+ package.",
+ paper = "Engl14.pdf"
+}
+
+@misc{Engl14a,
+ author = "England, Matthew and Bradford, Russell and Davenport, James H. and
+ Wilson, David",
+ title = "Choosing a variable ordering for truth-table invariant cylindrical
+ algebraic decomposition by incremental triangular decomposition",
+ link = "\url{https://arxiv.org/pdf/1405.6094.pdf}",
+ year = "2014",
+ abstract =
+ "Cylindrical algebraic decomposition (CAD) is a key tool for solving
+ problems in real algebraic geometry and beyond. In recent years a new
+ approach has been developed, where regular chains technology is used
+ to first build a decomposition in complex space. We consider the
+ latest variant of this which builds the complex decomposition
+ incrementally by polynomial and produces CADs on whose cells a
+ sequence of formulae are truth-invariant. Like all CAD algorithms the
+ user must provide a variable ordering which can have a profound impact
+ on the tractability of a problem. We evaluate existing heuristics to
+ help with the choice for this algorithm, suggest improvements and then
+ derive a new heuristic more closely aligned with the mechanics of the
+ new algorithm.",
+ paper = "Engl14a.pdf"
+}
+
+@misc{Engl14b,
+ author = "England, Matthew and Bradford, Russell and Chen, Changbo and
+ Davenport, James H. and Maza, Marc Moreno",
+ title = "Problem formulation for truth-table invariant cylindrical
+ algebraic decomposition by incremental triangular decomposition",
+ link = "\url{https://arxiv.org/pdf/1404.6371.pdf}",
+ year = "2014",
+ abstract =
+ "Cylindrical algebraic decompositions (CADs) are a key tool for
+ solving problems in real algebraic geometry and beyond. We recently
+ presented a new CAD algorithm combining two advances: truth-table
+ invariance, making the CAD invariant with respect to the truth of
+ logical formulae rather than the signs of polynomials; and CAD
+ construction by regular chains technology, where first a complex
+ decomposition is constructed by refining a tree incrementally by
+ constraint. We here consider how best to formulate problems for input
+ to this algorithm. We focus on a choice (not relevant for other CAD
+ algorithms) about the order in which constraints are presented. We
+ develop new heuristics to help make this choice and thus allow the
+ best use of the algorithm in practice. We also consider other choices
+ of problem formulation for CAD, as discussed in CICM 2013, revisiting
+ these in the context of the new algorithm.",
+ paper = "Engl14b.pdf"
+}
+
+@article{Engl15,
+ author = "England, M. and Bradford, R. and Davenport, J. H.",
+ title = "Improving the use of equational constraints in cylindrical
+ algebraic decomposition",
+ journal = "ISSAC 15",
+ year = "2015",
+ series = "LNCS 7961",
+ publisher = "ACM",
+ link = "\url{http://opus.bath.ac.uk/42451/}",
+ abstract = "
+ When building a cylindrical algebraic decomposition (CAD) savings can
+ be made in the presence of an equational constraint (EC): an equation
+ logically implied by a formula.
+
+ The present paper is concerned with how to use multiple ECs,
+ propagating those in the input throughout the projection set. We
+ improve on the approach of McCallum in ISSAC 2001 by using the reduced
+ projection theory to make savings in the lifting phase (both to the
+ polynomials we lift with and the cells lifted over). We demonstrate
+ the benefits with worked examples and a complexity analysis.",
+ paper = "Engl15.pdf"
+}
+
+@techreport{Fitc87,
+ author = "Fitchas, N. and Galligo, A. and Morgenstern, J.",
+ title = {Algorithmes repides en s\'equential et en parallele pour
+ l'\'elimination de quantificateurs en g\'eom\'etrie
+ \'el\'ementaire},
+ type = "technical report",
+ institution = {UER de Math\'ematiques Universite de Paris VII},
+ year = "1987"
+}
+
+@inproceedings{Gonz89,
+ author = "Gonzalez, Laureano and Lombardi, Henri and Recio, Tomas and
+ Roy, Marie-Francoise",
+ title = "Sturm-Habicht sequence",
+ booktitle = "Proc. ACM-SIGSAM 1989",
+ year = "1989",
+ pages = "136-146",
+ isbn = "0-89791-325-6",
+ abstract =
+ "Formal computations with inequalities is a subject of general interest
+ in computer algebra. In particular it is fundamental in the
+ parallelisation of basic algorithms and quantifier elimination for real
+ closed fields ([BKR], [HRS]).
+
+ In $\S{}I$ we give a generalisation of Sturm theorem essentially due to
+ Sylvester which is the key for formal computations with inequalities.
+ Our result is an improvement of previously known results (see [BKR])
+ since no hypotheses have to be made on polynomials.
+
+ In $\S{}II$ we study the subresultant sequence. We precise some of the
+ classical definitions in order to avoid some problems appearing in the
+ paper by Loos ([L]) and study specialisation properties in detail.
+
+ In $\S{}III$ we introduce the Sturm-Habicht sequence, which generalises
+ Habicht's work ([H]). This new sequence obtained automatically from a
+ subresultant sequence has some remarkable properties:
+ \begin{itemize}
+ \item it gives the same information as the Sturm sequence, and this
+ information may be recovered by looking only at its principal
+ coefficients
+ \item it can be computed by ring operations and exact divisions only,
+ in polynomial time in case of integer coefficients, eventually by
+ modular methods
+ \item it has good specialisation properties
+ \end{itemize}
+
+ Finally in $\S{}IV$ we give some information about applications and
+ implementation of the Sturm-Habicht sequence.",
+ paper = "Gonz89.pdf"
+}
+
+@inproceedings{Gonz98,
+ author = "Gonzalez-Vega, L.",
+ title = "A combinatorial algorithm solving some quantifier elimination
+ problems",
+ booktitle = "Quantifier Elimination and Cylindrical Algebraic
+ Decomposition",
+ isbn = "3-211-82794-3",
+ year = "1998",
+ pages = "365-374",
+}
+
+@article{Grig88,
+ author = "Grigor'ev, D. Yu. and Vorobjov, N. N.",
+ title = "Solving systems of polynomial inequalities in subexponential time",
+ journal = "J. Symbolic Computation",
+ volume = "5",
+ number = "1-2",
+ pages = "37-64",
+ year = "1988",
+ abstract =
+ "Let the polynomials $f_1,\ldots,f_k \in \mathbb{Z}[X_1,\ldots,X_n]$
+ have degrees $deg(f_i) < d$ and coefficients of absolute value less
+ than $2^M$. An algorithm is described which decides whether there is a
+ solution of the system $f_1 > 0,\ldots,f_k \ge 0$. In the case of a positive
+ answer the algorithm constructs a certain finite set of solutions
+ (which is, in fact, a representative set for the family of components
+ of connectivity of the set of all real solutions of the system). The
+ algorithm runs in time polynomial in $M(kd)^{n^2}$. The previously
+ known upper time bound for this problem was $(Mkd)^{2^{O(n)}}$.",
+ paper = "Grig88.pdf"
+}
+
+@article{Grig88a,
+ author = "Grigor'ev, D. Yu.",
+ title = "The complexity of deciding Tarski algebra",
+ journal = "J. Symbolic Computation",
+ volume = "5",
+ number = "1-2",
+ pages = "65-108",
+ year = "1988",
+ abstract =
+ "Let a formula of Tarski algebra contain $k$ atomic subformulas of the
+ kind $(f_i \ge 0)$, $1 \le i \le k$, where the polynomials
+ $f_i \in \mathbb{Z}[X_1,\ldots,X_n]$ have degrees $deg(f_i) H_1 > \ldots > H_k=G$. In each step $H_{i-1} > H_i$
+ it checks as a test for $G \le H_i$ whether a relative invariant $k_i
+ \in k[x_1,\ldots,x_n]$ yields a value under the specialization
+ $\varphi : g(x_1,\ldots,x_n) \mapsto g(\alpha_1,\ldots,\alpha_n)$. In
+ implementations this evaluation has been done using $p$-adic
+ [H. Darmon and D. Ford, Commun. Algebra 17, No. 12, 2941-2943 (1989;
+ Zbl 0693.12010)] or numerical (R. Stauduhar [ibid.]; Y. Eichenlaub and
+ M. Olivier [preprint]) approximation of the roots.
+
+ The paper under review presents a new approach which avoids all
+ approximations: If $G \le H_i$ and $H_i$ is maximal in $H_{i-1}$ the
+ invariant $h_i$ is a primitive element of the invariant field
+ $k_i=k(x_1,\ldots,x_n)^{H_i}$ as an extension of
+ $k_{i-1}=K(x_1,\ldots,x_n)^{H_{i-1}}$.
+ The author develops an algorithm to express the specialized values
+ $\varphi(g)$ of elements $g \in k_i$ in terms of $k_{i-1}$ and the
+ specialization $\varphi(h_i)$.
+
+ This algorithm then is applied to the relative resolvent polynomial
+ \[s_i = \prod_a{(y-a(x_1,\ldots,x_n))}\]
+ where $a$ runs through the images of $h_i$ under $H_{i-1}$.
+ It has $y$-coefficients which are in $k_{i-1}$.
+ The algorithm then permits to express the coefficients of the
+ specialization $r_i(y)=\varphi(s_i) \in k[y]$ recursively in the
+ (already known) specializations $\varphi(h_i)$ for $j \le i-1$,
+ using the coefficients of $f$ (as $S_n$-invariants in the roots)
+ as a seed. A root of $r_i(y)$ in the base field then proves that $G$
+ is contained in (a conjugate of) $H_i$, and this value of the root can
+ be used as specialized $\varphi(h_{i+1})$ in the next step of the
+ algorithm. Special care is given to the case when denominators of
+ elements in $k(x_1,\ldots,x_n)$ evaluate to zero after specialization.
+
+ The paper closes with a short discussion of applicability. An
+ implementation using AXIOM and GAP is in process but has not yet been
+ completed.",
+ keywords = "axiomref"
+}
+
+@article{Coli97,
+ author = "Colin, Antoine",
+ title = "Solving a system of algebraic equations with symmetries",
+ journal = "J. Pure Appl. Algebra",
+ volume = "117-118",
+ pages = "195-215",
+ year = "1997",
+ abstract =
+ "Let $(F)$ be a system of $p$ polynomial equations
+ $F_i({\bf X}) \in k[{\bf X}]$, where $k$ is a commutative field and
+ ${\bf X} := (X_1,\cdots,X_n)$ are indeterminates. Let $G$ be a subgroup
+ of $GL_n(k)$. A polynomial $P \in k[{\bf X}]$ (resp. rational function
+ $P \in k({\bf X})$ ) is an invariant of $G$ if and only if for all
+ $A \in G$ we have $A\cdot P = P$. We denote $k[{\bf X}]^G$ by (resp.
+ $k({\bf X})^G$) the algebra of polynomial (resp. rational function)
+ invariants of $G$. If $L$ is another subgroup of $GL_n(k)$ such that
+ $G \subset L$, $P$ is called a primary invariant of $G$ relative to $L$ if
+ and only if $Stab_L(P) = G$ (where $Stab_L(P)$ is the stabilizer of
+ $P$ in $L$).
+
+ The paper describes the algebra of the invariants of a finite group
+ and how to express these invariants in terms of a small number of
+ them, from both the Cohen-Macaulay algebra and the field theory points
+ of view. A method is proposed to solve $(F)$ by expressing it in terms of
+ primary invariants $\Pi_1,\cdots,\Pi_n$
+ (e.g. the elementary symmetric polynomials) and one
+ ``primitive'' secondary invariant.
+
+ The main thrust of the paper is contained in the following theorem.
+ Let $(F)$ be a set of invariants of $G$. Let $L$ be a subgroup of
+ $GL_n(k)$ such that $G \subset L$ and $k({\bf X})^L$ is a purely
+ transcendental extension of $k_i$, let $\Pi_1,\cdots,\Pi_n$ be
+ polynomials such that $k({\bf X})^L = k(\Pi_1,\cdots,\Pi_n)$,
+ and let $\Theta \in k[{\bf X}]^G$ be a primitive polynomial invariant
+ of $G$ relative to $L$.
+ When possible, it is convenient to choose $\Theta$ to be one of the
+ polynomials in $(F)$. – An algorithm is given that allows each polynomial
+ $F_i$ to be expressed as $F_i({\bf X}) = H_i(\Pi_1,\cdots,\Pi_n,\Theta)$,
+ an algebraic fraction in $\Pi_1,\cdots,\Pi_n$ and a polynomial in
+ $\Theta$. Now let $L$ be the minimal polynomial of $\Theta$ over
+ $k[{\bf X}]^L$; we have
+ \[L({\bf X},T)=\prod_{\Theta^{'} \in L\cdot \Theta}(T-\Theta^{'})
+ \in k[{\bf X}]^L[T]\]
+ (where $L$ is called a generic Lagrange resolvent).
+ As $k(\Pi_1,\cdots,\Pi_n)=k({\bf X})^L$, we can write
+ $L({\bf X},T)=H_0(\Pi_1,\cdots,\Pi_n,T)$ where $H_0$ is some
+ rational function. The question
+ $H_0(\Pi_1,\cdots,\Pi_n,\Theta)=0$ is always satisfied because
+ $\Theta$ is a root of $L$. Then, we solve the system of ($p=1$)
+ algebraic equations $H_i(\Pi_1,\cdots,\Pi_n,\Theta)=0$,
+ $0 \le i \le p$ for $\Pi_1,\cdots,\Pi_n,\Theta$ as indeterminates.
+
+ Theorem 1: Let $D \in k[\Pi_1,\cdots,\Pi_n]$ be the LCM of the
+ denominators of all the fractions $H_i$,$0 \le i \le p$ and let
+ $H_i^{'}=DH_i$. For every solution
+ $x:=(x_1,\cdots,x_n)$ of the system $(F)$:$F_i({\bf X})=0$,
+ $1 \le i \le p$, there exists a solution ($\pi_1,\cdots,\pi_n,\Theta$)
+ of the system
+ $(H^{'}):H_i^{'}(\Pi_1,\cdots,\Pi_n,\Theta)=0$, $0 \le i \le p$ such
+ that $x$ is a solution of the system
+ $(P_\pi):\Pi_i({\bf X})=\pi_i$, $1 \le i \le n$ , and of the equation
+ $\Theta({\bf X})=0$. Conversely, for any solution
+ $(\pi_1,\cdots,\pi_n,\theta)$ of the system $(H^{'})$ such that
+ $D(\pi_1,\cdots,\pi_n) \ne 0$, if $x$ is a solution of the system
+ $(P_\pi)$ relative to $(\pi_1,\cdots,\pi_n)$, then there exists
+ some $A \in L$ such that $\Theta(A\cdot x)=\theta$, and then for all
+ $B \in G$, $BA\cdot x$, is a solution of the system $(F)$.
+
+ A slightly more general version of this theorem is also given. The
+ paper then presents an algorithm that applies the theory and has been
+ implemented in AXIOM. It is followed by several examples.",
+ keywords = "axiomref"
+}
+
+@article{Coll82,
+ author = "Collins, G.E. and Mignotte, M. and Winkler, F.",
+ title = "Arithmetic in Basic Algebraic Domains",
+ publisher = "Springer-Verlag",
+ journal = "Computing, Supplement 4",
+ pages = "189-220",
+ year = "1982",
+ abstract =
+ "This chapter is devoted to the arithmetic operations, essentially
+ addition, multiplication, exponentiation, division, gcd calculations
+ and evaluation, on the basic algebraic domains. The algorithms for
+ these basic domains are those most frequently used in any computer
+ algebra system. Therefore the best known algorithms, from a
+ computational point of view, are presented. The basic domains
+ considered here are the rational integers, the rational numbers,
+ integers modulo $m$, Gaussian integers, polynomials, rational
+ functions, power series, finite fields and $p$-adic numbers. Bounds on
+ the maximum, minimum and average computing time ($t^{+},t^{-},t^{*}$) for
+ the various algorithms are given."
+}
+
+@misc{Cohe03,
+ author = "Cohen, Arjeh and Cuypers, H. and Barreiro, Hans and
+ Reinaldo, Ernesto and Sterk, Hans",
+ title = "Interactive Mathematical Documents on the Web",
+ year = "2003",
+ pages = "289-306",
+ editor = "Joswig, M. and Takayama, N.",
+ publisher = "Springer-Verlag, Berlin, Germany",
+ misc = "in Algebra, Geometry and Software Systems",
+ keywords = "axiomref"
+}
+
+@book{Cohe03a,
+ author = "Cohen, Joel S.",
+ title = "Computer algebra and symbolic computation. Mathematical Methods",
+ year = "2003",
+ link = "\url{http://eclass.uth.gr/eclass/modules/document/file.php/MHX102/Cohen\_Computer\_Algebra\_and\_Symbolic\_Computation\_\_Mathematical\_Methods.pdf}",
+ publisher = "A. K. Peters",
+ isbn = "1-56881-159-4",
+ paper = "Cohe03a.pdf",
+ keywords = "axiomref"
+}
+
+@book{Cohe03b,
+ author = "Cohen, Joel S.",
+ title = "Computer algebra and symbolic computation. Elementary Algorithms",
+ year = "2003",
+ publisher = "A. K. Peters",
+ isbn = "1-56881-159-4",
+ paper = "Cohe03b.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Conrxxa,
+ author = "Conrad, Marc and French, Tim and Maple, Carsten and Pott, Sandra",
+ title = "Approaching Inheritance from a Natural Mathematical Perspective
+ and from a Java Driven Viewpoint: a Comparative Review",
+ link = "\url{http://axiom-wiki.newsynthesis.org/public/refs/McTfCmSp-axiom.pdf}",
+ abstract = "
+ It is well-known that few object-oriented programming languages allow
+ objects to change their nature at run-time. There have been a number
+ of reasons presented for this, but it appears that there is a real
+ need for matters to change. In this paper we discuss the need for
+ object-oriented programming languages to reflect the dynamic nature of
+ problems, particularly those arising in a mathematical context. It is
+ from this context that we present a framework that realistically
+ represents the dynamic and evolving characteristic of problems and
+ algorithms.",
+ paper = "Conrxxa.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Conrxxb,
+ author = "Conrad, Marc and French, Tim and Maple, Carsten and Pott, Sandra",
+ title = "Mathematical Use Cases lead naturally to non-standard Inheritance
+ Relationships: How to make them accessible in a mainstream language?",
+ abstract = "
+ Conceptually there is a strong correspondence between Mathematical
+ Reasoning and Object-Oriented techniques. We investigate how the ideas
+ of Method Renaming, Dynamic Inheritance and Interclassing can be used
+ to strengthen this relationship. A discussion is initiated concerning
+ the feasibility of each of these features.",
+ paper = "Conrxxb.pdf",
+ keywords = "axiomref"
+}
+
+@article{Corl00,
+ author = "Corless, Robert M. and Jeffrey, David J. and Watt, Stephen M. and
+ Davenport, James H.",
+ title = "``According to Abramowitz and Stegun'' or
+ arccoth needn't be Uncouth",
+ journal = "SIGSAM Bulletin - Special Issue on OpenMath",
+ volume = "34",
+ number = "2",
+ pages = "58-65",
+ year = "2000",
+ algebra =
+ "\newline\refto{category OM OpenMath}
+ \newline\refto{domain COMPLEX Complex}
+ \newline\refto{domain DFLOAT DoubleFloat}
+ \newline\refto{domain FLOAT Float}
+ \newline\refto{domain FRAC Fraction}
+ \newline\refto{domain INT Integer}
+ \newline\refto{domain LIST List}
+ \newline\refto{domain SINT SingleInteger}
+ \newline\refto{domain STRING String}
+ \newline\refto{domain SYMBOL Symbol}
+ \newline\refto{package OMEXPR ExpressionToOpenMath}
+ \newline\refto{package OMSERVER OpenMathServerPackage}",
+ abstract =
+ "This paper addresses the definitions in OpenMath of the elementary
+ functions. The original OpenMath definitions, like most other sources,
+ simply cite [2] as the definition. We show that this is not adequate,
+ and propose precise definitions, and explore the relationships between
+ these definitions.In particular, we introduce the concept of a couth
+ pair of definitions, e.g. of arcsin and arcsinh, and show that the
+ pair arccot and {\sl arccoth} can be couth.",
+ paper = "Corl00.pdf"
+}
+
+@book{Coxx07,
+ author = "Cox, David and Little, John and O'Shea, Donald",
+ title = "Ideals, varieties and algorithms. An introduction to computational
+ algebraic geometry and commutative algebra",
+ publisher = "Springer",
+ isbn = "978-0-387-35650-1",
+ year = "2007",
+ link = "\url{http://www.dm.unipi.it/~caboara/Misc/Cox,\%20Little,\%20O'Shea\%20-\%20Ideals,\%20varieties\%20and%20algorithms.pdf}",
+ algebra = "\newline\refto{package GB GroebnerPackage}
+ \newline\refto{package PSEUDLIN PseudoLinearNormalForm}
+ \newline\refto{package PGROEB PolyGroebner}
+ \newline\refto{domain DMP DistributedMultivariatePolynomial}
+ \newline\refto{domain GDMP GeneralDistributedMultivariatePolynomial}
+ \newline\refto{domain HDMP HomogeneousDistributedMultivariatePolynomial}",
+ abstract =
+ "Around 1980 two new directions in science and technique came
+ together. One was Buchberger’s algorithms in order to handle Groebner
+ bases in an effective way for solving polynomial equations. The second
+ one was the development of the personal computers. This was the
+ starting point of a computational perspective in commutative algebra
+ and algebraic geometry. In 1991 the three authors invented the first
+ edition of their book as an introduction for undergraduates to some
+ interesting ideas in commutative algebra and algebraic geometry with a
+ strong perspective to practical and computational aspects. A second
+ revised edition appeared in 1996. That means from the very beginning
+ the book provides a bridge for the new, computational aspects in the
+ field of commutative algebra and algebraic geometry.
+
+ To be more precise, the book gives an introduction to Buchberger’s
+ algorithm with applications to syzygies, Hilbert polynomials, primary
+ decompositions. There is an introduction to classical algebraic
+ geometry with applications to the ideal membership problem, solving
+ polynomial equations, and elimination theory. Some more spectacular
+ applications are about robotics, automatic geometric theorem proving,
+ and invariants of finite groups. It seems to the reviewer to carry
+ coals to Newcastle for estimating the importance and usefulness of the
+ book. It should be of some interest to ask how many undergraduates
+ have been introduced to algorithmic aspects of commutative algebra and
+ algebraic geometry following the line of the book. The reviewer will
+ be sure that this will continue in the future too.
+
+ What are the changes to the previous editions? There is a significant
+ shorter proof of the Extension Theorem, see 3.6 in Chapter 3,
+ suggested by A.H.M. Levelt. A major update has been done in Appendix C
+ ``Computer Algebra Systems''. This concerns in the main the section
+ about MAPLE. Some minor updated information concern the use of AXIOM,
+ CoCoA, Macaulay2, Magma, Mathematica, and SINGULAR. This reflects
+ about the recent developments in Computer Algebra Systems. It
+ encourages an interested reader to more practical exercises. The
+ authors have made changes on over 200 pages to enhance clarity and
+ correctness. Many individuals have reported typographical errors and
+ gave the authors feedback on the earlier editions. The book is
+ well-written. The reviewer guesses that it will become more and more
+ difficult to earn 1 dollar (sponsored by the authors) for every new
+ typographical error as it was the case also with the first and second
+ edition. The reviewer is sure that it will be a excellent guide to
+ introduce further undergraduates in the algorithmic aspect of
+ commutative algebra and algebraic geometry.",
+ paper = "Coxx07.pdf",
+ keywords = "axiomref"
+}
+
+@article{Crou95,
+ author = "Crouch, Peter E. and Lamnabhi-Lagarrigue, Francoise and
+ Pinchon, Didier",
+ title = "Some realizations of algorithms for nonlinear input-output systems",
+ journal = "Int. J. Control",
+ volume = "62",
+ number = "4",
+ pages = "941-960",
+ year = "1995",
+ abstract =
+ "The first two authors previously developed an algorithm for
+ constructing a parametrization of the observation space of a nonlinear
+ control system directly from the differential equation representation
+ of the input-output behaviour. This paper extends the previous
+ algorithm by including settings where a set of implicit input-output
+ differential equations is given as well as more general state-space
+ representations in which the controls enter nonlinearly. Various
+ state-space realizations, including bilinear, polynomial and nilpotent
+ approximating realizations are discussed. The final section of the
+ paper sketches the implementation of the algorithm using the symbolic
+ manipulation package AXIOM to find the realizations mentioned above in
+ feasible cases.",
+ keywords = "axiomref"
+}
+
+@misc{Cuyp10,
+ author = "Cuypers, Hans and Hendriks, Maxim and Knopper, Jan Willem",
+ title = "Interactive Geometry inside MathDox",
+ year = "2010",
+ link = "\url{http://www.win.tue.nl/~hansc/MathDox_and_InterGeo_paper.pdf}",
+ paper = "Cuyp10.pdf",
+ keywords = "axiomref"
+}
+
+@inproceedings{Dalm97,
+ author = {Dalmas, St\'ephane and Ga\"etano, Marc and Watt, Stephen},
+ title = "An OpenMath 1.0 Implementation",
+ booktitle = "Proc. 1997 Int. Symp. on Symbolic and Algebraic Computation",
+ series = "ISSAC'97",
+ year = "1997",
+ isbn = "0-89791-875-4",
+ location = "Kihei, Maui, Hawaii, USA",
+ pages = "241-248",
+ numpages = "8",
+ link = "\url{http://doi.acm.org/10.1145/258726.258794}",
+ doi = "10.1145/258726.258794",
+ acmid = "258794",
+ publisher = "ACM, New York, NY USA",
+ keywords = "axiomref"
+}
+
+@inproceedings{Dalm92,
+ author = "Dalmas, Stephane",
+ title = "A polymorphic functional language applied to symbolic computation",
+ year = "1992",
+ booktitle = "Proc. ISSAC 1992",
+ series = "ISSAC 1992",
+ pages = "369-375",
+ isbn = "0-89791-489-9 (soft cover) 0-89791-490-2 (hard cover)",
+ abstract =
+ "The programming language in which to describe mathematical objects
+ and algorithms is a fundamental issue in the design of a symbolic
+ computation system. XFun is a strongly typed functional programming
+ language. Although it was not designed as a specialized language, its
+ sophisticated type system can be successfully applied to describe
+ mathematical objects and structures. After illustrating its main
+ features, the author sketches how it could be applied to symbolic
+ computation. A comparison with Scratchpad II is attempted. XFun seems
+ to exhibit more flexibility simplicity and uniformity.",
+ keywords = "axiomref",
+ beebe = "Dalmas:1992:PFL"
+}
+
+@misc{Daly08,
+ author = "Daly, Timothy",
+ title = "Axiom Computer Algebra System Information Sources",
+ video = "https://www.youtube.com/watch?v=CV8y3UrpadY",
+ year = "2008",
+ keywords = "axiomref"
+}
+
+@misc{Daly88,
+ author = "Daly, Timothy",
+ title = "Axiom in an Educational Setting, Axiom course slide deck",
+ year = "1988",
+ month = "January",
+ keywords = "axiomref"
+}
+
+@article{Daly02,
+ author = "Daly, Timothy",
+ title = "Axiom as open source",
+ journal = "SIGSAM Bulletin",
+ volume = "36",
+ number = "1",
+ pages = "28-28",
+ month = "March",
+ year = "2002",
+ keywords = "axiomref",
+ beebe = "Daly:2002:AOS"
+}
+
+@misc{Daly05,
+ author = "Daly, Timothy",
+ title = "LispNYC Presentation at Trinity",
+ year = "2005",
+ month = "May",
+ day = "10",
+ abstract =
+ "Timothy Daly presents Axiom
+
+ Timothy Daly, published author, academic researcher, open source
+ programmer and lead developer of Axiom will be presenting about his role
+ as the driving force behind Axiom. With over 70 developers and 200
+ researchers worldwide it can best be described as:
+
+ Axiom is a general purpose Computer Algebra system. It is useful
+ for research and development of mathematical algorithms providing
+ a very high level way to express abstract mathematical concepts.
+ The Axiom Library defines over 1,000 strongly-typed mathematical
+ domains and categories.
+
+ Axiom consists of an interpreter and compiler, a browser, a graphical
+ interface, and a new online wiki that allows users to create web pages
+ that inline computations.
+
+ Axiom is built upon Common Lisp.",
+ keywords = "axiomref"
+}
+
+@misc{Dalyxx,
+ author = "Daly, Timothy",
+ title = "Tim Daly on Lisp in Industry",
+ link = "\url{https://news.ycombinator.com/item?id=1580904}",
+ keywords = "axiomref"
+}
+
+@misc{Daly13a,
+ author = "Daly, Timothy and Barnes, Nick",
+ title = "Ten reasons you must publish your code",
+ year = "2013",
+ link = "\url{http://climatecode.org/blog/2013/07/ten-reasons-you-must-publish-your-code/}",
+ keywords = "axiomref"
+}
+
+@techreport{Dave92e,
+ author = "Davenport, James H.",
+ title = "The AXIOM System",
+ type = "technical report",
+ institution = "Numerical Algorithms Group, Oxford, U.K.",
+ number = "TR5/92",
+ year = "1992",
+ keywords = "axiomref"
+}
+
+@misc{Dave99,
+ author = "Davenport, James",
+ title = "A Small OpenMath Type System",
+ year = "1999",
+ link = "\url{https://www.openmath.org/standard/sts.pdf}",
+ paper = "Dave99.pdf"
+}
+
+@incollection{Dave05,
+ author = "Davenport, James H.",
+ title = "Integration -- What do we want from the theory?",
+ booktitle = "Computer Algebra",
+ publisher = "Springer",
+ series = "Lecture Notes in Computer Science 162",
+ pages = "2-11",
+ year = "2005",
+ abstract =
+ "The theory of integration has moved a long way in the last fourteen
+ years, though not far enough to satisfy the demands placed on it by
+ its customers. This paper outlines what problems have yet to be solved,
+ and tries to explain why they are not trivial."
+}
+
+@techreport{Dave80,
+ author = "Davenport, James H. and Jenks, Richard D.",
+ title = "MODLISP: A Preliminary Design",
+ institution = "IBM Research",
+ type = "Research Report",
+ year = "1980",
+ number = "RC 8073",
+ keywords = "axiomref"
+}
+
+@techreport{Dave80a,
+ author = "Davenport, James H. and Jenks, Richard D.",
+ title = "MODLISP",
+ institution = "IBM Research",
+ type = "Research Report",
+ year = "1980",
+ number = "RC 8537 (\#37198)",
+ comment = "http://www.computerhistory.org/collections/catalog/102719109",
+ keywords = "axiomref"
+}
+
+@article{Dave81a,
+ author = "Davenport, James H. and Jenks, Richard D.",
+ title = "MODLISP",
+ year = "1981",
+ journal = "ACM SIGSAM Bulletin",
+ volume = "15",
+ number = "1",
+ pages = "11-20",
+ publisher = "ACM",
+ abstract =
+ "This paper discusses the design and implementation of MODLISP, a
+ LISP-like language enhanced with the idea of MODes. This extension
+ permits, but does not require, the user to declare the types of
+ various variables, and to compile functions with the arguments
+ declared to be of a particular type. It is possible to declare several
+ functions of the same name, with arguments of different type
+ (e.g. PLUS could be declared for Integer arguments, or Rational, or
+ Real, or even Polynomial arguments) and the system will apply the
+ correct function for the types of the arguments.",
+ keywords = "axiomref"
+}
+
+@manual{Dave84,
+ author = "Davenport, James H. and Gianni, Patrizia and Jenks, Richard D. and
+ Miller, Victor and Morrison, Scott C. and Rothstein, Michael and
+ Sundaresan, Christine and Sutor, Robert S. and Trager, Barry M.",
+ title = "Scratchpad",
+ organization = "Mathematical Sciences Department",
+ address = "IBM Thomas Watson Research Center, Yorktown Heights, NY",
+ year = "1984",
+ keywords = "axiomref",
+ beebe = "Davenport:1984:S"
+}
+
+@misc{Dave84a,
+ author = "Davenport, James H.",
+ title = "A New Algebra System",
+ link = "\url{http://axiom-wiki.newsynthesis.org/public/refs/Davenport-1984-a\_new\_algebra\_system.pdf}",
+ abstract = "Seminal internal paper discussing Axiom design decisions.",
+ paper = "Dave84a.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Dave84b,
+ author = "Davenport, James H. and Gianni, Patrizia and Jenks, Richard D. and
+ Miller, Victor and Morrison, Scott and Rothstein, Michael and
+ Sundaresan, Christine J. and Sutor, Robert S. and Trager, Barry",
+ title = "SCRATCHPAD System Programming Language Manual",
+ year = "1984",
+ keywords = "axiomref"
+}
+
+@article{Dave85,
+ author = "Davenport, James H.",
+ title = "The LISP/VM Foundation of Scratchpad II",
+ journal = "The Scratchpad II Newsletter",
+ volume = "1",
+ number = "1",
+ year = "1985",
+ month = "September",
+ institution = "IBM Research",
+ keywords = "axiomref"
+}
+
+@book{Dave88,
+ author = "Davenport, James H. and Siret, Y. and Tournier, E.",
+ title = "Computer Algebra: Systems and Algorithms for Algebraic
+ Computation",
+ publisher = "Academic Press",
+ year = "1988",
+ isbn = "0-12-204230-1",
+ link = "\url{http://staff.bath.ac.uk/masjhd/masternew.pdf}",
+ paper = "Dave88.pdf",
+ keywords = "axiomref",
+ beebe = "Davenport:1988:CA"
+}
+
+@techreport{Dave89,
+ author = "Davenport, James H.",
+ title = "Looking at a set of equations",
+ institution = "University of Bath, School of Mathematical Sciences",
+ year = "1989",
+ type = "technical report",
+ link = "\url{http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.261.767}",
+ abstract =
+ "This working paper describes our experiences with using the
+ Groebner-basis method [Buchberger, 1985] to solve some related systems
+ of polynomial equations. While we have not yet been able to solve the
+ system that was our primary motivation, we feel that these experiences
+ may prove useful to others investigating Buchberger's algorithm in
+ this context, especially when, as is the case for the system under
+ investigation, the equations are highly structured. We conclude with
+ some examples of the polynomials that we factored in the course of
+ this investigation.",
+ paper = "Dave89.pdf"
+}
+
+@misc{Dave93,
+ author = "Davenport, James H.",
+ title = "The PoSSo Project",
+ paper = "Dave93.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Dave00,
+ author = "Davenport, James H.",
+ title = "Abstract data types in computer algebra",
+ booktitle = "Mathematical foundations of computer science",
+ series = "MFCS 2000",
+ year = "2000",
+ location = "Bratislava, Slovakia",
+ pages = "21-35",
+ abstract =
+ "The theory of abstract data types was developed in the late 1970s and
+ the 1980s by several people, including the ``ADJ'' group, whose work
+ influenced the design of Axiom. One practical manifestation of this
+ theory was the OBJ-3 system. An area of computing that cries out for
+ this approach is computer algebra, where the objects of discourse are
+ mathematical, generally satisfying various algebraic rules. There have
+ been various theoretical studies of this in the literature. The aim of
+ this paper is to report on the practical applications of this theory
+ within computer algebra, and also to outline some of the theoretical
+ issues raised by this practical application. We also give a
+ substantial bibliography.",
+ keywords = "axiomref"
+}
+
+@article{Dave02,
+ author = "Davenport, James H.",
+ title = "Equality in computer algebra and beyond",
+ journal = "J. Symbolic Computing",
+ volume = "34",
+ number = "4",
+ pages = "259-270",
+ year = "2002",
+ link =
+ "\url{http://www.calculemus.net/meetings/siena01/Papers/Davenport.pdf}",
+ abstract =
+ "Equality is such a fundamental concept in mathematics that, in
+ fact, we seldom explore it in detail, and tend to regard it as
+ trivial. When it is shown to be non-trivial, we are often
+ surprised. As is often the case, the computerization of
+ mathematical computation in computer algebra systems on the one
+ hand, and mathematical reasoning in theorem provers on the other
+ hand, forces us to explore the issue of equality in greater
+ detail. In practice, there are also several ambiguities in the
+ definition of equality. For example, we refer to $\mathbb{Q}(x)$
+ as ``rational functions'', even though $\frac{x^2-1}{x-1}$ and
+ $x+1$ are not equal as functions from $\mathbb{R}$ to
+ $\mathbb{R}$, since the former is not defined at $x=1$, even
+ though they are equal as elements of $\mathbb{Q}(x)$. The aim of
+ this paper is to point out some of the problems, both with
+ mathematical equality and with data structure equality, and to
+ explain how necessary it is to keep a clear distinction between the two.",
+ paper = "Dave02.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Dave07,
+ author = "Davenport, James H. and Fitch, John",
+ title = "Computer Algebra and the three 'E's: Efficiency, Elegance, and
+ Expressiveness",
+ link = "\url{http://staff.bath.ac.uk/masjhd/Drafts/PLMMS2007}",
+ abstract =
+ "What author of a programming language would not claim that the 3 'E's
+ were the goals? Nevertheless, we claim that computer algebra does lead
+ to particular emphases, and constraints, in these areas.
+
+ We restrict ``efficiency'' to mean machine efficiency, since the other
+ 'E's cover programmer efficiency. For the sake of clarity, we describe
+ as ``expressiveness'', what can be expressed in the language, and
+ ``elegance'' as how it can be expressed.",
+ paper = "Dave07.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Dave90,
+ author = "Davenport, James H. and Trager, Barry M.",
+ title = "Scratchpad's view of algebra I: Basic commutative algebra",
+ booktitle = "Design and Implementation of Symbolic Computation Systems",
+ year = "1990",
+ pages = "40-54",
+ series = "DISCO '90",
+ location = "Capri, Italy",
+ publisher = "Springer-Verlag",
+ isbn = "0-387-52531-9",
+ link = "\url{http://opus.bath.ac.uk/32336/1/Davenport\_DISCO\_1990.pdf}",
+ comment = "AXIOM Technical Report, ATR/1, NAG Ltd., Oxford, 1992",
+ keywords = "axiomref",
+ abstract =
+ "While computer algebra systems have dealt with polynomials and
+ rational functions with integer coefficients for many years, dealing
+ with more general constructs from commutative algebra is a more recent
+ problem. In this paper we explain how one system solves this problem,
+ what types and operators it is necessary to introduce and, in short,
+ how one can construct a computational theory of commutative
+ algebra. Of necessity, such a theory is rather different from the
+ conventional, non-constructive, theory. It is also somewhat different
+ from the theories of Seidenberg [1974] and his school, who are not
+ particularly concerned with practical questions of efficiency.",
+ paper = "Dave90.pdf",
+ keywords = "axiomref",
+ beebe = "Davenport:1990:SVA"
+}
+
+@inproceedings{Dave91,
+ author = "Davenport, J. H. and Gianni, P. and Trager, B. M.",
+ title = "Scratchpad's View of Algebra II:
+ A Categorical View of Factorization",
+ booktitle = "Proc. 1991 Int. Symp. on Symbolic and Algebraic Computation",
+ series = "ISSAC '91",
+ year = "1991",
+ isbn = "0-89791-437-6",
+ location = "Bonn, West Germany",
+ pages = "32--38",
+ numpages = "7",
+ link = "\url{http://doi.acm.org/10.1145/120694.120699}",
+ doi = "10.1145/120694.120699",
+ acmid = "120699",
+ publisher = "ACM",
+ address = "New York, NY, USA",
+ abstract = "
+ This paper explains how Scratchpad solves the problem of presenting a
+ categorical view of factorization in unique factorization domains,
+ i.e. a view which can be propagated by functors such as
+ SparseUnivariatePolynomial or Fraction. This is not easy, as the
+ constructive version of the classical concept of
+ UniqueFactorizationDomain cannot be so propagated. The solution
+ adopted is based largely on Seidenberg's conditions (F) and (P), but
+ there are several additional points that have to be borne in mind to
+ produce reasonably efficient algorithms in the required generality.
+
+ The consequence of the algorithms and interfaces presented in this
+ paper is that Scratchpad can factorize in any extension of the
+ integers or finite fields by any combination of polynomial, fraction
+ and algebraic extensions: a capability far more general than any other
+ computer algebra system possesses. The solution is not perfect: for
+ example we cannot use these general constructions to factorize
+ polynomials in $\overline{Z[\sqrt{-5}]}[x]$ since the domain
+ $Z[\sqrt{-5}]$ is not a unique factorization domain, even though
+ $\overline{Z[\sqrt{-5}]}$ is, since it is a field. Of course, we can
+ factor polynomials in $\overline{Z}[\sqrt{-5}][x]$",
+ paper = "Dave91.pdf",
+ keywords = "axiomref",
+ beebe = "Davenport:1991:SVA"
+}
+
+@techreport{Dave92c,
+ author = "Davenport, James H. and Trager, Barry M.",
+ title = "Scratchpad's view of algebra I: Basic commutative algebra",
+ number = "TR3/92 (ATR/1) (NP2490)",
+ institution = "Numerical Algorithm Group (NAG) Ltd.",
+ year = "1992",
+ abstract =
+ "While computer algebra systems have dealt with polynomials and
+ rational functions with integer coefficients for many years, dealing
+ with more general constructs from commutative algebra is a more recent
+ problem. In this paper we explain how one system solves this problem,
+ what types and operators it is necessary to introduce and, in short,
+ how one can construct a computational theory of commutative
+ algebra. Of necessity, such a theory is rather different from the
+ conventional, non-constructive, theory. It is also somewhat different
+ from the theories of Seidenberg [1974] and his school, who are not
+ particularly concerned with practical questions of efficiency.",
+ paper = "Dave90.pdf",
+ keywords = "axiomref",
+ beebe = "Davenport:1992:SVAa"
+}
+
+@techreport{Dave92d,
+ author = "Davenport, James H. and Gianni, Patrizia and Trager, Barry M.",
+ title = "Scratchpad's view of algebra II:
+ A categorical view of factorization",
+ type = "Technical Report",
+ number = "TR4/92 (ATR/2) (NP2491)",
+ institution = "Numerical Algorithms Group, Inc.",
+ address = "Downer's Grove, IL, USA and Oxford, UK",
+ year = "1992",
+ link = "\url{http://www.nag.co.uk/doc/TechRep/axiomtr.html}",
+ abstract = "
+ This paper explains how Scratchpad solves the problem of presenting a
+ categorical view of factorization in unique factorization domains,
+ i.e. a view which can be propagated by functors such as
+ SparseUnivariatePolynomial or Fraction. This is not easy, as the
+ constructive version of the classical concept of
+ UniqueFactorizationDomain cannot be so propagated. The solution
+ adopted is based largely on Seidenberg's conditions (F) and (P), but
+ there are several additional points that have to be borne in mind to
+ produce reasonably efficient algorithms in the required generality.
+
+ The consequence of the algorithms and interfaces presented in this
+ paper is that Scratchpad can factorize in any extension of the
+ integers or finite fields by any combination of polynomial, fraction
+ and algebraic extensions: a capability far more general than any other
+ computer algebra system possesses. The solution is not perfect: for
+ example we cannot use these general constructions to factorize
+ polynomials in $\overline{Z[\sqrt{-5}]}[x]$ since the domain
+ $Z[\sqrt{-5}]$ is not a unique factorization domain, even though
+ $\overline{Z[\sqrt{-5}]}$ is, since it is a field. Of course, we can
+ factor polynomials in $\overline{Z}[\sqrt{-5}][x]$",
+ paper = "Dave91.pdf",
+ keywords = "axiomref",
+ beebe = "Davenport:1992:SVAb"
+}
+
+@techreport{Dave92a,
+ author = "Davenport, James H.",
+ title = "The AXIOM system",
+ type = "technical report",
+ number = "TR5/92 (ATR/3) (NP2492)",
+ institution = "Numerical Algorithms Group, Inc.",
+ year = "1992",
+ abstract =
+ "AXIOM is a computer algebra system superficially like many others,
+ but fundamentally different in its internal construction, and
+ therefore in the possibilities it offers to its users. In these
+ lecture notes, we will
+ \begin{itemize}
+ \item outline the high-level design of the AXIOM kernel and the AXIOM type
+ system,
+ \item explain some of the algebraic facilities implemented in AXIOM,
+ which may be more general than the reader is used to,
+ \item show how the type system and the information system interact,
+ \item give some references to the literature on particular aspects of
+ AXIOM and,
+ \item suggest the way forward.
+ \end{itemize}",
+ paper = "Dave92a.pdf",
+ keywords = "axiomref",
+ beebe = "Davenport:1992:AS"
+}
+
+@techreport{Dave92b,
+ author = "Davenport, James H.",
+ title = "How does one program in the AXIOM system?",
+ institution = "Numerical Algorithms Group, Inc.",
+ year = "1992",
+ type = "technical report",
+ number = "TR6/92 (ATR/4) (NP2493)",
+ link = "\url{http://www.nag.co.uk/doc/TechRep/axiomtr.html}",
+ abstract =
+ "Axiom is a computer algebra system superficially like many others, but
+ fundamentally different in its internal construction, and therefore in
+ the possibilities it offers to its users and programmers. In these
+ lecture notes, we will explain, by example, the methodology that the
+ author uses for programming substantial bits of mathematics in Axiom.",
+ paper = "Dave92b.pdf",
+ keywords = "axiomref",
+ beebe = "Davenport:1992:HDO"
+}
+
+@inproceedings{Dave92,
+ author = "Davenport, James H.",
+ title = "Primality Testing Revisited",
+ link = "\url{http://staff.bath.ac.uk/masjhd/ISSACs/ISSAC1992.pdf}",
+ booktitle = "Proc. ISSAC 1992",
+ series = "ISSAC 92",
+ publisher = "ACM",
+ pages = "123-129",
+ year = "1992",
+ report = "Technical Report TR2/93 Numerical Algorithms Group, Inc",
+ algebra = "\newline\refto{package PRIMES IntegerPrimesPackage}",
+ abstract =
+ "Rabin's algorithm is commonly used in computer algebra systems and
+ elsewhere for primality testing. This paper presents an experience
+ with this in the Axiom computer algebra system. As a result of this
+ experience, we suggest certain strengthenings of the algorithm.",
+ paper = "Dave92.pdf",
+ keywords = "axiomref",
+ beebe = "Davenport:1993:PTR"
+}
+
+@techreport{Faur00,
+ author = "Faure, Christele and Davenport, James H. and Naciri, Hanane",
+ title = "Multi-Valued Computer Algebra",
+ year = "2000",
+ type = "technical report",
+ institution = "INRIA CAFE",
+ number = "4001",
+ abstract =
+ "One of the main strengths of computer algebra is being able to solve
+ a family of problems with one computation. In order to express not
+ only one problem but a family of problems, one introduces some symbols
+ which are in fact the parameters common to all the problems of the
+ family. The user must be able to understand in which way these
+ parameters affect the result when he looks at the answer. Otherwise it
+ may lead to completely wrong calculations, which when used for
+ numerical applications bring nonsensical answers. This is the case in
+ most current Computer Algebra Systems we know because the form of the
+ answer is never explicitly conditioned by the values of the
+ parameters. The user is not even informed that the given answer may be
+ wrong in some cases; thus computer algebra systems cannot be entirely
+ trustworthy. We have introduced multi-valued expressions called
+ conditional expressions, in which each potential value is associated
+ with a condition on some parameters. This is used, in particular, to
+ capture the situation in integration, where the form of the answer can
+ depend on whether certain quantities are positive, negative or
+ zero. We show that it is also necessary when solving modular linear
+ equations or deducing congruence conditions from complex expressions.",
+ paper = "Faur00.pdf"
+}
+
+@misc{Dave94,
+ author = {Davenport, James and Faure, Christ\'ele},
+ title = "The Unknown in Computer Algebra",
+ link = "\url{http://axiom-wiki.newsynthesis.org/public/refs/TheUnknownInComputerAlgebra.pdf}",
+ year = "1994",
+ abstract = "
+ Computer algebra systems have to deal with the confusion between
+ ``programming variables'' and ``mathematical symbols''. We claim that
+ they should also deal with ``unknowns'', i.e. elements whose values
+ are unknown, but whose type is known. For example, $x^p \ne x$ if $x$
+ is a symbol, but $x^p = x$ if $x \in GF(p)$. We show how we have
+ extended Axiom to deal with this concept.",
+ paper = "Dave94.pdf",
+ keywords = "axiomref"
+}
+
+@article{Dave11,
+ author = "Davenport, James H.",
+ title = "CICM 2011: Conferences on Intelligent Computer Mathematics 2011",
+ journal = "Springer Lecture Notes in Artificial Intelligence 6824",
+ pages = "1-67",
+ link = "\url{http://people.bath.ac.uk/masjhd/Meetings/CICM2011.pdf}",
+ comment = "http://www.springerlink.com/content/978-3-642-22672-4",
+ year = "2011",
+ paper = "Dave11.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Dave15,
+ author = "Davenport, James H.",
+ title = "SIAM AAG 15 and ICIAM 2015",
+ link = "\url{http://people.bath.ac.uk/masjhd/Meetings/AAG-ICIAM15.pdf}",
+ paper = "Dave15.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Deckxx,
+ author = "Decker, Wolfram",
+ title = "Some Introductory Remarks on Computer Algebra",
+ link = "\url{https://www.math.uni-bielefeld.de/~rehmann/ECM/cdrom/3ecm/pdfs/pant3/decker.pdf}",
+ abstract =
+ "Computer algebra is a relatively young but rapidly growing field. In
+ this introductory note to the mini-symposium on computer algebra
+ organized as part of the third European Congress of Mathematics I will
+ not even attempt to address all major streams of research and the many
+ applications of computer algebra. I will concentrate on a few aspects,
+ mostly from a mathematical point of view, and I will discuss a few
+ typical applications in mathematics. I will present a couple of
+ examples which underline the fact that computer algebra systems
+ provide easy access to powerful computing tools. And, I will quote
+ from and refer to a couple of survey papers, textbooks and web-pages
+ which I recommend for further reading.",
+ paper = "Deckxx.pdf",
+ keywords = "axiomref"
+}
+
+@phdthesis{Dell99,
+ author = "Delliere, Stephane",
+ title = {Triangularisation de syst\`emes constructibles. Application \`a
+ l'\'evaluation dynamique},
+ school = {L'Universit\'e de Limoges},
+ year = "1999",
+ link = "\url{http://www.unilim.fr/laco/theses/1999/T1999_03.pdf}",
+ paper = "Dell99.pdf",
+ keywords = "axiomref"
+}
+
+@techreport{Dell00,
+ author = "Delliere, Stephane and Wang, Dongming",
+ title = "Simple Systems and Dynamic Constructible Closure",
+ institution = "Universite de Limoges",
+ year = "2000",
+ type = "technical report",
+ number = "2000-16",
+ link = "\url{http://www.unilim.fr/laco/rapports/2000/R2000\_16.pdf}",
+ abstract =
+ "Dynamic evaluation is a general method for computing with parameters
+ [6, 9]. In 1994, T. Gomez-Diaz implemented the dynamic constructible
+ closure in the scientific computation system Axiom [17]: by simulating
+ dynamic evaluation, it offers the possibility to compute with
+ parameters in a very large way [13]. The outputs of a computation with
+ T. Gomez-Diaz programs are represented by a finite collection of
+ constructible triangular systems defined in [12, definition
+ p.106]. Though there are numerous applications of these programs
+ (notably polynomial system solving with parameters [11], automatic
+ geometric theorem proving [14, 15], computation of Jordan forms with
+ parameters [16]), nobody gives theoretical interest to this kind of
+ triangular systems. The main reason of this phenomenon is that they
+ are defined in [12] within the dynamic evaluation context. On the
+ opposite, most notions of triangular systems (J.F. Ritt-W.T. Wu
+ characteristic sets [24, 28], M. Kalkbrener regular chains [18],
+ D. Lazard triangular sets [20], M. Moreno Maza regular sets [22],
+ D.M. Wang simple systems [26, 27]) are defined in terms of commutative
+ algebra. This problem is at the origin of the work done in [7] where
+ we give a relevant algebraic model of T. Gomez-Diaz systems within
+ commutative algebra terminology. This allows us to relate them to many
+ concepts of triangular systems [7]. Thus, we give interest to the
+ connections with D. Lazard triangular sets in [8]. In a way, this
+ paper is the continuation of this previous work. This time, we study
+ relationships between T. Gomez-Diaz systems and D.M. Wang simple
+ systems. The paper is structured as follows. We have collected in
+ section 2 some needed notations. In section 3, we give all the
+ terminology related to our algebraic model of T. Gomez-Diaz
+ systems. Thus, we define the notion of weak constructible triangular
+ systems and introduce the properties of normalization and
+ squarefreeness. Section 4 is more detailed. First of all, we study a
+ weaker form of normalization called $L$-normalization. Then we give
+ many properties of constructible triangular systems verifying this new
+ notion. We obtain an algebraic and geometric framework which permits,
+ in section 5, to explore the connections between T. Gomez-Diaz systems
+ and D.M. Wang simple systems. In particular, this last section will
+ demonstrate well the importance of our $L$-normalization
+ property. Indeed, we show that simple systems and squarefree
+ $L$-normalized constructible triangular systems are equivalent.",
+ paper = "Dell00.pdf",
+ keywords = "axiomref"
+}
+
+@techreport{Dell00a,
+ author = "Delliere, Stephane",
+ title = {A first course to $D_7$ with examples},
+ institution = "Universite de Limoges",
+ year = "2000",
+ type = "technical report",
+ number = "2000-17",
+ link = "\url{http://www.unilim.fr/laco/rapports/2000/R2000_17.pdf}",
+ paper = "Dell00a.pdf",
+ keywords = "axiomref"
+}
+
+@article{Dell01,
+ author = "Delliere, Stephane",
+ title = "On the links between triangular sets and dynamic constructable
+ closure",
+ journal = "J. Pure Appl. Algebra",
+ volume = "163",
+ number = "1",
+ pages = "49-68",
+ year = "2001",
+ abstract =
+ "Two kinds of triangular systems are studied: normalized triangular
+ polynomial systems (a weaker form of Lazard's triangular sets
+ [D. Lazard, Discrete Appl. Math. 33, No. 1-3, 147-160 (1991; Zbl
+ 0753.13013)] and constructible triangular systems (involved in the
+ dynamic constructible closure programs of T. Gomez-D{\'\i}az [Quelques
+ applications de l'\'evaluation dynamique, Ph.D. Thesis, Universit\'e de
+ Limoges (1994)]. This paper shows that these notions are strongly
+ related. In particular, combining the two points of view
+ (constructible and polynomial) on the subject of square-free
+ conditions, it allows us to effect dramatic improvements in the
+ dynamic constructible closure programs.",
+ keywords = "axiomref"
+}
+
+@InProceedings{Dewa92,
+ author = "Dewar, Michael C.",
+ title = "Using Computer Algebra to Select Numerical Algorithms",
+ booktitle = "Proc. ISSAC 1992",
+ series = "ISSAC 1992",
+ year = "1992",
+ location = "Berkeley, CA",
+ pages = "1-8",
+ algebra =
+ "\newline\refto{domain D01AJFA d01ajfAnnaType}
+ \newline\refto{domain D01AKFA d01akfAnnaType}
+ \newline\refto{domain D01ALFA d01alfAnnaType}
+ \newline\refto{domain D01AMFA d01amfAnnaType}
+ \newline\refto{domain D01ANFA d01anfAnnaType}
+ \newline\refto{domain D01APFA d01apfAnnaType}
+ \newline\refto{domain D01AQFA d01aqfAnnaType}
+ \newline\refto{domain D01ASFA d01asfAnnaType}
+ \newline\refto{domain D01FCFA d01fcfAnnaType}
+ \newline\refto{domain D01GBFA d01gbfAnnaType}
+ \newline\refto{domain D01TRNS d01TransformFunctionType}
+ \newline\refto{domain D02BBFA d02bbfAnnaType}
+ \newline\refto{domain D02BHFA d02bhfAnnaType}
+ \newline\refto{domain D02CJFA d02cjfAnnaType}
+ \newline\refto{domain D02EJFA d02ejfAnnaType}
+ \newline\refto{domain D03EEFA d03eefAnnaType}
+ \newline\refto{domain D03FAFA d03fafAnnaType}
+ \newline\refto{domain E04DGFA e04dgfAnnaType}
+ \newline\refto{domain E04FDFA e04fdfAnnaType}
+ \newline\refto{domain E04GCFA e04gcfAnnaType}
+ \newline\refto{domain E04JAFA e04jafAnnaType}
+ \newline\refto{domain E04MBFA e04mbfAnnaType}
+ \newline\refto{domain E04NAFA e04nafAnnaType}
+ \newline\refto{domain E04UCFA e04ucfAnnaType}
+ \newline\refto{domain NIPROB NumericalIntegrationProblem}
+ \newline\refto{domain ODEPROB NumericalODEProblem}
+ \newline\refto{domain OPTPROB NumericalOptimizationProblem}
+ \newline\refto{domain PDEPROB NumericalPDEProblem}",
+ abstract =
+ "Many real-life problems require a combination of both symbolic and
+ numerical methods for their solution. This has led to the development
+ of integrated, interactive symbolic / numeric packages which use a
+ computer algebra system for the former and a standard subroutine
+ library for the latter. These systems may also be viewed as simplified
+ front-ends to the numerical library. To use these packages, however, a
+ user must be able to select which of the many available routines is
+ the most appropriate for his or her problem, which contrasts with the
+ ``black-box'' style interfaces available in computer algebra
+ systems. This paper describes how a computer algebra system can be
+ used to make this decision, thus providing a much-simplified and more
+ orthogonal interface.",
+ paper = "Dewa92.pdf"
+}
+
+@misc{Dewa95,
+ author = "Dewar, Mike C.",
+ title = "AXIOM and A\#: Current Status and Future Plans",
+ year = "1995",
+ link = "\url{ftp://ftp.inf.ethz.ch/org/cathode/workshops/jan95/abstracts/dewar.ps}",
+ paper = "Dewa95.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Dewa,
+ author = "Dewar, Mike",
+ title = "OpenMath: An Overview",
+ link = "\url{http://www.sigsam.org/bulletin/articles/132/paper1.pdf}",
+ paper = "Dewa.pdf",
+ keywords = "axiomref"
+}
+
+@phdthesis{Diaz06,
+ author = "Diaz, Glauco Alfredo Lopez",
+ title = "Symbolic Methods for Factoring Linear Differential Operators",
+ school = "Johannes Kepler Universitat, Linz",
+ year = "2006",
+ month = "February",
+ abstract =
+ "A survey of symbolic methods for factoring linear differential
+ operators is given. Starting from basic notions -- ring of operators,
+ differential Galois theory -- methods for finding rational and
+ exponential solutions that can provide first order right-hand factors
+ are considered. Subsequently several known algorithms for
+ factorization are presented. These include Singer's eigenring
+ factorization algorithm, factorization via Newton polygons, van
+ Hoeij's methods for local factorization, and an adapted version of
+ Pad\'e approximation.
+
+ In addition a procedure based on pure algebraic methods for factoring
+ second order linear partial differential operators is
+ developed. Splitting an operator of this kind reduces to solving a
+ system of linear algebraic equations. Those solutions which satisfy a
+ certain differential condition, immediately produce linear factors of
+ the operator. The method applies also to operators of third order,
+ thereby resulting in a more complicated system of equations. In
+ contrast to the second order case, differential equations must also be
+ solved, which, in particular cases, are simplified with the aid of
+ characteristic sets.
+
+ Finally, complete decomposition into linear factors of ordinary
+ differential operators of arbitrary order is discussed. A splitting
+ formula is developed, provided that a linear basis of solutions is
+ available. This theoretical representation is valuable in
+ understanding the nature of the classical Beke algorithm and its
+ variants like the algorithm LODEF by Schwarz and the Beke-Bronstein
+ algorithm.",
+ paper = "Diaz06.pdf",
+ keywords = "axiomref"
+}
+
+@article{DiBl95,
+ author = "DiBlasio, Paolo and Temperini, Marco",
+ title = "Subtyping Inheritance and Its Application in Languages for
+ Symbolic Computation Systems",
+ journal = "J. Symbolic Computation",
+ volume = "19",
+ pages = "39-63",
+ year = "1995",
+ abstract =
+ "Application of object-oriented programming techniques to design and
+ implementation of symbolic computation is investigated. We show the
+ significance of certain correctness problems, occurring in programming
+ environments based on specialization inheritance, due to use of method
+ redefinition and polymorphism. We propose a solution to these
+ problems, by defining a mechanism of subtyping inheritance and the
+ prototype of an object-oriented programming language for a symbolic
+ computation system. We devise the subtyping inheritance {\sl ESI
+ (Enhanced Strict Inheritance)} by lifting to programming language
+ constructs a given model of subtyping, which is established by a
+ monotonic (covariant) subtyping rule. Type safeness of language
+ instructions is proved.
+
+ The adoption of {\sl ESI} allows to model method and class
+ specialization in a natural way. The {\sl ESI} mechanism verifies the
+ type correctness of language statements by means of type checking
+ rules and preserves their correctness at run-time by a suitable method
+ lookup algorithm.",
+ paper = "DiBl95.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{DiBl97,
+ author = "DiBlasio, Paolo and Temperini, Marco",
+ title = "On subtyping in languages for symbolic computation systems",
+ booktitle = "Advances in the design of symbolic computation systems",
+ series = "Monographs in Symbolic Computation",
+ year = "1997",
+ publisher = "Springer",
+ pages = "164-178",
+ abstract =
+ "We want to define a strongly typed OOP language suitable as the
+ software development tool of a symbolic computation system, which
+ provides class structure to manage ADTs and supports multiple
+ inheritance to model specialization hierarchies. In this paper, we
+ provide the theoretical background for such a task.",
+ keywords = "axiomref"
+}
+
+@InProceedings{Dicr88,
+ author = "Dicrescenzo, C. and Duval, D.",
+ title = "Algebraic extensions and algebraic closure in Scratchpad II",
+ booktitle = "Proc. ISSAC 1988",
+  series = "ISSAC 1988",
+  year = "1988",
+ pages = "440-446",
+ isbn = "3-540-51084-2",
+ abstract =
+ "Many problems in computer algebra, as well as in high-school
+ exercises, are such that their statement only involves integers but
+ their solution involves complex numbers. For example, the complex
+ numbers $\sqrt{2}$ and $-\sqrt{2}$ appear in the solutions of
+ elementary problems in various domains.
+ \begin{itemize}
+ \item in {\bf integration}:
+ \[\int{\frac{dx}{x^2-2}} = \frac{Log(x-\sqrt{2})}{2\sqrt{2}}
+ +\frac{Log(x-(-\sqrt{2}))}{2(-\sqrt{2})}\]
+ \item in {\bf linear algebra}: the eigenvalues of the matrix
+ \[\left(\begin{array}{cc}
+ 1 & 1\\
+ 1 & -1
+ \end{array}\right) = \sqrt{2} {\rm\ and\ }-\sqrt{2}\]
+ \item in {\bf geometry}: the line $y=x$ intersects the circle
+ $y^2+x^2=1$ at the points
+ \[(\sqrt{2},\sqrt{2}) {\rm\ and\ }(-\sqrt{2},-\sqrt{2})\]
+ \end{itemize}
+ Of course, more ``complicated'' complex numbers appear in more
+ complicated examples.
+
+ But two facts have to be emphasized:
+ \begin{itemize}
+ \item in general, if a problem is stated over the integers (or over
+ the field $\mathbb{Q}$ of rational numbers), the complex numbers that
+ appear are {\sl algebraic} complex numbers, which means that they are
+ roots of some polynomial with rational coefficients, like $\sqrt{2}$
+ and $-\sqrt{2}$ are roots of $T^2-2$.
+ \item Similar problems appear with base fields different from
+ $\mathbb{Q}$. For example finite fields, or fields of rational
+ functions over $\mathbb{Q}$ or over a finite field. The general
+ situation is that a given problem is stated over some ``small field''
+ $K$, and its solution is expressed in an {\sl algebraic closure}
+ $\overline{K}$ of $K$, which means that this solution involves numbers
+ which are roots of polynomials with coefficients in $K$.
+ \end{itemize}
+
+ The aim of this paper is to describe an implementation of an algebraic
+ closure domain constructor in the language Scratchpad II, simply
+ called Scratchpad below. In the first part we analyze the problem, and
+ in the second part we describe a solution based on the D5 system.",
+ keywords = "axiomref",
+ beebe = "Dicrescenzo:1989:AEA"
+}
+
+@misc{Dicr95,
+ author = "Dicrescenzo, C. and Jung, Francoise",
+ title = "COMPASS package",
+ year = "1995",
+ link = "\url{ftp://ftp.inf.ethz.ch/org/cathode/workshops/jan95/abstracts/bronstein.ps}",
+ paper = "Dicr95.pdf",
+ keywords = "axiomref"
+}
+
+@book{Dicr05,
+ author = "Dicrescenzo, C. and Duval, D.",
+ title = "Algebraic extensions and algebraic closure in Scratchpad II",
+ booktitle = "Symbolic and Algebraic Computation",
+ series = "Lecture Notes in Computer Science 358",
+ year = "2005",
+ publisher = "Springer",
+ pages = "440-446",
+ keywords = "axiomref"
+}
+
+@InProceedings{Ding94,
+ author = "Dingle, Adam and Fateman, Richard",
+ title = "Branch Cuts in Computer Algebra",
+ year = "1994",
+ booktitle = "Proc. ISSAC 1994",
+ series = "ISSAC 94",
+ link = "\url{http://www.cs.berkeley.edu/~fateman/papers/ding.ps}",
+ abstract =
+ "Many standard functions, such as the logarithms and square root
+ functions, cannot be defined continuously on the complex
+ plane. Mistaken assumptions about the properties of these functions
+ lead computer algebra systems into various conundrums. We discuss how
+ they can manipulate such functions in a useful fashion.",
+ paper = "Ding94.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Dool98,
+ author = "Dooley, Samuel S.",
+ title = "Coordinating mathematical content and presentation markup in
+ interactive mathematical documents",
+ booktitle = "Proc. ISSAC 1998",
+ series = "ISSAC 98",
+ year = "1998",
+ publisher = "ACM Press",
+ location = "Rostock, Germany",
+ pages = "13-15",
+ abstract =
+ "This paper presents a method for representing mathematical content
+ and presentation markup in interactive mathematical documents that
+ treats each view of the information on a separate and equal
+ footing. By providing extensible, overridable, default mappings from
+ content to presentation in a way that supports efficient mappings back
+ from the presentation to the underlying content, a user interface for
+ an interactive textbook has been implemented where the user interacts
+ with high-quality presentation markup that supports user operations
+ defined in terms of the mathematical content. In addition, the user
+ interface can be insulated from content-specific information, while
+ still being enabled to transfer that information to other programs for
+ computation. This method has been employed to embed interactive
+ mathematical content into the IBM techexplorer Interactive Textbook
+ for Linear Algebra. The issues involved in the implementation of the
+ interactive textbook also shed some light on the problems faced by the
+ MathML working group in representing both presentation and content for
+ mathematics for interactive web documents.",
+ keywords = "axiomref"
+}
+
+@article{Reis11,
+ author = "Dos Reis, Gabriel",
+ title = "Retargeting OpenAxiom to Poly/ML: towards an integrated proof
+ assistants and computer algebra system framework",
+ journal = "Intelligent computer mathematics (MKM 2011)",
+ year = "2011",
+ isbn = "978-3-642-22672-4",
+ link = "\url{https://www.semanticscholar.org/paper/Retargeting-OpenAxiom-to-PolyML-Towards-an-Reis-Matthews/4ce5d85ea8424ced82d}",
+ abstract =
+ "This paper presents an ongoing effort to integrate the AXIOM family
+ of computer algebra systems with Poly/ML-based proof assistants in the
+ same framework. A long-term goal is to make a large set of efficient
+ implementations of algebraic algorithms available to popular proof
+ assistants, and also to bring the power of mechanized formal
+ verification to a family of strongly typed computer algebra systems at
+ a modest cost. Our approach is based on retargeting the code generator
+ of the OpenAxiom compiler to the Poly/ML abstract machine.",
+ paper = "Reis11.pdf",
+ keywords = "axiomref"
+}
+
+@article{Reis06,
+ author = "Dos Reis, Gabriel and Stroustrup, Bjarne",
+ title = "Specifying C++ Concepts",
+ journal = "POPL",
+ publisher = "ACM",
+ year = "2006",
+ link = "\url{http://www.stroustrup.com/popl06.pdf}",
+ abstract =
+ "C++ templates are key to the design of current successful mainstream
+ libraries and systems. They are the basis of programming techniques in
+ diverse areas ranging from conventional general-purpose programming to
+ software for safety-critical embedded systems. Current work on improving
+ templates focuses on the notion of {\sl concepts} (a type system for
+ templates), which promises significantly improved error diagnostics and
+ increased expressive power such as concept-based overloading and function
+ template partial specialization. This paper presents C++ templates with
+ an emphasis on problems related to separate compilation. We consider the
+ problem of how to express concepts in a precise way that is simple enough
+ to be usable by ordinary programmers. In doing so, we expose a few
+ weaknesses of the current specification of the C++ standard library and
+ suggest a far more precise and complete specification. We also present
+ a systematic way of translating our proposed concept definitions, based
+ on use-patterns rather than function signatures, into constraint sets
+ that can serve as convenient basis for concept checking in a compiler.",
+ paper = "Reis06.pdf",
+ keywords = "axiomref"
+}
+
+@article{Reis12,
+ author = "Dos Reis, Gabriel",
+ title = "A System for Axiomatic Programming",
+ journal = "Proc. Conf. on Intelligent Computer Mathematics",
+ publisher = "Springer",
+ year = "2012",
+ link = "\url{http://www.axiomatics.org/~gdr/liz/cicm-2012.pdf}",
+ abstract = "
+ We present the design and implementation of a system for axiomatic
+ programming, and its application to mathematical software
+ construction. Key novelties include a direct support for user-defined
+ axioms establishing local equality between types, and overload
+ resolution based on equational theories and user-defined local
+ axioms. We illustrate uses of axioms, and their organization into
+ concepts, in structured generic programming as practiced in
+ computational mathematical systems.",
+ paper = "Reis12.pdf",
+ keywords = "axiomref"
+}
+
+@phdthesis{Doye97,
+ author = "Doye, Nicolas James",
+ title = "Order Sorted Computer Algebra and Coercions",
+ school = "University of Bath",
+ year = "1997",
+ abstract =
+ "Computer algebra systems are large collections of routines for solving
+ mathematical problems algorithmically, efficiently and above all,
+ symbolically. The more advanced and rigorous computer algebra systems
+ (for example, Axiom) use the concept of strong types based on
+ order-sorted algebra and category theory to ensure that operations are
+ only applied to expressions when they ``make sense''.
+
+ In cases where Axiom uses notions which are not covered by current
+ mathematics we shall present new mathematics which will allow us to
+ prove that all such cases are reducible to cases covered by the
+ current theory. On the other hand, we shall also point out all the
+ cases where Axiom deviates undesirably from the mathematical ideal.
+ Furthermore we shall propose solutions to these deviations.
+
+ Strongly typed systems (especially of mathematics) become unusable
+ unless the system can change the type in a way a user expects. We wish
+ any change expected by a user to be automated, ``natural'', and
+ unique. ``Coercions'' are normally viewed as ``natural type changing
+ maps''. This thesis shall rigorously define the word ``coercion'' in
+ the context of computer algebra systems.
+
+ We shall list some assumptions so that we may prove new results so
+ that all coercions are unique. This concept is called ``coherence''.
+
+ We shall give an algorithm for automatically creating all coercions in
+ type system which adheres to a set of assumptions. We shall prove that
+ this is an algorithm and that it always returns a coercion when one
+ exists. Finally, we present a demonstration implementation of this
+ automated coercion algorithm in Axiom.",
+ paper = "Doye97.pdf",
+ keywords = "axiomref"
+}
+
+@inproceedings{Doye99,
+ author = "Doye, Nicolas James",
+ title = "Automated coercion for Axiom",
+ booktitle = "Proc. ISSAC 1999",
+ pages = "229-235",
+ year = "1999",
+ isbn = "1-58113-073-2",
+ link = "\url{http://www.acm.org/citation.cfm?id=309944}",
+ paper = "Doye99.pdf",
+ keywords = "axiomref",
+ beebe = "Doye:1999:ACA"
+}
+
+@InProceedings{Domi01,
+  author = {Dom\'{\i}nguez, C\'esar and Rubio, Julio},
+ title = "Modeling Inheritance as Coercion in a Symbolic Computation System",
+ booktitle = "Proc. ISSAC 2001",
+ series = "ISSAC 2001",
+ year = "2001",
+ abstract = "
+ In this paper the analysis of the data structures used in a symbolic
+ computation system, called Kenzo, is undertaken. We deal with the
+ specification of the inheritance relationship since Kenzo is an
+ object-oriented system, written in CLOS, the Common Lisp Object
+ System. We focus on a particular case, namely the relationship between
+ simplicial sets and chain complexes, showing how the order-sorted
+ algebraic specifications formalisms can be adapted, through the
+ ``inheritance as coercion'' metaphor, in order to model this Kenzo
+ fragment.",
+ paper = "Domi01.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Drag10,
+ author = "Dragan, Laurentiu and Watt, Stephen",
+ title = "Type Specialization in Aldor",
+ booktitle = "Computer algebra in scientific computing",
+ series = "CASC 2010",
+ year = "2010",
+ location = "Tsakhadzor, Armenia",
+ pages = "73-84",
+ link = "\url{http://www.csd.uwo.ca/~watt/pub/reprints/2010-casc-specdom.pdf}",
+ abstract =
+ "Computer algebra in scientific computation squarely faces the dilemma
+ of natural mathematical expression versus efficiency. While
+ higher-order programming constructs and parametric polymorphism
+ provide a natural and expressive language for mathematical
+ abstractions, they can come at a considerable cost. We investigate how
+ deeply nested type constructions may be optimized to achieve
+ performance similar to that of hand-tuned code written in lower-level
+ languages.",
+ paper = "Drag10.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Duns99b,
+ author = "Dunstan, Martin",
+ title = "An Introduction to Aldor and its Type System",
+ year = "1999",
+ link = "\url{http://www.aldor.org/docs/reports/cfc99/aldor-cfc99.pdf}",
+ comment = "slides",
+ paper = "Duns99b.pdf"
+}
+
+@misc{Dupe95,
+ author = "Dupee, Brian J. and Davenport, James H.",
+ title = "Using Computer Algebra to Choose and Apply Numerical Routines",
+ year = "1995",
+ link = "\url{http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.33.5645}",
+ algebra =
+ "\newline\refto{domain D01AJFA d01ajfAnnaType}
+ \newline\refto{domain D01AKFA d01akfAnnaType}
+ \newline\refto{domain D01ALFA d01alfAnnaType}
+ \newline\refto{domain D01AMFA d01amfAnnaType}
+ \newline\refto{domain D01ANFA d01anfAnnaType}
+ \newline\refto{domain D01APFA d01apfAnnaType}
+ \newline\refto{domain D01AQFA d01aqfAnnaType}
+ \newline\refto{domain D01ASFA d01asfAnnaType}
+ \newline\refto{domain D01FCFA d01fcfAnnaType}
+ \newline\refto{domain D01GBFA d01gbfAnnaType}
+ \newline\refto{domain D01TRNS d01TransformFunctionType}
+ \newline\refto{domain D02BBFA d02bbfAnnaType}
+ \newline\refto{domain D02BHFA d02bhfAnnaType}
+ \newline\refto{domain D02CJFA d02cjfAnnaType}
+ \newline\refto{domain D02EJFA d02ejfAnnaType}
+ \newline\refto{domain D03EEFA d03eefAnnaType}
+ \newline\refto{domain D03FAFA d03fafAnnaType}
+ \newline\refto{domain E04DGFA e04dgfAnnaType}
+ \newline\refto{domain E04FDFA e04fdfAnnaType}
+ \newline\refto{domain E04GCFA e04gcfAnnaType}
+ \newline\refto{domain E04JAFA e04jafAnnaType}
+ \newline\refto{domain E04MBFA e04mbfAnnaType}
+ \newline\refto{domain E04NAFA e04nafAnnaType}
+ \newline\refto{domain E04UCFA e04ucfAnnaType}
+ \newline\refto{domain NIPROB NumericalIntegrationProblem}
+ \newline\refto{domain ODEPROB NumericalODEProblem}
+ \newline\refto{domain OPTPROB NumericalOptimizationProblem}
+ \newline\refto{domain PDEPROB NumericalPDEProblem}",
+ abstract =
+ "In applied mathematics, electronic and chemical engineering, the
+ modelling process can produce a number of mathematical problems which
+ require numerical solutions for which symbolic methods are either not
+ possible or not obvious. With the plethora of numerical library
+ routines for the solution of these problems often the numerical
+ analyst has to answer the question {\sl Which routine to choose?} and
+ {\sl How do I use it?}. Some analysis needs to be carried out before
+ the appropriate routine can be identified, i.e. {\sl How stiff is this
+ ODE?} and {\sl Is this function continuous?}. It may well be the case
+ that more than one routine is applicable to the problem. So the
+ question may become {\sl Which is likely to be the best?}. Such a
+ choice may be critical for both accuracy and efficiency.
+
+ An expert system is thus required to make this choice based on the
+ results of its own analysis of the problem, call the routine and act
+ on the outcome. This may be to put the answer in a relevant form or
+ react to an apparent failure of the chosen routine and thus choose and
+ call an alternative. It should also have sufficient explanation
+ mechanisms to inform on the choice of routine and the reasons for that
+ choice. Much of this work can be achieved using computer algebra and
+ symbolic algebra packages.
+
+ This paper describes an expert system currently in prototype in terms
+ of both its object-based structure and its computational agents. Some
+ of these agents are described in detail, paying particular attention
+ to the practical aspects of their algorithms and the use of computer
+ algebra.
+
+ The {\bf axiom2} Symbolic Algebra System is used as a user interface
+ as well as the link to the NAG Foundation Library for the numerical
+ routines and the inference mechanisms for the expert system.",
+ paper = "Dupe95.pdf",
+ keywords = "axiomref"
+}
+
+@inproceedings{Dupe99,
+ author = "Dupee, Brian J. and Davenport, James H.",
+ title = "An Automatic Symbolic-Numeric Taylor Series ODE Solver",
+ booktitle = "Computer Algebra in Scientific Computing, CASC'99",
+ isbn = "978-3-540-66047-7",
+ pages = "37-50",
+ year = "1999",
+ link = "\url{http://people.eecs.berkeley.edu/~fateman/papers/casc99-34.pdf}",
+ comment = "Contains FORTRAN Code of Taylor Series",
+ algebra = "\newline\refto{package EXPRODE ExpressionSpaceODESolver}",
+ abstract =
+ "One of the basic techniques in every mathematician's toolkit is the
+ Taylor series representation of functions. It is of such fundamental
+ importance and it is so well understood that its use is often a first
+ choice in numerical analysis. This faith has not, unfortunately, been
+ transferred to the design of computer algorithms.
+
+ Approximation by use of Taylor series methods is inherently partly a
+ symbolic process and partly numeric. This aspect has often, with
+ reason, been regarded as a major hindrance in algorithm design. Whilst
+ attempts have been made in the past to build a consistent set of
+ programs for the symbolic and numeric paradigms, these have been
+ necessarily multi-stage processes.
+
+ Using current technology it has at last become possible to integrate
+ these two concepts and build an automatic adaptive symbolic-numeric
+ algorithm within a uniform framework which can hide the internal
+ workings behind a modern interface.",
+ paper = "Dupe99.pdf",
+ keywords = "axiomref"
+}
+
+@inproceedings{Dupe05,
+ author = "Dupee, Brian J. and Davenport, James H.",
+ title = "An Intelligent Interface to Numerical Routines",
+ booktitle = "Design and Implementation of Symbolic Computation Systems",
+ series = "Lecture Notes in Computer Science 1128",
+ pages = "252-262",
+ publisher = "Springer",
+ year = "2005",
+ abstract =
+ "Links from Computer Algebra Systems to Numerical Libraries have been
+ increasingly made available. However, they remain, like the numerical
+ routines which comprise those libraries, difficult to use by a novice
+ and there is little help in choosing the appropriate routine for any
+ given problem, should there be a choice.
+
+ Computer Algebra Systems use generic names for each problem area. For
+ examples, 'integrate' (or 'int') is used for integration of a
+ function, whatever method the code may use. Numeric interfaces still
+ use different names for each method together with a variety of extra
+ parameters, some of which may be optional. Ideally, we should extend
+ the generic name structure to cover numerical routines. This would
+ then, necessarily, require algorithms for making an assessment of the
+ efficacy of different methods where such a choice exists.
+
+ This paper considers the link to the NAG Fortran Library from version
+ 2.0 of Axiom and shows how we can build on this to extend and simplify
+ the interface using an expert system for choosing and using the
+ numerical routines."
+}
+
+@article{Duva92,
+ author = "Duval, Anne and Loday-Richaud, Michele",
+ title = "Kovacic's Algorithm and Its Application to Some Families
+ of Special Functions",
+ journal = "Applicable Algebra in Engineering, Communication, and Computing",
+ series = "AAECC 3",
+ pages = "211-246",
+ year = "1992",
+ publisher = "Springer-Verlag",
+ abstract =
+ "We apply the Kovacic algorithm to some families of special functions,
+ mainly the hypergeometric one and that of Heun, in order to discuss
+ the existence of closed-form solutions. We begin by giving a slightly
+ modified version of the Kovacic algorithm and a sketch proof.",
+ keywords = "axiomref"
+}
+
+@inproceedings{Duva92a,
+ author = "Duval, Dominique and Jung, F.",
+ title = "Examples of problem solving using computer algebra",
+ booktitle = "Programming environments for high-level scientific problem
+ solving",
+ series = "IFIP Transactions",
+ editor = "Gaffney, Patrick W. and Houstis, Elias N.",
+ publisher = "North-Holland",
+ pages = "133-143",
+ year = "1992",
+ keywords = "axiomref",
+ beebe = "Duval:1992:EPS"
+}
+
+@misc{Duva94e,
+ author = "Duval, Dominique",
+ title = "Symbolic or algebraic computation?",
+ booktitle = "Publication du LACO",
+ year = "1995",
+ location = "Madrid Spain",
+ comment = "NAG conference",
+ keywords = "axiomref"
+}
+
+@article{Duva94d,
+ author = "Duval, Dominique and Senechaud, Pascale",
+ title = "Sketches and parametrization",
+ journal = "Theor. Comput. Sci.",
+ volume = "123",
+ number = "1",
+ pages = "117-130",
+ year = "1994",
+ abstract =
+ "The paper deals with problems about conception and design of
+ high-level computer algebra systems. Here we use a categorical
+ approach given by the notion of sketches. Sketches allow to describe
+ computation mechanisms in a syntactic way, well adapted to
+ implementation.
+
+ A computer algebra system must allow the manipulation of algebraic
+ structures, in particular, the construction of new structures from
+ known ones. In the paper we give a definition, at the sketch level, of
+ parametrization of a structure by another one.",
+ keywords = "axiomref"
+}
+
+@article{Duva95,
+ author = "Duval, Dominique",
+ title = "Dynamic evaluation and algebraic closure in Axiom",
+ comment = "Evaluation dynamique et cl\^oture alg\'ebrique en Axiom",
+ journal = "Journal of Pure and Applied Algebra",
+ volume = "99",
+ year = "1995",
+ pages = "267--295",
+ abstract =
+ "Dynamic evaluation allows to compute with algebraic numbers without
+ factorizing polynomials. It also allows to manipulate parameters in a
+ flexible and user-friendly way. The aim of this paper is the
+ following: Explain what is dynamic evaluation, with its basic notions
+ of dynamic set and splitting. Present its application to computations
+ involving algebraic numbers, which amounts to defining the dynamic
+ algebraic closure of a field. Describe the Axiom program which
+ implements this, and give a user guide for it (only this last point
+ assumes some knowledge of Axiom) Dynamic evaluation is described here
+ without any reference to sketch theory, however our presentation, less
+ rigourous, may be considered as more accessible.",
+ paper = "Duva95.pdf",
+ keywords = "axiomref",
+ beebe = "Duval:1995:DEA"
+}
+
+@mastersthesis{ElAl01,
+ author = "El-Alfy, Hazem Mohamed",
+ title = "Computer Algebra and its Applications",
+ school = "Alexandria University, Department of Engineering, Mathematics,
+ and Physics",
+ year = "2001",
+ link = "\url{http://www.umiacs.umd.edu/~helalfy/pub/mscthesis01.pdf}",
+  paper = "ElAl01.pdf",
+ abstract =
+ "In the recent decades, it has been more and more realized that
+ computers are of enormous importance for numerical
+ computations. However, these powerful general-purpose machines can
+ also be used for transforming, combining and computing symbolic
+ algebraic expressions. In other words, computers can not only deal
+ with numbers, but also with abstract symbols representing mathematical
+ formulas. This fact has been realized much later and is only now
+ gaining acceptance among mathematicians and engineers. [Franz Winkler,
+ 1996].
+
+ Computer Algebra is that field of computer science and mathematics,
+ where computation is performed on symbols representing mathematical
+ objects rather than their numeric values.
+
+ This thesis attempts to present a definition of computer algebra by
+ means of a survey of its main topics, together with its major
+ application areas. The survey includes necessary algebraic basics and
+ fundamental algorithms, essential in most computer algebra problems,
+ together with some problems that rely heavily on these algorithms. The
+ set of applications, presented from a range of fields of engineering
+ and science, although very short, indicates the applied nature of
+ computer algebra systems.
+
+ A recent research area, central in most computer algebra software
+ packages and in geometric modeling, is the implicitization
+ problem. Curves and surfaces are naturally represented either
+ parametrically or implicitly. Both forms are important and have their
+ uses, but many design systems start from parametric
+ representations. Implicitization is the process of converting curves
+ and surfaces from parametric form into implicit form.
+
+ We have surveyed the problem of implicitization and investigated its
+ currently available methods. Algorithms for such methods have been
+ devised, implemented and tested for practical examples. In addition, a
+ new method has been devised for curves for which a direct method is
+ not available. The new method has been called {\sl near implicitization}
+ since it relies on an approximation of the input problem. Several
+ variants of the method try to compromise between accuracy and
+ complexity of the designed algorithms.
+
+ The problem of implicitization is an active topic where research is
+ still taking place. Examples of further research points are included
+ in the conclusion.",
+ keywords = "axiomref"
+}
+
+@misc{Ency16,
+ author = "Unknown",
+ title = "Encyclopedia of Mathematics",
+ link = "\url{https://www.encyclopediaofmath.org/index.php/Computer\_algebra\_package}",
+ keywords = "axiomref"
+}
+
+@article{Fakl97,
+ author = "Fakler, Winfried",
+ title = "On second order homogeneous linear differential equations with
+ Liouvillian solutions",
+ journal = "Theor. Comput. Sci.",
+ volume = "187",
+ number = "1-2",
+ pages = "27-48",
+ year = "1997",
+ abstract =
+ "We determine all minimal polynomials for second order homogeneous
+ linear differential equations with algebraic solutions decomposed into
+ invariants and we show how easily one can recover the known conditions
+ on differential Galois groups [J. Kovacic, J. Symb. Comput. 2, 3-43
+ (1986; Zbl 0603.68035), M. F. Singer and F. Ulmer,
+ J. Symb. Comput. 16, 9-36, 37-73 (1993; Zbl 0802.12004, Zbl
+ 0802.12005), F.Ulmer and J. A. Weil, J. Symb. Comput. 22, 179-200
+ (1996; Zbl 0871.12008)] using invariant theory. Applying these
+ conditions and the differential invariants of a differential equation
+ we deduce an alternative method to the algorithms given in (loc. cit.)
+ for computing Liouvillian solutions. For irreducible second order
+ equations our method determines solutions by formulas in all but three
+ cases.",
+ paper = "Fakl97.pdf",
+ keywords = "axiomref"
+}
+
+@article{Farm03,
+ author = "Farmer, William M. and von Mohrenschildt, Martin",
+ title = "An overview of a formal framework for managing mathematics",
+ journal = "Ann. Math. Artif. Intell.",
+ volume = "38",
+ number = "1-3",
+ pages = "165-191",
+ year = "2003",
+ link = "\url{https://www.emis.de/proceedings/MKM2001/farmer.ps}",
+ abstract =
+ "Mathematics is a process of creating, exploring, and connecting
+ mathematical models. This paper presents an overview of a formal
+ framework for managing the mathematics process as well as the
+ mathematical knowledge produced by the process. The central idea of
+ the framework is the notion of a biform theory which is simultaneously
+ an axiomatic theory and an algorithmic theory. Representing a
+ collection of mathematical models, a biform theory provides a formal
+ context for both deduction and computation. The framework includes
+ facilities for deriving theorems via a mixture of deduction and
+ computation, constructing sound deduction and computation rules, and
+ developing networks of biform theories linked by interpretations. The
+ framework is not tied to a specific underlying logic; indeed, it is
+ intended to be used with several background logics
+ simultaneously. Many of the ideas and mechanisms used in the framework
+ are inspired by the IMPS Interactive Mathematical Proof System and the
+ Axiom computer algebra system.",
+ paper = "Farm03.pdf",
+ keywords = "axiomref"
+}
+
+@inproceedings{Fate79,
+ author = "Fateman, Richard J.",
+ title = "MACSYMA's General Simplifier: Philosophy and Operation",
+ booktitle = "Proc. Macsyma Users' Conference 1979",
+ year = "1979",
+ link = "\url{http://people.eecs.berkeley.edu/~fateman/papers/simplifier.txt}",
+ abstract =
+ "Ideally the transformations performed by MACSYMA's simplification
+ program on algebraic expressions correspond to those simplifications
+ desired by each user and each program. Since it is impossible for a
+ program to intuit all users' requirements simultaneously, explicit
+ control of the simplifier is necessary to override default
+ transformations. A model of the simplification process is helpful in
+ controlling this large and complex program.
+
+ Having examined several algebraic simplification programs, it appears
+ that to date no program has been written which combines a conceptually
+ simple and useful view of simplification with a program nearly as
+ powerful as MACSYMA's. {note, 1979. not clear this would be different
+ in 2001. RJF} Rule-directed transformation schemes struggle to
+ approach the power of the varied control structures in more usual
+ program schemes [Fenichel, 68]. {note, Mathematica pushes rules
+ further. RJF}
+
+ It is our belief that a thorough grasp of the decision and data
+ structures of the MACSYMA simplifier program itself is the most direct
+ way of understanding its potential for algebraic expression
+ transformation. This is an unfortunate admission to have to make, but
+ it appears to reflect the state of the art in dealing with
+ formalizations of complex programs. Simplification is a perplexing
+ task. Because of this, we feel it behooves the ``guardians of the
+ simplifier'' to try to meet the concerned MACSYMA users part-way by
+ documenting the program as it has evolved. We hope this paper
+ continues to grow to reflect a reasonably accurate, complete, and
+ current description.
+
+ Of course Lisp program details are available to the curious, but even
+ for those without a working knowledge of the Lisp language (in which
+ the simplifier is written) we expect this paper to be of some help in
+ answering questions which arise perennially as to why MACSYMA deals
+ with some particular class of expressions in some unanticipated
+ fashion, or is inefficient in performing some set of transformations.
+ Most often difficulties such as these are accounted for by implicit
+ design decisions which are not evident from mere descriptions of what
+ is done in the anticipated and usual cases. We also hope that
+ improvements or revisions of the simplifier will benefit from the more
+ centralized treatment of issues given here. We also provide
+ additional commentary which reflects our current outlook on how
+ simplification programs should be written, and what capabilities they
+ should have.",
+ paper = "Fate79.txt",
+ keywords = "axiomref"
+}
+
+@inproceedings{Fate90,
+ author = "Fateman, Richard J.",
+ title = "Advances and trends in the design and construction of algebraic
+ manipulation systems",
+ booktitle = "Proc. ISSAC 1990",
+ publisher = "ACM",
+ pages = "60-67",
+ isbn = "0-89791-401-5",
+ year = "1990",
+ link = "\url{http://people.eecs.berkeley.edu/~fateman/papers/advances.pdf}",
+ abstract =
+ "We compare and contrast several techniques for the implementation of
+ components of an algebraic manipulation system. On one hand is the
+ mathematical-algebraic approach which characterizes (for example)
+ IBM's Axiom. On the other hand is the more {\sl ad hoc} approach which
+ characterizes many other popular systems (for example, Macsyma,
+ Reduce, Maple, and Mathematica). While the algebraic approach has
+ generally positive results, careful examination suggests that there
+ are significant remaining problems, especially in the representation
+ and manipulation of analytical, as opposed to algebraic,
+ mathematics. We describe some of these problems and some general
+ approaches for solutions.",
+ paper = "Fate90.pdf",
+ keywords = "axiomref",
+ beebe = "Fateman:1990:ATD"
+}
+
+@misc{Fate94,
+ author = "Fateman, Richard J.",
+ title = "On the Design and Construction of Algebraic Manipulation Systems",
+ link = "\url{http://www.cs.berkeley.edu/~fateman/papers/asmerev94.ps}",
+ abstract =
+ "We compare and contrast several techniques for the implementation of
+ components of an algebraic manipulation system. On one hand is the
+ mathematical-algebraic approach which characterizes (for example)
+ IBM's Axiom. On the other hand is the more {\sl ad-hoc} approach which
+ characterizes many other popular systems (for example, Macsyma,
+ Reduce, Maple, and Mathematica). While the algebraic approach has
+ generally positive results, careful examination suggests that there
+ are significant remaining problems, especially in the representation
+ and manipulation of analytical, as opposed to algebraic,
+ mathematics. We describe some of these problems and some general
+ approaches for solutions.",
+ paper = "Fate94.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Fate96,
+ author = "Fateman, Richard J.",
+ title = "A Review of Symbolic Solvers",
+ booktitle = "Proc 1996 ISSAC",
+ series = "ISSAC 96",
+ year = "1996",
+ pages = "86-94",
+ link = "\url{http://http.cs.berkeley.edu/~fateman/papers/eval.ps}",
+ abstract =
+ "``Evaluation'' of expressions and programs in a computer algebra
+ system is central to every system, but inevitably fails to provide
+ complete satisfaction. Here we explain the conflicting requirements,
+ describe some solutions from current systems, and propose alternatives
+ that might be preferable sometimes. We give examples primarily from
+ Axiom, Macsyma, Maple, Mathematica, with passing mention of a few other
+ systems.",
+ paper = "Fate96.pdf",
+ keywords = "axiomref"
+}
+
+@inproceedings{Fate97,
+ author = "Fateman, Richard J.",
+ title = "Network Servers for Symbolic Mathematics",
+ booktitle = "Proc. ISSAC 1997",
+ pages = "249-256",
+ year = "1997",
+ isbn = "0-89791-875-4",
+ link = "\url{http://http.cs.berkeley.edu/~fateman/papers/cas-serve.ps}",
+ abstract =
+ "We describe advantages to using network socket facilities for
+ communication and distributed computing from the perspective of
+ symbolic mathematics systems. For some applications, an easily
+ constructed Lisp server model provides a flexible portal between
+ computer algebra and other programs, and one need not use new
+ languages or new systems or write new stand-alone web-specific cgi-bin
+ applications. Such socket programs can use, if necessary, HTML as a
+ common transport encoding, but more efficient means are possible. We
+ show that, rather than distributing all information to each computer
+ algebra user's system, it makes sense to consider networking for
+ accessing tables of information maintained at one or a few
+ sites. Finally, we mention some consequences of the economic value of
+ computation.",
+ paper = "Fate97.pdf",
+ keywords = "axiomref"
+}
+
+@inproceedings{Fate99,
+ author = "Fateman, Richard J.",
+ title = "Symbolic mathematics system evaluators",
+ booktitle = "Proc. ISSAC 1996",
+ pages = "86-94",
+ year = "1999",
+ link = "\url{http://people.eecs.berkeley.edu/~fateman/papers/evalnew.pdf}",
+ abstract =
+ "``Evaluation'' of expressions and programs in a computer algebra
+ system is central to every system, but inevitably fails to provide
+ complete satisfaction. Here we explain the conflicting requirements,
+ describe some solutions from current systems, and propose alternatives
+ that might be preferable sometimes. We give examples primarily from
+ Axiom, Macsyma, Maple, Mathematica, with passing mention of a few other
+ systems.",
+ paper = "Fate99.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Fate99a,
+ author = "Fateman, Richard J. and Caspi, Eylon",
+ title = "Parsing TeX into Mathematics",
+ year = "1999",
+ link = "\url{http://lib.org.by/\_djvu/\_Papers/Computer\_algebra/CAS%20systems/}",
+ abstract =
+ "Communication, storage, transmission, and searching of complex
+ material has become increasingly important. Mathematical computing in
+ a distributed environment is also becoming more plausible as libraries
+ and computing facilities are connected with each other and with user
+ facilities. TeX is a well-known mathematical typesetting language, and
+ from the display perspective it might seem that it could be used for
+ communication between computer systems as well as an intermediate form
+ for the results of OCR (optical character recognition) of mathematical
+ expressions. There are flaws in this reasoning, since exchanging
+ mathematical information requires a system to parse and semantically
+ ``understand'' the TeX, even if it is ``ambiguous'' notationally. A
+ program we developed can handle 43\% of 10,740 TeX formulas in a
+ well-known table of integrals. We expect that a higher success rate can
+ be achieved easily.",
+ paper = "Fate99a.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Fate00,
+ author = "Fateman, Richard J.",
+ title = "Problem solving environments and symbolic computing",
+ booktitle = "Enabling technologies for computational science",
+ publisher = "Kluwer Academic Publishers",
+ year = "2000",
+ pages = "91-102",
+ link = "\url{http://people.eecs.berkeley.edu/~fateman/papers/pse-kluwer.pdf}",
+ abstract =
+ "What role should be played by symbolic mathematical computation
+ facilities in scientific and engineering ``problem solving
+ environments''? Drawing upon standard facilities such as numerical and
+ graphical libraries, symbolic computation should be useful for: The
+ creation and manipulation of mathematical models; The production of
+ custom optimized numerical software; The solution of delicate classes
+ of mathematical problems that require handling beyond that available
+ in traditional machine-supported floating-point computation. Symbolic
+ representation and manipulation can potentially play a central
+ organizing role in PSEs since their more general object representation
+ allows a program to deal with a wider range of computational
+ issues. In particular numerical, graphical, and other processing can
+ be viewed as special cases of symbolic manipulation with interactive
+ symbolic computing providing both an organizing backbone and the
+ communication ``glue'' among otherwise dissimilar components",
+ paper = "Fate00.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Fate91,
+ author = "Fateman, Richard J.",
+ title = "A Review of Mathematica",
+ year = "1991",
+ link = "\url{http://people.eecs.berkeley.edu/~fateman/papers/mma.pdf}",
+ abstract =
+ "The Mathematica computer system is reviewed from the perspective of
+ its contributions to symbolic and algebraic computation, as well as
+ its stated goals. Design and implementation issues are discussed.",
+ paper = "Fate91.pdf",
+ keywords = "axiomref"
+}
+
+@article{Fate01,
+ author = "Fateman, Richard J.",
+ title = "A Review of Macsyma",
+ journal = "IEEE Trans. Knowl. Eng.",
+ volume = "1",
+ number = "1",
+ year = "2001",
+ link = "\url{http://people.eecs.berkeley.edu/~fateman/papers/mac82b.pdf}",
+ abstract =
+ "We review the successes and failures of the Macsyma algebraic
+ manipulation system from the point of view of one of the original
+ contributors. We provide a retrospective examination of some of the
+ controversial ideas that worked, and some that did not. We consider
+ input/output, language semantics, data types, pattern matching,
+ knowledge-adjunction, mathematical semantics, the user community,
+ and software engineering. We also comment on the porting of this
+ system to a variety of computing systems, and possible future
+ directions for algebraic manipulation system-building.",
+ paper = "Fate01.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Fate02,
+ author = "Fateman, Richard J.",
+ title = "Comparing the speed of programs for sparse polynomial
+ multiplication",
+ link = "\url{http://www.cs.berkeley.edu/~fateman/papers/fastmult.pdf}",
+ paper = "Fate02.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Fate05,
+ author = "Fateman, Richard J.",
+ title = "An incremental approach to building a mathematical
+ expert out of software",
+ conference = "Axiom Computer Algebra Conference",
+ location = "City College of New York, CAISS project",
+ year = "2005",
+ month = "April",
+ day = "19",
+ link = "\url{http://www.cs.berkeley.edu/~fateman/papers/axiom.pdf}",
+ paper = "Fate05.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Fate05a,
+ author = "Fateman, Richard J.",
+ title = "Haddock's eyes and computer algebra systems: Some essays",
+ link = "\url{http://www.cs.berkeley.edu/~fateman/papers/haddock.pdf}",
+ year = "2005",
+ abstract =
+ "(From {\sl Through the Looking Glass} by Lewis Caroll)
+
+ The White Knight proposes to comfort Alice by singing her a song:
+
+ ``Is it very long?'' Alice asked, for she had heard a good deal of
+ poetry that day.
+
+ ``It's long,'' said the Knight, ``but it's very, very beautiful.
+ Everybody that hears me sing it--either it brings tears into the
+ eyes, or else --
+
+ ``Or else what?'' said Alice, for the Knight had made a sudden pause.
+
+ ``Or else it doesn't, you know. The name of the song is called
+ `Haddock's Eyes'.''
+
+ ``Oh, that's the name of the song, is it?'' Alice said, trying to feel
+ interested.
+
+ ``No, you don't understand,'' the Knight said, looking a little vexed.
+
+ ``That's what the name is called. The name really is 'The Aged Aged Man'''
+
+ ``Then I ought to have said 'That's what the song is called?'' Alice
+ corrected herself.
+
+ ``No, you oughtn't: that's quite another thing! The song is called
+ `Ways and Means': but that's only what it's called, you know!''
+
+ ``Well, what is the song, then?'' said Alice, who was by this time
+ completely bewildered.
+
+ ``I was coming to that,'' the Knight said. ``The song really is
+ `A-sitting on a Gate': and the tune's my own invention.''",
+ paper = "Fate05a.pdf"
+}
+
+@misc{Fate06a,
+ author = "Fateman, Richard J.",
+ title = "Comments on Extending Macsyma with New Data Types",
+ link = "\url{http://www.cs.berkeley.edu/~fateman/papers/addformat.pdf}",
+ abstract =
+ "Any design for a computer algebra system (CAS) naturally includes a
+ set of data layouts for symbolic or mathematical algebraic expressions
+ intended for use by built-in or user-written programs. The CAS cannot
+ build in all plausible data designs, but supports those of most
+ interest to the programmers. In such a situation it is almost
+ inevitable that some new data encoding idea will come to mind and with
+ it an interest in adding additional data forms. The motivation may be
+ for compact representation, or efficient (fast) manipulation, or for
+ other reasons such as interchange with other programs. Most CAS
+ therefore include at least one way to extend the base set of
+ operations. We comment on the kinds of extensions possible, using
+ Macsyma as an example CAS. The particular interest in Macsyma and its
+ open-source sourceforge variant ``Maxima'' is that a substantial group
+ of very-loosely coupled independent researchers are approaching this
+ problem and may benefit from some guidance. Some of the observations
+ apply to other CAS, even though they are not open-source.",
+ paper = "Fate06a.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Fate06b,
+ author = "Fateman, Richard J.",
+ title = "Building Algebra Systems by Overloading Lisp: Automatic
+ Differentiation",
+ link = "\url{http://www.cs.berkeley.edu/~fateman/papers/overload-AD.pdf}",
+ year = "2006",
+ abstract =
+ "In an earlier paper we began a discussion of the use of overloaded
+ languages for support of computer algebra systems. Here we extend that
+ notion to provide a more detailed approach to Automatic
+ Differentiation or Algorithm Differentiation (AD).
+
+ "This paper makes three points. 1. It is extremely easy to express
+ AD by overloading in Common Lisp. 2. While the resulting program is
+ not the most efficient approach in terms of run-time, it is quite
+ small and very general. It also interacts nicely with some other kinds
+ of generic arithmetic. 3. A more efficient AD compile-time program
+ generation approach is described as well.",
+ paper = "Fate06b.pdf",
+ keywords = "axiomref"
+}
+
+@article{Faug94,
+ author = "Faug{\`e}re, J.C. and Gianni, P. and Lazard, D. and Mora, T.",
+ title = "Efficient Computation of Zero-dimensional Grobner Bases by
+ Change of Ordering",
+ journal = "J. Symbolic Computation",
+ issue_date = "February 1994",
+ volume = "11",
+ number = "2",
+ month = "February",
+ year = "1994",
+ link = "\url{http://www-polsys.lip6.fr/~jcf/Papers/FGLM.pdf}",
+ publisher = "Academic Press Limited",
+ algebra = "\newline\refto{package LEXTRIPK LexTriangularPackage}",
+ abstract = "
+ We present an efficient algorithm for the transformation of a
+ Grobner basis of a zero-dimensional ideal with respect to any given
+ ordering into a Grobner basis with respect to any other
+ ordering. This algorithm is polynomial in the degree of the ideal. In
+ particular, the lexicographical Grobner basis can be obtained by
+ applying this algorithm after a total degree Grobner basis
+ computation: it is usually much faster to compute the basis this way
+ than with a direct application of Buchberger's algorithm.",
+ paper = "Faug94.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Faurxx,
+ author = {Davenport, James and Faure, Christ\'ele},
+ title = "Parameters in Computer Algebra",
+ abstract =
+ "One of the main strengths of computer algebra is being able to solve
+ a family of problems with one computation. In order to express not
+ only one problem but a family of problems, one introduces some symbols
+ which are in fact the parameters common to all the problems of the family.
+
+ The user must be able to understand in which way these parameters
+ affect the result when he looks at the answer. This is not the case in
+ most current Computer Algebra Systems we know because the form of the
+ answer is never explicitly conditioned by the values of the
+ parameters. We have introduced multi-valued expressions called
+ {\sl conditional expressions}, in which each potential value is associated
+ with a condition on some parameters. This is used, in particular, to
+ capture the situation in integration, where the form of the answer can
+ depend on whether certain quantities are positive, negative, or zero.",
+ keywords = "axiomref, provisos"
+}
+
+@InProceedings{Fitc93,
+ editor = "Fitch, John P.",
+ title = "Design and Implementation of Symbolic Computation Systems",
+ year = "1992",
+ booktitle = "Int. Symp. DISCO '92 Proceedings",
+ series = "DISCO 92",
+ publisher = "Springer-Verlag, Berlin",
+ isbn = "0-387-57272-4",
+ paper = "Fitc93.tex",
+ keywords = "axiomref"
+}
+
+@book{Flei94,
+ editor = "Fleischer, J. and Grabmeier, J. and Hehl, F.W. and
+ Kuchlin, W.",
+ title = "Proc. Conf. Computer Algebra in Science and Engineering",
+ booktitle = "Computer Algebra in Science and Engineering",
+ year = "1994",
+ location = "Bielefeld, Germany",
+ publisher = "World Scientific, River Edge, NJ",
+ abstract =
+ "Systems and tools of computer algebra (Like AXIOM, Derive, FORM,
+ Mathematica, Maple, Mupad, REDUCE, Macsyma\ldots) let us manipulate
+ extremely complex algebraic formulae symbolically on a
+ computer. Contrary to numerics these computations are exact and there
+ is no loss of accuracy. After decades of research and development,
+ these tools are now becoming as indispensable in Science and
+ Engineering as traditional number crunching already is.
+
+ The ZiF'94 workshop is amongst the first devoted specifically to
+ applications of computer algebra (CA) in Science and Engineering. The
+ book documents the state of the art in this area and serves as an
+ important reference for future work."
+}
+
+@techreport{Fort85,
+ author = "Fortenbacher, A. and Jenks, Richard and Lucks, Michael and
+ Sutor, Robert and Trager, Barry and Watt, Stephen",
+ title = "An Overview of the Scratchpad II Language and System",
+ institution = "IBM",
+ year = "1985",
+ type = "Research Report",
+ publisher = "IBM Research Computer Algebra Group",
+ keywords = "axiomref"
+}
+
+@inproceedings{Fort90,
+ author = "Fortenbacher, Albrecht",
+ title = "Efficient type inference and coercion in computer algebra",
+ booktitle = "Design and Implementation of Symbolic Computation Systems",
+ series = "Lecture Notes in Computer Science 429",
+ pages = "56-60",
+ isbn = "0-387-52531-9",
+ year = "1990",
+ abstract =
+ "Computer algebra systems of the new generation, like SCRATCHPAD, are
+ characterized by a very rich type concept, which models the
+ relationship between mathematical domains of computation. To use these
+ systems interactively, however, the user should be freed of type
+ information. A type inference mechanism determines the appropriate
+ function to call. All known models which allow to define a semantics
+ for type inference cannot express the rich ``mathematical'' type
+ structure, so presently type inference is done heuristically. The
+ following paper defines a semantics for a subproblem thereof, namely
+ coercion, which is based on rewrite rules. From this definition, an
+ efficient coercion algorithm for SCRATCHPAD is constructed using graph
+ techniques.",
+ keywords = "axiomref",
+ beebe = "Fortenbacher:1990:ETI"
+}
+
+@article{Fort05,
+ author = "Fortuna, E. and Gianni, P. and Luminati, D. and Parenti, P.",
+ title = "The adjacency graph of a real algebraic surface",
+ journal = "Appl. Algebra Eng. Commun. Comput.",
+ volume = "16",
+ number = "5",
+ pages = "271-292",
+ year = "2005",
+ link = "\url{http://eprints.biblio.unitn.it/788/1/UTM671.pdf}",
+ abstract =
+ "The paper deals with the question of recognizing the mutual positions
+ of the connected components of a non-singular real projective surface
+ $S$ in the real projective 3-space. We present an algorithm that
+ answers this question through the computation of the adjacency graph
+ of the surface; it also allows to decide whether each connected
+ component is contractible or not. The algorithm, combined with a
+ previous one returning as an output the topology of the surface,
+ computes a set of data invariant up to ambient-homeomorphism which,
+ though not sufficient to determine the pair $(\mathbb{R}\mathbb{P}^3,S)$,
+ give information about the nature of the surface as an embedded object.",
+ paper = "Fort05.pdf",
+ keywords = "axiomref"
+}
+
+@techreport{Fouc90,
+ author = "Fouche, Francois",
+ title = "Une implantation de l'algorithme de Kovacic en Scratchpad",
+ type = "technical report",
+ number = "ULP-IRMA-447-P-254",
+ year = "1990",
+ institution = {Institut de Recherche Math{\'{e}}matique Avanc{\'{e}}e},
+ location = "Strasbourg, France",
+ keywords = "axiomref",
+ beebe = "Fouche:1990:ILK"
+}
+
+@article{Frit94,
+ author = "Fritzson, D. and Fritzson, P. and Viklund, L. and Herber, J.",
+ title = "Object-oriented mathematical modelling - applied to machine
+ elements",
+ journal = "Comput. Struct.",
+ volume = "51",
+ number = "3",
+ pages = "241-253",
+ year = "1994",
+ abstract =
+ "Machine element analysis has a goal of describing function and other
+ aspects of machine elements in a theoretical form. This paper shows
+ how ideas from object-oriented modelling can be applied to machine
+ element analysis. The models thus obtained are both easier to
+ understand, better structured, and allow a higher degree of re-use
+ than conventional models. An object-oriented model description is
+ natural and suitable for machine element analysis. As a realistic
+ example an equational model of rolling bearings is presented. The
+ structure of the model is general, and applies to many types of
+ rolling bearings. The model and one solution require approximately
+ 200+200 equations. The model is extensible, e.g. simple submodels of
+ detailed properties can be made more complex without altering the
+ overall structure. The example model has been implemented in a
+ language of our own design. ObjectMath (Object-oriented Mathematical
+ language for scientific computing). Using ObjectMath, it is possible
+ to model classes of equation objects, to support multiple and single
+ inheritance of equations, to support composition of equations, and to
+ solve systems of equations. Algebraic transformations can conveniently
+ be done since ObjectMath models are translated into the Mathematica
+ computer algebra language. When necessary, equations can be
+ transformed into C++ code for efficient numerical solution. The re-use
+ of equations through inheritance reduced the size of the model by a
+ factor of two, compared to a direct representation of the model in the
+ Mathematica computer algebra language.",
+ paper = "Frit94.pdf",
+ keywords = "axiomref"
+}
+
+@techreport{Gall92,
+ author = "Gallopoulos, Stratis and Houstis, Elias and Rice, John",
+ title = "Future Research Directions in Problem Solving Environments for
+ Computational Science",
+ institution = "Purdue University",
+ year = "1992",
+ type = "technical report",
+ number = "CSD-TR-92-032",
+ link = "\url{http://docs.lib.purdue.edu/cgi/viewcontent.cgi?article=1953\&context=cstech}",
+ abstract =
+ "During the early 1960s some were visualizing that computers could
+ provide a powerful problem solving environment (PSE) which would
+ interact with scientists on their own terms. By the mid 1960s there
+ were many attempts underway to create these PSEs, but the early
+ 1970s almost all of these attempts had been abandoned, because the
+ technological infrastructure could not yet support PSEs in
+ computational science. The dream of the 1960s can be the reality of
+ the 1990s; high performance computers combined with better
+ understanding of computing and computational science have put PSEs
+ well within our reach.",
+ paper = "Gall92.pdf",
+ keywords = "axiomref"
+}
+
+@book{Ganz00,
+ author = "Ganzha, Victor G. and Vorozhtsov, Evgenii V. and Wester, Michael",
+ title = "An Assessment of the Efficiency of Computer Algebra Systems in
+ the Solution of Scientific Computing Problems",
+ booktitle = "Computer Algebra in Scientific Computing",
+ year = "2000",
+ isbn = "978-3-540-41040-9",
+ publisher = "Springer",
+ pages = "145-166",
+ abstract =
+ "Computer algebra systems (CASs) have become an important tool for the
+ solution of scientific computing problems. With the increasing number
+ of general purpose CASs, there is now a need for an assessment of the
+ efficiency of these systems. We discuss some peculiarities associated
+ with the analysis of CPU time efficiency in CASs, and then present
+ results from three specific systems (Maple Vr5, Mathematica 4.0 and
+ MuPAD 1.4) on a sample of intermediate size problems. These results
+ show that Maple Vr5 is generally the speediest on our
+ examples. Finally, we formulate some requirements for developing a
+ comprehensive suite for analyzing the efficiency of CASs.",
+ keywords = "axiomref"
+}
+
+@inproceedings{Gebu86,
+ author = "Gebauer, R{\"u}diger and M{\"o}ller, H. Michael",
+ title = "Buchberger's algorithm and staggered linear bases",
+ booktitle = "Proc. 1986 Symposium on Symbolic and Algebraic Computation",
+ series = "SYMSAC '86",
+ year = "1986",
+ pages = "218-221",
+ publisher = "ACM Press",
+ isbn = "0-89791-199-7",
+ doi = "http://dx.doi.org/10.1145/32439.32482",
+ keywords = "axiomref",
+ beebe = "Gebauer:1986:BAS"
+}
+
+@article{Geba88,
+ author = "Gebauer, Rudiger and Moller, H. Michael",
+ title = "On an installation of Buchberger's algorithm",
+ journal = "Journal of Symbolic Computation",
+ volume = "6",
+ number = "2-3",
+ pages = "275-286",
+ year = "1988",
+ abstract =
+ "Buchberger's algorithm calculates Groebner bases of polynomial
+ ideals. Its efficiency depends strongly on practical criteria for
+ detecting superfluous reductions. Buchberger recommends two
+ criteria. The more important one is interpreted in this paper as a
+ criterion for detecting redundant elements in a basis of a module of
+ syzygies. We present a method for obtaining a reduced, nearly minimal
+ basis of that module. The simple procedure for detecting (redundant
+ syzygies and )superfluous reductions is incorporated now in our
+ installation of Buchberger's algorithm in SCRATCHPAD II and REDUCE
+ 3.3. The paper concludes with statistics stressing the good
+ computational properties of these installations.",
+ paper = "GM88.pdf",
+ keywords = "axiomref",
+ beebe = "Gebauer:1988:IBA"
+}
+
+@book{Gedd92,
+ author = "Geddes, Keith O. and Czapor, Stephen R. and Labahn, George",
+ title = "Algorithms For Computer Algebra",
+ year = "1992",
+ publisher = "Kluwer Academic Publishers",
+ isbn = "0-7923-9259-0",
+ month = "September",
+ abstract =
+ "Computer Algebra (CA) is the name given to the discipline of
+ algebraic, rather than numerical, computation. There are a number of
+ computer programs -- Computer Algebra Systems (CASs) -- available for
+ doing this. The most widely used general-purpose systems that are
+ currently available commercially are Axiom, Derive, Macsyma, Maple,
+ Mathematica and REDUCE. The discipline of computer algebra began in
+ the early 1960s and the first version of REDUCE appeared in 1968.
+
+ A large class of mathematical problems can be solved by using a CAS
+ purely interactively, guided only by the user documentation. However,
+ sophisticated use requires an understanding of the considerable amount
+ of theory behind computer algebra, which in itself is an interesting
+ area of constructive mathematics. For example, most systems provide
+ some kind of programming language that allows the user to expand or
+ modify the capabilities of the system.
+
+ This book is probably the most general introduction to the theory of
+ computer algebra that is written as a textbook that develops the
+ subject through a smooth progression of topics. It describes not only
+ the algorithms but also the mathematics that underlies them. The book
+ provides an excellent starting point for the reader new to the
+ subject, and would make an excellent text for a postgraduate or
+ advanced undergraduate course. It is probably desirable for the reader
+ to have some background in abstract algebra, algorithms and
+ programming at about second-year undergraduate level.
+
+ The book introduces the necessary mathematical background as it is
+ required for the algorithms. The authors have avoided the temptation
+ to pursue mathematics for its own sake, and it is all sharply focused
+ on the task of performing algebraic computation. The algorithms are
+ presented in a pseudo-language that resembles a cross between Maple
+ and C. They provide a good basis for actual implementations although
+ quite a lot of work would still be required in most cases. There are
+ no code examples in any actual programming language except in the
+ introduction.
+
+ The authors are all associated with the group that began the
+ development of Maple. Hence, the book reflects the approach taken by
+ Maple, but the majority of the discussion is completely independent of
+ any actual system. The authors' experience in implementing a practical
+ CAS comes across clearly.
+
+ The book focuses on the core of computer algebra. The first chapter
+ introduces the general concept and provides a very nice historical
+ survey. The next three chapters discuss the fundamental topics -- data
+ structures, representations and the basic arithmetic of integers,
+ rational numbers, multivariate polynomials and rational functions – on
+ which the rest of the book is built.
+
+ A major technique in CA involves projection onto one or more
+ homomorphic images, for which the ground ring is usually chosen to be
+ a finite field. The image solution is lifted back to the original
+ problem domain by means of the Chinese Remainder Theorem in the case
+ of multiple homomorphic images, or the Hensel ($p$-adic or ideal-adic)
+ construction in the case of a single image. The next two chapters are
+ devoted to these techniques in a fairly general setting. The two
+ subsequent chapters specialise them to GCD computation and
+ factorisation for multivariate polynomials; the first of these
+ chapters also discusses the important but difficult topic of
+ subresultants.
+
+ The next two chapters describe the use of fraction-free Gaussian
+ elimination, resultants and Gr{\"o}bner Bases for manipulation and exact
+ solution of linear and nonlinear polynomial equations. The two final
+ chapters describe ``classical'' algorithms and the more recent Risch
+ algorithm for symbolic indefinite integration, and provide an
+ introduction to differential algebra.
+
+ The book does not consider more specialised problem areas such as
+ symbolic summation, definite integration, differential equations,
+ group theory or number theory. Nor does it consider more applied
+ problem areas such as vectors, tensors, differential forms, special
+ functions, geometry or statistics, even though Maple and other CASs
+ provide facilities in all or many of these areas. It does not consider
+ questions of CA programming language design, nor any of the important
+ but non-algebraic facilities provided by current CASs such as their
+ user interfaces, numerical and graphical facilities.
+
+ This is a long book (nearly 600 pages); it is generally very well
+ presented and the three authors have merged their contributions
+ seamlessly. I noticed very few typographical errors, and none of any
+ consequence. I have only two complaints about the book. The typeface
+ is too small, particularly for the relatively large line spacing used,
+ and it is much too expensive, particularly for a book that would
+ otherwise be an excellent student text. I recommend it highly to
+ anyone who can afford it.",
+ keywords = "axiomref"
+}
+
+@inproceedings{Gian89,
+ author = "Gianni, Patrizia and Mora, T.",
+ title = "Algebraic solution of systems of polynomial equations
+ using Groebner bases.",
+ booktitle = "Applied Algebra, Algebraic Algorithms and Error-Correcting
+ Codes",
+ series = "AAECC-5",
+ pages = "247-257",
+ year = "1989",
+ isbn = "3-540-51082-6",
+ abstract =
+ "One of the most important applications of Buchberger's algorithm for
+ Groebner basis computation is the solution of systems of polynomial
+ equations (having finitely many roots), i.e. the computation of zeros
+ of 0-dimensional polynomial ideals. It is based on a relation between
+ Groebner bases w.r.t. a lexicographical ordering and elimination
+ ideals, which was discovered by Trinks.
+
+ Packages for isolation of real roots of systems of polynomial
+ equations using Groebner basis computation are currently available in
+ different computer algebra systems, including SAC-2, Reduce,
+ Scratchpad II, Maple.
+
+ In principle, Buchberger-Trinks algorithm should allow to compute
+ solutions of such systems in the algebraic closure of the coefficient
+ field $k$ (usually the rational numbers), in the sense that it is
+ possible to represent explicitly a finite extension of $k$ containing
+ all solutions and to express the roots in this field.
+
+ However, this requires several factorisations of polynomials over a
+ tower of algebraic extensions of $k$, which is usually very costly, so
+ that the resulting algorithm is not very feasible and, as far as we
+ know, no implementation is available.
+
+ The results of [GT2] on primary decomposition of ideals include a
+ thorough study on the structure of Groebner bases for 0-dimensional
+ ideals; in particular, the paper shows, that after a ``generic''
+ linear change of coordinates, the roots of a system of polynomial
+ equations can be expressed in a simple extension of $k$. Therefore, in
+ this case, no factorisation of polynomials over towers of algebraic
+ extensions is needed.
+
+ However performing a change of coordinates has the undesirable effects
+ of introducing dense polynomials and of increasing the size of
+ coefficients.
+
+ The problem then arises of producing strategies to compute Groebner
+ bases for (0-dimensional) ideals, which at least are able to control
+ the influence of these side-effects: two such strategies are presented
+ in this paper, together with the application to the present problem of
+ an algorithm by Gianni that computes the radical of a 0-dimensional
+ ideal after a ``generic'' change of coordinates.
+
+ A different approach, based on her ``splitting algorithm'', to compute
+ solutions of systems of polynomial equations without the need of
+ polynomial factorisations has been proposed by D. Duval; also her
+ algorithm should be simplified by a ``generic'' change of coordinates.
+
+ The algorithms discussed in this paper are implemented in SCRATCHPAD II.
+
+ In the first section we recall some well-known properties of Groebner
+ bases and properties on the structure of Groebner bases of
+ zero-dimensional ideals from [GT2]; in the second section we recall
+ the Groebner basis algorithm for solving systems of algebraic
+ equations.
+
+ The original results are contained in Sections 3 to 5; in Section 3 we
+ take advantage of the obvious fact that density can be controlled by
+ performing ``small'' changes of coordinates: we show that such
+ approach is possible during a Groebner basis computation, in such a
+ way that computations done before a change of coordinates are valid
+ also after it; in Section 4 we propose a ``linear algebra'' approach
+ to obtain the Groebner basis w.r.t the lexicographical ordering from
+ the one w.r.t the total-degree ordering; in Section 5, we present a
+ zero-dimensional radical algorithm and show how to apply it to the
+ present problem.",
+ paper = "Gian89.pdf",
+ keywords = "axiomref",
+ beebe = "Gianni:1989:ASS"
+}
+
+@inproceedings{Gilx92,
+ author = "Gil, Isabelle",
+ title = "Computation of the Jordan canonical form of a square matrix
+ (using the Axiom programming language)",
+ booktitle = "Proc ISSAC 1992",
+ series = "ISSAC '92",
+ year = "1992",
+ publisher = "ACM",
+ pages = "138-145",
+ isbn = "0-89791-489-9 (soft cover), 0-89791-490-2 (hard cover)",
+ abstract =
+ "Presents an algorithm for computing: the Jordan form of a square
+ matrix with coefficients in a field K using the computer algebra
+ system Axiom. This system presents the advantage of allowing generic
+ programming. That is to say, the algorithm can first be implemented
+ for matrices with rational coefficients and then generalized to
+ matrices with coefficients in any field. Therefore the author
+ presents the general method which is essentially based on the use of
+ the Frobenius form of a matrix in order to compute its Jordan form;
+ and then restricts attention to matrices with rational
+ coefficients. On the one hand the author streamlines the algorithm
+ froben which computes the Frobenius form of a matrix, and on the other
+ she examines in some detail the transformation from the Frobenius form
+ to the Jordan form, and gives the so called algorithm Jordform. The
+ author studies in particular, the complexity of this algorithm and
+ proves that it is polynomial when the coefficients of the matrix are
+ rational. Finally the author gives some experiments and a conclusion.",
+ keywords = "axiomref",
+ beebe = "Gil:1992:CJC"
+}
+
+@phdthesis{Gome92,
+  author = "Gomez-Diaz, Teresa",
+  title = {Quelques applications de l'\'evaluation dynamique},
+ school = "L'Universite de Limoges",
+ year = "1992",
+ month = "March",
+ paper = "Gome92.pdf"
+}
+
+@misc{Gome94,
+ author = "Gomez-Diaz, Teresa",
+ title = "The Possible Solutions to the Control Problem",
+ year = "1994"
+}
+
+@article{Gome96,
+  author = "Gomez-Diaz, Teresa",
+ title = "Examples of using dynamic constructible closure",
+ journal = "Math. Comput. Simul.",
+ volume = "42",
+ number = "4-6",
+ pages = "375-383",
+ year = "1996",
+ abstract =
+ "We present here some examples of using the ``Dynamic Constructible
+ Closure'' program, which performs automatic case distinctions in
+ computations involving parameters over a base field ``K''. This
+ program is an application of the ``Dynamic Evaluation'' principle
+  which generalizes traditional evaluation and was first used to deal with
+ algebraic numbers.",
+ keywords = "axiomref"
+}
+
+@misc{Gonn05,
+ author = "Gonnet, Gaston and Haigh, Thomas",
+ title = "An Interview with Gaston Gonnet",
+ year = "2005",
+ publisher = "SIAM",
+ link = "\url{http://history.siam.org/pdfs2/Gonnet_final.pdf}",
+ abstract =
+ "Born in Uruguay, Gonnet was first exposed to computers while working
+ for IBM in Montevideo as a young man. This led him to a position at
+ the university computer center, and in turn to an undergraduate degree
+ in computer science in 1973. In 1974, following a military coup, he
+ left for graduate studies in computer science at the University of
+ Waterloo. Gonnet earned an M.Sc. and a Ph.D. in just two and a half
+ years, writing a thesis on the analysis of search algorithms under the
+ supervision of Alan George. After one year teaching in Rio de Janeiro
+ he returned to Waterloo, as a faculty member.
+
+ In 1980, Gonnet began work with a group including Morven Gentleman and
+ Keith Geddes to produce an efficient interactive computer algebra
+ system able to work well on smaller computers: Maple. Gonnet discusses
+ in great detail the goals and organization of the Maple project, its
+ technical characteristics, the Maple language and kernel, the Maple
+ library, sources of funding, the contributions of the various team
+ members, and the evolution of the system over time. He compares the
+ resulting system to MACSYMA, Mathematica, Reduce, Scratchpad and other
+ systems. Gonnet also examines the licensing and distribution of Maple
+ and the project’s relations to its users. Maple was initially used for
+ teaching purposes within the university, but soon found users in other
+ institutions. From 1984, distribution was handled by Watcom, a company
+ associated with the university, and 1988, Gonnet and Geddes created a
+ new company, Waterloo Maple Software, Inc. to further commercialize
+ Maple, which established itself as the leading commercial computer
+ algebra system. However, during the mid-1990s the company ran into
+ trouble and disagreements with his colleagues caused Gonnet to
+ withdraw from managerial involvement. Since then, he feels that Maple
+ has lost its battle with Mathematica. Gonnet also discusses Maple’s
+ relation to Matlab and its creator, Cleve Moler.
+
+ From 1984 onward with Frank Tompa, Tim Bray, and other Waterloo
+ colleagues, Gonnet worked on the production of computer software to
+ support the creation of the second edition of the Oxford English
+ Dictionary. This led to the creation of another startup company, Open
+ Text, producing software for the searching and indexing of textual
+ information within large corporations. Gonnet explains his role in the
+ firm, including his departure and his feeling that it made a strategic
+ blunder by not exploiting its early lead in Internet search.
+
+ Gonnet continued to work in a number of areas of computer science,
+ including analysis of algorithms. In 1990, Gonnet moved from Waterloo
+ to ETH in Switzerland. Among his projects since then have been Darwin,
+ a bioinformatics system for the manipulation of genetic data, and
+ leadership of the OpenMath project to produce a standard
+ representation for mathematical objects. He has been involved in
+ several further startup companies, including Aruna, a relational
+ database company focused on business intelligence applications.",
+ keywords = "axiomref",
+ paper = "Gonn05.pdf"
+}
+
+@inproceedings{Good91,
+ author = "Goodwin, B. M. and Buonopane, R. A. and Lee, A.",
+ title = "Using MathCAD in teaching material and energy balance concepts",
+ booktitle = "Challenges of a Changing World",
+ comment = "Proc. 1991 Ann. Conf., Amer. Soc. for Engineering Education",
+ pages = "345-349",
+ year = "1991",
+ abstract =
+ "We show how PC-based applications software, specifically MathCAD, is
+ used in the teaching of material and energy balance concepts. MathCAD
+ is a microcomputer software package which acts as a mathematical
+ scratchpad. It has proven to be a very useful instructional tool in
+ introductory chemical engineering courses. MathCAD solutions to
+ typical course problems are presented.",
+ keywords = "axiomref",
+ beebe = "Goodwin:1991:UMT"
+}
+
+@inproceedings{Good93,
+ author = "Goodloe, A. and Loustaunau, Philippe",
+ title = "An abstract data type development of graded rings",
+ booktitle = "Design and Implementation of Symbolic Computation Systems",
+ series = "Lecture Notes in Computer Science 721",
+ pages = "193-202",
+ isbn = "0-387-57272-4 (New York), 3-540-57272-4 (Berlin)",
+ year = "1993",
+ abstract =
+ "Recently new computer algebra systems such as Scratchpad and Weyl have
+ been developed with built in mechanisms for expressing abstract data types.
+ These systems are object oriented in that they incorporate multiple
+  inheritance and polymorphic types. Davenport and Trager have built much
+ of the framework for basic commutative algebra in Scratchpad II
+ utilizing its rich set of abstraction mechanisms. Davenport and Trager
+ concentrated on developing factorization algorithms on domains which
+ were abstract data types.
+
+ We are taking a similar approach to the development of algorithms for
+ computing in graded rings. The purpose of this paper is to develop the
+ tools required to compute with polynomials with coefficients in a graded
+ ring $R$. We focus on graded rings $R$ which are polynomial rings graded
+ by a monoid, and we allow partial orders on the monomials. The ideas
+ presented here can be applied to more general graded rings $R$, such as
+ associated graded rings to filtered rings, as long as certain computational
+ ``requirements'' are satisfied",
+ keywords = "axiomref",
+ beebe = "Goodloe:1993:ADT"
+}
+
+@misc{Grab98,
+ author = "Grabe, Hans-Gert",
+ title = "About the Polynomial System Solve Facility of Axiom, Macsyma,
+  Maple, Mathematica, MuPAD, and Reduce",
+ link = "\url{https://www.informatik.uni-leipzig.de/~graebe/ComputerAlgebra/Publications/WesterBook.pdf}",
+ abstract =
+ "We report on some experiences with the general purpose Computer
+ Algebra Systems (CAS) Axiom, Macsyma, Maple, Mathematica, MuPAD, and
+ Reduce solving systems of polynomial equations and the way they
+ present their solutions. This snapshot (taken in the spring of 1996)
+ of the current power of the different systems in a special area
+ concentrates on both CPU-times and the quality of the output.",
+ paper = "Grab98.pdf",
+ keywords = "axiomref"
+}
+
+@InProceedings{Grab02,
+ author = "Grabe, Hans-Gert",
+ title = "The SymbolicData Benchmark Problems Collection of Polynomial
+ Systems",
+ booktitle = "Workshop on Under- and Overdetermined Systems of Algebraic or
+ Differential Equations",
+ location = "Karlsruhe, Germany",
+ pages = "57-76",
+ year = "2002",
+ link = "\url{http://symbolicdata.org/Papers/karlsruhe-02.pdf}",
+ paper = "Grab02.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Grab06,
+ author = "Grabe, Hans-Gert",
+ title = "The Groebner Factorizer and Polynomial System Solving",
+ year = "2006",
+ report = "Special Semester on Groebner Bases",
+ location = "Linz",
+ link = "\url{https://www.ricam.oeaw.ac.at/specsem/srs/groeb/download/06\_02\_Solver.pdf}",
+ abstract =
+ "Let $S := k[x_1,\ldots, x_n]$ be the polynomial ring in the
+ variables $x_1,\ldots,x_n$ over the field $k$ and
+ $B := \{f_1,\ldots,f_m\} \subset S$
+ be a finite system of polynomials. Denote by $I(B)$ the
+ ideal generated by these polynomials. One of the major tasks of
+ constructive commutative algebra is the derivation of information
+ about the structure of
+ \[V(B):=\{a \in K^n : \forall f \in B{\rm\ such\ that\ }f(a)=0\}\]
+ the set of common zeroes of the system $B$ over an
+ algebraically closed extension $K$ of $k$. Splitting the system into
+ smaller ones, solving them separately, and patching all solutions
+ together is often a good guess for a quick solution of even highly
+ nontrivial problems. This can be done by several techniques, e.g.,
+ characteristic sets, resultants, the Groebner factorizer or some ad
+ hoc methods. Of course, such a strategy makes sense only for problems
+ that really will split, i.e., for reducible varieties of
+  solutions. Surprisingly often, problems coming from ``real life''
+ fulfill this condition.
+
+  Among the methods to split polynomial systems into smaller pieces
+  probably the Groebner factorizer method attracted the most
+  theoretical attention, see Czapor ([4, 5]), Davenport ([6]), Melenk,
+  M{\"o}ller and Neun ([16, 17]) and Gr{\"a}be ([13, 14]). General purpose
+ Computer Algebra Systems (CAS) are well suited for such an approach,
+ since they make available both a (more or less) well tuned
+ implementation of the classical Groebner algorithm and an effective
+ multivariate polynomial factorizer.
+
+ Furthermore it turned out that the Groebner factorizer is not only a
+ good heuristic approach for splitting, but its output is also usually
+ a collection of almost prime components. Their description allows a
+ much deeper understanding of the structure of the set of zeroes
+ compared to the result of a sole Groebner basis computation.
+
+ Of course, for special purposes a general CAS as a multipurpose
+ mathematical assistant can’t offer the same power as specialized
+ software with efficiently implemented and well adapted algorithms and
+ data types. For polynomial system solving, such specialized software
+ has to implement two algorithmically complex tasks, solving and
+ splitting, and until recently none of the specialized systems (as
+ e.g., GB, Macaulay, Singular, CoCoA, etc.) did both
+ efficiently. Meanwhile, being very efficient computing (classical)
+ Groebner bases, development efforts are also directed, not only
+ for performance reasons, towards a better inclusion of factorization
+ into such specialized systems. Needless to remark that it needs some
+ skill to force a special system to answer questions and the user will
+ probably first try his ``home system'' for an answer. Thus the
+ polynomial systems solving facility of the different CAS should behave
+ especially well on such polynomial systems that are hard enough not to
+ be done by hand, but not really hard to require special efforts. It
+ should invoke a convenient interface to get the solutions in a form
+ that is (correct and) well suited for further analysis in the familiar
+ environment of the given CAS as the personal mathematical assistant.",
+ paper = "Grab06.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Grab91,
+ author = "Grabmeier, Johannes and Huber, K. and Krieger, U.",
+ title = "Das ComputeralgebraSystem AXIOM bei kryptologischen und
+ verkehrstheoretischen Untersuchungen des Forschunginstituts
+           der Deutschen Bundespost TELEKOM",
+ type = "technical report",
+ number = "TR 75.91.20",
+ location = "Heidelberg, Germany",
+ year = "1991",
+ keywords = "axiomref",
+ beebe = "Grabmeier:1991:CSA"
+}
+
+@article{Grab91b,
+ author = "Grabmeier, Johannes",
+ title = "Axiom, ein Computeralgebrasystem mit abstrakten Datentypen",
+ journal = "mathPAD",
+ volume = "1",
+ number = "3",
+ pages = "13-15",
+ year = "1991",
+ paper = "Grab91b.pdf",
+ keywords = "axiomref"
+}
+
+@book{Gree01,
+ author = "Green, Edward L.",
+ title = "Symbolic Computation: Solving Equations in Algebra, Geometry, and
+ Engineering",
+ booktitle = "Proc. AMS-IMS-SIAM Joint Summer Research Conference on Symbolic
+ Computation",
+ volume = "232",
+ publisher = "American Mathematical Society",
+ year = "2001",
+ abstract =
+ "This volume contains papers related to the research conference,
+ ``Symbolic Computation: Solving Equations in Algebra, Analysis, and
+ Engineering,'' held at Mount Holyoke College (MA). It provides a broad
+ range of active research areas in symbolic computation as it applies
+ to the solution of polynomial systems. The conference brought together
+ pure and applied mathematicians, computer scientists, and engineers,
+ who use symbolic computation to solve systems of equations or who
+ develop the theoretical background and tools needed for this
+ purpose. Within this general framework, the conference focused on
+ several themes: systems of polynomials, systems of differential
+ equations, noncommutative systems, and applications.",
+ keywords = "axiomref"
+}
+
+@InProceedings{Grie71,
+ author = "Griesmer, James H. and Jenks, Richard D.",
+ title = "SCRATCHPAD/1 -- an interactive facility for symbolic mathematics",
+ booktitle = "Proc. second ACM Symposium on Symbolic and Algebraic
+ Manipulation",
+ series = "SYMSAC 71",
+ year = "1971",
+ pages = "42--58",
+  doi = "http://dx.doi.org/10.1145/806266",
+ link = "\url{http://delivery.acm.org/10.1145/810000/806266/p42-griesmer.pdf}",
+ abstract = "
+ The SCRATCHPAD/1 system is designed to provide an interactive symbolic
+ computational facility for the mathematician user. The system features
+ a user language designed to capture the style and succinctness of
+ mathematical notation, together with a facility for conveniently
+ introducing new notations into the language. A comprehensive system
+ library incorporates symbolic capabilities provided by such systems as
+ SIN, MATHLAB, and REDUCE.",
+ paper = "Grie71.pdf",
+ keywords = "axiomref",
+ beebe = "Griesmer:1971:SIF"
+}
+
+@techreport{Grie72a,
+ author = "Griesmer, James H. and Jenks, Richard D.",
+ title = "Experience with an online symbolic math system SCRATCHPAD",
+ institution = "IBM",
+ year = "1972",
+ isbn = "0-903796-02-3",
+ keywords = "axiomref",
+ beebe = "Griesmer:1972:EOSb"
+}
+
+@article{Grie72,
+ author = "Griesmer, James H. and Jenks, Richard D.",
+ title = "SCRATCHPAD: A capsule view",
+ journal = "ACM SIGPLAN Notices",
+ volume = "7",
+ number = "10",
+ pages = "93-102",
+ year = "1972",
+ comment = "Proc. Symp. Two-dimensional man-machine communications",
+  doi = "http://dx.doi.org/10.1145/807019",
+ abstract =
+ "SCRATCHPAD is an interactive system for algebraic manipulation
+ available under the CP/CMS time-sharing system at Yorktown Heights. It
+ features an extensible declarative language for the interactive
+ formulation of symbolic computations. The system is a large and
+ complex body of LISP programs incorporating significant portions of
+ other symbolic systems. Here we present a capsule view of SCRATCHPAD,
+ its language and its capabilities. This is followed by an example
+ which illustrates its use in an application involving the solution of
+ an integral equation.",
+ keywords = "axiomref",
+ beebe = "Griesmer:1972:SCV"
+}
+
+@article{Grie74,
+ author = "Griesmer, James H. and Jenks, Richard D.",
+ title = "A solution to problem \#4: the lie transform",
+ journal = "SIGSAM Bulletin",
+ volume = "8",
+ number = "4",
+ pages = "12-13",
+ year = "1974",
+ abstract =
+ "The following SCRATCHPAD conversation for carrying out the Lie
+ Transform computation represents a slight modification of one written
+ by Dr. David Barton, when he was a summer visitor during 1972 at the
+ Watson Research Center.",
+ keywords = "axiomref"
+}
+
+@techreport{Grie75,
+  author = "Griesmer, James H. and Jenks, Richard D. and Yun, David Y.Y.",
+ title = "SCRATCHPAD User's Manual",
+ institution = "IBM",
+ year = "1975",
+ type = "Research Report",
+ number = "RA70",
+ keywords = "axiomref"
+}
+
+@article{Grie75a,
+ author = "Griesmer, James H. and Jenks, Richard D. and Yun, David Y.Y.",
+ title = "A SCRATCHPAD solution to problem \#7",
+  journal = "SIGSAM Bulletin",
+ volume = "9",
+ number = "3",
+ pages = "13-17",
+ year = "1975"
+}
+
+@article{Grie75b,
+ author = "Griesmer, James H. and Jenks, Richard D. and Yun, David Y.Y.",
+ title = "A FORMAT statement in SCRATCHPAD",
+  journal = "SIGSAM Bulletin",
+ volume = "9",
+ number = "3",
+ pages = "24-25",
+ year = "1975",
+ abstract =
+ "Algebraic manipulation covers branches of software, particularly list
+ processing, mathematics, notably logic and number theory, and
+ applications largely in physics. The lectures will deal with all of these
+ to a varying extent.",
+ keywords = "axiomref"
+}
+
+@article{Grie79,
+ author = "Griesmer, James H.",
+ title = "The state of symbolic computation",
+ journal = "SIGSAM Bulletin",
+ volume = "13",
+ number = "3",
+ pages = "25-28",
+ year = "1979",
+  keywords = "axiomref"
+}
+
+@article{Grun94,
+ author = "Gruntz, Dominik and Monagan, Michael B.",
+ title = "Introduction to Gauss",
+ journal = "SIGSAM Bulletin",
+ volume = "28",
+ number = "3",
+ pages = "3-19",
+ year = "1994",
+ link = "\url{http://ftp.cecm.sfu.ca/personal/monaganm/papers/Gauss.pdf}",
+ abstract =
+ "The Gauss package offers Maple users a new approach to programming
+ based on the idea of parameterized types (domains) which is central to
+ the AXIOM system. This approach to programming is now regarded by many
+ as the right way to go in computer algebra systems design. In this
+ article, we describe how Gauss is designed and show examples of usage.
+ We end with some comments about how Gauss is being used in Maple.",
+ paper = "Grun94.pdf",
+ keywords = "axiomref",
+ beebe = "Gruntz:1994:IG"
+}
+
+@phdthesis{Grun96,
+ author = "Gruntz, Dominik",
+ title = "On Computing Limits in a Symbolic Manipulation System",
+ school = "Swiss Federal Institute of Technology Zurich",
+ year = "1996",
+ link = "\url{http://www.cybertester.com/data/gruntz.pdf}",
+ abstract = "
+ This thesis presents an algorithm for computing (one-sided) limits
+  within a symbolic manipulation system. Computing limits is an
+ important facility, as limits are used both by other functions such as
+ the definite integrator and to get directly some qualitative
+ information about a given function.
+
+ The algorithm we present is very compact, easy to understand and easy
+ to implement. It overcomes the cancellation problem other algorithms
+ suffer from. These goals were achieved using a uniform method, namely
+ by expanding the whole function into a series in terms of its most
+ rapidly varying subexpression instead of a recursive bottom up
+ expansion of the function. In the latter approach exact error terms
+ have to be kept with each approximation in order to resolve the
+ cancellation problem, and this may lead to an intermediate expression
+ swell. Our algorithm avoids this problem and is thus suited to be
+ implemented in a symbolic manipulation system.",
+ paper = "Grun96.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Gute16,
+ author = "Gutenberg Self-Publishing Press",
+ title = "OpenAxiom",
+ link = "\url{http://self.gutenberg.org/articles/openaxiom}",
+ year = "2016",
+ keywords = "axiomref"
+}
+
+@article{Harr98,
+ author = "Harrison, J. and Thery, L.",
+ title = "A Skeptic's approach to combining HOL and Maple",
+ journal = "J. Autom. Reasoning",
+ volume = "21",
+ number = "3",
+ pages = "279-294",
+ year = "1998",
+ link = "\url{http://www.cl.cam.ac.uk/~jrh13/papers/cas.ps.gz}",
+ abstract =
+ "We contrast theorem provers and computer algebra systems, pointing
+ out the advantages and disadvantages of each, and suggest a simple way
+ to achieve a synthesis of some of the best features of both. Our
+ method is based on the systematic separation of search for a solution
+ and checking the solution, using a physical connection between
+ systems. We describe the separation of proof search and checking in
+ some detail, relating it to proof planning and to the complexity class
+ NP, and discuss different ways of exploiting a physical link between
+ systems. Finally, the method is illustrated by some concrete examples
+ of computer algebra results proved formally in the HOL theorem prover
+ with the aid of Maple.",
+ paper = "Harr98.pdf",
+ keywords = "axiomref"
+}
+
+@misc{Harr12,
+ author = "Harriss, Edmund and Daly, Timothy",
+ title = "Have we ever lost mathematics?",
+ link = "\url{https://maxwelldemon.com/2012/05/09/have-we-ever-lost-mathematics/}",
+ year = "2012",
+ keywords = "axiomref"
+}
+
+@inproceedings{Kead93a,
+ author = "Keady, G. and Richardson, M.G.",
+ title = "An application of IRENA to systems of nonlinear equations arising
+ in equilibrium flows in networks",
+ booktitle = "Proc. ISSAC 1993",
+ series = "ISSAC '93",
+ year = "1993",
+ abstract =
+ "IRENA - an $I$nterface from $RE$DUCE to $NA$G - runs under the REDUCE
+ Computer Algebra (CA) system and provides an interactive front end to
+ the NAG Fortran Library.
+
+ Here IRENA is tested on a problem closer to an engineering problem
+ than previously publised examples. We also illustrate the use of the
+ {\tt codeonly} switch, which is relevant to larger scale problems. We
+ describe progress on an issue raised in the 'Future Developments'
+ section in our {\sl SIGSAM Bulletin} article [2]: the progress improves
+ the practical effectiveness of IRENA.",
+ paper = "Kead93a.pdf",
+ keywords = "axiomref"
+}
+
+@inproceedings{Hawk95,
+ author = "Hawkes, Evatt and Keady, Grant",
+ title = "Two more links to NAG numerics involving CA systems",
+ booktitle = "IMACS Applied Computer Algebra Conference",
+ location = "University of New Mexico",
+ year = "1995",
+ algebra =
+ "\newline\refto{domain ASP1 Asp1}
+ \newline\refto{domain ASP10 Asp10}
+ \newline\refto{domain ASP12 Asp12}
+ \newline\refto{domain ASP19 Asp19}
+ \newline\refto{domain ASP20 Asp20}
+ \newline\refto{domain ASP24 Asp24}
+ \newline\refto{domain ASP27 Asp27}
+ \newline\refto{domain ASP28 Asp28}
+ \newline\refto{domain ASP29 Asp29}
+ \newline\refto{domain ASP30 Asp30}
+ \newline\refto{domain ASP31 Asp31}
+ \newline\refto{domain ASP33 Asp33}
+ \newline\refto{domain ASP34 Asp34}
+ \newline\refto{domain ASP35 Asp35}
+ \newline\refto{domain ASP4 Asp4}
+ \newline\refto{domain ASP41 Asp41}
+ \newline\refto{domain ASP42 Asp42}
+ \newline\refto{domain ASP49 Asp49}
+ \newline\refto{domain ASP50 Asp50}
+ \newline\refto{domain ASP55 Asp55}
+ \newline\refto{domain ASP6 Asp6}
+ \newline\refto{domain ASP7 Asp7}
+ \newline\refto{domain ASP73 Asp73}
+ \newline\refto{domain ASP74 Asp74}
+ \newline\refto{domain ASP77 Asp77}
+ \newline\refto{domain ASP78 Asp78}
+ \newline\refto{domain ASP8 Asp8}
+ \newline\refto{domain ASP80 Asp80}
+ \newline\refto{domain ASP9 Asp9}",
+ abstract =
+ "The 'more' in the title is because this paper is a sequel to papers
+  by Kevin Broughan, [BKRRD,BK]. For some years GK has had interests in
+ (i) interactive front-ends to numeric computation, such as the
+ NAG/IMSL library computation, and (ii) Fortran code generation for
+  Argument SubPrograms (ASPs), such as those needed by some NAG/IMSL
+ routines. Demonstrations of three links to the NAG library are
+ described in [BKRRD]. A description of a link to NAG from Macsyma
+ which was mentioned, but not in a sufficiently advanced state to
+ demonstrate in early 1991, is given in [BK]. The situation at the end
+ of 1991 was that there were links to NAG involving each of Macsyma,
+ REDUCE and Mathematica. The links are called Naglink, IRENA and
+ InterCall, respectively. The principal authors of IRENA are Mike Dewar
+ and Mike Richardson. InterCall is not specific to the NAG library;
+ indeed InterCall is used with calls to IMSL and to elsewhere at the
+ conference venue, the University of New Mexico.
+
+  The two further links to NAG library treated in this paper are AXIOM2.0
+  and genmex/ESC, genmex allows calls to NAG from Matlab. genmex can be
+  regarded as similar to InterCall: genmex uses Matlab's mex files in a
+ similar way to InterCall's use of Mathematica's MathLink. Again genmex
+ is not specific to the NAG library. Mike Dewar is an author both of
+  IRENA and the AXIOM2.0 link to the NAG library: see [D] for discussion
+ of the differences between the IRENA project and the AXIOM-NAG link
+ project.",
+ paper = "Hawk95.pdf",
+ keywords = "axiomref"
+}
+
+@inproceedings{Hear80,
+ author = "Hearn, Anthony C.",
+ title = "Symbolic Computation and its Application to High Energy Physics",
+ booktitle = "Proc. 1980 CERN School of Computing",
+ pages = "390-406",
+ year = "1980",
+ link = "\url{http://www.iaea.org/inis/collection/NCLCollectionStore/\_Public/12/631/12631585.pdf}",
+ abstract =
+ "It is clear that we are in the middle of an electronic revolution
+  whose effect will be as profound as the industrial revolution. The
+ continuing advances in computing technology will provide us with
+ devices which will make present day computers appear primitive. In
+ this environment, the algebraic and other non-numerical capabilities
+ of such devices will become increasingly important. These lectures
+ will review the present state of the field of algebraic computation
+ and its potential for problem solving in high energy physics and
+ related areas. We shall begin with a brief description of the
+ available systems and examine the data objects which they consider.
+ As an example of the facilities which these systems can offer, we
+  shall then consider the problem of analytic integration, since this
+ is so fundamental to many of the calculational techniques used by high
+ energy physicists. Finally, we shall study the implications which the
+ current developments in hardware technology hold for scientific
+ problem solving.",
+ paper = "Hear80.pdf"
+}
+
+@article{Hear95,
+  author = "Hearn, Anthony C. and Schrufer, Eberhard",
+ title = "A computer algebra system based on order-sorted algebra",
+ journal = "J. Symbolic Computing",
+ volume = "19",
+ number = "1-3",
+ pages = "65-77",
+ year = "1995",
+ abstract =
+ "This paper presents the prototype design of an algebraic computation
+ system that manipulates algebraic quantities as generic objects using
+ order-sorted algebra as the underlying model. The resulting programs
+ have a form that is closely related to the algorithmic description of
+ a problem, but with the security of full type checking in a compact,
+ natural style.",
+ paper = "Hear95.pdf",
+ keywords = "axiomref"
+}
+
+@book{Heck93,
+ author = "Heck, Andre",
+ title = "Introduction to Maple",
+ year = "1993",
+ publisher = "Springer-Verlag",
+ abstract =
+ "This is an introductory book on one of the most powerful computer
+ algebra systems, viz, Maple: The primary emphasis in this book is on
+ learning those things that can be done with Maple and how it can be
+ used to solve mathematical problems. In this book usage of Maple as a
+ programming language is not discussed at a higher level than that of
+ defining simple procedures and using simple language constructs.
+ However, the Maple data structures are discussed in detail.
+
+ This book is divided into eighteen chapters spanning a variety of
+ topics. Starting with an introduction to symbolic computation and
+ other similar computer algebra systems, this book covers several
+ topics like polynomials and rational functions, series,
+ differentiation and integration, differential equations, linear
+ algebra, 2-D and 3-D graphics, etc. The applications covered include
+ kinematics of the Stanford manipulator, a 3-component model for
+ cadmium transfer through the human body, molecular-orbital Hückel
+ theory, prolate spheroidal coordinates and Moore-Penrose inverses.
+
+ At the end of each chapter, a good number of exercises is given. A
+ list of relevant references is also given at the end of the book.
+ This book is very useful to all users of Maple package.",
+ keywords = "axiomref"
+}
+
+@phdthesis{Hemm03,
+ author = "Hemmecke, Ralf",
+ title = "Involutive Bases for Polynomial Ideals",
+ school = "Johannes Kepler University, RISC",
+ year = "2003",
+ abstract =
+ "This thesis contributes to the theory of polynomial involutive
+ bases. Firstly, we present the two existing theories of involutive
+ divisions, compare them, and come up with a generalised approach of
+ {\sl suitable partial divisions}. The thesis is built on this
+ generalized approach. Secondly, we treat the question of choosing a
+ ``good'' suitable partial division in each iteration of the involutive
+ basis algorithm. We devise an efficient and flexible algorithm for
+ this purpose, the {\sl Sliced Division} algorithm. During the
+ involutive basis algorithm, the Sliced Division algorithm contributes
+ to an early detection of the involutive basis property and a
+ minimisation of the number of critical elements. Thirdly, we give new
+ criteria to avoid unnecessary reductions in an involutive basis
+ algorithm. We show that the termination property of an involutive
+ basis algorithm which applies our criteria is independent of the
+ prolongation selection strategy used during its run. Finally, we
+ present an implementation of the algorithm and results of this thesis
+ in our software package CALIX."
+}
+
+@misc{RISC06,
+ author = "Hemmecke, Ralf and Rubey, Martin",
+ title = "AXIOM Workshop 2006",
+ link = "\url{http://axiom-wiki.newsynthesis.org/WorkShopRISC2006}",
+ year = "2006",
+ location = "Hagenberg, Austria",
+ abstract =
+ "Axiom is a computer algebra system with a long tradition. It recently
+ became free software.
+
+ The workshop aims at a cooperation of Axiom developers with developers
+ of packages written for other Computer Algebra Systems or developers
+ of stand-alone packages. Furthermore, the workshop wants to make the
+ potential of Axiom and Aldor more widely known in order to attract new
+ users and new developers.",
+ keywords = "axiomref"
+}
+
+@misc{RISC07,
+ author = "Hemmecke, Ralf and Rubey, Martin",
+ title = "AXIOM Workshop 2007",
+ link = "\url{http://axiom-wiki.newsynthesis.org/WorkShopRISC2007}",
+ year = "2007",
+ location = "Hagenberg, Austria",
+ abstract =
+ "The workshop aims at a cooperation of Axiom developers with developers
+ of packages written for other Computer Algebra Systems, and
+ mathematicians that would like to use a computer algebra system to
+ perform experiments.
+
+ One goal of the workshop is to learn about the mathematical theory,
+ the design of packages written for other CAS and to make those
+ functionalities available in Axiom.",
+ keywords = "axiomref"
+}
+
+@misc{Hera16,
+ author = "Heras, Jonathan and Martin-Mateos, Francisco Jesus and
+ Pascual, Vico",
+ title = "A Hierarchy of Mathematical Structures in ACL2",
+ link = "\url{http://staff.computing.dundee.ac.uk/jheras/papers/ahomsia.pdf}",
+ abstract =
+ "In this paper, we present a methodology which allows one to deal with
+ {\sl mathematical structures} in the ACL2 theorem prover. Namely, we
+ cope with the representation of mathematical structures, the
+ certification that an object fulfills the axioms characterizing an
+ algebraic structure and the generation of generic theories about
+ concrete structures. As a by-product, an {\sl ACL2 algebraic
+ hierarchy} has been obtained. Our framework has been tested with the
+ definition of {\sl homology groups}, an example coming from
+ Homological Algebra which involves several notions related to
+ Universal Algebra. The method presented here, when compared to a
+ from-scratch approach, is preferred when working with complex
+ mathematical structures; for instance, the ones coming from Algebraic
+ Topology. The final aim of this work is the verification of Computer
+ Algebra systems, a field where our hierarchy fits better than the ones
+ developed in other systems.",
+ paper = "Hera16.pdf",
+ keywords = "axiomref"
+}
+
+@article{Hera15,
+ author = "Heras, Jonathan and Martin-Mateos, Francisco Jesus and
+ Pascual, Vico",
+ title = "Modelling algebraic structures and morphisms in ACL2",
+ journal = "Appl. Algebra Eng. Commun. Comput.",
+ volume = "26",
+ number = "3",
+ pages = "277-303",
+ year = "2015",
+ abstract =
+ "In this paper, we present how algebraic structures and morphisms can
+ be modelled in the ACL2 theorem prover. Namely, we illustrate a
+ methodology for implementing a set of tools that facilitates the
+ formalisations related to algebraic structures -- as a result, an
+ algebraic hierarchy ranging from setoids to vector spaces has been
+ developed. The resultant tools can be used to simplify the development
+ of generic theories about algebraic structures. In particular, the
+ benefits of using the tools presented in this paper, compared to a
+ from-scratch approach, are especially relevant when working with
+ complex mathematical structures; for example, the structures employed
+ in Algebraic Topology. This work shows that ACL2 can be a suitable
+ tool for formalising algebraic concepts coming, for instance, from
+ computer algebra systems.",
+ keywords = "axiomref"
+}
+
+@misc{Here96,
+ author = "Hereman, Willy",
+ title = "The Incredible World of Symbolic Mathematics
+ A Review of Computer Algebra Systems",
+ year = "1996",
+ link = "\url{https://inside.mines.edu/~whereman/papers/Hereman-PhysicsWorld-9-March1996.pdf}",
+ paper = "Here96.pdf",
+ keywords = "axiomref"
+}
+
+@article{Here97,
+ author = "Hereman, Willy",
+ title = "Review of Symbolic Software for Lie Symmetry Analysis",
+ journal = "Math. Comput. Modelling",
+ volume = "25",
+ number = "8/9",
+ pages = "115-132",
+ year = "1997",
+ abstract =
+ "Sophus Lie (1842-1899) pioneered the study of continuous
+ transformation groups that leave systems of differential equations
+ invariant. Lie’s work [1-3] brought diverse and ad hoc integration
+ methods for solving special classes of differential equations under a
+ common conceptual umbrella. Indeed, Lie’s infinitesimal
+ transformation method provides a widely applicable technique to find
+ closed form solutions of ordinary differential equations (ODEs).
+ Standard solution methods for first-order or linear ODEs can be
+ characterized in terms of symmetries. Through the group
+ classification of ODEs, Lie succeeded in identifying all ODEs that can
+ either be reduced to lower-order ones or be completely integrated via
+ group theoretic techniques.
+
+ Applied to partial differential equations (PDEs), Lie’s method [2]
+ leads to group-invariant solutions and conservation laws. Exploiting
+ the symmetries of PDEs, new solutions can be derived from known ones,
+ and PDEs can be classified into equivalence classes. Furthermore,
+ group-invariant solutions obtained via Lie’s approach may provide
+ insight into the physical models themselves, and explicit solutions
+ can serve as benchmarks in the design, accuracy testing, and
+ comparison of numerical algorithms.
+
+ Nowadays, the concept of symmetry plays a key role in the study and
+ development of mathematics and physics. Indeed, the theory of Lie
+ groups and Lie algebras is applied to diverse fields of mathematics
+ including differential geometry, algebraic topology, bifurcation
+ theory, to name a few. Lie’s original ideas greatly influenced the
+ study of physically important systems of differential equations in
+ classical and quantum mechanics, fluid dynamics, elasticity, and many
+ other applied areas [4-81].
+
+ The application of Lie group methods to concrete physical systems
+ involves tedious computations. Even the calculation of the
+ continuous symmetry group of a modest system of differential equations
+ is prone to errors, if done with pencil and paper. Computer algebra
+ systems (CAS) such as Mathematica, MACSYMA, Maple, REDUCE, AXIOM and
+ MuPAD are extremely useful for such computations. Symbolic packages
+ [9-11], written in the language of these CAS, can find the determining
+ equations of the Lie symmetry group. The most sophisticated packages
+ then reduce these into an equivalent but more suitable system,
+ subsequently solve that system in closed form, and go on to calculate
+ the infinitesimal generators that span the Lie algebra of symmetries.
+
+ In Section 2, we discuss methods and algorithms used in the
+ computation of Lie symmetries. We address the computation of
+ determining systems, their reduction to standard form, solution
+ techniques, and the computation of the size of the symmetry group.
+ In Section 3, we look beyond Lie-point symmetries, addressing contact
+ and generalized symmetries, as well as nonclassical or conditional
+ symmetries.
+
+ Section 4 is devoted to a review of modern Lie symmetry
+ programs, classified according to the underlying CAS. The review
+ focuses on Lie symmetry software for classical Lie-point symmetries,
+ contact (or dynamical), generalized (or Lie-Backlund) symmetries,
+ nonclassical (or conditional) symmetries. Most of these packages were
+ written in the last decade. Researchers interested in details about
+ pioneering work should consult [9,10,12]. In Section 5, two examples
+ illustrate results that can be obtained with Lie symmetry software.
+ In Section 6 we draw some conclusions.
+
+ Lack of space forces us to give only a few key references for the Lie
+ symmetry packages. A comprehensive survey of the literature devoted
+ to *