%% uncomment 1st line to generate handouts - only one documentclass command
%% allowed naturally
\documentclass[thomasd,ps,colorBG,slideColor,accumulate]{prosper}
%\documentclass[prettybox,pdf,colorBG,slideColor]{prosper}
%\hypersetup{pdfpagemode=FullScreen}

%for seminar class
%\documentclass{seminar}

%Choose for non-prosper
%\newcommand{\Mac}[1]{#1}

%Choose for Prosper
\newcommand{\Mac}[1]{}

%% Uncomment alltt if you need large blocks of code in a transition
%% environment - replaces use of verbatim
%\usepackage{alltt}

\newcommand{\incgraphics}[1]{#1}
%for Mac
\Mac{
\newcommand{\slideCaption}[1]{}
\newcommand{\subtitle}[1]{}
\newcommand{\institution}[1]{}
\newcommand{\email}[1]{}
%\renewcommand{\incgraphics}[1]{}
}

\incgraphics{
\usepackage{graphics}
}

\usepackage{mathchars}



\title{\Huge Mathematical Methods}
\subtitle{{for Computer Science} \\ \large (Part 2)}
\author{\large Peter Harrison }
\institution{ Department of Computing, Imperial College, London\\
  \vspace*{1cm} {\scriptsize Produced with prosper and \LaTeX}}
\email{ Email:\ pgh@doc.ic.ac.uk}
\date{ \today }
\slideCaption{Methods--2008}

\begin{document}

\maketitle

%% Useful macros
% "such that" abbreviation, with surrounding spaces, usable inside math mode
\newcommand{\st}{\mbox{~s.t.~}}
% Centred, scaled figure: #1 = file name under figs/, #2 = \includegraphics
% options (e.g. width=...), #3 = scale factor passed to \scalebox
\newcommand{\fig}[3]{
 \begin{center}
 \scalebox{#3}{\includegraphics[#2]{figs/#1}}
 \end{center}
}
% Standard number sets; \bbbn etc. presumably come from the mathchars
% package loaded above -- TODO confirm
\newcommand{\nat}{\ensuremath{\bbbn}}
\newcommand{\real}{\ensuremath{\bbbr}}
\newcommand{\rat}{\ensuremath{\bbbq}}
\newcommand{\integers}{\ensuremath{\bbbz}}
\newcommand{\complex}{\ensuremath{\bbbc}}
% Type of booleans, set in sans serif
\newcommand{\bool}{\sf Bool}
% Complex conjugate (overline notation)
\newcommand{\conj}[1]{\overline{#1}}
% Principal argument of a complex number, set upright
\newcommand{\Arg}[1]{\mbox{Arg}\; #1}
% Bold nabla (gradient) symbol
\newcommand{\del}{\mbox{{\boldmath $\bigtriangledown$}}}

%for mac
% NOTE(review): in the prosper build above, \Mac is defined to discard its
% argument, so this whole block is inactive; it only takes effect when the
% non-prosper definition \newcommand{\Mac}[1]{#1} is chosen instead.
\Mac{
% Stubs for prosper's overlay commands: outside prosper, just show the
% final (second-argument) content unconditionally.
\newcommand{\overlays}[2]{#2}
\newcommand{\fromSlide}[2]{#2}
\newcommand{\onlySlide}[2]{#2}

% Fall back to calligraphic letters for the number sets when the
% blackboard-bold symbols are unavailable.
%\renewcommand{\nat}{\cal N}
%\renewcommand{\real}{\cal R}
\renewcommand{\rat}{\cal Q}
\renewcommand{\integers}{\cal Z}
\renewcommand{\complex}{\cal C}
}

\begin{slide}{BASICS OF POWER SERIES}
\begin{itemize}
\item Represent a function $f(x)$ by:
$$f(x) =  \sum\limits_{i=0}^\infty a_i x^i $$
for coefficients $a_i \in \real, i=0,1,2,\ldots$.
\begin{itemize}
\item Called a  {\em power series} because of the series of powers of the argument $x$
\item For example, $f(x) = (1+x)^2 = 1 + 2x + x^2$
has $a_0=1,a_1=2,a_2=1,a_i=0$ for $i>2$
\end{itemize}
\item But in general the series may be infinite \emph{provided it converges}
\end{itemize}
\end{slide}



\begin{slide}{What are the coefficients?}
$$f(x) = a_0 + a_1 x + a_2 x^2 + a_3 x^3 + \ldots $$
\begin{itemize}
\item Suppose the value of the function $f$ is known at $x=0$. Then we have
straightaway, substituting $x=0$ $$a_0 = f(0)$$
\item Now differentiate $f(x)$ to get rid of the constant term:
$$f'(x) = a_1 + 2.a_2 x + 3.a_3 x^2 + 4.a_4 x^3 +\ldots $$
\end{itemize}
\end{slide}




\begin{slide}{What are the coefficients? (2)}
\begin{itemize}
\item Suppose the derivatives of the function $f$ are known at $x=0$ and set
$x=0$:
$$a_1 = f'(0)$$
\item Differentiate again to get rid of the constant term:
 $$f''(x) \equiv f^{(2)}(x) = 2.1.a_2 + 3.2.a_3 x + 4.3.a_4 x^2 +\ldots $$
\item Set $x=0$ and repeat the process:
$$a_2 =  f^{(2)}(0)/2!, \ldots,  a_n =  f^{(n)}(0)/n!$$
for $n \geq 0$.  More formally, we have .....

\end{itemize}
\end{slide}



\begin{slide}{Maclaurin series}
\begin{itemize}
\item Suppose $f(x)$ is differentiable infinitely many times and that it
has a power series representation ({\em series expansion}) of the form
$ f(x) =   \sum\limits_{i=0}^\infty a_i x^i $, as above.
\item Differentiating $n$ times gives $$f^{(n)}(x) = 
\sum\limits_{i=n}^\infty a_i i(i-1)\ldots (i-n+1)x^{i-n} $$
\item Setting $x=0$, we have $f^{(n)}(0) = n! a_n $ because all terms but the
first have $x$ as a factor.
\end{itemize}
\end{slide}



\begin{slide}{Maclaurin series (2)}
\begin{itemize}
\item Hence we obtain Maclaurin's series:
$$ f(x) =  \sum\limits_{i=0}^\infty f^{(i)}(0) \frac{x^i}{i!} $$
\item {\em It is important to check the domain of convergence (set of
valid values for $x$)}
\item This rather sloppy argument will be tightened up later.
\end{itemize}
\end{slide}



\begin{slide}{Example 1: $f(x) = (1+x)^{3}$}
\begin{itemize}
\item $f(0)=1$ so $a_0=1$
\item $f'(x) = 3(1+x)^{2}$ so $f'(0) = 3$ and  $a_1=3/1!=3$
\item $f''(x) = 3.2(1+x)$ so $f''(0) = 6$ and  $a_2=6/2!=3$
\item $f'''(x) = 3.2.1$ so $f'''(0) = 6$ and  $a_3=6/3!=1$
\item Higher derivatives are all 0 and so (as we know)
$$(1+x)^3 = 1+3x+3x^2+x^3$$
\end{itemize}
\end{slide}



\begin{slide}{Example 2: $f(x) = (1-x)^{-1}$}
\begin{itemize}
\item We probably know what the power series is for this function -- namely the
geometric series in $x$, in which all $a_i=1$.
\item $f(0)=1$, so far so good!
\item $f'(x) = -(1-x)^{-2}(-1) = (1-x)^{-2}$
\item so $f'(0) = 1$
\end{itemize}
\end{slide}



\begin{slide}{Example 2 (2)}
\begin{itemize}
\item Differentiating repeatedly, 
\begin{eqnarray*}
f^{(n)}(x) &=& (-1)(-2)\ldots(-n)(1-x)^{-(n+1)}(-1)^n \\
&=& n!(1-x)^{-(n+1)}
\end{eqnarray*}
\item so $a_n = f^{(n)}(0) /n! =  n!(1)^{-(n+1)}/n! = 1$
\item Thus $$(1-x)^{-1} = \sum_{i=0}^\infty 1.x^i$$
{\em provided this converges}.

\end{itemize}
\end{slide}



\begin{slide}{Example 3: $f(x) = \log_e(1+x)$}
\begin{itemize}
\item $a_0=f(0)=0$ because $ \log_e 1=0$ so no constant term
\item $f'(x)=(1+x)^{-1}$ so $a_1=1$
\item $f^{(n)}(x)=(-1)^{n-1}(n-1)!(1+x)^{-n}$ so $$a_n=(-1)^{n-1}/n$$ 
\item Therefore $$\log_e(1+x)=\frac {x} 1 - \frac {x^2} {2} + \frac {x^3} {3} - \frac {x^4} {4}  + \ldots $$
{\em provided~this~converges}.
\end{itemize}
\end{slide}



\begin{slide}{A look at convergence}
\begin{itemize}
\item What about $\log_e 2$?
\begin{itemize}
\item Is it true that $$\log_e 2 = \sum_{n=1}^\infty (-1)^{n-1}/n~~?$$
\item i.e. is $\log_e 2 = 1-\frac 1 2 + \frac 1 3 - \frac 1 4 + \ldots $?
\end{itemize}
\item It depends how you `add up the terms', i.e. in what sequence
\item \emph{conditionally convergent series}
\item Try it . . . how accurate is your result after 100,000 terms?
\end{itemize}
\end{slide}



\begin{slide}{A look at convergence (2)}
\begin{itemize}
\item What about when $x=-1$ giving $\log_e 0$?
\begin{itemize}
\item Is $$\log_e 0 = \mbox{} - \sum_{n=1}^\infty 1/n  ~~?$$
\item i.e. $- (1+\frac 1 2 + \frac 1 3 + \frac 1 4 + \ldots )$?
\end{itemize}
\item Well, we know that $\log_e 0 = - \infty$, so expect this series to diverge; \emph{very slowly}, because $\log x$ diverges very slowly as $x \rightarrow \infty$ or $0$.
\begin{itemize}
\item What do you think $\sum_{n=1}^{1000000} 1/n$ is?
\end{itemize}
\item More about this later
\end{itemize}
\end{slide}



\begin{slide}{Taylor series}
\begin{itemize}
\item A more general result is:
$$f(a+h) = f(a) + $$
$$ \frac{h}{1!}f^{(1)}(a) +  \ldots +
\frac{h^{n-1}}{(n-1)!}f^{(n-1)}(a) + \frac{h^{n}}{n!}f^{(n)}(a+\theta h)
$$
where $\theta \in (0,1)$
\item Also called the {\em $n$th Mean Value Theorem}
\item It is a nice result since it puts a bound on the error arising from
using a truncated series
\end{itemize}
\end{slide}



\begin{slide}{Power series solution of ODEs}
\begin{itemize}
\item Consider the differential equation 
$$\frac {{\rm d}y}{{\rm d}x} = k y $$
for constant $k$, given that $y=1$ when $x=0$.
\item Try the \emph{series solution} $$y = \sum_{i=0}^\infty a_i x^i$$
\item Find the coefficients $a_i$ by differentiating term by term, to obtain the \emph{identity}, for $i \geq 0$:
\end{itemize}
\end{slide}



\begin{slide}{Matching coefficients}
\vspace{-0.5cm} $$  \sum_{i=1}^\infty a_i i x^{i-1} \equiv \sum_{i=0}^\infty k a_i x^i \equiv \sum_{i=1}^\infty k a_{i-1} x^{i-1} $$
\begin{itemize}
\item Comparing coefficients of $x^{i-1}$ for $i \geq 1$  
$$i a_{i} = k a_{i-1} \mbox{~~~~~ hence }$$ 
$$a_{i} =  \frac{k}{i} a_{i-1} =  \frac{k}{i} \cdot \frac{k}{i-1} a_{i-2} = \ldots = \frac{k^{i}}{i!} a_0$$  
\item When $x=0, y=a_0$ so $a_0=1$ by the boundary condition.  Thus
\vspace{-0.2cm} 
$$y = \sum_{i=0}^\infty \frac{(kx)^i}{i!} = e^{kx}$$
\end{itemize}
\end{slide}



\begin{slide}{Answer ...}
$$\sum_{n=1}^{1000000} 1/n = 14.3927 $$
\end{slide}



\begin{slide}{COMPLEX NUMBERS}

A short history of number systems:
\begin{itemize}
\item $\nat$: for counting, not closed under subtraction;
\item $\integers$: $\nat$ with 0 and negative numbers, not closed under
division;
\item $\rat$: fractions, closed under arithmetic operations but
can't represent the solution of non-linear equations, e.g. $\sqrt{2}$;
\item $\real$: can do this for quadratic equations with real roots
and some higher-order equations --- {\em but not all}.
\begin{itemize}
\item More on the reals when we consider limits
\end{itemize}
\end{itemize}
\end{slide}




\begin{slide}{Missing numbers}
\begin{itemize}
\item The first entity we cannot describe is the solution to the equation
$$x^2+1=0$$ i.e. $\sqrt{-1}$ which we will call $i \equiv \sqrt{-1}$
\item There is no way of squeezing this into $\real$ -- it cannot be compared
with a real number (contrast $\sqrt{2}$ or $\pi$ which we can compare with
rationals and get arbitrarily accurate approximations)
\item So we treat  $i$ as an {\bf\large imaginary} number, `orthogonal' to the reals,
and consider  $\real \cup \{i\}$

\end{itemize}
\end{slide}




\begin{slide}{Useful facts}
From the definition of $i$ we have
\begin{itemize}
\item $i^2=-1;~~i^3=i^2i=-i;~~i^4=(i^2)^2=(-1)^2 = 1$
\item more generally, for all $n \in \nat$, 
$$i^{2n} = (i^2)^n = (-1)^n;~~~i^{2n+1}=i^{2n}i =
(-1)^ni $$
\item $i^{-1} = \frac{1}{i} =  \frac{i}{i^2} = -i$
\item $ i^{-2n}=\frac{1}{i^{2n}} = \frac{1}{(-1)^n}  = (-1)^n$;
$i^{-(2n+1)}= i^{-2n}i^{-1} = (-1)^{n+1}i$ for all $n \in \nat$
\item $i^0 = 1$

\end{itemize}
\end{slide}




\begin{slide}{Closure under arithmetic operators}
\begin{itemize}
\item Closing $\real \cup \{i\}$ under the `arithmetic operators' gives the
{\em complex numbers} $\;\complex$.
\item If $z_1,z_2 \in \complex$, then 
$z_1 + z_2 \in \complex$, $z_1 - z_2 \in \complex$, $z_1 \times z_2 \in
\complex$ and  $z_1 / z_2 \in \complex$.
\item Any complex number can be written in the form $z = x+iy$ for $x,y
\in \real$. We write:
\begin{itemize}
\item $\Re(z) = x$, the {\bf\large real part} of $z$
\item $\Im(z) = y$, the {\bf\large imaginary part} of $z$
\end{itemize}

\end{itemize}
\end{slide}




\begin{slide}{Arithmetic operators}
\begin{itemize}
\item Arithmetic operations on \complex ~are defined {\em symbolically}
\begin{itemize}
\item as if $i$ were just a variable name
\item but replacing $i^2$ by $-1$
\end{itemize}

\item Hence any operation results in a real constant ({\em real part}) added
to a real constant ({\em imaginary part}) multiplied by $i$
\item The precise definitions defined next {\em must} (and do) reduce to the
well known operations on $\real$ when the imaginary parts of their operands are
zero.

\end{itemize}
\end{slide}




\begin{slide}{Addition}
{\bf\large Definition}: If $z_1=x_1+iy_1$ and $z_2=x_2+iy_2$ are complex
numbers, then $$z_1+z_2 = (x_1+x_2) + i(y_1+y_2)$$ and 
$$z_1-z_2 = (x_1-x_2) + i(y_1-y_2)$$
\begin{itemize}
\item same as `adding brackets and collecting terms' 
\item addition is associative and commutative, because it is on real numbers
(exercise)

\end{itemize}
\end{slide}




\begin{slide}{Multiplication}
{\bf\large Definition}: If $z_1=x_1+iy_1$ and $z_2=x_2+iy_2$ are complex
numbers, then $$z_1 z_2 = (x_1x_2-y_1y_2) + i(x_1y_2+y_1x_2)$$ 
\begin{itemize}
\item  same as `multiplying brackets and collecting terms' {\em but also using the fact that} $i^2 = -1$
\item multiplication is associative and commutative, because it is on real
numbers (slightly harder exercise)

\end{itemize}
\end{slide}




\begin{slide}{Complex conjugate}
{\bf\large Definition}: The {\bf\large Complex conjugate} of a complex number
$z=x+iy$ is $\conj{z} = x-iy$. 
\begin{itemize}
\item $\Re \conj{z} = \Re z$
\item $\Im \conj{z} = -\Im z$
\item $z+\conj{z} = 2x = 2\Re z \in \real$
\item $z-\conj{z} = 2iy = 2i\Im z$ which is purely imaginary
\item $\conj{z_1 + z_2} = \conj{z_1} + \conj{z_2}$

\end{itemize}
\end{slide}




\begin{slide}{Conjugate of a product}

The conjugate of a product is the product of the conjugates: 
$$\conj{z_1z_2} = \conj{z_1} \;\conj{z_2}$$
\begin{itemize}
\item either by noting that the conjugate operation simply changes every occurrence of $i$ to $-i$;
\item or since
\begin{eqnarray*} 
(x_1+iy_1) (x_2+iy_2) &=& (x_1x_2-y_1y_2) + i(x_1y_2+y_1x_2) \\
(x_1-iy_1) (x_2-iy_2) &=& (x_1x_2-y_1y_2) - i(x_1y_2+y_1x_2)
\end{eqnarray*}
which are conjugates

\end{itemize}
\end{slide}




\begin{slide}{Modulus}
{\bf\large Definition}: The {\bf\large modulus} or {\bf\large absolute value} of
$z$ is $|z| = \sqrt{z\conj{z}}$.
\begin{itemize}
\item $z\conj{z} = (x+iy)(x-iy)=x^2+y^2~~~ \in \real$
\item Notice that the term `absolute value' is the same as defined for real
numbers when $\Im z = 0$, viz.~~~$|x|$.
\item $|z_1z_2| = |z_1| \;|z_2|$ ~~ because 
$$ |z_1z_2|^2 = z_1z_2 \conj{z_1z_2} = z_1z_2 \conj{z_1} \;\conj{z_2} 
= z_1 \conj{z_1} z_2 \conj{z_2} = |z_1|^2|z_2|^2 $$

\end{itemize}
\end{slide}




\begin{slide}{Reciprocal and division}
\begin{itemize}
\item If $z=x+iy$, its reciprocal is 
$$\frac{1}{z} = \frac{\conj z}{z\conj z} = \frac{\!\!\conj z}{|z|^2}=
\frac{x-iy}{x^2+y^2}$$
\item This can be written $z^{-1} = |z|^{-2}\conj z$, using only the {\em
complex} operators multiply and add (but also real division which we already
know).
\item Complex division is now defined by $z_1/z_2 = z_1 \times z_2^{-1}$

\end{itemize}
\end{slide}



\begin{slide}{Example}
Calculate as a complex number $$\frac{3+2i}{7-3i}$$
\begin{itemize}
\item Solution:
\begin{eqnarray*}
\frac{3+2i}{7-3i} &=& \frac{(3+2i)(7+3i)}{(7-3i)(7+3i)} \\ 
&=& \frac{15+23i}{49+9} \\
&=& \frac{15}{58} + \frac{23}{58}i
\end{eqnarray*}

\end{itemize}
\end{slide}



\begin{slide}{Uses}
\begin{itemize}
\item This defines the complex numbers rigorously, consistent with the reals.
{\em But why bother?}
\item Lots of reasons!
\begin{itemize}
\item The theory of complex numbers, complex variables and functions of a
complex variable is very deep, with far-reaching results.
\item Often a `real' problem can be solved by mapping it into complex space,
deriving a solution, and mapping back again: a direct solution may not be
possible.
\end{itemize}

\end{itemize}
\end{slide}



\begin{slide}{Fundamental theorem of Algebra}
\begin{itemize}
\item It can be shown that any polynomial equation of the form 
$$1+a_1z+a_2z^2+\ldots + a_nz^n =0$$
with $a_n \neq 0$ has $n$ complex solutions (some of which might be coincident, e.g. for $z^2=0$).
\item So we know that if we need a solution to such an equation, it {\em is}
worth looking!
\item Contrast in real space where we might try to locate a root of an
equation with no real solutions.
\end{itemize}
\end{slide}




\begin{slide}{Geometrical interpretation}
\begin{itemize}
\item A complex number $z=x+iy$ is equivalent to the pair of real values
$(x,y)$, i.e. there is a 1-1 correspondence (bijective mapping) between
$\complex$ and $\real \times \real$
\item {\em Thus each complex number is uniquely represented by a point in two
dimensional space, i.e. has coordinates with respect to two axes.}
\item The {\em distance} between two points $z_1,z_2$ is the {\em modulus} 
$|z_1-z_2|$
\item This two-dimensional space is called the {\bf\large Argand diagram}.
\end{itemize}
\end{slide}




\begin{slide}{Argand diagram}
A point $z$ can be represented 
\begin{itemize}
\item in {\em Cartesian coordinates} by $z=x+iy$
\item or in {\em polar coordinates} by $z = r(\cos\theta + i\sin\theta)$ where
$|z|^2 = r^2(\sin^2\theta + \cos^2\theta) = r^2$, so $|z|=r$.
\item Clearly $x = r\cos\theta$ and $y=r\sin\theta$
\item We write $\Arg z = \theta~ $~--- the {\bf\large argument} of $z$
\item {\em Draw this for yourselves} and update the diagram as we go .....

\end{itemize} 
\end{slide}




\begin{slide}{Representation as vectors}
\begin{itemize}
\item The addition rule is {\em exactly} the same as you had for vectors.
\item Add the corresponding components: $$(x_1,y_1) +
(x_2,y_2) = (x_1+x_2,y_1+y_2)$$
\item Similarly with multiplication by a real / scalar -- as complex numbers we get:
$$\lambda(x+iy) = \lambda x + i \lambda y \sim (\lambda x, \lambda y)$$
\item Many two dimensional vector problems are solved using a complex number
representation.

\end{itemize}
\end{slide}




\begin{slide}{Products in the Argand diagram}
\begin{itemize}
\item Geometrically, the definition of a product doesn't mean very much!
\item But if we work in polar form we will see that if $z=z_1z_2$, then 
\begin{itemize}
\item The modulus of $z$ is the {\em product} of the moduli of $z_1$ and $z_2$ --- as we would expect;
\item The argument of $z$ is the {\em sum} of the arguments of $z_1$ and $z_2$.
\end{itemize}

\end{itemize}
\end{slide}




\begin{slide}{DeMoivre's theorem}
If $z_1 = r_1(\cos\theta_1+i\sin\theta_1)$ and $z_2 =
r_2(\cos\theta_2+i\sin\theta_2)$, then 
$$z_1z_2 = r_1r_2(\cos(\theta_1+\theta_2)+i\sin(\theta_1+\theta_2))$$
\begin{itemize}
\item The proof is very easy.  By definition of multiplication,
$$z_1z_2 = r_1r_2 \times$$
$$(\cos\theta_1\cos\theta_2-\sin\theta_1\sin\theta_2
+ i(\sin\theta_1\cos\theta_2+\cos\theta_1\sin\theta_2))$$
\item the result now follows by standard trigonometrical identities.

\end{itemize}
\end{slide}




\begin{slide}{Back to the Argand diagram}
So the product of the complex numbers $z_1$ and $z_2$ is identified
graphically as that point $z$ having:
\begin{itemize}
\item Arg $z$ = Arg$z_1$+Arg$z_2$, i.e. the first point's polar angle rotates
by an amount equal to the polar angle of the second point -- this gives the
{\em direction} of the result;
\item $|z| = |z_1||z_2|$, i.e. the modulus of $z$, or distance along the
now-known direction, is the product of the moduli of the two points.

\end{itemize}
\end{slide}




\begin{slide}{Example}
Multiply $3+3i$ by $(1+i)^3$
\begin{itemize}
\item Could expand $(1+i)^3$ and multiply by $3+3i$
\item Alternatively, in polar form (using degrees), 
\begin{eqnarray*}
(1+i)^3 &=& [2^{1/2}(\cos 45 + i\sin 45)]^3 \\
&=& 2^{3/2}(\cos 135 + i\sin 135)
\end{eqnarray*}
by DeMoivre's theorem.
\item $3+3i= 18^{1/2}(\cos 45 + i\sin 45)$ and so the result is
$$18^{1/2}2^{3/2}(\cos 180 + i\sin 180) = -12$$

\end{itemize}
\end{slide}




\begin{slide}{Example(2)}
\begin{itemize}
\item Geometrically, we just observe that the Arg of the second
number is 3 times that of $1+i$, i.e. $3\times \pi/4$ (or $3 \times 45$ in
degrees).  The first number has the same Arg, so the Arg of the result is
$\pi$ or 180 degrees.
\item The moduli of the numbers multiplied are $\sqrt{18}$ and $\sqrt{2^3}$,
so the product has modulus 12. 
\item The result is therefore $-12$.

\end{itemize}
\end{slide}




\begin{slide}{Triangle inequality}

$$\forall z_1,z_2 \in {\complex},~~~ |z_1+z_2| \leq |z_1|+|z_2|$$

An alternative form, with $w_1=z_1$ and $w_2=z_1+z_2$ is 
$|w_2|-|w_1| \leq |w_2-w_1|$
and, switching $w_1,w_2$, $|w_1|-|w_2| \leq |w_2-w_1|$.  Thus, relabelling
back to $z_1,z_2$:
$$\forall z_1,z_2 \in {\complex},~~~ |~|z_1|-|z_2|~| \leq |z_2-z_1|$$


\begin{itemize}
\item In the Argand diagram, this just says that:

``In the triangle with vertices at $O,Z_1,Z_2$, the length of side $Z_1Z_2$ is
not less than the difference between the lengths of the other two sides''

\end{itemize}
\end{slide}




\begin{slide}{Proof}

Let $z_1=x_1+iy_1$ and $z_2=x_2+iy_2$. Then 
\begin{itemize}
\item The square of the left hand
side is:
$$(x_1+x_2)^2 + (y_1+y_2)^2 = |z_1|^2+|z_2|^2+2(x_1x_2+y_1y_2)$$
\item The square of the right hand side is:
$$ |z_1|^2+|z_2|^2+2|z_1||z_2|$$
\item So it is required to prove $x_1x_2+y_1y_2 \leq |z_1||z_2|$. 
\end{itemize}

\end{slide}




\begin{slide}{Proof (2)}

\begin{itemize}
\item  You know this is
true, since in vector notation $\vec{v}_1.\vec{v}_2 \leq 
|\vec{v}_1||\vec{v}_2|$.
\item Otherwise, square and multiply out to require:
\begin{eqnarray*} 
x_1^2x_2^2 + y_1^2y_2^2 + 2x_1x_2y_1y_2 &\leq& x_1^2x_2^2 +
y_1^2y_2^2 + x_1^2y_2^2 + y_1^2x_2^2 \\
\mbox{i.e.~~~} 0 &\leq& (x_1y_2-y_1x_2)^2
\end{eqnarray*}
as required.
\item The Argand diagram geometrical argument is usually considered an
acceptable proof of the triangle inequality.

\end{itemize}
\end{slide}




\begin{slide}{Complex power series}
\begin{eqnarray*}
e^z &=& \sum_{n=0}^\infty \frac{z^n}{n!} =
1+z+\frac{z^2}{2!}+\frac{z^3}{3!} + \ldots \\
\sin z &=& \sum_{n=0}^\infty (-1)^n \frac{z^{2n+1}}{(2n+1)!} =
z-\frac{z^3}{3!}+\frac{z^5}{5!} - \ldots \\
\cos z &=& \sum_{n=0}^\infty (-1)^n \frac{z^{2n}}{(2n)!} =
1-\frac{z^2}{2!}+\frac{z^4}{4!} - \ldots 
\end{eqnarray*}
\begin{itemize}
\item {\em Same expansions hold in $\complex$, e.g. because these functions
are differentiable in $\complex$ and Maclaurin's series applies.}

\end{itemize}
\end{slide}




\begin{slide}{Euler's formula}
Put $z=i\theta$ in the exponential series, for $\theta \in \real$:
\begin{eqnarray*}
e^{i\theta} &=& 
1+i\theta+i^2\frac{\theta^2}{2!}+i^3\frac{\theta^3}{3!}+
i^4\frac{\theta^4}{4!} + \ldots \\
&=& 
1+i\theta-\frac{\theta^2}{2!}-i\frac{\theta^3}{3!}+
\frac{\theta^4}{4!} + i\frac{\theta^5}{5!} + \ldots \\
&=& \cos \theta + i \sin \theta
\end{eqnarray*}
\begin{itemize}
\item The polar form of a complex number may be written
$$z=r(\cos \theta + i\sin \theta) = re^{i\theta} $$
and DeMoivre's theorem follows immediately.

\end{itemize}
\end{slide}




\begin{slide}{More general form}
\begin{itemize}
\item A more general form of Euler's formula is 
$$ z = re^{i(\theta+2n\pi)}~~~\mbox{for any~} n \in \integers $$
since $e^{i2n\pi} = \cos 2n\pi + i\sin 2n\pi = 1$
\item In terms of the Argand diagram, the points $e^{i(\theta+2n\pi)},~ n=1,2,\ldots$
lie on top of each other, each corresponding to one more revolution (through
$2\pi$).
\item The complex conjugate of $e^{i\theta}$ is  $e^{-i\theta} = \cos \theta -
i \sin \theta$ and so $\cos \theta = (e^{i\theta}+e^{-i\theta})/2,~ 
\sin \theta = (e^{i\theta}-e^{-i\theta})/2i$

\end{itemize}
\end{slide}




\begin{slide}{$n$th roots of unity}
Consider the equation $z^n = 1$ for $n \in \nat$
\begin{itemize}
\item  One root is $z=1$, but by the Fundamental Theorem of Algebra, there
are $n$ altogether.
\item  Write this equation as
$$ z^n = e^{2k\pi i}$$ for $k=0,1,\ldots$
\item Then the solutions are $z= e^{2k\pi i/n}$ for $k=0,1,2,\ldots,n-1$
\item Note that the solutions repeat when $k=n,n+1,\ldots$

\end{itemize}
\end{slide}




\begin{slide}{Example: cube roots of unity}
\begin{itemize}
\item The 3rd roots of 1 are $z= e^{2k\pi i/3}$ for $k=0,1,2$, ~~i.e. $1, 
e^{2\pi i/3}, e^{4\pi i/3}$.
\item These simplify to 
\begin{eqnarray*}
&&1 \\
\cos 2\pi/3 + i\sin 2\pi/3 &=& (-1+\sqrt{3}i)/2 \\
\cos 4\pi/3 + i\sin 4\pi/3 &=& (-1-\sqrt{3}i)/2
\end{eqnarray*}
\item Try cubing each solution directly ... and then do the 8th roots
similarly!

\end{itemize}
\end{slide}


\begin{slide}{Solution of $z^n = a+ib$}
\begin{itemize} 
\item These equations are solved (almost) the same way:
\item Let $a+ib = re^{i\phi}$ in polar form.  Then, for
$k=0,1,\ldots,n-1$,
\begin{eqnarray*}
z^n &=& (a+ib)e^{2\pi k i} =  re^{(\phi + 2\pi k)i}\\
\mbox{and so~~~} z &=&   r^{\frac{1}{n}}e^{\frac{(\phi + 2\pi k)}{n}i}
\end{eqnarray*}
\item E.g. cube roots of $1-i ~(r=\sqrt{2}, \phi=-\pi/4)$ are:
$ 2^{\frac{1}{6}}(\cos \pi/12-i\sin \pi/12),~~~
2^{\frac{1}{6}}(\cos 7\pi/12 + i\sin 7\pi/12)$ and 
$2^{\frac{1}{6}}(\cos 5\pi/4 + i\sin 5\pi/4) = -2^{-1/3}(1+i)$.

\end{itemize}
\end{slide}



\begin{slide}{Multiple angle formulas}

How can we calculate $\cos n\theta$ in terms of $\cos \theta$ and $\sin
\theta$?
\begin{itemize}
\item Use DeMoivre's theorem to expand $e^{in\theta}$ and equate real and
imaginary parts: e.g. for n=5, by the binomial theorem,
\begin{eqnarray*}
\lefteqn{(\cos \theta + i \sin \theta)^5} \\
 &=& \cos^5 \theta + i5 \cos^4 \theta \sin
\theta  - 10 \cos^3 \theta \sin^2 \theta \\
&&- i10 \cos^2 \theta \sin^3 \theta  + 5 \cos \theta \sin^4 \theta +
i\sin^5 \theta
\end{eqnarray*}
\end{itemize}
\end{slide}




\begin{slide}{Multiple angle formulas (2)}
\begin{itemize}
\item Comparing real and imaginary parts now gives:
$$\cos 5\theta =  \cos^5 \theta - 10 \cos^3 \theta \sin^2 \theta + 5 \cos
\theta \sin^4 \theta$$
and
$$\sin 5\theta = 5 \cos^4 \theta \sin
\theta - 10 \cos^2 \theta \sin^3 \theta + \sin^5 \theta$$

\end{itemize}
\end{slide}




\begin{slide}{Conversely ....}

How can we calculate $\cos^n\theta$ in terms of $\cos m\theta$ and $\sin
m\theta$ for $m \in \nat$?
\begin{itemize}
\item Let $z=e^{i\theta}$ so that $z+z^{-1} = z+\conj{z} = 2 \cos \theta$
\item Similarly,  $z^m+z^{-m} = 2 \cos m\theta$ by DeMoivre's theorem.
\item Hence by the binomial theorem, e.g. for $n=5$,
\begin{eqnarray*}
 (z+z^{-1})^5  &=&  (z^5+z^{-5}) + 5 (z^3+z^{-3}) +  10(z+z^{-1}) \\
2^5\cos^5 \theta &=& 2(\cos 5\theta + 5 \cos 3\theta + 10 \cos \theta)
\end{eqnarray*}
\item Similarly, $z-z^{-1} = 2i \sin \theta$ gives $\sin^n\theta$
\end{itemize}
\end{slide}




\begin{slide}{What happens when $n$ is even?}

\begin{itemize}
\item You get an extra term in the binomial expansion, which is {\em constant}.
\item E.g. for $n=6$:
\begin{eqnarray*}
 (z+z^{-1})^6  &=&  (z^6+z^{-6}) + 6 (z^4+z^{-4}) +  15(z^2+z^{-2}) + 20\\
2^6 \cos^6 \theta &=& 2(\cos 6\theta + 6 \cos 4\theta + 15 \cos
2 \theta +10) \\
\!\!\!\mbox{and so~~~}\\
\cos^6 \theta &=& \frac{1}{32}(\cos 6\theta + 6 \cos 4\theta + 15 \cos
2 \theta +10)
\end{eqnarray*}
\end{itemize}
\end{slide}




\begin{slide}{Summation of series}

Some series with sines and cosines can be summed similarly, e.g. 
$$ C = \sum_{k=0}^n a^k \cos k\theta  $$

\begin{itemize}
\item Let $S = \sum\limits_{k=1}^n a^k \sin k\theta $.  Then,
$$ C+iS = \sum_{k=0}^n a^k e^{i k\theta} =
\frac{1-(ae^{i\theta})^{n+1}}{1-ae^{i\theta}} $$
\end{itemize}
\end{slide}




\begin{slide}{Summation of series (2)}

\begin{itemize}
\item Hence
\begin{eqnarray*}
C+iS &=& \frac{(1-(ae^{i\theta})^{n+1})(1-ae^{-i\theta})} 
{(1-ae^{i\theta})(1-ae^{-i\theta})} \\
&=&
\frac{1-ae^{-i\theta}-a^{n+1}e^{i(n+1)\theta} + a^{n+2}e^{in\theta}}{1-2a\cos
\theta + a^2}
\end{eqnarray*}

\end{itemize}
\end{slide}




\begin{slide}{Summation of series (3)}

\begin{itemize}
\item Equating real and imaginary parts, the cosine series is:
$$ C = \frac{1 - a\cos \theta - a^{n+1}\cos (n+1)\theta + 
a^{n+2} \cos n\theta}{1-2a\cos \theta + a^2}
$$
\item and the sine series is:
$$ S = \frac{a\sin \theta - a^{n+1}\sin (n+1)\theta + 
a^{n+2} \sin n\theta}{1-2a\cos \theta + a^2}
$$

\end{itemize}
\end{slide}



\begin{slide}{Integrals}

How about $C = \int_0^x e^{a \theta} \cos b \theta d\theta,~~~~
S = \int_0^x e^{a \theta} \sin b \theta d\theta $ ?
\begin{itemize}
\item Could do with reduction formulae if $a$ or $b$ is an integer, but .....
\begin{eqnarray*}
C+iS &=& \int_0^x e^{(a+ib) \theta} d\theta \\
&=& \frac{e^{(a+ib)x} - 1}{a+ib} = \frac{(e^{ax}e^{ibx} - 1)(a-ib)}{a^2+b^2}
\\ &=& \frac{(e^{ax}\cos bx - 1 + ie^{ax}\sin bx)(a-ib)}{a^2+b^2}
\end{eqnarray*}

\end{itemize}
\end{slide}




\begin{slide}{Integrals (2)}

\begin{itemize}
\item Result is therefore $C+iS = $
\end{itemize}
$$ \frac{e^{ax}(a\cos bx + b \sin bx) - a + 
i(e^{ax}(a \sin bx - b \cos bx) + b )}{a^2+b^2}  $$

\begin{itemize}
\item and so we get:
\begin{eqnarray*}
C &=&  \frac{e^{ax}(a\cos bx + b \sin bx) - a}{a^2+b^2} \\
S &=&  \frac{e^{ax}(a \sin bx - b \cos bx) + b }{a^2+b^2}
\end{eqnarray*}

\end{itemize}
\end{slide}



\begin{slide}{REAL NUMBERS}
\begin{itemize}
\item Why do we need `real numbers'?  
\begin{itemize}
\item What's wrong with just the rationals?
\item Aren't fractions accurate enough -- they have arbitrary precision?
\end{itemize}
\item  {\bf\large Proposition}: $\sqrt{2}$ is not a rational number
\end{itemize}
\end{slide}


\begin{slide}{Proof that $\sqrt{2}$ is not rational}
\medskip
Suppose $\exists p,q \in \nat$ s.t. $\sqrt{2} = p/q$ and choose $p,q$ s.t.
they have no common factor.  

\medskip
Then
$p^2 = 2 q^2$ and so $p^2$ is even.

\medskip
Therefore $p$ is even (odd $\times$
odd is odd) and so $p^2$ is a multiple of 4.  

\medskip
Therefore  $q^2 = p^2/2$ is
even and hence so is $q$.  But so is $p$, a contradiction.
\end{slide}


\begin{slide}{Useful numbers}
\begin{itemize}
\item So there are `useful' numbers that are not rational.
\item We call the `useful' numbers the {\em real numbers} or just the
{\em reals}, and denote them by $\real$.  
\item Clearly, $\nat \subset \integers \subset \rat \subset \real$
\item How many reals do you think there are, relative to the rationals?
\end{itemize}
\end{slide}

\begin{slide}{How many real numbers?}
\begin{itemize}
\item If $r$ is irrational, then so is $r+q$ for any $q \in \rat$.  (If
$r+q=p \in \rat$, then $r = p-q \in \rat$, a contradiction.)
\item so just $\sqrt{2}$ generates at least as many irrationals as there
are rationals, and we haven't even considered the other arithmetic
operations!
\item in fact there are HUGELY many `more' irrationals than rationals
\end{itemize}
\end{slide}

\begin{slide}{Gaps in the real line}
\begin{itemize}
\item Consider the real numbers in the closed
interval\footnote{Similarly, an open interval has round
brackets: $(0,1)=\{x\mid0 < x < 1\}$ and there are `mixed' intervals,
open at one end, closed at the other, e.g. $(0,1]$.}
$[0,1]=\{x\mid0\leq x \leq 1\}$
\item Number the rational numbers in [0,1] as $$r_1,r_2,r_3, \ldots$$
\item We can do this since the rationals are countable.  Note that the
ordering is not numerical, it can be anything.
\end{itemize}
\end{slide}

\begin{slide}{The rationals' space}
\begin{itemize}
\item Given any small value $\delta \in \rat$,  put the closed interval 
$$I_n = [r_n - \delta/2^n, r_n + \delta/2^n]$$
around the $n$th rational
\item i.e. $r_n$ is in the middle of an interval of
length $\delta/2^{n-1}$
\end{itemize}
\end{slide}

\begin{slide}{The rationals' space (2)}
\begin{itemize}
\item The sum of the lengths of the intervals $I_n$ is
$$\sum_{n=1}^\infty \delta/2^{n-1} = 2\delta$$
\item This is because the sum is a geometric progression of the form
$$\sum_{i=0}^\infty x^i = 1/(1-x)$$ for $| x | < 1$; $x=1/2$ in our case.
\end{itemize}
\end{slide}


\begin{slide}{Continuum of numbers}
\begin{itemize}
\item Some of the intervals overlap, but it doesn't matter, their
combined length is less than $2\delta$ {\em for any value of
$\delta$, however small}
\item Their combined length is therefore 0 (why?) and so the rationals
take up `zero space'
\item The rest of $[0,1]$ is taken up with real, irrational numbers.
\item We want the reals to form a `continuum' so we can move smoothly along
the real line without falling into gaps, e.g. to gradually approach the
solution of an equation by iteration.
\end{itemize}
\end{slide}

\begin{slide}{Digression on bounds}
\begin{itemize}
\item The number $U \in \real$ is an {\bf\large upper bound} of the set of
real numbers $X$ if
$ r \leq U$ for all $r \in X$.  Similarly for a lower bound.
\item A set of reals is {\bf\large bounded above} if it has an upper
bound, and {\bf\large bounded below} if it has a lower bound.
\item A set which is bounded above and below is just called {\bf\large
bounded}
\end{itemize}
\end{slide}

\begin{slide}{Digression on bounds (2)}
\begin{itemize}
\item The smallest element (if it exists) of a set of upper bounds is
called the least upper bound or the {\bf\large supremum} of a set $X$,
abbreviated to $\sup (X)$
\item The largest element (if it exists) of a set of lower bounds is
called the greatest lower bound or the {\bf\large inf{imum}} of a set $X$,
abbreviated to $\inf (X)$
\item What are the $\sup$ and $\inf$ of $(0,1)$?
\end{itemize}
\end{slide}


\begin{slide}{Fundamental Axiom}
\begin{itemize}
\item To get a continuum of reals, we make an assumption: the {\bf\large
Fundamental Axiom}: 

{\em An increasing sequence $r_1,r_2,\ldots$ of real
numbers that is bounded above converges to a limit which is itself a real
number}
\medskip
\begin{itemize}
\item Compare the definition of a {\bf\large Complete Partial Ordering} (CPO)
used in semantics of programming languages (maybe next year or in `domain
theory')
\item `complete'
means `closed w.r.t. limits'.
\end{itemize}
\end{itemize}
\end{slide}

\begin{slide}{Alternative definition}
\begin{itemize}
\item An equivalent form of the Fundamental Axiom is:

{\em The set of upper bounds of any set of real numbers has a least
member} (assuming it is non-empty, of course)
\item The proof of equivalence is non-trivial (but not too hard either): uses the `Chinese box theorem'
\item Similarly for lower bounds
\end{itemize}
\end{slide}


\begin{slide}{Decimal numbers}
\begin{itemize}
\item What we know about is fractions and decimals!
\begin{itemize}
\item Fractions are just rationals, so are also reals because $\rat
\subset \real$
\item Decimals, finite and infinite, define all rationals and also all of
the irrationals in everyday use, like square roots, $\pi$, $e$ etc.
\item Can decimals characterise  {\em all} the reals?
\end{itemize}
\end{itemize}
\end{slide}

\begin{slide}{Real Numbers as decimals}
\begin{itemize}
\item We write a decimal in $[0,1)$ in the form:
$$0.d_1d_2\ldots = \sum_{i=1}^\infty d_i 10^{-i}$$
where $d_i \in \{0,1,2,3,4,5,6,7,8,9\} \;\;\forall i \in \nat$
\item For a {\em finite} decimal of length $n$,
$d_i=0\;\;\forall i > n$
\end{itemize}
\end{slide}

\begin{slide}{Real Numbers as decimals (2)}
\begin{itemize}
\item It can be shown that the decimals provide a {\em complete
characterisation of the reals} 
\begin{itemize}
\item every decimal denotes a real number
\item every real number can be written as a decimal, e.g. .....
\end{itemize}
\end{itemize}

\end{slide}

\begin{slide}{Real Numbers as decimals (3)}
\begin{itemize}
\item the natural number $n$ is written $n.0$
\item 3/4 = 0.75
\item 1/3 = $\stackrel{~~\,.}{0.3} = 0.333333\ldots$ (recurring infinite
decimal)
\item $\pi = 3.141592653589793238462\ldots$  (non-recurring
infinite decimal)
\item The fundamental axiom is crucial in the proof.
\item This is a nice result as it means our intuitive view of real
numbers (as decimals) is sufficient ..... but no coincidence, of course!
\end{itemize}
\end{slide}

\begin{slide}{SEQUENCES AND CONVERGENCE}
\begin{itemize}
\item A sequence is a countable, ordered set of real numbers $\{a_i \in
\real
\mid i
\in
\nat
\}$, usually written $$a_1,a_2,\ldots,a_n,\ldots$$
or simply $$a_1,a_2,\ldots$$
\item Alternatively it is a {\em function}, $a: \nat \rightarrow \real$, with
the obvious definition
\item examples
\begin{itemize}
\item  $1,4,9,\ldots,n^2,\ldots$
\item  $1,-0.25,\stackrel{~~\,.}{0.1},\ldots,(-1)^{n+1}/n^2,\ldots$
\end{itemize}

\end{itemize}
\end{slide}


\begin{slide}{Convergence}

{\bf\large Definition:}  A sequence $a_1,a_2,\ldots$ converges to a
limit $l \in \real$, written $a_n \rightarrow l$ as $n \rightarrow \infty$
or 
$\lim_{n \rightarrow \infty}a_n = l$,
iff $$\forall \epsilon > 0, \exists N \in \nat \mbox{~s.t.~} \forall n >
N,  |a_n - l| < \epsilon$$

\begin{itemize}
\item equivalently, $l-\epsilon < a_n < l+\epsilon$
\item `tramlines' $\epsilon$ away from the limit value $l$
\end{itemize}
\end{slide}


\begin{slide}{Illustration of bounds, Sup and Inf}
{\fig{bounds.eps}{height=5cm}{1}}

Notice how the supremum {\em decreases} and the infimum {\em increases} for the subsets $\{a_n, a_{n+1}, \ldots \}$ as $n$ increases.
\end{slide}



\begin{slide}{Illustration of convergence}
{\fig{limits.eps}{height=5cm}{1}}

Need a bigger $N$ as $\epsilon$ decreases
\end{slide}



\begin{slide}{Convergence (2)}
\begin{itemize}
\item Important in {\em any numerical algorithms \& programs that use
iteration}
\begin{itemize}
\item i.e. quite a lot!  -- graphics, performance analysis, engineering
applications like CFD and FEM .....
\item iteration no use unless it {\em converges}
\item if it does, how fast?  Can we calculate the result directly?
\end{itemize}

\end{itemize}
\end{slide}


\begin{slide}{Convergence and boundedness}
\begin{itemize}
\item {\em For a bounded increasing sequence of positive values
$p_1,p_2,\ldots$ the limit $p$ is equal to the supremum $s = \sup p_n$}
\begin{itemize}
\item Limit $p$ exists by Fundamental Axiom
\item $\forall \epsilon > 0$ the `upper tramline' is an upper bound
\item similarly, every upper bound is above the lower tramline
\item therefore  $p-\epsilon < s < p+\epsilon$ for every $\epsilon > 0$, and so $s=p$
\end{itemize}

\end{itemize}
\end{slide}


\begin{slide}{Convergence and boundedness (2)}
\begin{itemize}
\item {\em A convergent sequence is bounded}
\begin{itemize}
\item Let $a_1,a_2,\ldots$ have limit $l$. 
\item Then $\exists N \st l-1 < a_n < l+1 ~\forall n>N$
\item So,  for all $i \in \nat$,
 $$\min(l-1,a_1,\ldots,a_N) \leq a_i \leq
\max(l+1,a_1,\ldots,a_N)$$
\end{itemize}

\end{itemize}
\end{slide}


\begin{slide}{Proof that $s=p$ by the $\epsilon$-$N$ method }
\begin{enumerate}
\item Suppose $p_m > p$ for some $m$.  

Pick $\epsilon = (p_m-p)/2$ so that $\forall n>m$, $p_n-p \geq p_m-p = 2\epsilon > \epsilon$.
Hence  $p_1, p_2,\ldots$ does not converge to $p$, a contradiction.  Thus $p$ is an upper bound, so $p \geq s$.
\item Now suppose that $u$ is an upper bound.  

Since $p_1,p_2,\ldots$ converges, $\forall \epsilon > 0,
\exists N \st p_N > p-\epsilon$.  
Hence $p-\epsilon < u$ and so $p \leq u$ since $\epsilon$ can be arbitrarily small.  In particular, $p \leq s$.
\end{enumerate}
$p \geq s$ and $p \leq s \Rightarrow p=s$.
\end{slide}


\begin{slide}{Example: $a_n = 1/n$}
\begin{itemize}
\item Intuitively, $1/n$ decreases, getting closer and closer to zero, as
$n$ increases.
\item This (correct) intuition is made rigorous as follows:

Given any $\epsilon > 0$, $a_N  \leq \epsilon$ if $N \geq 1/\epsilon$. 
Choose $N = \lceil 1/\epsilon \rceil$.  Then
$$\forall n > N, \; |a_n | < \epsilon$$ since $a_n$ is decreasing.  Thus, 
$a_n \rightarrow 0$ as $n \rightarrow \infty$.
\item Similarly for $a_n=1/n^\alpha$ for any $\alpha > 0$ (exercise).
\end{itemize}
\end{slide}


\begin{slide}{Trapping}

{\bf\large Theorem:}  Given convergent sequences $b_1,b_2,\ldots$ and
$c_1,c_2,\ldots$, each with limit $l$, suppose the sequence
$a_1,a_2,\ldots$ satisfies
$$b_n \leq a_n \leq c_n$$  $\forall n \geq N$ for some $N \in \nat$.  Then 
$a_n \rightarrow l$ as $n \rightarrow \infty$.
\begin{itemize}
\item Intuitively, the sequence $a_n$ becomes `trapped' between $b_n$ and
$c_n$.
\item Commonly called the {\em sandwich theorem}.
\end{itemize}
\end{slide}


\begin{slide}{Proof of sandwich theorem}
\begin{itemize}
\item Pick $\epsilon > 0$
\item Since the sequences $b_n$ and $c_n$ converge, $\exists N_1, N_2 \st 
\forall n > \max (N_1,N_2),~ l-\epsilon < b_n < l+\epsilon \mbox{~and~}
l-\epsilon < c_n < l+\epsilon$, i.e.
$$l-\epsilon < b_n < a_n < c_n < l+\epsilon$$
\item Hence, $ \exists N' \big(= \max(N,N_1,N_2)\big) \st  \forall n>N', |a_n - l| < \epsilon $, where $N$ is the threshold in the theorem's hypothesis
\item So $a_n \rightarrow l$ as $n \rightarrow \infty$

\end{itemize}
\end{slide}


\begin{slide}{Special cases}
\begin{itemize}
\item If $b_n=l$ for all $n > 0$, the greatest lower bound (infimum) on
$a_n$ is the constant $l$
\item An upper bound is $c_n$ and the supremum is $l$
\item E.g. the sequence $1/n^2$ is trapped between $0$ and $1/n$, which we
just showed has limit $0$
\item Similarly, if $c_n=l$ for all $n > 0$, the supremum
on $a_n$ is the constant $l$ and a lower  bound is $b_n$ with infimum 
$l$

\end{itemize}
\end{slide}


\overlays{4}{
\begin{slide}{Example}
\begin{itemize}
\item Suppose~~$a_n = \frac{1}{\sqrt{n^2+1}} + \frac{1}{\sqrt{n^2+2}} +
\ldots + \frac{1}{\sqrt{n^2+n}}$
\fromSlide{2}{
\item $a_n > \frac{n}{\sqrt{n^2+n}} = \frac{1}{\sqrt{1+1/n}} $ 
}
\fromSlide{3}{
\item $a_n <  \frac{n}{\sqrt{n^2+1}} =
\frac{1}{\sqrt{1+1/n^2}}$
}
\fromSlide{4}{
\item Hence $a_n$ is trapped between two sequences that tend
to 1 as $n \rightarrow \infty$, so $a_n \rightarrow 1$
}
\end{itemize}
\end{slide}}


\begin{slide}{Ratio convergence test}

{\bf\large Theorem:}  If $|a_{n+1}/a_n| < c < 1$ for some $c \in \real$
and for all sufficiently large $n$ (i.e. $\forall n \geq N$ for some
integer
$N$), then $a_n \rightarrow 0$ as $n \rightarrow \infty$. 
\begin{itemize}
\item A convergent sequence with limit 0 is called a {\em null sequence}.
\item The proof is that, for $n \geq N$, $$|a_n| < c |a_{n-1}| < \ldots <
c^{n-N}|a_N| = k c^n$$ where $k$ is the constant $|a_N|/c^N$
\item But $c^n \rightarrow 0$ as $n \rightarrow \infty$ and so the theorem
is proved by the sandwich theorem

\end{itemize}
\end{slide}


\begin{slide}{Ratio divergence test}

{\bf\large Theorem:}  If $|a_{n+1}/a_n| > c > 1$ for some $c \in \real$
and for all sufficiently large $n$, then the sequence $a_n$ diverges. 
\begin{itemize}
\item The analogous proof is that, for $n \geq N$, $$|a_n| > c |a_{n-1}| > \ldots >
c^{n-N}|a_N| = k c^n$$ 
\item But $c^n$ has no upper bound, and hence neither does $|a_n|$
\end{itemize}
\end{slide}



\begin{slide}{Alternative form of ratio tests}

Simpler forms of the ratio tests use the {\em limit} of the ratio $|a_{n+1}/a_n|$, 
when this exists -- call it $r$:
\begin{itemize}
\item  Then if $r<1$ the
sequence converges to 0 and if $r>1$, it diverges.
\item The proof is simple:  e.g. if $r<1$, then $\exists N $ s.t. $ \forall
n>N, |a_{n+1}/a_n| < (r+1)/2 < 1$ and we can pick $c=(r+1)/2$

\end{itemize}
\end{slide}



\begin{slide}{Combinations of sequences}

{\bf\large Theorem:}  Given convergent sequences $a_n$ and $b_n$ with
limits $a$ and $b$ respectively, then 
\begin{itemize}
\item $\lim\limits_{n \rightarrow \infty} (a_n+b_n) = a+b$
\item $\lim\limits_{n \rightarrow \infty} (a_n-b_n) = a-b$
\item $\lim\limits_{n \rightarrow \infty} (a_n b_n) = a b$
\item $\lim\limits_{n \rightarrow \infty} (a_n / b_n) = a / b$
provided that $b \neq 0$

\end{itemize}
\end{slide}


\begin{slide}{Sample proof: product}
\begin{eqnarray*}
|a_nb_n - ab| &=& |a_n(b_n-b) + b(a_n-a)| \\
&\leq& |a_n||b_n-b| + |b||a_n-a|
\end{eqnarray*}
\begin{itemize}
\item Let $A$ be any upper bound of $\{|a_n|\}$
\item Given $\epsilon > 0, \; \exists N_1 \st |a_n-a| < \epsilon/(A+|b|) $
for all
$n>N_1$ and $ \exists N_2 \st |b_n-b| < \epsilon/(A+|b|) $ for all
$n>N_2$
\item Hence $|a_nb_n - ab| < \epsilon $ for all $n > \max(N_1,N_2)$

\end{itemize}
\end{slide}


\begin{slide}{Example}
$$ a_n = \frac{3n^2 + n}{n^2+3n+1}$$
\begin{itemize}
\item Divide numerator and denominator by $n^2$: 
$$ a_n = \frac{3 + 1/n}{1+3/n+1/n^2}$$
\item $1/n \rightarrow 0$, so $1/n^2 \rightarrow 0$ (product of sequences
or trapping)
\end{itemize}
\end{slide}


\begin{slide}{Example (2)}
\begin{itemize}
\item numerator and denominator converge to 3 and 1 respectively (sum of sequences, 3 times)
\item so $a_n \rightarrow 3$ by the division rule (denominator non-zero)
\item {\em rigorous justification of `domination of largest term' rule}

\end{itemize}
\end{slide}


\begin{slide}{General convergence theorem}
\emph{\bf \large NB: {\it This is not examinable}}

{\bf\large Theorem (Cauchy):}  The sequence $a_1,a_2,\ldots$ is convergent
if and only if $\forall \epsilon>0, \exists N \st |a_n - a_m| < \epsilon$
for all $n, m > N$.
\begin{itemize}
\item This theorem is useful because you don't need to know what the limit
is (when it exists), e.g. 
\begin{itemize}
\item when $a_n$ is defined by a recurrence relation;
\item when $a_n$ is defined by a recursive Haskell function
\end{itemize}
\item It is also a test for divergence

\end{itemize}
\end{slide}


\begin{slide}{Example}
$$ a_n = \sum_{i=1}^n \frac{1}{i(i+1)}$$
\begin{eqnarray*} 
a_n - a_m &=& \frac{1}{(m+1)(m+2)} + \ldots + \frac{1}{n(n+1)} \\
&=&  \left( \frac{1}{m+1} - \frac{1}{m+2}\right) + \left( \frac{1}{m+2} -
\frac{1}{m+3}\right) + \\
&&  \ldots + \left( \frac{1}{n} - \frac{1}{n+1}\right)  \\
&=& \frac{1}{m+1} - \frac{1}{n+1}  \rightarrow 0  \mbox{~~~as~} m
\rightarrow \infty \mbox{~(with $n > m$)}
\end{eqnarray*}
\end{slide}



\begin{slide}{Iteration and fixpoints}
Consider the simple iteration:
$$a_{n+1} = \frac{2+a_n}{3+a_n}$$
with initial value $a_1 = 1$.
\begin{itemize}
\item  If this converges, its limit is $l$ given by
$$l^2 + 2l-2=0$$
so that $l = -1 \pm \sqrt{3}$.
\item So will it converge, and to which root, $l=l^+$ or $l^-$?
\end{itemize}
\end{slide}



\begin{slide}{Convergence}
\begin{itemize}
\item  Clearly, every $a_n>0$ (rigorous proof by induction), so can't converge to $l^-$.
\item Let $x_n = a_n-l^+$ for $n \geq 1$ and try to prove $x_n \rightarrow 0$
\item Aiming to use the ratio test for sequences:
$$x_{n+1} = \frac{2+a_n}{3+a_n}-\frac{2+l^+}{3+l^+} = \frac{x_{n}}{(3+a_n)(3+l^+)}$$
\item Thus $ |x_{n+1}| < |x_{n}|/9$ since $a_n$ and $l^+ >0$
\item So the iteration does converge to $l^+ =\sqrt{3}-1$
\end{itemize}
\end{slide}



\begin{slide}{Graphically ....}
{\fig{cobweb1-1.eps}{height=4.5cm}{1}}
The iteration follows the red path, starting at the initial point $(1,0)$ and repeating:
\begin{itemize}
\item vertical segment up to the blue line $y=x$
\item horizontal to the curve $y=\frac{2+x}{3+x}$
\end{itemize}
\end{slide}



\begin{slide}{Smaller plot range and zoom 10$\times$}
{\fig{cobweb1-2.eps}{height=7.75cm}{1}}
\end{slide}



\begin{slide}{Zoom 100$\times$ and 1000$\times$}
{\fig{cobweb1-3.eps}{height=7.75cm}{1}}
\end{slide}



\begin{slide}{Starting at $x=-2.7$ near negative root}
{\fig{cobweb-2.7-1.eps}{height=7cm}{1}}
\end{slide}



\begin{slide}{Smaller plot range and zoom 10$\times$}
{\fig{cobweb-2.7-2.eps}{height=7.75cm}{1}}
\end{slide}



\begin{slide}{Zoom 100$\times$ and 1000$\times$}
{\fig{cobweb-2.7-3.eps}{height=7.75cm}{1}}
\end{slide}



\begin{slide}{Starting at $x=-2.8$ (other side)}
{\fig{cobweb-2.8-1.eps}{height=7cm}{1}}
\end{slide}



\begin{slide}{Smaller plot range and zoom 10$\times$}
{\fig{cobweb-2.8-2.eps}{height=7.75cm}{1}}
\end{slide}



\begin{slide}{Zoom 100$\times$ and 1000$\times$}
{\fig{cobweb-2.8-3.eps}{height=7.75cm}{1}}
\end{slide}



\begin{slide}{INFINITE SERIES}

An infinite series is a summation of the form
$S = \sum\limits_{i=1}^\infty a_i$ for a real {\em sequence} 
$a_1,a_2,\ldots$
\begin{itemize}
\item E.g. the decimal numbers
\item Finite if $\exists N \in \nat \st a_n=0 \;\; \forall n>N$
\item $n$th partial sum $S_n = \sum\limits_{i=1}^n a_i$
\item Partial sums $S_1,S_2,\ldots$ form a {\em sequence}:
\begin{itemize}
\item A series converges or diverges iff its sequence of partial sums does
\item Often the best means of analysis
\end{itemize}

\end{itemize}
\end{slide}


\begin{slide}{Geometric series}
\begin{itemize}
\item A ubiquitous example is $G = \sum\limits_{i=1}^\infty x^i$ -- the
{\em geometric progression}
\item {\em Provided $G$ exists}, $G = x + \sum\limits_{i=2}^\infty x^i = x
+ x \sum\limits_{i=1}^\infty x^i = x + xG$ so:
$$G = \frac{x}{1-x}$$
\item When does $G$ exist?  When the series (or sequence of partial sums)
is convergent!

\end{itemize}
\end{slide}


\begin{slide}{Convergence of the geometric series}
\begin{itemize}
\item Similarly, $n$th partial sum $G_n = x + \sum\limits_{i=2}^n
x^i = x + x \sum\limits_{i=1}^{n-1} x^i = x + x(G_n - x^n)$, so:
$$G_n = \frac{x-x^{n+1}}{1-x}$$
\begin{itemize}
\item For $|x| < 1, \; G_n \rightarrow x/(1-x)$ as $n \rightarrow \infty$
by rules for sequences.
\item Similarly, for $|x| > 1, \; G_n$ diverges  as $n \rightarrow \infty$.
\item For $x=1$, $G_n = n$ which also diverges.
\end{itemize}
\end{itemize}

\end{slide}


\begin{slide}{Result}
\begin{itemize}
\item If $|x|<1$, i.e. $-1<x<1$, $$G = \sum\limits_{i=1}^\infty x^i =
\frac{x}{1-x}$$
\item If $|x| \geq 1, \; G = \sum\limits_{i=1}^\infty x^i$ does not exist, ~i.e.
the series {\em diverges} (for $x \geq 1$ the partial sums tend to $\infty$;
for $x \leq -1$ they oscillate).

\end{itemize}
\end{slide}


\begin{slide}{Another example}
\begin{itemize}
\item Consider the convergence properties of the series
$$ S = \sum_{i=1}^\infty \frac{1}{i(i+1)}$$
\item Using partial fractions, we can write the $n$th partial sum
$$
S_n = \sum_{i=1}^n \left(\frac{1}{i} - \frac{1}{i+1}\right) = 1 -
\frac{1}{n+1}
$$
\item So $S_n$ converges, therefore so does the series and $S=1$
\end{itemize}
\end{slide}




\begin{slide}{Sum of inverse squares}
\begin{itemize}
\item What about the series
$ S = \sum\limits_{i=1}^\infty \frac{1}{i^2}$?
\item $\frac{1}{i(i+1)} < \frac{1}{i^2} < \frac{1}{(i-1)i}$ for $i \geq
2$.  So, summing from $i=2$ to $n$ and adding 1:
$$1/2 + \sum_{i=1}^n \frac{1}{i(i+1)} < S_n < 1+ \sum_{i=1}^{n-1}
\frac{1}{i(i+1)}$$
\end{itemize}
\end{slide}



\begin{slide}{Sum of inverse squares (2)}
\begin{itemize}
\item Thus, from the previous slide,
$$3/2 - 1/(n+1) < S_n < 2 - 1/n$$
\item Since $S_n$ is increasing, the series converges (by the fundamental axiom, 2 is an upper bound) to a value in
$(1.5,2)$.
\end{itemize}
\end{slide}



\begin{slide}{Dodgy series}
Consider the series
$$ S = \sum_{i=1}^\infty (-1)^{i+1}/i = 1 -\frac{1}{2} +
\frac{1}{3} - \frac{1}{4} + \ldots $$
\begin{itemize}
\item $S_{2n} = \left(1 -\frac{1}{2}\right) +
\left(\frac{1}{3} - \frac{1}{4}\right) + \ldots + \left(\frac{1}{2n-1} -
\frac{1}{2n}\right) > 0.5$ and increasing
\item $S_{2n} = 1 - \left(\frac{1}{2} - \frac{1}{3}\right) -
\left(\frac{1}{4} - \frac{1}{5}\right) - 
\ldots - \left(\frac{1}{2n-2} - \frac{1}{2n-1}\right) - \frac{1}{2n} <
1$
\end{itemize}
\end{slide}



\begin{slide}{Dodgy series (2)}
\begin{itemize}
\item Thus $S_{2n}$ is increasing and bounded, hence convergent.
\item $S_{2n+1} = S_{2n} +\frac{1}{2n+1}$ and so all partial sums
converge to the same limit, $l$ say.  Hence $S$ converges to $l$.
\end{itemize}
\end{slide}


\begin{slide}{Rearrangements}
\begin{itemize}
\item Now consider the sub-series formed by taking two positive terms and
a negative term: $B_{3n}  = \sum_{i=1}^n b_i$ where $b_i = 
\frac{1}{4i-3}+\frac{1}{4i-1} - \frac{1}{2i}$
\item Clearly, as $n \rightarrow \infty$, $B_{3n}$ includes all the terms
of $S$: it is a {\em rearrangement} of $S$
\item Now, $B_{3n} = S_{4n} + \frac{1}{2n+2} + \frac{1}{2n+4} + \ldots +
\frac{1}{4n} > S_{4n} + 0.25$
\item Hence, $B_{3n}$ converges to a different limit than $S_{4n}$ (limit
$l$)!

\end{itemize}
\end{slide}


\begin{slide}{Sums of series}

{\bf\large Theorem:}  Suppose $\sum a_i$ and $\sum b_i$ are convergent with
sums $a$ and $b$ respectively.  Then if $c_i = a_i + b_i$, $\sum c_i$ is
convergent with sum $a+b$, and $\sum \lambda a_i$ is convergent with
sum $\lambda a$.
\begin{itemize}
\item Easy to prove by considering the
partial sums
\item Further expected properties hold for series without negative terms
.....
\end{itemize}
\end{slide}



\begin{slide}{Series of non-negative terms}
\begin{itemize}
\item In a series of non-negative terms, the partial sums are increasing
and hence either
\begin{itemize}
\item converge, if the partial sums are bounded
\item diverge, if they are not
\end{itemize}
\item Notation: 
\begin{itemize}
\item $p_i$ is a non-negative term in the series $\sum p_i$
\item $\sum c_i$ is a convergent series with sum $c$
\item $\sum d_i$ is a divergent series
\end{itemize}
\end{itemize}
\end{slide}


\begin{slide}{Comparison test}

{\bf\large Theorem:}  Let $\lambda > 0$ and $N \in \nat$.  Then
\begin{enumerate}
\item if $p_i \leq \lambda c_i ~\forall i>N$, then $\sum p_i$ converges;
\item if $p_i \geq \lambda d_i ~\forall i>N$, then $\sum p_i$ diverges.
\end{enumerate}
Sometimes the following form is easier:
\begin{itemize}
\item if $\lim \frac{p_i}{c_i}$ exists, then $\sum p_i$ converges;
\item if $\lim \frac{d_i}{p_i}$ exists, then $\sum p_i$ diverges.

\end{itemize}
\end{slide}


\begin{slide}{D'Alembert's ratio test}
\begin{itemize}
\item This is a very useful -- and even over-used -- technique:
\end{itemize}
{\bf\large Theorem:} For $N \in \nat$,  
\begin{enumerate}
\item if $p_{i+1}/p_i \geq 1 ~\forall i>N$, then $\sum p_i$ diverges;
\item if $\exists k \in \real \st p_{i+1}/p_i < k < 1 ~\forall i>N$, then
$\sum p_i$ converges.
\end{enumerate}
{\bf\large Exercise:} Consider the series with $p_i = 1/i$
\end{slide}


\begin{slide}{Proof of part 2}
\begin{itemize}
\item $p_{i+1} < kp_i$ for $i>N$.  Thus, (formally by induction) 
$$p_i < k^{i}(p_{N+1}/k^{N+1}) \mbox{~~if~~} i>N+1$$
\item Thus $\sum p_i$ converges by the comparison test with $c_i = k^i$
and $\lambda = p_{N+1}/k^{N+1}$ (Note $k > 0$.)
\item Proof of part 1 is analogous.
\end{itemize}
\end{slide}


\begin{slide}{Absolute convergence} 

A series $\sum a_i$
is {\em Absolutely Convergent} if $\sum |a_i|$ converges, i.e. the sum of
the absolute values of its terms is convergent.
\begin{itemize}
\item The sum of absolute values is a sum of positive terms
\item An absolutely convergent series is convergent (proof by Cauchy's test)
\item A series which is convergent but not absolutely convergent is
called {\em conditionally convergent}
\begin{itemize}
\item E.g. the `dodgy series'
\end{itemize}
\end{itemize}
\end{slide}



\begin{slide}{CONTINUITY}
\begin{itemize}
\item A function $f(x)$ is {\em continuous} at $x=a$ if 
$f(x) \rightarrow f(a)$ as $x \rightarrow a$
\item I.e. there is no `jump' in the graph of $f(x)$ at $x=a$ or 
{\em `you can draw the graph without taking your pen off the paper'}
\begin{itemize}
\item  E.g. the {\em step-function} $f(x) = \lfloor x \rfloor$ is {\em not} continuous.
\item $f(x) = (1/x)\sin x$ {\em is} continuous at all $x$, 
including $x=0$ if we define $f(0) = 1$.
\item $f(x) = (1/x)\sin (1/x)$ {\em is not} continuous at $x=0$
\end{itemize}

\item What does it mean to say `as $x \rightarrow a$'?

\end{itemize}
\end{slide}



\begin{slide}{Graph of $f(x) = (1/x)\sin x$}
{\fig{x-1Sinx.eps}{height=7cm}{1}}
\end{slide}



\begin{slide}{Graph of $f(x) = (1/x)\sin (1/x)$}
{\fig{x-1Sinx-1.eps}{height=7cm}{1}}
\end{slide}



\begin{slide}{Limit of a function}
{\bf\large Definition}: $f(x) \rightarrow l$ as $x \rightarrow a$ if
$$\forall \epsilon > 0, \exists \delta>0 \st 
|x-a|<\delta \Rightarrow |f(x)-l| < \epsilon $$
\begin{itemize}
\item The rigorous definition of continuity is therefore 
$\forall \epsilon > 0, \exists \delta>0 \st 
|x-a|<\delta \Rightarrow |f(x)-f(a)| < \epsilon $
\begin{itemize}
\item In words, as $x$ gets closer and closer to $a$, $f(x)$ gets closer and closer to $f(a)$.
\item I.e. $f(x)$ can't suddenly `jump' to $f(a)$, skipping over intermediate values, leaving a gap, `taking the pen off the page'.
\end{itemize}
\end{itemize}
\end{slide}



\begin{slide}{A continuous function}
{\fig{cts-limits.eps}{height=7.5cm}{1}}
\end{slide}



\begin{slide}{A discontinuous function}
{\fig{non-cts-limits.eps}{height=7.5cm}{1}}
\end{slide}



\begin{slide}{Comments}
\begin{itemize}
\item In the continuous function, as $x$ gets closer and closer to $10$, $f(x)$ gets closer and closer to $l$.
\begin{itemize}
\item If $f(10)$ is defined to be $l$, $f$ is continuous at $x=10$
\item Points in the arbitrary `green' intervals on the $y$-axis must be the images of `red' intervals on the $x$-axis
\end{itemize}
\item Note the discontinuity at $x=10$ in the discontinuous function:
\begin{itemize}
\item Cannot find any `red' interval when the `green' interval gets too small.
\end{itemize}
\end{itemize}
\end{slide}



\begin{slide}{Simple properties}
\begin{itemize}
\item Sums and products of (a finite number of) continuous functions are continuous -- $f(x) + \lambda g(x),  f(x)g(x)$ are continuous if $f$ and $g$ are ($\lambda \in \real$).
\item Same for quotients $f(x)/g(x)$ where $g(x)\neq 0$.
\item A continuous function of a continuous function is continuous -- i.e. the composition $f(g(x))$ is continuous.
\end{itemize}
\end{slide}





\begin{slide}{Differentiability and continuity}
\begin{itemize}
\item If $f(x)$ is differentiable at $x=a$, it is continuous there.  Why?
\item Recalling the definition of a derivative, $\lim\limits_{\delta x \rightarrow 0} \frac{f(x+\delta x)-f(x)}{\delta x} < \infty$ and so $f(x+\delta x) \rightarrow f(x)$ as $x+\delta x \rightarrow x$
\item But $f(x) = x \sin(1/x)$ is continuous at $x=0$, where $f(x) = 0$, but {\em not} differentiable there [$f'(x) = \sin(1/x)-(1/x) \cos(1/x)$ for $x \neq 0$]
\end{itemize}
\end{slide}



\begin{slide}{Graph of $f(x) = x \sin(1/x)$}
{\fig{xSinx-1.eps}{height=7cm}{1}}
\end{slide}



\end{document}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%