\documentclass{article}

%\usepackage{teaching}

\title{Mathematical Methods: Tutorial sheet 7}

\date{29 November 2007}

\author{Peter Harrison}


\renewcommand{\baselinestretch}{1}   % {1,1.25,1.667} = % single/1.5/double line spacing

% Prefix an answer with a bold "Solution:" label; \nopagebreak[1] discourages
% a page break between the label and the answer text #1.
% (\textbf replaces the obsolete plain-TeX {\bf ...} form.)
\newcommand{\answerblock}[1]{\textbf{Solution:}\nopagebreak[1]#1}
\usepackage{mathchars}

% Standard number sets, using the double-struck letters supplied by the
% mathchars package loaded above.
\newcommand{\nat}{\bbbn}        % natural numbers N
\newcommand{\rat}{\bbbq}        % rationals Q (overridden further down)
\newcommand{\real}{\bbbr}       % reals R
\newcommand{\integers}{\bbbz}   % integers Z (overridden further down)
% "such that": italic, with surrounding ties, kept unbreakable by \mbox.
% (\textit replaces the obsolete {\it ...} form.)
\newcommand{\st}{\mbox{\textit{~s.t.~}}}

% Poor man's double-struck Q and Z: overprint two letters using negative
% kerns (\! = negative thin space), replacing the \bbbq/\bbbz versions above.
\renewcommand{\rat}{Q \!\!\!\! O}
\renewcommand{\integers}{Z \!\!\! Z}

%\renewcommand{\answerblock}[1]{}
% Block comment: swallows its argument entirely, so \bcomment{...} typesets nothing.
\newcommand{\bcomment}[1]{}

% Parenthesised matrices and column vectors.  Plain versions use r columns
% (entries right-aligned); the "c"-prefixed variants centre the entries.
% 2x2 matrix, row-major arguments #1 #2 / #3 #4.
\newcommand{\mtx}[4]{\left(\begin{array}{rr}#1&#2\\#3&#4\end{array}\right)}
\newcommand{\cmtx}[4]{\left(\begin{array}{cc}#1&#2\\#3&#4\end{array}\right)}
% 3x3 matrix, row-major arguments #1..#9.
\newcommand{\Bmtx}[9]{\left(\begin{array}{rrr}#1&#2&#3\\#4&#5&#6\\#7&#8&#9\end{array}\right)}
\newcommand{\Bcmtx}[9]{\left(\begin{array}{ccc}#1&#2&#3\\#4&#5&#6\\#7&#8&#9\end{array}\right)}
% Column 2-vector.
\newcommand{\vcr}[2]{\left(\begin{array}{r}#1\\#2\end{array}\right)}
\newcommand{\cvcr}[2]{\left(\begin{array}{c}#1\\#2\end{array}\right)}
% Column 3-vector.
\newcommand{\Bvcr}[3]{\left(\begin{array}{r}#1\\#2\\#3\end{array}\right)}
\newcommand{\Bcvcr}[3]{\left(\begin{array}{c}#1\\#2\\#3\end{array}\right)}
% 2x2 zero and identity matrices.
\newcommand{\zeromtx}{\left(\begin{array}{rr}0&0\\0&0\end{array}\right)}
\newcommand{\identitymtx}{\left(\begin{array}{rr}1&0\\0&1\end{array}\right)}
% Determinant of #1, typeset between auto-sized vertical bars.
\newcommand{\determinant}[1]{\left| #1 \right|}

% Cartesian unit vectors i, j, k with arrow accents.
\newcommand{\vi}{\vec{i}}
\newcommand{\vj}{\vec{j}}
\newcommand{\vk}{\vec{k}}


\begin{document}
\maketitle
\noindent {\bf Assessed questions: 1, 4(a) and 5(a,b,f).  Due:  Monday 10 December 2007.}


\bigskip

{\bf More on convergence}
\begin{enumerate}

\item  {\bf (Exam question Q5C1452005)} Use the $\epsilon$-$N$ method to prove rigorously that $x^n \rightarrow 0$ as $n
\rightarrow \infty$ if $|x|<1$.  That is, given $\epsilon>0$ find a number
$N(\epsilon)$ (which depends on $\epsilon$) for which 
$m>N(\epsilon) \Rightarrow |x^m|<\epsilon$.

\answerblock{
$|x|^n$ is decreasing (the case $x=0$ is trivial) and $|x^N| = |x|^N = \epsilon$ when $N=\log_{|x|}
\epsilon$.  So $N(\epsilon) = \log_{|x|} \epsilon$ -- or anything bigger --
does the job.}

\item  Prove from first principles (i.e. use the $\epsilon$-$N$ method) that $$\lim_{n \rightarrow \infty} \cos^2 (1/n) = 1$$ by first:
\begin{itemize}
\item using a geometrical argument to show that $\sin \theta < \theta$ for $0 < \theta < \pi/2$ ($\theta$ measured in radians).
\item deducing that $\cos^2 \theta > 1 - \theta^2$ for $0 < \theta < \pi/2$
\end{itemize}

\answerblock{
\begin{itemize}
\item Draw a sector of a circle with radius 1 and angle $\theta$ at the centre.  The length of the arc is $\theta$.  Drop a perpendicular from one radius to the other; its length is $\sin \theta$ which is less than the arc length.
\item $\cos^2 \theta =1-\sin^2 \theta$ and so $\cos^2 \theta > 1 - \theta^2$
\end{itemize}
Thus, remembering that $|\cos \theta| \leq 1$,  $|\cos^2 \theta -1| < |\theta^2|$.  (Actually easier, just use $|\cos^2 \theta -1| = |\sin^2 \theta| < |\theta^2|$.)  Now proceed as in part a of question 2 on exercise sheet 5, with $\alpha=2$.

You can also use the fact that $1-\cos \theta = 2 \sin^2 (\theta/2) < \theta^2/2$ to show similarly that 
$$\lim_{n \rightarrow \infty} \cos (1/n) = 1$$
}




\item The `limit superior' of a sequence $a_1,a_2,\ldots $ is written $\limsup\limits_{n \rightarrow \infty} a_n$ and defined as follows: 
\begin{itemize}
\item let $u_n = \sup\limits_{n \leq k < \infty} a_k$
\item then $\limsup\limits_{n \rightarrow \infty} a_n = \lim\limits_{n \rightarrow \infty} u_n$
\end{itemize}
Similarly, the `limit inferior' $\liminf\limits_{n \rightarrow \infty} a_n =  \lim\limits_{n \rightarrow \infty}  \inf\limits_{n \leq k < \infty} a_k$.
\begin{enumerate}
\item Show that the sequence $u_1,u_2,\ldots $ is non-increasing, i.e. $u_1 \geq u_2 \geq u_3 \ldots $.
\item Prove that a bounded sequence has a $\limsup$ and a $\liminf$ which are both finite.
\item If a sequence is unbounded but diverges neither to $+\infty$ nor to $-\infty$, does it have a $\limsup$ or a $\liminf$?
\item If a sequence $a_1,a_2,\ldots $ has $$\liminf_{n \rightarrow \infty} a_n 
=  \limsup_{n \rightarrow \infty} a_n =  x$$ does $\lim\limits_{n \rightarrow \infty} a_n$ exist and, if so, what is it and why?
\end{enumerate}

\answerblock{
\begin{enumerate}
\item  For all $n \in \nat, \;\; u_{n} = \sup \{a_n,a_{n+1},\ldots\} = \max \Big(a_n, \sup  \{a_{n+1},a_{n+2},\ldots\} \Big) \geq 
 \sup  \{a_{n+1},a_{n+2},\ldots\}  = u_{n+1}$.
\item If  $a_1,a_2,\ldots $ is bounded, then the non-increasing sequence  $u_1,u_2,\ldots $ is bounded and so converges (to $u$ say) by the Fundamental Axiom.  Hence $\limsup a_n$ exists and is equal to $u$.   Similarly for $\liminf a_n$.
\item To prove $\{u_n,u_{n+1},\ldots \}$ is bounded below for all $n \in \nat$ it is sufficient to find {\em any} constant $L \st a_k > L$ for {\em infinitely many}  $k$.  In other words, we only need a real number $L$ which is less than infinitely many sequence elements -- \emph{i.e. there are always some bigger than $L$ no matter how far along the sequence we go}.  A lower bound on the \emph{whole} sequence is {\em not} necessary since the suprema $u_n$ will clearly be bigger than $L$, being bigger than the particular $a_k$ chosen by definition.

For example, the sequence defined by $a_n = 1+ 1/n$ if $n$ is odd and $-n$ if $n$ is even has no lower bound but has $\limsup$ equal to $1$, the limit of the sequence of upper bounds $u_n = 2,4/3,4/3,6/5,6/5,\ldots$.

So the answer is `yes'!

\item Let $l_n = \inf\limits_{n \leq k < \infty} a_k$.  Then, $\forall n \in \nat, \;\; l_n \leq a_n \leq u_n$.  Therefore $a_1,a_2,\ldots $ converges and 
$\lim\limits_{n \rightarrow \infty} a_n = x$
by the sandwich theorem.
\end{enumerate}
}



\item

Given that the sequences $a_1, a_2, \ldots $ and $b_1, b_2, \ldots $ converge to limits $a$ and $b$ respectively, show that:
\begin{enumerate}
\item The sequence $c_n = a_n + b_n$ converges to $a+b$;
\item If $a>0$, the sequence $d_n = 1/a_n$ converges to $1/a$.
\end{enumerate}

\answerblock{
\begin{enumerate}
\item Given $\epsilon>0, \exists N_1, N_2 ~s.t.~ \forall n>N_1, |a_n - a|<\epsilon/2$ and $ \forall n>N_2, |b_n - b| < \epsilon/2 $.  Thus, for $n > \max(N_1,N_2),$ $$|a_n + b_n-a-b| \leq |a_n-a| + |b_n-b| < \epsilon$$
\item Given $\epsilon>0, \exists N_1 ~s.t.~ \forall n>N_1, |a_n - a|<\epsilon $. \\  Also, $\exists N_2 ~s.t.~ |a_n - a|<|a|/2$ for $n>N_2$.

Thus
$|1/a_n - 1/a| = |a_n-a|/|a_n a| < 2|a_n-a|/|a^2| < 2 \epsilon /|a^2| $
for $n > \max(N_1,N_2)$.
\end{enumerate}
}



\item 

Investigate the convergence properties (either converges or diverges for different values of the parameter $x$, if present) of each of the following series $\sum\limits_{n=1}^\infty a_n $, where
\begin{enumerate}
\item $a_n = \frac{1}{3n+2}$
\item $a_n = \frac{1}{1+n^2}$
\item $a_n = n! x^n$
\item $a_n = \left(\frac{x}{n}\right)^n$
\item $a_n = \sin \frac{\pi}{n}$
\item $a_n = \frac{\sin n x}{n^2}$
\end{enumerate}
{\em Note:} $2x/\pi < \sin x < x\;\;\; (0<x<\pi/2)$

\answerblock{
Let the partial sum $S_n = \sum_{i=1}^n a_i$
\begin{enumerate}
\item $a_i > 1/(4i)$ for $i>2$ and so $ S_n > a_1 + a_2 + 0.25 \sum_{i=3}^n 1/i $ which
diverges.  So series diverges by comparison test.

\item $ a_i < 1/i^2$ so $ S_n <  \sum_{i=1}^n 1/i^2$ which converges.  Hence series converges.

\item $a_{n+1}/a_n = (n+1)x > 1$ for $n \geq 1/x$.  So series diverges for all $x > 0$ by
D'Alembert's ratio test.  Similarly for $x<0$, when series oscillates.

\item $ \frac{|x|}{n+1} \left(\frac{n}{n+1}\right)^n < \frac{|x|}{n+1} < 1$ for
$n > |x|$.  So series converges absolutely, and hence converges, for all $x$.  

\item For $n>2, a_n > 2/n$ so series diverges by comparison test, as in part (a).

\item $|a_n| \leq 1/n^2$ so series converges absolutely, and hence converges,  by comparison
test, as in part (b).

\end{enumerate}
}



\item

Consider the quadratic equation $$x^2-2x-1=0$$ which has roots $1\pm \sqrt{2}$.  [Usually in `real' problems
we don't know the answer, but will know a valid range of values, e.g. in the interval [2,3].]  The aim of
this exercise is to compute $\sqrt{2}$ iteratively.  Define the recurrence:
$$a_{n+1} = \sqrt{2a_n+1},\;\;a_1=1$$

\begin{enumerate}
\item Show that, if the recurrence converges, its limit is one of the roots of the quadratic equation above;
\item Prove by induction (if you know how) that $0 < a_n < 3$ for all $n \in \nat$ [if you don't know
induction, just show that $0 < a_n < 3 \Rightarrow 0 < a_{n+1} < 3$];
\item Hence show that if the iteration converges, its limit is the positive root, $l=1+\sqrt{2}$;
\item Calculate $a_{n+1}^2-l^2 = (a_{n+1}+l)(a_{n+1}-l)$ and show that
$$a_{n+1}-l=\frac{2(a_n-l)}{a_{n+1}+l}$$
\item Hence show that $a_n \rightarrow l$ as $n \rightarrow \infty$;
\item Compute $\{ a_n-1 \mid 1\leq n \leq 6 \}$ and compare with $\sqrt{2}$.
\end{enumerate}

\answerblock{
\begin{enumerate}
\item If it converges, then in the limit we can set $a_n = a_{n+1} = l$  (no need for an $\epsilon$-argument
here, although to be strictly rigorous it can be done easily).  Thus $l = \sqrt{2l+1}$ and squaring gives $l^2
= 2l+1$ which is the given quadratic.
\item $a_1=1$ and so $0<a_1<3$ is true.  Suppose $0<a_n<3$ holds for $n \geq 1$.  
Then $$\sqrt{2\times 0 +1} < a_{n+1} < \sqrt{2\times 3 +1} = \sqrt{7} $$
Thus  certainly $0 < a_{n+1} < 3$, since $\sqrt{7} < 3$.  Actually, we have all $a_n \geq 1$.
\item {\em If} the iteration converges, its limit must be $\geq 0$ because every $a_n \geq 0$.  (Again, from
first principles, you would say every large enough $n$ is maximum $\epsilon$ away from the limit, so the
limit can't be less than 0 ...... but you don't need to say this unless it's {\em specifically asked for}.) 
This excludes the negative root $1-\sqrt{2}$.
\item By definition of the recurrence and because $l^2 = 2l+1$, 
$$a_{n+1}^2-l^2 = (\sqrt{2a_n+1})^2 - 2l -1 = 2(a_n-l)$$
Hence, $$(a_{n+1}+l)(a_{n+1}-l) = 2(a_n-l)$$
\item Since $a_{n+1}>0$ and the positive root $l > 2.4$ (because $\sqrt{2} > 1.4$), 
$$|a_{n+1}-l| < \frac{2}{2.4}|a_n-l|$$ and so $a_{n+1}-l$ converges to 0 by the ratio test.
Thus $a_n \rightarrow l$ as $n \rightarrow \infty$.
\item $a_1 = 1, a_2 = \sqrt{2+1} = \sqrt{3} = 1.732051, a_3 =  \sqrt{2\sqrt{3}+1} = 2.11284, 
a_4 = \sqrt{2\times 2.11284+1} = 2.28598, a_5 = 2.36050, a_6 = 2.39186$ so the set is
$$\{0, 0.732051, 1.11284, 1.28598, 1.36050, 1.39186\}$$ and the next 5 elements are:
$$\{1.40494, 1.41037,
1.41262, 1.41355, 1.41394
\}$$
\end{enumerate}
}


\item  
{\bf Compare question 2 on Logic Exercises 7.} 
\newline Let $L$ be the signature consisting of constant-symbols written as the underlined decimal numbers (hence including the positive integer-symbols $\underline{1}, \underline{2}, \underline{3}, \ldots $), binary relation symbols $<, >, \leq, \geq$ and binary function symbols $+, \times$. Let $N$ be the structure whose domain consists of the real numbers $\real$, with the symbols of $L$ interpreted in the natural way.  The formula $\exists v(x=v \times v)$ expresses that $x$ is non-negative, for example. 

\begin{enumerate}
\item In the same kind of way, write down a first-order $L$-formula expressing that the sequence of real numbers $a_1, a_2, \ldots $ converges to a real number as $n$ tends to infinity.
\item Try to express the same statement in the same signature in a different way.
\item Using the rules for negating quantified expressions in predicate logic, define the condition for a sequence $a_1, a_2, \ldots $ {\em not} to converge -- i.e. find the negation of the usual definition of convergence.  What does this condition mean in terms of the existence (or otherwise) of  `tramlines'?
\end{enumerate}

\answerblock{
\begin{enumerate}
\item $\exists l( \forall \epsilon(\exists N(\forall n(n>N \rightarrow (a_n - l) \times (a_n - l) < \epsilon \times \epsilon))))$.
\item Is there a different way?
\item $\forall l( \exists \epsilon(\forall N(\exists n(\neg(n>N \rightarrow (a_n - l) \times (a_n - l) < \epsilon \times \epsilon))))) =  $ 

$\forall l( \exists \epsilon(\forall N(\exists n( n>N \wedge \neg((a_n - l) \times (a_n - l) < \epsilon \times \epsilon)))))$

So for every real number $l$, there is a number  $\epsilon$ for which there are infinitely many $a_n$ lying outside the tramlines  at $l \pm \epsilon$ --- there is no `last' such $a_n$.
\end{enumerate}

}



\item Given real functions $g,f$ which are continuous on 
$(a,b)$ and on the image of $g$, $\{z=g(x) \mid x \in (a,b) \}$, respectively, prove rigorously that the
function
$h$ defined by $h(x) = f(g(x))$ is continuous on $(a,b)$.  

{\bfseries\itshape Hint}: You have to prove that, given a point $x_0 \in (a,b)$,  $\forall \epsilon>0, \exists \delta>0$
s.t. 
$|x-x_0|<\delta \Rightarrow |h(x)-h(x_0)|<\epsilon$ and find $\delta$ as a function of $\epsilon$, using
corresponding quantities $\delta_f, \delta_g$ relating to the continuity of $f,g$.

\answerblock{
For $h$ to be continuous, as $x$ get very close to $x_0$, then $h(x)$ must get very close to
$h(x_0)$ -- this is what the hint says in a rigorous way.  

Given a point $x_0 \in (a,b)$ and a real
number  $\epsilon>0, \exists \delta_f>0$ s.t. 
$$|g(x)-g(x_0)|<\delta_f \Rightarrow
|f(g(x))-f(g(x_0))|<\epsilon$$ since $f$ is continuous at $g(x_0)$.

But $\exists \delta_g>0$ s.t.  $|x-x_0|<\delta_g \Rightarrow
|g(x)-g(x_0)|<\delta_f$ since $g$ is continuous at $x_0$.  So if we choose $\delta \leq \delta_g$, we have
$$|x-x_0|<\delta \Rightarrow |g(x)-g(x_0)|<\delta_f \Rightarrow|h(x)-h(x_0)|<\epsilon$$ as required.
}

\end{enumerate}


\end{document}
