\documentclass[11pt]{amsart}
\setlength{\topmargin}{-0.3in} % usually -0.25in
\addtolength{\textheight}{.9in} % usually 1.25in
\addtolength{\oddsidemargin}{-0.45in}
\addtolength{\evensidemargin}{-0.45in}
\addtolength{\textwidth}{1.0in}
%\setlength{\parindent}{0pt}
\newcommand{\normalspacing}{\renewcommand{\baselinestretch}{1.1}\tiny\normalsize}
\normalspacing

% macros
\usepackage{amssymb,xspace,alltt,verbatim}
\usepackage[final]{graphicx}
\usepackage{fancyvrb}

\newtheorem*{lem*}{Lemma}
\newtheorem*{thm}{Theorem}

\newcommand{\bs}{\mathbf{s}}
\newcommand{\bu}{\mathbf{u}}
\newcommand{\bv}{\mathbf{v}}
\newcommand{\bx}{\mathbf{x}}
\newcommand{\bbf}{\mathbf{f}}
\newcommand{\CC}{{\mathbb{C}}}
\newcommand{\RR}{{\mathbb{R}}}
\newcommand{\eps}{\epsilon}
\newcommand{\ZZ}{{\mathbb{Z}}}
\newcommand{\ZZn}{{\mathbb{Z}}_n}
\newcommand{\NN}{{\mathbb{N}}}
% angle brackets are delimiters \langle,\rangle (not the relations <,>); no \mathrm around delimiters
\newcommand{\ip}[2]{\left\langle #1,#2\right\rangle}
\newcommand{\spn}{\operatorname{span}}
\newcommand{\emach}{\epsilon_{\mathrm{mach}}}

\newcommand{\prob}[1]{\bigskip\noindent\large\textbf{#1.} \, \normalsize}
%\newcommand{\probpts}[2]{\bigskip\noindent\textbf{#1.} (\emph{#2 pts}) }
%\newcommand{\epartpts}[2]{\textbf{(#1)} (\emph{#2 pts}) }
%\newcommand{\partpts}[2]{\medskip\noindent \textbf{(#1)} (\emph{#2 pts}) }
\newcommand{\probpts}[2]{\bigskip\noindent\textbf{#1.} \quad}
\newcommand{\epartpts}[2]{\textbf{(#1)} \quad}
\newcommand{\partpts}[2]{\medskip\noindent \textbf{(#1)} \quad}

\begin{document}
\thispagestyle{empty}

\Large \noindent \underline{\textbf{SAMPLE}} \hfill\underline{\textbf{SAMPLE}}

\scriptsize \noindent 31 May 2013 \hfill \tiny [AUTHOR: Bueler]

\normalsize\bigskip

\centerline{\large\textbf{Numerical Linear Algebra Comprehensive Exam}}

\bigskip
\centerline{\textsc{Part I: do \textbf{ALL} of the problems \textbf{A}--\textbf{F}}}
\smallskip

\thispagestyle{empty}

\prob{A} Suppose $A\in \CC^{m\times m}$ is invertible.
Consider the factorization $PA=LU$ for $L$ unit lower-triangular, $U$ upper-triangular, and $P$ a permutation matrix.

\partpts{a}{5} Given $b\in \CC^m$, how is this factorization used to solve $Ax=b$ for $x\in\CC^m$?

\partpts{b}{5} Give leading-order estimates, as $m\to \infty$, of the number of floating-point operations to implement the major stages of the method in part \textbf{(a)}, including the cost of the factorization $PA=LU$. Assume the factorization is computed by Gaussian elimination with partial pivoting and that the other major steps use standard algorithms. (\emph{Name these standard algorithms.})

\partpts{c}{5} Is Gaussian elimination with partial pivoting always the best method for solving a square system like $Ax=b$? Describe at least one alternative algorithm with superior stability properties.

\prob{B} \epartpts{a}{5} Suppose $A\in\CC^{m\times n}$. Define a \emph{singular value decomposition} (SVD) of $A$. For $m>n$, describe how the \emph{reduced} SVD differs from the SVD.

\partpts{b}{5} For $m>n$ and $A$ of full rank, use the reduced SVD to construct an orthogonal projector $P$ onto the range of $A$. Show that $P$ is an orthogonal projector.

\prob{C} \epartpts{a}{5} Compute $\|A\|_2$ and $\|A\|_F$ for the matrix
\[
A = \begin{bmatrix} 1 & 1 \\ 0 & 1 \end{bmatrix}.
\]

\epartpts{b}{5} Compute the condition number $\operatorname{cond}(A) = \kappa(A)$ using the $2$-norm.

\probpts{D}{5} Let $\|\cdot\|$ be any norm on $\CC^m$ and also let it denote the corresponding induced norm on square matrices $A\in\CC^{m\times m}$. Show that $\rho(A) \le \|A\|$, where $\rho(A)$ is the spectral radius of $A$.
(\emph{The \emph{spectral radius} of $A$ is the maximum of $|\lambda|$ over the eigenvalues $\lambda$ of $A$.})

\probpts{E}{5} Consider a computer satisfying the standard axioms for floating-point arithmetic,\footnote{Namely that for each such computer there exists $\emach>0$ so that the following two facts hold: (1) For all $x\in\RR$ there is $\eps$ so that $|\eps|\le \emach$ and $\operatorname{fl}(x)=x(1+\eps)$. (2) For all $x,y\in\RR$ and each operation $\ast = +,-,\times,\div$, with computer implementation $\circledast$, there is $\eps$ so that $|\eps|\le \emach$ and $x\circledast y = (x\ast y)(1+\eps)$.} so that the machine precision $\emach$ is precisely defined. Let $X,Y$ be real, normed vector spaces and let $f\colon X\to Y$ be a problem. Precisely define: The algorithm $\tilde f\colon X\to Y$ computing the problem $f$ is \emph{backward stable}. (An informal definition might be included to explain the idea, but it does not suffice.)

\probpts{F}{5} Give an example of an invertible $2\times 2$ matrix $A$ which has $\det(A) > 10^{20}$ but for which the system $Ax=b$ is well-conditioned.

\newpage
\phantom{foo}
\bigskip

\centerline{\textsc{Part II: do \textbf{TWO} of the following three problems}}

\bigskip

\prob{1} \epartpts{a}{5} Let
\[
A=\begin{bmatrix} 3 & -1 \\ -1 & 3 \end{bmatrix}.
\]
Compute the eigenvalues and eigenvectors of $A$.

\partpts{b}{5} Let $x\in\CC^2$ be a random vector. For the same matrix $A$, estimate
\[
\frac{\|A^{2013}x\|}{\|A^{2012}x\|}.
\]
Explain. Include in your explanation a description of those rare vectors for which your estimate is not accurate.

\prob{2} Suppose we define a square matrix $A\in\CC^{m\times m}$ to be \emph{normal} if there is an orthonormal basis of $\CC^m$ consisting of eigenvectors of $A$.

\partpts{a}{5} Show that if $A$ is normal then $A^* A = A A^*$.

\partpts{b}{5} Show that if $A$ is normal then any Schur factorization of $A$ is, in fact, a unitary diagonalization.
\prob{3} \epartpts{a}{5} Show that if $P$ is an orthogonal projector then $I-2P$ is unitary.

\partpts{b}{5} For $A\in\CC^{m\times n}$ of full rank with $m\ge n$, $A^*$ the Hermitian transpose of $A$, $b\in\CC^m$, and $P$ the orthogonal projector onto the range of $A$, show that the equations
\[
A^* A x = A^* b \qquad \qquad \text{and} \qquad \qquad Ax = Pb
\]
have the same unique solution $x\in\CC^n$.

\end{document}