% Source: GitHub gist josch/985314, created May 22, 2011.
% Jacobs University ESM4A quiz and exam questions and solutions.
\documentclass[11pt,a4paper]{article} | |
\usepackage{amsmath} | |
\usepackage{listings} | |
\usepackage[utf8]{inputenc} | |
\begin{document} | |
\renewcommand{\theenumi}{(\alph{enumi})} | |
\renewcommand{\labelenumi}{\theenumi} | |
\section{Introduction} | |
\subsection{Example - Taylor Series} | |
\subsubsection{Problem Q.1: Taylor series - Quiz 2010} | |
Let $f(x) = \ln(\frac{x}{2})$. | |
\begin{enumerate} | |
\item Derive the Taylor series of $f$ at 2. | |
\begin{eqnarray*} | |
f^{(k)}(x) &=& (-1)^{(k-1)}(k-1)!\frac{1}{x^k} \\ | |
f^{(k)}(2) &=& (-1)^{(k-1)}(k-1)!\frac{1}{2^k} \\ | |
f(x) &=& \sum^n_{k=0}\frac{(-1)^{(k-1)}}{k2^k}(x-2)^k + \frac{(-1)^n}{n\xi^{n+1}}(x-2)^{n+1}\\ | |
\end{eqnarray*} | |
with $\xi(x) \in (x, c)$ | |
\item Show that the derived Taylor series represents the function $f$ for | |
$x \in [2,3]$. | |
\begin{eqnarray*} | |
\lim_{n \to \infty} E_n &=& \frac{(-1)^n}{n\xi^{n+1}}(x-2)^{n+1} \\ | |
&.& \\ | |
&.& \\ | |
&.& \\ | |
&=& 0 \\ | |
\end{eqnarray*} | |
\item Compute $\ln(1.5)$ using the truncated Taylor series with terms up to | |
order 2. | |
\begin{eqnarray*} | |
f(2) &=& \ln(1) = 0 \\ | |
f'(x) &=& \frac{1}{x} \\ | |
f'(2) &=& \frac{1}{2} \\ | |
f''(x) &=& -\frac{1}{x^2} \\ | |
f''(2) &=& -\frac{1}{4} \\ | |
f(x) &=& \frac{1}{2}(x-2) - \frac{1}{8}(x-2)^2 \\ | |
f(1.5) &=& -\frac{1}{4} - \frac{1}{32} = -\frac{9}{32} | |
\end{eqnarray*} | |
\item Two types of errors influence the precision of the computed estimate. | |
Which ones? | |
\begin{itemize} | |
\item proximity of $x$ to $c$
\item truncation error | |
\end{itemize} | |
\end{enumerate} | |
\subsubsection{Problem Q.1: Taylor series - Quiz 2011} | |
Let $f(x) = \ln{(1 + x)}$. | |
\begin{enumerate} | |
\item Compute the derivatives $f^{(k)}(x)$ for any natural number $k$. | |
\begin{eqnarray*} | |
f(x) &=& \ln{(1 + x)} \\ | |
f'(x) &=& \frac{1}{1+x} \\ | |
f''(x) &=& -\frac{1}{(1+x)^2} \\ | |
f'''(x) &=& \frac{2}{(1+x)^3} \\ | |
f''''(x) &=& -\frac{6}{(1+x)^4} \\ | |
f^{(k)}(x) &=& (-1)^{(k-1)}\frac{(k-1)!}{(1+x)^k} | |
\end{eqnarray*} | |
\item Derive the Taylor series of $f(x)$ at $c = 0$. | |
\begin{eqnarray*} | |
f(x) &=& \sum_{k=0}^{\infty}\frac{(-1)^{(k-1)}}{k}x^k | |
\end{eqnarray*} | |
\item Derive the error term $E_n$ of a truncated Taylor series with $n + 1$ | |
terms. | |
\begin{eqnarray*} | |
E_n &=& \frac{(-1)^n}{n(1+\xi)^{n+1}}x^{n+1} | |
\end{eqnarray*} | |
\item Show that the Taylor series represents the function $f$ for $x\in[0,1]$. | |
\begin{eqnarray*} | |
\lim_{n \to \infty} E_n &=& \frac{(-1)^n}{n(1+\xi)^{n+1}}x^{n+1} \\ | |
&.& \\ | |
&.& \\ | |
&.& \\ | |
&=& 0 \\ | |
\end{eqnarray*} | |
\item Which are the two types of errors that influence the precision when | |
computing $f(x)$ using the truncated Taylor series? | |
\begin{itemize} | |
\item proximity of $x$ to $c$
\item truncation error | |
\end{itemize} | |
\end{enumerate} | |
\subsubsection{Problem 1: Taylor series - Midterm 1 2009} | |
Let $f(x) = (1 + x)^\alpha$ with $\alpha$ real.
\begin{enumerate} | |
\item Develop the Taylor series of $f$ at 0. | |
\item Show that $[-1,1]$ belongs to the range of convergence of the Taylor | |
series. | |
\item Derive an approximation of the value $\sqrt{2}$ using a linear (i.e. | |
first-order) Taylor approximation. | |
\end{enumerate} | |
\subsection{Number Representations} | |
\section{Linear Equation Systems} | |
\subsection{Naive Gaussian Elimination} | |
\subsection{Gaussian Elimination with Scaled Partial Pivoting} | |
\subsubsection{Problem Q.2: Gaussian Elimination - Quiz 2011} | |
Given the two linear equations | |
\begin{eqnarray*} | |
100x_1+ 99x_2 &=& 199 \\ | |
x_1+ 99x_2 &=& 100 \\ | |
\end{eqnarray*} | |
with two unknowns $x_1$ and $x_2$.
\begin{enumerate} | |
\item Considering a system with base 10 and three digits of precision (for the
normalized mantissa), use naive Gaussian elimination to compute the
solution for $x_1$ and $x_2$.
\item Compute the residual error. | |
\begin{eqnarray*} | |
r &=& Ax' - b \\ | |
\end{eqnarray*} | |
\item How is the relative error defined? | |
\begin{eqnarray*} | |
\frac{\|r\|}{\|b\|} \\ | |
\end{eqnarray*} | |
\item Derive the vector of relative pivot elements and the permutation vector | |
used in Gaussian elimination with scaled partial pivoting. | |
\begin{eqnarray*} | |
\left(\left|\frac{a_{l_i,1}}{s_{l_i}}\right|\right)_{i=1}^{2} &=& \left(\frac{100}{100}, \frac{1}{99}\right) \\ | |
l &=& (1, 2) | |
\end{eqnarray*} | |
\end{enumerate} | |
\subsection{Banded Systems} | |
\subsection{LU Decomposition} | |
\subsubsection{Problem Q.2: LU Decomposition - Quiz 2010} | |
Given matrix | |
$ | |
A = | |
\begin{pmatrix} | |
1 & 0 & 4 \\ | |
2 & 1 & 10 \\ | |
-1 & 5 & 4 \\ | |
\end{pmatrix} | |
$ | |
and vector | |
$ | |
b = | |
\begin{pmatrix} | |
1 \\ | |
2 \\ | |
3 \\ | |
\end{pmatrix} | |
$ | |
. | |
\begin{enumerate} | |
\item Can we use Gaussian elimination with scaled partial pivoting to solve | |
$Ax = b$ for $x$? Explain! | |
\begin{eqnarray*} | |
\det(A) &=& -2
\end{eqnarray*} | |
$A$ is not singular; therefore a unique solution exists and Gaussian
elimination with scaled partial pivoting can be applied.
\item Compute the LU decomposition of matrix A using Gaussian elimination with | |
scaled partial pivoting. | |
\begin{eqnarray*} | |
\begin{pmatrix} | |
1 & 0 & 4 \\ | |
2 & 1 & 10 \\ | |
-1 & 5 & 4 \\ | |
\end{pmatrix} &=& | |
\begin{pmatrix} | |
1 & 0 & 0 \\ | |
2 & 1 & 0 \\ | |
-1 & 5 & 1 \\ | |
\end{pmatrix} | |
\begin{pmatrix} | |
1 & 0 & 4 \\ | |
0 & 1 & 2 \\ | |
0 & 0 & -2 \\ | |
\end{pmatrix} | |
\end{eqnarray*} | |
\item Use the LU decomposition to solve $Ax = b$ for $x$. | |
\item For any matrix $A$ of size $n \times n$, give the time complexity for the | |
steps in parts (b) and | |
(c) with respect to $n$. | |
\begin{itemize} | |
\item part (b): $O(n^3)$ | |
\item part (c): $O(n^2)$ | |
\end{itemize} | |
\end{enumerate} | |
\subsection{Cholesky Decomposition} | |
\subsubsection{Problem Q.3: Decomposition - Quiz 2011} | |
Given the linear equation system | |
\begin{eqnarray*} | |
\begin{pmatrix} | |
1 & 2 & -1 \\ | |
2 & 2 & 4 \\ | |
-1 & 4 & 3 \\ | |
\end{pmatrix} | |
\begin{pmatrix}
x_1 \\
x_2 \\
x_3 \\
\end{pmatrix}
&=& | |
\begin{pmatrix} | |
1 \\ | |
3 \\ | |
16 \\ | |
\end{pmatrix} | |
\end{eqnarray*} | |
\begin{enumerate} | |
\item Compute the LU decomposition of the left-hand side. | |
\begin{eqnarray*} | |
\begin{pmatrix} | |
1 & 2 & -1 \\ | |
2 & 2 & 4 \\ | |
-1 & 4 & 3 \\ | |
\end{pmatrix} &=& | |
\begin{pmatrix} | |
1 & 0 & 0 \\ | |
2 & 1 & 0 \\ | |
-1 & -3 & 1 \\ | |
\end{pmatrix} | |
\begin{pmatrix} | |
1 & 2 & -1 \\ | |
0 & -2 & 6 \\ | |
0 & 0 & 20 \\ | |
\end{pmatrix} | |
\end{eqnarray*} | |
\item Solve the linear equation system from the LU decomposition using back | |
substitution twice. | |
\item What are, in general, the time complexities of the computations in (a) | |
and (b) with respect to the number of unknowns $n$? Briefly explain your | |
answers. | |
\begin{itemize} | |
\item part (a): $O(n^3)$ | |
\item part (b): $O(n^2)$ | |
\end{itemize} | |
\item Does the left-hand side also satisfy the conditions for a Cholesky | |
decomposition? Prove your answer. | |
\begin{itemize} | |
\item no, it is symmetric but not positive definite | |
\end{itemize} | |
\end{enumerate} | |
\subsubsection{Problem 2: Cholesky decomposition - Midterm 1 2009} | |
Given matrix | |
$ | |
A = | |
\begin{pmatrix} | |
1 & 3 & 0 \\ | |
3 & 13 & -2 \\ | |
0 & -2 & 26 \\ | |
\end{pmatrix} | |
$ | |
and vector | |
$ | |
b = | |
\begin{pmatrix} | |
0 \\ | |
2 \\ | |
6 \\ | |
\end{pmatrix} | |
$ | |
\begin{enumerate} | |
\item Compute the Cholesky decomposition of matrix A. | |
\begin{eqnarray*} | |
L &=& \begin{pmatrix} | |
1 & 0 & 0 \\ | |
3 & 2 & 0 \\ | |
0 & -1 & 5 \\ | |
\end{pmatrix} | |
\end{eqnarray*} | |
\item Is matrix A positive definite? Explain. | |
\begin{itemize} | |
\item it must have been because it was possible to calculate the Cholesky | |
decomposition | |
\end{itemize} | |
\item Solve $Ax = b$. | |
\end{enumerate} | |
\subsubsection{Problem 1: Linear Equation Systems - Final 2011} | |
Given the linear equation system | |
\begin{eqnarray*} | |
\begin{pmatrix} | |
1 & \alpha & -1 \\ | |
\alpha & 2 & \beta \\ | |
-1 & 4 & 3 \\
\end{pmatrix} | |
\begin{pmatrix} | |
x_1 \\ | |
x_2 \\ | |
x_3 \\ | |
\end{pmatrix} &=& | |
\begin{pmatrix} | |
1 \\ | |
1 \\ | |
23 \\ | |
\end{pmatrix} | |
\end{eqnarray*} | |
\begin{enumerate} | |
\item Compute the LU decomposition of the left-hand side for $\alpha = 1$ and | |
$\beta = 4$. | |
\item Solve the linear equation system from the LU decomposition using back | |
substitution twice. | |
\item For what values of $\alpha$ and $\beta$ does the left-hand side satisfy | |
the conditions for a Cholesky decomposition | |
\end{enumerate} | |
\subsection{Iterative Solutions} | |
\subsubsection{Problem Q.3: Jacobi iteration - Quiz 2010} | |
Given the system of linear equations | |
\begin{eqnarray*} | |
5x_1+ 2x_2 - 2x_3 &=& 10 \\ | |
x_1+ 6x_2 - 2x_3 &=& -6 \\ | |
x_2+ 4x_3 &=& 8 \\ | |
\end{eqnarray*} | |
\begin{enumerate} | |
\item For what starting points can we apply the Jacobi iteration? Explain your | |
answer. | |
\item Given starting point $x_0=0$, execute the first two steps of a Jacobi | |
iteration, i.e. compute $x_1$ and $x_2$. | |
\item Show that the Jacobi iteration for solving linear equation systems is | |
consistent. | |
\end{enumerate} | |
\subsubsection{Problem Q.4: Iterative solutions - Quiz 2011} | |
Given the system of linear equations | |
\begin{eqnarray*} | |
x_1+ 2x_2+ 3x_3 &=& 2 \\ | |
3x_1+ x_2+ 4x_3 &=& -4 \\ | |
4x_1+ 3x_2+ 9x_3 &=& 0 \\ | |
\end{eqnarray*} | |
\begin{enumerate} | |
\item Explain briefly the approach of iterative solutions for linear equation | |
systems. | |
\begin{itemize} | |
\item generating a sequence $x_0,x_1,x_2,...$ of approximate solution vectors | |
by $Qx_{k+1}=(Q-A)x_k+b$ for $k=1,2,...$ | |
\end{itemize} | |
\item Show that this iterative approach is consistent. | |
\begin{eqnarray*} | |
Qx^* &=& (Q-A)x^*+b \\ | |
Qx^* &=& Qx^*-Ax^*+b \\ | |
Ax^* &=& b \\ | |
\end{eqnarray*} | |
\item What are the necessary properties of the introduced matrix $Q$ and | |
starting point $x_0$? | |
\begin{itemize} | |
\item Q is non-singular | |
\end{itemize} | |
\item Given starting point $x_0=0$, execute two steps of a Jacobi iteration. | |
\begin{eqnarray*} | |
x_{k+1} &=& (I_n - Q^{-1}A)x_k + Q^{-1}b \\ | |
Q &=& \begin{pmatrix} | |
1 & 0 & 0 \\ | |
0 & 1 & 0 \\ | |
0 & 0 & 9 \\ | |
\end{pmatrix} \\ | |
Q^{-1} &=& \begin{pmatrix} | |
1 & 0 & 0 \\ | |
0 & 1 & 0 \\ | |
0 & 0 & \frac{1}{9} \\ | |
\end{pmatrix} \\ | |
\end{eqnarray*} | |
\end{enumerate} | |
\subsubsection{Problem 3: Iterative methods for linear equation systems - Midterm 1 2009} | |
Consider an iterative method for solving $Ax = b$ of the form | |
$x_{k+1}=x_k+Br_k$, where $r_k= b - Ax_k$. | |
\begin{enumerate} | |
\item Show that the method is consistent for any non-singular matrix B. | |
\begin{eqnarray*} | |
x_{k+1} &=& x_k+ Br_k \\ | |
r_k &=& b - Ax_k \\ | |
x^* &=& x^* + B(b - Ax^*) \\ | |
Bb &=& BAx^* \\ | |
b &=& Ax^* \\ | |
\end{eqnarray*} | |
\item Let $B$ be the identity matrix. When does this method converge? Prove your
statement.
\end{enumerate} | |
\subsubsection{Problem 1: Linear Equation Systems - Final 2010} | |
Given the system of linear equations | |
\begin{eqnarray*} | |
x_1+ 2x_2+ 3x_3 &=& 2 \\ | |
3x_1+ x_2+ 4x_3 &=& -4 \\ | |
4x_1+ 3x_2+ 9x_3 &=& 0 \\ | |
\end{eqnarray*} | |
\begin{enumerate} | |
\item Explain the approach of iterative solutions for linear equations systems. | |
What are the necessary properties of the introduced matrix $Q$ and starting | |
point $x_0$? | |
\begin{itemize} | |
\item generating a sequence $x_0,x_1,x_2,...$ of approximate solution vectors | |
by $Qx_{k+1}=(Q-A)x_k+b$ for $k=1,2,...$ | |
\item Q is non-singular | |
\end{itemize} | |
\item Show that this iterative approach is consistent. | |
\begin{eqnarray*} | |
Qx^* &=& (Q-A)x^*+b \\ | |
Qx^* &=& Qx^*-Ax^*+b \\ | |
Ax^* &=& b \\ | |
\end{eqnarray*} | |
\item Given starting point $x_0=0$, execute two steps of a Jacobi iteration. | |
\begin{eqnarray*} | |
x_{k+1} &=& (I_n - Q^{-1}A)x_k + Q^{-1}b \\ | |
Q &=& \begin{pmatrix} | |
1 & 0 & 0 \\ | |
0 & 1 & 0 \\ | |
0 & 0 & 9 \\ | |
\end{pmatrix} \\ | |
Q^{-1} &=& \begin{pmatrix} | |
1 & 0 & 0 \\ | |
0 & 1 & 0 \\ | |
0 & 0 & \frac{1}{9} \\ | |
\end{pmatrix} \\ | |
\end{eqnarray*} | |
\end{enumerate} | |
\subsection{Summary} | |
\section{Non-linear Equations} | |
\subsection{Bisection Method} | |
\subsection{Newton's Method} | |
\subsubsection{Problem Q.4: Non-linear Equations - Quiz 2010} | |
\begin{enumerate} | |
\item Derive the iteration formula for Newton's method using Taylor series. | |
Explain all steps. | |
\begin{eqnarray*} | |
t(x) &=& f(x_0) + f'(x_0)(x-x_0) \\ | |
f(x) &=& f(c) + f'(c)(x-c) + O(|x-c|^2) \\ | |
f(x) &=& t(x) \\ | |
\end{eqnarray*} | |
\begin{itemize} | |
\item the tangent t is a linear Taylor approximation of function f at c | |
\item $t(x)$ is a good approximation of $f(x)$ in a region close to $x_0$ | |
\item with the formula for the root of the tangent: | |
\end{itemize} | |
\begin{eqnarray*} | |
x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)} \\ | |
\end{eqnarray*} | |
\item Derive the iteration formula for the Secant method using part (a). | |
Explain all steps. | |
\begin{eqnarray*} | |
f'(x) &=& \lim_{h \to 0} \frac{f(x+h)-f(x)}{h} \\ | |
f'(x) &\approx& \frac{f(x+h)-f(x)}{h} \\ | |
f'(x_n) &\approx& \frac{f(x_{n-1})-f(x_n)}{x_{n-1}-x_n} \\ | |
x_{n+1} &=& x_n - \frac{x_{n-1}-x_n}{f(x_{n-1})-f(x_n)}f(x_n) \\ | |
\end{eqnarray*} | |
\item Let $f(x) = \sqrt{\|x\|}$. Apply one iteration step of Newton's method | |
starting with $x_0=1$. | |
\begin{eqnarray*} | |
x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)} \\ | |
\end{eqnarray*} | |
\item According to the theorem presented in class, what convergence rate can we | |
deduce for part (c)? Explain your answer. Caution: Trick question! | |
\end{enumerate} | |
\subsubsection{Problem 4: Newton's method - Midterm 1 2009} | |
Given function $f(x) = x^2 - 1$ and starting point $x_0= 2$ | |
\begin{enumerate} | |
\item Apply two steps of Newton's method to find approximations of the root of | |
$f(x)$. | |
\begin{eqnarray*} | |
x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)} \\ | |
\end{eqnarray*} | |
\item Use the convergence theorem to prove that Newton's method will converge
for this problem.
\end{enumerate} | |
\subsection{Secant Method} | |
\subsubsection{Problem Q.5: Non-linear Equations - Quiz 2011} | |
\begin{enumerate} | |
\item Derive the iteration formula for Newton's method. Explain all steps. | |
Hint: You may derive it geometrically or analytically. For the geometric | |
approach, you need to derive the formula for the tangent. For the | |
analytic approach, you may start with the Taylor series. | |
\item Starting from (a), derive the iteration formula of the Secant method. | |
Explain all steps. | |
\item Apply one iteration step of the Secant method starting with $x_0=2$ and | |
$x_1=1$ to determine $\sqrt{7}$. Hint: To determine the value of the | |
square root of a real, positive number $R > 0$, we can solve the | |
nonlinear equation $f(x) = x^2 - R$. | |
\end{enumerate} | |
\subsubsection{Problem 2: Nonlinear Equations - Final 2010} | |
\begin{enumerate} | |
\item Derive the iteration formula for the Secant method starting with Taylor | |
series. Explain all steps. | |
\item Apply two iteration steps of the Secant method starting with $x_0=2$ and | |
$x_1= 1$ to determine $\sqrt{7}$. | |
\textit{Hint: }To determine the value of the square root of a real, | |
positive number $R > 0$, we can solve the nonlinear equation $f(x)=x^2-R$. | |
\end{enumerate} | |
\subsubsection{Problem 2: Nonlinear Equations - Final 2011} | |
\begin{enumerate} | |
\item Derive the iteration formula for the Secant method starting with Taylor | |
series. | |
\item Apply two iteration steps of the Secant method starting with $x_0=2$ and | |
$x_1=1$ to determine the roots of function $f(x)=x^2-4x+1$. | |
\end{enumerate} | |
\subsection{Convergence} | |
\subsection{Summary} | |
\subsection{System of Non-linear Equations} | |
\section{Interpolation \& Approximation} | |
\subsection{Interpolation Problem} | |
\subsection{Polynomial Interpolation} | |
\subsection{Lagrange Interpolation} | |
\subsubsection{Problem Q.5: Polynomial Interpolation - Quiz 2010} | |
Given points $p_i$ at knots $u_i$, $i = 0,1,2$, by | |
\begin{center} | |
\begin{tabular}{|c|c|c|c|} | |
\hline | |
$u_i$ & -2 & 0 & 1 \\ | |
\hline | |
$p_i$ & 1 & 0 & 2 \\ | |
\hline | |
\end{tabular} | |
\end{center} | |
\begin{enumerate} | |
\item Derive the collocation matrix for polynomial interpolation with respect | |
to the monomial basis for the given interpolation problem. | |
\begin{eqnarray*} | |
\Phi &=& \begin{pmatrix} | |
1 & -2 & 4 \\ | |
1 & 0 & 0 \\ | |
1 & 1 & 1 \\ | |
\end{pmatrix} | |
\end{eqnarray*} | |
\item Draw the Lagrange polynomials for the given knot sequence. What | |
conditions do the Lagrange polynomials have to fulfill at the knots? | |
\begin{eqnarray*} | |
\mathcal{L}_i^n(u_k) &=& \left\{ | |
\begin{array}{l l} | |
1 & \quad \text{, if $i=k$}\\ | |
0 & \quad \text{, otherwise}\\ | |
\end{array}\right. \\ | |
\end{eqnarray*} | |
\item In a general set-up, starting with the conditions at the knots $u_i$, | |
$i=0,...,n$, derive the explicit form of the Lagrange polynomials. | |
Explain your answer! | |
\begin{eqnarray*} | |
\mathcal{L}_i^n(u) &=& \frac{\prod_{j=0, i \neq j}^n(u-u_j)}{\prod_{j=0, i \neq j}^n(u_i-u_j)} \\ | |
\end{eqnarray*} | |
\item Starting from the explicit form, derive a recursive definition of | |
Lagrange polynomials. | |
\textit{Hint:} You can assume the partition-of-unity property. | |
\begin{eqnarray*} | |
\sum_{i=0}^n\mathcal{L}_i^n(u) &\equiv& 1 \\ | |
\mathcal{L}_0^0(u) &\equiv& 1 \\ | |
\mathcal{L}_i^k(u) &=& \left\{ | |
\begin{array}{l l} | |
\mathcal{L}_i^{k-1}(u)\alpha_{ik} & \quad \text{for $i=0,...,k-1$} \\ | |
1 - \sum_{i=0}^{k-1}\mathcal{L}_i^k(u) & \quad \text{for $i=k$} \\ | |
\end{array}\right. \\ | |
\alpha_{ik} = \frac{u-u_k}{u_i-u_k} | |
\end{eqnarray*} | |
\item Apply Aitken's algorithm to compute an interpolated function value at -1. | |
\textit{Hint:} The coefficients $\alpha_{ik}$ are the ones from the | |
recursion. | |
\end{enumerate} | |
\subsubsection{Problem Q.6: Polynomial Interpolation - Quiz 2011} | |
Given points $p_i$ at knots $u_i$, $i = 0,1,2$, by | |
\begin{center} | |
\begin{tabular}{|c|c|c|c|} | |
\hline | |
$u_i$ & 0 & 2 & 4 \\ | |
\hline | |
$p_i$ & 0 & 8 & 15 \\ | |
\hline | |
\end{tabular} | |
\end{center} | |
\begin{enumerate} | |
\item Derive the collocation matrix for the given interpolation problem using a | |
monomial basis. | |
\begin{eqnarray*} | |
\Phi &=& \begin{pmatrix} | |
1 & 0 & 0 \\ | |
1 & 2 & 4 \\ | |
1 & 4 & 16 \\ | |
\end{pmatrix} | |
\end{eqnarray*} | |
\item Derive the collocation matrix for the given interpolation problem using | |
the Lagrange polynomials as a basis. | |
\item Draw all Lagrange polynomials for the given interpolation problem. | |
\item Derive the Lagrange polynomials for the given interpolation problem from | |
their properties at the knots. (Remark: no monomial form required.) | |
\item Derive the interpolating polynomial for the given interpolation problem | |
with respect to the Lagrangian interpolation scheme. (Remark: no monomial | |
form required.) | |
\begin{eqnarray*} | |
p(u) &=& p_0\mathcal{L}_0^{2}(u) + p_1\mathcal{L}_1^{2}(u) + p_2\mathcal{L}_2^{2}(u) \\ | |
\end{eqnarray*} | |
\end{enumerate} | |
\subsubsection{Problem 2: Polynomial interpolation - Midterm 2 2009} | |
Given knots $u_i$ and points $p_i$, $i = 0,1,2$, by | |
\begin{center} | |
\begin{tabular}{|c|c|c|c|} | |
\hline | |
$u_i$ & 0 & 1 & 2 \\ | |
\hline | |
$p_i$ & 2 & 0 & 1 \\ | |
\hline | |
\end{tabular} | |
\end{center} | |
\begin{enumerate} | |
\item Use polynomial interpolation with respect to the monomial basis to | |
determine a polynomial $p(u)$ of minimal degree that interpolates the | |
points at the knots. | |
\begin{eqnarray*} | |
p(u) = \phi_0(u)x_0 + \phi_1(u)x_1 + \phi_2(u)x_2 \\ | |
\end{eqnarray*} | |
nodes are $x_0$, $x_1$, $x_2$ | |
\item Use Lagrange interpolation to determine a polynomial $p(u)$ of minimal | |
degree that interpolates the points at the knots. | |
\begin{eqnarray*} | |
p(u) &=& p_0\mathcal{L}_0^{2}(u) + p_1\mathcal{L}_1^{2}(u) + p_2\mathcal{L}_2^{2}(u) \\ | |
\end{eqnarray*} | |
nodes are $p_0$, $p_1$, $p_2$ | |
\end{enumerate} | |
\textit{Remark. For both parts, state explicitly what the nodes and the | |
polynomials are.} | |
\subsection{Newton Interpolation} | |
\subsubsection{Problem Q.7: Lagrange and Newton Interpolation - Quiz 2011} | |
Given points $p_i$ at knots $u_i$, $i = 0,1,2$, by | |
\begin{center} | |
\begin{tabular}{|c|c|c|c|} | |
\hline | |
$u_i$ & 0 & 1 & 3 \\ | |
\hline | |
$p_i$ & 2 & 1 & 1 \\ | |
\hline | |
\end{tabular} | |
\end{center} | |
\begin{enumerate} | |
\item Derive, in general, a recursive form of all Lagrange polynomials | |
$L^n_i(u)$, $i =0,...,n$. \textit{Hint:} Use the property of partition of | |
unity. | |
\item Apply to the given interpolation problem Aitken's algorithm to compute an | |
interpolated value for $u = 2$. \textit{Hint:} The $\alpha_{in}$ in | |
Aitken's algorithm are those from the recursive formula in (a). | |
\item Draw all Newton polynomials $P_i(u)$ for the given interpolation problem. | |
\item Derive the interpolating function for the first point only using Newton | |
interpolation. | |
\item Derive the interpolating function for the first two points using Newton | |
interpolation. \textit{Hint:} Use the result of part (d). | |
\end{enumerate} | |
\subsubsection{Problem 3: Interpolation - Final 2010} | |
Given points $p_i$ at knots $u_i$, $i = 0,1,2$, by | |
\begin{center} | |
\begin{tabular}{|c|c|c|c|} | |
\hline | |
$u_i$ & -1 & 1 & 2 \\ | |
\hline | |
$p_i$ & 5 & -3 & -4 \\ | |
\hline | |
\end{tabular} | |
\end{center} | |
\begin{enumerate} | |
\item Derive the collocation matrix for polynomial interpolation with respect | |
to the monomial basis for the given interpolation problem. | |
\item Starting from the collocation matrix in (a), derive the interpolating | |
polynomial $p(x)$ using Gaussian elimination with scaled partial pivoting. | |
Give the vectors of all relative pivot elements. | |
\item Let $q(x)$ be the polynomial that solves the interpolation problems with | |
respect to the Newton interpolation scheme. What is $q(0.5)$? | |
\begin{eqnarray*}
P_i(u) = (u-u_0)\cdots(u-u_{i-1})
\end{eqnarray*}
\item Sketch the first Newton polynomial and the first Lagrange polynomial for | |
the given knot sequence. | |
\end{enumerate} | |
\subsection{Error Analysis} | |
\subsection{Bézier Curves} | |
\subsubsection{Problem Q.8: Bézier curves - Final 2010} | |
Given Bézier points $p_i$ at knots $u_i$, $i = 0,1,2$, by | |
\begin{center} | |
\begin{tabular}{|c|c|c|c|} | |
\hline | |
$u_i$ & 0 & 1 & 3 \\ | |
\hline | |
$p_i$ & 2 & 1 & 1 \\ | |
\hline | |
\end{tabular} | |
\end{center} | |
\begin{enumerate} | |
\item Prove, in general, the partition-of-unity property for the basis of | |
Bernstein polynomials $B^n_i(u)$. | |
\begin{eqnarray*} | |
1 &=& 1^n = (u+(1-u))^n = \sum_{i=0}^n\binom{n}{i}u^i(1-u)^{n-i} \\ | |
B_i^n(u) &=& \binom{n}{i}u^i(1-u)^{n-i} \\ | |
\end{eqnarray*} | |
\item Derive and draw all Bernstein polynomials $B^2_i(u)$ of degree 2 over | |
interval $[0,1]$. The drawing needs to be precise at the points $u = 0$, | |
$u = \frac{1}{2}$, and $u = 1$. | |
\begin{eqnarray*} | |
B_0^2(u) &=& (1-u)^2 \\ | |
B_1^2(u) &=& 2u(1-u) \\ | |
B_2^2(u) &=& u^2 \\ | |
\end{eqnarray*} | |
\item Derive, in general, a recursive form for Bernstein polynomials $B^n_i(u)$. | |
\begin{eqnarray*} | |
B_0^0(u) &\equiv& 1 \\ | |
B_i^{n+1}(u) &=& uB_{i-1}^n(u) + (1-u)B_i^n(u) \\ | |
\end{eqnarray*} | |
\item Apply the De Casteljau algorithm geometrically to evaluate the Bézier | |
curve $b(u)$ for the given Bézier polygon at $u = 2$. Be aware that the | |
knots are not from the interval $[0,1]$. The geometric construction needs | |
to be precise, i.e., give ratios. | |
\begin{eqnarray*} | |
t = \frac{u-a}{b-a} | |
\end{eqnarray*} | |
\item Draw the respective Bézier curve $b(u)$. | |
\end{enumerate} | |
\subsubsection{Problem 1: System of nonlinear equations \& Bernstein polynomials - Midterm 2 2009} | |
\begin{enumerate} | |
\item Execute two iterations of the generalized Newton's method with starting | |
point $(0,0)$ to solve the nonlinear equation system | |
\begin{eqnarray*} | |
x^2+ 2y &=& 3 \\ | |
x + y^2 &=& 2 \\ | |
\end{eqnarray*} | |
\begin{eqnarray*} | |
f'(x) &=& \begin{pmatrix} | |
\frac{d}{dx}f_1(x,y) & \frac{d}{dy}f_1(x,y) \\ | |
\frac{d}{dx}f_2(x,y) & \frac{d}{dy}f_2(x,y) \\ | |
\end{pmatrix} \\ | |
x_{n+1} &=& x_n - f'(x_n)^{-1}f(x_n) \\ | |
f'(x_n)c &=& -f(x_n) \\ | |
x_{n+1} &=& x_n + c \\ | |
\end{eqnarray*} | |
\item Show that the Bernstein polynomials have the property of partition of | |
unity, i.e., show that $\sum^n_{i=0}B^n_i(u) = 1$ for all $u \in [0,1]$. | |
\end{enumerate} | |
\subsection{Piecewise Hermite Interpolation} | |
\subsubsection{Problem Q.6: Hermite and Bézier interpolation - Quiz 2010} | |
\begin{enumerate} | |
\item Given interval $[a,b]$. | |
\begin{enumerate} | |
\item What does the cubic Hermite interpolant interpolate? | |
\item What are the nodes of the cubic Hermite interpolation scheme? | |
\item What are the interpolation conditions for cubic Hermite interpolation? | |
\item Sketch all cubic Hermite polynomials. | |
\end{enumerate} | |
\item Show that partition of unity holds for Bernstein polynomials of any degree $n$. | |
\item Compute the Bernstein polynomials of degree $n = 3$ and sketch them. | |
\item Show that Bézier curves of degree $n = 3$ are endpoint interpolating. | |
\end{enumerate} | |
\subsubsection{Problem Q.7: Piecewise cubic Hermite interpolation - Quiz 2010} | |
Let $[0,1]$ be one interval for piecewise cubic Hermite interpolation with | |
interpolation conditions $s(0)=1$, $s'(0)=1$, $s(1)=1$, and $s'(1)=0$, where | |
$s(u)$ denotes the interpolating Bézier curve. | |
\begin{enumerate} | |
\item Express the derivative of a cubic Bernstein polynomial $B^3_i(u)$ in | |
terms of quadratic Bernstein polynomials. | |
\item Use part (a) to show that the derivative of a cubic Bézier curve over | |
interval $[0,1]$ is a quadratic Bézier curve, i.e., show that | |
\begin{eqnarray*} | |
\frac{d}{du}\left(\sum^3_{i=0}b_iB^3_i(u)\right) &=& 3\sum^2_{i=0}\Delta b_iB^2_i(u) \\ | |
\end{eqnarray*} | |
with $\Delta b_i$ denoting the first forward differences. | |
\item Use part (b) to derive Bézier point $b_1$ for the given piecewise cubic | |
Hermite interpolation problem. Show all computation steps! | |
\item Use part (c) to draw the Bézier polygon for the Bézier curve $s(u)$. | |
\item Use part (d) and geometrically(!) apply the de Casteljau algorithm for | |
$u = \frac{1}{2}$ and sketch the curve $s(u)$ over interval $[0,1]$. | |
\end{enumerate} | |
\subsubsection{Problem Q.9: Piecewise Hermite Interpolation - Quiz 2011} | |
Given points $p_i$ and derivatives $d_i$ at knots $u_i$, $i = 0,1,2$, by
\begin{center} | |
\begin{tabular}{|c|c|c|c|} | |
\hline | |
$u_i$ & 0 & 1 & 3 \\ | |
\hline | |
$p_i$ & 2 & 1 & 1 \\ | |
\hline | |
$d_i$ & -1 & 0 & 1 \\ | |
\hline | |
\end{tabular} | |
\end{center} | |
\begin{enumerate} | |
\item Using piecewise Hermite interpolation, polynomials of what degree are to | |
be used? | |
\begin{eqnarray*} | |
n &=& 2k + 1 + m = 4 | |
\end{eqnarray*} | |
\item What conditions do these polynomials need to fulfill for the subinterval | |
$[0,1]$? | |
\item Using Bézier curves for those polynomials, derive the Bézier points for | |
the subinterval $[0,1]$. You can take for granted that | |
$\frac{d}{du}\sum^n_{i=0}b_iB^n_i(u) = \frac{n}{b-a}\sum^{n-1}_{i=0}\Delta b_iB^{n-1}_i(u)$ | |
for interval $[a,b]$. Explain all steps. | |
\item Generalize the findings in (c) to interval $[1,3]$ and draw the overall | |
Bézier polygon for the piecewise Hermite scheme over the entire interval | |
$[0,3]$. | |
\item Give the resulting interpolating curve in Bézier representation and | |
sketch it. The sketch needs to be qualitatively correct. | |
\end{enumerate} | |
\subsubsection{Problem 3: Piecewise Hermite interpolation using Bézier curves - Midterm 2 2009} | |
Given knots $u_0= 0$, $u_1= 1$, and $u_2= 4$, points $p_0= 1$, $p_1= 2$, and | |
$p_2= 2$ and derivatives $d_0= 1$, $d_1= 1$, and $d_2= -1$. | |
\begin{enumerate} | |
\item Use piecewise Hermite interpolation to determine the overall Bézier | |
polygon of the piecewise polynomial curve that interpolates points $p_i$ | |
and derivatives $d_i$ at knots $u_i$ (for $i = 0,1,2$). | |
\item Compute the monomial form of the Bézier curve $b(u)$ of the first | |
interval $[u_0,u_1]$ and evaluate it at $u = 0.5$ in order to draw the
Bézier curve $b(u)$ and the respective Bézier polygon. | |
\end{enumerate} | |
\subsection{Splines} | |
\subsubsection{Problem Q.8: Splines and Least Squares Approximation - Quiz 2010} | |
\begin{enumerate} | |
\item What is a spline? | |
\item Sketch a quadratic B-spline over an equidistant knot sequence. State what | |
the values at the knots are. Hint: You obtain the sketch by combining | |
B-splines of degree 1, which you can get by combining B-splines of | |
degree 0. | |
\begin{eqnarray*} | |
N_i^0(u) &=& \left\{ | |
\begin{array}{l l} | |
1 & \quad \text{, if $u \in [u_i,u_{i+1}]$}\\ | |
0 & \quad \text{, otherwise}\\ | |
\end{array}\right. \\ | |
N_i^n(u) &=& \alpha_i^{n-1}N_i^{n-1}(u) + (1 - \alpha_{i+1}^{n-1})N_{i+1}^{n-1}(u) \\ | |
\alpha_i^{n-1} &=& \frac{u-u_i}{u_{i+n}-u_i} | |
\end{eqnarray*} | |
\item Given the conditions $f(-1) = 0$, $f(0) = 0$, $f(1) = 1$, and $f(2) = 1$. | |
Assuming that $f$ is a linear function $f(x) = ax + b$, give an estimate | |
for the error for the best fit in a least squares approximation. | |
\item By minimizing the error functional from part (b), derive the normal | |
equations. | |
\item Solve the normal equations of part (c) to compute the best fitting linear | |
function $f$. | |
\item Compute the approximation error for the result of part (d) according to | |
part (b). | |
\end{enumerate} | |
\subsubsection{Problem Q.10: Splines - Quiz 2011} | |
\begin{enumerate} | |
\item How is a spline $s(u)$ defined, in general? | |
\begin{eqnarray*} | |
s(u) &=& \sum_ic_iN_i^m(u) | |
\end{eqnarray*} | |
A function $s(u)$ with domain $[a,b]$ is called a spline of degree k if | |
$s(u) \in C^{k-1}[a,b]$ and there are knots $a=u_0 < u_1 < ... < u_m = b$ | |
such that $s$ is a polynomial of degree k on each sub-interval $[u_i,u_{i+1}]$ | |
for $i=0,...,m-1$. | |
\item What is the additional property that defines a B-spline $N^n_i(u)$? | |
\item Given B-splines $N^0_0(u)$ and $N^0_1(u)$ of degree 0 with support $[0,1]$ | |
and $[1,2]$, respectively, derive a recursive formula for $N^1_0(u)$ of | |
degree 1. | |
\item Given the equidistant knot sequence $(0,1,2,3)$. Starting with the | |
B-splines $N^0_0(u)$, $N^0_1(u)$, and $N^0_2(u)$, use part (c) to draw | |
B-splines $N^1_0(u)$, $N^1_1(u)$, and $N^2_0(u)$. | |
\item Use part (d) to derive the precise values of $N^2_0(u)$ at the knots of | |
knot sequence $(0,1,2,3)$. | |
\item Given an interpolation problem with knot sequence $(1,2,3)$. Use part (e) | |
to derive the collocation matrix for a quadratic spline | |
$s(u) = \sum^2_{i=0}c_iN^2_i(u)$ in B-spline representation. | |
\end{enumerate} | |
\subsubsection{Problem 4: Splines - Midterm 2 2009} | |
Given equidistant knots $u_j= j$. | |
\begin{enumerate} | |
\item Use the de Boor algorithm to estimate values $N^2_i(u_j)$ for all | |
quadratic B-splines $N^2_i(u)$ and all knots $u_j$. | |
\item Draw the spline $s(u) = \sum^3_{i=0}c_iN^2_i(u)$ in B-spline | |
representation with $(c_0,c_1,c_2,c_3) = (1,3,1,2)$ over the given knot | |
sequence. | |
\end{enumerate} | |
\textit{Remark. The values at the knots should be precise using part (a). | |
Otherwise, a qualitative drawing that exhibits the properties of the curve | |
suffices.} | |
\subsubsection{Problem 3: Interpolation - Final 2011} | |
Given points $p_i$ at knots $u_i$, $i=0,1,2,3$, by | |
\begin{center} | |
\begin{tabular}{|c|c|c|c|c|} | |
\hline | |
$u_i$ & 2 & 3 & 4 & 5 \\ | |
\hline | |
$p_i$ & 22 & $-\pi$ & 309 & 17 \\ | |
\hline | |
\end{tabular} | |
\end{center} | |
\begin{enumerate} | |
\item Derive the linear equation system (left-hand side is collocation matrix) | |
for polynomial interpolation with respect to the monomial basis for the | |
given interpolation problem. | |
\item Derive the recursive formula for B-spline $N_0^2(u)$ over knot sequence | |
$(0,1,2,3)$. Explain how you obtain it. | |
\item Assuming equidistant knots, estimate the function values of B-splines | |
$N_i^2(u)$ with support $(i, i+3)$ for all natural numbers $u$. | |
\item Derive the linear equation system (left-hand side is collocation matrix) | |
for quadratic spline interpolation with respect to the derived B-spline | |
basis for the given interpolation problem. | |
\end{enumerate} | |
\subsubsection{Problem 8: Bonus Problem - Final 2010} | |
Sketch the quadratic B-splines $s_0(u)$, $s_1(u)$, and $s_2(u)$ over knot | |
sequence $0,0,1,2,3,...$ and state what the values at the knots are. | |
\subsection{Summary} | |
\subsection{Least Squares Approximation} | |
\subsubsection{Problem 4: Approximation - Final 2010} | |
Given the conditions $f(-1) = 5$, $f(0) = 1$, $f(1) = -1$, and $f(2) = -1$. | |
Let $f$ be a quadratic polynomial function of the form $f(x) = ax^2+ bx$ | |
with real numbers $a$ and $b$. | |
\begin{enumerate} | |
\item Give an error estimate for the best fit in a least squares approximation. | |
\begin{eqnarray*} | |
\phi(a,b) &=& \sum_{k=0}^2(ax_k + b - f(x_k))^2 \\ | |
\end{eqnarray*} | |
\item Starting from the error functional, derive the normal equations. | |
\begin{eqnarray*} | |
\phi(c_0,...,c_m) &=& \sum_{k=0}^n\left(\sum_{j=0}^mc_jg_j(x_k)-y_k\right)^2 \\ | |
\frac{d}{dc_i}\phi(c_0,...,c_m) &=& 0 \quad \text{for $i=0,...,m$} \\ | |
G^TGc &=& G^Ty \\ | |
\end{eqnarray*} | |
\item Solve the normal equations to derive the best fitting function $f$. | |
\item Could we have used Cholesky decomposition to solve the system? Why? | |
\end{enumerate} | |
\section{Differentiation \& Integration} | |
\subsection{Forward Differencing} | |
\subsection{Central Differencing} | |
\subsection{Richardson Extrapolation} | |
\subsubsection{Problem Q.11: Differentiation - Quiz 2011} | |
Given function $f(x) = x^2$ and step size $h = 0.2$. | |
\begin{enumerate} | |
\item Derive the estimate and the error term for differentiation using | |
backward differencing. | |
\begin{eqnarray*} | |
f(x-h) &=& f(x) -hf'(x) + \frac{1}{2}h^2f''(\xi) \\ | |
f'(x) &=& \frac{f(x)-f(x-h)}{h} - \frac{1}{2}hf''(\xi) \\ | |
\end{eqnarray*} | |
\item Compute $f'(1)$ using the estimate from part (a). | |
\item Use the error term in part (a) to compute the error in part (b). | |
\item Using the estimate from part (a), apply Richardson extrapolation to | |
derive an estimate with quadratic error term. | |
\begin{eqnarray*} | |
f'(x) &=& \frac{f(x)-f(x-h)}{h} - ah + ah^2 - ah^3 + ... \\ | |
\phi(h) &=& f'(x) + ah - ah^2 + ah^3 - ... \\ | |
\phi\left(\frac{h}{2}\right) &=& f'(x) + \frac{1}{2}ah - \frac{1}{4}ah^2 + \frac{1}{8}ah^3 - ... \\ | |
\phi(h) - 2\phi\left(\frac{h}{2}\right) &=& -f'(x) - \frac{1}{2}ah^2 + \frac{3}{4}ah^3 - ... \\ | |
f'(x) &=& 2\phi\left(\frac{h}{2}\right) - \phi(h) - \frac{1}{2}ah^2 + \frac{3}{4}ah^3 - ... \\ | |
\end{eqnarray*} | |
\item Compute $f'(1)$ using the estimate from part (d). | |
\item Why was the result in part (e) expected? | |
\end{enumerate} | |
\subsubsection{Problem 5: Differentiation - Final 2010} | |
Let $f \in C^2$ be a function. | |
\begin{enumerate} | |
\item Use Taylor series to derive an estimate $\phi(h)$ for $f'(x)$ according | |
to the forward differencing scheme. | |
\begin{eqnarray*} | |
f(x+h) &=& f(x) + hf'(x) + \frac{1}{2}h^2f''(\xi) \quad \text{with $\xi \in (x,x+h)$} \\ | |
f'(x) &=& \frac{f(x+h)-f(x)}{h}-\frac{1}{2}hf''(\xi) \\ | |
\end{eqnarray*} | |
\item Let $f(x) = x^2$. Compute an estimate for $f'(1)$ with $h = 0.1$ using | |
forward differencing. | |
\item Use Richardson extrapolation on $\phi(h)$ to find an estimate for $f'(x)$ | |
with approximation error $O(h^2)$. | |
\item Let $f(x) = x^2$. Compute an estimate for $f'(1)$ with $h = 0.1$ using | |
the new estimate. | |
\item Explain why the result in (d) was expected. | |
\end{enumerate} | |
\subsubsection{Problem 4: Differentiation - Final 2011} | |
\begin{enumerate} | |
\item Use Taylor series to derive the central differencing scheme for first | |
derivative computations and its asymptotic error. | |
\item Compute derivative $f'(0)$ for $f(x)=x^2-4x+1$ with $h=0.1$ using central | |
differencing. | |
\item Apply Richardson extrapolation to derive an estimate for first derivative | |
computations with improved asymptotic error and its asymptotic error. | |
\end{enumerate} | |
\subsection{Higher-order Derivatives} | |
\subsection{Integration} | |
\subsection{Upper and Lower Sums} | |
\subsection{Trapezoid Rule} | |
\subsubsection{Problem 5: Integration - Final 2011} | |
Given the integral $I = \int_0^4(x^2+2x+1)dx$. | |
\begin{enumerate} | |
\item Derive the trapezoid rule for estimating an integral $\int_a^bf(x)dx$ and | |
apply it to compute $I$. | |
\item Derive the recursive trapezoid rule for estimating an integral | |
$\int_a^bf(x)dx$ and apply two recursion steps to obtain two new | |
estimates for $I$. | |
\item Calculate the error of the last estimate you computed. Recall that the | |
error theorem says that | |
$|\int_a^bf(x)dx-T(f;P)|=|-\frac{1}{12}(b-a)h^2f''(\xi)|$ for a | |
partition P with $\xi \in (a,b)$. | |
\end{enumerate} | |
\subsection{Romberg algorithm} | |
\subsubsection{Problem Q.9: Integration - Quiz 2010} | |
Given the integral $I = \int^2_0x^2dx$. | |
\begin{enumerate} | |
\item Derive the trapezoid rule to compute an estimate $T_0$ for $I$. | |
\item Compute $T_0$. | |
\item Derive one recursion step of the recursive trapezoid rule to compute an | |
improved estimate $T_1$ for $I$. | |
\item Starting from $T_0$, compute $T_1$. | |
\item Use Richardson extrapolation to derive a further improved estimate $R$ | |
from $T_0$ and $T_1$. \textit{Hint:} This is the Romberg algorithm. | |
\item Starting from $T_0$ and $T_1$, compute $R$. | |
\end{enumerate} | |
\subsubsection{Problem Q.12: Romberg algorithm - Quiz 2011} | |
Given the definite integral $I = \int^4_0 (x^2+ x + 1)dx$. | |
\begin{enumerate} | |
\item Use the trapezoid rule to compute an estimate for $I$. | |
\item Derive the recursive trapezoid rule (assuming equidistant partition). | |
\item Apply two recursion steps of the recursive trapezoid rule to compute two | |
new estimates for I from the estimate of part (a). | |
\item Apply one iteration of Richardson extrapolation to the recursive | |
trapezoid rule to derive a scheme with quartic asymptotic error (second | |
row of Romberg algorithm). | |
\item Apply the scheme from part (d) to the estimates of parts (a) and (c) to | |
compute two new estimates for $I$. | |
\end{enumerate} | |
\subsubsection{Problem 6: Integration - Final 2010} | |
Given the integral $I = \int^4_0 (x^2+x)dx$. | |
\begin{enumerate} | |
\item Starting with a derivation of the trapezoid rule (for a single interval), | |
derive the recursive trapezoid rule (for a sequence of intervals). | |
\begin{eqnarray*} | |
h &=& \frac{b-a}{2^m} \\ | |
T_0(f_i, P) &=& \frac{1}{2}(b-a)(f(a)+f(b)) \\ | |
T_m(f_i, P) &=& \frac{1}{2}T_{m-1}(f_i, P) + h\sum_{i=1}^{2^{m-1}}f(a+(2i-1)h) \\ | |
\end{eqnarray*} | |
\item Use the recursive trapezoid rule to compute integral I with step sizes | |
$h = 4$, $h = 2$, and $h = 1$. | |
\item Starting with the estimates from above as the first column of the Romberg | |
scheme, derive the estimate for the second column of the Romberg scheme | |
and its asymptotic error term. | |
\item Compute the second column of the Romberg scheme for integral $I$. | |
\begin{eqnarray*} | |
R_m^1 &=& R_m^0 + \frac{1}{3}(R_m^0-R_{m-1}^0) \\ | |
R_m^k &=& R_m^{k-1} + \frac{1}{4^k-1}(R_m^{k-1}-R_{m-1}^{k-1}) \\ | |
\end{eqnarray*} | |
\end{enumerate} | |
\subsection{Simpson's Scheme} | |
\subsection{Gaussian Quadrature} | |
\subsubsection{Problem Q.10: Gaussian Quadrature - Quiz 2010} | |
Given the integral $I = \int^1_{-1}f(x)dx$ and a knot sequence | |
$(x_0,x_1,x_2)=(-1,0,1)$. | |
\begin{enumerate} | |
\item When approximating $f(x)$ with a polynomial $p(x)$, show that the optimal | |
weights $A_i$ for computing I can be obtained as the integral of Lagrange | |
polynomials. | |
\begin{eqnarray*} | |
\int_a^bf(x)dx &=& \int_a^bp(x)dx \\ | |
&=& \int_a^b\sum_{i=0}^nf(x_i)\mathcal{L}_i(x)dx \\ | |
&=& \sum_{i=0}^nf(x_i)\int_a^b\mathcal{L}_i(x)dx \\ | |
&=& \sum_{i=0}^nA_if(x_i) \\ | |
A_i &=& \int_a^b\mathcal{L}_i(x)dx \\ | |
\end{eqnarray*} | |
\item Using part (a), find the weight $A_0$. | |
\item What is an alternative approach for computing the weights? Describe the | |
ansatz. | |
\item According to the Gaussian quadrature theorem, describe how the Gaussian | |
nodes are computed. | |
\item In the lecture, we had derived that the Gaussian nodes for $I$ are | |
$(-\sqrt{\frac{3}{5}},0,\sqrt{\frac{3}{5}})$ and the respective weights | |
are $(\frac{5}{9},\frac{8}{9},\frac{5}{9})$. Compute $I$ for $f(x) = x^4$ | |
according to the Gaussian quadrature scheme. | |
\item What is the expected error for the computation in part (e)? Why? | |
\end{enumerate} | |
\subsubsection{Problem 6: Gaussian Quadrature - Final 2011} | |
The Legendre polynomials are given by $q_0(x)=1$, $q_1(x)=x$, and | |
$q_n(x) = \frac{2n-1}{n}xq_{n-1}(x)-\frac{n-1}{n}q_{n-2}(x)$ for $n \ge 2$. | |
\begin{enumerate} | |
\item Assuming a Gaussian quadrature scheme with two nodes, use the Legendre | |
polynomials to compute the two Gaussian nodes for $\int_{-1}^1f(x)dx$. | |
\item Compute the optimal weights for the Gaussian nodes using the Lagrange | |
interpolation scheme. | |
\item Apply the derived scheme to compute $\int_{-1}^1(x^2+\frac{1}{6})dx$ and | |
$\int_0^2(x^2+\frac{1}{6})dx$. | |
\end{enumerate} | |
\subsection{Monte Carlo Integration} | |
\subsection{Summary (Integration)} | |
\section{Differential Equations} | |
\subsection{Ordinary Differential Equations} | |
\subsubsection{Problem Q.11: Ordinary Differential Equations - Quiz 2010} | |
Given the initial value problem $x(0) = 0$, $x'(t) = (x(t))^2+ t^2+ 1$ and step | |
size $h = 0.1$. | |
\begin{enumerate} | |
\item Derive, in general, the Euler scheme for solving an initial value problem | |
and its asymptotic error term. | |
\begin{eqnarray*} | |
x'(t) &=& f(x(t),t) \\ | |
x(t+h) &=& x(t) + hx'(t) \\ | |
x(t+h) &=& x(t) + hf(x(t), t) \\ | |
\end{eqnarray*} | |
\item Apply two Euler steps to the given initial value problem. | |
\item Explain the second-order Runge-Kutta scheme to solve an initial value | |
problem. | |
\item Apply one second-order Runge-Kutta step to the given initial value | |
problem. | |
\item Derive, in general, the second-order Taylor series scheme for solving an | |
initial value problem and its asymptotic error term. | |
\begin{eqnarray*} | |
x' &=& (x(t))^2 + t^2 + 1 \\ | |
x'' &=& 2x(t)x'(t) + 2t \\ | |
x(t+h) &=& x + hx' + \frac{1}{2}h^2x'' \\ | |
\end{eqnarray*} | |
\item Apply one second-order Taylor series step to the given initial value | |
problem. | |
\end{enumerate} | |
\subsubsection{Problem Q.13: Gaussian Quadrature and ODEs - Quiz 2011} | |
\begin{enumerate} | |
\item Given the integral $I = \int^1_{-1}f(x)dx$ and a knot sequence | |
$(x_0,x_1,x_2) = (-1,0,1)$. When approximating $f(x)$ with a polynomial | |
$p(x)$, the optimal weights $A_i$ for computing I can be obtained as the | |
integral of Lagrange polynomials. What is an alternative approach for | |
computing the weights? Compute the weights $A_i$ according to the | |
alternative approach. | |
\item We had derived that the Gaussian nodes for $I$ are | |
$(-\sqrt{\frac{3}{5}},0,\sqrt{\frac{3}{5}})$ and the respective weights are | |
$(\frac{5}{9},\frac{8}{9},\frac{5}{9})$. Compute $I$ for $f(x) = x^4$ | |
according to the Gaussian quadrature scheme. What is the expected error? | |
Why? | |
\item Derive, in general, the Euler scheme for solving an initial value problem | |
and its asymptotic error term. | |
\item Given the initial value problem $x(0) = 0$, $x'(t) = (x(t))^2+ t^2+ 1$ | |
and step size $h = 0.1$. Apply two Euler steps to the given initial value | |
problem. | |
\end{enumerate} | |
\subsubsection{Problem 7: Ordinary Differential Equations - Final 2010} | |
Given the initial value problem $x(0) = 2$, $x'(t) = -tx^2(t)$. | |
\begin{enumerate} | |
\item Derive the first- and second-order Taylor series schemes for solving an | |
initial value problem and their asymptotic error terms. | |
\item Apply two steps of the first-order scheme with step size $h = 0.1$ to | |
the given initial value problem to compute $x(0.2)$. | |
\item Apply one step of the second-order scheme with step size $h = 0.2$ to | |
the given initial value problem to compute $x(0.2)$. | |
\end{enumerate} | |
\subsection{Euler's Method} | |
\subsection{Taylor Series Method} | |
\subsection{Runge-Kutta Method} | |
\subsubsection{Problem 7: Ordinary Differential Equations - Final 2011} | |
\begin{enumerate} | |
\item Derive the second-order Taylor series scheme for solving an initial | |
value problem and its error term. | |
\item Apply one step of the second-order Taylor scheme with step size $h=0.1$ | |
to the initial value problem $x(0)=0$, $x'(t)=x^2(t)t+1$. | |
\item How does the error of the derived scheme compare to the error of the | |
second-order Runge-Kutta scheme? Explain. | |
\item Give the definitions of the terms ``stability'' and ``consistency'' of | |
iterative schemes. | |
\end{enumerate} | |
\subsection{Application Example} | |
\subsection{Partial Differential Equations} | |
\end{document} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment