diff --git a/Lectures_my/NumMet/2016/Lecture10/lecture10.tex b/Lectures_my/NumMet/2016/Lecture10/lecture10.tex index 0a7397b..2d4ac99 100644 --- a/Lectures_my/NumMet/2016/Lecture10/lecture10.tex +++ b/Lectures_my/NumMet/2016/Lecture10/lecture10.tex @@ -95,7 +95,10 @@ \end{frame} \begin{frame}{Implicit Runge-Kutta methods} - Butcher Tableau now fully filled + \begin{itemize} + \item same as before, method of stage $s$ and order $p$ + \item compare to Butcher Tableau now fully filled + \end{itemize} \begin{center} \renewcommand{\arraystretch}{1.2} \begin{tabular}{c|ccccc} @@ -107,14 +110,54 @@ \end{tabular} \renewcommand{\arraystretch}{1.0} \end{center} + \begin{itemize} + \item while explicit methods have strictly $s \geq p$, implicit methods + can achieve orders $p > s$ + \end{itemize} +\end{frame} + +\begin{frame}{Gauss-Legendre methods} + \begin{itemize} + \item reuse what we know about Gauss quadratures + \item rescale the problem such that Legendre polynomials are the appropriate + orthogonal basis + \item example: Gauss-Legendre method of stage $2$: + \end{itemize} + \begin{center} + \renewcommand{\arraystretch}{1.2} + \begin{tabular}{c|cc} + $\frac{1}{2} - \frac{\sqrt{3}}{6}$ & $\frac{1}{4}$ & $\frac{1}{4} - \frac{\sqrt{3}}{6}$ \\ + $\frac{1}{2} + \frac{\sqrt{3}}{6}$ & $\frac{1}{4} + \frac{\sqrt{3}}{6}$ & $\frac{1}{4}$ \\ + \hline + & $\frac{1}{2}$ & $\frac{1}{2}$ \\ + \end{tabular} + \renewcommand{\arraystretch}{1.0} + \end{center} \end{frame} \begin{frame}{Tangent: Fixed-point problems} + \alert{What is a fixed-point problem?}\\ + \vfill + \begin{itemize} + \item consider an automorphism $f$, e.g.: a function $\mathbb{R}\ni z \mapsto f(z)\in \mathbb{R}$ + \item a point $z^*$ is a fixed point under $f$ if it fulfills + \end{itemize} \begin{equation*} - z^{(i + 1)} = f(z_{(i)}) + f(z^*) = z^* \end{equation*} - where $i$ denotes the iteration index, and we start at $i=0$. - Convergence? 
+ \begin{itemize} + \item A \alert{fixed-point problem} is finding the fixed point $z^*$ for a given $f$. + \item Iteratively, we can approach $\lim_{i\to \infty} z^{(i)} = z^*$ via: + \end{itemize} + \begin{equation*} + z^{(i + 1)} = f(z^{(i)}) + \end{equation*} + \begin{itemize} + \item this works, as long as $z^*$ is an attractive fixed point, i.e.: + \end{itemize} + \begin{equation*} + || f(z^{(i+1)}) - f(z^{(i)}) || < || z^{(i + 1)} - z^{(i)} || + \end{equation*} \end{frame} \begin{frame}{Practical Aspects} @@ -131,13 +174,18 @@ \end{equation*} \item stop after a fixed number of iterations, or when the difference between previous and current iteration falls below a pre-determined threshold. + \item check attraction requirement in every step \end{itemize} \end{frame} \begin{frame}{Example} + \begin{itemize} + \item first step: Euler explicit + \item next steps: repeat Euler implicit until $| z^{(i + 1)} - z^{(i)} | < 10^{-3}$ + \end{itemize} \end{frame} -\begin{frame}{ODE of order $2$} +\begin{frame}{Toward Boundary Problems: ODEs of order $2$} Let's start with an explicit ODE of order $2$: \begin{align*} y'' & = f(x; y, y') @@ -152,6 +200,14 @@ \end{frame} \begin{frame}{Generalization to order $N$} + \begin{itemize} + \item we can apply this now to any order $N$ ODE to obtain + a system of $N$ coupled ODEs of order $1$ + \item however, a first-order system of $N$ equations + requires $N$ initial conditions to solve! + \item consequently, we already needed $N$ initial conditions + to solve the order $N$ system! + \end{itemize} \end{frame} \pgfplotstableread{ @@ -265,21 +321,51 @@ \end{column} \begin{column}{.3\textwidth} \begin{block}{IVP} - $y(1) = 0.44051$\\ - $y'(1) = 0.325147$\\ - $h = 0.05$\\ - + \begin{align*} + y(1) & = 0.44051 \\ + y'(1) & = 0.325147 + \end{align*} + step size: $h = 0.05$\\ + \bigskip Using implicit Euler method! 
\end{block} \end{column} \end{columns} \end{frame} -\begin{frame}{Boundary Value Problems} +\begin{frame}{Boundary Value Problem and the Shooting Method} + \begin{itemize} + \item a \alert{boundary value problem} for a second-order ODE + consists of finding the solution + to the ODE on the interval $[a, b]$ with the following condition + \end{itemize} \begin{align*} - y(a) & = y_0 & - y(b) & = Y + y(a) & = \alpha & + y(b) & = \beta \end{align*} + \begin{itemize} + \item we can rewrite it as an initial value problem: + \end{itemize} + \begin{align*} + y(a) & = \alpha & + y'(a) & = \gamma + \end{align*} + \begin{itemize} + \item We can then proceed to solve the IVP as a function of $\gamma$, such that + $y(b) = \beta$ is fulfilled. + \item This is called the \alert{shooting method}. + \item Practically, we define a root-finding problem: + \end{itemize} + \begin{align*} + 0 = F(\gamma) \equiv \beta - y(x = b; \alpha, \gamma) + \end{align*} +\end{frame} + +\begin{frame}{Example} + From Stoer/Bulirsch: + \begin{itemize} + \item \alert{two} solutions: $y'(a) = -8$ or $y'(a) \simeq -40$. 
+ \end{itemize} \end{frame} \backupbegin @@ -288,442 +374,5 @@ \end{frame} -\begin{frame}{Nomenclature and Preliminaries} - We will discuss \alert{ordinary} \alert{differential equation}s, or \alert{systems} of - \alert{linear} ordinary differential equations: - \begin{overlayarea}{\textwidth}{.7\textheight} - \begin{description} - \item<1->[differential equation] Any equation of the form - \begin{equation*} - F(x; y, y', y^{(2)}, \dots, y^{(n)}) = 0 - \end{equation*} - with $x\in \mathbb{R}$ and $y(x): \mathbb{R} \mapsto \mathbb{R}^m$ - is called a system of ordinary differential equations of \alert{order $n$}.\\ - If $m=1$, it is only \alert{one} ordinary diff.\ equation (ODE).\\ - \only<1>{ - \item[linear] If we can write - \begin{equation*} - F(x; y, \dots, y^{(n)}) = A(x) \cdot \vec{y}(x) + r(x) - \end{equation*} - with $A(x) \in \mathbb{R}^{m\times (n+1)}$, $\vec{y}(x) = (y(x), \dots y^{(n)}(x))^T$ - and $r(x) \in \mathbb{R}^m$, then - $F$ describes a \alert{linear} system of ODEs. - } - \only<2>{ - \item[ordinary] The quality \alert{ordinary} hinges on the fact that $y$ only - depends on $x\in\mathbb{R}$. As soon as $x\in \mathbb{R}^{k}$, and we need to - consider partial derivatives of $y$, we have a \alert{partial} differential equation. - } - \only<3>{ - \item[explicit] If for an $n$th order ODE we can write - \begin{equation*} - y^{(n)} = f(x; y, \dots, y^{(n - 1)}) - \end{equation*} - then we call this an \alert{explicit} ODE. Otherwise, the ODE is \alert{implicit}. - } - \end{description} - \end{overlayarea} -\end{frame} - -\begin{frame}{Explicit linear ODEs of order $n=1$/Initial value problem} - Let's use a well-known linear ODE of 2nd order as an example: - \begin{equation*} - y' = f(x; y) = -y(x) - \end{equation*} - - Solving it, regardless of method, poses an \alert{initial value problem}: - \begin{equation} - \frac{y(x)}{y(x_0)} = \exp(-(x - x_0)) - \end{equation} - We need to know the initial value $y_0\equiv y(x_0)$ to solve this problem. 
- It must be provided \alert{externally}. -\end{frame} - -\begin{frame}{Single Step Methods} - We want to increment by one \alert{step}, i.e. from the point $(x_n, y_n)$ to the - point $(x_n + h, y_{n + 1})$. - The increment is defined by the incremental function $\Phi$: - \begin{equation*} - y_{n+1} = y_n + \Phi(F; x_n, h; y_n, y_{n+1}); - \end{equation*} - with a step size $h$. This is an implicit method. If $\Phi$ does not depend on - $y_{n+1}$, we have an explicit method. - \vfill - Whatever the choice of $\Phi$, for an implicit method we will need, in general, - to solve a non-linear equation ($\rightarrow$ see lecture on root finding). That - means that an implicit method is computationally more expensive than a similar - explicit method. -\end{frame} - -\begin{frame}{Explicit Single Step Methods: Euler Method} - \begin{block}{First order ODE} - ~\\ - \vspace{-2\bigskipamount} - \begin{align*} - y' & = f(x, y) & - y_{n+1} & = y_n + \Phi(f; x_n, h; y_n) - \end{align*} - \end{block} - Standard procedure for an unknown function: Taylor expand! 
- \begin{align*} - y(x_n + h) & = y(x_n) + h y'(x_n) + \frac{h^2}{2} y''(x_n) + O(h^3) - \end{align*} - Keeping only terms of order $h$, and substituting $y'(x_n) = f(x_n = x_0 + n\cdot h, y_n)$ - we obtain - \begin{block}{Euler method} - \begin{align*} - y_{n+1} & = y_n + \underbrace{h\,f(x_n, y_n)}_{=\Phi_\text{Euler}(f; x_n, h; y_n)} - \end{align*} - \end{block} -\end{frame} - -\begin{frame}{Local Truncation Error for Euler Method} - The local trunction error (LTE) is introduced as: - \begin{equation*} - y(x_0 + h) - y_0 \equiv \text{LTE} = \frac{h^2}{2} y''(x_0) + O(h^3) - \end{equation*} - \begin{overlayarea}{\textwidth}{.7\textheight} - \begin{columns} - \begin{column}{.4\textwidth} - \begin{block}{Example} - The ODE at hand reads: - \begin{align*} - y' & = -y\,, & - y_0 & = 0.5 - \end{align*} - The analytic solution is: - \begin{equation*} - y(x) = y_0 \, \exp(-x) - \end{equation*} - \end{block} - \end{column} - \begin{column}{.6\textwidth} - \resizebox{\textwidth}{!}{ - \begin{tikzpicture} - \begin{axis}[% - axis x line=center, - axis y line=left, - ymin=0.0,ymax=0.55, - xmin=0.0,xmax=+1, - samples=160 - ] - \addplot[ultra thick,gray,domain=0:+1] { - 0.5 * exp(-x) - }; - \addplot[thick,red,only marks,mark size=3pt] coordinates { - (0.25, 0.375) % h = 0.25 - (0.50, 0.250) % h = 0.50 - (1.00, 0.000) % h = 1.00 - }; - \addplot[thin,red,mark=none] { - 0.5 - 0.5 * x - }; - \end{axis} - \end{tikzpicture} - } - \begin{center}$h$\end{center} - \end{column} - \end{columns} - \end{overlayarea} -\end{frame} - -\newcommand{\hl}[1]{{\color{red}#1}} -\begin{frame}{Explicit Single Step Methods: Runge-Kutta Methods} - For Runge-Kutta methods an alternative derivation for the determination $y_{n+1}$ - uses an integral representation: - \begin{equation*} - y_{n+1} = y_n + \int_{x_n}^{x_n + h} \mathrm{d}x\, f(x, y(x))\,. 
- \end{equation*} - \vspace{-\medskipamount} - The \alert{$s$}-stage Runge-Kutta method then estimates this integral - \begin{equation*} - y_{n+1} = y_n + \sum_{i=1}^{\alert{s}} \hl{b_i} k_i - \end{equation*} - \vspace{-\medskipamount} - where - \begin{align*} - k_1 & = f(x_n ,& y_n & & )\\ - k_2 & = f(x_n + \hl{c_2} h,& y_n & + h\, \hl{a_{2,1}} k_1 & )\\ - k_3 & = f(x_n + \hl{c_3} h,& y_n & + h\, \hl{a_{3,1}} k_1 + h\,\hl{a_{3,2}} k_2 & )\\ - & \quad\vdots & & \vdots\\ - k_s & = (fx_n + \hl{c_s} h,& y_n & + h\, \hl{a_{s,1}} k_1 + \dots + h\,\hl{a_{s,(s-1)}} k_{s-1} & ) - \end{align*} - Now to determine the coefficients $\lbrace \hl{a_{i,j}} \rbrace$, $\lbrace \hl{b_i} \rbrace$, - and $\lbrace \hl{c_i} \rbrace$. -\end{frame} - -\begin{frame}{Explicit Single Step Methods: Runge-Kutta (cont'd)} - Usual presentation of the necessary coefficients in a \alert{Butcher tableau}: - \begin{center} - \renewcommand{\arraystretch}{1.2} - \begin{tabular}{c|ccccc} - $0$ & \\ - $c_2$ & $a_{2,1}$ \\ - $c_3$ & $a_{3,1}$ & $a_{3,2}$ \\ - $\vdots$ & $\vdots$ & & $\ddots$ \\ - $c_s$ & $a_{s,1}$ & $a_{s,2}$ & $\dots$ & $a_{s,s-1}$ \\ - \hline - & $b_1$ & $b_2$ & $\dots$ & $b_{s-1}$ & $b_s$ \\ - \end{tabular} - \renewcommand{\arraystretch}{1.0} - \end{center} - - To take away: - \begin{itemize} - \item a method of stage $s$ requires \alert{in general} $s$ evaluations of - $f$ - \item the LTE is of order $p$, i.e.: $\text{LTE} = O(h^{p + 1})$ - \item in general $s > p$ - \item up to $s = 4$ there are methods known with $s \geq p$ - \end{itemize} -\end{frame} - -\begin{frame}{Explicit Single Step Methods: Runge-Kutta (cont'd)} - \begin{block}{Euler's method} - \begin{columns} - \begin{column}{.5\textwidth} - \begin{tabular}{c|c} - $0$ & \\ - \hline - & $b_1$ \\ - \end{tabular} - \end{column} - \begin{column}{.5\textwidth} - \begin{itemize} - \item method of stage 1 and order 1 - \end{itemize} - \end{column} - \end{columns} - \end{block} - - \begin{block}{Ralston's method} - 
\begin{columns} - \begin{column}{.5\textwidth} - \begin{tabular}{c|cc} - $0$ & \\ - $2/3$ & $2/3$ \\ - \hline - & $1/4$ & $3/4$ \\ - \end{tabular} - \end{column} - \begin{column}{.5\textwidth} - \begin{itemize} - \item method of stage 2 and order 2 - \end{itemize} - \end{column} - \end{columns} - \end{block} - - \begin{block}{RK4 (\emph{The} Runge-Kutta method)} - \begin{columns} - \begin{column}{.5\textwidth} - \begin{tabular}{c|ccccc} - $0$ & \\ - $1/2$ & $1/2$ \\ - $1/2$ & $0$ & $1/2$ \\ - $1$ & $0$ & $0$ & $1$ & $0$ \\ - \hline - & $1/6$ & $1/3$ & $1/3$ & $1/6$ \\ - \end{tabular} - \end{column} - \begin{column}{.5\textwidth} - \begin{itemize} - \item method of stage 4 and order 4 - \end{itemize} - \end{column} - \end{columns} - \end{block} -\end{frame} - -\begin{frame}{Global Truncation Error/Accumulated Error} - \begin{columns} - \begin{column}{.7\textwidth} - \resizebox{\textwidth}{!}{ - \begin{tikzpicture} - \begin{axis}[% - axis x line=center, - axis y line=left, - ymin=-1.2040,ymax=-0.59784, - xmin=0.0,xmax=+0.5, - samples=160 - ] - \addplot[ultra thick,gray,domain=0:+1] { - ln(0.5) - x - }; - % Euler, h = 0.125 - \addplot[thick,blue,mark=x,only marks,mark size=3pt] coordinates { - (0.125, -0.82668) % ln(0.43750) - (0.250, -0.96022) % ln(0.38281) - (0.375, -1.09370) % ln(0.33496) - }; - % Runge-Kutta, h = 0.125 - \addplot[thick,red,mark=+,only marks,mark size=3pt] coordinates { - (0.125, -0.81814) % ln(0.44125) - (0.250, -0.94314) % ln(0.38940) - (0.375, -1.06810) % ln(0.34364) - }; - \end{axis} - \end{tikzpicture} - } - \end{column} - \begin{column}{.3\textwidth} - \begin{itemize} - \item[{\color{blue} x}] Euler method\\ - $h = 0.125$\\ - order $p = 1$ - \item[{\color{red} +}] Runge-Kutta 4\\ - $h = 0.125$\\ - order $p = 4$ - \end{itemize} - \end{column} - \end{columns} - The differences in accumulated trunction errors is clearly visible -\end{frame} - -\begin{frame}{Wastefulness of Single-Step Methods} - In order to achieve a single step, the previously 
shown methods - of stage $s$ evaluate the function $f(x; y)$ in at least $s$ points.\\ - \vfill - However, all the information obtained from these evaluations is - thrown away before the next step.\\ - \vfill - It would be efficient to reuse this information, which gives - rise to the \alert{multiple step methods}. -\end{frame} - -\begin{frame}{Multiple Step Methods} - An explicit \alert{multiple step method} of \alert{length $m$} follows from: - \begin{equation*} - y_{n+1} = -\sum_{j=0}^{m - 1} \hl{a_{j}} \, y_{n - j} + h\,\sum_{j=0}^{m - 1} \hl{b_{j}} \, f(x_{n - j}, y_{n - j}) - \end{equation*} - - Again, one strives to choose the coefficients $\lbrace \hl{a_i}\rbrace$ and $\lbrace \hl{b_i}\rbrace$ - such that the LTE is of high order in $h$. -\end{frame} - -\begin{frame}{Adams-Bashforth methods} - In Adams-Bashforth methods one chooses $\hl{a_0} = -1$, and $\hl{a_i} = 0$ for $i\geq 1$. - The coefficients $\lbrace \hl{b_i} \rbrace$ are chosen such that $y(x)$ is interpolated - in the last $m$ steps: - \begin{equation*} - \hl{b_{m - j - 1}} = \frac{(-1)^j}{j!\, (m - j - 1)!} \int_0^1 \prod_{i=0,i\neq j}^{s - 1} \mathrm{d}u\, (u + i)\,. - \end{equation*} - for $j = 0, \dots, m - 1$.\\[\medskipamount] - - The order $p$ of any Adams-Bashforth method is $p=m$. -\end{frame} - -\begin{frame}{Stiff ODEs} - \begin{block}{Example ODE} - \begin{equation*} - y' = -15\, y\qquad \text{with} \quad y(x_0) = y_0 = 1.0\,. 
- \end{equation*} - \end{block} - \begin{overlayarea}{\textwidth}{.7\textheight} - \only<1>{ - \vfill - \includegraphics[width=\textwidth]{StiffEquationNumericalSolvers}\\ - ~\hfill{\tiny shamelessly taken from Wikipedia} - } - \only<2>{ - \begin{itemize} - \item a stiff ODE forces you to go to \alert{unreasonably small} - step size $h$ in order to have reasonable convergence of the method - \vfill - \item in general, \alert{explicit} methods demand smaller step sizes - than computationally reasonable - \vfill - \item implicit methods solve this problem at the expense of additional - evaluations of the generation function $F$ when solving for the - increment - \end{itemize} - } - \end{overlayarea} -\end{frame} - -\begin{frame}{Implicit Euler Method} - (Also known as the backward Euler method) - \begin{equation*} - y_{n + 1} = y_n + h\cdot f(x_{n+1}, y_{n+1}) - \end{equation*} - ~\\ - \vspace{-2\bigskipamount} - \begin{block}{Example with stiff ODE} - \begin{equation*} - y' = -y,\qquad \text{with} \quad y_0 = y(x_0) = 0.5 - \end{equation*} - ~\\ - \begin{overlayarea}{\textwidth}{.5\textheight} - \vspace{-2\bigskipamount} - \only<1,3>{ - \begin{columns} - \begin{column}{.6\textwidth} - \resizebox{\textwidth}{!}{ - \begin{tikzpicture} - \begin{axis}[% - axis x line=center, - axis y line=left, - ymin=0.30,ymax=0.51, - xmin=0.00,xmax=0.50, - samples=160 - ] - \addplot[ultra thick,black,domain=0:+1] { - 0.5 * exp(-x) - }; - % Euler, h = 0.125 - \addplot[thick,blue,mark=x,only marks,mark size=3pt] coordinates { - (0.125, 0.43750) - (0.250, 0.38281) - (0.375, 0.33496) - }; - \only<3>{ - % Implict Euler, h = 0.125 - \addplot[thick,red,mark=+,only marks,mark size=3pt] coordinates { - (0.125, 0.444) - (0.250, 0.395) - (0.375, 0.351) - }; - } - \end{axis} - \end{tikzpicture} - } - \end{column} - \begin{column}{.4\textwidth} - \begin{itemize} - \item[{\color{blue} x}] regular Euler\\ - $h = 0.125$ - \item<3>[{\color{red} +}] implicit Euler\\ - $h = 0.125$ - \end{itemize} - 
\end{column} - \end{columns} - } - \only<2>{ - \begin{align*} - y_{n + 1} & = y_n + h\, f(x_{n+1}, y_{n +1})\\ - & = y_n - h\, y_{\hl{n + 1}}\\ - \Rightarrow - y_{n + 1} & = \frac{y_n}{1 + h} - \end{align*} - This yields the analytic result in the limit $h\to 0$ ($h = h_0 / n$, $n \to \infty$), since: - \begin{equation*} - \lim_{n\to \infty} \frac{y_0}{(1 + \frac{x}{n})^n} = y_0 \exp(-x) - \end{equation*} - } - \end{overlayarea} - \end{block} -\end{frame} - -\begin{frame}{Next time on Ordinary Differential Equations} - \begin{itemize} - \item \alert{implicit} Runge-Kutta methods - \vfill - \item how to turn an $n$-order ODE into an $n$-size \alert{system} of - first-order ODEs - \vfill - \item a glimpse at \alert{boundary problems} - \end{itemize} -\end{frame} - \backupend \end{document}