% Newer
% Older
% Lecture_repo / Lectures_my / NumMet / 2016 / Lecture10 / lecture10.tex
% @Danny van Dyk Danny van Dyk on 13 Nov 2016 23 KB Added preliminary lecture 10
% vim: set sts=4 et :
\documentclass[11pt,xcolor={dvipsnames,svgnames,x11names,table}]{beamer}

\usetheme[
	bullet=circle,		% Other option: square
	bigpagenumber,		% circled page number on lower right
	topline=true,			% colored bar at the top of the frame 
	shadow=false,			% Shading for beamer blocks
	watermark=BG_lower,	% png file for the watermark
	]{Flip}

\usepackage{bm}
\usepackage{tikz,pgfplots,pgfplotstable}
\usepgfplotslibrary{fillbetween}
\usepackage{graphicx}

% SOME COMMANDS THAT I FIND HANDY
% \renewcommand{\tilde}{\widetilde} % dinky tildes look silly, doesn't work with fontspec
\newcommand{\comment}[1]{\textcolor{comment}{\footnotesize{#1}\normalsize}} % comment mild
\newcommand{\Comment}[1]{\textcolor{Comment}{\footnotesize{#1}\normalsize}} % comment bold
\newcommand{\COMMENT}[1]{\textcolor{COMMENT}{\footnotesize{#1}\normalsize}} % comment crazy bold
\newcommand{\Alert}[1]{\textcolor{Alert}{#1}} % louder alert
\newcommand{\ALERT}[1]{\textcolor{ALERT}{#1}} % loudest alert
%% "\alert" is already a beamer pre-defined
\newcommand*{\Scale}[2][4]{\scalebox{#1}{$#2$}}%

\graphicspath{{images/}}	% Put all images in this directory. Avoids clutter.

% suppress frame numbering for backup slides
% you always need the appendix for this!
% Record the current frame number before the appendix/backup slides,
% so the backup frames do not count towards the main slide total.
% NOTE: must be used exactly once (it declares the counter).
\newcommand{\backupbegin}{%
	\newcounter{framenumberappendix}%
	\setcounter{framenumberappendix}{\value{framenumber}}%
}
% Restore the frame counter to its value at \backupbegin, hiding the
% backup slides from the visible page numbering.
\newcommand{\backupend}{%
	\addtocounter{framenumberappendix}{-\value{framenumber}}%
	\addtocounter{framenumber}{\value{framenumberappendix}}%
}

\begin{document}
\tikzstyle{every picture}+=[remember picture]

{
\setbeamertemplate{sidebar right}{\llap{\includegraphics[width=\paperwidth,height=\paperheight]{bubble2}}}
\begin{frame}[c]%{\phantom{title page}} 
\begin{center}
\begin{center}
	\begin{columns}
		\begin{column}{0.9\textwidth}
			\flushright%\fontspec{Trebuchet MS}
			\bfseries \Huge {Ordinary Differential Equations (I)}
		\end{column}
		\begin{column}{0.2\textwidth}
		  %\includegraphics[width=\textwidth]{SHiP-2}
		\end{column}
	\end{columns}
\end{center}
	\quad
	\vspace{3em}
\begin{columns}
\begin{column}{0.6\textwidth}
\flushright \vspace{-1.8em} {%\fontspec{Trebuchet MS}
    \large Marcin Chrzaszcz, Danny van Dyk\\\vspace{-0.1em}\small \href{mailto:mchrzasz@cern.ch}{mchrzasz@cern.ch}, \href{mailto:danny.van.dyk@gmail.com}{danny.van.dyk@gmail.com}}

\end{column}
\begin{column}{0.4\textwidth}
\includegraphics[height=1.3cm]{uzh-transp}
\end{column}
\end{columns}

\vspace{1em}
\vspace{0.5em}
	\textcolor{normal text.fg!50!Comment}{Numerical Methods, \\ 07. November, 2016}
\end{center}
\end{frame}
}

\begin{frame}{Plan for today}
    \begin{itemize}
        \item \alert{Catching up with last time}\\
            \begin{itemize}
                \item What does an implicit Runge-Kutta method look like?
            \end{itemize}
        \vfill
        \item \alert{So far we discussed ODEs of first order}\\
            \begin{itemize}
                \item How to turn an ODE of order $n$ into a system of $n$ ODEs of first order?
            \end{itemize}
        \vfill
        \item \alert{Specific problem of a given ODE in a boundary value problem}\\
            \begin{itemize}
                \item How can we use our knowledge of initial value problems to solve these?
            \end{itemize}
    \end{itemize}
\end{frame}

\begin{frame}{Implicit Runge-Kutta methods}
    Butcher Tableau now fully filled
    \begin{center}
    \renewcommand{\arraystretch}{1.2}
    \begin{tabular}{c|ccccc}
        $c_1$    & $a_{1,1}$ & $a_{1,2}$ & $\dots$   & $a_{1,s-1}$ & $a_{1,s}$ \\
        $\vdots$ & $\vdots$  & $\vdots$  & $\vdots$  & $\vdots$    & $\vdots$  \\
        $c_s$    & $a_{s,1}$ & $a_{s,2}$ & $\dots$   & $a_{s,s-1}$ & $a_{s,s}$ \\
        \hline
                 & $b_1$     & $b_2$     & $\dots$   & $b_{s-1}$   & $b_s$ \\
    \end{tabular}
    \renewcommand{\arraystretch}{1.0}
    \end{center}
\end{frame}

\begin{frame}{Tangent: Fixed-point problems}
    \begin{equation*}
        z^{(i + 1)} = f(z^{(i)})
    \end{equation*}
    where $i$ denotes the iteration index, and we start at $i=0$.
    Convergence?
\end{frame}

\begin{frame}{Practical Aspects}
    We can view implicit single step methods
    \begin{equation*}
        y' = f(x, y)\qquad y_{n + 1} = y_{n} + \Phi(F; x_n, h; y_n, y_{n+1})
    \end{equation*}
    as a fixed point problem:
    \begin{itemize}
        \item start with a guess for $y_{n,i=0}$, e.g. based on an explicit method
        \item compute the new approximation of the implicit result for $y_{n+1}$ as
            \begin{equation*}
                y_{n+1, i+1} = y_n + \Phi(F; x_n, h; y_n, y_{n+1, i})
            \end{equation*}
        \item stop after a fixed number of iterations, or when the difference between
            previous and current iteration falls below a pre-determined threshold.
    \end{itemize}
\end{frame}

\begin{frame}{Example}
\end{frame}

\begin{frame}{ODE of order $2$}
    Let's start with an explicit ODE of order $2$:
    \begin{align*}
        y'' & = f(x; y, y')
    \end{align*}
    Let's rename $y(x) \to z_1(x)$ and introduce a new function $z_2(x) = y'(x)$, such that
    we can write
    \begin{align*}
        z_2' & = f_2(x; z_1, z_2) & & [\equiv f(x; y = z_1, y' = z_2)]\\
        z_1' & = f_1(x; z_1)      & & [\equiv z_2]
    \end{align*}
    We can now solve the (vector-valued) ODE of order $1$.
\end{frame}

\begin{frame}{Generalization to order $N$}
\end{frame}

\pgfplotstableread{
x       J            Jp
1.      0.440051     0.325147
1.05    0.455897     0.308608
1.1     0.470902     0.291529
1.15    0.485041     0.273945
1.2     0.498289     0.255892
1.25    0.510623     0.237407
1.3     0.522023     0.21853
1.35    0.53247      0.199297
1.4     0.541948     0.17975
1.45    0.550441     0.159927
1.5     0.557937     0.13987
1.55    0.564424     0.11962
1.6     0.569896     0.0992172
1.65    0.574344     0.0787044
1.7     0.577765     0.058123
1.75    0.580156     0.0375147
1.8     0.581517     0.0169214
1.85    0.581849    -0.00361515
1.9     0.581157    -0.0240536
1.95    0.579446    -0.0443527
2.      0.576725    -0.0644716
}{\BesselJtable}

\pgfplotstableread{
1	0.440051	0.325147
1.05	0.455468	0.308347
1.1	0.47002	0.291039
1.15	0.483683	0.273261
1.2	0.496435	0.255049
1.25	0.508257	0.236443
1.3	0.519132	0.21748
1.35	0.529042	0.198202
1.4	0.537974	0.178647
1.45	0.545917	0.158856
1.5	0.55286	0.138869
1.55	0.558797	0.118729
1.6	0.56372	0.0984759
1.65	0.567628	0.078151
1.7	0.570518	0.0577958
1.75	0.57239	0.0374514
1.8	0.573248	0.0171588
1.85	0.573096	-0.00304138
1.9	0.571941	-0.0231087
1.95	0.569791	-0.0430032
}{\numBesselJtable}


\begin{frame}{Example}
    Let's take the Bessel equation:\hfill\only<2>{in explicit form}
    \begin{overlayarea}{\textwidth}{.2\textheight}
    \begin{equation*}
        \only<1>{
            x^2 y'' + x y' + (x^2 - \nu^2) y = 0
        }
        \only<2>{
            y'' = -\frac{1}{x} y' - \left(1 - \frac{\nu^2}{x^2}\right) y
        }
    \end{equation*}
    \end{overlayarea}
    \begin{columns}
    \begin{column}{.7\textwidth}
    \resizebox{\textwidth}{!}{
    \begin{tikzpicture}
        \begin{axis}[%
                axis x line=center,
                axis y line=left,
                ymin=-0.25,ymax=+0.65,
                xmin=+1.00,xmax=+2.00
        ]
            \addplot+[
                ultra thick,
                mark=none,
                color=blue,
            ] table [
                x index=0,
                y index=1
            ] {\BesselJtable};
            \addplot+[
                ultra thick,
                mark=none,
                color=red,
            ] table [
                x index=0,
                y index=2
            ] {\BesselJtable};
            \addplot+[
                ultra thick,
                only marks,
                mark=x,
                color=blue,
            ] table [
                x index=0,
                y index=1
            ] {\numBesselJtable};
            \addplot+[
                ultra thick,
                only marks,
                mark=+,
                color=red,
            ] table [
                x index=0,
                y index=2
            ] {\numBesselJtable};
        \end{axis}
    \end{tikzpicture}
    }
    \end{column}
    \begin{column}{.3\textwidth}
        \begin{block}{IVP}
            $y(1) = 0.440051$\\
            $y'(1) = 0.325147$\\
            $h = 0.05$\\

            Using implicit Euler method!
        \end{block}
    \end{column}
    \end{columns}
\end{frame}

\begin{frame}{Boundary Value Problems}
    \begin{align*}
        y(a) & = y_0 &
        y(b) & = Y
    \end{align*}
\end{frame}

\backupbegin

\begin{frame}\frametitle{Backup}

\end{frame}

\begin{frame}{Nomenclature and Preliminaries}
    We will discuss \alert{ordinary} \alert{differential equation}s, or \alert{systems} of
    \alert{linear} ordinary differential equations:
    \begin{overlayarea}{\textwidth}{.7\textheight}
    \begin{description}
        \item<1->[differential equation] Any equation of the form
        \begin{equation*}
            F(x; y, y', y^{(2)}, \dots, y^{(n)}) = 0
        \end{equation*}
        with $x\in \mathbb{R}$ and $y(x)\colon \mathbb{R} \to \mathbb{R}^m$
        is called a system of ordinary differential equations of \alert{order $n$}.\\
        If $m=1$, it is only \alert{one} ordinary diff.\ equation (ODE).\\
        \only<1>{
        \item[linear] If we can write
        \begin{equation*}
            F(x; y, \dots, y^{(n)}) = A(x) \cdot \vec{y}(x) + r(x)
        \end{equation*}
        with $A(x) \in \mathbb{R}^{m\times (n+1)}$, $\vec{y}(x) = (y(x), \dots y^{(n)}(x))^T$
        and $r(x) \in \mathbb{R}^m$, then
        $F$ describes a \alert{linear} system of ODEs.
        }
        \only<2>{
        \item[ordinary] The quality \alert{ordinary} hinges on the fact that $y$ only
        depends on $x\in\mathbb{R}$. As soon as $x\in \mathbb{R}^{k}$, and we need to
        consider partial derivatives of $y$, we have a \alert{partial} differential equation.
        }
        \only<3>{
        \item[explicit] If for an $n$th order ODE we can write
        \begin{equation*}
            y^{(n)} = f(x; y, \dots, y^{(n - 1)})
        \end{equation*}
        then we call this an \alert{explicit} ODE. Otherwise, the ODE is \alert{implicit}.
        }
    \end{description}
    \end{overlayarea}
\end{frame}

\begin{frame}{Explicit linear ODEs of order $n=1$/Initial value problem}
    Let's use a well-known linear ODE of 1st order as an example:
    \begin{equation*}
        y' = f(x; y) = -y(x)
    \end{equation*}

    Solving it, regardless of method, poses an \alert{initial value problem}:
    \begin{equation*}
        \frac{y(x)}{y(x_0)} = \exp(-(x - x_0))
    \end{equation*}
    We need to know the initial value $y_0\equiv y(x_0)$ to solve this problem.
    It must be provided \alert{externally}.
\end{frame}

\begin{frame}{Single Step Methods}
    We want to increment by one \alert{step}, i.e. from the point $(x_n, y_n)$ to the
    point $(x_n + h, y_{n + 1})$.
    The increment is defined by the incremental function $\Phi$:
    \begin{equation*}
        y_{n+1} = y_n + \Phi(F; x_n, h; y_n, y_{n+1});
    \end{equation*}
    with a step size $h$. This is an implicit method. If $\Phi$ does not depend on
    $y_{n+1}$, we have an explicit method.
    \vfill
    Whatever the choice of $\Phi$, for an implicit method we will need, in general,
    to solve a non-linear equation ($\rightarrow$ see lecture on root finding). That
    means that an implicit method is computationally more expensive than a similar
    explicit method.
\end{frame}

\begin{frame}{Explicit Single Step Methods: Euler Method}
    \begin{block}{First order ODE}
    ~\\
    \vspace{-2\bigskipamount}
    \begin{align*}
        y'      & = f(x, y) &
        y_{n+1} & = y_n + \Phi(f; x_n, h; y_n)
    \end{align*}
    \end{block}
    Standard procedure for an unknown function: Taylor expand!
    \begin{align*}
        y(x_n + h) & = y(x_n) + h y'(x_n) + \frac{h^2}{2} y''(x_n) + O(h^3)
    \end{align*}
    Keeping only terms of order $h$, and substituting $y'(x_n) = f(x_n = x_0 + n\cdot h, y_n)$
    we obtain
    \begin{block}{Euler method}
    \begin{align*}
        y_{n+1} & = y_n + \underbrace{h\,f(x_n, y_n)}_{=\Phi_\text{Euler}(f; x_n, h; y_n)}
    \end{align*}
    \end{block}
\end{frame}

\begin{frame}{Local Truncation Error for Euler Method}
    The local truncation error (LTE) is introduced as:
    \begin{equation*}
        y(x_0 + h) - y_0 \equiv \text{LTE} = \frac{h^2}{2} y''(x_0) + O(h^3)
    \end{equation*}
    \begin{overlayarea}{\textwidth}{.7\textheight}
    \begin{columns}
    \begin{column}{.4\textwidth}
    \begin{block}{Example}
    The ODE at hand reads:
    \begin{align*}
        y'  & = -y\,, &
        y_0 & = 0.5
    \end{align*}
    The analytic solution is:
    \begin{equation*}
        y(x) = y_0 \, \exp(-x)
    \end{equation*}
    \end{block}
    \end{column}
    \begin{column}{.6\textwidth}
    \resizebox{\textwidth}{!}{
    \begin{tikzpicture}
        \begin{axis}[%
                axis x line=center,
                axis y line=left,
                ymin=0.0,ymax=0.55,
                xmin=0.0,xmax=+1,
                samples=160
        ]
            \addplot[ultra thick,gray,domain=0:+1] {
                0.5 * exp(-x)
            };
            \addplot[thick,red,only marks,mark size=3pt] coordinates {
                (0.25, 0.375) % h = 0.25
                (0.50, 0.250) % h = 0.50
                (1.00, 0.000) % h = 1.00
            };
            \addplot[thin,red,mark=none] {
                0.5 - 0.5 * x
            };
        \end{axis}
    \end{tikzpicture}
    }
    \begin{center}$h$\end{center}
    \end{column}
    \end{columns}
    \end{overlayarea}
\end{frame}

\newcommand{\hl}[1]{{\color{red}#1}} % highlight: render #1 in red (marks the free RK coefficients on the following slides)
\begin{frame}{Explicit Single Step Methods: Runge-Kutta Methods}
    For Runge-Kutta methods an alternative derivation for the determination of $y_{n+1}$
    uses an integral representation:
    \begin{equation*}
        y_{n+1} = y_n + \int_{x_n}^{x_n + h} \mathrm{d}x\, f(x, y(x))\,.
    \end{equation*}
    \vspace{-\medskipamount}
    The \alert{$s$}-stage Runge-Kutta method then estimates this integral
    \begin{equation*}
        y_{n+1} = y_n + \sum_{i=1}^{\alert{s}} \hl{b_i} k_i
    \end{equation*}
    \vspace{-\medskipamount}
    where
    \begin{align*}
        k_1 & = f(x_n        ,& y_n &                                                                     & )\\
        k_2 & = f(x_n + \hl{c_2} h,& y_n & + h\, \hl{a_{2,1}} k_1                                         & )\\
        k_3 & = f(x_n + \hl{c_3} h,& y_n & + h\, \hl{a_{3,1}} k_1 + h\,\hl{a_{3,2}} k_2                   & )\\
            & \quad\vdots     &     & \vdots\\
        k_s & = f(x_n + \hl{c_s} h,& y_n & + h\, \hl{a_{s,1}} k_1 + \dots + h\,\hl{a_{s,(s-1)}} k_{s-1}   & )
    \end{align*}
    Now to determine the coefficients $\lbrace \hl{a_{i,j}} \rbrace$, $\lbrace \hl{b_i} \rbrace$,
    and $\lbrace \hl{c_i} \rbrace$.
\end{frame}

\begin{frame}{Explicit Single Step Methods: Runge-Kutta (cont'd)}
    Usual presentation of the necessary coefficients in a \alert{Butcher tableau}:
    \begin{center}
    \renewcommand{\arraystretch}{1.2}
    \begin{tabular}{c|ccccc}
        $0$      &                                                         \\
        $c_2$    & $a_{2,1}$                                               \\
        $c_3$    & $a_{3,1}$ & $a_{3,2}$                                   \\
        $\vdots$ & $\vdots$  &           & $\ddots$                        \\
        $c_s$    & $a_{s,1}$ & $a_{s,2}$ & $\dots$   & $a_{s,s-1}$         \\
        \hline
                 & $b_1$     & $b_2$     & $\dots$   & $b_{s-1}$   & $b_s$ \\
    \end{tabular}
    \renewcommand{\arraystretch}{1.0}
    \end{center}

    To take away:
    \begin{itemize}
        \item a method of stage $s$ requires \alert{in general} $s$ evaluations of
        $f$
        \item the LTE is of order $p$, i.e.: $\text{LTE} = O(h^{p + 1})$
        \item in general $s > p$
        \item up to $s = 4$ there are methods known with $p = s$
    \end{itemize}
\end{frame}

\begin{frame}{Explicit Single Step Methods: Runge-Kutta (cont'd)}
    \begin{block}{Euler's method}
    \begin{columns}
    \begin{column}{.5\textwidth}
    \begin{tabular}{c|c}
        $0$      &            \\
        \hline
                 & $b_1$      \\
    \end{tabular}
    \end{column}
    \begin{column}{.5\textwidth}
    \begin{itemize}
        \item method of stage 1 and order 1
    \end{itemize}
    \end{column}
    \end{columns}
    \end{block}

    \begin{block}{Ralston's method}
    \begin{columns}
    \begin{column}{.5\textwidth}
    \begin{tabular}{c|cc}
        $0$      &               \\
        $2/3$    & $2/3$         \\
        \hline
                 & $1/4$ & $3/4$ \\
    \end{tabular}
    \end{column}
    \begin{column}{.5\textwidth}
    \begin{itemize}
        \item method of stage 2 and order 2
    \end{itemize}
    \end{column}
    \end{columns}
    \end{block}

    \begin{block}{RK4 (\emph{The} Runge-Kutta method)}
    \begin{columns}
    \begin{column}{.5\textwidth}
    \begin{tabular}{c|ccccc}
        $0$      &                               \\
        $1/2$    & $1/2$                         \\
        $1/2$    & $0$   & $1/2$                 \\
        $1$      & $0$   & $0$   & $1$           \\
        \hline
                 & $1/6$ & $1/3$ & $1/3$ & $1/6$ \\
    \end{tabular}
    \end{column}
    \begin{column}{.5\textwidth}
    \begin{itemize}
        \item method of stage 4 and order 4
    \end{itemize}
    \end{column}
    \end{columns}
    \end{block}
\end{frame}

\begin{frame}{Global Truncation Error/Accumulated Error}
    \begin{columns}
    \begin{column}{.7\textwidth}
    \resizebox{\textwidth}{!}{
    \begin{tikzpicture}
        \begin{axis}[%
                axis x line=center,
                axis y line=left,
                ymin=-1.2040,ymax=-0.59784,
                xmin=0.0,xmax=+0.5,
                samples=160
        ]
            \addplot[ultra thick,gray,domain=0:+1] {
                ln(0.5) - x
            };
            % Euler, h = 0.125
            \addplot[thick,blue,mark=x,only marks,mark size=3pt] coordinates {
                (0.125, -0.82668) % ln(0.43750)
                (0.250, -0.96022) % ln(0.38281)
                (0.375, -1.09370) % ln(0.33496)
            };
            % Runge-Kutta, h = 0.125
            \addplot[thick,red,mark=+,only marks,mark size=3pt] coordinates {
                (0.125, -0.81814) % ln(0.44125)
                (0.250, -0.94314) % ln(0.38940)
                (0.375, -1.06810) % ln(0.34364)
            };
        \end{axis}
    \end{tikzpicture}
    }
    \end{column}
    \begin{column}{.3\textwidth}
    \begin{itemize}
        \item[{\color{blue} x}] Euler method\\
            $h = 0.125$\\
            order $p = 1$
        \item[{\color{red} +}] Runge-Kutta 4\\
            $h = 0.125$\\
            order $p = 4$
    \end{itemize}
    \end{column}
    \end{columns}
    The difference in accumulated truncation errors is clearly visible
\end{frame}

\begin{frame}{Wastefulness of Single-Step Methods}
    In order to achieve a single step, the previously shown methods
    of stage $s$ evaluate the function $f(x; y)$ in at least $s$ points.\\
    \vfill
    However, all the information obtained from these evaluations is
    thrown away before the next step.\\
    \vfill
    It would be efficient to reuse this information, which gives
    rise to the \alert{multiple step methods}.
\end{frame}

\begin{frame}{Multiple Step Methods}
    An explicit \alert{multiple step method} of \alert{length $m$} follows from:
    \begin{equation*}
        y_{n+1} = -\sum_{j=0}^{m - 1} \hl{a_{j}} \, y_{n - j} + h\,\sum_{j=0}^{m - 1} \hl{b_{j}} \, f(x_{n - j}, y_{n - j})
    \end{equation*}

    Again, one strives to choose the coefficients $\lbrace \hl{a_i}\rbrace$ and $\lbrace \hl{b_i}\rbrace$
    such that the LTE is of high order in $h$.
\end{frame}

\begin{frame}{Adams-Bashforth methods}
    In Adams-Bashforth methods one chooses $\hl{a_0} = -1$, and $\hl{a_i} = 0$ for $i\geq 1$.
    The coefficients $\lbrace \hl{b_i} \rbrace$ are chosen such that $y(x)$ is interpolated
    in the last $m$ steps:
    \begin{equation*}
        \hl{b_{m - j - 1}} = \frac{(-1)^j}{j!\, (m - j - 1)!} \int_0^1 \mathrm{d}u\, \prod_{i=0,i\neq j}^{m - 1} (u + i)\,.
    \end{equation*}
    for $j = 0, \dots, m - 1$.\\[\medskipamount]

    The order $p$ of any Adams-Bashforth method is $p=m$.
\end{frame}

\begin{frame}{Stiff ODEs}
    \begin{block}{Example ODE}
    \begin{equation*}
        y' = -15\, y\qquad \text{with} \quad y(x_0) = y_0 = 1.0\,.
    \end{equation*}
    \end{block}
    \begin{overlayarea}{\textwidth}{.7\textheight}
    \only<1>{
        \vfill
        \includegraphics[width=\textwidth]{StiffEquationNumericalSolvers}\\
        ~\hfill{\tiny shamelessly taken from Wikipedia}
    }
    \only<2>{
        \begin{itemize}
            \item a stiff ODE forces you to go to \alert{unreasonably small}
                step size $h$ in order to have reasonable convergence of the method
            \vfill
            \item in general, \alert{explicit} methods demand smaller step sizes
                than computationally reasonable
            \vfill
            \item implicit methods solve this problem at the expense of additional
                evaluations of the generation function $F$ when solving for the
                increment
        \end{itemize}
    }
    \end{overlayarea}
\end{frame}

\begin{frame}{Implicit Euler Method}
    (Also known as the backward Euler method)
    \begin{equation*}
        y_{n + 1} = y_n + h\cdot f(x_{n+1}, y_{n+1})
    \end{equation*}
    ~\\
    \vspace{-2\bigskipamount}
    \begin{block}{Example with stiff ODE}
    \begin{equation*}
        y' = -y,\qquad \text{with} \quad y_0 = y(x_0) = 0.5
    \end{equation*}
    ~\\
    \begin{overlayarea}{\textwidth}{.5\textheight}
    \vspace{-2\bigskipamount}
    \only<1,3>{
    \begin{columns}
    \begin{column}{.6\textwidth}
    \resizebox{\textwidth}{!}{
    \begin{tikzpicture}
        \begin{axis}[%
                axis x line=center,
                axis y line=left,
                ymin=0.30,ymax=0.51,
                xmin=0.00,xmax=0.50,
                samples=160
        ]
            \addplot[ultra thick,black,domain=0:+1] {
                0.5 * exp(-x)
            };
            % Euler, h = 0.125
            \addplot[thick,blue,mark=x,only marks,mark size=3pt] coordinates {
                (0.125, 0.43750)
                (0.250, 0.38281)
                (0.375, 0.33496)
            };
            \only<3>{
            % Implict Euler, h = 0.125
            \addplot[thick,red,mark=+,only marks,mark size=3pt] coordinates {
                (0.125, 0.444)
                (0.250, 0.395)
                (0.375, 0.351)
            };
            }
        \end{axis}
    \end{tikzpicture}
    }
    \end{column}
    \begin{column}{.4\textwidth}
        \begin{itemize}
            \item[{\color{blue} x}] regular Euler\\
                $h = 0.125$
            \item<3>[{\color{red} +}] implicit Euler\\
                $h = 0.125$
        \end{itemize}
    \end{column}
    \end{columns}
    }
    \only<2>{
    \begin{align*}
        y_{n + 1} & = y_n + h\, f(x_{n+1}, y_{n +1})\\
                  & = y_n - h\, y_{\hl{n + 1}}\\
    \Rightarrow
        y_{n + 1} & = \frac{y_n}{1 + h}
    \end{align*}
        This yields the analytic result in the limit $h\to 0$ ($h = h_0 / n$, $n \to \infty$), since:
    \begin{equation*}
        \lim_{n\to \infty} \frac{y_0}{(1 + \frac{x}{n})^n} = y_0 \exp(-x)
    \end{equation*}
    }
    \end{overlayarea}
    \end{block}
\end{frame}

\begin{frame}{Next time on Ordinary Differential Equations}
    \begin{itemize}
        \item \alert{implicit} Runge-Kutta methods
        \vfill
        \item how to turn an $n$-order ODE into an $n$-size \alert{system} of
            first-order ODEs
        \vfill
        \item a glimpse at \alert{boundary problems}
    \end{itemize}
\end{frame}

\backupend
\end{document}