% Newer
% Older
% Lecture_repo / Lectures_my / NumMet / 2016 / Lecture5 / mchrzasz.tex
% @mchrzasz mchrzasz on 12 Oct 2016 21 KB fixed small things
\documentclass[11 pt,xcolor={dvipsnames,svgnames,x11names,table}]{beamer}

\usepackage[english]{babel} 
\usepackage{polski}         


\usetheme[
	bullet=circle,		% Other option: square
	bigpagenumber,		% circled page number on lower right
	topline=true,			% colored bar at the top of the frame 
	shadow=false,			% Shading for beamer blocks
	watermark=BG_lower,	% png file for the watermark
	]{Flip}

%\logo{\kern+1.em\includegraphics[height=1cm]{SHiP-3_LightCharcoal}}
                            

\usepackage[lf]{berenis}
\usepackage[LY1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{cases}
\usepackage{mathtools}
\usepackage{emerald}
\usefonttheme{professionalfonts}
\usepackage[no-math]{fontspec}		
\defaultfontfeatures{Mapping=tex-text}	% This seems to be important for mapping glyphs properly

\setmainfont{Gillius ADF}			% Beamer ignores "main font" in favor of sans font
\setsansfont{Gillius ADF}			% This is the font that beamer will use by default
% \setmainfont{Gill Sans Light}		% Prettier, but harder to read

\setbeamerfont{title}{family=\fontspec{Gillius ADF}}

\input t1augie.fd

%\newcommand{\handwriting}{\fontspec{augie}} % From Emerald City, free font
%\newcommand{\handwriting}{\usefont{T1}{fau}{m}{n}} % From Emerald City, free font
% \newcommand{\handwriting}{}	% If you prefer no special handwriting font or don't have augie

%% Gill Sans doesn't look very nice when boldfaced
%% This is a hack to use Helvetica instead
%% Usage: \textbf{\forbold some stuff}
%\newcommand{\forbold}{\fontspec{Arial}}

\usepackage{graphicx}
\usepackage[export]{adjustbox}

\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{colortbl}
\usepackage{mathrsfs} 			% For Weinberg-esque letters
\usepackage{cancel}				% For "SUSY-breaking" symbol
\usepackage{slashed}            % for slashed characters in math mode
\usepackage{bbm}                % for \mathbbm{1} (unit matrix)
\usepackage{amsthm}				% For theorem environment
\usepackage{multirow}			% For multi row cells in table
\usepackage{arydshln} 			% For dashed lines in arrays and tables
\usepackage{siunitx}
\usepackage{xhfill}
\usepackage{grffile}
\usepackage{textpos}
\usepackage{subfigure}
\usepackage{tikz}
\usepackage{hyperref}
%\usepackage{hepparticles}    
\usepackage[italic]{hepparticles}     

\usepackage{hepnicenames} 

% Drawing a line
\tikzstyle{lw} = [line width=20pt]
\newcommand{\topline}{%
  \tikz[remember picture,overlay] {%
    \draw[crimsonred] ([yshift=-23.5pt]current page.north west)
             -- ([yshift=-23.5pt,xshift=\paperwidth]current page.north west);}}



% % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
\usepackage{tikzfeynman}		% For Feynman diagrams
\usetikzlibrary{arrows,shapes}
\usetikzlibrary{trees}
\usetikzlibrary{matrix,arrows} 				% For commutative diagram
% http://www.felixl.de/commu.pdf
\usetikzlibrary{positioning}				% For "above of=" commands
\usetikzlibrary{calc,through}				% For coordinates
\usetikzlibrary{decorations.pathreplacing}  % For curly braces
% http://www.math.ucla.edu/~getreuer/tikz.html
\usepackage{pgffor}							% For repeating patterns

\usetikzlibrary{decorations.pathmorphing}	% For Feynman Diagrams
\usetikzlibrary{decorations.markings}
\tikzset{
	% >=stealth', %%  Uncomment for more conventional arrows
	vector/.style={decorate, decoration={snake}, draw},
	provector/.style={decorate, decoration={snake,amplitude=2.5pt}, draw},
	antivector/.style={decorate, decoration={snake,amplitude=-2.5pt}, draw},
	fermion/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{>}}}},
	fermionbar/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{<}}}},
	fermionnoarrow/.style={draw=gray},
	gluon/.style={decorate, draw=black,
		decoration={coil,amplitude=4pt, segment length=5pt}},
	scalar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	scalarbar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{<}}}},
	scalarnoarrow/.style={dashed,draw=black},
	electron/.style={draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	bigvector/.style={decorate, decoration={snake,amplitude=4pt}, draw},
}

% TIKZ - for block diagrams, 
% from http://www.texample.net/tikz/examples/control-system-principles/
% \usetikzlibrary{shapes,arrows}
\tikzstyle{block} = [draw, rectangle, 
minimum height=3em, minimum width=6em]




\usetikzlibrary{backgrounds}
\usetikzlibrary{mindmap,trees}	% For mind map
\newcommand{\degree}{\ensuremath{^\circ}}
\newcommand{\E}{\mathrm{E}}
\newcommand{\Var}{\mathrm{Var}}
\newcommand{\Cov}{\mathrm{Cov}}
\newcommand\Ts{\rule{0pt}{2.6ex}}       % Top strut
\newcommand\Bs{\rule[-1.2ex]{0pt}{0pt}} % Bottom strut

\graphicspath{{images/}}	% Put all images in this directory. Avoids clutter.

% SOME COMMANDS THAT I FIND HANDY
% \renewcommand{\tilde}{\widetilde} % dinky tildes look silly, dosn't work with fontspec
\newcommand{\comment}[1]{\textcolor{comment}{\footnotesize{#1}\normalsize}} % comment mild
\newcommand{\Comment}[1]{\textcolor{Comment}{\footnotesize{#1}\normalsize}} % comment bold
\newcommand{\COMMENT}[1]{\textcolor{COMMENT}{\footnotesize{#1}\normalsize}} % comment crazy bold
\newcommand{\Alert}[1]{\textcolor{Alert}{#1}} % louder alert
\newcommand{\ALERT}[1]{\textcolor{ALERT}{#1}} % loudest alert
%% "\alert" is already a beamer pre-defined
\newcommand*{\Scale}[2][4]{\scalebox{#1}{$#2$}}%

\def\Put(#1,#2)#3{\leavevmode\makebox(0,0){\put(#1,#2){#3}}}

\usepackage{gmp}
\usepackage[final]{feynmp-auto}

\usepackage[backend=bibtex,style=numeric-comp,firstinits=true]{biblatex}
\bibliography{bib}
\setbeamertemplate{bibliography item}[text]

\makeatletter\let\frametextheight\beamer@frametextheight\makeatother

% suppress frame numbering for backup slides
% you always need the appendix for this!
\newcommand{\backupbegin}{
	\newcounter{framenumberappendix}
	\setcounter{framenumberappendix}{\value{framenumber}}
}
\newcommand{\backupend}{
	\addtocounter{framenumberappendix}{-\value{framenumber}}
	\addtocounter{framenumber}{\value{framenumberappendix}} 
}


\definecolor{links}{HTML}{2A1B81}
%\hypersetup{colorlinks,linkcolor=,urlcolor=links}

% For shapo's formulas:
\def\lsi{\raise0.3ex\hbox{$<$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\def\gsi{\raise0.3ex\hbox{$>$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\newcommand{\lsim}{\mathop{\lsi}}
\newcommand{\gsim}{\mathop{\gsi}}
\newcommand{\wt}{\widetilde}
%\newcommand{\ol}{\overline}
\newcommand{\Tr}{\rm{Tr}}
\newcommand{\tr}{\rm{tr}}
\newcommand{\eqn}[1]{&\hspace{-0.7em}#1\hspace{-0.7em}&}
\newcommand{\vev}[1]{\rm{$\langle #1 \rangle$}}
\newcommand{\abs}[1]{\rm{$\left| #1 \right|$}}
\newcommand{\eV}{\rm{eV}}
\newcommand{\keV}{\rm{keV}}
\newcommand{\GeV}{\rm{GeV}}
\newcommand{\im}{\rm{Im}}
\newcommand{\disp}{\displaystyle}
\def\be{\begin{equation}}
\def\ee{\end{equation}}
\def\ba{\begin{eqnarray}}
\def\ea{\end{eqnarray}}
\def\d{\partial}
\def\l{\left(}
\def\r{\right)}
\def\la{\langle}
\def\ra{\rangle}
\def\e{{\rm e}}
\def\Br{{\rm Br}}
\def\fixme{{\color{red} FIXME!}}
\def\mc{{\color{Magenta}{MC}}\xspace}
\def\pdf{{\rm p.d.f.}}
\def\ARROW{{\color{JungleGreen}{$\Rrightarrow$}}\xspace}
\def\ARROWR{{\color{WildStrawberry}{$\Rrightarrow$}}\xspace} 

\author{ {\fontspec{Trebuchet MS}Marcin Chrz\k{a}szcz, Danny van Dyk} (UZH)}
\institute{UZH}
\title[Linear equation systems: exact methods]{Linear equation systems: exact methods}
\date{21 September 2016}


\begin{document}
\tikzstyle{every picture}+=[remember picture]

{
\setbeamertemplate{sidebar right}{\llap{\includegraphics[width=\paperwidth,height=\paperheight]{bubble2}}}
\begin{frame}[c]%{\phantom{title page}} 
\begin{center}
\begin{center}
	\begin{columns}
		\begin{column}{0.9\textwidth}
			\flushright\fontspec{Trebuchet MS}\bfseries \Huge {Linear equation systems: exact methods}
		\end{column}
		\begin{column}{0.2\textwidth}
		  %\includegraphics[width=\textwidth]{SHiP-2}
		\end{column}
	\end{columns}
\end{center}
	\quad
	\vspace{3em}
\begin{columns}
\begin{column}{0.6\textwidth}
\flushright \vspace{-1.8em} {\fontspec{Trebuchet MS} \large Marcin Chrz\k{a}szcz, Danny van Dyk\\\vspace{-0.1em}\small \href{mailto:mchrzasz@cern.ch}{mchrzasz@cern.ch}, \href{mailto:dany.van.dyk@gmail.com}{danny.van.dyk@gmail.com}}

\end{column}
\begin{column}{0.4\textwidth}
\includegraphics[height=1.3cm]{uzh-transp}
\end{column}
\end{columns}

\vspace{1em}
%		\footnotesize\textcolor{gray}{With N. Serra, B. Storaci\\Thanks to the theory support from M. Shaposhnikov, D. Gorbunov}\normalsize\\
\vspace{0.5em}
	\textcolor{normal text.fg!50!Comment}{Numerical Methods, \\ 10 October, 2016}
\end{center}
\end{frame}
}


\begin{frame}\frametitle{Linear eq. system}

\ARROW This and the next lecture will focus on a well known problem. Solve the following equation system:
\begin{align*}
A \cdot x =b,
\end{align*} 
\ARROWR $A = a_{ij} 	\in \mathbb{R}^{n\times n}$ and $\det(A) \neq 0$\\
\ARROWR $b=b_i \in  \mathbb{R}^n$.\\
\ARROW The problem: Find the $x$ vector.



\end{frame}

\begin{frame}\frametitle{Error digression}
\begin{small}
\ARROW There is an enormous number of ways to solve a linear equation system.\\
\ARROW The choice of one over another should be guided by the {\it condition} of the matrix $A$, denoted as $cond(A)$.\\
\ARROW If $cond(A)$ is small we say that the problem is well conditioned, otherwise we say it is ill conditioned.\\
\ARROW The {\it condition} relation is defined as:
\begin{align*}
cond(A) = \Vert A \Vert \cdot \Vert A^{-1} \Vert
\end{align*}
\ARROW Now there are many definitions of different norms... The most popular one (the so-called ``column norm''):
\begin{align*}
\Vert A \Vert_1 = \max_{1 \leq j \leq n} \sum_{i=1}^n \vert a_{i,j} \vert,
\end{align*}
where $n$ -is the dimension of $A$, $i,j$ are columns and rows numbers.



\end{small}
\end{frame}


\begin{frame}\frametitle{More norms}
\begin{small}
\ARROW A different norm is a spectral norm:
\begin{align*}
\Vert A \Vert_2 &= \sqrt{\rho(A^T A)}\\
\rho(M) &= \max \lbrace \vert\lambda_i \vert: \det(M- \lambda I) =0,~i=1,...,n \rbrace
\end{align*}
where $\rho(M)$ - spectral radius  of $M$ matrix, $I$ unit matrix, $\lambda_i$ eigenvalues of $M$.\\

\ARROW Row norm:
\begin{align*}
\Vert A \Vert_{\infty} &= \max_{1 \leq i \leq n} \sum_{j=1}^n \vert a_{i,j} \vert,
\end{align*}
\begin{exampleblock}{Digression:}
\ARROWR Calculating matrix norms is not a simple process at all. There are certain classes of matrices that make the calculations easier.\\
\ARROWR The spectral norm can be also defined:
\begin{align*}
cond_2(A) = \frac{\max_{1\leq i \leq n}\vert \lambda_i \vert }{\min_{1\leq i \leq n}\vert \lambda_i \vert },
\end{align*}
\end{exampleblock}

\end{small}
\end{frame}


\begin{frame}\frametitle{Example, ill-conditioned matrix}
\begin{small}
\ARROW The text-book example of an ill-conditioned matrix is the Hilbert matrix:
\begin{align*}
h_{i,j} = \frac{1}{i+j-1}
\end{align*}
\ARROWR Example:
\begin{align*}
h_{i,j}^{4 \times 4} = \begin{pmatrix}
1 & \frac{1}{2} & \frac{1}{3} & \frac{1}{4} \\
\frac{1}{2} & \frac{1}{3} & \frac{1}{4} & \frac{1}{5} \\
\frac{1}{3} & \frac{1}{4} & \frac{1}{5} & \frac{1}{6}\\
\frac{1}{4} & \frac{1}{5} & \frac{1}{6} & \frac{1}{7}
\end{pmatrix}
\end{align*}
\ARROW The condition of this matrix:
\begin{align*}
cond(A)=\mathcal{O}\left(\frac{e^{3.52N}}{\sqrt{N}}\right)
\end{align*}
\ARROW For $8 \times 8$ matrix we get:
\begin{align*}
cond_1(A)=3.387\cdot 10^{10},~~~cond_2(A)=1.526\cdot 10^{10},~~~~~cond_{\infty}(A)=3.387\cdot 10^{10}
\end{align*}
\ARROW Clearly large numbers ;)
\end{small}
\end{frame}

\begin{frame}\frametitle{Exact methods: Cramer method}
\begin{small}
\ARROW If $\det{A} \neq 0$ then the solutions are given by:
\begin{align*}
x_i =\frac{\det{A_i}}{\det{A}}
\end{align*}
\ARROW So to calculate the solutions one needs to calculate $n+1$ determinants. To calculate each determinant one needs $(n-1)n!$ multiplications. \\
\ARROW Putting it all together one needs $(n+1)(n-1)n! \leq n^{n+2}$ multiplications.\\
\ARROW Brute force but works ;)


\end{small}
\end{frame}





\begin{frame}\frametitle{Example, ill-conditioned matrix}
\begin{small}
\ARROW The text-book example of an ill-conditioned matrix is the Hilbert matrix:
\begin{align*}
h_{i,j} = \frac{1}{i+j-1}
\end{align*}
\ARROWR Example:
\begin{align*}
h_{i,j}^{4 \times 4} = \begin{pmatrix}
1 & \frac{1}{2} & \frac{1}{3} & \frac{1}{4} \\
\frac{1}{2} & \frac{1}{3} & \frac{1}{4} & \frac{1}{5} \\
\frac{1}{3} & \frac{1}{4} & \frac{1}{5} & \frac{1}{6}\\
\frac{1}{4} & \frac{1}{5} & \frac{1}{6} & \frac{1}{7}
\end{pmatrix}
\end{align*}
\ARROW The condition of this matrix:
\begin{align*}
cond(A)=\mathcal{O}\left(\frac{e^{3.52N}}{\sqrt{N}}\right)
\end{align*}
\ARROW For $8 \times 8$ matrix we get:
\begin{align*}
cond_1(A)=3.387\cdot 10^{10},~~~cond_2(A)=1.526\cdot 10^{10},~~~~~cond_{\infty}(A)=3.387\cdot 10^{10}
\end{align*}
\ARROW Clearly large numbers ;)
\end{small}
\end{frame}

\begin{frame}\frametitle{Exact methods: Cramer method}
\begin{small}
\ARROW If $\det{A} \neq 0$ then the solutions are given by:
\begin{align*}
x_i =\frac{\det{A_i}}{\det{A}}
\end{align*}
\ARROW So to calculate the solutions one needs to calculate $n+1$ determinants. To calculate each determinant one needs $(n-1)n!$ multiplications. \\
\ARROW Putting it all together one needs $(n+1)(n-1)n! \leq n^{n+2}$ multiplications.\\
\ARROW Brute force but works ;)


\end{small}
\end{frame}





\begin{frame}\frametitle{Exact methods: Gauss method}
\begin{small}

\ARROW The idea behind the Gauss method is simple: transform $A x =b$ to get the equivalent system $A^{\left[n\right]} x = b^{\left[n\right]}$ where $A^{\left[n\right]}$ is a triangular matrix:
\begin{align*}
A^{\left[n\right]} = \begin{pmatrix}
a^{\left[n\right]}_{11} & a^{\left[n\right]}_{12} & ... & a^{\left[n\right]}_{1n}\\
0 & a^{\left[n\right]}_{22} & ... & a^{\left[n\right]}_{2n} \\
...\\
0 & 0 & ... & a^{\left[n\right]}_{nn} \\
\end{pmatrix}
\end{align*}
\ARROWR The algorithm:
\ARROW To do so we calculate the: $d^{\left[1\right]}_{i,1}=\frac{a^{\left[1\right]}_{i1}}{a^{\left[1\right]}_{11}}$ \\
\ARROW The first row multiplied by the $d^{\left[1\right]}_{i,1}$ we subtract from the $i^{th}$ row.
\ARROW After this we get:

\begin{align*}
\begin{pmatrix}
a^{\left[ 1 \right]}_{11} & a^{\left[ 1 \right]}_{12} & ... & a^{\left[ 1 \right]}_{1n}\\
0 & a^{\left[ 1 \right]}_{22} & ... & a^{\left[ 1 \right]}_{2n} \\
...\\
0 & a^{\left[ 1 \right]}_{n2} & ... & a^{\left[ 1 \right]}_{nn} \\
\end{pmatrix} \overrightarrow{x}=
\begin{pmatrix}
b^{\left[ 1 \right]}_{1} \\
b^{\left[ 1 \right]}_{2} \\
... \\
b^{\left[ 1 \right]}_{n} \\
\end{pmatrix}
\end{align*}

\end{small}
\end{frame}



\begin{frame}\frametitle{Exact methods: Gauss method 2}
\begin{small}

\ARROW Now one needs to repeat the above $n$ times, moving one row down each time.
\begin{alertblock}{Cons:}
\ARROW The algorithm can be stopped by a division by zero.\\
\ARROW The method easily accumulates numerical errors.
\end{alertblock}

\begin{exampleblock}{Pros:}
\ARROWR The number of needed floating point operations is smaller than for the Cramer method.\\
\ARROWR Example for 15 equations: $1345$ vs $5 \cdot 10^{12}$.
\end{exampleblock}

\end{small}
\end{frame}



\begin{frame}\frametitle{Exact methods: modified Gauss method}
\begin{small}

\ARROW The biggest disadvantage of the Gauss method is the fact that we can encounter zero pivot elements :( \\
\ARROW The modified method fixes this problem :)\\
\ARROW The modification is as follows:
\begin{itemize}
\item In each step before we do the elimination we look for main element:
\begin{align*}
\vert a_{mk}^{\left[ k \right]} \vert = \max \lbrace \vert a_{jk}^{\left[ k \right]} \vert : j=k, k+1,..., n \rbrace
\end{align*}
\item We exchange the rows $m$ and $k$.
\item We do the standard elimination.
\end{itemize}

\end{small}
\end{frame}



\begin{frame}\frametitle{Exact methods: Jordan elimination method}
\begin{small}


\ARROW The Jordan elimination method is similar to the Gauss method, but the idea is to transform the system from $Ax =b$ to $I x = b^{\lfloor n+1 \rfloor}$.
\begin{enumerate}
\item We start by eliminating, as in the Gauss method, in the first row.
\item When we move to the second row we eliminate the $x_2$ element also from the first row.
\item In the third row we eliminate $x_3$ also from the first and second rows.
\item We repeat this $n$ times.
\end{enumerate}
\ARROW After this we will get new system $I x = b^{\lfloor n+1  \rfloor}$. \\
\ARROW The $b^{\lfloor n+1  \rfloor}$ is already the solution! No need to do more.

\end{small}
\end{frame}


\begin{frame}\frametitle{Exact methods: LU method}
\begin{small}


\ARROW The most popular of the exact methods is so-called LU method.\\
\ARROW The idea is very simple; we represent the matrix in a form:
\begin{align*}
A = &  \begin{pmatrix}
a_{11} & a_{12} & a_{13} & ... & a_{1n} \\
a_{21} & a_{22} & a_{23} & ... & a_{2n} \\
a_{31} & a_{32} & a_{33} & ... & a_{3n} \\
 & &  ... & & \\
a_{n1} & a_{n2} & a_{n3} & ... & a_{nn} \\
\end{pmatrix} = \\  & \begin{pmatrix}
1 & 0 & 0 & ... & 0 \\
l_{21} & 1 & 0 & ... & 0 \\
l_{31} & l_{32} & 1 & ... & 0 \\
 & &  ... & & \\
l_{n1} & l_{n2} & l_{n3} & ... & 1\\
\end{pmatrix} \cdot
\begin{pmatrix}
u_{11} & u_{12} & u_{13} & ... & u_{1n} \\
0 & u_{22} & u_{23} & ... & u_{2n} \\
0 & 0 & u_{33} & ... & u_{3n} \\
 & &  ... & & \\
0 & 0 & 0 & ... & u_{nn} 
\end{pmatrix} = L \cdot U
\end{align*}
\ARROW After this decomposition we need to solve:
\begin{align*}
\begin{cases}
L y & =b\\
U x & =y
\end{cases}
\end{align*}

\end{small}
\end{frame}

\begin{frame}\frametitle{Exact methods: LU method, algorithms}
\begin{small}
\ARROW We solve the following matrix: $A^{\lfloor 1 \rfloor} x = b^{\lfloor1 \rfloor}$\\
\ARROW We start by preparing the matrix $L^{\lfloor 1 \rfloor}$:
\begin{align*}
L^{\lfloor 1 \rfloor }=\begin{pmatrix}
1 & 0 & 0 & ... & 0 \\
-l_{21} & 1 & 0 & ... & 0\\
-l_{31} & 0 & 1 & ... & 0\\
& & ... & & \\
-l_{n1} & 0 & 0 & ... & 1
\end{pmatrix}
\end{align*}
where the $l_{i1}$ are defined:
\begin{align*}
l_{i1} = \frac{a_{i1}^{\lfloor 1 \rfloor}}{a_{11}^{\lfloor 1 \rfloor}}
\end{align*}
\ARROW Now we multiply our base equation by $L^{\lfloor 1 \rfloor }$:
\begin{align*}
L^{\lfloor 1 \rfloor } A^{\lfloor 1 \rfloor } x = L^{\lfloor 1 \rfloor } b^{\lfloor1 \rfloor}
\end{align*}
\ARROW We get a new system:
\begin{align*}
A^{\lfloor 2 \rfloor} x = b^{\lfloor2  \rfloor}~~~~~~~~\Leftrightarrow~~~~~~~~~~L^{\lfloor 1 \rfloor } A^{\lfloor 1 \rfloor } x = L^{\lfloor 1 \rfloor } b^{\lfloor1 \rfloor}
\end{align*}

\end{small}
\end{frame}

\begin{frame}\frametitle{Exact methods: LU method, algorithms}
\begin{small}
\ARROW In the second step we construct the $L^{\lfloor 2 \rfloor}$ in the form:
\begin{align*}
L^{\lfloor 2 \rfloor}=\begin{pmatrix}
1 & 0 & 0 & ... & 0 \\
0 & 1 & 0 & ... & 0\\
0 & -l_{32} & 1 & ... & 0\\
& & ... & & \\
0 & -l_{n2} & 0 & ... & 1
\end{pmatrix}
\end{align*}
where :
\begin{align*}
l_{i2} = \frac{a_{i2}^{\lfloor 2 \rfloor}}{a_{22}^{\lfloor 2 \rfloor}}
\end{align*}

\ARROW Now we take the $A^{\lfloor 2 \rfloor} x = b^{\lfloor2  \rfloor}$ and multiply it by  $L^{\lfloor 2 \rfloor}$:
\begin{align*}
L^{\lfloor 2 \rfloor } A^{\lfloor 2 \rfloor } x = L^{\lfloor 2 \rfloor } b^{\lfloor2 \rfloor}  
\end{align*}
\ARROW We get a new system:
\begin{align*}
A^{\lfloor 3 \rfloor} x = b^{\lfloor3  \rfloor}~~~~~~~~\Leftrightarrow~~~~~~~~~L^{\lfloor 2 \rfloor } L^{\lfloor 1 \rfloor } A^{\lfloor 1 \rfloor } x = L^{\lfloor 2 \rfloor } L^{\lfloor 1 \rfloor } b^{\lfloor1 \rfloor}
\end{align*}

\end{small}
\end{frame}


\begin{frame}\frametitle{Exact methods: LU method, algorithms}
\begin{small}

\ARROW Now we repeat the above steps $n-1$ times, after which we get:
\begin{align*}
& L^{\lfloor n-1 \rfloor }    L^{\lfloor n-2 \rfloor } ...    L^{\lfloor 2 \rfloor } L^{\lfloor 1 \rfloor } A^{\lfloor 1 \rfloor }  = A^{\lfloor n \rfloor } = U\\
& L^{\lfloor n-1 \rfloor }    L^{\lfloor n-2 \rfloor } ...    L^{\lfloor 2 \rfloor } L^{\lfloor 1 \rfloor } b^{\lfloor1 \rfloor} = b^{\lfloor n \rfloor}
\end{align*}
\ARROW From the above we can calculate:
\begin{align*}
A^{\lfloor 1 \rfloor } = \left( L^{\lfloor 1 \rfloor } \right)^{-1} \left( L^{\lfloor 2 \rfloor } \right)^{-1} ...  \left( L^{\lfloor n-1 \rfloor } \right)^{-1} A^{\lfloor n \rfloor }
\end{align*}
\ARROW So the matrix $L$ we search for is:
\begin{align*}
L= \left( L^{\lfloor 1 \rfloor } \right)^{-1} \left( L^{\lfloor 2 \rfloor } \right)^{-1} ...  \left( L^{\lfloor n-1 \rfloor } \right)^{-1} 
\end{align*}
\ARROW The $\left( L^{\lfloor k \rfloor } \right)^{-1}$ can be easily calculated:
\begin{footnotesize}
\begin{align*}
\left( L^{\lfloor k \rfloor } \right)^{-1} = \begin{pmatrix}
1 & 0 & ... & 0 & ... & 0 \\
0 & 1 & ... & 0 & ... & 0 \\
& & & ... & & \\
0 & 0 & ... & 1 & ... & 0 \\
0 & 0 & ... & l_{k+1 k} & ... & 0 \\
0 & 0 & ... & l_{k+2 k} & ... & 0 \\
& & & ... & & \\
0 & 0 & ... & l_{n k} & ... & 1 \\
\end{pmatrix}
\end{align*}
\end{footnotesize}

\end{small}
\end{frame}



\begin{frame}\frametitle{Exact methods: LU method, algorithms}
\begin{small}

\ARROW Now the only thing left is to solve the simple linear system:
\begin{align*}
\begin{cases}
L y & =b\\
U x & =y
\end{cases}
\end{align*}
\ARROW Because of the triangular matrix the solution is straightforward:
\begin{align*}
y_1=\frac{b_1}{L_{11}},~~~~~~~~~y_i=\frac{b_i - \sum_{j=1}^{i-1} L_{ij} y_j }{L_{ii}}~~{ i\geq 2}
\end{align*}

\end{small}
\end{frame}


\begin{frame}\frametitle{Summary}
\begin{small}

\ARROW In this lecture we learned the exact methods of solving linear equation systems.\\
\ARROW The three most popular ones are: Gauss, Jordan, LU.\\
\ARROW By default the LU method should be used.\\
\ARROW And remember: be sure the system is well conditioned!


\end{small}
\end{frame}






\backupbegin   

\begin{frame}\frametitle{Backup}


\end{frame}







\backupend			
\end{document}