\documentclass[11 pt,xcolor={dvipsnames,svgnames,x11names,table}]{beamer}

\usepackage[english]{babel} 
\usepackage{polski}         


\usetheme[
	bullet=circle,		% Other option: square
	bigpagenumber,		% circled page number on lower right
	topline=true,			% colored bar at the top of the frame 
	shadow=false,			% Shading for beamer blocks
	watermark=BG_lower,	% png file for the watermark
	]{Flip}

%\logo{\kern+1.em\includegraphics[height=1cm]{SHiP-3_LightCharcoal}}
                            

\usepackage[lf]{berenis}
\usepackage[LY1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{cases}
\usepackage{mathtools}
\usepackage{emerald}
\usefonttheme{professionalfonts}
\usepackage[no-math]{fontspec}		
\defaultfontfeatures{Mapping=tex-text}	% This seems to be important for mapping glyphs properly

\setmainfont{Gillius ADF}			% Beamer ignores "main font" in favor of sans font
\setsansfont{Gillius ADF}			% This is the font that beamer will use by default
% \setmainfont{Gill Sans Light}		% Prettier, but harder to read

\setbeamerfont{title}{family=\fontspec{Gillius ADF}}

\input t1augie.fd

%\newcommand{\handwriting}{\fontspec{augie}} % From Emerald City, free font
%\newcommand{\handwriting}{\usefont{T1}{fau}{m}{n}} % From Emerald City, free font
% \newcommand{\handwriting}{}	% If you prefer no special handwriting font or don't have augie

%% Gill Sans doesn't look very nice when boldfaced
%% This is a hack to use Helvetica instead
%% Usage: \textbf{\forbold some stuff}
%\newcommand{\forbold}{\fontspec{Arial}}

\usepackage{graphicx}
\usepackage[export]{adjustbox}

\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{colortbl}
\usepackage{mathrsfs} 			% For Weinberg-esque letters
\usepackage{cancel}				% For "SUSY-breaking" symbol
\usepackage{slashed}            % for slashed characters in math mode
\usepackage{bbm}                % for \mathbbm{1} (unit matrix)
\usepackage{amsthm}				% For theorem environment
\usepackage{multirow}			% For multi row cells in table
\usepackage{arydshln} 			% For dashed lines in arrays and tables
\usepackage{siunitx}
\usepackage{xhfill}
\usepackage{grffile}
\usepackage{textpos}
\usepackage{subfigure}
\usepackage{tikz}
\usepackage{hyperref}
%\usepackage{hepparticles}    
\usepackage[italic]{hepparticles}     

\usepackage{hepnicenames} 

% Drawing a line
\tikzstyle{lw} = [line width=20pt]
\newcommand{\topline}{%
  \tikz[remember picture,overlay] {%
    \draw[crimsonred] ([yshift=-23.5pt]current page.north west)
             -- ([yshift=-23.5pt,xshift=\paperwidth]current page.north west);}}



% % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
\usepackage{tikzfeynman}		% For Feynman diagrams
\usetikzlibrary{arrows,shapes}
\usetikzlibrary{trees}
\usetikzlibrary{matrix,arrows} 				% For commutative diagram
% http://www.felixl.de/commu.pdf
\usetikzlibrary{positioning}				% For "above of=" commands
\usetikzlibrary{calc,through}				% For coordinates
\usetikzlibrary{decorations.pathreplacing}  % For curly braces
% http://www.math.ucla.edu/~getreuer/tikz.html
\usepackage{pgffor}							% For repeating patterns

\usetikzlibrary{decorations.pathmorphing}	% For Feynman Diagrams
\usetikzlibrary{decorations.markings}
\tikzset{
	% >=stealth', %%  Uncomment for more conventional arrows
	vector/.style={decorate, decoration={snake}, draw},
	provector/.style={decorate, decoration={snake,amplitude=2.5pt}, draw},
	antivector/.style={decorate, decoration={snake,amplitude=-2.5pt}, draw},
	fermion/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{>}}}},
	fermionbar/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{<}}}},
	fermionnoarrow/.style={draw=gray},
	gluon/.style={decorate, draw=black,
		decoration={coil,amplitude=4pt, segment length=5pt}},
	scalar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	scalarbar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{<}}}},
	scalarnoarrow/.style={dashed,draw=black},
	electron/.style={draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	bigvector/.style={decorate, decoration={snake,amplitude=4pt}, draw},
}

% TIKZ - for block diagrams, 
% from http://www.texample.net/tikz/examples/control-system-principles/
% \usetikzlibrary{shapes,arrows}
\tikzstyle{block} = [draw, rectangle, 
minimum height=3em, minimum width=6em]




\usetikzlibrary{backgrounds}
\usetikzlibrary{mindmap,trees}	% For mind map
\newcommand{\degree}{\ensuremath{^\circ}}
\newcommand{\E}{\mathrm{E}}
\newcommand{\Var}{\mathrm{Var}}
\newcommand{\Cov}{\mathrm{Cov}}
\newcommand\Ts{\rule{0pt}{2.6ex}}       % Top strut
\newcommand\Bs{\rule[-1.2ex]{0pt}{0pt}} % Bottom strut

\graphicspath{{images/}}	% Put all images in this directory. Avoids clutter.

% SOME COMMANDS THAT I FIND HANDY
% \renewcommand{\tilde}{\widetilde} % dinky tildes look silly, dosn't work with fontspec
\newcommand{\comment}[1]{\textcolor{comment}{\footnotesize{#1}\normalsize}} % comment mild
\newcommand{\Comment}[1]{\textcolor{Comment}{\footnotesize{#1}\normalsize}} % comment bold
\newcommand{\COMMENT}[1]{\textcolor{COMMENT}{\footnotesize{#1}\normalsize}} % comment crazy bold
\newcommand{\Alert}[1]{\textcolor{Alert}{#1}} % louder alert
\newcommand{\ALERT}[1]{\textcolor{ALERT}{#1}} % loudest alert
%% "\alert" is already a beamer pre-defined
\newcommand*{\Scale}[2][4]{\scalebox{#1}{$#2$}}%

\def\Put(#1,#2)#3{\leavevmode\makebox(0,0){\put(#1,#2){#3}}}

\usepackage{gmp}
\usepackage[final]{feynmp-auto}

\usepackage[backend=bibtex,style=numeric-comp,firstinits=true]{biblatex}
\bibliography{bib}
\setbeamertemplate{bibliography item}[text]

\makeatletter\let\frametextheight\beamer@frametextheight\makeatother

% suppress frame numbering for backup slides
% you always need the appendix for this!
\newcommand{\backupbegin}{
	\newcounter{framenumberappendix}
	\setcounter{framenumberappendix}{\value{framenumber}}
}
\newcommand{\backupend}{
	\addtocounter{framenumberappendix}{-\value{framenumber}}
	\addtocounter{framenumber}{\value{framenumberappendix}} 
}


\definecolor{links}{HTML}{2A1B81}
%\hypersetup{colorlinks,linkcolor=,urlcolor=links}

% For shapo's formulas:
\def\lsi{\raise0.3ex\hbox{$<$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\def\gsi{\raise0.3ex\hbox{$>$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\newcommand{\lsim}{\mathop{\lsi}}
\newcommand{\gsim}{\mathop{\gsi}}
\newcommand{\wt}{\widetilde}
%\newcommand{\ol}{\overline}
\newcommand{\Tr}{\rm{Tr}}
\newcommand{\tr}{\rm{tr}}
\newcommand{\eqn}[1]{&\hspace{-0.7em}#1\hspace{-0.7em}&}
\newcommand{\vev}[1]{\rm{$\langle #1 \rangle$}}
\newcommand{\abs}[1]{\rm{$\left| #1 \right|$}}
\newcommand{\eV}{\rm{eV}}
\newcommand{\keV}{\rm{keV}}
\newcommand{\GeV}{\rm{GeV}}
\newcommand{\im}{\rm{Im}}
\newcommand{\disp}{\displaystyle}
\def\be{\begin{equation}}
\def\ee{\end{equation}}
\def\ba{\begin{eqnarray}}
\def\ea{\end{eqnarray}}
\def\d{\partial}
\def\l{\left(}
\def\r{\right)}
\def\la{\langle}
\def\ra{\rangle}
\def\e{{\rm e}}
\def\Br{{\rm Br}}
\def\fixme{{\color{red} FIXME!}}
\def\mc{{\color{Magenta}{MC}}\xspace}
\def\pdf{{\rm p.d.f.}}
\def\ARROW{{\color{JungleGreen}{$\Rrightarrow$}}\xspace}
\def\ARROWR{{\color{WildStrawberry}{$\Rrightarrow$}}\xspace} 

\author{ {\fontspec{Trebuchet MS}Marcin Chrz\k{a}szcz, Danny van Dyk} (UZH)}
\institute{UZH}
\title[Linear equation systems: iteration methods]{Linear equation systems: iteration methods}
\date{21 September 2016}


\begin{document}
\tikzstyle{every picture}+=[remember picture]

{
\setbeamertemplate{sidebar right}{\llap{\includegraphics[width=\paperwidth,height=\paperheight]{bubble2}}}
\begin{frame}[c]%{\phantom{title page}} 
\begin{center}
\begin{center}
	\begin{columns}
		\begin{column}{0.9\textwidth}
			\flushright\fontspec{Trebuchet MS}\bfseries \Huge {Linear equation systems: iteration methods}
		\end{column}
		\begin{column}{0.2\textwidth}
		  %\includegraphics[width=\textwidth]{SHiP-2}
		\end{column}
	\end{columns}
\end{center}
	\quad
	\vspace{3em}
\begin{columns}
\begin{column}{0.6\textwidth}
\flushright \vspace{-1.8em} {\fontspec{Trebuchet MS} \large Marcin Chrz\k{a}szcz, Danny van Dyk\\\vspace{-0.1em}\small \href{mailto:mchrzasz@cern.ch}{mchrzasz@cern.ch}, \href{mailto:dany.van.dyk@gmail.com}{danny.van.dyk@gmail.com}}

\end{column}
\begin{column}{0.4\textwidth}
\includegraphics[height=1.3cm]{uzh-transp}
\end{column}
\end{columns}

\vspace{1em}
%		\footnotesize\textcolor{gray}{With N. Serra, B. Storaci\\Thanks to the theory support from M. Shaposhnikov, D. Gorbunov}\normalsize\\
\vspace{0.5em}
	\textcolor{normal text.fg!50!Comment}{Numerical Methods, \\ 17 October, 2016}
\end{center}
\end{frame}
}


\begin{frame}\frametitle{Linear eq. system}

\ARROW This and the next lecture focus on a well-known problem: solving the equation system
\begin{align*}
A \cdot x = b,
\end{align*} 
\ARROWR where $A = (a_{ij}) \in \mathbb{R}^{n\times n}$ and $\det(A) \neq 0$,\\
\ARROWR and $b = (b_i) \in  \mathbb{R}^n$.\\
\ARROW The problem: find the vector $x$.
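\ARROWR A tiny concrete example (numbers chosen here just for illustration):
\begin{align*}
\begin{pmatrix} 2 & 1 \\ 1 & 3 \end{pmatrix}
\begin{pmatrix} x_1 \\ x_2 \end{pmatrix}
=
\begin{pmatrix} 3 \\ 5 \end{pmatrix}
\quad\Rightarrow\quad
x = \begin{pmatrix} 4/5 \\ 7/5 \end{pmatrix}.
\end{align*}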


\end{frame}



\begin{frame}\frametitle{LU method with main element selection}
\begin{small}
\ARROW The algorithm for the LU matrix decomposition from the previous lecture does not include the selection of the main element (pivoting). \\
\ARROW This can be problematic when a pivot element is very small, for example (blackboard example); a standard illustration:
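\begin{align*}
A = \begin{pmatrix} \varepsilon & 1 \\ 1 & 1 \end{pmatrix}, \qquad 0 < \varepsilon \ll 1.
\end{align*}
\ARROWR Without pivoting the elimination divides by the tiny pivot $\varepsilon$, and the huge factor $1/\varepsilon$ amplifies rounding errors; swapping the two rows (selecting the main element) avoids this.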




\end{small}
\end{frame}



\begin{frame}\frametitle{Cholesky decomposition}
\begin{small}

\ARROW If $A \in \mathbb{R}^{N \times N}$ is a symmetric ($A^T=A$) and positive definite matrix, then there exists a special case of the LU factorization such that:
\begin{align*}
A = C C^T
\end{align*}
where $C$ is a triangular matrix with diagonal elements greater than zero. \\
\ARROW Finding the Cholesky decomposition is about twice as fast as finding the LU decomposition. \\
\ARROW The Cholesky decomposition uses essentially the same algorithm as the LU decomposition.
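\ARROWR Explicitly (taking $C$ lower triangular), the entries can be computed column by column:
\begin{align*}
c_{jj} = \sqrt{a_{jj} - \sum_{k=1}^{j-1} c_{jk}^2}, \qquad
c_{ij} = \frac{1}{c_{jj}}\left( a_{ij} - \sum_{k=1}^{j-1} c_{ik} c_{jk} \right),~~ i > j.
\end{align*}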



\end{small}
\end{frame}


\begin{frame}\frametitle{LDL factorization}
\begin{small}

\ARROW If a matrix has a Cholesky decomposition, it can also be decomposed as:
\begin{align*}
A=LDL^T
\end{align*}
where $L$ is a lower triangular matrix such that $\forall_i : l_{ii}=1$ and $D$ is a diagonal matrix with positive elements. \\
\ARROW The advantage of the LDL decomposition compared to the Cholesky decomposition is that we do not need to take square roots in the calculations.
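\ARROWR Explicitly, column by column:
\begin{align*}
d_j = a_{jj} - \sum_{k=1}^{j-1} l_{jk}^2\, d_k, \qquad
l_{ij} = \frac{1}{d_j}\left( a_{ij} - \sum_{k=1}^{j-1} l_{ik} l_{jk}\, d_k \right),~~ i > j.
\end{align*}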



\end{small}
\end{frame}



\begin{frame}\frametitle{Iterative methods}
\begin{small}

\ARROW The exact methods are the ones that require more computation to obtain the solution.\\
\ARROWR Because of this they are not well suited for solving big linear systems.\\
\ARROW The iterative methods are simple; their main idea is to transform:
\begin{align*}
Ax=b
\end{align*}
into:
\begin{align*}
x= Mx + c
\end{align*}
where $A$ and $M$ are matrices of size $n \times n$, \\
and $b$ and $c$ are vectors of size $n$.\\
\ARROW Once we have the second form (by the way, remember the MC lectures?) we can use iterative methods to solve it.

\end{small}
\end{frame}


\begin{frame}\frametitle{Expansion}
\begin{small}

\ARROW If $\overline{x}$ is the solution of the $A x = b$ system, then also:
\begin{align*}
\overline{x} = M \overline{x} +c
\end{align*}
\ARROW We construct a sequence that approximates $\overline{x}$ in the following way:
\begin{align*}
x^{(k+1)} = M x^{(k)} + c,~~~k=0,1,...
\end{align*}
\ARROW The necessary and sufficient condition for the convergence of this sequence is:
\begin{align*}
\rho(M)<1
\end{align*}
where $\rho(M)$ is the spectral radius of $M$ (the largest eigenvalue modulus).
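\ARROWR A tiny illustration: for
\begin{align*}
M = \begin{pmatrix} 0 & \tfrac{1}{2} \\ \tfrac{1}{2} & 0 \end{pmatrix}
\end{align*}
the eigenvalues are $\pm\tfrac{1}{2}$, so $\rho(M)=\tfrac{1}{2}<1$ and the error shrinks roughly by a factor $\rho(M)$ per iteration.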



\end{small}
\end{frame}


\begin{frame}\frametitle{Jacobi method}
\begin{small}

\ARROW The simplest method is the Jacobi method. \\
\ARROW We start from a decomposition of the $A$ matrix:
\begin{align*}
A = L  + U +D
\end{align*}
where
\begin{itemize}
\item $L=(a_{ij})_{i>j}$ -  lower triangular matrix,
\item $D=(a_{ij})_{i=j}$ - diagonal matrix,
\item $U=(a_{ij})_{i<j}$ - upper triangular matrix.
\end{itemize}
\ARROW Now the system becomes:
\begin{align*}
(L+D+U)x =b
\end{align*}
\ARROW Rearranging it:
\begin{align*}
D x = - (L+U)x +b
\end{align*}
\end{align*}


\end{small}
\end{frame}

\begin{frame}\frametitle{Jacobi method}
\begin{small}

\ARROW Now the $D$ matrix can easily be inverted (it is diagonal!):
\begin{align*}
x =  -D^{-1} (L+U) x  + D^{-1}b
\end{align*}
\ARROW So the iteration takes the form:
\begin{align*}
x^{(k+1)} =  -D^{-1} (L+U) x^{(k)} + D^{-1}b
\end{align*}
\ARROW The matrix:
\begin{align*}
M_J= - D^{-1}(L+U)
\end{align*}
is called the Jacobi matrix.


\end{small}
\end{frame}


\begin{frame}\frametitle{Jacobi method}
\begin{small}


\ARROW The algorithm:
\begin{itemize}
\item Construct the matrix $A$.
\item Choose the precision of your calculations, $\epsilon$.
\item Decompose the $A$ matrix into $L$, $D$, $U$.
\item Calculate the Jacobi matrix $M_J$.
\item Check the convergence of the method:
\begin{itemize}
\item Calculate $\rho(M_J)$.
\item Check the convergence conditions.
\item If both are ok then continue, else stop and go home ;)
\end{itemize}
\item Choose the starting point $x^{(0)}$.
\item Calculate the $(k+1)$-th point $x^{(k+1)}$.
\item Calculate the difference in each step:
\begin{align*}
\Vert x^{(k+1)} - x^{(k)} \Vert
\end{align*}
\item If the above is smaller than $\epsilon$, stop and take $x^{(k+1)}$ as the solution.
\end{itemize}
\ARROW A minimal code sketch of this procedure is shown on the next slide.



\end{small}
\end{frame}
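
\begin{frame}[fragile]\frametitle{Jacobi method: code sketch}
\begin{small}

\ARROW A minimal sketch of the above procedure in Python (assuming NumPy; names and the tolerance are illustrative, the convergence check of $M_J$ is omitted):
\begin{verbatim}
import numpy as np

def jacobi(A, b, x0, eps=1e-10, kmax=1000):
    D = np.diag(A)                # diagonal entries of A
    R = A - np.diagflat(D)        # L + U
    x = np.asarray(x0, dtype=float)
    for _ in range(kmax):
        x_new = (b - R @ x) / D   # -D^{-1}(L+U)x + D^{-1}b
        if np.linalg.norm(x_new - x) < eps:
            return x_new
        x = x_new
    return x
\end{verbatim}

\end{small}
\end{frame}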





\begin{frame}\frametitle{Gauss-Seidel method}
\begin{small}


\ARROW A different iterative method is the so-called Gauss-Seidel method.\\
\ARROW We start as before: $(L+ D +U)x =b$.\\
\ARROW Write the equation in the form:
\begin{align*}
(L+D)x = -Ux +b
\end{align*}
\ARROW The $(L+D)$ matrix is triangular and can easily be inverted.\\
\ARROW From this one gets:
\begin{align*}
x= -(L+D)^{-1}  U x +(L+D)^{-1} b
\end{align*}
\ARROW So the iteration equation takes the form (componentwise):
\begin{align*}
x_i^{(k+1)} = -\frac{1}{a_{ii}} \left( \sum_{j<i} a_{ij} x_j^{(k+1)} + \sum_{j>i}a_{ij}x_{j}^{(k)} \right) + \frac{b_i}{a_{ii}}
\end{align*}
\ARROW The matrix:
\begin{align*}
M_{GS} = -(L+D)^{-1} U
\end{align*}
is the so-called Gauss-Seidel matrix.

\end{small}
\end{frame}
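
\begin{frame}[fragile]\frametitle{Gauss-Seidel method: code sketch}
\begin{small}

\ARROW A minimal Python sketch of the componentwise iteration above (assuming NumPy; names are illustrative):
\begin{verbatim}
import numpy as np

def gauss_seidel(A, b, x0, eps=1e-10, kmax=1000):
    n = len(b)
    x = np.asarray(x0, dtype=float)
    for _ in range(kmax):
        x_old = x.copy()
        for i in range(n):
            s1 = A[i, :i] @ x[:i]          # updated x_j, j < i
            s2 = A[i, i+1:] @ x_old[i+1:]  # old x_j, j > i
            x[i] = (b[i] - s1 - s2) / A[i, i]
        if np.linalg.norm(x - x_old) < eps:
            return x
    return x
\end{verbatim}

\end{small}
\end{frame}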


\begin{frame}\frametitle{Convergence of Gauss-Seidel and Jacobi methods}
\begin{small}

\begin{exampleblock}{Reminder:}
The necessary and sufficient condition for the iteration to be convergent is that:
\begin{align*}
\rho(M)<1
\end{align*}
where $M$ is the matrix of a given method.
\end{exampleblock}

\ARROW Now, calculating $\rho(M)$ might already be computationally expensive...\\
\ARROW A very useful way of estimating $\rho(M_J)$ is:
\begin{align*}
r_J = \frac{\Vert x_{k+1} - x_k \Vert}{\Vert x_k - x_{k-1} \Vert} \approx \rho(M_J)
\end{align*} 
\ARROW Another useful relation (it holds e.g. for tridiagonal matrices):
\begin{align*}
\rho(M_{GS}) = \rho(M_J)^2
\end{align*}
\end{align*}



\end{small}
\end{frame}
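
\begin{frame}[fragile]\frametitle{Checking the convergence criterion: code sketch}
\begin{small}

\ARROW For small test matrices one can check the criterion $\rho(M_J)<1$ directly; a minimal Python sketch (assuming NumPy):
\begin{verbatim}
import numpy as np

def jacobi_spectral_radius(A):
    # M_J = -D^{-1}(L+U)
    D = np.diagflat(np.diag(A))
    M_J = -np.linalg.inv(D) @ (A - D)
    return max(abs(np.linalg.eigvals(M_J)))
\end{verbatim}
\ARROWR For large systems this is exactly the expensive step discussed on the next slide, so in practice one relies on the ratio $r_J$ instead.

\end{small}
\end{frame}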


\begin{frame}\frametitle{Convergence of Gauss-Seidel and Jacobi methods}
\begin{small}

\begin{alertblock}{Theorem:}
If the matrix $(a_{ij})$ fulfils one of the conditions:\\
\ARROW $\vert a_{ii} \vert >  \sum_{j \neq i} \vert a_{ij} \vert,~~ i=1,...,n$ (the so-called strong row criterion),\\
\ARROW $\vert a_{jj} \vert > \sum_{i \neq j} \vert a_{ij} \vert,~~ j=1,...,n$ (the so-called strong column criterion),\\
then both the Jacobi and the Gauss-Seidel iterations converge.
\end{alertblock}

\begin{exampleblock}{Practical note:}
\ARROWR One needs to note that calculating $\rho(M)$ might be as time-consuming as computing the solution itself...\\
\ARROWR If that is the case, one usually skips this step and instead computes the solution, checking the convergence with extra care at each step.

\end{exampleblock}


\end{small}
\end{frame}




\begin{frame}\frametitle{SOR method}
\begin{small}

\ARROW The Successive Over-Relaxation method is an extension of the Gauss-Seidel method. \\
\ARROW The modification is that we reuse the previous step to get a more precise approximation of the solution. \\
\ARROW The ``hack'' happens when we calculate $x^{(k+1)}_{GS}$ using the Gauss-Seidel method. Instead of taking $x^{(k+1)}_{GS}$ as the next step, we ``relax'' the update using a linear combination:
\begin{align*}
x^{(k+1)} =\omega x^{(k+1)}_{GS} + (1-\omega) x^{(k)}
\end{align*}
\ARROW From the above one can see that for $\omega=1$ we recover the standard Gauss-Seidel method. \\
\ARROW The iteration method equation has the form:
\begin{align*}
x^{(k+1)} = (D+ \omega L)^{-1} ((1-\omega)D - \omega U)x^{(k)} + (D + \omega L)^{-1} \omega b
\end{align*}




\end{small}
\end{frame}
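
\begin{frame}[fragile]\frametitle{SOR method: code sketch}
\begin{small}

\ARROW A minimal Python sketch of the SOR iteration above (assuming NumPy; the choice of $\omega$ is left to the user):
\begin{verbatim}
import numpy as np

def sor(A, b, x0, omega, eps=1e-10, kmax=1000):
    n = len(b)
    x = np.asarray(x0, dtype=float)
    for _ in range(kmax):
        x_old = x.copy()
        for i in range(n):
            s1 = A[i, :i] @ x[:i]
            s2 = A[i, i+1:] @ x_old[i+1:]
            x_gs = (b[i] - s1 - s2) / A[i, i]   # Gauss-Seidel value
            x[i] = omega * x_gs + (1 - omega) * x_old[i]
        if np.linalg.norm(x - x_old) < eps:
            return x
    return x
\end{verbatim}

\end{small}
\end{frame}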




\begin{frame}\frametitle{SOR method, convergence}
\begin{small}

\ARROW The main difficulty in the SOR method is the fact that we need to choose the $\omega$ parameter. \\
\ARROW One can easily prove that if the method converges, then $\omega \in \left(0, 2 \right)$. \\
\ARROW This is a necessary condition, but it is not sufficient! \\
\ARROW If $A$ is symmetric and positive definite, then the above condition is also sufficient!
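\ARROW For matrices for which $\rho(M_{GS})=\rho(M_J)^2$ holds (e.g. tridiagonal ones), the optimal choice is known:
\begin{align*}
\omega_{\rm opt} = \frac{2}{1+\sqrt{1-\rho(M_J)^2}}
\end{align*}
\ARROWR In practice $\rho(M_J)$ is usually only estimated, so $\omega$ is often tuned empirically around this value.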




\end{small}
\end{frame}





\begin{frame}\frametitle{Conclusions}
\begin{small}

\ARROW We learned more exact methods of solving linear systems.\\
\ARROW For big matrices we need to use iterative methods, which are not exact.\\
\ARROW My personal experience: I have never had matrices big enough to really need the iterative methods, but this is field dependent!


\end{small}
\end{frame}




\backupbegin   

\begin{frame}\frametitle{Backup}


\end{frame}







\backupend			
\end{document}