% Newer
% Older
% Lecture_repo / Lectures_my / EMPP / 2018 / Lecture3 / mchrzasz.tex
% @Marcin Chrzaszcz Marcin Chrzaszcz on 29 Oct 2018 34 KB lectures done
\documentclass[11pt,xcolor={dvipsnames,svgnames,x11names,table}]{beamer}

\usepackage[english]{babel} 
\usepackage{polski}         
\usepackage[skins,theorems]{tcolorbox}
\tcbset{highlight math style={enhanced,
  colframe=red,colback=white,arc=0pt,boxrule=1pt}}

\usetheme[
	bullet=circle,		% Other option: square
	bigpagenumber,		% circled page number on lower right
	topline=true,			% colored bar at the top of the frame 
	shadow=false,			% Shading for beamer blocks
	watermark=BG_lower,	% png file for the watermark
	]{Flip}

%\logo{\kern+1.em\includegraphics[height=1cm]{SHiP-3_LightCharcoal}}
                            

\usepackage[lf]{berenis}
\usepackage[LY1]{fontenc}
\usepackage[utf8]{inputenc}


\usefonttheme{professionalfonts}
\usepackage[no-math]{fontspec}	
\usepackage{listings}
\defaultfontfeatures{Mapping=tex-text}	% This seems to be important for mapping glyphs properly

\setmainfont{Gillius ADF}			% Beamer ignores "main font" in favor of sans font
\setsansfont{Gillius ADF}			% This is the font that beamer will use by default
% \setmainfont{Gill Sans Light}		% Prettier, but harder to read

\setbeamerfont{title}{family=\fontspec{Gillius ADF}}

\input t1augie.fd

%\newcommand{\handwriting}{\fontspec{augie}} % From Emerald City, free font
%\newcommand{\handwriting}{\usefont{T1}{fau}{m}{n}} % From Emerald City, free font
% \newcommand{\handwriting}{}	% If you prefer no special handwriting font or don't have augie

%% Gill Sans doesn't look very nice when boldfaced
%% This is a hack to use Helvetica instead
%% Usage: \textbf{\forbold some stuff}
%\newcommand{\forbold}{\fontspec{Arial}}

\usepackage{graphicx}
\usepackage[export]{adjustbox}

\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{colortbl}
\usepackage{mathrsfs} 			% For Weinberg-esque letters
\usepackage{cancel}				% For "SUSY-breaking" symbol
\usepackage{slashed}            % for slashed characters in math mode
\usepackage{bbm}                % for \mathbbm{1} (unit matrix)
\usepackage{amsthm}				% For theorem environment
\usepackage{multirow}			% For multi row cells in table
\usepackage{arydshln} 			% For dashed lines in arrays and tables
\usepackage{siunitx}
\usepackage{xhfill}
\usepackage{grffile}
\usepackage{textpos}
\usepackage{subfigure}
\usepackage{tikz}
\usepackage{hyperref}
%\usepackage{hepparticles}    
\usepackage[italic]{hepparticles}     

\usepackage{hepnicenames} 

% Drawing a line
\tikzstyle{lw} = [line width=20pt]
\newcommand{\topline}{%
  \tikz[remember picture,overlay] {%
    \draw[crimsonred] ([yshift=-23.5pt]current page.north west)
             -- ([yshift=-23.5pt,xshift=\paperwidth]current page.north west);}}



% % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
\usepackage{tikzfeynman}		% For Feynman diagrams
\usetikzlibrary{arrows,shapes}
\usetikzlibrary{trees}
\usetikzlibrary{matrix,arrows} 				% For commutative diagram
% http://www.felixl.de/commu.pdf
\usetikzlibrary{positioning}				% For "above of=" commands
\usetikzlibrary{calc,through}				% For coordinates
\usetikzlibrary{decorations.pathreplacing}  % For curly braces
% http://www.math.ucla.edu/~getreuer/tikz.html
\usepackage{pgffor}							% For repeating patterns

\usetikzlibrary{decorations.pathmorphing}	% For Feynman Diagrams
\usetikzlibrary{decorations.markings}
\tikzset{
	% >=stealth', %%  Uncomment for more conventional arrows
	vector/.style={decorate, decoration={snake}, draw},
	provector/.style={decorate, decoration={snake,amplitude=2.5pt}, draw},
	antivector/.style={decorate, decoration={snake,amplitude=-2.5pt}, draw},
	fermion/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{>}}}},
	fermionbar/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{<}}}},
	fermionnoarrow/.style={draw=gray},
	gluon/.style={decorate, draw=black,
		decoration={coil,amplitude=4pt, segment length=5pt}},
	scalar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	scalarbar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{<}}}},
	scalarnoarrow/.style={dashed,draw=black},
	electron/.style={draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	bigvector/.style={decorate, decoration={snake,amplitude=4pt}, draw},
}

% TIKZ - for block diagrams, 
% from http://www.texample.net/tikz/examples/control-system-principles/
% \usetikzlibrary{shapes,arrows}
\tikzstyle{block} = [draw, rectangle, 
minimum height=3em, minimum width=6em]




\usetikzlibrary{backgrounds}
\usetikzlibrary{mindmap,trees}	% For mind map
\newcommand{\degree}{\ensuremath{^\circ}}
\newcommand{\E}{\mathrm{E}}
\newcommand{\Var}{\mathrm{Var}}
\newcommand{\Cov}{\mathrm{Cov}}
\newcommand\Ts{\rule{0pt}{2.6ex}}       % Top strut
\newcommand\Bs{\rule[-1.2ex]{0pt}{0pt}} % Bottom strut

\graphicspath{{images/}}	% Put all images in this directory. Avoids clutter.

% SOME COMMANDS THAT I FIND HANDY
% \renewcommand{\tilde}{\widetilde} % dinky tildes look silly, dosn't work with fontspec
%\newcommand{\comment}[1]{\textcolor{comment}{\footnotesize{#1}\normalsize}} % comment mild
%\newcommand{\Comment}[1]{\textcolor{Comment}{\footnotesize{#1}\normalsize}} % comment bold
%\newcommand{\COMMENT}[1]{\textcolor{COMMENT}{\footnotesize{#1}\normalsize}} % comment crazy bold
\newcommand{\Alert}[1]{\textcolor{Alert}{#1}} % louder alert
\newcommand{\ALERT}[1]{\textcolor{ALERT}{#1}} % loudest alert
%% "\alert" is already a beamer pre-defined
\newcommand*{\Scale}[2][4]{\scalebox{#1}{$#2$}}%

\def\Put(#1,#2)#3{\leavevmode\makebox(0,0){\put(#1,#2){#3}}}

\usepackage{gmp}
\usepackage[final]{feynmp-auto}

\usepackage[backend=bibtex,style=numeric-comp,firstinits=true]{biblatex}
\bibliography{bib}
\setbeamertemplate{bibliography item}[text]

\makeatletter\let\frametextheight\beamer@frametextheight\makeatother

% suppress frame numbering for backup slides
% you always need the appendix for this!
% Save the frame counter when entering the appendix, and restore it on exit,
% so backup slides do not inflate the visible page count.
% NOTE(review): \newcounter inside \backupbegin means the pair can only be
% used ONCE per document (a second \backupbegin errors on redefinition) --
% acceptable for a single appendix, as used here.
% Trailing "%" guards prevent the macro from emitting spurious spaces.
\newcommand{\backupbegin}{%
	\newcounter{framenumberappendix}%
	\setcounter{framenumberappendix}{\value{framenumber}}%
}
\newcommand{\backupend}{%
	\addtocounter{framenumberappendix}{-\value{framenumber}}%
	\addtocounter{framenumber}{\value{framenumberappendix}}%
}


\definecolor{links}{HTML}{2A1B81}
%\hypersetup{colorlinks,linkcolor=,urlcolor=links}

% For shapo's formulas:

% For shapo's formulas:
\def\lsi{\raise0.3ex\hbox{$<$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\def\gsi{\raise0.3ex\hbox{$>$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\newcommand{\lsim}{\mathop{\lsi}}
\newcommand{\gsim}{\mathop{\gsi}}
\newcommand{\wt}{\widetilde}
%\newcommand{\ol}{\overline}
% Upright operator/unit shorthands.
% The old bodies used "\rm{Tr}" etc.: since the macro body has no enclosing
% group, \rm leaked the roman font to everything following the macro call.
% \ensuremath{\mathrm{...}} keeps the font change local and makes each macro
% usable in both text and math mode.
\newcommand{\Tr}{\ensuremath{\mathrm{Tr}}}
\newcommand{\tr}{\ensuremath{\mathrm{tr}}}
\newcommand{\eqn}[1]{&\hspace{-0.7em}#1\hspace{-0.7em}&}
% \vev/\abs previously hard-coded "$...$" inside the body, which broke when
% the macro was used inside math mode; \ensuremath handles both contexts.
\newcommand{\vev}[1]{\ensuremath{\langle #1 \rangle}}
\newcommand{\abs}[1]{\ensuremath{\left| #1 \right|}}
\newcommand{\eV}{\ensuremath{\mathrm{eV}}}
\newcommand{\keV}{\ensuremath{\mathrm{keV}}}
\newcommand{\GeV}{\ensuremath{\mathrm{GeV}}}
\newcommand{\im}{\ensuremath{\mathrm{Im}}}
\newcommand{\disp}{\displaystyle}
\def\be{\begin{equation}}
\def\ee{\end{equation}}
\def\ba{\begin{eqnarray}}
\def\ea{\end{eqnarray}}
\def\d{\partial}
\def\l{\left(}
\def\r{\right)}
\def\la{\langle}
\def\ra{\rangle}
\def\e{{\rm e}}
\def\Br{{\rm Br}}
\def\fixme{{\color{red} FIXME!}}
\def\mc{{\color{Magenta}{MC}}}
\def\pdf{{\rm p.d.f.}}
\def\cdf{{\rm c.d.f.}}
\def\ARROW{{\color{JungleGreen}{$\Rrightarrow$}}\xspace}  
\def\ARROWR{{\color{WildStrawberry}{$\Rrightarrow$}}\xspace}  

% Superseded by the \author/\title block below (this talk is "Markov Chain MC").
%\author{ {\fontspec{Trebuchet MS}Marcin Chrz\k{a}szcz} (CERN)}
%\institute{UZH}
%\title[Specific \pdf~generation]{Specific \pdf~generation}
%\date{\fixme}
\newcommand*{\QEDA}{\hfill\ensuremath{\blacksquare}}%
\newcommand*{\QEDB}{\hfill\ensuremath{\square}}%

\author{ {\fontspec{Trebuchet MS}Marcin Chrz\k{a}szcz} (CERN)}
\institute{UZH}
\title[Markov Chain MC]{Markov Chain MC}
\date{\fixme}


\begin{document}
\tikzstyle{every picture}+=[remember picture]

{
\setbeamertemplate{sidebar right}{\llap{\includegraphics[width=\paperwidth,height=\paperheight]{bubble2}}}
\begin{frame}[c]%{\phantom{title page}} 
\begin{center}
\begin{center}
	\begin{columns}
		\begin{column}{0.9\textwidth}
			\flushright\fontspec{Trebuchet MS}\bfseries \Huge {Markov Chain MC}
		\end{column}
		\begin{column}{0.2\textwidth}
		  %\includegraphics[width=\textwidth]{SHiP-2}
		\end{column}
	\end{columns}
\end{center}
	\quad
	\vspace{3em}
\begin{columns}
\begin{column}{0.44\textwidth}
\flushright \vspace{-1.8em} {\fontspec{Trebuchet MS} \Large Marcin Chrz\k{a}szcz\\\vspace{-0.1em}\small \href{mailto:mchrzasz@cern.ch}{mchrzasz@cern.ch}}

\end{column}
\begin{column}{0.53\textwidth}
\hspace{0.3\textwidth}\includegraphics[height=1.3cm]{cern}   
\end{column}
\end{columns}

\vspace{1em}
%		\footnotesize\textcolor{gray}{With N. Serra, B. Storaci\\Thanks to the theory support from M. Shaposhnikov, D. Gorbunov}\normalsize\\
\vspace{0.5em}
	\textcolor{normal text.fg!50!Comment}{Experimental Methods in Particle Physics, \\ 7 October, 2018}
	
\end{center}
\end{frame}
}



\begin{frame}\frametitle{Trivial example}
\begin{minipage}{\textwidth}
\ARROW Let's start with a TRIVIAL example: we want to calculate $S=A+B$. We can rewrite it as:
\begin{align*}
S=p \frac{A}{p}+(1-p) \frac{B}{1-p}
\end{align*} 
and one can interpret the sum as expected value of:
\begin{align*}
W=\begin{cases}
\frac{A}{p}~~{ \rm with~probability~} p \\
\frac{B}{1-p}~~{ \rm with~probability~} 1-p
\end{cases}
\end{align*}
\ARROW The algorithm:
\begin{itemize}
\item We generate a random variable $W$ and calculate:
\begin{align*}
\hat{S}=\frac{1}{N}\sum_{i=1}^N W_i
\end{align*}
\ARROW The $\hat{S}$ is an unbiased estimator of $S$.
\end{itemize}



\end{minipage}

\end{frame}

\begin{frame}\frametitle{Trivial example2 }
\begin{minipage}{\textwidth}
\begin{itemize}
\item Let's say we have a linear equation system:
\end{itemize}
\begin{equation}
\begin{array}{lcl} X & = & pY + (1-p) A \\ Y & = & qX + (1-q)B \end{array} \nonumber
\end{equation}
\begin{itemize}
\item We know $A,B,p,q$; $X$ and $Y$ are meant to be determined.
\item Algorithm:
\begin{enumerate}
\item We choose first element of the first equation with probability $p$ and second with probability $1-p$.
\item If we choose the second one, the outcome of this MCMC is $W=A$.
\item If we choose the first we go to second equation and choose the first element with probability $q$ and the second with $1-q$.
\item If we choose the second one, the outcome of this MCMC is $W=B$.
\item If we choose the first we go to the first equation back again.
\item We repeat the procedure.
\end{enumerate}
\item We can estimate the solution of this system:
\end{itemize}
\begin{equation}
\hat{X} = \dfrac{1}{N}\sum_{i=1}^N W_i{~}{~}{~}{~}{~} \hat{\sigma_X}=\dfrac{1}{\sqrt{N-1}}\sqrt{\dfrac{1}{N} \sum_{i=1}^N W_i^2-\hat{X}^2} \nonumber
\end{equation}

\end{minipage}

\end{frame}




\begin{frame}\frametitle{Random walk}
\begin{minipage}{\textwidth}
\begin{footnotesize}

\begin{center}
\includegraphics[width=0.8\textwidth]{images/walk.png}
\end{center}
\ARROW We are in the point $x$ and we walk accordingly to the following rules:
\begin{itemize}
\item From point $x$ we walk with probability $p$ to point $y$ or with $1-p$ to $a$.
\item From point $y$ we walk with probability $q$ to point $x$ and with $1-q$ to $b$.
\item The walk ends when you end up in $a$ or $b$.
\item You get a ``reward'' $A$ if you end up in point $a$ and $B$ if you end up in $b$. 
\item $X$ is the expected ``reward'' when you start the walk from $x$, $Y$ when you start from $y$.
\end{itemize}
\ARROW The algorithm above is so-called random walk on the set $\lbrace a,x,y,b \rbrace$\\
\ARROW The described walk can solve the linear equation system that we discussed above.

\end{footnotesize}

\end{minipage}
\end{frame}

\begin{frame}\frametitle{Markov Chain MC}
\begin{footnotesize}
\begin{itemize}

\item Consider a finite (or countable) set of possible states: $S_1$, $S_2$, ...
\item The $X_t$ is the state of the system in the time $t$
\item We are looking at discrete time steps: $1,2,3,...$


\item The conditional probability is defined as:
\end{itemize}
\begin{equation}
P(X_t=S_j \vert X_{t-1}=S_{j-1},..., X_{1}=S_{1}) \nonumber
\end{equation}
\begin{itemize}
\item The chain is called a Markov chain if the probability depends only on the previous step:
\end{itemize}
\begin{equation}
P(X_t=S_j \vert X_{t-1}=S_{j-1},..., X_{1}=S_{1}) =  P(X_t=S_j \vert X_{t-1}=S_{j-1} )\nonumber
\end{equation}
\begin{itemize}
\item For this reason MCMC is also known as the drunk sailor's walk.
\item Very powerful method. Used to solve linear eq. systems, invert matrix, solve differential equations, etc.
\item Also used in physics problems: Brownian motion, diffusion, etc.
\end{itemize}


\end{footnotesize}
\end{frame}




\begin{frame}\frametitle{Linear equations system}
\begin{footnotesize}
\ARROW Let's start from a linear equation system:
\begin{align*}
\textbf{A} \overrightarrow{x}=\overrightarrow{b},~~~~~\det \textbf{A} \neq 0,
\end{align*}
where $\textbf{A}=(a_{ij}),~i,j=1,2,...,n$ -matrix, $\overrightarrow{b}=(b_1,b_2,...,b_n)$-vector, $\overrightarrow{x}=(x_1,x_2,...,x_n)$ - vector of unknowns.\\
\ARROW The solution we mark as $\overrightarrow{x}^0 = (x_1^0, x_2^0,..., x_n^0)$\\
\ARROW The above system can be transformed into the iterative representation:
\begin{align*}
 \overrightarrow{x}=\overrightarrow{a}   + \textbf{H} \overrightarrow{x}
\end{align*}
where $\textbf{H}$ is a matrix, $\overrightarrow{a}$ is a vector.\\
\ARROW We assume that the matrix norm:
\begin{align*}
\Vert H \Vert= \max_{1 \leq i \leq n} \sum_{j=1}^n \vert h_{ij} \vert <1
\end{align*}
\pause
\ARROW We can always transform every system to the iterative form: $\textbf{A}=\textbf{V}-\textbf{W}$. 
\begin{align*}
(\textbf{V} - \textbf{W} )\overrightarrow{x} = \overrightarrow{b}~~~~ \mapsto ~~~~ \overrightarrow{x} = \textbf{V}^{-1} \overrightarrow{b} + \textbf{V}^{-1} \textbf{W} \overrightarrow{x}
\end{align*}

\end{footnotesize}
\end{frame}

\begin{frame}\frametitle{Linear equations system}
\begin{footnotesize}
\ARROW Now we further modify the equation system:
\begin{align*}
\overrightarrow{x}=\overrightarrow{a}   + \textbf{H} \overrightarrow{x}   \Rightarrow (\textbf{I} - \textbf{H})\overrightarrow{x}=\overrightarrow{a}
\end{align*}
where $\textbf{I}=\delta_{ij}$ - unit matrix, $\delta_{ij}$ is the Kronecker delta.\\
\ARROW What one can do is to represent the solution in terms of a Neumann series:
\begin{align*}
\overrightarrow{x}^0=(\textbf{I}-\textbf{H})^{-1}\overrightarrow{a}= \overrightarrow{a} + \textbf{H}  \overrightarrow{a} + \textbf{H}^2  \overrightarrow{a}+ \textbf{H}^3 \overrightarrow{a}+ ...
\end{align*}
\ARROW So for the $i^{th}$ component we have:
\begin{align*}
x_i^0=a_i+\sum_{j=1}^nh_{ij} a_j + \sum_{j_1 =1}^n \sum_{j_2 =1}^n h_{ij_1} h_{j_1 j_2} a_{j_2} \\
+...+\sum_{j_1 =1}^n ...\sum_{j_n =1}^n  h_{ij_1}... h_{j_{n-1} j_n} a_{j_n}
\end{align*}
\ARROW We will construct a probabilistic interpretation using MCMC and then we show that the expected value is equal to the above formula.
\end{footnotesize}
\end{frame}






%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam method}
\begin{footnotesize}

\begin{itemize}
\item To do so we add an additional column to our matrix:
\end{itemize}
\begin{equation}
h_{i,0} = 1-\sum_{j=1}^n h_{ij} > 0 \nonumber
\end{equation}
\begin{itemize}
\item The system has states: $\lbrace 0,1,2...,n\rbrace$
\item State at $t$ time is denoted as $i_t(i_t=0,1,2,...,n;t=0,1,....)$
\item We make a random walk according to the following rules:
\begin{itemize}
\item At the beginning of the walk ($t=0$) we are at $i_0$. 
\item In the $t$ moment we are in the $i_t$ position then in $t+1$ time stamp we move to state $i_{t+1}$ with the probability $h_{i_t i_{t+1}}$. 
\item We stop walking if we are in state $0$. 
\end{itemize}
\item The path $\gamma = (i_0, i_1, i_2, ..., i_k, 0)$ is called trajectory.
\item For each trajectory we assign a number: 
\begin{align*}
X(\gamma)=X(i_0, i_1, i_2, ..., i_k, 0)=\frac{a_{i_k}}{h_{i_k 0}}
\end{align*}
\end{itemize}


\end{footnotesize}

\end{frame}



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam method}
\begin{footnotesize}
\ARROW The $X(\gamma)$ variable is a random variable from: $\lbrace a_1/h_{1,0},a_2/h_{2,0},...,a_n/h_{n,0} \rbrace$. The probability that $X(\gamma)= a_j/h_{j,0}$ is equal to the probability that the last non zero state of the $\gamma$ trajectory is $j$.\\
\ARROW The expected value of the $X(\gamma)$ trajectory if the trajectory begins from $i_0=s$ is:
\begin{align*}
E \lbrace X(\gamma) \vert i_0=s \rbrace=\sum_{k=0}^{\infty} \sum_{ \lbrace \gamma_k \rbrace} X(\gamma) P(\gamma)
\end{align*}
where $\gamma_k$ is a trajectory of length $k$, which starts in $i_0=s$ and $P(\gamma)$ is the probability of occurrence of this trajectory.
\ARROW Yes, you guessed it: let's expand the sum by trajectory length:
\begin{align*}
E \lbrace X(\gamma) \vert i_0=s \rbrace= \sum_{\gamma_0}X(\gamma)P(\gamma) +   \sum_{\gamma_1}X(\gamma)P(\gamma)+...+ \sum_{\gamma_k}X(\gamma)P(\gamma)
\end{align*}
\ARROW Now let's examine the elements of the above series.

\end{footnotesize}

\end{frame}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam method}
\begin{footnotesize}
$\lbrace \gamma_0 \rbrace$: One trajectory: $\gamma_0=(i_0=s \vert 0)$, $P(\gamma_0)=h_{s,0}$ and $X(\gamma_0)=a_s/h_{s,0}$. So:
\begin{align*}
\sum_{\gamma_0}X(\gamma)P(\gamma) = \frac{a_s}{h_{s,0}} h_{s,0}=a_s
\end{align*}



$\lbrace \gamma_1 \rbrace$: Trajectories: $\gamma_1=(i_0=s,i_1 \vert 0),~i_1 \neq 0$,  $P(\gamma_1)= P(s,i_1,0)=h_{s,i_1}h_{i_1,0} $ and $X(\gamma_1)=a_{i_1}/h_{i_1,0}$. So:
\begin{align*}
\sum_{\gamma_1}X(\gamma)P(\gamma) = \sum_{i_1=1}^n \frac{a_{i_1}}{h_{i_1,0}} h_{s,i_1} h_{i_1,0}= \sum_{i=1}^n h_{s,i_1}a_{i_1}
\end{align*}


$\lbrace \gamma_2 \rbrace$: Trajectories: $\gamma_2=(i_0=s,i_1,i_2 \vert 0),~i_1,i_2 \neq 0$,  $P(\gamma_2)= P(s,i_1, i_2,0)=h_{s,i_1}h_{i_1,i_2}h_{i_2,0} $ and $X(\gamma_2)=a_{i_2}/h_{i_2,0}$. So:
\begin{align*}
\sum_{\gamma_2}X(\gamma)P(\gamma) = \sum_{i_1=1}^n  \sum_{i_2=1}^n  \frac{a_{i_2}}{h_{i_2,0}} h_{s,i_1} h_{i_1,i_2} h_{i_2,0}= \sum_{i_1=1}^n  \sum_{i_2=1}^n  h_{s,i_1} h_{i_1,i_2}   a_{i_2}
\end{align*}

\ARROW etc...

\end{footnotesize}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam method}
\begin{footnotesize}
\ARROW After summing up:
\begin{align*}
E \lbrace X(\gamma) \vert i_0=s \rbrace= a_s+ \sum_{i_1 =1}^n h_{s,i_1} a_{i_1}+ \sum_{i_1 =1}^n \sum_{i_2 =1}^n  h_{s,i_1}  h_{i_1,i_2}  a_{i_2}+....\\ + \sum_{i_1 =1}^n \sum_{i_2 =1}^n  ... \sum_{i_k =1}^n   h_{s,i_1}  h_{i_1,i_2}...  h_{i_{k-1},i_k} a_{i_k}+...
\end{align*}
\ARROW If you compare this expression with the Neumann series you will see that they are the same, so:
\begin{align*}
x_i^0=E \lbrace X(\gamma) \vert i_0=i \rbrace
\end{align*}

\begin{exampleblock}{To sum up:}
We have proven that solving a linear system can be represented by an expectation value of the random variable $X(\gamma)$. The error is computed using standard deviation equation. 
\end{exampleblock}
\end{footnotesize}

\end{frame}






%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam method}
\begin{itemize}
\item For example let's try to solve this equation system:
\end{itemize}
\begin{equation}
\overrightarrow{x} = 
\left(\begin{array}{c}
 1.5  \\
-1.0\\
0.7  \end{array} \right) 
+
\left(\begin{array}{ccc}
0.2 &  0.3 & 0.1  \\
0.4 &  0.3 & 0.2 \\
0.3 &  0.1 & 0.1  \end{array} \right) \overrightarrow{x}
 \nonumber
\end{equation}
\begin{itemize}
\item The solution is $\overrightarrow{x}_0 = (2.154303, 0.237389, 1.522255)$.
\end{itemize}
\begin{columns}

\column{0.1in}

\column{2.5in}
\begin{itemize}
\item The probability matrix $h_{ij}$ has the shape:
\end{itemize}
\begin{tabular}{|c|cccc|}
\hline
$i/j$ &  1 & 2 & 3 & 0  \\ \hline
1 & 0.2 & 0.3 & 0.1 & 0.4 \\
2 & 0.4 & 0.3 & 0.2 & 0.1 \\
3 & 0.3 & 0.1 & 0.1 & 0.5 \\ \hline
\end{tabular}

\column{2.5in}
\begin{itemize}
\item An example solution:
\end{itemize}
\includegraphics[width=0.95\textwidth]{images/mark.png}

\end{columns}


\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\iffalse
\begin{frame}\frametitle{Neumann-Ulam dual method}
\begin{minipage}{\textwidth}

\ARROW The main problem with the Neumann-Ulam method is the fact that one has to calculate each of the $x_0^i$ separately.\\
\ARROW The generalization works as follows:
\begin{enumerate}
\item We set randomly a \pdf~of states: $q_1,q_2,q_3,...,q_n$, such that $q_i >0$ and $\sum_{i=1}^n =1$. 
\item We choose the starting point accordingly to $q_i$ probability.
\item If in the $t$ moment the point is in position $i_t$ then the with the probability $p(i_{t+1} \vert i_t)=h_{i_{t+1}, i_t}$ the points moves to the $i_{t+1}$ state. 
\item For the state $0$ we assign the probability: $h_{0,i_{t}}=1-\sum_{i=1}^n h_{i,i_t}$
\item WARNING HERE THE MATRIX IS TRANSPOSED compared to method.
\item The walk ends while you reach $0$ state.
\item For each walk/trajectory $(\gamma=(i_0,i_1,...,i_k,0))$ we assign a vector:
\begin{align*}
\overrightarrow{Y}(\gamma)=\frac{a_{i_0}}{q_{i_0}p(0\vert i_k }) \widehat{e}_{i_k}
\end{align*}
\item The final result is: $\overrightarrow{x}^0=\frac{1}{N}\sum \overrightarrow{Y}$

\end{enumerate}\textbf{




\end{minipage}

\end{frame}
\fi
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam dual method}
\begin{itemize}
\item The problem with Neumann-Ulam method is that you need to repeat it for each of the coordinates of the $\overrightarrow{x}_0$ vector.
\item The dual method calculates the whole $\overrightarrow{x}_0$ vector.
\item The algorithm:
\begin{itemize}
\item On the indexes: $\lbrace0,1,...,n\rbrace$ we set a probability distribution:\\ $q_1, q_2,..., q_n$, $q_i>0$ and $\sum_{i=1}^n q_i=1$. 
\item The starting point we select from $q_i$ distribution.
\item If in $t$ time we are in $i_t$ state then with probability $p(i_{t+1} \vert i_t) = h_{i_{t+1},i_{t}}$ in $t+1$ we will be in state $i_{t+1}$. For  $i_{t+1}=0$ we define the probability: $h_{0,i_{t}}=1-\sum_{j=1}^n h_{j,i_{t}}$. Here we also assume that $h_{j,i_{t}} > 0$.
\item NOTE: here the matrix is transposed compared to the previous method: $H^{T}$.
\item Again we end our walk when we are at state $0$. 
\item For the trajectory: $\gamma = (i_0, i_1,...,i_k, 0)$, we assign the vector:
\end{itemize}
\begin{equation}
\overrightarrow{Y}(\gamma) = \dfrac{a_{i_0}}{ q_{i_{0}} p(0 \vert i_k)   }  \widehat{e}_{i_{k}} \in \mathcal{R}^n \nonumber
\end{equation}
\item The solution will be : $\overrightarrow{x}^0 = \dfrac{1}{N} \sum \overrightarrow{Y}(\gamma)$
\end{itemize}


\end{frame}
\iffalse
\begin{frame}\frametitle{Neumann-Ulam dual method, proof}
\begin{footnotesize}
\ARROW If $Y_i(\gamma)$ is the i-th component of the $\overrightarrow{Y}(\gamma)$ vector. One needs to show:
\begin{align*}
E\lbrace Y_i(\gamma)\rbrace=x_j^0
\end{align*}
\ARROW From definition:
\begin{align*}
Y_j(i_1,...,i_k,0)=\begin{cases}
\frac{a_{i_k}}{q_{i_0} p(0 \vert i_k)}~~~& i_k=j\\
0 ~~~& i_k \neq j
\end{cases}
\end{align*}
\ARROW The expected value:
\begin{align*}
E \lbrace Y_j (\gamma) \rbrace=\sum_{ {\rm trajectories}} \frac{a_j}{q_{i_0}p(0 \vert i_k) } P(i_1,i_2,...,i_k,0),
\end{align*}
where $P(i_1,i_2,...,i_k,0)$ is the probability of this trajectory occurring.\\
\ARROW But by our definition the probability:
\begin{align*}
P(i_0,i_1,...,i_{k-1},j,0)=q_{0}h_{i_1,i_0}...h_{k,i_{k-1}}p(0 \vert j)
\end{align*}
\ARROW In the end we get:
\begin{align*}
E(Y_j(\gamma))=\sum_{k=0}^{\infty} \sum_{i_{k-1}=1}^n ... \sum_{i_{1}=1}^n \sum_{i_{0}=1}^n h_{j,i_{k-1}} h_{j,i_{k-1}} ... h_{i_2,i_1} h_{i_1,i_0} a_{i_0}  
\end{align*}


\end{footnotesize}
\end{frame}
\fi
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam dual method}
\begin{itemize}
\item Let's try to solve the equation system:
\end{itemize}
\begin{equation}
\overrightarrow{x} = 
\left(\begin{array}{c}
 1.5  \\
-1.0\\
0.7  \end{array} \right) 
+
\left(\begin{array}{ccc}
0.2 &  0.3 & 0.1  \\
0.4 &  0.3 & 0.2 \\
0.1 &  0.1 & 0.1  \end{array} \right) \overrightarrow{x}
 \nonumber
\end{equation}
\begin{itemize}
\item The solution is: $\overrightarrow{x}_0 = (2.0, 0.0, 1.0)$.
\item Let's put the initial probability as constant:
\end{itemize}
\begin{equation}
q_1=q_2=q_3=\dfrac{1}{3}  \nonumber
\end{equation}
\begin{columns}

\column{0.1in}

\column{2.5in}
\begin{itemize}
\item The probability matrix $h_{ij}$ has the shape:
\end{itemize}
\begin{tabular}{|c|cccc|}
\hline
$i/j$ &  1 & 2 & 3 & 4  \\ \hline
1 & 0.2 & 0.4 & 0.1 & 0.3 \\
2 & 0.3 & 0.3 & 0.1 & 0.3 \\
3 & 0.1 & 0.2 & 0.1 & 0.6 \\ \hline
\end{tabular}

\column{2.5in}
\begin{itemize}
\item An example solution:
\end{itemize}
\includegraphics[width=0.95\textwidth]{images/mark2.png}

\end{columns}
\end{frame}




\begin{frame}\frametitle{Generalization, the algorithm}
\ARROW We set the $P$ matrix in an arbitrary way.\\
\ARROW If in the $t$ moment the point is in the $i_t$ state, then with the probability $p_{i_t, i_{t+1}}$ he can go to $i_{t+1}$ state. \\
\ARROW We stop the walk once we reach $0$.\\
\ARROW For the given trajectory we assign the value: $X(\gamma_k)$\\
\ARROW We repeat the procedure $N$ times and take the mean and RMS.\\
\ARROW We repeat this also for every of the $\overrightarrow{x}^0$ vector components.



\end{frame}


\iffalse
\begin{frame}\frametitle{Wasow method}

\begin{footnotesize}

\ARROW The main problem with the Neumann-Ulam methods is the fact that each time we estimate only one of the part of the taylor expansion.\\
\ARROW W.Wasow (1956) was smarter:
\begin{itemize}

\item For the trajectory: $\gamma(i_0,i_1,...,i_k,0)$ we look trajectories:
\begin{align*}
(i_0),~(i_0,i_1),~(i_0,i_1,...,i_k)
\end{align*}
and for each we associate a number:
\begin{align*}
(i_0,i_1,i_2,...,i_m),~0 \leq m \leq k
\end{align*}
we assign a number:
\begin{align*}
\nu_{i_0,i_1} \nu_{i_1,i_2}...\nu_{i_{m-1},i_m}a_{i_m}
\end{align*}

\end{itemize}
\ARROW For the trajectory we define:
\begin{align*}
X^{\ast}(\gamma)=\sum_{m=0}^k \nu_{i_1,i_2}...\nu_{i_{m-1},i_m}a_{i_m}
\end{align*}
\ARROW One can proof that:
\begin{align*}
E \lbrace X^{\ast}(\gamma) \vert i_0=i \rbrace =x_i^0
\end{align*}

\end{footnotesize}

\end{frame}

\fi


\begin{frame}\frametitle{Partial differential equations, intro}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW Let's say we want to describe a point that walks on the  $\mathbb{R}$ axis:
\begin{itemize}
\item At the beginning $(t=0)$ the particle is at $x=0$
\item If in the $t$ the particle is in the $x$ then in the time $t+1$ it walks to $x+1$ with the known probability $p$ and to the point $x-1$ with the probability $q=1-p$. 
\item The moves are independent.
\end{itemize}
\ARROW So let's try to describe the motion of the particle. \\
\ARROW The solution is clearly a probabilistic problem. Let $\nu(x,t)$ be a probability that at time $t$ particle is in position $x$. We get the following equation:
\begin{align*}
\nu(x,t+1)=p \nu(x-1,t)+q \nu(x+1,t)
\end{align*}
with the initial conditions:
\begin{align*}
\nu(0,0)=1,~~~~~\nu(x,0)=0~~{\rm if~}x \neq 0.
\end{align*}
\ARROW The above function describes the whole system (every $(t,x)$ point).
\end{footnotesize}

\end{minipage}

\end{frame}



\begin{frame}\frametitle{Partial differential equations, intro}
\begin{minipage}{\textwidth}
\begin{tiny}
\ARROW Now in differential equation language we would say that the particle walks in steps of $\Delta x$ in times: $k\Delta t$, $k=1,2,3....$:
\begin{align*}
\nu(x,t+\Delta t)=p\nu(x-\Delta x,t)+q\nu(x+\Delta x,t).
\end{align*}
\ARROW To solve this equation we need to expand the $\nu(x,t)$ function in the Taylor series:
\begin{align*}
\nu(x,t) + \frac{\partial \nu(x,t)}{\partial t} \Delta t = p \nu(x,t) - p \frac{\partial\nu(x,t) }{\partial x} \Delta x + \frac{1}{2} p \frac{\partial^2 \nu(x,t)}{\partial x^2} (\Delta x)^2\\ + q \nu(x,t) + q \frac{\partial\nu(x,t) }{\partial x} \Delta x + \frac{1}{2} q \frac{\partial^2 \nu(x,t)}{\partial x^2} (\Delta x)^2
\end{align*}
\ARROW From which we get:
\begin{align*}
\frac{\partial \nu(x,t)}{\partial t} \Delta t = -(p-q) \frac{\partial \nu(x,t) }{\partial x}\Delta x + \frac{1}{2} \frac{\partial^2 \nu(x,t) }{\partial x^2}(\Delta x)^2
\end{align*}
\ARROW Now We divide the equation by $\Delta t$ and take the $\Delta t \to 0$:
\begin{align*}
(p-q) \frac{\Delta x }{\Delta t}  \to 2 c,~~~~~~\frac{ (\Delta x)^2}{\Delta t } \to 2D,
\end{align*}
\ARROW We get the Fokker-Planck equation for the diffusion with current:
\begin{align*}
\frac{\partial \nu(x,t)}{\partial t } = -2c \frac{\partial \nu(x,t) }{\partial x} + D \frac{\partial^2 \nu(x,t)}{\partial x^2}
\end{align*}
\ARROW The $D$ is the diffusion coefficient, $c$ is the speed of current. For $c=0$ it is a symmetric distribution.

\end{tiny}

\end{minipage}

\end{frame}






\begin{frame}\frametitle{Laplace equation, Dirichlet boundary conditions}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW The aforementioned example shows the way to solve the partial differential equation using Markov Chain MC. \\
\ARROW We will see how different classes of partial differential equations can be approximated with a Markov Chain MC, whose expectation value is the solution of the equation.
\ARROW The Laplace equation:
\begin{align*}
\frac{\partial^2 u  }{\partial x_1^2 } +\frac{\partial^2 u  }{\partial x_2^2 }+...+\frac{\partial^2 u  }{\partial x_k^2 }=0
\end{align*}
The $u(x_1,x_2,...,x_k)$ function that is a solution of above equation we call harmonic function. If one knows the values of the harmonic function on the edges  $\Gamma(D)$ of the $D$ domain one can solve the equation.\\
\begin{exampleblock}{The Dirichlet boundary conditions:}
Find the values of $u(x_1,x_2,...,x_k)$ inside the $D$ domain knowing the values of the edge are given with a function:
\begin{align*}
u(x_1,x_2,...,x_k)=f(x_1,x_2,...,x_k) \in \Gamma(D)
\end{align*}
\end{exampleblock}
\ARROW Now I am lazy so I put $k=2$ but it's the same for all k!

\end{footnotesize}

\end{minipage}

\end{frame}




\begin{frame}\frametitle{Laplace equation, Dirichlet boundary conditions}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\begin{columns}
\column{0.1in}
{~}\\
\column{3in}
\ARROW We will put the Dirichlet boundary condition as a discrete condition:\\
\begin{itemize}
\item The domain $D$ we put a lattice with distance $h$.
\item Some points we treat as inside {\color{green}(denoted with circles)}. They form a set denoted $D^{\ast}$.
\item The other points we consider as the boundary points and they form a set $\Gamma(D)$.
\end{itemize}

\column{2in}
\begin{center}
\includegraphics[width=0.95\textwidth]{images/dir.png}
\end{center}

\end{columns}
\ARROW We express the second derivatives with the discrete form:
\begin{align*}
\frac{ \frac{u(x+h)-u(x)}{h} -\frac{u(x)-u(x-h) }{h}   }{h} = \frac{u(x+h)-2u(x)+u(x-h)}{h^2}
\end{align*}
\ARROW Now we choose the units so $h=1$.

\end{footnotesize}
\end{minipage}
\end{frame}


\begin{frame}\frametitle{Laplace equation, Dirichlet boundary conditions}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\begin{exampleblock}{The Dirichlet condition in the discrete form:}
Find the $u^{\ast}$ function which obeys the differential equation:
\begin{align*}
u^{\ast}(x,y)=\frac{1}{4}\left[ u^{\ast}(x-1,y)+u^{\ast}(x+1,y)+u^{\ast}(x,y-1)+u^{\ast}(x,y+1) \right]
\end{align*}
in all points $(x,y) \in D^{\ast}$ with the condition:
\begin{align*}
u^{\ast}(x,y)=f^{\ast}(x,y),~~~(x,y) \in \Gamma(D^{\ast})
\end{align*}
where $f^{\ast}(x,y)$ is the discrete equivalent of $f(x,y)$ function.
\end{exampleblock}
\ARROW We consider a random walk over the lattice $D^{\ast} \cup \Gamma(D^{\ast})$.
\begin{itemize}
\item In the $t=0$ we are in some point $(\xi,\eta) \in D^{\ast}$
\item If at the $t$ the particle is in $(x,y)$ then at $t+1$ it can go with equal probability to any of the four neighbour lattices: $(x-1,y)$, $(x+1,y)$, $(x,y-1)$, $(x,y+1)$.
\item If the particle at some moment gets to the edge $\Gamma(D^{\ast})$ then the walk is terminated.
\item For the particle trajectory we assign the value of: $\nu(\xi,\eta)=f^{\ast}(x,y)$, where $(x,y)\in \Gamma(D^{\ast})$.
\end{itemize}
\end{footnotesize}
\end{minipage}
\end{frame}




\begin{frame}\frametitle{Laplace equation, Dirichlet boundary conditions}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW Let $p_{\xi,\eta}(x,y)$ be the probability that a particle starting its walk in $(\xi,\eta)$ ends the walk in $(x,y)$.\\
\ARROW The possibilities:
\begin{enumerate}
\item The point $(\xi,\eta) \in \Gamma(D^{\ast})$. Then:
\begin{align}
p_{\xi,\eta}(x,y)=\begin{cases}
1,~~(x,y)=(\xi,\eta)\\
0,~~(x,y)\neq  (\xi,\eta)
\end{cases}\label{eq:trivial}
\end{align}
\item The point $(\xi,\eta) \in D^{\ast}$:
\begin{align}
p_{\xi,\eta}(x,y) = \frac{1}{4}\left[ p_{\xi-1,\eta}(x,y) + p_{\xi+1,\eta}(x,y)+  p_{\xi,\eta-1}(x,y)+ p_{\xi,\eta+1}(x,y) \right]
\label{eq:1}
\end{align}


\end{enumerate}
this is because to get to $(x,y)$ the particle has to walk through one of the neighbours: $(x-1,y)$, $(x+1,y)$, $(x,y-1)$, $(x,y+1)$.\\
\ARROW The expected value of the $\nu(\xi,\eta)$ is given by equation:
\begin{align}
E(\xi,\eta)=\sum_{(x,y)\in \Gamma^{\ast}} p_{\xi,\eta}(x,y) f^{\ast}(x,y)\label{eq:2}
\end{align}
where the summing is over all boundary points
\end{footnotesize}
\end{minipage}
\end{frame}





\begin{frame}\frametitle{Laplace equation, Dirichlet boundary conditions}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW Now multiplying the \ref{eq:1} by $f^{\ast}(x,y)$ and summing over all edge points $(x,y)$:
\begin{align*}
E(\xi,\eta)=\frac{1}{4}\left[ E(\xi-1,\eta) +  E(\xi+1,\eta) + E(\xi,\eta-1) +  E(\xi,\eta+1) \right]
\end{align*}
\ARROW Putting now \ref{eq:trivial} to \ref{eq:2} one gets:
\begin{align*}
E(\xi,\eta)=f^{\ast}(\xi,\eta),~~(\xi,\eta) \in \Gamma(D^{\ast})
\end{align*}
\ARROW Now the expected value solves identical equation as our $u^{\ast}(x,y)$ function. From this we conclude:
\begin{align*}
E(x,y)=u^{\ast}(x,y)
\end{align*}
\ARROW The algorithm:
\begin{itemize}
\item We put a particle in $(x,y)$.
\item We observe its walk up to the moment when it is on the edge $\Gamma(D^{\ast})$.
\item We calculate the value of $f^{\ast}$ function in the point where the particle stops.
\item Repeat the walk $N$ times taking the average afterwards.
\end{itemize}
\begin{alertblock}{Important:}
One can show that the error does not depend on the dimensions!
\end{alertblock}

\end{footnotesize}
\end{minipage}
\end{frame}





\backupbegin   

\begin{frame}\frametitle{Backup}


\end{frame}

\backupend			

\end{document}