% Newer
% Older
% Lecture_repo / Lectures_my / MC_2016 / Lecture9 / mchrzasz.tex
\documentclass[11 pt,xcolor={dvipsnames,svgnames,x11names,table}]{beamer}

\usepackage[english]{babel} 
\usepackage{polski}         
\usepackage[skins,theorems]{tcolorbox}
\tcbset{highlight math style={enhanced,
  colframe=red,colback=white,arc=0pt,boxrule=1pt}}

\usetheme[
	bullet=circle,		% Other option: square
	bigpagenumber,		% circled page number on lower right
	topline=true,			% colored bar at the top of the frame 
	shadow=false,			% Shading for beamer blocks
	watermark=BG_lower,	% png file for the watermark
	]{Flip}

%\logo{\kern+1.em\includegraphics[height=1cm]{SHiP-3_LightCharcoal}}
                            

\usepackage[lf]{berenis}
\usepackage[LY1]{fontenc}
\usepackage[utf8]{inputenc}

\usepackage{emerald}
\usefonttheme{professionalfonts}
\usepackage[no-math]{fontspec}	
\usepackage{listings}
\defaultfontfeatures{Mapping=tex-text}	% This seems to be important for mapping glyphs properly

\setmainfont{Gillius ADF}			% Beamer ignores "main font" in favor of sans font
\setsansfont{Gillius ADF}			% This is the font that beamer will use by default
% \setmainfont{Gill Sans Light}		% Prettier, but harder to read

\setbeamerfont{title}{family=\fontspec{Gillius ADF}}

\input t1augie.fd

%\newcommand{\handwriting}{\fontspec{augie}} % From Emerald City, free font
%\newcommand{\handwriting}{\usefont{T1}{fau}{m}{n}} % From Emerald City, free font
% \newcommand{\handwriting}{}	% If you prefer no special handwriting font or don't have augie

%% Gill Sans doesn't look very nice when boldfaced
%% This is a hack to use Helvetica instead
%% Usage: \textbf{\forbold some stuff}
%\newcommand{\forbold}{\fontspec{Arial}}

\usepackage{graphicx}
\usepackage[export]{adjustbox}

\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{colortbl}
\usepackage{mathrsfs} 			% For Weinberg-esque letters
\usepackage{cancel}				% For "SUSY-breaking" symbol
\usepackage{slashed}            % for slashed characters in math mode
\usepackage{bbm}                % for \mathbbm{1} (unit matrix)
\usepackage{amsthm}				% For theorem environment
\usepackage{multirow}			% For multi row cells in table
\usepackage{arydshln} 			% For dashed lines in arrays and tables
\usepackage{siunitx}
\usepackage{xhfill}
\usepackage{grffile}
\usepackage{textpos}
\usepackage{subfigure}
\usepackage{tikz}
\usepackage{hyperref}
%\usepackage{hepparticles}    
\usepackage[italic]{hepparticles}     

\usepackage{hepnicenames} 

% Drawing a line
\tikzstyle{lw} = [line width=20pt]
% \topline draws a coloured rule across the full paper width, 23.5pt below
% the top edge of the physical page. The [remember picture,overlay] options
% anchor the coordinates to the page itself rather than the text position.
\newcommand{\topline}{%
  \tikz[remember picture,overlay] {%
    \draw[crimsonred] ([yshift=-23.5pt]current page.north west)
             -- ([yshift=-23.5pt,xshift=\paperwidth]current page.north west);}}



% % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
\usepackage{tikzfeynman}		% For Feynman diagrams
\usetikzlibrary{arrows,shapes}
\usetikzlibrary{trees}
\usetikzlibrary{matrix,arrows} 				% For commutative diagram
% http://www.felixl.de/commu.pdf
\usetikzlibrary{positioning}				% For "above of=" commands
\usetikzlibrary{calc,through}				% For coordinates
\usetikzlibrary{decorations.pathreplacing}  % For curly braces
% http://www.math.ucla.edu/~getreuer/tikz.html
\usepackage{pgffor}							% For repeating patterns

\usetikzlibrary{decorations.pathmorphing}	% For Feynman Diagrams
\usetikzlibrary{decorations.markings}
\tikzset{
	% >=stealth', %%  Uncomment for more conventional arrows
	vector/.style={decorate, decoration={snake}, draw},
	provector/.style={decorate, decoration={snake,amplitude=2.5pt}, draw},
	antivector/.style={decorate, decoration={snake,amplitude=-2.5pt}, draw},
	fermion/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{>}}}},
	fermionbar/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{<}}}},
	fermionnoarrow/.style={draw=gray},
	gluon/.style={decorate, draw=black,
		decoration={coil,amplitude=4pt, segment length=5pt}},
	scalar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	scalarbar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{<}}}},
	scalarnoarrow/.style={dashed,draw=black},
	electron/.style={draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	bigvector/.style={decorate, decoration={snake,amplitude=4pt}, draw},
}

% TIKZ - for block diagrams, 
% from http://www.texample.net/tikz/examples/control-system-principles/
% \usetikzlibrary{shapes,arrows}
\tikzstyle{block} = [draw, rectangle, 
minimum height=3em, minimum width=6em]




\usetikzlibrary{backgrounds}
\usetikzlibrary{mindmap,trees}	% For mind map
\newcommand{\degree}{\ensuremath{^\circ}}
\newcommand{\E}{\mathrm{E}}
\newcommand{\Var}{\mathrm{Var}}
\newcommand{\Cov}{\mathrm{Cov}}
\newcommand\Ts{\rule{0pt}{2.6ex}}       % Top strut
\newcommand\Bs{\rule[-1.2ex]{0pt}{0pt}} % Bottom strut

\graphicspath{{images/}}	% Put all images in this directory. Avoids clutter.

% SOME COMMANDS THAT I FIND HANDY
% \renewcommand{\tilde}{\widetilde} % dinky tildes look silly, dosn't work with fontspec
%\newcommand{\comment}[1]{\textcolor{comment}{\footnotesize{#1}\normalsize}} % comment mild
%\newcommand{\Comment}[1]{\textcolor{Comment}{\footnotesize{#1}\normalsize}} % comment bold
%\newcommand{\COMMENT}[1]{\textcolor{COMMENT}{\footnotesize{#1}\normalsize}} % comment crazy bold
\newcommand{\Alert}[1]{\textcolor{Alert}{#1}} % louder alert
\newcommand{\ALERT}[1]{\textcolor{ALERT}{#1}} % loudest alert
%% "\alert" is already a beamer pre-defined
\newcommand*{\Scale}[2][4]{\scalebox{#1}{$#2$}}%

\def\Put(#1,#2)#3{\leavevmode\makebox(0,0){\put(#1,#2){#3}}}

\usepackage{gmp}
\usepackage[final]{feynmp-auto}

\usepackage[backend=bibtex,style=numeric-comp,firstinits=true]{biblatex}
\bibliography{bib}
\setbeamertemplate{bibliography item}[text]

\makeatletter\let\frametextheight\beamer@frametextheight\makeatother

% suppress frame numbering for backup slides
% you always need the appendix for this!
% \backupbegin snapshots the current frame number into a new counter so the
% backup section can later be "subtracted out" of the visible numbering.
\newcommand{\backupbegin}{
	\newcounter{framenumberappendix}
	\setcounter{framenumberappendix}{\value{framenumber}}
}
% \backupend rewinds framenumber to its pre-appendix value: the stored
% counter becomes (start - end), and adding it to framenumber restores the
% page total shown on the main slides.
\newcommand{\backupend}{
	\addtocounter{framenumberappendix}{-\value{framenumber}}
	\addtocounter{framenumber}{\value{framenumberappendix}} 
}


\definecolor{links}{HTML}{2A1B81}
%\hypersetup{colorlinks,linkcolor=,urlcolor=links}

% For shapo's formulas:

% For shapo's formulas:
\def\lsi{\raise0.3ex\hbox{$<$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\def\gsi{\raise0.3ex\hbox{$>$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\newcommand{\lsim}{\mathop{\lsi}}
\newcommand{\gsim}{\mathop{\gsi}}
\newcommand{\wt}{\widetilde}
%\newcommand{\ol}{\overline}
\newcommand{\Tr}{\rm{Tr}}
\newcommand{\tr}{\rm{tr}}
\newcommand{\eqn}[1]{&\hspace{-0.7em}#1\hspace{-0.7em}&}
\newcommand{\vev}[1]{\rm{$\langle #1 \rangle$}}
\newcommand{\abs}[1]{\rm{$\left| #1 \right|$}}
\newcommand{\eV}{\rm{eV}}
\newcommand{\keV}{\rm{keV}}
\newcommand{\GeV}{\rm{GeV}}
\newcommand{\im}{\rm{Im}}
\newcommand{\disp}{\displaystyle}
\def\be{\begin{equation}}
\def\ee{\end{equation}}
\def\ba{\begin{eqnarray}}
\def\ea{\end{eqnarray}}
\def\d{\partial}
\def\l{\left(}
\def\r{\right)}
\def\la{\langle}
\def\ra{\rangle}
\def\e{{\rm e}}
\def\Br{{\rm Br}}
\def\fixme{{\color{red} FIXME!}}
\def\mc{{\color{Magenta}{MC}}}
\def\pdf{{\rm p.d.f.}}
\def\cdf{{\rm c.d.f.}}
\def\ARROW{{\color{JungleGreen}{$\Rrightarrow$}}\xspace}  
\def\ARROWR{{\color{WildStrawberry}{$\Rrightarrow$}}\xspace}  

% Superseded title block (duplicated below with the current lecture title);
% commented out so the active \author/\title definitions are unambiguous.
%\author{ {\fontspec{Trebuchet MS}Marcin Chrz\k{a}szcz} (Universit\"{a}t Z\"{u}rich)}
%\institute{UZH}
%\title[Specific \pdf~generation]{Specific \pdf~generation}
%\date{\fixme}
\newcommand*{\QEDA}{\hfill\ensuremath{\blacksquare}}%
\newcommand*{\QEDB}{\hfill\ensuremath{\square}}%

\author{ {\fontspec{Trebuchet MS}Marcin Chrz\k{a}szcz} (Universit\"{a}t Z\"{u}rich)}
\institute{UZH}
\title[Matrix inversion and Partial Differential Equation Solving]{Matrix inversion and Partial Differential Equation Solving}
\date{\fixme}


\begin{document}
\tikzstyle{every picture}+=[remember picture]

{
\setbeamertemplate{sidebar right}{\llap{\includegraphics[width=\paperwidth,height=\paperheight]{bubble2}}}
\begin{frame}[c]%{\phantom{title page}} 
\begin{center}
\begin{center}
	\begin{columns}
		\begin{column}{0.9\textwidth}
			\flushright\fontspec{Trebuchet MS}\bfseries \Huge {Matrix inversion and Partial Differential Equation Solving}
		\end{column}
		\begin{column}{0.2\textwidth}
		  %\includegraphics[width=\textwidth]{SHiP-2}
		\end{column}
	\end{columns}
\end{center}
	\quad
	\vspace{3em}
\begin{columns}
\begin{column}{0.44\textwidth}
\flushright \vspace{-1.8em} {\fontspec{Trebuchet MS} \Large Marcin Chrz\k{a}szcz\\\vspace{-0.1em}\small \href{mailto:mchrzasz@cern.ch}{mchrzasz@cern.ch}}

\end{column}
\begin{column}{0.53\textwidth}
\includegraphics[height=1.3cm]{uzh-transp}
\end{column}
\end{columns}

\vspace{1em}
%		\footnotesize\textcolor{gray}{With N. Serra, B. Storaci\\Thanks to the theory support from M. Shaposhnikov, D. Gorbunov}\normalsize\\
\vspace{0.5em}
	\textcolor{normal text.fg!50!Comment}{Monte Carlo methods, \\ 28 April, 2016}
\end{center}
\end{frame}
}

\begin{frame}\frametitle{Announcement}

\begin{Large}
There will be no lectures and class on 19\textsuperscript{th} of May
\end{Large}

\end{frame}


\begin{frame}\frametitle{Matrix inversion}
\begin{minipage}{\textwidth}
\begin{footnotesize}

\ARROW The last time we discussed the method of linear equations solving. The same methods can be used for matrix inversions! The columns of inverse matrix can be found solving:
\begin{align*}
\textbf{A}\overrightarrow{x}= \hat{e}_i,~~~i=1,2,...,n
\end{align*}
%where $\hat{e}_i$ is the $i^{th}$ versor. \\
\ARROW In order to determine the inverse of a matrix $\textbf{A}$ we need to choose a temporary matrix $\textbf{M}$ such that:
\begin{align*}
\textbf{H}=\textbf{I}-\textbf{M}\textbf{A}
\end{align*}
with the normalization condition:
\begin{align*}
\Vert \textbf{H} \Vert = \max_{1 \leq i \leq n} \sum_{j=1}^n \vert h_{ij} \vert < 1 
\end{align*}
where $\textbf{I}$ is a unit matrix.\\
\ARROW Next we Neumann expand the $(\textbf{MA})^{-1}$ matrix:
\begin{align*}
(\textbf{MA})^{-1}=(\textbf{I}-\textbf{H})^{-1}=\textbf{I}+\textbf{H}
+\textbf{H}^2+\dots
\end{align*}
\ARROW The inverse matrix we get from the equation:
\begin{align*}
\textbf{A}^{-1}=\textbf{A}^{-1} \textbf{M}^{-1} \textbf{M}=(\textbf{MA})^{-1}\textbf{M}
\end{align*}


\end{footnotesize}

\end{minipage}

\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{frame}\frametitle{Matrix inversion, basic method}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW For the $(i,j)$ element of the matrix $(MA)^{-1}$ we have:
\begin{align*}
(MA)^{-1}_{ij}  = \delta_{ij} + h_{ij} + \sum_{i_1=1}^n h_{i i_1} h_{i_1 j} + \sum_{i_1=1}^n \sum_{i_2=1}^n  h_{i i_1} h_{i_1 i_2}h_{i_2 j} + ...
\end{align*}
\ARROW The algorithm:
We choose freely a probability matrix $P=(p_{ij})$ with the conditions:
\begin{align*}
p_{i,j}\geq 0,~~~~ p_{ij}=0 \Leftrightarrow h_{ij}=0,~~~~p_{i,0}=1-\sum_{j=1}^np_{ij} >0
\end{align*}
\ARROW We construct a random walk for the state set $\lbrace 0,1,2,3...,n \rbrace$:
\begin{enumerate}
\item In the initial moment $(t=0)$ we start in the state $i_0=i$.
\item If in the moment $t$ the point is in the $i_t$ state, then in the time $t+1$ it will be in the state $i_{t+1}$ with the probability $p_{i_t,i_{t+1}}$.
\item We stop the walk if we end up in the state $0$.
\end{enumerate}
\end{footnotesize}

\end{minipage}

\end{frame}


\begin{frame}\frametitle{Matrix inversion, basic method}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW For the observed trajectory $\gamma_k=(i,i_1,..,j_k,0)$ we assign the value of:
\begin{align*}
X(\gamma_k)=\frac{ h_{ii_1}  h_{i_1 i_2}... h_{i_{k-1} i_k} ~~ \delta_{i_k j }}{ p_{ii_1}  p_{i_1 i_2}... p_{i_{k-1} i_k} ~~p_{i_k 0} }
\end{align*}
\ARROW The mean of all observed $X(\gamma_k)$ is an unbiased estimator of the $(MA)^{-1}_{ij}$.\\
\begin{exampleblock}{Proof:}
\begin{itemize}
\item The probability of observing the $\gamma_k$ trajectory:
\begin{align*}
P(\gamma_k) = p_{i i_1} p_{i_1 i_2}... p_{i_{k-1} i_k} p_{i_k 0}
\end{align*}
\item From this point we follow the proof of the previous lecture (Neumann-Ulan) and prove that:
\begin{align*}
E \lbrace X(\gamma_k) \rbrace = (MA)^{-1}_{ij}
\end{align*}

\end{itemize}
\end{exampleblock}
\ARROW A different estimator for the $(MA)^{-1}_{ij}$ element is the Wasow estimator:
\begin{align*}
X^{\ast} (\gamma_k) = \sum_{m=0}^k \frac{ h_{ii_1}  h_{i_1 i_2}... h_{i_{m-1} i_m} } { p_{ii_1}  p_{i_1 i_2}... p_{i_{m-1} i_m}  } \delta_{i_m j}
\end{align*}


\end{footnotesize}

\end{minipage}

\end{frame}



\begin{frame}\frametitle{Matrix inversion, dual method}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW On the set of states $\lbrace 0, 1, 2,...,n \rbrace$ we define a discrete \pdf~
\begin{align*}
q_1,q_2,...,q_n~{ \rm such~that~}q_i>0,~i=1,2,3...n{\rm ~and~} \sum_{i=1}^n q_i =1.
\end{align*}
\ARROW Then choose arbitrary the probability matrix $P$ (usual restrictions apply):
\begin{itemize}
\item The initial point we choose with the probability ${q_i}$.
\item If in the moment $t$ the point is in the $i_t$ state, then in the time $t+1$ it will be in the state $i_{t+1}$ with the probability $p_{i_t,i_{t+1}}$.
\item The walk ends when we reach $0$ state.
\item For the trajectory we assign a matrix:
\end{itemize}
\begin{align*}
Y(\gamma_k)=\frac{ h_{i_1 i}  h_{i_2 i_1}... h_{i_k i_{k-1}} }{ p_{i_1 i}  p_{i_2 i_1}... p_{i_k i_{k-1}}  } \frac{1}{q_{i_0}p_{i_k 0} } e_{i_k i_0} \in  \mathbb{R}^n \times\mathbb{R}^n 
\end{align*}
\ARROW The mean of $Y(\gamma)$ is an unbiased estimator of the $(MA)^{-1}$ matrix.\\
\ARROW The Wasow estimator reads:
\begin{align*}
Y^{\ast}=\sum_{m=0}^k  \frac{ h_{i_1 i}  h_{i_2 i_1}... h_{i_m i_{m-1}} }{ p_{i_1 i}  p_{i_2 i_1}... p_{i_m i_{m-1}}  } e_{i_m i_0} \in  \mathbb{R}^n \times\mathbb{R}^n 
\end{align*}
\end{footnotesize}

\end{minipage}

\end{frame}




\begin{frame}\frametitle{Partial differential equations, intro}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW Let's say we want to describe a point that walks on the $\mathbb{R}$ axis:
\begin{itemize}
\item At the beginning $(t=0)$ the particle is at $x=0$
\item If in the $t$ the particle is in the $x$ then in the time $t+1$ it walks to $x+1$ with the known probability $p$ and to the point $x-1$ with the probability $q=1-p$. 
\item The moves are independent.
\end{itemize}
\ARROW So let's try to describe the motion of the particle. \\
\ARROW The solution is clearly a probabilistic problem. Let $\nu(x,t)$ be a probability that at time $t$ particle is in position $x$. We get the following equation:
\begin{align*}
\nu(x,t+1)=p \nu(x-1,t)+q \nu(x+1,t)
\end{align*}
with the initial conditions:
\begin{align*}
\nu(0,0)=1,~~~~~\nu(x,0)=0~~{\rm if~}x \neq 0.
\end{align*}
\ARROW The above function describes the whole system (every $(t,x)$ point).
\end{footnotesize}

\end{minipage}

\end{frame}



\begin{frame}\frametitle{Partial differential equations, intro}
\begin{minipage}{\textwidth}
\begin{tiny}
\ARROW Now in differential equation language we would say that the particle walks in steps of $\Delta x$ in times: $k\Delta t$, $k=1,2,3....$:
\begin{align*}
\nu(x,t+\Delta t)=p\nu(x-\Delta x,t)+q\nu(x+\Delta x,t).
\end{align*}
\ARROW To solve this equation we need to expand the $\nu(x,t)$ function in the Taylor series:
\begin{align*}
\nu(x,t) + \frac{\partial \nu(x,t)}{\partial t} \Delta t = p \nu(x,t) - p \frac{\partial\nu(x,t) }{\partial x} \Delta x + \frac{1}{2} p \frac{\partial^2 \nu(x,t)}{\partial x^2} (\Delta x)^2\\ + q \nu(x,t) + q \frac{\partial\nu(x,t) }{\partial x} \Delta x + \frac{1}{2} q \frac{\partial^2 \nu(x,t)}{\partial x^2} (\Delta x)^2
\end{align*}
\ARROW From which we get:
\begin{align*}
\frac{\partial \nu(x,t)}{\partial t} \Delta t = -(p-q) \frac{\partial \nu(x,t) }{\partial x}\Delta x + \frac{1}{2} \frac{\partial^2 \nu(x,t) }{\partial x^2}(\Delta x)^2
\end{align*}
\ARROW Now we divide the equation by $\Delta t$ and take the limit $\Delta t \to 0$:
\begin{align*}
(p-q) \frac{\Delta x }{\Delta t}  \to 2 c,~~~~~~\frac{ (\Delta x)^2}{\Delta t } \to 2D,
\end{align*}
\ARROW We get the Fokker-Planck equation for the diffusion with current:
\begin{align*}
\frac{\partial \nu(x,t)}{\partial t } = -2c \frac{\partial \nu(x,t) }{\partial x} + D \frac{\partial^2 \nu(x,t)}{\partial x^2}
\end{align*}
\ARROW The $D$ is the diffusion coefficient, $c$ is the speed of current. For $c=0$ it is a symmetric distribution.

\end{tiny}

\end{minipage}

\end{frame}






\begin{frame}\frametitle{Laplace equation, Dirichlet boundary conditions}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW The aforementioned example shows the way to solve the partial differential equation using Markov Chain MC. \\
\ARROW We will see how different classes of partial differential equations can be approximated with a Markov Chain MC, whose expectation value is the solution of the equation.
\ARROW The Laplace equation:
\begin{align*}
\frac{\partial^2 u  }{\partial x_1^2 } +\frac{\partial^2 u  }{\partial x_2^2 }+...+\frac{\partial^2 u  }{\partial x_k^2 }=0
\end{align*}
The $u(x_1,x_2,...,x_k)$ function that is a solution of above equation we call harmonic function. If one knows the values of the harmonic function on the edges  $\Gamma(D)$ of the $D$ domain one can solve the equation.\\
\begin{exampleblock}{The Dirichlet boundary conditions:}
Find the values of $u(x_1,x_2,...,x_k)$ inside the $D$ domain knowing the values of the edge are given with a function:
\begin{align*}
u(x_1,x_2,...,x_k)=f(x_1,x_2,...,x_k) \in \Gamma(D)
\end{align*}
\end{exampleblock}
\ARROW Now I am lazy so I put $k=2$ but it's the same for all $k$!

\end{footnotesize}

\end{minipage}

\end{frame}




\begin{frame}\frametitle{Laplace equation, Dirichlet boundary conditions}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\begin{columns}
\column{0.1in}
{~}\\
\column{3in}
\ARROW We will put the Dirichlet boundary condition as a discrete condition:\\
\begin{itemize}
\item The domain $D$ we put a lattice with distance $h$.
\item Some points we treat as inside {\color{green}(denoted with circles)}. They form a set denoted $D^{\ast}$.
\item The other points we consider as the boundary points and they form a set $\Gamma(D)$.
\end{itemize}

\column{2in}
\begin{center}
\includegraphics[width=0.95\textwidth]{images/dir.png}
\end{center}

\end{columns}
\ARROW We express the second derivatives with the discrete form:
\begin{align*}
\frac{ \frac{u(x+h)-u(x)}{h} -\frac{u(x)-u(x-h) }{h}   }{h} = \frac{u(x+h)-2u(x)+u(x-h)}{h^2}
\end{align*}
\ARROW Now we choose the units so $h=1$.

\end{footnotesize}
\end{minipage}
\end{frame}


\begin{frame}\frametitle{Laplace equation, Dirichlet boundary conditions}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\begin{exampleblock}{The Dirichlet condition in the discrete form:}
Find the $u^{\ast}$ function which obeys the differential equation:
\begin{align*}
u^{\ast}(x,y)=\frac{1}{4}\left[ u^{\ast}(x-1,y)+u^{\ast}(x+1,y)+u^{\ast}(x,y-1)+u^{\ast}(x,y+1) \right]
\end{align*}
in all points $(x,y) \in D^{\ast}$ with the condition:
\begin{align*}
u^{\ast}(x,y)=f^{\ast}(x,y),~~~(x,y) \in \Gamma(D^{\ast})
\end{align*}
where $f^{\ast}(x,y)$ is the discrete equivalent of $f(x,y)$ function.
\end{exampleblock}
\ARROW We consider a random walk over the lattice $D^{\ast} \cup \Gamma(D^{\ast})$.
\begin{itemize}
\item In the $t=0$ we are in some point $(\xi,\eta) \in D^{\ast}$
\item If at the $t$ the particle is in $(x,y)$ then at $t+1$ it can go with equal probability to any of the four neighbour lattices: $(x-1,y)$, $(x+1,y)$, $(x,y-1)$, $(x,y+1)$.
\item If the particle at some moment gets to the edge $\Gamma(D^{\ast})$ then the walk is terminated.
\item For the particle trajectory we assign the value of: $\nu(\xi,\eta)=f^{\ast}(x,y)$, where $(x,y)\in \Gamma(D^{\ast})$.
\end{itemize}
\end{footnotesize}
\end{minipage}
\end{frame}




\begin{frame}\frametitle{Laplace equation, Dirichlet boundary conditions}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW Let $p_{\xi,\eta}(x,y)$ be the probability that a walk starting in $(\xi,\eta)$ ends in $(x,y)$.\\
\ARROW The possibilities:
\begin{enumerate}
\item The point $(\xi,\eta) \in \Gamma(D^{\ast})$. Then:
\begin{align}
p_{\xi,\eta}(x,y)=\begin{cases}
1,~~(x,y)=(\xi,\eta)\\
0,~~(x,y)\neq  (\xi,\eta)
\end{cases}\label{eq:trivial}
\end{align}
\item The point $(\xi,\eta) \in D^{\ast}$:
\begin{align}
p_{\xi,\eta}(x,y) = \frac{1}{4}\left[ p_{\xi-1,\eta}(x,y) + p_{\xi+1,\eta}(x,y)+  p_{\xi,\eta-1}(x,y)+ p_{\xi,\eta+1}(x,y) \right]
\label{eq:1}
\end{align}


\end{enumerate}
this is because to get to $(x,y)$ the particle has to walk through one of the neighbours: $(x-1,y)$, $(x+1,y)$, $(x,y-1)$, $(x,y+1)$.\\
\ARROW The expected value of the $\nu(\xi,\eta)$ is given by equation:
\begin{align}
E(\xi,\eta)=\sum_{(x,y)\in \Gamma(D^{\ast})} p_{\xi,\eta}(x,y) f^{\ast}(x,y)\label{eq:2}
\end{align}
where the summing is over all boundary points.
\end{footnotesize}
\end{minipage}
\end{frame}





\begin{frame}\frametitle{Laplace equation, Dirichlet boundary conditions}
\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW Now multiplying the \ref{eq:1} by $f^{\ast}(x,y)$ and summing over all edge points $(x,y)$:
\begin{align*}
E(\xi,\eta)=\frac{1}{4}\left[ E(\xi-1,\eta) +  E(\xi+1,\eta) + E(\xi,\eta-1) +  E(\xi,\eta+1) \right]
\end{align*}
\ARROW Putting now \ref{eq:trivial} to \ref{eq:2} one gets:
\begin{align*}
E(\xi,\eta)=f^{\ast}(\xi,\eta),~~(\xi,\eta) \in \Gamma(D^{\ast})
\end{align*}
\ARROW Now the expected value solves identical equation as our $u^{\ast}(x,y)$ function. From this we conclude:
\begin{align*}
E(x,y)=u^{\ast}(x,y)
\end{align*}
\ARROW The algorithm:
\begin{itemize}
\item We put a particle in $(x,y)$.
\item We observe its walk up to the moment when it reaches the edge $\Gamma(D^{\ast})$.
\item We calculate the value of $f^{\ast}$ function in the point where the particle stops.
\item Repeat the walk $N$ times taking the average afterwards.
\end{itemize}
\begin{alertblock}{Important:}
One can show that the error does not depend on the dimensions!
\end{alertblock}

\end{footnotesize}
\end{minipage}
\end{frame}



\begin{frame}\frametitle{Example}

\begin{minipage}{\textwidth}
\begin{footnotesize}
Let function $u(x,y)$ be a solution of Laplace equation  in the square: $0 \leq x,y \leq 4$ with the boundary conditions:
\begin{align*}
u(x,0)=0,~~~u(4,y)=y,~~~u(x,4)=x,~~~u(0,y)=0
\end{align*}
\ARROWR Find the $u(2,2)$!\\
\ARROW The exact solution: $u(x,y)=xy/4$ so $u(2,2)=1$.
\begin{columns}
\column{0.1in}
{~}\\
\column{3in}
\begin{itemize}
\item We transform the continuous problem to a discrete one with $h=1$. 
\item Perform a random walk starting from $(2,2)$ which ends on the edge assigning as a result the appropriative values of the edge conditions as an outcome.
\end{itemize}

\column{2in}
\begin{center}
\includegraphics[width=0.95\textwidth]{images/problem1.png}
\end{center}

\end{columns}
\ARROW E9.1 Implement the above example and find $u(2,2)$.

\end{footnotesize}
\end{minipage}
\end{frame}



\begin{frame}\frametitle{Parabolic equation}

\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW We are looking for a function $u(x_1,x_2,...,x_k,t)$, which inside the $D \subset \mathbb{R}^k$ obeys the parabolic equation:
\begin{align*}
\frac{\partial^2 u  }{\partial x_1^2 } +\frac{\partial^2 u  }{\partial x_2^2 }+...+\frac{\partial^2 u  }{\partial x_k^2 }=c \frac{\partial u}{\partial t}
\end{align*}
with the boundary conditions:
\begin{align*}
u(x_1,x_2,...,x_k,t)=g(x_1,x_2,...,x_k,t),~~~(x_1,x_2,x_3,...,x_k)\in \Gamma(D)
\end{align*}
and with the initial conditions:
\begin{align*}
u(x_1,x_2,...,x_k,0)=h(x_1,x_2,...,x_k,t),~~~(x_1,x_2,x_3,...,x_k)\in D
\end{align*}
\ARROW In the general case the boundary conditions might have also the derivatives. \\
\ARROW We will find the solution to the above problem using random walk starting from 1-dim case and then generalize it for n-dim.
\end{footnotesize}
\end{minipage}
\end{frame}


\begin{frame}\frametitle{Parabolic equation, 1-dim}

\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW We are looking for a function $u(x,t)$, which satisfies the equation:
\begin{align*}
\frac{\partial^2 u  }{\partial x^2 } = c \frac{\partial u}{\partial t}
\end{align*}
with the boundary conditions:
\begin{align*}
u(0,t)=f_1(t),~~u(a,t)=f_2(t)
\end{align*}
and with the initial conditions:
\begin{align*}
u(x,0)=g(x).
\end{align*}
\ARROW The above equation can be seen as describing the temperature along a line evolving in time. We know the initial temperature at different points and we know the temperature at the end points.\\
\ARROW The above problem can be discretized:
\begin{align*}
x=kh,~~h=\frac{a}{n},~k=1,2,...n~~~~~~~~t=jl,~j=0,1,2,3...,~l={\rm const}
\end{align*}
\ARROW The differential equation:
\begin{align*}
\frac{u(x+h,t-l)   -2u(x,t-l)+u(x-h,t-l)}{h^2}=c \frac{u(x,t)-u(x,t-l)}{l}
\end{align*}


\end{footnotesize}
\end{minipage}
\end{frame}




\begin{frame}\frametitle{Parabolic equation, 1-dim}

\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW The steps we choose such that: $c h^2 = 2l$.\\
\ARROW Then we obtain the equation:\\
\begin{align*}
u(x,t)=\frac{1}{2}u(x+h,t-l)+\frac{1}{2}u(x-h,t-l)
\end{align*}
\ARROW The value of the function $u$ in the point $x$ and $t$ can be evaluated with the arithmetic mean from points: $x+h$ and $x-h$ in the previous time step.
\ARROW The algorithm estimating the function in the time $\tau$ and point $\xi$:
\begin{itemize}
\item We put the particle in the point $\xi$ with a ``weight'' equal to $\tau$. 
\item If in a given time step $t$ particle is at $x$ then with $50:50$ chances it can go to $x-h$ or $x+h$ and time $t-l$.
\item The particle ends the walk in two situations:
\begin{itemize}
\item If it reaches the $x=0$ or $x=a$. In this case we assign to a given trajectory a value of $f_1(t)$ or $f_2(t)$, where $t$ is the actual ``weight''.
\item If the ``weight'' of the particle is equal to zero. In this case we assign as a value of the trajectory the $g(x)$, where $x$ is the actual position of the particle.
\end{itemize} 
\end{itemize}

\end{footnotesize}
\end{minipage}
\end{frame}



\begin{frame}\frametitle{Parabolic equation, 1-dim}

\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW Repeat the above procedure $N$ times. The expected value of a function $u$ in $(\xi,\tau)$ point is the mean of observed values.

\begin{exampleblock}{Digression:}
The 1-dim case can be treated as a 2-dim $(x,t)$, where the area is unbounded in the $t$ dimension. The walk is terminated after maximum $\tau/l$ steps.

\end{exampleblock}
\begin{center}
\includegraphics[width=0.6\textwidth]{images/par.png}
\end{center}

\end{footnotesize}
\end{minipage}
\end{frame}





\begin{frame}\frametitle{Parabolic equation, 1-dim}

\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW Repeat the above procedure $N$ times. The expected value of a function $u$ in $(\xi,\tau)$ point is the mean of observed values.

\begin{exampleblock}{Digression:}
The 1-dim case can be treated as a 2-dim $(x,t)$, where the area is unbounded in the $t$ dimension. The walk is terminated after maximum $\tau/l$ steps.

\end{exampleblock}
\begin{center}
\includegraphics[width=0.6\textwidth]{images/par.png}
\end{center}

\end{footnotesize}
\end{minipage}
\end{frame}


\begin{frame}\frametitle{Parabolic equation, n-dim generalization}

\begin{minipage}{\textwidth}
\begin{footnotesize}
\ARROW We still choose the $h$ and $l$ values accordingly to:
\begin{align*}
\frac{ch^2}{l}=2k
\end{align*}
where $k$ is the number of space dimensions.\\
\ARROW We get:
\begin{align*}
u(x_1,x_2,...,x_k,t)=\frac{1}{2k} \lbrace u(x_1+h,x_2,..,x_k,t-l) + u(x_1-h,x_2,..,x_k,t-l) \\ +...+u(x_1,x_2,..,x_k+h,t-l)+u(x_1,x_2,..,x_k-h,t-l) \rbrace
\end{align*}
\ARROW The $k$-dimensional problem we can solve in the same way as the 1-dim one.\\
\ARROW In each point we have $2k$ possibilities to move (left/right in each of the dimensions). The probability of each move has to be $\frac{1}{2k}$.


\end{footnotesize}
\end{minipage}
\end{frame}




\backupbegin   

\begin{frame}\frametitle{Backup}


\end{frame}

\backupend			

\end{document}