% Newer
% Older
% Lecture_repo / Lectures_my / MC_2016 / Lecture3 / mchrzasz.tex
% @mchrzasz mchrzasz on 3 Jun 2016 41 KB fixed typo
\documentclass[11pt,xcolor={dvipsnames,svgnames,x11names,table}]{beamer}

\usepackage[english]{babel} 
\usepackage{polski}         
\usepackage[skins,theorems]{tcolorbox}
\tcbset{highlight math style={enhanced,
  colframe=red,colback=white,arc=0pt,boxrule=1pt}}

\usetheme[
	bullet=circle,		% Other option: square
	bigpagenumber,		% circled page number on lower right
	topline=true,			% colored bar at the top of the frame 
	shadow=false,			% Shading for beamer blocks
	watermark=BG_lower,	% png file for the watermark
	]{Flip}

%\logo{\kern+1.em\includegraphics[height=1cm]{SHiP-3_LightCharcoal}}
                            

\usepackage[lf]{berenis}
\usepackage[LY1]{fontenc}
\usepackage[utf8]{inputenc}

\usepackage{emerald}
\usefonttheme{professionalfonts}
\usepackage[no-math]{fontspec}	
\usepackage{listings}
\defaultfontfeatures{Mapping=tex-text}	% This seems to be important for mapping glyphs properly

\setmainfont{Gillius ADF}			% Beamer ignores "main font" in favor of sans font
\setsansfont{Gillius ADF}			% This is the font that beamer will use by default
% \setmainfont{Gill Sans Light}		% Prettier, but harder to read

\setbeamerfont{title}{family=\fontspec{Gillius ADF}}

\input t1augie.fd

%\newcommand{\handwriting}{\fontspec{augie}} % From Emerald City, free font
%\newcommand{\handwriting}{\usefont{T1}{fau}{m}{n}} % From Emerald City, free font
% \newcommand{\handwriting}{}	% If you prefer no special handwriting font or don't have augie

%% Gill Sans doesn't look very nice when boldfaced
%% This is a hack to use Helvetica instead
%% Usage: \textbf{\forbold some stuff}
%\newcommand{\forbold}{\fontspec{Arial}}

\usepackage{graphicx}
\usepackage[export]{adjustbox}

\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{colortbl}
\usepackage{mathrsfs} 			% For Weinberg-esque letters
\usepackage{cancel}				% For "SUSY-breaking" symbol
\usepackage{slashed}            % for slashed characters in math mode
\usepackage{bbm}                % for \mathbbm{1} (unit matrix)
\usepackage{amsthm}				% For theorem environment
\usepackage{multirow}			% For multi row cells in table
\usepackage{arydshln} 			% For dashed lines in arrays and tables
\usepackage{siunitx}
\usepackage{xhfill}
\usepackage{grffile}
\usepackage{textpos}
\usepackage{subfigure}
\usepackage{tikz}
\usepackage{hyperref}
%\usepackage{hepparticles}    
\usepackage[italic]{hepparticles}     

\usepackage{hepnicenames} 

% Drawing a line
\tikzstyle{lw} = [line width=20pt]
% \topline overlays a colored horizontal rule just below the top edge of the
% current page, using TikZ's "remember picture, overlay" mechanism.
% NOTE(review): the color 'crimsonred' is not defined in this file -- it is
% presumably provided by the Flip beamer theme; verify before reuse elsewhere.
\newcommand{\topline}{%
  \tikz[remember picture,overlay] {%
    \draw[crimsonred] ([yshift=-23.5pt]current page.north west)
             -- ([yshift=-23.5pt,xshift=\paperwidth]current page.north west);}}



% % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
\usepackage{tikzfeynman}		% For Feynman diagrams
\usetikzlibrary{arrows,shapes}
\usetikzlibrary{trees}
\usetikzlibrary{matrix,arrows} 				% For commutative diagram
% http://www.felixl.de/commu.pdf
\usetikzlibrary{positioning}				% For "above of=" commands
\usetikzlibrary{calc,through}				% For coordinates
\usetikzlibrary{decorations.pathreplacing}  % For curly braces
% http://www.math.ucla.edu/~getreuer/tikz.html
\usepackage{pgffor}							% For repeating patterns

\usetikzlibrary{decorations.pathmorphing}	% For Feynman Diagrams
\usetikzlibrary{decorations.markings}
% TikZ line styles for hand-drawn Feynman diagrams.
% Arrowed styles place the arrow at 55% of the path length.
\tikzset{
	% >=stealth', %%  Uncomment for more conventional arrows
	% Wavy lines for vector bosons; amplitude sign flips the wiggle phase.
	vector/.style={decorate, decoration={snake}, draw},
	provector/.style={decorate, decoration={snake,amplitude=2.5pt}, draw},
	antivector/.style={decorate, decoration={snake,amplitude=-2.5pt}, draw},
	% Gray solid lines with mid-path arrows for fermion flow direction.
	fermion/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{>}}}},
	fermionbar/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{<}}}},
	fermionnoarrow/.style={draw=gray},
	% Coiled line for gluons.
	gluon/.style={decorate, draw=black,
		decoration={coil,amplitude=4pt, segment length=5pt}},
	% Dashed lines for scalars, with optional flow arrows.
	scalar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	scalarbar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{<}}}},
	scalarnoarrow/.style={dashed,draw=black},
	% Black solid line with arrow, e.g. for external electron lines.
	electron/.style={draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	bigvector/.style={decorate, decoration={snake,amplitude=4pt}, draw},
}

% TIKZ - for block diagrams, 
% from http://www.texample.net/tikz/examples/control-system-principles/
% \usetikzlibrary{shapes,arrows}
\tikzstyle{block} = [draw, rectangle, 
minimum height=3em, minimum width=6em]




\usetikzlibrary{backgrounds}
\usetikzlibrary{mindmap,trees}	% For mind map
\newcommand{\degree}{\ensuremath{^\circ}}  % degree sign, usable in text and math
\newcommand{\E}{\mathrm{E}}                % expectation-value symbol (upright)
\newcommand{\Var}{\mathrm{Var}}            % variance symbol (upright)
\newcommand{\Cov}{\mathrm{Cov}}            % covariance symbol (upright)
\newcommand\Ts{\rule{0pt}{2.6ex}}       % Top strut
\newcommand\Bs{\rule[-1.2ex]{0pt}{0pt}} % Bottom strut

\graphicspath{{images/}}	% Put all images in this directory. Avoids clutter.

% SOME COMMANDS THAT I FIND HANDY
% \renewcommand{\tilde}{\widetilde} % dinky tildes look silly, dosn't work with fontspec
%\newcommand{\comment}[1]{\textcolor{comment}{\footnotesize{#1}\normalsize}} % comment mild
%\newcommand{\Comment}[1]{\textcolor{Comment}{\footnotesize{#1}\normalsize}} % comment bold
%\newcommand{\COMMENT}[1]{\textcolor{COMMENT}{\footnotesize{#1}\normalsize}} % comment crazy bold
\newcommand{\Alert}[1]{\textcolor{Alert}{#1}} % louder alert
\newcommand{\ALERT}[1]{\textcolor{ALERT}{#1}} % loudest alert
%% "\alert" is already a beamer pre-defined
\newcommand*{\Scale}[2][4]{\scalebox{#1}{$#2$}}%

\def\Put(#1,#2)#3{\leavevmode\makebox(0,0){\put(#1,#2){#3}}}

\usepackage{gmp}
\usepackage[final]{feynmp-auto}

\usepackage[backend=bibtex,style=numeric-comp,firstinits=true]{biblatex}
\bibliography{bib}
\setbeamertemplate{bibliography item}[text]

\makeatletter\let\frametextheight\beamer@frametextheight\makeatother

% suppress frame numbering for backup slides
% you always need the appendix for this!
% Suppress frame numbering for backup slides: call \backupbegin right after
% \appendix and \backupend at the very end of the document; the main-part
% frame counter is restored so backup slides don't inflate the page total.
% Fixes vs. original: (1) \newcounter inside the macro caused an "already
% defined" error if \backupbegin was ever invoked twice -- now guarded;
% (2) added trailing % inside the definitions so no spurious spaces leak
% into the output.
\newcommand{\backupbegin}{%
	\ifcsname c@framenumberappendix\endcsname\else
		\newcounter{framenumberappendix}%
	\fi
	\setcounter{framenumberappendix}{\value{framenumber}}%
}
\newcommand{\backupend}{%
	\addtocounter{framenumberappendix}{-\value{framenumber}}%
	\addtocounter{framenumber}{\value{framenumberappendix}}%
}


\definecolor{links}{HTML}{2A1B81}
%\hypersetup{colorlinks,linkcolor=,urlcolor=links}

% For shapo's formulas: "less/greater than or similar" relation symbols.
\def\lsi{\raise0.3ex\hbox{$<$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\def\gsi{\raise0.3ex\hbox{$>$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\newcommand{\lsim}{\mathop{\lsi}}
\newcommand{\gsim}{\mathop{\gsi}}
\newcommand{\wt}{\widetilde}
%\newcommand{\ol}{\overline}
% Upright operator/unit names. FIX vs. original: "\rm{Tr}" misused \rm (a font
% *switch*, not a one-argument command), so everything following the macro in
% the same group was also set in roman; \mathrm{...} confines the change.
\newcommand{\Tr}{\mathrm{Tr}}
\newcommand{\tr}{\mathrm{tr}}
\newcommand{\eqn}[1]{&\hspace{-0.7em}#1\hspace{-0.7em}&}
% FIX vs. original: "\rm{$\langle #1 \rangle$}" broke when used inside math
% mode (the inner $ terminated math); \ensuremath works in text and math.
\newcommand{\vev}[1]{\ensuremath{\langle #1 \rangle}}
\newcommand{\abs}[1]{\ensuremath{\left| #1 \right|}}
\newcommand{\eV}{\mathrm{eV}}
\newcommand{\keV}{\mathrm{keV}}
\newcommand{\GeV}{\mathrm{GeV}}
\newcommand{\im}{\mathrm{Im}}
\newcommand{\disp}{\displaystyle}
% Short environment aliases (kept for compatibility with the slide bodies).
\def\be{\begin{equation}}
\def\ee{\end{equation}}
\def\ba{\begin{eqnarray}}
\def\ea{\end{eqnarray}}
\def\d{\partial}
\def\l{\left(}   % WARNING: overrides standard \l (letter "l with stroke"); kept for compatibility
\def\r{\right)}  % WARNING: overrides standard \r (ring accent); kept for compatibility
\def\la{\langle}
\def\ra{\rangle}
\def\e{{\rm e}}
\def\Br{{\rm Br}}
\def\fixme{{\color{red} FIXME!}}
\def\mc{{\color{Magenta}{MC}}}
\def\pdf{{\rm p.d.f.}}
% NOTE(review): \xspace needs the xspace package -- presumably loaded by the
% Flip theme, since it is not \usepackage'd in this file; verify.
\def\ARROW{{\color{JungleGreen}{$\Rrightarrow$}}\xspace}
\author{ {\fontspec{Trebuchet MS}Marcin Chrz\k{a}szcz} (Universit\"{a}t Z\"{u}rich)}
\institute{UZH}
\title[Adaptive Monte Carlo Integration Methods]{Adaptive Monte Carlo Integration Methods}
\date{\fixme}


\begin{document}
\tikzstyle{every picture}+=[remember picture]

{
\setbeamertemplate{sidebar right}{\llap{\includegraphics[width=\paperwidth,height=\paperheight]{bubble2}}}
\begin{frame}[c]%{\phantom{title page}} 
\begin{center}
\begin{center}
	\begin{columns}
		\begin{column}{0.9\textwidth}
			\flushright\fontspec{Trebuchet MS}\bfseries \Huge {Adaptive Monte Carlo Integration Methods}
		\end{column}
		\begin{column}{0.2\textwidth}
		  %\includegraphics[width=\textwidth]{SHiP-2}
		\end{column}
	\end{columns}
\end{center}
	\quad
	\vspace{3em}
\begin{columns}
\begin{column}{0.44\textwidth}
\flushright \vspace{-1.8em} {\fontspec{Trebuchet MS} \Large Marcin Chrz\k{a}szcz\\\vspace{-0.1em}\small \href{mailto:mchrzasz@cern.ch}{mchrzasz@cern.ch}}

\end{column}
\begin{column}{0.53\textwidth}
\includegraphics[height=1.3cm]{uzh-transp}
\end{column}
\end{columns}

\vspace{1em}
%		\footnotesize\textcolor{gray}{With N. Serra, B. Storaci\\Thanks to the theory support from M. Shaposhnikov, D. Gorbunov}\normalsize\\
\vspace{0.5em}
 \textcolor{normal text.fg!50!Comment}{Monte Carlo methods, \\ 10 March, 2016}  

\end{center}
\end{frame}
}

\begin{frame}\frametitle{Classical methods of variance reduction}                                                    
\begin{footnotesize}                                                                                                 
                                                                                                                     
\ARROW In Monte Carlo methods the statistical uncertainty is defined as:                                     
\begin{align*}                                                                                                       
\sigma = \dfrac{1}{\sqrt{N}}\sqrt{V(f)}                                                                              
\end{align*}                                                                                                         
\ARROW Obvious conclusion:                                                                                   
\begin{itemize}                                                                                                      
\item To reduce the uncertainty one needs to increase $N$.\\                                                         
$\rightrightarrows$ Slow convergence. In order to reduce the error by factor of 10 one needs to simulate factor of 100 more points!                                                                                                      
\end{itemize}                                                                                                        
\ARROW However, the other handle ($V(f)$) can be changed! $\longrightarrow$ Lots of theoretical effort goes into reducing this factor.\\
\ARROW We will discuss {\color{Mahogany}{four}} classical methods of variance reduction:                     
\begin{enumerate}                                                                                                    
\item Stratified sampling.                                                                                           
\item Importance sampling.                                                                                           
\item Control variates.                                                                                              
\item Antithetic variates.                                                                                           
\end{enumerate}                                                                                                      
                                                                                                                     
                                                                                                                     
\end{footnotesize}                                                                                                   
\end{frame}     




\begin{frame}\frametitle{Disadvantages of classical variance reduction methods}
\begin{footnotesize}                                                                                                 
                                                                                                                     
\ARROW All aforementioned methods (besides the Stratified sampling) require knowledge of the integrated function!\\
\ARROW If you use the method in an incorrect way, you can easily get the opposite effect than intended. \\

\ARROW Successful application of them requires non-negligible effort before running the program.\\
\ARROW A natural solution would be a program that is ``smart'' enough to learn something about our function on its own while it is trying to calculate the integral.\\
\ARROW Similar techniques were already created for numerical integration!\\
\ARROW Truly adaptive methods are nontrivial to code but are widely available in external packages as we will learn.\\
\ARROW Naming conventions:
\begin{itemize}
\item Integration \mc - software that is able to compute JUST integrals.
\item Generator \mc - software that BESIDES being able to perform the integration is also capable of generating points according to the integrated function.
\end{itemize}
                                                                                                             


\end{footnotesize}                                                                                                   
\end{frame}     

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Schematic of running this kind of methods}
\begin{footnotesize}                                                                                                 
                                                                                                                     \begin{enumerate}
                                                                                                                     \item Function probing (exploration):
                                                                                                                    \begin{itemize}
\item Recursive algorithm that searches for hyper-surfaces in which the function is approximately constant. For evaluation of an integral in a given hyper-surface normally one uses numerical or \mc~crude methods. In general it is not an easy task!
\item Often the function is approximated by a given set of elementary functions. 
                                                                                                                    \end{itemize}
\item Calculation phase
\begin{itemize}
\item The integral is calculated mostly using Stratified Sampling and Importance Sampling, depending on the exploration phase.
\item If a \mc~program has the capability to generate distributions according to the function whose integral we want to calculate, this is the place where it happens.
\end{itemize}                                                                                                                   
                                                                                                                    \end{enumerate}
\ARROW There are algorithms where the exploration phase is linked with the calculation phase. In each optimisation step the integral is calculated as well. The result will be a weighted average of those integrals!
\begin{alertblock}{~}
This method might be biased! If in the exploration phase the algorithm picks up a function peak too late, the whole method will lead to systematically biased results.
\end{alertblock}
                                                                                                                    


\end{footnotesize}                                                                                                   
\end{frame} 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{RIWIAD} algorithm}
\begin{footnotesize}                                                                                                 
\ARROW The first algorithm of this kind, \texttt{RIWIAD}, was proposed by Sheppey \& Lautrup in the $1970$s. It was used to calculate integrals in the cube $(0,1)^n$. \\
\ARROW It worked as follows:
\begin{itemize}
\item At the beginning the hyper-cube is divided into equal-size sub-cubes and in each of them the integral is calculated.
\item Based on the calculated integrals the program moves the boundaries to make the hyper-cubes smaller in the places where the function is greater and larger where the function is smaller.

\item The process starts over and continues over and over again. At each step the integral estimator and its standard deviation are calculated. From those a weighted average and its standard deviation are constructed.

\item The process stops when the standard deviation reaches our desired sensitivity.
                                                                                                                    
                                                                                                            
                                                                                                               
                                                                                                                    \end{itemize}

\ARROW Disadvantages:
\begin{itemize}
\item Hyper-cubes are always parallel to the coordinate axes.
\item Some areas are divided even though they didn't have to be.
\item The weighted average might be a biased estimator.
\end{itemize}



\end{footnotesize}                                                                                                   
\end{frame} 



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Friedman's algorithm}
\begin{small}                                                                                                 
\ARROW In the $1970$s J.~Friedman also developed an adaptive \mc~integration algorithm.\\
\ARROW The algorithm was as follows:
\begin{itemize}
\item A probe function is constructed using a combination of Cauchy functions (Breit-Wigner), in which the peaks correspond to the local maxima of the integrated function. In order to do so one needs to study the eigenfunctions in the neighbourhood of each peak (nasty thing...).

\item The Breit-Wigner is chosen as it falls down to $0$ slower than a Gauss distribution.

\item The integral and the standard deviation are calculated based on the weighted average based on the probe function.
                                                                                                                 
                                                                                                                  \end{itemize}
                                                                                                                  \begin{alertblock}{Disadvantage:}
Cannot be applied to functions that cannot be approximated with a small number of Breit-Wigner functions.
\end{alertblock}                                                                                                                    

\end{small}                                                                                                   
\end{frame} 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{DIVIONNE2} algorithm}
\begin{footnotesize}                                                                                                 
\ARROW J.~Friedman (1977): adaptive algorithm for \mc~integration based on recursive division of the integration area (available in the CERNLIB package).\\
\ARROW The algorithm:
\begin{itemize}
\item Multidimensional division of the hyper-cube. We divide each of the initial sub-cubes to minimise the spread of the function.
\item After this the integral is calculated using Stratified Sampling.
\item We can generate events accordingly to this function with this method.
\end{itemize}

\ARROW Disadvantages:
\begin{itemize}
\item Hyper-cubes are always parallel to the coordinate axes.
\end{itemize}
\ARROW Advantages:
\begin{itemize}
\item Because we divide only one hyper-cube at a time, the procedure doesn't get biased as easily as \texttt{RIWIAD} does.
\end{itemize}


\end{footnotesize}                                                                                                   
\end{frame} 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{VEGAS} algorithm}
\begin{footnotesize}                                                                                                 
\ARROW G.~P. Lepage (1978): adaptive algorithm for \mc~integration based on iterative division of the integration area (similar to \texttt{RIWIAD}).\\
\ARROW Let's calculate: $\int_0^1 f(x)dx$.
\begin{itemize}
\item We generate M random points from $\mathcal{U}(0,1)$. We calculate from them the integral and standard deviation.
\item Now we divide the integration region in N equal subdivisions:
\begin{align*}
0=x_0<x_1<x_2<...<x_N=1,~\Delta x_i =x_i-x_{i-1}
\end{align*}
\item Now each of these subdivisions we divide further into $m_i +1$ subsubdivisions.
\begin{align*}
m_i=K \dfrac{\overline{f}_i \Delta x_i}{\sum_j \overline{f}_j \Delta x_j},~K=const.~~{{\rm typically~= 1000}}
\end{align*}
and
\begin{align*}
\overline{f}_i \equiv \sum_{ x \in [ x_{i-1},x_i )} \vert f(x) \vert \sim \dfrac{1}{\Delta x_i} \int_{x_{i-1}}^{x_i} \vert f(x) \vert dx
\end{align*}
\ARROW The new subsubareas will be ``denser'' where the function is greater and less dense where the function is smaller.

\end{itemize}

\end{footnotesize}                                                                                                   
\end{frame} 


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{VEGAS} algorithm}
\begin{footnotesize}                                                                                                 

\begin{itemize}
\item We are retrieving back the original number (N) of subdivisions by gluing together equal amounts of subsubdivisions.\\
\ARROW The new subdivisions will be smaller where the function is larger and vice versa.
\item We generate the M points according to the step-function probability:
\begin{align*}
p(x)=\dfrac{1}{N\Delta x_i}
\end{align*}
and calculate the integral using Stratified sampling.
\item We repeat the procedure until we find an optimum division:
\begin{align*}
m_i \approx m_j~i,j =1,...,N.
\end{align*}
\item In each iteration we calculate the weighted average:
\begin{align*}
\sum_k \dfrac{I_k}{\sigma_k^2},
\end{align*}
where $I_k$ and $\sigma_k$ are the integral and error in the k-th iteration.
\item After the procedure stops we calculate the final results:
\begin{align*}
\hat{I}=\sigma_I^2\sum_k\dfrac{I_k}{\sigma_k^2}~~~~~~~\sigma_I= \left[\sum_k \dfrac{1}{\sigma_k^2}\right]^{-\frac{1}{2}}
\end{align*}

\end{itemize}

\end{footnotesize}                                                                                                   
\end{frame} 


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{VEGAS} algorithm - further improvements}
\begin{footnotesize}                                                                                                 
\ARROW In order to make the integration grid more stable (it can happen that the division jumps around very rapidly) we can modify the algorithm:
\begin{align*}
m_i=K  \left[ \left[ \dfrac{\overline{f} \Delta x_i}{\sum_j \overline{f}_j \Delta x_j} -1\right] \dfrac{1}{\log \left[\overline{f}_i\Delta x_i/\sum_j \overline{f}_j \Delta x_j\right]  }   \right]^{\alpha},    
\end{align*}
where $\alpha \in [1,2]$ sets the convergence speed.
\ARROW When the function has narrow peaks the $I_k$ and $\sigma_k$ might be wrongly calculated in early stages of the iteration. To fix this we can use:
\begin{align*}
I=\left[ \sum_k \dfrac{I_k^2}{\sigma_k^2}\right]^{-1} \sum_k I_k \left( \dfrac{I_k^2}{\sigma_k^2}\right),~~~~~ \sigma_I=I\left[\sum_k\dfrac{I_k^2}{\sigma_k^2}\right]^{-0.5}
\end{align*}
\ARROW If the number of iterations is too large then you cannot trust the algorithm!

\end{footnotesize}                                                                                                   
\end{frame} 


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{VEGAS} algorithm - 2D case}
\begin{footnotesize}                                                                                                 
\ARROW Let's take for example $\int_0^1dx\int_0^1 dy f(x,y)$.\\
\ARROW We can do a trick: 
\begin{align*}
p(x,y)=p_x(x)p_y(y)
\end{align*}
\ARROW One can show using Lagrange multipliers that the optimal density has the form of:
\begin{align*}
p_x(x)= \dfrac{\sqrt{\int_0^1 dy \frac{f^2(x,y)}{p_y(y)}  }}{\int_0^1dx \sqrt{\int_0^1 dy \dfrac{f^2(x,y)}{p_y(y)}}}
\end{align*}
\ARROW So our 1D algorithm can be used to each of the axis (ex. for x axis):
\begin{align*}
(f_i)^2 = \sum_{x \in [ x_{i-1},x_i )} \sum_y \dfrac{f^2(x,y)}{p_y(y)}~~\sim ~~ \dfrac{1}{\Delta x_i} \int_{x_{i-1}}^{x_i}dx \int_0^{1}dy \dfrac{f^2(x,y)}{p_y(y)}
\end{align*}
\ARROW Analogously you do it for the y axis.


\end{footnotesize}                                                                                                   
\end{frame} 




%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{VEGAS} algorithm - an example}
\begin{footnotesize}                                                                                                 
\ARROW An example of usage: let's calculate:
\begin{align*}
I_n = \left(\dfrac{1}{a\sqrt{\pi}}\right)^n \int_0^1 \exp \left[ -\sum_{i=1}^{n}\dfrac{(x_i-0.5)^2}{a^2} \right] d^n x =1
\end{align*}
\ARROW For the $n=9$, $a=0.1$ and $\alpha=1$
\begin{tabular}{|c|c c|c c|c|}
\hline
Iteration & $I_k$ & $\sigma_k$ & $I$ & $\sigma(I)$ & Number of calculations\\ \hline \hline
$1$ &  $0.007$ & $0.005$ & $0.007$ & $0.005$ & $10^4$\\
$3$ &  $0.643$ & $0.070$ & $0.612$ & $0.064$ & $3 \cdot 10^4$\\
$5$ &  $1.009$ & $0.041$ & $0.963$ & $0.034$ & $5 \cdot 10^4$\\
$10$ &  $1.003$ & $0.041$ & $1.003$ & $0.005$ & $10^5$\\
Crude \mc~method & ~ & ~ & $0.843$ & $0.360$ & $10^5$\\ \hline \hline
\end{tabular}

\end{footnotesize}                                                                                                   
\end{frame} 




%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{VEGAS} algorithm - comparison to numerical methods}
\begin{small}                                                                                                 
\ARROW An example of usage; let's calculate:
\begin{align*}
I_n = \left(\dfrac{1}{a\sqrt{\pi}}\right)^n \int_0^1 \exp \left[ -\sum_{i=1}^{n}\dfrac{(x_i-0.5)^2}{a^2} \right] d^n x
\end{align*}
\ARROW For the $n=9$, $a=0.1$ and $\alpha=1$.
{~}\\
\begin{center}

\begin{tabular}{|c|c|c|}
\hline
Number of points on axis & Integral value & Number of calculations   \\ \hline \hline
$5$ & $71.364$ & $2 \cdot 10^6$ \\ 
$6$ & $0.017$ & $10^7$  \\
$10$ & $0.774$ & $10^9$  \\
$15$ & $1.002$ & $3.8\cdot 10^9$  \\ \hline
\end{tabular}

\end{center}




\end{small}                                                                                                   
\end{frame} 


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Now the Foam algorithm
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{FOAM} algorithm }
\begin{footnotesize}
\ARROW S.Jadach (2000), \href{http://arxiv.org/abs/physics/9910004}{arXiv:physics/9910004, Comp. Phys. Commun. 152 (2003) 55}. Adaptive method with recursive  division of the integration domain in cells. \\
\ARROW There are two algorithms in dividing the integration domain:
\begin{itemize}
\item Simplicial: Cells are simplices (hyper-triangles). This method can be applied to a not-so-large number of dimensions ($\leq 5$).
\item Cubical: Cells are hyper-cubes. This might be applied in higher numbers of dimensions ($\leq 20$).
\end{itemize}
\ARROW The algorithm:
\begin{itemize}
\item Exploration phase:\\
The integration domain (hyper-cube) is divided recursively into cells. In each step only one cell is split. The splitting is not even! The procedure is stopped when the number of cells reaches a certain number that is set by us. One constructs an approximation function and based on this the integral is calculated.
\item Generation/Calculation phase:\\
We generate random points according to the distribution of the approximation function and the integral is calculated using Importance sampling based on the approximation function.

\end{itemize}

\end{footnotesize}                                                                                                 
\end{frame} 



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{FOAM} algorithm }
\begin{footnotesize}
\begin{center}
\includegraphics[width=0.95\textwidth]{FOAM.png}
\end{center}
\end{footnotesize}                                                                                                 
\end{frame} 




%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{\texttt{FOAM} algorithm }
\begin{footnotesize}
\begin{center}
\includegraphics[width=0.75\textwidth]{FOAM2.png}\\
\end{center}
\ARROW E3.1 Using ROOT implementation of the FOAM algorithm calculate the integrals from exercise E2.3.


\end{footnotesize}                                                                                                 
\end{frame} 


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Monte Carlo vs numerical methods}
\begin{footnotesize}
\ARROW All numerical methods are based on evaluating the integral using linear combination of function:
\begin{align*}
I_Q = \sum_{i=1}^m \omega_i f(x_i)
\end{align*}
\ARROW Different methods have different weights $\omega_i$ and lattice points $x_i$.\\
\ARROW Efficiency of Monte Carlo methods compared to the numerical ones:
\begin{center}

\begin{tabular}{c|c|c}
\hline
Standard deviation & 1D & nD\\ \hline
Monte Carlo & $n^{-1/2}$ & $n^{-1/2}$ \\ 
Trapezoidal Rule & $n^{-2}$ & $n^{-2/d}$\\
Simpson Rule & $n^{-2}$ & $n^{-2/d}$\\
m-point Gauss rule & $n^{-2m}$ & $n^{-2m/d}$\\ \hline
\end{tabular}



\end{center}
\end{footnotesize}                                                                                                 
\end{frame} 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Sum up}
\begin{footnotesize}
\ARROW In one dimension the Monte Carlo method is substantially slower than the numerical methods! Even the most simple ones.\\
\ARROW In many dimensions the Monte Carlo methods rapidly gain the advantage! \\
\ARROW For $d>4$ the \mc~method is faster than the Trapezoidal Rule.\\
\ARROW For $d>8$ the \mc~method is faster than the Simpson Rule.\\
\ARROW The disadvantages of the numerical methods:
\begin{itemize}
\item Hard to apply in multi dimensions.
\item Hard to apply in complex integration domains.
\item The integration uncertainties are hard to evaluate.
\end{itemize}



\end{footnotesize}                                                                                                 
\end{frame} 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%                                                                                                            
\begin{frame}\frametitle{Method of Moments}                                                                                                                  
                                                                                                                                                             
\begin{small}                                                                                                                                                
$\color{PineGreen}\Rrightarrow$ Now real cool things!\\                                                                                                      
$\color{PineGreen}\Rrightarrow$ Let's consider we want to study a rare decay: $\PB^{\pm} \to \PK^{\pm} \Pmu \Pmu$. The decay is described by the following PDF:
\begin{equation}                                                                                                                                             
\dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} =\dfrac{3}{4}(1-F_H)(1-\cos^2 \theta_l)+F_H/2 + A_{FB}\cos \theta_l \nonumber                        
\end{equation}                                                                                                                                               
$\color{PineGreen}\Rrightarrow$ PDF by construction is normalized: $\int_{-1}^{1} \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} =1$                
\begin{columns}                                                                                                                                              
\column{0.1in}                                                                                                                                               
{~}                                                                                                                                                          
\column{2.2in}                                                                                                                                               
\begin{itemize}                                                                                                                                              
\item Normally we do a likelihood fit and we are done.                                                                                                       
\item There is a second way!                                                                                                                                 
\end{itemize}                                                                                                                                                
\column{2.8in}                                                                                                                                               
\includegraphics[width=0.95\textwidth]{images/Kmumu_LL.png}                                                                                                  
\end{columns}                                                                                                                                                
                                                                                                                                                             
\end{small}                                                                                                                                                  
\end{frame} 

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%                                                                                                            
\begin{frame}\frametitle{Method of Moments}                                                                                                                  
\begin{footnotesize}                                                                                                                                                
$\color{PineGreen}\Rrightarrow$ Let's calculate the integrals:                                                                                               
\begin{equation}                                                                                                                                             
\int_{-1}^{1} \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} \cdot \cos \theta_l = \dfrac{2}{3}A_{FB} \nonumber
\end{equation}                                                                                                                                               
\begin{equation}                                                                                                                                             
\int_{-1}^{1} \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} \cdot \cos^2 \theta_l = \dfrac{1}{5} + \dfrac{2 F_H}{15} \nonumber                     
\end{equation}                                                                                                                                               
$\color{PineGreen}\Rrightarrow$ So we can get the parameters that we searched for by doing an integration. So now what?\\
$\color{PineGreen}\Rrightarrow$ Well, nature is the best random number generator, so let's take the data and calculate the integral estimates:
\begin{equation}                                                                                                                                             
\int_{-1}^{1} \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} \cdot \cos \theta_l = \dfrac{2}{3}A_{FB} = \dfrac{1}{N} \sum_{i=1}^N  \cos \theta_{l,i} \nonumber                                                                                                                                                  
\end{equation}                                                                                                                                               
\begin{equation}                                                                                                                                             
\int_{-1}^{1} \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} \cdot \cos^2 \theta_l = \dfrac{1}{5} + \dfrac{2 F_H}{15}=\dfrac{1}{N} \sum_{i=1}^N  \cos^2 \theta_{l,i} \nonumber
\end{equation}                                                                                                                                               
\ARROW E3.2 Calculate the $A_{FB}$ and $F_H$ using the Method of Moments. The events to be used to calculate them are here: \href{http://www.physik.uzh.ch/lectures/mcm/lectures/mom.txt}{LINK}
                                                                                                                                                             
                                                                                                                                                             
\end{footnotesize}                                                                                                                                                  
\end{frame}        



\backupbegin   

\begin{frame}\frametitle{Backup}


\end{frame}

\backupend			

\end{document}