Newer
Older
Lecture_repo / Lectures_my / MC_2016 / Lecture3 / mchrzasz.tex
@mchrzasz mchrzasz on 13 Mar 2016 41 KB finished lecture 4
  1. \documentclass[11 pt,xcolor={dvipsnames,svgnames,x11names,table}]{beamer}
  2.  
  3. \usepackage[english]{babel}
  4. \usepackage{polski}
  5. \usepackage[skins,theorems]{tcolorbox}
  6. \tcbset{highlight math style={enhanced,
  7. colframe=red,colback=white,arc=0pt,boxrule=1pt}}
  8.  
  9. \usetheme[
  10. bullet=circle, % Other option: square
  11. bigpagenumber, % circled page number on lower right
  12. topline=true, % colored bar at the top of the frame
  13. shadow=false, % Shading for beamer blocks
  14. watermark=BG_lower, % png file for the watermark
  15. ]{Flip}
  16.  
  17. %\logo{\kern+1.em\includegraphics[height=1cm]{SHiP-3_LightCharcoal}}
  18.  
  19. \usepackage[lf]{berenis}
  20. \usepackage[LY1]{fontenc}
  21. \usepackage[utf8]{inputenc}
  22.  
  23. \usepackage{emerald}
  24. \usefonttheme{professionalfonts}
  25. \usepackage[no-math]{fontspec}
  26. \usepackage{listings}
  27. \defaultfontfeatures{Mapping=tex-text} % This seems to be important for mapping glyphs properly
  28.  
  29. \setmainfont{Gillius ADF} % Beamer ignores "main font" in favor of sans font
  30. \setsansfont{Gillius ADF} % This is the font that beamer will use by default
  31. % \setmainfont{Gill Sans Light} % Prettier, but harder to read
  32.  
  33. \setbeamerfont{title}{family=\fontspec{Gillius ADF}}
  34.  
  35. \input t1augie.fd
  36.  
  37. %\newcommand{\handwriting}{\fontspec{augie}} % From Emerald City, free font
  38. %\newcommand{\handwriting}{\usefont{T1}{fau}{m}{n}} % From Emerald City, free font
  39. % \newcommand{\handwriting}{} % If you prefer no special handwriting font or don't have augie
  40.  
  41. %% Gill Sans doesn't look very nice when boldfaced
  42. %% This is a hack to use Helvetica instead
  43. %% Usage: \textbf{\forbold some stuff}
  44. %\newcommand{\forbold}{\fontspec{Arial}}
  45.  
  46. \usepackage{graphicx}
  47. \usepackage[export]{adjustbox}
  48.  
  49. \usepackage{amsmath}
  50. \usepackage{amsfonts}
  51. \usepackage{amssymb}
  52. \usepackage{bm}
  53. \usepackage{colortbl}
  54. \usepackage{mathrsfs} % For Weinberg-esque letters
  55. \usepackage{cancel} % For "SUSY-breaking" symbol
  56. \usepackage{slashed} % for slashed characters in math mode
  57. \usepackage{bbm} % for \mathbbm{1} (unit matrix)
  58. \usepackage{amsthm} % For theorem environment
  59. \usepackage{multirow} % For multi row cells in table
  60. \usepackage{arydshln} % For dashed lines in arrays and tables
  61. \usepackage{siunitx}
  62. \usepackage{xhfill}
  63. \usepackage{grffile}
  64. \usepackage{textpos}
  65. \usepackage{subfigure}
  66. \usepackage{tikz}
  67. \usepackage{hyperref}
  68. %\usepackage{hepparticles}
  69. \usepackage[italic]{hepparticles}
  70.  
  71. \usepackage{hepnicenames}
  72.  
  73. % Drawing a line
  74. \tikzstyle{lw} = [line width=20pt]
% \topline: draws a horizontal rule slightly below the top edge of the current
% page, spanning the full paper width.  Implemented as an overlay TikZ picture
% anchored to the page node ("remember picture" needs two compilation passes).
% NOTE(review): relies on a color `crimsonred` defined elsewhere (theme?) — confirm.
  75. \newcommand{\topline}{%
  76. \tikz[remember picture,overlay] {%
  77. \draw[crimsonred] ([yshift=-23.5pt]current page.north west)
  78. -- ([yshift=-23.5pt,xshift=\paperwidth]current page.north west);}}
  79.  
  80.  
  81.  
  82. % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
  83. \usepackage{tikzfeynman} % For Feynman diagrams
  84. \usetikzlibrary{arrows,shapes}
  85. \usetikzlibrary{trees}
  86. \usetikzlibrary{matrix,arrows} % For commutative diagram
  87. % http://www.felixl.de/commu.pdf
  88. \usetikzlibrary{positioning} % For "above of=" commands
  89. \usetikzlibrary{calc,through} % For coordinates
  90. \usetikzlibrary{decorations.pathreplacing} % For curly braces
  91. % http://www.math.ucla.edu/~getreuer/tikz.html
  92. \usepackage{pgffor} % For repeating patterns
  93.  
  94. \usetikzlibrary{decorations.pathmorphing} % For Feynman Diagrams
  95. \usetikzlibrary{decorations.markings}
  96. \tikzset{
  97. % >=stealth', %% Uncomment for more conventional arrows
  98. vector/.style={decorate, decoration={snake}, draw},
  99. provector/.style={decorate, decoration={snake,amplitude=2.5pt}, draw},
  100. antivector/.style={decorate, decoration={snake,amplitude=-2.5pt}, draw},
  101. fermion/.style={draw=gray, postaction={decorate},
  102. decoration={markings,mark=at position .55 with {\arrow[draw=gray]{>}}}},
  103. fermionbar/.style={draw=gray, postaction={decorate},
  104. decoration={markings,mark=at position .55 with {\arrow[draw=gray]{<}}}},
  105. fermionnoarrow/.style={draw=gray},
  106. gluon/.style={decorate, draw=black,
  107. decoration={coil,amplitude=4pt, segment length=5pt}},
  108. scalar/.style={dashed,draw=black, postaction={decorate},
  109. decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
  110. scalarbar/.style={dashed,draw=black, postaction={decorate},
  111. decoration={markings,mark=at position .55 with {\arrow[draw=black]{<}}}},
  112. scalarnoarrow/.style={dashed,draw=black},
  113. electron/.style={draw=black, postaction={decorate},
  114. decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
  115. bigvector/.style={decorate, decoration={snake,amplitude=4pt}, draw},
  116. }
  117.  
  118. % TIKZ - for block diagrams,
  119. % from http://www.texample.net/tikz/examples/control-system-principles/
  120. % \usetikzlibrary{shapes,arrows}
  121. \tikzstyle{block} = [draw, rectangle,
  122. minimum height=3em, minimum width=6em]
  123.  
  124.  
  125.  
  126.  
  127. \usetikzlibrary{backgrounds}
  128. \usetikzlibrary{mindmap,trees} % For mind map
  129. \newcommand{\degree}{\ensuremath{^\circ}}
  130. \newcommand{\E}{\mathrm{E}}
  131. \newcommand{\Var}{\mathrm{Var}}
  132. \newcommand{\Cov}{\mathrm{Cov}}
  133. \newcommand\Ts{\rule{0pt}{2.6ex}} % Top strut
  134. \newcommand\Bs{\rule[-1.2ex]{0pt}{0pt}} % Bottom strut
  135.  
  136. \graphicspath{{images/}} % Put all images in this directory. Avoids clutter.
  137.  
  138. % SOME COMMANDS THAT I FIND HANDY
  139. % \renewcommand{\tilde}{\widetilde} % dinky tildes look silly, dosn't work with fontspec
  140. %\newcommand{\comment}[1]{\textcolor{comment}{\footnotesize{#1}\normalsize}} % comment mild
  141. %\newcommand{\Comment}[1]{\textcolor{Comment}{\footnotesize{#1}\normalsize}} % comment bold
  142. %\newcommand{\COMMENT}[1]{\textcolor{COMMENT}{\footnotesize{#1}\normalsize}} % comment crazy bold
  143. \newcommand{\Alert}[1]{\textcolor{Alert}{#1}} % louder alert
  144. \newcommand{\ALERT}[1]{\textcolor{ALERT}{#1}} % loudest alert
  145. %% "\alert" is already a beamer pre-defined
  146. \newcommand*{\Scale}[2][4]{\scalebox{#1}{$#2$}}%
  147.  
  148. \def\Put(#1,#2)#3{\leavevmode\makebox(0,0){\put(#1,#2){#3}}}
  149.  
  150. \usepackage{gmp}
  151. \usepackage[final]{feynmp-auto}
  152.  
  153. \usepackage[backend=bibtex,style=numeric-comp,firstinits=true]{biblatex}
  154. \bibliography{bib}
  155. \setbeamertemplate{bibliography item}[text]
  156.  
  157. \makeatletter\let\frametextheight\beamer@frametextheight\makeatother
  158.  
  159. % suppress frame numbering for backup slides
  160. % you always need the appendix for this!
% \backupbegin: call at the start of the backup/appendix slides.  Snapshots the
% current beamer frame number into a dedicated counter so \backupend can rewind
% it, keeping backup slides out of the visible page count.
% NOTE(review): \newcounter inside the macro body means \backupbegin can only be
% invoked once per document (a second call errors on counter redefinition).
  161. \newcommand{\backupbegin}{
  162. \newcounter{framenumberappendix}
  163. \setcounter{framenumberappendix}{\value{framenumber}}
  164. }
% \backupend: call after the last backup slide.  Restores `framenumber` to the
% value saved by \backupbegin (saved - current + current = saved), so the total
% frame count excludes the backup slides.
  165. \newcommand{\backupend}{
  166. \addtocounter{framenumberappendix}{-\value{framenumber}}
  167. \addtocounter{framenumber}{\value{framenumberappendix}}
  168. }
  169.  
  170.  
  171. \definecolor{links}{HTML}{2A1B81}
  172. %\hypersetup{colorlinks,linkcolor=,urlcolor=links}
  173.  
  174. % For shapo's formulas:
  175. \def\lsi{\raise0.3ex\hbox{$<$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
  176. \def\gsi{\raise0.3ex\hbox{$>$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
  177. \newcommand{\lsim}{\mathop{\lsi}}
  178. \newcommand{\gsim}{\mathop{\gsi}}
  179. \newcommand{\wt}{\widetilde}
  180. %\newcommand{\ol}{\overline}
  181. \newcommand{\Tr}{\rm{Tr}}
  182. \newcommand{\tr}{\rm{tr}}
  183. \newcommand{\eqn}[1]{&\hspace{-0.7em}#1\hspace{-0.7em}&}
  184. \newcommand{\vev}[1]{\rm{$\langle #1 \rangle$}}
  185. \newcommand{\abs}[1]{\rm{$\left| #1 \right|$}}
  186. \newcommand{\eV}{\rm{eV}}
  187. \newcommand{\keV}{\rm{keV}}
  188. \newcommand{\GeV}{\rm{GeV}}
  189. \newcommand{\im}{\rm{Im}}
  190. \newcommand{\disp}{\displaystyle}
  191. \def\be{\begin{equation}}
  192. \def\ee{\end{equation}}
  193. \def\ba{\begin{eqnarray}}
  194. \def\ea{\end{eqnarray}}
  195. \def\d{\partial}
  196. \def\l{\left(}
  197. \def\r{\right)}
  198. \def\la{\langle}
  199. \def\ra{\rangle}
  200. \def\e{{\rm e}}
  201. \def\Br{{\rm Br}}
  202. \def\fixme{{\color{red} FIXME!}}
  203. \def\mc{{\color{Magenta}{MC}}}
  204. \def\pdf{{\rm p.d.f.}}
  205. \def\ARROW{{\color{JungleGreen}{$\Rrightarrow$}}\xspace}
  206. \author{ {\fontspec{Trebuchet MS}Marcin Chrz\k{a}szcz} (Universit\"{a}t Z\"{u}rich)}
  207. \institute{UZH}
  208. \title[Adaptive Monte Carlo Integration Methods]{Adaptive Monte Carlo Integration Methods}
  209. \date{\fixme}
  210.  
  211.  
  212. \begin{document}
  213. \tikzstyle{every picture}+=[remember picture]
  214.  
  215. {
  216. \setbeamertemplate{sidebar right}{\llap{\includegraphics[width=\paperwidth,height=\paperheight]{bubble2}}}
  217. \begin{frame}[c]%{\phantom{title page}}
  218. \begin{center}
  219. \begin{center}
  220. \begin{columns}
  221. \begin{column}{0.9\textwidth}
  222. \flushright\fontspec{Trebuchet MS}\bfseries \Huge {Adaptive Monte Carlo Integration Methods}
  223. \end{column}
  224. \begin{column}{0.2\textwidth}
  225. %\includegraphics[width=\textwidth]{SHiP-2}
  226. \end{column}
  227. \end{columns}
  228. \end{center}
  229. \quad
  230. \vspace{3em}
  231. \begin{columns}
  232. \begin{column}{0.44\textwidth}
  233. \flushright \vspace{-1.8em} {\fontspec{Trebuchet MS} \Large Marcin Chrząszcz\\\vspace{-0.1em}\small \href{mailto:mchrzasz@cern.ch}{mchrzasz@cern.ch}}
  234.  
  235. \end{column}
  236. \begin{column}{0.53\textwidth}
  237. \includegraphics[height=1.3cm]{uzh-transp}
  238. \end{column}
  239. \end{columns}
  240.  
  241. \vspace{1em}
  242. % \footnotesize\textcolor{gray}{With N. Serra, B. Storaci\\Thanks to the theory support from M. Shaposhnikov, D. Gorbunov}\normalsize\\
  243. \vspace{0.5em}
  244. \textcolor{normal text.fg!50!Comment}{Monte Carlo methods, \\ 10 March, 2016}
  245.  
  246. \end{center}
  247. \end{frame}
  248. }
  249.  
  250. \begin{frame}\frametitle{Classical methods of variance reduction}
  251. \begin{footnotesize}
  252. \ARROW In Monte Carlo methods the statistical uncertainty is defined as:
  253. \begin{align*}
  254. \sigma = \dfrac{1}{\sqrt{N}}\sqrt{V(f)}
  255. \end{align*}
  256. \ARROW Obvious conclusion:
  257. \begin{itemize}
  258. \item To reduce the uncertainty one needs to increase $N$.\\
  259. $\rightrightarrows$ Slow convergence. In order to reduce the error by factor of 10 one needs to simulate factor of 100 more points!
  260. \end{itemize}
  261. \ARROW However the other handle ($V(f)$) can be changed! $\longrightarrow$ Lots of theoretical effort goes\
  262. into reducing this factor.\\
  263. \ARROW We will discuss {\color{Mahogany}{four}} classical methods of variance reduction:
  264. \begin{enumerate}
  265. \item Stratified sampling.
  266. \item Importance sampling.
  267. \item Control variates.
  268. \item Antithetic variates.
  269. \end{enumerate}
  270. \end{footnotesize}
  271. \end{frame}
  272.  
  273.  
  274.  
  275.  
  276. \begin{frame}\frametitle{Disadvantages of classical variance reduction methods}
  277. \begin{footnotesize}
  278. \ARROW All aforementioned methods (besides stratified sampling) require knowledge of the integration function!\\
  279. \ARROW If you use the method in an incorrect way, you can easily get the opposite effect than intended. \\
  280. \ARROW Successful application of them requires non-negligible effort before running the program.\\
  281. \ARROW A natural solution would be a program that is ``smart'' enough to learn something about our function on its own while it is trying to calculate the integral.\\
  282. \ARROW Similar techniques were already created for numerical integration!\\
  283. \ARROW Truly adaptive methods are nontrivial to code but are widely available in external packages as we will learn.\\
  284. \ARROW Naming conventions:
  285. \begin{itemize}
  286. \item Integration \mc - software that is able to compute JUST! integrals.
  287. \item Generator \mc - software that BESIDES! being able to perform the integration is also capable of generating points according to the integration function.
  288. \end{itemize}
  289.  
  290.  
  291. \end{footnotesize}
  292. \end{frame}
  293.  
  294. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  295. \begin{frame}\frametitle{Schematic of running this kind of method}
  296. \begin{footnotesize}
  297. \begin{enumerate}
  298. \item Function probing (exploration):
  299. \begin{itemize}
  300. \item Recursive algorithm that searches for hyper-surfaces in which the function is approximately constant. For evaluation of an integral in a given hyper-surface one normally uses numerical or \mc~crude methods. In general it is not an easy task!
  301. \item Often the function is approximated by a given set of elementary functions.
  302. \end{itemize}
  303. \item Calculation phase
  304. \begin{itemize}
  305. \item The integral is calculated using mostly using Stratified Sampling and Importance Sampling, depending on exploration phase.
  306. \item If a \mc~program has capability to generated distributions accordingly to the function of which we want to calculate the integral, it's in this place where it happens.
  307. \end{itemize}
  308. \end{enumerate}
  309. \ARROW There are algorithms where the exploration phase is linked with the calculation phase. For each optimisation step the integral is calculated as well. The result will be a weighted average of those integrals!
  310. \begin{alertblock}{~}
  311. This method might be biased! If in the exploration phase the algorithm picks up the function peaks too late, the whole method will lead to systematically biased results.
  312. \end{alertblock}
  313.  
  314.  
  315. \end{footnotesize}
  316. \end{frame}
  317.  
  318. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  319. \begin{frame}\frametitle{\texttt{RIWIAD} algorithm}
  320. \begin{footnotesize}
  321. \ARROW The first algorithm of this kind, \texttt{RIWIAD}, was proposed by Sheppey \& Lautrup in the $1970$s. It was used to calculate integrals in the cube $(0,1)^n$. \\
  322. \ARROW It worked as follows:
  323. \begin{itemize}
  324. \item At the beginning the hyper-cube is divided into equal-size sub-cubes and in each of them the integral is calculated.
  325. \item Based on the calculated integrals the program moves the boundaries to make the hyper-cubes smaller in the places where the function is greater and larger where the function is smaller.
  326. \item The process starts over and continues over and over again. At each step the integral estimator and its standard deviation are calculated. From those a weighted average and its standard deviation are constructed.
  327. \item The process stops when the standard deviation reaches our desired sensitivity.
  328. \end{itemize}
  329.  
  330. \ARROW Disadvantages:
  331. \begin{itemize}
  332. \item Hyper-cubes are always parallel to the coordinate axes.
  333. \item Some areas are divided even though they didn't have to be.
  334. \item The weighted average might be a biased estimator.
  335. \end{itemize}
  336.  
  337.  
  338.  
  339. \end{footnotesize}
  340. \end{frame}
  341.  
  342.  
  343.  
  344. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  345. \begin{frame}\frametitle{Friedmann's algorithm}
  346. \begin{small}
  347. \ARROW In the $1970$s J.~Friedmann also developed an adaptive \mc~integration algorithm.\\
  348. \ARROW The algorithm was as follows:
  349. \begin{itemize} \item A probe function is constructed using a combination of Cauchy functions (Breit-Wigner), in which the peaks correspond to the local maxima of the integration function. In order to do so one needs to study the eigenfunctions in the neighbourhood of each peak (nasty thing...).
  350. \item The Breit-Wigner is chosen as it falls down to $0$ slower than a Gaussian distribution.
  351. \item The integral and the standard deviation are calculated as a weighted average based on the probe function.
  352. \end{itemize}
  353. \begin{alertblock}{Disadvantage:}
  354. Cannot be applied to functions that cannot be approximated with a small number of Breit-Wigner functions.
  355. \end{alertblock}
  356.  
  357. \end{small}
  358. \end{frame}
  359.  
  360. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  361. \begin{frame}\frametitle{\texttt{DIVIONNE2} algorithm}
  362. \begin{footnotesize}
  363. \ARROW J.~Friedmann (1977): adaptive algorithm for \mc~integration based on recursive division of the integration area (available in the CERNLIB package).\\
  364. \ARROW The algorithm:
  365. \begin{itemize}
  366. \item Multidimensional division of the hyper-cube. We divide each of the initial sub-cubes to minimise the spread of the function.
  367. \item After this the integral is calculated using Stratified Sampling.
  368. \item We can generate events accordingly to this function with this method.
  369. \end{itemize}
  370.  
  371. \ARROW Disadvantages:
  372. \begin{itemize}
  373. \item Hyper-cubes are always parallel to the coordinate axes.
  374. \end{itemize}
  375. \ARROW Advantages:
  376. \begin{itemize}
  377. \item Because we divide only one hyper-cube at a time, the procedure doesn't get biased as easily as \texttt{RIWIAD} does.
  378. \end{itemize}
  379.  
  380.  
  381. \end{footnotesize}
  382. \end{frame}
  383.  
  384. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  385. \begin{frame}\frametitle{\texttt{VEGAS} algorithm}
  386. \begin{footnotesize}
  387. \ARROW G.~P.~Lepage (1978): adaptive algorithm for \mc~integration based on iterative division of the integration area (similar to \texttt{RIWIAD}).\\
  388. \ARROW Let's calculate: $\int_0^1 f(x)dx$.
  389. \begin{itemize}
  390. \item We generate M random points from $\mathcal{U}(0,1)$. We calculate from them the integral and standard deviation.
  391. \item Now we divide the integration region in N equal subdivisions:
  392. \begin{align*}
  393. 0=x_0<x_1<x_2<\ldots<x_N=1,~\Delta x_i =x_i-x_{i-1}
  394. \end{align*}
  395. \item Now each of these subdivisions we divide further into $m_i +1$ subsubdivisions.
  396. \begin{align*}
  397. m_i=K \dfrac{\overline{f}_i \Delta x_i}{\sum_j \overline{f}_j \Delta x_j},~K=const.~~{{\rm typically~= 1000}}
  398. \end{align*}
  399. and
  400. \begin{align*}
  401. \overline{f}_i \equiv \sum_{ x \in [ x_{i-1},x_i )} \vert f(x) \vert \sim \dfrac{1}{\Delta x_i} \int_{x_{i-1}}^{x_i} \vert f(x) \vert dx
  402. \end{align*}
  403. \ARROW The new subsubareas will be ``denser'' where the function is greater and less dense where the function is smaller.
  404.  
  405. \end{itemize}
  406.  
  407. \end{footnotesize}
  408. \end{frame}
  409.  
  410.  
  411. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  412. \begin{frame}\frametitle{\texttt{VEGAS} algorithm}
  413. \begin{footnotesize}
  414.  
  415. \begin{itemize}
  416. \item We are retrieving back the original number (N) of subdivisions by glueing together equal amounts of subsubdivisions.\\
  417. \ARROW The new subdivisions will be larger where the function is larger and vice versa.
  418. \item We generate the M points according to the step function probability:
  419. \begin{align*}
  420. p(x)=\dfrac{1}{N\Delta x_i}
  421. \end{align*}
  422. and calculate the integral using stratified sampling.
  423. \item We repeat the procedure until we find an optimum division:
  424. \begin{align*}
  425. m_i \approx m_j~i,j =1,...,N.
  426. \end{align*}
  427. \item In each iteration we calculate the weighted average:
  428. \begin{align*}
  429. \sum_k \dfrac{I_k}{\sigma_k^2},
  430. \end{align*}
  431. where $I_k$ and $\sigma_k$ are the integral and error in the $k$-th iteration.
  432. \item After the procedure stop we calculate the final results:
  433. \begin{align*}
  434. \hat{I}=\sigma_I^2\sum_k\dfrac{I_k}{\sigma_k^2}~~~~~~~\sigma_I= \left[\sum_k \dfrac{1}{\sigma_k^2}\right]^{-\frac{1}{2}}
  435. \end{align*}
  436.  
  437. \end{itemize}
  438.  
  439. \end{footnotesize}
  440. \end{frame}
  441.  
  442.  
  443. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  444. \begin{frame}\frametitle{\texttt{VEGAS} algorithm - further improvements}
  445. \begin{footnotesize}
  446. \ARROW In order to make the integration area more stable (it can happen that the division jumps around very rapidly), we can modify the algorithm:
  447. \begin{align*}
  448. m_i=K \left[ \left[ \dfrac{\overline{f}_i \Delta x_i}{\sum_j \overline{f}_j \Delta x_j} -1\right] \dfrac{1}{\log \left[\overline{f}_i\Delta x_i/\sum_j \overline{f}_j \Delta x_j\right] } \right]^{\alpha},
  449. \end{align*}
  450. where $\alpha \in [1,2]$ sets the convergence speed.
  451. \ARROW When the function has narrow peaks the $I_k$ and $\sigma_k$ might be wrongly calculated in early stages of iteration. To fix this we can:
  452. \begin{align*}
  453. I=\left[ \sum_k \dfrac{I_k^2}{\sigma_k^2}\right]^{-1} \sum_k I_k \left( \dfrac{I_k^2}{\sigma_k^2}\right),~~~~~ \sigma_I=I\left[\sum_k\dfrac{I_k^2}{\sigma_k^2}\right]^{-0.5}
  454. \end{align*}
  455. \ARROW If the number of iterations is too large then you cannot trust the algorithm!
  456.  
  457. \end{footnotesize}
  458. \end{frame}
  459.  
  460.  
  461. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  462. \begin{frame}\frametitle{\texttt{VEGAS} algorithm - 2D case}
  463. \begin{footnotesize}
  464. \ARROW Let's take for example $\int_0^1dx\int_0^1 dy f(x,y)$.\\
  465. \ARROW We can do a trick:
  466. \begin{align*}
  467. p(x,y)=p_x(x)p_y(y)
  468. \end{align*}
  469. \ARROW One can show using Lagrange multipliers that the optimum density has the form:
  470. \begin{align*}
  471. p_x(x)= \dfrac{\sqrt{\int_0^1 dy \frac{f^2(x,y)}{p_y(y)} }}{\int_0^1dx \sqrt{\int_0^1 dy \dfrac{f^2(x,y)}{p_y(y)}}}
  472. \end{align*}
  473. \ARROW So our 1D algorithm can be used to each of the axis (ex. for x axis):
  474. \begin{align*}
  475. (f_i)^2 = \sum_{x \in [ x_{i-1},x_i )} \sum_y \dfrac{f^2(x,y)}{p_y(y)}~~\sim ~~ \dfrac{1}{\Delta x_i} \int_{x_{i-1}}^{x_i}dx \int_0^{1}dy \dfrac{f^2(x,y)}{p_y(y)}
  476. \end{align*}
  477. \ARROW Analogously you do it for the y axis.
  478.  
  479.  
  480. \end{footnotesize}
  481. \end{frame}
  482.  
  483.  
  484.  
  485.  
  486. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  487. \begin{frame}\frametitle{\texttt{VEGAS} algorithm - an example}
  488. \begin{footnotesize}
  489. \ARROW An example of usage: let's calculate:
  490. \begin{align*}
  491. I_n = \left(\dfrac{1}{a\sqrt{\pi}}\right)^n \int_0^1 \exp \left[ -\sum_{i=1}^{n}\dfrac{(x_i-0.5)^2}{a^2} \right] d^n x =1
  492. \end{align*}
  493. \ARROW For the $n=9$, $a=0.1$ and $\alpha=1$
  494. \begin{tabular}{|c|c c|c c|c|}
  495. \hline
  496. Iteration & $I_k$ & $\sigma_k$ & $I$ & $\sigma(I)$ & Number of calculations\\ \hline \hline
  497. $1$ & $0.007$ & $0.005$ & $0.007$ & $0.005$ & $10^4$\\
  498. $3$ & $0.643$ & $0.070$ & $0.612$ & $0.064$ & $3 \cdot 10^4$\\
  499. $5$ & $1.009$ & $0.041$ & $0.963$ & $0.034$ & $5 \cdot 10^4$\\
  500. $10$ & $1.003$ & $0.041$ & $1.003$ & $0.005$ & $10^5$\\
  501. Crude \mc~method & ~ & ~ & $0.843$ & $0.360$ & $10^5$\\ \hline \hline
  502. \end{tabular}
  503.  
  504. \end{footnotesize}
  505. \end{frame}
  506.  
  507.  
  508.  
  509.  
  510. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  511. \begin{frame}\frametitle{\texttt{VEGAS} algorithm - comparison to numerical methods}
  512. \begin{small}
  513. \ARROW An example of usage; let's calculate:
  514. \begin{align*}
  515. I_n = \left(\dfrac{1}{a\sqrt{\pi}}\right)^n \int_0^1 \exp \left[ -\sum_{i=1}^{n}\dfrac{(x_i-0.5)^2}{a^2} \right] d^n x
  516. \end{align*}
  517. \ARROW For the $n=9$, $a=0.1$ and $\alpha=1$.
  518. {~}\\
  519. \begin{center}
  520.  
  521. \begin{tabular}{|c|c|c|}
  522. \hline
  523. Number of points on axis & Integral value & Number of calculations \\ \hline \hline
  524. $5$ & $71.364$ & $2 \cdot 10^6$ \\
  525. $6$ & $0.017$ & $10^7$ \\
  526. $10$ & $0.774$ & $10^9$ \\
  527. $15$ & $1.002$ & $3.8\cdot 10^9$ \\ \hline
  528. \end{tabular}
  529.  
  530. \end{center}
  531.  
  532.  
  533.  
  534.  
  535. \end{small}
  536. \end{frame}
  537.  
  538.  
  539. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  540. % Now the Foam algorithm
  541. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  542. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  543. \begin{frame}\frametitle{\texttt{FOAM} algorithm }
  544. \begin{footnotesize}
  545. \ARROW S.Jadach (2000), \href{http://arxiv.org/abs/physics/9910004}{arXiv:physics/9910004, Comp. Phys. Commun. 152 (2003) 55}. Adaptive method with recursive division of the integration domain in cells. \\
  546. \ARROW There are two algorithms in dividing the integration domain:
  547. \begin{itemize}
  548. \item Simplicial: Cells are simplices (hyper-triangles). This method can be applied to a not so large number of dimensions $(\leq 5)$.
  549. \item Cubic: Cells are hyper-cubes. This might be applied in a higher number of dimensions $(\leq20)$.
  550. \end{itemize}
  551. \ARROW The algorithm:
  552. \begin{itemize}
  553. \item Exploration phase:\\
  554. The integration domain (hyper-cube) is divided recursively into cells. In each step only one cell is split. The splitting is not even! The procedure is stopped when the number of cells reaches a certain number that is set by us. One constructs an approximation function and based on this the integral is calculated.
  555. \item Generation/Calculation Phase:\\
  556. We generate random points according to the distribution of the approximation function and the integral is calculated using importance sampling based on the approximation function.
  557.  
  558. \end{itemize}
  559.  
  560. \end{footnotesize}
  561. \end{frame}
  562.  
  563.  
  564.  
  565. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  566. \begin{frame}\frametitle{\texttt{FOAM} algorithm }
  567. \begin{footnotesize}
  568. \begin{center}
  569. \includegraphics[width=0.95\textwidth]{FOAM.png}
  570. \end{center}
  571. \end{footnotesize}
  572. \end{frame}
  573.  
  574.  
  575.  
  576.  
  577. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  578. \begin{frame}\frametitle{\texttt{FOAM} algorithm }
  579. \begin{footnotesize}
  580. \begin{center}
  581. \includegraphics[width=0.75\textwidth]{FOAM2.png}\\
  582. \end{center}
  583. \ARROW E3.1 Using ROOT implementation of the FOAM algorithm calculate the integrals from exercise E2.3.
  584.  
  585.  
  586. \end{footnotesize}
  587. \end{frame}
  588.  
  589.  
  590. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  591. \begin{frame}\frametitle{Monte Carlo vs numerical methods}
  592. \begin{footnotesize}
  593. \ARROW All numerical methods are based on evaluating the integral using a linear combination of function values:
  594. \begin{align*}
  595. I_Q = \sum_{i=1}^m \omega_i f(x_i)
  596. \end{align*}
  597. \ARROW Different methods have different weights $\omega_i$ and lattice points $x_i$.\\
  598. \ARROW Efficiency of Monte Carlo methods compared to the numerical ones:
  599. \begin{center}
  600.  
  601. \begin{tabular}{c|c|c}
  602. \hline
  603. Standard deviation & 1D & nD\\ \hline
  604. Monte Carlo & $n^{-1/2}$ & $n^{-1/2}$ \\
  605. Trapezoidal Rule & $n^{-2}$ & $n^{-2/d}$\\
  606. Simpson Rule & $n^{-2}$ & $n^{-2/d}$\\
  607. m-point Gauss rule & $n^{-2m}$ & $n^{-2m/d}$\\ \hline
  608. \end{tabular}
  609.  
  610.  
  611.  
  612. \end{center}
  613. \end{footnotesize}
  614. \end{frame}
  615.  
  616. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  617. \begin{frame}\frametitle{Sum up}
  618. \begin{footnotesize}
  619. \ARROW In one dimension the Monte Carlo method is substantially slower than the numerical methods! Even the most simple ones.\\
  620. \ARROW In many dimensions the Monte Carlo methods rapidly gain the advantage! \\
  621. \ARROW For $d>4$ the \mc~method is faster than the Trapezoidal Rule.\\
  622. \ARROW For $d>8$ the \mc~method is faster than the Simpson Rule.\\
  623. \ARROW The disadvantages of the numerical methods:
  624. \begin{itemize}
  625. \item Hard to apply in multi dimensions.
  626. \item Hard to apply in complex integration domains.
  627. \item The integration uncertainties are hard to evaluate.
  628. \end{itemize}
  629.  
  630.  
  631.  
  632. \end{footnotesize}
  633. \end{frame}
  634.  
  635. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  636. \begin{frame}\frametitle{Method of Moments}
  637. \begin{small}
  638. $\color{PineGreen}\Rrightarrow$ Now real cool things!\\
  639. $\color{PineGreen}\Rrightarrow$ Let's consider we want to study a rare decay: $\PB^{\pm} \to \PK^{\pm} \Pmu \Pmu$. The decay is described by the following \
  640. PDF:
  641. \begin{equation}
  642. \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} =\dfrac{3}{4}(1-F_H)(1-\cos^2 \theta_l)+F_H/2 + A_{FB}\cos \theta_l \nonumber
  643. \end{equation}
  644. $\color{PineGreen}\Rrightarrow$ PDF by construction is normalized: $\int_{-1}^{1} \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} =1$
  645. \begin{columns}
  646. \column{0.1in}
  647. {~}
  648. \column{2.2in}
  649. \begin{itemize}
  650. \item Normally we do a likelihood fit and we are done.
  651. \item There is a second way!
  652. \end{itemize}
  653. \column{2.8in}
  654. \includegraphics[width=0.95\textwidth]{images/Kmumu_LL.png}
  655. \end{columns}
  656. \end{small}
  657. \end{frame}
  658.  
  659. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  660. \begin{frame}\frametitle{Method of Moments}
  661. \begin{footnotesize}
  662. $\color{PineGreen}\Rrightarrow$ Let's calculate the integrals:
  663. \begin{equation}
  664. \int_{-1}^{1} \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} \cdot \cos \theta_l = \dfrac{2}{3}A_{FB} \nonumber
  665. \end{equation}
  666. \begin{equation}
  667. \int_{-1}^{1} \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} \cdot \cos^2 \theta_l = \dfrac{1}{5} + \dfrac{2 F_H}{15} \nonumber
  668. \end{equation}
  669. $\color{PineGreen}\Rrightarrow$ So we can get the parameters we searched for by doing an integration. So now what?\\
  670. $\color{PineGreen}\Rrightarrow$ Well, nature is the best random number generator, so let's take the data and calculate the integral estimates:
  671. \begin{equation}
  672. \int_{-1}^{1} \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} \cdot \cos \theta_l = \dfrac{2}{3}A_{FB} = \dfrac{1}{N} \sum_{i=1}^N \cos \theta_{l,i} \nonumber
  673. \end{equation}
  674. \begin{equation}
  675. \int_{-1}^{1} \dfrac{1}{\Gamma}\dfrac{d^2\Gamma}{dq^2 d\cos \theta_l} \cdot \cos^2 \theta_l = \dfrac{1}{5} + \dfrac{2 F_H}{15}=\dfrac{1}{N} \sum_{i=1}^N \cos^2 \theta_{l,i} \nonumber
  676. \end{equation}
  677. \ARROW E3.2 Calculate the $A_{FB}$ and $F_H$ using Method of moments. The events to be used to calculate them are here:\href{ http://www.physik.uzh.ch/lectures/mcm/lectures/mom.txt}{LINK}
  678. \end{footnotesize}
  679. \end{frame}
  680.  
  681.  
  682.  
  683. \backupbegin
  684.  
  685. \begin{frame}\frametitle{Backup}
  686.  
  687.  
  688. \end{frame}
  689.  
  690. \backupend
  691.  
  692. \end{document}