\documentclass[11 pt,xcolor={dvipsnames,svgnames,x11names,table}]{beamer}

\usepackage[english]{babel}
\usepackage{polski}

\usetheme[
  bullet=circle,      % Other option: square
  bigpagenumber,      % circled page number on lower right
  topline=true,       % colored bar at the top of the frame
  shadow=false,       % Shading for beamer blocks
  watermark=BG_lower, % png file for the watermark
  ]{Flip}

%\logo{\kern+1.em\includegraphics[height=1cm]{SHiP-3_LightCharcoal}}

\usepackage[lf]{berenis}
\usepackage[LY1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{cases}
\usepackage{mathtools}
\usepackage{emerald}
\usefonttheme{professionalfonts}
\usepackage[no-math]{fontspec}
\defaultfontfeatures{Mapping=tex-text} % This seems to be important for mapping glyphs properly

\setmainfont{Gillius ADF} % Beamer ignores "main font" in favor of sans font
\setsansfont{Gillius ADF} % This is the font that beamer will use by default
% \setmainfont{Gill Sans Light} % Prettier, but harder to read
\setbeamerfont{title}{family=\fontspec{Gillius ADF}}
\input t1augie.fd

%\newcommand{\handwriting}{\fontspec{augie}} % From Emerald City, free font
%\newcommand{\handwriting}{\usefont{T1}{fau}{m}{n}} % From Emerald City, free font
% \newcommand{\handwriting}{} % If you prefer no special handwriting font or don't have augie

%% Gill Sans doesn't look very nice when boldfaced
%% This is a hack to use Helvetica instead
%% Usage: \textbf{\forbold some stuff}
%\newcommand{\forbold}{\fontspec{Arial}}

\usepackage{graphicx}
\usepackage[export]{adjustbox}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{colortbl}
\usepackage{mathrsfs} % For Weinberg-esque letters
\usepackage{cancel}   % For "SUSY-breaking" symbol
\usepackage{slashed}  % for slashed characters in math mode
\usepackage{bbm}      % for \mathbbm{1} (unit matrix)
\usepackage{amsthm}   % For theorem environment
\usepackage{multirow} % For multi row cells in table
\usepackage{arydshln} % For dashed lines in arrays and tables
\usepackage{siunitx}
\usepackage{xhfill}
\usepackage{grffile}
\usepackage{textpos}
\usepackage{subfigure}
\usepackage{tikz}
\usepackage{hyperref}
%\usepackage{hepparticles}
\usepackage[italic]{hepparticles}
\usepackage{hepnicenames}

% Drawing a line
\tikzstyle{lw} = [line width=20pt]
\newcommand{\topline}{%
  \tikz[remember picture,overlay] {%
    \draw[crimsonred] ([yshift=-23.5pt]current page.north west) -- ([yshift=-23.5pt,xshift=\paperwidth]current page.north west);}}

% % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
\usepackage{tikzfeynman} % For Feynman diagrams
\usetikzlibrary{arrows,shapes}
\usetikzlibrary{trees}
\usetikzlibrary{matrix,arrows} % For commutative diagram
% http://www.felixl.de/commu.pdf
\usetikzlibrary{positioning} % For "above of=" commands
\usetikzlibrary{calc,through} % For coordinates
\usetikzlibrary{decorations.pathreplacing} % For curly braces
% http://www.math.ucla.edu/~getreuer/tikz.html
\usepackage{pgffor} % For repeating patterns

\usetikzlibrary{decorations.pathmorphing} % For Feynman Diagrams
\usetikzlibrary{decorations.markings}
\tikzset{
  % >=stealth', %% Uncomment for more conventional arrows
  vector/.style={decorate, decoration={snake}, draw},
  provector/.style={decorate, decoration={snake,amplitude=2.5pt}, draw},
  antivector/.style={decorate, decoration={snake,amplitude=-2.5pt}, draw},
  fermion/.style={draw=gray, postaction={decorate},
    decoration={markings,mark=at position .55 with {\arrow[draw=gray]{>}}}},
  fermionbar/.style={draw=gray, postaction={decorate},
    decoration={markings,mark=at position
    .55 with {\arrow[draw=gray]{<}}}},
  fermionnoarrow/.style={draw=gray},
  gluon/.style={decorate, draw=black,
    decoration={coil,amplitude=4pt, segment length=5pt}},
  scalar/.style={dashed,draw=black, postaction={decorate},
    decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
  scalarbar/.style={dashed,draw=black, postaction={decorate},
    decoration={markings,mark=at position .55 with {\arrow[draw=black]{<}}}},
  scalarnoarrow/.style={dashed,draw=black},
  electron/.style={draw=black, postaction={decorate},
    decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
  bigvector/.style={decorate, decoration={snake,amplitude=4pt}, draw},
}

% TIKZ - for block diagrams,
% from http://www.texample.net/tikz/examples/control-system-principles/
% \usetikzlibrary{shapes,arrows}
\tikzstyle{block} = [draw, rectangle, minimum height=3em, minimum width=6em]

\usetikzlibrary{backgrounds}
\usetikzlibrary{mindmap,trees} % For mind map

\newcommand{\degree}{\ensuremath{^\circ}}
\newcommand{\E}{\mathrm{E}}
\newcommand{\Var}{\mathrm{Var}}
\newcommand{\Cov}{\mathrm{Cov}}
\newcommand\Ts{\rule{0pt}{2.6ex}}       % Top strut
\newcommand\Bs{\rule[-1.2ex]{0pt}{0pt}} % Bottom strut

\graphicspath{{images/}} % Put all images in this directory. Avoids clutter.

% SOME COMMANDS THAT I FIND HANDY
% \renewcommand{\tilde}{\widetilde} % dinky tildes look silly, doesn't work with fontspec
\newcommand{\comment}[1]{\textcolor{comment}{\footnotesize{#1}\normalsize}} % comment mild
\newcommand{\Comment}[1]{\textcolor{Comment}{\footnotesize{#1}\normalsize}} % comment bold
\newcommand{\COMMENT}[1]{\textcolor{COMMENT}{\footnotesize{#1}\normalsize}} % comment crazy bold
\newcommand{\Alert}[1]{\textcolor{Alert}{#1}} % louder alert
\newcommand{\ALERT}[1]{\textcolor{ALERT}{#1}} % loudest alert
%% "\alert" is already a beamer pre-defined
\newcommand*{\Scale}[2][4]{\scalebox{#1}{$#2$}}%
\def\Put(#1,#2)#3{\leavevmode\makebox(0,0){\put(#1,#2){#3}}}

\usepackage{xspace} % needed for the \xspace used in the macro definitions below
\usepackage{gmp}
\usepackage[final]{feynmp-auto}

\usepackage[backend=bibtex,style=numeric-comp,firstinits=true]{biblatex}
\bibliography{bib}
\setbeamertemplate{bibliography item}[text]

\makeatletter\let\frametextheight\beamer@frametextheight\makeatother

% suppress frame numbering for backup slides
% you always need the appendix for this!
\newcommand{\backupbegin}{
  \newcounter{framenumberappendix}
  \setcounter{framenumberappendix}{\value{framenumber}}
}
\newcommand{\backupend}{
  \addtocounter{framenumberappendix}{-\value{framenumber}}
  \addtocounter{framenumber}{\value{framenumberappendix}}
}

\definecolor{links}{HTML}{2A1B81}
%\hypersetup{colorlinks,linkcolor=,urlcolor=links}

% For shapo's formulas:
\def\lsi{\raise0.3ex\hbox{$<$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\def\gsi{\raise0.3ex\hbox{$>$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\newcommand{\lsim}{\mathop{\lsi}}
\newcommand{\gsim}{\mathop{\gsi}}
\newcommand{\wt}{\widetilde}
%\newcommand{\ol}{\overline}
\newcommand{\Tr}{\rm{Tr}}
\newcommand{\tr}{\rm{tr}}
\newcommand{\eqn}[1]{&\hspace{-0.7em}#1\hspace{-0.7em}&}
\newcommand{\vev}[1]{\rm{$\langle #1 \rangle$}}
\newcommand{\abs}[1]{\rm{$\left| #1 \right|$}}
\newcommand{\eV}{\rm{eV}}
\newcommand{\keV}{\rm{keV}}
\newcommand{\GeV}{\rm{GeV}}
\newcommand{\im}{\rm{Im}}
\newcommand{\disp}{\displaystyle}
\def\be{\begin{equation}}
\def\ee{\end{equation}}
\def\ba{\begin{eqnarray}}
\def\ea{\end{eqnarray}}
\def\d{\partial}
\def\l{\left(}
\def\r{\right)}
\def\la{\langle}
\def\ra{\rangle}
\def\e{{\rm e}}
\def\Br{{\rm Br}}
\def\fixme{{\color{red} FIXME!}}
\def\mc{{\color{Magenta}{MC}}\xspace}
\def\pdf{{\rm p.d.f.}}
\def\ARROW{{\color{JungleGreen}{$\Rrightarrow$}}\xspace}
\def\ARROWR{{\color{WildStrawberry}{$\Rrightarrow$}}\xspace}

\author{ {\fontspec{Trebuchet MS}Marcin Chrz\k{a}szcz, Danny van Dyk} (UZH)}
\institute{UZH}
\title[Function minimization II]{Function minimization II}
\date{21 September 2016}

\begin{document}
\tikzstyle{every picture}+=[remember picture]

{
\setbeamertemplate{sidebar right}{\llap{\includegraphics[width=\paperwidth,height=\paperheight]{bubble2}}}
\begin{frame}[c]%{\phantom{title page}}
\begin{center}
\begin{center}
\begin{columns}
\begin{column}{0.9\textwidth}
\flushright\fontspec{Trebuchet MS}\bfseries \Huge {Function minimization II}
\end{column}
\begin{column}{0.2\textwidth}
%\includegraphics[width=\textwidth]{SHiP-2}
\end{column}
\end{columns}
\end{center}
\quad
\vspace{3em}
\begin{columns}
\begin{column}{0.6\textwidth}
\flushright \vspace{-1.8em}
{\fontspec{Trebuchet MS} \large Marcin Chrz\k{a}szcz, Danny van Dyk\\\vspace{-0.1em}\small
\href{mailto:mchrzasz@cern.ch}{mchrzasz@cern.ch}, \href{mailto:dany.van.dyk@gmail.com}{danny.van.dyk@gmail.com}}
\end{column}
\begin{column}{0.4\textwidth}
\includegraphics[height=1.3cm]{uzh-transp}
\end{column}
\end{columns}
\vspace{1em}
% \footnotesize\textcolor{gray}{With N. Serra, B. Storaci\\Thanks to the theory support from M. Shaposhnikov, D. Gorbunov}\normalsize\\
\vspace{0.5em}
\textcolor{normal text.fg!50!Comment}{Numerical Methods, \\ 10 October, 2016}
\end{center}
\end{frame}
}

\begin{frame}\frametitle{Function minimization}
\ARROW Function minimization is THE MOST important numerical technique.\\
\ARROW In this lecture we will discuss finding a LOCAL minimum of a function.\\
\ARROW The numerical problem: to find the minimum one needs to evaluate the function many, many times.\\
\pause
\ARROW Side note: if you want to find a maximum of $g(x)$, simply minimize $f(x) = - g(x)$.\\
\ARROW How to find a minimum? Expand the function around the minimum $x_0$:
\begin{align*}
f(x_0+\delta) \simeq f(x_0) + f^{\prime}(x_0)\delta + \frac{1}{2} f^{\prime \prime}(x_0) \delta^2
\end{align*}
\ARROW At the minimum the first derivative vanishes, so $f(x_0+\delta)-f(x_0) \simeq \frac{1}{2} f^{\prime\prime}(x_0)\delta^2$.\\
\ARROW We cannot distinguish the two function values once $\vert f(x_0+\delta) - f(x_0) \vert \leq \epsilon$, where $\epsilon$ is the accuracy we work with (at best the machine precision).\\
\ARROW From the above we see that:
\begin{align*}
\vert \delta \vert \sim \sqrt{\epsilon}
\end{align*}
\begin{exampleblock}{}
The position of the minimum can be determined only with an accuracy of $\sim \sqrt{\epsilon}$.
\end{exampleblock}
\end{frame}
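\begin{frame}[fragile]\frametitle{Function minimization - the $\sqrt{\epsilon}$ limit in practice}
\ARROW A minimal C++ sketch (an added illustration, not part of the lecture material; the test function is an assumption) of the statement above, using $f(x) = 1 + (x-1)^2$ with minimum at $x_0=1$:
\scriptsize
\begin{verbatim}
#include <cstdio>

// test function with minimum at x0 = 1, f(x0) = 1
double f(double x) { return 1.0 + (x - 1.0) * (x - 1.0); }

int main() {
  const double x0 = 1.0;
  // scan smaller and smaller displacements delta around the minimum
  for (double delta = 1e-6; delta > 1e-10; delta /= 10.0) {
    double diff = f(x0 + delta) - f(x0);
    std::printf("delta = %.0e  ->  f(x0+delta)-f(x0) = %.3e\n", delta, diff);
  }
  return 0;
}
\end{verbatim}
\normalsize
\ARROW The printed difference behaves like $\delta^2$ and becomes exactly $0$ once $\delta \lesssim 10^{-8} \approx \sqrt{\epsilon}$: in double precision the minimum position cannot be located better than $\sim\sqrt{\epsilon}$.
\end{frame}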
\begin{frame}\frametitle{``RAW'' Minimum estimation}
\ARROW We say that 3 points $(a,b,c)$ bracket the minimum of $f(x)$ if:
\begin{align}
a<b<c:~f(a)>f(b),~f(c)>f(b)
\label{eq:eq1}
\end{align}
\ARROW How to find such points? The first two we choose randomly.\\
\ARROW These two points show the direction in which the function falls. We step in that direction, with a step equal to the distance between the first two points. If the condition above is not yet fulfilled, we repeat the step.\\
\begin{center}
\only<1>{
\includegraphics[width=0.6\textwidth]{images/abc.png}
}
\only<2>{
\includegraphics[width=0.6\textwidth]{images/abc1.png}
}
\end{center}
\end{frame}

\begin{frame}\frametitle{``RAW'' Minimum estimation}
\ARROW If we have 3 points that fulfil \ref{eq:eq1}, we choose a new point $d$ such that:
\begin{align*}
a<d<c,~d \neq b
\end{align*}
\ARROW The next step depends on the value $f(d)$ we get:
\begin{center}
\begin{tabular}{c||c}
$f(d) < f(b)$ & $f(d) > f(b)$\\
\hline\hline
If $d < b$ then $c=b$, $b=d$ & If $d < b$ then $a=d$\\
\hline
If $d > b$ then $a=b$, $b=d$ & If $d > b$ then $c=d$\\
\hline
\label{tab1}
\end{tabular}
\end{center}
\ARROW At each step we get 3 new points that fulfil \ref{eq:eq1}, so we can iterate the whole procedure. With every step the points move closer together.\\
\ARROW We end the iteration when $\vert c -a \vert < \tau\left( \vert b \vert + \vert d \vert \right)$, where $\tau$ is our tolerance (a sketch of this loop follows on the next slide).\\
\ARROW Now the only question is how to choose $d$?
\end{frame}
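\begin{frame}[fragile]\frametitle{``RAW'' Minimum estimation - a sketch in C++}
\ARROW A minimal sketch of the bracket-shrinking loop from the table above (an added illustration, not the lecture's reference code; the test function, starting bracket and tolerance are assumptions). Here $d$ is simply the midpoint of the larger subinterval; better choices of $d$ are discussed on the next slides.
\tiny
\begin{verbatim}
#include <cmath>
#include <cstdio>

double f(double x) { return (x - 2.0) * (x - 2.0) + 1.0; }   // minimum at x = 2

int main() {
  double a = 0.0, b = 1.0, c = 5.0;   // must fulfil f(a) > f(b), f(c) > f(b)
  const double tau = 1e-8;            // tolerance
  double d = b;
  do {
    // choose d inside the larger of [a,b] and [b,c]
    d = (b - a > c - b) ? 0.5 * (a + b) : 0.5 * (b + c);
    if (f(d) < f(b)) {                // d is the new best point
      if (d < b) { c = b; } else { a = b; }
      b = d;
    } else {                          // b stays the best point, d cuts the bracket
      if (d < b) { a = d; } else { c = d; }
    }
  } while (std::fabs(c - a) > tau * (std::fabs(b) + std::fabs(d)));
  std::printf("minimum at x = %.8f, f(x) = %.8f\n", b, f(b));
  return 0;
}
\end{verbatim}
\normalsize
\end{frame}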
\begin{frame}\frametitle{Golden rule}
\ARROW We look for the point $d$ in the larger of the two intervals $\left[a,b\right]$, $\left[b,c\right]$.\\
\ARROW In order to minimize the risk that the minimum ends up in the smaller interval, we apply the golden rule:
\begin{align*}
\frac{\vert b-a \vert}{\vert c- a \vert}= \frac{\vert c - b \vert }{\vert b -a \vert}
\end{align*}
\ARROW Of course the initial 3 points will not have this feature, but the iterative procedure reaches it quickly:
\begin{itemize}
\item If $\vert b -a \vert > \vert c -b \vert$, then $d = b - w \vert b -a \vert$
\item If $\vert b -a \vert < \vert c -b \vert$, then $d = b + w \vert c -b \vert$
\end{itemize}
where $w = \frac{3 -\sqrt{5}}{2}\simeq 0.381966...$
\end{frame}

\begin{frame}\frametitle{Golden rule - example}
\begin{center}
\includegraphics[width=0.75\textwidth]{images/gold.png}
\end{center}
\end{frame}

\begin{frame}\frametitle{Brent rule}
\ARROW A better method than the golden rule is parabolic interpolation.\\
\ARROW Through the 3 points we draw a parabola and as the new point $d$ we choose its minimum:
\begin{align}
d = \frac{1}{2} \frac{a^2 (f_c - f_b) + b^2 (f_a - f_c ) + c^2 (f_b - f_a) }{a (f_c - f_b) + b (f_a - f_c ) + c (f_b - f_a)}
\label{eq:2}
\end{align}
\ARROW The justification of this method is that around the minimum a parabola is usually a very good approximation of the function.\\
\ARROW We start from 3 points that fulfil~\ref{eq:eq1}. The point $d$ is chosen according to~\ref{eq:2}.\\
\ARROW We accept the new $d$ if:
\begin{itemize}
\item $d$ lies inside the bracket: $a <d <c$,
\item the size of the new bracket, obtained according to~\ref{tab1}, is smaller than half of the bracket from before the last iteration.
\end{itemize}
\ARROW If the above is not true, we choose $d$ as the middle point instead.
\end{frame}

\begin{frame}\frametitle{Hermite interpolation}
\begin{center}
\includegraphics[width=0.65\textwidth]{images/bernet.png}
\end{center}
\ARROW The Brent rule is considered the ``standard'' method for minimizing a function.\\
\ARROW Sometimes it is better to use a 3rd order (Hermite) interpolation polynomial instead.\\
\end{frame}

\begin{frame}
\begin{center}
\begin{Large}
Global minimization
\end{Large}
\end{center}
\end{frame}

\begin{frame}\frametitle{Intro}
\ARROW In many cases we know there is only one minimum; in others we are just interested in any minimum.\\
\ARROW But there are many cases in which we need to know the global minimum!!!\\
\ARROW All methods discussed so far look for a local minimum and are useless in this scenario.\\
\ARROW The global-minimum searches discussed here change year by year, as this is a very rapidly developing area.\\
\ARROW Global minimum finders are mostly based on some random process :)
\end{frame}

\begin{frame}\frametitle{Greedy algorithms}
\ARROW One can wonder: why don't we just put down a fine grid of points and scan every one of them for the minimum?\\
\ARROW Well, this will kick you hard in many dimensions: the number of grid points grows as $N^n$, so it explodes very rapidly with the number of dimensions.\\
\ARROW Also, starting from different points and looking for local minima will not do you any good, for the same reason.\\
\ARROW You need to be far more intelligent in your algorithms!
\end{frame}

\begin{frame}\frametitle{Why Monte Carlo}
\ARROW The problem with the standard methods is that once they see a minimum they try to get there as fast as possible.\\
\ARROW If you do so, you will miss the global minimum.\\
\ARROW Your algorithm needs to be able to take a step back!\\
\ARROW That's why MC algorithms are so good at this: steps in better directions are favoured, but there is also a non-zero probability that the algorithm takes a step back.
\end{frame}

\begin{frame}\frametitle{Why Monte Carlo - the algorithm}
\ARROW $f: \mathbb{R}^N \to \mathbb{R}$ is the function whose minimum we look for.\\
\ARROW We start from a random point $x_0$: $f_0=f(x_0)$, $x_{\min} =x_0$, $f_{\min} = f_0$. $\sigma> 0$, $T>0$ are parameters. We do $M$ steps:
\begin{enumerate}
\item We calculate $\hat{x} = x_k + \sigma \epsilon_k$, where $\epsilon_k$ is an $N$-dimensional random vector drawn from a Gaussian distribution.
\item We calculate $\hat{f}=f(\hat{x})$.
\item If $\hat{f}<f_k$ we accept the step: $x_{k+1}=\hat{x}$, $f_{k+1}=\hat{f}$.
\item If $\hat{f}< f_{\min}$, we update $x_{\min}=x_{k+1}$, $f_{\min}=\hat{f}$.
\item If $\hat{f}\geq f_k$:
\begin{itemize}
\item We choose $z$ from $\mathcal{U}(0,1)$.
\item If
\begin{align*}
z < \exp \left(- \frac{\hat{f}-f_k}{T} \right)
\end{align*}
is true, we accept the new position; if not, we stay at $x_k$.
\end{itemize}
\item We start over :)
\end{enumerate}
\end{frame}

\begin{frame}\frametitle{Why Monte Carlo - the algorithm}
\ARROW There is no natural way to stop the algorithm.\\
\ARROW We simply do as many steps as we assumed.\\
\ARROW Repeat the walk a couple of times with different starting points.\\
\ARROW The $\epsilon_k$ does not have to be Gaussian; some other distribution may be used.\\
\ARROW There are many algorithms of this kind out there: genetic algorithms, MCMC.\\
\ARROW Current state of the art: ``Metropolis-Hastings'' algorithm.\\
\ARROW Sign up for the MC course if you want to learn about those :)\\
\ARROW A minimal sketch of the random-walk algorithm above is shown on the next slide.
\end{frame}
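\begin{frame}[fragile]\frametitle{Why Monte Carlo - a sketch in C++}
\ARROW A minimal sketch of the random-walk algorithm from the previous slides (an added illustration, not the lecture's reference code; the test function, starting point, $\sigma$, $T$ and $M$ are assumptions):
\tiny
\begin{verbatim}
#include <cmath>
#include <cstdio>
#include <random>
#include <vector>

// assumed 2-dimensional test function, minimum at (1, 2)
double f(const std::vector<double>& x) {
  return (x[0] - 1.0) * (x[0] - 1.0) + (x[1] - 2.0) * (x[1] - 2.0);
}

int main() {
  const int M = 100000;                        // number of steps
  const double sigma = 0.1, T = 0.05;          // step size and "temperature"
  std::mt19937 gen(42);
  std::normal_distribution<double> gauss(0.0, 1.0);
  std::uniform_real_distribution<double> uni(0.0, 1.0);

  std::vector<double> x = {5.0, -3.0};         // starting point x_0
  std::vector<double> xmin = x;
  double fk = f(x), fmin = fk;

  for (int k = 0; k < M; ++k) {
    std::vector<double> xhat = x;
    for (double& xi : xhat) xi += sigma * gauss(gen);   // xhat = x_k + sigma*eps_k
    const double fhat = f(xhat);
    // accept if better, or with probability exp(-(fhat - fk)/T) if worse
    if (fhat < fk || uni(gen) < std::exp(-(fhat - fk) / T)) {
      x = xhat; fk = fhat;
      if (fk < fmin) { xmin = x; fmin = fk; }   // keep track of the best point seen
    }
  }
  std::printf("best point: (%.4f, %.4f), f = %.6f\n", xmin[0], xmin[1], fmin);
  return 0;
}
\end{verbatim}
\normalsize
\end{frame}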
\backupbegin

\begin{frame}\frametitle{Backup}
\end{frame}

\backupend

\end{document}