\documentclass[11 pt,xcolor={dvipsnames,svgnames,x11names,table}]{beamer}

\usepackage[english]{babel} 
\usepackage{polski}         
\usepackage[skins,theorems]{tcolorbox}
\tcbset{highlight math style={enhanced,
  colframe=red,colback=white,arc=0pt,boxrule=1pt}}

\usetheme[
	bullet=circle,		% Other option: square
	bigpagenumber,		% circled page number on lower right
	topline=true,			% colored bar at the top of the frame 
	shadow=false,			% Shading for beamer blocks
	watermark=BG_lower,	% png file for the watermark
	]{Flip}

%\logo{\kern+1.em\includegraphics[height=1cm]{SHiP-3_LightCharcoal}}
                            

\usepackage[lf]{berenis}
\usepackage[LY1]{fontenc}
\usepackage[utf8]{inputenc}

\usepackage{emerald}
\usefonttheme{professionalfonts}
\usepackage[no-math]{fontspec}	
\usepackage{listings}
\defaultfontfeatures{Mapping=tex-text}	% This seems to be important for mapping glyphs properly

\setmainfont{Gillius ADF}			% Beamer ignores "main font" in favor of sans font
\setsansfont{Gillius ADF}			% This is the font that beamer will use by default
% \setmainfont{Gill Sans Light}		% Prettier, but harder to read

\setbeamerfont{title}{family=\fontspec{Gillius ADF}}

\input t1augie.fd

%\newcommand{\handwriting}{\fontspec{augie}} % From Emerald City, free font
%\newcommand{\handwriting}{\usefont{T1}{fau}{m}{n}} % From Emerald City, free font
% \newcommand{\handwriting}{}	% If you prefer no special handwriting font or don't have augie

%% Gill Sans doesn't look very nice when boldfaced
%% This is a hack to use Helvetica instead
%% Usage: \textbf{\forbold some stuff}
%\newcommand{\forbold}{\fontspec{Arial}}

\usepackage{graphicx}
\usepackage[export]{adjustbox}

\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{colortbl}
\usepackage{mathrsfs} 			% For Weinberg-esque letters
\usepackage{cancel}				% For "SUSY-breaking" symbol
\usepackage{slashed}            % for slashed characters in math mode
\usepackage{bbm}                % for \mathbbm{1} (unit matrix)
\usepackage{amsthm}				% For theorem environment
\usepackage{multirow}			% For multi row cells in table
\usepackage{arydshln} 			% For dashed lines in arrays and tables
\usepackage{siunitx}
\usepackage{xhfill}
\usepackage{grffile}
\usepackage{textpos}
\usepackage{subfigure}
\usepackage{tikz}
\usepackage{hyperref}
%\usepackage{hepparticles}    
\usepackage[italic]{hepparticles}     

\usepackage{hepnicenames} 

% Drawing a line
\tikzstyle{lw} = [line width=20pt]
\newcommand{\topline}{%
  \tikz[remember picture,overlay] {%
    \draw[crimsonred] ([yshift=-23.5pt]current page.north west)
             -- ([yshift=-23.5pt,xshift=\paperwidth]current page.north west);}}



% % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
\usepackage{tikzfeynman}		% For Feynman diagrams
\usetikzlibrary{arrows,shapes}
\usetikzlibrary{trees}
\usetikzlibrary{matrix,arrows} 				% For commutative diagram
% http://www.felixl.de/commu.pdf
\usetikzlibrary{positioning}				% For "above of=" commands
\usetikzlibrary{calc,through}				% For coordinates
\usetikzlibrary{decorations.pathreplacing}  % For curly braces
% http://www.math.ucla.edu/~getreuer/tikz.html
\usepackage{pgffor}							% For repeating patterns

\usetikzlibrary{decorations.pathmorphing}	% For Feynman Diagrams
\usetikzlibrary{decorations.markings}
\tikzset{
	% >=stealth', %%  Uncomment for more conventional arrows
	vector/.style={decorate, decoration={snake}, draw},
	provector/.style={decorate, decoration={snake,amplitude=2.5pt}, draw},
	antivector/.style={decorate, decoration={snake,amplitude=-2.5pt}, draw},
	fermion/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{>}}}},
	fermionbar/.style={draw=gray, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=gray]{<}}}},
	fermionnoarrow/.style={draw=gray},
	gluon/.style={decorate, draw=black,
		decoration={coil,amplitude=4pt, segment length=5pt}},
	scalar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	scalarbar/.style={dashed,draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{<}}}},
	scalarnoarrow/.style={dashed,draw=black},
	electron/.style={draw=black, postaction={decorate},
		decoration={markings,mark=at position .55 with {\arrow[draw=black]{>}}}},
	bigvector/.style={decorate, decoration={snake,amplitude=4pt}, draw},
}

% TIKZ - for block diagrams, 
% from http://www.texample.net/tikz/examples/control-system-principles/
% \usetikzlibrary{shapes,arrows}
\tikzstyle{block} = [draw, rectangle, 
minimum height=3em, minimum width=6em]




\usetikzlibrary{backgrounds}
\usetikzlibrary{mindmap,trees}	% For mind map
\newcommand{\degree}{\ensuremath{^\circ}}
\newcommand{\E}{\mathrm{E}}
\newcommand{\Var}{\mathrm{Var}}
\newcommand{\Cov}{\mathrm{Cov}}
\newcommand\Ts{\rule{0pt}{2.6ex}}       % Top strut
\newcommand\Bs{\rule[-1.2ex]{0pt}{0pt}} % Bottom strut

\graphicspath{{images/}}	% Put all images in this directory. Avoids clutter.

% SOME COMMANDS THAT I FIND HANDY
% \renewcommand{\tilde}{\widetilde} % dinky tildes look silly, doesn't work with fontspec
%\newcommand{\comment}[1]{\textcolor{comment}{\footnotesize{#1}\normalsize}} % comment mild
%\newcommand{\Comment}[1]{\textcolor{Comment}{\footnotesize{#1}\normalsize}} % comment bold
%\newcommand{\COMMENT}[1]{\textcolor{COMMENT}{\footnotesize{#1}\normalsize}} % comment crazy bold
\newcommand{\Alert}[1]{\textcolor{Alert}{#1}} % louder alert
\newcommand{\ALERT}[1]{\textcolor{ALERT}{#1}} % loudest alert
%% "\alert" is already a beamer pre-defined
\newcommand*{\Scale}[2][4]{\scalebox{#1}{$#2$}}%

\def\Put(#1,#2)#3{\leavevmode\makebox(0,0){\put(#1,#2){#3}}}

\usepackage{gmp}
\usepackage[final]{feynmp-auto}

\usepackage[backend=bibtex,style=numeric-comp,firstinits=true]{biblatex}
\bibliography{bib}
\setbeamertemplate{bibliography item}[text]

\makeatletter\let\frametextheight\beamer@frametextheight\makeatother

% suppress frame numbering for backup slides
% you always need the appendix for this!
\newcommand{\backupbegin}{
	\newcounter{framenumberappendix}
	\setcounter{framenumberappendix}{\value{framenumber}}
}
\newcommand{\backupend}{
	\addtocounter{framenumberappendix}{-\value{framenumber}}
	\addtocounter{framenumber}{\value{framenumberappendix}} 
}


\definecolor{links}{HTML}{2A1B81}
%\hypersetup{colorlinks,linkcolor=,urlcolor=links}

% For shapo's formulas:
\def\lsi{\raise0.3ex\hbox{$<$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\def\gsi{\raise0.3ex\hbox{$>$\kern-0.75em\raise-1.1ex\hbox{$\sim$}}}
\newcommand{\lsim}{\mathop{\lsi}}
\newcommand{\gsim}{\mathop{\gsi}}
\newcommand{\wt}{\widetilde}
%\newcommand{\ol}{\overline}
\newcommand{\Tr}{\rm{Tr}}
\newcommand{\tr}{\rm{tr}}
\newcommand{\eqn}[1]{&\hspace{-0.7em}#1\hspace{-0.7em}&}
\newcommand{\vev}[1]{\rm{$\langle #1 \rangle$}}
\newcommand{\abs}[1]{\rm{$\left| #1 \right|$}}
\newcommand{\eV}{\rm{eV}}
\newcommand{\keV}{\rm{keV}}
\newcommand{\GeV}{\rm{GeV}}
\newcommand{\im}{\rm{Im}}
\newcommand{\disp}{\displaystyle}
\def\be{\begin{equation}}
\def\ee{\end{equation}}
\def\ba{\begin{eqnarray}}
\def\ea{\end{eqnarray}}
\def\d{\partial}
\def\l{\left(}
\def\r{\right)}
\def\la{\langle}
\def\ra{\rangle}
\def\e{{\rm e}}
\def\Br{{\rm Br}}
\def\fixme{{\color{red} FIXME!}}
\def\mc{{\color{Magenta}{MC}}}
\def\pdf{{\rm p.d.f.}}

\author{ {\fontspec{Trebuchet MS}Marcin Chrz\k{a}szcz} (Universit\"{a}t Z\"{u}rich)}
\institute{UZH}
\title[Random number generators and application]{Random number generators and application}
\date{\fixme}


\begin{document}
\tikzstyle{every picture}+=[remember picture]

{
\setbeamertemplate{sidebar right}{\llap{\includegraphics[width=\paperwidth,height=\paperheight]{bubble2}}}
\begin{frame}[c]%{\phantom{title page}} 
\begin{center}
\begin{center}
	\begin{columns}
		\begin{column}{0.9\textwidth}
			\flushright\fontspec{Trebuchet MS}\bfseries \Huge {Applications of Monte Carlo methods}
		\end{column}
		\begin{column}{0.2\textwidth}
		  %\includegraphics[width=\textwidth]{SHiP-2}
		\end{column}
	\end{columns}
\end{center}
	\quad
	\vspace{3em}
\begin{columns}
\begin{column}{0.44\textwidth}
\flushright \vspace{-1.8em} {\fontspec{Trebuchet MS} \Large Marcin Chrz\k{a}szcz\\\vspace{-0.1em}\small \href{mailto:mchrzasz@cern.ch}{mchrzasz@cern.ch}}

\end{column}
\begin{column}{0.53\textwidth}
\includegraphics[height=1.3cm]{uzh-transp}
\end{column}
\end{columns}

\vspace{1em}
%		\footnotesize\textcolor{gray}{With N. Serra, B. Storaci\\Thanks to the theory support from M. Shaposhnikov, D. Gorbunov}\normalsize\\
\vspace{0.5em}
	\textcolor{normal text.fg!50!Comment}{Experimental Methods in Particle Physics, \\ 26 November, 2015}
\end{center}
\end{frame}
}





\begin{frame}\frametitle{Markov Chain MC}
\begin{itemize}
\item Consider a finite set of possible states: $S_1$, $S_2$, \ldots
\item And discrete time steps, labelled $1$, $2$, \ldots
\item At time $t$ the state is denoted $X_t$.
\item The conditional probability is defined as:
\end{itemize}
\begin{equation}
P(X_t=S_{j_t} \vert X_{t-1}=S_{j_{t-1}},\ldots, X_{1}=S_{j_1}) \nonumber
\end{equation}
\begin{itemize}
\item The chain is a Markov chain if this probability depends only on the previous step:
\end{itemize}
\begin{equation}
P(X_t=S_{j_t} \vert X_{t-1}=S_{j_{t-1}},\ldots, X_{1}=S_{j_1}) =  P(X_t=S_{j_t} \vert X_{t-1}=S_{j_{t-1}} )\nonumber
\end{equation}
\begin{itemize}
\item For this reason MCMC is also known as the drunk sailor's walk (a small simulation sketch follows this frame).
\item Very powerful method, used to solve systems of linear equations, invert matrices, solve differential equations, etc.
\end{itemize}
\end{frame}
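%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]\frametitle{Markov Chain MC: a minimal simulation sketch}
\begin{itemize}
\item A minimal Python sketch (added illustration with an assumed $2\times 2$ transition matrix, not part of the lecture code): each step depends only on the current state.
\end{itemize}
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import random

P = [[0.9, 0.1],   # P[i][j] = probability to jump from state i to state j
     [0.5, 0.5]]

def step(state):
    u, acc = random.random(), 0.0
    for j, p in enumerate(P[state]):
        acc += p
        if u < acc:
            return j
    return len(P) - 1

state, counts = 0, [0, 0]
for _ in range(100000):
    state = step(state)          # the next state depends only on the current one
    counts[state] += 1
print([c / sum(counts) for c in counts])  # ~ stationary distribution (5/6, 1/6)
\end{lstlisting}
\end{frame}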



\begin{frame}\frametitle{Linear Equations}
\begin{itemize}
\item Let's say we have a linear equation system:
\end{itemize}
\begin{equation}
\begin{array}{lcl} X & = & pY + (1-p) A \\ Y & = & qX + (1-q)B \end{array} \nonumber
\end{equation}
\begin{itemize}
\item We know $A,B,p,q$; $X$ and $Y$ are meant to be determined.
\item Algorithm:
\begin{enumerate}
\item We choose the first term of the first equation with probability $p$ and the second term with probability $1-p$.
\item If we choose the second one, the outcome of this MCMC run is $W=A$.
\item If we choose the first, we go to the second equation and choose its first term with probability $q$ and the second with $1-q$.
\item If we choose the second one, the outcome of this MCMC run is $W=B$.
\item If we choose the first, we go back to the first equation.
\item We repeat the procedure.
\end{enumerate}
\item We can estimate the solution of this system (a code sketch follows on the next slide):
\end{itemize}
\begin{equation}
\hat{X} = \dfrac{1}{N}\sum_{i=1}^N W_i, \qquad \hat{\sigma}_X=\dfrac{1}{\sqrt{N-1}}\sqrt{\dfrac{1}{N} \sum_{i=1}^N W_i^2-\hat{X}^2} \nonumber
\end{equation}

\end{frame}
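%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]\frametitle{Linear Equations: a code sketch}
\begin{itemize}
\item A minimal Python sketch of the walk above ($p$, $q$, $A$, $B$ are assumed example values; not part of the lecture code):
\end{itemize}
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import random

p, q, A, B = 0.6, 0.3, 2.0, 5.0
N = 200000

def one_walk():
    eq = 1                           # start in the first equation
    while True:
        if eq == 1:
            if random.random() < p:  # term p*Y: move to the second equation
                eq = 2
            else:                    # term (1-p)*A: stop, the outcome is A
                return A
        else:
            if random.random() < q:  # term q*X: go back to the first equation
                eq = 1
            else:                    # term (1-q)*B: stop, the outcome is B
                return B

W = [one_walk() for _ in range(N)]
X_hat = sum(W) / N
print(X_hat)                                          # MC estimate of X
print(((1 - p) * A + p * (1 - q) * B) / (1 - p * q))  # exact X for comparison
\end{lstlisting}
\end{frame}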

\begin{frame}\frametitle{Neumann-Ulam method}
\begin{itemize}
\item Let's try to apply the basic MCMC method to solve a general linear equation system:
\end{itemize}
\begin{equation}
A \overrightarrow{x} = \overrightarrow{b} \nonumber
\end{equation}
\begin{itemize}
\item The above system can always be (see the linear algebra lecture) translated into the system:
\end{itemize}
\begin{equation}
\overrightarrow{x} = \overrightarrow{a} + H \overrightarrow{x} \nonumber
\end{equation}
\begin{itemize}
\item For this method we assume that the norm of the matrix satisfies:
\end{itemize}
\begin{equation}
\Vert H \Vert =  \max_{1 \leq i \leq n} \sum_{j=1}^n \vert h_{ij} \vert <1 \nonumber
\end{equation}
\begin{itemize}
\item The system can then be written in the form:
\end{itemize}
\begin{equation}
(1 -H)\overrightarrow{x}=\overrightarrow{a} \nonumber
\end{equation}
\end{frame}




\begin{frame}\frametitle{Neumann-Ulam method}
\begin{itemize}
\item The solution is then:
\end{itemize}
\begin{equation}
\overrightarrow{x}_0=(1 -H)^{-1}\overrightarrow{a} \nonumber
\end{equation}
\begin{itemize}
\item We can expand this as a Neumann series:
\end{itemize}
\begin{equation}
\overrightarrow{x}_0=(1 -H)^{-1}\overrightarrow{a} = \overrightarrow{a} + H \overrightarrow{a} + H^2 \overrightarrow{a} + H^3 \overrightarrow{a} +\ldots \nonumber
\end{equation}
\begin{itemize}
\item For the $i$-th component of the $\overrightarrow{x}$ vector:
\end{itemize}
\begin{equation}
x_i^0= a_i + \sum_{j_1=1}^n h_{ij_1} a_{j_1} + \sum_{j_1=1}^n \sum_{j_2=1}^n  h_{ij_1} h_{j_1 j_2}  a_{j_2} +  \sum_{j_1=1}^n \sum_{j_2=1}^n \sum_{j_3=1}^n  h_{ij_1} h_{j_1 j_2} h_{j_2 j_3} a_{j_3} + \ldots\nonumber
\end{equation}
\begin{itemize}
\item One can construct a stochastic process whose expectation value reproduces the series above (a small numerical check of the series follows this frame).
\end{itemize}

\end{frame}
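%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]\frametitle{Neumann-Ulam method: series check}
\begin{itemize}
\item A minimal Python sketch (added illustration): summing $\overrightarrow{a} + H\overrightarrow{a} + H^2\overrightarrow{a} + \ldots$ numerically; the matrix and vector are taken from the example a few slides later.
\end{itemize}
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import numpy as np

H = np.array([[0.2, 0.3, 0.1],
              [0.4, 0.3, 0.2],
              [0.3, 0.1, 0.1]])
a = np.array([1.5, -1.0, 0.7])

x, term = a.copy(), a.copy()
for _ in range(100):                      # sum the series term by term
    term = H @ term                       # H^k a
    x += term
print(x)                                  # -> ~ (2.154303, 0.237389, 1.522255)
print(np.linalg.solve(np.eye(3) - H, a))  # direct solution for comparison
\end{lstlisting}
\end{frame}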

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam method}
\begin{itemize}
\item To do so we append to the matrix an additional (absorption) column:
\end{itemize}
\begin{equation}
h_{i,0} = 1-\sum_{j=1}^n h_{ij} > 0 \nonumber
\end{equation}
\begin{itemize}
\item The system has states: $\lbrace 0,1,2...,n\rbrace$
\item The state at time $t$ is denoted $i_t$.
\item We make a random walk according to the following rules:
\begin{itemize}
\item At the beginning of the walk ($t=0$) we are at state $i_0$. 
\item If at time $t$ we are in state $i_t$, then at time $t+1$ we move to state $i_{t+1}$ with probability $h_{i_t i_{t+1}}$. 
\item We stop the walk when we reach state $0$. 
\end{itemize}
\end{itemize}
\item The path $\gamma = (i_0, i_1, i_2, \ldots, i_k, 0)$ is called a trajectory, and $X(\gamma)$ is the score assigned to it (one common choice is the absorption score $X(\gamma)=a_{i_k}/h_{i_k,0}$).
\item It can be proven that $x_i^0 =\E \lbrace X (\gamma) \vert i_0=i \rbrace$.
\end{itemize}



\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam method, $\rm \color{RubineRed}{Lecture3/Markov}$}
\begin{itemize}
\item For example, let's try to solve this equation system:
\end{itemize}
\begin{equation}
\overrightarrow{x} = 
\left(\begin{array}{c}
 1.5  \\
-1.0\\
0.7  \end{array} \right) 
+
\left(\begin{array}{ccc}
0.2 &  0.3 & 0.1  \\
0.4 &  0.3 & 0.2 \\
0.3 &  0.1 & 0.1  \end{array} \right) \overrightarrow{x}
 \nonumber
\end{equation}
\begin{itemize}
\item The solution is $\overrightarrow{x}_0 = (2.154303, 0.237389, 1.522255)$.
\end{itemize}
\begin{columns}

\column{0.1in}

\column{2.5in}
\begin{itemize}
\item The probability matrix $h_{ij}$ has the form:
\end{itemize}
\begin{tabular}{|c|cccc|}
\hline
$i \backslash j$ &  1 & 2 & 3 & 0 (stop)  \\ \hline
1 & 0.2 & 0.3 & 0.1 & 0.4 \\
2 & 0.4 & 0.3 & 0.2 & 0.1 \\
3 & 0.3 & 0.1 & 0.1 & 0.5 \\ \hline
\end{tabular}

\column{2.5in}
\begin{itemize}
\item An example solution:
\end{itemize}
\includegraphics[width=0.95\textwidth]{images/mark.png}

\end{columns}


\end{frame}
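%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]\frametitle{Neumann-Ulam method: a code sketch}
\begin{itemize}
\item A minimal Python sketch of the random walk (not the $\rm Lecture3/Markov$ program); the absorption score $X(\gamma)=a_{i_k}/h_{i_k,0}$ used here is one common choice, assumed for illustration.
\end{itemize}
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import random
import numpy as np

H = np.array([[0.2, 0.3, 0.1],
              [0.4, 0.3, 0.2],
              [0.3, 0.1, 0.1]])
a = np.array([1.5, -1.0, 0.7])
absorb = 1.0 - H.sum(axis=1)        # h_{i,0}: probability to stop in state i

def walk(i):
    # one trajectory started in state i, returns the score X(gamma)
    while True:
        u, acc = random.random(), 0.0
        for j in range(len(a)):     # move i -> j with probability h_{ij}
            acc += H[i, j]
            if u < acc:
                i = j
                break
        else:                       # no jump chosen: absorbed in state 0
            return a[i] / absorb[i]

N = 200000
x = [sum(walk(i) for _ in range(N)) / N for i in range(3)]
print(x)                            # ~ (2.154303, 0.237389, 1.522255)
\end{lstlisting}
\end{frame}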

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam dual method}
\begin{itemize}
\item The problem with the Neumann-Ulam method is that you need to repeat it for each coordinate of the $\overrightarrow{x}_0$ vector.
\item The dual method estimates the whole $\overrightarrow{x}_0$ vector at once.
\item The algorithm:
\begin{itemize}
\item On the indices $\lbrace 1,\ldots,n\rbrace$ we set a probability distribution:\\ $q_1, q_2,\ldots, q_n$, with $q_i>0$ and $\sum_{i=1}^n q_i=1$. 
\item The starting point is drawn from the $q_i$ distribution.
\item If at time $t$ we are in state $i_t$, then at time $t+1$ we move to state $i_{t+1}$ with probability $p(i_{t+1} \vert i_t) = h_{i_{t+1},i_{t}}$. For $i_{t+1}=0$ we define the probability $p(0 \vert i_t) = h_{0,i_{t}}=1-\sum_{j=1}^n h_{j,i_{t}}$, which we also assume to be positive.
\item NOTE: here the walk uses the matrix transposed with respect to the previous method: $H^{T}$.
\item Again, we end the walk when we reach state $0$. 
\item To the trajectory $\gamma = (i_0, i_1,\ldots,i_k, 0)$ we assign the vector:
\end{itemize}
\begin{equation}
\overrightarrow{Y}(\gamma) = \dfrac{a_{i_0}}{ q_{i_{0}} p(0 \vert i_k)   }  \widehat{e}_{i_{k}} \in \mathcal{R}^n \nonumber
\end{equation}
\item The solution is estimated as: $\overrightarrow{x}_0 = \dfrac{1}{N} \sum_{i=1}^{N} \overrightarrow{Y}(\gamma_i)$ (a code sketch follows the example slide).
\end{itemize}


\end{frame}



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}\frametitle{Neumann-Ulam dual method, $\rm \color{RubineRed}{Lecture3/Markov2}$}
\begin{itemize}
\item Let's try to solve the equation system:
\end{itemize}
\begin{equation}
\overrightarrow{x} = 
\left(\begin{array}{c}
 1.5  \\
-1.0\\
0.7  \end{array} \right) 
+
\left(\begin{array}{ccc}
0.2 &  0.3 & 0.1  \\
0.4 &  0.3 & 0.2 \\
0.1 &  0.1 & 0.1  \end{array} \right) \overrightarrow{x}
 \nonumber
\end{equation}
\begin{itemize}
\item The solution is: $\overrightarrow{x}_0 = (2.0, 0.0, 1.0)$.
\item Let's take the initial probabilities to be constant:
\end{itemize}
\begin{equation}
q_1=q_2=q_3=\dfrac{1}{3}  \nonumber
\end{equation}
\begin{columns}

\column{0.1in}

\column{2.5in}
\begin{itemize}
\item The probability matrix $h_{ij}$ has the form:
\end{itemize}
\begin{tabular}{|c|cccc|}
\hline
$i \backslash j$ &  1 & 2 & 3 & 0 (stop)  \\ \hline
1 & 0.2 & 0.4 & 0.1 & 0.3 \\
2 & 0.3 & 0.3 & 0.1 & 0.3 \\
3 & 0.1 & 0.2 & 0.1 & 0.6 \\ \hline
\end{tabular}

\column{2.5in}
\begin{itemize}
\item An example solution:
\end{itemize}
\includegraphics[width=0.95\textwidth]{images/mark2.png}

\end{columns}
\end{frame}
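%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]\frametitle{Neumann-Ulam dual method: a code sketch}
\begin{itemize}
\item A minimal Python sketch of the dual walk (not the $\rm Lecture3/Markov2$ program), using the matrix, $\overrightarrow{a}$ and uniform $q_i$ from this example:
\end{itemize}
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import random
import numpy as np

H = np.array([[0.2, 0.3, 0.1],
              [0.4, 0.3, 0.2],
              [0.1, 0.1, 0.1]])
a = np.array([1.5, -1.0, 0.7])
n = len(a)
Ht = H.T                           # the walk uses the transposed matrix
absorb = 1.0 - Ht.sum(axis=1)      # p(0|i) = 1 - sum_j h_{j,i}
q = np.full(n, 1.0 / n)            # uniform starting distribution

def one_walk():
    i0 = i = random.randrange(n)   # i_0 drawn from q
    while True:
        u, acc = random.random(), 0.0
        for j in range(n):         # move i -> j with probability h_{j,i}
            acc += Ht[i, j]
            if u < acc:
                i = j
                break
        else:                      # absorbed: score the whole vector at once
            y = np.zeros(n)
            y[i] = a[i0] / (q[i0] * absorb[i])
            return y

N = 300000
x = sum(one_walk() for _ in range(N)) / N
print(x)                           # ~ (2.0, 0.0, 1.0)
\end{lstlisting}
\end{frame}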




\begin{frame}\frametitle{Look elsewhere effect, $\rm \color{RubineRed}{Lecture3/LEE}$}
\begin{itemize}
\item Look elsewhere effect addresses the following problem:
\begin{itemize}
\item Imagine you observed a $3\sigma$ deviation in one of the observables that you measured. 
\item Before you get excited, you need to check whether, given how many measurements you made, such a deviation could easily happen by chance!
\end{itemize}
\item Example: Let's say we have measured 50 observables. What is the probability to observe one that is $3\sigma$ away from the theory prediction?
\item Let's simulate 50 Gaussian-distributed numbers centred at 0 with width 1, and count in how many simulations at least one of the 50 numbers has absolute value $>3$.
\item More complicated example: what if you observed three consecutive $2\sigma$ fluctuations among 50 measurements?
\item This kind of study is best solved with MC simulations (see the sketch on the next slide).
\end{itemize}


\end{frame}
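%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]\frametitle{Look elsewhere effect: a code sketch}
\begin{itemize}
\item A minimal Python sketch of the toy study above (added illustration, not the $\rm Lecture3/LEE$ program): how often does at least one of 50 independent Gaussian measurements fluctuate beyond $3\sigma$?
\end{itemize}
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import random

n_obs, n_toys, count = 50, 100000, 0
for _ in range(n_toys):
    # one "experiment": 50 independent pulls, each N(0, 1)
    if any(abs(random.gauss(0.0, 1.0)) > 3.0 for _ in range(n_obs)):
        count += 1
print(count / n_toys)   # ~0.13: a single 3 sigma outlier is not that unlikely
\end{lstlisting}
\end{frame}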






\begin{frame}\frametitle{Travelling Salesman Problem}
\begin{itemize}
\item A salesman starting from his base has to visit $n-1$ other locations and return to the base. The problem is to find the shortest route.
\item For large $n$ the problem can't be solved by brute force, as the complexity of the problem is $(n-1)!$
\item There exist simplified numerical solutions assuming factorizations. Unfortunately, even those require enormous computing power.
\item Can MC help? YES :)
\item The minimum distance $l$ has to depend on 2 factors: the area $P$ of the city the salesman is travelling in, and the density of places he wants to visit: $\dfrac{n}{P}$
\item From this we can assume:
\end{itemize}
\begin{equation}
l \sim P^a \left(\dfrac{n}{P}\right)^b=P^{a-b}n^b. \nonumber
\end{equation}

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%







\begin{frame}\frametitle{Traveling Salesman Problem}
\begin{itemize}
\item From dimensional analysis:
\end{itemize}
\begin{equation}
a-b=\dfrac{1}{2}. \nonumber
\end{equation}
\begin{itemize}
\item To get a length $l$ we need the square root of the area.
\item From this it follows that:
\end{itemize}
\begin{equation}
l \sim P^a \left(\dfrac{n}{P}\right)^b=P^{0.5}n^{a-0.5}. \nonumber
\end{equation}
\begin{itemize}
\item Now let's multiply the area by a factor $\alpha$ while keeping the density constant; then:
\end{itemize}
\begin{equation}
l \sim \alpha^{0.5} \alpha^{a-0.5} = \alpha^a \nonumber
\end{equation}
\begin{itemize}
\item In this case the typical distance between the clients does not change, but the number of clients increases by a factor $\alpha$, so:
\end{itemize}
\begin{equation}
l \sim \alpha \nonumber
\end{equation}
\begin{itemize}
\item Comparing the two scalings, we get $a=1$.
\end{itemize}
\end{frame}



\begin{frame}\frametitle{Traveling Salesman Problem}
\begin{itemize}
\item In total:
\end{itemize}
\begin{equation}
l \approx k (nP)^{0.5}\nonumber
\end{equation}
\begin{itemize}
\item Of course $k$ depends on the shape of the area and on the locations of the clients. However, for large $n$ this dependence disappears and $k$ approaches a universal constant.
\item To use the above formula we need to somehow determine $k$.
\item How to estimate it? Make a toy MC: take a square, put $n$ points in it uniformly, and compute the tour length $l$. Then it's trivial (see the sketch after this slide):
\end{itemize}
\begin{equation}
k= l(nP)^{-0.5} \nonumber
\end{equation}
\end{frame}
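%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[fragile]\frametitle{Traveling Salesman Problem: a toy MC sketch}
\begin{itemize}
\item A minimal Python sketch of the toy MC above (added illustration): $n$ uniform points in a unit square ($P=1$), $k = l\,(nP)^{-0.5}$. A greedy nearest-neighbour tour stands in for the true optimum, so it only gives an upper estimate of $l$ and hence of $k$.
\end{itemize}
\begin{lstlisting}[language=Python,basicstyle=\scriptsize\ttfamily]
import math
import random

def tour_length(pts):
    # greedy nearest-neighbour tour through all points, closed back to the start
    unvisited = pts[1:]
    current, length = pts[0], 0.0
    while unvisited:
        nxt = min(unvisited, key=lambda p: math.dist(current, p))
        length += math.dist(current, nxt)
        unvisited.remove(nxt)
        current = nxt
    return length + math.dist(current, pts[0])

n, n_toys = 500, 20
ks = []
for _ in range(n_toys):
    pts = [(random.random(), random.random()) for _ in range(n)]
    ks.append(tour_length(pts) / math.sqrt(n * 1.0))   # k = l (nP)^{-1/2}
print(sum(ks) / n_toys)   # upper estimate of k (~0.9 for this heuristic)
\end{lstlisting}
\end{frame}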


\begin{frame}\frametitle{Traveling Salesman Problem}
\begin{itemize}
\item This kind of MC experiment might require large CPU power and time. The advantage is that once we have solved the problem, we can reuse the obtained $k$ for other cases (it is a universal constant!).
\item It turns out that:
\end{itemize}
\begin{equation}
k \sim \dfrac{3}{4} \nonumber
\end{equation}
\begin{itemize}
\item OK, but this way we can estimate $l$, not the actual shortest route! Why the hell did we do this exercise?!
\item It turns out that for most problems we only need a solution whose length is close to the minimal $l$, not the exact minimum.
\end{itemize}
\end{frame}

\begin{frame}\frametitle{War Games}

\begin{itemize}
\item In 1966 S. Andersoon simulated for the Swedish government how a tank battle would look.
\item Each side has 15 tanks, which it deploys on the battlefield. 
\item The battle is done in time steps.
\item Each tank has 5 states:
\begin{itemize}
\item OK
\item Tank can only shoot
\item Tank can only move
\item Tank is destroyed
\item Temporary states
\end{itemize}
\item This model made it possible to test different fighting strategies.

\end{itemize}
\end{frame}



\begin{frame}

\begin{center}
\begin{Huge}
 Q \& A
\end{Huge}
\end{center}


\end{frame}








\backupbegin   

\begin{frame}\frametitle{Backup}


\end{frame}

\backupend			

\end{document}