diff --git a/Lectures_my/MC_2016/Lecture8/mchrzasz.log b/Lectures_my/MC_2016/Lecture8/mchrzasz.log index cd59fa2..46eb0b2 100644 --- a/Lectures_my/MC_2016/Lecture8/mchrzasz.log +++ b/Lectures_my/MC_2016/Lecture8/mchrzasz.log @@ -1,4 +1,4 @@ -This is XeTeX, Version 3.1415926-2.5-0.9999.3 (TeX Live 2013/Debian) (format=xelatex 2015.4.1) 21 APR 2016 18:03 +This is XeTeX, Version 3.1415926-2.5-0.9999.3 (TeX Live 2013/Debian) (format=xelatex 2015.4.1) 21 APR 2016 18:15 entering extended mode restricted \write18 enabled. %&-line parsing enabled. diff --git a/Lectures_my/MC_2016/Lecture8/mchrzasz.tex b/Lectures_my/MC_2016/Lecture8/mchrzasz.tex index b521618..73ce3ec 100644 --- a/Lectures_my/MC_2016/Lecture8/mchrzasz.tex +++ b/Lectures_my/MC_2016/Lecture8/mchrzasz.tex @@ -639,7 +639,7 @@ \end{frame} -\begin{frame}\frametitle{Neumann-Ulam dual method, prove} +\begin{frame}\frametitle{Neumann-Ulam dual method, proof} \begin{footnotesize} \ARROW If $Y_i(\gamma)$ is the i-th component of the $\overrightarrow{Y}(\gamma)$ vector. 
One needs to show: \begin{align*} @@ -654,12 +654,12 @@ \end{align*} \ARROW The expected value: \begin{align*} -E \lbrace Y_j (\gamma)=\sum_{ {\rm trajectories}} \frac{a_j}{q_{i_0}p(0 \vert i_k) } P(i_1,i_2,...,i_k,0), +E \lbrace Y_j (\gamma) \rbrace=\sum_{ {\rm trajectories}} \frac{a_j}{q_{i_0}p(0 \vert i_k) } P(i_1,i_2,...,i_k,0), \end{align*} where $P(i_1,i_2,...,i_k,0)$ is the probability of this trajectory occurring.\\ \ARROW But by our definition the probability: \begin{align*} -P(i_0,i_1,...,i_{k-1},j,0)=q_{i_k}h_{i_1,i_0}...h_{k,i_{k-1}}p(0 \vert j) +P(i_0,i_1,...,i_{k-1},j,0)=q_{i_0}h_{i_1,i_0}...h_{j,i_{k-1}}p(0 \vert j) \end{align*} \ARROW In the end we get: \begin{align*} @@ -748,7 +748,7 @@ -\begin{frame}\frametitle{Generalization, prove} +\begin{frame}\frametitle{Generalization, proof} \ARROW For a $X(\gamma)$ trajectory the expected value is: \begin{align*} E \lbrace X(\gamma_k) = \sum_{k=0}^{\infty}\sum_{\gamma_k} X(\gamma_k)P \lbrace X(\gamma_k) \rbrace \end{align*} @@ -772,7 +772,7 @@ \begin{frame}\frametitle{Generalization, the algorithm} \ARROW We set the $P$ matrix in a arbitrary way.\\ -\ARROW If in the $t$ moment the point is in the $i_t$ state the with the probability $p_{i_t, i_{t+1}}$ he can go to $i_{t+1}$ state. \\ +\ARROW If at the moment $t$ the point is in the state $i_t$, then with the probability $p_{i_t, i_{t+1}}$ it can go to the state $i_{t+1}$. 
\\ \ARROW We stop the walk once we reach $0$.\\ \ARROW For the given trajectory we assign the value: $X(\gamma_k)$\\ \ARROW We repeat the procedure $N$ times and take the mean and RMS.\\ @@ -792,7 +792,7 @@ \ARROW W.Wasow (1956) was smarter: \begin{itemize} -\item For the trajectory: $\gamma(i_0,i_1,...,i_k,0)$ we look at begging trajectories: +\item For the trajectory: $\gamma(i_0,i_1,...,i_k,0)$ we look at trajectories beginning with: \begin{align*} (i_0),~(i_0,i_1),~(i_0,i_1,...,i_k) \end{align*} @@ -808,9 +808,9 @@ \end{itemize} \ARROW For the trajectory we define: \begin{align*} -X^{\ast}(\gamma)=\sum_{m=0}^k nu_{i_1,i_2}...\nu_{i_{m-1},i_m}a_{i_m} +X^{\ast}(\gamma)=\sum_{m=0}^k \nu_{i_1,i_2}...\nu_{i_{m-1},i_m}a_{i_m} \end{align*} -\ARROW One can prove that: +\ARROW One can prove that: \begin{align*} E \lbrace X^{\ast}(\gamma) \vert i_0=i \rbrace =x_i^0 \end{align*}