\BOOKMARK [1][-]{section.1}{Standard Model}{}% 1
\BOOKMARK [2][-]{subsection.1.1}{Elementary particles and forces}{section.1}% 2
\BOOKMARK [2][-]{subsection.1.2}{Interaction rules}{section.1}% 3
\BOOKMARK [1][-]{section.2}{Physics beyond the SM}{}% 4
\BOOKMARK [2][-]{subsection.2.1}{Neutrino Oscillation}{section.2}% 5
\BOOKMARK [2][-]{subsection.2.2}{New physics}{section.2}% 6
\BOOKMARK [1][-]{section.3}{eee decay}{}% 7
\BOOKMARK [2][-]{subsection.3.1}{Kinematics}{section.3}% 8
\BOOKMARK [2][-]{subsection.3.2}{Background events}{section.3}% 9
\BOOKMARK [3][-]{subsubsection.3.2.1}{Internal conversions}{subsection.3.2}% 10
\BOOKMARK [3][-]{subsubsection.3.2.2}{Michel decay}{subsection.3.2}% 11
\BOOKMARK [3][-]{subsubsection.3.2.3}{Radiative muon decay}{subsection.3.2}% 12
\BOOKMARK [3][-]{subsubsection.3.2.4}{Bhabha scattering}{subsection.3.2}% 13
\BOOKMARK [3][-]{subsubsection.3.2.5}{Pion decays}{subsection.3.2}% 14
\BOOKMARK [3][-]{subsubsection.3.2.6}{Analysis of the background}{subsection.3.2}% 15
\BOOKMARK [1][-]{section.4}{Mu3e experiment}{}% 16
\BOOKMARK [2][-]{subsection.4.1}{Requirements}{section.4}% 17
\BOOKMARK [2][-]{subsection.4.2}{Phase I}{section.4}% 18
\BOOKMARK [2][-]{subsection.4.3}{Phase II}{section.4}% 19
\BOOKMARK [2][-]{subsection.4.4}{Experimental setup}{section.4}% 20
\BOOKMARK [2][-]{subsection.4.5}{The problem of low longitudinal momentum recurlers}{section.4}% 21
\BOOKMARK [1][-]{section.5}{Machine learning}{}% 22
\BOOKMARK [2][-]{subsection.5.1}{Introduction}{section.5}% 23
\BOOKMARK [2][-]{subsection.5.2}{Artificial neural networks}{section.5}% 24
\BOOKMARK [3][-]{subsubsection.5.2.1}{General concepts}{subsection.5.2}% 25
\BOOKMARK [3][-]{subsubsection.5.2.2}{Activation functions}{subsection.5.2}% 26
\BOOKMARK [3][-]{subsubsection.5.2.3}{Concepts of training}{subsection.5.2}% 27
\BOOKMARK [3][-]{subsubsection.5.2.4}{Loss functions}{subsection.5.2}% 28
\BOOKMARK [3][-]{subsubsection.5.2.5}{Stochastic gradient descent}{subsection.5.2}% 29
\BOOKMARK [3][-]{subsubsection.5.2.6}{Stochastic gradient descent with Momentum}{subsection.5.2}% 30
\BOOKMARK [3][-]{subsubsection.5.2.7}{RMSProp}{subsection.5.2}% 31
\BOOKMARK [3][-]{subsubsection.5.2.8}{Adam}{subsection.5.2}% 32
\BOOKMARK [3][-]{subsubsection.5.2.9}{Decaying learning rate}{subsection.5.2}% 33
\BOOKMARK [3][-]{subsubsection.5.2.10}{Batch normalisation}{subsection.5.2}% 34
\BOOKMARK [2][-]{subsection.5.3}{Recurrent Neural Networks}{section.5}% 35
\BOOKMARK [3][-]{subsubsection.5.3.1}{General concepts}{subsection.5.3}% 36
\BOOKMARK [3][-]{subsubsection.5.3.2}{Most common architectures}{subsection.5.3}% 37
\BOOKMARK [3][-]{subsubsection.5.3.3}{Cell types}{subsection.5.3}% 38
\BOOKMARK [2][-]{subsection.5.4}{XGBoost}{section.5}% 39
\BOOKMARK [1][-]{section.6}{Data}{}% 40
\BOOKMARK [2][-]{subsection.6.1}{General information}{section.6}% 41
\BOOKMARK [2][-]{subsection.6.2}{Preprocessing}{section.6}% 42
\BOOKMARK [3][-]{subsubsection.6.2.1}{Dataset 1}{subsection.6.2}% 43
\BOOKMARK [3][-]{subsubsection.6.2.2}{Dataset 2}{subsection.6.2}% 44
\BOOKMARK [1][-]{section.7}{RNNs used}{}% 45
\BOOKMARK [2][-]{subsection.7.1}{RNN for track prediction}{section.7}% 46
\BOOKMARK [2][-]{subsection.7.2}{RNN for classification of tracks}{section.7}% 47
\BOOKMARK [1][-]{section.8}{Analysis}{}% 48
\BOOKMARK [2][-]{subsection.8.1}{Best 2}{section.8}% 49
\BOOKMARK [2][-]{subsection.8.2}{RNN classifier with RNN track prediction input}{section.8}% 50
\BOOKMARK [2][-]{subsection.8.3}{XGBoost}{section.8}% 51
\BOOKMARK [2][-]{subsection.8.4}{Comparison in performance of the RNN and XGBoost}{section.8}% 52
\BOOKMARK [1][-]{section.9}{Results}{}% 53
\BOOKMARK [2][-]{subsection.9.1}{Results}{section.9}% 54
\BOOKMARK [2][-]{subsection.9.2}{Outlook and potential}{section.9}% 55
\BOOKMARK [1][-]{section.10}{Acknowledgements}{}% 56