{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Import" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "\n", "# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n", "import random\n", "import numpy as np\n", "from pdg_const1 import pdg\n", "import matplotlib\n", "import matplotlib.pyplot as plt\n", "import pickle as pkl\n", "import sys\n", "import time\n", "from helperfunctions import display_time, prepare_plot\n", "import cmath as c\n", "import scipy.integrate as integrate\n", "from scipy.optimize import fminbound\n", "from array import array as arr\n", "import collections\n", "from itertools import compress\n", "import tensorflow as tf\n", "import zfit\n", "from zfit import ztf\n", "# from IPython.display import clear_output\n", "import os\n", "import tensorflow_probability as tfp\n", "tfd = tfp.distributions" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# chunksize = 10000\n", "# zfit.run.chunking.active = True\n", "# zfit.run.chunking.max_n_points = chunksize" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Build model and graphs\n", "## Create graphs" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def formfactor(q2, subscript, b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2): #returns real value\n", " #check if subscript is viable\n", "\n", " if subscript != \"0\" and subscript != \"+\" and subscript != \"T\":\n", " raise ValueError('Wrong subscript entered, choose either 0, + or T')\n", "\n", " #get constants\n", "\n", " mK = ztf.constant(pdg['Ks_M'])\n", " mbstar0 = ztf.constant(pdg[\"mbstar0\"])\n", " mbstar = ztf.constant(pdg[\"mbstar\"])\n", "\n", "\n", " mmu = ztf.constant(pdg['muon_M'])\n", " mb = ztf.constant(pdg['bquark_M'])\n", " ms = ztf.constant(pdg['squark_M'])\n", " mB = ztf.constant(pdg['Bplus_M'])\n", "\n", " #N comes from derivation in paper\n", "\n", " N = 3\n", "\n", " #some helperfunctions\n", "\n", " tpos = (mB - mK)**2\n", " tzero = (mB + mK)*(ztf.sqrt(mB)-ztf.sqrt(mK))**2\n", "\n", " z_oben = ztf.sqrt(tpos - q2) - ztf.sqrt(tpos - tzero)\n", " z_unten = ztf.sqrt(tpos - q2) + ztf.sqrt(tpos - tzero)\n", " z = tf.divide(z_oben, z_unten)\n", "\n", " #calculate f0\n", "\n", " if subscript == \"0\":\n", " prefactor = 1/(1 - q2/(mbstar0**2))\n", " _sum = 0\n", " b0 = [b0_0, b0_1, b0_2]\n", "\n", " for i in range(N):\n", " _sum += b0[i]*(tf.pow(z,i))\n", "\n", " return ztf.to_complex(prefactor * _sum)\n", "\n", " #calculate f+ or fT\n", "\n", " else:\n", " prefactor = 1/(1 - q2/(mbstar**2))\n", " _sum = 0\n", "\n", " if subscript == \"T\":\n", " bT = [bT_0, bT_1, bT_2]\n", " for i in range(N):\n", " _sum += bT[i] * (tf.pow(z, i) - ((-1)**(i-N)) * (i/N) * tf.pow(z, N))\n", " else:\n", " bplus = [bplus_0, bplus_1, bplus_2]\n", " for i in range(N):\n", " _sum += bplus[i] * (tf.pow(z, i) - ((-1)**(i-N)) * (i/N) * tf.pow(z, N))\n", "\n", " return ztf.to_complex(prefactor * _sum)\n", "\n", "def resonance(q, _mass, width, phase, scale):\n", "\n", " q2 = tf.pow(q, 2)\n", "\n", " mmu = ztf.constant(pdg['muon_M'])\n", "\n", " p = 0.5 * ztf.sqrt(q2 - 4*(mmu**2))\n", "\n", " p0 = 0.5 * ztf.sqrt(_mass**2 - 4*mmu**2)\n", "\n", " gamma_j = tf.divide(p, q) * _mass * width / p0\n", "\n", " #Calculate the resonance\n", "\n", " _top = tf.complex(_mass * width, ztf.constant(0.0))\n", 
"\n", " _bottom = tf.complex(_mass**2 - q2, -_mass*gamma_j)\n", "\n", " com = _top/_bottom\n", "\n", " #Rotate by the phase\n", "\n", " r = ztf.to_complex(scale*tf.abs(com))\n", "\n", " _phase = tf.angle(com)\n", "\n", " _phase += phase\n", "\n", " com = r * tf.exp(tf.complex(ztf.constant(0.0), _phase))\n", "\n", " return com\n", "\n", "\n", "def axiv_nonres(q, b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2):\n", "\n", " GF = ztf.constant(pdg['GF'])\n", " alpha_ew = ztf.constant(pdg['alpha_ew'])\n", " Vtb = ztf.constant(pdg['Vtb'])\n", " Vts = ztf.constant(pdg['Vts'])\n", " C10eff = ztf.constant(pdg['C10eff'])\n", "\n", " mmu = ztf.constant(pdg['muon_M'])\n", " mb = ztf.constant(pdg['bquark_M'])\n", " ms = ztf.constant(pdg['squark_M'])\n", " mK = ztf.constant(pdg['Ks_M'])\n", " mB = ztf.constant(pdg['Bplus_M'])\n", "\n", " q2 = tf.pow(q, 2)\n", "\n", " #Some helperfunctions\n", "\n", " beta = 1. - 4. * mmu**2. / q2\n", "\n", " kabs = ztf.sqrt(mB**2. + tf.pow(q2, 2)/mB**2. + mK**4./mB**2. - 2. * (mB**2. * mK**2. + mK**2. * q2 + mB**2. * q2) / mB**2.)\n", "\n", " #prefactor in front of whole bracket\n", "\n", " prefactor1 = GF**2. *alpha_ew**2. * (tf.abs(Vtb*Vts))**2. * kabs * beta / (128. * np.pi**5.)\n", "\n", " #left term in bracket\n", "\n", " bracket_left = 2./3. * tf.pow(kabs,2) * tf.pow(beta,2) * tf.pow(tf.abs(ztf.to_complex(C10eff)*formfactor(q2, \"+\", b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2)),2)\n", "\n", " #middle term in bracket\n", "\n", " _top = 4. * mmu**2. * (mB**2. - mK**2.) * (mB**2. - mK**2.)\n", "\n", " _under = q2 * mB**2.\n", "\n", " bracket_middle = _top/_under *tf.pow(tf.abs(ztf.to_complex(C10eff) * formfactor(q2, \"0\", b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2)), 2)\n", " \n", " #Note sqrt(q2) comes from derivation as we use q2 and plot q\n", "\n", " return prefactor1 * (bracket_left + bracket_middle) * 2 * q\n", "\n", "def vec(q, funcs, b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2):\n", " \n", " q2 = tf.pow(q, 2)\n", "\n", " GF = ztf.constant(pdg['GF'])\n", " alpha_ew = ztf.constant(pdg['alpha_ew'])\n", " Vtb = ztf.constant(pdg['Vtb'])\n", " Vts = ztf.constant(pdg['Vts'])\n", " C7eff = ztf.constant(pdg['C7eff'])\n", "\n", " mmu = ztf.constant(pdg['muon_M'])\n", " mb = ztf.constant(pdg['bquark_M'])\n", " ms = ztf.constant(pdg['squark_M'])\n", " mK = ztf.constant(pdg['Ks_M'])\n", " mB = ztf.constant(pdg['Bplus_M'])\n", "\n", " #Some helperfunctions\n", "\n", " beta = 1. - 4. * mmu**2. / q2\n", "\n", " kabs = ztf.sqrt(mB**2. + tf.pow(q2, 2)/mB**2. + mK**4./mB**2. - 2 * (mB**2 * mK**2 + mK**2 * q2 + mB**2 * q2) / mB**2)\n", " \n", " #prefactor in front of whole bracket\n", "\n", " prefactor1 = GF**2. *alpha_ew**2. * (tf.abs(Vtb*Vts))**2 * kabs * beta / (128. * np.pi**5.)\n", "\n", " #right term in bracket\n", "\n", " prefactor2 = tf.pow(kabs,2) * (1. - 1./3. 
* beta)\n", "\n", " abs_bracket = tf.pow(tf.abs(c9eff(q, funcs) * formfactor(q2, \"+\", b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2) + ztf.to_complex(2.0 * C7eff * (mb + ms)/(mB + mK)) * formfactor(q2, \"T\", b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2)),2)\n", "\n", " bracket_right = prefactor2 * abs_bracket\n", "\n", " #Note sqrt(q2) comes from derivation as we use q2 and plot q\n", "\n", " return prefactor1 * bracket_right * 2 * q\n", "\n", "def c9eff(q, funcs):\n", "\n", " C9eff_nr = ztf.to_complex(ztf.constant(pdg['C9eff']))\n", "\n", " c9 = C9eff_nr + funcs\n", "\n", " return c9" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def G(y):\n", " \n", " def inner_rect_bracket(q):\n", " return tf.log(ztf.to_complex((1+tf.sqrt(q))/(1-tf.sqrt(q)))-tf.complex(ztf.constant(0), -1*ztf.constant(np.pi))) \n", " \n", " def inner_right(q):\n", " return ztf.to_complex(2 * tf.atan(1/tf.sqrt(tf.math.real(-q))))\n", " \n", " big_bracket = tf.where(tf.math.real(y) > ztf.constant(0.0), inner_rect_bracket(y), inner_right(y))\n", " \n", " return ztf.to_complex(tf.sqrt(tf.abs(y))) * big_bracket\n", "\n", "def h_S(m, q):\n", " \n", " return ztf.to_complex(2) - G(ztf.to_complex(1) - ztf.to_complex(4*tf.pow(m, 2)) / ztf.to_complex(tf.pow(q, 2)))\n", "\n", "def h_P(m, q):\n", " \n", " return ztf.to_complex(2/3) + (ztf.to_complex(1) - ztf.to_complex(4*tf.pow(m, 2)) / ztf.to_complex(tf.pow(q, 2))) * h_S(m,q)\n", "\n", "def two_p_ccbar(mD, m_D_bar, m_D_star, q):\n", " \n", " \n", " #Load constants\n", " nu_D_bar = ztf.to_complex(pdg[\"nu_D_bar\"])\n", " nu_D = ztf.to_complex(pdg[\"nu_D\"])\n", " nu_D_star = ztf.to_complex(pdg[\"nu_D_star\"])\n", " \n", " phase_D_bar = ztf.to_complex(pdg[\"phase_D_bar\"])\n", " phase_D = ztf.to_complex(pdg[\"phase_D\"])\n", " phase_D_star = ztf.to_complex(pdg[\"phase_D_star\"])\n", " \n", " #Calculation\n", " left_part = nu_D_bar * tf.exp(tf.complex(ztf.constant(0.0), phase_D_bar)) * h_S(m_D_bar, q) \n", " \n", " right_part_D = nu_D * tf.exp(tf.complex(ztf.constant(0.0), phase_D)) * h_P(m_D, q) \n", " \n", " right_part_D_star = nu_D_star * tf.exp(tf.complex(ztf.constant(0.0), phase_D_star)) * h_P(m_D_star, q) \n", "\n", " return left_part + right_part_D + right_part_D_star" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Build pdf" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class total_pdf_cut(zfit.pdf.ZPDF):\n", " _N_OBS = 1 # dimension, can be omitted\n", " _PARAMS = ['b0_0', 'b0_1', 'b0_2', \n", " 'bplus_0', 'bplus_1', 'bplus_2', \n", " 'bT_0', 'bT_1', 'bT_2', \n", " 'rho_mass', 'rho_scale', 'rho_phase', 'rho_width',\n", " 'jpsi_mass', 'jpsi_scale', 'jpsi_phase', 'jpsi_width',\n", " 'psi2s_mass', 'psi2s_scale', 'psi2s_phase', 'psi2s_width',\n", " 'p3770_mass', 'p3770_scale', 'p3770_phase', 'p3770_width',\n", " 'p4040_mass', 'p4040_scale', 'p4040_phase', 'p4040_width',\n", " 'p4160_mass', 'p4160_scale', 'p4160_phase', 'p4160_width',\n", " 'p4415_mass', 'p4415_scale', 'p4415_phase', 'p4415_width',\n", " 'omega_mass', 'omega_scale', 'omega_phase', 'omega_width',\n", " 'phi_mass', 'phi_scale', 'phi_phase', 'phi_width',\n", " 'Dbar_mass', 'Dbar_scale', 'Dbar_phase',\n", " 'Dstar_mass', 'DDstar_scale', 'DDstar_phase', 'D_mass',\n", " 'tau_mass', 'C_tt']\n", "# the name of the parameters\n", "\n", " def _unnormalized_pdf(self, x):\n", " \n", " x = x.unstack_x()\n", " \n", " b0 = [self.params['b0_0'], self.params['b0_1'], 
self.params['b0_2']]\n", " bplus = [self.params['bplus_0'], self.params['bplus_1'], self.params['bplus_2']]\n", " bT = [self.params['bT_0'], self.params['bT_1'], self.params['bT_2']]\n", " \n", " def rho_res(q):\n", " return resonance(q, _mass = self.params['rho_mass'], scale = self.params['rho_scale'],\n", " phase = self.params['rho_phase'], width = self.params['rho_width'])\n", " \n", " def omega_res(q):\n", " return resonance(q, _mass = self.params['omega_mass'], scale = self.params['omega_scale'],\n", " phase = self.params['omega_phase'], width = self.params['omega_width'])\n", " \n", " def phi_res(q):\n", " return resonance(q, _mass = self.params['phi_mass'], scale = self.params['phi_scale'],\n", " phase = self.params['phi_phase'], width = self.params['phi_width'])\n", "\n", " def jpsi_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['jpsi_mass'], 2)) * resonance(q, _mass = self.params['jpsi_mass'], \n", " scale = self.params['jpsi_scale'],\n", " phase = self.params['jpsi_phase'], \n", " width = self.params['jpsi_width'])\n", " def psi2s_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['psi2s_mass'], 2)) * resonance(q, _mass = self.params['psi2s_mass'], \n", " scale = self.params['psi2s_scale'],\n", " phase = self.params['psi2s_phase'], \n", " width = self.params['psi2s_width'])\n", " def p3770_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['p3770_mass'], 2)) * resonance(q, _mass = self.params['p3770_mass'], \n", " scale = self.params['p3770_scale'],\n", " phase = self.params['p3770_phase'], \n", " width = self.params['p3770_width'])\n", " \n", " def p4040_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['p4040_mass'], 2)) * resonance(q, _mass = self.params['p4040_mass'], \n", " scale = self.params['p4040_scale'],\n", " phase = self.params['p4040_phase'], \n", " width = self.params['p4040_width'])\n", " \n", " def p4160_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['p4160_mass'], 2)) * resonance(q, _mass = self.params['p4160_mass'], \n", " scale = self.params['p4160_scale'],\n", " phase = self.params['p4160_phase'], \n", " width = self.params['p4160_width'])\n", " \n", " def p4415_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['p4415_mass'], 2)) * resonance(q, _mass = self.params['p4415_mass'], \n", " scale = self.params['p4415_scale'],\n", " phase = self.params['p4415_phase'], \n", " width = self.params['p4415_width'])\n", " \n", " def P2_D(q):\n", " Dbar_contrib = ztf.to_complex(self.params['Dbar_scale'])*tf.exp(tf.complex(ztf.constant(0.0), self.params['Dbar_phase']))*ztf.to_complex(h_S(self.params['Dbar_mass'], q))\n", " DDstar_contrib = ztf.to_complex(self.params['DDstar_scale'])*tf.exp(tf.complex(ztf.constant(0.0), self.params['DDstar_phase']))*(ztf.to_complex(h_P(self.params['Dstar_mass'], q)) + ztf.to_complex(h_P(self.params['D_mass'], q)))\n", " return Dbar_contrib + DDstar_contrib\n", " \n", " def ttau_cusp(q):\n", " return ztf.to_complex(self.params['C_tt'])*(ztf.to_complex((h_S(self.params['tau_mass'], q))) - ztf.to_complex(h_P(self.params['tau_mass'], q)))\n", " \n", "\n", " funcs = rho_res(x) + omega_res(x) + phi_res(x) + jpsi_res(x) + psi2s_res(x) + p3770_res(x) + p4040_res(x)+ p4160_res(x) + p4415_res(x) + P2_D(x) + ttau_cusp(x)\n", "\n", " vec_f = vec(x, funcs, b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2)\n", "\n", " axiv_nr = axiv_nonres(x, b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2)\n", "\n", " tot 
= vec_f + axiv_nr\n", " \n", " #Cut out jpsi and psi2s\n", " \n", " tot = tf.where(tf.math.logical_or(x < ztf.constant(jpsi_mass-60.), x > ztf.constant(jpsi_mass+70.)), tot, 0.0*tot)\n", " \n", " tot = tf.where(tf.math.logical_or(x < ztf.constant(psi2s_mass-50.), x > ztf.constant(psi2s_mass+50.)), tot, 0.0*tot)\n", " \n", " return tot\n", " \n", "class total_pdf_full(zfit.pdf.ZPDF):\n", " _N_OBS = 1 # dimension, can be omitted\n", " _PARAMS = ['b0_0', 'b0_1', 'b0_2', \n", " 'bplus_0', 'bplus_1', 'bplus_2', \n", " 'bT_0', 'bT_1', 'bT_2', \n", " 'rho_mass', 'rho_scale', 'rho_phase', 'rho_width',\n", " 'jpsi_mass', 'jpsi_scale', 'jpsi_phase', 'jpsi_width',\n", " 'psi2s_mass', 'psi2s_scale', 'psi2s_phase', 'psi2s_width',\n", " 'p3770_mass', 'p3770_scale', 'p3770_phase', 'p3770_width',\n", " 'p4040_mass', 'p4040_scale', 'p4040_phase', 'p4040_width',\n", " 'p4160_mass', 'p4160_scale', 'p4160_phase', 'p4160_width',\n", " 'p4415_mass', 'p4415_scale', 'p4415_phase', 'p4415_width',\n", " 'omega_mass', 'omega_scale', 'omega_phase', 'omega_width',\n", " 'phi_mass', 'phi_scale', 'phi_phase', 'phi_width',\n", " 'Dbar_mass', 'Dbar_scale', 'Dbar_phase',\n", " 'Dstar_mass', 'DDstar_scale', 'DDstar_phase', 'D_mass',\n", " 'tau_mass', 'C_tt']\n", "# the name of the parameters\n", "\n", " def _unnormalized_pdf(self, x):\n", " \n", " x = x.unstack_x()\n", " \n", " b0 = [self.params['b0_0'], self.params['b0_1'], self.params['b0_2']]\n", " bplus = [self.params['bplus_0'], self.params['bplus_1'], self.params['bplus_2']]\n", " bT = [self.params['bT_0'], self.params['bT_1'], self.params['bT_2']]\n", " \n", " def rho_res(q):\n", " return resonance(q, _mass = self.params['rho_mass'], scale = self.params['rho_scale'],\n", " phase = self.params['rho_phase'], width = self.params['rho_width'])\n", " \n", " def omega_res(q):\n", " return resonance(q, _mass = self.params['omega_mass'], scale = self.params['omega_scale'],\n", " phase = self.params['omega_phase'], width = self.params['omega_width'])\n", " \n", " def phi_res(q):\n", " return resonance(q, _mass = self.params['phi_mass'], scale = self.params['phi_scale'],\n", " phase = self.params['phi_phase'], width = self.params['phi_width'])\n", "\n", " def jpsi_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['jpsi_mass'], 2)) * resonance(q, _mass = self.params['jpsi_mass'], \n", " scale = self.params['jpsi_scale'],\n", " phase = self.params['jpsi_phase'], \n", " width = self.params['jpsi_width'])\n", " def psi2s_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['psi2s_mass'], 2)) * resonance(q, _mass = self.params['psi2s_mass'], \n", " scale = self.params['psi2s_scale'],\n", " phase = self.params['psi2s_phase'], \n", " width = self.params['psi2s_width'])\n", " def p3770_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['p3770_mass'], 2)) * resonance(q, _mass = self.params['p3770_mass'], \n", " scale = self.params['p3770_scale'],\n", " phase = self.params['p3770_phase'], \n", " width = self.params['p3770_width'])\n", " \n", " def p4040_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['p4040_mass'], 2)) * resonance(q, _mass = self.params['p4040_mass'], \n", " scale = self.params['p4040_scale'],\n", " phase = self.params['p4040_phase'], \n", " width = self.params['p4040_width'])\n", " \n", " def p4160_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['p4160_mass'], 2)) * resonance(q, _mass = self.params['p4160_mass'], \n", " scale = self.params['p4160_scale'],\n", " phase = 
self.params['p4160_phase'], \n", " width = self.params['p4160_width'])\n", " \n", " def p4415_res(q):\n", " return ztf.to_complex(tf.pow(q, 2) / tf.pow(self.params['p4415_mass'], 2)) * resonance(q, _mass = self.params['p4415_mass'], \n", " scale = self.params['p4415_scale'],\n", " phase = self.params['p4415_phase'], \n", " width = self.params['p4415_width'])\n", " \n", " def P2_D(q):\n", " Dbar_contrib = ztf.to_complex(self.params['Dbar_scale'])*tf.exp(tf.complex(ztf.constant(0.0), self.params['Dbar_phase']))*ztf.to_complex(h_S(self.params['Dbar_mass'], q))\n", " DDstar_contrib = ztf.to_complex(self.params['DDstar_scale'])*tf.exp(tf.complex(ztf.constant(0.0), self.params['DDstar_phase']))*(ztf.to_complex(h_P(self.params['Dstar_mass'], q)) + ztf.to_complex(h_P(self.params['D_mass'], q)))\n", " return Dbar_contrib + DDstar_contrib\n", " \n", " def ttau_cusp(q):\n", " return ztf.to_complex(self.params['C_tt'])*(ztf.to_complex((h_S(self.params['tau_mass'], q))) - ztf.to_complex(h_P(self.params['tau_mass'], q)))\n", " \n", "\n", " funcs = rho_res(x) + omega_res(x) + phi_res(x) + jpsi_res(x) + psi2s_res(x) + p3770_res(x) + p4040_res(x)+ p4160_res(x) + p4415_res(x) + P2_D(x) + ttau_cusp(x)\n", "\n", " vec_f = vec(x, funcs, b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2)\n", "\n", " axiv_nr = axiv_nonres(x, b0_0, b0_1, b0_2, bplus_0, bplus_1, bplus_2, bT_0, bT_1, bT_2)\n", "\n", " tot = vec_f + axiv_nr\n", " \n", " #Cut out jpsi and psi2s\n", " \n", "# tot = tf.where(tf.math.logical_or(x < ztf.constant(jpsi_mass-60.), x > ztf.constant(jpsi_mass+70.)), tot, 0.0*tot)\n", " \n", "# tot = tf.where(tf.math.logical_or(x < ztf.constant(psi2s_mass-50.), x > ztf.constant(psi2s_mass+50.)), tot, 0.0*tot)\n", " \n", " return tot" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Setup parameters" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# formfactors\n", "\n", "b0_0 = zfit.Parameter(\"b0_0\", ztf.constant(0.292), floating = False) #, lower_limit = -2.0, upper_limit= 2.0)\n", "b0_1 = zfit.Parameter(\"b0_1\", ztf.constant(0.281), floating = False) #, lower_limit = -2.0, upper_limit= 2.0)\n", "b0_2 = zfit.Parameter(\"b0_2\", ztf.constant(0.150), floating = False) #, lower_limit = -2.0, upper_limit= 2.0)\n", "\n", "bplus_0 = zfit.Parameter(\"bplus_0\", ztf.constant(0.466), lower_limit = -2.0, upper_limit= 2.0)\n", "bplus_1 = zfit.Parameter(\"bplus_1\", ztf.constant(-0.885), lower_limit = -2.0, upper_limit= 2.0)\n", "bplus_2 = zfit.Parameter(\"bplus_2\", ztf.constant(-0.213), lower_limit = -2.0, upper_limit= 2.0)\n", "\n", "bT_0 = zfit.Parameter(\"bT_0\", ztf.constant(0.460), floating = False) #, lower_limit = -2.0, upper_limit= 2.0)\n", "bT_1 = zfit.Parameter(\"bT_1\", ztf.constant(-1.089), floating = False) #, lower_limit = -2.0, upper_limit= 2.0)\n", "bT_2 = zfit.Parameter(\"bT_2\", ztf.constant(-1.114), floating = False) #, lower_limit = -2.0, upper_limit= 2.0)\n", "\n", "\n", "#rho\n", "\n", "rho_mass, rho_width, rho_phase, rho_scale = pdg[\"rho\"]\n", "\n", "rho_m = zfit.Parameter(\"rho_m\", ztf.constant(rho_mass), floating = False) #lower_limit = rho_mass - rho_width, upper_limit = rho_mass + rho_width)\n", "rho_w = zfit.Parameter(\"rho_w\", ztf.constant(rho_width), floating = False)\n", "rho_p = zfit.Parameter(\"rho_p\", ztf.constant(rho_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)\n", "rho_s = zfit.Parameter(\"rho_s\", ztf.constant(rho_scale), lower_limit=rho_scale-np.sqrt(rho_scale), 
upper_limit=rho_scale+np.sqrt(rho_scale))\n", "\n", "#omega\n", "\n", "omega_mass, omega_width, omega_phase, omega_scale = pdg[\"omega\"]\n", "\n", "omega_m = zfit.Parameter(\"omega_m\", ztf.constant(omega_mass), floating = False)\n", "omega_w = zfit.Parameter(\"omega_w\", ztf.constant(omega_width), floating = False)\n", "omega_p = zfit.Parameter(\"omega_p\", ztf.constant(omega_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)\n", "omega_s = zfit.Parameter(\"omega_s\", ztf.constant(omega_scale), lower_limit=omega_scale-np.sqrt(omega_scale), upper_limit=omega_scale+np.sqrt(omega_scale))\n", "\n", "\n", "#phi\n", "\n", "phi_mass, phi_width, phi_phase, phi_scale = pdg[\"phi\"]\n", "\n", "phi_m = zfit.Parameter(\"phi_m\", ztf.constant(phi_mass), floating = False)\n", "phi_w = zfit.Parameter(\"phi_w\", ztf.constant(phi_width), floating = False)\n", "phi_p = zfit.Parameter(\"phi_p\", ztf.constant(phi_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)\n", "phi_s = zfit.Parameter(\"phi_s\", ztf.constant(phi_scale), lower_limit=phi_scale-np.sqrt(phi_scale), upper_limit=phi_scale+np.sqrt(phi_scale))\n", "\n", "#jpsi\n", "\n", "jpsi_mass, jpsi_width, jpsi_phase, jpsi_scale = pdg[\"jpsi\"]\n", "\n", "jpsi_m = zfit.Parameter(\"jpsi_m\", ztf.constant(jpsi_mass), floating = False)\n", "jpsi_w = zfit.Parameter(\"jpsi_w\", ztf.constant(jpsi_width), floating = False)\n", "jpsi_p = zfit.Parameter(\"jpsi_p\", ztf.constant(jpsi_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)\n", "jpsi_s = zfit.Parameter(\"jpsi_s\", ztf.constant(jpsi_scale), floating = False) #, lower_limit=jpsi_scale-np.sqrt(jpsi_scale), upper_limit=jpsi_scale+np.sqrt(jpsi_scale))\n", "\n", "#psi2s\n", "\n", "psi2s_mass, psi2s_width, psi2s_phase, psi2s_scale = pdg[\"psi2s\"]\n", "\n", "psi2s_m = zfit.Parameter(\"psi2s_m\", ztf.constant(psi2s_mass), floating = False)\n", "psi2s_w = zfit.Parameter(\"psi2s_w\", ztf.constant(psi2s_width), floating = False)\n", "psi2s_p = zfit.Parameter(\"psi2s_p\", ztf.constant(psi2s_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)\n", "psi2s_s = zfit.Parameter(\"psi2s_s\", ztf.constant(psi2s_scale), floating = False) #, lower_limit=psi2s_scale-np.sqrt(psi2s_scale), upper_limit=psi2s_scale+np.sqrt(psi2s_scale))\n", "\n", "#psi(3770)\n", "\n", "p3770_mass, p3770_width, p3770_phase, p3770_scale = pdg[\"p3770\"]\n", "\n", "p3770_m = zfit.Parameter(\"p3770_m\", ztf.constant(p3770_mass), floating = False)\n", "p3770_w = zfit.Parameter(\"p3770_w\", ztf.constant(p3770_width), floating = False)\n", "p3770_p = zfit.Parameter(\"p3770_p\", ztf.constant(p3770_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)\n", "p3770_s = zfit.Parameter(\"p3770_s\", ztf.constant(p3770_scale), lower_limit=p3770_scale-np.sqrt(p3770_scale), upper_limit=p3770_scale+np.sqrt(p3770_scale))\n", "\n", "#psi(4040)\n", "\n", "p4040_mass, p4040_width, p4040_phase, p4040_scale = pdg[\"p4040\"]\n", "\n", "p4040_m = zfit.Parameter(\"p4040_m\", ztf.constant(p4040_mass), floating = False)\n", "p4040_w = zfit.Parameter(\"p4040_w\", ztf.constant(p4040_width), floating = False)\n", "p4040_p = zfit.Parameter(\"p4040_p\", ztf.constant(p4040_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)\n", "p4040_s = zfit.Parameter(\"p4040_s\", ztf.constant(p4040_scale), lower_limit=p4040_scale-np.sqrt(p4040_scale), upper_limit=p4040_scale+np.sqrt(p4040_scale))\n", "\n", "#psi(4160)\n", "\n", "p4160_mass, p4160_width, p4160_phase, p4160_scale = pdg[\"p4160\"]\n", "\n", "p4160_m = zfit.Parameter(\"p4160_m\", ztf.constant(p4160_mass), floating = False)\n", "p4160_w = 
zfit.Parameter(\"p4160_w\", ztf.constant(p4160_width), floating = False)\n", "p4160_p = zfit.Parameter(\"p4160_p\", ztf.constant(p4160_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)\n", "p4160_s = zfit.Parameter(\"p4160_s\", ztf.constant(p4160_scale), lower_limit=p4160_scale-np.sqrt(p4160_scale), upper_limit=p4160_scale+np.sqrt(p4160_scale))\n", "\n", "#psi(4415)\n", "\n", "p4415_mass, p4415_width, p4415_phase, p4415_scale = pdg[\"p4415\"]\n", "\n", "p4415_m = zfit.Parameter(\"p4415_m\", ztf.constant(p4415_mass), floating = False)\n", "p4415_w = zfit.Parameter(\"p4415_w\", ztf.constant(p4415_width), floating = False)\n", "p4415_p = zfit.Parameter(\"p4415_p\", ztf.constant(p4415_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)\n", "p4415_s = zfit.Parameter(\"p4415_s\", ztf.constant(p4415_scale), lower_limit=p4415_scale-np.sqrt(p4415_scale), upper_limit=p4415_scale+np.sqrt(p4415_scale))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Dynamic generation of 2 particle contribution" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "m_c = 1300\n", "\n", "Dbar_phase = 0.0\n", "DDstar_phase = 0.0\n", "Dstar_mass = pdg['Dst_M']\n", "Dbar_mass = pdg['D0_M']\n", "D_mass = pdg['D0_M']\n", "\n", "Dbar_s = zfit.Parameter(\"Dbar_s\", ztf.constant(0.0), lower_limit=-0.3, upper_limit=0.3)\n", "Dbar_m = zfit.Parameter(\"Dbar_m\", ztf.constant(Dbar_mass), floating = False)\n", "Dbar_p = zfit.Parameter(\"Dbar_p\", ztf.constant(Dbar_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)#, floating = False)\n", "DDstar_s = zfit.Parameter(\"DDstar_s\", ztf.constant(0.0), lower_limit=-0.3, upper_limit=0.3)#, floating = False)\n", "Dstar_m = zfit.Parameter(\"Dstar_m\", ztf.constant(Dstar_mass), floating = False)\n", "D_m = zfit.Parameter(\"D_m\", ztf.constant(D_mass), floating = False)\n", "DDstar_p = zfit.Parameter(\"DDstar_p\", ztf.constant(DDstar_phase), lower_limit=-2*np.pi, upper_limit=2*np.pi)#, floating = False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Tau parameters" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "tau_m = zfit.Parameter(\"tau_m\", ztf.constant(pdg['tau_M']), floating = False)\n", "Ctt = zfit.Parameter(\"Ctt\", ztf.constant(0.0), lower_limit=-2.5, upper_limit=2.5)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Load data" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "x_min = 2*pdg['muon_M']\n", "x_max = (pdg[\"Bplus_M\"]-pdg[\"Ks_M\"]-0.1)\n", "\n", "# # Full spectrum\n", "\n", "obs_toy = zfit.Space('q', limits = (x_min, x_max))\n", "\n", "# Jpsi and Psi2s cut out\n", "\n", "obs1 = zfit.Space('q', limits = (x_min, jpsi_mass - 60.))\n", "obs2 = zfit.Space('q', limits = (jpsi_mass + 70., psi2s_mass - 50.))\n", "obs3 = zfit.Space('q', limits = (psi2s_mass + 50., x_max))\n", "\n", "obs_fit = obs1 + obs2 + obs3\n", "\n", "# with open(r\"./data/slim_points/slim_points_toy_0_range({0}-{1}).pkl\".format(int(x_min), int(x_max)), \"rb\") as input_file:\n", "# part_set = pkl.load(input_file)\n", "\n", "# x_part = part_set['x_part']\n", "\n", "# x_part = x_part.astype('float64')\n", "\n", "# data = zfit.data.Data.from_numpy(array=x_part, obs=obs)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Setup pdf" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "total_f = total_pdf_cut(obs=obs_toy, jpsi_mass = jpsi_m, jpsi_scale = jpsi_s, 
jpsi_phase = jpsi_p, jpsi_width = jpsi_w,\n", " psi2s_mass = psi2s_m, psi2s_scale = psi2s_s, psi2s_phase = psi2s_p, psi2s_width = psi2s_w,\n", " p3770_mass = p3770_m, p3770_scale = p3770_s, p3770_phase = p3770_p, p3770_width = p3770_w,\n", " p4040_mass = p4040_m, p4040_scale = p4040_s, p4040_phase = p4040_p, p4040_width = p4040_w,\n", " p4160_mass = p4160_m, p4160_scale = p4160_s, p4160_phase = p4160_p, p4160_width = p4160_w,\n", " p4415_mass = p4415_m, p4415_scale = p4415_s, p4415_phase = p4415_p, p4415_width = p4415_w,\n", " rho_mass = rho_m, rho_scale = rho_s, rho_phase = rho_p, rho_width = rho_w,\n", " omega_mass = omega_m, omega_scale = omega_s, omega_phase = omega_p, omega_width = omega_w,\n", " phi_mass = phi_m, phi_scale = phi_s, phi_phase = phi_p, phi_width = phi_w,\n", " Dstar_mass = Dstar_m, DDstar_scale = DDstar_s, DDstar_phase = DDstar_p, D_mass = D_m,\n", " Dbar_mass = Dbar_m, Dbar_scale = Dbar_s, Dbar_phase = Dbar_p,\n", " tau_mass = tau_m, C_tt = Ctt, b0_0 = b0_0, b0_1 = b0_1, b0_2 = b0_2,\n", " bplus_0 = bplus_0, bplus_1 = bplus_1, bplus_2 = bplus_2,\n", " bT_0 = bT_0, bT_1 = bT_1, bT_2 = bT_2)\n", "\n", "total_f_fit = total_pdf_full(obs=obs_fit, jpsi_mass = jpsi_m, jpsi_scale = jpsi_s, jpsi_phase = jpsi_p, jpsi_width = jpsi_w,\n", " psi2s_mass = psi2s_m, psi2s_scale = psi2s_s, psi2s_phase = psi2s_p, psi2s_width = psi2s_w,\n", " p3770_mass = p3770_m, p3770_scale = p3770_s, p3770_phase = p3770_p, p3770_width = p3770_w,\n", " p4040_mass = p4040_m, p4040_scale = p4040_s, p4040_phase = p4040_p, p4040_width = p4040_w,\n", " p4160_mass = p4160_m, p4160_scale = p4160_s, p4160_phase = p4160_p, p4160_width = p4160_w,\n", " p4415_mass = p4415_m, p4415_scale = p4415_s, p4415_phase = p4415_p, p4415_width = p4415_w,\n", " rho_mass = rho_m, rho_scale = rho_s, rho_phase = rho_p, rho_width = rho_w,\n", " omega_mass = omega_m, omega_scale = omega_s, omega_phase = omega_p, omega_width = omega_w,\n", " phi_mass = phi_m, phi_scale = phi_s, phi_phase = phi_p, phi_width = phi_w,\n", " Dstar_mass = Dstar_m, DDstar_scale = DDstar_s, DDstar_phase = DDstar_p, D_mass = D_m,\n", " Dbar_mass = Dbar_m, Dbar_scale = Dbar_s, Dbar_phase = Dbar_p,\n", " tau_mass = tau_m, C_tt = Ctt, b0_0 = b0_0, b0_1 = b0_1, b0_2 = b0_2,\n", " bplus_0 = bplus_0, bplus_1 = bplus_1, bplus_2 = bplus_2,\n", " bT_0 = bT_0, bT_1 = bT_1, bT_2 = bT_2)\n", " \n", "# print(total_pdf.obs)\n", "\n", "# print(calcs_test)\n", "\n", "# for param in total_f.get_dependents():\n", "# print(zfit.run(param))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# total_f_fit.normalization(obs_fit)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Test if graphs actually work and compute values" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# def total_test_tf(xq):\n", "\n", "# def jpsi_res(q):\n", "# return resonance(q, jpsi_m, jpsi_s, jpsi_p, jpsi_w)\n", "\n", "# def psi2s_res(q):\n", "# return resonance(q, psi2s_m, psi2s_s, psi2s_p, psi2s_w)\n", "\n", "# def cusp(q):\n", "# return bifur_gauss(q, cusp_m, sig_L, sig_R, cusp_s)\n", "\n", "# funcs = jpsi_res(xq) + psi2s_res(xq) + cusp(xq)\n", "\n", "# vec_f = vec(xq, funcs)\n", "\n", "# axiv_nr = axiv_nonres(xq)\n", "\n", "# tot = vec_f + axiv_nr\n", " \n", "# return tot\n", "\n", "# def jpsi_res(q):\n", "# return resonance(q, jpsi_m, jpsi_s, jpsi_p, jpsi_w)\n", "\n", "# calcs = zfit.run(total_test_tf(x_part))\n", "\n", "\n", "\n", "test_q = np.linspace(x_min, x_max, int(2e6))\n", "\n", 
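"# Hedged sketch (added illustration, not part of the original analysis): before scanning\n", "# the full 2e6-point grid, evaluate the unnormalized pdf on a coarse grid and check that\n", "# the graph returns finite values; this uses only objects defined above (total_f_fit, x_min, x_max).\n", "coarse_q = np.linspace(x_min, x_max, 1000)\n", "coarse_vals = zfit.run(total_f_fit.pdf(coarse_q, norm_range=False))\n", "print(\"coarse-grid check -> all finite:\", np.all(np.isfinite(coarse_vals)), \" min value:\", np.min(coarse_vals))\n", "\n",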
"probs = total_f_fit.pdf(test_q, norm_range=False)\n", "\n", "calcs_test = zfit.run(probs)\n", "\n", "Ctt.set_value(0.5)\n", "\n", "calcs_test1 = zfit.run(probs)\n", "\n", "Ctt.set_value(0.0)\n", "\n", "Dbar_s.set_value(0.3)\n", "\n", "DDstar_s.set_value(0.3)\n", "\n", "calcs_test2 = zfit.run(probs)\n", "# res_y = zfit.run(jpsi_res(test_q))\n", "# b0 = [b0_0, b0_1, b0_2]\n", "# bplus = [bplus_0, bplus_1, bplus_2]\n", "# bT = [bT_0, bT_1, bT_2]\n", "# f0_y = zfit.run(tf.math.real(formfactor(test_q,\"0\", b0, bplus, bT)))\n", "# fplus_y = zfit.run(tf.math.real(formfactor(test_q,\"+\", b0, bplus, bT)))\n", "# fT_y = zfit.run(tf.math.real(formfactor(test_q,\"T\", b0, bplus, bT)))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "plt.clf()\n", "# plt.plot(x_part, calcs, '.')\n", "plt.plot(test_q, calcs_test, label = 'pdf (Ctt = 0.0)')\n", "plt.plot(test_q, calcs_test1, label = 'pdf (Ctt = 0.5)')\n", "plt.plot(test_q, calcs_test2, label = 'pdf (D-contribs = 0.3)')\n", "# plt.plot(test_q, f0_y, label = '0')\n", "# plt.plot(test_q, fT_y, label = 'T')\n", "# plt.plot(test_q, fplus_y, label = '+')\n", "# plt.plot(test_q, res_y, label = 'res')\n", "plt.legend()\n", "plt.ylim(0.0, 1.5e-6)\n", "# plt.yscale('log')\n", "# plt.xlim(770, 785)\n", "plt.savefig('test.png')\n", "# print(jpsi_width)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n", "\n", "# probs = mixture.prob(test_q)\n", "# probs_np = zfit.run(probs)\n", "# probs_np *= np.max(calcs_test) / np.max(probs_np)\n", "# plt.figure()\n", "# plt.semilogy(test_q, probs_np,label=\"importance sampling\")\n", "# plt.semilogy(test_q, calcs_test, label = 'pdf')\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 0.213/(0.00133+0.213+0.015)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Adjust scaling of different parts" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "total_f.update_integration_options(draws_per_dim=2000000, mc_sampler=None)\n", "# inte = total_f.integrate(limits = (950., 1050.), norm_range=False)\n", "# inte_fl = zfit.run(inte)\n", "# print(inte_fl/4500)\n", "# print(pdg[\"jpsi_BR\"]/pdg[\"NR_BR\"], inte_fl*pdg[\"psi2s_auc\"]/pdg[\"NR_auc\"])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# # print(\"jpsi:\", inte_fl)\n", "# # print(\"Increase am by factor:\", np.sqrt(pdg[\"jpsi_BR\"]/pdg[\"NR_BR\"]*pdg[\"NR_auc\"]/inte_fl))\n", "# # print(\"New amp:\", pdg[\"jpsi\"][3]*np.sqrt(pdg[\"jpsi_BR\"]/pdg[\"NR_BR\"]*pdg[\"NR_auc\"]/inte_fl))\n", "\n", "# # print(\"psi2s:\", inte_fl)\n", "# # print(\"Increase am by factor:\", np.sqrt(pdg[\"psi2s_BR\"]/pdg[\"NR_BR\"]*pdg[\"NR_auc\"]/inte_fl))\n", "# # print(\"New amp:\", pdg[\"psi2s\"][3]*np.sqrt(pdg[\"psi2s_BR\"]/pdg[\"NR_BR\"]*pdg[\"NR_auc\"]/inte_fl))\n", "\n", "# name = \"phi\"\n", "\n", "# print(name+\":\", inte_fl)\n", "# print(\"Increase am by factor:\", np.sqrt(pdg[name+\"_BR\"]/pdg[\"NR_BR\"]*pdg[\"NR_auc\"]/inte_fl))\n", "# print(\"New amp:\", pdg[name][0]*np.sqrt(pdg[name+\"_BR\"]/pdg[\"NR_BR\"]*pdg[\"NR_auc\"]/inte_fl))\n", "\n", "\n", "# print(x_min)\n", "# print(x_max)\n", "# # total_f.update_integration_options(draws_per_dim=2000000, mc_sampler=None)\n", "# total_f.update_integration_options(mc_sampler=lambda dim, num_results,\n", "# dtype: tf.random_uniform(maxval=1., shape=(num_results, dim), dtype=dtype),\n", "# 
draws_per_dim=1000000)\n", "# # _ = []\n", "\n", "# # for i in range(10):\n", "\n", "# # inte = total_f.integrate(limits = (x_min, x_max))\n", "# # inte_fl = zfit.run(inte)\n", "# # print(inte_fl)\n", "# # _.append(inte_fl)\n", "\n", "# # print(\"mean:\", np.mean(_))\n", "\n", "# _ = time.time()\n", "\n", "# inte = total_f.integrate(limits = (x_min, x_max))\n", "# inte_fl = zfit.run(inte)\n", "# print(inte_fl)\n", "# print(\"Time taken: {}\".format(display_time(int(time.time() - _))))\n", "\n", "# print(pdg['NR_BR']/pdg['NR_auc']*inte_fl)\n", "# print(0.25**2*4.2/1000)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Sampling\n", "## Mixture distribution for sampling" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n", " \n", "# print(list_of_borders[:9])\n", "# print(list_of_borders[-9:])\n", "\n", "\n", "class UniformSampleAndWeights(zfit.util.execution.SessionHolderMixin):\n", " def __call__(self, limits, dtype, n_to_produce):\n", " # n_to_produce = tf.cast(n_to_produce, dtype=tf.int32)\n", " low, high = limits.limit1d\n", " low = tf.cast(low, dtype=dtype)\n", " high = tf.cast(high, dtype=dtype)\n", "# uniform = tfd.Uniform(low=low, high=high)\n", "# uniformjpsi = tfd.Uniform(low=tf.constant(3080, dtype=dtype), high=tf.constant(3112, dtype=dtype))\n", "# uniformpsi2s = tfd.Uniform(low=tf.constant(3670, dtype=dtype), high=tf.constant(3702, dtype=dtype))\n", "\n", "# list_of_borders = []\n", "# _p = []\n", "# splits = 10\n", "\n", "# _ = np.linspace(x_min, x_max, splits)\n", "\n", "# for i in range(splits):\n", "# list_of_borders.append(tf.constant(_[i], dtype=dtype))\n", "# _p.append(tf.constant(1/splits, dtype=dtype))\n", " \n", "# mixture = tfd.MixtureSameFamily(mixture_distribution=tfd.Categorical(probs=_p[:(splits-1)]),\n", "# components_distribution=tfd.Uniform(low=list_of_borders[:(splits-1)], \n", "# high=list_of_borders[-(splits-1):]))\n", " mixture = tfd.MixtureSameFamily(mixture_distribution=tfd.Categorical(probs=[tf.constant(0.05, dtype=dtype),\n", " tf.constant(0.93, dtype=dtype),\n", " tf.constant(0.05, dtype=dtype),\n", " tf.constant(0.065, dtype=dtype),\n", " tf.constant(0.04, dtype=dtype),\n", " tf.constant(0.05, dtype=dtype)]),\n", " components_distribution=tfd.Uniform(low=[tf.constant(x_min, dtype=dtype), \n", " tf.constant(3090, dtype=dtype),\n", " tf.constant(3681, dtype=dtype), \n", " tf.constant(3070, dtype=dtype),\n", " tf.constant(1000, dtype=dtype),\n", " tf.constant(3660, dtype=dtype)], \n", " high=[tf.constant(x_max, dtype=dtype),\n", " tf.constant(3102, dtype=dtype), \n", " tf.constant(3691, dtype=dtype),\n", " tf.constant(3110, dtype=dtype),\n", " tf.constant(1040, dtype=dtype),\n", " tf.constant(3710, dtype=dtype)]))\n", "# dtype = tf.float64\n", "# mixture = tfd.MixtureSameFamily(mixture_distribution=tfd.Categorical(probs=[tf.constant(0.04, dtype=dtype),\n", "# tf.constant(0.90, dtype=dtype),\n", "# tf.constant(0.02, dtype=dtype),\n", "# tf.constant(0.07, dtype=dtype),\n", "# tf.constant(0.02, dtype=dtype)]),\n", "# components_distribution=tfd.Uniform(low=[tf.constant(x_min, dtype=dtype), \n", "# tf.constant(3089, dtype=dtype),\n", "# tf.constant(3103, dtype=dtype), \n", "# tf.constant(3681, dtype=dtype),\n", "# tf.constant(3691, dtype=dtype)], \n", "# high=[tf.constant(3089, dtype=dtype),\n", "# tf.constant(3103, dtype=dtype), \n", "# tf.constant(3681, dtype=dtype),\n", "# tf.constant(3691, dtype=dtype), \n", "# tf.constant(x_max, dtype=dtype)]))\n", "# mixture = 
tfd.Uniform(tf.constant(x_min, dtype=dtype), tf.constant(x_max, dtype=dtype))\n", "# sample = tf.random.uniform((n_to_produce, 1), dtype=dtype)\n", " sample = mixture.sample((n_to_produce, 1))\n", "# sample = tf.random.uniform((n_to_produce, 1), dtype=dtype)\n", " weights = mixture.prob(sample)[:,0]\n", "# weights = tf.broadcast_to(tf.constant(1., dtype=dtype), shape=(n_to_produce,))\n", " # sample = tf.expand_dims(sample, axis=-1)\n", "# print(sample, weights)\n", " \n", "# weights = tf.ones(shape=(n_to_produce,), dtype=dtype)\n", " weights_max = None\n", " thresholds = tf.random_uniform(shape=(n_to_produce,), dtype=dtype)\n", " return sample, thresholds, weights, weights_max, n_to_produce" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# total_f._sample_and_weights = UniformSampleAndWeights" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 0.00133/(0.00133+0.213+0.015)*(x_max-3750)/(x_max-x_min)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# zfit.settings.set_verbosity(10)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "# # zfit.run.numeric_checks = False \n", "\n", "# nr_of_toys = 1\n", "# nevents = int(pdg[\"number_of_decays\"])\n", "# nevents = pdg[\"number_of_decays\"]\n", "# event_stack = 1000000\n", "# # zfit.settings.set_verbosity(10)\n", "# calls = int(nevents/event_stack + 1)\n", "\n", "# total_samp = []\n", "\n", "# start = time.time()\n", "\n", "# sampler = total_f.create_sampler(n=event_stack)\n", "\n", "# for toy in range(nr_of_toys):\n", " \n", "# dirName = 'data/zfit_toys/toy_{0}'.format(toy)\n", " \n", "# if not os.path.exists(dirName):\n", "# os.mkdir(dirName)\n", "# print(\"Directory \" , dirName , \" Created \")\n", "\n", "# for call in range(calls):\n", "\n", "# sampler.resample(n=event_stack)\n", "# s = sampler.unstack_x()\n", "# sam = zfit.run(s)\n", "# # clear_output(wait=True)\n", "\n", "# c = call + 1\n", " \n", "# print(\"{0}/{1} of Toy {2}/{3}\".format(c, calls, toy+1, nr_of_toys))\n", "# print(\"Time taken: {}\".format(display_time(int(time.time() - start))))\n", "# print(\"Projected time left: {}\".format(display_time(int((time.time() - start)/(c+calls*(toy))*((nr_of_toys-toy)*calls-c)))))\n", "\n", "# with open(\"data/zfit_toys/toy_{0}/{1}.pkl\".format(toy, call), \"wb\") as f:\n", "# pkl.dump(sam, f, pkl.HIGHEST_PROTOCOL)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# with open(r\"data/zfit_toys/toy_0/0.pkl\", \"rb\") as input_file:\n", "# sam = pkl.load(input_file)\n", "# print(sam[:10])\n", "\n", "# with open(r\"data/zfit_toys/toy_0/1.pkl\", \"rb\") as input_file:\n", "# sam2 = pkl.load(input_file)\n", "# print(sam2[:10])\n", "\n", "# print(np.sum(sam-sam2))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# print(\"Time to generate full toy: {} s\".format(int(time.time()-start)))\n", "\n", "# total_samp = []\n", "\n", "# for call in range(calls):\n", "# with open(r\"data/zfit_toys/toy_0/{}.pkl\".format(call), \"rb\") as input_file:\n", "# sam = pkl.load(input_file)\n", "# total_samp = np.append(total_samp, sam)\n", "\n", "# total_samp = total_samp.astype('float64')\n", "\n", "# data2 = zfit.data.Data.from_numpy(array=total_samp[:int(nevents)], obs=obs)\n", "\n", "# data3 = zfit.data.Data.from_numpy(array=total_samp, obs=obs)\n", "\n", 
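"# Illustrative note (assumption, mirroring the commented workflow above): after the\n", "# per-call pickle chunks are concatenated into one float64 array, a quick consistency\n", "# check such as 'assert len(total_samp) >= nevents' guards against fitting an incomplete toy.\n",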
"# print(total_samp[:nevents].shape)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# plt.clf()\n", "\n", "# bins = int((x_max-x_min)/7)\n", "\n", "# # calcs = zfit.run(total_test_tf(samp))\n", "# print(total_samp[:nevents].shape)\n", "\n", "# plt.hist(total_samp[:nevents], bins = bins, range = (x_min,x_max), label = 'data')\n", "# # plt.plot(test_q, calcs_test*nevents , label = 'pdf')\n", "\n", "# # plt.plot(sam, calcs, '.')\n", "# # plt.plot(test_q, calcs_test)\n", "# # plt.yscale('log')\n", "# plt.ylim(0, 200)\n", "# # plt.xlim(3080, 3110)\n", "\n", "# plt.legend()\n", "\n", "# plt.savefig('test2.png')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# sampler = total_f.create_sampler(n=nevents)\n", "# nll = zfit.loss.UnbinnedNLL(model=total_f, data=sampler, fit_range = (x_min, x_max))\n", "\n", "# # for param in pdf.get_dependents():\n", "# # param.set_value(initial_value)\n", "\n", "# sampler.resample(n=nevents)\n", "\n", "# # Randomise initial values\n", "# # for param in pdf.get_dependents():\n", "# # param.set_value(random value here)\n", "\n", "# # Minimise the NLL\n", "# minimizer = zfit.minimize.MinuitMinimizer(verbosity = 10)\n", "# minimum = minimizer.minimize(nll)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# jpsi_width" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# plt.hist(sample, weights=1 / prob(sample))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Fitting" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# start = time.time()\n", "\n", "# for param in total_f.get_dependents():\n", "# param.randomize()\n", " \n", "# # for param in total_f.get_dependents():\n", "# # print(zfit.run(param))\n", " \n", "# nll = zfit.loss.UnbinnedNLL(model=total_f, data=data2, fit_range = (x_min, x_max))\n", "\n", "# minimizer = zfit.minimize.MinuitMinimizer(verbosity = 5)\n", "# # minimizer._use_tfgrad = False\n", "# result = minimizer.minimize(nll)\n", "\n", "# # param_errors = result.error()\n", "\n", "# # for var, errors in param_errors.items():\n", "# # print('{}: ^{{+{}}}_{{{}}}'.format(var.name, errors['upper'], errors['lower']))\n", "\n", "# print(\"Function minimum:\", result.fmin)\n", "# # print(\"Results:\", result.params)\n", "# print(\"Hesse errors:\", result.hesse())" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# print(\"Time taken for fitting: {}\".format(display_time(int(time.time()-start))))\n", "\n", "# # probs = total_f.pdf(test_q)\n", "\n", "# calcs_test = zfit.run(probs)\n", "# res_y = zfit.run(jpsi_res(test_q))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# plt.clf()\n", "# # plt.plot(x_part, calcs, '.')\n", "# plt.plot(test_q, calcs_test, label = 'pdf')\n", "# # plt.plot(test_q, res_y, label = 'res')\n", "# plt.legend()\n", "# plt.ylim(0.0, 10e-6)\n", "# # plt.yscale('log')\n", "# # plt.xlim(3080, 3110)\n", "# plt.savefig('test3.png')\n", "# # print(jpsi_width)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# _tot = 4.37e-7+6.02e-5+4.97e-6\n", "# _probs = []\n", "# _probs.append(6.02e-5/_tot)\n", "# _probs.append(4.97e-6/_tot)\n", "# _probs.append(4.37e-7/_tot)\n", "# print(_probs)" ] }, { "cell_type": "code", "execution_count": null, "metadata": 
{}, "outputs": [], "source": [ "# dtype = 'float64'\n", "# # mixture = tfd.Uniform(tf.constant(x_min, dtype=dtype), tf.constant(x_max, dtype=dtype))\n", "# mixture = tfd.MixtureSameFamily(mixture_distribution=tfd.Categorical(probs=[tf.constant(0.007, dtype=dtype),\n", "# tf.constant(0.917, dtype=dtype),\n", "# tf.constant(0.076, dtype=dtype)]),\n", "# components_distribution=tfd.Uniform(low=[tf.constant(x_min, dtype=dtype), \n", "# tf.constant(3080, dtype=dtype),\n", "# tf.constant(3670, dtype=dtype)], \n", "# high=[tf.constant(x_max, dtype=dtype),\n", "# tf.constant(3112, dtype=dtype), \n", "# tf.constant(3702, dtype=dtype)]))\n", "# # for i in range(10):\n", "# # print(zfit.run(mixture.prob(mixture.sample((10, 1)))))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# print((zfit.run(jpsi_p)%(2*np.pi))/np.pi)\n", "# print((zfit.run(psi2s_p)%(2*np.pi))/np.pi)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# def jpsi_res(q):\n", "# return resonance(q, _mass = jpsi_mass, scale = jpsi_scale,\n", "# phase = jpsi_phase, width = jpsi_width)\n", "\n", "# def psi2s_res(q):\n", "# return resonance(q, _mass = psi2s_mass, scale = psi2s_scale,\n", "# phase = psi2s_phase, width = psi2s_width)\n", " \n", "# def p3770_res(q):\n", "# return resonance(q, _mass = p3770_mass, scale = p3770_scale,\n", "# phase = p3770_phase, width = p3770_width)\n", " \n", "# def p4040_res(q):\n", "# return resonance(q, _mass = p4040_mass, scale = p4040_scale,\n", "# phase = p4040_phase, width = p4040_width)\n", " \n", "# def p4160_res(q):\n", "# return resonance(q, _mass = p4160_mass, scale = p4160_scale,\n", "# phase = p4160_phase, width = p4160_width)\n", " \n", "# def p4415_res(q):\n", "# return resonance(q, _mass = p4415_mass, scale = p4415_scale,\n", "# phase = p4415_phase, width = p4415_width)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 0.15**2*4.2/1000\n", "# result.hesse()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Constraints" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# 1. 
Constraint - Real part of sum of Psi contrib and D contribs\n", "\n", "sum_list = []\n", "\n", "sum_list.append(ztf.to_complex(jpsi_s) * tf.exp(tf.complex(ztf.constant(0.0), jpsi_p)) * ztf.to_complex(jpsi_w / (tf.pow(jpsi_m,3))))\n", "sum_list.append(ztf.to_complex(psi2s_s) * tf.exp(tf.complex(ztf.constant(0.0), psi2s_p)) * ztf.to_complex(psi2s_w / (tf.pow(psi2s_m,3))))\n", "sum_list.append(ztf.to_complex(p3770_s) * tf.exp(tf.complex(ztf.constant(0.0), p3770_p)) * ztf.to_complex(p3770_w / (tf.pow(p3770_m,3))))\n", "sum_list.append(ztf.to_complex(p4040_s) * tf.exp(tf.complex(ztf.constant(0.0), p4040_p)) * ztf.to_complex(p4040_w / (tf.pow(p4040_m,3))))\n", "sum_list.append(ztf.to_complex(p4160_s) * tf.exp(tf.complex(ztf.constant(0.0), p4160_p)) * ztf.to_complex(p4160_w / (tf.pow(p4160_m,3))))\n", "sum_list.append(ztf.to_complex(p4415_s) * tf.exp(tf.complex(ztf.constant(0.0), p4415_p)) * ztf.to_complex(p4415_w / (tf.pow(p4415_m,3))))\n", "sum_list.append(ztf.to_complex(DDstar_s) * tf.exp(tf.complex(ztf.constant(0.0), DDstar_p)) * (ztf.to_complex(1.0 / (10.0*tf.pow(Dstar_m,2)) + 1.0 / (10.0*tf.pow(D_m,2)))))\n", "sum_list.append(ztf.to_complex(Dbar_s) * tf.exp(tf.complex(ztf.constant(0.0), Dbar_p)) * ztf.to_complex(1.0 / (6.0*tf.pow(Dbar_m,2))))\n", "\n", "sum_ru_1 = ztf.to_complex(ztf.constant(0.0))\n", "\n", "for part in sum_list:\n", " sum_ru_1 += part\n", "\n", "sum_1 = tf.math.real(sum_ru_1)\n", "# constraint1 = zfit.constraint.GaussianConstraint(params = sum_1, mu = ztf.constant(1.7*10**-8), \n", "# sigma = ztf.constant(2.2*10**-8))\n", "\n", "constraint1 = tf.pow((sum_1-ztf.constant(1.7*10**-8))/ztf.constant(2.2*10**-8),2)/ztf.constant(2.)\n", "\n", "# 2. Constraint - Abs. of sum of Psi contribs and D contribs\n", "\n", "sum_2 = tf.abs(sum_ru_1)\n", "constraint2 = tf.cond(tf.greater_equal(sum_2, 5.0e-8), lambda: 100000., lambda: 0.)\n", "\n", "# 3. Constraint - Maximum eta of D contribs\n", "\n", "constraint3_0 = tf.cond(tf.greater_equal(tf.abs(Dbar_s), 0.2), lambda: 100000., lambda: 0.)\n", "\n", "constraint3_1 = tf.cond(tf.greater_equal(tf.abs(DDstar_s), 0.2), lambda: 100000., lambda: 0.)\n", "\n", "# 4. 
Constraint - Form factor multivariate Gaussian constraint on the fplus expansion coefficients (Cov_matrix is the correlation matrix)\n", "\n",
"Cov_matrix = [[ztf.constant( 1.), ztf.constant( 0.45), ztf.constant( 0.19), ztf.constant(0.857), ztf.constant(0.598), ztf.constant(0.531), ztf.constant(0.752), ztf.constant(0.229), ztf.constant(0.117)],\n",
"               [ztf.constant( 0.45), ztf.constant( 1.), ztf.constant(0.677), ztf.constant(0.708), ztf.constant(0.958), ztf.constant(0.927), ztf.constant(0.227), ztf.constant(0.443), ztf.constant(0.287)],\n",
"               [ztf.constant( 0.19), ztf.constant(0.677), ztf.constant( 1.), ztf.constant(0.595), ztf.constant(0.770), ztf.constant(0.819),ztf.constant(-0.023), ztf.constant( 0.07), ztf.constant(0.196)],\n",
"               [ztf.constant(0.857), ztf.constant(0.708), ztf.constant(0.595), ztf.constant( 1.), ztf.constant( 0.83), ztf.constant(0.766), ztf.constant(0.582), ztf.constant(0.237), ztf.constant(0.192)],\n",
"               [ztf.constant(0.598), ztf.constant(0.958), ztf.constant(0.770), ztf.constant( 0.83), ztf.constant( 1.), ztf.constant(0.973), ztf.constant(0.324), ztf.constant(0.372), ztf.constant(0.272)],\n",
"               [ztf.constant(0.531), ztf.constant(0.927), ztf.constant(0.819), ztf.constant(0.766), ztf.constant(0.973), ztf.constant( 1.), ztf.constant(0.268), ztf.constant(0.332), ztf.constant(0.269)],\n",
"               [ztf.constant(0.752), ztf.constant(0.227),ztf.constant(-0.023), ztf.constant(0.582), ztf.constant(0.324), ztf.constant(0.268), ztf.constant( 1.), ztf.constant( 0.59), ztf.constant(0.515)],\n",
"               [ztf.constant(0.229), ztf.constant(0.443), ztf.constant( 0.07), ztf.constant(0.237), ztf.constant(0.372), ztf.constant(0.332), ztf.constant( 0.59), ztf.constant( 1.), ztf.constant(0.897)],\n",
"               [ztf.constant(0.117), ztf.constant(0.287), ztf.constant(0.196), ztf.constant(0.192), ztf.constant(0.272), ztf.constant(0.269), ztf.constant(0.515), ztf.constant(0.897), ztf.constant( 1.)]]\n",
"\n", "def triGauss(val1,val2,val3,m = Cov_matrix):\n", "\n",
"    mean1 = ztf.constant(0.466)\n", "    mean2 = ztf.constant(-0.885)\n", "    mean3 = ztf.constant(-0.213)\n",
"    sigma1 = ztf.constant(0.014)\n", "    sigma2 = ztf.constant(0.128)\n", "    sigma3 = ztf.constant(0.548)\n",
"    x1 = (val1-mean1)/sigma1\n", "    x2 = (val2-mean2)/sigma2\n", "    x3 = (val3-mean3)/sigma3\n",
"    rho12 = m[0][1]\n", "    rho13 = m[0][2]\n", "    rho23 = m[1][2]\n",
"    w = x1*x1*(rho23*rho23-1) + x2*x2*(rho13*rho13-1)+x3*x3*(rho12*rho12-1)+2*(x1*x2*(rho12-rho13*rho23)+x1*x3*(rho13-rho12*rho23)+x2*x3*(rho23-rho12*rho13))\n",
"    d = 2*(rho12*rho12+rho13*rho13+rho23*rho23-2*rho12*rho13*rho23-1)\n", "    \n",
"    fcn = -w/d\n", "    chisq = -2*fcn\n", "    return chisq\n", "\n",
"constraint4 = triGauss(bplus_0, bplus_1, bplus_2)\n", "\n",
"# mean1 = ztf.constant(0.466)\n", "# mean2 = ztf.constant(-0.885)\n", "# mean3 = ztf.constant(-0.213)\n",
"# sigma1 = ztf.constant(0.014)\n", "# sigma2 = ztf.constant(0.128)\n", "# sigma3 = ztf.constant(0.548)\n",
"# constraint4_0 = tf.pow((bplus_0-mean1)/sigma1,2)/ztf.constant(2.)\n",
"# constraint4_1 = tf.pow((bplus_1-mean2)/sigma2,2)/ztf.constant(2.)\n",
"# constraint4_2 = tf.pow((bplus_2-mean3)/sigma3,2)/ztf.constant(2.)\n", "\n",
"# 5. Constraint - Abs. of sum of light resonance contributions\n", "\n",
"sum_list_5 = []\n", "\n",
"sum_list_5.append(rho_s*rho_w/rho_m)\n", "sum_list_5.append(omega_s*omega_w/omega_m)\n", "sum_list_5.append(phi_s*phi_w/phi_m)\n", "\n", "\n",
"sum_ru_5 = ztf.constant(0.0)\n", "\n",
"for part in sum_list_5:\n", "    sum_ru_5 += part\n", "\n",
"constraint5 = tf.cond(tf.greater_equal(tf.abs(sum_ru_5), ztf.constant(0.02)), lambda: 100000., lambda: 0.)\n", "\n", "# 6. 
Constraint on phases of Jpsi and Psi2s for cut out fit\n", "\n", "\n", "# constraint6_0 = zfit.constraint.GaussianConstraint(params = jpsi_p, mu = ztf.constant(pdg[\"jpsi_phase_unc\"]),\n", "# sigma = ztf.constant(jpsi_phase))\n", "# constraint6_1 = zfit.constraint.GaussianConstraint(params = psi2s_p, mu = ztf.constant(pdg[\"psi2s_phase_unc\"]),\n", "# sigma = ztf.constant(psi2s_phase))\n", "\n", "constraint6_0 = tf.pow((jpsi_p-ztf.constant(jpsi_phase))/ztf.constant(pdg[\"jpsi_phase_unc\"]),2)/ztf.constant(2.)\n", "constraint6_1 = tf.pow((psi2s_p-ztf.constant(psi2s_phase))/ztf.constant(pdg[\"psi2s_phase_unc\"]),2)/ztf.constant(2.)\n", "\n", "# 7. Constraint on Ctt with higher limits\n", "\n", "constraint7 = tf.cond(tf.greater_equal(Ctt*Ctt, 0.25), lambda: 100000., lambda: 0.)\n", "\n", "constraint7dtype = tf.float64\n", "\n", "# zfit.run(constraint6_0)\n", "\n", "# ztf.convert_to_tensor(constraint6_0)\n", "\n", "#List of all constraints\n", "\n", "constraints = [constraint1, constraint2, constraint3_0, constraint3_1,# constraint4, #constraint4_0, constraint4_1, constraint4_2,\n", " constraint6_0, constraint6_1]#, constraint7]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Reset params" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "param_values_dic = {\n", " 'jpsi_m': jpsi_mass,\n", " 'jpsi_s': jpsi_scale,\n", " 'jpsi_p': jpsi_phase,\n", " 'jpsi_w': jpsi_width,\n", " 'psi2s_m': psi2s_mass,\n", " 'psi2s_s': psi2s_scale,\n", " 'psi2s_p': psi2s_phase,\n", " 'psi2s_w': psi2s_width,\n", " 'p3770_m': p3770_mass,\n", " 'p3770_s': p3770_scale,\n", " 'p3770_p': p3770_phase,\n", " 'p3770_w': p3770_width,\n", " 'p4040_m': p4040_mass,\n", " 'p4040_s': p4040_scale,\n", " 'p4040_p': p4040_phase,\n", " 'p4040_w': p4040_width,\n", " 'p4160_m': p4160_mass,\n", " 'p4160_s': p4160_scale,\n", " 'p4160_p': p4160_phase,\n", " 'p4160_w': p4160_width,\n", " 'p4415_m': p4415_mass,\n", " 'p4415_s': p4415_scale,\n", " 'p4415_p': p4415_phase,\n", " 'p4415_w': p4415_width,\n", " 'rho_m': rho_mass,\n", " 'rho_s': rho_scale,\n", " 'rho_p': rho_phase,\n", " 'rho_w': rho_width,\n", " 'omega_m': omega_mass,\n", " 'omega_s': omega_scale,\n", " 'omega_p': omega_phase,\n", " 'omega_w': omega_width,\n", " 'phi_m': phi_mass,\n", " 'phi_s': phi_scale,\n", " 'phi_p': phi_phase,\n", " 'phi_w': phi_width,\n", " 'Dstar_m': Dstar_mass,\n", " 'DDstar_s': 0.0,\n", " 'DDstar_p': 0.0,\n", " 'D_m': D_mass,\n", " 'Dbar_m': Dbar_mass,\n", " 'Dbar_s': 0.0,\n", " 'Dbar_p': 0.0,\n", " 'tau_m': pdg['tau_M'],\n", " 'Ctt': 0.0,\n", " 'b0_0': 0.292,\n", " 'b0_1': 0.281,\n", " 'b0_2': 0.150,\n", " 'bplus_0': 0.466,\n", " 'bplus_1': -0.885,\n", " 'bplus_2': -0.213,\n", " 'bT_0': 0.460,\n", " 'bT_1': -1.089,\n", " 'bT_2': -1.114}\n", "\n", "\n", "def reset_param_values(variation = 0.05):\n", " for param in total_f_fit.get_dependents():\n", " if param.floating:\n", " param.set_value(param_values_dic[param.name] + random.uniform(-1, 1) * param_values_dic[param.name]* variation)\n", "# print(param.name)\n", "# for param in totalf.get_dependents():\n", "# param.set_value()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Analysis" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "# # zfit.run.numeric_checks = False \n", "\n", "# fitting_range = 'cut'\n", "# total_BR = 1.7e-10 + 4.9e-10 + 2.5e-9 + 6.02e-5 + 4.97e-6 + 1.38e-9 + 4.2e-10 + 2.6e-9 + 6.1e-10 + 4.37e-7\n", "# cut_BR = 1.0 - (6.02e-5 + 
4.97e-6)/total_BR\n", "\n", "# Ctt_list = []\n", "# Ctt_error_list = []\n", "\n", "# nr_of_toys = 1\n", "# if fitting_range == 'cut':\n", "# nevents = int(pdg[\"number_of_decays\"]*cut_BR)\n", "# else:\n", "# nevents = int(pdg[\"number_of_decays\"])\n", "# # nevents = pdg[\"number_of_decays\"]\n", "# event_stack = 1000000\n", "# # nevents *= 41\n", "# # zfit.settings.set_verbosity(10)\n", "# calls = int(nevents/event_stack + 1)\n", "\n", "# total_samp = []\n", "\n", "# start = time.time()\n", "\n", "# sampler = total_f.create_sampler(n=event_stack)\n", "\n", "# for toy in range(nr_of_toys):\n", " \n", "# ### Generate data\n", " \n", "# # clear_output(wait=True)\n", " \n", "# print(\"Toy {}: Generating data...\".format(toy))\n", " \n", "# dirName = 'data/zfit_toys/toy_{0}'.format(toy)\n", " \n", "# if not os.path.exists(dirName):\n", "# os.mkdir(dirName)\n", "# print(\"Directory \" , dirName , \" Created \")\n", " \n", "# reset_param_values()\n", " \n", "# if fitting_range == 'cut':\n", " \n", "# sampler.resample(n=nevents)\n", "# s = sampler.unstack_x()\n", "# sam = zfit.run(s)\n", "# calls = 0\n", "# c = 1\n", " \n", "# else: \n", "# for call in range(calls):\n", "\n", "# sampler.resample(n=event_stack)\n", "# s = sampler.unstack_x()\n", "# sam = zfit.run(s)\n", "\n", "# c = call + 1\n", "\n", "# with open(\"data/zfit_toys/toy_{0}/{1}.pkl\".format(toy, call), \"wb\") as f:\n", "# pkl.dump(sam, f, pkl.HIGHEST_PROTOCOL)\n", " \n", "# print(\"Toy {}: Data generation finished\".format(toy))\n", " \n", "# ### Load data\n", " \n", "# print(\"Toy {}: Loading data...\".format(toy))\n", " \n", "# if fitting_range == 'cut':\n", " \n", "# total_samp = sam\n", " \n", "# else:\n", " \n", "# for call in range(calls):\n", "# with open(r\"data/zfit_toys/toy_0/{}.pkl\".format(call), \"rb\") as input_file:\n", "# sam = pkl.load(input_file)\n", "# total_samp = np.append(total_samp, sam)\n", "\n", "# total_samp = total_samp.astype('float64')\n", " \n", "# if fitting_range == 'full':\n", "\n", "# data = zfit.data.Data.from_numpy(array=total_samp[:int(nevents)], obs=obs)\n", " \n", "# print(\"Toy {}: Loading data finished\".format(toy))\n", "\n", "# ### Fit data\n", "\n", "# print(\"Toy {}: Fitting pdf...\".format(toy))\n", "\n", "# for param in total_f.get_dependents():\n", "# param.randomize()\n", "\n", "# nll = zfit.loss.UnbinnedNLL(model=total_f, data=data, fit_range = (x_min, x_max), constraints = constraints)\n", "\n", "# minimizer = zfit.minimize.MinuitMinimizer(verbosity = 5)\n", "# # minimizer._use_tfgrad = False\n", "# result = minimizer.minimize(nll)\n", "\n", "# print(\"Toy {}: Fitting finished\".format(toy))\n", "\n", "# print(\"Function minimum:\", result.fmin)\n", "# print(\"Hesse errors:\", result.hesse())\n", "\n", "# params = result.params\n", "# Ctt_list.append(params[Ctt]['value'])\n", "# Ctt_error_list.append(params[Ctt]['minuit_hesse']['error'])\n", "\n", "# #plotting the result\n", "\n", "# plotdirName = 'data/plots'.format(toy)\n", "\n", "# if not os.path.exists(plotdirName):\n", "# os.mkdir(plotdirName)\n", "# # print(\"Directory \" , dirName , \" Created \")\n", " \n", "# probs = total_f.pdf(test_q, norm_range=False)\n", "# calcs_test = zfit.run(probs)\n", "# plt.clf()\n", "# plt.plot(test_q, calcs_test, label = 'pdf')\n", "# plt.legend()\n", "# plt.ylim(0.0, 6e-6)\n", "# plt.savefig(plotdirName + '/toy_fit_full_range{}.png'.format(toy))\n", "\n", "# print(\"Toy {0}/{1}\".format(toy+1, nr_of_toys))\n", "# print(\"Time taken: {}\".format(display_time(int(time.time() - start))))\n", "# 
print(\"Projected time left: {}\".format(display_time(int((time.time() - start)/(c+calls*(toy))*((nr_of_toys-toy)*calls-c)))))\n", " \n", "# if fitting_range == 'cut':\n", " \n", "# _1 = np.where((total_samp >= x_min) & (total_samp <= (jpsi_mass - 60.)))\n", " \n", "# tot_sam_1 = total_samp[_1]\n", " \n", "# _2 = np.where((total_samp >= (jpsi_mass + 70.)) & (total_samp <= (psi2s_mass - 50.)))\n", " \n", "# tot_sam_2 = total_samp[_2]\n", "\n", "# _3 = np.where((total_samp >= (psi2s_mass + 50.)) & (total_samp <= x_max))\n", " \n", "# tot_sam_3 = total_samp[_3]\n", "\n", "# tot_sam = np.append(tot_sam_1, tot_sam_2)\n", "# tot_sam = np.append(tot_sam, tot_sam_3)\n", " \n", "# data = zfit.data.Data.from_numpy(array=tot_sam[:int(nevents)], obs=obs_fit)\n", " \n", "# print(\"Toy {}: Loading data finished\".format(toy))\n", " \n", "# ### Fit data\n", "\n", "# print(\"Toy {}: Fitting pdf...\".format(toy))\n", "\n", "# for param in total_f_fit.get_dependents():\n", "# param.randomize()\n", "\n", "# nll = zfit.loss.UnbinnedNLL(model=total_f_fit, data=data, constraints = constraints)\n", "\n", "# minimizer = zfit.minimize.MinuitMinimizer(verbosity = 5)\n", "# # minimizer._use_tfgrad = False\n", "# result = minimizer.minimize(nll)\n", "\n", "# print(\"Function minimum:\", result.fmin)\n", "# print(\"Hesse errors:\", result.hesse())\n", "\n", "# params = result.params\n", " \n", "# if result.converged:\n", "# Ctt_list.append(params[Ctt]['value'])\n", "# Ctt_error_list.append(params[Ctt]['minuit_hesse']['error'])\n", "\n", "# #plotting the result\n", "\n", "# plotdirName = 'data/plots'.format(toy)\n", "\n", "# if not os.path.exists(plotdirName):\n", "# os.mkdir(plotdirName)\n", "# # print(\"Directory \" , dirName , \" Created \")\n", " \n", "# plt.clf()\n", "# plt.hist(tot_sam, bins = int((x_max-x_min)/7.), label = 'toy data')\n", "# plt.savefig(plotdirName + '/toy_histo_cut_region{}.png'.format(toy))\n", "\n", " \n", "# probs = total_f_fit.pdf(test_q, norm_range=False)\n", "# calcs_test = zfit.run(probs)\n", "# plt.clf()\n", "# plt.plot(test_q, calcs_test, label = 'pdf')\n", "# plt.axvline(x=jpsi_mass-60.,color='red', linewidth=0.7, linestyle = 'dotted')\n", "# plt.axvline(x=jpsi_mass+70.,color='red', linewidth=0.7, linestyle = 'dotted')\n", "# plt.axvline(x=psi2s_mass-50.,color='red', linewidth=0.7, linestyle = 'dotted')\n", "# plt.axvline(x=psi2s_mass+50.,color='red', linewidth=0.7, linestyle = 'dotted')\n", "# plt.legend()\n", "# plt.ylim(0.0, 1.5e-6)\n", "# plt.savefig(plotdirName + '/toy_fit_cut_region{}.png'.format(toy))\n", " \n", "# print(\"Toy {0}/{1}\".format(toy+1, nr_of_toys))\n", "# print(\"Time taken: {}\".format(display_time(int(time.time() - start))))\n", "# print(\"Projected time left: {}\".format(display_time(int((time.time() - start)/(toy+1))*((nr_of_toys-toy-1)))))\n", " " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# with open(\"data/results/Ctt_list.pkl\", \"wb\") as f:\n", "# pkl.dump(Ctt_list, f, pkl.HIGHEST_PROTOCOL)\n", "# with open(\"data/results/Ctt_error_list.pkl\", \"wb\") as f:\n", "# pkl.dump(Ctt_error_list, f, pkl.HIGHEST_PROTOCOL)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# print('{0}/{1} fits converged'.format(len(Ctt_list), nr_of_toys))\n", "# print('Mean Ctt value = {}'.format(np.mean(Ctt_list)))\n", "# print('Mean Ctt error = {}'.format(np.mean(Ctt_error_list)))\n", "# print('95 Sensitivy = {}'.format(((2*np.mean(Ctt_error_list))**2)*4.2/1000))" ] }, { 
"cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# plt.hist(tot_sam, bins = int((x_max-x_min)/7.))\n", "\n", "# plt.show()\n", "\n", "# # _ = np.where((total_samp >= x_min) & (total_samp <= (jpsi_mass - 50.)))\n", "\n", "# tot_sam.shape" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Ctt.floating = False" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# zfit.run(nll.value())" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# result.fmin" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# BR_steps = np.linspace(0.0, 1e-3, 11)\n", "pull_dic = {}\n", "\n", "mi = 0.0\n", "ma = 1e-2\n", "ste = 11\n", "\n", "for param in total_f_fit.get_dependents():\n", " if param.floating:\n", " pull_dic[param.name] = []\n", " for step in range(2*ste):\n", " pull_dic[param.name].append([])\n", " \n", "\n", "\n", "def save_pulls(step):\n", " for param in total_f_fit.get_dependents():\n", " if param.floating:\n", " pull_dic[param.name][step].append((params[param]['value'] - param_values_dic[param.name])/params[param]['minuit_hesse']['error'])\n", "\n", "\n", "\n", "# for key in pull_dic.keys():\n", "# print(np.shape(pull_dic[key]))\n", "# save_pulls(New_step=True)\n", "# params[Ctt]['value']" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# for param in total_f_fit.get_dependents():\n", "# if param.floating:\n", "# print(param.name)\n", "\n", "# print(params[Ctt])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# CLS Code" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": true }, "outputs": [], "source": [ "# zfit.run.numeric_checks = False \n", "\n", "load = False\n", "\n", "bo = True\n", "\n", "D_contribs = True\n", "\n", "if not D_contribs:\n", " Dbar_s.floating = False\n", " Dbar_p.floating = False\n", " DDstar_s.floating = False\n", " DDstar_p.floating = False\n", "\n", "bo_set = 1\n", "\n", "fitting_range = 'cut'\n", "total_BR = 1.7e-10 + 4.9e-10 + 2.5e-9 + 6.02e-5 + 4.97e-6 + 1.38e-9 + 4.2e-10 + 2.6e-9 + 6.1e-10 + 4.37e-7\n", "cut_BR = 1.0 - (6.02e-5 + 4.97e-6)/total_BR\n", "\n", "Ctt_list = []\n", "Ctt_error_list = []\n", "\n", "nr_of_toys = 1\n", "nevents = int(pdg[\"number_of_decays\"]*cut_BR)\n", "# nevents = pdg[\"number_of_decays\"]\n", "event_stack = 1000000\n", "# nevents *= 41\n", "# zfit.settings.set_verbosity(10)\n", "\n", "# mi = 0.0\n", "# ma = 1e-3\n", "# ste = 11\n", "\n", "BR_steps = np.linspace(mi, ma, ste)\n", "\n", "Ctt_steps = np.sqrt(BR_steps/4.2*1000)\n", "\n", "print(Ctt_steps)\n", "\n", "# total_samp = []\n", "\n", "start = time.time()\n", "\n", "Nll_list = []\n", "\n", "sampler = total_f.create_sampler(n=nevents, fixed_params = False)\n", "sampler.set_data_range(obs_fit)\n", "\n", "__ = -1\n", "\n", "#-----------------------------------------------------\n", "\n", "if not load:\n", " for Ctt_step in Ctt_steps:\n", " \n", " __ += 1\n", " \n", " for i in range(2):\n", " Ctt_list.append([])\n", " Ctt_error_list.append([])\n", " Nll_list.append([])\n", "\n", " for param in total_f_fit.get_dependents():\n", " if param.floating:\n", " pull_dic[param.name].append([])\n", " \n", " for toy in range(nr_of_toys): \n", " \n", " newset = True\n", " \n", " while newset:\n", " \n", " for floaty in [True, False]:\n", " Ctt.floating = floaty\n", " \n", " for bo_step in 
range(bo_set):\n", "\n", " print('Step: {0}/{1}'.format(int(__), ste))\n", " print('Current Ctt: {0}'.format(Ctt_step))\n", " print('Ctt floating: {0}'.format(floaty))\n", " \n", " reset_param_values(variation = 0.0)\n", "\n", " if floaty:\n", " print('Toy {0}/{1} - Fit {2}/{3}'.format(toy, nr_of_toys, bo_step, bo_set))\n", " Ctt.set_value(Ctt_step)\n", "\n", " else:\n", " Ctt.set_value(0.0)\n", " print('Toy {0}/{1} - Fit {2}/{3}'.format(toy, nr_of_toys, bo_step, bo_set))\n", "\n", " if newset:\n", " sampler.resample(n=nevents)\n", " data = sampler\n", " newset = False\n", "\n", " ### Fit data\n", " \n", " if floaty:\n", "# plt.clf()\n", "# plt.title('Ctt value: {:.2f}'.format(Ctt_step))\n", "# plt.hist(zfit.run(data), bins = int((x_max-x_min)/7), range = (x_min, x_max))\n", "# plt.savefig('data/CLs/plots/set_histo{}.png'.format(__))\n", " _step = 2*__\n", " else:\n", " _step = 2*__+1\n", " \n", " save_pulls(step = _step)\n", "\n", " nll = zfit.loss.UnbinnedNLL(model=total_f_fit, data=data, constraints = constraints)\n", "\n", " minimizer = zfit.minimize.MinuitMinimizer(verbosity = 5)\n", " # minimizer._use_tfgrad = False\n", " result = minimizer.minimize(nll)\n", "\n", " print(\"Function minimum:\", result.fmin)\n", " print(\"Hesse errors:\", result.hesse())\n", "\n", " params = result.params\n", "\n", " if result.converged:\n", "\n", " if floaty:\n", " Nll_list[-2].append(result.fmin)\n", " Ctt_list[-2].append(params[Ctt]['value'])\n", " Ctt_error_list[-2].append(params[Ctt]['minuit_hesse']['error'])\n", "\n", " else:\n", " Nll_list[-1].append(result.fmin)\n", " Ctt_list[-1].append(0.0)\n", " Ctt_error_list[-1].append(0.0)\n", " \n", "\n", " else:\n", " for _ in [1,2]:\n", " del Nll_list[-_][toy*bo_set:]\n", "# print(np.shape(Nll_list[-_]))\n", " del Ctt_list[-_][toy*bo_set:]\n", " del Ctt_error_list[-_][toy*bo_set:]\n", " for param in total_f_fit.get_dependents():\n", " if param.floating:\n", " del pull_dic[param.name][_step][toy*bo_set:]\n", " newset = True\n", " break\n", " \n", " if not result.converged:\n", " break\n", " \n", " print()\n", " print('Time taken: {}'.format(display_time(int(time.time()-start))))\n", " print('Estimated time left: {}'.format(display_time(int((time.time()-start)/(__+(toy+1)/nr_of_toys)*(ste-__-(nr_of_toys-toy-1)/nr_of_toys)))))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "if load:\n", " \n", " phase_combi = '-+'\n", "\n", " _dir = 'data/CLs/finished/f1d1/{}/'.format(phase_combi)\n", " \n", " jobs = os.listdir(_dir)\n", " \n", " First = True\n", " \n", "# print(jobs)\n", " \n", " for job in jobs:\n", " \n", " dirName = _dir + str(job) + '/data/CLs'\n", " \n", " if not os.path.exists(\"{}/{}-{}_{}s{}b{}t--CLs_Nll_list.pkl\".format(dirName, mi,ma,ste,bo_set,nr_of_toys)):\n", "# print(job)\n", " continue\n", " \n", " with open(r\"{}/variab.pkl\".format(dirName), \"rb\") as input_file:\n", " variab = pkl.load(input_file)\n", "# print(variab)\n", " \n", " ### sanity check:\n", " if variab['mi'] != mi or variab['ma'] != ma or variab['ste'] != ste or bo_set != bo_set:\n", " print('Fitting parameters of data dont equal the ones given -- Job {} skipped!'.format(job))\n", " \n", " with open(r\"{}/{}-{}_{}s{}b{}t--CLs_Nll_list.pkl\".format(dirName, mi,ma,ste,bo_set,nr_of_toys), \"rb\") as input_file:\n", " _Nll_list = pkl.load(input_file)\n", " \n", " with open(r\"{}/{}-{}_{}s{}b{}t--Ctt_list.pkl\".format(dirName, mi,ma,ste,bo_set,nr_of_toys), \"rb\") as input_file:\n", " _Ctt_list = pkl.load(input_file)\n", " \n", 
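"        # Load the remaining per-job pickles (Ctt errors, pulls, CLs values); the first job\n", "        # initialises the combined containers, every later job extends them step by step below.\n", 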
" with open(r\"{}/{}-{}_{}s{}b{}t--Ctt_error_list.pkl\".format(dirName, mi,ma,ste,bo_set,nr_of_toys), \"rb\") as input_file:\n", " _Ctt_error_list = pkl.load(input_file)\n", " \n", " with open(r\"{}/{}-{}_{}s{}b{}t--pull_dic.pkl\".format(dirName, mi,ma,ste,bo_set,nr_of_toys), \"rb\") as input_file:\n", " _pull_dic = pkl.load(input_file)\n", " \n", " with open(r\"{}/{}-{}_{}s--CLs_list.pkl\".format(dirName, mi,ma,ste), \"rb\") as input_file:\n", " _CLs_list = pkl.load(input_file)\n", " \n", " \n", " if First:\n", " Nll_list = _Nll_list\n", " Ctt_list = _Ctt_list\n", " Ctt_error_list = _Ctt_error_list\n", " pull_dic = _pull_dic\n", "# print(_pull_dic)\n", " CLs_list = _CLs_list\n", " First = False\n", " else:\n", " for step in range(2*ste):\n", "# print(Nll_list[step], step)\n", " Nll_list[step].extend(_Nll_list[step])\n", " Ctt_list[step].extend(_Ctt_list[step])\n", " Ctt_error_list[step].extend(_Ctt_error_list[step])\n", " for key in pull_dic.keys():\n", "# print(key, np.shape(pull_dic[key]))\n", " pull_dic[key][step].extend(_pull_dic[key][step])\n", " for step in range(ste):\n", " CLs_list[step].extend(_CLs_list[step])\n", "\n", "# print('----------------------')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "dirName = 'data/CLs'\n", "\n", "# if bo and not load:\n", "# for s in range(2*ste):\n", "# Nll_list[s] = [np.min(Nll_list[s])]\n", "\n", "\n", "if not load:\n", " \n", " if not os.path.exists(dirName):\n", " os.mkdir(dirName)\n", " print(\"Directory \" , dirName , \" Created \")\n", "\n", " with open(\"{}/{}-{}_{}s{}b{}t--CLs_Nll_list.pkl\".format(dirName, mi,ma,ste,bo_set,nr_of_toys), \"wb\") as f:\n", " pkl.dump(Nll_list, f, pkl.HIGHEST_PROTOCOL)\n", " \n", " with open(\"{}/{}-{}_{}s{}b{}t--Ctt_list.pkl\".format(dirName, mi,ma,ste,bo_set,nr_of_toys), \"wb\") as f:\n", " pkl.dump(Ctt_list, f, pkl.HIGHEST_PROTOCOL)\n", " \n", " with open(\"{}/{}-{}_{}s{}b{}t--Ctt_error_list.pkl\".format(dirName, mi,ma,ste,bo_set,nr_of_toys), \"wb\") as f:\n", " pkl.dump(Ctt_error_list, f, pkl.HIGHEST_PROTOCOL)\n", " \n", " with open(\"{}/{}-{}_{}s{}b{}t--pull_dic.pkl\".format(dirName, mi,ma,ste,bo_set,nr_of_toys), \"wb\") as f:\n", " pkl.dump(pull_dic, f, pkl.HIGHEST_PROTOCOL)\n", " \n", " variab = {'mi': mi,\n", " 'ma': ma,\n", " 'ste': ste,\n", " 'bo_set': bo_set,\n", " 'nr_of_toys': nr_of_toys}\n", " \n", " with open(\"{}/variab.pkl\".format(dirName), \"wb\") as f:\n", " pkl.dump(variab, f, pkl.HIGHEST_PROTOCOL)\n", " \n", " CLs_values = []\n", " \n", " toy_size = bo_set\n", "\n", " print(np.shape(Nll_list))\n", " print(Nll_list[0:1])\n", " \n", " for step in range(ste):\n", " CLs_values.append([])\n", " for toy in range(nr_of_toys):\n", " float_min = np.min(Nll_list[2*step][toy*bo_set:(toy+1)*bo_set])\n", " fix_min = np.min(Nll_list[2*step+1][toy*bo_set:(toy+1)*bo_set])\n", " CLs_values[step].append(float_min-fix_min)\n", " \n", " \n", " print(np.shape(CLs_values))\n", " \n", " with open(\"{}/{}-{}_{}s--CLs_list.pkl\".format(dirName, mi,ma,ste), \"wb\") as f:\n", " pkl.dump(CLs_values, f, pkl.HIGHEST_PROTOCOL)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# print(variab['mi'] != mi)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Plot" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# l = []\n", "\n", "\n", "# if load:\n", "# CLs_values = CLs_list\n", "\n", "# if not os.path.exists('data/CLs/plots'):\n", "# 
os.mkdir('data/CLs/plots')\n", "# print(\"Directory \" , 'data/CLs/plots' , \" Created \")\n", "\n", "# print(np.shape(CLs_values))\n", " \n", "# for step in range(1,ste):\n", "# plt.clf()\n", "# plt.title('Ctt value: {:.2f}'.format(Ctt_steps[step]))\n", "# plt.hist(CLs_values[0], bins = 100, range = (-25, 25), label = 'Ctt fixed to 0')\n", "# plt.hist(CLs_values[step], bins = 100, range = (-25, 25), label = 'Ctt floating')\n", "# plt.axvline(x=np.mean(CLs_values[0]),color='red', linewidth=1.0, linestyle = 'dotted')\n", "# plt.legend()\n", "# plt.savefig('data/CLs/plots/CLs-BR({:.1E}).png'.format(BR_steps[step]))\n", " \n", "# l.append(len(np.where(np.array(CLs_values[step]) < np.mean(CLs_values[0]))[0]))\n", " \n", "# for step in range(2*ste):\n", "# if step%2 == 0:\n", "# floaty = True\n", "# else:\n", "# floaty = False\n", "# for key in pull_dic.keys():\n", "# if not os.path.exists('data/CLs/plots/{}'.format(key)):\n", "# os.mkdir('data/CLs/plots/{}'.format(key))\n", "# plt.clf()\n", "# plt.title('Pull {} - Ctt value {:.2f} - floating {}'.format(key, Ctt_steps[int(step/2)], floaty))\n", "# plt.hist(pull_dic[key][step], bins = 50, range = (-5,5))\n", "# plt.xlabel('Pull')\n", "# plt.savefig('data/CLs/plots/{}/{:.2f}Ctt{}s{}f.png'.format(key, Ctt_steps[int(step/2)], step, floaty))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "# for s in range(len(l)):\n", "# print('BR: {:.4f}'.format(BR_steps[s]))\n", "# print(2*l[s]/len(CLs_values[s]))\n", "# print()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# for step in range(2*ste):\n", "# for key in pull_dic.keys():\n", "# print(pull_dic[key][step])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# for param in total_f_fit.get_dependents():\n", "# if param.floating:\n", "# print(params[param]['value'])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(display_time(int(time.time()-start)))" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# variab['mi'] =! mi" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.3" } }, "nbformat": 4, "nbformat_minor": 2 }