diff --git a/pdg_const.py b/pdg_const.py
index 53ced55..9f965e7 100644
--- a/pdg_const.py
+++ b/pdg_const.py
@@ -1,6 +1,6 @@
 pdg = {
-###Particle masses###
+###Particle masses###
 
 "mbstar" : 5415.4,
 "mbstar0" : 5711.0,
@@ -47,6 +47,7 @@
 "alpha_ew" : 1.0/137.0,
 "Vts" : 0.0394,
 "Vtb" : 1.019,
+"number_of_decays": 8e10, #---------------> Look up real value
 
 #Formfactor z coefficients
diff --git a/pdg_const.pyc b/pdg_const.pyc
index da0a136..7ef6520 100644
--- a/pdg_const.pyc
+++ b/pdg_const.pyc
Binary files differ
diff --git a/plots/fast_binned/histo.png b/plots/fast_binned/histo.png
index b7141b9..b40cbeb 100644
--- a/plots/fast_binned/histo.png
+++ b/plots/fast_binned/histo.png
Binary files differ
diff --git a/plots/fast_binned/pdf_and_parts.png b/plots/fast_binned/pdf_and_parts.png
index 7bd6d9f..cdb0f59 100644
--- a/plots/fast_binned/pdf_and_parts.png
+++ b/plots/fast_binned/pdf_and_parts.png
Binary files differ
diff --git a/plots/points/ff.png b/plots/points/ff.png
index d34efcf..0dafc99 100644
--- a/plots/points/ff.png
+++ b/plots/points/ff.png
Binary files differ
diff --git a/plots/points/histo.png b/plots/points/histo.png
index c80d8cd..5b4742b 100644
--- a/plots/points/histo.png
+++ b/plots/points/histo.png
Binary files differ
diff --git a/plots/points/histo_raw.png b/plots/points/histo_raw.png
index 6db2ecc..ad1056f 100644
--- a/plots/points/histo_raw.png
+++ b/plots/points/histo_raw.png
Binary files differ
diff --git a/plots/points/pdf_and_parts.png b/plots/points/pdf_and_parts.png
index a1b815e..f0e01e3 100644
--- a/plots/points/pdf_and_parts.png
+++ b/plots/points/pdf_and_parts.png
Binary files differ
diff --git a/plots/points/vec_axiv.png b/plots/points/vec_axiv.png
index 1c6bb93..07e58e0 100644
--- a/plots/points/vec_axiv.png
+++ b/plots/points/vec_axiv.png
Binary files differ
diff --git a/raremodel.py b/raremodel.py
index 2621771..7a6a937 100644
--- a/raremodel.py
+++ b/raremodel.py
@@ -12,6 +12,7 @@
 from helperfunctions import display_time, prepare_plot
 import cmath as c
 import scipy.integrate as integrate
+from array import array as arr
 
 mmu = pdg['muon_M']
 mb = pdg["bquark_M"]
@@ -35,6 +36,8 @@
         self.C9eff = pdg["C9eff"]
         self.C10eff = pdg["C10eff"]
 
+        self.number_of_decays = pdg["number_of_decays"]
+
         #self.C1 = pdg["C1"]
         #self.C2 = pdg["C2"]
         #self.C3 = pdg["C3"]
@@ -363,7 +366,7 @@
 
         part_set = {"x_part" : x_part}
 
-        pkl.dump( part_set, open("./data/set_{0}_range({1}-{2}).pkl".format(int(set_size),int(x_min), int(x_max)) , "wb" ) )
+        pkl.dump( part_set, open("./data/slim_points/set_{0}_range({1}-{2}).pkl".format(int(set_size),int(x_min), int(x_max)) , "wb" ) )
 
         print(" Set saved!")
@@ -442,7 +445,7 @@
 
            #Count the hits in the nonres part -> Range of the whole pdf!
 
-            if x > x_min and x < x_max and y < self.total_nonres(x**2, absolut = True):
+            if y < self.total_nonres(x**2, absolut = True):
                counter_x_nr += 1
 
            #Calculate total estimated time to produce all toys
@@ -487,9 +490,9 @@
 
        if save:
 
-            part_set = {"x_part" : x_part, "counter_x_nr": counter_x_nr}
+            part_set = {"x_part" : x_part, "counter_x_nr": counter_x_nr, "x_min": x_min, "x_max": x_max, "mode": mode}
 
-            pkl.dump( part_set, open("./data/true_data_toy_{0}_range({1}-{2}).pkl".format(_nonres_set_size ,int(x_min), int(x_max)) , "wb" ) )
+            pkl.dump( part_set, open("./data/true_data/true_data_toy_{0}_range({1}-{2}).pkl".format(toy_nr ,int(x_min), int(x_max)) , "wb" ) )
 
            print(" Toy set {0} saved!".format(toy_nr))
@@ -572,7 +575,7 @@
 
        for i in bin_height:
            _ += i
 
-        part_set = {"bin_mean" : bin_mean, "bin_height": bin_height, "nbins": nbins, "bin_true_height": bin_true_height, "nonres_set_size": nonres_set_size}
+        part_set = {"bin_mean" : bin_mean, "bin_height": bin_height, "nbins": nbins, "bin_true_height": bin_true_height, "_nonres_set_size": nonres_set_size, "x_min": x_min, "x_max": x_max}
 
        pkl.dump( part_set, open("./data/fast_binned/binned_toy_{0}.pkl".format(toy_nr) , "wb" ) )
@@ -738,7 +741,7 @@
 
        plt.clf()
 
-        plt.ylim(0.0, 2*_mean)
+        plt.ylim(0.0, 2.5*_mean)
 
        #Choose range in between the 2 resonances
        # plt.xlim(3150, 3650)
@@ -844,7 +847,7 @@
 
            _mean_histo = float(np.mean(_y))
 
-            plt.ylim(0.0, 2*_mean_histo)
+            plt.ylim(0.0, 2.5*_mean_histo)
 
            prepare_plot("Binned toy data")
@@ -869,15 +872,21 @@
 
                _.append(x_min+(x_max-x_min)/bins*i)
 
-            area, err = integrate.quad(self.total_pdf, x_min**2, x_max**2, limit = 300)
-            _mean = area/(x_max-x_min)
-            self.param_val[0] = self.param_val[0]*_mean_histo/_mean
+            # area, err = integrate.quad(lambda x:self.total_nonres(x, absolut = True), self.x_min**2, self.x_max**2, limit = 300)
+            # _mean = area/(x_max-x_min)
+            # self.param_val[0] = self.param_val[0]*44000.0/_mean
+            self.param_val[0] *= self.number_of_decays
 
-            plt.ylim(0.0, 2*_mean_histo)
+            _dgamma_tot = []
 
-            plt.hist(_, bins=bins, range=(x_min, x_max), weights = _y, label = "toy data binned (scaled down to pdf)")
+            for i in dq:
+                _dgamma_tot.append(self.total_pdf(i**2))
 
-            plt.plot(dq, dgamma_tot, label = "pdf")
+            plt.ylim(0.0, 2.5*_mean_histo)
+
+            plt.hist(_, bins=bins, range=(x_min, x_max), weights = _y, label = "toy data binned")
+
+            plt.plot(dq, _dgamma_tot, label = "pdf scaled up to match histo")
 
            self.param_val[0] = __
@@ -1080,9 +1089,9 @@
 
        self.total_pdf_string += " + self.total_nonres(q2)"
 
-    def log_likelihood(self, npar, par, part_set, unbinned_data = True, resolution = 7.0): #Replaced soon with TMinuit
+    def log_likelihood(self, npar, apar, part_set, unbinned_data = True, resolution = 7.0): #Replaced soon with TMinuit
 
-        self.param_val = par[1,:]
+        self.param_val = apar
 
        if unbinned_data:
@@ -1116,7 +1125,7 @@
 
        return ll
 
-    def fit_pdf_to_data(self, part_set, par, err):
+    def fit_pdf_to_data(self, part_set):
 
        x_part = part_set["x_part"]
@@ -1126,99 +1135,127 @@
 
        name = self.param_str
 
+        self.param_val[0] *= self.number_of_decays
+        vstart = arr("d", self.param_val)
+        amp_err = np.sqrt(self.param_val[0])/20
+
+        cusp_m_err = 10
+        step = arr( 'd', ( amp_err, 0, cusp_m_err, 1.0, 0.1, 1e-7, 0, 0, 0, self.param_val[9]/50, 0, 0, 0, self.param_val[13] ))
 
        npar = len(vstart)
 
-        def fcn(npar, f, x_part, iflag):
-            self.log_likelihood(npar, par, err, part_set, unbinned_data = True, resolution = 7.0)
+        def fcn(npar, f, apar, iflag):
+            f[0] = self.log_likelihood(npar, apar, part_set, unbinned_data = True, resolution = 7.0)
+            # return f[0]
 
-
 #__________________________________________________________________________________
 
+        gMinuit = ROOT.TMinuit(npar)
+        gMinuit.SetFCN( fcn )
 
-        gMinuit = ROOT.TMinuit(13)
-        gMinuit.SetFCN( self.log_likelihood(part_set = part_set, ) )
+        arglist = arr( 'd', npar*[0.] )
+        ierflg = ROOT.Long(1998)
 
-        arglist = arr( 'd', 15*[0.] )
+        arglist[0] = 1
        gMinuit.mnexcm( "SET ERR", arglist, 1, ierflg )
 
-        ierflg = 0
-
-        arglist = arr( 'd', (6000, 0.3) )
-
-        amp_err = np.sqrt(self.param_val[0])
-        cusp_m_err = np.sqrt(self.param_val[2])
-
-        vstart = arr( 'd', self.param_val )
-        step = arr( 'd', ( amp_err, 0, cusp_m_err, 1.0, 0.1, 1e-7, 0, 0, 0, self.param_val[9]/50, 0, 0, 0, self.param_val[13] ))
-
        for i in range(len(self.param_val)):
-            gMinuit.mnparm( i, "a{0}".format(i), vstart[i], step[i], 0, 0, ierflg )
+            gMinuit.mnparm( i, "a{0}".format(i), vstart[i], step[i], 0, 0, ierflg )
+
+        arglist[0] = 6000
+        arglist[1] = 1.
 
        gMinuit.mnexcm( "MIGRAD", arglist, 2, ierflg )
 
+        amin, edm, errdef = 0.18, 0.19, 0.20
+        nvpar, nparx, icstat = 1983, 1984, 1985
+        gMinuit.mnstat( amin, edm, errdef, nvpar, nparx, icstat )
+        gMinuit.mnprin( 3, amin )
 
 #__________________________________________________________________________________
-
-        Error = 0;
-        z = array( 'f', ( 1., 0.96, 0.89, 0.85, 0.78 ) )
-        errorz = array( 'f', 5*[0.01] )
-
-        x = array( 'f', ( 1.5751, 1.5825, 1.6069, 1.6339, 1.6706 ) )
-        y = array( 'f', ( 1.0642, 0.97685, 1.13168, 1.128654, 1.44016 ) )
-
-        ncount = 0
-
-        ##______________________________________________________________________________
-        def testfit():
-            #
-            # gMinuit = TMinuit(5)
-            # gMinuit.SetFCN( fcn )
-            #
-            # arglist = array( 'd', 10*[0.] )
-            # ierflg = 1982
-            #
-            # arglist[0] = 1
-            # gMinuit.mnexcm( "SET ERR", arglist, 1, ierflg )
-
-            # Set starting values and step sizes for parameters
-            vstart = array( 'd', ( 3, 1, 0.1, 0.01 ) )
-            step = array( 'd', ( 0.1, 0.1, 0.01, 0.001 ) )
-            gMinuit.mnparm( 0, "a1", vstart[0], step[0], 0, 0, ierflg )
-            gMinuit.mnparm( 1, "a2", vstart[1], step[1], 0, 0, ierflg )
-            gMinuit.mnparm( 2, "a3", vstart[2], step[2], 0, 0, ierflg )
-            gMinuit.mnparm( 3, "a4", vstart[3], step[3], 0, 0, ierflg )
-
-            # Now ready for minimization step
-            arglist[0] = 500
-            arglist[1] = 1.
-            gMinuit.mnexcm( "MIGRAD", arglist, 2, ierflg )
-
-            # Print results
-            amin, edm, errdef = 0.18, 0.19, 0.20
-            nvpar, nparx, icstat = 1983, 1984, 1985
-            gMinuit.mnstat( amin, edm, errdef, nvpar, nparx, icstat )
-            gMinuit.mnprin( 3, amin )
-
-
-        ##______________________________________________________________________________
-        # def fcn( npar, gin, f, par, iflag ):
-        global ncount
-        # nbins = 5
-        #
-        # # calculate chisquare
-        # chisq, delta = 0., 0.
-        # for i in range(nbins):
-        #     delta = (z[i]-func(x[i],y[i],par))/errorz[i]
-        #     chisq += delta*delta
-        #
-        # f[0] = chisq
-        ncount += 1
-        #
-        # def func( x, y, par ):
-        #     value = ( (par[0]*par[0])/(x*x)-1)/ ( par[1]+par[2]*y-par[3]*y*y)
-        #     return value
-
-
-        ##______________________________________________________________________________
-        if __name__ == '__main__':
-            testfit()
+        # gMinuit = ROOT.TMinuit(13)
+        # gMinuit.SetFCN( self.log_likelihood(part_set = part_set, ) )
+        # arglist = arr( 'd', 15*[0.] )
+        # gMinuit.mnexcm( "SET ERR", arglist, 1, ierflg )
+        #
+        # ierflg = 0
+        #
+        # arglist = arr( 'd', (6000, 0.3) )
+        #
+        # amp_err = np.sqrt(self.param_val[0])
+        # cusp_m_err = np.sqrt(self.param_val[2])
+        #
+        # vstart = arr( 'd', self.param_val )
+        # step = arr( 'd', ( amp_err, 0, cusp_m_err, 1.0, 0.1, 1e-7, 0, 0, 0, self.param_val[9]/50, 0, 0, 0, self.param_val[13] ))
+        #
+        # for i in range(len(self.param_val)):
+        #     gMinuit.mnparm( i, "a{0}".format(i), vstart[i], step[i], 0, 0, ierflg )
 
-        # gMinuit.mnexcm( "MIGRAD", arglist, 2, ierflg )
+        #
+        # #__________________________________________________________________________________
+        #
+        # Error = 0;
+        # z = array( 'f', ( 1., 0.96, 0.89, 0.85, 0.78 ) )
+        # errorz = array( 'f', 5*[0.01] )
+        #
+        # x = array( 'f', ( 1.5751, 1.5825, 1.6069, 1.6339, 1.6706 ) )
+        # y = array( 'f', ( 1.0642, 0.97685, 1.13168, 1.128654, 1.44016 ) )
+        #
+        # ncount = 0
+        #
+        # ##______________________________________________________________________________
+        # def testfit():
+        #     #
+        #     # gMinuit = TMinuit(5)
+        #     # gMinuit.SetFCN( fcn )
+        #     #
+        #     # arglist = array( 'd', 10*[0.] )
+        #     # ierflg = 1982
+        #     #
+        #     # arglist[0] = 1
+        #     # gMinuit.mnexcm( "SET ERR", arglist, 1, ierflg )
+        #
+        #     # Set starting values and step sizes for parameters
+        #     vstart = array( 'd', ( 3, 1, 0.1, 0.01 ) )
+        #     step = array( 'd', ( 0.1, 0.1, 0.01, 0.001 ) )
+        #     gMinuit.mnparm( 0, "a1", vstart[0], step[0], 0, 0, ierflg )
+        #     gMinuit.mnparm( 1, "a2", vstart[1], step[1], 0, 0, ierflg )
+        #     gMinuit.mnparm( 2, "a3", vstart[2], step[2], 0, 0, ierflg )
+        #     gMinuit.mnparm( 3, "a4", vstart[3], step[3], 0, 0, ierflg )
+        #
+        #     # Now ready for minimization step
+        #     arglist[0] = 500
+        #     arglist[1] = 1.
+        #     gMinuit.mnexcm( "MIGRAD", arglist, 2, ierflg )
+        #
+        #     # Print results
+        #     amin, edm, errdef = 0.18, 0.19, 0.20
+        #     nvpar, nparx, icstat = 1983, 1984, 1985
+        #     gMinuit.mnstat( amin, edm, errdef, nvpar, nparx, icstat )
+        #     gMinuit.mnprin( 3, amin )
+        #
+        #
+        # ##______________________________________________________________________________
+        # # def fcn( npar, gin, f, par, iflag ):
+        # global ncount
+        # # nbins = 5
+        # #
+        # # # calculate chisquare
+        # # chisq, delta = 0., 0.
+        # # for i in range(nbins):
+        # #     delta = (z[i]-func(x[i],y[i],par))/errorz[i]
+        # #     chisq += delta*delta
+        # #
+        # # f[0] = chisq
+        # ncount += 1
+        # #
+        # # def func( x, y, par ):
+        # #     value = ( (par[0]*par[0])/(x*x)-1)/ ( par[1]+par[2]*y-par[3]*y*y)
+        # #     return value
+        #
+        #
+        # ##______________________________________________________________________________
+        # if __name__ == '__main__':
+        #     testfit()
diff --git a/raremodel.pyc b/raremodel.pyc
index bad569f..f2816fe 100644
--- a/raremodel.pyc
+++ b/raremodel.pyc
Binary files differ
diff --git a/test.py b/test.py
index 31bdf6f..2e62abb 100644
--- a/test.py
+++ b/test.py
@@ -17,7 +17,7 @@
 
 modl = rm.model()
 
-load_set = False
+load_set = True
 
 draw = True
 step_scan = False
@@ -26,10 +26,10 @@
 modl.mode = mode
 
 set_size = 1e5
-nonres_set_size = 44000
+nonres_set_size = 43882
 # nonres_set_size = 1000
 modl.reco_steps = 10000
-nr_of_toys = 10
+nr_of_toys = 1
 
 x_min = 3150.0
 x_max= 3650.0
@@ -64,7 +64,7 @@
 
    if mode == "true_data":
 
-        with open(r"./data/set_true_tot_nonres_{0}_range({1}-{2}).pkl".format(int(nonres_set_size), int(x_min), int(x_max)), "rb") as input_file:
+        with open(r"./data/true_data/true_data_toy_{0}_range({1}-{2}).pkl".format(0, int(x_min), int(x_max)), "rb") as input_file:
            part_set = pkl.load(input_file)
 
    else:
@@ -80,4 +80,6 @@
 if draw:
    modl.draw_plots(part_set = part_set, x_min = x_min, x_max = x_max, mode = mode)
 
+modl.fit_pdf_to_data(part_set)
+
 print("Run finished")
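
Note on the new TMinuit driver in fit_pdf_to_data(): the pattern is the standard PyROOT one, wrap the objective in an fcn callback that writes its value into f[0], declare the parameters with mnparm, then run MIGRAD through mnexcm. Below is a minimal, self-contained sketch of that pattern only; the toy data, the quadratic stand-in objective and the parameter names are invented for illustration and are not the model in raremodel.py. It targets the classic PyROOT interface that the diff itself uses (array('d', ...), ROOT.Long); newer ROOT releases (>= 6.22) expect ctypes.c_int for the by-reference integer instead.

from array import array as arr
import ROOT

data = [1.02, 0.97, 1.05, 0.93, 1.08]       # invented toy measurements

def chi2(par):
    # stand-in objective: chi-square of the data against a constant par[0]
    # with a fixed 0.05 uncertainty; in the real code this role is played by
    # self.log_likelihood(npar, apar, part_set, ...)
    return sum(((x - par[0]) / 0.05) ** 2 for x in data)

def fcn(npar, gin, f, par, iflag):
    # TMinuit callback: Minuit reads the objective value back from f[0]
    f[0] = chi2(par)

vstart = arr('d', [1.0])                    # starting value for par[0]
step   = arr('d', [0.1])                    # initial step size

gMinuit = ROOT.TMinuit(len(vstart))
gMinuit.SetFCN(fcn)

arglist = arr('d', 10 * [0.])
ierflg  = ROOT.Long(0)                      # ctypes.c_int(0) on ROOT >= 6.22

arglist[0] = 1                              # SET ERR 1 -> 1-sigma errors for a chi-square
gMinuit.mnexcm("SET ERR", arglist, 1, ierflg)

for i in range(len(vstart)):
    gMinuit.mnparm(i, "a{0}".format(i), vstart[i], step[i], 0, 0, ierflg)

arglist[0] = 6000                           # max function calls
arglist[1] = 1.                             # tolerance
gMinuit.mnexcm("MIGRAD", arglist, 2, ierflg)

PyROOT's own TMinuit examples use the five-argument callback signature fcn(npar, gin, f, par, iflag), which is what the sketch follows; and when the objective is a negative log-likelihood rather than a chi-square, SET ERR should be 0.5 instead of 1.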
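
A second note on the relocated pickle outputs: the dump calls now write into subdirectories (./data/slim_points/, ./data/true_data/, ./data/fast_binned/) and test.py reads the toy back from ./data/true_data/. The round trip is sketched below; the dictionary keys and the path template are the ones from the diff, while the dummy values and the makedirs guard are illustrative additions. open(..., "wb") does not create missing directories, so they have to exist before the first dump.

import os
import pickle as pkl

x_min, x_max, toy_nr = 3150.0, 3650.0, 0

# the dump now targets a subdirectory, so make sure it exists first
out_dir = "./data/true_data"
if not os.path.isdir(out_dir):
    os.makedirs(out_dir)

# same keys as the enlarged part_set written in raremodel.py; values are dummies
part_set = {"x_part": [3200.0, 3415.5, 3601.2],
            "counter_x_nr": 2,
            "x_min": x_min,
            "x_max": x_max,
            "mode": "true_data"}

fname = "./data/true_data/true_data_toy_{0}_range({1}-{2}).pkl".format(
    toy_nr, int(x_min), int(x_max))

with open(fname, "wb") as output_file:
    pkl.dump(part_set, output_file)

# mirrors the updated load in test.py
with open(fname, "rb") as input_file:
    part_set = pkl.load(input_file)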