diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..56ad73b --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +*checkpoints* +*test* +architectures/__pycache__ +architectures/utils/__pycache__ +architectures/utils/._* +architectures/._* diff --git a/HCAL_bicycleGAN.ipynb b/HCAL_bicycleGAN.ipynb new file mode 100644 index 0000000..aa7d171 --- /dev/null +++ b/HCAL_bicycleGAN.ipynb @@ -0,0 +1,943 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import scipy as sp\n", + "import numpy as np\n", + "import os \n", + "import pickle\n", + "\n", + "import tensorflow as tf\n", + "import matplotlib.pyplot as plt\n", + "from datetime import datetime\n", + "\n", + "from architectures.bicycle_GAN import *" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "task='TRAIN'\n", + "#task='TEST'\n", + "\n", + "# Option to save and restore hyperparameters\n", + "\n", + "PATH='HCAL_bycicleGAN_test35'" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "if task =='TRAIN' and os.path.exists(PATH+'/hyper_parameters.pkl'):\n", + " with open(PATH+'/hyper_parameters.pkl', 'rb') as f: \n", + " hyper_dict = pickle.load(f)\n", + " for key, item in hyper_dict.items():\n", + " print(key+':'+str(item))\n", + " \n", + " reco_path = hyper_dict['reco_path']\n", + " true_path = hyper_dict['true_path']\n", + " n_batches = hyper_dict['n_batches']\n", + " test_size = hyper_dict['test_size']\n", + " LEARNING_RATE = hyper_dict['LEARNING_RATE']\n", + " BETA1 = hyper_dict['BETA1']\n", + " BATCH_SIZE = hyper_dict['BATCH_SIZE']\n", + " EPOCHS = hyper_dict['EPOCHS']\n", + " SAVE_SAMPLE_PERIOD = hyper_dict['SAVE_SAMPLE_PERIOD']\n", + " SEED = hyper_dict['SEED']\n", + " d_sizes = hyper_dict['d_sizes']\n", + " g_sizes_enc = hyper_dict['g_sizes_enc']\n", + " g_sizes_dec = hyper_dict['g_sizes_dec']\n", + " 
e_sizes = hyper_dict['e_sizes']\n", + " preprocess = hyper_dict['preprocess']\n", + " cost_type = hyper_dict['cost_type']\n", + " validating_size=hyper_dict['validating_size']\n", + " cycl_weight=hyper_dict['cycl_weight']\n", + " latent_weight=hyper_dict['latent_weight']\n", + " kl_weight=hyper_dict['kl_weight']\n", + " discr_steps=hyper_dict['discr_steps']\n", + " gen_steps=hyper_dict['gen_steps']\n", + " vae_steps=hyper_dict['vae_steps']\n", + " \n", + "elif task=='TRAIN' and not os.path.exists(PATH+'/hyper_parameters.pkl'):\n", + " \n", + " reco_path = '/disk/lhcb_data/davide/HCAL_project/piplus_cells_inout/piplus/reco/'\n", + " true_path = '/disk/lhcb_data/davide/HCAL_project/piplus_cells_inout/piplus/true/'\n", + " #reco_path = '/disk/lhcb_data/davide/HCAL_project_full_event/reco/'\n", + " #true_path = '/disk/lhcb_data/davide/HCAL_project_full_event/true/'\n", + " n_batches = 1\n", + " test_size = 5000\n", + " validating_size=1000\n", + " \n", + " LEARNING_RATE = 2e-4\n", + " BETA1 = 0.5\n", + " BATCH_SIZE = 16\n", + " EPOCHS = 4\n", + " SAVE_SAMPLE_PERIOD = 200\n", + " SEED = 1\n", + " preprocess=False\n", + " cost_type='GAN'\n", + " \n", + " latent_weight=100\n", + " cycl_weight=10\n", + " kl_weight=1\n", + " discr_steps=1\n", + " gen_steps=4\n", + " vae_steps=4\n", + " \n", + " latent_dims=16\n", + " ndf = 16\n", + " ngf = 16\n", + " nef = 16\n", + " s = 2\n", + " f = 4\n", + " d = 0.8\n", + " \n", + " stddev_d=0.02\n", + " stddev_g=0.02\n", + " stddev_e=0.02\n", + "\n", + "\n", + " g_sizes_enc={\n", + " 'latent_dims':latent_dims,\n", + " \n", + " 'conv_layers': [\n", + " (ngf/2, f, s, False, 1, lrelu, tf.truncated_normal_initializer(stddev_g)), #(batch, 52, 64, 1) => (batch, 26, 32, ngf)\n", + " (ngf, f, s, 'bn', d, lrelu, tf.truncated_normal_initializer(stddev_g)),#(batch, 26, 32, ngf) => (batch, 13, 16, ngf*2)\n", + " (ngf*2, f, s, 'bn', 1, lrelu, tf.truncated_normal_initializer(stddev_g)),#(batch, 13, 16, ngf*4) => (batch, 7, 8, ngf*4)\n", + " (ngf*4, 
f, s, 'bn', 1, lrelu, tf.truncated_normal_initializer(stddev_g)),#(batch, 7, 8, ngf*4) => (batch, 4, 4, ngf*4)\n", + " (ngf*8, f, s, 'bn', d, lrelu, tf.truncated_normal_initializer(stddev_g)),#(batch, 4, 4, ngf*4) => (batch, 2, 2, ngf*4)\n", + " #(ngf*8, f, s, 'bn', 1, lrelu, tf.truncated_normal_initializer(stddev_g)),#(batch, 2, 2, ngf*4) => (batch, 1, 1, ngf*4)\n", + " \n", + " ],\n", + " }\n", + " \n", + " g_sizes_dec={\n", + " \n", + " \n", + " 'deconv_layers': [\n", + " \n", + " (ngf*4, f, s, 'bn', 1, tf.nn.softplus, tf.truncated_normal_initializer(stddev_g)),#(batch, 1, 1, ngf*4) => (batch, 2, 2, ngf*4*2)\n", + " (ngf*2, f, s, 'bn', d, tf.nn.softplus, tf.truncated_normal_initializer(stddev_g)),#(batch, 2, 2, ngf*4*2) => (batch, 4, 4, ngf*4*2)\n", + " (ngf, f, s, 'bn', 1, tf.nn.softplus, tf.truncated_normal_initializer(stddev_g)),#(batch, 4, 4, ngf*4*2) => (batch, 7, 8, ngf*4*2)\n", + " (ngf/2, f, s, 'bn', 1, tf.nn.softplus, tf.truncated_normal_initializer(stddev_g)),#(batch, 7, 8, ngf*4*2) => (batch, 13, 16, ngf*2*2)\n", + " (1, f, s, False, d, tf.nn.softplus, tf.truncated_normal_initializer(stddev_g)),#(batch, 26, 32, ngf*2) => (batch, 52, 64, 1)\n", + " \n", + " ], \n", + " }\n", + " \n", + " \n", + " d_sizes={\n", + " \n", + " 'conv_layers': [\n", + " (ndf/2, f, s, False, 1, lrelu, tf.truncated_normal_initializer(stddev_d)), #(batch, 52, 64, 2) => (batch, 26, 32, ndf)\n", + " (ndf, f, s, 'bn', d, lrelu, tf.truncated_normal_initializer(stddev_d)), #(batch, 26, 32, ndf) => (batch, 13, 16, ndf*2)\n", + " (ndf*2, f, s, 'bn', 1, lrelu, tf.truncated_normal_initializer(stddev_d)), #(batch, 13, 16, ndf*2) => (batch, 7, 8, ndf*4)\n", + " (ndf*4, f, s, 'bn', d, lrelu, tf.truncated_normal_initializer(stddev_d)), #(batch, 7, 8, ndf*4) => (batch, 7, 8, ndf*8)\n", + " (ndf*8, f, 1, 'bn', 1, lrelu, tf.truncated_normal_initializer(stddev_d)), #(batch, 7, 8, ndf*8) => (batch, 7, 8, ndf*8)\n", + " \n", + " ],\n", + " \n", + " \n", + " 'dense_layers': [\n", + " (ndf*32, 
'bn', d, lrelu, tf.truncated_normal_initializer(stddev_d)),\n", + " #(ndf*4, 'bn', d, lrelu, tf.truncated_normal_initializer(stddev_d)), \n", + " (ndf, False, d, lrelu, tf.truncated_normal_initializer(stddev_d))],\n", + " \n", + " 'readout_layer_w_init':tf.truncated_normal_initializer(stddev_d),\n", + " }\n", + " \n", + "\n", + " \n", + " e_sizes={\n", + " 'latent_dims':latent_dims,\n", + " \n", + " #'conv_layer_0':[\n", + " # (nef, f, s, False, 1,lrelu, tf.truncated_normal_initializer(stddev_e)),\n", + " # ],\n", + " 'conv_layers':[\n", + " \n", + " (nef/2, f, s, False, 1, lrelu, tf.random_normal_initializer(stddev_e)),\n", + " (nef, f, s, 'bn', 1, lrelu, tf.random_normal_initializer(stddev_e)),\n", + " (nef*2, f, s, 'bn', d, lrelu, tf.random_normal_initializer(stddev_e)),\n", + " (nef*4, f, s, 'bn', 1, lrelu, tf.random_normal_initializer(stddev_e)),\n", + " (nef*8, f, s, 'bn', d, lrelu, tf.random_normal_initializer(stddev_e)),\n", + " ],\n", + "\n", + " \n", + " #'convblock_layer_0':[\n", + " # (nef*2, 1, s, 'bn', 1, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 26, 32, ndf) => (batch, 13, 16, ndf*2)\n", + " # (nef*2, f, 1, 'bn', d, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 13, 16, ndf*2) => (batch, 13, 16, ndf*2)\n", + " # (nef*2, 1, 1, 'bn', 1, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 13, 16, ndf*2) => (batch, 13, 16, ndf*2)\n", + " # \n", + " # ],\n", + " #'convblock_shortcut_layer_0':[\n", + " # (nef*2, 1, s, False, 1, tf.random_normal_initializer(stddev_e))\n", + " # ],\n", + " #\n", + " #'convblock_layer_1':[\n", + " # (nef*4, 1, s, 'bn', 1, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 26, 32, ndf) => (batch, 13, 16, ndf*2)\n", + " # (nef*4, f, 1, 'bn', d, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 13, 16, ndf*2) => (batch, 13, 16, ndf*2)\n", + " # (nef*4, 1, 1, 'bn', 1, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 13, 16, ndf*2) => (batch, 13, 16, ndf*2)\n", + " # \n", + " # 
],\n", + " #'convblock_shortcut_layer_1':[\n", + " # (nef*4, 1, s, False, 1, tf.random_normal_initializer(stddev_e))\n", + " # ],\n", + " #'convblock_layer_2':[\n", + " # (nef*4, 1, s, 'bn', 1, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 26, 32, ndf) => (batch, 13, 16, ndf*2)\n", + " # (nef*4, f, 1, 'bn', d, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 13, 16, ndf*2) => (batch, 7, 8, ndf*4)\n", + " # (nef*4, 1, 1, 'bn', 1, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 7, 8, ndf*4) => (batch, 7, 8, ndf*8)\n", + " # \n", + " # ],\n", + " #\n", + " #'convblock_shortcut_layer_2':[\n", + " # (nef*4, 1, s, False, 1, tf.random_normal_initializer(stddev_e))\n", + " # ],\n", + " \n", + " # 'convblock_layer_3':[\n", + " # (nef*8, 1, s, 'bn', 1, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 26, 32, ndf) => (batch, 13, 16, ndf*2)\n", + " # (nef*8, f, 1, 'bn', 1, lrelu, tf.random_normal_initializer(stddev_e)), #(batch, 13, 16, ndf*2) => (batch, 7, 8, ndf*4)\n", + " # (nef*8, 1, 1, 'bn', 1, tf.nn.relu,tf.random_normal_initializer(stddev_e)), #(batch, 7, 8, ndf*4) => (batch, 7, 8, ndf*8)\n", + " # \n", + " # ],\n", + " # 'convblock_shortcut_layer_3':[\n", + " # (nef*8, 1, s, False, 1, tf.random_normal_initializer(stddev_e))\n", + " # ],\n", + "\n", + " \n", + " 'dense_layers':[ \n", + " (nef*16, 'bn', d, lrelu, tf.random_normal_initializer(stddev_e)),\n", + " (nef*8, 'bn', d, lrelu, tf.random_normal_initializer(stddev_e)),\n", + " (nef, False, d, lrelu, tf.random_normal_initializer(stddev_e)),\n", + " \n", + " ],\n", + " 'readout_layer_w_init':tf.random_normal_initializer(stddev_e)\n", + " \n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "if task == 'TEST' and os.path.exists(PATH+'/hyper_parameters.pkl'):\n", + " with open(PATH+'/hyper_parameters.pkl', 'rb') as f: \n", + " hyper_dict = pickle.load(f)\n", + " for key, item in hyper_dict.items():\n", + " 
print(key+':'+str(item))\n", + " \n", + " reco_path = hyper_dict['reco_path']\n", + " true_path = hyper_dict['true_path']\n", + " #true_path_K = hyper_dict['true_path_K']\n", + " n_batches = hyper_dict['n_batches']\n", + " test_size = hyper_dict['test_size']\n", + " LEARNING_RATE = hyper_dict['LEARNING_RATE']\n", + " BETA1 = hyper_dict['BETA1']\n", + " BATCH_SIZE = hyper_dict['BATCH_SIZE']\n", + " EPOCHS = hyper_dict['EPOCHS']\n", + " SAVE_SAMPLE_PERIOD = hyper_dict['SAVE_SAMPLE_PERIOD']\n", + " SEED = hyper_dict['SEED']\n", + " d_sizes = hyper_dict['d_sizes']\n", + " g_sizes_enc = hyper_dict['g_sizes_enc']\n", + " g_sizes_dec = hyper_dict['g_sizes_dec']\n", + " e_sizes = hyper_dict['e_sizes']\n", + " preprocess = hyper_dict['preprocess']\n", + " cost_type = hyper_dict['cost_type']\n", + " validating_size=hyper_dict['validating_size']\n", + " cycl_weight=hyper_dict['cycl_weight']\n", + " latent_weight=hyper_dict['latent_weight']\n", + " kl_weight=hyper_dict['kl_weight']\n", + " discr_steps=hyper_dict['discr_steps']\n", + " gen_steps=hyper_dict['gen_steps']\n", + " vae_steps=hyper_dict['vae_steps']" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "dim=1\n", + "select=False\n", + "if preprocess=='normalise':\n", + " train_true, test_true, min_true, max_true, train_reco, test_reco, min_reco, max_reco = load_data(true_path, reco_path, n_batches, select=select, n_cells=dim*dim, energy_fraction=1, preprocess=preprocess, test_size=test_size)\n", + "else:\n", + " train_true, test_true, train_reco, test_reco = load_data(true_path, reco_path, n_batches, select=select, n_cells=None, energy_fraction=1, preprocess=preprocess, test_size=test_size)\n", + " \n", + "train_true, train_reco = delete_undetected_events_double(train_true, train_reco)\n", + "test_true, test_reco = delete_undetected_events_double(test_true, test_reco)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": 
[ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfsAAACtCAYAAAC3K9aMAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAHJJJREFUeJzt3XncHVWd5/HPl4QQZV9kDRBkUUFbULRBXBgBAQWlp8FGESJjQ7fSioMLi+IyozYoA4wzLTYtTliNCAiIKLKLjaaNI6jsO4kJm2SBsIb8+o9zLqnn5q7P3ev5vl+v+8qtqlNVv1t57jn3nFN1jiICMzMzK69VBh2AmZmZ9ZYLezMzs5JzYW9mZlZyLuzNzMxKzoW9mZlZybmwNzMzKzkX9mZWl6TbJO3eg+NOlxSSJo9z/xMkfa/bcZmVlQt7szokPShpz6p1H5X0q6p1H5Y0R9LTkhZI+pmkt9fYLyR9sGr97pLmtRjPDfkYb6xaf2lev3th3XaSfiTpCUmLJf1B0jGSJrX48QGIiB0i4oZ29um2WtcoIr4REX8/qJjMRo0Le7MOSDoGOB34BrARsAXwHeADVUlnAE/mfztxN3BY4fzrA7sAjxfWbQ3MBuYCb4iItYGDgJ2BNTs8v5mNIBf2ZuMkaW3gfwBHRcQlEbE0Il6MiJ9ExOcK6bYE3gUcCewtaaMOTns+8HeFGvqHgB8DLxTSfBW4OSKOiYgFABFxV0R8OCIW1fgcG0i6QtIiSU9KuknSKnnby60bkr6SWwvOk/SUpD/mFoTjJT0maa6k9xSOO6ZlJO9/Xq0PJelwSXfk494v6R/y+tWBnwGb5paTpyVtWn0sSe/PXQ6LcgvI66ri+Gxu3Vgs6YeSprZ32c1Gmwt7s/HbFZhKKmwbOQyYExEXA3cAh3RwzvnA7UClUD0MOKcqzZ7ARW0c8zPAPOBVpNaJE4B642jvD5wLrAv8HriKlI9sRvrh869tnLfoMWA/YC3gcOA0SW+KiKXAvsD8iFgjv+YXd5S0HfAD4NP5M1wJ/ETSlEKyDwL7AFsBfwV8dJxxmo0kF/ZmjV2aa4uLJC0iNdFXrA88ERHLmhzjMOCC/P4COm/KPwc4TNJrgHUi4tdV29cHFrRxvBeBTYAtc8vETVF/0oybIuKq/Jl/RCpcT4qIF4FZwHRJ67T1aYCI+GlE3BfJjcAvgHe0uPvfAT+NiKtzHKcArwDeVkjz7YiYHxFPAj8Bdmw3RrNR5sLerLEDImKdygv4RGHbX4ANGt1RLmk3Um1yVl51AfAGSZ0UNpcA7wY+SaplV/sLqfBu1beAe4Ff5Cb04xqkfbTw/lnSj52XCssAa7RxbgAk7SvpN7kbYRHwXmCDFnffFHioshARy0n3K2xWSPNI4f0z44nRbJS5sDcbv18DzwEHNEgzAxBwi6RHSDfOQeEmu3ZFxDOkfuyPU7uwvwb42zaO91REfCYiXk1qpj9G0h7jja9gKfDKwvLGtRJJWg24mFQj3yj/qLqSdN2gfpdCxXxgy8LxBGwO/Hl8YZuVjwt7s3GKiMXAl4B/kXSApFdKWjXXUr+ZbwL7IOnGvB0Lr08ChxRbBCRNrXqpximLTgDeFREP1tj2ZeBtkr4laeN8/G3yjXUrNbFL2i9vF7AEeCm/OnULcHC+JjsDB9ZJNwVYjfREwTJJ+7LingRIrQnr5xsia7kQeJ+kPSStSroH4Xng5i58BrNScGFv1oGIOBU4BvgiqbCaC/wTcCmpxv8scE5EPFJ5AWcBk0g3jEFqbn626rV1k/POj4hf1dl2H+nmwenAbZIWk2rOc4CnauyyLak14GlSa8V3uvRs/Ymkz7GQ9ITABbUSRcRTwKdIhfZC4MPA5YXtd5JuwLs/3zuxadX+dwEfAf4P8ASpdWL/iCg+oWA2oan+fThmZmZWBq7Zm5mZlZwLezMzs5JzYW
9mZlZyLuzNzMxKbuCFfWG866clLZf0bGG5k2FFWz3/eZJeqIrjdw3S715It1RptrHivpvW27dwjAslPSJpiaQ7JRUnNlld0sWSHsrH3qVq31UknSZpodKMZl8rbNuzKpZKfO9rEs+G+XjXVK3/iKS7tGIc9LrHkTQrn+s9Veu/m9cf3CQGSXpA0odrbDtWVTPNmVlzeV6ASp76iKSZkvo6oFA+Z3Uee2uD9O9oksdu0eR8k6rSPy3pJUmnFdKsnvOmvyjNl3B9Ydtncl60RNKfJf0v5cdkJW2c87oFeb+bJL2lQSxfy/F/omr9Z/P6L7Zw/a6V9KUa6/82x9daOR4RQ/MCHgT2bJJmcpfPeR7wlXHuu026hG3vtwMwpfD+ceD1efmVpMeQdiPNkrZL1b5HA38ijZC2BXAP8NE659mH9CjTak3iORf4JXBNYd1WpGeV9yANbvJfSYOkrFPnGLOAu4DzC+tWI41c9gBwcAvX5avAz2us/xNwxKD/Pv3ya9RexTyVNKjRrcDX+xzDTOBr49x3OmlQpXHn+6T5Fp4B3lZYNyvn/RuQHoN9c2Hb1sDa+f0GwI3Ap/LyNqQ5GDbO+32cNK/DK+uc+2s5X5xdtf7WvP6LLcR/KHBPjfWXAie3eh0GXrNvJv8y+qGkH0h6CvhIro1/pZBmT0kPFpanSfqxpMfzL7SjBhB6XRFxW6x4BjhIhemr87ZnIuLbEfHvwPIau88AvhkRCyLiYdL0qh+tc6oZwKyIeL5eLEpzoG/Bys9AbwE8GhHXRnJJjmerBh/tEmBPSZVpVPcnPbf9l6pz/kNuMXhS0k8lVYY1PQfYQ9ImhbRvIn35LmxwXjNrItIYD1dRmBdA0mqSTpH0sKRHc233FYXtH5B0S67l3idpn7x+U0mX5+/wvZKO6P8natlBwJ8j4mYASTuQJlf6x4h4IiJeioiXW3MjzdGwuLD/clIhT0TcGxGnRxoz46WIOANYnTRWRT2/BtZTmssCpaGyVyFNJPUypZkbb1UaS+JXkl6fN10CbCzpbYW065OGlK6eBKuuoS/ss78hFUZrAz9slFBp6s8rgN+SBivZC/icujP8Z0skfVlSw1nHJJ0l6VngNuA+4OoWD7896Vdhxa2k1oHq469NmlP97AYxrAp8mzQITLWbgbmS9s7NYh8kFdp3NIhtKSkzqYyUttKMbLk5/9OkHwIbkf7gz4OXB4P5DWNnhTsUuKzqy2dmbZI0jVTI3VtYfTKwHekHwDakPPNLOf1bSd/fzwHrAO8ktRRAGuRoHmleggOBb/Q5j/2CpEtbTD6DsfngXwP3A19X6gr9g6QxQ15LOjRXLh8n5a9n1oljZ1Jl7f4mMZzLiiGya+WLbwH+Dfh70kRW3wcukzQl0syPFzF2iO2DgT9ExG1NzrtCP5tzWmiueJCqZnxSM8h1VevGNL2TpvR8ML/fDbi/Kv2JwL/VOed5pPHNFxVeZ7UY77ia8Qv7TyLNc34CMKnG9icoNOMDq5JaAqYX1r0BeK7GvkcAdzY5//HAafn9P1Joxi+sewZYRhp5ba8Gx5pFGkVuT+B6UvPXAtJQqHPIzfh52yFVn+lF0pjokP7Y/5DfTyZ1A+w76L9Nv/waxVfOU5/O398AriV3xZEKqaXA1oX0uwIP5Pf/Wskfqo65OWk45TUL6/4ZmFknhpk18tizW4x/Oh0045NaTF8Ctiis+1I+5ok5f3p3vg7b1dj/NcD/BDassW1tUmXtcw3O/7X8+bfK/xersuJH0ixyMz6poP9y1b73Abvl97uTunVXy8uzgU+2cy1GpWY/t420WwJbaOy0pJ+nziQc2UlRmNksIj7WUbQtitQMdCPpl3XTc0aavvN5Uh9UxVrUHgK1+tfsGJK2JBWsX66zfX9SH/rbSF+IvYFzJW3fJMzrSF+w44BLYuUhS7cEvlv4v3mc9GNiWt5+IbBNbup6D+lL+Ysm5zSz+g6IiDVJBcZrWTGb4KtI9wj9rvB9/H
leD6lQv6/G8TYFnow0zHHFQ4ydZbDaKVV5bKfTPLfqMOCGSF2eFc+Sfnx8IyJeiIjrSPcs7VW9c6ShmO8C/m9xvaTVgZ8Cv4yIbzULIiIeAB4GvgHcFhHzq5JsCRxbVW5twopreiOwGNhf0nbATqTWlZbVnZpzyFSP6dtoNq25pJsZXtfzqLpnMk3GQi+4HXgj8Ie8/EbSr8uXSdoa2IXU1FPPrqQv7d1Kc668ApgqaV5ETMvHvS4ibsnpb5Z0C+lX8O31DhoRyyX9gPQDa9caSeaSfglfXGf/Jbl57jDSH/v5sWIKVTMbp4i4UdJM0uyCB5BaDp8FdoiIWjMEzqV2vjSf1Ae9ZqHA34Ihm2VQKWM7lFRpKarkna2OFT8mf1aa4OoyUtP9J+rtVMM5pO6AQ2tsmwt8NSJOrrVjRISkSlfArcCVEfFEG+cemZp9tVtIs1ytm2/m+lRh26+BF/LjE1Nzf/MbJL15MKGOlW9sOVDp0Y/JkvYjTUd6XSHNavkPCmBK4T3kPjSlR0A2J/V/z6w6zQxSQT2vQSiXkpqWKjOxfZ3UNFS5Tr8F/kvlJpHcf7cLK74ojZxC6o6ZXWPbd4EvFm5WWVdS9XSsZ5P67Rvec2BmbTsd2EvSjhGxnNR8fJqkDQEkbSZp75z2LOBwpdkEV8nbXhsRc0n39PxzzmP/itQyef4APk8j7wA2JE0CVXQ9qXvw2JwHvxN4O7kFUdIRheuxA3AsqfsDSVNIN8wtBg6P3KbeogtIrZW1KjpnAkdJeouSNSTtn1sQKs4mPWH13xhHvjiqhf1M0o1iD5GanWZVNkTEMtJdim8l9ZE8Qep7Wqv6IAUnaOwzmY90Epykr0r6cYMkR5N+Hf+FVMh+PCKuKmx/iPSLe31S882zylOVkm6ou5b0+W8BfhQRMwvnrvyaXemPQdLHlMcQiIjnYuxMbEuAFyLi0bz9KuCbwOX5RpUfACdGxC+bff5Id7heV2fbD0hNYpdIWpI/Q3Xz2dWkfvy7IuKPzc5nZq2JiMdJFYYT86pjSTfs/SZ/H68h9VMTEf8BHA6cRircbiQ1NwN8iNSfPh/4Mam/udFNxp+vymPbqpVWk3SipJ80STYDuCjSDW4vy12L7ydVJhYDZ5DuI7onJ3kn8CdJS0k3e1/Oiuv1DtJNjvsCiwufp1Yr5hiRnrS6JiKeq7FtNukxvjNIj0vfTZrJsZjmPuA/gKmkLoS2eNY7MzOzkhvVmr2ZmZm1aCCFvaR9lAZVuVfScYOIoRlJM7TykIsNh3k0M+ulUcg7WyXpkDp5bOvPjlvL+t6MrzTozd2kftp5pBvBPhQRde/wNjOb6Jx3WicGUbN/K3BvRNyfb5SYRbpRwszM6nPeaeM2iOfsN2PsIDnzSMMX1jVFq8VUVm+UxEbccyzlhXheg47DbIj1Nu+s/vY1a/RtlL6bx+pUv47d7UbyRscubHsuWss7B1HY1wpqpcsk6UjgSICpvJK/7t+wyzYAs+PaQYdgNux6mndq8tjiIJYtG3f6bh6rU/06djeP2+zYxW2/WXYVrRhEM/480jCMFdNIz2qOERFnRsTOEbHzqqzWt+DMzIaU804bt0EU9r8FtpW0VR6N6GDSoAVmZlaf804bt74340fEMkn/RJoKdRLw/Whnmj4zswmo13lnp83QnTRpt9PM326XQC91u+m+3rG78ZkGMhFORFwJXDmIc5uZjSrnnTZeHkHPzMys5FzYm5mZldyozGdvZmZDrJ3+62Z90NXH6qRvvJf96v3S8DO0+Hy/a/ZmZmYl58LezMys5NyMb2ZmbRtU03qzEfFGcfS9ZsfuRhyu2ZuZmZWcC3szM7OSc2FvZmZWcu6zNzOzvuqkD7rRDHCtHKudoXf7NRxuN9PW45q9mZlZybmwNzMzKzkX9mZmZiXnPnszM1tJu0PatqPdfvdGsfSyX72Xz9n3m2v2ZmZmJefC3szMrORc2JuZmZWc++zNzGwl7Tyv3iz9pHXXBe
ClhQtrLvfieffKOSpaPVen562nk/5/j41vZmZmTblmb2ZmPVFdg6+oXm6WvlHa6mNWr69Y/q6dAFjlxt+P63zNYhjvMfrFNXszM7OSc83ezGyCamec+GqtpG+3tttJjb56uZJu4b6vAWCd25cAsLxwnGbna1bz76Q23844Bg3HHWjxv801ezMzs5JzzX5EXDX/FgD23nTHAUdiZtZf4+kbr6Sr1Oh/duUFALz5Kx8HYKMf3Vn3PJV96z09MIpc2I8IF/Jm1ql2HuHq5HGvZk3trRaetdK1W/Auv+V2APa7e18A1rvrOQCW7rYtq//7PQ3jbLa+qNn16uQxvobHjtaO4WZ8MzOzknPN3szMuqpZTbjdpvjx7Fuxyo7bA7D8bxak5YXp0bupACVonm+Va/ZmZmYl55q9mdkE0as++maa9bN34wa4esdYZfFSAJZ10ErQixv0OpnWdzx6VrOXtLmk6yXdIek2SUfn9etJulrSPfnf2sMdmZlNQM47rRd62Yy/DPhMRLwO2AU4StL2wHHAtRGxLXBtXjYzs6R0eedLCxfy0sKFTFp33brD2RZV0hVfrZ6jWixaQixaMq64K+eud+xiumHXs8I+IhZExP/P758C7gA2Az4AnJ2TnQ0c0KsYzMxGjfNO64W+9NlLmg7sBMwGNoqIBZD+qCVt2I8YzMxGTcd5p1ofErebffQV9QapqdZs8JpGNedm+9SbIKfS0tBKXM3UStfuPRDjHgNhWIbLlbQGcDHw6YhouS1F0pGS5kia8yLP9y5AM7Mh1JW8M5x3WtLTmr2kVUl/rOdHxCV59aOSNsm/TDcBHqu1b0ScCZwJsJbWa3GMIDOz0de1vHOVweSdnd5938rd7+3WymvV/CfC8/UVvbwbX8BZwB0RcWph0+XAjPx+BnBZr2IwMxs1zjutF3pZs98NOBT4o6Rb8roTgJOACyV9DHgYOKiHMZiZjZru5Z3Rm774TnVjytjx1sq70WrQSrpmz8138v8ynrHxe1bYR8SvANXZvEevzmtmNsqcd1oveAQ9MzPrqk77wlu9i388WjlWGfvyPTa+mZlZydWt2Uu6EvhERDzYv3DMzEbbUOWdbTxnP5E1eya/otkd/o0M+to3qtnPBH4h6Qv5MRAzM2tuJs47bcjUrdlHxIWSfgp8CZgj6VxgeWH7qfX2NTObqJx31q8p92JWu17oZtzDotkNei8CS4HVgDUp/MGamVldI5d3dvPRsE4Ly1rpu13gFgfV6cajgMOuUZ/9PsCppIEc3hQRz/QtKjOzEeW804ZRo5r9F4CDIuK2fgVjZlYCzjuzZhPajKfm3I1jTESN+uzf0c9AzMzKwHmnDSMPqmNmVlZtDJfby+Fdu6nbA/b0a9/q61mt2fVvlLYVHlTHzMys5FyzNzOznmh32NtGNedWB7ppppvT5o4S1+zNzMxKzjV7MzNbSS/76OvVoOvVnIvPxPdSL8/RtSltx8k1ezMzs5Jzzd7MzPqq3oQz9Z7L76Sfvd104zlGv1oeOuGavZmZWcm5Zm9mZk21+5w4NK9Fd6M23Oo4/I3u8G81jn7W3ovXsxtjHrhmb2ZmVnKu2ZuZWU80eza+UR94rfWNjKfPfpCj8fWba/ZmZmYl55q9mVlZaWx/b7+f9e60z76Xd9B3Y59G27s5t0DDcfNbPKxr9mZmZiXnmr2ZmfVEpzXzYt9+p8fo9T7DzjV7MzOzknPN3sysrNqYz76b6t2F36pad+tXtxI0G32vGzp5WqCX133MsaO1fVzYm5lZV9UrkFvVymNy/SjcyzTVrZvxzczMSs41ezMzmzAadQ2Mt8Y+nhsIu/loXit6XrOXNEnS7yVdkZe3kjRb0j2SfihpSq9jMDMbNc47rZv60Yx/NHBHYflk4LSI2BZYCHysDzGYmY2akc87X1q4sO3BbIr9/NXL3dSN41biG4U+/J4W9pKmAe8DvpeXBbwbuCgnORs4oJcxmJmNGued1m297rM/Hfg8sG
ZeXh9YFBGVzol5wGY9jsHMbNT0Je/s1lC67Zyn0bn6UUPu5jk6GS63349E9qxmL2k/4LGI+F1xdY2kNZ8SlHSkpDmS5rzI8z2J0cxs2DjvtF7oZc1+N+D9kt4LTAXWIv1aXUfS5PwLdRowv9bOEXEmcCbAWlqvxWEDzMxGnvNO67qe1ewj4viImBYR04GDgesi4hDgeuDAnGwGcFmvYjAzGzXOO60XBjGozrHAMZLuJfVDnTWAGMzMRk3X885YtuzlVzOaPHnMq520xfN0+1yD1M0Yqz9ztz9/X65iRNwA3JDf3w+8tR/nNTMbZc47rVs8XK6ZmVnJubA3MzMrueHtDDEzs6FR3dfe6DnyQT6z386Y852OT98ofbNjNeuL7/Y1dM3ezMys5FzYm5mZlZwLezMzs5Jzn72ZmbWtl33hnWinr7zf49MXlWZsfDMzMxsOLuzNzMxKzs34ZmYTVDtN2u00zXfaRD2oY5eZa/ZmZmYl58LezMys5FzYm5mZlZz77M3MJqhu9l/3qv/fusM1ezMzs5JzYW9mZlZyLuzNzMxKThEx6BiakvQ4sBR4YtCx1LABjqsd9eLaMiJe1e9gzMrMeee4jFpcLeWdI1HYA0iaExE7DzqOao6rPcMal1lZDet3znG1p9O43IxvZmZWci7szczMSm6UCvszBx1AHY6rPcMal1lZDet3znG1p6O4RqbP3szMzMZnlGr2ZmZmNg5DX9hL2kfSXZLulXTcAOPYXNL1ku6QdJuko/P69SRdLeme/O+6A4pvkqTfS7oiL28laXaO64eSpgwgpnUkXSTpznzddh2W62VWds47W45vQuSdQ13YS5oE/AuwL7A98CFJ2w8onGXAZyLidcAuwFE5luOAayNiW+DavDwIRwN3FJZPBk7LcS0EPjaAmP438POIeC3wxhzfsFwvs9Jy3tmWiZF3RsTQvoBdgasKy8cDxw86rhzLZcBewF3AJnndJsBdA4hlWv7PfzdwBSDS4AuTa13HPsW0FvAA+b6QwvqBXy+//Cr7y3lny7FMmLxzqGv2wGbA3MLyvLxuoCRNB3YCZgMbRcQCgPzvhgMI6XTg88DyvLw+sCgiKlNJDeK6vRp4HPh/uYnse5JWZziul1nZOe9szYTJO4e9sFeNdQN9fEDSGsDFwKcjYskgY8nx7Ac8FhG/K66ukbTf120y8CbgjIjYiTRkp5vszfpjGPKAMZx3tqwneeewF/bzgM0Ly9OA+QOKBUmrkv5Yz4+IS/LqRyVtkrdvAjzW57B2A94v6UFgFqk56nRgHUmVSaMHcd3mAfMiYnZevoj0Bzzo62U2ETjvbG5C5Z3DXtj/Ftg23x05BTgYuHwQgUgScBZwR0ScWth0OTAjv59B6o/qm4g4PiKmRcR00vW5LiIOAa4HDhxgXI8AcyW9Jq/aA7idAV8vswnCeWcTEy3vHPpBdSS9l/RraxLw/Yj4+oDieDtwE/BHVvTvnEDqe7oQ2AJ4GDgoIp4cUIy7A5+NiP0kvZr0a3U94PfARyLi+T7HsyPwPWAKcD9wOOkH5lBcL7Myc97ZVoy7U/K8c+gLezMzM+vMsDfjm5mZWYdc2JuZmZWcC3szM7OSc2FvZmZWci7szczMSs6F/RDJs0M9IGm9vLxuXt5y0LGZmQ0j55utcWE/RCJiLnAGcFJedRJwZkQ8NLiozMyGl/PN1vg5+yGTh5X8HfB94Ahgp4h4YbBRmZkNL+ebzU1unsT6KSJelPQ54OfAe/wHa2bWmPPN5tyMP5z2BRYArx90IGZmI8L5ZgMu7IdMHhN5L2AX4L9XZjkyM7PanG8258J+iOTZoc4gzff8MPAt4JTBRmVmNrycb7bGhf1wOQJ4OCKuzsvfAV4r6V0DjMnMbJg532yB78Y3MzMrOdfszczMSs6FvZmZWcm5sDczMys5F/ZmZmYl58LezMys5FzYm5mZlZwLezMzs5JzYW9mZlZy/wnwUSqKXTE11AAAAABJRU5Erk
Jggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "if preprocess != False:\n", + " draw_one_sample(train_true, train_reco, preprocess,\n", + " min_true=min_true, max_true=max_true, \n", + " min_reco=min_reco, max_reco=max_reco,\n", + " save=False, PATH=PATH\n", + " )\n", + "else:\n", + " draw_one_sample(train_true,train_reco)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def HCAL():\n", + "\n", + " \n", + " tf.reset_default_graph()\n", + " \n", + " _, n_H_A, n_W_A ,n_C = train_true.shape\n", + " _, n_H_B, n_W_B ,n_C = train_reco.shape\n", + " \n", + " gan = bicycle_GAN(n_H_A, n_W_A, n_H_B, n_W_B, n_C,\n", + " min_true=min_true, max_true=max_true, \n", + " min_reco=min_reco, max_reco=max_reco,\n", + " d_sizes=d_sizes, g_sizes_enc=g_sizes_enc, g_sizes_dec=g_sizes_dec, e_sizes=e_sizes,\n", + " lr=LEARNING_RATE, beta1=BETA1,\n", + " preprocess=preprocess, cost_type=cost_type,\n", + " cycl_weight=cycl_weight, latent_weight=latent_weight, kl_weight=kl_weight,\n", + " discr_steps=discr_steps, gen_steps=gen_steps, vae_steps=vae_steps,\n", + " batch_size=BATCH_SIZE, epochs=EPOCHS,\n", + " save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed= SEED)\n", + " \n", + " vars_to_train= tf.trainable_variables()\n", + " \n", + " if task == 'TRAIN':\n", + " \n", + " init_op = tf.global_variables_initializer()\n", + " \n", + " \n", + " if task == 'TEST':\n", + " \n", + " vars_all = tf.global_variables()\n", + " vars_to_init = list(set(vars_all)-set(vars_to_train))\n", + " init_op = tf.variables_initializer(vars_to_init)\n", + " \n", + " saver=tf.train.Saver()\n", + " # Add ops to save and restore all the variables.\n", + " \n", + " gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.33)\n", + " \n", + " with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n", + " \n", + " sess.run(init_op)\n", + " \n", + " if task=='TRAIN':\n", + " \n", + " if 
os.path.exists(PATH+'/'+PATH+'bicycle.ckpt.index'):\n", + " saver.restore(sess,PATH+'/'+PATH+'bicycle.ckpt')\n", + " print('Model restored.')\n", + " \n", + " gan.set_session(sess)\n", + " gan.fit(train_true,train_reco, validating_size)\n", + " \n", + " save_all = saver.save(sess, PATH+'/'+PATH+'bicycle.ckpt')\n", + " print(\"Model saved in path: %s\" % save_all)\n", + " \n", + " \n", + " if task=='TEST':\n", + " \n", + " print('\\n Evaluate model on test set...')\n", + " \n", + " if os.path.exists(PATH+'/'+PATH+'bicycle.ckpt.index'):\n", + " saver.restore(sess, PATH+'/'+PATH+'bicycle.ckpt')\n", + " \n", + " print('Model restored.')\n", + " \n", + " gan.set_session(sess)\n", + " \n", + " #test_reco_NN=gan.get_samples_A_to_B(test_true.reshape(test_true.shape[0],n_H_A,n_W_A,n_C))\n", + " test_reco_NN=np.zeros_like(test_true)\n", + " t0 = datetime.now()\n", + " for i in range(len(test_true)):\n", + " test_reco_NN[i]=gan.get_sample_A_to_B(test_true[i].reshape(1,n_H_A,n_W_A,n_C))\n", + " per_evt_time=(datetime.now() - t0)/len(test_reco)\n", + " print('Per event simulation time {0}'.format(per_evt_time))\n", + " done = False\n", + "\n", + " while not done:\n", + " \n", + " #j = int(input(\"Input event number\"))\n", + " if preprocess:\n", + " draw_nn_sample(test_true, test_reco, 1, preprocess,\n", + " min_true=min_true, max_true=max_true, \n", + " min_reco=min_reco, max_reco=max_reco,\n", + " f=gan.get_sample_A_to_B, save=False, is_training=False, PATH=PATH)\n", + " else:\n", + " draw_nn_sample(test_true, test_reco, 1, preprocess,\n", + " f=gan.get_sample_A_to_B, save=False, is_training=False)\n", + " \n", + " ans = input(\"Generate another?\")\n", + " if ans and ans[0] in ('n' or 'N'):\n", + " done = True\n", + " \n", + " done = False\n", + " while not done:\n", + " \n", + " if preprocess:\n", + " draw_nn_sample(test_true, test_reco, 20, preprocess,\n", + " min_true=min_true, max_true=max_true, \n", + " min_reco=min_reco, max_reco=max_reco,\n", + " 
f=gan.get_sample_A_to_B, save=False, is_training=False)\n", + " else:\n", + " draw_nn_sample(test_true, test_reco, 20, preprocess,\n", + " f=gan.get_sample_A_to_B, save=False, is_training=False)\n", + " \n", + " ans = input(\"Generate another?\")\n", + " if ans and ans[0] in ('n' or 'N'):\n", + " done = True\n", + " \n", + " return test_reco_NN" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Convolutional Network architecture detected for discriminator B\n", + "Convolutional Network architecture detected for encoder B\n", + "Encoder_B\n", + "Convolution\n", + "Input for convolution shape (?, 52, 64, 1)\n", + "Encoder output shape (?, 16)\n", + "Generator_A_to_B\n", + "Input for generator encoded shape (?, 52, 64, 1)\n", + "Output of generator encoder, \n", + " and input for generator decoder shape (?, 1, 1, 512)\n", + "Generator output shape (?, 52, 64, 1)\n", + "Generator_A_to_B\n", + "Input for generator encoded shape (?, 52, 64, 1)\n", + "Output of generator encoder, \n", + " and input for generator decoder shape (?, 1, 1, 512)\n", + "Generator output shape (?, 52, 64, 1)\n", + "Encoder_B\n", + "Convolution\n", + "Input for convolution shape (?, 52, 64, 1)\n", + "Encoder output shape (?, 16)\n", + "Discriminator_B\n", + "Input for convolution shape (?, 52, 64, 1)\n", + "minibatch features shape (?, 10)\n", + "Feature output shape (?, 16)\n", + "Logits shape (?, 1)\n", + "Discriminator_B\n", + "Input for convolution shape (?, 52, 64, 1)\n", + "minibatch features shape (?, 10)\n", + "Feature output shape (?, 16)\n", + "Logits shape (?, 1)\n", + "Discriminator_B\n", + "Input for convolution shape (?, 52, 64, 1)\n", + "minibatch features shape (?, 10)\n", + "Feature output shape (?, 16)\n", + "Logits shape (?, 1)\n", + "Generator_A_to_B\n", + "Input for generator encoded shape (?, 52, 64, 1)\n", + "Output of generator encoder, 
\n", + " and input for generator decoder shape (?, 1, 1, 512)\n", + "Generator output shape (?, 52, 64, 1)\n", + "\n", + " ****** \n", + "\n", + "Training bicycleGAN with a total of 16767 samples distributed in 1047 batches of size 16\n", + "\n", + "The validation set consists of 1000 images\n", + "The learning rate is 0.0002, and every 200 batches a generated sample will be saved to HCAL_bycicleGAN_test35\n", + "\n", + " ****** \n", + "\n", + "Epoch: 0\n", + "At iter: 200 - dt: 0:00:00.260050 - d_acc: 0.59, - d_acc_enc: 0.59\n", + "Discriminator cost 54.03, Generator cost 5920, VAE Cost 3509, KL divergence cost 545.2\n", + "Saving a sample...\n", + "At iter: 400 - dt: 0:00:00.371186 - d_acc: 0.47, - d_acc_enc: 0.50\n", + "Discriminator cost 73.59, Generator cost 2396, VAE Cost 2247, KL divergence cost 198.6\n", + "Saving a sample...\n", + "At iter: 600 - dt: 0:00:00.378116 - d_acc: 0.66, - d_acc_enc: 0.62\n", + "Discriminator cost 49.52, Generator cost 686.4, VAE Cost 1286, KL divergence cost 67.42\n", + "Saving a sample...\n", + "At iter: 800 - dt: 0:00:00.373615 - d_acc: 0.53, - d_acc_enc: 0.50\n", + "Discriminator cost 41.43, Generator cost 315.8, VAE Cost 1015, KL divergence cost 99.63\n", + "Saving a sample...\n", + "At iter: 1000 - dt: 0:00:00.367530 - d_acc: 0.56, - d_acc_enc: 0.59\n", + "Discriminator cost 41.54, Generator cost 297, VAE Cost 742.2, KL divergence cost 53.33\n", + "Saving a sample...\n", + "Printing validation set histograms at epoch 0\n", + "ET Distribution plots are being printed...\n", + "Done\n", + "Resolution plots are being printed...\n", + "Done\n", + "Epoch: 1\n", + "At iter: 1200 - dt: 0:00:00.298434 - d_acc: 0.72, - d_acc_enc: 0.72\n", + "Discriminator cost 41.07, Generator cost 247.4, VAE Cost 633.6, KL divergence cost 39.22\n", + "Saving a sample...\n", + "At iter: 1400 - dt: 0:00:00.373918 - d_acc: 0.56, - d_acc_enc: 0.47\n", + "Discriminator cost 35.16, Generator cost 144.5, VAE Cost 616, KL divergence cost 41.35\n", + "Saving 
a sample...\n", + "At iter: 1600 - dt: 0:00:00.370199 - d_acc: 0.44, - d_acc_enc: 0.44\n", + "Discriminator cost 47.92, Generator cost 110.3, VAE Cost 484.1, KL divergence cost 36.05\n", + "Saving a sample...\n", + "At iter: 1800 - dt: 0:00:00.366162 - d_acc: 0.53, - d_acc_enc: 0.47\n", + "Discriminator cost 44.97, Generator cost 99.2, VAE Cost 376.2, KL divergence cost 30.15\n", + "Saving a sample...\n", + "At iter: 2000 - dt: 0:00:00.372570 - d_acc: 0.53, - d_acc_enc: 0.41\n", + "Discriminator cost 38.9, Generator cost 108.8, VAE Cost 383.8, KL divergence cost 27.91\n", + "Saving a sample...\n", + "Printing validation set histograms at epoch 1\n", + "ET Distribution plots are being printed...\n", + "Done\n", + "Resolution plots are being printed...\n", + "Done\n", + "Epoch: 2\n", + "At iter: 2200 - dt: 0:00:00.287552 - d_acc: 0.66, - d_acc_enc: 0.53\n", + "Discriminator cost 18.25, Generator cost 87.42, VAE Cost 334.6, KL divergence cost 28.26\n", + "Saving a sample...\n", + "At iter: 2400 - dt: 0:00:00.366586 - d_acc: 0.44, - d_acc_enc: 0.44\n", + "Discriminator cost 34.34, Generator cost 82.36, VAE Cost 353.6, KL divergence cost 29.78\n", + "Saving a sample...\n", + "At iter: 2600 - dt: 0:00:00.362463 - d_acc: 0.50, - d_acc_enc: 0.50\n", + "Discriminator cost 38.3, Generator cost 79.64, VAE Cost 326, KL divergence cost 27.11\n", + "Saving a sample...\n", + "At iter: 2800 - dt: 0:00:00.363595 - d_acc: 0.47, - d_acc_enc: 0.47\n", + "Discriminator cost 33.89, Generator cost 79.25, VAE Cost 246, KL divergence cost 26.67\n", + "Saving a sample...\n", + "At iter: 3000 - dt: 0:00:00.373816 - d_acc: 0.56, - d_acc_enc: 0.53\n", + "Discriminator cost 16.85, Generator cost 80.94, VAE Cost 293.6, KL divergence cost 27.16\n", + "Saving a sample...\n", + "Printing validation set histograms at epoch 2\n", + "ET Distribution plots are being printed...\n", + "Done\n", + "Resolution plots are being printed...\n", + "Done\n", + "Epoch: 3\n", + "At iter: 3200 - dt: 0:00:00.300006 
- d_acc: 0.53, - d_acc_enc: 0.53\n", + "Discriminator cost 23.1, Generator cost 82.69, VAE Cost 251.7, KL divergence cost 26.43\n", + "Saving a sample...\n", + "At iter: 3400 - dt: 0:00:00.373295 - d_acc: 0.56, - d_acc_enc: 0.50\n", + "Discriminator cost 16.79, Generator cost 72.6, VAE Cost 221.8, KL divergence cost 26.54\n", + "Saving a sample...\n", + "At iter: 3600 - dt: 0:00:00.372298 - d_acc: 0.38, - d_acc_enc: 0.38\n", + "Discriminator cost 38.19, Generator cost 71.64, VAE Cost 212.6, KL divergence cost 26.76\n", + "Saving a sample...\n", + "At iter: 3800 - dt: 0:00:00.369248 - d_acc: 0.38, - d_acc_enc: 0.44\n", + "Discriminator cost 17.12, Generator cost 63.71, VAE Cost 223.1, KL divergence cost 25.36\n", + "Saving a sample...\n", + "At iter: 4000 - dt: 0:00:00.367608 - d_acc: 0.50, - d_acc_enc: 0.44\n", + "Discriminator cost 21.59, Generator cost 84.74, VAE Cost 213.5, KL divergence cost 26.49\n", + "Saving a sample...\n", + "Printing validation set histograms at epoch 3\n", + "ET Distribution plots are being printed...\n", + "Done\n", + "Resolution plots are being printed...\n", + "Done\n", + "Model saved in path: HCAL_bycicleGAN_test35/HCAL_bycicleGAN_test35bicycle.ckpt\n", + "Per event simulation time 0:00:00.001866\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAmEAAADCCAYAAAAB4hUiAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzt3Xu4HFWd7vHvmysXzZAQwECAyCEqCiMog0HF4aKIiHdQETE6KI7jODheEHQG8Dw4B9QBxuc5IydHkAyowCAKE0HkRKIiYyQIihAQlAiRS7gkQCLk+jt/1NqhutO9d+/dXV3V3e/nefrZXbXqsmpX/bpXr7VqlSICMzMzM+uucWVnwMzMzGwQuRBmZmZmVgIXwszMzMxK4EKYmZmZWQlcCDMzMzMrgQthZmZmZiVwIczMzMysBC6EjYGk1bnXJknP5KaP68L+L5G0ri4ft0g6ODe9RlLULbNzk+0dJ+mutM7vJb06l3Zimrda0jWSZuTSpkq6WNKjklZI+udc2h51+16d8nNSkzwcJmmRpKck3dvJ/5d1jqRl6dqbXjf/tnR+Z+XmHZCumVWSnpD0S0kfarLdD0ramK6TpyT9WtJRxR7NFnmY1SBmVkt6j6Rrc9Pr6+Lv/Abb+nzdNp5JnxXTU/pXJd0j6ekUex/IrXtQk9h5V0qfm+L9KUnLJX1Z0oTu/acGR7reH5G0bW7ehyUtyk2HpNsljcvNO1PSRU22eXC6Flan8393s7goUsr3mrrr7GRJ5+em16XrfWj62gbbmSTpivS/CkkH16VfW7ePdZJuz6XfkL5DhuL+bbm0Q9L/dpWkxyV9T9IuIxzXSZLuS8e2VNKLcmk7SPp22t5KSd9q65/YCRHhVxsvYBnw+hGWmdDhfV4CnDHCMntmp3fEbR2RjuEAskL5TGDnlHYY8DCwFzAZmAcszK17MXApsDWwB3AfcPww+dkI7NokfQ7wfuBjwL1ln1e/ml4vy4C7gU/k5u2T5gUwK807EFgNfA6YDgh4JXB5k+1+ELgxvR8HfDStv10Xj21WOoZh4xW4CDhzlNs+A/hxbvqLwEvSsb4KWAm8usm6BwNPA9um6Y8BBwGTgF2AW4BTyr42+vGVrvfHgc/n5n0YWJSbjrTM+3LzzgQuGuZ8Lk/vBRwJbABe3OVjC2DPEZY5A7hkhGUmAZ8EXgs8BBw8wvKLgNNy0385FHMpFp4GZqTpnXju+2gy8GXg6mG2/WHgN8BL0//2fwDTcuk/A84B/gKYCOxX9jXmmrACpF9Bl0n6jqSngfcrq706I7fM6yUty03PTKX8R1Mp/uNdyu7/BE6PiF9GxKaIWB4RD6a0twCXRcTSiFhL9sFyqKTdU/pRwNkR8UxE/AH4JvA3TfYzl+xL6IFGiRHxi4i4hKwgZ9V2MfCB3PRc4D/qlvkKMD8izo6IxyJzS0S8e6SNR8SmtI9tgdlD8yXNkXRT+hX76/wvbknTJH1T0oPpF+73c2kfkXSvstq4q9WkRrgokgQcD8wfmhcRp0fEXSnmFpN9ORzYZBNzgSsiYk1a9+sR8bOIWBcRfwK+Bbym2KMYaF8BPiNpu2GW+TLwxdHWSKa4uAZ4gqwwAoCkl0i6Pl2zd0t6dy5ta0n/KumPkp6UdKOkrVPaWyXdkWJkkaS9RnWkY5Cuw/Mi4kayH9pNKaspP4gsvofW/01EbBiaJCsc7ZrSHsl9H5G2v2eTbY8DTgf+MSLuTP/b30fEEyn98LTdz0bEkxGxPiJuHf0Rd5YLYcV5B/BtshL3ZcMtKGk8sAC4meyX7RuAz0o6rMgMSpoIvAJ4QfqSekDSv0naamiR9CI3DbD3MOl7U6fRl5D1tF8AUyTtla7d95DVzgIgaRuyAsUVY9l42uaHgPXAH9O8XYAfkP0QmAZ8BviupB3SahcD2wAvA3YEzk3rHQr8L+DdwIy0vUvHkq82HET2i/67jRLTF+hfAXc0SNsGOJrhY+d
1jda1jllCVnvzmWGWuRJ4iqxGt2WSxkl6K1lt8b1p3rbA9WTfHzsCxwL/LullabWvktUqv5osFk4GNqVmt++Q1UrtAFwD/JekSaPJU8E+APwsImp+bEtaIOlZYDHZ/3pJLm03SauAZ8jOwZebbHtmeu2dvsvuk/RFPddMPIesxn5+atq8WdJfd/LgxsKFsOLcGBH/lX7pPjPCsnOAKRHxL+lXxb3ABcB7h1nnlPRrZ+h1wRjyOAMYD7yTrCr5lWTVwaem9GuA90raO31RnEb2S2WblP7DlI/nSZpN9gG0DVs6mOzD4sox5NGqaag27A3AXcCfcmlTyT5bHhrlNuekD9tnyb5o3h8RK1La+4FrIuKaFFPXk31QH6msn+KbgL+NiJXpF+5P0nrHARdGxK9Sbe6pwIHK9V1r4LG62Gq3NmGoJmt1k/TzgV8D1zVIexfwGPCTBmko60u0P9n/y4pzGvCJXKG/XgD/DJwmaXIL29s5V7D4HvCpXK3MUcCyiPhmRGyIiF+RFeCPTgWKvwFOiog/RcTGiLgpXdvvAX4QEddHxHqya2JrssJaM7+qu9bf2ELe2/EBsub8GhFxFPB8sqbZ61Jt+FDa/RGxHVlB9Z/IPm8amZn+Hk7WReIQsgLsCbn0w4EbgBcA/wpcpbr+rd3mQlhxGja7NbE7sFs+GMh+3bxgmHXOiojtcq8Thlm2maHC4dci4uH0hXcuWSAQEdeR1Tx8n+f6Aj0DLE/r/T1Z9fDvyT5IvpNLy5sL/GdE/HkMebRquhh4H1nBu74pciWwiayQPxq/SB+2U4GryWqQhuwOHFMXI69N+9gVeCIiVjbY5s6k2jSAVBB6nKzGuZnpdbG1dJTHsVn68XIMTWqyJH2FrPb43RFZp5U6c4H/aJQm6e3AWcCbIuKxsebRRhYRvyVrrThlmGWuAe4HTmxhkw+ma30K8DXg0Fza7sCr6q7148i+D6YDW5F95tarv9Y3kX0PDXetv6LuWm/0Q6AjJL2W7Bga1pCnH0/XAm9MtYP16U+QxdFVTZp9h77PvhwRqyJiGfB/SN9nKX1ZRFyQ9nUp2f+n1KZ8F8KKU/+huYbaWqJ8AesB4J66YHh+RLyl0AxGPErW8b7Rh//QMl+LiD0jYieyD6FNwJ0p7bGIODYidoqIvYEJwC/z66eq9Xfhpsi+EhF/JOu/dyR1NZypsP3fZOd9LNteDfwdcLyk/dLsB4CL62Jk24g4K6VNa9Jn50GyLzVg8/W4PbU1d0V6J1l/n0X1CZK+SFaDd3hEPNUgfVeyWuT6Qi6SjgD+L/CWiLi9Pt0KcTrwEYYv1PwT8AUatwhsIdVgfQ7YJxWqIbuef1J3rT8vIj5GViv6LFmH83r117rIfqB061ofyVzgymFqhIdMoPHxDaXtSFZ4rXc3sI7m32e/GSatNC6Edc9twJuVDeswA/iHXNp/A+skfVrSVpLGS9pH0iu7kK9vAv8gabqkaSlfC2BzB9CXKbM72a+KcyPiyZS+p7IO0RMkvZms2vdLddt/F7CCrONxU6lvxFZknTKV/g8TO3mg1nEnAIcOdRivczLwQUmflbQ9gKSXS2qpP1ZEPA58g6wZCLI+Z2+R9MYUH1spu9V/ZkQ8BFxL1m9mqqSJkl6X1vs28CFJ+6Zmon8BFqdfyd3QsCZL0qlkNYlvSMfayPHATRFRU+uR+rl9C3hXRPyy4ZrWcambyGXUfnbXL7MIuJ3svLe63XVkTWND1/oC4EWSjk/X8kRJfyVpr1S7dSFwjqSdUywcmK7ty8m+Yw5Ln52fBtYCN43+aEdH0mQ915d4UopP5dKHaoQvqlvvJZLelL5rJkp6P1kfx5+k9HdKenH6ftiB7M7GW4c62+elH3+XASdLer6kmWSF5gVpke8BU5UN8TJe0tFkBeqfd+4/MQZR8u2Zvf6iwRAVNLg9maxt/gqyzpu/Bj5FVjU6lL4L2QX0MFlzzk3AIU32eQlZiX917vVw3TKtDlExiax
wtYqsD895wOSUNo3sA2VNSjsTGJ9b99g0/8/Ar+r/D2mZhWR3X9bPPxhYlZt+PdmvlPzr/5V9fv0a+XpP8yeQG6IizTuArHD0JFlt0GLgA022+0HSEBW5eTPJvkT+Mk2/iuzD+QngUbKO+rvFc9fqfOCRFD9X5rbzt2TNN0+QfSDPbJKHWekYVte9PlW33EW0MERFiukNNBgGIO1nbd1+Pl+3zF3ACQ3WvSFtN7/utWVfG/34qr/eyWqWnmXLISr2zE2/Ks27qMk2DyYNUZGbtw1ZLddb0vSL0/X9KFnz+Y+BfVPa1mSf039KsfVTYOuU9g6yloonU6y8bJhjC7LP9vx1dF7dMmcwwhAVuf9T/ef3rFz6sWRNpapbby+yz4Wnyb6DbgbekUv/BFmN+xqy78ZLgd1z6ecD5+emp6RlniarUTwtv0+yLg63p2NdAhxU9jWmlDEzMzMz6yI3R5qZmZmVwIUwMzMzsxKUUgiTdISyUYDvldT0ll+zQeGYMKvlmLBB0PU+YcpGw/4d2SCPy8k64h0bEXd2NSNmFeGYMKvlmLBBMarnXHXIAWQPaP4DQLpl/W2ksacamaTJsRXbNks266pnWcO6WKuRl2yZY8J6mmPCrFarMVFGIWwXakeTX052S29TW7Etryr2MYpmLVscCzu9yc7ExLjxWy64adjn6Vq3dOvclHQNVC4mHAs2WhrFb4jhWhDTtbd4449a2lQZhbBGR9rokRwnkh7/sFVrgw+b9SrHhFktx4QNhDIKYcvJBrwbMpPscQs1ImIeMA9giqZ5MDPrZ44Js1ptxYTGaejN5mU1aeLQOs9tYFN6/9zzoolNI4RWbtmeoOLvv9v8/+5Fjf4/ueMZGvg/f91ofFbbFes3bJ4X69dlb0ZZ41rG3ZE3A7MlvVDSJOC9ZA/rNRtUjgmzWo4JGwhdrwmLiA2S/h64DhgPXBgRd3Q7H2ZVUZmYaNSPphH3rem+Vvs4tXpump3ripzbdmMiNmzYct76zuWvp0Tx57TXKgdHo1G9aCebIcpojiQirgGuKWPfZlXkmDCr5ZiwQeAR883MzMxK4EKYmZmZWQlcCDMzMzMrQSl9wsysAL3QKbvVzv/Q3vF0q4N5t25m6PTxVKQDvtmgc02YmZmZWQlcCDMzMzMrgQthZmZmZiVwIczMzMysBO6YbzZoqtgpu9Od8LvVWb+dbVZ81HozK55rwszMzMxK4EKYmZmZWQlcCDMzMzMrgQthZmZmZiVwIczMzMysBL470qxflPn4nna1mvdWj6eIRzh1+v87mnV7+dyaWVOuCTMzMzMrgQthZmZmZiUorBAm6UJJKyT9NjdvmqTrJd2T/k4tav9mVeOYMKvlmLBBV2RN2EXAEXXzTgEWRsRsYGGaNhsUF+GYMMu7CMeEDbDCCmER8VPgibrZbwPmp/fzgbcXtX+zqik8JjZt3PJVNY3y2OzVqnHjt3y1m59Wl+2Wsv4/BfP3hA26bvcJ2ykiHgJIf3dstqCkEyUtkbRkPWu7lkGzLnNMmNVyTNjAqGzH/IiYFxH7R8T+E5lcdnbMSueYMKvlmLBe1+1C2COSZgCkvyu6vH+zqnFMmNVyTNjA6HYh7Gpgbno/F7iqy/s3qxrHhFktx4QNjMJGzJf0HeBgYLqk5cDpwFnA5ZJOAO4Hjilq/2ZVU0pMlDjS+vjt/mLz+42rnuzIdhpur8HxjLTvofR28tXMiPkdiz4dMd/fEzboCiuERcSxTZIOK2qfZlXmmDCr5ZiwQednR5pZIYargWqW3up2urXv0Siyds3M+lNl7440MzMz62cuhJmZmZmVwM2RZv2sUeftZiOmd6Gjd6ea6lrteN/JfY4lHx3T6nnsg876ZoPENWFmZmZmJXAhzMzMzKwEbo40s8K1OnZWq8uN1PQ3lnXGwndZmlk7XBNmZmZmVgLXhJn1qvqO2aPphN/J/Tbbd06RY4INLHfCN+t5rgkzMzMzK4ELYWZmZmYlcHOkmXX
NWMb3KuJB4EXue6TttKprY5CZWWlcE2ZmZmZWAteEmfWqVjpmj6bzdqsd7lvcZqOanDKHlujUg8C7te8xafVGjBKfmmBmz3FNmJmZmVkJXAgzMzMzK4GbI82sEL3QmbyIzvq9cNxmVg2F1YRJ2lXSDZKWSrpD0klp/jRJ10u6J/2dWlQezKrEMWFWyzFhg67I5sgNwKcjYi9gDvBxSS8FTgEWRsRsYGGaNhsEjgmzWo4JG2iFNUdGxEPAQ+n905KWArsAbwMOTovNBxYBnysqH2ZVUfmY6PKdca02BRbZvDeWuzUrYwyPjxp23RJUPibMCtaVjvmSZgH7AYuBnVLgDQXgjk3WOVHSEklL1rO2G9k06xrHhFktx4QNosI75kt6HvBd4JMR8ZSkltaLiHnAPIApmhbF5dCsuxwTmVZroUYabb5btVXt1Mx1auT9fuWYsEFVaE2YpIlkgfWtiLgyzX5E0oyUPgNYUWQezKrEMWFWyzFhg6zIuyMFXAAsjYhzcklXA3PT+7nAVUXlwaxKHBNmtRwTNuiKbI58DXA8cLuk29K8zwNnAZdLOgG4HzimwDyYVUmxMdFOR+12t1nAvjvVMb/IZsRWH9ZduebE6jyeyN8TNtCKvDvyRqBZw/5hRe3XrKocE2a1HBM26Dxivpl1TbNO9q2ORt/pfIxmiIpG64w0Yv5INxW0um8z609+dqSZmZlZCVwIMzMzMyuBmyPNrLlWO3C3uNxYRqgfy0O0R9rmWJr6OrXvIW03vbYzOn6nb9ioTD9/s97imjAzMzOzErgmzMy6Ziy1P6OpySryeZONtt1ObZY73ptZ05owSdekZ3mZGXBr3MgzsabsbJhVxq0bf+qYMGvDcM2RFwE/kvSF9FgJs4G2M7tzKz9jLc/gmDCDnTWLWzctckyYjVHT5siIuFzSD4DTgCWSLgY25dLPabauWT/aSbsyPWbwc66FfoyJIjpw1ym6Ca7I7Xd7LLMRtXq+Wn3CQbNlh7HTuN2YHjvz800LoB9jwqxgI/UJWw+sASYDzycXXGaDSIwjDfDtmDADhHBMmI1N00KYpCOAc8gepPqKiPhz13JlVkGPxcPcw6+BAMeEGY/FQ9yz6TYcE2ZjM1xN2BeAYyLijm5lxqzKlrGUfZjDHSxhbTzrL5suGU3z33B3MHaq6bBTDwIf6ZFGvXD35LJNd7LPuFdzx6bFjgmzMRiuT9hB3cyIWdXtr0OyN1FuPsyqYv/xfsa2WTs8TphZv2i1A3azztcd7oTf7kj3o12u2bLt1FwVWXvW9rbbOV8dPtdmNjYeMd/MzMysBC6EmZmZmZWgsOZISVsBPyW7bXkCcEVEnC7phcClwDTgV8DxEbGuqHyYVcWgxUS3Hjc0UhPnWLZduTHB+tSgxYRZvSJrwtYCh0bEy4F9gSMkzQHOBs6NiNnASuCEAvNgViWOCbNajgkbaIUVwiKzOk1OTK8ADgWuSPPnA28vKg9mVVJ4TIwbv+Vr08YtX42WazaCeqv7adPGVU8OW0s29Bpu3WbrD5fWar66WQs23LHWaPU8jOZ8tbps/TU1Rv6esEFXaJ8wSeMl3QasAK4Hfg+siogNaZHlwC5F5sGsShwTZrUcEzbICi2ERcTGiNgXmAkcAOzVaLFG60o6UdISSUvWs7bIbJp1jWPCrJZjwgZZV8YJi4hVkhYBc4DtJE1Iv3JmAg82WWceMA9giqZ5eEzrK46Jxhp1hC+iKXCkzvrDpRfdWX9QbwBwTNggKqwmTNIOkrZL77cGXg8sBW4Ajk6LzQWuKioPZlXimDCr5ZiwQVdkTdgMYL6k8WSFvcsjYoGkO4FLJZ0J3ApcUGAezKqk2JhotYN0u6Oltzsy/zC6Ves10n6GS2/1+ZXNlh3TcBzt/H9Hcx66P5K+vydsoBVWCIuI3wD7NZj/B7J2f7OB4pgwq+WYsEHnEfPNzMzMSuAHeJtZX+t2R/f8/sbSFGpmg8M1YWZmZmYlcCHMzMzMrAR
ujjSz5lq9K6/Fu+pavXOwWXqrxrKdSj+su/t3LZpZF7gmzMzMzKwErgkzs8K19EBqxlY71s4o+x793qyzxm2zzeb3m/785xJz0htcE2ZmZmZWAhfCzMzMzErg5kgzy3To0UONDDXLNWuWHO5RPu08YqhoE2bttvn9mpfuBMA2N/1u87xu31zQskbnGnwDgI3J+Je9ePP7Z3adsvn91vetBGDj3fd2PU+9wjVhZmZmZiVwTZiZdU2zGp2OPei6gXa2M1JtVOTmLfrG1QC8ced9R73vSg+PYTaStes2v/3T654rVmx843QAZl8yefO8uOWO7uWrB7gmzMzMzKwELoSZmZmZlcDNkday6x68bfP7fJOL9YmKdcruVLPccNtpltZqM2I+/ezHZ9es28r6jZbrVDPssNo91/Ud+6t16ViXbbz3vs3vN8ycuvn9H15/IQD7PPR3m+ftfEv38tULXBNmZmZmVgLXhFnLXPtlY9WV2p0xaFZr1WhIjUZ5zw9Rcf5N2RAVL1p187D7GW57w803q5rx208DYOPjT2ye98L52vx+j6c+CsBuv13f3Yz1kMJrwiSNl3SrpAVp+oWSFku6R9JlkiYVnQezKnFMmNVyTNig6kZz5EnA0tz02cC5ETEbWAmc0IU8mFWJY8KslmPCBlKhzZGSZgJvBr4EfEqSgEOB96VF5gNnAF8vMh9mVdEXMdHqyPq55TY+tbq4/bSxbruj8W9Ydv/m9y/6aPa+4VMBJjT4qC1i1PoxnJsxLdvBmzj6IiYG1MYnVm4xb8LC53rez17Yzdz0pqJrws4DTgY2pentgVURsSFNLwd2abSipBMlLZG0ZD1rC86mWdc4JsxqOSZsYBVWCJN0FLAiIvI3pKrBotFo/YiYFxH7R8T+E5ncaBGznuKYMKvlmLBBV2Rz5GuAt0o6EtgKmEL2i2c7SRPSr5yZwIMF5sGsShwTZrUcE70sGpaNbRQKqwmLiFMjYmZEzALeC/w4Io4DbgCOTovNBa4qKg9mVeKYMKvlmLBBV8Y4YZ8DLpV0JnArcEEJeTCrkt6KiVY7Zbfbebud9Ts9Ivwottn2OF/tdIRvd7mCO+GPQm/FhNkYdaUQFhGLgEXp/R+AA7qxX7OqckyY1XJM2CDyY4vMzMzMSuBCmJmZmVkJXAgzMzMzK4Ef4G1mzTUbWb1eOZ23m2u3g3mZo9b3gvq8V+z0m/UK14SZmZmZlcCFMDMzM7MSuBBmZmZmVgIXwszMzMxK4EKYmZmZWQl8d6SZZTp9J2S3HoFT5qN2uvGIoSL23Uy38m5mgGvCzMzMzErhQpiZmZlZCVwIMzMzMyuBC2FmZmZmJXDHfLN+NprO253ubN2tzttldvZv5yaF0Wh13+121i/zJgezAeSaMDMzM7MSuBBmZmZmVoJCmyMlLQOeBjYCGyJif0nTgMuAWcAy4N0RsbLIfJhVhWPCrJZjwgZZN2rCDomIfSNi/zR9CrAwImYDC9O02SBxTJjVckzYQCqjOfJtwPz0fj7w9hLyYFYlnYmJceO3fG3auOWrTI3y2G6n9Vb3M5r8dPr/1mh7o3l1S3WuF39P2EAouhAWwI8k3SLpxDRvp4h4CCD93bHRipJOlLRE0pL1rC04m2Zd45gwq+WYsIFV9BAVr4mIByXtCFwv6a5WV4yIecA8gCmaFkVl0KzLHBNmtToTE+O2D02clM1fv66QjJp1WqE1YRHxYPq7AvgecADwiKQZAOnviiLzYFYljgmzWo4JG2SFFcIkbSvp+UPvgcOB3wJXA3PTYnOBq4rKg1mVOCbMajkmbNApophWDUl7kP2qgazZ89sR8SVJ2wOXA7sB9wPHRMQTI2zrUWAN8Fghme2+6fTPsUB/HU8rx7J7ROww2g07JobVT9cQ9NfxOCbK0U/XEPTX8XQsJgorhHWapCW525d7Wj8dC/TX8fTSsfRSXkfST8cC/XU8vXQsvZTXkfTTsUB/HU8nj8Uj5puZmZmVwIUwMzM
zsxL0UiFsXtkZ6KB+Ohbor+PppWPppbyOpJ+OBfrreHrpWHopryPpp2OB/jqejh1Lz/QJMzMzM+snvVQTZmZmZtY3XAgzMzMzK0FPFMIkHSHpbkn3Sjql7PyMhqRdJd0gaamkOySdlOZPk3S9pHvS36ll57VVksZLulXSgjT9QkmL07FcJmlS2XlshaTtJF0h6a50fg7slfPimKgWx0S5ejkewDFRZUXHROULYZLGA/8beBPwUuBYSS8tN1ejsgH4dETsBcwBPp7yfwqwMCJmAwvTdK84CViamz4bODcdy0rghFJyNXr/BvwwIl4CvJzsmCp/XhwTleSYKEkfxAM4Jqqs2JiIiEq/gAOB63LTpwKnlp2vNo7nKuANwN3AjDRvBnB32XlrMf8z00V3KLAAENnIwRMana+qvoApwH2km1Ny8yt/XhwT1Xo5JkrPd1/FQzoGx0QFXt2IicrXhAG7AA/kppeneT1H0ixgP2AxsFNEPASQ/u5YXs5G5TzgZGBTmt4eWBURG9J0r5yfPYBHgW+mKvNvKHt2XS+cF8dEtTgmytU38QCOiYopPCZ6oRCmBvN6blwNSc8Dvgt8MiKeKjs/YyHpKGBFRNySn91g0V44PxOAVwBfj4j9yJ451ytV/b36P6/hmKicXo2JXv1/b8ExUTmFx0QvFMKWA7vmpmcCD5aUlzGRNJEssL4VEVem2Y9ImpHSZwArysrfKLwGeKukZcClZFXN5wHbSZqQlumV87McWB4Ri9P0FWTB1gvnxTFRHY6J8vV8PIBjoqIKj4leKITdDMxOd1ZMAt4LXF1ynlomScAFwNKIOCeXdDUwN72fS9YHoNIi4tSImBkRs8jOw48j4jjgBuDotFivHMvDwAOSXpxmHQbcSW+cF8dERTgmKqGn4wEcE1XVlZgou+Nbi53jjgR+B/we+ELZ+Rll3l9LVu36G+C29DqSrI18IXBP+jut7LyO8rgOBhak93sAvwTuBf4TmFx2/lo8hn2BJencfB+Y2ivnxTFRvZdjotR892w8pPw7Jir6Kjom/NgiMzMzsxL0QnOkmZmZWd9xIczMzMysBC6EmZmZmZXAhTAzMzOzErgQZmZmZlYCF8L6nKRdJd27EvVbAAABGklEQVQnaVqanpqmdy87b2ZlcEyY1XJMlMeFsD4XEQ8AXwfOSrPOAuZFxB/Ly5VZeRwTZrUcE+XxOGEDID0O4xbgQuAjwH4Rsa7cXJmVxzFhVssxUY4JIy9ivS4i1kv6LPBD4HAHlg06x4RZLcdEOdwcOTjeBDwE7F12RswqwjFhVssx0WUuhA0ASfsCbwDmAP849PR3s0HlmDCr5ZgohwthfU6SyDpcfjIi7ge+Any13FyZlccxYVbLMVEeF8L630eA+yPi+jT978BLJP11iXkyK5NjwqyWY6IkvjvSzMzMrASuCTMzMzMrgQthZmZmZiVwIczMzMysBC6EmZmZmZXAhTAzMzOzErgQZmZmZlYCF8LMzMzMSvD/AV6vuACTfWT0AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "if __name__=='__main__':\n", + " \n", + " if task == 'TRAIN':\n", + " if not os.path.exists(PATH):\n", + " os.mkdir(PATH)\n", + " \n", + " elif os.path.exists(PATH):\n", + " if os.path.exists(PATH+'/checkpoint'):\n", + " ans = input('A previous checkpoint already exists, choose the action to perform \\n \\n 1) Overwrite the current model saved at '+PATH+'/checkpoint \\n 2) Start training a new model \\n 3) Restore and continue training the previous model \\n ')\n", + " \n", + " if ans == '1':\n", + " print('Overwriting existing model in '+PATH)\n", + " for file in os.listdir(PATH):\n", + " file_path = os.path.join(PATH, file)\n", + " try:\n", + " if os.path.isfile(file_path):\n", + " os.unlink(file_path)\n", + " #elif os.path.isdir(file_path): shutil.rmtree(file_path)\n", + " except Exception as e:\n", + " print(e)\n", + " \n", + " elif ans == '2':\n", + " PATH = input('Specify the name of the model, a new directory will be created.\\n')\n", + " os.mkdir(PATH) \n", + " \n", + " test_reco_NN = HCAL()\n", + " \n", + " elif task == 'TEST':\n", + " if not os.path.exists(PATH+'/checkpoint'):\n", + " print('No checkpoint to test')\n", + " else:\n", + " test_reco_NN = HCAL()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "max_MC=train_reco.max()\n", + "max_NN=test_reco_NN.max()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('Max NN {0}, Max_MC {1}'.format(max_NN, max_MC, ))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "test_reco_NN_rescaled=(test_reco_NN/test_reco_NN.max())*max_MC" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(test_reco_NN_rescaled.max())" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if preprocess:\n", + " reco_MC_hist = denormalise(test_reco, min_reco, max_reco).reshape(test_reco.shape[0], test_reco.shape[1]*test_reco.shape[2])\n", + " reco_MC_hist = np.sum(reco_MC_hist,axis=1)\n", + " max_E=np.max(reco_MC_hist)\n", + " \n", + " reco_NN_hist = denormalise(test_reco_NN, min_reco, max_reco).reshape(test_reco_NN.shape[0], test_reco_NN.shape[1]*test_reco_NN.shape[2])\n", + " reco_NN_hist = np.sum(reco_NN_hist,axis=1)\n", + " max_NN = np.max(reco_NN_hist)\n", + " \n", + " true_hist = denormalise(test_true, min_reco, max_true).reshape(test_true.shape[0], test_true.shape[1]*test_true.shape[2])\n", + " true_hist = np.sum(true_hist,axis=1)\n", + " max_true_E=np.max(true_hist)\n", + "else:\n", + " reco_MC_hist = test_reco.reshape(test_reco.shape[0], test_reco.shape[1]*test_reco.shape[2])\n", + " reco_MC_hist = np.sum(reco_MC_hist,axis=1)\n", + " max_E=np.max(reco_MC_hist)\n", + " \n", + " reco_NN_hist = test_reco_NN_rescaled.reshape(test_reco_NN_rescaled.shape[0], test_reco_NN.shape[1]*test_reco_NN.shape[2])\n", + " reco_NN_hist = np.sum(reco_NN_hist,axis=1)\n", + " max_NN = np.max(reco_NN_hist)\n", + " \n", + " true_hist = test_true.reshape(test_true.shape[0], test_true.shape[1]*test_true.shape[2])\n", + " true_hist = np.sum(true_hist,axis=1)\n", + " max_true_E=np.max(true_hist)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "reco_NN_hist=(reco_NN_hist/reco_NN_hist.max())*reco_MC_hist.max()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#if preprocess:\n", + "# reco_MC_hist = denormalise(train_reco, min_reco, max_reco).reshape(train_reco.shape[0], train_reco.shape[1]*train_reco.shape[2])\n", + "# reco_MC_hist = np.sum(reco_MC_hist,axis=1)\n", + "# max_E=np.max(reco_MC_hist)\n", + "# \n", + "# reco_NN_hist = denormalise(test_reco_NN, min_reco, 
max_reco).reshape(test_reco_NN.shape[0], test_reco_NN.shape[1]*test_reco_NN.shape[2])\n", + "# reco_NN_hist = np.sum(reco_NN_hist,axis=1)\n", + "# max_NN = np.max(reco_NN_hist)\n", + "# \n", + "# true_hist = denormalise(train_true, min_true, max_true).reshape(train_true.shape[0], train_true.shape[1]*train_true.shape[2])\n", + "# true_hist = np.sum(true_hist,axis=1)\n", + "# max_true_E=np.max(true_hist)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "diffNN = reco_NN_hist-true_hist\n", + "diffMC = reco_MC_hist-true_hist\n", + "\n", + "plt.subplot(1,2,1)\n", + "plt.tick_params(labelsize=15);\n", + "h_reco = plt.hist(diffMC,bins=30, edgecolor='black');\n", + "plt.xlabel('ET recoMC - ET true', fontsize=15)\n", + "plt.ylabel('dN/dETdiff', fontsize=15)\n", + "plt.title('Resolution as simulated by MC', fontsize=15)\n", + "plt.subplot(1,2,2)\n", + "plt.tick_params(labelsize=15);\n", + "h_nn = plt.hist(diffNN,bins=30, edgecolor='black');\n", + "plt.xlabel('ET recoNN - ET true', fontsize=15)\n", + "plt.ylabel('dN/dETdiff', fontsize=15)\n", + "plt.title('Resolution as simulated by NN', fontsize=15)\n", + "fig = plt.gcf()\n", + "fig.set_size_inches(12,4)\n", + "plt.savefig(PATH+'/resolution.eps', format='eps', dpi=100)\n", + "\n", + "\n", + "plt.hist(diffNN, bins=30);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "diffNN.mean(), diffNN.std()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "diffMC.mean(), diffMC.std()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "np.where(true_hist==0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "plt.subplot(1,3,1)\n", + "plt.tick_params(labelsize=12);\n", + "h_reco = plt.hist(true_hist/1000,bins=30, 
edgecolor='black');\n", + "plt.xlabel('E_T (GeV)', fontsize=15)\n", + "plt.ylabel('dN/dE_T', fontsize=15)\n", + "plt.title('Pion True E_T', fontsize=15)\n", + "plt.subplot(1,3,2)\n", + "plt.tick_params(labelsize=12);\n", + "h_reco = plt.hist(reco_MC_hist/1000,bins=30, edgecolor='black');\n", + "plt.xlabel('E_T (GeV)', fontsize=15)\n", + "\n", + "plt.title('Pion Reco E_T from MC', fontsize=15)\n", + "plt.subplot(1,3,3)\n", + "plt.tick_params(labelsize=12);\n", + "h_nn = plt.hist(reco_NN_hist/1000,bins=30, edgecolor='black');\n", + "plt.xlabel('E_T (GeV)', fontsize=15)\n", + "\n", + "plt.title('Pion Reco E_T from BicycleGAN', fontsize=15)\n", + "fig = plt.gcf()\n", + "fig.set_size_inches(16,4)\n", + "plt.savefig(PATH+'/distribution.eps', format='eps', dpi=100)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "diff=plt.bar(np.arange(0, 30), \n", + " height=(h_nn[0]-h_reco[0]), edgecolor='black', \n", + " linewidth=1, color='lightblue',width = 1, align = 'edge') \n", + "plt.xlabel('E (GeV)')\n", + "plt.ylabel('dN/dE')\n", + "plt.title(\"NN output - MC output\")\n", + "fig = plt.gcf()\n", + "fig.set_size_inches(12,4)\n", + "plt.savefig(PATH+'/difference.eps', format='eps',dpi=100)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "four_cells_diff_NN= np.array([\n", + " four_cells(test_reco_NN_rescaled[i]).sum() - test_true[i].sum() for i in range(len(test_reco))\n", + "]) \n", + "four_cells_diff_MC= np.array([\n", + " four_cells(test_reco[i]).sum() - test_true[i].sum() for i in range(len(test_reco))\n", + "])\n", + "plt.hist(four_cells_diff_NN, bins=30, label = 'NN-ET_true')\n", + "plt.hist(four_cells_diff_MC, bins=30, label = 'MC-ET_true', histtype='step')\n", + "plt.legend(loc=2);\n", + "plt.xlabel('Sum of 4 max cells ET - ET true (GeV)')\n", + "plt.ylabel('dN/dET')\n", + 
"plt.savefig(PATH+'/four_cells_diff_combined.eps', format='eps', dpi=100)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "four_cells_diff= four_cells_diff_NN-four_cells_diff_MC\n", + "plt.hist(four_cells_diff_NN-four_cells_diff_MC, bins=20, label = 'MC-NN');\n", + "plt.legend();\n", + "print('four cells diff mean {0}, std {1}'.format(four_cells_diff.mean(), four_cells_diff.std()))\n", + "plt.savefig(PATH+'/four_cells_diff.eps', format='eps', dpi=100)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "hyper_dict={'LEARNING_RATE':LEARNING_RATE,\n", + " 'BETA1':BETA1,\n", + " 'BATCH_SIZE':BATCH_SIZE,\n", + " 'EPOCHS':EPOCHS,\n", + " 'SAVE_SAMPLE_PERIOD':SAVE_SAMPLE_PERIOD,\n", + " 'SEED':SEED,\n", + " 'd_sizes':d_sizes,\n", + " 'g_sizes_dec':g_sizes_dec,\n", + " 'g_sizes_enc':g_sizes_enc,\n", + " 'e_sizes':e_sizes,\n", + " 'preprocess':preprocess,\n", + " 'cost_type':cost_type,\n", + " 'validating_size':validating_size,\n", + " 'test_size':test_size,\n", + " 'n_batches':n_batches,\n", + " 'reco_path':reco_path,\n", + " 'true_path':true_path,\n", + " 'discr_steps':discr_steps,\n", + " 'gen_steps':gen_steps,\n", + " 'vae_steps':vae_steps,\n", + " 'latent_weight':latent_weight,\n", + " 'cycl_weight':cycl_weight,\n", + " 'kl_weight':kl_weight,\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with open(PATH+'/hyper_parameters.pkl', 'wb') as f: \n", + " pickle.dump(hyper_dict, f)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": 
{ + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/architectures/CNN.py b/architectures/CNN.py new file mode 100644 index 0000000..d4fd644 --- /dev/null +++ b/architectures/CNN.py @@ -0,0 +1,414 @@ +#NETWORK ARCHITECTURES + +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + +from architectures.utils.NN_building_blocks import * + +def lrelu(x, alpha=0.1): + return tf.maximum(alpha*x,x) + +# some dummy constants +LEARNING_RATE = None +LEARNING_RATE_D = None +LEARNING_RATE_G = None +BETA1 = None +BATCH_SIZE = None +EPOCHS = None +SAVE_SAMPLE_PERIOD = None +PATH = None +SEED = None +rnd_seed=1 + + +#tested on mnist +class CNN(object): + + """ + Builds convolutional neural network. Regularization implemented + with dropout, no regularization parameter implemented yet. Minimization through + AdamOptimizer (adaptive learning rate). 
Supports convolution, max_pooling and avg_pooling + + Constructor inputs: + + -Positional arguments: + - dims of input image: (n_H (rows))*(n_W (columns))*(n_C (input channels)) + + - sizes: (dict) python dictionary containing the size of the + convolutional layers and the number of classes for classification + + sizes = {'conv_layer_n' :[(mo, filter_sz, stride, apply_batch_norm, keep_probability, act_f, w_init)] + 'maxpool_layer_n':[(filter_sz, stride, keep_prob)] + 'avgpool_layer_n':[(filter_sz, stride, keep_prob)] + + 'n_classes': n_classes + + } + + convolution and pooling layers can be in any order, the last key has to be 'n_classes' + + mo: (int) number of output channels after convolution + filter_sz: (int) size of the kernel + stride: (int) stride displacement + apply_batch_norm: (bool) apply batch norm at layer n + keep_probability: (float32) probability of activation of output + act_f: (function) activation function for layer n + w_init: (tf initializer) random initializer for weights at layer n + n_classes: number of classes + + -Keyword arguments + + -lr: (float32) learning rate arg for the AdamOptimizer + -beta1: (float32) beta1 arg for the AdamOptimizer + -batch_size: (int) size of each batch + -epochs: (int) number of times the training has to be repeated over all the batches + -save_sample: (int) after how many iterations of the training algorithm performs the evaluations in fit function + -path: (str) path for saving the session checkpoint + + Class attributes: + + - X: (tf placeholder) input tensor of shape (batch_size, input features) + - Y: (tf placeholder) label tensor of shape (batch_size, n_classes) (one_hot encoding) + - Y_hat: (tf tensor) shape=(batch_size, n_classes) predicted class (one_hot) + - loss: (tf scalar) reduced mean of cost computed with softmax cross entropy with logits + - train_op: gradient descent algorithm with AdamOptimizer + + """ + def __init__( + + self, n_H, n_W, n_C, sizes, + lr=LEARNING_RATE, beta1=BETA1, + 
batch_size=BATCH_SIZE, epochs=EPOCHS, + save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed = SEED + ): + + self.seed = seed + + self.n_classes = sizes['n_classes'] + self.n_H = n_H + self.n_W = n_W + self.n_C = n_C + + self.conv_sizes = sizes + + self.batch_sz=tf.placeholder( + tf.int32, + shape=(), + name='batch_sz', + ) + + self.X_input = tf.placeholder( + tf.float32, + shape=(None, n_H, n_W, n_C), + name = 'X_input' + ) + + self.X_test = tf.placeholder( + tf.float32, + shape=(None, n_H, n_W, n_C), + name = 'X_test' + ) + + self.Y_input = tf.placeholder( + tf.float32, + shape=(None, self.n_classes), + name='Y_input' + ) + + self.Y_test = tf.placeholder( + tf.float32, + shape=(None, self.n_classes), + name='Y_train' + ) + + self.Y_hat = self.build_CNN(self.X_input, self.conv_sizes) + + #add regularization + #reg = 0 + + self.train_accuracy = evaluation(self.Y_hat, self.Y_input) + cost = tf.nn.softmax_cross_entropy_with_logits( + logits= self.Y_hat, + labels= self.Y_input + ) + + self.loss = tf.reduce_mean(cost) + + self.train_op = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1 + ).minimize(self.loss + ) + + + #convolve from input + with tf.variable_scope('convolutional') as scope: + scope.reuse_variables() + self.Y_hat_from_test = self.convolve( + self.X_test, reuse=True, is_training=False + ) + + self.test_accuracy = evaluation(self.Y_hat_from_test, self.Y_test) + + #saving for later + self.lr = lr + self.batch_size=batch_size + self.epochs = epochs + self.path = path + self.save_sample = save_sample + + + def build_CNN(self, X, conv_sizes): + + with tf.variable_scope('convolutional') as scope: + #keep track of dims for dense layers + mi = self.n_C + dim_W = self.n_W + dim_H = self.n_H + + self.conv_layers = [] + + count = 0 + + n = len(conv_sizes)-1 # count the number of layers leaving out n_classes key + + for key in conv_sizes: + if not 'block' in key: + print('Convolutional network architecture detected') + else: + print('Check network architecture') 
+ break + + #convolutional layers + for key in conv_sizes: + if 'conv' in key: + + mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = conv_sizes[key][0] + + name = 'conv_layer_{0}'.format(count) + count += 1 + + layer = ConvLayer(name, + mi, mo, filter_sz, stride, + apply_batch_norm, keep_prob, + f=act_f, w_init=w_init + ) + self.conv_layers.append(layer) + + mi=mo + + dim_W = int(np.ceil(float(dim_W) / stride)) + dim_H = int(np.ceil(float(dim_H) / stride)) + + if 'pool' in key: + count+=1 + if 'max' in key: + + filter_sz, stride, keep_prob = conv_sizes[key][0] + + layer = MaxPool2D(filter_sz, stride, keep_prob) + + dim_W = int(np.ceil(float(dim_W) / stride)) + dim_H = int(np.ceil(float(dim_H) / stride)) + + if 'avg' in key: + + filter_sz, stride, keep_prob =conv_sizes[key][0] + + layer = AvgPool2D(filter_sz, stride, keep_prob) + + dim_W = int(np.ceil(float(dim_W) / stride)) + dim_H = int(np.ceil(float(dim_H) / stride)) + + self.conv_layers.append(layer) + + #dense layers + mi = mi * dim_W * dim_H + self.dense_layers = [] + for mo, apply_batch_norm, keep_prob, act_f, w_init in conv_sizes['dense_layers']: + + name = 'dense_layer_{0}'.format(count) + count += 1 + + layer = DenseLayer(name,mi, mo, + apply_batch_norm, keep_prob, + act_f, w_init) + mi = mo + self.dense_layers.append(layer) + + #readout layer + readout_w_init = conv_sizes['readout_w_init'] + + readout_layer = DenseLayer('readout_layer', + mi, self.n_classes, + False, 1, lambda x: x, + readout_w_init) + + self.dense_layers.append(readout_layer) + + return self.convolve(X) + + def convolve(self, X, reuse=None, is_training=True): + + print('Convolution') + print('Input for convolution', X.get_shape()) + + output=X + i=0 + for layer in self.conv_layers: + i+=1 + output = layer.forward(output, reuse, is_training) + #print('After convolution', i) + #print(output.get_shape()) + + + #print('After convolution shape', output.get_shape()) + + output = tf.contrib.layers.flatten(output) + + 
#print('After flatten shape', output.get_shape()) + for layer in self.dense_layers: + + i+=1 + output = layer.forward(output, reuse, is_training) + #print('After dense layer {0}, shape {1}'.format(i, output.get_shape())) + + print('Logits shape', output.get_shape()) + + return output + + def set_session(self, session): + + self.session=session + + for layer in self.conv_layers: + layer.set_session(session) + + def fit(self, X_train, Y_train, X_test, Y_test): + + """ + + Function is called if the flag is_training is set on TRAIN. If a model already is present + continues training the one already present, otherwise initialises all params from scratch. + + Performs the training over all the epochs, at when the number of epochs of training + is a multiple of save_sample prints out training cost, train and test accuracies + + Plots a plot of the cost versus epoch. + + Positional arguments: + + - X_train: (ndarray) size=(train set size, input features) training sample set + - X_test: (ndarray) size=(test set size, input features) test sample set + + - Y_train: (ndarray) size=(train set size, input features) training labels set + - Y_test: (ndarray) size=(test set size, input features) test labels set + + """ + seed = self.seed + + N = X_train.shape[0] + test_size = X_test.shape[0] + + n_batches = N // self.batch_size + + print('\n ****** \n') + print('Training CNN for '+str(self.epochs)+' epochs with a total of ' +str(N)+ ' samples\ndistributed in ' +str(n_batches)+ ' batches of size '+str(self.batch_size)+'\n') + print('The learning rate set is '+str(self.lr)) + print('\n ****** \n') + + costs = [] + for epoch in range(self.epochs): + seed += 1 + + train_batches = supervised_random_mini_batches(X_train, Y_train, self.batch_size, seed) + test_batches = supervised_random_mini_batches(X_test, Y_test, self.batch_size, seed) + train_acc = 0 + test_acc =0 + train_accuracies=[] + test_accuracies=[] + + for train_batch in train_batches[:-1]: + + (X_train_batch, Y_train_batch) = 
train_batch + feed_dict = { + + self.X_input: X_train_batch, + self.Y_input: Y_train_batch, + self.batch_sz: self.batch_size, + + } + + _, c = self.session.run( + (self.train_op, self.loss), + feed_dict=feed_dict + ) + + train_acc = self.session.run( + + self.train_accuracy, + + feed_dict={self.X_input:X_train_batch, + self.Y_input:Y_train_batch + } + + ) + + + c /= self.batch_size + + train_accuracies.append(train_acc) + costs.append(c) + + train_acc = np.array(train_accuracies).mean() + #model evaluation + if epoch % self.save_sample ==0: + + + for i, test_batch in enumerate(test_batches[:-1]): + + (X_test_batch, Y_test_batch) = test_batch + feed_dict={ + self.X_test: X_test_batch, + self.Y_test: Y_test_batch, + self.batch_sz: self.batch_size, + + } + test_acc = self.session.run( + self.test_accuracy, + feed_dict=feed_dict + + ) + + test_accuracies.append(test_acc) + + + test_acc = np.array(test_accuracies).mean() + print('Evaluating performance on train/test sets') + print('At epoch {0}, train cost: {1:.4g}, train accuracy {2:.4g}'.format(epoch, c, train_acc)) + print('test accuracy {0:.4g}'.format(test_acc)) + + + plt.plot(costs) + plt.ylabel('cost') + plt.xlabel('iteration') + plt.title('learning rate=' + str(self.lr)) + plt.show() + + print('Parameters trained') + + #get samples at test time + + def predict(self, X): + + pred = tf.nn.softmax(self.Y_hat_from_test) + output = self.session.run( + pred, + feed_dict={self.X_test:X} + ) + return output \ No newline at end of file diff --git a/architectures/DAE.py b/architectures/DAE.py new file mode 100644 index 0000000..f073abc --- /dev/null +++ b/architectures/DAE.py @@ -0,0 +1,331 @@ +#NETWORK ARCHITECTURES + +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + + +from architectures.utils.NN_building_blocks import * +from architectures.utils.NN_gen_building_blocks import * + + +def lrelu(x, alpha=0.1): + return 
tf.maximum(alpha * x, x)

# some dummy constants (placeholders; real values are passed in by the caller)
LEARNING_RATE = None
LEARNING_RATE_D = None
LEARNING_RATE_G = None
BETA1 = None
BATCH_SIZE = None
EPOCHS = None
SAVE_SAMPLE_PERIOD = None
PATH = None
SEED = None
rnd_seed = 1


# tested on mnist
class DAE(object):

    """
    Densely connected deep autoencoder. Regularization is implemented with
    dropout (per-layer keep probabilities); no weight-decay parameter yet.
    Minimization through AdamOptimizer (adaptive learning rate).

    The minimized loss function is the mean of the sigmoid cross entropy
    with logits over all the batch samples.

    Constructor inputs:

    - Positional arguments:
        - dim: (int) number of input features
        - e_sizes: (dict) encoder layer spec; must contain 'z' (latent size),
          'dense_layers', 'last_act_f', 'last_w_init'
        - d_sizes: (dict) decoder layer spec; must contain 'dense_layers',
          'last_act_f', 'last_w_init'

    - Keyword arguments:
        - an_id: (int) identifier, useful for stacked autoencoders
        - lr: (float32) learning rate for the AdamOptimizer
        - beta1: (float32) beta1 for the AdamOptimizer
        - batch_size: (int) size of each mini-batch
        - epochs: (int) number of passes over the training set
        - save_sample: (int) print the training cost every save_sample epochs
        - path: (str) path for saving the session checkpoint
        - seed: (int) seed for mini-batch shuffling
        - img_height, img_width: (int) optional image dims (bookkeeping only)

    Class attributes:

    - X: (tf placeholder) input tensor, shape (batch_size, dim)
    - Z: (tf tensor) latent code produced by the encoder
    - X_hat: (tf tensor) reconstruction (decoder output through last_act_f)
    - loss: (tf scalar) mean sigmoid cross entropy with logits
    - train_op: Adam gradient-descent step
    """

    def __init__(
        self, dim, e_sizes, d_sizes, an_id=0,
        lr=LEARNING_RATE, beta1=BETA1,
        batch_size=BATCH_SIZE, epochs=EPOCHS,
        save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED,
        img_height=None, img_width=None
    ):

        self.dim = dim
        self.an_id = an_id
        self.latent_dims = e_sizes['z']
        self.e_sizes = e_sizes
        self.d_sizes = d_sizes
        self.d_last_act_f = d_sizes['last_act_f']
        self.seed = seed

        self.img_height = img_height
        self.img_width = img_width

        self.X = tf.placeholder(
            tf.float32,
            shape=(None, self.dim),
            name='X'
        )

        self.batch_sz = tf.placeholder(
            tf.float32,
            shape=(),
            name='batch_sz'
        )

        # build the graph: encode, decode, reconstruct
        self.Z = self.build_encoder(self.X, self.e_sizes)

        logits = self.build_decoder(self.Z, self.d_sizes)

        self.X_hat = self.d_last_act_f(logits)

        # loss is computed on the raw logits, as required by
        # sigmoid_cross_entropy_with_logits
        cost = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.X,
            logits=logits
        )

        self.loss = tf.reduce_mean(cost)

        self.train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1
        ).minimize(self.loss)

        # test time

        self.X_input = tf.placeholder(
            tf.float32,
            shape=(None, self.dim),
            name='X_input'
        )

        # encode at test time, reusing the training weights
        with tf.variable_scope('encoder') as scope:
            scope.reuse_variables()
            self.Z_input = self.encode(
                self.X_input, reuse=True, is_training=False
            )

        # decode from the encoded input at test time
        with tf.variable_scope('decoder') as scope:
            scope.reuse_variables()
            X_decoded = self.decode(
                self.Z_input, reuse=True, is_training=False
            )
            self.X_decoded = tf.nn.sigmoid(X_decoded)

        # saving for later
        self.lr = lr
        self.batch_size = batch_size
        self.epochs = epochs
        self.path = path
        self.save_sample = save_sample

    def build_encoder(self, X, e_sizes):
        """Build the dense encoder layers and return the latent code for X."""

        with tf.variable_scope('encoder') as scope:

            mi = self.dim  # fan-in of the first layer

            self.e_layers = []

            count = 0
            for mo, apply_batch_norm, keep_prob, act_f, w_init in e_sizes['dense_layers']:

                name = 'layer_{0}'.format(count)
                count += 1

                layer = DenseLayer(name, mi, mo,
                                   apply_batch_norm, keep_prob,
                                   act_f, w_init
                                   )

                self.e_layers.append(layer)
                mi = mo

            e_last_act_f = e_sizes['last_act_f']
            e_last_w_init = e_sizes['last_w_init']

            name = 'layer_{0}'.format(count)

            # final projection down to the latent space
            last_enc_layer = DenseLayer(name, mi, self.latent_dims, False, 1,
                                        act_f=e_last_act_f,
                                        w_init=e_last_w_init
                                        )

            self.e_layers.append(last_enc_layer)

            return self.encode(X)

    def encode(self, X, reuse=None, is_training=True):
        """Forward X through the encoder layers; returns the latent code."""
        Z = X
        for layer in self.e_layers:
            Z = layer.forward(Z, reuse, is_training)
        return Z

    def build_decoder(self, Z, d_sizes):
        """Build the dense decoder layers and return raw logits for Z."""

        with tf.variable_scope('decoder') as scope:

            mi = self.latent_dims

            self.d_layers = []
            count = 0
            for mo, apply_batch_norm, keep_prob, act_f, w_init in d_sizes['dense_layers']:

                name = 'layer_{0}'.format(count)
                count += 1

                layer = DenseLayer(name, mi, mo,
                                   apply_batch_norm, keep_prob,
                                   act_f, w_init
                                   )

                self.d_layers.append(layer)
                mi = mo

            name = 'layer_{0}'.format(count)

            d_last_w_init = d_sizes['last_w_init']
            # identity activation: the loss needs unactivated logits
            last_dec_layer = DenseLayer(name, mi, self.dim, False, 1,
                                        act_f=lambda x: x, w_init=d_last_w_init
                                        )

            self.d_layers.append(last_dec_layer)

            return self.decode(Z)

    def decode(self, Z, reuse=None, is_training=True):
        """Forward Z through the decoder layers; returns raw logits."""
        X = Z
        for layer in self.d_layers:
            X = layer.forward(X, reuse, is_training)
        return X

    def set_session(self, session):
        """Attach a tf.Session to this model and to every layer."""
        self.session = session

        for layer in self.d_layers:
            layer.set_session(self.session)

        for layer in self.e_layers:
            layer.set_session(self.session)

    def fit(self, X):
        """
        Train the autoencoder on X for self.epochs epochs.

        Every self.save_sample epochs prints the (per-sample) cost of the
        last mini-batch; at the end plots cost versus iteration.

        Positional arguments:

        - X: (ndarray) size=(train set size, input features) training set
        """

        seed = self.seed

        costs = []
        N = len(X)
        n_batches = N // self.batch_size

        total_iters = 0

        print('\n ****** \n')
        print('Training deep AE with a total of ' + str(N) + ' samples distributed in batches of size ' + str(self.batch_size) + '\n')
        print('The learning rate set is ' + str(self.lr) + ', and every ' + str(self.save_sample) + ' epochs the training cost will be printed')
        print('\n ****** \n')

        for epoch in range(self.epochs):

            seed += 1

            batches = unsupervised_random_mini_batches(X, self.batch_size, seed)

            for X_batch in batches:

                feed_dict = {
                    self.X: X_batch, self.batch_sz: self.batch_size
                }

                _, c = self.session.run(
                    (self.train_op, self.loss),
                    feed_dict=feed_dict
                )

                # FIX: normalize by the actual batch length — the final
                # mini-batch may be smaller than self.batch_size
                c /= len(X_batch)
                costs.append(c)

            if epoch % self.save_sample == 0:
                print('At epoch %d, cost: %f' % (epoch, c))

        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iteration')
        plt.title('learning rate=' + str(self.lr))
        plt.show()

        print('Parameters trained')

    def get_sample(self, X):
        """
        Input X

        takes an X, encodes and then decodes it reproducing a X_hat

        Outputs X_hat
        """
        return self.session.run(
            self.X_decoded, feed_dict={self.X_input: X, self.batch_sz: 1}
        )

# --- patch boundary: architectures/DCGAN.py (new file in this patch) ---

import numpy as np
import os
import math

import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime

from architectures.utils.NN_building_blocks import *
from architectures.utils.NN_gen_building_blocks import *

def lrelu(x, alpha=0.2):
    return tf.maximum(alpha * x, x)

# some dummy constants
LEARNING_RATE = None
BETA1 = None
COST_TYPE = None
BATCH_SIZE = None
EPOCHS = None
SAVE_SAMPLE_PERIOD = None
PATH = None
SEED = None
rnd_seed = 1
PREPROCESS = None
LAMBDA = .01
EPS = 1e-6

DISCR_STEPS = None
GEN_STEPS = None

min_true = None
max_true = None

min_reco = None
max_reco = None

n_H = None
n_W = None
n_C = None

d_sizes = None
g_sizes = None


# tested on mnist
class DCGAN(object):

    def __init__(
        self,
        n_H=n_H, n_W=n_W, n_C=n_C,  # FIX: was n_W=n_H (typo; both were None, but the default was wrong)
        d_sizes=d_sizes, g_sizes=g_sizes,
        lr=LEARNING_RATE, beta1=BETA1, preprocess=PREPROCESS,
        cost_type=COST_TYPE,
        discr_steps=DISCR_STEPS, gen_steps=GEN_STEPS,
        batch_size=BATCH_SIZE, epochs=EPOCHS,
        save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED,
    ):

        """
        Deep convolutional GAN.

        Positional arguments:

        - n_H, n_W: height/width of the input image
        - n_C: number of channels of the input image
        - d_sizes: discriminator spec, a python dict of the kind

            d_sizes = {'conv_layers': [(n_c+1, kernel, stride, apply_batch_norm,
                                        weight initializer, act_f), ...],
                       'dense_layers': [(n_o, apply_bn, weight_init, act_f)]}

        - g_sizes: generator spec, a python dict of the kind

            g_sizes = {'z': latent_space_dim,
                       'projection': int,
                       'bn_after_project': bool,
                       'conv_layers': [...],
                       'dense_layers': [...],
                       'activation': function}

        Keyword arguments:

        - lr = LEARNING_RATE (float32)
        - beta1 = ema parameter for adam opt (float32)
        - batch_size (int)
        - save_sample = after how many batch iterations to save a sample (int)
        - path = relative path for saving samples
        """

        self.n_H = n_H
        self.n_W = n_W
        self.n_C = n_C
        self.seed = seed
        self.latent_dims = g_sizes['z']

        # input data

        self.X = tf.placeholder(
            tf.float32,
            shape=(None, n_H, n_W, n_C),
            name='X',
        )

        self.Z = tf.placeholder(
            tf.float32,
            shape=(None, self.latent_dims),
            name='Z'
        )

        self.batch_sz = tf.placeholder(
            tf.int32,
            shape=(),
            name='batch_sz'
        )

        D = Discriminator(self.X, d_sizes, 'A')
        G = Generator(self.Z, self.n_H, self.n_W, g_sizes, 'A')

        with tf.variable_scope('discriminator_A') as scope:
            logits_real = D.d_forward(self.X)

        with tf.variable_scope('generator_A') as scope:
            sample_images = G.g_forward(self.Z)

        # get sample logits, reusing the discriminator weights
        with tf.variable_scope('discriminator_A') as scope:
            scope.reuse_variables()
            logits_fake = D.d_forward(sample_images, reuse=True)

        # get sample images for test time
        with tf.variable_scope('generator_A') as scope:
            scope.reuse_variables()
            self.sample_images_test = G.g_forward(
                self.Z, reuse=True, is_training=False
            )

        predicted_real = tf.nn.sigmoid(logits_real)
        predicted_fake = tf.nn.sigmoid(logits_fake)

        # parameter lists, selected by variable-scope name prefix
        self.d_params = [t for t in tf.trainable_variables() if t.name.startswith('d')]
        self.g_params = [t for t in tf.trainable_variables() if t.name.startswith('g')]

        # using some label smoothing
        epsilon = 1e-3
        if cost_type == 'GAN':

            # Discriminator cost
            self.d_cost_real = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_real,
                labels=(1 - epsilon) * tf.ones_like(logits_real)
            )

            self.d_cost_fake = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake,
                labels=epsilon + tf.zeros_like(logits_fake)
            )

            self.d_cost = tf.reduce_mean(self.d_cost_real) + tf.reduce_mean(self.d_cost_fake)

            # Generator cost
            self.g_cost = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake,
                    labels=(1 - epsilon) * tf.ones_like(logits_fake)
                )
            )

        self.d_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
        ).minimize(
            self.d_cost,
            var_list=self.d_params
        )

        self.g_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
        ).minimize(
            self.g_cost,
            var_list=self.g_params
        )

        # Measure accuracy of the discriminator
        real_predictions = tf.cast(predicted_real > 0.5, tf.float32)
        fake_predictions = tf.cast(predicted_fake < 0.5, tf.float32)

        num_predictions = 2.0 * batch_size
        num_correct = tf.reduce_sum(real_predictions) + tf.reduce_sum(fake_predictions)

        self.d_accuracy = num_correct / num_predictions

        # saving for later
        self.cost_type = cost_type
        self.batch_size = batch_size
        self.epochs = epochs
        self.save_sample = save_sample
        self.path = path
        self.lr = lr
        self.D = D
        self.G = G
        self.discr_steps = discr_steps
        self.gen_steps = gen_steps

    def set_session(self, session):
        """Attach a tf.Session to this model and to every D/G layer."""
        self.session = session

        for layer in self.D.d_conv_layers:
            layer.set_session(session)

        for layer in self.D.d_dense_layers:
            layer.set_session(session)

        for layer in self.G.g_conv_layers:
            layer.set_session(session)

        for layer in self.G.g_dense_layers:
            layer.set_session(session)

    def fit(self, X):
        """
        Train the GAN on X; every save_sample iterations saves a 4x4 grid of
        generated samples and a cost-vs-iteration plot to self.path.
        """
        seed = self.seed
        d_costs = []
        g_costs = []

        discr_steps = self.discr_steps
        gen_steps = self.gen_steps

        N = len(X)
        n_batches = N // self.batch_size

        total_iters = 0

        print('\n ****** \n')
        print('Training DCGAN with a total of ' + str(N) + ' samples distributed in batches of size ' + str(self.batch_size) + '\n')
        print('The learning rate is ' + str(self.lr) + ', and every ' + str(self.save_sample) + ' epoch a generated sample will be saved to ' + self.path)
        print('\n ****** \n')

        for epoch in range(self.epochs):

            seed += 1

            print('Epoch:', epoch)

            batches = unsupervised_random_mini_batches(X, self.batch_size, seed)

            for X_batch in batches:

                bs = X_batch.shape[0]

                t0 = datetime.now()

                np.random.seed(seed)

                # FIX: accumulate into a separate variable. The original did
                # `_, d_cost, ... = session.run(...); d_cost += d_cost`, which
                # overwrites the accumulator each step and just doubles the
                # last step's cost before dividing by discr_steps.
                d_cost_total = 0
                for i in range(discr_steps):

                    Z = np.random.normal(size=(bs, self.latent_dims))

                    _, d_cost, d_acc = self.session.run(
                        (self.d_train_op, self.d_cost, self.d_accuracy),
                        feed_dict={self.X: X_batch, self.Z: Z, self.batch_sz: bs},
                    )
                    d_cost_total += d_cost

                d_costs.append(d_cost_total / discr_steps)

                # train the generator, averaging the costs if the
                # discriminator learns too fast (same accumulator fix)
                g_cost_total = 0
                for i in range(gen_steps):

                    Z = np.random.normal(size=(bs, self.latent_dims))
                    _, g_cost = self.session.run(
                        (self.g_train_op, self.g_cost),
                        feed_dict={self.X: X_batch, self.Z: Z, self.batch_sz: bs},
                    )
                    g_cost_total += g_cost

                g_costs.append(g_cost_total / gen_steps)

                total_iters += 1
                if total_iters % self.save_sample == 0:
                    print("At iter: %d - dt: %s - d_acc: %.4f" % (total_iters, datetime.now() - t0, d_acc))
                    print('Saving a sample...')

                    np.random.seed(seed)
                    Z = np.random.normal(size=(16, self.latent_dims))

                    samples = self.sample(Z)  # shape is (16, H, W, C)

                    w = self.n_W
                    h = self.n_H
                    # collapse channels for visualization
                    samples = np.sum(samples, axis=3)
                    samples = samples.reshape(16, h, w)

                    for i in range(16):
                        plt.subplot(4, 4, i + 1)
                        plt.imshow(samples[i].reshape(h, w))
                        plt.subplots_adjust(wspace=0.2, hspace=0.2)
                        plt.axis('off')

                    fig = plt.gcf()
                    fig.set_size_inches(5, 5)
                    plt.savefig(self.path + '/samples_at_iter_%d.png' % total_iters, dpi=300)

                    plt.clf()

                    plt.plot(d_costs, label='Discriminator cost')
                    plt.plot(g_costs, label='Generator cost')
                    plt.legend()

                    fig = plt.gcf()
                    fig.set_size_inches(10, 5)
                    plt.savefig(self.path + '/cost vs iteration.png', dpi=300)

    def sample(self, Z):
        """Generate a batch of images from latent codes Z."""
        samples = self.session.run(
            self.sample_images_test,
            feed_dict={self.Z: Z, self.batch_sz: self.batch_size})

        return samples

    def get_sample(self, Z):
        """Generate a single image from one latent code Z."""
        one_sample = self.session.run(
            self.sample_images_test,
            feed_dict={self.Z: Z, self.batch_sz: 1})

        return one_sample

# --- patch boundary: architectures/DCVAE.py (new file in this patch) ---
100644 index 0000000..e6e36bc --- /dev/null +++ b/architectures/DCVAE.py @@ -0,0 +1,240 @@ +#NETWORK ARCHITECTURES + +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + +from architectures.utils.NN_building_blocks import * +from architectures.utils.NN_gen_building_blocks import * + +def lrelu(x, alpha=0.1): + return tf.maximum(alpha*x,x) + +# some dummy constants +LEARNING_RATE = None +LEARNING_RATE_D = None +LEARNING_RATE_G = None +BETA1 = None +BATCH_SIZE = None +EPOCHS = None +SAVE_SAMPLE_PERIOD = None +PATH = None +SEED = None +rnd_seed=1 + +#tested on mnist +class DCVAE(object): + + def __init__(self, n_H, n_W, n_C, e_sizes, d_sizes, + lr=LEARNING_RATE, beta1=BETA1, + batch_size=BATCH_SIZE, epochs=EPOCHS, + save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed = SEED): + + #size of every layer in the encoder + #up to the latent layer, decoder + #will have reverse shape + self.n_H = n_H + self.n_W = n_W + self.n_C = n_C + self.seed = seed + self.e_sizes = e_sizes + self.d_sizes = d_sizes + self.latent_dims = e_sizes['latent_dims'] + + self.x = tf.placeholder( + tf.float32, + shape=(None, n_H, n_W, n_C), + name='x' + ) + + self.z_test = tf.placeholder( + tf.float32, + shape=(None, self.latent_dims), + name='z_test' + ) + + self.batch_sz = tf.placeholder( + tf.int32, + shape=(), + name='batch_sz' + ) + + + E = convEncoder(self.x, e_sizes, 'E') + + with tf.variable_scope('encoder_E') as scope: + z_encoded, z_mu, z_log_sigma = E.e_forward(self.x) + + D = convDecoder(z_encoded, self.n_H, self.n_W, d_sizes, 'D') + + with tf.variable_scope('decoder_D') as scope: + self.x_hat = D.d_forward(z_encoded) + + + with tf.variable_scope('decoder_D') as scope: + scope.reuse_variables() + self.x_hat_test = D.d_forward( + self.z_test, reuse=True, is_training=False + ) + + #Loss: + #Reconstruction loss + #minimise the cross-entropy loss + # H(x, x_hat) = -\Sigma ( x*log(x_hat) + (1-x)*log(1-x_hat) ) + 
epsilon=1e-10 + + recon_loss = -tf.reduce_sum( + -tf.squared_difference(self.x,self.x_hat), + #self.x*tf.log(epsilon+self.x_hat) + (1-self.x)*tf.log(epsilon + 1 -self.x_hat), + axis=[1,2,3] + ) + + + self.recon_loss=tf.reduce_mean(recon_loss) + + #KL divergence loss + # Kullback Leibler divergence: measure the difference between two distributions + # Here we measure the divergence between the latent distribution and N(0, 1) + + kl_loss= -0.5 * tf.reduce_sum( + 1 + 2*z_log_sigma - (tf.square(z_mu) + tf.exp(2*z_log_sigma)), + axis=[1] + ) + + self.kl_loss=tf.reduce_mean(kl_loss) + + self.total_loss=tf.reduce_mean(self.kl_loss+10*self.recon_loss) + + self.train_op = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + ).minimize(self.total_loss) + + + #saving for later + self.lr = lr + self.batch_size=batch_size + self.epochs = epochs + self.path = path + self.save_sample = save_sample + + self.E=E + self.D=D + + def set_session(self, session): + + self.session = session + + for layer in self.E.e_conv_layers: + layer.set_session(session) + for layer in self.E.e_dense_layers: + layer.set_session(session) + + for layer in self.D.d_dense_layers: + layer.set_session(session) + for layer in self.D.d_deconv_layers: + layer.set_session(session) + + def fit(self, X): + + seed = self.seed + + total_loss = [] + rec_losses=[] + kl_losses=[] + + N = len(X) + n_batches = N // self.batch_size + + print('\n ****** \n') + print('Training deep convolutional VAE with a total of ' +str(N)+' samples distributed in batches of size '+str(self.batch_size)+'\n') + print('The learning rate set is '+str(self.lr)+', and every ' +str(self.save_sample)+ ' iterations a generated sample will be saved to '+ self.path) + print('\n ****** \n') + total_iters=0 + + for epoch in range(self.epochs): + + t0 = datetime.now() + print('Epoch: {0}'.format(epoch)) + + seed +=1 + + batches = unsupervised_random_mini_batches(X, self.batch_size, seed) + + for X_batch in batches: + + feed_dict = { + self.x: 
X_batch, self.batch_sz: self.batch_size + } + + _, l, rec_loss, kl_loss = self.session.run( + (self.train_op, self.total_loss, self.recon_loss, self.kl_loss), + feed_dict=feed_dict + ) + + l /= self.batch_size + rec_loss /= self.batch_size + kl_loss /= self.batch_size + + total_loss.append(l) + rec_losses.append(rec_loss) + kl_losses.append(kl_loss) + + total_iters += 1 + + if total_iters % self.save_sample ==0: + print("At iteration: %d - dt: %s - cost: %.2f" % (total_iters, datetime.now() - t0, l)) + print('Saving a sample...') + + + z_test= np.random.normal(size=(64, self.latent_dims)) + probs = self.sample(z_test) + + for i in range(64): + + plt.subplot(8,8,i+1) + plt.suptitle('samples' ) + plt.imshow(probs[i].reshape(28,28), cmap='gray') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + plt.axis('off') + + fig = plt.gcf() + fig.set_size_inches(4,4) + plt.savefig(self.path+'/samples_at_iter_%d.png' % total_iters,dpi=100) + + plt.clf() + plt.subplot(1,3,1) + plt.suptitle('learning rate=' + str(self.lr)) + plt.plot(total_loss, label='total_loss') + plt.ylabel('cost') + plt.legend() + + plt.subplot(1,3,2) + plt.plot(rec_losses, label='rec loss') + plt.legend() + + plt.subplot(1,3,3) + plt.plot(kl_losses, label='KL loss') + + plt.xlabel('iteration') + + plt.legend() + fig=plt.gcf() + + fig.set_size_inches(10,4) + plt.savefig(self.path+'/cost_vs_iteration.png',dpi=150) + + print('Parameters trained') + + def sample(self, Z): + + n=Z.shape[0] + samples = self.session.run( + self.x_hat_test, + feed_dict={self.z_test: Z, self.batch_sz: n} + ) + return samples diff --git a/architectures/DNN.py b/architectures/DNN.py new file mode 100644 index 0000000..e71440b --- /dev/null +++ b/architectures/DNN.py @@ -0,0 +1,315 @@ +#NETWORK ARCHITECTURES + +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + +from architectures.utils.NN_building_blocks import * + +def lrelu(x, alpha=0.1): + return 
tf.maximum(alpha * x, x)

# some dummy constants (placeholders; real values are passed in by the caller)
LEARNING_RATE = None
LEARNING_RATE_D = None
LEARNING_RATE_G = None
BETA1 = None
BATCH_SIZE = None
EPOCHS = None
SAVE_SAMPLE_PERIOD = None
PATH = None
SEED = None
rnd_seed = 1

# untested yet
class DNN(object):

    """
    Densely connected deep neural network classifier. Regularization is
    implemented with dropout; no weight-decay parameter yet. Minimization
    through AdamOptimizer (adaptive learning rate).

    Constructor inputs:

    - Positional arguments:
        - dim: (int) number of input features
        - sizes: (dict) python dictionary containing the size of the
          dense layers and the number of classes for classification

            sizes = {'dense_layers': [(mo, apply_batch_norm, keep_probability,
                                       act_f, w_init), ...],
                     'n_classes': n_classes}

          mo: (int) size of layer n
          apply_batch_norm: (bool) apply batch norm at layer n
          keep_probability: (float32) dropout keep probability at layer n
          act_f: (function) activation function for layer n
          w_init: (tf initializer) weight initializer for layer n
          n_classes: number of classes

    - Keyword arguments:
        - lr: (float32) learning rate for the AdamOptimizer
        - beta1: (float32) beta1 for the AdamOptimizer
        - batch_size: (int) size of each mini-batch
        - epochs: (int) number of passes over the training set
        - save_sample: (int) evaluate train/test accuracy every save_sample epochs
        - path: (str) path for saving the session checkpoint

    Class attributes:

    - X: (tf placeholder) input tensor of shape (batch_size, input features)
    - Y: (tf placeholder) label tensor of shape (batch_size, n_classes) (one-hot)
    - Y_hat: (tf tensor) shape=(batch_size, n_classes) raw class logits
    - loss: (tf scalar) mean softmax cross entropy with logits
    - train_op: gradient descent step with AdamOptimizer
    """

    def __init__(self,
                 dim, sizes,
                 lr=LEARNING_RATE, beta1=BETA1,
                 batch_size=BATCH_SIZE, epochs=EPOCHS,
                 save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED):

        self.seed = seed
        self.n_classes = sizes['n_classes']
        self.dim = dim

        self.sizes = sizes

        self.X = tf.placeholder(
            tf.float32,
            shape=(None, dim),
            name='X_data'
        )

        self.X_input = tf.placeholder(
            tf.float32,
            shape=(None, dim),
            name='X_input'
        )

        self.batch_sz = tf.placeholder(
            tf.int32,
            shape=(),
            name='batch_sz',
        )

        self.Y = tf.placeholder(
            tf.float32,
            shape=(None, self.n_classes),
            name='Y'
        )

        # FIX: was self.conv_sizes, an attribute that is never set on this
        # class (copy-paste from the CNN class) — AttributeError at runtime.
        self.Y_hat = self.build_NN(self.X, self.sizes)

        # softmax_cross_entropy_with_logits expects unscaled logits (see
        # readout-layer fix in build_NN)
        cost = tf.nn.softmax_cross_entropy_with_logits(
            logits=self.Y_hat,
            labels=self.Y
        )

        self.loss = tf.reduce_mean(cost)

        self.train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1
        ).minimize(self.loss)

        # forward pass from the test-time input, reusing trained weights
        with tf.variable_scope('classification') as scope:
            scope.reuse_variables()
            # FIX: was self.convolve(..., keep_prob=1), a method that does not
            # exist on DNN (and propagate takes no keep_prob argument)
            self.Y_hat_from_test = self.propagate(
                self.X_input, reuse=True, is_training=False
            )

        self.accuracy = evaluation(self.Y_hat_from_test, self.Y)

        # saving for later
        self.lr = lr
        self.batch_size = batch_size
        self.epochs = epochs
        self.path = path
        self.save_sample = save_sample

    def build_NN(self, X, sizes):
        """Build the dense layers + readout and return the logits for X."""

        with tf.variable_scope('classification') as scope:

            mi = self.dim
            self.dense_layers = []
            count = 0

            for mo, apply_batch_norm, keep_prob, act_f, w_init in sizes['dense_layers']:

                name = 'dense_layer_{0}'.format(count)
                count += 1

                layer = DenseLayer(name, mi, mo,
                                   apply_batch_norm, keep_prob,
                                   act_f, w_init)
                mi = mo
                self.dense_layers.append(layer)

            # readout layer
            # FIX: was tf.nn.softmax — the loss in __init__ uses
            # softmax_cross_entropy_with_logits, which applies softmax itself,
            # so the readout must emit raw logits (identity activation);
            # predicted_Y_hat applies softmax for inference.
            readout_layer = DenseLayer('readout_layer',
                                       mi, self.n_classes,
                                       False, 1, lambda x: x,
                                       tf.random_uniform_initializer(seed=self.seed))

            self.dense_layers.append(readout_layer)

            return self.propagate(X)

    def propagate(self, X, reuse=None, is_training=True):
        """Forward X through all dense layers; returns the logits."""

        print('Propagation')
        print('Input for propagation', X.get_shape())

        output = X

        for layer in self.dense_layers:
            # FIX: was `output.layer.forward(...)` — an AttributeError that
            # also never assigned the result; the layer output was discarded.
            output = layer.forward(output, reuse, is_training)

        print('Logits shape', output.get_shape())
        return output

    def set_session(self, session):
        """Attach a tf.Session to this model and to every layer."""
        # FIX: self.session was never stored, but fit() relies on it
        self.session = session

        for layer in self.dense_layers:
            layer.set_session(session)

    def fit(self, X_train, Y_train, X_test, Y_test):

        """
        Train the classifier over all epochs; every save_sample epochs prints
        the training cost and the train/test accuracies, then plots cost
        versus iteration.

        Positional arguments:

        - X_train: (ndarray) size=(train set size, input features) training sample set
        - X_test: (ndarray) size=(test set size, input features) test sample set

        - Y_train: (ndarray) size=(train set size, n_classes) training labels set
        - Y_test: (ndarray) size=(test set size, n_classes) test labels set
        """
        seed = self.seed

        N = X_train.shape[0]
        test_size = X_test.shape[0]

        n_batches = N // self.batch_size

        print('\n ****** \n')
        print('Training CNN for ' + str(self.epochs) + ' epochs with a total of ' + str(N) + ' samples\ndistributed in ' + str(n_batches) + ' batches of size ' + str(self.batch_size) + '\n')
        print('The learning rate set is ' + str(self.lr))
        print('\n ****** \n')

        costs = []
        for epoch in range(self.epochs):

            train_accuracies = []
            test_accuracies = []

            seed += 1

            train_batches = supervised_random_mini_batches(X_train, Y_train, self.batch_size, seed)
            test_batches = supervised_random_mini_batches(X_test, Y_test, self.batch_size, seed)

            for train_batch in train_batches:

                # FIX: was `(X_train, Y_train) = train_batch`, which clobbered
                # the full training arrays with a single mini-batch, corrupting
                # every subsequent epoch's re-batching.
                (X_batch, Y_batch) = train_batch

                feed_dict = {
                    self.X: X_batch,
                    self.Y: Y_batch,
                    self.batch_sz: self.batch_size,
                }

                _, c = self.session.run(
                    (self.train_op, self.loss),
                    feed_dict=feed_dict
                )

                train_acc = self.session.run(
                    self.accuracy, feed_dict={self.X_input: X_batch, self.Y: Y_batch}
                )

                c /= self.batch_size
                costs.append(c)
                train_accuracies.append(train_acc)

            train_acc = np.array(train_accuracies).mean()

            # model evaluation
            if epoch % self.save_sample == 0:

                for test_batch in test_batches:

                    (X_test_batch, Y_test_batch) = test_batch

                    feed_dict = {
                        self.X_input: X_test_batch,
                        self.Y: Y_test_batch,
                    }

                    test_acc = self.session.run(
                        self.accuracy,
                        feed_dict=feed_dict
                    )

                    test_accuracies.append(test_acc)

                test_acc = np.array(test_accuracies).mean()

                print('Evaluating performance on train/test sets')
                print('At iteration {0}, train cost: {1:.4g}, train accuracy {2:.4g}'.format(epoch, c, train_acc))
                print('test accuracy {0:.4g}'.format(test_acc))

        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iteration')
        plt.title('learning rate=' + str(self.lr))
        plt.show()

        print('Parameters trained')

    def predicted_Y_hat(self, X):
        """Return softmax class probabilities for input samples X."""
        pred = tf.nn.softmax(self.Y_hat_from_test)
        output = self.session.run(
            pred,
            feed_dict={self.X_input: X}
        )
        return output

# --- patch boundary: architectures/DVAE.py (new file in this patch) ---
# NETWORK ARCHITECTURES

import numpy as np
import os
import math

import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime

from architectures.utils.NN_building_blocks import *
from architectures.utils.NN_gen_building_blocks import *

def lrelu(x, alpha=0.1):
    return tf.maximum(alpha * x, x)

# some dummy constants
LEARNING_RATE = None
LEARNING_RATE_D = None
LEARNING_RATE_G = None
BETA1 = None
BATCH_SIZE = None
EPOCHS = None
SAVE_SAMPLE_PERIOD = None
PATH = None
SEED = None
#tested on mnist
class DVAE(object):

    """
    Densely connected deep variational autoencoder (TF1 graph-mode).

    Builds the full computation graph at construction time: a dense encoder E
    mapping x -> (z, mu, log_sigma), a dense decoder D mapping z -> logits,
    and the ELBO loss (Bernoulli cross-entropy reconstruction + KL to N(0, I)),
    minimized with AdamOptimizer.

    Constructor inputs:

    -Positional arguments:
        - dim: (int) number of input features per sample
        - e_sizes: (dict) encoder layer spec; must contain key 'z' (latent dims)
        - d_sizes: (dict) decoder layer spec

    -Keyword arguments:
        - an_id: (int) identifier, useful for stacked autoencoders
        - lr: (float32) learning rate for AdamOptimizer
        - beta1: (float32) beta1 for AdamOptimizer
        - batch_size: (int) minibatch size used in fit()
        - epochs: (int) number of passes over the data in fit()
        - save_sample: (int) every save_sample iterations fit() saves samples/plots
        - path: (str) directory where samples and cost plots are written
        - seed: (int) seed for minibatch shuffling
        - img_height, img_width: (int) only used to reshape samples for plotting

    Class attributes (set in __init__):
        - x: (tf placeholder) input, shape (None, dim)
        - z_test: (tf placeholder) latent input for test-time sampling
        - x_hat: (tf tensor) sigmoid reconstruction of x from the encoded z
        - x_hat_test: (tf tensor) decoder output for z_test (reused weights)
        - recon_loss, kl_loss, total_loss: (tf scalars) ELBO components
        - train_op: Adam minimization of total_loss

    Class methods:
        - set_session: attach a tf.Session to this model and its layers
        - fit: train over all epochs, periodically saving samples and cost plots
        - get_samples / get_sample: decode z ~ N(0, I) draws into data space
    """

    def __init__(
        self, dim, e_sizes, d_sizes, an_id=0,
        lr=LEARNING_RATE, beta1=BETA1,
        batch_size=BATCH_SIZE, epochs=EPOCHS,
        save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED, img_height=None, img_width=None
        ):
        """Build the full encoder/decoder graph and the ELBO training op."""
        self.dim = dim
        self.e_sizes=e_sizes
        self.d_sizes=d_sizes
        self.img_height=img_height
        self.img_width=img_width
        self.seed = seed

        # latent dimensionality comes from the encoder spec
        self.latent_dims=e_sizes['z']

        self.x = tf.placeholder(
            tf.float32,
            shape=(None, self.dim),
            name='x'
        )

        # latent input used only at test time, for sampling from the prior
        self.z_test = tf.placeholder(
            tf.float32,
            shape=(None, self.latent_dims),
            name='z_test'
        )

        self.batch_sz = tf.placeholder(
            tf.float32,
            shape=(),
            name='batch_sz'
        )

        E = denseEncoder(self.x, e_sizes, 'E')

        with tf.variable_scope('encoder_E') as scope:
            # reparameterized sample plus its Gaussian parameters
            z_encoded, z_mu, z_log_sigma = E.e_forward(self.x)

        D = denseDecoder(z_encoded, dim, d_sizes, 'D')

        with tf.variable_scope('decoder_D') as scope:
            # decoder returns logits; sigmoid applied below
            sample_from_encoded = D.d_forward(z_encoded)

        self.x_hat=tf.sigmoid(sample_from_encoded)

        with tf.variable_scope('decoder_D') as scope:
            scope.reuse_variables()
            # same decoder weights, driven by the externally supplied z_test
            self.x_hat_test = tf.nn.sigmoid(D.d_forward(
                self.z_test, reuse=True, is_training=False
            ))

        #Loss:
        #Reconstruction loss: Bernoulli cross-entropy per feature, summed over
        #features; epsilon guards the logs against log(0).
        # H(x, x_hat) = -\Sigma ( x*log(x_hat) + (1-x)*log(1-x_hat) )
        epsilon=1e-6

        recon_loss = -tf.reduce_sum(
            self.x*tf.log(epsilon+self.x_hat) + (1-self.x)*tf.log(epsilon + 1 -self.x_hat),
            axis=[1]
        )

        self.recon_loss=tf.reduce_mean(recon_loss)

        #KL divergence between the encoder posterior N(z_mu, exp(z_log_sigma)^2)
        #and the standard normal prior N(0, I), in log-sigma parameterization:
        # -0.5 * sum(1 + 2*log_sigma - mu^2 - sigma^2)
        kl_loss= -0.5 * tf.reduce_sum(
            1 + 2*z_log_sigma - (tf.square(z_mu) + tf.exp(2*z_log_sigma)),
            axis=[1]
        )

        self.kl_loss=tf.reduce_mean(kl_loss)

        # NOTE(review): kl_loss and recon_loss are already scalars here, so the
        # outer reduce_mean is a no-op; harmless but redundant.
        self.total_loss=tf.reduce_mean(self.kl_loss+self.recon_loss)

        self.train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
        ).minimize(self.total_loss)

        #saving for later
        self.lr = lr
        self.batch_size=batch_size
        self.epochs = epochs
        self.path = path
        self.save_sample = save_sample

        self.E=E
        self.D=D

    def set_session(self, session):
        """Attach a tf.Session to the model and to every encoder/decoder layer."""
        self.session = session

        for layer in self.D.d_layers:
            layer.set_session(self.session)

        for layer in self.E.e_layers:
            layer.set_session(self.session)

    def fit(self, X):

        """
        Train the VAE on X for self.epochs passes.

        Shuffles X into minibatches each epoch (reseeded per epoch so the
        shuffle differs), runs one Adam step per batch, and every
        self.save_sample iterations saves an 8x8 grid of prior samples plus
        total/reconstruction/KL cost curves to self.path.

        Positional arguments:

        - X: (ndarray) size=(train set size, input features) training sample set
        """

        seed = self.seed

        total_loss = []
        rec_losses=[]
        kl_losses=[]

        N = len(X)
        n_batches = N // self.batch_size

        print('\n ****** \n')
        print('Training deep VAE with a total of ' +str(N)+' samples distributed in batches of size '+str(self.batch_size)+'\n')
        print('The learning rate set is '+str(self.lr)+', and every ' +str(self.save_sample)+ ' iterations a generated sample will be saved to '+ self.path)
        print('\n ****** \n')
        total_iters=0

        for epoch in range(self.epochs):

            t0 = datetime.now()
            print('Epoch: {0}'.format(epoch))

            # new seed each epoch -> different minibatch shuffle
            seed+=1

            batches = unsupervised_random_mini_batches(X, self.batch_size, seed)

            for X_batch in batches:

                feed_dict = {
                    self.x: X_batch, self.batch_sz: self.batch_size
                }

                _, l, rec_loss, kl_loss = self.session.run(
                    (self.train_op, self.total_loss, self.recon_loss, self.kl_loss),
                    feed_dict=feed_dict
                )

                # NOTE(review): the graph losses are already means over the
                # batch (reduce_mean above); dividing by batch_size again here
                # double-normalizes the *logged* values. Training itself is
                # unaffected — confirm whether this scaling is intentional.
                l /= self.batch_size
                rec_loss /= self.batch_size
                kl_loss /= self.batch_size

                total_loss.append(l)
                rec_losses.append(rec_loss)
                kl_losses.append(kl_loss)

                total_iters += 1

                if total_iters % self.save_sample ==0:
                    print("At iteration: %d - dt: %s - cost: %.2f" % (total_iters, datetime.now() - t0, l))
                    print('Saving a sample...')

                    # 8x8 grid of samples decoded from the prior
                    many_samples = self.get_samples(64)

                    for i in range(64):
                        plt.subplot(8,8,i+1)
                        plt.imshow(many_samples[i].reshape(self.img_height,self.img_width), cmap='gray')
                        plt.subplots_adjust(wspace=0.2,hspace=0.2)
                        plt.axis('off')

                    fig = plt.gcf()
                    fig.set_size_inches(4,4)
                    plt.savefig(self.path+'/samples_at_iter_%d.png' % total_iters,dpi=150)

                    # cost curves: total / reconstruction / KL
                    plt.clf()
                    plt.subplot(1,3,1)
                    plt.suptitle('learning rate=' + str(self.lr))
                    plt.plot(total_loss, label='total_loss')
                    plt.ylabel('cost')
                    plt.legend()

                    plt.subplot(1,3,2)
                    plt.plot(rec_losses, label='rec loss')
                    plt.legend()

                    plt.subplot(1,3,3)
                    plt.plot(kl_losses, label='KL loss')

                    plt.xlabel('iteration')

                    plt.legend()
                    fig=plt.gcf()

                    fig.set_size_inches(10,4)
                    plt.savefig(self.path+'/cost_vs_iteration.png',dpi=150)
                    plt.clf()

        print('Parameters trained')

    def get_samples(self, n):
        """Decode n draws z ~ N(0, I) into data space; returns (n, dim) array."""
        z_test= np.random.normal(size=(n, self.latent_dims))
        samples = self.session.run(
            self.x_hat_test,
            feed_dict={self.z_test: z_test, self.batch_sz: n}
        )
        return samples

    def get_sample(self):
        """Decode a single z ~ N(0, I) draw; returns a (1, dim) array."""
        z_test= np.random.normal(size=(1, self.latent_dims))
        sample = self.session.run(
            self.x_hat_test,
            feed_dict={self.z_test: z_test, self.batch_sz: 1}
        )
        return sample
class bicycle_GAN(object):
    """
    bicycleGAN (TF1 graph-mode) mapping domain A images to domain B images,
    with a minibatch-discrimination discriminator on B, a convolutional
    encoder E on B producing a latent z, and a generator G conditioned on
    (A, z). Builds the whole graph at construction; training alternates
    discriminator / generator / encoder Adam steps in fit().

    Constructor keyword arguments (all default to module-level dummies and
    are expected to be supplied by the caller):
        - n_H_A, n_W_A / n_H_B, n_W_B, n_C: input/output image dimensions
        - min_true, max_true, min_reco, max_reco: de-normalization bounds
        - d_sizes, g_sizes_enc, g_sizes_dec, e_sizes: layer specs;
          e_sizes must contain 'latent_dims'
        - cost_type: 'GAN' | 'WGAN' | 'FEATURE' loss variant
        - cycl_weight, latent_weight, kl_weight: loss term weights
        - discr_steps, gen_steps, vae_steps: update steps per batch
        - lr, beta1, batch_size, epochs, save_sample, path, seed: training setup
    """
    #fix args
    def __init__(

        self,
        n_H_A=n_H_A, n_W_A=n_W_A,
        n_H_B=n_H_B, n_W_B=n_W_B, n_C=n_C,
        min_true=min_true, max_true=max_true,
        min_reco=min_reco, max_reco=max_reco,
        d_sizes=d_sizes, g_sizes_enc=g_sizes_enc, g_sizes_dec=g_sizes_dec, e_sizes=e_sizes,
        pretrain=pretrain, lr=LEARNING_RATE, beta1=BETA1, preprocess=PREPROCESS,
        cost_type=COST_TYPE, cycl_weight=CYCL_WEIGHT, latent_weight=LATENT_WEIGHT, kl_weight=KL_WEIGHT,
        discr_steps=DISCR_STEPS, gen_steps=GEN_STEPS, vae_steps=VAE_STEPS,
        batch_size=BATCH_SIZE, epochs=EPOCHS,
        save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED,
        ):
        """Build placeholders, G/D/E sub-networks, all losses, and train ops."""

        latent_dims=e_sizes['latent_dims']

        self.min_true=min_true
        self.max_true=max_true

        self.min_reco=min_reco
        self.max_reco=max_reco
        self.seed=seed

        self.n_W_A = n_W_A
        self.n_H_A = n_H_A

        self.n_W_B = n_W_B
        self.n_H_B = n_H_B
        self.n_C = n_C

        #input data
        self.input_A = tf.placeholder(
            tf.float32,
            shape=(None,
                   n_H_A, n_W_A, n_C),
            name='X_A',
        )

        self.input_B = tf.placeholder(
            tf.float32,
            shape=(None,
                   n_H_B, n_W_B, n_C),
            name='X_B',
        )

        self.batch_sz = tf.placeholder(
            tf.int32,
            shape=(),
            name='batch_sz'
        )
        # NOTE(review): this lr placeholder is overwritten below by the float
        # `self.lr = lr`, so it can never be fed — confirm which was intended.
        self.lr = tf.placeholder(
            tf.float32,
            shape=(),
            name='lr'
        )

        # externally sampled latent code for the LR-GAN path
        self.z = tf.placeholder(
            tf.float32,
            shape=(None,
                   latent_dims)
        )

        self.input_test_A = tf.placeholder(
            tf.float32,
            shape=(None,
                   n_H_A, n_W_A, n_C),
            name='X_test_A',
        )

        G = bicycleGenerator(self.input_A, self.n_H_B, self.n_W_B, g_sizes_enc, g_sizes_dec, 'A_to_B')

        D = Discriminator_minibatch(self.input_B, d_sizes, 'B')

        E = convEncoder(self.input_B, e_sizes, 'B')

        # VAE-GAN path: encode the real B image, generate from its code
        with tf.variable_scope('encoder_B') as scope:
            z_encoded, z_encoded_mu, z_encoded_log_sigma = E.e_forward(self.input_B)

        with tf.variable_scope('generator_A_to_B') as scope:
            sample_A_to_B_encoded = G.g_forward(self.input_A, z_encoded)

        # LR-GAN path: generate from a prior-sampled z (shared weights)
        with tf.variable_scope('generator_A_to_B') as scope:
            scope.reuse_variables()
            sample_A_to_B = self.sample_A_to_B = G.g_forward(self.input_A, self.z, reuse=True)

        # latent reconstruction: re-encode the LR-GAN sample
        with tf.variable_scope('encoder_B') as scope:
            scope.reuse_variables()
            z_recon, z_recon_mu, z_recon_log_sigma = E.e_forward(sample_A_to_B, reuse=True)

        with tf.variable_scope('discriminator_B') as scope:

            logits_real, feature_output_real = D.d_forward(self.input_B)

        with tf.variable_scope('discriminator_B') as scope:
            scope.reuse_variables()
            logits_fake, feature_output_fake = D.d_forward(sample_A_to_B, reuse=True)
            logits_fake_encoded, feature_output_fake_encoded = D.d_forward(sample_A_to_B_encoded, reuse=True)

        with tf.variable_scope('generator_A_to_B') as scope:
            scope.reuse_variables()
            self.test_images_A_to_B = G.g_forward(
                self.input_test_A, self.z, reuse=True, is_training=False
            )

        #parameters lists (partitioned by variable-scope name substring)
        self.d_params =[t for t in tf.trainable_variables() if 'discriminator' in t.name]
        self.e_params =[t for t in tf.trainable_variables() if 'encoder' in t.name]
        self.g_params =[t for t in tf.trainable_variables() if 'generator' in t.name]

        predicted_real= tf.nn.sigmoid(logits_real)

        predicted_fake=tf.nn.sigmoid(logits_fake)

        predicted_fake_encoded = tf.nn.sigmoid(logits_fake_encoded)

        # label smoothing margin for the GAN/FEATURE cross-entropy targets
        epsilon=1e-3
        #GAN LOSS
        if cost_type=='GAN':

            #DISCRIMINATOR LOSSES: real -> ~1, both fake paths -> ~0
            self.d_cost_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_real,
                labels=(1-epsilon)*tf.ones_like(logits_real)
                )
            )
            self.d_cost_fake_lr_GAN = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake,
                labels=epsilon+tf.zeros_like(logits_fake)
                )
            )
            self.d_cost_fake_vae_GAN = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake_encoded,
                labels=epsilon+tf.zeros_like(logits_fake_encoded)
                )
            )
            #GENERATOR LOSSES: both fake paths should fool D (-> ~1)
            self.g_cost_lr_GAN = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake,
                    labels=(1-epsilon)*tf.ones_like(logits_fake)
                )
            )

            self.g_cost_vae_GAN = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake_encoded,
                    labels=(1-epsilon)*tf.ones_like(logits_fake_encoded)
                )
            )

        if cost_type=='WGAN':
            #DISCRIMINATOR (critic) Wasserstein estimates
            self.d_cost_real= -tf.reduce_mean(logits_real)
            self.d_cost_fake_vae_GAN = tf.reduce_mean(logits_fake_encoded)
            self.d_cost_fake_lr_GAN = tf.reduce_mean(logits_fake)

            #GP (gradient penalty); epsilon here shadows the smoothing constant
            epsilon= tf.random_uniform(
                [self.batch_sz, 1, 1, 1],
                minval=0.,
                maxval=1.,
            )
            # NOTE(review): this interpolates input_A (domain A) with the two
            # generated B-domain samples — not the standard WGAN-GP real/fake
            # interpolation on input_B; confirm this mixture is intentional.
            interpolated = epsilon*self.input_A + (1-epsilon/2)*sample_A_to_B + (1-epsilon/2)*sample_A_to_B_encoded
            with tf.variable_scope('discriminator_B') as scope:
                scope.reuse_variables()
                # NOTE(review): every other call to D.d_forward in this class
                # passes a single image tensor; this one passes two — likely a
                # leftover from the pix2pix discriminator API. Verify before
                # using cost_type='WGAN'.
                logits_interpolated= D.d_forward(self.input_A, interpolated, reuse=True)

            gradients = tf.gradients(logits_interpolated, [interpolated], name='D_logits_intp')[0]
            grad_l2= tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1,2,3]))
            self.grad_penalty=tf.reduce_mean(tf.square(grad_l2-1.0))

            #GENERATOR
            self.g_cost_vae_GAN= - tf.reduce_mean(logits_fake_encoded)
            self.g_cost_lr_GAN= - tf.reduce_mean(logits_fake)

        if cost_type=='FEATURE':
            #DISCRIMINATOR LOSSES (same as 'GAN')
            self.d_cost_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_real,
                labels=(1-epsilon)*tf.ones_like(logits_real)
                )
            )
            self.d_cost_fake_lr_GAN = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake,
                labels=epsilon+tf.zeros_like(logits_fake)
                )
            )
            self.d_cost_fake_vae_GAN = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake_encoded,
                labels=epsilon+tf.zeros_like(logits_fake_encoded)
                )
            )
            #GENERATOR LOSSES: feature matching on D's penultimate features
            g_cost_lr_GAN = tf.reduce_sum(tf.squared_difference(feature_output_real,feature_output_fake), axis=1)
            self.g_cost_lr_GAN = tf.reduce_mean(g_cost_lr_GAN)

            g_cost_vae_GAN = tf.reduce_sum(tf.squared_difference(feature_output_real,feature_output_fake_encoded), axis=1)
            self.g_cost_vae_GAN=tf.reduce_mean(g_cost_vae_GAN)

        #CYCLIC WEIGHT: L1 reconstruction of B through the VAE-GAN path
        self.g_cost_cycl = tf.reduce_mean(tf.abs(self.input_B - sample_A_to_B_encoded))

        #ENCODER COSTS
        # L1 latent-code recovery through the LR-GAN path
        self.e_cost_latent_cycle = tf.reduce_mean(tf.abs(self.z - z_recon))
        # NOTE(review): this KL applies tf.log and tf.square directly to
        # z_encoded_log_sigma. If that tensor is a log-sigma (as its name and
        # the DVAE class in this repo indicate), the correct form is
        # -0.5*mean(1 + 2*log_sigma - mu^2 - exp(2*log_sigma)); as written it
        # is NaN-prone (log of a possibly negative value). Verify against
        # convEncoder's output convention before fixing.
        self.e_cost_kl = -0.5 * tf.reduce_mean(1 + 2*tf.log(z_encoded_log_sigma) - tf.square(z_encoded_mu) - tf.square(z_encoded_log_sigma))

        #TOTAL COSTS
        self.d_cost = self.d_cost_fake_vae_GAN + self.d_cost_fake_lr_GAN + 2*self.d_cost_real
        self.g_cost = self.g_cost_vae_GAN + self.g_cost_lr_GAN + cycl_weight*(self.g_cost_cycl) + self.e_cost_latent_cycle
        self.e_cost = latent_weight*self.e_cost_latent_cycle + kl_weight*self.e_cost_kl + self.g_cost_vae_GAN + self.g_cost_cycl

        self.d_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
        ).minimize(
            self.d_cost,
            var_list=self.d_params
        )

        self.g_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
        ).minimize(
            self.g_cost,
            var_list=self.g_params
        )

        self.e_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
        ).minimize(
            self.e_cost,
            var_list=self.e_params
        )

        # NOTE(review): these threshold *logits* at 0.5; the sigmoid outputs
        # computed above (predicted_real etc.) are unused. logits>0 already
        # corresponds to p>0.5, so 0.5 on logits skews the reported accuracy.
        real_predictions = tf.cast(logits_real>0.5,tf.float32)
        fake_predictions = tf.cast(logits_fake<0.5,tf.float32)
        fake_enc_predictions = tf.cast(logits_fake_encoded<0.5,tf.float32)

        num_predictions=2.0*batch_size

        num_correct = tf.reduce_sum(real_predictions)+tf.reduce_sum(fake_predictions)
        num_correct_enc = tf.reduce_sum(real_predictions)+tf.reduce_sum(fake_enc_predictions)

        self.d_accuracy= num_correct/num_predictions
        self.d_accuracy_enc= num_correct_enc/num_predictions

        self.D=D
        self.G=G
        self.E=E

        self.latent_weight=latent_weight
        self.cycl_weight=cycl_weight
        self.kl_weight=kl_weight
        self.latent_dims=latent_dims

        self.batch_size=batch_size
        self.epochs=epochs
        self.save_sample=save_sample
        self.path=path
        self.lr = lr

        self.preprocess=preprocess
        self.cost_type=cost_type
        self.gen_steps=gen_steps
        self.vae_steps=vae_steps
        self.discr_steps=discr_steps

    def set_session(self, session):
        """Attach a tf.Session to the model and to all D/G/E layers."""
        self.session = session

        for layer in self.D.d_conv_layers:
            layer.set_session(session)

        for layer in self.G.g_enc_conv_layers:
            layer.set_session(session)

        for layer in self.G.g_dec_conv_layers:
            layer.set_session(session)

        for layer in self.E.e_conv_layers:
            layer.set_session(session)

        for layer in self.E.e_dense_layers:
            layer.set_session(session)

    def fit(self, X_A, X_B, validating_size):
        """
        Train the bicycleGAN on paired arrays (X_A[i] <-> X_B[i]).

        Splits off the last validating_size pairs as a validation set, then
        for each epoch shuffles paired minibatches and runs discr_steps
        discriminator updates, gen_steps generator updates and vae_steps
        encoder updates per batch. Every save_sample batches it draws a
        validation sample and saves cost plots; at each epoch's end it writes
        energy-distribution, difference and resolution histograms to
        path/epoch{n}/.
        """
        all_A = X_A
        all_B = X_B

        gen_steps=self.gen_steps
        discr_steps=self.discr_steps
        vae_steps = self.vae_steps
        m = X_A.shape[0]
        train_A = all_A[0:m-validating_size]
        train_B = all_B[0:m-validating_size]

        validating_A = all_A[m-validating_size:m]
        validating_B = all_B[m-validating_size:m]

        seed=self.seed

        d_costs=[]
        d_costs_vae_GAN=[]
        d_costs_lr_GAN=[]
        d_costs_GAN=[ ]  # NOTE(review): never appended to or plotted — dead.

        g_costs=[]
        g_costs_lr_GAN=[]
        g_costs_vae_GAN=[]
        g_costs_cycl=[]

        e_costs=[]
        e_costs_kl=[]
        e_costs_latent_cycle=[]

        N=len(train_A)
        n_batches = N // self.batch_size

        total_iters=0

        print('\n ****** \n')
        print('Training bicycleGAN with a total of ' +str(N)+' samples distributed in '+ str((N)//self.batch_size) +' batches of size '+str(self.batch_size)+'\n')
        print('The validation set consists of {0} images'.format(validating_A.shape[0]))
        print('The learning rate is '+str(self.lr)+', and every ' +str(self.save_sample)+ ' batches a generated sample will be saved to '+ self.path)
        print('\n ****** \n')

        for epoch in range(self.epochs):

            # same seed for A and B keeps the pairs aligned after shuffling
            seed+=1
            print('Epoch:', epoch)

            batches_A = unsupervised_random_mini_batches(train_A, self.batch_size, seed)
            batches_B = unsupervised_random_mini_batches(train_B, self.batch_size, seed)

            # [:-1] drops the last (possibly smaller) minibatch
            for X_batch_A, X_batch_B in zip(batches_A[:-1], batches_B[:-1]):

                bs=X_batch_A.shape[0]

                t0 = datetime.now()

                # NOTE(review): these accumulators are overwritten by the
                # tuple-unpacking of session.run below, so e.g. d_cost+=d_cost
                # doubles the *last* step's value instead of summing across
                # steps; the /discr_steps averages below are therefore wrong
                # (2*last/steps). Training is unaffected — only the logged
                # curves. Rename the run outputs (e.g. d_cost_i) to fix.
                e_cost=0
                e_cost_latent_cycle=0
                e_cost_kl=0

                g_cost=0
                g_cost_cycl=0
                g_cost_lr_GAN=0
                g_cost_vae_GAN=0

                d_cost=0
                d_cost_vae_GAN=0
                d_cost_lr_GAN=0

                for i in range(discr_steps):

                    sample_z = np.random.normal(size=(bs, self.latent_dims))

                    _, d_acc, d_acc_enc, d_cost, d_cost_vae_GAN, d_cost_lr_GAN = self.session.run(

                        (self.d_train_op, self.d_accuracy, self.d_accuracy_enc, self.d_cost, self.d_cost_fake_vae_GAN, self.d_cost_fake_lr_GAN),

                        feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B,
                                   self.z:sample_z, self.batch_sz:bs
                        },
                    )
                    d_cost+=d_cost
                    d_cost_vae_GAN+=d_cost_vae_GAN
                    d_cost_lr_GAN+=d_cost_lr_GAN

                d_costs.append(d_cost/discr_steps)
                d_costs_vae_GAN.append(d_cost_vae_GAN/discr_steps)
                d_costs_lr_GAN.append(d_cost_lr_GAN/discr_steps)

                for i in range(gen_steps):

                    sample_z = np.random.normal(size=(bs, self.latent_dims))

                    _, g_cost, g_cost_cycl, g_cost_lr_GAN, g_cost_vae_GAN = self.session.run(

                        (self.g_train_op, self.g_cost,
                         self.g_cost_cycl, self.g_cost_lr_GAN, self.g_cost_vae_GAN,
                        ),

                        feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B,
                                   self.z:sample_z, self.batch_sz:bs
                        },
                    )

                    # same shadowing issue as in the discriminator loop above
                    g_cost+=g_cost
                    g_cost_cycl+=g_cost_cycl
                    g_cost_lr_GAN+=g_cost_lr_GAN
                    g_cost_vae_GAN+=g_cost_vae_GAN

                g_costs.append(g_cost/gen_steps)
                g_costs_vae_GAN.append(g_cost_vae_GAN/gen_steps)
                g_costs_lr_GAN.append(g_cost_lr_GAN/gen_steps)
                g_costs_cycl.append(self.cycl_weight*g_cost_cycl/gen_steps)

                for i in range(vae_steps):
                    sample_z = np.random.normal(size=(bs, self.latent_dims))

                    _, e_cost, e_cost_latent_cycle, e_cost_kl = self.session.run(

                        (self.e_train_op, self.e_cost,
                         self.e_cost_latent_cycle, self.e_cost_kl
                        ),

                        feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B,
                                   self.z:sample_z, self.batch_sz:bs
                        },
                    )

                    # same shadowing issue as in the discriminator loop above
                    e_cost+=e_cost
                    e_cost_latent_cycle+=e_cost_latent_cycle
                    e_cost_kl+=e_cost_kl

                e_costs.append(e_cost/vae_steps)
                e_costs_latent_cycle.append(self.latent_weight*e_cost_latent_cycle/vae_steps)
                e_costs_kl.append(self.kl_weight*e_cost_kl/vae_steps)

                total_iters+=1
                if total_iters % self.save_sample==0:
                    plt.clf()
                    print("At iter: %d - dt: %s - d_acc: %.2f, - d_acc_enc: %.2f" % (total_iters, datetime.now() - t0, d_acc, d_acc_enc))
                    print("Discriminator cost {0:.4g}, Generator cost {1:.4g}, VAE Cost {2:.4g}, KL divergence cost {3:.4g}".format(d_cost, g_cost, e_cost, e_cost_kl))
                    print('Saving a sample...')

                    # preprocess!=False -> pass de-normalization bounds through
                    if self.preprocess!=False:
                        draw_nn_sample(validating_A, validating_B, 1, self.preprocess,
                                       self.min_true, self.max_true,
                                       self.min_reco, self.max_reco,
                                       f=self.get_sample_A_to_B, is_training=True,
                                       total_iters=total_iters, PATH=self.path)
                    else:
                        draw_nn_sample(validating_A, validating_B, 1, self.preprocess,
                                       f=self.get_sample_A_to_B, is_training=True,
                                       total_iters=total_iters, PATH=self.path)

                    # cost curves, 2x4 grid
                    plt.clf()
                    plt.subplot(2,4,1)
                    plt.plot(d_costs, label='Discriminator total cost')
                    plt.plot(d_costs_lr_GAN, label='Discriminator of image with encoded noise cost')
                    plt.plot(d_costs_vae_GAN, label='Discriminator of image with input noise cost')
                    plt.xlabel('Batch')
                    plt.ylabel('Cost')
                    plt.legend()

                    plt.subplot(2,4,2)
                    plt.plot(g_costs, label='Generator total cost')
                    plt.plot(g_costs_cycl, label='Generator cyclic cost')
                    plt.xlabel('Batch')
                    plt.ylabel('Cost')
                    plt.legend()

                    plt.subplot(2,4,3)
                    plt.plot(e_costs, label='VAE cost')
                    plt.xlabel('Batch')
                    plt.ylabel('Cost')
                    plt.legend()

                    plt.subplot(2,4,6)
                    plt.plot(g_costs_lr_GAN, label='GAN cost (encoded noise image)')
                    plt.plot(g_costs_vae_GAN, label='GAN cost (input noise image)')
                    plt.xlabel('Batch')
                    plt.ylabel('Cost')
                    plt.legend()

                    plt.subplot(2,4,7)
                    plt.plot(e_costs_latent_cycle, label='Latent space cyclic cost')
                    plt.xlabel('Batch')
                    plt.ylabel('Cost')
                    plt.legend()

                    plt.subplot(2,4,8)
                    plt.plot(e_costs_kl, label='KL cost')
                    plt.xlabel('Batch')
                    plt.ylabel('Cost')
                    plt.legend()

                    fig = plt.gcf()
                    fig.set_size_inches(15,7)
                    plt.savefig(self.path+'/cost_iteration.png',dpi=80)

            # ---- end-of-epoch validation diagnostics ----
            print('Printing validation set histograms at epoch {0}'.format(epoch))

            if not os.path.exists(self.path+'/epoch{0}/'.format(epoch)):
                os.mkdir(self.path+'/epoch{0}/'.format(epoch))

            # generate one NN sample per validation image
            validating_NN=np.zeros_like(validating_B)
            for i in range(validating_size):
                validating_NN[i]=self.get_sample_A_to_B(validating_A[i].reshape(1,self.n_H_A,self.n_W_A,self.n_C))

            print('ET Distribution plots are being printed...')
            if self.preprocess != False:
                # undo normalization before summing per-event energies
                validation_MC_hist= denormalise(validating_B, self.min_reco, self.max_reco).reshape(validating_size, self.n_H_B*self.n_W_B)
                validation_MC_hist = np.sum(validation_MC_hist,axis=1)
                max_MC_hist = np.max(validation_MC_hist)

                validation_NN_hist= denormalise(validating_NN, self.min_reco, self.max_reco).reshape(validating_size, self.n_H_B*self.n_W_B)
                validation_NN_hist = np.sum(validation_NN_hist,axis=1)
                max_NN_hist = np.max(validation_NN_hist)

                validation_true_hist= denormalise(validating_A, self.min_true, self.max_true).reshape(validating_size, self.n_H_A*self.n_W_A)
                validation_true_hist = np.sum(validation_true_hist,axis=1)
                max_true_hist = np.max(validation_true_hist)

            else:
                validation_MC_hist= validating_B.reshape(validating_size, self.n_H_B*self.n_W_B)
                validation_MC_hist = np.sum(validation_MC_hist,axis=1)
                max_MC_hist = np.max(validation_MC_hist)

                validation_NN_hist= validating_NN.reshape(validating_size, self.n_H_B*self.n_W_B)
                validation_NN_hist = np.sum(validation_NN_hist,axis=1)
                max_NN_hist = np.max(validation_NN_hist)

                validation_true_hist= validating_A.reshape(validating_size, self.n_H_A*self.n_W_A)
                validation_true_hist = np.sum(validation_true_hist,axis=1)
                max_true_hist = np.max(validation_true_hist)

            plt.clf()
            plt.subplot(1,3,1)
            h_reco = plt.hist(validation_true_hist,bins=30, edgecolor='black');
            plt.xlabel('E (MeV)')
            plt.ylabel('dN/dE')
            plt.title('True pion E_T distribution,\n max true hist: {0} '.format(max_true_hist))
            plt.subplot(1,3,2)
            h_reco = plt.hist(validation_MC_hist,bins=30, edgecolor='black');
            plt.xlabel('E (MeV)')
            plt.ylabel('dN/dE')
            plt.title('Reco pion E_T distribution,\n max MC hist: {0} '.format(max_MC_hist))
            plt.subplot(1,3,3)
            h_nn = plt.hist(validation_NN_hist,bins=30, edgecolor='black');
            plt.xlabel('E (MeV)')
            plt.ylabel('dN/dE')
            plt.title('Reco pion E_T distribution from bicycleGAN, \n max NN hist: {0} '.format(max_NN_hist))
            fig = plt.gcf()
            fig.set_size_inches(16,4)

            plt.savefig(self.path+'/epoch{0}/distribution_at_epoch_{1}.png'.format(epoch, epoch), dpi=80)

            # bin-by-bin difference NN - MC (h_reco here is the MC histogram)
            plt.clf()
            diff=plt.bar(np.arange(0, max_MC_hist, step=max_MC_hist/30),
                         height=(h_nn[0]-h_reco[0]), edgecolor='black',
                         linewidth=1, color='lightblue',width = 1, align = 'edge')
            plt.xlabel('E (GeV)')
            plt.ylabel('dN/dE')
            plt.title("ET distribution difference NN output - MC output")
            fig = plt.gcf()
            fig.set_size_inches(12,4)
            plt.savefig(self.path+'/epoch{0}/difference_at_epoch_{1}.png'.format(epoch, epoch), dpi=80)
            print('Done')

            print('Resolution plots are being printed...')

            # NN energies rescaled to the MC maximum before subtracting truth
            diffNN = (validation_NN_hist/max_NN_hist)*max_MC_hist-validation_true_hist
            diffMC = validation_MC_hist-validation_true_hist

            plt.clf()
            plt.subplot(1,2,1)
            h_reco = plt.hist(diffMC,bins=30, edgecolor='black');
            plt.xlabel('ET recoMC - ET true')
            plt.ylabel('dN/dETdiff')
            plt.title('Resolution as simulated by MC')
            plt.subplot(1,2,2)
            h_nn = plt.hist(diffNN,bins=30, edgecolor='black');
            plt.xlabel('ET recoNN - ET true')
            plt.ylabel('dN/dETdiff')
            plt.title('Resolution as simulated by NN')
            fig = plt.gcf()
            fig.set_size_inches(12,4)
            plt.savefig(self.path+'/epoch{0}/resolution_at_epoch_{1}.png'.format(epoch, epoch), dpi=80)
            print('Done')

    def get_sample_A_to_B(self, X):
        """Translate one A image (1, n_H_A, n_W_A, n_C) to B with a random z."""
        z = np.random.normal(size=(1, self.latent_dims))

        one_sample = self.session.run(
            self.test_images_A_to_B,
            feed_dict={self.input_test_A:X, self.z:z, self.batch_sz: 1})

        return one_sample

    def get_samples_A_to_B(self, X):
        """Translate a batch of A images to B, one random z per image."""
        z = np.random.normal(size=(X.shape[0], self.latent_dims))

        many_samples = self.session.run(
            self.test_images_A_to_B,
            feed_dict={self.input_test_A:X, self.z:z, self.batch_sz: X.shape[0]})

        return many_samples
# Tail of the module-level dummy hyper-parameters; real values are supplied
# as keyword arguments at construction time.
e_sizes = None


class bicycle_GAN(object):
    """bicycleGAN translating images from domain A to domain B.

    Wires three sub-networks into one TF (1.x) graph:

      - G (``bicycleGenerator``):     B' = G(A, z)
      - D (``pix2pixDiscriminator``): conditional discriminator on (A, B)
      - E (``bicycleEncoder``):       z ~ E(B), returning (z, mu, log_sigma)

    and builds the cVAE-GAN + cLR-GAN objective (Zhu et al., "Toward
    Multimodal Image-to-Image Translation") with an LSGAN-style cost using a
    soft real target of 0.9.

    Keyword arguments (all default to module-level dummies expected to be
    overridden by the caller):

    - n_H_A/n_W_A, n_H_B/n_W_B, n_C: input/output image geometry
    - d_sizes, g_sizes_enc, g_sizes_dec, e_sizes: architecture dicts
      (``e_sizes`` must contain ``'latent_dims'``)
    - lr, beta1: Adam hyper-parameters
    - cycl_weight, latent_weight, kl_weight: loss-term weights
    - batch_size, epochs, save_sample, path, seed: training bookkeeping
    """

    def __init__(
        self,
        n_H_A=n_H_A, n_W_A=n_W_A,
        n_H_B=n_H_B, n_W_B=n_W_B, n_C=n_C,
        max_true=max_true, max_reco=max_reco,
        d_sizes=d_sizes, g_sizes_enc=g_sizes_enc, g_sizes_dec=g_sizes_dec, e_sizes=e_sizes,
        lr=LEARNING_RATE, beta1=BETA1, preprocess=PREPROCESS,
        cost_type=COST_TYPE, cycl_weight=CYCL_WEIGHT, latent_weight=LATENT_WEIGHT, kl_weight=KL_WEIGHT,
        discr_steps=DISCR_STEPS, gen_steps=GEN_STEPS,
        batch_size=BATCH_SIZE, epochs=EPOCHS,
        save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED,
    ):
        latent_dims = e_sizes['latent_dims']
        self.max_true = max_true
        self.max_reco = max_reco
        self.seed = seed

        self.n_W_A = n_W_A
        self.n_H_A = n_H_A
        self.n_W_B = n_W_B
        self.n_H_B = n_H_B
        self.n_C = n_C

        # --- graph inputs -------------------------------------------------
        self.input_A = tf.placeholder(
            tf.float32, shape=(None, n_H_A, n_W_A, n_C), name='X_A',
        )
        self.input_B = tf.placeholder(
            tf.float32, shape=(None, n_H_B, n_W_B, n_C), name='X_B',
        )
        self.batch_sz = tf.placeholder(tf.int32, shape=(), name='batch_sz')
        # NOTE(review): this placeholder is shadowed by ``self.lr = lr``
        # further below and is never fed; kept only so the graph is
        # unchanged for existing checkpoints.
        self.lr = tf.placeholder(tf.float32, shape=(), name='lr')
        self.z = tf.placeholder(tf.float32, shape=(None, latent_dims))

        # --- sub-networks -------------------------------------------------
        G = bicycleGenerator(self.input_A, self.n_H_B, self.n_W_B,
                             g_sizes_enc, g_sizes_dec, 'A_to_B')
        D = pix2pixDiscriminator(self.input_B, d_sizes, 'B')
        E = bicycleEncoder(self.input_B, e_sizes, 'B')

        # cVAE branch: encode the real B, decode it back conditioned on A.
        with tf.variable_scope('encoder_B') as scope:
            z_encoded, z_encoded_mu, z_encoded_log_sigma = E.e_forward(self.input_B)

        with tf.variable_scope('generator_A_to_B') as scope:
            sample_A_to_B_encoded = G.g_forward(self.input_A, z_encoded)

        # cLR branch: generate from a prior z, then re-encode the fake.
        with tf.variable_scope('generator_A_to_B') as scope:
            scope.reuse_variables()
            sample_A_to_B = self.sample_A_to_B = G.g_forward(self.input_A, self.z, reuse=True)

        with tf.variable_scope('encoder_B') as scope:
            scope.reuse_variables()
            z_recon, z_recon_mu, z_recon_log_sigma = E.e_forward(sample_A_to_B, reuse=True)

        with tf.variable_scope('discriminator_B') as scope:
            logits_real = D.d_forward(self.input_A, self.input_B)

        with tf.variable_scope('discriminator_B') as scope:
            scope.reuse_variables()
            logits_fake = D.d_forward(self.input_A, sample_A_to_B, reuse=True)
            logits_fake_encoded = D.d_forward(self.input_A, sample_A_to_B_encoded, reuse=True)

        # Test-time generation path (is_training=False freezes batch-norm).
        self.input_test_A = tf.placeholder(
            tf.float32, shape=(None, n_H_A, n_W_A, n_C), name='X_test_A',
        )
        with tf.variable_scope('generator_A_to_B') as scope:
            scope.reuse_variables()
            self.test_images_A_to_B = G.g_forward(
                self.input_test_A, self.z, reuse=True, is_training=False
            )

        # --- trainable-variable partitions --------------------------------
        self.d_params = [t for t in tf.trainable_variables() if 'discriminator' in t.name]
        self.e_params = [t for t in tf.trainable_variables() if 'encoder' in t.name]
        self.g_params = [t for t in tf.trainable_variables() if 'generator' in t.name]

        # --- losses (LSGAN with soft real target 0.9) ----------------------
        D_real = tf.nn.sigmoid(logits_real)
        D_fake = tf.nn.sigmoid(logits_fake)
        D_fake_encoded = tf.nn.sigmoid(logits_fake_encoded)

        d_loss_vae_gan = (tf.reduce_mean(tf.squared_difference(D_real, 0.9))
                          + tf.reduce_mean(tf.square(D_fake_encoded)))
        d_loss_lr_gan = (tf.reduce_mean(tf.squared_difference(D_real, 0.9))
                         + tf.reduce_mean(tf.square(D_fake)))

        g_loss_vae_gan = tf.reduce_mean(tf.squared_difference(D_fake_encoded, 0.9))
        g_loss_gan = tf.reduce_mean(tf.squared_difference(D_fake, 0.9))
        # BUGFIX: the L1 cycle term used to be pre-multiplied by cycl_weight
        # here AND again in the totals below, so the effective weight was
        # cycl_weight**2.  The weight is now applied exactly once (below).
        g_loss_cycl = tf.reduce_mean(tf.abs(self.input_B - sample_A_to_B_encoded))

        e_loss_latent_cycle = tf.reduce_mean(tf.abs(self.z - z_recon))
        # KL(q(z|B) || N(0, I)) with E parameterized as (mu, log_sigma).
        self.e_loss_kl = 0.5 * tf.reduce_mean(
            -1 - 2 * z_encoded_log_sigma + z_encoded_mu ** 2
            + tf.exp(z_encoded_log_sigma) ** 2)

        # The real-image term appears in both d_loss_* components; one copy
        # is subtracted so it is counted exactly once.
        self.d_loss = (d_loss_vae_gan + d_loss_lr_gan
                       - tf.reduce_mean(tf.squared_difference(D_real, 0.9)))
        self.g_loss = (g_loss_vae_gan + g_loss_gan
                       + cycl_weight * g_loss_cycl
                       + latent_weight * e_loss_latent_cycle)
        self.e_loss = (g_loss_vae_gan
                       + cycl_weight * g_loss_cycl
                       + latent_weight * e_loss_latent_cycle
                       + kl_weight * self.e_loss_kl)

        # --- optimizers ----------------------------------------------------
        self.d_train_op = tf.train.AdamOptimizer(
            learning_rate=lr, beta1=beta1, beta2=0.9,
        ).minimize(self.d_loss, var_list=self.d_params)

        self.g_train_op = tf.train.AdamOptimizer(
            learning_rate=lr, beta1=beta1, beta2=0.9,
        ).minimize(self.g_loss, var_list=self.g_params)

        self.e_train_op = tf.train.AdamOptimizer(
            learning_rate=lr, beta1=beta1, beta2=0.9,
        ).minimize(self.e_loss, var_list=self.e_params)

        # --- bookkeeping ---------------------------------------------------
        self.batch_size = batch_size
        self.epochs = epochs
        self.save_sample = save_sample
        self.path = path
        self.lr = lr  # shadows the (unused) lr placeholder above

        self.D = D
        self.G = G
        self.E = E

        self.preprocess = preprocess
        self.cost_type = cost_type

        self.latent_weight = latent_weight
        self.cycl_weight = cycl_weight
        self.kl_weight = kl_weight
        self.latent_dims = latent_dims

    def set_session(self, session):
        """Attach *session* to every layer so their weights can be run/saved."""
        self.session = session

        for layer in self.D.d_conv_layers:
            layer.set_session(session)
        for layer in self.G.g_enc_conv_layers:
            layer.set_session(session)
        for layer in self.G.g_dec_conv_layers:
            layer.set_session(session)
        for layer in self.E.e_blocks:
            layer.set_session(session)
        for layer in self.E.e_dense_layers:
            layer.set_session(session)

    def fit(self, X_A, X_B, validating_size):
        """Train on paired arrays X_A/X_B, holding out the last
        *validating_size* samples of each for the periodic visual check."""
        all_A = X_A
        all_B = X_B

        m = X_A.shape[0]
        train_A = all_A[0:m - validating_size]
        train_B = all_B[0:m - validating_size]

        validating_A = all_A[m - validating_size:m]
        validating_B = all_B[m - validating_size:m]

        seed = self.seed

        d_losses = []
        g_losses = []
        e_losses = []
        e_loss_kls = []

        N = len(train_A)
        n_batches = N // self.batch_size

        total_iters = 0

        print('\n ****** \n')
        print('Training bicycleGAN with a total of ' + str(N) + ' samples distributed in ' + str((N) // self.batch_size) + ' batches of size ' + str(self.batch_size) + '\n')
        print('The validation set consists of {0} images'.format(validating_A.shape[0]))
        print('The learning rate is ' + str(self.lr) + ', and every ' + str(self.save_sample) + ' batches a generated sample will be saved to ' + self.path)
        print('\n ****** \n')

        for epoch in range(self.epochs):

            # Re-seed per epoch so A and B are shuffled identically and
            # image pairs stay aligned across the two mini-batch streams.
            seed += 1
            print('Epoch:', epoch)

            batches_A = unsupervised_random_mini_batches(train_A, self.batch_size, seed)
            batches_B = unsupervised_random_mini_batches(train_B, self.batch_size, seed)

            for X_batch_A, X_batch_B in zip(batches_A, batches_B):

                bs = X_batch_A.shape[0]
                t0 = datetime.now()

                # Prior sample for the cLR branch.
                sample_z = np.random.normal(size=(bs, self.latent_dims))

                _, _, _, e_loss, g_loss, d_loss, e_loss_kl = self.session.run(
                    (self.d_train_op, self.g_train_op, self.e_train_op,
                     self.e_loss, self.g_loss, self.d_loss, self.e_loss_kl),
                    feed_dict={self.input_A: X_batch_A, self.input_B: X_batch_B,
                               self.z: sample_z, self.batch_sz: bs
                               },
                )

                e_losses.append(e_loss)
                g_losses.append(g_loss)
                d_losses.append(d_loss)
                e_loss_kls.append(e_loss_kl)

                total_iters += 1
                if total_iters % self.save_sample == 0:
                    print("At iter: %d - dt: %s " % (total_iters, datetime.now() - t0))
                    print("Discriminator cost {0:.4g}, Generator cost {1:.4g}, VAE Cost {2:.4g}, KL divergence loss {3:.4g}".format(d_loss, g_loss, e_loss, e_loss_kl))
                    print('Saving a sample...')

                    # Denormalisation constants are only needed when the
                    # data were normalised during preprocessing.
                    if self.preprocess == 'normalise':
                        draw_nn_sample(validating_A, validating_B, 1, self.preprocess,
                                       self.max_true, self.max_reco, f=self.get_sample_A_to_B, is_training=True,
                                       total_iters=total_iters, PATH=self.path)
                    else:
                        draw_nn_sample(validating_A, validating_B, 1, self.preprocess,
                                       f=self.get_sample_A_to_B, is_training=True,
                                       total_iters=total_iters, PATH=self.path)

                    plt.clf()
                    plt.subplot(1, 2, 1)
                    plt.plot(d_losses, label='Discriminator cost')
                    plt.plot(g_losses, label='Generator cost')
                    plt.plot(e_losses, label='VAE cost')
                    # BUGFIX: this series was mislabelled 'Total cost'.
                    plt.plot(e_loss_kls, label='KL divergence cost')
                    plt.xlabel('Epoch')
                    plt.ylabel('Cost')
                    plt.legend()

                    fig = plt.gcf()
                    fig.set_size_inches(15, 5)
                    plt.savefig(self.path + '/cost_iteration.png', dpi=150)

    def get_sample_A_to_B(self, X):
        """Translate a single image X (shape (1, H, W, C)) with a fresh
        random latent code; returns the generated B-domain image."""
        z = np.random.normal(size=(1, self.latent_dims))

        one_sample = self.session.run(
            self.test_images_A_to_B,
            feed_dict={self.input_test_A: X, self.z: z, self.batch_sz: 1})

        return one_sample

    def get_samples_A_to_B(self, X):
        """Translate a batch of images X, one independent random latent
        code per image; returns the generated B-domain batch."""
        z = np.random.normal(size=(X.shape[0], self.latent_dims))

        many_samples = self.session.run(
            self.test_images_A_to_B,
            feed_dict={self.input_test_A: X, self.z: z, self.batch_sz: X.shape[0]})

        return many_samples
def lrelu(x, alpha=0.2):
    """Leaky ReLU: elementwise max(alpha * x, x)."""
    return tf.maximum(alpha * x, x)


# Module-level dummy hyper-parameters; real values are supplied as keyword
# arguments when condDCGAN is instantiated.
LEARNING_RATE = None
BETA1 = None
COST_TYPE = None
BATCH_SIZE = None
EPOCHS = None
SAVE_SAMPLE_PERIOD = None
PATH = None
SEED = None
rnd_seed = 1
PREPROCESS = None
LAMBDA = .01
EPS = 1e-6

DISCR_STEPS = None
GEN_STEPS = None

min_true = None
max_true = None

min_reco = None
max_reco = None

dim_y = None
n_H = None
n_W = None
n_C = None

d_sizes = None
g_sizes = None


class condDCGAN(object):
    """Conditional DCGAN: generator G(Z, y) and discriminator D(X, y)
    conditioned on a label vector y, trained with one of three costs
    ('GAN' cross-entropy, 'WGAN' with gradient penalty, or 'FEATURE'
    matching on the discriminator's feature output).

    Keyword arguments (all default to module-level dummies expected to be
    overridden by the caller):

    - dim_y, n_H, n_W, n_C: label dimension and image geometry
    - d_sizes, g_sizes: architecture dicts (``g_sizes['z']`` = latent dim)
    - lr, beta1: Adam hyper-parameters
    - cost_type: 'GAN' | 'WGAN' | 'FEATURE'
    - discr_steps, gen_steps: updates per side per mini-batch
    - batch_size, epochs, save_sample, path, seed: training bookkeeping
    """

    def __init__(
        self,
        # BUGFIX: the default for n_W was mistyped as n_H, silently forcing
        # square images whenever the caller relied on the default.
        dim_y=dim_y, n_H=n_H, n_W=n_W, n_C=n_C,
        min_reco=min_reco, max_reco=max_reco,
        d_sizes=d_sizes, g_sizes=g_sizes,
        lr=LEARNING_RATE, beta1=BETA1, preprocess=PREPROCESS,
        cost_type=COST_TYPE,
        discr_steps=DISCR_STEPS, gen_steps=GEN_STEPS,
        batch_size=BATCH_SIZE, epochs=EPOCHS,
        save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED,
    ):
        self.dim_y = dim_y
        self.n_H = n_H
        self.n_W = n_W
        self.n_C = n_C
        self.seed = seed
        self.latent_dims = g_sizes['z']
        self.gen_steps = gen_steps
        self.discr_steps = discr_steps

        self.min_reco = min_reco
        self.max_reco = max_reco

        # --- graph inputs -------------------------------------------------
        self.X = tf.placeholder(
            tf.float32, shape=(None, n_H, n_W, n_C), name='X',
        )
        self.Z = tf.placeholder(
            tf.float32, shape=(None, self.latent_dims), name='Z',
        )
        self.y = tf.placeholder(
            tf.float32, shape=(None, self.dim_y), name='y',
        )
        self.batch_sz = tf.placeholder(tf.int32, shape=(), name='batch_sz')
        self.lr = tf.placeholder(tf.float32, shape=(), name='lr')

        # --- sub-networks -------------------------------------------------
        D = condDiscriminator(self.X, self.dim_y, d_sizes, 'A')
        G = condGenerator(self.dim_y, self.n_H, self.n_W, g_sizes, 'A')

        with tf.variable_scope('discriminator_A') as scope:
            logits_real, feature_output_real = D.d_forward(self.X, self.y)

        with tf.variable_scope('generator_A') as scope:
            sample_images = G.g_forward(self.Z, self.y)

        # Logits on generated samples (weights shared with the real pass).
        with tf.variable_scope('discriminator_A') as scope:
            scope.reuse_variables()
            logits_fake, feature_output_fake = D.d_forward(sample_images, self.y, reuse=True)

        # Test-time generation path (is_training=False freezes batch-norm).
        with tf.variable_scope('generator_A') as scope:
            scope.reuse_variables()
            self.sample_images_test = G.g_forward(
                self.Z, self.y, reuse=True, is_training=False
            )

        predicted_real = tf.nn.sigmoid(logits_real)
        predicted_fake = tf.nn.sigmoid(logits_fake)

        # --- trainable-variable partitions --------------------------------
        self.d_params = [t for t in tf.trainable_variables() if t.name.startswith('d')]
        self.g_params = [t for t in tf.trainable_variables() if t.name.startswith('g')]

        # --- costs ---------------------------------------------------------
        # epsilon implements one-sided label smoothing (targets 1-eps / eps).
        epsilon = 1e-3
        if cost_type == 'GAN':

            self.d_cost_real = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_real,
                labels=(1 - epsilon) * tf.ones_like(logits_real)
            )
            self.d_cost_fake = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake,
                labels=epsilon + tf.zeros_like(logits_fake)
            )
            self.d_cost = tf.reduce_mean(self.d_cost_real) + tf.reduce_mean(self.d_cost_fake)

            # Generator: fool the discriminator into predicting "real".
            self.g_cost = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake,
                    labels=(1 - epsilon) * tf.ones_like(logits_fake)
                )
            )

        if cost_type == 'WGAN':

            self.d_cost_real = -tf.reduce_mean(logits_real)
            self.d_cost_fake_lr_GAN = tf.reduce_mean(logits_fake)
            self.d_cost_GAN = self.d_cost_real + self.d_cost_fake_lr_GAN

            # Gradient penalty on random interpolates (WGAN-GP).
            epsilon = tf.random_uniform(
                [self.batch_sz, 1, 1, 1],
                minval=0.,
                maxval=1.,
            )
            interpolated = epsilon * self.X + (1 - epsilon) * sample_images

            with tf.variable_scope('discriminator_A') as scope:
                scope.reuse_variables()
                disc_interpolated, _ = D.d_forward(interpolated, self.y, reuse=True)

            gradients = tf.gradients(disc_interpolated, [interpolated])[0]
            slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
            gradient_penalty = tf.reduce_mean(tf.square(slopes - 1))
            self.d_cost = self.d_cost_GAN + 10 * gradient_penalty

            # Generator trained by feature matching rather than -E[D(fake)].
            self.g_cost = tf.sqrt(tf.reduce_sum(tf.pow(feature_output_real - feature_output_fake, 2)))

        if cost_type == 'FEATURE':

            self.d_cost_real = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_real,
                labels=(1 - epsilon) * tf.ones_like(logits_real)
            )
            self.d_cost_fake = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake,
                labels=epsilon + tf.zeros_like(logits_fake)
            )
            self.d_cost = tf.reduce_mean(self.d_cost_real) + tf.reduce_mean(self.d_cost_fake)

            # Generator: match discriminator features of real vs fake.
            self.g_cost = tf.sqrt(tf.reduce_mean(tf.pow(feature_output_real - feature_output_fake, 2)))

        # --- optimizers ----------------------------------------------------
        self.d_train_op = tf.train.AdamOptimizer(
            learning_rate=lr, beta1=beta1,
        ).minimize(self.d_cost, var_list=self.d_params)

        self.g_train_op = tf.train.AdamOptimizer(
            learning_rate=lr, beta1=beta1,
        ).minimize(self.g_cost, var_list=self.g_params)

        # --- discriminator accuracy (threshold at 0.5) ---------------------
        real_predictions = tf.cast(predicted_real > 0.5, tf.float32)
        fake_predictions = tf.cast(predicted_fake < 0.5, tf.float32)

        num_predictions = 2.0 * batch_size
        num_correct = tf.reduce_sum(real_predictions) + tf.reduce_sum(fake_predictions)
        self.d_accuracy = num_correct / num_predictions

        # --- bookkeeping ---------------------------------------------------
        self.cost_type = cost_type
        self.batch_size = batch_size
        self.epochs = epochs
        self.save_sample = save_sample
        self.path = path
        self.D = D
        self.G = G
        self.lr = lr  # shadows the (unused) lr placeholder above

    def set_session(self, session):
        """Attach *session* to every layer so their weights can be run/saved."""
        self.session = session

        for layer in self.D.d_conv_layers:
            layer.set_session(session)
        for layer in self.D.d_dense_layers:
            layer.set_session(session)
        for layer in self.G.g_conv_layers:
            layer.set_session(session)
        for layer in self.G.g_dense_layers:
            layer.set_session(session)

    def fit(self, X, y, validating_size):
        """Train on images X with labels y, holding out the last
        *validating_size* samples for the periodic sample plots."""
        _allX = X
        _ally = y

        gen_steps = self.gen_steps
        discr_steps = self.discr_steps

        m = _allX.shape[0]
        train_X = _allX[0:m - validating_size]
        train_y = _ally[0:m - validating_size]

        validating_X = _allX[m - validating_size:m]
        validating_y = _ally[m - validating_size:m]

        seed = self.seed
        d_costs = []
        g_costs = []

        N = len(train_X)
        n_batches = N // self.batch_size

        total_iters = 0

        print('\n ****** \n')
        print('Training improved DCGAN with a total of ' + str(N) + ' samples distributed in batches of size ' + str(self.batch_size) + '\n')
        print('The learning rate is ' + str(self.lr) + ', and every ' + str(self.save_sample) + ' epoch a generated sample will be saved to ' + self.path)
        print('\n ****** \n')

        for epoch in range(self.epochs):

            # Re-seed per epoch so X and y are shuffled identically and
            # image/label pairs stay aligned.
            seed += 1
            print('Epoch:', epoch)

            batches_X = unsupervised_random_mini_batches(train_X, self.batch_size, seed)
            batches_y = unsupervised_random_mini_batches_labels(train_y, self.batch_size, seed)

            for X_batch, y_batch in zip(batches_X, batches_y):

                bs = X_batch.shape[0]
                y_batch = y_batch.reshape(bs, self.dim_y)
                t0 = datetime.now()

                np.random.seed(seed)

                # BUGFIX: the running sums used the same name as the value
                # fetched from session.run (``d_cost += d_cost``), which
                # overwrote the accumulator every step and doubled the last
                # value; separate accumulators now average correctly.
                d_cost_total = 0
                for i in range(discr_steps):
                    Z = np.random.uniform(-1, 1, size=(bs, self.latent_dims))

                    _, d_cost, d_acc = self.session.run(
                        (self.d_train_op, self.d_cost, self.d_accuracy),
                        feed_dict={self.X: X_batch, self.Z: Z, self.batch_sz: bs, self.y: y_batch},
                    )
                    d_cost_total += d_cost

                d_costs.append(d_cost_total / discr_steps)

                # Generator updates reuse the last Z drawn above
                # (intentional: trains G against the D just updated on it).
                g_cost_total = 0
                for i in range(gen_steps):
                    _, g_cost = self.session.run(
                        (self.g_train_op, self.g_cost),
                        feed_dict={self.X: X_batch, self.Z: Z, self.y: y_batch, self.batch_sz: bs},
                    )
                    g_cost_total += g_cost

                g_costs.append(g_cost_total / gen_steps)

                total_iters += 1
                if total_iters % self.save_sample == 0:
                    print("At iter: %d - dt: %s - d_acc: %.4f" % (total_iters, datetime.now() - t0, d_acc))
                    print("Discriminator cost {0:.4g}, Generator cost {1:.4g}".format(d_cost_total / discr_steps, g_cost_total / gen_steps))
                    print('Saving a sample...')
                    plt.clf()
                    np.random.seed(seed)
                    Z = np.random.uniform(-1, 1, size=(16, self.latent_dims))
                    # NOTE(review): the reshape(16, 1) assumes dim_y == 1 —
                    # confirm if multi-dimensional labels are ever used.
                    y = validating_y[0:16].reshape(16, 1)
                    samples = self.get_samples(Z, y)  # shape is (16, H, W, C)
                    # samples = denormalise(samples, self.min_reco, self.max_reco)

                    w = self.n_W
                    h = self.n_H
                    # Collapse channels so each sample can be shown as 2-D.
                    samples = np.sum(samples, axis=3)
                    samples = samples.reshape(16, h, w)

                    for i in range(16):
                        plt.subplot(4, 4, i + 1)
                        plt.imshow(samples[i].reshape(h, w))
                        plt.subplots_adjust(wspace=0.2, hspace=0.2)
                        plt.title('ET: {0}'.format(y[i]))
                        plt.axis('off')

                    fig = plt.gcf()
                    fig.set_size_inches(7, 7)
                    plt.savefig(self.path + '/samples_at_iter_%d.png' % total_iters, dpi=300)

                    plt.clf()
                    plt.plot(d_costs, label='Discriminator cost')
                    plt.plot(g_costs, label='Generator cost')
                    plt.legend()

                    fig = plt.gcf()
                    fig.set_size_inches(10, 5)
                    plt.savefig(self.path + '/cost vs iteration.png', dpi=300)

    def get_sample(self, Z, y):
        """Generate one image for label *y*.

        NOTE(review): the passed Z is ignored and a fresh uniform latent is
        drawn instead — kept for backward compatibility; confirm intent.
        """
        Z = np.random.uniform(-1, 1, size=(1, self.latent_dims))

        one_sample = self.session.run(
            self.sample_images_test,
            feed_dict={self.Z: Z, self.y: y, self.batch_sz: 1})

        return one_sample

    def get_samples(self, Z, y):
        """Generate len(Z) images for labels *y*.

        NOTE(review): only Z's length is used (fresh latents are drawn), and
        batch_sz is fed as 1 regardless of the batch — kept as-is; verify
        whether the graph actually consumes batch_sz here.
        """
        Z = np.random.uniform(-1, 1, size=(Z.shape[0], self.latent_dims))

        many_samples = self.session.run(
            self.sample_images_test,
            feed_dict={self.Z: Z, self.y: y, self.batch_sz: 1})

        return many_samples
kernel, stride, apply_batch_norm, weight initializer), + (,,,,), + (,,,,), + ], + 'convblock_shortcut_layer_n':[(,,,)], + 'dense_layers':[(n_o, apply_bn, weight_init)] + } + - generator sizes + + a python dictionary of the kind + + g_sizes = { + 'z':latent_space_dim, + 'projection': int, + 'bn_after_project':bool + + 'deconvblocklayer_n':[(n_c+1, kernel, stride, apply_batch_norm, weight initializer), + (,,,,), + (,,,,), + ], + 'deconvblock_shortcut_layer_n':[(,,,)], + 'dense_layers':[(n_o, apply_bn, weight_init)] + 'activation':function + } + + Keyword arguments: + + - lr = LEARNING_RATE (float32) + - beta1 = ema parameter for adam opt (float32) + - batch_size (int) + - save_sample = after how many batches iterations to save a sample (int) + - path = relative path for saving samples + + """ + self.mean_reco = mean_reco + self.mean_true = mean_true + + self.std_true = std_true + self.std_reco = std_reco + + self.seed=seed + + self.n_W_A = n_W_A + self.n_H_A = n_H_A + + self.n_W_B = n_W_B + self.n_H_B = n_H_B + n_C = n_C_A + self.n_C = n_C + + #input data + + self.input_A = tf.placeholder( + tf.float32, + shape=(None, + n_H_A, n_W_A, n_C), + name='X_A', + ) + + self.input_B = tf.placeholder( + tf.float32, + shape=(None, + n_H_B, n_W_B, n_C), + name='X_B', + ) + + # self.Z = tf.placeholder( + # tf.float32, + # shape=(None, + # self.latent_dims), + # name='Z' + # ) + + self.batch_sz = tf.placeholder( + tf.int32, + shape=(), + name='batch_sz' + ) + self.lr = tf.placeholder( + tf.float32, + shape=(), + name='lr' + ) + + D_A = Discriminator(self.input_A, d_sizes_A, 'A') + D_B = Discriminator(self.input_B, d_sizes_B, 'B') + + G_A_to_B = cycleGenerator(self.input_A, self.n_H_B, self.n_W_B, g_sizes_A, 'A_to_B') + G_B_to_A = cycleGenerator(self.input_B, self.n_H_A, self.n_W_A, g_sizes_B, 'B_to_A') + + + #first cycle (A to B) + with tf.variable_scope('discriminator_A') as scope: + + logits_A = D_A.d_forward(self.input_A) + + with tf.variable_scope('generator_A_to_B') as scope: 
+ + sample_images_B = G_A_to_B.g_forward(self.input_A) + + #second cycle (B to A) + with tf.variable_scope('discriminator_B') as scope: + + logits_B = D_B.d_forward(self.input_B) + + with tf.variable_scope('generator_B_to_A') as scope: + + sample_images_A = G_B_to_A.g_forward(self.input_B) + + + with tf.variable_scope('discriminator_A') as scope: + scope.reuse_variables() + sample_logits_A = D_A.d_forward(sample_images_A, reuse=True) + + with tf.variable_scope('discriminator_B') as scope: + scope.reuse_variables() + sample_logits_B = D_B.d_forward(sample_images_B, reuse=True) + + + with tf.variable_scope('generator_A_to_B') as scope: + scope.reuse_variables() + cycl_B = G_A_to_B.g_forward(sample_images_A, reuse=True) + + + with tf.variable_scope('generator_B_to_A') as scope: + scope.reuse_variables() + cycl_A = G_B_to_A.g_forward(sample_images_B, reuse=True) + + + self.input_test_A = tf.placeholder( + tf.float32, + shape=(None, + n_H_A, n_W_A, n_C), + name='X_A', + ) + # get sample images for test time + with tf.variable_scope('generator_A_to_B') as scope: + scope.reuse_variables() + self.sample_images_test_A_to_B = G_A_to_B.g_forward( + self.input_test_A, reuse=True, is_training=False + ) + + self.input_test_B = tf.placeholder( + tf.float32, + shape=(None, + n_H_B, n_W_B, n_C), + name='X_B', + ) + + with tf.variable_scope('generator_B_to_A') as scope: + scope.reuse_variables() + self.sample_images_test_B_to_A = G_B_to_A.g_forward( + self.input_test_B, reuse=True, is_training=False + ) + #parameters lists + self.d_params_A =[t for t in tf.trainable_variables() if 'discriminator_A' in t.name] + self.d_params_B =[t for t in tf.trainable_variables() if 'discriminator_B' in t.name] + + self.g_params_A =[t for t in tf.trainable_variables() if 'B_to_A' in t.name] + self.g_params_B =[t for t in tf.trainable_variables() if 'A_to_B' in t.name] + + #cost building + + if cost_type == 'GAN': + + #Discriminators cost + + #Discriminator_A cost + #cost is low if real images are 
predicted as real (1) + d_cost_real_A = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_A, + labels=tf.ones_like(logits_A) + ) + # #cost is low if fake generated images are predicted as fake (0) + d_cost_fake_A = tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_A, + labels=tf.zeros_like(sample_logits_A) + ) + + + self.d_cost_A = tf.reduce_mean(d_cost_real_A) + tf.reduce_mean(d_cost_fake_A) + + #Discriminator_B cost + d_cost_real_B = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_B, + labels=tf.ones_like(logits_B) + ) + + d_cost_fake_B = tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_B, + labels=tf.zeros_like(sample_logits_B) + ) + + self.d_cost_B = tf.reduce_mean(d_cost_real_B) + tf.reduce_mean(d_cost_fake_B) + + + + #Generator cost + #cost is low if logits from discriminator A on samples generated by G_B_to_A + #are predicted as true (1) + g_cost_A = tf.reduce_mean( + tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_A, + labels=tf.ones_like(sample_logits_A) + ) + ) + #cost is low if logits from discriminator B on samples generated by G_A_to_B + #are predicted as true (1) + g_cost_B = tf.reduce_mean( + tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_B, + labels=tf.ones_like(sample_logits_B) + ) + ) + + #cycle cost is low if cyclic images are similar to input images (in both sets) + g_cycle_cost_A = tf.reduce_mean(tf.abs(self.input_A-cycl_A)) + g_cycle_cost_B = tf.reduce_mean(tf.abs(self.input_B-cycl_B)) + + g_cycle_cost= g_cycle_cost_A+g_cycle_cost_B + self.g_cost_A = g_cost_A + 10*g_cycle_cost + self.g_cost_B = g_cost_B + 10*g_cycle_cost + + # alpha_A = tf.random_uniform( + # shape=[self.batch_sz,self.n_H_A,self.n_W_A,self.n_C], + # minval=0., + # maxval=1. 
+ # ) + + # interpolates_A = alpha_A*self.input_A+(1-alpha_A)*sample_images_A + + # with tf.variable_scope('discriminator_A') as scope: + # scope.reuse_variables() + # disc_A_interpolates = D_A.d_forward(interpolates_A, reuse = True) + + # gradients_A = tf.gradients(disc_A_interpolates,[interpolates_A])[0] + # slopes_A = tf.sqrt(tf.reduce_sum(tf.square(gradients_A), reduction_indices=[1])) + # gradient_penalty_A = tf.reduce_mean((slopes_A-1)**2) + # self.d_cost_A= d_cost_A + LAMBDA*gradient_penalty_A + + # alpha_B = tf.random_uniform( + # shape=[self.batch_sz,self.n_H_B,self.n_W_B,self.n_C], + # minval=0., + # maxval=1. + # ) + + # interpolates_B = alpha_B*self.input_B+(1-alpha_B)*sample_images_B + + # with tf.variable_scope('discriminator_B') as scope: + # scope.reuse_variables() + # disc_B_interpolates = D_B.d_forward(interpolates_B, reuse = True) + + # gradients_B = tf.gradients(disc_B_interpolates,[interpolates_B])[0] + # slopes_B = tf.sqrt(tf.reduce_sum(tf.square(gradients_B), reduction_indices=[1])) + # gradient_penalty_B = tf.reduce_mean((slopes_B-1)**2) + # self.d_cost_B= d_cost_B + LAMBDA*gradient_penalty_B + + self.d_train_op_A = tf.train.AdamOptimizer( + learning_rate=lr_d, + beta1=beta1, + beta2=0.9, + ).minimize( + self.d_cost_A, + var_list=self.d_params_A + ) + + self.d_train_op_B = tf.train.AdamOptimizer( + learning_rate=lr_d, + beta1=beta1, + beta2=0.9, + ).minimize( + self.d_cost_B, + var_list=self.d_params_B + ) + + self.g_train_op_A = tf.train.AdamOptimizer( + learning_rate=lr_g, + beta1=beta1, + beta2=0.9, + ).minimize( + self.g_cost_A, + var_list=self.g_params_A + ) + + self.g_train_op_B = tf.train.AdamOptimizer( + learning_rate=lr_g, + beta1=beta1, + beta2=0.9, + ).minimize( + self.g_cost_B, + var_list=self.g_params_B + ) + + if cost_type == 'WGAN-clip': + + #Discriminators cost + #Discriminator A + + self.d_cost_A = tf.reduce_mean(sample_logits_A) - tf.reduce_mean(logits_A) + self.d_cost_B = tf.reduce_mean(sample_logits_B) - 
tf.reduce_mean(logits_B) + + g_cost_A = -tf.reduce_mean(sample_logits_A) + g_cost_B = -tf.reduce_mean(sample_logits_B) + + g_cycle_cost_A = tf.reduce_mean(tf.abs(self.input_A-cycl_A)) + g_cycle_cost_B = tf.reduce_mean(tf.abs(self.input_B-cycl_B)) + + g_cycle_cost= g_cycle_cost_A+g_cycle_cost_B + self.g_cost_A = g_cost_A + 10*g_cycle_cost + self.g_cost_B = g_cost_B + 10*g_cycle_cost + + #clip weights in D + clip_values=[-0.01,0.01] + self.clip_discriminator_A_var_op = [var.assign(tf.clip_by_value(var, clip_values[0], clip_values[1])) for + var in self.d_params_A] + self.clip_discriminator_B_var_op = [var.assign(tf.clip_by_value(var, clip_values[0], clip_values[1])) for + var in self.d_params_B] + + # alpha_A = tf.random_uniform( + # shape=[self.batch_sz,self.n_H_A,self.n_W_A,self.n_C], + # minval=0., + # maxval=1. + # ) + + # interpolates_A = alpha_A*self.input_A+(1-alpha_A)*sample_images_A + + # with tf.variable_scope('discriminator_A') as scope: + # scope.reuse_variables() + # disc_A_interpolates = D_A.d_forward(interpolates_A,reuse = True) + + # gradients_A = tf.gradients(disc_A_interpolates,[interpolates_A])[0] + # slopes_A = tf.sqrt(tf.reduce_sum(tf.square(gradients_A), reduction_indices=[1])) + # gradient_penalty_A = tf.reduce_mean((slopes_A-1)**2) + # self.d_cost_A+=LAMBDA*gradient_penalty_A + + # alpha_B = tf.random_uniform( + # shape=[self.batch_sz,self.n_H_B,self.n_W_B,self.n_C], + # minval=0., + # maxval=1. 
+ # ) + + # interpolates_B = alpha_B*self.input_B+(1-alpha_B)*sample_images_B + + # with tf.variable_scope('discriminator_B') as scope: + # scope.reuse_variables() + # disc_B_interpolates = D_B.d_forward(interpolates_B,reuse = True) + + # gradients_B = tf.gradients(disc_B_interpolates,[interpolates_B])[0] + # slopes_B = tf.sqrt(tf.reduce_sum(tf.square(gradients_B), reduction_indices=[1])) + # gradient_penalty_B = tf.reduce_mean((slopes_B-1)**2) + # self.d_cost_B+=LAMBDA*gradient_penalty_B + + self.d_train_op_A = tf.train.RMSPropOptimizer( + learning_rate=lr_d, + # beta1=beta1, + # beta2=0.9, + ).minimize( + self.d_cost_A, + var_list=self.d_params_A + ) + + self.d_train_op_B = tf.train.RMSPropOptimizer( + learning_rate=lr_d, + # beta1=beta1, + # beta2=0.9, + ).minimize( + self.d_cost_B, + var_list=self.d_params_B + ) + + self.g_train_op_A = tf.train.RMSPropOptimizer( + learning_rate=lr_g, + # beta1=beta1, + # beta2=0.9, + ).minimize( + self.g_cost_A, + var_list=self.g_params_A + ) + + self.g_train_op_B = tf.train.RMSPropOptimizer( + learning_rate=lr_g, + # beta1=beta1, + # beta2=0.9, + ).minimize( + self.g_cost_B, + var_list=self.g_params_B + ) + + if cost_type == 'WGAN-gp': + + #Discriminators cost + #Discriminator A + + self.d_cost_A = tf.reduce_mean(sample_logits_A) - tf.reduce_mean(logits_A) + self.d_cost_B = tf.reduce_mean(sample_logits_B) - tf.reduce_mean(logits_B) + + self.g_cost_A = -tf.reduce_mean(sample_logits_A) + self.g_cost_B = -tf.reduce_mean(sample_logits_B) + + # g_cycle_cost_A = tf.reduce_mean(tf.abs(self.input_A-cycl_A)) + # g_cycle_cost_B = tf.reduce_mean(tf.abs(self.input_B-cycl_B)) + + # g_cycle_cost= g_cycle_cost_A+g_cycle_cost_B + # self.g_cost_A = g_cost_A + 0*g_cycle_cost + # self.g_cost_B = g_cost_B + 0*g_cycle_cost + + + alpha_A = tf.random_uniform( + shape=[self.batch_sz,self.n_H_A,self.n_W_A,self.n_C], + minval=0., + maxval=1. 
+ ) + + interpolates_A = alpha_A*self.input_A+(1-alpha_A)*sample_images_A + + with tf.variable_scope('discriminator_A') as scope: + scope.reuse_variables() + disc_A_interpolates = D_A.d_forward(interpolates_A,reuse = True) + + gradients_A = tf.gradients(disc_A_interpolates,[interpolates_A])[0] + slopes_A = tf.sqrt(tf.reduce_sum(tf.square(gradients_A), reduction_indices=[1])) + gradient_penalty_A = tf.reduce_mean((slopes_A-1)**2) + self.d_cost_A+=LAMBDA*gradient_penalty_A + + alpha_B = tf.random_uniform( + shape=[self.batch_sz,self.n_H_B,self.n_W_B,self.n_C], + minval=0., + maxval=1. + ) + + interpolates_B = alpha_B*self.input_B+(1-alpha_B)*sample_images_B + + with tf.variable_scope('discriminator_B') as scope: + scope.reuse_variables() + disc_B_interpolates = D_B.d_forward(interpolates_B,reuse = True) + + gradients_B = tf.gradients(disc_B_interpolates,[interpolates_B])[0] + slopes_B = tf.sqrt(tf.reduce_sum(tf.square(gradients_B), reduction_indices=[1])) + gradient_penalty_B = tf.reduce_mean((slopes_B-1)**2) + self.d_cost_B+=LAMBDA*gradient_penalty_B + + self.d_train_op_A = tf.train.AdamOptimizer( + learning_rate=lr_d, + beta1=beta1, + beta2=0.9, + ).minimize( + self.d_cost_A, + var_list=self.d_params_A + ) + + self.d_train_op_B = tf.train.AdamOptimizer( + learning_rate=lr_d, + beta1=beta1, + beta2=0.9, + ).minimize( + self.d_cost_B, + var_list=self.d_params_B + ) + + self.g_train_op_A = tf.train.AdamOptimizer( + learning_rate=lr_g, + beta1=beta1, + beta2=0.9, + ).minimize( + self.g_cost_A, + var_list=self.g_params_A + ) + + self.g_train_op_B = tf.train.AdamOptimizer( + learning_rate=lr_g, + beta1=beta1, + beta2=0.9, + ).minimize( + self.g_cost_B, + var_list=self.g_params_B + ) + + #Measure accuracy of the discriminators + + real_predictions_A = tf.cast(logits_A>0,tf.float32) + fake_predictions_A = tf.cast(sample_logits_A<0,tf.float32) + + num_predictions=2.0*batch_size + num_correct_A = tf.reduce_sum(real_predictions_A)+tf.reduce_sum(fake_predictions_A) + + 
self.d_accuracy_A = num_correct_A/num_predictions + + real_predictions_B = tf.cast(logits_B>0,tf.float32) + fake_predictions_B = tf.cast(sample_logits_B<0,tf.float32) + + num_predictions=2.0*batch_size + num_correct_B = tf.reduce_sum(real_predictions_B)+tf.reduce_sum(fake_predictions_B) + + self.d_accuracy_B = num_correct_B/num_predictions + + self.batch_size=batch_size + self.epochs=epochs + self.save_sample=save_sample + self.path=path + self.lr_g = lr_g + self.lr_d = lr_d + + self.D_A=D_A + self.D_B=D_B + self.G_A_to_B=G_A_to_B + self.G_B_to_A=G_B_to_A + self.sample_images_B=sample_images_B + self.sample_images_A=sample_images_A + self.preprocess=preprocess + self.cost_type=cost_type + + def set_session(self, session): + + self.session = session + + for block in self.D_A.d_conv_layers: + block.set_session(session) + + for layer in self.D_A.d_dense_layers: + layer.set_session(session) + + for layer in self.G_A_to_B.g_blocks: + layer.set_session(session) + + for block in self.D_B.d_conv_layers: + block.set_session(session) + + for layer in self.D_B.d_dense_layers: + layer.set_session(session) + + for block in self.G_B_to_A.g_blocks: + block.set_session(session) + + def fit(self, X_A, X_B): + + seed = self.seed + d_costs_A = [] + g_costs_A = [] + + d_costs_B = [] + g_costs_B = [] + + N = len(X_A) + n_batches = N // self.batch_size + + total_iters=0 + + print('\n ****** \n') + print('Training cycle GAN with a total of ' +str(N)+' samples distributed in '+ str(N//self.batch_size) +' batches of size '+str(self.batch_size)+'\n') + print('The learning rate set for the generator is '+str(self.lr_g)+' while for the discriminator is '+str(self.lr_d)+', and every ' +str(self.save_sample)+ ' batches a generated sample will be saved to '+ self.path) + print('\n ****** \n') + + for epoch in range(self.epochs): + + seed +=1 + + print('Epoch:', epoch) + + # if(epoch < 100) : + # curr_lr = 0.0002 + # else: + # curr_lr = 0.0002 - 0.0002*(epoch-100)/100 + + batches_A = 
unsupervised_random_mini_batches(X_A, self.batch_size, seed) + batches_B = unsupervised_random_mini_batches(X_B, self.batch_size, seed) + + for X_batch_A, X_batch_B in zip(batches_A,batches_B): + bs = X_batch_A.shape[0] + if self.cost_type=='GAN': + discr_steps=1 + gen_steps=2 + if self.cost_type=='WGAN-gp': + discr_steps=10 + gen_steps=1 + + t0 = datetime.now() + + #optimize generator_A + g_cost_A=0 + for i in range(gen_steps): + _, g_cost_A = self.session.run( + (self.g_train_op_A, self.g_cost_A), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + + g_cost_A+=g_cost_A + + g_costs_A.append(g_cost_A/gen_steps) # just use the avg + + + #optimize discriminator_B + d_cost_B=0 + for i in range(discr_steps): + + _, d_cost_B, d_acc_B, = self.session.run( + (self.d_train_op_B, self.d_cost_B, self.d_accuracy_B), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + if self.cost_type == 'WGAN-clip': + + _ = self.session.run(self.clip_discriminator_B_var_op) + d_cost_B+=d_cost_B + + d_costs_B.append(d_cost_B/discr_steps) + + #optimize generator_B + g_cost_B=0 + for i in range(gen_steps): + _, g_cost_B = self.session.run( + (self.g_train_op_B, self.g_cost_B), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + g_cost_B+=g_cost_B + + g_costs_B.append(g_cost_B/gen_steps) + + d_cost_A=0 + for i in range(discr_steps): + #optimize Discriminator_A + _, d_cost_A, d_acc_A, = self.session.run( + (self.d_train_op_A, self.d_cost_A, self.d_accuracy_A), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + + if self.cost_type == 'WGAN-clip': + + _ = self.session.run(self.clip_discriminator_A_var_op) + d_cost_A+=d_cost_A + + d_costs_A.append(d_cost_A/discr_steps) + + total_iters += 1 + if total_iters % self.save_sample ==0: + print("At iter: %d - dt: %s - d_acc_A: %.2f" % (total_iters, datetime.now() - t0, d_acc_A)) + print("At iter: %d - dt: %s - d_acc_B: 
%.2f" % (total_iters, datetime.now() - t0, d_acc_B)) + print("Discrimator_A cost {0:.4g}, Generator_A_to_B cost {1:.4g}".format(d_cost_A, g_cost_A)) + print("Discrimator_B cost {0:.4g}, Generator_B_to_A cost {1:.4g}".format(d_cost_B, g_cost_B)) + print('Saving a sample...') + + #A is true + #B is reco + + #shape is (1,D,D,color) + _, n_H_A, n_W_A, n_C = X_batch_A.shape + _, n_H_B, n_W_B, _ = X_batch_B.shape + + #for i in range(10): + + j = np.random.choice(len(X_batch_A)) + + X_batch_A = X_batch_A[j] + X_batch_B = X_batch_B[j] + + sample_B = self.get_sample_A_to_B(X_batch_A.reshape(1,n_H_A,n_W_A,n_C)) + sample_A = self.get_sample_B_to_A(X_batch_B.reshape(1,n_H_B,n_W_B,n_C)) + + + plt.subplot(2,2,1) + if self.preprocess: + X_batch_A=np.where(X_batch_A!=0,X_batch_A*self.std_true+self.mean_true,0) + + plt.imshow(X_batch_A.reshape(n_H_A,n_W_A)) + plt.axis('off') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + + plt.subplot(2,2,2) + if self.preprocess: + sample_B=np.where(sample_B!=0,sample_B*self.std_reco+self.mean_reco,0) + + plt.imshow(sample_B.reshape(n_H_B,n_W_B)) + plt.axis('off') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + + plt.subplot(2,2,3) + if self.preprocess: + X_batch_B=np.where(X_batch_B!=0,X_batch_B*self.std_reco+self.mean_reco,0) + + plt.imshow(X_batch_B.reshape(n_H_B,n_W_B)) + plt.axis('off') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + + plt.subplot(2,2,4) + if self.preprocess: + sample_A=np.where(sample_A!=0,sample_A*self.std_true+self.mean_true,0) + + plt.imshow(sample_A.reshape(n_H_A,n_W_A)) + plt.axis('off') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + + + fig = plt.gcf() + fig.set_size_inches(10,8) + plt.savefig(self.path+'/sample_at_iter_{0}.png'.format(total_iters),dpi=100) + + plt.clf() + plt.plot(d_costs_A, label='Discriminator A cost') + plt.plot(g_costs_A, label='Generator A cost') + plt.legend() + fig = plt.gcf() + fig.set_size_inches(8,5) + plt.savefig(self.path+'/cost_iteration_A.png',dpi=150) + + plt.clf() + plt.plot(d_costs_B, 
label='Discriminator B cost') + plt.plot(g_costs_B, label='Generator B cost') + plt.legend() + fig = plt.gcf() + fig.set_size_inches(8,5) + plt.savefig(self.path+'/cost_iteration_B.png',dpi=150) + + + def fake_img_pool(self, num_fakes, fake_img, fake_pool): + + if (num_fakes < pool_size): + + fake_pool[num_fakes] = fake + return fake + + else: + + p = np.random.random() + if p > 0.5: + random_id = np.random.randint(0, pool_size-1) + temp = fake_pool[random_id] + fake_pool[random_id] = fake + return temp + else: + return fake + + def get_sample_A_to_B(self, Z): + + one_sample = self.session.run( + self.sample_images_test_A_to_B, + feed_dict={self.input_test_A:Z, self.batch_sz: 1}) + + return one_sample + + def get_sample_B_to_A(self, Z): + + one_sample = self.session.run( + self.sample_images_test_B_to_A, + feed_dict={self.input_test_B:Z, self.batch_sz: 1}) + + return one_sample + diff --git a/architectures/cycleGAN_fullresidual.py b/architectures/cycleGAN_fullresidual.py new file mode 100644 index 0000000..d5b6fa7 --- /dev/null +++ b/architectures/cycleGAN_fullresidual.py @@ -0,0 +1,670 @@ +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + +from architectures.utils.NN_building_blocks import * +from architectures.utils.NN_gen_building_blocks import * + +def lrelu(x, alpha=0.2): + return tf.maximum(alpha*x,x) + +# some dummy constants +LEARNING_RATE = None +BETA1 = None +COST_TYPE=None +BATCH_SIZE = None +EPOCHS = None +SAVE_SAMPLE_PERIOD = None +PATH = None +SEED = None +rnd_seed=1 +preprocess=None +LAMBDA=.01 +EPS=1e-10 +CYCL_WEIGHT=None +GAN_WEIGHT=None +DISCR_STEPS=None +GEN_STEPS=None +max_true=None +max_reco=None + +n_H_A=None +n_W_A=None +n_W_B=None +n_H_B=None +n_C=None + +d_sizes_A=None +d_sizes_B=None +g_sizes_A=None +g_sizes_B=None + +class cycleGAN_fullresidual(object): + + def __init__( + self, + n_H_A=n_H_A, n_W_A=n_W_A, + n_H_B=n_H_B, n_W_B=n_W_B, n_C=n_C, + 
max_true=max_true, max_reco=max_reco, + d_sizes_A=d_sizes_A, d_sizes_B=d_sizes_B, g_sizes_A=g_sizes_A, g_sizes_B=g_sizes_B, + lr=LEARNING_RATE, beta1=BETA1, preprocess=preprocess, + cost_type=COST_TYPE, gan_weight=GAN_WEIGHT, cycl_weight=CYCL_WEIGHT, + discr_steps=DISCR_STEPS, gen_steps=GEN_STEPS, + batch_size=BATCH_SIZE, epochs=EPOCHS, + save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED, + ): + + """ + + Positional arguments: + + - width of (square) image + - number of channels of input image + - discriminator sizes + + a python dict of the kind + d_sizes = { 'convblocklayer_n':[(n_c+1, kernel, stride, apply_batch_norm, weight initializer), + (,,,,), + (,,,,), + ], + 'convblock_shortcut_layer_n':[(,,,)], + 'dense_layers':[(n_o, apply_bn, weight_init)] + } + - generator sizes + + a python dictionary of the kind + + g_sizes = { + 'z':latent_space_dim, + 'projection': int, + 'bn_after_project':bool + + 'deconvblocklayer_n':[(n_c+1, kernel, stride, apply_batch_norm, weight initializer), + (,,,,), + (,,,,), + ], + 'deconvblock_shortcut_layer_n':[(,,,)], + 'dense_layers':[(n_o, apply_bn, weight_init)] + 'activation':function + } + + Keyword arguments: + + - lr = LEARNING_RATE (float32) + - beta1 = ema parameter for adam opt (float32) + - batch_size (int) + - save_sample = after how many batches iterations to save a sample (int) + - path = relative path for saving samples + + """ + + self.max_true=max_true + self.max_reco=max_reco + + self.seed=seed + + self.n_W_A = n_W_A + self.n_H_A = n_H_A + + self.n_W_B = n_W_B + self.n_H_B = n_H_B + self.n_C = n_C + + #input data + + self.input_A = tf.placeholder( + tf.float32, + shape=(None, + n_H_A, n_W_A, n_C), + name='X_A', + ) + + self.input_B = tf.placeholder( + tf.float32, + shape=(None, + n_H_B, n_W_B, n_C), + name='X_B', + ) + + self.batch_sz = tf.placeholder( + tf.int32, + shape=(), + name='batch_sz' + ) + self.lr = tf.placeholder( + tf.float32, + shape=(), + name='lr' + ) + + D_A = Discriminator(self.input_A, 
d_sizes_A, 'A') + D_B = Discriminator(self.input_B, d_sizes_B, 'B') + + G_A_to_B = cycleGenerator_fullresidual(self.input_A, self.n_H_B, self.n_W_B, g_sizes_A, 'A_to_B') + G_B_to_A = cycleGenerator_fullresidual(self.input_B, self.n_H_A, self.n_W_A, g_sizes_B, 'B_to_A') + + + #first cycle (A to B) + with tf.variable_scope('discriminator_A') as scope: + + logits_A = D_A.d_forward(self.input_A) + + with tf.variable_scope('generator_A_to_B') as scope: + + sample_images_B = G_A_to_B.g_forward(self.input_A) + + #second cycle (B to A) + with tf.variable_scope('discriminator_B') as scope: + + logits_B = D_B.d_forward(self.input_B) + + with tf.variable_scope('generator_B_to_A') as scope: + + sample_images_A = G_B_to_A.g_forward(self.input_B) + + + with tf.variable_scope('discriminator_A') as scope: + scope.reuse_variables() + sample_logits_A = D_A.d_forward(sample_images_A, reuse=True) + + with tf.variable_scope('discriminator_B') as scope: + scope.reuse_variables() + sample_logits_B = D_B.d_forward(sample_images_B, reuse=True) + + + with tf.variable_scope('generator_A_to_B') as scope: + scope.reuse_variables() + cycl_B = G_A_to_B.g_forward(sample_images_A, reuse=True) + + + with tf.variable_scope('generator_B_to_A') as scope: + scope.reuse_variables() + cycl_A = G_B_to_A.g_forward(sample_images_B, reuse=True) + + + self.input_test_A = tf.placeholder( + tf.float32, + shape=(None, + n_H_A, n_W_A, n_C), + name='X_test_A', + ) + # get sample images for test time + with tf.variable_scope('generator_A_to_B') as scope: + scope.reuse_variables() + self.sample_images_test_A_to_B = G_A_to_B.g_forward( + self.input_test_A, reuse=True, is_training=False + ) + + self.input_test_B = tf.placeholder( + tf.float32, + shape=(None, + n_H_B, n_W_B, n_C), + name='X_test_B', + ) + + with tf.variable_scope('generator_B_to_A') as scope: + scope.reuse_variables() + self.sample_images_test_B_to_A = G_B_to_A.g_forward( + self.input_test_B, reuse=True, is_training=False + ) + #parameters lists + 
self.d_params_A =[t for t in tf.trainable_variables() if 'discriminator_A' in t.name] + self.d_params_B =[t for t in tf.trainable_variables() if 'discriminator_B' in t.name] + + self.g_params_A =[t for t in tf.trainable_variables() if 'B_to_A' in t.name] + self.g_params_B =[t for t in tf.trainable_variables() if 'A_to_B' in t.name] + + #cost building + + if cost_type == 'GAN': + + #Discriminators cost + + #Discriminator_A cost + #cost is low if real images are predicted as real (1) + d_cost_real_A = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_A, + labels=tf.ones_like(logits_A) + ) + # #cost is low if fake generated images are predicted as fake (0) + d_cost_fake_A = tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_A, + labels=tf.zeros_like(sample_logits_A) + ) + + + self.d_cost_A = tf.reduce_mean(d_cost_real_A) + tf.reduce_mean(d_cost_fake_A) + + #Discriminator_B cost + d_cost_real_B = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_B, + labels=tf.ones_like(logits_B) + ) + + d_cost_fake_B = tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_B, + labels=tf.zeros_like(sample_logits_B) + ) + + self.d_cost_B = tf.reduce_mean(d_cost_real_B) + tf.reduce_mean(d_cost_fake_B) + + #Generator cost + #cost is low if logits from discriminator A on samples generated by G_B_to_A + #are predicted as true (1) + self.g_GAN_cost_A = tf.reduce_mean( + tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_A, + labels=tf.ones_like(sample_logits_A) + ) + ) + #cost is low if logits from discriminator B on samples generated by G_A_to_B + #are predicted as true (1) + self.g_GAN_cost_B = tf.reduce_mean( + tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_B, + labels=tf.ones_like(sample_logits_B) + ) + ) + + #cycle cost is low if cyclic images are similar to input images (in both sets) + self.g_cycle_cost_A = tf.reduce_mean(tf.abs(self.input_A-cycl_A)) + self.g_cycle_cost_B = tf.reduce_mean(tf.abs(self.input_B-cycl_B)) + 
+ g_cycle_cost= self.g_cycle_cost_A+self.g_cycle_cost_B + self.g_cost_A = gan_weight*self.g_GAN_cost_A + cycl_weight*g_cycle_cost + self.g_cost_B = gan_weight*self.g_GAN_cost_B + cycl_weight*g_cycle_cost + + + if cost_type == 'WGAN-gp': + + #Discriminators cost + #Discriminator A + + self.d_cost_A = tf.reduce_mean(sample_logits_A) - tf.reduce_mean(logits_A) + self.d_cost_B = tf.reduce_mean(sample_logits_B) - tf.reduce_mean(logits_B) + + self.g_cost_A = -tf.reduce_mean(sample_logits_A) + self.g_cost_B = -tf.reduce_mean(sample_logits_B) + + # g_cycle_cost_A = tf.reduce_mean(tf.abs(self.input_A-cycl_A)) + # g_cycle_cost_B = tf.reduce_mean(tf.abs(self.input_B-cycl_B)) + + # g_cycle_cost= g_cycle_cost_A+g_cycle_cost_B + # self.g_cost_A = g_cost_A + 0*g_cycle_cost + # self.g_cost_B = g_cost_B + 0*g_cycle_cost + + + alpha_A = tf.random_uniform( + shape=[self.batch_sz,self.n_H_A,self.n_W_A,self.n_C], + minval=0., + maxval=1. + ) + + interpolates_A = alpha_A*self.input_A+(1-alpha_A)*sample_images_A + + with tf.variable_scope('discriminator_A') as scope: + scope.reuse_variables() + disc_A_interpolates = D_A.d_forward(interpolates_A,reuse = True) + + gradients_A = tf.gradients(disc_A_interpolates,[interpolates_A])[0] + slopes_A = tf.sqrt(tf.reduce_sum(tf.square(gradients_A), reduction_indices=[1])) + gradient_penalty_A = tf.reduce_mean((slopes_A-1)**2) + self.d_cost_A+=LAMBDA*gradient_penalty_A + + alpha_B = tf.random_uniform( + shape=[self.batch_sz,self.n_H_B,self.n_W_B,self.n_C], + minval=0., + maxval=1. 
+ ) + + interpolates_B = alpha_B*self.input_B+(1-alpha_B)*sample_images_B + + with tf.variable_scope('discriminator_B') as scope: + scope.reuse_variables() + disc_B_interpolates = D_B.d_forward(interpolates_B,reuse = True) + + gradients_B = tf.gradients(disc_B_interpolates,[interpolates_B])[0] + slopes_B = tf.sqrt(tf.reduce_sum(tf.square(gradients_B), reduction_indices=[1])) + gradient_penalty_B = tf.reduce_mean((slopes_B-1)**2) + self.d_cost_B+=LAMBDA*gradient_penalty_B + + + self.d_train_op_A = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + beta2=0.9, + ).minimize( + self.d_cost_A, + var_list=self.d_params_A + ) + + self.d_train_op_B = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + beta2=0.9, + ).minimize( + self.d_cost_B, + var_list=self.d_params_B + ) + + self.g_train_op_A = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + beta2=0.9, + ).minimize( + self.g_cost_A, + var_list=self.g_params_A + ) + + self.g_train_op_B = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + beta2=0.9, + ).minimize( + self.g_cost_B, + var_list=self.g_params_B + ) + #Measure accuracy of the discriminators + + real_predictions_A = tf.cast(logits_A>0,tf.float32) + fake_predictions_A = tf.cast(sample_logits_A<0,tf.float32) + + num_predictions=2.0*batch_size + num_correct_A = tf.reduce_sum(real_predictions_A)+tf.reduce_sum(fake_predictions_A) + + self.d_accuracy_A = num_correct_A/num_predictions + + real_predictions_B = tf.cast(logits_B>0,tf.float32) + fake_predictions_B = tf.cast(sample_logits_B<0,tf.float32) + + num_predictions=2.0*batch_size + num_correct_B = tf.reduce_sum(real_predictions_B)+tf.reduce_sum(fake_predictions_B) + + self.d_accuracy_B = num_correct_B/num_predictions + + self.batch_size=batch_size + self.epochs=epochs + self.save_sample=save_sample + self.path=path + self.lr = lr + + self.D_A=D_A + self.D_B=D_B + self.G_A_to_B=G_A_to_B + self.G_B_to_A=G_B_to_A + + self.sample_images_B=sample_images_B + 
self.sample_images_A=sample_images_A + + self.preprocess=preprocess + self.cost_type=cost_type + + self.gen_steps=gen_steps + self.discr_steps=discr_steps + self.cycl_weight=cycl_weight + + def set_session(self, session): + + self.session = session + + for block in self.D_A.d_conv_layers: + block.set_session(session) + + for layer in self.D_A.d_dense_layers: + layer.set_session(session) + + for layer in self.G_A_to_B.g_blocks: + layer.set_session(session) + + for block in self.D_B.d_conv_layers: + block.set_session(session) + + for layer in self.D_B.d_dense_layers: + layer.set_session(session) + + for block in self.G_B_to_A.g_blocks: + block.set_session(session) + + def fit(self, X_A, X_B, validating_size): + + all_A = X_A + all_B = X_B + + gen_steps = self.gen_steps + discr_steps = self.discr_steps + + m = X_A.shape[0] + train_A = all_A[0:m-validating_size] + train_B = all_B[0:m-validating_size] + + validating_A = all_A[m-validating_size:m] + validating_B = all_B[m-validating_size:m] + + seed = self.seed + + d_costs_A=[] + d_gps_A=[] + g_GANs_A=[] + g_cycles_A=[] + g_costs_A=[] + + d_costs_B=[] + d_gps_B=[] + g_GANs_B=[] + g_cycles_B=[] + g_costs_B=[] + + N = len(train_A) + n_batches = N // self.batch_size + + total_iters=0 + + print('\n ****** \n') + print('Training cycle GAN with residual arichitecture and a total of ' +str(N)+' samples distributed in '+ str((N)//self.batch_size) +' batches of size '+str(self.batch_size)+'\n') + print('The validation set consists of {0} images'.format(validating_A.shape[0])) + print('The learning rate set is '+str(self.lr)+', and every ' +str(self.save_sample)+ ' batches a generated sample will be saved to '+ self.path) + print('\n ****** \n') + + for epoch in range(self.epochs): + + seed +=1 + + print('Epoch:', epoch) + + batches_A = unsupervised_random_mini_batches(X_A, self.batch_size, seed) + batches_B = unsupervised_random_mini_batches(X_B, self.batch_size, seed) + + for X_batch_A, X_batch_B in zip(batches_A,batches_B): + + 
bs = X_batch_A.shape[0] + + t0 = datetime.now() + + #optimize generator_A + g_cost_A=0 + g_GAN_A=0 + g_cycle_A=0 + + for i in range(gen_steps): + + _, g_cost_A, g_GAN_A, g_cycle_A = self.session.run( + (self.g_train_op_A, self.g_cost_A, self.g_GAN_cost_A, self.g_cycle_cost_A), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + g_cost_A+=g_cost_A + g_GAN_A+=g_GAN_A + g_cycle_A+=g_cycle_A + + g_costs_A.append(g_cost_A/gen_steps) + g_GANs_A.append(g_GAN_A/gen_steps) + g_cycles_A.append(self.cycl_weight*g_cycle_A/gen_steps) + + + #optimize discriminator_B + d_cost_B=0 + d_gp_B=0 + for i in range(discr_steps): + + if self.cost_type=='WGAN-gp': + _, d_cost_B, d_acc_B, d_gp_B = self.session.run( + (self.d_train_op_B, self.d_cost_B, self.d_accuracy_B, self.gradient_penalty_B), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + + d_gp_B+=d_gp_B + + else: + + _, d_cost_B, d_acc_B, = self.session.run( + (self.d_train_op_B, self.d_cost_B, self.d_accuracy_B), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + + d_cost_B+=d_cost_B + + d_costs_B.append(d_cost_B/discr_steps) + d_gps_B.append(LAMBDA*d_gp_B/discr_steps) + + #optimize generator_B + g_cost_B=0 + g_GAN_B=0 + g_cycle_B=0 + for i in range(gen_steps): + _, g_cost_B, g_GAN_B, g_cycle_B = self.session.run( + (self.g_train_op_B, self.g_cost_B, self.g_GAN_cost_B, self.g_cycle_cost_B), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + g_cost_B+=g_cost_B + g_GAN_B+=g_GAN_B + g_cycle_B+=g_cycle_B + + g_costs_B.append(g_cost_B/gen_steps) + g_GANs_B.append(g_GAN_B/gen_steps) + g_cycles_B.append(self.cycl_weight*g_cycle_B/gen_steps) + + d_cost_A=0 + d_gp_A=0 + for i in range(discr_steps): + #optimize Discriminator_A + if self.cost_type=='WGAN-gp': + _, d_cost_A, d_acc_A, d_gp_A = self.session.run( + (self.d_train_op_A, self.d_cost_A, self.d_accuracy_A, self.gradient_penalty_A), + 
feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + + d_gp_A+=d_gp_A + + + else: + + _, d_cost_A, d_acc_A, = self.session.run( + (self.d_train_op_A, self.d_cost_A, self.d_accuracy_A), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + + d_cost_A+=d_cost_A + + d_costs_A.append(d_cost_A/discr_steps) + d_gps_A.append(LAMBDA*d_gp_A/discr_steps) + + total_iters += 1 + if total_iters % self.save_sample ==0: + print("At iter: %d - dt: %s - d_acc_A: %.2f" % (total_iters, datetime.now() - t0, d_acc_A)) + print("At iter: %d - dt: %s - d_acc_B: %.2f" % (total_iters, datetime.now() - t0, d_acc_B)) + print("Discrimator_A cost {0:.4g}, Generator_A_to_B cost {1:.4g}".format(d_cost_A, g_cost_A)) + print("Discrimator_B cost {0:.4g}, Generator_B_to_A cost {1:.4g}".format(d_cost_B, g_cost_B)) + print('Saving a sample...') + + #A is true + #B is reco + + if self.preprocess=='normalise': + draw_nn_sample(validating_A, validating_B, 1, self.preprocess, + self.max_true, self.max_reco, + f=self.get_sample_A_to_B, is_training=True, + total_iters=total_iters, PATH=self.path) + else: + draw_nn_sample(validating_A, validating_B, 1, self.preprocess, + f=self.get_sample_A_to_B, is_training=True, + total_iters=total_iters, PATH=self.path) + + plt.clf() + plt.subplot(1,2,1) + plt.plot(d_costs_A, label='Discriminator A cost') + plt.plot(d_gps_A, label='Gradient penalty A') + plt.plot(g_GANs_A, label='Generator B to A GAN cost') + plt.xlabel('Epoch') + plt.ylabel('Cost') + plt.legend() + + plt.subplot(1,2,2) + plt.plot(g_costs_A, label='Generator B to A total cost') + plt.plot(g_GANs_A, label='Generator B to A GAN cost') + plt.plot(g_cycles_B, label='Generators B to A to B cycle cost') + plt.xlabel('Epoch') + plt.ylabel('Cost') + plt.legend() + + fig = plt.gcf() + fig.set_size_inches(15,5) + plt.savefig(self.path+'/cost_iteration_gen_disc_B_to_A.png',dpi=150) + + plt.clf() + plt.subplot(1,2,1) + plt.plot(d_costs_B, 
label='Discriminator B cost') + plt.plot(d_gps_B, label='Gradient penalty B') + plt.plot(g_GANs_B, label='Generator A to B GAN cost') + plt.xlabel('Epoch') + plt.ylabel('Cost') + plt.legend() + + plt.subplot(1,2,2) + plt.plot(g_costs_B, label='Generator A to B total cost') + plt.plot(g_GANs_B, label='Generator A to B GAN cost') + plt.plot(g_cycles_B, label='Generators B to A to B cycle cost') + plt.xlabel('Epoch') + plt.ylabel('Cost') + plt.legend() + + fig = plt.gcf() + fig.set_size_inches(15,5) + plt.savefig(self.path+'/cost_iteration_gen_disc_A_to_B.png',dpi=150) + + + + def get_sample_A_to_B(self, Z): + + one_sample = self.session.run( + self.sample_images_test_A_to_B, + feed_dict={self.input_test_A:Z, self.batch_sz: 1}) + + return one_sample + + def get_sample_B_to_A(self, Z): + + one_sample = self.session.run( + self.sample_images_test_B_to_A, + feed_dict={self.input_test_B:Z, self.batch_sz: 1}) + + return one_sample + + def get_samples_A_to_B(self, Z): + + many_samples = self.session.run( + self.sample_images_test_A_to_B, + feed_dict={self.input_test_A:Z, self.batch_sz: Z.shape[0]}) + + return many_samples + + def get_samples_B_to_A(self, Z): + + many_samples = self.session.run( + self.sample_images_test_B_to_A, + feed_dict={self.input_test_B:Z, self.batch_sz: Z.shape[0]}) + + return many_samples + diff --git a/architectures/cycleGAN_fullresidual_pix2pixDisc.py b/architectures/cycleGAN_fullresidual_pix2pixDisc.py new file mode 100644 index 0000000..c692d24 --- /dev/null +++ b/architectures/cycleGAN_fullresidual_pix2pixDisc.py @@ -0,0 +1,657 @@ +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + +from architectures.utils.NN_building_blocks import * +from architectures.utils.NN_gen_building_blocks import * + +def lrelu(x, alpha=0.2): + return tf.maximum(alpha*x,x) + +# some dummy constants +LEARNING_RATE = None +BETA1 = None +COST_TYPE=None +BATCH_SIZE = None +EPOCHS = None 
+SAVE_SAMPLE_PERIOD = None +PATH = None +SEED = None +rnd_seed=1 +preprocess=None +LAMBDA=.01 +EPS=1e-10 +CYCL_WEIGHT=None +GAN_WEIGHT=None +DISCR_STEPS=None +GEN_STEPS=None + +min_true=None +max_true=None + +min_reco=None +max_reco=None + +n_H_A=None +n_W_A=None +n_W_B=None +n_H_B=None +n_C=None + +d_sizes_A=None +d_sizes_B=None +g_sizes_A=None +g_sizes_B=None + +class cycleGAN_fullresidual_pix2pixDisc(object): + + def __init__( + self, + n_H_A=n_H_A, n_W_A=n_W_A, + n_H_B=n_H_B, n_W_B=n_W_B, n_C=n_C, + min_true=min_true, max_true=max_true, + min_reco=min_reco, max_reco=max_reco, + d_sizes_A=d_sizes_A, d_sizes_B=d_sizes_B, g_sizes_A=g_sizes_A, g_sizes_B=g_sizes_B, + lr=LEARNING_RATE, beta1=BETA1, preprocess=preprocess, + cost_type=COST_TYPE, gan_weight=GAN_WEIGHT, cycl_weight=CYCL_WEIGHT, + discr_steps=DISCR_STEPS, gen_steps=GEN_STEPS, + batch_size=BATCH_SIZE, epochs=EPOCHS, + save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED, + ): + + """ + + Positional arguments: + + - width of (square) image + - number of channels of input image + - discriminator sizes + + a python dict of the kind + d_sizes = { 'convblocklayer_n':[(n_c+1, kernel, stride, apply_batch_norm, weight initializer), + (,,,,), + (,,,,), + ], + 'convblock_shortcut_layer_n':[(,,,)], + 'dense_layers':[(n_o, apply_bn, weight_init)] + } + - generator sizes + + a python dictionary of the kind + + g_sizes = { + 'z':latent_space_dim, + 'projection': int, + 'bn_after_project':bool + + 'deconvblocklayer_n':[(n_c+1, kernel, stride, apply_batch_norm, weight initializer), + (,,,,), + (,,,,), + ], + 'deconvblock_shortcut_layer_n':[(,,,)], + 'dense_layers':[(n_o, apply_bn, weight_init)] + 'activation':function + } + + Keyword arguments: + + - lr = LEARNING_RATE (float32) + - beta1 = ema parameter for adam opt (float32) + - batch_size (int) + - save_sample = after how many batches iterations to save a sample (int) + - path = relative path for saving samples + + """ + + self.max_reco=max_reco + self.min_reco = 
min_reco + + self.min_true=min_true + self.max_true=max_true + + self.seed=seed + + self.n_W_A = n_W_A + self.n_H_A = n_H_A + + self.n_W_B = n_W_B + self.n_H_B = n_H_B + self.n_C = n_C + + #input data + + self.input_A = tf.placeholder( + tf.float32, + shape=(None, + n_H_A, n_W_A, n_C), + name='X_A', + ) + + self.input_B = tf.placeholder( + tf.float32, + shape=(None, + n_H_B, n_W_B, n_C), + name='X_B', + ) + + self.batch_sz = tf.placeholder( + tf.int32, + shape=(), + name='batch_sz' + ) + self.lr = tf.placeholder( + tf.float32, + shape=(), + name='lr' + ) + + D_A = pix2pixDiscriminator(self.input_A, d_sizes_A, 'A') + D_B = pix2pixDiscriminator(self.input_B, d_sizes_B, 'B') + + G_A_to_B = cycleGenerator_fullresidual(self.input_A, self.n_H_B, self.n_W_B, g_sizes_A, 'A_to_B') + G_B_to_A = cycleGenerator_fullresidual(self.input_B, self.n_H_A, self.n_W_A, g_sizes_B, 'B_to_A') + + + #first cycle (A to B) + with tf.variable_scope('discriminator_A') as scope: + + logits_A = D_A.d_forward(self.input_B, self.input_A) + + with tf.variable_scope('generator_A_to_B') as scope: + + sample_images_B = G_A_to_B.g_forward(self.input_A) + + #second cycle (B to A) + with tf.variable_scope('discriminator_B') as scope: + + logits_B = D_B.d_forward(self.input_A, self.input_B) + + with tf.variable_scope('generator_B_to_A') as scope: + + sample_images_A = G_B_to_A.g_forward(self.input_B) + + + with tf.variable_scope('discriminator_A') as scope: + scope.reuse_variables() + sample_logits_A = D_A.d_forward(self.input_B, sample_images_A, reuse=True) + + with tf.variable_scope('discriminator_B') as scope: + scope.reuse_variables() + sample_logits_B = D_B.d_forward(self.input_A, sample_images_B, reuse=True) + + + with tf.variable_scope('generator_A_to_B') as scope: + scope.reuse_variables() + cycl_B = G_A_to_B.g_forward(sample_images_A, reuse=True) + + + with tf.variable_scope('generator_B_to_A') as scope: + scope.reuse_variables() + cycl_A = G_B_to_A.g_forward(sample_images_B, reuse=True) + + + 
self.input_test_A = tf.placeholder( + tf.float32, + shape=(None, + n_H_A, n_W_A, n_C), + name='X_test_A', + ) + # get sample images for test time + with tf.variable_scope('generator_A_to_B') as scope: + scope.reuse_variables() + self.sample_images_test_A_to_B = G_A_to_B.g_forward( + self.input_test_A, reuse=True, is_training=False + ) + + self.input_test_B = tf.placeholder( + tf.float32, + shape=(None, + n_H_B, n_W_B, n_C), + name='X_test_B', + ) + + with tf.variable_scope('generator_B_to_A') as scope: + scope.reuse_variables() + self.sample_images_test_B_to_A = G_B_to_A.g_forward( + self.input_test_B, reuse=True, is_training=False + ) + #parameters lists + self.d_params_A =[t for t in tf.trainable_variables() if 'discriminator_A' in t.name] + self.d_params_B =[t for t in tf.trainable_variables() if 'discriminator_B' in t.name] + + self.g_params_A =[t for t in tf.trainable_variables() if 'B_to_A' in t.name] + self.g_params_B =[t for t in tf.trainable_variables() if 'A_to_B' in t.name] + + #cost building + + if cost_type == 'GAN': + + #Discriminators cost + + #Discriminator_A cost + #cost is low if real images are predicted as real (1) + d_cost_real_A = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_A, + labels=tf.ones_like(logits_A) + ) + # #cost is low if fake generated images are predicted as fake (0) + d_cost_fake_A = tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_A, + labels=tf.zeros_like(sample_logits_A) + ) + + + self.d_cost_A = tf.reduce_mean(d_cost_real_A) + tf.reduce_mean(d_cost_fake_A) + + #Discriminator_B cost + d_cost_real_B = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_B, + labels=tf.ones_like(logits_B) + ) + + d_cost_fake_B = tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_B, + labels=tf.zeros_like(sample_logits_B) + ) + + self.d_cost_B = tf.reduce_mean(d_cost_real_B) + tf.reduce_mean(d_cost_fake_B) + + #Generator cost + #cost is low if logits from discriminator A on samples generated by G_B_to_A 
+ #are predicted as true (1) + self.g_GAN_cost_A = tf.reduce_mean( + tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_A, + labels=tf.ones_like(sample_logits_A) + ) + ) + #cost is low if logits from discriminator B on samples generated by G_A_to_B + #are predicted as true (1) + self.g_GAN_cost_B = tf.reduce_mean( + tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits_B, + labels=tf.ones_like(sample_logits_B) + ) + ) + + #cycle cost is low if cyclic images are similar to input images (in both sets) + self.g_cycle_cost_A = tf.reduce_mean(tf.abs(self.input_A-cycl_A)) + self.g_cycle_cost_B = tf.reduce_mean(tf.abs(self.input_B-cycl_B)) + + g_cycle_cost= self.g_cycle_cost_A+self.g_cycle_cost_B + self.g_cost_A = gan_weight*self.g_GAN_cost_A + cycl_weight*g_cycle_cost + self.g_cost_B = gan_weight*self.g_GAN_cost_B + cycl_weight*g_cycle_cost + + + if cost_type == 'WGAN-gp': + + #Discriminators cost + #Discriminator A + + self.d_cost_A = tf.reduce_mean(sample_logits_A) - tf.reduce_mean(logits_A) + self.d_cost_B = tf.reduce_mean(sample_logits_B) - tf.reduce_mean(logits_B) + + self.g_cost_A = -tf.reduce_mean(sample_logits_A) + self.g_cost_B = -tf.reduce_mean(sample_logits_B) + + # g_cycle_cost_A = tf.reduce_mean(tf.abs(self.input_A-cycl_A)) + # g_cycle_cost_B = tf.reduce_mean(tf.abs(self.input_B-cycl_B)) + + # g_cycle_cost= g_cycle_cost_A+g_cycle_cost_B + # self.g_cost_A = g_cost_A + 0*g_cycle_cost + # self.g_cost_B = g_cost_B + 0*g_cycle_cost + + + alpha_A = tf.random_uniform( + shape=[self.batch_sz,self.n_H_A,self.n_W_A,self.n_C], + minval=0., + maxval=1. 
+ ) + + interpolates_A = alpha_A*self.input_A+(1-alpha_A)*sample_images_A + + with tf.variable_scope('discriminator_A') as scope: + scope.reuse_variables() + disc_A_interpolates = D_A.d_forward(interpolates_A,reuse = True) + + gradients_A = tf.gradients(disc_A_interpolates,[interpolates_A])[0] + slopes_A = tf.sqrt(tf.reduce_sum(tf.square(gradients_A), reduction_indices=[1])) + gradient_penalty_A = tf.reduce_mean((slopes_A-1)**2) + self.d_cost_A+=LAMBDA*gradient_penalty_A + + alpha_B = tf.random_uniform( + shape=[self.batch_sz,self.n_H_B,self.n_W_B,self.n_C], + minval=0., + maxval=1. + ) + + interpolates_B = alpha_B*self.input_B+(1-alpha_B)*sample_images_B + + with tf.variable_scope('discriminator_B') as scope: + scope.reuse_variables() + disc_B_interpolates = D_B.d_forward(interpolates_B,reuse = True) + + gradients_B = tf.gradients(disc_B_interpolates,[interpolates_B])[0] + slopes_B = tf.sqrt(tf.reduce_sum(tf.square(gradients_B), reduction_indices=[1])) + gradient_penalty_B = tf.reduce_mean((slopes_B-1)**2) + self.d_cost_B+=LAMBDA*gradient_penalty_B + + + self.d_train_op_A = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + beta2=0.9, + ).minimize( + self.d_cost_A, + var_list=self.d_params_A + ) + + self.d_train_op_B = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + beta2=0.9, + ).minimize( + self.d_cost_B, + var_list=self.d_params_B + ) + + self.g_train_op_A = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + beta2=0.9, + ).minimize( + self.g_cost_A, + var_list=self.g_params_A + ) + + self.g_train_op_B = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + beta2=0.9, + ).minimize( + self.g_cost_B, + var_list=self.g_params_B + ) + + self.batch_size=batch_size + self.epochs=epochs + self.save_sample=save_sample + self.path=path + self.lr = lr + + self.D_A=D_A + self.D_B=D_B + self.G_A_to_B=G_A_to_B + self.G_B_to_A=G_B_to_A + + self.sample_images_B=sample_images_B + self.sample_images_A=sample_images_A + + 
self.preprocess=preprocess + self.cost_type=cost_type + + self.gen_steps=gen_steps + self.discr_steps=discr_steps + self.cycl_weight=cycl_weight + + def set_session(self, session): + + self.session = session + + for block in self.D_A.d_conv_layers: + block.set_session(session) + + for layer in self.G_A_to_B.g_blocks: + layer.set_session(session) + + for block in self.D_B.d_conv_layers: + block.set_session(session) + + for block in self.G_B_to_A.g_blocks: + block.set_session(session) + + def fit(self, X_A, X_B, validating_size): + + all_A = X_A + all_B = X_B + + gen_steps = self.gen_steps + discr_steps = self.discr_steps + + m = X_A.shape[0] + train_A = all_A[0:m-validating_size] + train_B = all_B[0:m-validating_size] + + validating_A = all_A[m-validating_size:m] + validating_B = all_B[m-validating_size:m] + + seed = self.seed + + d_costs_A=[] + d_gps_A=[] + g_GANs_A=[] + g_cycles_A=[] + g_costs_A=[] + + d_costs_B=[] + d_gps_B=[] + g_GANs_B=[] + g_cycles_B=[] + g_costs_B=[] + + N = len(train_A) + n_batches = N // self.batch_size + + total_iters=0 + + print('\n ****** \n') + print('Training cycle GAN with residual arichitecture and a total of ' +str(N)+' samples distributed in '+ str((N)//self.batch_size) +' batches of size '+str(self.batch_size)+'\n') + print('The validation set consists of {0} images'.format(validating_A.shape[0])) + print('The learning rate set is '+str(self.lr)+', and every ' +str(self.save_sample)+ ' batches a generated sample will be saved to '+ self.path) + print('\n ****** \n') + + for epoch in range(self.epochs): + + seed +=1 + + print('Epoch:', epoch) + + batches_A = unsupervised_random_mini_batches(X_A, self.batch_size, seed) + batches_B = unsupervised_random_mini_batches(X_B, self.batch_size, seed) + + for X_batch_A, X_batch_B in zip(batches_A,batches_B): + + bs = X_batch_A.shape[0] + + t0 = datetime.now() + + #optimize generator_A + g_cost_A=0 + g_GAN_A=0 + g_cycle_A=0 + + for i in range(gen_steps): + + _, g_cost_A, g_GAN_A, g_cycle_A = 
self.session.run( + (self.g_train_op_A, self.g_cost_A, self.g_GAN_cost_A, self.g_cycle_cost_A), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + g_cost_A+=g_cost_A + g_GAN_A+=g_GAN_A + g_cycle_A+=g_cycle_A + + g_costs_A.append(g_cost_A/gen_steps) + g_GANs_A.append(g_GAN_A/gen_steps) + g_cycles_A.append(self.cycl_weight*g_cycle_A/gen_steps) + + + #optimize discriminator_B + d_cost_B=0 + d_gp_B=0 + for i in range(discr_steps): + + if self.cost_type=='WGAN-gp': + _, d_cost_B, d_gp_B = self.session.run( + (self.d_train_op_B, self.d_cost_B, self.gradient_penalty_B), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + + d_gp_B+=d_gp_B + + else: + + _, d_cost_B, = self.session.run( + (self.d_train_op_B, self.d_cost_B), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + + d_cost_B+=d_cost_B + + d_costs_B.append(d_cost_B/discr_steps) + d_gps_B.append(LAMBDA*d_gp_B/discr_steps) + + #optimize generator_B + g_cost_B=0 + g_GAN_B=0 + g_cycle_B=0 + for i in range(gen_steps): + _, g_cost_B, g_GAN_B, g_cycle_B = self.session.run( + (self.g_train_op_B, self.g_cost_B, self.g_GAN_cost_B, self.g_cycle_cost_B), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + g_cost_B+=g_cost_B + g_GAN_B+=g_GAN_B + g_cycle_B+=g_cycle_B + + g_costs_B.append(g_cost_B/gen_steps) + g_GANs_B.append(g_GAN_B/gen_steps) + g_cycles_B.append(self.cycl_weight*g_cycle_B/gen_steps) + + d_cost_A=0 + d_gp_A=0 + for i in range(discr_steps): + #optimize Discriminator_A + if self.cost_type=='WGAN-gp': + _, d_cost_A = self.session.run( + (self.d_train_op_A, self.d_cost_A, self.gradient_penalty_A), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + + d_gp_A+=d_gp_A + + + else: + + _, d_cost_A, = self.session.run( + (self.d_train_op_A, self.d_cost_A), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + + 
d_cost_A+=d_cost_A + + d_costs_A.append(d_cost_A/discr_steps) + d_gps_A.append(LAMBDA*d_gp_A/discr_steps) + + total_iters += 1 + if total_iters % self.save_sample ==0: + plt.clf() + print("At iter: %d - dt: %s " % (total_iters, datetime.now() - t0)) + print("At iter: %d - dt: %s " % (total_iters, datetime.now() - t0)) + print("Discrimator_A cost {0:.4g}, Generator_A_to_B cost {1:.4g}".format(d_cost_A, g_cost_A)) + print("Discrimator_B cost {0:.4g}, Generator_B_to_A cost {1:.4g}".format(d_cost_B, g_cost_B)) + print('Saving a sample...') + + #A is true + #B is reco + + if self.preprocess!=False: + draw_nn_sample(validating_A, validating_B, 1, self.preprocess, + self.min_true, self.max_true, + self.min_reco, self.max_reco, + f=self.get_sample_A_to_B, is_training=True, + total_iters=total_iters, PATH=self.path) + else: + draw_nn_sample(validating_A, validating_B, 1, self.preprocess, + f=self.get_sample_A_to_B, is_training=True, + total_iters=total_iters, PATH=self.path) + + plt.clf() + plt.subplot(1,2,1) + plt.plot(d_costs_A, label='Discriminator A cost') + plt.plot(d_gps_A, label='Gradient penalty A') + plt.plot(g_GANs_A, label='Generator B to A GAN cost') + plt.xlabel('Epoch') + plt.ylabel('Cost') + plt.legend() + + plt.subplot(1,2,2) + plt.plot(g_costs_A, label='Generator B to A total cost') + plt.plot(g_GANs_A, label='Generator B to A GAN cost') + plt.plot(g_cycles_B, label='Generators B to A to B cycle cost') + plt.xlabel('Epoch') + plt.ylabel('Cost') + plt.legend() + + fig = plt.gcf() + fig.set_size_inches(15,5) + plt.savefig(self.path+'/cost_iteration_gen_disc_B_to_A.png',dpi=150) + + plt.clf() + plt.subplot(1,2,1) + plt.plot(d_costs_B, label='Discriminator B cost') + plt.plot(d_gps_B, label='Gradient penalty B') + plt.plot(g_GANs_B, label='Generator A to B GAN cost') + plt.xlabel('Epoch') + plt.ylabel('Cost') + plt.legend() + + plt.subplot(1,2,2) + plt.plot(g_costs_B, label='Generator A to B total cost') + plt.plot(g_GANs_B, label='Generator A to B GAN cost') + 
plt.plot(g_cycles_B, label='Generators B to A to B cycle cost') + plt.xlabel('Epoch') + plt.ylabel('Cost') + plt.legend() + + fig = plt.gcf() + fig.set_size_inches(15,5) + plt.savefig(self.path+'/cost_iteration_gen_disc_A_to_B.png',dpi=150) + + + + def get_sample_A_to_B(self, Z): + + one_sample = self.session.run( + self.sample_images_test_A_to_B, + feed_dict={self.input_test_A:Z, self.batch_sz: 1}) + + return one_sample + + def get_sample_B_to_A(self, Z): + + one_sample = self.session.run( + self.sample_images_test_B_to_A, + feed_dict={self.input_test_B:Z, self.batch_sz: 1}) + + return one_sample + + def get_samples_A_to_B(self, Z): + + many_samples = self.session.run( + self.sample_images_test_A_to_B, + feed_dict={self.input_test_A:Z, self.batch_sz: Z.shape[0]}) + + return many_samples + + def get_samples_B_to_A(self, Z): + + many_samples = self.session.run( + self.sample_images_test_B_to_A, + feed_dict={self.input_test_B:Z, self.batch_sz: Z.shape[0]}) + + return many_samples + diff --git a/architectures/cycleGAN_u_net.py b/architectures/cycleGAN_u_net.py new file mode 100644 index 0000000..0d2bdbd --- /dev/null +++ b/architectures/cycleGAN_u_net.py @@ -0,0 +1,866 @@ +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + +from architectures.utils.NN_building_blocks import * +from architectures.utils.NN_gen_building_blocks import * + +def lrelu(x, alpha=0.2): + return tf.maximum(alpha*x,x) + +# some dummy constants +LEARNING_RATE = None +BETA1 = None +COST_TYPE=None +BATCH_SIZE = None +EPOCHS = None +SAVE_SAMPLE_PERIOD = None +PATH = None +SEED = None +rnd_seed=1 +preprocess=None +LAMBDA=.01 +EPS=1e-10 +CYCL_WEIGHT=None +GAN_WEIGHT=None +DISCR_STEPS=None +GEN_STEPS=None + +mean_A=None +std_A=None + +mean_B=None +std_B=None + +n_H_A=None +n_W_A=None +n_W_B=None +n_H_B=None +n_C=None + +d_sizes_A=None +d_sizes_B=None +g_sizes_enc_A=None +g_sizes_dec_A=None +g_sizes_enc_B=None 
+g_sizes_dec_B=None + +class cycleGAN_u_net(object): + + def __init__( + self, + n_H_A=n_H_A, n_W_A=n_W_A, + n_H_B=n_H_B, n_W_B=n_W_B, n_C=n_C, + mean_A=mean_A, std_A=std_A, + mean_B=mean_B, std_B=std_B, + d_sizes_A=d_sizes_A, d_sizes_B=d_sizes_B, + g_sizes_enc_A=g_sizes_enc_A, g_sizes_dec_A=g_sizes_dec_A, + g_sizes_enc_B=g_sizes_enc_B, g_sizes_dec_B=g_sizes_dec_B, + lr=LEARNING_RATE, beta1=BETA1, preprocess=preprocess, + cost_type=COST_TYPE, gan_weight=GAN_WEIGHT, cycl_weight=CYCL_WEIGHT, + discr_steps=DISCR_STEPS, gen_steps=GEN_STEPS, + batch_size=BATCH_SIZE, epochs=EPOCHS, + save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED + ): + + """ + + Positional arguments: + + - width of (square) image + - number of channels of input image + - discriminator sizes + + a python dict of the kind + d_sizes = { 'convblocklayer_n':[(n_c+1, kernel, stride, apply_batch_norm, weight initializer), + (,,,,), + (,,,,), + ], + 'convblock_shortcut_layer_n':[(,,,)], + 'dense_layers':[(n_o, apply_bn, weight_init)] + } + - generator sizes + + a python dictionary of the kind + + g_sizes = { + 'z':latent_space_dim, + 'projection': int, + 'bn_after_project':bool + + 'deconvblocklayer_n':[(n_c+1, kernel, stride, apply_batch_norm, weight initializer), + (,,,,), + (,,,,), + ], + 'deconvblock_shortcut_layer_n':[(,,,)], + 'dense_layers':[(n_o, apply_bn, weight_init)] + 'activation':function + } + + Keyword arguments: + + - lr = LEARNING_RATE (float32) + - beta1 = ema parameter for adam opt (float32) + - batch_size (int) + - save_sample = after how many batches iterations to save a sample (int) + - path = relative path for saving samples + + """ + + self.mean_A=mean_A + self.std_A=std_A + self.mean_B=mean_B + self.std_B=std_B + + self.seed=seed + + self.n_W_A = n_W_A + self.n_H_A = n_H_A + + self.n_W_B = n_W_B + self.n_H_B = n_H_B + self.n_C = n_C + + #input data + + self.input_A = tf.placeholder( + tf.float32, + shape=(None, + n_H_A, n_W_A, n_C), + name='X_A', + ) + + self.input_B = 
tf.placeholder( + tf.float32, + shape=(None, + n_H_B, n_W_B, n_C), + name='X_B', + ) + + self.batch_sz = tf.placeholder( + tf.int32, + shape=(), + name='batch_sz' + ) + self.lr = tf.placeholder( + tf.float32, + shape=(), + name='lr' + ) + + # get sample images for test time + self.input_test_A = tf.placeholder( + tf.float32, + shape=(None, + n_H_A, n_W_A, n_C), + name='X_test_A', + ) + self.input_test_B = tf.placeholder( + tf.float32, + shape=(None, + n_H_B, n_W_B, n_C), + name='X_test_B', + ) + + + D_A = Discriminator(self.input_A, d_sizes_A, 'A') + D_B = Discriminator(self.input_B, d_sizes_B, 'B') + + G_A_to_B = pix2pixGenerator(self.input_A, self.n_H_B, self.n_W_B, g_sizes_enc_A, g_sizes_dec_A, 'A_to_B') + G_B_to_A = pix2pixGenerator(self.input_B, self.n_H_A, self.n_W_A, g_sizes_enc_B, g_sizes_dec_B, 'B_to_A') + + + # A -> B' + with tf.variable_scope('generator_A_to_B') as scope: + + sample_images_B = G_A_to_B.g_forward(self.input_A) + + # B -> A' + with tf.variable_scope('generator_B_to_A') as scope: + + sample_images_A = G_B_to_A.g_forward(self.input_B) + + # B' -> A' + with tf.variable_scope('generator_B_to_A') as scope: + scope.reuse_variables() + cycl_A = G_B_to_A.g_forward(sample_images_B, reuse=True) + + with tf.variable_scope('generator_B_to_A') as scope: + scope.reuse_variables() + self.sample_images_test_A = G_B_to_A.g_forward( + self.input_test_B, reuse=True, is_training=False + ) + + # A' -> B' + with tf.variable_scope('generator_A_to_B') as scope: + scope.reuse_variables() + cycl_B = G_A_to_B.g_forward(sample_images_A, reuse=True) + + with tf.variable_scope('generator_A_to_B') as scope: + scope.reuse_variables() + self.sample_images_test_B = G_A_to_B.g_forward( + self.input_test_A, reuse=True, is_training=False + ) + + #Discriminator of images os set B + + with tf.variable_scope('discriminator_B') as scope: + logits_real_B = D_B.d_forward(self.input_B) + #logits_real_B = D_B.d_forward(self.input_A, self.input_B) + + with 
tf.variable_scope('discriminator_B') as scope: + scope.reuse_variables() + logits_fake_B = D_B.d_forward(sample_images_B, reuse=True) + #logits_fake_B = D_B.d_forward(self.input_A, cycl_B, reuse=True) + + #Discriminator of images os set A + + with tf.variable_scope('discriminator_A') as scope: + logits_real_A = D_A.d_forward(self.input_A) + #logits_real_A = D_A.d_forward(self.input_B, self.input_A) + + with tf.variable_scope('discriminator_A') as scope: + scope.reuse_variables() + logits_fake_A = D_A.d_forward(sample_images_A, reuse=True) + #logits_fake_A = D_A.d_forward(self.input_B, sample_images_A, reuse=True) + + + #parameters lists + self.d_params_A =[t for t in tf.trainable_variables() if 'discriminator_A' in t.name] + self.d_params_B =[t for t in tf.trainable_variables() if 'discriminator_B' in t.name] + + self.d_params = [t for t in tf.trainable_variables() if 'discriminator' in t.name] + + self.g_params_A =[t for t in tf.trainable_variables() if 'B_to_A' in t.name] + self.g_params_B =[t for t in tf.trainable_variables() if 'A_to_B' in t.name] + + self.g_params = [t for t in tf.trainable_variables() if 'generator' in t.name] + + predicted_fake_A=tf.nn.sigmoid(logits_fake_A) + predicted_real_A=tf.nn.sigmoid(logits_real_A) + + predicted_fake_B=tf.nn.sigmoid(logits_fake_B) + predicted_real_B=tf.nn.sigmoid(logits_real_B) + + + #cost building + epsilon = 1e-4 + if cost_type == 'GAN': + + self.d_cost_real_A = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_real_A, + labels=(1-epsilon)*tf.ones_like(logits_real_A) + ) + + self.d_cost_fake_A = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_fake_A, + labels=epsilon+tf.zeros_like(logits_fake_A) + ) + + self.d_cost_A = tf.reduce_mean(self.d_cost_real_A) + tf.reduce_mean(self.d_cost_fake_A) + + #Generator cost + self.g_cost_GAN_A = tf.reduce_mean( + tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_fake_A, + labels=(1-epsilon)*tf.ones_like(logits_fake_A) + ) + ) + + # #Discriminator cost + # 
self.d_cost_real_A = tf.reduce_mean(-tf.log(predicted_real_A + epsilon)) + # self.d_cost_fake_A = tf.reduce_mean(-tf.log(1 + epsilon - predicted_fake_A)) + # self.d_cost_A = self.d_cost_real_A+self.d_cost_fake_A + + # #Generator cost + # self.g_cost_GAN_A = tf.reduce_mean(-tf.log(predicted_fake_A + epsilon)) + + + self.d_cost_real_B = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_real_B, + labels=(1-epsilon)*tf.ones_like(logits_real_B) + ) + + self.d_cost_fake_B = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_fake_B, + labels=epsilon+tf.zeros_like(logits_fake_B) + ) + + self.d_cost_B = tf.reduce_mean(self.d_cost_real_B) + tf.reduce_mean(self.d_cost_fake_B) + + #Generator cost + self.g_cost_GAN_B = tf.reduce_mean( + tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits_fake_B, + labels=(1-epsilon)*tf.ones_like(logits_fake_B) + ) + ) + + self.d_cost=(self.d_cost_B+self.d_cost_A)/2. + + # #Discriminator cost + # self.d_cost_real_B = tf.reduce_mean(-tf.log(predicted_real_B + epsilon)) + # self.d_cost_fake_B = tf.reduce_mean(-tf.log(1 + epsilon - predicted_fake_B)) + # self.d_cost_B = self.d_cost_real_B+self.d_cost_fake_B + + # #Generator cost + # self.g_cost_GAN_B = tf.reduce_mean(-tf.log(predicted_fake_B + epsilon)) + + #cycle cost is low if cyclic images are similar to input images (in both sets) + self.g_cost_cycle_A = tf.reduce_mean(tf.abs(self.input_A-cycl_A)) + self.g_cost_cycle_B = tf.reduce_mean(tf.abs(self.input_B-cycl_B)) + + + g_cost_cycle= self.g_cost_cycle_A+self.g_cost_cycle_B + self.g_cost_A = gan_weight*self.g_cost_GAN_A + cycl_weight*g_cost_cycle + self.g_cost_B = gan_weight*self.g_cost_GAN_B + cycl_weight*g_cost_cycle + + self.g_cost=gan_weight*(self.g_cost_GAN_A+self.g_cost_GAN_B)+cycl_weight*g_cost_cycle + + self.d_train_op_A = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + ).minimize( + self.d_cost_A, + var_list=self.d_params_A + ) + + self.d_train_op_B = tf.train.AdamOptimizer( + learning_rate=lr, + 
beta1=beta1, + ).minimize( + self.d_cost_B, + var_list=self.d_params_B + ) + self.d_train_op = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + ).minimize( + self.d_cost, + var_list=self.d_params#_B+self.d_params_A + ) + + self.g_train_op_A = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + ).minimize( + self.g_cost_A, + var_list=self.g_params_A#+self.g_params_B + ) + + self.g_train_op_B = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + ).minimize( + self.g_cost_B, + var_list=self.g_params_B#+self.g_params_A + ) + self.g_train_op = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + ).minimize( + self.g_cost, + var_list=self.g_params#_B+self.g_params_A + ) + + self.batch_size=batch_size + self.epochs=epochs + self.save_sample=save_sample + self.path=path + self.lr = lr + + self.D_A=D_A + self.D_B=D_B + self.G_A_to_B=G_A_to_B + self.G_B_to_A=G_B_to_A + + self.sample_images_B=sample_images_B + self.sample_images_A=sample_images_A + + self.preprocess=preprocess + self.cost_type=cost_type + + self.gen_steps=gen_steps + self.discr_steps=discr_steps + self.cycl_weight=cycl_weight + + + def set_session(self, session): + + self.session = session + + for layer in self.D_A.d_conv_layers: + layer.set_session(session) + + for layer in self.G_A_to_B.g_enc_conv_layers: + layer.set_session(session) + + for layer in self.G_A_to_B.g_dec_conv_layers: + layer.set_session(session) + + for layer in self.D_B.d_conv_layers: + layer.set_session(session) + + for layer in self.G_B_to_A.g_enc_conv_layers: + layer.set_session(session) + + for layer in self.G_B_to_A.g_dec_conv_layers: + layer.set_session(session) + + + def fit(self, X_A, X_B, validating_size): + + all_A = X_A + all_B = X_B + + gen_steps = self.gen_steps + discr_steps = self.discr_steps + + m = X_A.shape[0] + train_A = all_A[0:m-validating_size] + train_B = all_B[0:m-validating_size] + + validating_A = all_A[m-validating_size:m] + validating_B = all_B[m-validating_size:m] + + 
seed=self.seed + + g_costs=[] + d_costs=[] + + d_costs_A=[] + g_GANs_A=[] + g_cycles_A=[] + g_costs_A=[] + + d_costs_B=[] + g_GANs_B=[] + g_cycles_B=[] + g_costs_B=[] + + N=len(train_A) + n_batches = N // self.batch_size + + total_iters=0 + + print('\n ****** \n') + print('Training cycle GAN with pix2pix gen/disc with a total of ' +str(N)+' samples distributed in '+ str((N)//self.batch_size) +' batches of size '+str(self.batch_size)+'\n') + print('The validation set consists of {0} images'.format(validating_A.shape[0])) + print('The learning rate is '+str(self.lr)+', and every ' +str(self.save_sample)+ ' batches a generated sample will be saved to '+ self.path) + print('\n ****** \n') + + for epoch in range(self.epochs): + + seed +=1 + + print('Epoch:', epoch) + + batches_A = unsupervised_random_mini_batches(train_A, self.batch_size, seed) + batches_B = unsupervised_random_mini_batches(train_B, self.batch_size, seed) + + for X_batch_A, X_batch_B in zip(batches_A,batches_B): + + bs = X_batch_A.shape[0] + + t0 = datetime.now() + + #optimize generator_A + g_cost=0 + g_cost_A=0 + g_GAN_A=0 + g_cycle_A=0 + + g_cost_B=0 + g_GAN_B=0 + g_cycle_B=0 + + for i in range(gen_steps): + + _, g_cost, g_cost_A, g_cost_B, g_GAN_A, g_GAN_B, g_cycle_A, g_cycle_B = self.session.run( + (self.g_train_op, self.g_cost, self.g_cost_A, self.g_cost_B, + self.g_cost_GAN_A, self.g_cost_GAN_B, + self.g_cost_cycle_A, self.g_cost_cycle_B), + + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + g_cost+=g_cost + g_cost_A+=g_cost_A + g_cost_B+=g_cost_B + + g_GAN_A+=g_GAN_A + g_GAN_B+=g_GAN_B + + g_cycle_A+=g_cycle_A + g_cycle_B+=g_cycle_B + + g_costs.append(g_cost) + + g_costs_A.append(g_cost_A/gen_steps) + g_costs_B.append(g_cost_B/gen_steps) + + g_GANs_A.append(g_GAN_A/gen_steps) + g_GANs_B.append(g_GAN_B/gen_steps) + + g_cycles_A.append(self.cycl_weight*g_cycle_A/gen_steps) + g_cycles_B.append(self.cycl_weight*g_cycle_B/gen_steps) + + + #optimize discriminator_B + 
d_cost=0 + d_cost_A=0 + d_cost_B=0 + for i in range(discr_steps): + + _, d_cost, d_cost_A, d_cost_B, = self.session.run( + (self.d_train_op, self.d_cost, self.d_cost_A, self.d_cost_B), + feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + ) + d_cost+=d_cost + d_cost_A+=d_cost_A + d_cost_B+=d_cost_B + + d_costs.append(d_cost/discr_steps) + d_costs_B.append(d_cost_B/discr_steps) + d_costs_A.append(d_cost_A/discr_steps) + + total_iters += 1 + if total_iters % self.save_sample ==0: + plt.clf() + print("At iter: %d - dt: %s " % (total_iters, datetime.now() - t0)) + print("At iter: %d - dt: %s " % (total_iters, datetime.now() - t0)) + print("Discrimator_A cost {0:.4g}, Generator_B_to_A cost {1:.4g}".format(d_cost_A, g_cost_A)) + print("Discrimator_B cost {0:.4g}, Generator_A_to_B cost {1:.4g}".format(d_cost_B, g_cost_B)) + print('Saving a sample...') + + + X_A = validating_A[np.random.randint(validating_size)].reshape(1, self.n_H_A, self.n_W_A, self.n_C) + X_B = validating_B[np.random.randint(validating_size)].reshape(1, self.n_H_B, self.n_W_B, self.n_C) + + test_sample_B=self.get_sample_A_to_B(X_A) + test_sample_A=self.get_sample_B_to_A(X_B) + + if preprocess: + + test_sample_B_1 = test_sample_B*self.std_B + test_sample_B_2 = test_sample_B_1+self.mean_B + test_sample_B=test_sample_B_2 + + test_sample_A_1 = test_sample_A*self.std_A + test_sample_A_2 = test_sample_A_1+self.mean_A + test_sample_A=test_sample_A_2 + + plt.clf() + plt.subplot(1,2,1) + plt.imshow(X_A.reshape(self.n_H_A,self.n_W_A, self.n_C)) + plt.axis('off') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + + plt.subplot(1,2,2) + plt.imshow(test_sample_B.reshape(self.n_H_A,self.n_W_A, self.n_C)) + plt.axis('off') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + plt.savefig(self.path+"/B_to_A_{0}.png".format(total_iters), dpi=80) + + plt.clf() + plt.subplot(1,2,1) + plt.imshow(X_B.reshape(self.n_H_B,self.n_W_B, self.n_C)) + plt.axis('off') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + + 
plt.subplot(1,2,2) + plt.imshow(test_sample_A.reshape(self.n_H_B,self.n_W_B, self.n_C)) + plt.axis('off') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + plt.savefig(self.path+"/A_to_B_{0}.png".format(total_iters), dpi=80) + + plt.clf() + plt.subplot(1,2,1) + plt.plot(d_costs_A, label='Discriminator A cost') + plt.plot(g_GANs_A, label='Generator B to A GAN cost') + plt.xlabel('Iteration') + plt.ylabel('Cost') + plt.legend() + + plt.subplot(1,2,2) + plt.plot(g_costs_A, label='Generator B to A total cost') + plt.plot(g_GANs_A, label='Generator B to A GAN cost') + plt.plot(g_cycles_B, label='Generators B to A to B cycle cost') + plt.xlabel('Iteration') + plt.ylabel('Cost') + plt.legend() + + fig = plt.gcf() + fig.set_size_inches(15,5) + plt.savefig(self.path+'/cost_iteration_gen_disc_B_to_A.png',dpi=150) + + plt.clf() + plt.subplot(1,2,1) + plt.plot(d_costs_B, label='Discriminator B cost') + plt.plot(g_GANs_B, label='Generator A to B GAN cost') + plt.xlabel('Iteration') + plt.ylabel('Cost') + plt.legend() + + plt.subplot(1,2,2) + plt.plot(g_costs_B, label='Generator A to B total cost') + plt.plot(g_GANs_B, label='Generator A to B GAN cost') + plt.plot(g_cycles_B, label='Generators B to A to B cycle cost') + plt.xlabel('Iteration') + plt.ylabel('Cost') + plt.legend() + + fig = plt.gcf() + fig.set_size_inches(15,5) + plt.savefig(self.path+'/cost_iteration_gen_disc_A_to_B.png',dpi=150) + + return test_sample_A, test_sample_B + + def get_sample_A_to_B(self, Z): + + one_sample = self.session.run( + self.sample_images_test_B, + feed_dict={self.input_test_A:Z, self.batch_sz: 1}) + + return one_sample + + def get_sample_B_to_A(self, Z): + + one_sample = self.session.run( + self.sample_images_test_A, + feed_dict={self.input_test_B:Z, self.batch_sz: 1}) + + return one_sample + def get_samples_A_to_B(self, Z): + + many_samples = self.session.run( + self.sample_images_test_B, + feed_dict={self.input_test_A:Z, self.batch_sz: Z.shape[0]}) + + return many_samples + + def 
get_samples_B_to_A(self, Z): + + many_samples = self.session.run( + self.sample_images_test_A, + feed_dict={self.input_test_B:Z, self.batch_sz: Z.shape[0]}) + + return many_samples + + + + + # def fit(self, X_A, X_B, validating_size): + + # all_A = X_A + # all_B = X_B + + # gen_steps = self.gen_steps + # discr_steps = self.discr_steps + + # m = X_A.shape[0] + # train_A = all_A[0:m-validating_size] + # train_B = all_B[0:m-validating_size] + + # validating_A = all_A[m-validating_size:m] + # validating_B = all_B[m-validating_size:m] + + # seed=self.seed + + # d_costs_A=[] + # g_GANs_A=[] + # g_cycles_A=[] + # g_costs_A=[] + + # d_costs_B=[] + # g_GANs_B=[] + # g_cycles_B=[] + # g_costs_B=[] + + # N=len(train_A) + # n_batches = N // self.batch_size + + # total_iters=0 + + # print('\n ****** \n') + # print('Training cycle GAN with pix2pix gen/disc with a total of ' +str(N)+' samples distributed in '+ str((N)//self.batch_size) +' batches of size '+str(self.batch_size)+'\n') + # print('The validation set consists of {0} images'.format(validating_A.shape[0])) + # print('The learning rate is '+str(self.lr)+', and every ' +str(self.save_sample)+ ' batches a generated sample will be saved to '+ self.path) + # print('\n ****** \n') + + # for epoch in range(self.epochs): + + # seed +=1 + + # print('Epoch:', epoch) + + # batches_A = unsupervised_random_mini_batches(train_A, self.batch_size, seed) + # batches_B = unsupervised_random_mini_batches(train_B, self.batch_size, seed) + + # for X_batch_A, X_batch_B in zip(batches_A,batches_B): + + # bs = X_batch_A.shape[0] + + # t0 = datetime.now() + + # #optimize generator_A + # g_cost_A=0 + # g_GAN_A=0 + # g_cycle_A=0 + # for i in range(gen_steps): + + # _, g_cost_A, g_GAN_A, g_cycle_A = self.session.run( + # (self.g_train_op_A, self.g_cost_A, self.g_cost_GAN_A, self.g_cost_cycle_A), + # feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + # ) + # g_cost_A+=g_cost_A + # g_GAN_A+=g_GAN_A + # 
g_cycle_A+=g_cycle_A + + # g_costs_A.append(g_cost_A/gen_steps) + # g_GANs_A.append(g_GAN_A/gen_steps) + # g_cycles_A.append(self.cycl_weight*g_cycle_A/gen_steps) + + + # #optimize discriminator_B + # d_cost_B=0 + # for i in range(discr_steps): + + # _, d_cost_B, = self.session.run( + # (self.d_train_op_B, self.d_cost_B), + # feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + # ) + # d_cost_B+=d_cost_B + + # d_costs_B.append(d_cost_B/discr_steps) + + # #optimize generator_B + # g_cost_B=0 + # g_GAN_B=0 + # g_cycle_B=0 + # for i in range(gen_steps): + # _, g_cost_B, g_GAN_B, g_cycle_B = self.session.run( + # (self.g_train_op_B, self.g_cost_B, self.g_cost_GAN_B, self.g_cost_cycle_B), + # feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + # ) + # g_cost_B+=g_cost_B + # g_GAN_B+=g_GAN_B + # g_cycle_B+=g_cycle_B + + # g_costs_B.append(g_cost_B/gen_steps) + # g_GANs_B.append(g_GAN_B/gen_steps) + # g_cycles_B.append(self.cycl_weight*g_cycle_B/gen_steps) + + # #optimize Discriminator_A + + # d_cost_A=0 + # for i in range(discr_steps): + + # _, d_cost_A, = self.session.run( + # (self.d_train_op_A, self.d_cost_A), + # feed_dict={self.input_A:X_batch_A, self.input_B:X_batch_B, self.batch_sz:bs}, + # ) + + # d_cost_A+=d_cost_A + + # d_costs_A.append(d_cost_A/discr_steps) + + # total_iters += 1 + # if total_iters % self.save_sample ==0: + # plt.clf() + # print("At iter: %d - dt: %s " % (total_iters, datetime.now() - t0)) + # print("At iter: %d - dt: %s " % (total_iters, datetime.now() - t0)) + # print("Discrimator_A cost {0:.4g}, Generator_B_to_A cost {1:.4g}".format(d_cost_A, g_cost_A)) + # print("Discrimator_B cost {0:.4g}, Generator_A_to_B cost {1:.4g}".format(d_cost_B, g_cost_B)) + # print('Saving a sample...') + + # #A is apples? + # #B is oranges? 
+ + # X_A = validating_A[np.random.randint(validating_size)].reshape(1, self.n_H_A, self.n_W_A, self.n_C) + # X_B = validating_B[np.random.randint(validating_size)].reshape(1, self.n_H_B, self.n_W_B, self.n_C) + + # test_sample_B=self.get_sample_A_to_B(X_A) + # test_sample_A=self.get_sample_B_to_A(X_B) + + # if preprocess: + + # test_sample_B_1 = test_sample_B*self.std_B + # test_sample_B_2 = test_sample_B_1+self.mean_B + # test_sample_B=test_sample_B_2 + + # test_sample_A_1 = test_sample_A*self.std_A + # test_sample_A_2 = test_sample_A_1+self.mean_A + # test_sample_A=test_sample_A_2 + + # plt.clf() + # plt.subplot(1,2,1) + # plt.imshow(X_A.reshape(self.n_H_A,self.n_W_A, self.n_C)) + # plt.axis('off') + # plt.subplots_adjust(wspace=0.2,hspace=0.2) + + # plt.subplot(1,2,2) + # plt.imshow(test_sample_B.reshape(self.n_H_A,self.n_W_A, self.n_C)) + # plt.axis('off') + # plt.subplots_adjust(wspace=0.2,hspace=0.2) + # plt.savefig(self.path+"/B_to_A_{0}.png".format(total_iters), dpi=80) + + # plt.clf() + # plt.subplot(1,2,1) + # plt.imshow(X_B.reshape(self.n_H_B,self.n_W_B, self.n_C)) + # plt.axis('off') + # plt.subplots_adjust(wspace=0.2,hspace=0.2) + + # plt.subplot(1,2,2) + # plt.imshow(test_sample_A.reshape(self.n_H_B,self.n_W_B, self.n_C)) + # plt.axis('off') + # plt.subplots_adjust(wspace=0.2,hspace=0.2) + # plt.savefig(self.path+"/A_to_B_{0}.png".format(total_iters), dpi=80) + + # plt.clf() + # plt.subplot(1,2,1) + # plt.plot(d_costs_A, label='Discriminator A cost') + # plt.plot(g_GANs_A, label='Generator B to A GAN cost') + # plt.xlabel('Iteration') + # plt.ylabel('Cost') + # plt.legend() + + # plt.subplot(1,2,2) + # plt.plot(g_costs_A, label='Generator B to A total cost') + # plt.plot(g_GANs_A, label='Generator B to A GAN cost') + # plt.plot(g_cycles_B, label='Generators B to A to B cycle cost') + # plt.xlabel('Iteration') + # plt.ylabel('Cost') + # plt.legend() + + # fig = plt.gcf() + # fig.set_size_inches(15,5) + # 
# ---------------------------------------------------------------------------
# architectures/improved_DCGAN.py
# ---------------------------------------------------------------------------
# NETWORK ARCHITECTURES

import numpy as np
import os
import math

import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime

from architectures.utils.NN_building_blocks import *
from architectures.utils.NN_gen_building_blocks import *
from architectures.utils.toolbox import *


def lrelu(x, alpha=0.2):
    """Leaky ReLU activation: max(alpha * x, x)."""
    return tf.maximum(alpha * x, x)


# Dummy module-level defaults; real values are supplied to the constructor.
LEARNING_RATE = None
BETA1 = None
COST_TYPE = None
BATCH_SIZE = None
EPOCHS = None
SAVE_SAMPLE_PERIOD = None
PATH = None
SEED = None
rnd_seed = 1
PREPROCESS = None
LAMBDA = .01
EPS = 1e-6

DISCR_STEPS = None
GEN_STEPS = None

min_true = None
max_true = None

min_reco = None
max_reco = None

n_H = None
n_W = None
n_C = None

d_sizes = None
g_sizes = None


# tested on mnist
class DCGAN(object):
    """DCGAN with a minibatch-discrimination discriminator.

    Builds the TF1 graph for a DCGAN whose discriminator and generator
    topologies are described by the ``d_sizes`` / ``g_sizes`` dicts.
    Supported cost types: ``'GAN'`` (sigmoid cross-entropy with one-sided
    label smoothing), ``'WGAN-gp'`` (Wasserstein with gradient penalty)
    and ``'FEATURE'`` (feature matching for the generator).
    """

    def __init__(
        self,
        n_H=n_H, n_W=n_W, n_C=n_C,
        min_reco=min_reco, max_reco=max_reco,
        d_sizes=d_sizes, g_sizes=g_sizes,
        lr=LEARNING_RATE, beta1=BETA1, preprocess=PREPROCESS,
        cost_type=COST_TYPE,
        discr_steps=DISCR_STEPS, gen_steps=GEN_STEPS,
        batch_size=BATCH_SIZE, epochs=EPOCHS,
        save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED,
    ):
        """Build the DCGAN graph.

        Positional-style keyword arguments:

        - n_H, n_W, n_C: image height, width and channel count.
          (BUGFIX: the original signature had ``n_W=n_H``, silently tying
          the width default to the height default.)
        - min_reco, max_reco: normalisation range of the training images
          (kept for the de-normalisation of samples).
        - d_sizes / g_sizes: architecture dicts; ``g_sizes['z']`` is the
          latent-space dimensionality.
        - lr, beta1: Adam optimiser parameters.
        - cost_type: 'GAN', 'WGAN-gp' or 'FEATURE'.
        - discr_steps / gen_steps: optimiser sub-steps per batch.
        - batch_size, epochs, save_sample, path, seed: training loop
          bookkeeping (``save_sample`` = iterations between sample dumps).
        """
        self.n_H = n_H
        self.n_W = n_W
        self.n_C = n_C
        self.seed = seed
        self.latent_dims = g_sizes['z']
        self.gen_steps = gen_steps
        self.discr_steps = discr_steps

        self.min_reco = min_reco
        self.max_reco = max_reco

        # graph inputs
        self.X = tf.placeholder(
            tf.float32, shape=(None, n_H, n_W, n_C), name='X',
        )
        self.Z = tf.placeholder(
            tf.float32, shape=(None, self.latent_dims), name='Z',
        )
        self.batch_sz = tf.placeholder(
            tf.int32, shape=(), name='batch_sz',
        )
        # NOTE(review): this placeholder is never fed nor used by the
        # optimizers (they use the float `lr` argument); it is overwritten
        # by `self.lr = lr` at the end of __init__.
        self.lr = tf.placeholder(
            tf.float32, shape=(), name='lr',
        )

        D = Discriminator_minibatch(self.X, d_sizes, 'A')
        G = Generator(self.Z, self.n_H, self.n_W, g_sizes, 'A')

        with tf.variable_scope('discriminator_A') as scope:
            logits_real, feature_output_real = D.d_forward(self.X)

        with tf.variable_scope('generator_A') as scope:
            sample_images = G.g_forward(self.Z)

        # get sample logits (reuse discriminator weights)
        with tf.variable_scope('discriminator_A') as scope:
            scope.reuse_variables()
            logits_fake, feature_output_fake = D.d_forward(sample_images, reuse=True)

        # get sample images for test time (batch norm in inference mode)
        with tf.variable_scope('generator_A') as scope:
            scope.reuse_variables()
            self.sample_images_test = G.g_forward(
                self.Z, reuse=True, is_training=False
            )

        predicted_real = tf.nn.sigmoid(logits_real)
        predicted_fake = tf.nn.sigmoid(logits_fake)

        # parameter lists (variables created by the building blocks are
        # named 'd...' / 'g...')
        self.d_params = [t for t in tf.trainable_variables() if t.name.startswith('d')]
        self.g_params = [t for t in tf.trainable_variables() if t.name.startswith('g')]

        # cost building
        epsilon = 1e-3  # label smoothing amount
        if cost_type == 'GAN':
            self.d_cost_real = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_real,
                labels=(1 - epsilon) * tf.ones_like(logits_real)
            )
            self.d_cost_fake = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake,
                labels=epsilon + tf.zeros_like(logits_fake)
            )
            self.d_cost = tf.reduce_mean(self.d_cost_real) + tf.reduce_mean(self.d_cost_fake)

            # generator cost: fake logits should be classified as real
            self.g_cost = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake,
                    labels=(1 - epsilon) * tf.ones_like(logits_fake)
                )
            )

        if cost_type == 'WGAN-gp':
            self.d_cost_real = -tf.reduce_mean(logits_real)
            self.d_cost_fake = tf.reduce_mean(logits_fake)
            self.d_cost = self.d_cost_real + self.d_cost_fake

            # random interpolation point between real and generated images
            # (renamed from `epsilon` to avoid shadowing the smoothing const)
            eps_interp = tf.random_uniform(
                [self.batch_sz, 1, 1, 1], minval=0., maxval=1.,
            )
            interpolated = eps_interp * self.X + (1 - eps_interp) * sample_images

            with tf.variable_scope('discriminator_A') as scope:
                scope.reuse_variables()
                # BUGFIX: d_forward returns (logits, minibatch features);
                # only the logits enter the gradient penalty.
                disc_interpolated, _ = D.d_forward(interpolated, reuse=True)

            gradients = tf.gradients(disc_interpolated, [interpolated])[0]
            # BUGFIX: the gradient norm must be taken over all non-batch
            # axes (H, W, C), not axis 1 alone, per the WGAN-gp paper.
            slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1, 2, 3]))
            gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
            self.d_cost += 10 * gradient_penalty

            self.g_cost = -self.d_cost_fake

        if cost_type == 'FEATURE':
            self.d_cost_real = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_real,
                labels=(1 - epsilon) * tf.ones_like(logits_real)
            )
            self.d_cost_fake = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake,
                labels=epsilon + tf.zeros_like(logits_fake)
            )
            self.d_cost = tf.reduce_mean(self.d_cost_real) + tf.reduce_mean(self.d_cost_fake)

            # generator cost: RMS distance between real and fake
            # discriminator feature activations (feature matching)
            self.g_cost = tf.sqrt(tf.reduce_mean(tf.pow(feature_output_real - feature_output_fake, 2)))

        self.d_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
        ).minimize(
            self.d_cost,
            var_list=self.d_params
        )

        self.g_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
        ).minimize(
            self.g_cost,
            var_list=self.g_params
        )

        # discriminator accuracy at the 0.5 threshold
        real_predictions = tf.cast(predicted_real > 0.5, tf.float32)
        fake_predictions = tf.cast(predicted_fake < 0.5, tf.float32)

        # NOTE(review): uses the nominal batch size, so the accuracy of a
        # final partial batch is under-estimated.
        num_predictions = 2.0 * batch_size
        num_correct = tf.reduce_sum(real_predictions) + tf.reduce_sum(fake_predictions)
        self.d_accuracy = num_correct / num_predictions

        # bookkeeping for fit()/sample()
        self.cost_type = cost_type
        self.batch_size = batch_size
        self.epochs = epochs
        self.save_sample = save_sample
        self.path = path
        self.D = D
        self.G = G

        self.lr = lr

    def set_session(self, session):
        """Attach a tf.Session to this model and to every layer object."""
        self.session = session

        for layer in self.D.d_conv_layers:
            layer.set_session(session)

        # for layer in self.D.d_dense_layers:
        #     layer.set_session(session)

        for layer in self.G.g_conv_layers:
            layer.set_session(session)

        for layer in self.G.g_dense_layers:
            layer.set_session(session)

    def fit(self, X, validating_size):
        """Train the GAN on X; the last `validating_size` rows are held out.

        Alternates `discr_steps` discriminator updates and `gen_steps`
        generator updates per mini-batch, logging averaged costs and
        periodically saving generated samples and a cost plot to self.path.
        """
        _all = X

        gen_steps = self.gen_steps
        discr_steps = self.discr_steps

        m = _all.shape[0]
        train = _all[0:m - validating_size]
        validating = _all[m - validating_size:m]

        seed = self.seed
        d_costs = []
        g_costs = []

        N = len(train)
        n_batches = N // self.batch_size

        total_iters = 0

        print('\n ****** \n')
        print('Training improved DCGAN with a total of ' + str(N) + ' samples distributed in batches of size ' + str(self.batch_size) + '\n')
        print('The learning rate is ' + str(self.lr) + ', and every ' + str(self.save_sample) + ' epoch a generated sample will be saved to ' + self.path)
        print('\n ****** \n')

        for epoch in range(self.epochs):
            seed += 1
            print('Epoch:', epoch)

            batches = unsupervised_random_mini_batches(train, self.batch_size, seed)

            for X_batch in batches:
                bs = X_batch.shape[0]
                t0 = datetime.now()

                np.random.seed(seed)

                d_cost_tot = 0
                d_acc = 0
                for i in range(discr_steps):
                    Z = np.random.uniform(-1, 1, size=(bs, self.latent_dims))
                    _, d_cost, d_acc = self.session.run(
                        (self.d_train_op, self.d_cost, self.d_accuracy),
                        feed_dict={self.X: X_batch, self.Z: Z, self.batch_sz: bs},
                    )
                    # BUGFIX: accumulate into a separate variable; the
                    # original `d_cost += d_cost` doubled the last step
                    # instead of summing over steps.
                    d_cost_tot += d_cost

                d_costs.append(d_cost_tot / discr_steps)

                g_cost_tot = 0
                for i in range(gen_steps):
                    # BUGFIX: draw a fresh latent batch per generator step;
                    # the original reused the stale Z from the discriminator
                    # loop (NameError when discr_steps == 0).
                    Z = np.random.uniform(-1, 1, size=(bs, self.latent_dims))
                    _, g_cost = self.session.run(
                        (self.g_train_op, self.g_cost),
                        feed_dict={self.X: X_batch, self.Z: Z, self.batch_sz: bs},
                    )
                    g_cost_tot += g_cost

                g_costs.append(g_cost_tot / gen_steps)

                total_iters += 1
                if total_iters % self.save_sample == 0:
                    print("At iter: %d - dt: %s - d_acc: %.4f" % (total_iters, datetime.now() - t0, d_acc))
                    print("Discriminator cost {0:.4g}, Generator cost {1:.4g}".format(d_cost_tot / discr_steps, g_cost_tot / gen_steps))
                    print('Saving a sample...')
                    plt.clf()
                    np.random.seed(seed)
                    Z = np.random.uniform(-1, 1, size=(16, self.latent_dims))

                    samples = self.sample(Z)  # shape is (16, H, W, C)
                    # samples = denormalise(samples, self.min_reco, self.max_reco)

                    w = self.n_W
                    h = self.n_H
                    samples = np.sum(samples, axis=3)
                    samples = samples.reshape(16, h, w)

                    for i in range(16):
                        plt.subplot(4, 4, i + 1)
                        plt.imshow(samples[i].reshape(h, w))
                        plt.subplots_adjust(wspace=0.2, hspace=0.2)
                        plt.axis('off')

                    fig = plt.gcf()
                    fig.set_size_inches(7, 7)
                    plt.savefig(self.path + '/samples_at_iter_%d.png' % total_iters, dpi=300)

                    plt.clf()
                    plt.plot(d_costs, label='Discriminator cost')
                    plt.plot(g_costs, label='Generator cost')
                    plt.legend()

                    fig = plt.gcf()
                    fig.set_size_inches(10, 5)
                    plt.savefig(self.path + '/cost vs iteration.png', dpi=300)

    def sample(self, Z):
        """Generate a batch of images from latent batch Z (inference mode)."""
        many_samples = self.session.run(
            self.sample_images_test,
            feed_dict={self.Z: Z, self.batch_sz: self.batch_size})
        return many_samples

    def get_sample(self, Z):
        """Generate a single image from a single latent vector Z."""
        one_sample = self.session.run(
            self.sample_images_test,
            feed_dict={self.Z: Z, self.batch_sz: 1})
        return one_sample


# ---------------------------------------------------------------------------
# architectures/pix2pix.py
# ---------------------------------------------------------------------------
import numpy as np
import os
import math

import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime

from architectures.utils.NN_building_blocks import *
from architectures.utils.NN_gen_building_blocks import *
from architectures.utils.toolbox import *


# some hyperparameters of the network (dummy defaults, overridden by callers)
LEARNING_RATE = None
BETA1 = None
COST_TYPE = None
BATCH_SIZE = None
EPOCHS = None
SAVE_SAMPLE_PERIOD = None
PATH = None
SEED = None
rnd_seed = 1
preprocess = None
LAMBDA = .01
EPS = 1e-10
CYCL_WEIGHT = None
GAN_WEIGHT = None
DISCR_STEPS = None
GEN_STEPS = None

min_true = None
max_true = None

min_reco = None
max_reco = None

n_H_A = None
n_W_A = None
n_W_B = None
n_H_B = None
n_C = None
d_sizes = None
g_sizes_enc = None
g_sizes_dec = None


class pix2pix(object):
    """Conditional image-to-image GAN (pix2pix, arXiv:1611.07004).

    A single generator maps domain-A images to domain-B images; a
    conditional discriminator sees (A, B) pairs.  Cost types: ``'GAN'``
    (log-loss on sigmoid outputs) or ``'WGAN-gp'``.
    """

    def __init__(
        self,
        n_H_A=n_H_A, n_W_A=n_W_A,
        n_H_B=n_H_B, n_W_B=n_W_B, n_C=n_C,
        min_true=min_true, max_true=max_true,
        min_reco=min_reco, max_reco=max_reco,
        d_sizes=d_sizes, g_sizes_enc=g_sizes_enc, g_sizes_dec=g_sizes_dec,
        lr=LEARNING_RATE, beta1=BETA1, preprocess=preprocess,
        cost_type=COST_TYPE, gan_weight=GAN_WEIGHT, cycl_weight=CYCL_WEIGHT,
        discr_steps=DISCR_STEPS, gen_steps=GEN_STEPS,
        batch_size=BATCH_SIZE, epochs=EPOCHS,
        save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED,
    ):
        """Build the pix2pix graph.

        Keyword arguments:

        - n_H_A/n_W_A, n_H_B/n_W_B, n_C: input (A) and target (B) image
          dimensions and shared channel count.
        - min/max_true, min/max_reco: normalisation ranges kept for
          de-normalising drawn samples.
        - d_sizes, g_sizes_enc, g_sizes_dec: architecture dicts for the
          discriminator and the generator encoder/decoder.
        - lr, beta1: Adam parameters (beta2 fixed at 0.9).
        - cost_type: 'GAN' or 'WGAN-gp'.
        - gan_weight, cycl_weight: weights of the adversarial and
          reconstruction terms in the generator cost.
        - discr_steps, gen_steps: optimiser sub-steps per batch.
        - batch_size, epochs, save_sample, path, seed: training loop
          bookkeeping.
        """
        self.max_reco = max_reco
        self.min_reco = min_reco

        self.max_true = max_true
        self.min_true = min_true

        self.seed = seed

        self.n_W_A = n_W_A
        self.n_H_A = n_H_A

        self.n_W_B = n_W_B
        self.n_H_B = n_H_B

        self.n_C = n_C

        self.batch_sz = tf.placeholder(
            tf.int32, shape=(), name='batch_sz',
        )

        self.input_A = tf.placeholder(
            tf.float32, shape=(None, n_H_A, n_W_A, n_C), name='X_A',
        )

        self.input_B = tf.placeholder(
            tf.float32, shape=(None, n_H_B, n_W_B, n_C), name='X_B',
        )

        self.input_test_A = tf.placeholder(
            tf.float32, shape=(None, n_H_A, n_W_A, n_C), name='X_test_A',
        )

        D = pix2pixDiscriminator(self.input_A, d_sizes, 'B')
        G = pix2pixGenerator(self.input_A, n_H_B, n_W_B, g_sizes_enc, g_sizes_dec, 'A_to_B')

        with tf.variable_scope('generator_A_to_B') as scope:
            sample_images = G.g_forward(self.input_A)

        # discriminator is conditional: it sees the input A next to either
        # the real B or the generated B
        with tf.variable_scope('discriminator_B') as scope:
            predicted_real = D.d_forward(self.input_A, self.input_B)

        with tf.variable_scope('discriminator_B') as scope:
            scope.reuse_variables()
            predicted_fake = D.d_forward(self.input_A, sample_images, reuse=True)

        # sample images at test time (batch norm in inference mode)
        with tf.variable_scope('generator_A_to_B') as scope:
            scope.reuse_variables()
            self.sample_images_test_A_to_B = G.g_forward(
                self.input_test_A, reuse=True, is_training=False
            )

        self.d_params = [t for t in tf.trainable_variables() if 'discriminator' in t.name]
        self.g_params = [t for t in tf.trainable_variables() if 'generator' in t.name]

        if cost_type == 'GAN':
            # classical GAN log-loss computed on sigmoid probabilities
            predicted_real = tf.nn.sigmoid(predicted_real)
            predicted_fake = tf.nn.sigmoid(predicted_fake)

            # high when real pairs are classified as real
            d_cost_real = tf.log(predicted_real + EPS)
            # high when generated pairs are classified as fake
            d_cost_fake = tf.log(1 - predicted_fake + EPS)

            self.d_cost = tf.reduce_mean(-(d_cost_real + d_cost_fake))

            # generator wants generated pairs classified as real
            self.g_cost_GAN = tf.reduce_mean(-tf.log(predicted_fake + EPS))

            # NOTE(review): despite the `_l1` name this is a mean-squared
            # (L2) reconstruction cost; the WGAN-gp branch uses true L1.
            # Kept as-is to preserve training behaviour.
            self.g_cost_l1 = tf.reduce_mean(tf.square(self.input_B - sample_images))
            self.g_cost = gan_weight * self.g_cost_GAN + cycl_weight * self.g_cost_l1

        if cost_type == 'WGAN-gp':
            self.g_cost_GAN = -tf.reduce_mean(predicted_fake)
            self.g_cost_l1 = tf.reduce_mean(tf.abs(self.input_B - sample_images))
            self.g_cost = gan_weight * self.g_cost_GAN + cycl_weight * self.g_cost_l1

            self.d_cost = tf.reduce_mean(predicted_fake) - tf.reduce_mean(predicted_real)

            # BUGFIX: the interpolation is between B-domain tensors
            # (input_B and sample_images), so alpha must have the B-side
            # spatial dims; the original used n_H_A/n_W_A and broke
            # whenever A and B shapes differ.
            alpha = tf.random_uniform(
                shape=[self.batch_sz, self.n_H_B, self.n_W_B, self.n_C],
                minval=0.,
                maxval=1.
            )

            interpolates = alpha * self.input_B + (1 - alpha) * sample_images

            with tf.variable_scope('discriminator_B') as scope:
                scope.reuse_variables()
                disc_interpolates = D.d_forward(self.input_A, interpolates, reuse=True)

            gradients = tf.gradients(disc_interpolates, [interpolates])[0]
            # BUGFIX: gradient norm over all non-batch axes (consistent
            # with pix2pix_cycleDisc), not axis 1 alone
            slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1, 2, 3]))
            self.gradient_penalty = tf.reduce_mean((slopes - 1) ** 2)
            self.d_cost += LAMBDA * self.gradient_penalty

        self.d_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
            beta2=0.9
        ).minimize(
            self.d_cost,
            var_list=self.d_params
        )

        self.g_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
            beta2=0.9
        ).minimize(
            self.g_cost,
            var_list=self.g_params
        )

        # bookkeeping for fit()/sampling
        self.batch_size = batch_size
        self.epochs = epochs
        self.save_sample = save_sample
        self.path = path
        self.lr = lr

        self.D = D
        self.G = G

        self.sample_images = sample_images
        self.preprocess = preprocess
        self.cost_type = cost_type
        self.cycl_weight = cycl_weight

        self.gen_steps = gen_steps
        self.discr_steps = discr_steps

    def set_session(self, session):
        """Attach a tf.Session to this model and to every layer object."""
        self.session = session

        for layer in self.D.d_conv_layers:
            layer.set_session(session)

        for layer in self.G.g_enc_conv_layers:
            layer.set_session(session)

        for layer in self.G.g_dec_conv_layers:
            layer.set_session(session)

    def fit(self, X_A, X_B, validating_size):
        """Train on paired arrays (X_A, X_B); last rows are held out.

        Runs `gen_steps` generator and `discr_steps` discriminator updates
        per mini-batch, logging averaged costs and periodically saving a
        validation sample and a cost plot to self.path.
        """
        all_A = X_A
        all_B = X_B
        gen_steps = self.gen_steps
        discr_steps = self.discr_steps

        m = X_A.shape[0]
        train_A = all_A[0:m - validating_size]
        train_B = all_B[0:m - validating_size]

        validating_A = all_A[m - validating_size:m]
        validating_B = all_B[m - validating_size:m]

        seed = self.seed

        d_costs = []
        d_gps = []
        g_costs = []
        g_GANs = []
        g_l1s = []
        N = len(train_A)
        n_batches = N // self.batch_size

        total_iters = 0

        print('\n ****** \n')
        print('Training pix2pix (from 1611.07004) GAN with a total of ' + str(N) + ' samples distributed in ' + str((N) // self.batch_size) + ' batches of size ' + str(self.batch_size) + '\n')
        print('The validation set consists of {0} images'.format(validating_A.shape[0]))
        print('The learning rate is ' + str(self.lr) + ', and every ' + str(self.save_sample) + ' batches a generated sample will be saved to ' + self.path)
        print('\n ****** \n')

        for epoch in range(self.epochs):
            seed += 1
            print('Epoch:', epoch)

            batches_A = unsupervised_random_mini_batches(train_A, self.batch_size, seed)
            batches_B = unsupervised_random_mini_batches(train_B, self.batch_size, seed)

            for X_batch_A, X_batch_B in zip(batches_A, batches_B):
                bs = X_batch_A.shape[0]
                t0 = datetime.now()

                g_cost_tot = 0
                g_GAN_tot = 0
                g_l1_tot = 0
                for i in range(gen_steps):
                    _, g_cost, g_GAN, g_l1 = self.session.run(
                        (self.g_train_op, self.g_cost, self.g_cost_GAN, self.g_cost_l1),
                        feed_dict={self.input_A: X_batch_A, self.input_B: X_batch_B, self.batch_sz: bs},
                    )
                    # BUGFIX: accumulate into separate variables; the
                    # original `g_cost += g_cost` doubled the last step
                    # instead of summing over steps.
                    g_cost_tot += g_cost
                    g_GAN_tot += g_GAN
                    g_l1_tot += g_l1

                g_costs.append(g_cost_tot / gen_steps)
                g_GANs.append(g_GAN_tot / gen_steps)
                g_l1s.append(self.cycl_weight * g_l1_tot / gen_steps)

                d_cost_tot = 0
                d_gp_tot = 0
                for i in range(discr_steps):
                    if self.cost_type == 'WGAN-gp':
                        _, d_cost, d_gp = self.session.run(
                            (self.d_train_op, self.d_cost, self.gradient_penalty),
                            feed_dict={self.input_A: X_batch_A, self.input_B: X_batch_B, self.batch_sz: bs},
                        )
                        d_gp_tot += d_gp
                    else:
                        _, d_cost = self.session.run(
                            (self.d_train_op, self.d_cost),
                            feed_dict={self.input_A: X_batch_A, self.input_B: X_batch_B, self.batch_sz: bs},
                        )
                    d_cost_tot += d_cost

                d_costs.append(d_cost_tot / discr_steps)
                if self.cost_type == 'WGAN-gp':
                    d_gps.append(LAMBDA * d_gp_tot / discr_steps)

                total_iters += 1
                if total_iters % self.save_sample == 0:
                    plt.clf()
                    print("At iter: %d - dt: %s" % (total_iters, datetime.now() - t0))
                    print("Discriminator cost {0:.4g}, Generator cost {1:.4g}".format(d_costs[-1], g_costs[-1]))
                    print('Saving a sample...')

                    if self.preprocess != False:
                        draw_nn_sample(validating_A, validating_B, 1, self.preprocess,
                                       self.min_true, self.max_true,
                                       self.min_reco, self.max_reco,
                                       f=self.get_sample_A_to_B, is_training=True,
                                       total_iters=total_iters, PATH=self.path)
                    else:
                        draw_nn_sample(validating_A, validating_B, 1, self.preprocess,
                                       f=self.get_sample_A_to_B, is_training=True,
                                       total_iters=total_iters, PATH=self.path)

                    plt.clf()
                    plt.subplot(1, 2, 1)
                    plt.plot(d_costs, label='Discriminator GAN cost')
                    plt.plot(g_GANs, label='Generator GAN cost')
                    plt.xlabel('Epoch')
                    plt.ylabel('Cost')
                    plt.legend()

                    plt.subplot(1, 2, 2)
                    plt.plot(g_costs, label='Generator total cost')
                    plt.plot(g_GANs, label='Generator GAN cost')
                    plt.plot(g_l1s, label='Generator l1 cycle cost')
                    plt.xlabel('Epoch')
                    plt.ylabel('Cost')
                    plt.legend()

                    fig = plt.gcf()
                    fig.set_size_inches(15, 5)
                    plt.savefig(self.path + '/cost_iteration_gen_disc_B_to_A.png', dpi=150)

    def get_sample_A_to_B(self, Z):
        """Translate a single A-domain image Z to domain B."""
        one_sample = self.session.run(
            self.sample_images_test_A_to_B,
            feed_dict={self.input_test_A: Z, self.batch_sz: 1})
        return one_sample

    def get_samples_A_to_B(self, Z):
        """Translate a batch of A-domain images Z to domain B."""
        many_samples = self.session.run(
            self.sample_images_test_A_to_B,
            feed_dict={self.input_test_A: Z, self.batch_sz: Z.shape[0]})
        return many_samples


# ---------------------------------------------------------------------------
# architectures/pix2pix_cycleDisc.py
# ---------------------------------------------------------------------------
import numpy as np
import os
import math

import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime

from architectures.utils.NN_building_blocks import *
from architectures.utils.NN_gen_building_blocks import *
from architectures.utils.toolbox import *


# some dummy constants (overridden by callers)
LEARNING_RATE = None
BETA1 = None
COST_TYPE = None
BATCH_SIZE = None
EPOCHS = None
SAVE_SAMPLE_PERIOD = None
PATH = None
SEED = None
rnd_seed = 1
preprocess = None
LAMBDA = .01
EPS = 1e-10
CYCL_WEIGHT = None
GAN_WEIGHT = None
DISCR_STEPS = None
GEN_STEPS = None
max_true = None
max_reco = None

n_H_A = None
n_W_A = None
n_W_B = None
n_H_B = None
n_C = None

d_sizes = None
g_sizes_enc = None
g_sizes_dec = None


class pix2pix_cycleDisc(object):
    """pix2pix variant with an unconditional (cycleGAN-style) discriminator.

    The generator maps domain-A images to domain-B images; the
    discriminator judges B-domain images alone (it does not see the
    conditioning A image).  Cost types: ``'GAN'`` or ``'WGAN-gp'``.
    """

    def __init__(
        self,
        n_H_A=n_H_A, n_W_A=n_W_A,
        n_H_B=n_H_B, n_W_B=n_W_B, n_C=n_C,
        max_true=max_true, max_reco=max_reco,
        d_sizes=d_sizes, g_sizes_enc=g_sizes_enc, g_sizes_dec=g_sizes_dec,
        lr=LEARNING_RATE, beta1=BETA1, preprocess=preprocess,
        cost_type=COST_TYPE, gan_weight=GAN_WEIGHT, cycl_weight=CYCL_WEIGHT,
        discr_steps=DISCR_STEPS, gen_steps=GEN_STEPS,
        batch_size=BATCH_SIZE, epochs=EPOCHS,
        save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED,
    ):
        """Build the graph.

        Keyword arguments:

        - n_H_A/n_W_A, n_H_B/n_W_B, n_C: input (A) and target (B) image
          dimensions and shared channel count.
        - max_true, max_reco: normalisation maxima kept for sample drawing.
        - d_sizes, g_sizes_enc, g_sizes_dec: architecture dicts.
        - lr, beta1: Adam parameters (beta2 fixed at 0.9).
        - cost_type: 'GAN' or 'WGAN-gp'.
        - gan_weight, cycl_weight: adversarial / reconstruction weights.
        - discr_steps, gen_steps: optimiser sub-steps per batch.
        - batch_size, epochs, save_sample, path, seed: loop bookkeeping.
        """
        self.max_true = max_true
        self.max_reco = max_reco

        self.seed = seed

        self.n_W_A = n_W_A
        self.n_H_A = n_H_A

        self.n_W_B = n_W_B
        self.n_H_B = n_H_B
        self.n_C = n_C

        self.input_A = tf.placeholder(
            tf.float32, shape=(None, n_H_A, n_W_A, n_C), name='X_A',
        )

        self.input_B = tf.placeholder(
            tf.float32, shape=(None, n_H_B, n_W_B, n_C), name='X_B',
        )

        self.batch_sz = tf.placeholder(
            tf.int32, shape=(), name='batch_sz',
        )
        # NOTE(review): unused placeholder; it is overwritten by
        # `self.lr = lr` below and the optimizers take the float `lr`.
        self.lr = tf.placeholder(
            tf.float32, shape=(), name='lr',
        )

        D = Discriminator(self.input_A, d_sizes, 'B')
        G = pix2pixGenerator(self.input_A, self.n_H_B, self.n_W_B, g_sizes_enc, g_sizes_dec, 'A_to_B')

        with tf.variable_scope('discriminator') as scope:
            logits = D.d_forward(self.input_B)

        with tf.variable_scope('generator') as scope:
            sample_images = G.g_forward(self.input_A)

        with tf.variable_scope('discriminator') as scope:
            scope.reuse_variables()
            sample_logits = D.d_forward(sample_images, reuse=True)

        self.input_test_A = tf.placeholder(
            tf.float32, shape=(None, n_H_A, n_W_A, n_C), name='X_test_A',
        )

        # sample images at test time (batch norm in inference mode)
        with tf.variable_scope('generator') as scope:
            scope.reuse_variables()
            self.sample_images_test_A_to_B = G.g_forward(
                self.input_test_A, reuse=True, is_training=False
            )

        self.d_params = [t for t in tf.trainable_variables() if 'discriminator' in t.name]
        self.g_params = [t for t in tf.trainable_variables() if 'generator' in t.name]

        if cost_type == 'GAN':
            # d_cost_real is low if real images are predicted as real
            d_cost_real = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits,
                labels=tf.ones_like(logits)
            )
            # d_cost_fake is low if fake images are predicted as fake.
            # BUGFIX: labels must match the fake logits tensor
            # (original used tf.zeros_like(logits)).
            d_cost_fake = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=sample_logits,
                labels=tf.zeros_like(sample_logits)
            )

            self.d_cost = tf.reduce_mean(d_cost_real) + tf.reduce_mean(d_cost_fake)

            # generator wants its samples classified as real (1)
            self.g_cost_GAN = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=sample_logits,
                    labels=tf.ones_like(sample_logits)
                )
            )
            self.g_cost_l1 = tf.reduce_mean(tf.abs(self.input_B - sample_images))

            self.g_cost = gan_weight * self.g_cost_GAN + cycl_weight * self.g_cost_l1

        if cost_type == 'WGAN-gp':
            self.d_cost = tf.reduce_mean(sample_logits) - tf.reduce_mean(logits)

            # BUGFIX: fit() reads self.g_cost_GAN / self.g_cost_l1 /
            # self.gradient_penalty; the originals were plain locals, so
            # WGAN-gp training crashed with AttributeError.
            self.g_cost_GAN = -tf.reduce_mean(sample_logits)
            self.g_cost_l1 = tf.reduce_mean(tf.abs(self.input_B - sample_images))
            self.g_cost = gan_weight * self.g_cost_GAN + cycl_weight * self.g_cost_l1

            # BUGFIX: the interpolation is between B-domain tensors, so
            # alpha must use the B-side spatial dims (original used
            # n_H_A/n_W_A, breaking when A and B shapes differ).
            alpha = tf.random_uniform(
                shape=[self.batch_sz, self.n_H_B, self.n_W_B, self.n_C],
                minval=0.,
                maxval=1.
            )

            interpolated = alpha * self.input_B + (1 - alpha) * sample_images

            with tf.variable_scope('discriminator') as scope:
                scope.reuse_variables()
                disc_interpolates = D.d_forward(interpolated, reuse=True)

            gradients = tf.gradients(disc_interpolates, [interpolated])[0]
            slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
            self.gradient_penalty = tf.reduce_mean(tf.square(slopes - 1))
            self.d_cost += LAMBDA * self.gradient_penalty

        self.d_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
            beta2=0.9
        ).minimize(
            self.d_cost,
            var_list=self.d_params
        )

        self.g_train_op = tf.train.AdamOptimizer(
            learning_rate=lr,
            beta1=beta1,
            beta2=0.9
        ).minimize(
            self.g_cost,
            var_list=self.g_params
        )

        # bookkeeping for fit()/sampling
        self.batch_size = batch_size
        self.epochs = epochs
        self.save_sample = save_sample
        self.path = path
        self.lr = lr

        self.D = D
        self.G = G

        self.sample_images = sample_images
        self.preprocess = preprocess
        self.cost_type = cost_type
        self.cycl_weight = cycl_weight

        self.gen_steps = gen_steps
        self.discr_steps = discr_steps

    def set_session(self, session):
        """Attach a tf.Session to this model and to every layer object."""
        self.session = session

        for layer in self.D.d_conv_layers:
            layer.set_session(session)

        for layer in self.G.g_enc_conv_layers:
            layer.set_session(session)

        for layer in self.G.g_dec_conv_layers:
            layer.set_session(session)

    def fit(self, X_A, X_B, validating_size):
        """Train on paired arrays (X_A, X_B); last rows are held out.

        Runs `gen_steps` generator and `discr_steps` discriminator updates
        per mini-batch, logging averaged costs and periodically saving a
        validation sample and a cost plot to self.path.
        """
        all_A = X_A
        all_B = X_B
        gen_steps = self.gen_steps
        discr_steps = self.discr_steps

        m = X_A.shape[0]
        train_A = all_A[0:m - validating_size]
        train_B = all_B[0:m - validating_size]

        validating_A = all_A[m - validating_size:m]
        validating_B = all_B[m - validating_size:m]

        seed = self.seed

        d_costs = []
        d_gps = []
        g_costs = []
        g_GANs = []
        g_l1s = []
        N = len(train_A)
        n_batches = N // self.batch_size

        total_iters = 0

        print('\n ****** \n')
        print('Training pix2pix (from 1611.07004) GAN with a total of ' + str(N) + ' samples distributed in ' + str((N) // self.batch_size) + ' batches of size ' + str(self.batch_size) + '\n')
        print('The validation set consists of {0} images'.format(validating_A.shape[0]))
        print('The learning rate is ' + str(self.lr) + ', and every ' + str(self.save_sample) + ' batches a generated sample will be saved to ' + self.path)
        print('\n ****** \n')

        for epoch in range(self.epochs):
            seed += 1
            print('Epoch:', epoch)

            batches_A = unsupervised_random_mini_batches(train_A, self.batch_size, seed)
            batches_B = unsupervised_random_mini_batches(train_B, self.batch_size, seed)

            for X_batch_A, X_batch_B in zip(batches_A, batches_B):
                bs = X_batch_A.shape[0]
                t0 = datetime.now()

                g_cost_tot = 0
                g_GAN_tot = 0
                g_l1_tot = 0
                for i in range(gen_steps):
                    _, g_cost, g_GAN, g_l1 = self.session.run(
                        (self.g_train_op, self.g_cost, self.g_cost_GAN, self.g_cost_l1),
                        feed_dict={self.input_A: X_batch_A, self.input_B: X_batch_B, self.batch_sz: bs},
                    )
                    # BUGFIX: accumulate into separate variables; the
                    # original `g_cost += g_cost` doubled the last step
                    # instead of summing over steps.
                    g_cost_tot += g_cost
                    g_GAN_tot += g_GAN
                    g_l1_tot += g_l1

                g_costs.append(g_cost_tot / gen_steps)
                g_GANs.append(g_GAN_tot / gen_steps)
                g_l1s.append(self.cycl_weight * g_l1_tot / gen_steps)

                d_cost_tot = 0
                d_gp_tot = 0
                for i in range(discr_steps):
                    if self.cost_type == 'WGAN-gp':
                        _, d_cost, d_gp = self.session.run(
                            (self.d_train_op, self.d_cost, self.gradient_penalty),
                            feed_dict={self.input_A: X_batch_A, self.input_B: X_batch_B, self.batch_sz: bs},
                        )
                        d_gp_tot += d_gp
                    else:
                        _, d_cost = self.session.run(
                            (self.d_train_op, self.d_cost),
                            feed_dict={self.input_A: X_batch_A, self.input_B: X_batch_B, self.batch_sz: bs},
                        )
                    d_cost_tot += d_cost

                d_costs.append(d_cost_tot / discr_steps)
                if self.cost_type == 'WGAN-gp':
                    d_gps.append(LAMBDA * d_gp_tot / discr_steps)

                total_iters += 1
                if total_iters % self.save_sample == 0:
                    print("At iter: %d - dt: %s" % (total_iters, datetime.now() - t0))
                    print("Discriminator cost {0:.4g}, Generator cost {1:.4g}".format(d_costs[-1], g_costs[-1]))
                    print('Saving a sample...')

                    if self.preprocess != False:
                        draw_nn_sample(validating_A, validating_B, 1, self.preprocess,
                                       self.max_true, self.max_reco,
                                       f=self.get_sample_A_to_B, is_training=True,
                                       total_iters=total_iters, PATH=self.path)
                    else:
                        draw_nn_sample(validating_A, validating_B, 1, self.preprocess,
                                       f=self.get_sample_A_to_B, is_training=True,
                                       total_iters=total_iters, PATH=self.path)

                    plt.clf()
                    plt.subplot(1, 2, 1)
                    plt.plot(d_costs, label='Discriminator GAN cost')
                    plt.plot(g_GANs, label='Generator GAN cost')
                    plt.xlabel('Epoch')
                    plt.ylabel('Cost')
                    plt.legend()

                    plt.subplot(1, 2, 2)
                    plt.plot(g_costs, label='Generator total cost')
                    plt.plot(g_GANs, label='Generator GAN cost')
                    plt.plot(g_l1s, label='Generator l1 cycle cost')
                    plt.xlabel('Epoch')
                    plt.ylabel('Cost')
                    plt.legend()

                    fig = plt.gcf()
                    fig.set_size_inches(15, 5)
                    plt.savefig(self.path + '/cost_iteration_gen_disc_B_to_A.png', dpi=150)

    def get_sample_A_to_B(self, Z):
        """Translate a single A-domain image Z to domain B."""
        one_sample = self.session.run(
            self.sample_images_test_A_to_B,
            feed_dict={self.input_test_A: Z, self.batch_sz: 1})
        return one_sample

    def get_samples_A_to_B(self, Z):
        """Translate a batch of A-domain images Z to domain B."""
        many_samples = self.session.run(
            self.sample_images_test_A_to_B,
            feed_dict={self.input_test_A: Z, self.batch_sz: Z.shape[0]})
        return many_samples
+#NETWORK ARCHITECTURES + +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + +from architectures.utils.NN_building_blocks import * + +def lrelu(x, alpha=0.1): + return tf.maximum(alpha*x,x) + +# some dummy constants +LEARNING_RATE = None +LEARNING_RATE_D = None +LEARNING_RATE_G = None +BETA1 = None +BATCH_SIZE = None +EPOCHS = None +SAVE_SAMPLE_PERIOD = None +PATH = None +SEED = None +rnd_seed=1 + +#tested on mnist +class resCNN(object): + + """ + Builds residual convolutional neural network. Regularization implemented + with dropout, no regularization parameter implemented yet. Minimization through + AdamOptimizer (adaptive learning rate). Supports convolution, max_pooling and avg_pooling + + Constructor inputs: + + -Positional arguments: + - dims of input image: (n_W (rows))*(n_H (colums))*(n_C (input channels)) + + - sizes: (dict) python dictionary containing the size of the + convolutional blocks and the number of classes for classification + + sizes = {'convblock_layer_n' :[(mo, filter_sz, stride, apply_batch_norm, keep_probability, act_f, w_init), + (), + (),] + 'maxpool_layer_n':[(filter_sz, stride, keep_prob)] + 'avgpool_layer_n':[(filter_sz, stride, keep_prob)] + + 'n_classes': n_classes + + } + + convolution blocks and pooling layers can be in any order, the last key has to be 'n_classes' + + mo: (int) number of output channels after convolution + filter_sz: (int) size of the kernel + stride: (int) stride displacement + apply_batch_norm: (bool) apply batch norm at layer n + keep_probability: (float32) probability of activation of output + act_f: (function) activation function for layer n + w_init: (tf initializer) random initializer for weights at layer n + n_classes: number of classes + + -Keyword arguments + + -lr: (float32) learning rate arg for the AdamOptimizer + -beta1: (float32) beta1 arg for the AdamOptimizer + -batch_size: (int) size of each batch + -epochs: 
(int) number of times the training has to be repeated over all the batches + -save_sample: (int) after how many iterations of the training algorithm performs the evaluations in fit function + -path: (str) path for saving the session checkpoint + + Class attributes: + + - X: (tf placeholder) input tensor of shape (batch_size, input features) + - Y: (tf placeholder) label tensor of shape (batch_size, n_classes) (one_hot encoding) + - Y_hat: (tf tensor) shape=(batch_size, n_classes) predicted class (one_hot) + - loss: (tf scalar) reduced mean of cost computed with softmax cross entropy with logits + - train_op: gradient descent algorithm with AdamOptimizer + + """ + def __init__( + + self, n_H, n_W, n_C, sizes, + lr=LEARNING_RATE, beta1=BETA1, + batch_size=BATCH_SIZE, epochs=EPOCHS, + save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED + ): + + self.n_classes = sizes['n_classes'] + self.n_H = n_H + self.n_W = n_W + self.n_C = n_C + + self.conv_sizes = sizes + self.seed = seed + + self.keep_prob = tf.placeholder( + tf.float32 + ) + + self.X = tf.placeholder( + tf.float32, + shape=(None, n_H, n_W, n_C), + name = 'X' + ) + + self.X_input = tf.placeholder( + tf.float32, + shape=(None, n_H, n_W, n_C), + name = 'X' + ) + + self.batch_sz=tf.placeholder( + tf.int32, + shape=(), + name='batch_sz', + ) + + self.Y = tf.placeholder( + tf.float32, + shape=(None, self.n_classes), + name='Y' + ) + + self.Y_hat = self.build_resCNN(self.X, self.conv_sizes) + + cost = tf.nn.softmax_cross_entropy_with_logits( + logits= self.Y_hat , + labels= self.Y + ) + + self.loss = tf.reduce_mean(cost) + + self.train_op = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1 + ).minimize(self.loss + ) + + #convolve from input + with tf.variable_scope('convolutional') as scope: + scope.reuse_variables() + self.Y_hat_from_test = self.convolve( + self.X_input, reuse=True, is_training=False, + ) + + self.accuracy = evaluation(self.Y_hat_from_test, self.Y) + + #saving for later + self.lr = lr + 
self.batch_size=batch_size + self.epochs = epochs + self.path = path + self.save_sample = save_sample + + def build_resCNN(self, X, conv_sizes): + + with tf.variable_scope('convolutional') as scope: + + #dimensions of input + mi = self.n_C + + dim_W = self.n_W + dim_H = self.n_H + + + for key in conv_sizes: + if 'block' in key: + print('Residual Network architecture detected') + break + + self.conv_blocks = [] + #count conv blocks + + steps=0 + for key in conv_sizes: + if 'conv' in key: + if not 'shortcut' in key: + steps+=1 + if 'pool' in key: + steps+=1 + + + #build convblocks + block_n=0 + layer_n=0 + pool_n=0 + + for key in conv_sizes: + + if 'block' and 'shortcut' in key: + + conv_block = ConvBlock(block_n, + mi, conv_sizes, + ) + self.conv_blocks.append(conv_block) + + mo, _, _, _, _, _, _, = conv_sizes['convblock_layer_'+str(block_n)][-1] + mi = mo + dim_H = conv_block.output_dim(dim_H) + dim_W = conv_block.output_dim(dim_W) + block_n+=1 + + if 'conv_layer' in key: + + name = 'conv_layer_{0}'.format(layer_n) + + mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = conv_sizes[key][0] + + + conv_layer = ConvLayer(name, mi, mo, + filter_sz, stride, + apply_batch_norm, keep_prob, + act_f, w_init + ) + + self.conv_blocks.append(conv_layer) + + mi = mo + dim_W = int(np.ceil(float(dim_W) / stride)) + dim_H = int(np.ceil(float(dim_H) / stride)) + layer_n+=1 + + if 'pool' in key: + pool_n+=1 + + if 'max' in key: + + filter_sz, stride, keep_prob =conv_sizes[key][0] + + maxpool_layer = MaxPool2D(filter_sz, stride, keep_prob) + + self.conv_blocks.append(maxpool_layer) + + dim_W = int(np.ceil(float(dim_W) / stride)) + dim_H = int(np.ceil(float(dim_H) / stride)) + + if 'avg' in key: + + filter_sz, stride, keep_prob =conv_sizes[key][0] + + avgpool_layer = AvgPool2D(filter_sz, stride, keep_prob) + + self.conv_blocks.append(avgpool_layer) + + dim_W = int(np.ceil(float(dim_W) / stride)) + dim_H = int(np.ceil(float(dim_H) / stride)) + + assert steps == pool_n + 
block_n + layer_n, 'Check conv_sizes keys' + count = steps + mi = mi * dim_W * dim_H + self.dense_layers = [] + + for mo, apply_batch_norm, keep_prob, act_f, w_init in conv_sizes['dense_layers']: + + name = 'dense_layer_{0}'.format(count) + count += 1 + + layer = DenseLayer(name,mi, mo, + apply_batch_norm, keep_prob, + act_f, w_init) + mi = mo + self.dense_layers.append(layer) + + readout_w_init = conv_sizes['readout_w_init'] + + readout_layer = DenseLayer('readout_layer', + mi, self.n_classes, + False, 1, tf.nn.softmax, + readout_w_init) + + self.dense_layers.append(readout_layer) + + + return self.convolve(X) + + def convolve(self, X, reuse = None, is_training=True): + + print('Convolution') + print('Input for convolution shape ', X.get_shape()) + + output = X + + i=0 + for block in self.conv_blocks: + i+=1 + print('Convolution_block_%i' %i) + print('Input shape', output.get_shape()) + output = block.forward(output, + reuse, + is_training) + + + output = tf.contrib.layers.flatten(output) + # print('After flatten shape', output.get_shape()) + + i=0 + for layer in self.dense_layers: + i+=1 + # print('Dense weights %i' %i) + output = layer.forward(output, + reuse, + is_training) + + # print('After dense layer_%i' %i) + # print('Shape', output.get_shape()) + + print('Logits shape', output.get_shape()) + return output + + def set_session(self, session): + + self.session=session + + for layer in self.conv_blocks: + layer.set_session(session) + + def fit(self, X_train, Y_train, X_test, Y_test): + + """ + + Function is called if the flag is_training is set on TRAIN. If a model already is present + continues training the one already present, otherwise initialises all params from scratch. + + Performs the training over all the epochs, at when the number of epochs of training + is a multiple of save_sample prints out training cost, train and test accuracies + + Plots a plot of the cost versus epoch. 
+ + Positional arguments: + + - X_train: (ndarray) size=(train set size, input features) training sample set + - X_test: (ndarray) size=(test set size, input features) test sample set + + - Y_train: (ndarray) size=(train set size, input features) training labels set + - Y_test: (ndarray) size=(test set size, input features) test labels set + + """ + + seed = self.seed + + N = X_train.shape[0] + test_size = X_test.shape[0] + + n_batches = N // self.batch_size + + print('\n ****** \n') + print('Training residual CNN for '+str(self.epochs)+' epochs with a total of ' +str(N)+ ' samples\ndistributed in ' +str(n_batches)+ ' batches of size '+str(self.batch_size)+'\n') + print('The learning rate set is '+str(self.lr)) + print('\n ****** \n') + + costs = [] + for epoch in range(self.epochs): + + seed += 1 + + train_batches = supervised_random_mini_batches(X_train, Y_train, self.batch_size, seed) + test_batches = supervised_random_mini_batches(X_test, Y_test, self.batch_size, seed) + train_acc = 0 + test_acc =0 + train_accuracies=[] + test_accuracies=[] + + for train_batch in train_batches: + + (X_train, Y_train) = train_batch + + feed_dict = { + + self.X: X_train, + self.Y: Y_train, + self.batch_sz: self.batch_size, + + } + + _, c = self.session.run( + (self.train_op, self.loss), + feed_dict=feed_dict + ) + + train_acc = self.session.run( + self.accuracy, feed_dict={self.X_input:X_train, self.Y:Y_train} + + ) + + + c /= self.batch_size + + train_accuracies.append(train_acc) + costs.append(c) + + train_acc = np.array(train_accuracies).mean() + #model evaluation + if epoch % self.save_sample ==0: + + + for test_batch in test_batches: + + (X_test_batch, Y_test_batch) = test_batch + #print(X_test_batch.sum(),Y_test_batch.sum()) + feed_dict={ + self.X_input: X_test_batch, + self.Y: Y_test_batch, + + } + + test_acc = self.session.run( + self.accuracy, + feed_dict=feed_dict + + ) + + test_accuracies.append(test_acc) + + test_acc = np.array(test_accuracies).mean() + 
print('Evaluating performance on train/test sets') + print('At epoch {0}, train cost: {1:.4g}, train accuracy {2:.4g}'.format(epoch, c, train_acc)) + print('test accuracy {0:.4g}'.format(test_acc)) + + + plt.plot(costs) + plt.ylabel('cost') + plt.xlabel('iteration') + plt.title('learning rate=' + str(self.lr)) + plt.show() + + print('Parameters trained') + + #get samples at test time + + def predict(self, X): + pred = tf.nn.softmax(self.Y_hat_from_test) + output = self.session.run( + pred, + feed_dict={self.X_input:X} + ) + return output \ No newline at end of file diff --git a/architectures/resDCGAN.py b/architectures/resDCGAN.py new file mode 100644 index 0000000..42d4791 --- /dev/null +++ b/architectures/resDCGAN.py @@ -0,0 +1,316 @@ +#NETWORK ARCHITECTURES + +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + +from architectures.utils.NN_building_blocks import * +from architectures.utils.NN_gen_building_blocks import * + +def lrelu(x, alpha=0.1): + return tf.maximum(alpha*x,x) + +# some dummy constants +LEARNING_RATE = None +LEARNING_RATE_D = None +LEARNING_RATE_G = None +BETA1 = None +BATCH_SIZE = None +EPOCHS = None +SAVE_SAMPLE_PERIOD = None +PATH = None +SEED = None +rnd_seed=1 + + +#tested on mnist (use greater lr on generator than on discriminator) +class resDCGAN(object): + + def __init__( + self, + n_H, n_W, n_C, + d_sizes,g_sizes, + lr_g=LEARNING_RATE_G, lr_d=LEARNING_RATE_D, beta1=BETA1, + batch_size=BATCH_SIZE, epochs=EPOCHS, + save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed=SEED + ): + + """ + + Positional arguments: + + - width of (square) image + - number of channels of input image + - discriminator sizes + + a python dict of the kind + d_sizes = { 'convblocklayer_n':[(n_c+1, kernel, stride, apply_batch_norm, weight initializer), + (,,,,), + (,,,,), + ], + 'convblock_shortcut_layer_n':[(,,,)], + 'dense_layers':[(n_o, apply_bn, weight_init)] + } + - generator sizes 
+ + a python dictionary of the kind + + g_sizes = { + 'z':latent_space_dim, + 'projection': int, + 'bn_after_project':bool + + 'deconvblocklayer_n':[(n_c+1, kernel, stride, apply_batch_norm, weight initializer), + (,,,,), + (,,,,), + ], + 'deconvblock_shortcut_layer_n':[(,,,)], + 'dense_layers':[(n_o, apply_bn, weight_init)] + 'activation':function + } + + Keyword arguments: + + - lr = LEARNING_RATE (float32) + - beta1 = ema parameter for adam opt (float32) + - batch_size (int) + - save_sample = after how many batches iterations to save a sample (int) + - path = relative path for saving samples + + """ + + self.seed = seed + self.n_W = n_W + self.n_H = n_H + self.n_C = n_C + + self.latent_dims = g_sizes['z'] + + #input data + + self.X = tf.placeholder( + tf.float32, + shape=(None, + n_H, n_W, n_C), + name='X', + ) + + self.Z = tf.placeholder( + tf.float32, + shape=(None, + self.latent_dims), + name='Z' + ) + + self.batch_sz = tf.placeholder( + tf.int32, + shape=(), + name='batch_sz' + ) + + D = resDiscriminator(self.X, d_sizes, 'A') + + with tf.variable_scope('discriminator_A') as scope: + + logits = D.d_forward(self.X) + + G = resGenerator(self.Z, self.n_H, self.n_W, g_sizes, 'A') + + with tf.variable_scope('generator_A') as scope: + + self.sample_images = G.g_forward(self.Z) + + # get sample logits + with tf.variable_scope('discriminator_A') as scope: + scope.reuse_variables() + sample_logits = D.d_forward(self.sample_images, reuse=True) + + # get sample images for test time + with tf.variable_scope('generator_A') as scope: + scope.reuse_variables() + self.sample_images_test = G.g_forward( + self.Z, reuse=True, is_training=False + ) + + #cost building + + #Discriminator cost + self.d_cost_real = tf.nn.sigmoid_cross_entropy_with_logits( + logits=logits, + labels=tf.ones_like(logits) + ) + + self.d_cost_fake = tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits, + labels=tf.zeros_like(sample_logits) + ) + + self.d_cost = tf.reduce_mean(self.d_cost_real) 
+ tf.reduce_mean(self.d_cost_fake) + + #Generator cost + self.g_cost = tf.reduce_mean( + tf.nn.sigmoid_cross_entropy_with_logits( + logits=sample_logits, + labels=tf.ones_like(sample_logits) + ) + ) + + #Measure accuracy of the discriminator + + real_predictions = tf.cast(logits>0,tf.float32) + fake_predictions = tf.cast(sample_logits<0,tf.float32) + + num_predictions=2.0*batch_size + num_correct = tf.reduce_sum(real_predictions)+tf.reduce_sum(fake_predictions) + + self.d_accuracy = num_correct/num_predictions + + #optimizers + self.d_params =[t for t in tf.trainable_variables() if t.name.startswith('d')] + self.g_params =[t for t in tf.trainable_variables() if t.name.startswith('g')] + + self.d_train_op = tf.train.AdamOptimizer( + learning_rate=lr_d, + beta1=beta1, + ).minimize( + self.d_cost, + var_list=self.d_params + ) + + self.g_train_op = tf.train.AdamOptimizer( + learning_rate=lr_g, + beta1=beta1, + ).minimize( + self.g_cost, + var_list=self.g_params + ) + self.batch_size=batch_size + self.epochs=epochs + self.save_sample=save_sample + self.path=path + self.lr_g = lr_g + self.lr_d = lr_d + self.D=D + self.G=G + + def set_session(self, session): + + self.session = session + + for block in self.D.d_blocks: + block.set_session(session) + + for layer in self.D.d_dense_layers: + layer.set_session(session) + + for block in self.G.g_blocks: + block.set_session(session) + + for layer in self.G.g_dense_layers: + layer.set_session(session) + + def fit(self, X): + + seed = self.seed + d_costs = [] + g_costs = [] + + N = len(X) + n_batches = N // self.batch_size + + total_iters=0 + + print('\n ****** \n') + print('Training residual DCGAN with a total of ' +str(N)+' samples distributed in batches of size '+str(self.batch_size)+'\n') + print('The learning rate set for the generator is '+str(self.lr_g)+' while for the discriminator is '+str(self.lr_d)+', and every ' +str(self.save_sample)+ ' epoch a generated sample will be saved to '+ self.path) + print('\n ****** \n') + 
+ for epoch in range(self.epochs): + + seed +=1 + + print('Epoch:', epoch) + + batches = unsupervised_random_mini_batches(X, self.batch_size, seed) + + for X_batch in batches: + + t0 = datetime.now() + np.random.seed(seed) + Z = np.random.uniform(-1,1, size= (self.batch_size, self.latent_dims)) + + _, d_cost, d_acc = self.session.run( + (self.d_train_op, self.d_cost, self.d_accuracy), + feed_dict={self.X: X_batch, self.Z:Z, self.batch_sz: self.batch_size}, + ) + + d_costs.append(d_cost) + + #train the generator averaging two costs if the + #discriminator learns too fast + + _, g_cost1 = self.session.run( + (self.g_train_op, self.g_cost), + feed_dict={self.Z:Z, self.batch_sz:self.batch_size}, + ) + + _, g_cost2 = self.session.run( + (self.g_train_op, self.g_cost), + feed_dict={self.Z:Z, self.batch_sz:self.batch_size}, + ) + + + g_costs.append((g_cost1+g_cost2)/2) # just use the avg + + total_iters += 1 + if total_iters % self.save_sample ==0: + print("At iter: %d - dt: %s - d_acc: %.2f" % (total_iters, datetime.now() - t0, d_acc)) + print('Saving a sample...') + + np.random.seed(seed) + Z = np.random.uniform(-1,1, size=(64,self.latent_dims)) + + samples = self.sample(Z)#shape is (64,D,D,color) + + w = self.n_W + h = self.n_H + samples = samples.reshape(64, h, w) + + + for i in range(64): + plt.subplot(8,8,i+1) + plt.imshow(samples[i].reshape(h,w), cmap='gray') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + plt.axis('off') + + fig = plt.gcf() + fig.set_size_inches(5,7) + plt.savefig(self.path+'/samples_at_iter_%d.png' % total_iters,dpi=300) + + + + plt.clf() + plt.plot(d_costs, label='discriminator cost') + plt.plot(g_costs, label='generator cost') + plt.legend() + plt.savefig(self.path+'/cost vs iteration.png') + + def sample(self, Z): + + samples = self.session.run( + self.sample_images_test, + feed_dict={self.Z:Z, self.batch_sz: self.batch_size}) + + return samples + + def get_sample(self, Z): + + one_sample = self.session.run( + self.sample_images_test, + 
feed_dict={self.Z:Z, self.batch_sz: 1}) + + return one_sample \ No newline at end of file diff --git a/architectures/resDCVAE.py b/architectures/resDCVAE.py new file mode 100644 index 0000000..742c773 --- /dev/null +++ b/architectures/resDCVAE.py @@ -0,0 +1,577 @@ +#NETWORK ARCHITECTURES + +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + +from architectures.utils.NN_building_blocks import * +from architectures.utils.NN_gen_building_blocks import * + +def lrelu(x, alpha=0.1): + return tf.maximum(alpha*x,x) + +# some dummy constants +LEARNING_RATE = None +LEARNING_RATE_D = None +LEARNING_RATE_G = None +BETA1 = None +BATCH_SIZE = None +EPOCHS = None +SAVE_SAMPLE_PERIOD = None +PATH = None +SEED = None +rnd_seed=1 + + +#can't seem to work on mnist +class resDCVAE(object): + + def __init__(self, n_H, n_W, n_C, e_sizes, d_sizes, + lr=LEARNING_RATE, beta1=BETA1, + batch_size=BATCH_SIZE, epochs=EPOCHS, + save_sample=SAVE_SAMPLE_PERIOD, path=PATH): + + #size of every layer in the encoder + #up to the latent layer, decoder + #will have reverse shape + self.n_H = n_H + self.n_W = n_W + self.n_C = n_C + + self.e_sizes = e_sizes + self.d_sizes = d_sizes + self.latent_dims = e_sizes['z'] + + + self.X = tf.placeholder( + tf.float32, + shape=(None, n_H, n_W, n_C), + name='X' + ) + + self.batch_sz = tf.placeholder( + tf.int32, + shape=(), + name='batch_sz' + ) + + #builds the encoder and outputs a Z distribution + self.Z = self.build_encoder(self.X, self.e_sizes) + + #builds decoder from Z distribution + logits = self.build_decoder(self.Z, self.d_sizes) + + #builds X_hat distribution from decoder output + self.X_hat_distribution = Bernoulli(logits=logits) + + + #posterior predictive + + with tf.variable_scope('encoder') as scope: + scope.reuse_variables + self.Z_dist = self.encode( + self.X, reuse=True, is_training=False, + )#self.X or something on purpose? 
+ + + with tf.variable_scope('decoder') as scope: + scope.reuse_variables() + sample_logits = self.decode( + self.Z_dist, reuse=True, is_training=False, + ) + + self.posterior_predictive_dist = Bernoulli(logits=sample_logits) + self.posterior_predictive = self.posterior_predictive_dist.sample() + self.posterior_predictive_probs = tf.nn.sigmoid(sample_logits) + + #prior predictive from prob + + standard_normal = Normal( + loc=np.zeros(self.latent_dims, dtype=np.float32), + scale=np.ones(self.latent_dims, dtype=np.float32) + ) + + Z_std = standard_normal.sample(1) + + with tf.variable_scope('decoder') as scope: + scope.reuse_variables() + logits_from_prob = self.decode( + Z_std, reuse=True, is_training=False, + ) + + prior_predictive_dist = Bernoulli(logits=logits_from_prob) + self.prior_predictive = prior_predictive_dist.sample() + self.prior_predictive_probs = tf.nn.sigmoid(logits_from_prob) + + + # prior predictive from input + + self.Z_input = tf.placeholder(tf.float32, shape=(None, self.latent_dims)) + + with tf.variable_scope('decoder') as scope: + scope.reuse_variables() + logits_from_input = self.decode( + self.Z_input, reuse=True, is_training=False, + ) + + input_predictive_dist = Bernoulli(logits=logits_from_input) + self.prior_predictive_from_input= input_predictive_dist.sample() + self.prior_predictive_from_input_probs = tf.nn.sigmoid(logits_from_input) + + + #cost + kl = tf.reduce_sum( + tf.contrib.distributions.kl_divergence( + self.Z.distribution, + standard_normal), + 1 + ) + + + expected_log_likelihood = tf.reduce_sum( + self.X_hat_distribution.log_prob(self.X), + 1 + ) + + self.loss = tf.reduce_sum(expected_log_likelihood - kl) + self.train_op = tf.train.AdamOptimizer( + learning_rate=lr, + beta1=beta1, + ).minimize(-self.loss) + + #saving for later + self.lr = lr + self.batch_size=batch_size + self.epochs = epochs + self.path = path + self.save_sample = save_sample + + def build_encoder(self, X, e_sizes): + + with tf.variable_scope('encoder') as 
scope: + + mi = self.n_C + dim_H = self.n_H + dim_W = self.n_W + + + for key in e_sizes: + if 'block' in key: + print('Residual Network architecture detected') + break + + self.e_blocks = [] + #count conv blocks + e_steps = 0 + for key in e_sizes: + if 'conv' in key: + if not 'shortcut' in key: + e_steps+=1 + + e_block_n=0 + e_layer_n=0 + + for key in e_sizes: + + if 'block' and 'shortcut' in key: + + e_block = ConvBlock(e_block_n, + mi, e_sizes, + ) + self.e_blocks.append(e_block) + + mo, _, _, _, _, _, _, = e_sizes['convblock_layer_'+str(e_block_n)][-1] + mi = mo + dim_H = e_block.output_dim(dim_H) + dim_W = e_block.output_dim(dim_W) + e_block_n+=1 + + + if 'conv_layer' in key: + + name = 'e_conv_layer_{0}'.format(e_layer_n) + + mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = e_sizes[key][0] + + e_conv_layer = ConvLayer(name, mi, mo, + filter_sz, stride, + apply_batch_norm, keep_prob, + act_f, w_init + ) + + self.e_blocks.append(e_conv_layer) + + mi = mo + dim_W = int(np.ceil(float(dim_W) / stride)) + dim_H = int(np.ceil(float(dim_H) / stride)) + e_layer_n+=1 + + assert e_block_n+e_layer_n==e_steps, '\nCheck keys in d_sizes, \n total convolution steps do not mach sum between convolutional blocks and convolutional layers' + + count=e_steps + + mi = mi * dim_H * dim_W + + #build dense layers + + self.e_dense_layers = [] + for mo, apply_batch_norm, keep_prob, act_f, w_init in e_sizes['dense_layers']: + + name = 'e_dense_layer_%s' %count + count +=1 + + layer = DenseLayer(name,mi, mo, + apply_batch_norm, keep_prob, + act_f, w_init) + mi = mo + self.e_dense_layers.append(layer) + + #final logistic layer + name = 'e_dense_layer_%s' %count + + last_enc_layer = DenseLayer(name, mi, 2*self.latent_dims, False, 1, + f=lambda x: x, w_init=e_sizes['last_layer_weight_init']) + + self.e_dense_layers.append(last_enc_layer) + + self.e_steps=e_steps + + return self.encode(X) + + def encode(self, X, reuse=None, is_training=True): + + #propagate X until end of 
encoder + output=X + + i=0 + for block in self.e_blocks: + i+=1 + # print('Convolution_block_%i' %i) + # print('Input shape', output.get_shape()) + output = block.forward(output, + reuse, + is_training) + # print('After block shape', output.get_shape()) + + + output = tf.contrib.layers.flatten(output) + # print('After flatten shape', output.get_shape()) + + i=0 + for layer in self.e_dense_layers: + # print('Dense weights %i' %i) + # print(layer.W.get_shape()) + output = layer.forward(output, + reuse, + is_training) + i+=1 + # print('After dense layer_%i' %i) + # print('Shape', output.get_shape()) + + + #get means and stddev from last encoder layer + self.means = output[:, :self.latent_dims] + self.stddev = tf.nn.softplus(output[:,self.latent_dims:])+1e-6 + + # get a sample of Z, we need to use a stochastic tensor + # in order for the errors to be backpropagated past this point + + with st.value_type(st.SampleValue()): + Z = st.StochasticTensor(Normal(loc=self.means, scale=self.stddev)) + + return Z + + def build_decoder(self, Z, d_sizes): + + with tf.variable_scope('decoder') as scope: + + #dimensions of input + #dense layers + self.d_dense_layers = [] + count = 0 + + mi = self.latent_dims + + for mo, apply_batch_norm, keep_prob, act_f, w_init in d_sizes['dense_layers']: + name = 'd_dense_layer_%s' %count + count += 1 + + layer = DenseLayer( + name, mi, mo, + apply_batch_norm, keep_prob, + f=act_f, w_init=w_init + ) + self.d_dense_layers.append(layer) + mi = mo + + #checking generator architecture + + d_steps = 0 + for key in d_sizes: + if 'deconv' in key: + if not 'shortcut' in key: + d_steps+=1 + + assert d_steps == self.e_steps, '\nUnmatching discriminator/generator architecture' + + + d_block_n=0 + d_layer_n=0 + + for key in d_sizes: + if 'block' and 'shortcut' in key: + d_block_n+=1 + if 'deconv_layer' in key: + d_layer_n +=1 + + assert d_block_n+d_layer_n==d_steps, '\nCheck keys in g_sizes, \n sum of generator steps do not coincide with sum of convolutional 
layers and convolutional blocks' + + #dimensions of output generated image + dims_W = [self.n_W] + dims_H = [self.n_H] + + dim_H = self.n_H + dim_W = self.n_W + + + layers_output_sizes={} + blocks_output_sizes={} + + for key, item in reversed(list(d_sizes.items())): + + if 'deconv_layer' in key: + + _, _, stride, _, _, _, _, = d_sizes[key][0] + layers_output_sizes[d_layer_n-1]= [dim_H, dim_W] + + dim_H = int(np.ceil(float(dim_H)/stride)) + dim_W = int(np.ceil(float(dim_W)/stride)) + dims_H.append(dim_H) + dims_W.append(dim_W) + + d_layer_n -= 1 + + + if 'deconvblock_layer' in key: + + for _ ,_ , stride, _, _, _, _, in d_sizes[key]: + + dim_H = int(np.ceil(float(dim_H)/stride)) + dim_W = int(np.ceil(float(dim_W)/stride)) + dims_H.append(dim_H) + dims_W.append(dim_W) + + blocks_output_sizes[d_block_n-1] = [[dims_H[j],dims_W[j]] for j in range(1, len(d_sizes[key])+1)] + d_block_n -=1 + + dims_H = list(reversed(dims_H)) + dims_W = list(reversed(dims_W)) + + #saving for later + self.d_dims_H = dims_H + self.d_dims_W = dims_W + + #final dense layer + + projection, bn_after_project, keep_prob, act_f, w_init = d_sizes['projection'][0] + + mo = projection*dims_W[0]*dims_H[0] + + #final dense layer + name = 'dec_layer_%s' %count + + layer = DenseLayer(name, mi, mo, not self.bn_after_project, keep_prob, act_f, w_init) + self.d_dense_layers.append(layer) + + + #deconvolution input channel number + mi = projection + + self.d_blocks=[] + + block_n=0 #keep count of the block number + layer_n=0 #keep count of conv layer number + i=0 + for key in d_sizes: + + if 'block' and 'shortcut' in key: + + d_block = DeconvBlock(block_n, + mi, blocks_output_sizes, d_sizes, + ) + self.d_blocks.append(d_block) + + mo, _, _, _, _, _, _, = d_sizes['deconvblock_layer_'+str(block_n)][-1] + mi = mo + block_n+=1 + count+=1 + i+=1 + + if 'deconv_layer' in key: + + name = 'd_conv_layer_{0}'.format(layer_n) + + mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = d_sizes[key][0] + + 
d_conv_layer = DeconvLayer( + name, mi, mo, layers_output_sizes[layer_n], + filter_sz, stride, apply_batch_norm, keep_prob, + act_f, w_init + ) + self.d_blocks.append(d_conv_layer) + + mi=mo + layer_n+=1 + count+=1 + i+=1 + + assert i==d_steps, 'Check convolutional layer and block building, steps in building do not coincide with g_steps' + assert d_steps==block_n+layer_n, 'Check keys in g_sizes' + #saving for later + self.d_sizes=d_sizes + + self.bn_after_project = bn_after_project + self.projection = projection + + return self.decode(Z) + + def decode(self, Z, reuse=None, is_training=True): + + output = Z + + i=0 + for layer in self.d_dense_layers: + i+=1 + output = layer.forward(output, reuse, is_training) + + + + output = tf.reshape( + output, + + [-1, self.d_dims_H[0], self.d_dims_W[0], self.projection] + + ) + + + if self.bn_after_project: + output = tf.contrib.layers.batch_norm( + output, + decay=0.9, + updates_collections=None, + epsilon=1e-5, + scale=True, + is_training=is_training, + reuse=reuse, + scope='bn_after_project' + ) + # passing to deconv blocks + + i=0 + for block in self.d_blocks: + i+=1 + output = block.forward(output, + reuse, + is_training) + + + + return output + + def set_session(self, session): + + self.session = session + + for layer in self.e_blocks: + layer.set_session(session) + for layer in self.e_dense_layers: + layer.set_session(session) + + for layer in self.d_blocks: + layer.set_session(session) + for layer in self.d_dense_layers: + layer.set_session(session) + + def fit(self, X): + + SEED = 1 + + costs = [] + N = len(X) + n_batches = N // self.batch_size + + + + print('\n ****** \n') + print('Training residual convolutional VAE with a total of ' +str(N)+' samples distributed in batches of size '+str(self.batch_size)+'\n') + print('The learning rate set is '+str(self.lr)+', and every ' +str(self.save_sample)+ ' iterations a generated sample will be saved to '+ self.path) + print('\n ****** \n') + total_iters=0 + + for epoch in 
range(self.epochs): + + t0 = datetime.now() + print('Epoch: {0}'.format(epoch)) + + SEED = SEED + 1 + + batches = unsupervised_random_mini_batches(X, self.batch_size, SEED) + + for X_batch in batches: + + feed_dict = { + self.X: X_batch, self.batch_sz: self.batch_size + } + + _, c = self.session.run( + (self.train_op, self.loss), + feed_dict=feed_dict + ) + + c /= self.batch_size + costs.append(c) + + total_iters += 1 + + if total_iters % self.save_sample ==0: + print("At iteration: %d - dt: %s - cost: %.2f" % (total_iters, datetime.now() - t0, c)) + print('Saving a sample...') + + probs = [self.prior_predictive_sample_with_probs() for i in range(64)] + + for i in range(64): + plt.subplot(8,8,i+1) + plt.imshow(probs[i].reshape(28,28), cmap='gray') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + plt.axis('off') + + fig = plt.gcf() + fig.set_size_inches(4,4) + plt.savefig(self.path+'/samples_at_iter_%d.png' % total_iters,dpi=150) + + plt.clf() + plt.plot(costs) + plt.ylabel('cost') + plt.xlabel('iteration') + plt.title('learning rate=' + str(self.lr)) + plt.show() + + print('Parameters trained') + + def prior_predictive_with_input(self, Z): + return self.session.run( + self.prior_predictive_from_input_probs, + feed_dict={self.Z_input: Z} + ) + + def posterior_predictive_sample(self, X): + # returns a sample from p(x_new | X) + return self.session.run(self.posterior_predictive_probs, feed_dict={self.X: X, self.batch_sz:BATCH_SIZE}) + + def prior_predictive_sample_with_probs(self): + # returns a sample from p(x_new | z), z ~ N(0, 1) + return self.session.run(self.prior_predictive_probs) \ No newline at end of file diff --git a/architectures/utils/NN_building_blocks.py b/architectures/utils/NN_building_blocks.py new file mode 100644 index 0000000..8b69852 --- /dev/null +++ b/architectures/utils/NN_building_blocks.py @@ -0,0 +1,1077 @@ +#BUILDING BLOCKS +rnd_seed=1 + +import numpy as np +import math +import tensorflow as tf +from architectures.utils.toolbox import * + + 
+# DENSELY CONNECTED NETWORKS + +class DenseLayer(object): + + """ + Creates a dense layer + + Constructor inputs: + - name of layer + - mi: (int) dimension of input to layer + - mo: (int) dimension of output of the activated output + - apply_batch_norm: (bool) wether applying or not batch_normalization along the 0 axis of input + - f: (function) activation function of the output + - w_init: (tensorflow initializer) which initializer to use for the layer weights + + Created attributes: + + - W: (tf variable) (mi x mo) weight matrix + - bi: (tf variable) (mo, ) bias vector + - bo: (tf variable) (mi, ) bias vector + + Class methods: + + - forward: + input: X (tf tensor) previous output layer + outputs: output (tf tensor) activated layer output as A = act_f(X * W+ bi) + + - forwardT: + input: X (tf tensor) previous output layer + outputs: output (tf tensor) activated layer output as A = act_f(X * tf.transpose(W)+ bo) + + - set_session: + Sets current session + input: + session: (tf session) current session + """ + + def __init__(self, name, mi, mo, apply_batch_norm, + keep_prob=1, act_f=None, w_init=None + ): + + + self.W = tf.get_variable( + "W_%s" %name, + shape=(mi,mo), + initializer=w_init, + ) + + + self.bi = tf.get_variable( + "bi_%s" %name, + shape=(mo,), + initializer=tf.zeros_initializer(), + ) + + + self.act_f=act_f + self.name=name + self.apply_batch_norm=apply_batch_norm + self.keep_prob = keep_prob + + def forward(self, X, reuse, is_training): + + if not is_training: + self.keep_prob=1 + + + Z=tf.matmul(X,self.W)+self.bi + + if self.apply_batch_norm=='bn': + Z=tf.contrib.layers.batch_norm( + Z, + decay=0.9, + updates_collections=None, + epsilon=1e-5, + scale=True, + is_training = is_training, + reuse=reuse, + scope = self.name, + ) + elif self.apply_batch_norm=='in': + Z = tf.contrib.layers.instance_norm( + Z, + center=True, + scale=True, + epsilon=1e-6, + reuse=reuse, + scope = self.name, + ) + elif self.apply_batch_norm=='False': + Z = Z + + 
activated=self.act_f(Z) + + output=tf.nn.dropout(activated, self.keep_prob, seed=rnd_seed) + + return output + + def set_session(self, session): + + self.session = session + +#2D CONVOLUTIONAL NETWORKS + +class AvgPool2D(object): + + """ + Performs average 2D pooling of the input, with no dropout by default + Note that this layer trains no parameters + + Constructor input: + - filter_sz: (int) width and height of the square pooling window + - stride: (int) horizontal and vertical value for the stride + + ?extend to square windows and stride? + + Class methods: + + forward: + inputs: + - X (tf tensor) previous activated layer output + - output (tf tensor) average pooled layer output + + """ + + def __init__(self, filter_sz, stride, keep_prob=1): + + self.filter_sz = filter_sz + self.stride = stride + self.keep_prob = keep_prob + + def forward(self, X, reuse, is_training): + + if not is_training: + self.keep_prob=1 + + output = X + + output = tf.nn.avg_pool(X, + ksize=[1, self.filter_sz,self.filter_sz,1], + strides=[1,self.stride, self.stride, 1], + padding = 'SAME' + ) + output = tf.nn.dropout(output, self.keep_prob, seed=rnd_seed) + + return output + + def set_session(self, session): + + self.session = session + +class MaxPool2D(object): + + """ + Performs max 2D pooling of the input, with no dropout by default + Note that this layer trains no parameters + + Constructor input: + - filter_sz: (int) width and height of the square pooling window + - stride: (int) horizontal and vertical value for the stride + + ?extend to square windows and stride? 
+ + Class methods: + + forward: + inputs: + - X (tf tensor) previous activated layer output + - output (tf tensor) max pooled layer output + + """ + + def __init__(self, filter_sz, stride, keep_prob=1): + + self.filter_sz = filter_sz + self.stride = stride + self.keep_prob = keep_prob + + def forward(self, X, reuse, is_training): + + output = X + + if not is_training: + self.keep_prob=1 + + output = tf.nn.max_pool(output, + ksize=[1, self.filter_sz,self.filter_sz,1], + strides=[1,self.stride, self.stride, 1], + padding = 'SAME' + ) + + output = tf.nn.dropout(output, self.keep_prob, seed=rnd_seed) + return output + + def set_session(self, session): + + self.session = session + +class ConvLayer(object): + + """ + Performs 2D strided convlution on rectangular sized input tensor. + + Constructor inputs: + + - mi: (int) input channels + - mo: (int) output channels + - filter_sz: (int) width and height of the convolution kernel + - stride: (int) horizontal and vertical size of the stride + - apply_batch_norm: (bool) whether applying batch normalization (along [0] axis of input tensor) or not + - keep_prob: (int) dropout keep probability of propagated tensor + - f: (function) activation layer function. 
tf.nn.relu by default + - w_init: (tf initializer) initialization of the filter parameters, by default tf.truncated_normal_initializer(stddev=0.02) + + Class attributes + + - W: (tf tensor) variable tensor of dim (filter_sz, filter_sz, mi, mo) of trainable weights + - b: (tf tensor) variable tensor of dim (mo, ) of trainable biases + + Class methods: + + - forward: + inputs: + X: (tf tensor) input tensor of dim (batch_sz, n_W, n_H, mi) output of previous layer + reuse: (bool) whether reusing the stored batch_normalization parameters or not + is_training: (bool) flag that indicates whether we are in the process of training or not + outputs: + output: (tf tensor) activated output of dim (batch_sz, n_W, n_H, mo), input for next layer + + - set_session: + Sets current session + inputs: + session: (tf session) current session + + + """ + + + + def __init__( + self, name, mi, mo, filter_sz, stride, + apply_batch_norm, keep_prob = 1, + f = None, w_init = None + ): + + self.W = tf.get_variable( + "W_%s" %name, + shape=(filter_sz,filter_sz, mi, mo), + initializer=w_init, + ) + + + self.b = tf.get_variable( + "b_%s" %name, + shape = (mo,), + initializer=tf.zeros_initializer(), + ) + + self.name = name + self.f = f + self.stride = stride + self.apply_batch_norm = apply_batch_norm + self.keep_prob=keep_prob + + def forward(self, X, reuse, is_training): + + if not is_training: + self.keep_prob=1 + + conv_out = tf.nn.conv2d( + X, + self.W, + strides=[1,self.stride,self.stride,1], + padding='SAME' + ) + + + conv_out = tf.nn.bias_add(conv_out,self.b) + + #applying batch_norm + if self.apply_batch_norm=='bn': + conv_out=tf.contrib.layers.batch_norm( + conv_out, + decay=0.9, + updates_collections=None, + epsilon=1e-5, + scale=True, + is_training = is_training, + reuse=reuse, + scope = self.name, + ) + elif self.apply_batch_norm=='in': + conv_out = tf.contrib.layers.instance_norm( + conv_out, + center=True, + scale=True, + epsilon=1e-6, + reuse=reuse, + scope = self.name, + ) + elif 
self.apply_batch_norm=='False': + conv_out = conv_out + + activated = self.f(conv_out) + + output = tf.nn.dropout(activated, self.keep_prob, seed=rnd_seed) + + return output + + def set_session(self, session): + + self.session = session + +class DeconvLayer(object): + + """ + Performs 2D strided deconvlution on rectangular sized input tensor. + + Constructor inputs: + + - mi: (int) input channels + - mo: (int) output channels + - output_shape: list = [int, int], list[0]([1]) is the witdth (height) of the output after deconvolution, the layer computes the resizing + automatically to match the output_shape. Default padding value is of 2 in both directions but can be modified. + - filter_sz: (int) width and height of the convolution kernel + - stride: (int) horizontal and vertical size of the stride + - apply_batch_norm: (bool) whether applying batch normalization (along [0] axis of input tensor) or not + - keep_prob: (int) dropout keep probability of propagated tensor + - f: (function) activation layer function. 
tf.nn.relu by default + - w_init: (tf initializer) initialization of the filter parameters, by default tf.truncated_normal_initializer(stddev=0.02) + + Class attributes + + - W: (tf tensor) variable tensor of dim (filter_sz, filter_sz, mo, mo) of trainable weights + - W_id: (tf tensor) variable tensor of dim (1, 1, mi, mo) of trainable weights + - b: (tf tensor) variable tensor of dim (mo, ) of trainable biases + + Class methods: + + - forward: + inputs: + X: (tf tensor) input tensor of dim (batch_sz, n_W, n_H, mi) output of previous layer + reuse: (bool) whether reusing the stored batch_normalization parameters or not + is_training: (bool) flag that indicates whether we are in the process of training or not + + The deconvolution is computed in 3 steps according to: (cite article) + 1) conv2d with W_id is performed to match the output channels mo, with filter_sz=1 and stride=1 + 2) the image is reshaped to reshape_size_H and W + 3) conv2d with W is performed with input filter_sz and stride, output width and height will match output_shape + + outputs: + output: (tf tensor) activated output of dim (batch_sz, n_W=output_shape[0], n_H = output_shape[1], mo), input for next layer + + - set_session: + Sets current session + inputs: + session: (tf session) current session + + + """ + def __init__(self, name, + mi, mo, output_shape, + filter_sz, stride, + apply_batch_norm, keep_prob = 1, + f=None, w_init = None + ): + + #using resize + conv2d and not conv2dt + #mi: input channels + #mo: output channels + #output_shape: width and height of the output image + + #performs 2 convolutions, the first augments the number of channels + #the second performs a convolution with != 1 kernel and stride + self.W = tf.get_variable( + "W_%s" %name, + shape=(filter_sz, filter_sz, mo, mo), + initializer=w_init, + ) + self.W_id = tf.get_variable( + 'W_%s_id' %name, + shape=(1,1, mi, mo), + initializer=w_init, + ) + + + self.b = tf.get_variable( + "b_%s" %name, + shape=(mo,), + 
initializer=tf.zeros_initializer(), + ) + + self.f = f + self.stride = stride + self.filter_sz = filter_sz + self.name = name + self.output_shape = output_shape + self.apply_batch_norm = apply_batch_norm + self.keep_prob = keep_prob + + def forward(self, X, reuse, is_training): + + if not is_training: + self.keep_prob = 1 + + padding_value = 2 + resized_shape_H = (self.output_shape[0] -1)*self.stride+ self.filter_sz-2*padding_value + resized_shape_W = (self.output_shape[1] -1)*self.stride+ self.filter_sz-2*padding_value + + + resized = tf.image.resize_images(X, + [resized_shape_H, + resized_shape_W], + method=tf.image.ResizeMethod.BILINEAR) + + #print('After first resize shape', resized.get_shape()) + + output_id = tf.nn.conv2d( + resized, + self.W_id, + strides=[1,1,1,1], + padding='VALID' + ) + + #print('After id deconvolution shape', output_id.get_shape()) + + paddings = tf.constant([[0,0], + [padding_value, padding_value], + [padding_value, padding_value], + [0,0]]) + + output_id = tf.pad(output_id, + paddings, 'CONSTANT' + ) + + #print('After padding', output_id.get_shape()) + + conv_out = tf.nn.conv2d( + output_id, + self.W, + strides=[1,self.stride,self.stride,1], + padding='VALID' + ) + + #print('After deconvolution', conv_out.get_shape()) + conv_out = tf.nn.bias_add(conv_out,self.b) + + if self.apply_batch_norm=='bn': + conv_out=tf.contrib.layers.batch_norm( + conv_out, + decay=0.9, + updates_collections=None, + epsilon=1e-5, + scale=True, + is_training = is_training, + reuse=reuse, + scope = self.name, + ) + elif self.apply_batch_norm=='in': + conv_out = tf.contrib.layers.instance_norm( + conv_out, + center=True, + scale=True, + epsilon=1e-6, + reuse=reuse, + scope = self.name, + ) + elif self.apply_batch_norm=='False': + conv_out=conv_out + + activated = self.f(conv_out) + output = tf.nn.dropout(activated, self.keep_prob, seed=rnd_seed) + + return output + + def set_session(self, session): + + self.session = session + +class ConvBlock(object): + + """ + 
Performs series of 2D strided convolutions on rectangular sized input tensor. The convolution proceeds on two parallel + paths: main path is composed by n subsequent convolutions while shortcut path has 1 convolution with different parameters and + can be set as the identity convolution. + + Constructor inputs: + + + - block_id: progressive number of block in the network + - init_mi: (int) input channels + - sizes: (dict) python dictionary with keys: + + sizes = { 'convblock_layer_n':[(n_c+1, kernel, stride, apply_batch_norm, keep_prob, act_f, weight initializer), + (n_c+2,,,,,,,), + (n_c+...,,,,,,,), + (n_c+ last,,,,,,,], + + 'convblock_shortcut_layer_n':[(n_c+3, kernel, stride, apply_batch_norm, act_f, weight initializer)], + 'dense_layers':[(dim output, apply_batch_norm, keep_prob, act_f, weight initializer )] + } + + - filter_sz: (int) width and height of the convolution kernel at that layer + - stride: (int) horizontal and vertical size of the stride at that layer + - apply_batch_norm: (bool) whether applying batch normalization (along [0] axis of input tensor) or not + - keep_prob: (int) dropout keep probability of propagated tensor + - f: (function) activation layer function. 
tf.nn.relu by default + - w_init: (tf initializer) initialization of the filter parameters, by default tf.truncated_normal_initializer(stddev=0.02) + + sizes['convblock_layer_n'] is a list of tuples of layers specifications for the main path + sizes['convblock_shortcut_layer_n'] is a list composed by 1 tuple of layer specifications for the shortcut path + sizes['dense_layers'] is a list of tuples of layers specifications for the densely connected part of the network + + Class attributes + + - conv_layers: (list) list of conv_layer objects + - shortcut_layer: (list) conv_layer object + + Class methods: + + - forward: + inputs: + X: (tf tensor) input tensor of dim (batch_sz, n_W, n_H, mi) output of previous layer + reuse: (bool) whether reusing the stored batch_normalization parameters or not + is_training: (bool) flag that indicates whether we are in the process of training or not + outputs: + output: (tf tensor) activated output of dim (batch_sz, n_W, n_H, mo), input for next layer + + - set_session: + Sets current session + inputs: + session: (tf session) current session + + """ + + def __init__(self, + block_id, + init_mi, sizes, + ): + + #self.f=f + self.conv_layers = [] + mi = init_mi + self.block_id=block_id + + #build the block + #main path + count=0 + for mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init in sizes['convblock_layer_'+str(self.block_id)][:-1]: + + name = 'convblock_{0}_layer_{1}'.format(block_id, count) + count += 1 + layer = ConvLayer(name, + mi, mo, filter_sz, stride, + apply_batch_norm, keep_prob, + act_f, w_init) + + self.conv_layers.append(layer) + mi = mo + + + name = 'convblock_{0}_layer_{1}'.format(block_id, count) + mo, filter_sz, stride, apply_batch_norm, self.keep_prob_last, self.fin_act , w_init = sizes['convblock_layer_' +str(self.block_id)][-1] + + layer = ConvLayer(name, + mi, mo, filter_sz, stride, + apply_batch_norm, 1, + lambda x: x, w_init) + + self.conv_layers.append(layer) + + #secondary path + #set 
filter_sz = stride = 1 for an ID block + mo, filter_sz, stride, apply_batch_norm, keep_prob, w_init = sizes['convblock_shortcut_layer_'+str(self.block_id)][0] + name = 'convshortcut_layer_{0}'.format(block_id) + self.shortcut_layer = ConvLayer(name, + init_mi, mo, filter_sz, stride, + apply_batch_norm, keep_prob, + f=lambda x: x, w_init=w_init) + + self.sizes=sizes + + def output_dim(self, input_dim): + + dim = input_dim + for _, _, stride, _, _, _, _, in self.sizes['convblock_layer_'+str(self.block_id)]: + dim = int(np.ceil(float(dim)/stride)) + + return dim + + def set_session(self, session): + + self.session = session + + self.shortcut_layer.set_session(session) + + for layer in self.conv_layers: + layer.set_session(session) + + def forward(self, X, reuse, is_training): + + output = X + shortcut_output = X + #print('Convolutional block %i' %self.block_id) + #print('Input shape ', X.get_shape() ) + i=0 + for layer in self.conv_layers: + i+=1 + output = layer.forward(output, reuse, is_training) + #print('After layer %i' %i) + #print('Output shape ', output.get_shape()) + + shortcut_output = self.shortcut_layer.forward(shortcut_output,reuse, is_training) + #print('Shortcut layer after convolution shape ', shortcut_output.get_shape()) + + assert (output.get_shape().as_list()[1], output.get_shape().as_list()[2]) == (shortcut_output.get_shape().as_list()[1], shortcut_output.get_shape().as_list()[2]), 'image size mismatch at conv block {0} '.format(self.block_id) + + assert output.get_shape().as_list()[-1] == shortcut_output.get_shape().as_list()[-1], 'image channels mismatch at conv block {0}'.format(self.block_id) + + #output = tf.concat((output,shortcut_output),axis=3) + output = output + shortcut_output + activated=self.fin_act(output) + output=tf.nn.dropout(activated, keep_prob=self.keep_prob_last, seed=rnd_seed) + return output + +class DeconvBlock(object): + + """ + Performs series of 2D strided deconvolutions on rectangular sized input tensor. 
The convolution proceeds on two parallel + paths: main path is composed by n subsequent deconvolutions while shortcut path has 1 deconvolution with different parameters and + can be set as the identity deconvolution. + + Constructor inputs: + + + - block_id: progressive number of block in the network + - init_mi: (int) input channels + - sizes: (dict) python dictionary with keys: + + sizes = { + + 'deconvblock_layer_n':[(n_c+1, kernel, stride, apply_batch_norm, keep_prob, act_f, weight initializer), + (n_c+2,,,,,,,), + (n_c+...,,,,,,,), + (n_c+ last,,,,,,,], + + 'deconvblock_shortcut_layer_n':[(n_c+3, kernel, stride, apply_batch_norm, act_f, weight initializer)], + 'dense_layers':[(dim output, apply_batch_norm, keep_prob, act_f, weight initializer )] + } + + - filter_sz: (int) width and height of the convolution kernel at that layer + - stride: (int) horizontal and vertical size of the stride at that layer + - apply_batch_norm: (bool) whether applying batch normalization (along [0] axis of input tensor) or not + - keep_prob: (int) dropout keep probability of propagated tensor + - f: (function) activation layer function. 
tf.nn.relu by default + - w_init: (tf initializer) initialization of the filter parameters, by default tf.truncated_normal_initializer(stddev=0.02) + + sizes['deconvblock_layer_n'] is a list of tuples of layers specifications for the main path + sizes['deconvblock_shortcut_layer_n'] is a list composed by 1 tuple of layer specifications for the shortcut path + sizes['dense_layers'] is a list of tuples of layers specifications for the densely connected part of the network + + Class attributes + + - conv_layers: (list) list of conv_layer objects + - shortcut_layer: (list) conv_layer object + + Class methods: + + - forward: + inputs: + X: (tf tensor) input tensor of dim (batch_sz, n_W, n_H, mi) output of previous layer + reuse: (bool) whether reusing the stored batch_normalization parameters or not + is_training: (bool) flag that indicates whether we are in the process of training or not + outputs: + output: (tf tensor) activated output of dim (batch_sz, n_W, n_H, mo), input for next layer + + - set_session: + Sets current session + inputs: + session: (tf session) current session + + """ + + def __init__(self, + block_id, + mi, output_sizes, + sizes): + + #self.f=f + init_mi=mi + self.deconv_layers = [] + self.block_id=block_id + + #output shapes has to be a dictionary of [n_H,n_W] + + #build the block + #main path + + for i in range(len(sizes['deconvblock_layer_'+str(block_id)])-1): + + mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = sizes['deconvblock_layer_'+str(block_id)][i] + name = 'deconvblock_{0}_layer_{1}'.format(block_id, i) + layer = DeconvLayer(name,mi, mo, output_sizes[block_id][i], + filter_sz, stride, + apply_batch_norm, keep_prob, + act_f, w_init) + + self.deconv_layers.append(layer) + mi = mo + + i = len(sizes['deconvblock_layer_'+str(block_id)])-1 + + name = 'deconvblock_{0}_layer_{1}'.format(block_id, i) + mo, filter_sz, stride, apply_batch_norm, self.keep_prob_last, self.fin_act, w_init = sizes['deconvblock_layer_' 
+str(block_id)][-1] + + + layer = DeconvLayer(name, mi, mo, output_sizes[block_id][len(output_sizes[block_id])-1], + filter_sz, stride, + apply_batch_norm, 1, + lambda x: x, w_init) + + self.deconv_layers.append(layer) + + #secondary path + mo, filter_sz, stride, apply_batch_norm, keep_prob, w_init = sizes['deconvblock_shortcut_layer_'+str(block_id)][0] + name = 'deconvshortcut_layer_{0}'.format(block_id) + self.shortcut_layer = DeconvLayer(name, init_mi, mo, output_sizes[block_id][len(output_sizes[block_id])-1], + filter_sz,stride, + apply_batch_norm, 1, + f=lambda x: x, w_init=w_init) + + def set_session(self, session): + + self.session = session + + self.shortcut_layer.set_session(session) + + for layer in self.deconv_layers: + layer.set_session(session) + + def forward(self, X, reuse, is_training): + + output = X + shortcut_output = X + + #print('Deconvolutional block %i' %self.block_id) + #print('Input shape ', X.get_shape() ) + i=0 + + + for layer in self.deconv_layers: + i+=1 + output = layer.forward(output, reuse, is_training) + #print('After layer %i' %i) + #print('Output shape ', output.get_shape()) + + shortcut_output = self.shortcut_layer.forward(shortcut_output, reuse, is_training) + #print('Shortcut layer after convolution shape ', shortcut_output.get_shape()) + + assert (output.get_shape().as_list()[1], output.get_shape().as_list()[2]) == (shortcut_output.get_shape().as_list()[1], shortcut_output.get_shape().as_list()[2]), 'image size mismatch at deconv block {0} '.format(self.block_id) + + assert output.get_shape().as_list()[-1] == shortcut_output.get_shape().as_list()[-1], 'image channels mismatch at deconv block {0}'.format(self.block_id) + + output = output + shortcut_output + + activated=self.fin_act(output) + + output=tf.nn.dropout(activated, keep_prob=self.keep_prob_last, seed=rnd_seed) + + return output + + def output_dim(self, input_dim): + + dim = input_dim + for _, _, stride, _, _, _, in self.sizes['deconvblock_layer_'+str(self.block_id)]: 
+ dim = int(np.ceil(float(dim)/stride)) + + return dim + + +# class DeconvLayer_v2(object): + +# """ +# Performs 2D strided deconvlution on rectangular sized input tensor. + +# Constructor inputs: + +# - mi: (int) input channels +# - mo: (int) output channels +# - output_shape: list = [int, int], list[0]([1]) is the witdth (height) of the output after transposed convolution +# - filter_sz: (int) width and height of the convolution kernel +# - stride: (int) horizontal and vertical size of the stride +# - apply_batch_norm: (bool) whether applying batch normalization (along [0] axis of input tensor) or not +# - keep_prob: (int) dropout keep probability of propagated tensor +# - f: (function) activation layer function. tf.nn.relu by default +# - w_init: (tf initializer) initialization of the filter parameters, by default tf.truncated_normal_initializer(stddev=0.02) + +# Class attributes + +# - W: (tf tensor) variable tensor of dim (filter_sz, filter_sz, mi, mo) of trainable weights +# - b: (tf tensor) variable tensor of dim (mo, ) of trainable biases + +# Class methods: + +# - forward: +# inputs: +# X: (tf tensor) input tensor of dim (batch_sz, n_W, n_H, mi) output of previous layer +# reuse: (bool) whether reusing the stored batch_normalization parameters or not +# is_training: (bool) flag that indicates whether we are in the process of training or not + +# The deconvolution is computed in 3 steps according to: (cite article) +# 1) conv2d with W_id is performed to match the output channels mo, with filter_sz=1 and stride=1 +# 2) the image is reshaped to reshape_size_H and W +# 3) conv2d with W is performed with input filter_sz and stride, output width and height will match output_shape + +# outputs: +# output: (tf tensor) activated output of dim (batch_sz, n_W=output_shape[0], n_H = output_shape[1], mo), input for next layer + +# - set_session: +# Sets current session +# inputs: +# session: (tf session) current session + + +# """ +# def __init__(self, name, +# mi, mo, 
output_shape, +# filter_sz, stride, +# apply_batch_norm, keep_prob = 1, +# f=None, w_init = None +# ): + +# #using resize + conv2d and not conv2dt +# #mi: input channels +# #mo: output channels +# #output_shape: width and height of the output image + +# #performs 2 convolutions, the first augments the number of channels +# #the second performs a convolution with != 1 kernel and stride + +# filter_shape = [filter_sz, filter_sz, mo, mi] + +# self.W = tf.get_variable( +# "W_%s" %name, +# filter_shape, +# initializer=w_init, +# ) + +# self.b = tf.get_variable( +# "b_%s" %name, +# shape=(mo,), +# initializer=tf.zeros_initializer(), +# ) + +# self.f = f +# self.stride = stride +# self.filter_sz = filter_sz +# self.name = name +# self.output_shape = output_shape +# self.apply_batch_norm = apply_batch_norm +# self.keep_prob = keep_prob +# self.mo=mo + +# def forward(self, X, reuse, is_training): + +# if not is_training: +# self.keep_prob = 1 + +# m = tf.shape(X)[0] +# output_shape=tf.stack([m, self.output_shape[0], self.output_shape[1], self.mo]) +# strides_shape=[1,self.stride,self.stride,1] + +# conv_out = tf.nn.conv2d_transpose( +# value=X, +# filter=self.W, +# output_shape=output_shape, +# strides=strides_shape, +# padding='SAME' +# ) + +# #print('After deconvolution', conv_out.get_shape()) +# conv_out = tf.nn.bias_add(conv_out,self.b) + +# if self.apply_batch_norm=='bn': +# conv_out=tf.contrib.layers.batch_norm( +# conv_out, +# decay=0.9, +# updates_collections=None, +# epsilon=1e-5, +# scale=True, +# is_training = is_training, +# reuse=reuse, +# scope = self.name, +# ) +# elif self.apply_batch_norm=='in': +# conv_out = tf.contrib.layers.instance_norm( +# conv_out, +# center=True, +# scale=True, +# epsilon=1e-6, +# reuse=reuse, +# scope = self.name, +# ) +# elif self.apply_batch_norm=='False': +# return conv_out + +# activated = self.f(conv_out) +# output = tf.nn.dropout(activated, self.keep_prob, seed=rnd_seed) + +# return output + +# def set_session(self, session): 
+ +# self.session = session + +# class DeconvBlock_v2(object): + +# """ +# Performs series of 2D strided deconvolutions on rectangular sized input tensor. The convolution proceeds on two parallel +# paths: main path is composed by n subsequent deconvolutions while shortcut path has 1 deconvolution with different parameters and +# can be set as the identity deconvolution. + +# Constructor inputs: + + +# - block_id: progressive number of block in the network +# - init_mi: (int) input channels +# - sizes: (dict) python dictionary with keys: + +# sizes = { + +# 'deconvblock_layer_n':[(n_c+1, kernel, stride, apply_batch_norm, keep_prob, act_f, weight initializer), +# (n_c+2,,,,,,,), +# (n_c+...,,,,,,,), +# (n_c+ last,,,,,,,], + +# 'deconvblock_shortcut_layer_n':[(n_c+3, kernel, stride, apply_batch_norm, act_f, weight initializer)], +# 'dense_layers':[(dim output, apply_batch_norm, keep_prob, act_f, weight initializer )] +# } + +# - filter_sz: (int) width and height of the convolution kernel at that layer +# - stride: (int) horizontal and vertical size of the stride at that layer +# - apply_batch_norm: (bool) whether applying batch normalization (along [0] axis of input tensor) or not +# - keep_prob: (int) dropout keep probability of propagated tensor +# - f: (function) activation layer function. 
tf.nn.relu by default +# - w_init: (tf initializer) initialization of the filter parameters, by default tf.truncated_normal_initializer(stddev=0.02) + +# sizes['deconvblock_layer_n'] is a list of tuples of layers specifications for the main path +# sizes['deconvblock_shortcut_layer_n'] is a list composed by 1 tuple of layer specifications for the shortcut path +# sizes['dense_layers'] is a list of tuples of layers specifications for the densely connected part of the network + +# Class attributes + +# - conv_layers: (list) list of conv_layer objects +# - shortcut_layer: (list) conv_layer object + +# Class methods: + +# - forward: +# inputs: +# X: (tf tensor) input tensor of dim (batch_sz, n_W, n_H, mi) output of previous layer +# reuse: (bool) whether reusing the stored batch_normalization parameters or not +# is_training: (bool) flag that indicates whether we are in the process of training or not +# outputs: +# output: (tf tensor) activated output of dim (batch_sz, n_W, n_H, mo), input for next layer + +# - set_session: +# Sets current session +# inputs: +# session: (tf session) current session + +# """ + +# def __init__(self, +# block_id, +# mi, output_sizes, +# sizes): + +# #self.f=f +# init_mi=mi +# self.deconv_layers = [] +# self.block_id=block_id + +# #output shapes has to be a dictionary of [n_H,n_W] + +# #build the block +# #main path + +# for i in range(len(sizes['deconvblock_layer_'+str(block_id)])-1): + +# mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = sizes['deconvblock_layer_'+str(block_id)][i] +# name = 'deconvblock_{0}_layer_{1}'.format(block_id, i) +# layer = DeconvLayer_v2(name,mi, mo, output_sizes[block_id][i], +# filter_sz, stride, +# apply_batch_norm, keep_prob, +# act_f, w_init) + +# self.deconv_layers.append(layer) +# mi = mo + +# i = len(sizes['deconvblock_layer_'+str(block_id)])-1 + +# name = 'deconvblock_{0}_layer_{1}'.format(block_id, i) +# mo, filter_sz, stride, apply_batch_norm, self.keep_prob_last, self.fin_act, 
w_init = sizes['deconvblock_layer_' +str(block_id)][-1] + + +# layer = DeconvLayer(name, mi, mo, output_sizes[block_id][len(output_sizes[block_id])-1], +# filter_sz, stride, +# apply_batch_norm, 1, +# lambda x: x, w_init) + +# self.deconv_layers.append(layer) + +# #secondary path +# mo, filter_sz, stride, apply_batch_norm, keep_prob, w_init = sizes['deconvblock_shortcut_layer_'+str(block_id)][0] +# name = 'deconvshortcut_layer_{0}'.format(block_id) +# self.shortcut_layer = DeconvLayer(name, init_mi, mo, output_sizes[block_id][len(output_sizes[block_id])-1], +# filter_sz,stride, +# apply_batch_norm, 1, +# f=lambda x: x, w_init=w_init) + +# def set_session(self, session): + +# self.session = session + +# self.shortcut_layer.set_session(session) + +# for layer in self.deconv_layers: +# layer.set_session(session) + +# def forward(self, X, reuse, is_training): + +# output = X +# shortcut_output = X + +# #print('Deconvolutional block %i' %self.block_id) +# #print('Input shape ', X.get_shape() ) +# i=0 + + +# for layer in self.deconv_layers: +# i+=1 +# output = layer.forward(output, reuse, is_training) +# #print('After layer %i' %i) +# #print('Output shape ', output.get_shape()) + +# shortcut_output = self.shortcut_layer.forward(shortcut_output, reuse, is_training) +# #print('Shortcut layer after convolution shape ', shortcut_output.get_shape()) + +# assert (output.get_shape().as_list()[1], output.get_shape().as_list()[2]) == (shortcut_output.get_shape().as_list()[1], shortcut_output.get_shape().as_list()[2]), 'image size mismatch at deconv block {0} '.format(self.block_id) + +# assert output.get_shape().as_list()[-1] == shortcut_output.get_shape().as_list()[-1], 'image channels mismatch at deconv block {0}'.format(self.block_id) + +# output = output + shortcut_output + +# activated=self.fin_act(output) + +# output=tf.nn.dropout(activated, keep_prob=self.keep_prob_last, seed=rnd_seed) + +# return self.fin_act(output) + +# def output_dim(self, input_dim): + +# dim = 
input_dim +# for _, _, stride, _, _, _, _, in self.sizes['deconvblock_layer_'+str(self.block_id)]: +# dim = int(np.ceil(float(dim)/stride)) + +# return dim \ No newline at end of file diff --git a/architectures/utils/NN_gen_building_blocks.py b/architectures/utils/NN_gen_building_blocks.py new file mode 100644 index 0000000..b0958e7 --- /dev/null +++ b/architectures/utils/NN_gen_building_blocks.py @@ -0,0 +1,2889 @@ +#GENERATIVE MODELS BUILDING BLOCKS +rnd_seed=1 + +import numpy as np +import os +import math + +import tensorflow as tf +import matplotlib.pyplot as plt +from datetime import datetime + +st = tf.contrib.bayesflow.stochastic_tensor +Normal = tf.contrib.distributions.Normal +Bernoulli = tf.contrib.distributions.Bernoulli + +from architectures.utils.NN_building_blocks import * +from architectures.utils.toolbox import * + +#GANS + +#(residual) convolution to (?,1) shape +class Discriminator(object): + + def __init__(self, X, d_sizes, d_name): + + self.residual=False + + for key in d_sizes: + if not 'block' in key: + self.residual=False + else: + self.residual=True + + _, dim_H, dim_W, mi = X.get_shape().as_list() + + if not self.residual: + print('Convolutional Network architecture detected for discriminator '+ d_name) + + + with tf.variable_scope('discriminator_'+d_name) as scope: + #building discriminator convolutional layers + + self.d_conv_layers =[] + count=0 + for mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init in d_sizes['conv_layers']: + + # make up a name - used for get_variable + name = "d_conv_layer_%s" % count + #print(name) + count += 1 + + layer = ConvLayer(name, mi, mo, + filter_sz, stride, + apply_batch_norm, keep_prob, + act_f, w_init) + + self.d_conv_layers.append(layer) + mi = mo + + dim_H = int(np.ceil(float(dim_H) / stride)) + dim_W = int(np.ceil(float(dim_W) / stride)) + + mi = mi * dim_H * dim_W + + #building discriminator dense layers + + self.d_dense_layers = [] + for mo, apply_batch_norm, keep_prob, act_f, w_init 
in d_sizes['dense_layers']:

                    name = 'd_dense_layer_%s' %count
                    #print(name)
                    count +=1

                    layer = DenseLayer(name, mi, mo,
                                       apply_batch_norm, keep_prob,
                                       act_f, w_init)
                    mi = mo
                    self.d_dense_layers.append(layer)

                #final logistic layer: single unactivated readout unit

                name = 'd_dense_layer_%s' %count
                w_init_last = d_sizes['readout_layer_w_init']
                #print(name)
                self.d_final_layer = DenseLayer(name, mi, 1,
                                                False, keep_prob=1,
                                                act_f=lambda x: x, w_init=w_init_last)


                self.d_name=d_name
        else:

            print('Residual Convolutional Network architecture detected for discriminator'+ d_name)

            with tf.variable_scope('discriminator_'+d_name) as scope:
                #building discriminator convolutional layers

                self.d_blocks = []
                #count conv blocks: every 'conv' key that is not a shortcut
                d_steps = 0
                for key in d_sizes:
                    if 'conv' in key:
                        if not 'shortcut' in key:
                            d_steps+=1

                d_block_n=0
                d_layer_n=0


                for key in d_sizes:

                    # NOTE(review): `'block' and 'shortcut' in key` evaluates as
                    # `('block') and ('shortcut' in key)`, i.e. only the
                    # 'shortcut' test is effective -- verify intent.
                    if 'block' and 'shortcut' in key:

                        d_block = ConvBlock(d_block_n,
                                            mi, d_sizes,
                                            )
                        self.d_blocks.append(d_block)

                        # NOTE(review): unpacks 8 fields, but the
                        # 'convblock_layer_*' tuples used by ConvBlock carry 7
                        # fields -- confirm against the callers.
                        mo, _, _, _, _, _, _, _, = d_sizes['convblock_layer_'+str(d_block_n)][-1]
                        mi = mo
                        dim_H = d_block.output_dim(dim_H)
                        dim_W = d_block.output_dim(dim_W)
                        d_block_n+=1


                    if 'conv_layer' in key:

                        name = 'd_conv_layer_{0}'.format(d_layer_n)

                        mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = d_sizes[key][0]


                        d_conv_layer = ConvLayer(name, mi, mo,
                                                 filter_sz, stride,
                                                 apply_batch_norm, keep_prob,
                                                 act_f, w_init
                                                 )

                        self.d_blocks.append(d_conv_layer)

                        mi = mo
                        dim_W = int(np.ceil(float(dim_W) / stride))
                        dim_H = int(np.ceil(float(dim_H) / stride))
                        d_layer_n+=1

                assert d_block_n+d_layer_n==d_steps, '\nCheck keys in d_sizes, \n total convolution steps do not mach sum between convolutional blocks and convolutional layers'

                count=d_steps

                # flattened feature count feeding the dense head
                mi = mi * dim_H * dim_W

                #build dense layers

                self.d_dense_layers = []
                for mo, apply_batch_norm, keep_prob, act_f, w_init in d_sizes['dense_layers']:

                    name = 'd_dense_layer_%s' %count
                    count +=1

                    layer = DenseLayer(name,mi, mo,
                                       apply_batch_norm, keep_prob,
                                       act_f, w_init)
                    mi = mo
                    self.d_dense_layers.append(layer)

                #final logistic layer: single unactivated readout unit
                name = 'd_dense_layer_%s' %count
                w_init_last = d_sizes['readout_layer_w_init']
                #print(name)
                self.d_final_layer = DenseLayer(name, mi, 1,
                                                False, keep_prob=1,
                                                act_f=lambda x: x, w_init=w_init_last)


                self.d_steps=d_steps
                self.d_name = d_name

    def d_forward(self, X, reuse = None, is_training=True):
        """Run X through the discriminator and return unactivated logits.

        Dispatches on self.residual: plain conv stack + dense head, or
        residual blocks + dense head.
        """

        if not self.residual:
            print('Discriminator_'+self.d_name)
            print('Convolution')

            output = X
            print('Input for convolution shape ', X.get_shape())
            i=0
            for layer in self.d_conv_layers:
                i+=1
                output = layer.forward(output,
                                       reuse,
                                       is_training)

            output = tf.contrib.layers.flatten(output)
            i=0
            for layer in self.d_dense_layers:
                output = layer.forward(output,
                                       reuse,
                                       is_training)
                i+=1

            logits = self.d_final_layer.forward(output,
                                                reuse,
                                                is_training)
            print('Logits shape', logits.get_shape())
            return logits
        else:
            print('Redisual discriminator_'+self.d_name)
            print('Convolution')

            output = X

            i=0
            print('Input for convolution shape ', X.get_shape())
            for block in self.d_blocks:
                i+=1
                output = block.forward(output,
                                       reuse,
                                       is_training)


            output = tf.contrib.layers.flatten(output)

            i=0
            for layer in self.d_dense_layers:
                output =
#patchGAN architecture, convolution to (?, n_H_d, n_W_d, 1) shape
class pix2pixDiscriminator(object):
    """PatchGAN discriminator: convolves the channel-concatenation of a
    conditioning image and a candidate image down to a (?, n_H_d, n_W_d, 1)
    grid of per-patch logits (no global dense readout).

    d_sizes drives construction: plain 'conv_layers' list, or residual
    'convblock_*'/'conv_layer_*' keys (residual mode is selected when any
    key contains 'block').
    """

    def __init__(self, X, d_sizes, d_name):

        # Residual mode iff at least one key of d_sizes contains 'block'
        # (the loop keeps overwriting the flag; the last matching key wins,
        # which is equivalent to any()).
        self.residual=False

        for key in d_sizes:
            if not 'block' in key:
                self.residual=False
            else:
                self.residual=True

        _, dim_H, dim_W, mi = X.get_shape().as_list()

        mi = 2*mi #takes as an input the concatenated true and fake images

        if not self.residual:

            print('Convolutional pix2pix Network architecture detected for discriminator '+ d_name)

            with tf.variable_scope('discriminator_'+d_name) as scope:

                #building discriminator convolutional layers
                self.d_conv_layers =[]

                count=0

                for mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init in d_sizes['conv_layers']:

                    # make up a name - used for get_variable
                    name = "d_conv_layer_%s" % count
                    #print(name)
                    count += 1

                    layer = ConvLayer(name, mi, mo,
                                      filter_sz, stride,
                                      apply_batch_norm, keep_prob,
                                      act_f, w_init)

                    self.d_conv_layers.append(layer)
                    mi = mo
                    # Track spatial shrinkage (SAME-padding convention).
                    dim_H = int(np.ceil(float(dim_H) / stride))
                    dim_W = int(np.ceil(float(dim_W) / stride))


                #final unactivated conv layer producing the 1-channel patch logits
                filter_sz, stride, apply_batch_norm, keep_prob, w_init_last = d_sizes['readout_conv_layer'][0]
                count +=1
                name = 'last_conv_layer'
                self.last_conv_layer = ConvLayer(name, mi, 1,
                                                 filter_sz, stride,
                                                 apply_batch_norm, keep_prob,
                                                 lambda x: x, w_init_last)
                self.d_name=d_name
        else:

            print('Residual Convolutional pix2pix Network architecture detected for discriminator'+ d_name)

            with tf.variable_scope('discriminator_'+d_name) as scope:
                #building discriminator convolutional layers

                self.d_blocks = []
                #count conv steps: every 'conv*' key that is not a shortcut
                d_steps = 0
                for key in d_sizes:
                    if 'conv' in key:
                        if not 'shortcut' in key:
                            d_steps+=1

                d_block_n=0
                d_layer_n=0


                for key in d_sizes:

                    # BUGFIX: was `if 'block' and 'shortcut' in key:`, which
                    # evaluates as `'shortcut' in key` only ('block' is a truthy
                    # constant). Made the intended conjunction explicit.
                    if 'block' in key and 'shortcut' in key:

                        d_block = ConvBlock(d_block_n,
                                            mi, d_sizes,
                                            )
                        self.d_blocks.append(d_block)

                        mo, _, _, _, _, _, _, _, = d_sizes['convblock_layer_'+str(d_block_n)][-1]
                        mi = mo
                        dim_H = d_block.output_dim(dim_H)
                        dim_W = d_block.output_dim(dim_W)
                        d_block_n+=1


                    if 'conv_layer' in key:

                        name = 'd_conv_layer_{0}'.format(d_layer_n)

                        mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = d_sizes[key][0]


                        d_conv_layer = ConvLayer(name, mi, mo,
                                                 filter_sz, stride,
                                                 apply_batch_norm, keep_prob,
                                                 act_f, w_init
                                                 )

                        self.d_blocks.append(d_conv_layer)

                        mi = mo
                        dim_W = int(np.ceil(float(dim_W) / stride))
                        dim_H = int(np.ceil(float(dim_H) / stride))
                        d_layer_n+=1

                assert d_block_n+d_layer_n==d_steps, '\nCheck keys in d_sizes, \n total convolution steps do not match sum between convolutional blocks and convolutional layers'

                # BUGFIX: `count` was used below without ever being initialized
                # in this branch (NameError on any residual configuration).
                # Initialize it to the number of convolution steps, mirroring
                # the sibling residual Discriminator class.
                count = d_steps

                #final unactivated conv layer producing the 1-channel patch logits
                filter_sz, stride, apply_batch_norm, keep_prob, w_init_last = d_sizes['readout_conv_layer'][0]
                count +=1
                name = 'last_conv_layer'
                self.last_conv_layer = ConvLayer(name, mi, 1,
                                                 filter_sz, stride,
                                                 apply_batch_norm, keep_prob,
                                                 lambda x: x, w_init_last)
                self.d_name=d_name

    def d_forward(self, X, samples, reuse = None, is_training=True):
        """Score (conditioning image X, candidate image samples) pairs.

        The two tensors are concatenated along channels, pushed through the
        conv stack (plain or residual), then through the unactivated readout
        convolution. Returns per-patch logits.
        """
        if not self.residual:
            print('Discriminator_'+self.d_name)

            output = tf.concat([X,samples],axis=3)
            print('Input for convolution shape ', X.get_shape())

            i=0
            for layer in self.d_conv_layers:
                i+=1
                output = layer.forward(output,
                                       reuse,
                                       is_training)

            logits = self.last_conv_layer.forward(output,
                                                  reuse,
                                                  is_training)

            print('Logits shape', logits.get_shape())
            return logits
        else:
            print('Discriminator_'+self.d_name)

            output = tf.concat([X,samples],axis=3)
            print('Input for convolution shape ', X.get_shape())

            i=0
            for block in self.d_blocks:
                i+=1
                output = block.forward(output,
                                       reuse,
                                       is_training)

            logits = self.last_conv_layer.forward(output,
                                                  reuse,
                                                  is_training)

            print('Logits shape', logits.get_shape())
            return logits
#convolution to (?, 1) shape with minibatch discrimination, outputs features for feature matching
class Discriminator_minibatch(object):
    """Discriminator with minibatch discrimination (Salimans et al. 2016).

    Conv stack -> flatten -> dense stack; the last dense activation is both
    (a) returned as `feature_output` for feature matching and (b) projected
    by `mb_layer` into num_kernels x kernel_dim kernels whose cross-sample
    L1 closeness is appended as extra features before the 1-unit readout.
    """

    def __init__(self, X, d_sizes, d_name):

        # Minibatch-discrimination hyper-parameters (fixed, not taken from d_sizes).
        self.num_kernels=10
        self.kernel_dim=8

        _, dim_H, dim_W, mi = X.get_shape().as_list()


        print('Convolutional Network architecture detected for discriminator '+ d_name)


        with tf.variable_scope('discriminator_'+d_name) as scope:
            #building discriminator convolutional layers

            self.d_conv_layers =[]
            count=0
            for mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init in d_sizes['conv_layers']:

                # make up a name - used for get_variable
                name = "d_conv_layer_%s" % count
                #print(name)
                count += 1

                layer = ConvLayer(name, mi, mo,
                                  filter_sz, stride,
                                  apply_batch_norm, keep_prob,
                                  act_f, w_init)

                self.d_conv_layers.append(layer)
                mi = mo

                # spatial size after a SAME-padded strided convolution
                dim_H = int(np.ceil(float(dim_H) / stride))
                dim_W = int(np.ceil(float(dim_W) / stride))

            # fan-in of the first dense layer = flattened conv output
            mi = mi * dim_H * dim_W

            #building discriminator dense layers

            self.d_dense_layers = []
            for i, (mo, apply_batch_norm, keep_prob, act_f, w_init) in enumerate(d_sizes['dense_layers']):

                name = 'd_dense_layer_%s' %count
                #print(name)
                count +=1

                layer = DenseLayer(name, mi, mo,
                                   apply_batch_norm, keep_prob,
                                   act_f, w_init)

                self.d_dense_layers.append(layer)
                mi = mo
                if i == len(d_sizes['dense_layers'])-1:
                    # unactivated projection used for minibatch discrimination,
                    # built on top of the LAST dense layer's output
                    name = "mb_disc_layer"
                    self.mb_layer = DenseLayer(name, mi, self.num_kernels*self.kernel_dim, False,
                                               keep_prob=1, act_f=lambda x:x, w_init=tf.truncated_normal_initializer(stddev=0.01))


            #final logistic layer
            # fan-in is last dense width + num_kernels minibatch features

            name = 'd_dense_layer_%s' %count
            w_init_last = d_sizes['readout_layer_w_init']
            #print(name)
            #
            self.d_final_layer = DenseLayer(name, mi + self.num_kernels , 1,
                                            False, keep_prob=1,
                                            act_f=lambda x: x, w_init=w_init_last)


            self.d_name=d_name


    def d_forward(self, X, reuse = None, is_training=True):
        """Return (logits, feature_output).

        feature_output is the last dense layer's activation (for feature
        matching); logits come from the readout over that activation
        concatenated with the minibatch-discrimination features.
        """
        print('Discriminator_'+self.d_name)
        #print('Convolution')

        output = X
        print('Input for convolution shape ', X.get_shape())

        for i, layer in enumerate(self.d_conv_layers):
            # print('Convolution_layer_%i' %i)
            # print('Input shape', output.get_shape())
            output = layer.forward(output,
                                   reuse,
                                   is_training)

            #print('After convolution shape', output.get_shape())

        output = tf.contrib.layers.flatten(output)
        #print('After flatten shape', output.get_shape())

        for i, layer in enumerate(self.d_dense_layers):
            #print('Dense weights %i' %i)
            #print(layer.W.get_shape())
            output = layer.forward(output,
                                   reuse,
                                   is_training)
            if i==len(self.d_dense_layers)-1:
                feature_output=output

            if i==len(self.d_dense_layers)-1:
                output_mb=self.mb_layer.forward(output,
                                                reuse,
                                                is_training)
        # print('After dense layer_%i' %i)
        # print('Shape', output.get_shape())

        # Minibatch discrimination: per-sample kernels, pairwise L1 distances
        # across the batch, exp(-distance) summed over the batch.
        activation = tf.reshape(output_mb, (-1, self.num_kernels, self.kernel_dim))
        diffs = tf.expand_dims(activation, 3) - tf.expand_dims(
            tf.transpose(activation, [1, 2, 0]), 0)

        # eps adds 1 on the self-comparison diagonal so a sample's distance
        # to itself does not dominate the feature.
        eps = tf.expand_dims(tf.eye(tf.shape(X)[0], dtype=np.float32), 1)
        abs_diffs = tf.reduce_sum(tf.abs(diffs), 2) + eps
        minibatch_features = tf.reduce_sum(tf.exp(-abs_diffs), 2)
        print('minibatch features shape', minibatch_features.get_shape())
        output=tf.concat([output, minibatch_features], 1)

        logits = self.d_final_layer.forward(output,
                                            reuse,
                                            is_training)
        print('Feature output shape', feature_output.get_shape())
        print('Logits shape', logits.get_shape())

        return logits, feature_output
#conditional discriminator, convolution to (?, 1) shape with minibatch discrimination, outputs features for feature matching
class condDiscriminator(object):
    """Label-conditioned discriminator with minibatch discrimination.

    The one-hot label y (width dim_y) is re-concatenated after the input and
    after EVERY conv/dense layer (conv_concat / lin_concat), so each layer's
    fan-in is widened by dim_y during construction.

    NOTE(review): the non-residual d_forward returns (logits, feature_output)
    while the residual path returns only logits and never computes minibatch
    features — callers must know which mode they built; verify before relying
    on the tuple shape.
    """
    def __init__(self, X, dim_y, d_sizes, d_name):

        # Minibatch-discrimination hyper-parameters (fixed, not configurable).
        self.num_kernels=12
        self.kernel_dim=12

        # Residual mode iff some d_sizes key contains 'block' (last key wins,
        # equivalent to any()).
        self.residual=False

        for key in d_sizes:
            if not 'block' in key:
                self.residual=False
            else:
                self.residual=True

        _, dim_H, dim_W, mi = X.get_shape().as_list()

        # input channels widened by the broadcast label maps
        mi = mi + dim_y
        self.dim_y=dim_y
        if not self.residual:
            print('Convolutional Network architecture detected for discriminator '+ d_name)


            with tf.variable_scope('discriminator_'+d_name) as scope:
                #building discriminator convolutional layers

                self.d_conv_layers =[]
                count=0
                for mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init in d_sizes['conv_layers']:

                    # make up a name - used for get_variable
                    name = "d_conv_layer_%s" % count
                    #print(name)
                    count += 1

                    layer = ConvLayer(name, mi, mo,
                                      filter_sz, stride,
                                      apply_batch_norm, keep_prob,
                                      act_f, w_init)

                    self.d_conv_layers.append(layer)
                    mi = mo
                    # next layer also sees the label maps appended in d_forward
                    mi = mi + dim_y

                    dim_H = int(np.ceil(float(dim_H) / stride))
                    dim_W = int(np.ceil(float(dim_W) / stride))

                mi = mi * dim_H * dim_W

                #building discriminator dense layers
                # flattened features are again concatenated with y
                mi = mi + dim_y
                self.d_dense_layers = []
                self.mb_dense_layers = []
                for i, (mo, apply_batch_norm, keep_prob, act_f, w_init) in enumerate(d_sizes['dense_layers'], 0):

                    name = 'd_dense_layer_%s' %count
                    #print(name)
                    count +=1

                    layer = DenseLayer(name, mi, mo,
                                       apply_batch_norm, keep_prob,
                                       act_f, w_init)
                    mi = mo
                    mi = mi + dim_y
                    self.d_dense_layers.append(layer)

                    if i == len(d_sizes['dense_layers'])-1:
                        # unactivated minibatch-discrimination projection on the
                        # last dense activation (+ label)
                        name = "mb_disc_layer"
                        self.mb_layer = DenseLayer(name, mi, self.num_kernels*self.kernel_dim, False,
                                                   keep_prob=1, act_f=lambda x:x, w_init=tf.truncated_normal_initializer(stddev=0.01))

                #final logistic layer

                name = 'd_dense_layer_%s' %count
                w_init_last = d_sizes['readout_layer_w_init']
                #mi + self.num_kernels
                self.d_final_layer = DenseLayer(name, mi + self.num_kernels, 1,
                                                False, keep_prob=1,
                                                act_f=lambda x: x, w_init=w_init_last)


                self.d_name=d_name
                self.dim_y= dim_y
        else:

            print('Residual Convolutional Network architecture detected for discriminator'+ d_name)

            with tf.variable_scope('discriminator_'+d_name) as scope:
                #building discriminator convolutional layers

                self.d_blocks = []
                #count conv steps: every 'conv*' key that is not a shortcut
                d_steps = 0
                for key in d_sizes:
                    if 'conv' in key:
                        if not 'shortcut' in key:
                            d_steps+=1

                d_block_n=0
                d_layer_n=0


                for key in d_sizes:

                    # NOTE(review): `'block' and 'shortcut' in key` evaluates as
                    # `'shortcut' in key` only — works for the expected
                    # 'convblock_shortcut_*' keys but the conjunction is not
                    # actually tested. Left as-is to preserve behavior.
                    if 'block' and 'shortcut' in key:

                        d_block = ConvBlock(d_block_n,
                                            mi, d_sizes,
                                            )
                        self.d_blocks.append(d_block)

                        mo, _, _, _, _, _, _, _, = d_sizes['convblock_layer_'+str(d_block_n)][-1]
                        mi = mo
                        dim_H = d_block.output_dim(dim_H)
                        dim_W = d_block.output_dim(dim_W)
                        d_block_n+=1


                    if 'conv_layer' in key:

                        name = 'd_conv_layer_{0}'.format(d_layer_n)

                        mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = d_sizes[key][0]


                        d_conv_layer = ConvLayer(name, mi, mo,
                                                 filter_sz, stride,
                                                 apply_batch_norm, keep_prob,
                                                 act_f, w_init
                                                 )

                        self.d_blocks.append(d_conv_layer)

                        mi = mo
                        dim_W = int(np.ceil(float(dim_W) / stride))
                        dim_H = int(np.ceil(float(dim_H) / stride))
                        d_layer_n+=1

                assert d_block_n+d_layer_n==d_steps, '\nCheck keys in d_sizes, \n total convolution steps do not mach sum between convolutional blocks and convolutional layers'

                count=d_steps

                mi = mi * dim_H * dim_W

                #build dense layers

                self.d_dense_layers = []
                for mo, apply_batch_norm, keep_prob, act_f, w_init in d_sizes['dense_layers']:

                    name = 'd_dense_layer_%s' %count
                    count +=1

                    layer = DenseLayer(name,mi, mo,
                                       apply_batch_norm, keep_prob,
                                       act_f, w_init)
                    mi = mo
                    self.d_dense_layers.append(layer)

                #final logistic layer
                name = 'd_dense_layer_%s' %count
                w_init_last = d_sizes['readout_layer_w_init']
                #print(name)
                self.d_final_layer = DenseLayer(name, mi, 1,
                                                False, keep_prob=1,
                                                act_f=lambda x: x, w_init=w_init_last)


                self.d_steps=d_steps
                self.d_name = d_name

    def d_forward(self, X, y, reuse = None, is_training=True):
        """Score images X conditioned on labels y.

        Non-residual: returns (logits, feature_output) where feature_output
        is the activation of the middle conv layer (for feature matching).
        Residual: returns logits only (label conditioning is NOT applied on
        this path — see class note).
        """
        if not self.residual:
            print('Discriminator_'+self.d_name)
            print('Convolution')

            output = X
            output = conv_concat(output, y, self.dim_y)
            #output = conv_cond_concat(output, yb)
            print('Input for convolution shape ', output.get_shape())
            i=0
            for layer in self.d_conv_layers:
                i+=1
                #print('Convolution_layer_%i' %i)
                #print('Input shape', output.get_shape())
                output = layer.forward(output,
                                       reuse,
                                       is_training)
                output = conv_concat(output, y, self.dim_y)
                # middle conv activation is kept for feature matching
                if i==np.ceil(len(self.d_conv_layers)/2):
                    feature_output=output
                #print('After convolution shape', output.get_shape())

            output = tf.contrib.layers.flatten(output)
            output = lin_concat(output, y, self.dim_y)

            #print('After flatten shape', output.get_shape())
            i=0
            for layer in self.d_dense_layers:
                output = layer.forward(output,
                                       reuse,
                                       is_training)
                output = lin_concat(output, y, self.dim_y)

                if i==len(self.d_dense_layers)-1:

                    # minibatch discrimination on the last dense activation
                    output_mb=self.mb_layer.forward(output,
                                                    reuse,
                                                    is_training)

                    activation = tf.reshape(output_mb, (-1, self.num_kernels, self.kernel_dim))
                    diffs = tf.expand_dims(activation, 3) - tf.expand_dims(
                        tf.transpose(activation, [1, 2, 0]), 0)

                    #eps = tf.expand_dims(tf.eye(tf.shape(X)[0], dtype=np.float32), 1)
                    abs_diffs = tf.reduce_sum(tf.abs(diffs), 2) #+ eps
                    minibatch_features = tf.reduce_sum(tf.exp(-abs_diffs), 2)
                    print('minibatch features shape', minibatch_features.get_shape())
                    output=tf.concat([output, minibatch_features], 1)

                i+=1

                #print('After dense layer_%i' %i)
                #print('Shape', output.get_shape())

            logits = self.d_final_layer.forward(output,
                                                reuse,
                                                is_training)
            print('Logits shape', logits.get_shape())
            return logits, feature_output
        else:
            print('Redisual discriminator_'+self.d_name)
            print('Convolution')

            output = X

            i=0
            print('Input for convolution shape ', X.get_shape())
            for block in self.d_blocks:
                i+=1
                #print('Convolution_block_%i' %i)
                #print('Input shape', output.get_shape())
                output = block.forward(output,
                                       reuse,
                                       is_training)
                #print('After block shape', output.get_shape())


            output = tf.contrib.layers.flatten(output)
            #print('After flatten shape', output.get_shape())

            i=0
            for layer in self.d_dense_layers:
                #print('Dense weights %i' %i)
                #print(layer.W.get_shape())
                output = layer.forward(output,
                                       reuse,
                                       is_training)
                i+=1
                #print('After dense layer_%i' %i)
                #print('Shape', output.get_shape())

            logits = self.d_final_layer.forward(output,
                                                reuse,
                                                is_training)
            print('Logits shape', logits.get_shape())
            return logits
#(residual) deconvolution of (?, latent_dims,) to (?,n_H, n_W,n_C) image shape
class Generator(object):
    """DCGAN-style generator: latent vector Z -> dense stack -> projection ->
    reshape to (dims_H[0], dims_W[0], projection) -> transposed-conv stack up
    to the requested (dim_H, dim_W) output.

    g_sizes supplies 'z' (latent size), 'dense_layers', 'projection' and
    either a plain 'conv_layers' list or residual 'deconvblock_*' /
    'deconv_layer_*' keys (residual mode iff any key contains 'block').
    """

    def __init__(self, Z, dim_H, dim_W, g_sizes, g_name):

        # Residual mode iff some g_sizes key contains 'block' (last key wins,
        # equivalent to any()).
        self.residual=False
        for key in g_sizes:
            if not 'block' in key:
                self.residual=False
            else :
                self.residual=True

        #dimensions of input
        latent_dims = g_sizes['z']

        #dimensions of output generated images
        dims_H =[dim_H]
        dims_W =[dim_W]
        mi = latent_dims

        if not self.residual:

            print('Convolutional architecture detected for generator ' + g_name)

            with tf.variable_scope('generator_'+g_name) as scope:

                #building generator dense layers
                self.g_dense_layers = []
                count = 0

                for mo, apply_batch_norm, keep_prob, act_f, w_init in g_sizes['dense_layers']:
                    name = 'g_dense_layer_%s' %count
                    #print(name)
                    count += 1
                    layer = DenseLayer(name, mi, mo,
                                       apply_batch_norm, keep_prob,
                                       act_f=act_f , w_init=w_init
                                       )

                    self.g_dense_layers.append(layer)
                    mi = mo

                #deconvolutional layers
                # Walk the conv specs backwards from the target size to find the
                # spatial size at every stage (and hence the projection size).
                for _, _, stride, _, _, _, _, in reversed(g_sizes['conv_layers']):

                    dim_H = int(np.ceil(float(dim_H)/stride))
                    dim_W = int(np.ceil(float(dim_W)/stride))

                    dims_H.append(dim_H)
                    dims_W.append(dim_W)

                dims_H = list(reversed(dims_H))
                dims_W = list(reversed(dims_W))
                self.g_dims_H = dims_H
                self.g_dims_W = dims_W

                #last dense layer: projection to the first feature-map volume
                projection, bn_after_project, keep_prob, act_f, w_init = g_sizes['projection'][0]

                mo = projection*dims_H[0]*dims_W[0]
                name = 'g_dense_layer_%s' %count
                count+=1
                #print(name)
                self.g_final_layer = DenseLayer(name, mi, mo, not bn_after_project, keep_prob, act_f, w_init)

                mi = projection
                self.g_conv_layers=[]

                for i, (mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init) in enumerate(g_sizes['conv_layers'] , 1):
                    name = 'g_conv_layer_%s' %count
                    count +=1

                    layer = DeconvLayer(
                        name, mi, mo, [dims_H[i], dims_W[i]],
                        filter_sz, stride, apply_batch_norm, keep_prob,
                        act_f, w_init
                    )

                    self.g_conv_layers.append(layer)
                    mi = mo

        if self.residual:

            print('Residual convolutional architecture detected for generator ' + g_name)
            with tf.variable_scope('generator_'+g_name) as scope:

                #dense layers
                self.g_dense_layers = []
                count = 0

                mi = latent_dims

                for mo, apply_batch_norm, keep_prob, act_f, w_init in g_sizes['dense_layers']:
                    name = 'g_dense_layer_%s' %count
                    count += 1

                    # BUGFIX: was `f=act_f`, but DenseLayer takes the keyword
                    # `act_f` (see every other call site in this file), so the
                    # residual path raised TypeError on construction.
                    layer = DenseLayer(
                        name, mi, mo,
                        apply_batch_norm, keep_prob,
                        act_f=act_f, w_init=w_init
                    )
                    self.g_dense_layers.append(layer)
                    mi = mo

                #checking generator architecture:
                # one "step" per deconv key that is not a shortcut
                g_steps = 0
                for key in g_sizes:
                    if 'deconv' in key:
                        if not 'shortcut' in key:
                            g_steps+=1

                g_block_n=0
                g_layer_n=0

                for key in g_sizes:
                    # BUGFIX: was `if 'block' and 'shortcut' in key:` which only
                    # tests `'shortcut' in key`; made the conjunction explicit.
                    if 'block' in key and 'shortcut' in key:
                        g_block_n+=1
                    if 'deconv_layer' in key:
                        g_layer_n +=1

                assert g_block_n+g_layer_n==g_steps, '\nCheck keys in g_sizes, \n sum of generator steps do not coincide with sum of convolutional layers and convolutional blocks'

                layers_output_sizes={}
                blocks_output_sizes={}

                #calculating the output size for each transposed convolutional step
                # (walk the config backwards from the target resolution)
                for key, item in reversed(list(g_sizes.items())):

                    if 'deconv_layer' in key:

                        _, _, stride, _, _, _, _, = g_sizes[key][0]
                        layers_output_sizes[g_layer_n-1]= [dim_H, dim_W]

                        dim_H = int(np.ceil(float(dim_H)/stride))
                        dim_W = int(np.ceil(float(dim_W)/stride))
                        dims_H.append(dim_H)
                        dims_W.append(dim_W)

                        g_layer_n -= 1


                    if 'deconvblock_layer' in key:

                        for _ ,_ , stride, _, _, _, _, in g_sizes[key]:

                            dim_H = int(np.ceil(float(dim_H)/stride))
                            dim_W = int(np.ceil(float(dim_W)/stride))
                            dims_H.append(dim_H)
                            dims_W.append(dim_W)

                        blocks_output_sizes[g_block_n-1] = [[dims_H[j],dims_W[j]] for j in range(1, len(g_sizes[key])+1)]
                        g_block_n -=1

                dims_H = list(reversed(dims_H))
                dims_W = list(reversed(dims_W))

                #saving for later
                self.g_dims_H = dims_H
                self.g_dims_W = dims_W

                #final dense layer: projection to the first feature-map volume
                projection, bn_after_project, keep_prob, act_f, w_init = g_sizes['projection'][0]

                mo = projection*dims_H[0]*dims_W[0]
                name = 'g_dense_layer_%s' %count
                layer = DenseLayer(name, mi, mo, not bn_after_project, keep_prob, act_f, w_init)
                self.g_dense_layers.append(layer)

                #deconvolution input channel number
                mi = projection
                self.g_blocks=[]

                block_n=0 #keep count of the block number
                layer_n=0 #keep count of conv layer number
                i=0
                for key in g_sizes:

                    # BUGFIX: same `and`/`in` precedence fix as above.
                    if 'block' in key and 'shortcut' in key:

                        g_block = DeconvBlock(block_n,
                                              mi, blocks_output_sizes, g_sizes,
                                              )
                        self.g_blocks.append(g_block)

                        mo, _, _, _, _, _, _, = g_sizes['deconvblock_layer_'+str(block_n)][-1]
                        mi = mo
                        block_n+=1
                        count+=1
                        i+=1

                    if 'deconv_layer' in key:

                        name = 'g_conv_layer_{0}'.format(layer_n)

                        mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = g_sizes[key][0]

                        g_conv_layer = DeconvLayer(
                            name, mi, mo, layers_output_sizes[layer_n],
                            filter_sz, stride, apply_batch_norm, keep_prob,
                            act_f, w_init
                        )
                        self.g_blocks.append(g_conv_layer)

                        mi=mo
                        layer_n+=1
                        count+=1
                        i+=1

                assert i==g_steps, 'Check convolutional layer and block building, steps in building do not coincide with g_steps'
                assert g_steps==block_n+layer_n, 'Check keys in g_sizes'

        self.g_sizes=g_sizes
        self.g_name = g_name
        self.projection = projection
        self.bn_after_project = bn_after_project

    def g_forward(self, Z, reuse=None, is_training=True):
        """Map latent batch Z to generated images.

        Dense stack -> projection -> reshape -> (optional batch-norm) ->
        transposed convolutions (plain layers or residual blocks).
        """
        if not self.residual:

            print('Generator_'+self.g_name)
            print('Deconvolution')
            #dense layers

            output = Z
            print('Input for deconvolution shape', Z.get_shape())
            i=0
            for layer in self.g_dense_layers:
                output = layer.forward(output, reuse, is_training)
                i+=1

            output = self.g_final_layer.forward(output, reuse, is_training)

            output = tf.reshape(
                output,

                [-1, self.g_dims_H[0], self.g_dims_W[0], self.projection]

            )

            if self.bn_after_project:
                output = tf.contrib.layers.batch_norm(
                    output,
                    decay=0.9,
                    updates_collections=None,
                    epsilon=1e-5,
                    scale=True,
                    is_training=is_training,
                    reuse=reuse,
                    scope='bn_after_project'
                )
            # passing to deconv layers

            i=0
            for layer in self.g_conv_layers:
                i+=1
                output = layer.forward(output, reuse, is_training)


            print('Deconvoluted output shape', output.get_shape())
            return output
        else:

            print('Generator_'+self.g_name)
            print('Deconvolution')
            #dense layers (projection layer is the last entry of g_dense_layers)

            output = Z
            print('Input for deconvolution shape', Z.get_shape())
            i=0
            for layer in self.g_dense_layers:
                i+=1
                output = layer.forward(output, reuse, is_training)


            output = tf.reshape(
                output,

                [-1, self.g_dims_H[0], self.g_dims_W[0], self.projection]

            )

            if self.bn_after_project:
                output = tf.contrib.layers.batch_norm(
                    output,
                    decay=0.9,
                    updates_collections=None,
                    epsilon=1e-5,
                    scale=True,
                    is_training=is_training,
                    reuse=reuse,
                    scope='bn_after_project'
                )
            # passing to deconv blocks

            i=0
            for block in self.g_blocks:
                i+=1
                output = block.forward(output,
                                       reuse,
                                       is_training)


            print('Deconvoluted output shape', output.get_shape())
            return output
#conv / residual conv / deconv
class cycleGenerator(object):
    """Image-to-image generator: strided convolutions (downsample), optional
    residual blocks, then transposed convolutions (upsample) to the requested
    (dim_H, dim_W). Construction is driven by the key names of g_sizes:
    'conv_layer_*', 'convblock_*' (residual mode) and 'deconv_layer_*'.
    """

    def __init__(self, X, dim_H, dim_W, g_sizes, g_name):

        #input shape
        _, input_dim_H, input_dim_W, input_n_C = X.get_shape().as_list()
        #output shape

        dims_H =[dim_H]
        dims_W =[dim_W]

        # Residual mode iff some g_sizes key contains 'block'.
        self.residual=False
        for key in g_sizes:
            if not 'block' in key:
                self.residual=False
            else :
                self.residual=True

        if not self.residual:

            print('Convolutional Network architecture detected for generator '+ g_name)

            with tf.variable_scope('generator_'+g_name) as scope:

                count = 0
                #checking generator architecture: every key must be a conv or
                #deconv step
                g_steps=0
                for key in g_sizes:
                    g_steps+=1

                g_convs = 0
                g_deconvs = 0
                for key in g_sizes:
                    if 'conv' in key:
                        if not 'deconv' in key:
                            g_convs+=1
                    if 'deconv' in key:
                        g_deconvs+=1

                assert g_steps == g_convs + g_deconvs, '\nCheck keys in g_sizes, \n sum of generator steps do not coincide with sum of convolutional layers, convolutional blocks and deconv layers'

                #dimensions of output generated image: walk the deconv specs
                #backwards from the target resolution
                deconv_layers_output_sizes={}

                for key, item in reversed(list(g_sizes.items())):

                    if 'deconv_layer' in key:

                        _, _, stride, _, _, _, _, = g_sizes[key][0]
                        deconv_layers_output_sizes[g_deconvs-1]= [dim_H, dim_W]

                        dim_H = int(np.ceil(float(dim_H)/stride))
                        dim_W = int(np.ceil(float(dim_W)/stride))
                        dims_H.append(dim_H)
                        dims_W.append(dim_W)

                        g_deconvs -= 1

                assert g_deconvs == 0

                dims_H = list(reversed(dims_H))
                dims_W = list(reversed(dims_W))

                #saving for later
                self.g_dims_H = dims_H
                self.g_dims_W = dims_W


                #convolution input channel number
                mi = input_n_C

                self.g_conv_layers=[]
                self.g_deconv_layers=[]

                conv_layer_n=0 #keep count of conv layer number
                deconv_layer_n=0 #keep count of deconv layer number
                i=0 # keep count of the built blocks

                for key in g_sizes:

                    if 'conv_layer' in key:
                        if not 'deconv' in key:

                            name = 'g_conv_layer_{0}'.format(conv_layer_n)

                            mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = g_sizes[key][0]

                            g_conv_layer = ConvLayer(
                                name, mi, mo,
                                filter_sz, stride, apply_batch_norm, keep_prob,
                                act_f, w_init
                            )
                            self.g_conv_layers.append(g_conv_layer)
                            mi = mo
                            conv_layer_n +=1
                            count +=1
                            i+=1

                    if 'deconv_layer' in key:

                        name = 'g_deconv_layer_{0}'.format(deconv_layer_n)

                        mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = g_sizes[key][0]
                        g_deconv_layer = DeconvLayer(
                            name, mi, mo, deconv_layers_output_sizes[deconv_layer_n],
                            filter_sz, stride, apply_batch_norm, keep_prob,
                            act_f, w_init
                        )
                        self.g_deconv_layers.append(g_deconv_layer)

                        mi=mo
                        deconv_layer_n+=1
                        count+=1
                        i+=1

                assert i==conv_layer_n+deconv_layer_n, 'Check convolutional layer and block building, steps in building do not coincide with g_steps'

                #saving for later
                self.g_sizes=g_sizes
                self.g_name = g_name

        if self.residual:

            print('Residual Convolutional Network architecture detected for generator '+ g_name)

            with tf.variable_scope('generator_'+g_name) as scope:

                count = 0

                #checking generator architecture
                g_steps=0
                for key in g_sizes:
                    g_steps+=1

                g_convs = 0
                g_deconvs = 0
                g_conv_blocks = 0
                #g_deconv_blocks = 0

                for key in g_sizes:

                    if 'conv' in key:
                        if not 'deconv' in key:
                            if not 'block' in key:
                                g_convs+=1
                            # NOTE(review): evaluates as `'shortcut' in key`
                            # only; works for 'convblock_shortcut_*' keys.
                            if 'convblock' and 'shortcut' in key:
                                g_conv_blocks+=1

                    if 'deconv' in key:
                        if not 'shortcut' in key:
                            g_deconvs+=1

                # each residual block contributes 2 keys (layers + shortcut)
                assert g_steps == g_convs +2*(g_conv_blocks)+ g_deconvs, '\nCheck keys in g_sizes, \n sum of generator steps do not coincide with sum of convolutional layers, convolutional blocks and deconv layers'

                #dimensions of output generated image

                deconv_layers_output_sizes={}


                for key, item in reversed(list(g_sizes.items())):

                    if 'deconv_layer' in key:

                        _, _, stride, _, _, _, _, = g_sizes[key][0]
                        deconv_layers_output_sizes[g_deconvs-1]= [dim_H, dim_W]

                        dim_H = int(np.ceil(float(dim_H)/stride))
                        dim_W = int(np.ceil(float(dim_W)/stride))
                        dims_H.append(dim_H)
                        dims_W.append(dim_W)

                        g_deconvs -= 1

                assert g_deconvs == 0

                dims_H = list(reversed(dims_H))
                dims_W = list(reversed(dims_W))


                #saving for later
                self.g_dims_H = dims_H
                self.g_dims_W = dims_W


                #convolution input channel number
                mi = input_n_C

                self.g_blocks=[]

                block_n=0 #keep count of the block number
                conv_layer_n=0 #keep count of conv layer number
                deconv_layer_n=0 #keep count of deconv layer number
                i=0 # keep count of the built blocks

                for key in g_sizes:

                    if 'conv_layer' in key:
                        if not 'deconv' in key:

                            name = 'g_conv_layer_{0}'.format(conv_layer_n)

                            mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = g_sizes[key][0]

                            g_conv_layer = ConvLayer(
                                name, mi, mo,
                                filter_sz, stride, apply_batch_norm, keep_prob,
                                act_f, w_init
                            )
                            self.g_blocks.append(g_conv_layer)
                            mi = mo
                            conv_layer_n +=1
                            count +=1
                            i+=1


                    # NOTE(review): same degenerate conjunction as above.
                    if 'block' and 'shortcut' in key:

                        g_block = ConvBlock(block_n,
                                            mi, g_sizes,
                                            )
                        self.g_blocks.append(g_block)

                        mo, _, _, _, _, _, _, = g_sizes['convblock_layer_'+str(block_n)][-1]
                        mi = mo
                        block_n+=1
                        count+=1
                        i+=1

                    if 'deconv_layer' in key:

                        name = 'g_deconv_layer_{0}'.format(deconv_layer_n)

                        mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = g_sizes[key][0]
                        g_deconv_layer = DeconvLayer(
                            name, mi, mo, deconv_layers_output_sizes[deconv_layer_n],
                            filter_sz, stride, apply_batch_norm, keep_prob,
                            act_f, w_init
                        )
                        self.g_blocks.append(g_deconv_layer)

                        mi=mo
                        deconv_layer_n+=1
                        count+=1
                        i+=1

                assert i==block_n+conv_layer_n+deconv_layer_n, 'Check convolutional layer and block building, steps in building do not coincide with g_steps'

                #saving for later
                self.g_sizes=g_sizes
                self.g_name = g_name
        # return self.g_forward(Z)

    def g_forward(self, X, reuse=None, is_training=True):
        """Translate input images X through the conv(/block)/deconv pipeline.

        Non-residual: conv layers then deconv layers (two lists).
        Residual: a single ordered list self.g_blocks is applied in sequence.
        """
        if not self.residual:
            print('Generator_'+self.g_name)
            #dense layers

            output = X
            print('Input for generator shape', X.get_shape())

            i=0

            for conv_layer in self.g_conv_layers:
                i+=1
                output = conv_layer.forward(output,
                                            reuse,
                                            is_training)
                #print('After block step%i' %i)
                #print('shape: ', output.get_shape())
            for deconv_layer in self.g_deconv_layers:

                i+=1
                output = deconv_layer.forward(output,
                                              reuse,
                                              is_training)

            print('Generator output shape', output.get_shape())
            return output

        if self.residual:
            print('Generator_'+self.g_name)
            #dense layers

            output = X
            print('Input for generator shape', X.get_shape())

            i=0


            for block in self.g_blocks:
                i+=1
                output = block.forward(output,
                                       reuse,
                                       is_training)
                #print('After block step%i' %i)
                #print('shape: ', output.get_shape())


            print('Generator output shape', output.get_shape())
            return output
#residual conv / residual deconv
class cycleGenerator_fullresidual(object):
    """Fully-residual image-to-image generator: the whole pipeline is
    ConvBlocks (downsampling) followed by DeconvBlocks (upsampling); g_sizes
    must contain only 'convblock_*' and 'deconvblock_*' keys.
    """

    def __init__(self, X, dim_H, dim_W, g_sizes, g_name):

        _, input_dim_H, input_dim_W, input_n_C = X.get_shape().as_list()

        dims_H =[dim_H]
        dims_W =[dim_W]

        print('Residual Convolutional Network architecture (v2) detected for generator '+ g_name)

        with tf.variable_scope('generator_'+g_name) as scope:

            count = 0

            #checking generator architecture: every key must belong to a
            #conv block or a deconv block
            g_steps=0
            for key in g_sizes:
                g_steps+=1

            g_conv_blocks = 0
            g_deconv_blocks = 0

            for key in g_sizes:

                if 'conv' in key:
                    if not 'deconv' in key:
                        if 'block' in key:
                            g_conv_blocks+=1
                if 'deconv' in key:
                    if 'block' in key:
                        g_deconv_blocks+=1

            assert g_steps == g_conv_blocks+ g_deconv_blocks, '\nCheck keys in g_sizes, \n sum of generator steps do not coincide with sum of convolutional blocks and deconvolutional blocks'

            #dimensions of output generated image
            # each block owns two keys (layers + shortcut), so halve the count
            # to get the number of actual deconv blocks
            g_deconv_blocks=g_deconv_blocks//2
            deconv_blocks_output_sizes={}
            for key, item in reversed(list(g_sizes.items())):

                if 'deconvblock_layer' in key:

                    # walk strides backwards from the target resolution
                    for _ ,_ , stride, _, _, _, _, in g_sizes[key]:

                        dim_H = int(np.ceil(float(dim_H)/stride))
                        dim_W = int(np.ceil(float(dim_W)/stride))
                        dims_H.append(dim_H)
                        dims_W.append(dim_W)

                    deconv_blocks_output_sizes[g_deconv_blocks-1] = [[dims_H[j],dims_W[j]] for j in range(1, len(g_sizes[key])+1)]
                    g_deconv_blocks -=1

            dims_H = list(reversed(dims_H))
            dims_W = list(reversed(dims_W))

            assert g_deconv_blocks==0

            #convolution input channel number
            mi = input_n_C

            self.g_blocks=[]

            convblock_n=0 #keep count of the conv block number
            deconvblock_n=0 #keep count of deconv block number

            i=0 # keep count of the built blocks

            for key in g_sizes:

                if 'convblock_layer' in key:
                    if not 'deconv' in key:

                        g_block = ConvBlock(convblock_n,
                                            mi, g_sizes,
                                            )
                        self.g_blocks.append(g_block)

                        mo, _, _, _, _, _, _, = g_sizes['convblock_layer_'+str(convblock_n)][-1]
                        mi = mo
                        convblock_n+=1
                        i+=1


                if 'deconvblock_layer' in key:

                    g_block = DeconvBlock(deconvblock_n,
                                          mi, deconv_blocks_output_sizes, g_sizes,
                                          )
                    self.g_blocks.append(g_block)

                    mo, _, _, _, _, _, _, = g_sizes['deconvblock_layer_'+str(deconvblock_n)][-1]
                    mi = mo
                    deconvblock_n+=1
                    i+=1

            assert i==convblock_n+deconvblock_n, 'Check convolutional layer and block building, steps in building do not coincide with g_steps'

            #saving for later
            self.g_dims_H = dims_H
            self.g_dims_W = dims_W
            self.g_sizes=g_sizes
            self.g_name = g_name

    def g_forward(self, X, reuse=None, is_training=True):
        """Apply every residual block in construction order to X."""
        print('Generator_'+self.g_name)
        #dense layers

        output = X
        print('Input for generator shape', X.get_shape())

        i=0

        for block in self.g_blocks:
            i+=1
            output = block.forward(output,
                                   reuse,
                                   is_training)
            #print('After block step%i' %i)
            #print('shape: ', output.get_shape())

        print('Generator output shape', output.get_shape())
        return output
#pix2pix architecture, u_net
#works with same dim of input and output
class pix2pixGenerator(object):
    """U-Net generator (pix2pix): convolutional encoder, then a
    transposed-convolution decoder with skip connections. Decoder layer i>1
    takes the channel-concatenation of the previous decoder output and the
    matching encoder activation, hence its fan-in 2*mi.
    """

    def __init__(self, X, output_dim_H, output_dim_W, g_enc_sizes, g_dec_sizes, g_name):

        _, input_dim_H, input_dim_W, input_n_C = X.get_shape().as_list()

        enc_dims_H=[input_dim_H]
        enc_dims_W=[input_dim_W]
        enc_dims_nC=[input_n_C]

        output_n_C=input_n_C
        mi = input_n_C

        with tf.variable_scope('generator_'+g_name) as scope:

            #building generator encoder convolutional layers
            self.g_enc_conv_layers =[]
            enc_dims=[]
            for conv_count, (mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init) in enumerate(g_enc_sizes['conv_layers'], 1):

                name = "g_conv_layer_%s" % conv_count
                layer = ConvLayer(name, mi, mo,
                                  filter_sz, stride,
                                  apply_batch_norm, keep_prob,
                                  act_f, w_init)

                # spatial size after a SAME-padded strided convolution
                input_dim_H = int(np.ceil(float(input_dim_H)/stride))
                input_dim_W = int(np.ceil(float(input_dim_W)/stride))

                enc_dims_H.append(input_dim_H)
                enc_dims_W.append(input_dim_W)
                enc_dims_nC.append(mo)
                self.g_enc_conv_layers.append(layer)

                mi = mo

            dec_dims_H = [output_dim_H]
            dec_dims_W = [output_dim_W]

            #building generator decoder deconvolutional layers
            #calculate outputsize for each deconvolution step, walking the
            #decoder specs backwards from the target output resolution
            for _, _, stride, _, _, _, _ in reversed(g_dec_sizes['deconv_layers']):

                output_dim_H = int(np.ceil(float(output_dim_H)/stride))
                output_dim_W = int(np.ceil(float(output_dim_W)/stride))

                dec_dims_H.append(output_dim_H)
                dec_dims_W.append(output_dim_W)

            dec_dims_H = list(reversed(dec_dims_H))
            dec_dims_W = list(reversed(dec_dims_W))

            self.g_dec_conv_layers=[]

            #number of channels of last convolution and of first transposed convolution
            # the layer will be reshaped to have dimensions [?, 1, 1, mi*enc_dims_W[-1]*enc_dims_H[-1]]
            mi=mi*enc_dims_W[-1]*enc_dims_H[-1]
            self.n_C_last=mi

            for deconv_count, (mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init) in enumerate(g_dec_sizes['deconv_layers'], 1):

                if deconv_count == 1:
                    # first decoder layer: no skip connection yet, fan-in = mi
                    name = 'g_deconv_layer_%s' %deconv_count
                    #print(name)

                    layer = DeconvLayer(
                        name, mi, mo, [dec_dims_H[deconv_count], dec_dims_W[deconv_count]],
                        filter_sz, stride, apply_batch_norm, keep_prob,
                        act_f, w_init
                    )

                    self.g_dec_conv_layers.append(layer)
                    mi = mo

                if deconv_count > 1:
                    # later decoder layers see [decoder output ; skip], so 2*mi
                    name = 'g_deconv_layer_%s' %deconv_count
                    #print(name)

                    layer = DeconvLayer(
                        name, 2*mi, mo, [dec_dims_H[deconv_count], dec_dims_W[deconv_count]],
                        filter_sz, stride, apply_batch_norm, keep_prob,
                        act_f, w_init
                    )

                    self.g_dec_conv_layers.append(layer)
                    mi = mo


            assert conv_count==deconv_count, '\n Number of convolutional and deconvolutional layers do not coincide in \n encoder and decoder part of generator '+g_name

            # self.g_dims_H = dec_dims_H
            # self.g_dims_W = dims_W
            self.conv_count=conv_count
            self.deconv_count=deconv_count
            self.g_name=g_name

    def g_forward(self, X, reuse=None, is_training=True):
        """Run the U-Net: encode X, keep every encoder activation for the
        skip connections, flatten to (?,1,1,n_C_last) if the encoder did not
        reach 1x1, then decode with skip concatenations.
        """
        print('Generator_'+self.g_name)

        output = X
        print('Input for generator encoder shape', X.get_shape())

        skip_conv_outputs=[]
        #convolutional encoder layers

        for i, layer in enumerate(self.g_enc_conv_layers, 1):
            output = layer.forward(output,
                                   reuse,
                                   is_training)

            skip_conv_outputs.append(output)
            # print('After conv layer%i' %i)
            # print('shape: ', output.get_shape())

        assert i == self.conv_count

        # bottleneck: force a 1x1 spatial map so the first deconv's expected
        # fan-in (n_C_last) matches
        if (output.get_shape().as_list()[1], output.get_shape().as_list()[2]) != (1, 1):
            output = tf.reshape(
                output,
                [-1, 1, 1, self.n_C_last]
            )

        print('Output of generator encoder, \n and input for generator decoder shape', output.get_shape())

        for i, layer in enumerate(self.g_dec_conv_layers, 1):

            # encoder activation paired with this decoder stage
            skip_layer=self.conv_count - i
            if i > 1:
                #print('After deconv layer %i' %i)
                #print('main path', output.get_shape())
                #print('secondary path', skip_conv_outputs[skip_layer].get_shape())
                output = tf.concat([output, skip_conv_outputs[skip_layer]], axis =3)
                #print('After concat shape', output.get_shape())
            output = layer.forward(output,
                                   reuse,
                                   is_training)

            # print('After deconv layer %i' %i)
            # print('Shape', output.get_shape())

        assert i == self.deconv_count

        print('Generator output shape', output.get_shape())
        return output
generator decoder shape', output.get_shape()) + + for i, layer in enumerate(self.g_dec_conv_layers, 1): + + skip_layer=self.conv_count - i + if i > 1: + #print('After deconv layer %i' %i) + #print('main path', output.get_shape()) + #print('secondary path', skip_conv_outputs[skip_layer].get_shape()) + output = tf.concat([output, skip_conv_outputs[skip_layer]], axis =3) + #print('After concat shape', output.get_shape()) + output = layer.forward(output, + reuse, + is_training) + + # print('After deconv layer %i' %i) + # print('Shape', output.get_shape()) + + assert i == self.deconv_count + + print('Generator output shape', output.get_shape()) + return output + +#same as pix2pixGenerator with input noise +class bicycleGenerator(object): + + def __init__(self, X, output_dim_H, output_dim_W, g_sizes_enc, g_sizes_dec, g_name): + + _, input_dim_H, input_dim_W, input_n_C = X.get_shape().as_list() + + enc_dims_H=[input_dim_H] + enc_dims_W=[input_dim_W] + enc_dims_nC=[input_n_C] + + output_n_C=input_n_C + self.latent_dims = g_sizes_enc['latent_dims'] + mi = input_n_C + self.latent_dims + + with tf.variable_scope('generator_'+g_name) as scope: + + #building generator encoder convolutional layers + self.g_enc_conv_layers =[] + enc_dims=[] + for conv_count, (mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init) in enumerate(g_sizes_enc['conv_layers'], 1): + + name = "g_conv_layer_%s" % conv_count + layer = ConvLayer(name, mi, mo, + filter_sz, stride, + apply_batch_norm, keep_prob, + act_f, w_init) + + input_dim_H = int(np.ceil(float(input_dim_H)/stride)) + input_dim_W = int(np.ceil(float(input_dim_W)/stride)) + + enc_dims_H.append(input_dim_H) + enc_dims_W.append(input_dim_W) + enc_dims_nC.append(mo) + self.g_enc_conv_layers.append(layer) + + mi = mo + + dec_dims_H = [output_dim_H] + dec_dims_W = [output_dim_W] + + #building generator decoder deconvolutional layers + #calculate outputsize for each deconvolution step + for _, _, stride, _, _, _, _ in 
reversed(g_sizes_dec['deconv_layers']): + + output_dim_H = int(np.ceil(float(output_dim_H)/stride)) + output_dim_W = int(np.ceil(float(output_dim_W)/stride)) + + dec_dims_H.append(output_dim_H) + dec_dims_W.append(output_dim_W) + + dec_dims_H = list(reversed(dec_dims_H)) + dec_dims_W = list(reversed(dec_dims_W)) + + self.g_dec_conv_layers=[] + + #number of channels of last convolution and of first transposed convolution + # the layer will be reshaped to have dimensions [?, 1, 1, mi*enc_dims_W[-1]*enc_dims_H[-1]] + mi=mi*enc_dims_W[-1]*enc_dims_H[-1] + self.n_C_last=mi + + for deconv_count, (mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init) in enumerate(g_sizes_dec['deconv_layers'], 1): + + if deconv_count == 1: + name = 'g_deconv_layer_%s' %deconv_count + #print(name) + + layer = DeconvLayer( + name, mi, mo, [dec_dims_H[deconv_count], dec_dims_W[deconv_count]], + filter_sz, stride, apply_batch_norm, keep_prob, + act_f, w_init + ) + + self.g_dec_conv_layers.append(layer) + mi = mo + + if deconv_count > 1: + name = 'g_deconv_layer_%s' %deconv_count + #print(name) + + layer = DeconvLayer( + name, 2*mi, mo, [dec_dims_H[deconv_count], dec_dims_W[deconv_count]], + filter_sz, stride, apply_batch_norm, keep_prob, + act_f, w_init + ) + + self.g_dec_conv_layers.append(layer) + mi = mo + + + assert conv_count==deconv_count, '\n Number of convolutional and deconvolutional layers do not coincide in \n encoder and decoder part of generator '+g_name + + # self.g_dims_H = dec_dims_H + # self.g_dims_W = dims_W + self.conv_count=conv_count + self.deconv_count=deconv_count + self.g_name=g_name + + def g_forward(self, X, z, reuse=None, is_pretraining=None, is_training=True): + + if is_pretraining: + z=X + elif not is_pretraining: + z = tf.reshape(z, [tf.shape(X)[0], 1, 1, self.latent_dims]) + z = tf.tile(z, [1, tf.shape(X)[1], tf.shape(X)[2], 1]) + + print('Generator_'+self.g_name) + + output = X + output=tf.concat([X,z], axis=3) + + print('Input for generator encoded 
shape', X.get_shape()) + + skip_conv_outputs=[] + #convolutional encoder layers + + for i, layer in enumerate(self.g_enc_conv_layers, 1): + output = layer.forward(output, + reuse, + is_training) + + skip_conv_outputs.append(output) + #print('After conv layer%i' %i) + #print('shape: ', output.get_shape()) + + assert i == self.conv_count + + if (output.get_shape().as_list()[1], output.get_shape().as_list()[2]) != (1, 1): + output = tf.reshape( + output, + [-1, 1, 1, self.n_C_last] + ) + + print('Output of generator encoder, \n and input for generator decoder shape', output.get_shape()) + + for i, layer in enumerate(self.g_dec_conv_layers, 1): + + skip_layer=self.conv_count - i + if i > 1: + #print('After deconv layer %i' %i) + #print('main path', output.get_shape()) + #print('secondary path', skip_conv_outputs[skip_layer].get_shape()) + output = tf.concat([output, skip_conv_outputs[skip_layer]], axis =3) + #print('After concat shape', output.get_shape()) + output = layer.forward(output, + reuse, + is_training) + + # print('After deconv layer %i' %i) + # print('Shape', output.get_shape()) + + assert i == self.deconv_count + + print('Generator output shape', output.get_shape()) + return output + +#(residual) conditional generator, label injected at every step +class condGenerator(object): + def __init__(self, dim_y, dim_H, dim_W, g_sizes, g_name): + + self.residual=False + for key in g_sizes: + if not 'block' in key: + self.residual=False + else : + self.residual=True + + #dimensions of input + latent_dims = g_sizes['z'] + + #dimensions of output generated images + dims_H =[dim_H] + dims_W =[dim_W] + mi = latent_dims + dim_y + + if not self.residual: + + print('Convolutional architecture detected for generator ' + g_name) + + with tf.variable_scope('generator_'+g_name) as scope: + + #building generator dense layers + self.g_dense_layers = [] + count = 0 + + for mo, apply_batch_norm, keep_prob, act_f, w_init in g_sizes['dense_layers']: + name = 'g_dense_layer_%s' %count 
+ #print(name) + count += 1 + layer = DenseLayer(name, mi, mo, + apply_batch_norm, keep_prob, + act_f=act_f , w_init=w_init + ) + + self.g_dense_layers.append(layer) + mi = mo + mi = mi + dim_y + + #deconvolutional layers + #calculating the last dense layer mo + + for _, _, stride, _, _, _, _, in reversed(g_sizes['conv_layers']): + + dim_H = int(np.ceil(float(dim_H)/stride)) + dim_W = int(np.ceil(float(dim_W)/stride)) + + dims_H.append(dim_H) + dims_W.append(dim_W) + + dims_H = list(reversed(dims_H)) + dims_W = list(reversed(dims_W)) + self.g_dims_H = dims_H + self.g_dims_W = dims_W + + #last dense layer: projection + projection, bn_after_project, keep_prob, act_f, w_init = g_sizes['projection'][0] + + mo = (projection)*dims_H[0]*dims_W[0] + name = 'g_dense_layer_%s' %count + count+=1 + #print(name) + self.g_final_layer = DenseLayer(name, mi, mo, not bn_after_project, keep_prob, act_f, w_init) + # self.g_dense_layers.append(layer) + + mi = projection+dim_y + self.g_conv_layers=[] + + for i, (mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init) in enumerate(g_sizes['conv_layers'] , 1): + name = 'g_conv_layer_%s' %count + count +=1 + + layer = DeconvLayer( + name, mi, mo, [dims_H[i], dims_W[i]], + filter_sz, stride, apply_batch_norm, keep_prob, + act_f, w_init + ) + + self.g_conv_layers.append(layer) + mi = mo + mi = mi + dim_y + + if self.residual: + + print('Residual convolutional architecture detected for generator ' + g_name) + with tf.variable_scope('generator_'+g_name) as scope: + + #dense layers + self.g_dense_layers = [] + count = 0 + + mi = latent_dims + + for mo, apply_batch_norm, keep_prob, act_f, w_init in g_sizes['dense_layers']: + name = 'g_dense_layer_%s' %count + count += 1 + + layer = DenseLayer( + name, mi, mo, + apply_batch_norm, keep_prob, + f=act_f, w_init=w_init + ) + self.g_dense_layers.append(layer) + mi = mo + + #checking generator architecture + + g_steps = 0 + for key in g_sizes: + if 'deconv' in key: + if not 'shortcut' in 
key: + g_steps+=1 + + g_block_n=0 + g_layer_n=0 + + for key in g_sizes: + if 'block' and 'shortcut' in key: + g_block_n+=1 + if 'deconv_layer' in key: + g_layer_n +=1 + + assert g_block_n+g_layer_n==g_steps, '\nCheck keys in g_sizes, \n sum of generator steps do not coincide with sum of convolutional layers and convolutional blocks' + + layers_output_sizes={} + blocks_output_sizes={} + + #calculating the output size for each transposed convolutional step + for key, item in reversed(list(g_sizes.items())): + + if 'deconv_layer' in key: + + _, _, stride, _, _, _, _, = g_sizes[key][0] + layers_output_sizes[g_layer_n-1]= [dim_H, dim_W] + + dim_H = int(np.ceil(float(dim_H)/stride)) + dim_W = int(np.ceil(float(dim_W)/stride)) + dims_H.append(dim_H) + dims_W.append(dim_W) + + g_layer_n -= 1 + + + if 'deconvblock_layer' in key: + + for _ ,_ , stride, _, _, _, _, in g_sizes[key]: + + dim_H = int(np.ceil(float(dim_H)/stride)) + dim_W = int(np.ceil(float(dim_W)/stride)) + dims_H.append(dim_H) + dims_W.append(dim_W) + + blocks_output_sizes[g_block_n-1] = [[dims_H[j],dims_W[j]] for j in range(1, len(g_sizes[key])+1)] + g_block_n -=1 + + dims_H = list(reversed(dims_H)) + dims_W = list(reversed(dims_W)) + + #saving for later + self.g_dims_H = dims_H + self.g_dims_W = dims_W + + #final dense layer + projection, bn_after_project, keep_prob, act_f, w_init = g_sizes['projection'][0] + + mo = projection*dims_H[0]*dims_W[0] + name = 'g_dense_layer_%s' %count + layer = DenseLayer(name, mi, mo, not bn_after_project, keep_prob, act_f, w_init) + self.g_dense_layers.append(layer) + + #deconvolution input channel number + mi = projection + self.g_blocks=[] + + block_n=0 #keep count of the block number + layer_n=0 #keep count of conv layer number + i=0 + for key in g_sizes: + + if 'block' and 'shortcut' in key: + + g_block = DeconvBlock(block_n, + mi, blocks_output_sizes, g_sizes, + ) + self.g_blocks.append(g_block) + + mo, _, _, _, _, _, _, = g_sizes['deconvblock_layer_'+str(block_n)][-1] + 
mi = mo + block_n+=1 + count+=1 + i+=1 + + if 'deconv_layer' in key: + + name = 'g_conv_layer_{0}'.format(layer_n) + + mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = g_sizes[key][0] + + g_conv_layer = DeconvLayer( + name, mi, mo, layers_output_sizes[layer_n], + filter_sz, stride, apply_batch_norm, keep_prob, + act_f, w_init + ) + self.g_blocks.append(g_conv_layer) + + mi=mo + layer_n+=1 + count+=1 + i+=1 + + assert i==g_steps, 'Check convolutional layer and block building, steps in building do not coincide with g_steps' + assert g_steps==block_n+layer_n, 'Check keys in g_sizes' + + self.g_sizes=g_sizes + self.g_name = g_name + self.projection = projection + self.bn_after_project = bn_after_project + self.dim_y = dim_y + + def g_forward(self, Z, y, reuse=None, is_training=True): + + if not self.residual: + + print('Generator_'+self.g_name) + print('Deconvolution') + #dense layers + + output = Z + output = lin_concat(output, y, self.dim_y) + print('Input for deconvolution shape', output.get_shape()) + i=0 + for layer in self.g_dense_layers: + + + output = layer.forward(output, reuse, is_training) + output= lin_concat(output, y, self.dim_y) + #print('After dense layer and concat %i' %i) + #print('shape: ', output.get_shape()) + i+=1 + + output = self.g_final_layer.forward(output, reuse, is_training) + + output = tf.reshape( + output, + + [-1, self.g_dims_H[0], self.g_dims_W[0], self.projection] + + ) + #print('Reshaped output after projection', output.get_shape()) + + if self.bn_after_project: + output = tf.contrib.layers.batch_norm( + output, + decay=0.9, + updates_collections=None, + epsilon=1e-5, + scale=True, + is_training=is_training, + reuse=reuse, + scope='bn_after_project' + ) + # passing to deconv blocks + output = conv_concat(output, y, self.dim_y) + #print('After reshape and concat', output.get_shape()) + i=0 + for layer in self.g_conv_layers[:-1]: + i+=1 + output = layer.forward(output, reuse, is_training) + output = 
conv_concat(output, y, self.dim_y) + #print('After deconvolutional layer and concat %i' %i) + #print('shape: ', output.get_shape()) + + output=self.g_conv_layers[-1].forward(output, reuse, is_training) + print('Deconvoluted output shape', output.get_shape()) + return output + else: + + print('Generator_'+self.g_name) + print('Deconvolution') + #dense layers + + output = Z + print('Input for deconvolution shape', Z.get_shape()) + i=0 + for layer in self.g_dense_layers: + i+=1 + output = layer.forward(output, reuse, is_training) + #print('After dense layer %i' %i) + #print('shape: ', output.get_shape()) + + + output = tf.reshape( + output, + + [-1, self.g_dims_H[0], self.g_dims_W[0], self.projection] + + ) + + #print('Reshaped output after projection', output.get_shape()) + + if self.bn_after_project: + output = tf.contrib.layers.batch_norm( + output, + decay=0.9, + updates_collections=None, + epsilon=1e-5, + scale=True, + is_training=is_training, + reuse=reuse, + scope='bn_after_project' + ) + # passing to deconv blocks + + + i=0 + for block in self.g_blocks: + i+=1 + output = block.forward(output, + reuse, + is_training) + #print('After deconvolutional block %i' %i) + #print('shape: ', output.get_shape()) + + + print('Deconvoluted output shape', output.get_shape()) + return output + +#VARIATIONAL_AUTOENCODERS + +class denseEncoder: + + def __init__(self, X, e_sizes, e_name): + + latent_dims = e_sizes['z'] + + _, mi = X.get_shape().as_list() + + with tf.variable_scope('encoder'+e_name) as scope: + + self.e_layers=[] + + count=0 + for mo, apply_batch_norm, keep_prob, act_f, w_init in e_sizes['dense_layers']: + + name = 'layer_{0}'.format(count) + count +=1 + + layer = DenseLayer(name, mi, mo, + apply_batch_norm, keep_prob, + act_f, w_init + ) + + self.e_layers.append(layer) + mi = mo + + + name = 'e_last_dense_layer_mu' + w_init_last = e_sizes['readout_layer_w_init'] + #print(name) + self.e_final_layer_mu = DenseLayer(name, mi, latent_dims, + False, keep_prob=1, + 
act_f=lambda x: x, w_init=w_init_last) + name = 'e_last_dense_layer_sigma' + self.e_final_layer_sigma = DenseLayer(name, mi, latent_dims, + False, keep_prob=1, + act_f=tf.nn.softplus, w_init=w_init_last) + + self.e_name=e_name + self.latent_dims=latent_dims + + def e_forward(self, X, reuse = None, is_training=False): + + output=X + for layer in self.e_layers: + output = layer.forward(output, reuse, is_training) + + mu = self.e_final_layer_mu.forward(output, + reuse, + is_training) + + log_sigma = self.e_final_layer_sigma.forward(output, + reuse, + is_training) + + eps= tf.random_normal(shape=tf.shape(self.latent_dims), + mean=0, + stddev=1, + ) + + z = mu + tf.multiply(tf.exp(log_sigma),eps) + + print('Encoder output shape', z.get_shape()) + return z, mu, log_sigma + +class denseDecoder: + + def __init__(self, Z, output_dim, d_sizes, name): + + _, latent_dims=Z.get_shape().as_list() + mi = latent_dims + print(latent_dims) + with tf.variable_scope('decoder'+name) as scope: + + self.d_layers = [] + count = 0 + for mo, apply_batch_norm, keep_prob, act_f, w_init in d_sizes['dense_layers']: + + name = 'layer_{0}'.format(count) + count += 1 + + layer = DenseLayer(name, mi, mo, + apply_batch_norm, keep_prob, + act_f, w_init + ) + + self.d_layers.append(layer) + mi = mo + + name = 'layer_{0}'.format(count) + + last_dec_layer = DenseLayer(name, mi, output_dim, False, 1, + act_f=lambda x:x, w_init=d_sizes['readout_layer_w_init'] + ) + + self.d_layers.append(last_dec_layer) + + def d_forward(self, Z, reuse=None, is_training=False): + + output=Z + + for layer in self.d_layers: + output = layer.forward(output, reuse, is_training) + + return output + +class convEncoder(object): + + def __init__(self, X, e_sizes, e_name): + + _, dim_H, dim_W, mi = X.get_shape().as_list() + latent_dims=e_sizes['latent_dims'] + + self.residual=False + for key in e_sizes: + if 'block' in key: + self.residual=True + + if not self.residual: + print('Convolutional Network architecture detected for 
encoder '+ e_name) + + + with tf.variable_scope('encoder_'+e_name) as scope: + #building discriminator convolutional layers + + self.e_conv_layers =[] + count=0 + for mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init in e_sizes['conv_layers']: + + # make up a name - used for get_variable + name = "e_conv_layer_%s" % count + #print(name) + count += 1 + + layer = ConvLayer(name, mi, mo, + filter_sz, stride, + apply_batch_norm, keep_prob, + act_f, w_init) + + self.e_conv_layers.append(layer) + mi = mo + + dim_H = int(np.ceil(float(dim_H) / stride)) + dim_W = int(np.ceil(float(dim_W) / stride)) + + mi = mi * dim_H * dim_W + + #building encoder dense layers + + self.e_dense_layers = [] + for mo, apply_batch_norm, keep_prob, act_f, w_init in e_sizes['dense_layers']: + + name = 'e_dense_layer_%s' %count + #print(name) + count +=1 + + layer = DenseLayer(name, mi, mo, + apply_batch_norm, keep_prob, + act_f, w_init) + mi = mo + self.e_dense_layers.append(layer) + + #final logistic layer + + name = 'e_last_dense_layer_mu' + w_init_last = e_sizes['readout_layer_w_init'] + #print(name) + self.e_final_layer_mu = DenseLayer(name, mi, latent_dims, + False, keep_prob=1, + act_f=lambda x: x, w_init=w_init_last) + name = 'e_last_dense_layer_sigma' + self.e_final_layer_sigma = DenseLayer(name, mi, latent_dims, + False, keep_prob=1, + act_f=tf.nn.relu, w_init=w_init_last) + + self.e_name=e_name + self.latent_dims=latent_dims + else: + print('Residual Convolutional Network architecture detected for Encoder'+ e_name) + + with tf.variable_scope('encoder_'+e_name) as scope: + #building discriminator convolutional layers + + self.e_blocks = [] + #count conv blocks + e_steps = 0 + for key in e_sizes: + if 'conv' in key: + if not 'shortcut' in key: + e_steps+=1 + + e_block_n=0 + e_layer_n=0 + + + for key in e_sizes: + + if 'block' and 'shortcut' in key: + + e_block = ConvBlock(e_block_n, + mi, e_sizes, + ) + self.e_blocks.append(e_block) + + mo, _, _, _, _, _, _, = 
e_sizes['convblock_layer_'+str(e_block_n)][-1] + mi = mo + dim_H = e_block.output_dim(dim_H) + dim_W = e_block.output_dim(dim_W) + e_block_n+=1 + + + if 'conv_layer' in key: + + name = 'e_conv_layer_{0}'.format(e_layer_n) + + mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = e_sizes[key][0] + + + e_conv_layer = ConvLayer(name, mi, mo, + filter_sz, stride, + apply_batch_norm, keep_prob, + act_f, w_init + ) + + self.e_blocks.append(e_conv_layer) + + mi = mo + dim_W = int(np.ceil(float(dim_W) / stride)) + dim_H = int(np.ceil(float(dim_H) / stride)) + e_layer_n+=1 + + assert e_block_n+e_layer_n==e_steps, '\nCheck keys in d_sizes, \n total convolution steps do not mach sum between convolutional blocks and convolutional layers' + + count=e_steps + + mi = mi * dim_H * dim_W + + #building encoder dense layers + + self.e_dense_layers = [] + for mo, apply_batch_norm, keep_prob, act_f, w_init in e_sizes['dense_layers']: + + name = 'e_dense_layer_%s' %count + #print(name) + count +=1 + + layer = DenseLayer(name, mi, mo, + apply_batch_norm, keep_prob, + act_f, w_init) + mi = mo + self.e_dense_layers.append(layer) + + #final logistic layer + + + w_init_last = e_sizes['readout_layer_w_init'] + #print(name) + name = 'e_last_dense_layer_mu' + self.e_final_layer_mu = DenseLayer(name, mi, latent_dims, + 'bn', keep_prob=1, + act_f=lambda x: x, w_init=w_init_last) + name = 'e_last_dense_layer_sigma' + self.e_final_layer_sigma = DenseLayer(name, mi, latent_dims, + 'bn', keep_prob=1, + act_f=tf.nn.relu, w_init=w_init_last) + + self.e_name=e_name + self.latent_dims=latent_dims + + def e_forward(self, X, reuse = None, is_training=True): + + if not self.residual: + print('Encoder_'+self.e_name) + print('Convolution') + + output = X + print('Input for convolution shape ', X.get_shape()) + i=0 + for layer in self.e_conv_layers: + i+=1 + # print('Convolution_layer_%i' %i) + # print('Input shape', output.get_shape()) + output = layer.forward(output, + reuse, + is_training) + 
#print('After convolution shape', output.get_shape()) + + output = tf.contrib.layers.flatten(output) + #print('After flatten shape', output.get_shape()) + i=0 + for layer in self.e_dense_layers: + #print('Dense weights %i' %i) + #print(layer.W.get_shape()) + output = layer.forward(output, + reuse, + is_training) + i+=1 + # print('After dense layer_%i' %i) + # print('Shape', output.get_shape()) + + mu = self.e_final_layer_mu.forward(output, + reuse, + is_training) + + sigma = self.e_final_layer_sigma.forward(output, + reuse, + is_training)+1e-6 + + eps= tf.random_normal(shape=tf.shape(self.latent_dims), + mean=0, + stddev=1, + ) + + z = mu + sigma*eps + + print('Encoder output shape', z.get_shape()) + return z, mu, sigma + else: + print('Residual encoder_'+self.e_name) + print('Convolution') + + output = X + + i=0 + print('Input for convolution shape ', X.get_shape()) + for block in self.e_blocks: + i+=1 + #print('Convolution_block_%i' %i) + #print('Input shape', output.get_shape()) + output = block.forward(output, + reuse, + is_training) + #print('After block shape', output.get_shape()) + + + output = tf.contrib.layers.flatten(output) + #print('After flatten shape', output.get_shape()) + + i=0 + for layer in self.e_dense_layers: + #print('Dense weights %i' %i) + #print(layer.W.get_shape()) + output = layer.forward(output, + reuse, + is_training) + i+=1 + # print('After dense layer_%i' %i) + # print('Shape', output.get_shape()) + + mu = self.e_final_layer_mu.forward(output, + reuse, + is_training) + + sigma = self.e_final_layer_sigma.forward(output, + reuse, + is_training)+1e-6 + + eps= tf.random_normal(shape=tf.shape(self.latent_dims), + mean=0, + stddev=1, + ) + + z = mu + sigma*eps + + print('Encoder output shape', z.get_shape()) + return z, mu, sigma + +class convDecoder(object): + + def __init__(self, Z, dim_H, dim_W, d_sizes, d_name): + + self.residual=False + for key in d_sizes: + if not 'block' in key: + self.residual=False + else : + self.residual=True + + 
#dimensions of input + _, latent_dims, = Z.get_shape().as_list() + + #dimensions of output generated images + dims_H =[dim_H] + dims_W =[dim_W] + mi = latent_dims + + if not self.residual: + + print('Convolutional architecture detected for decoder ' + d_name) + + with tf.variable_scope('decoder_'+d_name) as scope: + + #building generator dense layers + self.d_dense_layers = [] + count = 0 + + for mo, apply_batch_norm, keep_prob, act_f, w_init in d_sizes['dense_layers']: + name = 'd_dense_layer_%s' %count + #print(name) + count += 1 + layer = DenseLayer(name, mi, mo, + apply_batch_norm, keep_prob, + act_f=act_f , w_init=w_init + ) + + self.d_dense_layers.append(layer) + mi = mo + + #deconvolutional layers + #calculating the last dense layer mo + + for _, _, stride, _, _, _, _, in reversed(d_sizes['conv_layers']): + + dim_H = int(np.ceil(float(dim_H)/stride)) + dim_W = int(np.ceil(float(dim_W)/stride)) + + dims_H.append(dim_H) + dims_W.append(dim_W) + + dims_H = list(reversed(dims_H)) + dims_W = list(reversed(dims_W)) + self.d_dims_H = dims_H + self.d_dims_W = dims_W + #last dense layer: projection + projection, bn_after_project, keep_prob, act_f, w_init = d_sizes['projection'][0] + + mo = projection*dims_H[0]*dims_W[0] + name = 'd_dense_layer_%s' %count + count+=1 + self.d_final_layer = DenseLayer(name, mi, mo, not bn_after_project, keep_prob, act_f, w_init) + + mi = projection + self.d_deconv_layers=[] + + for i, (mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init) in enumerate(d_sizes['conv_layers'] , 1): + name = 'd_conv_layer_%s' %count + count +=1 + + layer = DeconvLayer( + name, mi, mo, [dims_H[i], dims_W[i]], + filter_sz, stride, apply_batch_norm, keep_prob, + act_f, w_init + ) + + self.d_deconv_layers.append(layer) + mi = mo + + if self.residual: + + print('Residual convolutional architecture detected for generator ' + g_name) + with tf.variable_scope('generator_'+g_name) as scope: + + #dense layers + self.g_dense_layers = [] + count = 0 + + 
mi = latent_dims + + for mo, apply_batch_norm, keep_prob, act_f, w_init in g_sizes['dense_layers']: + name = 'g_dense_layer_%s' %count + count += 1 + + layer = DenseLayer( + name, mi, mo, + apply_batch_norm, keep_prob, + f=act_f, w_init=w_init + ) + self.g_dense_layers.append(layer) + mi = mo + + #checking generator architecture + + g_steps = 0 + for key in g_sizes: + if 'deconv' in key: + if not 'shortcut' in key: + g_steps+=1 + + g_block_n=0 + g_layer_n=0 + + for key in g_sizes: + if 'block' and 'shortcut' in key: + g_block_n+=1 + if 'deconv_layer' in key: + g_layer_n +=1 + + assert g_block_n+g_layer_n==g_steps, '\nCheck keys in g_sizes, \n sum of generator steps do not coincide with sum of convolutional layers and convolutional blocks' + + layers_output_sizes={} + blocks_output_sizes={} + + #calculating the output size for each transposed convolutional step + for key, item in reversed(list(g_sizes.items())): + + if 'deconv_layer' in key: + + _, _, stride, _, _, _, _, = g_sizes[key][0] + layers_output_sizes[g_layer_n-1]= [dim_H, dim_W] + + dim_H = int(np.ceil(float(dim_H)/stride)) + dim_W = int(np.ceil(float(dim_W)/stride)) + dims_H.append(dim_H) + dims_W.append(dim_W) + + g_layer_n -= 1 + + + if 'deconvblock_layer' in key: + + for _ ,_ , stride, _, _, _, _, in g_sizes[key]: + + dim_H = int(np.ceil(float(dim_H)/stride)) + dim_W = int(np.ceil(float(dim_W)/stride)) + dims_H.append(dim_H) + dims_W.append(dim_W) + + blocks_output_sizes[g_block_n-1] = [[dims_H[j],dims_W[j]] for j in range(1, len(g_sizes[key])+1)] + g_block_n -=1 + + dims_H = list(reversed(dims_H)) + dims_W = list(reversed(dims_W)) + + #saving for later + self.g_dims_H = dims_H + self.g_dims_W = dims_W + + #final dense layer + projection, bn_after_project, keep_prob, act_f, w_init = g_sizes['projection'][0] + + mo = projection*dims_H[0]*dims_W[0] + name = 'g_dense_layer_%s' %count + layer = DenseLayer(name, mi, mo, not bn_after_project, keep_prob, act_f, w_init) + self.g_dense_layers.append(layer) + + 
#deconvolution input channel number + mi = projection + self.g_blocks=[] + + block_n=0 #keep count of the block number + layer_n=0 #keep count of conv layer number + i=0 + for key in g_sizes: + + if 'block' and 'shortcut' in key: + + g_block = DeconvBlock(block_n, + mi, blocks_output_sizes, g_sizes, + ) + self.g_blocks.append(g_block) + + mo, _, _, _, _, _, _, = g_sizes['deconvblock_layer_'+str(block_n)][-1] + mi = mo + block_n+=1 + count+=1 + i+=1 + + if 'deconv_layer' in key: + + name = 'g_conv_layer_{0}'.format(layer_n) + + mo, filter_sz, stride, apply_batch_norm, keep_prob, act_f, w_init = g_sizes[key][0] + + g_conv_layer = DeconvLayer( + name, mi, mo, layers_output_sizes[layer_n], + filter_sz, stride, apply_batch_norm, keep_prob, + act_f, w_init + ) + self.g_blocks.append(g_conv_layer) + + mi=mo + layer_n+=1 + count+=1 + i+=1 + + assert i==g_steps, 'Check convolutional layer and block building, steps in building do not coincide with g_steps' + assert g_steps==block_n+layer_n, 'Check keys in g_sizes' + + self.d_sizes=d_sizes + self.d_name = d_name + self.projection = projection + self.bn_after_project = bn_after_project + + def d_forward(self, Z, reuse=None, is_training=True): + + if not self.residual: + + print('Decoder_'+self.d_name) + print('Deconvolution') + #dense layers + + output = Z + print('Input for deconvolution shape', Z.get_shape()) + i=0 + for layer in self.d_dense_layers: + output = layer.forward(output, reuse, is_training) + # print('After dense layer %i' %i) + # print('shape: ', output.get_shape()) + i+=1 + + output = self.d_final_layer.forward(output, reuse, is_training) + + output = tf.reshape( + output, + + [-1, self.d_dims_H[0], self.d_dims_W[0], self.projection] + + ) + + print('Reshaped output after projection', output.get_shape()) + + if self.bn_after_project: + output = tf.contrib.layers.batch_norm( + output, + decay=0.9, + updates_collections=None, + epsilon=1e-5, + scale=True, + is_training=is_training, + reuse=reuse, + 
scope='bn_after_project' + ) + # passing to deconv blocks + + i=0 + for layer in self.d_deconv_layers: + i+=1 + output = layer.forward(output, reuse, is_training) + # print('After deconvolutional layer %i' %i) + # print('shape: ', output.get_shape()) + + + print('Deconvoluted output shape', output.get_shape()) + return output + else: + + print('Generator_'+self.g_name) + print('Deconvolution') + #dense layers + + output = Z + print('Input for deconvolution shape', Z.get_shape()) + i=0 + for layer in self.g_dense_layers: + i+=1 + output = layer.forward(output, reuse, is_training) + #print('After dense layer %i' %i) + #print('shape: ', output.get_shape()) + + + output = tf.reshape( + output, + + [-1, self.g_dims_H[0], self.g_dims_W[0], self.projection] + + ) + + #print('Reshaped output after projection', output.get_shape()) + + if self.bn_after_project: + output = tf.contrib.layers.batch_norm( + output, + decay=0.9, + updates_collections=None, + epsilon=1e-5, + scale=True, + is_training=is_training, + reuse=reuse, + scope='bn_after_project' + ) + # passing to deconv blocks + + + i=0 + for block in self.g_blocks: + i+=1 + output = block.forward(output, + reuse, + is_training) + #print('After deconvolutional block %i' %i) + #print('shape: ', output.get_shape()) + + + print('Deconvoluted output shape', output.get_shape()) + return output + + diff --git a/architectures/utils/__init__.py b/architectures/utils/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/architectures/utils/__init__.py diff --git a/architectures/utils/toolbox.py b/architectures/utils/toolbox.py new file mode 100644 index 0000000..601eb14 --- /dev/null +++ b/architectures/utils/toolbox.py @@ -0,0 +1,807 @@ +import numpy as np +import os +import math +import tensorflow as tf +import pickle +import matplotlib.pyplot as plt + +rnd_seed=1 + +def conv_concat(X, y, y_dim): + + yb = tf.reshape(y, [tf.shape(X)[0], 1, 1, y_dim]) + yb = tf.tile(yb, [1, tf.shape(X)[1], tf.shape(X)[2] 
,1]) + output = tf.concat([X, yb], 3) + return output + +def lin_concat(X, y, y_dim): + + yb = tf.reshape(y, [tf.shape(X)[0], y_dim]) + output = tf.concat([X, yb], 1) + + return output + +def lrelu(x, alpha=0.2): + + """ + Implements the leakyRELU function: + + inputs X, returns X if X>0, returns alpha*X if X<0 + """ + + + return tf.maximum(alpha*x,x) + +def evaluation(Y_pred, Y): + + """ + Returns the accuracy by comparing the convoluted output Y_hat + with the labels of the samples Y + + """ + + correct = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y, 1)) + accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) + + return accuracy + +def supervised_random_mini_batches(X, Y, mini_batch_size, seed): + + """ + Creates a list of random mini_batches from (X, Y) + + Arguments: + X -- input data, of shape (number of examples, input size) + Y -- true "label" one hot matrix of shape (number of examples, n_classes) + mini_batch_size -- size of the mini-batches, integer + + Returns: + mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y) + """ + + + m = X.shape[0] #number of examples in set + n_classes = Y.shape[1] + mini_batches=[] + + np.random.seed(seed) + permutation = list(np.random.permutation(m)) + #print('Zeroth element of batch permutation:', permutation[0]) + shuffled_X = X[permutation,:] + shuffled_Y = Y[permutation,:] + #partition of (shuffled_X, shuffled_Y) except the last mini_batch + + num_complete_mini_batches = math.floor(m/mini_batch_size) + for k in range(num_complete_mini_batches): + mini_batch_X = shuffled_X[k*mini_batch_size:(k+1)*mini_batch_size,:] + mini_batch_Y = shuffled_Y[k*mini_batch_size:(k+1)*mini_batch_size,:] + + mini_batch = (mini_batch_X, mini_batch_Y) + mini_batches.append(mini_batch) + + # handling the case of last mini_batch < mini_batch_size + if m % mini_batch_size !=0: + + mini_batch_X = shuffled_X[mini_batch_size*num_complete_mini_batches:m,:] + mini_batch_Y = shuffled_Y[mini_batch_size*num_complete_mini_batches:m,:] + + 
mini_batch = (mini_batch_X, mini_batch_Y) + mini_batches.append(mini_batch) + + return mini_batches + +def unsupervised_random_mini_batches(X, mini_batch_size, seed): + + """ + Creates a list of random mini_batches from (X) + + Arguments: + X -- input data, of shape (number of examples, input size) + mini_batch_size -- size of the mini-batches, integer + + Returns: + mini_batches -- list of mini_batch_X + """ + + m = X.shape[0] #number of examples in set + mini_batches=[] + + np.random.seed(seed) + permutation = list(np.random.permutation(m)) + #print('Zeroth element of batch permutation:', permutation[0]) + shuffled_X = X[permutation,:] + + #partition of shuffled_X except the last mini_batch + + num_complete_mini_batches = math.floor(m/mini_batch_size) + for k in range(num_complete_mini_batches): + mini_batch_X = shuffled_X[k*mini_batch_size:(k+1)*mini_batch_size,:] + mini_batches.append(mini_batch_X) + + # handling the case of last mini_batch < mini_batch_size + if m % mini_batch_size !=0: + + mini_batch_X = shuffled_X[mini_batch_size*num_complete_mini_batches:m,:] + mini_batches.append(mini_batch_X) + + return mini_batches + +def unsupervised_random_mini_batches_labels(X, mini_batch_size, seed): + + """ + Creates a list of random mini_batches from (Y) + + Arguments: + X -- input data, of shape (number of examples, input size) + mini_batch_size -- size of the mini-batches, integer + + Returns: + mini_batches -- list of mini_batch_X + """ + + m = X.shape[0] #number of examples in set + mini_batches=[] + + np.random.seed(seed) + permutation = list(np.random.permutation(m)) + #print('Zeroth element of batch permutation:', permutation[0]) + shuffled_X = X[permutation] + + #partition of shuffled_X except the last mini_batch + + num_complete_mini_batches = math.floor(m/mini_batch_size) + for k in range(num_complete_mini_batches): + mini_batch_X = shuffled_X[k*mini_batch_size:(k+1)*mini_batch_size] + mini_batches.append(mini_batch_X) + + # handling the case of last 
mini_batch < mini_batch_size + if m % mini_batch_size !=0: + + mini_batch_X = shuffled_X[mini_batch_size*num_complete_mini_batches:m] + mini_batches.append(mini_batch_X) + + return mini_batches + +# def preprocess_true(true): + +# mean_true=true[true!=0].mean() +# std_true=np.std(true[np.where(true!=0)],axis=0) + +# true[true!=0]-=mean_true +# true=np.where(true==0,0,true/std_true) + +# return true, mean_true, std_true +# def preprocess_reco(reco): + +# mean_reco=np.mean(reco,axis=0) +# std_reco=np.std(reco,axis=0) + +# reco-=mean_reco +# reco=np.where(reco==0,0,reco/std_reco) + +# return reco, mean_reco, std_reco + +# def reconstruct(sample, mean, std): +# return np.where(sample!=0,sample*std+mean,0) + +def four_cells(img): + img = img.flatten() + return img[img.argsort()[-4:][::-1]] + +def normalise(X, norm_space=False): + + if norm_space: + X[:,0]=X[:,0]/X[:,0].max() + X[:,1]=X[:,1]/X[:,1].max() + max_X = X[:,2].max() + X[:,2]=X[:,2]/max_X + min_X = 0 + + else: + X=np.where(X>0,X,0) + #temp = X.reshape(X.shape[0],X.shape[1]*X.shape[2]*X.shape[3]) + #temp = temp.sum(axis=1) + max_X = np.max(X) + #max_X = np.max(temp.sum(axis=1)) + + X=X/max_X + min_X=0 + + return X, min_X, max_X + +def denormalise(X, min_X, max_X, norm_space=False): + #mask = X!=0 + #return np.where(X!=0, np.exp(X*max_X), 0) + denormalised = np.zeros_like(X) + if norm_space: + denormalised[:,0]=(X[:,0]*52).astype(int) + denormalised[:,1]=(X[:,1]*64).astype(int) + denormalised[:,2]=X[:,2]*max_X + return denormalised + + else: + return np.where(X!=0, X*max_X, 0) + +# def normalise(X): + +# X=np.where(X>12 ,X,0) +# #X=np.where(X>12,np.log(X),0) + +# # E_max = X.max() +# # E_min = np.min(X[X>0]) +# # X = np.where(X>0, X-(E_max+E_min)/2,0) +# # X/=X.max() + +# E_min=np.min(X[X>0]) +# X=np.where(X>0,X-E_min,0) +# E_max=np.max(X) +# X=np.where(X!=0,X/E_max,0) + +# return X, E_max, E_min + +# def denormalise(X, E_max, E_min): +# # X=X*E_max-E_min)/2 +# # X=np.where(X!=0, X+(E_max+E_min)/2, 0) + +# 
X=np.where(X!=0,X*E_max,0) +# X=np.where(X!=0, X+E_min, 0) +# #X=np.where(X!=0, np.exp(X), 0) + +# return X + +def delete_undetected_events_single(X): + + pos_rejected=[] + + for i in range(len(X)): + if np.array_equal(X[i],np.zeros_like(X[i])): + pos_rejected.append(i) + + X_filtered=np.delete(X,pos_rejected,axis=0) + + return X_filtered + +def delete_undetected_events_double(true, reco): + + pos_rejected=[] + + for i in range(len(true)): + if np.array_equal(reco[i],np.zeros_like(reco[i])) or np.array_equal(true[i],np.zeros_like(true[i])) : + pos_rejected.append(i) + + reco_filtered=np.delete(reco,pos_rejected,axis=0) + true_filtered=np.delete(true, pos_rejected, axis=0) + + assert len(true_filtered)==len(reco_filtered) + + return true_filtered, reco_filtered + +def delete_undetected_events_triple(true_p, true_K, reco): + + pos_rejected=[] + + for i in range(len(true_p)): + if np.array_equal(reco[i],np.zeros_like(reco[i])) or np.array_equal(true_p[i],np.zeros_like(true_p[i])) or np.array_equal(true_K[i],np.zeros_like(true_K[i])) : + pos_rejected.append(i) + + reco_filtered=np.delete(reco,pos_rejected,axis=0) + true_p_filtered=np.delete(true_p, pos_rejected, axis=0) + true_K_filtered=np.delete(true_K, pos_rejected, axis=0) + + assert len(true_p_filtered)==len(reco_filtered)==len(true_K_filtered) + + + return true_p_filtered, true_K_filtered, reco_filtered + +def selection(true, reco, n_cells, energy_fraction): + + pos_selected=[] + pos_rejected=[] + + for i in range(len(reco)): + tot_E=reco[i].sum() + reshaped=reco[i].flatten() + if (reshaped[reshaped.argsort()[-n_cells:][::-1]].sum())/tot_E 1: + + true, reco = load_batch(true_path, reco_path, 0) + #true, reco = delete_undetected_events_double(true, reco) + + #delete too noisy events + if select: + true_output, reco_output, _, _, = selection(true, reco, n_cells, energy_fraction) + + for i in range(1, n_batches): + + true_temp, reco_temp = load_batch(true_path, reco_path, i) + #true_temp, reco_temp = 
delete_undetected_events_double(true1, reco1) + + #delete too noisy events + if select: + true_temp, reco_temp, _, _, = selection(true_temp, reco_temp, n_cells, energy_fraction) + + true = np.concatenate((true, true_temp), axis=0) + reco = np.concatenate((reco, reco_temp), axis=0) + + if preprocess =='normalise': + + reco, min_reco, max_reco = normalise(reco) + true, min_true, max_true = normalise(true) + m = reco.shape[0] + train_size = m - test_size + + train_true = true[0:train_size] + test_true = true[train_size:m] + + train_reco = reco[0:train_size] + test_reco = reco[train_size:m] + + return train_true, test_true, min_true, max_true, train_reco, test_reco, min_reco, max_reco + + else: + m = reco.shape[0] + train_size = m - test_size + + train_true = true[0:train_size] + test_true = true[train_size:m] + + train_reco = reco[0:train_size] + test_reco = reco[train_size:m] + + return train_true, test_true, train_reco, test_reco + +def load_data_conditional(true_path, reco_path, n_batches, dim=None, preprocess=None, test_size=None): + + if n_batches == 1: + + #delete undetected particles + true1, reco1 = load_batch(true_path, reco_path, 0) + true2, reco2 = delete_undetected_events_double(true1, reco1) + ETs, reco_output = load_conditional(true2, reco2) + + elif n_batches > 1: + + true1, reco1 = load_batch(true_path, reco_path, 0) + true2, reco2 = delete_undetected_events_double(true1, reco1) + ETs, reco_output = load_conditional(true2, reco2) + + for i in range(1, n_batches): + + true1, reco1 = load_batch(true_path, reco_path, i) + true_temp, reco_temp = delete_undetected_events_double(true1, reco1) + ETs_temp, reco_output_temp = load_conditional(true_temp, reco_temp) + + #delete too noisy events + ETs = np.concatenate((ETs, ETs_temp), axis=0) + reco_output = np.concatenate((reco_output, reco_output_temp), axis=0) + + true = ETs + reco = reco_output + + if preprocess =='normalise': + + reco, min_reco, max_reco = normalise(reco, norm_space=False) + true, min_true, 
max_true = normalise(true, norm_space=True) + m = reco.shape[0] + train_size = m - test_size + + train_true = true[0:train_size] + test_true = true[train_size:m] + + train_reco = reco[0:train_size] + test_reco = reco[train_size:m] + + return train_true, test_true, min_true, max_true, train_reco, test_reco, min_reco, max_reco + + else: + m = reco.shape[0] + train_size = m - test_size + + train_true = true[0:train_size] + test_true = true[train_size:m] + + train_reco = reco[0:train_size] + test_reco = reco[train_size:m] + + return train_true, test_true, train_reco, test_reco + +def draw_one_sample(train_true, train_reco, preprocess=None, + min_true=None, max_true=None, min_reco=None, max_reco=None, + save=False, PATH=None): + + j = np.random.randint(len(train_true)) + + X_batch_A = train_true[j] + X_batch_B = train_reco[j] + + if preprocess=='normalise': + X_batch_A=denormalise(X_batch_A, min_true, max_true) + X_batch_B=denormalise(X_batch_B, min_reco, max_reco) + + n_H_A, n_W_A ,n_C = X_batch_A.shape + n_H_B, n_W_B ,n_C = X_batch_B.shape + + plt.subplot(2,2,1) + plt.imshow(X_batch_A.reshape(n_H_A,n_W_A)) + plt.xlabel('X') + plt.ylabel('Y') + plt.title('True E_T: {:.6g} MeV'.format(X_batch_A.sum())) + plt.subplots_adjust(wspace=0.2,hspace=0.2) + + plt.subplot(2,2,2) + plt.imshow(X_batch_B.reshape(n_H_B,n_W_B)) + plt.xlabel('X') + plt.ylabel('Y') + plt.title('Reco E_T: {:.6g} MeV'.format(X_batch_B.sum())) + plt.subplots_adjust(wspace=0.2,hspace=0.2) + + plt.suptitle('HCAL MC simulation\n ') + fig = plt.gcf() + fig.set_size_inches(11,4) + if not save: + plt.show() + else: + plt.savefig(PATH+'/HCAL_reconstruction_example_{0}.png'.format(j),dpi=80) + +def draw_one_sample_conditional(train_true, train_reco, preprocess=None, + min_true=None, max_true=None, min_reco=None, max_reco=None, + save=False, PATH=None): + + j = np.random.randint(len(train_true)) + + + if preprocess=='normalise': + X_batch_A=denormalise(train_true, min_true, max_true, norm_space=True) + 
X_batch_B=denormalise(train_reco, min_reco, max_reco) + + X_batch_A = X_batch_A[j] + X_batch_B = X_batch_B[j] + + n_H_B, n_W_B, n_C = X_batch_B.shape + + plt.imshow(X_batch_B.reshape(n_H_B,n_W_B)) + plt.xlabel('X') + plt.ylabel('Y') + plt.title('HCAL MC simulation \n X: {1} Y: {0} \n True E_T: {2:.6g} MeV, Reco MC E_T: {3:.6g}'.format(X_batch_A[0].sum(), X_batch_A[1].sum(), X_batch_A[2].sum(), X_batch_B.sum())) + fig = plt.gcf() + fig.set_size_inches(11,4) + if not save: + plt.show() + else: + plt.savefig(PATH+'/HCAL_reconstruction_example_{0}.png'.format(j),dpi=80) + +def draw_nn_sample(X_A, X_B, i, preprocess=False, + min_true=None, max_true=None, min_reco=None, max_reco=None, f=None, + save=True, is_training=False, total_iters=None, PATH=None): + + j = np.random.randint(len(X_A)) + + _, n_H_A, n_W_A, n_C = X_A.shape + _, n_H_B, n_W_B, _ = X_B.shape + + #draw the response for one particle + if i ==1 : + X_A = X_A[j] + X_B = X_B[j] + sample_nn = f(X_A.reshape(1, n_H_A, n_W_A, n_C)) + #draw the response for i particles + if i>1: + X_A = X_A[j:j+i] + X_B = X_B[j:j+i] + X_A = X_A.sum(axis=0) + X_B = X_B.sum(axis=0) + + sample_nn = f(X_A.reshape(1, n_H_A, n_W_A, n_C)) + + if preprocess=='normalise': + + X_A=denormalise(X_A, min_true, max_true) + X_B=denormalise(X_B, min_reco, max_reco) + sample_nn=denormalise(sample_nn, min_reco, max_reco) + + plt.subplot(1,3,1) + plt.gca().set_title('True ET {0:.6g}'.format(X_A.sum())) + plt.imshow(X_A.reshape(n_H_A,n_W_A)) + plt.xlabel('X') + plt.ylabel('Y') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + + plt.subplot(1,3,2) + plt.gca().set_title('MC Reco ET {0:.6g}'.format(X_B.sum())) + plt.imshow(X_B.reshape(n_H_B,n_W_B)) + plt.xlabel('X') + plt.ylabel('Y') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + + plt.subplot(1,3,3) + plt.gca().set_title('NN Reco ET {0:.6g}'.format(sample_nn.sum())) + plt.imshow(sample_nn.reshape(n_H_B,n_W_B)) + plt.xlabel('X') + plt.ylabel('Y') + plt.subplots_adjust(wspace=0.2,hspace=0.2) + if 
is_training: + plt.suptitle('At iter {0}'.format(total_iters)) + fig = plt.gcf() + fig.set_size_inches(10,8) + + if save: + if is_training: + plt.savefig(PATH+'/sample_at_iter_{0}.png'.format(total_iters),dpi=80) + else: + plt.savefig(PATH+'/nn_reco_sample_{0}.png'.format(j),dpi=80) + else: + plt.show() + +def draw_nn_sample_conditional(y, reco_MC, i, preprocess=False, + min_true=None, max_true=None, min_reco=None, max_reco=None, f=None, + save=True, is_training=False, total_iters=None, PATH=None): + + j = np.random.randint(len(reco_MC)) + + #_, n_H_A, n_W_A, n_C = X_A.shape + _, n_H_B, n_W_B, _ = reco_MC.shape + + #draw the response for one particle + + y =y[j:j+4] + reco_MC = reco_MC[j:j+4] + sample_nn = f(y.reshape(4, y.shape[1])).reshape(4, n_H_B, n_W_B) + + + if preprocess=='normalise': + + Y=denormalise(y, min_true, max_true, norm_space=True) + X_B_mc=denormalise(reco_MC, min_reco, max_reco) + X_B_nn=denormalise(sample_nn, min_reco, max_reco) + + + for i in range(4): + + plt.subplot(2,4,i+1) + plt.gca().set_title('X: {1}, Y: {0} \n True ET {2:.6g}, \n MC ET {3:.6g}\n'.format(Y[i,0].sum(), Y[i,1].sum(), Y[i,2].sum(), X_B_mc[i].sum())) + plt.imshow(X_B_mc[i].reshape(n_H_B,n_W_B)) + plt.subplots_adjust(wspace=0.25,hspace=0.25) + plt.xlabel('X') + plt.ylabel('Y') + plt.subplot(2,4,i+5) + plt.gca().set_title('NN ET {0:.6g}'.format(X_B_nn[i].sum())) + plt.imshow(X_B_nn[i].reshape(n_H_B,n_W_B)) + plt.xlabel('X') + plt.ylabel('Y') + plt.subplots_adjust(wspace=0.25,hspace=0.25) + + fig = plt.gcf() + fig.set_size_inches(20,10) + + if save: + if is_training: + plt.savefig(PATH+'/sample_at_iter_{0}.png'.format(total_iters),dpi=80) + else: + plt.savefig(PATH+'/nn_reco_sample_{0}.png'.format(j),dpi=80) + else: + plt.show() + + + +def get_inner_HCAL(reco): + inner_HCAL = reco[:,12:40,16:48,:] + return inner_HCAL + +def get_outer_HCAL(reco): + m_tot, h, w, c = reco.shape + outer_HCAL = np.zeros(shape=(m_tot,h//2,w//2,c)) + + for m in range(0, m_tot): + img=reco[m] + for j in 
range(0, w, 2): + for i in range(0, h, 2): + outer_HCAL[m,i//2,j//2,0]=img[i:i+2,j:j+2].sum() + + outer_HCAL[:,6:20,8:24,:]=0 + return outer_HCAL + +def get_4_max_cells(img): + + c =0 + value = np.zeros(shape=(2,2)) + pos =np.zeros(shape=(2,1)) + + for i in range(img.shape[0]-1): + for j in range(img.shape[1]-1): + c_prime = img[i:i+2,j:j+2].sum() + if c_prime > c: + c = c_prime + value[0,0]=img[i,j] + value[0,1]=img[i,j+1] + value[1,0]=img[i+1,j] + value[1,1]=img[i+1,j+1] + + pos[0]=i + pos[1]=j + return value, pos + +def get_triggered_events(true, reco_inner, reco_outer): + l = [] + for m in range(len(reco_inner)): + value_inner, pos_inner = get_4_max_cells(reco_inner[m]) + value_outer, pos_outer = get_4_max_cells(reco_outer[m]) + + if value_inner.sum()>3680 or value_outer.sum()>3680: + l.append(m) + triggered_true = np.array([true[l[i]].sum() for i in range(len(l))]) + triggered_reco_inner = np.array([reco_inner[l[i]].sum() for i in range(len(l))]) + triggered_reco_outer = np.array([reco_outer[l[i]].sum() for i in range(len(l))]) + + return l, triggered_true, triggered_reco_inner, triggered_reco_outer + +# def crop_conditional(true, reco, dim): + +# ETs=[] +# assert len(reco)==len(true) + +# cropped_reco=np.zeros(shape=(reco.shape[0],2*dim+1,2*dim+1,1)) +# max_x = reco.shape[2] +# max_y = reco.shape[1] +# pos_rejected=[] +# for i in range(len(reco)): + +# reco_y, reco_x, _ = np.where(reco[i]==reco[i].max()) + +# #CENTER OF IMAGE +# if 2*dim0)][0]) + +# else: +# pos_rejected.append(i) +# # + +# # print(len(pos_rejected)) +# ETs=np.array(ETs) +# reco_rejected=np.delete(cropped_reco,pos_rejected,axis=0) + +# assert len(reco_rejected)==len(ETs) + +# return ETs, reco_rejected + +# def crop_function(true, reco, dim): + +# assert len(reco)==len(true) +# j=0 +# cropped_reco=np.zeros(shape=(reco.shape[0],2*dim,2*dim,1)) +# cropped_true=np.zeros(shape=(true.shape[0],2*dim,2*dim,1)) +# max_x = reco.shape[2] +# max_y = reco.shape[1] + +# for i in range(len(reco)): +# y , x 
, _= np.where(true[i]>0) + +# #CORNERS + +# #top left corner +# # if y[0]<=2*dim and x[0]<=2*dim: +# # #print(i, x, y) +# # cropped_reco[i]=reco[i,0:2*dim, 0:2*dim, :] +# # cropped_true[i]=true[i,0:2*dim, 0:2*dim, :] +# # j+=1 + +# # #top right corner +# # elif y[0]<=2*dim and max_x-2*dim" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAD+FJREFUeJzt3X/oXfV9x/HnOybWmQgzmkRm4twi+0OtuC7qCnbUrYWg/WcISiX/DOcXhu3QMdzmYE1xf6xQ1+EQSiDSTUUEW7qymar7Q2xgawnFxl/pSjZnHOZH1YkGhvma9/64J/Qb+Z5zb8733HPPzef5gPC995x7z/nkk+8r997zvp/PJzITSeVZNesGSJoNwy8VyvBLhTL8UqEMv1Qowy8VyvBLhTL8UqEMv1So1St5ckRsB74OnAP8Q2b+zZjH+3VCacoyMyZ5XLT9em9EfAL4KfAZ4DDwb8BCZv644TmGX5qyScO/krf9NwCvZOahzDwBPAncsoLjSerRSsK/GTi05P6b1TZJc2BFn/knERELwMK0zyPpzKwk/G8CW5bc31xtO01m7gJ2gZ/5pSFZydv+HwFXR8TmiFgD3A7s6aZZkqat9St/Zv5fRPwR8Ayj/0Qey8x9nbVM0lS1LvW1Oplv+6Wp66PUJ2mOGX6pUIZfKpThlwpl+KVCTf0bfrOyZs2a2n0nTpzo/Hl9Ha/t+aZxrqGYh3+zvn8PJuErv1Qowy8VyvBLhTL8UqEMv1Soubja3+cV7DaVgKbn9FlZaPu8pnZ0fa6+DaX60fX56v5ei4uLEx/DV36pUIZfKpThlwpl+KVCGX6pUIZfKlSR03j1Wdoa4oCOoSq1r7ouRzqNl6RGhl8qlOGXCmX4pUIZfqlQhl8q1IpKfRHxOvA+8BGwmJnbxjy+t1Jfn/OwtR2516Tr0YBDmUuwyVDKeUMpObbpw8XFRU6ePDlRqa+LIb03ZebPOziOpB75tl8q1ErDn8BzEfFSRHy5iwZJ6sdK3/Z/OjMPR8RG4PsRcSAzn1v6gIhYABZWeB5JHVvRK39mHq5+HgWeAq5b5jG7MnPbuIuBkvrVOvwRsTYizj91G9gOvNpVwyRN10re9m8CvluV784HngT+qZNWTWgellVat25d7b533333jI83rh1Dn7Cybemzz5GYfffVrP7NWoc/M/8TuKbDtkjqkaU+qVCGXyqU4ZcKZfilQhl+qVCDWauvz5FUbc/VpiTTVM7ruh1tzftafUNpx7zxlV8qlOGXCmX4pUIZfqlQhl8qVK/Lda1atSpXr16+wNDn1e2hzMM2jXasXbt22e3Hjx+vfc6WLVtq9x06dGjFbZrUUKoO8zBgrInLdUlqZPilQhl+qVCGXyqU4ZcKZfilQs1FqW/Tpk3Lbn/nnXdqn9N27rw+l8IaypJibY437phtSo5DWSZr3lnqk9TI8EuFMvxSoQy/VCjDLxXK8EuFGlvqi4hHgC8ARzPz6mrbekbLc10CvAXcnplj156qlvbqTFPbzz333Np98z4KrMnZOmLRMuDkuiz1fYvRIpxLfRXYk5mfBPZU9yXNkb
Hhz8wXgI9/m+YW4NHq9mPVfUlzpO1n/g2ZeQyg+rmxuyZJ6sPU5+2PiAVgYdrnkXRm2r7yH4uIDQDVz6N1D8zMXZm5LTO3tTyXpCloG/6ngR3V7R2MLvpJmiOTlPqeAD4LXAwcAb4CfJdRqW9Tte22zKwfYveLY3Va6rvwwgtr9zWN3OtaUzs++OCD2n1tS1TzMOFm3d+tbrQfwIcffnjGx+vbPIzEnLTUN/Yzf2Z+sWbX586oRZIGxW/4SYUy/FKhDL9UKMMvFcrwS4XqdQLPrkt9TfocBbZ169bafXfccUftvgceeKB2X9v21012euTIkdrnTEOb8tX3vve92n333Xdf7b4DBw6ccTvaGkrJsYkTeEpqZPilQhl+qVCGXyqU4ZcKZfilQs1Fqa+uXLN+/fra57QtbbUpDbUpvUHzWoNNE5C2Gf02jdF0TX1Vt1Zi0yjHpvUVm/qjqR/rTKNk1/WajdMe1ecrv1Qowy8VyvBLhTL8UqEMv1SoqU/dPak2V9n7HqzS5upr01Xqpn1t1V3VP378eOfnauqP66+/ftnte/furX3OlVdeWbvv7bffrt3X9e9BU2WkqR/bXp2ve16bTCwuLk78WF/5pUIZfqlQhl8qlOGXCmX4pUIZfqlQkyzX9QjwBeBoZl5dbdsJ3AUcqx52f2Y+Pe5kq1atytWrl68uDmVutDaDM5qe03aATpOh9FXXmpY963pgT9O8iwcPHqzdN5S+r/udW1xc5OTJk50N7PkWsH2Z7d/IzGurP2ODL2lYxoY/M18Azvy/VkmDtpLP/HdHxIGIeDwiLuqsRZJ60Tb8DwNXAFcCB4GH6h4YEQsRsS8i9vU5cYikZq2+25+Zpy70ERHfBJ5veOwuYBeMLvi1OZ+k7rV65Y+IjUvu3gq82k1zJPVl7Ct/RDwBfBa4OCLeBL4C3BQR1wDnAm8Ad06zkX1qO2ddnaYS1TRG2s2zpvn92qqb5/Gqq66qfU5Tqa+trpeP66LkODb8mfnFZTbvXvGZJc2U3/CTCmX4pUIZfqlQhl8qlOGXCjUXy3XVaVN6g+GMzGqr7QSTfaprY9uRjNu3Lze2bOTVV+u/ZvLGG2+0Ol8bbX+vul4izuW6JDUy/FKhDL9UKMMvFcrwS4Uy/FKhBlPq63rUU5OmUlkb05iIs6k/mta0q5vM8vDhw5M3bImmUYl1I+agfoRe25F7Tedq6v+6fU0l0aGUkNuu1dflBJ6SzkKGXyqU4ZcKZfilQhl+qVCDudp/tppGFaPpmJdddtmy25sGuExjSbE6bSscTVf72yzl1fT3atvGoQwYc2CPpEaGXyqU4ZcKZfilQhl+qVCGXyrU2FJfRGwBHgfWM1qea3dmfi0i1gNPApcAbwG3Z+a7Y44116W+ujJP10t8TeuYXbejjaGUyqbRh0Ppqy5LfSeAL2Xm1cBvAX8YEdcCXwX2ZOYngT3VfUlzYmz4M/NwZu6vbr8P7AcuBW4BHq0e9lh1X9KcOKPP/BFxOXAdsBfYkJnHAKqfG+ufKWloxq7Se0pErAOeAu7JzPciJvpYQUQsAAvtmidpWiZ65Y+INcC3gScy8zvV5mMRsaHavwE4utxzM3NXZm7LzG1dNFhSN8aGP0Yv8buB1zLzwSW7ngZ2VLd3MLroJ2lOTFLquxH4AfAScLLafD/wQ0alvk3AEeC2zFx+CNUvjjX4Ut9QSlFttSlHtjneSo7Zta6XL2t7vK77atqlvrGf+TNzL1B3sM9NchJJw+M3/KRCGX6pUIZfKpThlwpl+KVCnbUTeLYtk7QZ7TWUktc0DKV81faYbc7lcl2SzmqGXyqU4ZcKZfilQhl+qVCGXypUr6W+VatW5erVy48lalN66bvE1ucEnk3rzzWpG3U27xNWtm3Hli1blt1et4YftBsJOE6fJWTX6pPUyPBLhTL8UqEMv1Qowy8VajBX+7s2jUrAUKoOTfq8qtxmkM6mTZtqn9N0BX4ofdz0d966dWvtvgMHDk
yjOcvyar+kRoZfKpThlwpl+KVCGX6pUIZfKtQky3VtAR4H1gPnArsz82sRsRO4CzhWPfT+zHx6zLEGv1xXE+f3O900BgvVGUo/DmX5srp2nMkcfpMU3U8AX8rM/RFxAfDjiHim2veNzPz6RK2VNCiTrNV3GDhc3X4/IvYDl067YZKm64w+80fE5cB1wN5q090RcSAiHo+Iizpum6Qpmjj8EbEOeAq4JzPfAx4GrgCuBA4CD9U8byEi9kXEvg7aK6kjE323PyLWAP8MPJuZDy6z/1eA5zPzN8Ycxwt+ZxEv+J1u3i74jX3lj4gAdgOvLQ1+RGxc8rBbgVcnOaGkYZik1Hcj8APgJeBktfl+4A7gGkblvzeAOzPzUNOx2s7hV6fv/4XnYVRfG/OwtFnTaMAmR44cafW8rvX5uzrpqL5JrvbvBZY7WGNNX9Kw+Q0/qVCGXyqU4ZcKZfilQhl+qVC9TuDZ9CWfoXx5omt9fhEG6vtqGuW8rpdY67uNfR2vb07gKamR4ZcKZfilQhl+qVCGXyqU4ZcKNZi1+oZSQpn3MftdjzxsW6rsuk/6LL9NozzbZ8nRUp+kRoZfKpThlwpl+KVCGX6pUIZfKtRgRvW1MZQyVN/moezVZnRhm+O1Ne8j93qZulvS2cnwS4Uy/FKhDL9UKMMvFWqS5brOY7Qk92pgLfAvwL3AhcCTwCXAW8DtmfnumGPN9UKd0jyYdGDPJOEP4PzMPF6t1rsX+Avg94H/ysy/jYh7gV/LzD8ecyzDL01ZZ6P6cuR4dXcNcA5wFLgFeLTa/lh1X9KcmOgzf0ScExEvMgr985n5MrAhM48BVD83Nh1D0rCMXaUXIDM/Aq6NiF8GnomImyY9QUQsAAst2ydpSs74670R8VfACeAu4IbMPBYRG4B/z8ytY57rZ35pyjr7zB8RF0fEBdXtXwI+D7wMPA3sqB62A9jTrqmSZmGSq/3XAP8IBHAe8ERm7oyIixiV+jYBR4DbMvOdMcfylV+ass5KfV0y/NL0OYGnpEaGXyqU4ZcKZfilQhl+qVATfcOvQz8H/ru6fXF1f9Zsx+lsx+nmrR2/OukBey31nXbiiH2ZuW0mJ7cdtsN2+LZfKpXhlwo1y/DvmuG5l7Idp7Mdpztr2zGzz/ySZsu3/VKhZhL+iNgeES9HxGsR8eezaEPVjtcj4qWIeDEi9vV43kci4mhEvLxk2/qIeK5qz7MRceGM2rEzIv6n6pMXI+LmHtqxJSJeqH4n/iMi/qza3mufNLSj1z6JiPMiYl91rp9FxN/FSLf9kZm9/gE+AbwObGE0J+A+4FN9t6Nqy+vAxTM47+8AnwJeXrLt74E/qW7fCzw0o3bsBP605/64BLimun0B8DPg2r77pKEdvfYJo+Hza6vba4AfAr/bdX/M4pX/BuCVzDyUmScYzQlQ1OSfmfkC8PG5D3qfELWmHb3LzMOZub+6/T6wH7iUnvukoR29ypGpT5o7i/BvBg4tuf9mtW0WEjj1NurLM2rDKUOaEPXuiDgQEY9Xk7b0JiIuB65jNEX8zPrkY+2Anvukj0lzS7/g9+nM/E3g94A/iIjPz7pBA/AwcAVwJXAQeKivE0fEOuAp4J7MfK+v807Qjt77JDM/ysxrGb0wfuZMJs2d1CzC/yajz/unbK629S4zD1c/jzL6x75uFu2onJoIlern0Vk0IjOPVb94J4Fv0lOfVAvCfJvRNHHfqTb33ifLtWNWfVKd+38ZrZL123TcH7MI/4+AqyNic9XRtzODyT8jYm1EnH/qNrAdeLXvdiwxiAlRI2LpW8lb6aFPqlWhdgOvZeaDS3b12id17ei7T3qbNLevK5gfu5p5M/AK8BrwlzNqw68zuqDzE0ZXdf+a6ktPPZz7CUbrG55g9K7nTuAi4F+Bl6qf62fUjseqfjkAPAts6aEdNzK6/rIfeLH6c3PffdLQjl77BLimOvdPgJ8CO6vtnfaH3/CTClX6BT+pWIZfKpThlwpl+K
VCGX6pUIZfKpThlwpl+KVC/T+7eUcSMnmA6wAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plt.imshow(cellsET_inner_dict[batch][n],cmap='gray')" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAADe1JREFUeJzt3W+IHPd9x/H3t/Yl8V/asySbWqJqa/rAKEJNJdyAU+KWgLHypBhsWgSmuD4ojopTCm1daBTogwYiu7gYgkAmrSWEwQkp1FZt94FxDG6CCIr+WEpjt26kYlkKSm0TXDjb3z7YEZzE7Z+bm5mdvd/7BeJ2Z3Znvve7/Wh257e/30RmIqk8vzDtAiRNh+GXCmX4pUIZfqlQhl8qlOGXCmX4pUIZfqlQhl8q1NWreXJE3A18HbgK+MfM/Lsxj/frhFLLMjMmeVzU/XpvRHwS+BHwOeAc8BqwkJk/GPEcwy+1bNLwr+Zt/x3Aycw8k5mLwDPAzlVsT1KHVhP+jcCZJffPVsskzYBVfeafREQsAAtt70fSyqwm/GeBTUvub6yWXSYz9wH7wM/8Up+s5m3/94EtEbExIuaA+4HDzZQlqW21j/yZ+X8R8SfACwz+EzmQmUcaq0xSq2p39dXamW/7pdZ10dUnaYYZfqlQhl8qlOGXCmX4pUIZfqlQhl8qlOGXCmX4pUIZfqlQhl8qVOvj+afl6quH/2offvjhTNfR9Da7rLHLttdoHvmlQhl+qVCGXyqU4ZcKZfilQhl+qVC96epbq11zo55Td199785bzTa7qqPr9hilyb/nSrblkV8qlOGXCmX4pUIZfqlQhl8qlOGXCrWqK/ZExFvA+8BHwIeZuX3M49PRXpOp2xXVl/atU0cfusqmoem/zaRX7GmiBe7KzJ82sB1JHfJtv1So1YY/gZci4nhE7G6iIEndWO3b/s9m5rmI2AD8a0SczsyXlj4gIhaAhVXuR1LDVnXkz8xz1c/zwLPAjmUesy8zt487GSipW7XDHxHXRcS1l24DdwOvN1WYpHat5m3/zcB3IiKBa4FngH8e9YSIYG5ubtl1fRmp1rQ2Ru413U3VRjt2OQKyjjZG7i0uLg5dt23btqHrTp48uezytl/DtVs0M/8T2NpgLZI6ZFefVCjDLxXK8EuFMvxSoQy/VKhVjepb8c4G3YKawCx0Y3apy+sTjtL3iUlh8lF9HvmlQhl+qVCGXyqU4ZcKZfilQvVnIrMOXXPNNUPXffDBByveXhtnjmfhrHKd37uNgUJdXmJtLfHILxXK8EuFMvxSoQy/VCjDLxXK8EuF6nxgT18uJ1XHLNeucjiwR9JIhl8qlOGXCmX4pUIZfqlQhl8q1Niuvoh4CvgicD4zt1TL5hlcnusW4G3g/sz82did1ZzDr+kutlGj+h577LGh6/bu3bvs8jfeeKNWHZo9szC3YpNdfd9kcBHOpb4KHM7MTwOHq/uSZsjY8GfmK8DFKxbvBJ6ubh+o7kuaIXU/86/PzAsA1c8NzZUkqQutz+QTEQvAQtv7kbQydY/8FyJiPU
D18/ywB2bmvszcnpnba+5LUgvqhv95YFd1exeDk36SZsgkXX2HgM8D64B3gK8A32HQ1Xdztey+zLzypOBy2xq6sy4ngxy1r7m5uaHrFhcXG61D5eiyi3DSrr7eXKvP8Gst62P4/YafVCjDLxXK8EuFMvxSoQy/VKjenO0fpelRfbMwMkv91OVrp+6+PNsvaSTDLxXK8EuFMvxSoQy/VCjDLxVqJrr6hqkzGAjszlO/Xjt1urJHPceuPkkjGX6pUIZfKpThlwpl+KVCtT51dxOaHtgjtaHuQJxhU8eNek4Tr32P/FKhDL9UKMMvFcrwS4Uy/FKhDL9UqEku1/UU8EXgfGZuqZbtAR4CLlQPezQznx+7sw4H9rQxv1/T+9LaUvf1OOx5u3fvHvqcxx9/fOi6Jgf2fBO4e7n9Z+a26t/Y4Evql7Hhz8xXgLEX4ZQ0W1bzmf/hiDgdEQcj4qbGKpLUibrhfxK4DbgdeBN4YtgDI2IhIo5ExJGa+5LUglrf7c/MSyf6iIhvAC+PeOw+YF/12O6mDZI0Uq0jf0RsWHL3XuD1ZsqR1JWxR/6IOAR8HlgXEWeBrwB3RcRW4BPAT4AH2yyyaZs3bx667r333qu1TmvHjTfeOHTdqNdAne68Uc977bXXhj6nCWPDn5l/sMzi/S3UIqlDfsNPKpThlwpl+KVCGX6pUIZfKtRMX66rDaO6ZB544IFll+/fb+fHrGljRGhf9ufluiSNZPilQhl+qVCGXyqU4ZcKZfilQs10V1/d7pOdO3cOXffcc8+tqiZpOU139Y26fqVdfZJGMvxSoQy/VCjDLxXK8EuFmomz/XUuodUGL8u1dsz6wB7P9kuqzfBLhTL8UqEMv1Qowy8VyvBLhRrb1RcRm4CDwDyDy3Ptz8yvRcQ88AxwC/A2cH9m/mzMtno/h9/8/PzQdRcvXuywEqmeJrv6FoEvZeYW4LeAP46IbcBXgcOZ+WngcHVf0owYG/7MPJeZx6rb7wPHgFuBncDT1cMOVPclzYgVfeaPiM3ADuBVYH1mXgCofm4Y/kxJfTPx92Yj4nrgWeCRzHw3YqKPFUTEArBQrzxJbZnoyB8Rc8C3gEOZ+e1q8YWIWF+tXw+cX+65mbkvM7dn5vYmCpbUjLHhj8Ehfj9wKjP3Lln1PLCrur2LwUk/STNikq6+O4HvAseBj6vFjwLfY9DVdzPwDnBfZo7sC+tLV1/dUYKO6ls7uh7VV0ed1+lKRvXNxJDephl+GX6/4ScVy/BLhTL8UqEMv1Qowy8VqvOZMUdNPNgHfalD09OXnoC29+WRXyqU4ZcKZfilQhl+qVCGXyqU4ZcKtWYH9vRl8E4bdfSlK2qUpq+v2Jffa5S+/F0c2CNpJMMvFcrwS4Uy/FKhDL9UqN6c7e/LmdK+1FFX3wdONd0LMM4s9N40uS+n8ZI0luGXCmX4pUIZfqlQhl8qlOGXCjXJ5bo2AQeBeeATwP7M/FpE7AEeAi5UD300M58fs61a/Yp1uzyaVqcbba12HUK39c/Pzw9dd/HiyKvELasvv9cobXf1TbL1ReBLmXksIm4AfhARL1TrHs/Mr6+4QklTNzb8mXkOOFfdfj8ijgG3tl2YpHat6DN/RGwGdgCvVosejojTEXEwIm5quDZJLZo4/BFxPfAs8Ehmvgs8CdwG3A68CTwx5HkLEXEkIo40UK+khkz03f6ImAP+BXgxM/cus/6XgZcz8zfGbMcTfhM+ry/6Ur8n/CbT6Hf7IyKA/cCppcGPiA1LHnYv8PpKC5U0PZN09d0JfBc4DnxcLX4U+ENgK4Puv58AD2bmmTHbqjWqb5iu/4fucsTcLByZRpnl0YWz/rpqrKsvM18FltvYyD59Sf3mN/ykQhl+qVCGXyqU4ZcKZfilQnU7m2JNJU7g2ZcusbpK/Js1re3ub4/8UqEMv1Qowy8VyvBLhTL8UqEMv1Sozrv6uhrtVbf7p+lrqtX9vZre5ix0h81CjW0Y9ru13R4e+a
VCGX6pUIZfKpThlwpl+KVCGX6pUBNN3d3YzmpO3S1pco1N3S1pbTL8UqEMv1Qowy8VyvBLhZrkWn2fqq6yezQifhwRfx8D8xHxUkQcj4gXI+KXuihYUjMmuVZfANdm5s+rq/W+CvwV8PvAf2XmYxHxZeBXM/NPx2zLrj6pZY119eXAz6u7c8BVwHlgJ/B0tfxAdV/SjJjoM39EXBURRxmE/uXMPAGsz8wLANXPDaO2IalfJprMIzM/ArZFxC8CL0TEXZPuICIWgIWa9UlqyYq/3hsRfwMsAg8Bd2TmhYhYD/x7Zv76mOf6mV9qWWOf+SNiXUTcUN2+BvgCcAJ4HthVPWwXcLheqZKmYZKz/VuBfwIC+BRwKDP3RMRNwDPAzcA7wH2ZeXHMtjzySy2b9MjvqD5pjXFUn6SRDL9UKMMvFcrwS4Uy/FKhur5c10+B/65ur6vuT5t1XM46LjdrdfzKpBvstKvvsh1HHMnM7VPZuXVYh3X4tl8qleGXCjXN8O+b4r6Xso7LWcfl1mwdU/vML2m6fNsvFWoq4Y+IuyPiREScioi/nEYNVR1vVROQHo2IIx3u96mIOB8RJ5Ys63xC1CF17ImI/6na5GhE3NNBHZsi4pXqNfEfEfEX1fJO22REHZ22SWeT5mZmp/+ATwJvAZsYzAl4BPhM13VUtbwFrJvCfn8H+AxwYsmyfwD+rLr9ZeCJKdWxB/jzjtvjFmBrdfsG4MfAtq7bZEQdnbYJg+Hz11W354DvAb/bdHtM48h/B3AyM89k5iKDOQGKmvwzM18Brpz7oPMJUYfU0bnMPJeZx6rb7wPHgFvpuE1G1NGpHGh90txphH8jcGbJ/bPVsmlI4NLbqN1TquGSPk2I+nBEnI6Ig9WkLZ2JiM3ADgZTxE+tTa6oAzpuky4mzS39hN9nM/M3gd8D/igivjDtgnrgSeA24HbgTeCJrnYcEdcDzwKPZOa7Xe13gjo6b5PM/CgztzE4MH5uJZPmTmoa4T/L4PP+JRurZZ3LzHPVz/MM/tg7plFH5dJEqFQ/z0+jiMy8UL3wPga+QUdtUl0Q5lsMpon7drW48zZZro5ptUm17/8FngN+m4bbYxrh/z6wJSI2Vg19P1OY/DMirouIay/dBu4GXu+6jiV6MSFqRCx9K3kvHbRJdVWo/cCpzNy7ZFWnbTKsjq7bpLNJc7s6g3nF2cx7gJPAKeCvp1TDrzE4ofNDBmd1/5bqS08d7PsQ8DaDKdDPAg8CNwH/Bhyvfs5PqY4DVbucBl4ENnVQx50Mzr8cA45W/+7puk1G1NFpmwBbq33/EPgRsKda3mh7+A0/qVCln/CTimX4pUIZfqlQhl8qlOGXCmX4pUIZfqlQhl8q1P8DunFJofehXT0AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plt.imshow(cellsET_outer_dict[batch][n],cmap='gray')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#particle = 'piplus'\n", + "#cal_zone = 'inner'\n", + "#variable = 'X'\n", + "#\n", + "#cellsX_inner_dict={}\n", + "#\n", + "#for j in range(n_batches):\n", + "# cellsX_inner_dict[j]=rn.root2array(\n", + "# filenames=file_path, \n", + "# treename=tree_name,\n", + "# branches=particle+'_L0Calo_HCAL_Cells'+variable+'_'+cal_zone,\n", + "# start=j*batch_size,\n", + "# stop=(j+1)*batch_size,\n", + "# )\n", + "# \n", + "# cellsX_inner_dict[j]=np.array([cellsX_inner_dict[j][i] for i in range(batch_size)])\n", + "# np.save('/disk/lhcb_data/davide/HCAL_project_full_event/npy/'+file_name+'/'+variable+'_'+cal_zone+'/batch_'+str(j), cellsX_inner_dict[j])\n", + "#\n", + "#if N % batch_size != 0:\n", + "# \n", + "# cellsX_inner_dict[j+1]=rn.root2array(\n", + "# filenames=file_path,\n", + "# treename=tree_name,\n", + "# branches=particle+'_L0Calo_HCAL_Cells'+variable+'_'+cal_zone,\n", + "# start=n_batches*batch_size,\n", + "# stop=N,\n", + "# ) \n", + "# cellsX_inner_dict[j+1]=np.array([cellsX_inner_dict[j+1][i] for i in range((N % batch_size))])\n", + "# np.save('/disk/lhcb_data/davide/HCAL_project_full_event/npy/'+file_name+'/'+variable+'_'+cal_zone+'/batch_'+str(j+1), cellsX_inner_dict[j+1])\n", + "# \n", + "#variable = 'Y'\n", + "#\n", + "#cellsY_inner_dict ={}\n", + "#\n", + "#for j in range(n_batches):\n", + "# cellsY_inner_dict[j]=rn.root2array(\n", + "# filenames=file_path, \n", + "# treename=tree_name,\n", + "# branches=particle+'_L0Calo_HCAL_Cells'+variable+'_'+cal_zone,\n", + "# start=j*batch_size,\n", + "# stop=(j+1)*batch_size,\n", + "# )\n", + "# cellsY_inner_dict[j]=np.array([cellsY_inner_dict[j][i] for i in range(batch_size)])\n", + "# 
np.save('/disk/lhcb_data/davide/HCAL_project_full_event/npy/'+file_name+'/'+variable+'_'+cal_zone+'/batch_'+str(j), cellsY_inner_dict[j])\n", + "#\n", + "#if N % batch_size != 0:\n", + "# \n", + "# cellsY_inner_dict[j+1]=rn.root2array(\n", + "# filenames=file_path,\n", + "# treename=tree_name,\n", + "# branches=particle+'_L0Calo_HCAL_Cells'+variable+'_'+cal_zone,\n", + "# start=n_batches*batch_size,\n", + "# stop=N,\n", + "# ) \n", + "# cellsY_inner_dict[j+1]=np.array([cellsY_inner_dict[j+1][i] for i in range((N % batch_size))])\n", + "# np.save('/disk/lhcb_data/davide/HCAL_project_full_event/npy/'+file_name+'/'+variable+'_'+cal_zone+'/batch_'+str(j+1), cellsY_inner_dict[j+1])\n", + "\n", + "#particle = 'piplus'\n", + "#cal_zone = 'outer'\n", + "#variable = 'X'\n", + "#\n", + "#cellsX_outer_dict={}\n", + "#\n", + "#for j in range(n_batches):\n", + "# cellsX_outer_dict[j]=rn.root2array(\n", + "# filenames=file_path, \n", + "# treename=tree_name,\n", + "# branches=particle+'_L0Calo_HCAL_Cells'+variable+'_'+cal_zone,\n", + "# start=j*batch_size,\n", + "# stop=(j+1)*batch_size,\n", + "# )\n", + "# \n", + "# cellsX_outer_dict[j]=np.array([cellsX_outer_dict[j][i] for i in range(batch_size)])\n", + "# np.save('/disk/lhcb_data/davide/HCAL_project_full_event/npy/'+file_name+'/'+variable+'_'+cal_zone+'/batch_'+str(j), cellsX_outer_dict[j])\n", + "#\n", + "#if N % batch_size != 0:\n", + "# \n", + "# cellsX_outer_dict[j+1]=rn.root2array(\n", + "# filenames=file_path,\n", + "# treename=tree_name,\n", + "# branches=particle+'_L0Calo_HCAL_Cells'+variable+'_'+cal_zone,\n", + "# start=n_batches*batch_size,\n", + "# stop=N,\n", + "# ) \n", + "# cellsX_outer_dict[j+1]=np.array([cellsX_outer_dict[j+1][i] for i in range((N % batch_size))])\n", + "# np.save('/disk/lhcb_data/davide/HCAL_project_full_event/npy/'+file_name+'/'+variable+'_'+cal_zone+'/batch_'+str(j+1), cellsX_outer_dict[j+1])\n", + "# \n", + "#variable = 'Y'\n", + "#\n", + "#cellsY_outer_dict ={}\n", + "#\n", + "#for j in 
range(n_batches):\n", + "# cellsY_outer_dict[j]=rn.root2array(\n", + "# filenames=file_path, \n", + "# treename=tree_name,\n", + "# branches=particle+'_L0Calo_HCAL_Cells'+variable+'_'+cal_zone,\n", + "# start=j*batch_size,\n", + "# stop=(j+1)*batch_size,\n", + "# )\n", + "# cellsY_outer_dict[j]=np.array([cellsY_outer_dict[j][i] for i in range(batch_size)])\n", + "# np.save('/disk/lhcb_data/davide/HCAL_project_full_event/npy/'+file_name+'/'+variable+'_'+cal_zone+'/batch_'+str(j), cellsY_outer_dict[j])\n", + "#\n", + "#if N % batch_size != 0:\n", + "# \n", + "# cellsY_outer_dict[j+1]=rn.root2array(\n", + "# filenames=file_path,\n", + "# treename=tree_name,\n", + "# branches=particle+'_L0Calo_HCAL_Cells'+variable+'_'+cal_zone,\n", + "# start=n_batches*batch_size,\n", + "# stop=N,\n", + "# ) \n", + "# cellsY_outer_dict[j+1]=np.array([cellsY_outer_dict[j+1][i] for i in range((N % batch_size))])\n", + "# np.save('/disk/lhcb_data/davide/HCAL_project_full_event/npy/'+file_name+'/'+variable+'_'+cal_zone+'/batch_'+str(j+1), cellsY_outer_dict[j+1]) " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/create_reco_images.ipynb b/create_reco_images.ipynb new file mode 100644 index 0000000..88eae01 --- /dev/null +++ b/create_reco_images.ipynb @@ -0,0 +1,413 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import os\n", + "\n", + "import pickle" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def 
ones_to_zeros(ET_inner_dict, ET_outer_dict):\n", + " \n", + " for k in range(0,len(ET_inner_dict)):\n", + " for i in range(ET_inner_dict[k].shape[0]):\n", + " equal_inner = np.array_equal(ET_inner_dict[k][i],np.ones_like(ET_inner_dict[k][i]))\n", + " equal_outer = np.array_equal(ET_outer_dict[k][i],np.ones_like(ET_outer_dict[k][i]))\n", + " if equal_inner or equal_outer:\n", + " ET_inner_dict[k][i]=np.zeros_like(ET_inner_dict[k][i])\n", + " ET_outer_dict[k][i]=np.zeros_like(ET_outer_dict[k][i])\n", + " \n", + " return ET_inner_dict, ET_outer_dict" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def double_size(X_in):\n", + "\n", + " X_out = np.zeros(\n", + " shape=(2*X_in.shape[0],2*X_in.shape[1],1),\n", + " dtype=np.float32\n", + " )\n", + " \n", + " for i in range(X_in.shape[0]):\n", + " for j in range(X_in.shape[1]):\n", + " \n", + " if X_in[i,j]!=0:\n", + " value = X_in[i,j]/4\n", + " X_out[2*i,2*j]=value\n", + " X_out[2*i+1,2*j]=value\n", + " X_out[2*i,2*j+1]=value\n", + " X_out[2*i+1,2*j+1]=value\n", + " return X_out" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def insert(X_inner, X_outer):\n", + " X=X_outer\n", + " \n", + " for i in range(12,40):\n", + " for j in range(16,48):\n", + " X[i,j]=X_inner[i-12,j-16]\n", + " \n", + " return X" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_HCAL_images(ET_inner_dict, ET_outer_dict):\n", + " \n", + " output_pics = {}\n", + " \n", + " pic = np.array(np.zeros(shape=(52,64,1), dtype=np.float32))\n", + " \n", + " for j in range(len(ET_inner_dict)):\n", + " \n", + " \n", + " output_pics[j]=np.array([pic for i in range(len(ET_inner_dict[j]))]) \n", + " \n", + " \n", + " for j in range(len(ET_inner_dict)):\n", + " \n", + " for k in range(len(ET_inner_dict[j])):\n", + " \n", + " ET_image = insert(ET_inner_dict[j][k], 
double_size(ET_outer_dict[j][k]))\n", + " ET_image = np.flip(ET_image, axis = 0)\n", + " \n", + " output_pics[j][k]= ET_image\n", + " \n", + " print('Done batch {0}'.format(j+1))\n", + " \n", + " return output_pics" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "ET_inner_dict={}\n", + "ET_outer_dict={}\n", + "\n", + "i=0\n", + "#while os.path.exists('/disk/lhcb_data/davide/HCAL_project_full_event/npy/DplusMuNu_Full/ET_inner/batch_'+str(i)+'.npy'):\n", + "while i < 2:\n", + " ET_inner_dict[i]=np.load('/disk/lhcb_data/davide/HCAL_project_full_event/npy/DplusMuNu_Full/ET_inner/batch_'+str(i)+'.npy')\n", + " ET_outer_dict[i]=np.load('/disk/lhcb_data/davide/HCAL_project_full_event/npy/DplusMuNu_Full/ET_outer/batch_'+str(i)+'.npy')\n", + "\n", + " i+=1\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAzIAAAFZCAYAAABDmRqdAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzt3XusZWd53/Hfb1/OXI2x8QVjO4EQB2E1YaimLonbygmXGlrJWAoVlkpd1dLwB5aMwh91USVI00o0ChBVSh0NsosjAQ4KuFipBbiuI5cKOQzE+JKB2CEGG09mMMbMjOdy9uXpH2ebTMbnzDzPnL32PmvP9yONzjlrnvPu513vu961nrMvyxEhAAAAAGiTzrwTAAAAAIAqChkAAAAArUMhAwAAAKB1KGQAAAAAtA6FDAAAAIDWoZABAAAA0DoUMgAAAABah0IGAAAAQOtQyAAAAABond4sH2ypszm2dM7JBbvQcJxROtPNAWeoupM3wmBXcmhqEjW1HxbcRhg6ae7Dd3R8SMvjY6xwAIBWm2khs6Vzjn713OtzwZ3COXbc0FVBJYcqN9R2NLQvmsrXxScFY9xMHt1uPnY0ysdW+5fV1H6o2gjzrZJDZa1o8vhvas1K+vpP757r4wMAMA3rusqyfa3t79p+0vat00oKAAAAAE7ljAsZ211JfyDpnZKulHSD7SunlRgAAAAArGU9z8hcJenJiPheRCxLukvSddNJCwAAAADWtp5C5lJJT5/w8zOTbQAAAADQqPW82X+1d8K+7B2stndJ2iVJmzvb1/FwAAAAALBiPc/IPCPp8hN+vkzSsycHRcTuiNgZETuXvHkdDwcAAAAAK9ZTyHxD0hW2X2d7SdJ7Jd0znbQAAAAAYG1n/NKyiBjavlnSVyR1Jd0REY9PLTMAAAAAWMO6bogZEfdKundKuQAAAABAyroKmTKrmbtld5u6e3rx7tuVu5FX7iRfuQt4Uzcjr4xbk3dPHxfGutC2C2MXlbFrTD6HSt+qYjRuqOFCu6X
jrrl9UdLQklU69rCqpe6W2NI/Nxdc2d2VqddUu02qHIeVc2up3XzohtlvbdPYnG9w7Wpqbm4Ejabb0GAnQ48ODmp5dOS00bMtZAAA2MC29M/Vr132vlzwuFBwdwrVa1MX+pKiqT/8VfrXtv1W/oNbyy6GKyr7uFf4o99gWM8lqzKHRqPm8miAm/pjolQ7Tit/DO7nSo+vf//OVFxTfxcEAGDubF9r+7u2n7R967zzAQBMD4UMAGAh2e5K+gNJ75R0paQbbF8536wAANNCIQMAWFRXSXoyIr4XEcuS7pJ03ZxzAgBMCYUMAGBRXSrp6RN+fmayDQCwAHizPwBgUa32DtSXvVvZ9i5JuyRpc++cpnMCAEwJz8gAABbVM5IuP+HnyyQ9e3JQROyOiJ0RsXOpu3VmyQEA1odCBgCwqL4h6Qrbr7O9JOm9ku6Zc04AgCnhpWUAgIUUEUPbN0v6ilbuIntHRDw+57QAAFNCIQMAWFgRca+ke+edBwBg+mZfyBTvQpxrs6FXyDWQ6s+aruyHbuGOqZW77haU8u0VxqNyx12puTtCF9otTYtu/s7GMczf2bg2fwp3V5Zq+6Jyl/BKzpFvtzTnK3fcjgbvmNwpjEklj+xa0eDa1nqh2jrTRAqF46p8Z++G1kV1KpOqco6otFuIreyH6nm1cs4u5OFKHg1dC5SUrhuK56mmxq8SW+lfZR5XjulKbPX6u7CPS2vWcJRsNBfGe2QAAAAAtA6FDAAAAIDWoZABAAAA0DoUMgAAAABah0IGAAAAQOtQyAAAAABoHQoZAAAAAK1DIQMAAACgdShkAAAAALQOhQwAAACA1unN9uEsOVk7ddxMBi60W4mt6hRqyNEoHerejId0NZW+Ndl2ZQ6NC+1G5GO7+XZLc7NJ3e68MyjtYxeOj/Ghg+nYzivPTcdGZU5IcmEex7iJebFB5tpGZCl6yWOgX1hvR+N8bGHdiKXiWA7zx0ulf9ErrKFNrXWVdgvHbFTPad18HpX95mFhDlWMCutXZVdU0i3uYg8K10WFKR+b+/l2C+NRmUOVoyN6m/LBlTVIKl17Vo699PqabJJnZAAAAAC0DoUMAAAAgNahkAEAAADQOhQyAAAAAFqHQgYAAABA61DIAAAAAGgdChkAAAAArUMhAwAAAKB1KGQAAAAAtA6FDAAAAIDW6c300Sy520DtZE+/TUnqdptpV6rl3FRspxLbTM0bxfng0bgQXOhfZagjCsEFDe3jcr5NzbdKHsNRIzl0LnhVvt0CNzUnmmq7oSVzIXQ6iq2bUqEeFtajfv50G90GB2ipmdN+NHQejn5hXSyc06Jy/mvqGqOYhzdCzpX1qBBaOrdLpf5VVtDKeHSW8+ep0vHR0PFfbrVXuDCqzIvsvkjG8YwMAAAAgNahkAEAAADQOhQyAAAAAFqHQgYAAABA61DIAAAAAGgdChkAAAAArUMhAwAAAKB1KGQAAAAAtA6FDAAAAIDWoZABAAAA0Dq9eScAAEAbRSf/t8DY1M03bJ9BNjnjpXwe0cnnEf38vvBg3Ei7lXw9jnRsVSWP0VLh78mFaRHdwtgVUugM8/utezQ/zuPKfpDUOzpMxzY1L6KXz7lTmPOjpX6+3WODdGxlvZIkR+EYGeb7p+x+S66DMy5kLHULi3m62cLR3a0sGs2dTFSZUIXJFJX+VVT2RWXyFw+s0oHYK8y14Sgf29S8KCy2pXyrKv0b5xcvF9a50thV5lsltmJU6VxRZTzSx0eDaxsAADPCS8sAAAAAtM66npGx/ZSkQ5JGkoYRsXMaSQEAAADAqUzjpWW/HhHPTaEdAAAAAEjhpWUAAAAAWme9hUxI+qrtb9retVqA7V2299jeszw+us6HAwAAAID1v7Ts6oh41vZFku6z/Z2IePDEgIjYLWm3JJ27dHFzn3cIAAAA4KyxrmdkIuLZydcDku6WdNU0kgIAAACAUznjQsb2NtvnvPS9pHdIemxaiQEAAADAWtbz0rKLJd3
tlZu19SR9NiK+PJWsAAAAAOAUzriQiYjvSXrTFHMBAAAAgJRp3Ecmz5I6DXzic7fQ5sozSI2ISh6V/VBpt6KQQ/TysR6O8+1Wx6Pb0Pj1u400G4V97HF+v2kpf+hWxkOSFJXP5MjvtxgV8qjsi8ocKrTrcYOfTdLgOoT1CUvRTc7rpcI4Fo6ryno72lI8jRemdRTW23GvELstn3MUluZKDhUuLqHjfj6PKJzeR4V2K+tXZ5TPocLbCoNXOu9Ig+1L6djusXzb3WP5neHKcbo5HarOcj6H0ZZ+Otaj5s5pncH0J1Ekpzv3kQEAAADQOrN9RgYAgBmy/ZSkQ5JGkoYRsXO+GQEApoVCBgCw6H49Ip6bdxIAgOnipWUAAAAAWodCBgCwyELSV21/0/au1QJs77K9x/aewfDIjNMDAJwpXloGAFhkV0fEs7YvknSf7e9ExIMnBkTEbkm7JekV217T4MfVAQCmiWdkAAALKyKenXw9IOluSVfNNyMAwLRQyAAAFpLtbbbPeel7Se+Q9Nh8swIATAsvLQMALKqLJd3tlRuQ9iR9NiK+PN+UAADTQiEDAFhIEfE9SW+adx4AgGbMuJCx1E2+mm3lL2gp0eueYT6n0VS7kqJfaLu0LwqvFuzkYyOfgryUjx1tLk7ByL8P16PCe3YL/SuNRycf69G4kESeh7V2XXmrc6XtwtT0YFRIotBuId+ojEd2XftZ44V5XJhv6blZmJdnnY413tpPhVbWxcraXFk3xku1uTfaVGi738w82fYnDzXSLjBLR67/x4202z3ezLs+ekeL59XCtcB4c/6atrOcPLcm10HeIwMAAACgdShkAAAAALQOhQwAAACA1qGQAQAAANA6FDIAAAAAWodCBgAAAEDrUMgAAAAAaB0KGQAAAACtQyEDAAAAoHUoZAAAAAC0Tm+mj2Ypet1krPPtdgr1WPbxJUWvWOcVco5uM/0bbWqmf5V8K7Hjyn6QpEp4JTYKoZ1Cww3l4MgHd46PC0lIKvSve3yUb7eQRuf4MB3ryth180l4lO+bB4X9UBTK7wtMR/Ryx8CoXzhHFI6rcfLxJWm0qXaeGmwt5NEvrPuFtW5bPhTYsA6/Jn+9tXQof6LqFa7MXTivjpdq11u9w4VzYCGP7HVqJK+peUYGAAAAQOtQyAAAAABoHQoZAAAAAK1DIQMAAACgdShkAAAAALQOhQwAAACA1qGQAQAAANA6FDIAAAAAWodCBgAAAEDrUMgAAAAAaJ3eTB/NlrrdXGwvGScpus7n0MnXbqOt/Xy7khT50PGmZvo32pzv37ifb3ewJd9u5LumcWXsJEWl9K5Mi0EpjbTKvvC40G6hb91BYWJK6gwLbS/nB6SznM+j28+363G+3c7yKB2ryC+P3aPFCTTMD3alf/lGa8fd2SQ61nBLbuwra+hoKR873FxYm7fXxrLSduVPnaNNpTSA1jt2QT52tCl/3HWP5y8clg7lzw/Vc0l08jn3juTPaenrl2Qcz8gAAAAAaB0KGQAAAACtQyEDAAAAoHUoZAAAAAC0DoUMAAAAgNahkAEAAADQOhQyAAAAAFqHQgYAAABA61DIAAAAAGgdChkAAAAArdOb6aPZin7yIbtONxv9bimHdLudfKwkjZfyeYw252vI0aZ87GBrPufh5nzsYHthXxRCB9vysVUe52M7o3xsFPo32pSPdeRjK/qHa/NYhf22dCjfdnc538FKu51hvt1eZRoP8juicuxLkhta3zxITuTCOni2ia412JZbc0dL+f1YiR1uSYdqtKU2loPt+djhtvyxNbhwUMoDaLtjl+Xn/PLh/DreP5i/5hsWjv/+4XToisI1ybibzzl7vZW9BucZGQAAAACtc9pCxvYdtg/YfuyEbefbvs/2E5Ov5zWbJgAAAAD8ncwzMp+WdO1J226VdH9EXCHp/snPAAAAADATpy1kIuJBSc+ftPk6SXdOvr9T0runnBcAAAAArOlM3yNzcUTsk6TJ14u
mlxIAAAAAnFrjn1pme5ekXZK0uf+Kph8OAAAAwFngTJ+R2W/7EkmafD2wVmBE7I6InRGxc6nX4GftAgAAADhrnGkhc4+kGyff3yjpS9NJBwAAAABOL/Pxy5+T9HVJb7D9jO2bJH1M0tttPyHp7ZOfAQAAAGAmTvsemYi4YY3/euuUcwEAAACAlMbf7P8yXafCopd/1du4380/fif3+JI03FbbPdHLt718Tj7n4aZ8u8dfmY8dbE+Havm8cTp29IpROnbzecfySUgaDPL7bfzjTenYzvH8fovCCzKjH+lYn7ecju0vDdOxB5/fko6VpM6RfAc3PZ+P7b9Y2cf52N6x/D5WIdRL+b71kuvaSzrH88eIC7HZ9RWnFsn9OC6s+cOt+ccfnFNY888vTGpJwy35+M6F+fX5Da/+UTq2ljGwMf3SL+xLx/7Nj85Pxy738ufs8abKO0Rq5wfnL/vSa+ZKcLbNXNyZvkcGAAAAAOaGQgYA0Hq277B9wPZjJ2w73/Z9tp+YfD1vnjkCAKaLQgYAsAg+Lenak7bdKun+iLhC0v2TnwEAC4JCBgDQehHxoKTnT9p8naQ7J9/fKendM00KANAoChkAwKK6OCL2SdLk60VzzgcAMEUUMgCAs5rtXbb32N4zOP7ivNMBACRRyAAAFtV+25dI0uTrgdWCImJ3ROyMiJ39TdtmmiAA4MxRyAAAFtU9km6cfH+jpC/NMRcAwJRRyAAAWs/25yR9XdIbbD9j+yZJH5P0dttPSHr75GcAwIKo3boeAIANKCJuWOO/3jrTRAAAMzPTQiYsRb+bi+04324v/8RSdCvt5mMlabg5n8fy9nzbg2352GMXRL7dV47TsRe9/sfp2F9+1b507G9dfF86VpK+O8h/6NBtV/xiqe15+zfffTod+/r+qi/1X9V/2/e2Uh6P7H9NOvbIpu3p2PhR7tiXpM5y4TgtPa+cz6F3JH98jIf5406SvJxv25U1K/kke9SWtrNKdKTl5Jo72pRvd/kVhTV/e2UdH+WTkNQ/93g6dufP/SAde9OrH0zH/q5+OR0LbFQf+Ln/k479w8416djv9/P37T36zDnpWJ+bDl2JH1fOw/k1q7ucTSAXxkvLAAAAALQOhQwAAACA1qGQAQAAANA6FDIAAAAAWodCBgAAAEDrUMgAAAAAaB0KGQAAAACtQyEDAAAAoHUoZAAAAAC0DoUMAAAAgNbpzfbhLNm50E4yTqqVY4Vmo5KDpNFSPn5c2POjzfnYwbnjdOyW1xxOx/7Lyx5Lx/7HC76Tjv3nr/m1dOyi+6M3XF6Izsfe9fT/KuXxsa35Mbn7+JvSscMXt+Zjt+SPpc4gHar+kXxsFI7RqC0ViqX8ohWj/DFdTAPrFL2G1vwtkY711mG+YUmvPv9gOva1W3+cjv3lpXy7wCJ459ZD6dhHXvW9dOz+w7+Sjj2yKX9+iH43HStJo0352N7RfKyzKSeXQZ6RAQAAANA6FDIAAAAAWodCBgAAAEDrUMgAAAAAaB0KGQAAAACtQyEDAAAAoHUoZAAAAAC0DoUMAAAAgNahkAEAAADQOhQyAAAAAFqnN9uHCykiGet8qy7EdvKxyqY64WL8vI1G+Tr22Lifjj08PnYm6aAhfdf+XjGOwjFSmPTRKRwghWO6seOukm41h3FDbY9atghtQGEpu9y5sL89LszpYeGcdrybjpWk5VE+vt8ZpWMPjZl7OLv0nT+WBpGPXerljztvGaZj9ZPaWlHhfMpTxzMyAAAAAFqHQgYAAABA61DIAAAAAGgdChkAAAAArUMhAwAAAKB1KGQAAAAAtA6FDAAAAIDWoZABAAAA0DoUMgAAAABah0IGAAAAQOtQyAAAAABond7MH3EUqTBrnG7Sw3xspXZzMteftTzIx/eOOR0bhXKzfzAfvOyt6dj/vf0N6dgfLW9Px/7379+XjpWkbxy7PB37P97w86W25+2mv/qbdOyrez9Nx97yzNtKefzFgUvTsaO/zc+
h/pH8nO8eT4fKw3xs5RjtLBdiS2uQ5FEhPmrrENbHIfWO5WJHm/LtdgrztHJ+GC3X/h753Av59fnb2y5Lx96/9JNSHkDbfflIfgF44vBF6difvrgln8Thfjo0uvlmpdqa5cJpyqNaHqfDMzIAAAAAWue0hYztO2wfsP3YCds+avuHth+e/HtXs2kCAAAAwN/JPCPzaUnXrrL9kxGxY/Lv3ummBQAAAABrO20hExEPSnp+BrkAAAAAQMp63iNzs+1HJi89O2+tINu7bO+xvWcwPLKOhwMAAACAFWdayNwm6fWSdkjaJ+njawVGxO6I2BkRO/u9/CccAQAAAMBazqiQiYj9ETGKiLGkT0m6arppAQAAAMDazqiQsX3JCT9eL+mxtWIBAAAAYNpOe0NM25+TdI2kC2w/I+kjkq6xvUNSSHpK0vsbzBEAAAAA/p7TFjIRccMqm29vIBcAAAAASDltITNNDskRqdgYFdod5dr8WRJJncE4366k7nGnY3u9Qs4Fm5/L59A91k3H/mh0QTr2q0+v+SF2L/P/LvyFdKwkHT/WT8eO/mBTOrZzLL/fKsab8+P8Hx56czq2288fIIOf5veDJHUP5efF5h/nX53aP5zPYfNP8sdeZ5BvtzPIj0d3uXD815YKedzM8e9Rbl4UlsGzjsdS71huB427+XWjf7iw0yPf7nBL7RXiI+XXg0fHr0nHvnB8Szp2k55KxwIb1e//4G3p2GcPviIde/S5/Adj9Q/lj/+lF2rXOZU1q7BkTd16Pn4ZAAAAAOaCQgYA0GqT+5kdsP3YCds+avuHth+e/HvXPHMEAEwfhQwAoO0+LenaVbZ/MiJ2TP7dO+OcAAANo5ABALRaRDwo6fl55wEAmC0KGQDAorrZ9iOTl56t+SkktnfZ3mN7z+D4i7PMDwCwDhQyAIBFdJuk10vaIWmfpI+vFRgRuyNiZ0Ts7G/aNqv8AADrRCEDAFg4EbE/IkYRMZb0KUlXzTsnAMB0UcgAABaO7UtO+PF6SY+tFQsAaKeZ3hATAIBps/05SddIusD2M5I+Iuka2zskhaSnJL1/bgkCABpBIQMAaLWIuGGVzbfPPBEAwEzNtpCJkAejXGzX6WY7g3xsRKRjqy+8c6FpF/LoDPOJdIb5fdE/XIkt7IzIxw62n5tvV9LmQT52uDkf2zuWjy10T5HfxYrulkJwPnTL4XysJHWSh6gk9Y4U5nFh7HpH8+32jo3zsS/mO+dRpW+FnSZJ43zb6TUTU+FxqHc0O6fyi8FwVDhPddOh2vq3hUVG0mB7vvHBofwi+v0XL0zH/pKeSscCG9V3/+aS0wdNdF/IX25veSG/rnQL1y5LBwsXDpJ6R/OxnUG+7W4y1sllmPfIAAAAAGgdChkAAAAArUMhAwAAAKB1KGQAAAAAtA6FDAAAAIDWoZABAAAA0DoUMgAAAABah0IGAAAAQOtQyAAAAABoHQoZAAAAAK3Tm+mjRUjLg1So+4XUPMqHDvKxGkc+VpK7+brQ4246tnN8nI8d5tvtH3E6tnc0H+t8uhq9kG9XklwYkqiU6bWhThv387Gd3KFR1l1uqHOS+kcqczOfR2XOdwf5WFdih4W+LRfWFWllLczmUVmzsu0WHv9s5OTa338xPzadQX5BetW9T6Zjn/sXv5SOlaTei/nYTf38+nz8cGGxAxbA5qeX0rG9I/l2+4cK54fC6aF/pLbud4/n4yvXAtmcPco9Ps/IAAAAAGgdChkAAAAArUMhAwAAAKB1KGQAAAAAtA6FDAAAAIDWoZABAAAA0DoUMgAAAABah0IGAAAAQOtQyAAAAABoHQoZAAAAAK3Tm/UDejROxUUnFydJjjjTdE6pXOXZ+dDkfpCkTjefSXeQbzc6+Xz7h5upeSs5SFJUwpsq0wvTrdK/zjDfcGU/dEa148OF+OZiC8fH8ijf7iAfW1Fut9A/D4bp2PR8a2jNXAQehfqHc/t8XFibXZgih/7pL6Zjl17MzyVJGh/
LLx7jXn6edAvtAotgy/788dE7mm/XhUO6d6xwjB6vrRWVNat7tNZ2SrJrPCMDAAAAoHUoZAAAAAC0DoUMAAAAgNahkAEAAADQOhQyAAAAAFqHQgYAAABA61DIAAAAAGgdChkAAAAArUMhAwAAAKB1KGQAAAAAtE5v3gmsxcuDdGx0m6nH3EirKzrDcSE4HxrL3Xyw8z2MXjP7uDx2EfnQfqHtUb5d50MVhUnkQg4VLuyzctuVnMf5Oe/jo3y7hWF25bgbFWKr+7gSX4hNj0dzU6L9QvIgN/bd5fwc6SwXFoNo7tQ83JY/R3Tzp2F1l5lUOLts2184pxXOld3jzZwfKutVVafQdva6KHvtwjMyAAAAAFrntIWM7cttP2B7r+3Hbd8y2X6+7ftsPzH5el7z6QIAAABA7hmZoaQPRcQbJb1F0gdsXynpVkn3R8QVku6f/AwAAAAAjTttIRMR+yLiW5PvD0naK+lSSddJunMSdqekdzeVJAAAAACcqPQeGduvlfRmSQ9Jujgi9kkrxY6ki6adHAAAAACsJl3I2N4u6QuSPhgRBwu/t8v2Htt7lkdHziRHAAAAAPh7UoWM7b5WipjPRMQXJ5v3275k8v+XSDqw2u9GxO6I2BkRO5e6W6eRMwAAAICzXOZTyyzpdkl7I+ITJ/zXPZJunHx/o6QvTT89AAAAAHi5zF23rpb0PkmP2n54su3Dkj4m6fO2b5L0A0nvaSZFAAAAAPj7TlvIRMTXtPZN7t863XQAAAAA4PQyz8hMT4Q0GOZiO2vVTi/n8Tifg/Ptahz52KruKB9byNnDwr6IfP+i2823W1H63DxJneovJBX2RSm2km9lHldUmy2k7EEz81ijZuZxU7EeFvZDNY/Kfku32+Da1nYR6hzPnqcKB0uySUlyYXjG/dqa2D2eP7bGS82st8vX/qN0bFRO2Uv54HEvHzvcXEhCUnc5P4DjfuVaJ5/DuHDK7g7ysS5cF40K49EpHB+S1F3O74worKGV46P3Yn7dr+Rb4UG+XY9q674r58DKtUB23Ux2raGrQgAAAABoDoUMAKDVbF9u+wHbe20/bvuWyfbzbd9n+4nJ1/PmnSsAYHooZAAAbTeU9KGIeKOkt0j6gO0rJd0q6f6IuELS/ZOfAQALgkIGANBqEbEvIr41+f6QpL2SLpV0naQ7J2F3Snr3fDIEADSBQgYAsDBsv1bSmyU9JOniiNgnrRQ7ki6aX2YAgGmjkAEALATb2yV9QdIHI+Jg4fd22d5je89geKS5BAEAU0UhAwBoPdt9rRQxn4mIL04277d9yeT/L5F0YLXfjYjdEbEzInb2e1tnkzAAYN0oZAAArWbbkm6XtDciPnHCf90j6cbJ9zdK+tKscwMANGe2N8QEAGD6rpb0PkmP2n54su3Dkj4m6fO2b5L0A0nvmVN+AIAGUMgAAFotIr4maa3bd791lrkAAGZntoVMhDQc5mI7hVe9ea3z1zpjR+N8bJFHhTwK+yLGzeTs4SgfXBm7hvKVVBvrptqNaCa2so9HhbGTpG43H1vIuTSHKvuiKZUcKn2TanNoXGw7YwPs3o3KIXmQ3eeFsSmMeWeYXxcrS4EkRTf/C70X8+2O+/l2x0uFNaay3C43s+b3e7V2u8fz4zdeKuy3bjP98zi/IEQnn0P/UOH8UFyTOsv5fexR5dyaD/Uw325nuYF1XGruGkOSCpdnLlzLOVkHZNvkPTIAAAAAWodCBgAAAEDrUMgAAAAAaB0KGQAAAACtQyEDAAAAoHUoZAAAAAC0DoUMAAAAgNahkAEAAADQOhQyAAAAAFqHQgYAAABA6/Rm/ogRubjRKN9mt5uPHQ6baVeSxuN8bK+w68f5feHs/pXyYyFJnULN6/x+iI7z7UryuJBzRWVfbARRmMdVw8KxNyrM+eJYp1XmRFPHR+XYr2pkbrZsvgMAsIrZFzIAAGxUEfIoWegNCn9QcL6Qdzf/h6PoFV9YUai5XSjQu8uFdof5S4/
KH6/K+yKbQ3Y+TIyX8n8ErbTdLfxRo7TfCvOtqT/6Vf9I2Vku/MGtknPlOB02+AesrMIxWp3HUdlSnCCrAAANoklEQVQXlbWwV3yS4DR4aRkAAACA1qGQAQAAANA6FDIAAAAAWodCBgAAAEDrUMgAAAAAaB0KGQAAAACtQyEDAAAAoHUoZAAAAAC0DoUMAAAAgNahkAEAAADQOr2ZPlpIMRrnYjtON+vR6AwTOo1xMtczMRzmYzuFejOinktGdtyKObjXreUxLvSvMIdK/XOh3cp4dAvjXNkPVZWcK7HDwj6uzPkmj9Os4nEXlfgmxrrB6dN6IWlQWJ+zKuepyhpaWbskuVPIo7DWReGY7Rwb5HMotOtBQ9cCxeO7U4iPynmqENs5VpjDlTW0ofNfdGvXAk2NdfQLeTR0vVXqW2WtqpxXJbkyL5q69kzgGRkAAAAArUMhAwAAAKB1KGQAAAAAtA6FDAAAAIDWoZABAAAA0DoUMgAAAABah0IGAAAAQOtQyAAAAABoHQoZAAAAAK1DIQMAAACgdXqzfbiQYpwLHRVadUP1WMe1+HE0koa7hXZdyDmaybdR4+T8kaROYV6MChOuso8rOQwLOVT2Q5OamkOF8YhKDg0do+l17SWVNWvcxNxs4bE/KxHyYJiLrawFlRTU0DonScNm1o7Snuh1Cw0X8t0Ia4Ekjyp5NLSWV+dFVmWtG+Vj3SuOR/YYlUrHafrYl2rzrRJbOT4qinPNlWuSgsiOR3Kf8YwMAAAAgNY5bSFj+3LbD9jea/tx27dMtn/U9g9tPzz5967m0wUAAACA3EvLhpI+FBHfsn2OpG/avm/yf5+MiN9rLj0AAAAAeLnTFjIRsU/Svsn3h2zvlXRp04kBAAAAwFpK75Gx/VpJb5b00GTTzbYfsX2H7fPW+J1dtvfY3rM8PrauZAEAAABAKhQytrdL+oKkD0bEQUm3SXq9pB1aecbm46v9XkTsjoidEbFzqbN5CikDAAAAONulChnbfa0UMZ+JiC9KUkTsj4hRRIwlfUrSVc2lCQAAAAB/J/OpZZZ0u6S9EfGJE7ZfckLY9ZIem356AAAAAPBymU8tu1rS+yQ9avvhybYPS7rB9g6t3FntKUnvbyRDAAAAADhJ5lPLvqbVb9p77/TTAQAAAIDTyzwjs/GNR/lYr1aTrdVu6UPdpBgX8si3HRGFHAqxTRlvkHzHhfGoKOTsQmxlnF2Yx6X5U217VNjHleNjIygco+V5HIU1qyI7Hhtgmdi4Ir92VNa6bn4+ufKBopXzX1F0CufLisEwH9vt5mObOp9UrhuqeVTarqy3lXnR1DVGpzKPi+eHXmFeVFT6N6yc/wrn9+ODfLsVleNOKq1Zpf5l00g2WbxSBwAAAID5o5ABALSa7cttP2B7r+3Hbd8y2f5R2z+0/fDk37vmnSsAYHoW46VlAICz2VDShyLiW7bPkfRN2/dN/u+TEfF7c8wNANAQChkAQKtFxD6t3JhZEXHI9l5Jl843KwBA03hpGQBgYdh+raQ3S3posulm24/YvsP2eWv8zi7be2zvWR4dnVGmAID1opABACwE29slfUHSByPioKTbJL1e0g6tPGPz8dV+LyJ2R8TOiNi51N0ys3wBAOtDIQMAaD3bfa0UMZ+JiC9KUkTsj4hRRIwlfUrSVfPMEQAwXRQyAIBW88rNj26XtDciPnHC9ktOCLte0mOzzg0A0Bze7A8AaLurJb1P0qO2H55s+7CkG2zv0Mqt1Z6S9P75pAcAaAKFDACg1SLia5JWu0X6vbPOBQAwO7MtZELSaDzTh1w9iaRutelC2yrsh2GhXa92Lp8CN/QqxNGomXal5nIuiGimf9Fg36JyjERhHpeOj42g0Ldx2/qGtRxcPvDcl5/65PdX+a8LJD0363xmZJH7Ji12/xa5b9Ji92+R+yatv38/nwniGRkAACYi4sLVttveExE7Z53PLCxy36TF7t8i901a7P4tct+k2fVv/n+yBgA
AAIAiChkAAAAArUMhAwDA6e2edwINWuS+SYvdv0Xum7TY/Vvkvkkz6p9jhm/APbd3YfzqudfP7PHWrVus8yr7svKm/KbarWjqzeWVN4tXbYA3+zfWv43QN2mx3+xfOZbm/iEmNV//6d366fBHDS0WAADMxga5GgIAAACAPAoZAADWYPta29+1/aTtW+edz7TZfsr2o7Yftr1n3vmsh+07bB+w/dgJ2863fZ/tJyZfz5tnjuuxRv8+avuHk/F72Pa75pnjmbJ9ue0HbO+1/bjtWybbWz9+p+jboozdZtt/bvvbk/799mT762w/NBm7P7a91MTjU8gAALAK211JfyDpnZKulHSD7Svnm1Ujfj0idizAR8F+WtK1J227VdL9EXGFpPsnP7fVp/Xy/knSJyfjtyMi2noT2KGkD0XEGyW9RdIHJsfaIozfWn2TFmPsjkv6jYh4k6Qdkq61/RZJ/1Ur/btC0k8k3dTEg1PIAACwuqskPRkR34uIZUl3SbpuzjlhDRHxoKTnT9p8naQ7J9/fKendM01qitbo30KIiH0R8a3J94ck7ZV0qRZg/E7Rt4UQKw5PfuxP/oWk35D0J5PtjY0dhQwAAKu7VNLTJ/z8jBboAmQiJH3V9jdt75p3Mg24OCL2SSsXlJIumnM+TbjZ9iOTl5617qVXJ7P9WklvlvSQFmz8TuqbtCBjZ7tr+2FJByTdJ+mvJb0QEcNJSGNrZ6+JRtdycPTcc195/lPfP2nzBZKem2UeM7bI/VvkvkmL3b9F7pu02P2bRt9+fhqJnAVW+2S3ln383mldHRHP2r5I0n22vzP5yz/a4TZJv6OVefk7kj4u6d/NNaN1sL1d0hckfTAiDrqpT2Kdg1X6tjBjFxEjSTtsv1LS3ZLeuFpYE48900ImIi48eZvtPQvwutw1LXL/Frlv0mL3b5H7Ji12/xa5bxvQM5IuP+HnyyQ9O6dcGhERz06+HrB9t1ZeTrdIhcx+25dExD7bl2jlL8YLIyL2v/S97U9J+tM5prMutvtaudD/TER8cbJ5IcZvtb4t0ti9JCJesP1nWnkv0Ctt9ybPyjS2dvLSMgAAVvcNSVdMPn1nSdJ7Jd0z55ymxvY22+e89L2kd0h67NS/1Tr3SLpx8v2Nkr40x1ymbnJx/5Lr1dLx88pTL7dL2hsRnzjhv1o/fmv1bYHG7sLJMzGyvUXS27TyPqAHJP3mJKyxsZvpMzIAALRFRAxt3yzpK5K6ku6IiMfnnNY0XSzp7snLd3qSPhsRX55vSmfO9uckXSPpAtvPSPqIpI9J+rztmyT9QNJ75pfh+qzRv2ts79DKy3aekvT+uSW4PldLep+kRyfvtZCkD2sxxm+tvt2wIGN3iaQ7J5/y2JH0+Yj4U9t/Keku2/9Z0l9opZibOsec77Zte1dE7J5rEg1a5P4tct+kxe7fIvdNWuz+LXLfAAComHshAwAAAABVvEcGAAAAQOvMtZCxfa3t79p+0nYb79Z6Srafsv2o7Ydt75l3Pusx+YzzA7YfO2Hb+bbvs/3E5GubPwN9tf591PYPJ+P3sO13zTPHM2X7ctsP2N5r+3Hbt0y2t378TtG3RRm7zbb/3Pa3J/377cn219l+aDJ2fzx5IzoAAGeVub20bPKmoL+S9HatfMTlNyTdEBF/OZeEGmD7KUk7I6L197Ow/c8kHZb0RxHxDybbflfS8xHxsUkhel5E/Pt55nmm1ujfRyUdjojfm2du6zX5ZJRLIuJbk08o+qZW7rD7b9Xy8TtF3/6VFmPsLGlbRByefHzn1yTdIum3JH0xIu6y/YeSvh0Rt80zVwAAZm2ez8hcJenJiPheRCxLukvSdXPMB6cwuUHa8ydtvk7SnZPv79TKBWQrrdG/hRAR+yLiW5PvD2nlYxEv1QKM3yn6thBixeHJj/3Jv5D0G5L+ZLK9lWMHAMB6zbOQuVTS0yf8/IwW6AJkIiR91fY3be+adzINuDgi9kkrF5SSLppzPk242fY
jk5eete6lVyez/VpJb5b0kBZs/E7qm7QgY2e7O/nIzgOS7pP015JemNxkTFrMtRMAgNOaZyHjVbYt2keoXR0R/1DSOyV9YPLyJbTHbZJeL2mHpH2SPj7fdNbH9nat3Fn4gxFxcN75TNMqfVuYsYuIUUTs0Mqdka+S9MbVwmabFQAA8zfPQuYZSZef8PNlkp6dUy6NiIhnJ18PSLpbKxchi2T/S3emnXw9MOd8pioi9k8uIseSPqUWj9/k/RVfkPSZiPjiZPNCjN9qfVuksXtJRLwg6c8kvUXSK22/dEPjhVs7AQDImGch8w1JV0w+fWdJ0nsl3TPHfKbK9rbJm49le5ukd0h67NS/1Tr3SLpx8v2Nkr40x1ym7qWL/Inr1dLxm7xh/HZJeyPiEyf8V+vHb62+LdDYXWj7lZPvt0h6m1beB/SApN+chLVy7AAAWK+53hBz8pGovy+pK+mOiPgvc0tmymz/glaehZGknqTPtrl/tj8n6RpJF0jaL+kjkv6npM9L+jlJP5D0noho5Rvm1+jfNVp5aVJIekrS+196T0mb2P4nkv6vpEcljSebP6yV95K0evxO0bcbtBhj9ytaeTN/Vyt/ePp8RPynyfpyl6TzJf2FpH8dEcfnlykAALM310IGAAAAAM7EXG+ICQAAAABngkIGAAAAQOtQyAAAAABoHQoZAAAAAK1DIQMAAACgdShkAAAAALQOhQwAAACA1qGQAQAAANA6/x+JzNlx5TgIKwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "mean_inner_ET=np.mean(ET_inner_dict[0], axis=0, keepdims=True).reshape(32,32)\n", + "plt.subplot(1,2,1)\n", + "plt.imshow(mean_inner_ET[2:mean_inner_ET.shape[0]-2,0:mean_inner_ET.shape[0]])\n", + "plt.subplot(1,2,2)\n", + "mean_outer_ET=np.mean(ET_outer_dict[0], axis=0, keepdims=True).reshape(32,32)\n", + "plt.imshow(mean_outer_ET[3:mean_outer_ET.shape[0]-3,0:mean_outer_ET.shape[0]])\n", + "fig = plt.gcf()\n", + "fig.set_size_inches(14,6)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "ET_inner_dict, ET_outer_dict = ones_to_zeros(ET_inner_dict, ET_outer_dict)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "#cutting out useless pixels\n", + "for i in range(len(ET_inner_dict)):\n", + " ET_inner_dict[i]=ET_inner_dict[i][:,2:ET_inner_dict[i].shape[1]-2,:]\n", + " ET_outer_dict[i]=ET_outer_dict[i][:,3:ET_outer_dict[i].shape[1]-3,:]\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "((25000, 28, 32), (25000, 26, 32))\n" + ] + } + ], + "source": [ + "#Check cut dict dimensions\n", + "print(ET_inner_dict[0].shape, ET_outer_dict[0].shape)\n", + "#is\n", + "#(?, 28, 32), (?,26, 32)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def delete_undetected_events(X_inner, X_outer):\n", + "\n", + " pos_rejected={}\n", + " \n", + " for i in range(len(X_inner)):\n", + " pos_rejected[i]=[]\n", + " \n", + " for j in range(X_inner[i].shape[0]):\n", + " \n", + " equal_inner = np.array_equal(X_inner[i][j],np.zeros_like(X_inner[i][j]))\n", + " equal_outer = np.array_equal(X_outer[i][j],np.zeros_like(X_outer[i][j]))\n", + " \n", + " \n", + " 
X_inner_filtered={}\n", + " X_outer_filtered={}\n", + " for i in range(len(X_inner)):\n", + " \n", + " X_inner_filtered[i]=np.delete(X_inner[i],pos_rejected[i],axis=0)\n", + " X_outer_filtered[i]=np.delete(X_outer[i],pos_rejected[i],axis=0)\n", + " \n", + " return X_inner_filtered, X_outer_filtered" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "ET_inner_filtered, ET_outer_filtered = delete_undetected_events(ET_inner_dict,ET_outer_dict)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAACoCAYAAADw6BWzAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAE/JJREFUeJzt3X2MHdV5x/HvYxMDbY28bjC2eXNDkWWoEldC4EKoDJSK8g9ENFKo2rgSkoMUpFJFSkwqNRSpktuQhj9aRSUE4UptUvqSgirU1DK2aCU7iaEuceo6xpYhBmOT2FaNZLMYP/3jzka7M2f3njtz5s7L/j7SaneOZ848e/f6eHyefc4xd0dERLpvQdMBiIhIGhrQRUR6QgO6iEhPaEAXEekJDegiIj2hAV1EpCc0oIuI9IQGdBGRnqg0oJvZXWa238xeM7NNqYISEZHRWdlKUTNbCPwIuBM4AnwfuN/d/yddeCIiEuuCCtfeCLzm7ocAzOxbwD3ArAO6mWmdAamVu1vTMYg0pcqAfjnw42nHR4CbRg7ggmII586dq+261PJxNBFDSMzrU+U1rPv1D/Vf5/1E+qDKgB56Eio8gZvZRmBjhfuIiEiEKknRI8CV046vAN7Kn+TuT7r7De5+Q4V7iYyVEv7SRVWSohcwSIreAbzJICn6O+7+wzmu8VT/le7SlEvM9xy6tux1s2nDNEbKn1votU8xh14m4R+bHzIbHl6Fv5PJ+gr1F+rrkksuKbSdPn16aBypY61bzGtR9/1i3tulp1zc/ZyZPQR8B1gIPD3XYC7SISMn/GMtWrRo6Dnvvfdesr7L9hXqL9TXzTffXGjbvn370DhSx1q3mNeizvtNTk5GXVdlDh13fwF4oUofIi2UJOEvMm6VBnSRnlLCXzpJA7pIUXTCH3gSVGMh7VA6KVrqZiXf9CmTg3UnGlP+HncTvycee11b6wcSJUVHTvgvWLDAxz3PmnfhhReWvjYU6+rVq2ccHz58OOq6mDiqvDah/ut+rfP3bGK+v9akqEhfKeEvXaUBXSRACX/pIi2fKyLSE514Qk85b5y6cKbsvHdMHE2sqxJ7Xcy8euz3HZvXiOlLZD7rRFI0VhuqR5uIoQ3fdyiOlBWsIaH+m1ptsex7OzaRGZN8jE1Q1p3ILJu0TJnsrDtxWuV1zZ8X21fMe1tTLiIiPaEBXUSkJzSgi4j0xNjn0Mus+Fel
2KXsdXXPQS9evLjQFlqlLm/lypWFtuPHj5eKoS2bWaSMoU1z6OOex01d7JLvf9WqVVHX7d+/v9C2fPnyGcdvv/126bhC2lD4E5IqnzA5Ocn58+c1hy4iMl9oQBcR6QkN6CIiPVGpsMjMDgOngQ+Ac9pmTkSkOZWSotmAfoO7/yTy/EJStK3VfrFJuImJiRnH69atK5yza9euqL7ySdFQDKG2s2fPFtpCLrrooqExhHSpurNNSdGymiiKiRGK4Yknnii0bd68udB29OjRGcf59+Js/bdhZcXQPZuIS4VFIiLzSNUB3YF/N7OXs91bRESkIVUX57rF3d8ys2XAVjP7X3d/afoJ2qZLRGQ8qm4S/Vb2+biZfZvBbukv5c7RNl3SOUr4SxeVToqa2c8DC9z9dPb1VuAxd/+3Oa6pNSnaRKIuX/F51VVXFc65/vrrC22HDh0qtB08eHBoXGfOnBk1xDn7Kytl9WiqCt9z584lS4qOmvAPbUEXI+UKiWVXURwltrLycVx88cWFc06dOlXb/UZRdmXIsn3FqnsLusuAb5vZVD9/N9dgLiIi9So9oLv7IeBjCWMRaZOphL8Df51NHYq0Wid2LBJpgBL+0jmtXG0x5Tx7ynnd0Lxffk67ShFO/rxQwVBsXylXoyy7MmSoeCT2nmXOgXoKi8zsUeBdd398tnM0hz43zaFXV/ccukgvBRL+vwk8Ntc17l74y7tkyZLCeflBq0rFYcx5bVlGNh9H3XFV6T/mZ5Ky/5h/iCcnJ6P61oAuUqSEv3SSBnSRHCX8pau0louISE+MPSmabytboBK67tZbby20bd++feh1bZFPPoYSj1W20Cv7vceu8Jg/b+3atYVz9uzZU2gr+/Nu+xZ0McomQGO1ZQ59Pki13dxs12m1RRGReUQDuohIT2hAFxHpCQ3oIiI9MfYMYZkq0CqJzFQr+UG4uu39998v1X9M9WXKxGasBx98sNCWTywDvPHGG4W2fPyhBOh8UqVaMYYSnuMTUwwU+/OI6assPaGLiPSEBnQRkZ7QgC4i0hNDB3Qze9rMjpvZ3mltS81sq5kdyD5P1BumiIgMM7RS1Mx+HXgX+Bt3/5Ws7c+BE+6+2cw2ARPu/oWhN4uspiubyIzpq8pSvKHt5U6ePDnn/QCWLVtWaDtx4kSh7Z133hkaQ2hJ2rLL7IZei1D/ofNWrlxZaMsnSqtsUzcxMfMZIf86h/pPuQXdqJrYLzcm6arEaRopE5kxP7dVq1bNOD58+DBnz56tXimaLeqfH33uAbZkX28B7h0aoYiI1KrsHPpl7n4UIPtcfAQVEZGxqj0pamYbzWy3me2u+14io1B+SPqm7IB+zMxWAGSfj892ors/6e43uPsNJe8lUpdngLtybZuAbe5+LbAtOxbphKjlc81sFfCv05KiXwZ+Oi0putTdPx/Rz9gTRzFCVZuxCb38nqJ1C1WrxiYyQ4nFvCqVqPl7puwr1qhJ0cB7ez+w3t2PZg8rO9x9dUQ/tS6fK81KmYAum2BNsnyumX0T2AmsNrMjZvYAsBm408wOAHdmxyJ9oPyQdNbQRyh3v3+WP7ojcSwinWJmG4GNTcchMkWVoiIzKT8kndXe/diGSLntWuw8eKiYJqb/mLnrWKFt6W677bZC286dOwttZYuNLr300kJbKO8QU1gUKoIKSVkQNqLngQ0MphE3AM+V7ajsvGvdW5nJ3GJex9iVNNevX19oW758+YzjHTt2FM7JjxmTk5NR99MTusxbyg9J33T2CV2kKuWHpG/0hC4i0hMa0EVEeiKqsCjZzUoWX9RdoBLbfygp+vrrr884Nmtksb8oKbfju+aaawptBw8eHNp/7DZ++eRpbOxtX22xbFFJ2e3slBQdXd1J6bLJ8iSFRSIi0g0a0EVEekIDuohIT2hAFxHpicZ/Dz2m4jM2UVe2mjCUqAslQENtoWvbKl/dGao6jX1d9+/fP/R+jzzySKHtqaeeKrSFKnUbrBStlSo+
2y9lInPcP0s9oYuI9IQGdBGRnohZDz20TdejZvamme3JPu6uN0wRERkm5gn9GYrbdAF81d3XZh8vpA1LRERGVXYLukeBd9398ZFuVnOlaEzyNHROaAu3LVu2FNq2b99eaAsl+WKkTPKFlrINtb344oszjteuXVtrXLGqLLOb1/ZKUWm/JUuWFNpOnTo19LrQ36c9e/YkiQnqrxR9yMxezaZktDO6iEjDyg7oXwOuAdYCR4GvzHaimW00s91mtrvkvUREJEKpAd3dj7n7B+5+Hvg6cOMc52qbLmklJfylb0oN6FN7LmY+Aeyd7VyRFnsGJfylR4YmRbNtutYDHwaOAV/KjtcCDhwGPuPuR4feLGFSNDZ5F5MUjU26hpaMzS+fGxK6Z2gf0FDSNaavKknjGGX3b607wRq636hJ0XEn/MtWHEq7hH6OoarxVatWFdr27ds34zj25x3z3h46EsyyTdc3oiIQ6aaHzOzTwG7gc+6ebpdvkRqpUlRkJiX8pbM0oItMo4S/dFlnV1ss21fsfPPERPFX63fu3Floy6/AGFsQs2vXrkJbytUQQ4VF+VUNq8xxl90WsOx8f8rVNediZium5YMqJfw1X95NZVdIjCk+AlizZs2M41DxUT6GycnJqL4bH9BFmjI94W9mR8gS/mY2I+HfWIAiI9KALvOWEv7SN5pDFxHpCQ3oIiI90fiUS8oClbJJsmXLlhXaYldgzF978mTxV5ZDyc2YhGeVZHBsQrWsmNhCr2HZVRTn0xZ00qyyW9CFxKy2uHz58kLbjh07Zhzfd999UffTE7qISE9oQBcR6QkN6CIiPaEBXUSkJ6K2oEt2s8CKdDGJuroTYqHk3dKlSwttJ06cKLRdffXVM44PHjxYOCd2hcSUqyGGpKy+DL1mMX3V+T2WWW0xlQULFviiRYtmtI07AVq2wlHSiH398+elXG1RT+giIj2hAV1EpCeGDuhmdqWZbTezfWb2QzP7g6x9qZltNbMD2WdtFC0i0qCYJ/RzDBb5XwOsAz5rZtcBm4Bt7n4tsC07FhGRhoycFDWz54C/zD7Wu/vRbI/RHe6+esi1nmqbsrq3XSsr5XZ5IU1UTJbdgi60hG+ogrXstnqh65pKipbdXjGWluIdn5jkZmzlaMzPJKavyclJzp8/nzYpmu2/+KvAd4HLptaNzj4X6+dFRGRsohf3MLNfAP4JeNjd/88s7kHIzDYCG8uFJyIisaKe0M3sQwwG879193/Omo9lUy1kn4+HrtU2XdJWSvhL3wydQ7fBo/gW4IS7Pzyt/cvAT919s5ltApa6++eH9BU1h55qnj3UV2huNnYFwHFtgzbq/crGNe7vJ7XQ+2SUOfTsQWSFu79iZouBl4F7gd9n8H6fem9PuPsX5uorVFgUEjMXGzsXPu559boLl8oW5qSOI6RsMVBKqQqLbgF+D7jdzPZkH3cDm4E7zewAcGd2LNIZ7n7U3V/Jvj4N7AMuB+5h8BBD9vneZiIUGc3QOXR3/09gtn8Z7kgbjkgz5kr4m5kS/tIJjW9wIdI0JfylL1T6L/NaqoR/7D8CInUa+xN6TNItZWIu31fqlQ/LbutW9nUY9zZys90zZdI15fc0iizh/w1gn7v/xbQ/eh7YwCAvtAF4rkz/KVfaS3W/KveskqwtW5jThLKvWcrr8iYnJ4eeA5pykfltKuH/AzOb2vzxiwwG8mfN7AHgDeCTDcUnMhIN6DJvKeEvfaM5dBGRntCALiLSE41vQVdW3Ym0ssnT2ERgaAu32IrVOtVdPVr3dnltX22xDRWHZbWhQjP2nlWqTkPqTIrG0hZ0IiLziAZ0EZGe0IAuItITGtBFRHpi7EnROrdZS9l3nRWgVaRcUrfu5Xljpey/7UnRvJRJuTYbdzK4iQRu3ZQUFRGZRzSgi4j0xNABfY5tuh41szdzm16IiEhDYiaKzwGfm75Nl5ltzf7sq+7+eH3hifRbl+Z1U85Lp84dxMzRt2ULvVTXhcTs
WHQUmNq95bSZTW3TVcq496wse7+2JDdjzolN4MacNzFR3A/5zJkzUf13aT9SkT4aaQ49t00XwENm9qqZPa2d0UVEmhU9oOe36QK+BlwDrGXwBP+VWa7baGa7zWx3gnhFRGQWUQN6aJsudz/m7h+4+3ng68CNoWunb9OVKmiRFJTwl74ZOqk62zZdZrZiamd04BPA3npCjJ9vjplfbsvceBtiDfV/8uTJQtvixYsLbSlXhmxw7r3xhH8fC2BiVNnOLqa/Kq9r2S30Un9PZcRk02bbput+M1sLOHAY+EwtEYrUJHXCX6RpMb/lMts2XS+kD0ekGbmE/y0MEv6fBnYzeIov/tdFpGVUKSrznhL+0hca0GVeU8Jf+qTx1RZjEmJlty0LXVslAZdyBcMmjDvpGhPDbMax2mKW8N8CnHD3h6e1/yzhb2Z/CNzk7p8a0tc7wOvAh4GflIm9Jbocf5djh7njv9rdLx3WQb0bc4q0W7KE/9RfNjPb3eUn9i7H3+XYIU38GtBl3lLCX/pGc+giIj2hAV0krSebDqCiLsff5dghQfzjTooqcdS8LscOCRJHIn011gH9ZzdV8qIxXY4duh+/SJ005SKSgJndZWb7zew1M9vUdDzDZEteHzezvdPalprZVjM7kH1u5ZLYcyyq1pX4LzKz75nZf2fx/0nW/ktm9t0s/r83s0Wj9q0BXaQiM1sI/BXwW8B1DH7t8bpmoxrqGeCuXNsmYJu7Xwtsy47baGpRtTXAOuCz2evdlfjfA253948xqEa+y8zWAX/GYFG4a4GTwAOjdtzUgD7vkxcN6nLs0M74bwRec/dD7j4JfAu4p+GY5uTuLwEncs33MCi0Ivt871iDiuTuR939lezr08DUompdid/d/d3s8EPZhwO3A/+YtZeKv5EB3d3b+JcyWpfj73Ls0Nr4Lwd+PO34CN1ctfGyqQrZ7POyhuMZKreoWmfiN7OFWTHbcWArcBA45e5TJdKl3kOachGpLlScNP7fNphnAouqdUa2VtBa4AoG/8NbEzpt1H7HPqAreTQ+XU4e1Zk4qsER4Mppx1cAbzUUSxXHzGwFDNazYfD02EqhRdXoUPxT3P0UsINBLmCJmU1V75d6D411QFfyaOy6nDyqLXFUg+8D12b/2CwCPgU833BMZTwPbMi+3gA812Ass5ptFzW6E/+lZrYk+/pi4DcY5AG2A7+dnVYufncf2wfwa8B3ph0/AjwyzhhKxr0K2DvteD+wIvt6BbC/6Rgjv4/ngDu7Fj/wc8ArwE0MioouCL2fGo7xbuBHDOZC/6jpeCLi/SaDtd7fZ/A/jAeAX2TwD/yB7PPSpuOcJfaPM5iOeBXYk33c3aH4Pwr8Vxb/XuCPs/aPAN8DXgP+Abhw1L7HvThXKHl005hjSGFG8sXMWpt8mTJX8qit8Wf/o3sZ+GUG/7NLkjiqg7u/QIcW9XL3+2f5ozvGGkgJPvuiatCN+F9l8Hcx336IWdbejzXuOXQljxrQ1eSR15Q4EumrcQ/oSh6NWR+SR544cSTSV+Me0JU8GqMuJ49qTRyJ9NTYF+cys7uBJ4CFwNPu/qdjDWBEZvZNYD2DVf6OAV8C/gV4FrgKeAP4pLvnq+4aZ2YfB/4D+AFwPmv+IoN59FbHb2YfZVAtt5DBg8ez7v6YmX2EQSXmUgaJpd919/eai1SkPRpZbVFERNJTpaiISE9oQBcR6QkN6CIiPaEBXUSkJzSgi4j0hAZ0EZGe0IAuItITGtBFRHri/wGpS3uI0kmmbQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plt.subplot(1,2,1)\n", + "plt.imshow(ET_inner_filtered[0][1].reshape(28,32),cmap='gray')\n", + "plt.subplot(1,2,2)\n", + "plt.imshow(ET_outer_filtered[0][1].reshape(26,32),cmap='gray')" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAATEAAAD8CAYAAAAfZJO2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAFWFJREFUeJzt3WuMHXd5x/Hvz+sbuTVxyMXEAQcw1JFKHGSFRKkQxKSkQAkvQsVFlV9E9RuqhpYKklZtQWolIlVAKyEkqwHyAkhSIHWIEDQyiRBVMXFIQi5OYhMSYpLYYGJILMX27j59sWPYmXP2zMyZObPzt38fydqdOXN5zjl7Hs8++8z/r4jAzCxVSxY7ADOzJpzEzCxpTmJmljQnMTNLmpOYmSXNSczMkuYkZmZJcxIzs6Q1SmKSrpL0uKQ9kq5vKygzs6o0bse+pCngCeBKYC9wL/DBiHh0oX2Wa0Ws5OSxzmdmJ46XOcSROKwq2y5tcJ5LgD0R8SSApFuAq4EFk9hKTuYt2tTglBWo5Hn7NqvfK3utoJ3Xq4v3pMpzaRpDH55HKnFWMSKOHbG98mGa/Dp5HvDMvOW92bocSVsk7ZS08yiHG5zOzGxQkyQ2LBUPpNaI2BoRGyNi4zJWNDidmdmgJklsL3D+vOU1wLPNwjEzq6dJErsXWCfpAknLgQ8Ad7QTlplZNWMX9iNiWtJfAd8FpoAvRsQjrUVmZlZBk79OEhHfBr7dUixmZrU1SmK95BaK9Ez4z/WtxVB2jjaO0cbPb9M4q8TQxmvREt92ZGZJcxIzs6Q5iZlZ0pzEzCxpTmJmljQnMTNLmpOYmSXNSczMknb8Nbv2oXEyFX15nn2Ioy/jdHWhizg7fC18JWZmSXMSM7OkOYmZWdKcxMwsaU5iZpY0JzEzS5qTmJklLb0+seOlV6cPupp3sov3JJWfiy7mx2waQ19eq4p8JWZmSXMSM7OkOYmZWdKcxMwsaU5iZpY0JzEzS5qTmJklzUnMzJLWfbPrqEa7VJrsupgtOpWB67qaOXvS52hj1usqejRz9kQ1buqtvqmvxMwsaU5iZpY0JzEzS5qTmJklzUnMzJLmJGZmSStNYpK+KGm/pIfnrVsl6S5Ju7OvZ0w2TDOz4apciX0ZuKqw7npge0SsA7Zny81J5f8iRv8r278rTZ9HH2Ks2gPW9Ln04bXoi6Y/3228Z108jxZjKE1iEfF94NeF1VcDN2ff3wy8r9ZZzcxaMm5N7JyIeA4g+3r2QhtK2iJpp6SdRzk85unMzIabeGE/IrZGxMaI2LiMFZM+nZmdYMZNYvskrQbIvu5vLyQzs+rGTWJ3AJuz7zcD29oJx8ysniotFl8D/g94o6S9kq4FPg1cKWk3cGW2bGbWudKheCLigws8tKnlWMzMaut+PLFJ9/10NU
ZWwzi0bHl+86NHSg9Z3GfglIVjlJ6jwmtVdoxxnkdtLcQ5iZ+7uu9HtYO2MO5ZChP0lsZQfVPfdmRmSXMSM7OkOYmZWdK6r4kZ0E7taCL1pw7OUbeOVlZ7GsfUmatyyzMHinfWlevi9bdyvhIzs6Q5iZlZ0pzEzCxpTmJmljRPnjtMF81+Y5yjbgG8L4X/xYireM6pM/ODD8+++NLI7aF+nK08z1Qm6O3D5M8ZX4mZWdKcxMwsaU5iZpY0N7sukoncPFxyjjZuMi+qcsxJ1Jbq3pg+/fy+2uewNPhKzMyS5iRmZklzEjOzpPVrUMSqE3+moCTOSdS82ti+7qCHVWpLS04+Obc8e+hQrRjG2aaLwRvHuTG9cRwdDNjZxjG0tDy1LHnd2oX3f+oHlUPxlZiZJc1JzMyS5iRmZklzn1jCmtZ12qjpVHm8rAZW9xxQ/7m3USOrWx+s8jzcr9acr8TMLGlOYmaWNCcxM0taejWxHo1j1ETd+sqwbeqeQ8uX5Zbr1qrGiaGKcV6Lpscc53nU7U2rYuC59WDy3CUrVzbaH2D25ZdLt5l54qcLPhazhyufy1diZpY0JzEzS5qTmJklzUnMzJKWXmH/BNHGAIZdFLP7MvBi3X3GGXixONnIzIEXRh5jrKbdNm7wPsH4SszMklaaxCSdL+luSbskPSLpumz9Kkl3SdqdfT2j7FhmZm2rciU2DXwsItYDlwIfkXQhcD2wPSLWAduzZTOzTpXWxCLiOeC57PsXJe0CzgOuBt6WbXYzcA/wiUbRtNHIl0gzbBv1k6aNp8XBCgG0+uz8OZ7bn19ef0F+hwefKD1P05ucJzGYY3F52GtRVFYDK4tpWFyLcgP4kqnGh5g9XL0ZdUEtfRZr1cQkrQUuBnYA52QJ7liiO3vhPc3MJqNyEpN0CvAN4KMR8dsa+22RtFPSzqO0kL3NzOaplMQkLWMugX0lIr6Zrd4naXX2+Gpg/7B9I2JrRGyMiI3LWNFGzGZmv1NaE5Mk4CZgV0R8Zt5DdwCbgU9nX7dNJMITRBs3PZcZqMesHqwAxEn5/2ge+4/1ueX1//ab/A6nnZJbnDnw68FjTmDSjqbHLG5f5Wb4Yt1sEv1rVl+VZtfLgb8AHpL0QLbu75lLXrdJuhb4OfD+yYRoZrawKn+d/AGw0J/8NrUbjplZPe7YN7OkpXfvZE/6vNrWRp2o7jFmn/7FwLqpc/N1sjs3fSm3/Hd//Y78MSrUkqbOXFWIMz84Y7H/qmjY8ygec/a3L5XGMUqVGmTxuda9d3XYPgPblPVwzc6MfhxKPyNLViwb+XiVAQ37xFdiZpY0JzEzS5qTmJklLb2a2HGqjclb6/ZODavpzDyf71n+2DV/md+Hn+WWB3qnjhwdOOZvNr0ht3zq7T8e2KZunMUaWNP+qyr1qyr71D3PwHs0U6HmZTm+EjOzpDmJmVnSnMTMLGlOYmaWtBOvsF9lIoZEGmobDza4fLDp8cBtr8otH/rBabnl8x8cLNyXOeW2H+aW6766Swo3mQNwxh/kFt/3rR255ds35gdvHPYHh/mqNLuWbTPODOu1/zjQwkQipc2siX1GfCVmZklzEjOzpDmJmVnSTryaWE9VqW/VHTixdGDAlYMj7a765/y6Fa/ON1+OU/dpathAixTWbXv7H+WW40j9m8rLPPnpy3LLb/j8M7nlYqPwsPfHgyK2z1diZpY0JzEzS5qTmJklLb2aWNPJcduYoLcNhTjGuQG87PHSQRGHDSRYmAz31EOvyS1/e/f/5pbf+aoNI8/RlboDKw68lhflb1IHBl6L1/3jffljFvrXqtQLS+uY0yV9eG30Z7UxwXQbn5HFmDzXzKxvnMTMLGlOYmaWtPRqYiewNgY9bOrdb35nbnnpufnHp5/f1/o5X9h82cC6s743ukerTHFClNkj0wPbFCs2xZrX0P61+duP0yfWRT32OOMrMTNLmpOYmSXNSczMktavmlgX9YCuxkrq4LmU1VfKHi
9OQAuDE9vG/gO55ZmSSWrbqMMVxw8r1r8AOJrvpyq9T7QQ10ANbUhNre4x6+4/MQ37wIqTvww9xbLRqWP2pQpjqY2aFKXGR9BXYmaWNCcxM0uak5iZJc1JzMyS1q/Cvv1OlQJ53cJzcfuyZk2AqfXrcsvF//WKfwiYWXPW4DH2HcwtTz+zd+Q54+XD+WMOibP43EobfwuNqsWJQ5a8Pn+jO8DMrt2j4yx5/Zeee87AumIz8EDcZTeA2wBfiZlZ0kqTmKSVkn4k6UFJj0j6VLb+Akk7JO2WdKuk9u9xMTMrUeVK7DBwRURcBGwArpJ0KXAj8NmIWAe8AFw7uTDNzIYrrYlFRADHOhyXZf8CuAL4ULb+ZuCTwBcaRdOXAd/aUHKeshrOsHpLWR3o0J9dnFs++Vv31zrnsG0oNLsW61XFZR4cMjjhmWfUiqNYr6pyI3XZa/P031yUW16zPd+MueSJIQ21Jcqex7Cb4ZtOeNxFE3WlyV/68jmjYk1M0pSkB4D9wF3AT4GDEXHs1v+9wHmTCdHMbGGVklhEzETEBmANcAmwfthmw/aVtEXSTkk7j3J42CZmZmOr9dfJiDgI3ANcCpwu6divo2uAZxfYZ2tEbIyIjcsYnOfQzKyJ0pqYpLOAoxFxUNIrgHcwV9S/G7gGuAXYDGybZKDHm7oDHFbZp+72VXrRhk4mUlNxEo+pQo2s+PgkBnd89Y0788csDnBYYVKPo2/NT9C77PsPNY7LmqvS7LoauFnSFHNXbrdFxJ2SHgVukfQvwP3ATROM08xsqCp/nfwJcPGQ9U8yVx8zM1s07tg3s6Qdf/dOdtGf0kKvjpYuG/n4sIHpiv1TRcW+sOIrMc4EvcUBCos1snHqV7MvNj9GXcVzFJ8XQ2pixX2W//Cx/AbLR7+HVeKYiA57tBZU4TOi5SPe58PVP2O+EjOzpDmJmVnSnMTMLGnHX00sUVXunSxTtw+sSi9acSyv4uQi0284P7f8qzedNHDM0/fkj7nykfx4YlqZb4Ku0pvW9B7E4vOqcn9m2T2F49TyGt9Lab4SM7O0OYmZWdKcxMwsaU5iZpa0fhX2u5gBvEojYI8GfKujjaJw6YQbhYlBXrzgFbnlc7c9OSSufJPudElRvXhzdpVB+orNwWWNweM0/pYdY5zX/7gt5Ff4jMThEUNz1fiM+UrMzJLmJGZmSXMSM7Ok9asmdgIbZxKPtgdJXOi88xUnvzjtq/nlaQYVG2TLzrnkNfnpGvT0L0buD4M1sKYTC0+Km1vb5ysxM0uak5iZJc1JzMyS1q+aWBeT57ahhXN00VNUtn0XfWUweLN1mZk9Pxt5joXOU8c4fWJ162wT0ZcJpsu2qfIZaanf0ldiZpY0JzEzS5qTmJklrV81MRtpnEEO6+w/zjHaqC2VPT5Oz1zTGNo4pnXDV2JmljQnMTNLmpOYmSWtXzWxJVPl28zOTD6ODsYLa2MSj7p9SV3UbNrolapSa2p632jdxydl4LlOjx4HrZWfzb70mo06Ro0QfSVmZklzEjOzpDmJmVnS+lUTO4E0rW/1VVfjzDcdW63seFXimkhfXhf3/h5nfCVmZklzEjOzpFVOYpKmJN0v6c5s+QJJOyTtlnSrpOPj9yEzS0qdK7HrgF3zlm8EPhsR64AXgGvbDMzMrIpKhX1Ja4B3A/8K/K0kAVcAH8o2uRn4JPCFCgdb+LGYrRLM6Me7GKxtAscYaHKscI7SfSbQtDtWU2jZc1HJ/6VDGpzbbl6ttH3Nontp4+qwY/ZhYNA2BkVs6zwVVL0S+xzwceBYljkTOBgRxya32QucN2xHM7NJKk1ikt4D7I+I++avHrLp0LQqaYuknZJ2HmXEtOVmZmOo8uvk5cB7Jb0LWAmcxtyV2emSlmZXY2uAZ4ftHBFbga0Ap2nV5G9KNLMTSumVWETcEBFrImIt8AHgexHxYeBu4Jpss83AtolFaWa2gCZ9Yp9grsi/h7ka2U3thGRmVl2t244i4h
7gnuz7J4FL2g/JzKw6d+ybWdK6vwG8gwEHJ66LHpk2bgRuYeC6pee9auTjM/v2l4dx4etHPj770OOlxyg/SQuD9JXp4mf3eHkeHfKVmJklzUnMzJLmJGZmSXMSM7OkOYmZWdKcxMwsaU5iZpa0fk0U0sY4XZPeH46bOLViRekhZn51YOTjMT098nEAHt0z+vE+9D518Z5WcSK9Fp4818zMSczMEuckZmZJcxIzs6Q5iZlZ0pzEzCxpTmJmljQnMTNLWr+aXfswcWhbcZRpY/C7hs81DrcwhV6lSX4rNMQ2PEcnzcVdDFjYhj7E2WHzsK/EzCxpTmJmljQnMTNLmpOYmSXNSczMkuYkZmZJcxIzs6R13yc2ciC0HgwI19Y5mvartdBnUzboYRw50vgclTTtW+pDDG3F0VRfeua6OEZFvhIzs6Q5iZlZ0pzEzCxpTmJmljQnMTNLmpOYmSWtUouFpKeAF4EZYDoiNkpaBdwKrAWeAv48Il6YTJhmZsPVuRJ7e0RsiIiN2fL1wPaIWAdsz5bNzDrVpNn1auBt2fc3A/cAnyjdq4vZiSd5/rZ0EEdpM2uFGJaceurIx2dfeqlCICXn6WIQv7687/Z7Hc8AHsD/SLpP0pZs3TkR8RxA9vXs6qc1M2tH1SuxyyPiWUlnA3dJeqzqCbKktwVgJSeNEaKZ2cIqXYlFxLPZ1/3A7cAlwD5JqwGyr/sX2HdrRGyMiI3LGH0vn5lZXaVJTNLJkk499j3wJ8DDwB3A5myzzcC2SQVpZraQKr9OngPcrrki3FLgqxHxHUn3ArdJuhb4OfD+yYVpZjZcaRKLiCeBi4asPwBsmkRQZmZVuWPfzJKm6LB/RtIvgaeBVwK/6uzE43Oc7UkhRnCcbRs3ztdExFlVNuw0if3upNLOeZ3/veU425NCjOA429ZFnP510syS5iRmZklbrCS2dZHOW5fjbE8KMYLjbNvE41yUmpiZWVv866SZJa3TJCbpKkmPS9ojqVfjj0n6oqT9kh6et26VpLsk7c6+nrHIMZ4v6W5JuyQ9Ium6nsa5UtKPJD2YxfmpbP0FknZkcd4qaflixpnFNCXpfkl39jjGpyQ9JOkBSTuzdb16z7OYTpf0dUmPZT+jl3URZ2dJTNIU8HngT4ELgQ9KurCr81fwZeCqwrq+Dfw4DXwsItYDlwIfyV7DvsV5GLgiIi4CNgBXSboUuBH4bBbnC8C1ixjjMdcBu+Yt9zFGSGNQ0n8HvhMRf8jcXT676CLOiOjkH3AZ8N15yzcAN3R1/ooxrgUenrf8OLA6+3418Phix1iIdxtwZZ/jBE4Cfgy8hbmmx6XDfh4WKbY12QfrCuBOQH2LMYvjKeCVhXW9es+B04CfkdXZu4yzy18nzwOembe8N1vXZ70d+FHSWuBiYAc9jDP7Ne0B5oZougv4KXAwIqazTfrw/n8O+Dgwmy2fSf9ihDQGJX0t8EvgS9mv5/+ZjXoz8Ti7TGLDxqL1n0bHIOkU4BvARyPit4sdzzARMRMRG5i72rkEWD9ss26j+j1J7wH2R8R981cP2bQPP6OXR8SbmSvFfETSWxc7oCGWAm8GvhARFwOH6OhX3C6T2F7g/HnLa4BnOzz/OCoN/NglScuYS2BfiYhvZqt7F+cxEXGQufkXLgVOl3Rs5JTFfv8vB96bzeR1C3O/Un6OfsUINBuUtEN7gb0RsSNb/jpzSW3icXaZxO4F1mV//VkOfIC5gRX7rFcDP2puULebgF0R8Zl5D/UtzrMknZ59/wrgHcwVee8Grsk2W9Q4I+KGiFgTEWuZ+1n8XkR8mB7FCOkMShoRzwPPSHpjtmoT8ChdxNlx8e9dwBPM1Uf+YTELkUNi+xrwHHCUuf9VrmWuRrId2J19XbXIMf4xc7/e/AR4IPv3rh7G+Sbg/izOh4F/yta/FvgRsAf4L2DFYr/vWVxvA+7sY4xZPA9m/x459rnp23uexbQB2Jm97/8NnNFFnO7YN7OkuWPfzJ
LmJGZmSXMSM7OkOYmZWdKcxMwsaU5iZpY0JzEzS5qTmJkl7f8BBLVl/tbIFmcAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "i=1\n", + "plt.imshow(np.flip(insert(ET_inner_filtered[0][i],double_size(ET_outer_filtered[0][i])).reshape(52,64),axis=0))" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Done batch 1\n", + "Done batch 2\n" + ] + } + ], + "source": [ + "output_pics = generate_HCAL_images(ET_inner_filtered,ET_outer_filtered)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(25000, 52, 64, 1)" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "output_pics[0].shape" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(len(output_pics)):\n", + " with open('/disk/lhcb_data/davide/HCAL_project_full_event/reco/sample'+str(i)+'.pickle', 'wb') as handle:\n", + " pickle.dump(output_pics[i], handle, protocol=pickle.HIGHEST_PROTOCOL)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "a=np.array([output_pics[0][i].sum() for i in range(len(output_pics[0]))])" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAEphJREFUeJzt3X+MpVd93/H3p3ZwAmmyNh6Q2V11jbJK4lhJsEbECVWK2LSxjcX6D6jsorIlK60imYb8UliXSqhtUhklwoBEXK1shyVybYhD6hU4IdYCQpVqh1lAjs3ieAqOd/CGHeQfSWM1sO23f9yz9TB7d2f23tm59/q8X9LoPs95zr33nHlm5jPnPD9uqgpJUn/+0aQbIEmaDANAkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROGQCS1KkLJ92As7n00ktrx44dk26GJM2UI0eOfLuq5taqN9UBsGPHDhYWFibdDEmaKUn+ej31nAKSpE4ZAJLUKQNAkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROTfWVwBrfjv2f/p71J29984RaImnaOAKQpE6tGQBJ7kpyIsmjK8p+N8nXkjyS5E+SbFmx7ZYki0keT/KLK8qvaWWLSfZvfFckSediPSOAjwLXrCp7ELiyqn4S+CvgFoAkVwA3Aj/RnvP7SS5IcgHwEeBa4ArgplZXkjQhawZAVX0BeGZV2Z9X1cm2+hCwrS3vBu6tqn+oqm8Ai8Dr29diVX29qr4D3NvqSpImZCOOAfwS8KdteStwbMW2pVZ2pvLTJNmXZCHJwvLy8gY0T5I0zFgBkOS9wEng7lNFQ6rVWcpPL6w6UFXzVTU/N7fm5xlIkkY08mmgSfYA1wO7qurUH/MlYPuKatuAp9vymcq1Tp7SKWkjjRQASa4B3gP8s6p6YcWmQ8B/TfIB4DXATuAvGIwAdia5HPgmgwPF/2qchmu41SEhSWeyZgAkuQd4I3BpkiXgfQzO+rkIeDAJwENV9ctV9ViSTwBfZTA1dHNV/Z/2Ou8CPgNcANxVVY+dh/5IktZpzQCoqpuGFN95lvq/A/zOkPIHgAfOqXWSpPPGK4ElqVPeC2iGOd8vaRwGQGeGhYZnE0l9cgpIkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROGQCS1CkDQJI65ZXAU8rbPEg63wwA+UEzUqecApKkThkAktQpA0CSOuUxAJ3GYwJSHxwBSFKnDABJ6pQBIEmdMgAkqVMGgCR1ygCQpE6teRpokruA64ETVXVlK7sE+DiwA3gS+JdV9WySAB8CrgNeAP5NVX2pPWcP8O/by/52VR3c2K7MNu/9I2mzrWcE8FHgmlVl+4HDVbUTONzWAa4FdravfcDt8P8D433AzwCvB96X5OJxGy9JGt2aI4Cq+kKSHauKdwNvbMsHgc8D72nlH6uqAh5KsiXJZa3ug1X1DECSBxmEyj1j90Dn3bDRiReHSbNv1GMAr66q4wDt8VWtfCtwbEW9pVZ2pnJJ0oRs9EHgDCmrs5Sf/gLJviQLSRaWl5c3tHGSpBeNGgDfalM7tMcTrXwJ2L6i3jbg6bOUn6aqDlTVfFXNz83Njdg8SdJaRr0Z3CFgD3Bre7x/Rfm7ktzL4IDv81V1PMlngP+84sDvvwBuGb3Zs8+zfiRN2npOA72HwUHcS5MsMTib51bgE0n2Ak8Bb2vVH2BwCugig9NA3wlQVc8k+U/AF1u9/3jqgLAkaTLWcxbQTWfYtGtI3QJuPsPr3AXcdU6tkySdN14JLEmdMgAkqVMGgCR1ygCQpE4ZAJLUKQNAkjplAEhSp0a9ElidW30ls3cHlWaPIwBJ6pQBIEmdMgAkqVMGgCR1ygCQpE4ZAJLUKQNAkjplAEhSpwwASeqUVwJvgh4+/9crg6XZ4whAkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROjRUASX4
tyWNJHk1yT5LvT3J5koeTPJHk40le1upe1NYX2/YdG9EBSdJoRg6AJFuBXwHmq+pK4ALgRuD9wG1VtRN4FtjbnrIXeLaqfgS4rdWTJE3IuFNAFwI/kORC4OXAceBNwH1t+0Hghra8u63Ttu9KkjHfX5I0opEDoKq+Cfwe8BSDP/zPA0eA56rqZKu2BGxty1uBY+25J1v9V65+3ST7kiwkWVheXh61eZKkNYwzBXQxg//qLwdeA7wCuHZI1Tr1lLNse7Gg6kBVzVfV/Nzc3KjNkyStYZwpoF8AvlFVy1X1XeCTwM8BW9qUEMA24Om2vARsB2jbfxh4Zoz3lySNYZwAeAq4OsnL21z+LuCrwOeAt7Y6e4D72/Khtk7b/tmqOm0EIEnaHCPfDrqqHk5yH/Al4CTwZeAA8Gng3iS/3crubE+5E/jDJIsM/vO/cZyGT7Mebv8safZlmv8Jn5+fr4WFhUk345wZAMP5GQHS5khypKrm16rnlcCS1CkDQJI6ZQBIUqcMAEnqlAEgSZ0yACSpUyNfByCdq9Wnx3paqDRZjgAkqVMGgCR1ygCQpE4ZAJLUKQNAkjplAEhSpzwNVBPjaaHSZDkCkKROGQCS1CkDQJI6ZQBIUqcMAEnqlAEgSZ0yACSpU14HoKnmtQLS+eMIQJI65QhgA6z+L1Wj8fsoba6xRgBJtiS5L8nXkhxN8rNJLknyYJIn2uPFrW6SfDjJYpJHkly1MV2QJI1i3CmgDwF/VlU/BvwUcBTYDxyuqp3A4bYOcC2ws33tA24f870lSWMYOQCS/BDw88CdAFX1nap6DtgNHGzVDgI3tOXdwMdq4CFgS5LLRm65JGks44wAXgssA3+Q5MtJ7kjyCuDVVXUcoD2+qtXfChxb8fylViZJmoBxAuBC4Crg9qp6HfD3vDjdM0yGlNVplZJ9SRaSLCwvL4/RPEnS2YwTAEvAUlU93NbvYxAI3zo1tdMeT6yov33F87cBT69+0ao6UFXzVTU/Nzc3RvMkSWczcgBU1d8Ax5L8aCvaBXwVOATsaWV7gPvb8iHgHe1soKuB509NFUmSNt+41wH8W+DuJC8Dvg68k0GofCLJXuAp4G2t7gPAdcAi8EKrK0makLECoKq+AswP2bRrSN0Cbh7n/SRJG8dbQUhSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROGQCS1CkDQJI6ZQBIUqcMAEnqlAEgSZ0yACSpU+PeDVTaVDv2f/p71p+89c0Taok0+xwBSFKnDABJ6pQBIEmdMgAkqVMeBNZMW31QGDwwLK2XIwBJ6pQBIEmdMgAkqVMGgCR1yoPA52jYQUdJmkWOACSpUwaAJHVq7ABIckGSLyf5VFu/PMnDSZ5I8vEkL2vlF7X1xbZ9x7jvLUka3UaMAN4NHF2x/n7gtqraCTwL7G3le4Fnq+pHgNtaPUnShIwVAEm2AW8G7mjrAd4E3NeqHARuaMu72zpt+65WX5I0AeOeBfRB4LeAf9zWXwk8V1Un2/oSsLUtbwWOAVTVySTPt/rfHrMN0vfwMwOk9Rl5BJDkeuBEVR1ZWTykaq1j28rX3ZdkIcnC8vLyqM2TJK1hnCmgNwBvSfIkcC+DqZ8PAluSnBpZbAOebstLwHaAtv2HgWdWv2hVHaiq+aqan5ubG6N5kqSzGXkKqKpuAW4BSPJG4Der6u1J/gh4K4NQ2APc355yqK3/j7b9s1V12ghA2mhOCUnDnY/rAN4D/HqSRQZz/He28juBV7byXwf2n4f3liSt04bcCqKqPg98vi1/HXj9kDr/G3jbRryfJGl8XgksSZ0yACSpUwaAJHXKAJCkThkAktQpA0CSOmUASFKnDABJ6pSfCbwGPwNY0kuVIwBJ6pQBIEmdMgAkqVMGgCR1yoPA6s6wA/t+RoB65AhAkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdcrTQKUhPFVUPTAAJLzpn/rkFJAkdcoAkKROOQW0ilMBknox8gggyfY
kn0tyNMljSd7dyi9J8mCSJ9rjxa08ST6cZDHJI0mu2qhOSJLO3ThTQCeB36iqHweuBm5OcgWwHzhcVTuBw20d4FpgZ/vaB9w+xntLksY0cgBU1fGq+lJb/jvgKLAV2A0cbNUOAje05d3Ax2rgIWBLkstGbrkkaSwbchA4yQ7gdcDDwKur6jgMQgJ4Vau2FTi24mlLrWz1a+1LspBkYXl5eSOaJ0kaYuwASPKDwB8Dv1pVf3u2qkPK6rSCqgNVNV9V83Nzc+M2T5J0BmOdBZTk+xj88b+7qj7Zir+V5LKqOt6meE608iVg+4qnbwOeHuf9pc20+gwxrwzWrBvnLKAAdwJHq+oDKzYdAva05T3A/SvK39HOBroaeP7UVJEkafONMwJ4A/Cvgb9M8pVW9u+AW4FPJNkLPAW8rW17ALgOWAReAN45xntLE+eIQLNu5ACoqv/O8Hl9gF1D6hdw86jvJ0naWF4JLG0Q7yCqWeO9gCSpUwaAJHXKAJCkThkAktQpA0CSOuVZQNIm8toBTRMDQDqP/IAhTTOngCSpUwaAJHXKKSBpgrx6WJPUdQA4PyupZ10HgDSNPFNIm8VjAJLUKUcA0pRzRKDzxRGAJHXKEYA0YzxzSBvFAJBeAtY6o82A0DBOAUlSpxwBSB1w2kjDOAKQpE51NQLwyl/pRZ5eqq4CQNL6OW300mcASALWN0J21PDSsukBkOQa4EPABcAdVXXrZrdB0uQYItMjVbV5b5ZcAPwV8M+BJeCLwE1V9dVh9efn52thYWHk93POX9pcw/6Yj/J7aCiMJ8mRqppfq95mjwBeDyxW1dcBktwL7AaGBoCk2bJR/3St53VWh4Qji3O32QGwFTi2Yn0J+JlNboOkl4C1QmI9B7FHCZr1WCuMztf7nqvNDoAMKfueOagk+4B9bfV/JXkcuBT49nlu2/lmH6aDfZgem96PvH/Dn7OuPpyH913LP1lPpc0OgCVg+4r1bcDTKytU1QHgwMqyJAvrmc+aZvZhOtiH6fFS6Mes92GzrwT+IrAzyeVJXgbcCBza5DZIktjkEUBVnUzyLuAzDE4DvauqHtvMNkiSBjb9OoCqegB44ByfdmDtKlPPPkwH+zA9Xgr9mOk+bOp1AJKk6eHdQCWpU1MdAEmuSfJ4ksUk+yfdnvVIsj3J55IcTfJYkne38kuSPJjkifZ48aTbupYkFyT5cpJPtfXLkzzc+vDxdiB/qiXZkuS+JF9r++RnZ21fJPm19rP0aJJ7knz/tO+LJHclOZHk0RVlQ7/vGfhw+z1/JMlVk2v5i87Qh99tP0uPJPmTJFtWbLul9eHxJL84mVafm6kNgHbbiI8A1wJXADcluWKyrVqXk8BvVNWPA1cDN7d27wcOV9VO4HBbn3bvBo6uWH8/cFvrw7PA3om06tx8CPizqvox4KcY9Gdm9kWSrcCvAPNVdSWDkyduZPr3xUeBa1aVnen7fi2ws33tA27fpDau5aOc3ocHgSur6icZ3NbmFoD2O34j8BPtOb/f/oZNtakNAFbcNqKqvgOcum3EVKuq41X1pbb8dwz+4Gxl0PaDrdpB4IbJtHB9kmwD3gzc0dYDvAm4r1WZhT78EPDzwJ0AVfWdqnqOGdsXDE7W+IEkFwIvB44z5fuiqr4APLOq+Ezf993Ax2rgIWBLkss2p6VnNqwPVfXnVXWyrT7E4FomGPTh3qr6h6r6BrDI4G/YVJvmABh224itE2rLSJLsAF4HPAy8uqqOwyAkgFdNrmXr8kHgt4D/29ZfCTy34od/FvbHa4Fl4A/aVNYdSV7BDO2Lqvom8HvAUwz+8D8PHGH29gWc+fs+q7/rvwT8aVueyT5McwCseduIaZbkB4E/Bn61qv520u05F0muB05U1ZGVxUOqTvv+uBC4Cri9ql4H/D1TPN0zTJsn3w1cDrwGeAWDKZPVpn1fnM3M/WwleS+D6d67TxUNqTbVfYDpDoA1bxsxrZJ8H4M//ndX1Sdb8bdODWv
b44lJtW8d3gC8JcmTDKbe3sRgRLClTUPAbOyPJWCpqh5u6/cxCIRZ2he/AHyjqpar6rvAJ4GfY/b2BZz5+z5Tv+tJ9gDXA2+vF8+jn6k+nDLNATCTt41oc+V3Aker6gMrNh0C9rTlPcD9m9229aqqW6pqW1XtYPB9/2xVvR34HPDWVm2q+wBQVX8DHEvyo61oF4Nbj8/MvmAw9XN1kpe3n61TfZipfdGc6ft+CHhHOxvoauD5U1NF0yaDD7R6D/CWqnphxaZDwI1JLkpyOYMD2n8xiTaek6qa2i/gOgZH2v8n8N5Jt2edbf6nDIZ+jwBfaV/XMZhDPww80R4vmXRb19mfNwKfasuvZfBDvQj8EXDRpNu3jvb/NLDQ9sd/Ay6etX0B/Afga8CjwB8CF037vgDuYXDM4rsM/jvee6bvO4Ppk4+03/O/ZHDG07T2YZHBXP+p3+3/sqL+e1sfHgeunXT71/PllcCS1KlpngKSJJ1HBoAkdcoAkKROGQCS1CkDQJI6ZQBIUqcMAEnqlAEgSZ36fy8WAVD4I3MKAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plt.hist(a/1000,bins=70);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/create_true_images.ipynb b/create_true_images.ipynb new file mode 100644 index 0000000..2de21e7 --- /dev/null +++ b/create_true_images.ipynb @@ -0,0 +1,436 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import os\n", + "import pickle\n", + "import sys" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "df_piplus = pd.read_csv('/disk/lhcb_data/davide/HCAL_project_full_event/csv/piplus/MCtrue_piplus.csv', index_col=False)\n", + "df_piplus0 = pd.read_csv('/disk/lhcb_data/davide/HCAL_project_full_event/csv/piplus0/MCtrue_piplus0.csv', index_col=False)\n", + "df_Kminus = pd.read_csv('/disk/lhcb_data/davide/HCAL_project_full_event/csv/Kminus/MCtrue_Kminus.csv', index_col=False)\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "true_events_piplus=df_piplus.to_dict(orient='list')\n", + "true_events_piplus0=df_piplus0.to_dict(orient='list')\n", + "true_events_Kminus=df_Kminus.to_dict(orient='list')\n", + "\n", + "for key in true_events_piplus:\n", + " 
true_events_piplus[key]=np.array(true_events_piplus[key])\n", + " true_events_piplus0[key]=np.array(true_events_piplus0[key])\n", + " true_events_Kminus[key]=np.array(true_events_Kminus[key])" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "#DETERMINE MAX AND MIN X,Y\n", + "max_X=np.max(true_events_piplus['true_x'][np.where(true_events_piplus['region']>=0)])\n", + "min_X=np.min(true_events_piplus['true_x'][np.where(true_events_piplus['region']>=0)])\n", + "\n", + "#width_X=np.ceil(max_X-min_X)\n", + "width_X=8403.0\n", + "\n", + "max_Y=np.max(true_events_piplus['true_y'][np.where(true_events_piplus['region']>=0)])\n", + "min_Y=np.min(true_events_piplus['true_y'][np.where(true_events_piplus['region']>=0)])\n", + "\n", + "#width_Y = np.ceil(max_Y-min_Y)\n", + "width_Y=6812.0" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "#number of events\n", + "batch_size=25000\n", + "\n", + "n_batches=2\n", + "save_test_images = False" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "piplus = np.array([{} for i in range(n_batches)])\n", + "piplus0 = np.array([{} for i in range(n_batches)])\n", + "Kminus = np.array([{} for i in range(n_batches)])\n", + "\n", + "for i in range(n_batches):\n", + " \n", + " for key in true_events_piplus:\n", + " piplus[i][key]= true_events_piplus[key][i*batch_size:(i+1)*batch_size]\n", + " for key in true_events_piplus0:\n", + " piplus0[i][key]= true_events_piplus0[key][i*batch_size:(i+1)*batch_size]\n", + " for key in true_events_Kminus:\n", + " Kminus[i][key]= true_events_Kminus[key][i*batch_size:(i+1)*batch_size]\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "#reconverting to np.arrays\n", + "def create_grid_and_normalise(particle, X_pixels=64, width_X=width_X, width_Y=width_Y):\n", + " 
\n", + " particle_norm=np.array([{} for i in range(n_batches)])\n", + "\n", + " for i in range(n_batches):\n", + " particle_norm[i][\"true_x\"]=particle[i][\"true_x\"]/(width_X/2)\n", + " particle_norm[i][\"true_y\"]=particle[i][\"true_y\"]/(width_Y/2)\n", + " \n", + " particle_norm[i][\"true_ET\"]=particle[i][\"true_ET\"]\n", + " particle_norm[i][\"region\"]=particle[i][\"region\"]\n", + " # for i in range(n_batches):\n", + " # for j in range(particle[i][\"region\"].shape[0]):\n", + " # if particle[i][\"region\"][j]>=0:\n", + " \n", + " Y_pixels=np.int(np.floor((X_pixels*width_Y)/width_X))\n", + "\n", + " x_values = np.linspace(-X_pixels//2-1,np.ceil(X_pixels/2)+1,X_pixels+3)\n", + " y_values = np.linspace(-Y_pixels//2-1,np.ceil(Y_pixels/2)+1,Y_pixels+3)\n", + "\n", + " X_grid, Y_grid = np.meshgrid(x_values, y_values)\n", + " \n", + " return X_grid, Y_grid, particle_norm" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "X_grid, Y_grid, piplus_norm = create_grid_and_normalise(piplus)\n", + "_, _, piplus0_norm= create_grid_and_normalise(piplus0)\n", + "_, _, Kminus_norm = create_grid_and_normalise(Kminus)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "52" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "X_pixels = 64\n", + "Y_pixels = np.int(np.ceil((X_pixels*width_Y)/width_X))\n", + "Y_pixels" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "def make_pics_dict(X_grid, Y_grid, particle, n_batches=n_batches):\n", + " \n", + " pic = np.zeros(shape=(Y_pixels,X_pixels,1),dtype=np.float32)\n", + " pics_dict = {}\n", + " \n", + " for n in range(n_batches):\n", + " \n", + " batch_size = particle[n][\"region\"].shape[0]\n", + " pics_dict[n]=np.array([pic for i in 
range(0,batch_size)])\n", + " \n", + " for k in range(batch_size):\n", + " event=k\n", + " if particle[n][\"region\"][event]>=0: #and particle_1[n][\"region\"][event]>=0 and particle_2[n][\"region\"][event]>=0:\n", + " #event = n*batch_size+k\n", + " \n", + " #print(event)\n", + " for i in range(X_grid.shape[0]):\n", + " for j in range(Y_grid.shape[1]):\n", + " \n", + " if X_grid[i,j] < particle[n][\"true_x\"][event]*X_pixels/2 < X_grid[i,j+1]:\n", + " if Y_grid[i,j] < particle[n][\"true_x\"][event]*Y_pixels/2 < Y_grid[i+1,j]:\n", + " \n", + " x_primed=int(Y_pixels/2-Y_grid[i,j])\n", + " y_primed=int(X_pixels/2+X_grid[i,j])\n", + " \n", + " #pics_dict[n][k][x_primed,y_primed,0]=1\n", + " \n", + " pics_dict[n][k][x_primed-1,y_primed-1,0]=particle[n][\"true_ET\"][event]\n", + " #total_pic[x_primed-1,y_primed-1,0]=1\n", + " #total_pic[x_primed-1,y_primed-1,0]=true_events['true_ET'][event]\n", + " \n", + " \n", + " print('Converted '+str(n+1)+'/'+str(n_batches)+' batches of ' +str(batch_size)+' images')\n", + " \n", + " # \n", + " # if N % batch_size != 0:\n", + " # \n", + " # print('Converting last batch of '+str(N%batch_size)+' images') \n", + " # \n", + " # for k in range(batch_size*n_batches,N):\n", + " # #if true_events['region'][k]>=0:\n", + " # \n", + " # for i in range(X_grid.shape[0]):\n", + " # for j in range(Y_grid.shape[1]):\n", + " # if X_grid[i,j-1] < X_norm[n_batches][k]*X_pixels/2 < X_grid[i,j]:\n", + " # if Y_grid[i-1,j] < Y_norm[n_batches][k]*Y_pixels/2 < Y_grid[i,j]:\n", + " # \n", + " # x_primed=int(Y_pixels/2-Y_grid[i,j])\n", + " # y_primed=int(X_pixels/2+X_grid[i,j])\n", + " # \n", + " # pics_dict[n_batches][k-(N-1)][x_primed,y_primed,0]=true_events['true_ET'][event]\n", + " # #total_pic[x_primed,y_primed,0]=true_events['true_ET'][event]\n", + " return pics_dict" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Converted 1/2 batches of 
25000 images\n", + "Converted 2/2 batches of 25000 images\n" + ] + } + ], + "source": [ + "pics_dict_piplus = make_pics_dict(X_grid, Y_grid, piplus_norm, n_batches)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Converted 1/2 batches of 25000 images\n", + "Converted 2/2 batches of 25000 images\n" + ] + } + ], + "source": [ + "pics_dict_piplus0 = make_pics_dict(X_grid, Y_grid, piplus0_norm, n_batches)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Converted 1/2 batches of 25000 images\n", + "Converted 2/2 batches of 25000 images\n" + ] + } + ], + "source": [ + "pics_dict_Kminus = make_pics_dict(X_grid, Y_grid, Kminus_norm, n_batches)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(25000, 52, 64, 1)" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pics_dict_piplus[1].shape" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAATEAAAD8CAYAAAAfZJO2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAADJVJREFUeJzt3W+IZYV5x/Hvr7vrmpiKmqhsXammbFN9UdewGIOltBpbm4ToC1OUUPbFwr6xYGgg1RYKgb6Ib6J9UQpLtNkXadSapIqE2GWjlEJZHaMm6kZXrY3LWjdtlaSBbnfN0xf3aEe949z5d+c++P3AcO85cy7nYe/s13PPHM+mqpCkrn5pvQeQpJUwYpJaM2KSWjNiklozYpJaM2KSWjNiklozYpJaW1HEklyV5JkkzyW5abWGkqRJZblX7CfZADwLXAkcBh4Brq+qpxd6zUnZXCdzyrL2J+m943/4Of9bxzLJthtXsJ9LgOeq6gWAJHcCVwMLRuxkTuFjuWIFu5T0XnCg9k+87Uo+Tp4DvDRv+fCw7i2S7E4yl2TuOMdWsDtJeqeVRGzcod47PptW1Z6q2lFVOzaxeQW7k6R3WknEDgPnzlveChxZ2TiStDQridgjwLYk5yc5CbgOuG91xpKkySz7xH5VnUjyx8ADwAbgjqp6atUmk6QJrOS3k1TVd4DvrNIskrRkXrEvqTUjJqk1IyapNSMmqTUjJqk1IyapNSMmqTUjJqk1IyapNSMmqTUjJqk1IyapNSMmqTUjJqk1IyapNSMmqTUjJqk1IyapNSMmqTUjJqk1IyapNSMmqTUjJqk1IyapNSMmqTUjJqk1IyapNSMmqTUjJqk1IyapNSMmqTUjJqk1IyaptUUjluSOJEeTPDlv3RlJ9iU5NDyevrZjStJ4kxyJfQ246m3rbgL2V9U2YP+wLElTt2jEquqfgP962+qrgb3D873ANas8lyRNZLnnxM6uqpcBhsezFtowye4kc0nmjnNsmbuTpPHW/MR+Ve2pqh1VtWMTm9d6d5LeY5YbsVeSbAEYHo+u3kiSNLnlRuw+YOfwfCdw7+qMI0lLM8klFt8A/gX4SJLDSXYBXwauTHIIuHJYlqSp27jYBlV1/QLfumKVZ5GkJfOKfUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrS0asSTnJnkwycEkTyW5cVh/RpJ9SQ4Nj6ev/biS9FaTHImdAL5QVRcAlwI3JLkQuAnYX1XbgP3DsiRN1aIRq6qXq+r7w/OfAQeBc4Crgb3DZnuBa9ZqSElayJLOiSU5D7gYOACcXVUvwyh0wFmrPZwkLWbiiCX5APBN4PNV9dMlvG53krkkc8c5tpwZJWlBE0UsySZGAft6VX1rWP1Kki3D97cAR8e9tqr2VNWOqtqxic2rMbMkvWmS304GuB04WFVfmfet+4Cdw/OdwL2rP54kvbuNE2xzGfBHwA+TPD6s+zPgy8DdSXYBPwY+uzYjStLCFo1YVf0zkAW+fcXqjiNJS+MV+5JaM2KSWjNiklozYpJaM2KSWpvkEgtpXT1w5PF3rPv9X9m+DpNoFnkkJqk1IyapNSMmqTUjJqk1T+xr5nkSX+/GIzFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrRkxSa0ZMUmtGTFJrS0asSQnJ3k4yRNJnkrypWH9+UkOJDmU5K4kJ639uJL0VpMciR0DLq+qi4DtwFVJLgVuAW6tqm3Aq8CutRtTksZbNGI18t/D4qbhq4DLgXuG9XuBa9ZkQk3Vhl/
/tbd8SbNuonNiSTYkeRw4CuwDngdeq6oTwyaHgXPWZkRJWthEEauq16tqO7AVuAS4YNxm416bZHeSuSRzxzm2/EklaYwl/Xayql4DHgIuBU5LsnH41lbgyAKv2VNVO6pqxyY2r2RWSXqHjYttkORM4HhVvZbkfcAnGJ3UfxC4FrgT2Ancu5aDajpef/b59R5BWpJFIwZsAfYm2cDoyO3uqro/ydPAnUn+EngMuH0N55SksRaNWFX9ALh4zPoXGJ0fk6R14xX7klozYpJaM2KSWjNiklozYpJaM2KSWjNiklozYpJaM2KSWjNiklozYpJaM2KSWjNiklozYpJaM2KSWjNiklozYpJaM2KSWjNiklozYpJaM2KSWjNiklozYpJaM2KSWjNiklozYpJaM2KSWjNiklozYpJaM2KSWjNiklozYpJaM2KSWps4Ykk2JHksyf3D8vlJDiQ5lOSuJCet3ZiSNN5SjsRuBA7OW74FuLWqtgGvArtWczBJmsREEUuyFfgU8NVhOcDlwD3DJnuBa9ZiQEl6N5Meid0GfBH4xbD8QeC1qjoxLB8Gzlnl2SRpUYtGLMmngaNV9ej81WM2rQVevzvJXJK54xxb5piSNN7GCba5DPhMkk8CJwOnMjoyOy3JxuFobCtwZNyLq2oPsAfg1JwxNnSStFyLHolV1c1VtbWqzgOuA75XVZ8DHgSuHTbbCdy7ZlNK0gJWcp3YnwJ/kuQ5RufIbl+dkSRpcpN8nHxTVT0EPDQ8fwG4ZPVHkqTJecW+pNaMmKTWjJik1oyYpNaMmKTWjJik1oyYpNaMmKTWjJik1oyYpNaMmKTWjJik1oyYpNaMmKTWjJik1oyYpNaMmKTWjJik1oyYpNaMmKTWjJik1oyYpNaMmKTWjJik1oyYpNaMmKTWjJik1oyYpNaMmKTWjJik1oyYpNaMmKTWjJik1jZOslGSF4GfAa8DJ6pqR5IzgLuA84AXgT+sqlfXZkxJGm8pR2K/W1Xbq2rHsHwTsL+qtgH7h2VJmqqVfJy8Gtg7PN8LXLPycSRpaSaNWAH/mOTRJLuHdWdX1csAw+NZazGgJL2bic6JAZdV1ZEkZwH7kvxo0h0M0dsNcDLvX8aIkrSwiY7EqurI8HgU+DZwCfBKki0Aw+PRBV67p6p2VNWOTWxenaklabBoxJKckuSX33gO/B7wJHAfsHPYbCdw71oNKUkLmeTj5NnAt5O8sf3fVdV3kzwC3J1kF/Bj4LNrN6YkjbdoxKrqBeCiMev/E7hiLYaSpEl5xb6k1lJV09tZ8hPg34APAf8xtR0vn3Oung4zgnOutuXO+atVdeYkG041Ym/uNJmbd+X/zHLO1dNhRnDO1TaNOf04Kak1IyaptfWK2J512u9SOefq6TAjOOdqW/M51+WcmCStFj9OSmptqhFLclWSZ5I8l2Sm7j+W5I4kR5M8OW/dGUn2JTk0PJ6+zjOem+TBJAeTPJXkxhmd8+QkDyd5YpjzS8P685McGOa8K8lJ6znnMNOGJI8luX+GZ3wxyQ+TPJ5kblg3U+/5MNNpSe5J8qPhZ/Tj05hzahFLsgH4a+APgAuB65NcOK39T+BrwFVvWzdrN348AXyhqi4ALgVuGP4MZ23OY8DlVXURsB24KsmlwC3ArcOcrwK71nHGN9wIHJy3PIszQo+bkv4V8N2q+g1G/5fPQaYxZ1VN5Qv4OPDAvOWbgZuntf8JZzwPeHLe8jPAluH5FuCZ9Z7xbfPeC1w5y3MC7we+D3yM0UWPG8f9PKzTbFuHv1iXA/cDmbUZhzleBD70tnUz9Z4DpwL/ynCefZpzTvPj5DnAS/OWDw/rZtnM3vgxyXnAxcABZnDO4WPa44xu0bQPeB54rapODJvMwvt/G/BF4BfD8geZvRmhx01JPwz8BPjb4eP5V4e73qz5nNOMWMas81ejy5DkA8A3gc9X1U/Xe55xqur1qtrO6GjnEuCCcZtNd6r/l+TTwNGqenT+6jGbzsL
P6GVV9VFGp2JuSPLb6z3QGBuBjwJ/U1UXAz9nSh9xpxmxw8C585a3AkemuP/lmOjGj9OUZBOjgH29qr41rJ65Od9QVa8BDzE6h3dakjfunLLe7/9lwGeGf8nrTkYfKW9jtmYEVnZT0ik6DByuqgPD8j2Morbmc04zYo8A24bf/pwEXMfoxoqzbKZu/JjRTd1uBw5W1VfmfWvW5jwzyWnD8/cBn2B0kvdB4Nphs3Wds6purqqtVXUeo5/F71XV55ihGaHPTUmr6t+Bl5J8ZFh1BfA005hzyif/Pgk8y+j8yJ+v54nIMbN9A3gZOM7ovyq7GJ0j2Q8cGh7PWOcZf4vRx5sfAI8PX5+cwTl/E3hsmPNJ4C+G9R8GHgaeA/4e2Lze7/sw1+8A98/ijMM8TwxfT73x92bW3vNhpu3A3PC+/wNw+jTm9Ip9Sa15xb6k1oyYpNaMmKTWjJik1oyYpNaMmKTWjJik1oyYpNb+D1TuIblof2KtAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "i=1\n", + "plt.imshow(pics_dict_piplus[1][i].reshape(52,64)+pics_dict_piplus0[1][i].reshape(52,64)+pics_dict_Kminus[0][i].reshape(52,64))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "pics_dict={}\n", + "\n", + "pics_dict[0]=pics_dict_piplus[0]+pics_dict_piplus0[0]+pics_dict_Kminus[0]\n", + "pics_dict[1]=pics_dict_piplus[1]+pics_dict_piplus0[1]+pics_dict_Kminus[1]" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(n_batches):\n", + " with open('/disk/lhcb_data/davide/HCAL_project_full_event/true/sample'+str(i)+'.pickle', 'wb') as handle:\n", + " pickle.dump(pics_dict[i], handle, protocol=pickle.HIGHEST_PROTOCOL)" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "a=np.array([pics_dict_Kminus[0][i].sum()+pics_dict_piplus[0][i].sum()+pics_dict_piplus0[0][i].sum() for i in range(len(pics_dict_Kminus[0]))])" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAE21JREFUeJzt3X+sXOWd3/H3Zx3yQ5uomHChru3UdOuqIVVjoltAon+wJAsGVjUrLRWo3XgjVG8lUBMpbdfkHzZJkRypG5pIWSRvcGNW2RArP4oF3mVdkijNHwEMIQRDIrzEDbe28N0aSFBUKrPf/jGPm8G+9p17PfdefJ/3SxrNOd/znJnnkcf3M885Z2ZSVUiS+vNrS90BSdLSMAAkqVMGgCR1ygCQpE4ZAJLUKQNAkjplAEhSpwwASeqUASBJnXrLUnfgdM4///xat27dUndDks4qjz/++N9U1cRs7d7UAbBu3Tr27du31N2QpLNKkv85SjsPAUlSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROGQCS1CkDQJI6ZQBIUqfe1J8EPlus2/rgG9YPbrt+iXoiSaNzBiBJnTIAJKlTBoAkdcoAkKROGQCS1KlZAyDJ25M8muSHSfYn+WSrfynJT5M82W4bWj1JPp/kQJKnknxg6LE2J3mu3TYv3LAkSbMZ5TLQ14CrqurVJOcA30vyF23bf6iqr53Q/lpgfbtdBtwNXJbkPOAOYBIo4PEku6vqpXEMRJI0N7POAGrg1bZ6TrvVaXbZBNzb9vs+cG6SVcA1wN6qOtr+6O8FNp5Z9yVJ8zXSOYAkK5I8CRxh8Ef8kbbpznaY564kb2u11cALQ7tPtdqp6pKkJTBSAFTV61W1AVgDXJrknwC3A/8Y+GfAecAftuaZ6SFOU3+DJFuS7Euyb3p6epTuSZLmYU5XAVXVy8B3gI1Vdbgd5nkN+K/Apa3ZFLB2aLc1wKHT1E98ju1VNVlVkxMTs/6ovSRpnka5Cmgiyblt+R3Ah4Aft+P6JAlwA/B022U38OF2NdDlwCtVdRh4CLg6ycokK4GrW02StARGuQpoFbAzyQoGgbGrqh5I8q0kEwwO7TwJ/NvWfg9wHXAA+CXwEYCqOprk08Bjrd2nquro+IYiSZqLWQOgqp4CLpmhftUp2hdw6ym27QB2zLGPkqQF4CeBJalTBoAkdcoAkKROGQCS1CkDQJI6ZQBIUqcMAEnqlAEgSZ0yACSpUwaAJHXKAJCkTo3yZXCao3VbH3zD+sFt1y9RTyTp1JwBSFKnnAHM0Ynv7iXpbOUMQJI6ZQBIUqcMAEnqlAEgSZ0yACSpU7MGQJK3J3k0yQ+T7E/yyVa/KMkjSZ5L8tUkb231t7X1A237uqHHur3Vf5LkmoUalCRpdqPMAF4Drqqq9wMbgI1JLgc+A9xVVeuBl4BbWvtbgJeq6h8Cd7V2JLkYuAl4H7AR+JMkK8Y5GEnS6GYNgBp4ta2e024FXAV8rdV3Aje05U1tnbb9g0nS6vdV1WtV9VPgAHDpWEYhSZqzkc4BJFmR5EngCLAX+Gvg5ao61ppMAavb8mrgBYC2/RXg3cP1GfaRJC2ykQKgql6vqg3AGgbv2t87U7N2n1NsO1X9DZJsSbIvyb7p6elRuidJmoc5XQVUVS8D3wEuB85NcvyrJNYAh9ryFLAWoG3/O8DR4foM+ww/x/aqmqyqyYmJibl0T5I0B6NcBTSR5Ny2/A7gQ8CzwLeB323NNgP3t+XdbZ22/VtVVa1+U7tK6CJgPfDouAYiSZqbUb4MbhWws12x82vArqp6IMkzwH1J/hPwA+Ce1v4e4M+SHGDwzv8mgKran2QX8AxwDLi1ql4f73AkSaOaNQCq6ingkhnqzzPDVTxV9X+AG0/xWHcCd869m5KkcfOTwJLUKQNAkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROGQCS1CkDQJI6ZQBIUqcMAEn
qlAEgSZ0yACSpUwaAJHXKAJCkThkAktQpA0CSOmUASFKnDABJ6tSsPwqfZC1wL/B3gb8FtlfV55L8EfBvgOnW9BNVtaftcztwC/A68O+q6qFW3wh8DlgBfLGqto13OG9O67Y+eFLt4Lbrl6AnkvQrswYAcAz4eFU9keRdwONJ9rZtd1XVfx5unORi4CbgfcDfA/57kn/UNn8B+C1gCngsye6qemYcA5Ekzc2sAVBVh4HDbfkXSZ4FVp9ml03AfVX1GvDTJAeAS9u2A1X1PECS+1pbA0CSlsCczgEkWQdcAjzSSrcleSrJjiQrW2018MLQblOtdqr6ic+xJcm+JPump6dP3CxJGpORAyDJO4GvAx+rqp8DdwO/AWxgMEP44+NNZ9i9TlN/Y6Fqe1VNVtXkxMTEqN2TJM3RKOcASHIOgz/+X66qbwBU1YtD2/8UeKCtTgFrh3ZfAxxqy6eqS5IW2awzgCQB7gGerarPDtVXDTX7HeDptrwbuCnJ25JcBKwHHgUeA9YnuSjJWxmcKN49nmFIkuZqlBnAFcDvAT9K8mSrfQK4OckGBodxDgJ/AFBV+5PsYnBy9xhwa1W9DpDkNuAhBpeB7qiq/WMciyRpDka5Cuh7zHz8fs9p9rkTuHOG+p7T7SdJWjx+EliSOmUASFKnDABJ6pQBIEmdMgAkqVMGgCR1ygCQpE4ZAJLUKQNAkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROGQCS1CkDQJI6NdKPwvds3dYHl7oLkrQgnAFIUqdmDYAka5N8O8mzSfYn+Wirn5dkb5Ln2v3KVk+Szyc5kOSpJB8YeqzNrf1zSTYv3LAkSbMZ5RDQMeDjVfVEkncBjyfZC/w+8HBVbUuyFdgK/CFwLbC+3S4D7gYuS3IecAcwCVR7nN1V9dK4B3U2OPHQ0sFt1y9RTyT1atYZQFUdrqon2vIvgGeB1cAmYGdrthO4oS1vAu6tge8D5yZZBVwD7K2qo+2P/l5g41hHI0ka2ZzOASRZB1wCPAJcWFWHYRASwAWt2WrghaHdplrtVPUTn2NLkn1J9k1PT8+le5KkORg5AJK8E/g68LGq+vnpms5Qq9PU31io2l5Vk1U1OTExMWr3JElzNFIAJDmHwR//L1fVN1r5xXZoh3Z/pNWngLVDu68BDp2mLklaAqNcBRTgHuDZqvrs0KbdwPEreTYD9w/VP9yuBroceKUdInoIuDrJynbF0NWtJklaAqNcBXQF8HvAj5I82WqfALYBu5LcAvwMuLFt2wNcBxwAfgl8BKCqjib5NPBYa/epqjo6llFIkuZs1gCoqu8x8/F7gA/O0L6AW0/xWDuAHXPpoCRpYfhJYEnqlAEgSZ0yACSpUwaAJHXKAJCkThkAktQpA0CSOmUASFKnDABJ6pQBIEmdMgAkqVMGgCR1ygCQpE4ZAJLUKQNAkjplAEhSpwwASeqUASBJnRrlN4G1CNZtffAN6we3Xb9EPZHUi1lnAEl2JDmS5Omh2h8l+V9Jnmy364a23Z7kQJKfJLlmqL6x1Q4k2Tr+oUiS5mKUQ0BfAjbOUL+rqja02x6AJBcDNwHva/v8SZIVSVYAXwCuBS4Gbm5tJUlLZNZDQFX13STrRny8TcB9VfUa8NMkB4BL27YDVfU8QJL7Wttn5txjSdJYnMlJ4NuSPNUOEa1stdXAC0NtplrtVPWTJNmSZF+SfdPT02fQPUnS6cw3AO4GfgPYABwG/rjVM0PbOk395GLV9qqarKrJiYmJeXZPkjSbeV0FVFUvHl9O8qfAA211Clg71HQNcKgtn6ouSVoC85oBJFk1tPo7wPErhHYDNyV5W5KLgPXAo8BjwPokFyV5K4MTxbvn321J0pmadQaQ5CvAlcD5SaaAO4Ark2xgcBjnIPAHAFW1P8kuBid3jwG3VtXr7XFuAx4CVgA7qmr/2EcjSRrZKFcB3TxD+Z7TtL8TuHOG+h5gz5x6J0laMH4VhCR1ygCQpE4
ZAJLUKQNAkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdcoAkKROGQCS1CkDQJI6Na/fA9DCW7f1wZNqB7ddvwQ9kbRcOQOQpE4ZAJLUKQNAkjplAEhSpwwASeqUASBJnZo1AJLsSHIkydNDtfOS7E3yXLtf2epJ8vkkB5I8leQDQ/tsbu2fS7J5YYYjSRrVKDOALwEbT6htBR6uqvXAw20d4FpgfbttAe6GQWAAdwCXAZcCdxwPDUnS0pg1AKrqu8DRE8qbgJ1teSdww1D93hr4PnBuklXANcDeqjpaVS8Bezk5VCRJi2i+5wAurKrDAO3+glZfDbww1G6q1U5VlyQtkXGfBM4MtTpN/eQHSLYk2Zdk3/T09Fg7J0n6lfkGwIvt0A7t/kirTwFrh9qtAQ6dpn6SqtpeVZNVNTkxMTHP7kmSZjPfANgNHL+SZzNw/1D9w+1qoMuBV9ohooeAq5OsbCd/r241SdISmfXbQJN8BbgSOD/JFIOrebYBu5LcAvwMuLE13wNcBxwAfgl8BKCqjib5NPBYa/epqjrxxLIkaRHNGgBVdfMpNn1whrYF3HqKx9kB7JhT7yRJC8ZPAktSp/xBmLPIiT8S4w/ESDoTzgAkqVMGgCR1ykNAJ5jpt3glaTlyBiBJnTIAJKlTBoAkdcoAkKROGQCS1CkDQJI6ZQBIUqf8HMBZbKbPLPj1EJJG5QxAkjplAEhSpwwASeqUASBJnTIAJKlTBoAkdeqMLgNNchD4BfA6cKyqJpOcB3wVWAccBP5lVb2UJMDnGPxo/C+B36+qJ87k+XUyfzVM0qjGMQP4zaraUFWTbX0r8HBVrQcebusA1wLr220LcPcYnluSNE8LcQhoE7CzLe8Ebhiq31sD3wfOTbJqAZ5fkjSCMw2AAv4qyeNJtrTahVV1GKDdX9Dqq4EXhvadajVJ0hI406+CuKKqDiW5ANib5MenaZsZanVSo0GQbAF4z3vec4bdkySdyhkFQFUdavdHknwTuBR4McmqqjrcDvEcac2ngLVDu68BDs3wmNuB7QCTk5MnBYTmxpPCkk5l3oeAkvx6kncdXwauBp4GdgObW7PNwP1teTfw4QxcDrxy/FCRJGnxnckM4ELgm4OrO3kL8OdV9ZdJHgN2JbkF+BlwY2u/h8EloAcYXAb6kTN4bknSGZp3AFTV88D7Z6j/b+CDM9QLuHW+zydJGi8/CSxJnTIAJKlTBoAkdcoAkKROGQCS1Cl/FL4z/pC8pOOcAUhSp5wByK+LkDrlDECSOtX1DGCm4+GS1AtnAJLUKQNAkjrV9SEgzcyTwlIfDADNys8OSMuTh4AkqVPOADQvHiaSzn7OACSpU84ANBbOCKSzT1cB4Ae/Fo8njqU3v0UPgCQbgc8BK4AvVtW2xe6DloazBOnNZVEDIMkK4AvAbwFTwGNJdlfVM4vZD705jBIIhoa0cBZ7BnApcKCqngdIch+wCViQAPCQz9lllH8vA0Ean8UOgNXAC0PrU8Bli9wHLSMLFfLzmY28mc57GJQaxWIHQGao1RsaJFuALW311SQ/OYPnOx/4mzPY/2zluM9QPrN4bcZg1nEvUj8Wm6/zU/v7ozzQYgfAFLB2aH0NcGi4QVVtB7aP48mS7KuqyXE81tnEcffFcfdlnONe7A+CPQasT3JRkrcCNwG7F7kPkiQWeQZQVceS3AY8xOAy0B1VtX8x+yBJGlj0zwFU1R5gzyI93VgOJZ2FHHdfHHdfxjbuVNXsrSRJy45fBidJnVqWAZBkY5KfJDmQZOtS92chJdmR5EiSp4dq5yXZm+S5dr9yKfs4bknWJvl2kmeT7E/y0VZf1uMGSPL2JI8m+WEb+ydb/aIkj7Sxf7VdZLGsJFmR5AdJHmjry37MAEkOJvlRkieT7Gu1sbzWl10ADH3dxLXAxcDNSS5e2l4tqC8BG0+obQUerqr1wMNtfTk5Bny8qt4LXA7c2v6Nl/u4AV4Drqqq9wMbgI1JLgc+A9zVxv4
ScMsS9nGhfBR4dmi9hzEf95tVtWHo8s+xvNaXXQAw9HUTVfV/geNfN7EsVdV3gaMnlDcBO9vyTuCGRe3UAquqw1X1RFv+BYM/CqtZ5uMGqIFX2+o57VbAVcDXWn3ZjT3JGuB64IttPSzzMc9iLK/15RgAM33dxOol6stSubCqDsPgjyVwwRL3Z8EkWQdcAjxCJ+Nuh0KeBI4Ae4G/Bl6uqmOtyXJ8zf8X4D8Cf9vW383yH/NxBfxVksfbNyXAmF7ry/H3AGb9ugktD0neCXwd+FhV/XzwpnD5q6rXgQ1JzgW+Cbx3pmaL26uFk+S3gSNV9XiSK4+XZ2i6bMZ8giuq6lCSC4C9SX48rgdejjOAWb9uogMvJlkF0O6PLHF/xi7JOQz++H+5qr7Ryst+3MOq6mXgOwzOg5yb5PgbuuX2mr8C+BdJDjI4pHsVgxnBch7z/1dVh9r9EQaBfyljeq0vxwDw6yYG493cljcD9y9hX8auHf+9B3i2qj47tGlZjxsgyUR750+SdwAfYnAO5NvA77Zmy2rsVXV7Va2pqnUM/j9/q6r+Fct4zMcl+fUk7zq+DFwNPM2YXuvL8oNgSa5j8A7h+NdN3LnEXVowSb4CXMngGwJfBO4A/huwC3gP8DPgxqo68UTxWSvJPwf+B/AjfnVM+BMMzgMs23EDJPmnDE76rWDwBm5XVX0qyT9g8O74POAHwL+uqteWrqcLox0C+vdV9ds9jLmN8Ztt9S3An1fVnUnezRhe68syACRJs1uOh4AkSSMwACSpUwaAJHXKAJCkThkAktQpA0CSOmUASFKnDABJ6tT/A7wp3R/F2ulpAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plt.hist(a/1000,bins=70);" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'r' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTBrowser\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mNameError\u001b[0m: name 'r' is not defined" + ] + } + ], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/trigger_efficiency_bicycle_GAN.ipynb b/trigger_efficiency_bicycle_GAN.ipynb new file mode 100644 index 0000000..d907dea --- /dev/null +++ b/trigger_efficiency_bicycle_GAN.ipynb @@ -0,0 +1,635 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import os \n", + "import pickle\n", + "\n", + "import tensorflow as tf\n", + "import matplotlib.pyplot as plt\n", + "from datetime import datetime\n", + "\n", + "from architectures.bicycle_GAN import *\n", + 
"from architectures.utils.toolbox import *" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "task='TEST'\n", + "\n", + "PATH='HCAL_bycicleGAN_test31'" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LEARNING_RATE:0.0002\n", + "BETA1:0.5\n", + "BATCH_SIZE:16\n", + "EPOCHS:4\n", + "SAVE_SAMPLE_PERIOD:200\n", + "SEED:1\n", + "d_sizes:{'conv_layers': [(4.0, 4, 2, False, 1, , ), (8, 4, 2, 'bn', 0.8, , ), (16, 4, 2, 'bn', 1, , ), (32, 4, 2, 'bn', 0.8, , ), (64, 4, 1, 'bn', 1, , )], 'dense_layers': [(256, 'bn', 0.8, , ), (8, False, 0.8, , )], 'readout_layer_w_init': }\n", + "g_sizes_dec:{'deconv_layers': [(64, 4, 2, 'bn', 1, , ), (32, 4, 2, 'bn', 0.8, , ), (16, 4, 2, 'bn', 1, , ), (8.0, 4, 2, 'bn', 1, , ), (1, 4, 2, False, 0.8, , )]}\n", + "g_sizes_enc:{'latent_dims': 16, 'conv_layers': [(8.0, 4, 2, False, 1, , ), (16, 4, 2, 'bn', 0.8, , ), (32, 4, 2, 'bn', 1, , ), (64, 4, 2, 'bn', 1, , ), (128, 4, 2, 'bn', 0.8, , )]}\n", + "e_sizes:{'latent_dims': 16, 'conv_layers': [(8.0, 4, 2, False, 1, , ), (16, 4, 2, 'bn', 1, , ), (32, 4, 2, 'bn', 0.8, , ), (64, 4, 2, 'bn', 1, , ), (128, 4, 2, 'bn', 0.8, , )], 'dense_layers': [(256, 'bn', 0.8, , ), (128, 'bn', 0.8, , ), (16, False, 0.8, , )], 'readout_layer_w_init': }\n", + "preprocess:False\n", + "cost_type:FEATURE\n", + "validating_size:1000\n", + "test_size:5000\n", + "n_batches:1\n", + "reco_path:/disk/lhcb_data/davide/HCAL_project/piplus_cells_inout/piplus/reco/\n", + "true_path:/disk/lhcb_data/davide/HCAL_project/piplus_cells_inout/piplus/true/\n", + "discr_steps:1\n", + "gen_steps:4\n", + "vae_steps:4\n", + "latent_weight:100\n", + "cycl_weight:10\n", + "kl_weight:1\n" + ] + } + ], + "source": [ + "if task == 'TEST' and os.path.exists(PATH+'/hyper_parameters.pkl'):\n", + " with open(PATH+'/hyper_parameters.pkl', 'rb') as f: \n", + " hyper_dict = 
pickle.load(f)\n", + " for key, item in hyper_dict.items():\n", + " print(key+':'+str(item))\n", + " \n", + " reco_path = hyper_dict['reco_path']\n", + " true_path = hyper_dict['true_path']\n", + " #true_path_p = hyper_dict['true_path_p']\n", + " #true_path_K = hyper_dict['true_path_K']\n", + " n_batches = hyper_dict['n_batches']\n", + " test_size = hyper_dict['test_size']\n", + " LEARNING_RATE = hyper_dict['LEARNING_RATE']\n", + " BETA1 = hyper_dict['BETA1']\n", + " BATCH_SIZE = hyper_dict['BATCH_SIZE']\n", + " EPOCHS = hyper_dict['EPOCHS']\n", + " SAVE_SAMPLE_PERIOD = hyper_dict['SAVE_SAMPLE_PERIOD']\n", + " SEED = hyper_dict['SEED']\n", + " d_sizes = hyper_dict['d_sizes']\n", + " g_sizes_enc = hyper_dict['g_sizes_enc']\n", + " g_sizes_dec = hyper_dict['g_sizes_dec']\n", + " e_sizes = hyper_dict['e_sizes']\n", + " preprocess = hyper_dict['preprocess']\n", + " cost_type = hyper_dict['cost_type']\n", + " validating_size=hyper_dict['validating_size']\n", + " cycl_weight=hyper_dict['cycl_weight']\n", + " latent_weight=hyper_dict['latent_weight']\n", + " kl_weight=hyper_dict['kl_weight']\n", + " discr_steps=hyper_dict['discr_steps']\n", + " gen_steps=hyper_dict['gen_steps']\n", + " \n", + "\n", + "if task == 'TEST' and not os.path.exists(PATH+'/hyper_parameters.pkl'):\n", + " \n", + " print('Missing hyperparameter dictionary in save folder')\n", + " \n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "true_path=\"/disk/lhcb_data/davide/HCAL_project/piplus_cells_inout/piplus/true/\"" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "#dim=1\n", + "#select=False\n", + "#test_size=000\n", + "#if preprocess=='normalise':\n", + "# train_true_p, test_true_p, min_true, max_true, train_reco, test_reco, min_reco, max_reco = load_data(true_path, reco_path, n_batches, select=select, n_cells=dim*dim, energy_fraction=1, preprocess=preprocess, 
test_size=test_size)\n", + "#else:\n", + "# train_true_K, test_true_K, train_reco, test_reco = load_data(true_path_K, reco_path, n_batches, select=select, n_cells=None, energy_fraction=1, preprocess=preprocess, test_size=test_size)\n", + "# train_true_p, test_true_p, _, _ = load_data(true_path_p, reco_path, n_batches, select=select, n_cells=None, energy_fraction=1, preprocess=preprocess, test_size=test_size)\n", + "#\n", + "#train_true_p, train_true_K, train_reco = delete_undetected_events_triple(train_true_p, train_true_K, train_reco)\n", + "#test_true_p, test_true_K, test_reco = delete_undetected_events_triple(test_true_K, test_true_K, test_reco)\n", + "#\n", + "#train_true = train_true_p + train_true_K\n", + "#test_true = test_true_p + test_true_K" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "dim=1\n", + "select=False\n", + "test_size=4000\n", + "if preprocess=='normalise':\n", + " train_true, test_true, min_true, max_true, train_reco, test_reco, min_reco, max_reco = load_data(true_path, reco_path, n_batches, select=select, n_cells=dim*dim, energy_fraction=1, preprocess=preprocess, test_size=test_size)\n", + "else:\n", + " train_true, test_true, train_reco, test_reco = load_data(true_path, reco_path, n_batches, select=select, n_cells=None, energy_fraction=1, preprocess=preprocess, test_size=test_size)\n", + " \n", + "train_true, train_reco = delete_undetected_events_double(train_true, train_reco)\n", + "test_true, test_reco = delete_undetected_events_double(test_true, test_reco)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAfwAAACtCAYAAABV9831AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAHXJJREFUeJzt3Xm4ZFV97vHv283QKGDTIAg0dmOcIirDRYMiShgUFJTcCIIiaLyiiUYERMEJSZSLSpDkqhgEBQRlnkSiooKPRuQyJlxsEUSk22YUGplp5Hf/WKvofap3Ve19qupUnbPfz/PU073nX+1zzlp7DXstRQRmZmY2s80adQBmZmY2fM7wzczMGsAZvpmZWQM4wzczM2sAZ/hmZmYN4AzfzMysAZzhm1lHkm6UtP0QzrtQUkhaZZLHf1zSiYOOy2wmc4Zv1oGk2yTt1LbuXZJ+3rbu7ZKulvSQpDsk/Yek15QcF5L2alu/vaQlFeO5PJ9j87b1F+T12xfWvVDS2ZLulfSApP+WdLCk2RW/PgARsVlEXF7nmEEru0cRcVRE/K9RxWQ2HTnDN+uDpIOB44CjgA2A5wJfBd7Stuv+wH353378BtivcP11gW2Aewrr/gK4ElgMvCwingXsCWwNrNXn9c1smnKGbzZJkp4F/BPwgYg4LyIejojlEfHdiDi0sN8C4HXAAcAbJG3Qx2VPB95WKKnvA5wPPFHY50jgFxFxcETcARARN0XE2yNiWcn3WE/SxZKWSbpP0s8kzcrbnq7lkPSZXGtwmqQHJd2QaxIOl3S3pMWSXl8474Qaknz8aWVfStK7JS3K571V0vvy+mcC/wFslGtQHpK0Ufu5JL05Nz8syzUhf9kWx0dyLccDks6UNKfebTeb/pzhm03eq4A5pAy3m/2AqyPiXGAR8I4+rrkU+BXQylj3A05t22cn4Jwa5zwEWAI8m1RL8XGg05jbuwPfAtYBrgN+QEpHNiY9/Px7jesW3Q3sBqwNvBv4kqStIuJhYFdgaUSsmT9LiwdKeiHwHeDD+TtcAnxX0mqF3fYCdgE2BV4OvGuScZpNW87wzbq7IJcal0laRqqub1kXuDcinuxxjv2Ab+f/f5v+q/VPBfaT9CJgbkRc0bZ9XeCOGudbDmwILMg1FD+LzpNs/CwifpC/89mkDPboiFgOnAEslDS31rcBIuJ7EfHbSH4K/BDYruLhbwO+FxGX5jiOAdYAXl3Y598iYmlE3Ad8F9iiboxm050zfLPu9oiIua0P8A+FbX8E1uvW01zStqRS5Rl51beBl0nqJ8M5D9gB+EdSabvdH0kZeFVfBG4Bfpir0w/rsu9dhf8/Snrg+XNhGWDNGtcGQNKukn6ZmxSWAW8E1qt4+EbA71sLEfEUqf/CxoV97iz8/5HJxGg23TnDN5u8K4DHgD267LM/IOB6SXeSOtNBoeNdXRHxCKld++8pz/B/BPxtjfM9GBGHRMTzSFX2B0vacbLxFTwMPKOw/JyynSStDpxLKplvkB+sLiHdN+jcvNCyFFhQOJ+ATYA/TC5ss5nJGb7ZJEXEA8Cnga9I2kPSMyStmkurX8gdw/YiddbbovD5R+AdxZoBSXPaPiq5ZNHHgddFxG0l244AXi3pi5Kek8///NzZbqXqdkm75e0C/gT8OX/6dT2wd74nWwNv7bDfasDqpDcNnpS0Kyv6KECqVVg3d5IscxbwJkk7SlqV1CfhceAXA/gOZjOGM3yzPkTEscDBwCdJGdZi4IPABaSS/6PAqRFxZ+sDnATMJnUig1T1/Gjb5y96XHdpRPy8w7bfkjoULgRulPQAqQR9NfBgySEvINUKPESqtfjqgN69/xTpe9xPenPg22U7RcSDwIdIGff9wNuBiwrbf03qlHdr7kuxUdvxNwH7Av8HuJdUS7F7RBTfXDBrPHXum2NmZmYzhUv4ZmZmDeAM38zMrAGc4ZuZmTWAM3wzM7MGGHmGXxgf+yF
JT0l6tLDczxCkVa9/mqQn2uK4psv+2xf2e1hplrLisRt1OrZwjoMkXZuv+7WS7btK+k0+/48kzS9se4ekK/J9+n7Jsa+QdL2kRyT9X0kvLWxbQ9JJedzzPyrNslb6bnTbOd+Xv+e+Xfb5Zd7nRW3rv5/Xb9PjGmvm+/fqkm3Hq8MY7GY2UZ47oJWO3inpZElTOtBQvmZ7uvpfXfbfrke6+twK1zxQ0jX5uie2bXt1Tkvvk3SP0nwKGxS2H6Y0F0NrLoeDS85/cL63D0n6ldIkVe37fCvHvrBDjKvk7UtVmLlS0mo5Te41amfrXj0o6Rkl226Q9P5Ox448wy+Mj70mcDvpdZrWutPb99ck58/u4ahiHBHxP7rEe3kh3s3bv0P7ON8dLAE+A6yUiUnaEDgTOJQ00tiv2vb7I/AvwLElx64BXAicQBrr/Gzg/MI9OzTHvBkwn/Su8krnaTvns4GDgJsqfK/2mdyeA7wMeKDXgRHxEOnVsQkD0iiNh74XcEqF65tZsntOo7YAtgQOH0EMX2hLGzfvtGMezrmVrm6WV88tHHt7hev9gTSfw8kl29YBjicN0LSQNGDWSW377AvMBd4EHCTp6XEjcib6TtK8DmsBbybNfklhn+0pDADVw4NMHGtiN9IrpT1FxM9IY1P8z7brb0F6xfbMTseOPMPvRdJn89PYdyQ9COybS+WfKeyzk6TbCsvzJZ2fn+R+J+kDIwi9o4g4OyIuou0XJtuTNNHKhRHxKGlgl1e3nhgj4vsRcQ7lY6XvDDwWEV+NiMdJDwZrAa252TcFLomIe/K5z2LFH1cnXwS+AKw0y1qJ00gDyrQGjdk3X+Ppp1ZJsyV9Kj9F3yvpdK0YDOYUYC+lkddadiO9l/7jCtc3s4I87sMPKMwdIGl1ScdIul3SXZK+lgsLre1vybWEf5L0W0m75PUbSbool5JvkfTeqf9GnUXEORFxISXpap6r4dw8quTDwFeAbQvbj46I6yLizxGxiDTfwraQ0ixSOvzhiFiU53u4JSLubx2vNODTv5IG1ariW0ws3Kw0CZakuZK+KekOSUsk/ZPyLJZ53/bROvcDLirG1W7sM/zsb0iDdjyLLk8v8PQP52LgKtKAJjsDh2owQ4VWIukISXVmKyvaDHi66itPZ3o7vTPmsmOfAv5f4divA9tLek6u4tuHNERrKUnbAS8Evlkx9t/lWLfPy+9k5ZncDiU92b6GVMuwHPhS3nYZ6cl398L+7wROy9/FzGpQag7clTRXQsvnSX/XWwDPJ6WTn877v5L0N3soqbT7WuC2fNx3SLWTG5FGTTxqitPVT0i6YECney1wY4frzCKlT63tC0hzU2yeM95bJX26ULAB+Ahp8KrSc5Y4D9hB0tqS1gW2IeVbRaexYhCurUk1D+/O204F/lrSxjnm2aT0vD29nWC6ZPg/z3OMP5VLpt1sA6wdEUdFxBMRcQup6mbvLsccpsKMaJLaq3pqiYgjI6LTMKK9rMnKVeAPkErq/R67iDQa3B15/QLgqLIT5SfWLwMf7DJzWpnWTG5bALMi4rq27e8DDssjxT1GGoHtbZKUr/P0k6+keaRJVLr+EpvZSi7INaKLSVMPHwFPzzPwXuCgiLgvj3J4FCvSx/cA38gzDz4VEX+IiF9L2oSUCX4sIh6LiOuBE0kP5J18pC1d7atZLiI+FxHd5q2oRNKWwCeAj3bY5Z9JtZKtdKfVh2pnUuFpR1Ia9a58vgXA35GaaatqzYexJymjPp/UxNqKceN8nYMi4pFcU3Mc+eeUh9T+T1ZMtf16Un6+Ur+uomG0hw/D4hr7LgCeqzTjVsts4PIuxxwdEZ+ZRFzD8BBpTvCitSkfErXusSeSfpHnkZ4cP0l6qnxtybk+DPxnRFxbLeynnQ18jvTL215F1ZrU5BJJxYeIWeSpZknV+jfmvgN7Av8VEb+qGYNZ0+0RET+S9DpS7eh6pGa5Z5MmNLqmUEAVKY2E/PdZcr6NgNYDQsv
vSSXPTo6JiE9O/isMnqQXAt8DPhARK821IOlAUqa6XWFo5lYh8+g8f8YDkr5OKox8E/g34IiIeLBmH7NTSQ9iawAHtm1bQJpf4q7Cz2kWK2pbIKWVh5CaXN8JnB49puqeLiX89hJmt1m4FgM3F6c0jYi1ImJ3pocbyZ0BAZQmDFlAtaqi9mNnAS8tHLs56en9/ly6/jKwncp78O5ImvjkTqVZ3rYCvizpX7oFkP8gLiOVFE5v2xakjjU7tP185kTEvXmfm0ljvu9DeZOAmVUUET8ldWI7Jq+6l5SBbVb4+3tW7iwHKf0sm8dhKTBPUrGm8blMoxkJJW1KqnY/IiJWmtdB0gGkDHTHts7XvyY1PXaq6dwRODank0vyuqskva1HSJeR0va5EXFF27bFpFqAeYWf09oR8fLCPmcDm+aHurdQIa2cLhl+u+tJs2Otk3u1f6iw7QrgCUmHKM06NlvSyyR17Hk/1ZRezZhDeqqe3Yozbz4HeIWk3fM+RwK/yFU4rU5vc0i1M7Pysa2nykuBNSS9P3d8O4j0cNSaZOUq4F2S1lLq/f73wO9yD/l2+wAvYcUMbzeQqsGOrPAVP0Kaya3sjYWvAUfnKkIkrS+p/WHsFNKENFuR2g3NbPKOA3aWtEXuC/N14EuS1odUfSzpDXnfk4B3K808OCtve3FELCbNPvi/c5rzckoe6kepW7qa05ufAMdGxNdLjt2flLbtHG0zUOZajXOAjym9PrwJ6bu32tyfx4p0spXPvJHCBFBlcgFoN0qm1873+6fAMbmdf5bSjJavLezzEKkvwCnALbmZpbuIGJsPqbpip7Z1nwVOblu3Rv4B/InUSe1g4LbC9o1JnfvuJM2+9Qvgrztc8zTgCVJ1eOtzZ8V4n9/6ubWtPxI4v8txR5OeFoufwwrbdwVuJj3h/QiYX9j2/pJjv1bY/grSA9GjpAz+pYVt6+f7cg+peu+nwJaF7T8BDu4Q8y+Bfbt8p47bSaWKbfL/ZwMfy9/vQVJnoiPa9p+b4+94D/3xx5/yT4d09Hjg3Pz/OaR2+1tzGroI+FBh378B/rvw9/mGvH4+KZO7D/gt8P4uMZxckq7eWzH+hTldW6Vt/aeA73Y57rMlaeMn87Z/zsvFeJYVjl1cEu+XC9vnkt44avWL+CR58rm2GFbJ11nYIcaO24EXA08WltcB/p1Ua/AAcB2wV9sxO+XzHVLl3nq2PDMzswaYrlX6ZmZmVsNIMnxJu0i6KQ/ecNgoYuhF0v6aOLRjz+EhzcyGaTqknd0oDQ1elq5WfX/d+jDlVfq5E8VvSO80LiG1M+8TfvXKzKwjp53Wr1GU8F9J6lF4a6T3HM8gvVJgZmadOe20voxi4J2NmTiQzhLgr7odsJpWjzk8c6hB2Wg9xsM8EY+r955mjTWJtHNOzFGXtLOfGl61/bkWz9Vt26i1x1bUK85+vteQ7kmdtHMUGX5ZYCt98zwIwgEAc3gGfzV1QzbbCFwZnhvHrIdJpZ3brLpLxxPG8ic6busZzKqrdTxXt22j1h5bUa84+/lew7onddLOUVTpLyEN39gynzSK0wQRcUJEbB0RW6/K6u2bzcyapn7aqTlTFpyNv1Fk+FcBL5C0aR7tbW96jEhkZmZOO60/U16lHxFPSvogaY7m2aSx3f1KhplZF4NIO9urkYvVzHWrmPs5V93q7X7O3W5UzQvd7leV/QdhJLPlRcQllM/IZGZmHTjttH54pD0zM7MGcIZvZmbWACOp0jczs6k3zNfl6pxrWK8DVjn3IPst9KNOm37X9v7l1YcvcQnfzMysAZzhm5mZNYCr9M3MZqqIsRrlbhD6/T6DHB2vn1H76uxf95W+TlzCNzMzawBn+GZmZg3gDN/MzKwB3IZvZmYrmcrhb6eqXb3utXoZZv+IbvdzwnKNaXZdwjczM2sAZ/hmZmYN4AzfzMysAdyGb2bWUJXbiXsc277/oIfw7Xbuftv0p0o/cXhoXTMzM6vMGb6ZmVk
DOMM3MzNrALfhm5nNVNJAx3uveuyseXMnLP/5rrsBmL3B+hOWB33dbvu3rt0eU7e42q/VT/xDe2ff7+GbmZlZkUv4ZmY2FJ1K9r1KyoOoCehWom9tr3v+fuIZBy7hm5mZNYBL+GZmM1XEpNuO6743Xuc6ZaXt4vo6JelOtQHttQllMVStSRhEjUMvUzGugUv4ZmZmDeASvpmZDVTdtvk6vfjr1gZ0q03oVdNQNb7J9AcYBWf4ZmbWUz+vlfWqXm/fr6Usg62aSVfR6+Gh0/ayDoGDnpa3znTDVblK38zMrAFcwjczsylV9TW9shJ1ler1snOU6fXaYKd4q9ZYjBuX8M3MzBrAJXwzs5mqbWjdOm3BdafHLeo0tG63NvBuqgyDW/WcVc41mbjaTXYI4JZhTOs7tBK+pE0kXSZpkaQbJR2Y18+TdKmkm/O/6wwrBjOz6cZppw3LMEv4TwKHRMS1ktYCrpF0KfAu4McRcbSkw4DDgI8NMQ4zs+lk2qedVUvGdUv6ZfvUHaa3Sn+AYQ75O0pDK+FHxB0RcW3+/4PAImBj4C3AKXm3U4A9hhWDmdl047TThmVK2vAlLQS2BK4ENoiIOyD9YkuaXt0czcymyKDTzmFNlTtZvXq7VylR9zPIT6/+AJ2OLbuWVl21bd97Osbcrwk/x+WqfNzQe+lLWhM4F/hwRPypxnEHSLpa0tXLeXx4AZqZjaGBpJ3x2PACtGlnqCV8SauSfmFPj4jz8uq7JG2Yn1A3BEofmSLiBOAEgLU1L4YZp5nZOBlY2jlr3ZGmnf1Og1unR33d/YqT50xW8VqrzN+4r3NNhWH20hdwErAoIo4tbLoI2D//f3/gwmHFYGY23TjttGEZZgl/W+CdwA2Srs/rPg4cDZwl6T3A7cCeQ4zBzGy6GVza2cf0uD1PXeG8g+qtX2Vymrql9W77V62RKO53+94LJux7w8Hfm7C8y4JXTlju5+cy4dioXokztAw/In4OdOpNsOOwrmtmNp057bRh8Uh7VuoHS1PB4g0bbTHiSMxsputnRLu6+5a18fcaS3+6vnffzmPpm5mZNUDHEr6kS4B/iIjbpi4cGxcu2ZtNTlPTzrIx++u+515F3Xfkq5yralwrXXP7rQBY9d5HWGvxUxO29Wqzbx8Todv2QfXD6FbCPxn4oaRP5FdEzMyst5Nx2mljqGMJPyLOkvQ94NPA1ZK+BTxV2H5sp2PNzJrKaefK6raJl7Wp9xrnvqri8f2O9LfaoiUrtm+2TV9xTYVenfaWAw8DqwNrUfilNTOzjpx2MrjObsPoNFf2ENHK4CczIM/a5107Ybl9iuCn7ls2YbluFf8gdGvD3wU4ljTYw1YR8cjAr25mNsM47bRx1a2E/wlgz4i4caqCMTObAZx2ZpN9va1s2tpBvypXdnynzoV1zjHOurXhbzeVgZiZzQROO21ceeAdM7OGGGQ7cbepdlvqloCrlOKHWaruNnRucXtZLO33sm6cdX4WYzs9rpmZmY2eS/hmZjYUnUrEdQe46bZvp2vWKWEPYprc6dCe7xK+mZlZA7iEb2bWEMOaKrf93HVL8HVK5Z0muqk6ME/ZtXrF06tvQdk1p+K9+rpcwjczM2sAl/DNzGyoJtNm30mv0vUgrtWp9qB9JL7p0G5f5BK+mZlZA7iEb2ZmK+mnDbrfkm+3EnTV0nWvtv6ynvW9+gPU+V79ttl3G+dgwrkjKp/TJXwzM7MGcAnfzMxGolcpvEyvUnbVkn+vdTORS/hmZmYN4BK+mZmtZJDztU/2ffZus+V1OqZqLO3nL4uvH/2+h1/cv8q8BVW4hG9mZtYALuGbmdmUmGxJuttY9VX7AZRtr9N3YCZwCd/MzKwBXMI3M5uppMrtv3Xb7LttrzsWfftxvfbrtk/Va1Z5D7+TKvsNsg9EuwnnWq7KxznDNzOzgZrsgDWTqfKv2xGwytS7nTL09mtNt6YAV+mbmZk1gEv4ZmYzVcSkq44HWSXdq8TczxC
2Va9VR9UOgHUM8n6O7dC6kmZLuk7SxXl5U0lXSrpZ0pmSBvOCoZnZDOK00wZtKqr0DwQWFZY/D3wpIl4A3A+8ZwpiMDObbmZM2ll8Ba7M7A3Wn/SgN61zD/IarX1b52xfnq6GmuFLmg+8CTgxLwvYATgn73IKsMcwYzAzm26cdtowDLsN/zjgo8BaeXldYFlEPJmXlwAbDzkGM7PpZsrTzl6v79VpY656rqql5eL5erWFdzpn2fpO7eitfVvbn7pvWdf9e22rsn0qDK2EL2k34O6IuKa4umTX0h4Hkg6QdLWkq5fz+FBiNDMbN047bViGWcLfFnizpDcCc4C1SU+tcyWtkp9U5wNLyw6OiBOAEwDW1rzq3RDNzKY3p502FEMr4UfE4RExPyIWAnsDP4mIdwCXAW/Nu+0PXDisGMzMphunnTYso3gP/2PAGZI+C1wHnDSCGMzMppu+085u7cjDbFPu1Z7da/9usdWNu85Us3Xi7jeOQX7HTqYkw4+Iy4HL8/9vBV45Fdc1M5vOnHbaIHloXTMzswZwhm9mZtYAHkvfzKwhBjYla49z9zvVbj/6GU9gaFPYlpyrznceVFwu4ZuZmTWAM3wzM7MGcIZvZmbWAG7DNzNriDptwXXboLuNd9+rzblOm3/d9uxu5+4V58DmrJ/EubrFOVku4ZuZmTWAM3wzM7MGcJW+mVlDDPM1s0Ee20+c/VSlT+VreaM4l0v4ZmZmDeAM38zMrAGc4ZuZmTWA2/DNzGYqadKvd01lm3OdVwDrxjXMNvthvDo3rPOBS/hmZmaN4AzfzMysAZzhm5mZNYAiYtQx9CTpHuBh4N5Rx1JiPRxXHZ3iWhARz57qYMxmMqedkzLd4qqcdk6LDB9A0tURsfWo42jnuOoZ17jMZqpx/ZtzXPUMIi5X6ZuZmTWAM3wzM7MGmE4Z/gmjDqADx1XPuMZlNlON69+c46qn77imTRu+mZmZTd50KuGbmZnZJI19hi9pF0k3SbpF0mEjjGMTSZdJWiTpRkkH5vXzJF0q6eb87zojim+2pOskXZyXN5V0ZY7rTEmr9TrHEGKaK+kcSb/O9+1V43K/zGY6p52V42tM2jnWGb6k2cBXgF2BlwD7SHrJiMJ5EjgkIv4S2Ab4QI7lMODHEfEC4Md5eRQOBBYVlj8PfCnHdT/wnhHE9K/A9yPixcDmOb5xuV9mM5bTzlqak3ZGxNh+gFcBPygsHw4cPuq4ciwXAjsDNwEb5nUbAjeNIJb5+RdgB+BiQKQBGlYpu49TFNPawO/I/UQK60d+v/zxZ6Z/nHZWjqVRaedYl/CBjYHFheUled1ISVoIbAlcCWwQEXcA5H/XH0FIxwEfBZ7Ky+sCyyLiybw8ivv2POAe4Ju5uuxESc9kPO6X2UzntLOaRqWd457hq2TdSF8rkLQmcC7w4Yj40yhjyfHsBtwdEdcUV5fsOtX3bRVgK+D4iNiSNLynq+/NpsY4pAETOO2sbGhp57hn+EuATQrL84GlI4oFSauSfmFPj4jz8uq7JG2Yt28I3D3FYW0LvFnSbcAZpKqp44C5klbJ+4zivi0BlkTElXn5HNIv8ajvl1kTOO3srXFp57hn+FcBL8i9JlcD9gYuGkUgkgScBCyKiGMLmy4C9s//35/UPjVlIuLwiJgfEQtJ9+cnEfEO4DLgrSOM605gsaQX5VU7Ar9ixPfLrCGcdvbQxLRz7AfekfRG0lPXbOAbEfG5EcXxGuBnwA2saO/5OKkt6izgucDtwJ4Rcd+IYtwe+EhE7CbpeaSn1nnAdcC+EfH4FMezBXAisBpwK/Bu0kPmWNwvs5nMaWetGLenAWnn2Gf4ZmZm1r9xr9I3MzOzAXCGb2Zm1gDO8M3MzBrAGb6ZmVkDOMM3MzNrAGf4YyTPKvU7SfPy8jp5ecGoYzMzG0dON6tzhj9GImIxcDxwdF51NHBCRPx+dFGZmY0vp5vV+T38MZOHoLwG+AbwXmDLiHhitFGZmY0vp5v
VrNJ7F5tKEbFc0qHA94HX+5fWzKw7p5vVuEp/PO0K3AG8dNSBmJlNE043e3CGP2byGMo7A9sAB7VmRzIzs3JON6txhj9G8qxSx5Pmi74d+CJwzGijMjMbX043q3OGP17eC9weEZfm5a8CL5b0uhHGZGY2zpxuVuRe+mZmZg3gEr6ZmVkDOMM3MzNrAGf4ZmZmDeAM38zMrAGc4ZuZmTWAM3wzM7MGcIZvZmbWAM7wzczMGuD/A+LMvlBQhbd9AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "if preprocess != False:\n", + " draw_one_sample(train_true, train_reco, preprocess,\n", + " min_true=min_true, max_true=max_true, \n", + " min_reco=min_reco, max_reco=max_reco,\n", + " save=False, PATH=PATH\n", + " )\n", + "else:\n", + " draw_one_sample(train_true,train_reco)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "h=test_reco[0].shape[0]\n", + "w=test_reco[0].shape[1]" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def HCAL():\n", + "\n", + " \n", + " tf.reset_default_graph()\n", + " \n", + " _, n_H_A, n_W_A ,n_C = train_true.shape\n", + " _, n_H_B, n_W_B ,n_C = train_reco.shape\n", + " \n", + " gan = bicycle_GAN(n_H_A, n_W_A, n_H_B, n_W_B, n_C,\n", + " min_true=min_true, max_true=max_true, \n", + " min_reco=min_reco, max_reco=max_reco,\n", + " d_sizes=d_sizes, g_sizes_enc=g_sizes_enc, g_sizes_dec=g_sizes_dec, e_sizes=e_sizes,\n", + " lr=LEARNING_RATE, beta1=BETA1,\n", + " preprocess=preprocess, cost_type=cost_type,\n", + " cycl_weight=cycl_weight, latent_weight=latent_weight, kl_weight=kl_weight,\n", + " discr_steps=discr_steps, gen_steps=gen_steps,\n", + " batch_size=BATCH_SIZE, epochs=EPOCHS,\n", + " save_sample=SAVE_SAMPLE_PERIOD, path=PATH, seed= SEED)\n", + " \n", + " vars_D = [v for v in tf.trainable_variables() if 'discriminator' in v.name]\n", + " vars_G = [v for v in tf.trainable_variables() if 'generator' in v.name]\n", + " vars_E = [v for v in tf.trainable_variables() if 'encoder' in v.name]\n", + " \n", + " if task == 'TEST':\n", + " \n", + " vars_to_train=tf.trainable_variables()\n", + " vars_all = tf.global_variables()\n", + " vars_to_init = list(set(vars_all)-set(vars_to_train))\n", + " init_op = tf.variables_initializer(vars_to_init)\n", + " \n", + " saver=tf.train.Saver()\n", + " \n", + " # Add ops to save and restore all the 
variables.\n", + " \n", + " gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)\n", + " \n", + " with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n", + " \n", + " sess.run(init_op)\n", + " \n", + " if task=='TEST':\n", + " \n", + " print('\\n Evaluate model on test set...')\n", + " \n", + " #if os.path.exists(PATH+'/pretrained/'+PATH+'.ckpt.index'):\n", + " # saver.restore(sess,PATH+'/'+PATH+'pretrained.ckpt')\n", + " \n", + " if os.path.exists(PATH+'/'+PATH+'bicycle.ckpt.index'):\n", + " saver.restore(sess, PATH+'/'+PATH+'bicycle.ckpt')\n", + " \n", + " print('Model restored.')\n", + " \n", + " gan.set_session(sess)\n", + " \n", + " #test_reco_NN=gan.get_samples_A_to_B(test_true.reshape(test_true.shape[0],n_H_A,n_W_A,n_C))\n", + " test_reco_NN=np.zeros_like(test_true)\n", + " for i in range(len(test_true)):\n", + " test_reco_NN[i]=gan.get_sample_A_to_B(test_true[i].reshape(1,n_H_A,n_W_A,n_C))\n", + "\n", + " done = True\n", + " while not done:\n", + " \n", + "\n", + " if preprocess:\n", + " draw_nn_sample(test_true, test_reco, 1, preprocess,\n", + " min_true=min_true, max_true=max_true, \n", + " min_reco=min_reco, max_reco=max_reco,\n", + " f=gan.get_sample_A_to_B, save=False, is_training=False, PATH=PATH)\n", + " else:\n", + " draw_nn_sample(test_true, test_reco, 1, preprocess,\n", + " f=gan.get_sample_A_to_B, save=False, is_training=False)\n", + " \n", + " ans = input(\"Generate another?\")\n", + " if ans and ans[0] in ('n' or 'N'):\n", + " done = True\n", + " \n", + " done = True\n", + " while not done:\n", + " \n", + " if preprocess:\n", + " draw_nn_sample(test_true, test_reco, 20, preprocess,\n", + " min_true=min_true, max_true=max_true, \n", + " min_reco=min_reco, max_reco=max_reco,\n", + " f=gan.get_sample_A_to_B, save=False, is_training=False)\n", + " else:\n", + " draw_nn_sample(test_true, test_reco, 20, preprocess,\n", + " f=gan.get_sample_A_to_B, save=False, is_training=False)\n", + " \n", + " ans = 
input(\"Generate another?\")\n", + " if ans and ans[0] in ('n' or 'N'):\n", + " done = True\n", + " \n", + " return test_reco_NN" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Convolutional Network architecture detected for discriminator B\n", + "Convolutional Network architecture detected for encoder B\n", + "Encoder_B\n", + "Convolution\n", + "Input for convolution shape (?, 52, 64, 1)\n", + "Encoder output shape (?, 16)\n", + "Generator_A_to_B\n", + "Input for generator encoded shape (?, 52, 64, 1)\n", + "Output of generator encoder, \n", + " and input for generator decoder shape (?, 1, 1, 512)\n", + "Generator output shape (?, 52, 64, 1)\n", + "Generator_A_to_B\n", + "Input for generator encoded shape (?, 52, 64, 1)\n", + "Output of generator encoder, \n", + " and input for generator decoder shape (?, 1, 1, 512)\n", + "Generator output shape (?, 52, 64, 1)\n", + "Encoder_B\n", + "Convolution\n", + "Input for convolution shape (?, 52, 64, 1)\n", + "Encoder output shape (?, 16)\n", + "Discriminator_B\n", + "Input for convolution shape (?, 52, 64, 1)\n", + "Feature output shape (?, 8)\n", + "minibatch features shape (?, 10)\n", + "Logits shape (?, 1)\n", + "Discriminator_B\n", + "Input for convolution shape (?, 52, 64, 1)\n", + "Feature output shape (?, 8)\n", + "minibatch features shape (?, 10)\n", + "Logits shape (?, 1)\n", + "Discriminator_B\n", + "Input for convolution shape (?, 52, 64, 1)\n", + "Feature output shape (?, 8)\n", + "minibatch features shape (?, 10)\n", + "Logits shape (?, 1)\n", + "Generator_A_to_B\n", + "Input for generator encoded shape (?, 52, 64, 1)\n", + "Output of generator encoder, \n", + " and input for generator decoder shape (?, 1, 1, 512)\n", + "Generator output shape (?, 52, 64, 1)\n", + "\n", + " Evaluate model on test set...\n", + "INFO:tensorflow:Restoring parameters from 
HCAL_bycicleGAN_test31/HCAL_bycicleGAN_test31bicycle.ckpt\n", + "Model restored.\n" + ] + } + ], + "source": [ + "if __name__=='__main__':\n", + "\n", + " if task == 'TEST': \n", + " if not os.path.exists(PATH+'/checkpoint'):\n", + " print('No checkpoint to test')\n", + " else:\n", + " test_reco_NN =HCAL()" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "if preprocess:\n", + " test_reco=denormalise(test_reco, min_reco, max_reco)\n", + " reco_MC_hist = test_reco.reshape(test_reco.shape[0], test_reco.shape[1]*test_reco.shape[2])\n", + " reco_MC_hist = np.sum(reco_MC_hist,axis=1)\n", + " max_E=np.max(reco_MC_hist)\n", + " \n", + " test_reco_NN=denormalise(test_reco_NN, min_reco, max_reco)\n", + " reco_NN_hist = test_reco_NN.reshape(test_reco_NN.shape[0], test_reco_NN.shape[1]*test_reco_NN.shape[2])\n", + " reco_NN_hist = np.sum(reco_NN_hist,axis=1)\n", + " max_NN = np.max(reco_NN_hist)\n", + " \n", + " test_true=denormalise(test_true, min_true, max_true)\n", + " true_hist = test_true.reshape(test_true.shape[0], test_true.shape[1]*test_true.shape[2])\n", + " true_hist = np.sum(true_hist,axis=1)\n", + " max_true_E=np.max(true_hist)\n", + "else:\n", + " reco_MC_hist = test_reco.reshape(test_reco.shape[0], test_reco.shape[1]*test_reco.shape[2])\n", + " reco_MC_hist = np.sum(reco_MC_hist,axis=1)\n", + " max_E=np.max(reco_MC_hist)\n", + " \n", + " reco_NN_hist = test_reco_NN.reshape(test_reco_NN.shape[0], test_reco_NN.shape[1]*test_reco_NN.shape[2])\n", + " reco_NN_hist = np.sum(reco_NN_hist,axis=1)\n", + " max_NN = np.max(reco_NN_hist)\n", + " \n", + " true_hist = test_true.reshape(test_true.shape[0], test_true.shape[1]*test_true.shape[2])\n", + " true_hist = np.sum(true_hist,axis=1)\n", + " max_true_E=np.max(true_hist)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Max NN 6096.728515625, Max_MC 
6120.0, Max NN rescaled 6120.0\n" + ] + } + ], + "source": [ + "max_NN=test_reco_NN.max()\n", + "max_MC=test_reco.max()\n", + "test_reco_NN_rescaled=(test_reco_NN/test_reco_NN.max())*max_MC\n", + "print('Max NN {0}, Max_MC {1}, Max NN rescaled {2}'.format(max_NN, max_MC, test_reco_NN_rescaled.max()))" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "test_reco_inner = get_inner_HCAL(test_reco)\n", + "test_reco_outer = get_outer_HCAL(test_reco)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "test_reco_inner_NN = get_inner_HCAL(test_reco_NN_rescaled)\n", + "test_reco_outer_NN = get_outer_HCAL(test_reco_NN_rescaled)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "max_true=test_true.max()" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "indices_MC, triggered_true_MC, triggered_reco_inner_MC, triggered_reco_outer_MC = get_triggered_events(test_true, test_reco_inner, test_reco_outer)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "indices_NN, triggered_true_NN, triggered_reco_inner_NN, triggered_reco_outer_NN = get_triggered_events(test_true, test_reco_inner_NN, test_reco_outer_NN)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABKwAAAJhCAYAAABsASgZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzs3X98z/X+//H7c2a/jOUgM6zNjyba+oGTH/mVH8PxO8RGVFKcKKQfnI5VskI5HaToh1/52SlShxWF6py+HVJ8iqSSmJNIJubHtuf3j83OfrG9t/d775ftdr1c3pe15+v5ej4frzcul3Pul+fz+TLWWgEAAAAAAABO4ePtAgAAAAAAAICcCKwAAAAAAADgKARWAAAAAAAAcBQCKwAAAAAAADgKgRUAAAAAAAAchcAKAAAAAAAAjkJgBQAAAAAAAEchsAIAAAAAAICjEFgBAAAAAADAUXy9XYBTVa9e3UZERHi7DAAAAAAAgDJj+/btR621NQrrR2B1EREREdq2bZu3ywAAAAAAACgzjDE/FqUfWwIBAAAAAADgKARWAAAAAAAAcBQCKwAAAAAAADgKgRUAAAAAAAAchcAKAAAAAAAAjkJgBQAAAAAAAEchsAIAAAAAAICj+Hq7AAAAAAAAJCklJUVHjhzR+fPnvV0KABdUrFhRV155papUqeK2MQmsAAAAAABel5KSop9//lm1a9dWYGCgjDHeLglAEVhrlZqaqkOHDkmS20IrtgQCAAAAALzuyJEjql27toKCggirgMuIMUZBQUGqXbu2jhw54rZxCawAAAAAAF53/vx5BQYGersMAMUUGBjo1u28BFYAAAAAAEdgZRVw+XL3v18CKwAAAAAAADgKgRUAAAAAAAAchcAKAAAAAAAUyZ49e2SM0YoVK7xdSqkaNGiQGjVq5O0yyhUCKwAAAAAAPGzz5s0yxsgYo+nTpxfY59lnn83us3nz5nzXDx8+rAcffFCNGzdWpUqVFBgYqMaNG+uBBx7Qvn37Ljr38OHDs8ct7IPyY/r06Vq6dKm3y7goX28XAAAAAADApbR++gMd+i3V22Wo9hWB+uSRW0o0RkBAgJYsWaKHHnoo37XFixcrICBAZ86cyXdt69at6t27t86dO6e4uDiNGTNGFSpU0FdffaUVK1bohRde0Llz5wqc85577lGnTp2yfz969KjGjRun3r17q3///i7VHxUVpdTUVPn5+bl0H5xn+vTpatasmYYMGeLtUgpEYAUAAAAAcLRDv6Vq/9N/8nYZinjk3RKP0bNnT61evVpffPGFrr/++uz2Xbt2aefOnRo4cKBWrVqV655Dhw6pT58+qlq1qjZt2qTIyMhc1xMTEzVp0qSLztmyZUu1bNky+/f9+/dr3Lhxuvbaa4scVpw9e1a+vr6qUKGCAgICinSPt50+fVpBQUHeLgPFxJZAAAAAAABKSceOHVWrVi0tWbIkV/uiRYsUFhamjh075rvnmWee0fHjx/XKK6/kC6skKSgoSH/729/cVuOLL74oY4w2btyoiRMnKiwsTIGBgfrll18ueobV119/rS5duigoKEhXXnmlxowZo88//7zEfVNSUvTQQw+pXr168vPzU+3atTV27FilpKTk6teiRQs1aNBAX331lbp06aLKlSurX79+2df/+9//6t5771Xt2rXl5+enyMhIPfbYY/lWpVlrNX36dEVERCgwMFBNmzZVUlKSS99fUWru1q2bQkNDlZ6enu/+SZMmycfHRz/++KPL9YeGhqpTp07asWOH2rVrp6CgINWqVUtPPPGErLWSpDNnzsgYo2PHjikpKSl7O2jOM7rmzp2r6OhoBQcH64orrlBMTIyeeuopl76HkmKFFQAAAAAApaRChQqKi4vT66+/runTp6tChQpKT0/XsmXLFB8fLx+f/OtK1qxZo4iICHXo0KFUa33ggQdUpUoVPfzwwzp79qwCAwP122+/5euXnJysdu3
a6ezZsxo3bpxq1qyp1atXa8SIESXqm5qaqvbt2+u7777TyJEj1bBhQ+3evVvz5s3TZ599po8++kgVK1bM7p+SkqLOnTurZ8+e6t+/f/a2xZ9//lk33XSTzp07p7vvvlt16tTRZ599pmnTpunrr7/WP/7xj+wx/vrXv2rq1Knq0KGDJkyYoAMHDmjAgAGqW7dukb6zotYcHx+vDRs2aOPGjYqNjc01xooVK9S6dWtdddVVLtcvSQcOHFC3bt0UHx+vwYMHa+3atZoyZYrq16+v+Ph4+fn5acmSJRo1apTq1auniRMnSpJCQkIkSfPmzdN9992n/v37689//rMyMjK0Z88ebdmyRZMnTy7S9+AW1lo+BXyaNm1qAQAAAACl4+uvv77otasefqcUK7m4ktTx4YcfWkl2wYIF9ssvv7SS7Pr166211m7YsMFKsl9++aVdsGCBlWQ//PBDa621KSkpVpLt1auXOx7BWmvtDz/8YCXZyZMnF3h93rx5VpJt1qyZPX/+fK5ru3fvtpLs8uXLs9tGjx5tJdnNmzdnt50/f962bNmyRH0ff/xxGxAQYHft2pWrhpUrV1pJdsmSJdltN910k5VkZ82ale957rjjDlu9enWbnJycq/2ZZ56xkuxHH31krbX28OHDtmLFirZDhw42LS0tu9+bb75pJdmoqKgCv6+cilrzyZMnbVBQkL399ttz9fvkk0+sJDtv3jyX67fW2po1a+b6u2Wttenp6TYqKsq2adMm1/3VqlWzsbGx+Z6ha9eu9oYbbij0WQtyqX/HF0jaZouQy7AlEAAAAACAUhQTE6OYmJjsbYGLFy/Wddddp5iYmHx9L2wjq1KlSqnWKEl33323fH0L35j17rvv6vrrr1e7du2y23x9fTV69OgS9V2xYoVatGih0NBQHT16NPvTtm1b+fr6atOmTbn6+/j46J577snVlp6ertWrV6t79+6qWLFirnE6d+4sSdnjbNiwQefPn88+0P6Cvn37ql69eoV+D67UHBwcrN69e+utt97Kdcj+smXLVLFiRQ0YMMDl+i+IiIhQ165dc30v7dq103fffVekZwgJCdGPP/6obdu2Fam/pxBYAQAAAABQyoYOHao1a9bo8OHDWrNmjYYOHVpgvwtBVd4zm0pDQedl5ZWRkaEDBw6oYcOG+a5dffXVxe5rrdXevXu1efNm1ahRI9enVq1aSktL05EjR3LdExoaqsDAwFxthw4d0u+//67FixfnG+fGG2+UpOxx9u/fLynzTYiF1VcQV2uOi4vTyZMntW7dOkn/C6e6du2qatWquVz/BREREflqq1q1qn799ddCn0GSJk+erICAADVv3lwNGjTQ3XffrXffLfkLB1zFGVYAAAAAAJSy+Ph4PfLIIxoyZIjOnj2ruLi4AvtVrlxZderU0ZdfflnKFSpf+HMpxph8bTbrkO/i9L2wLaxdu3b6y1/+UuA41atXz/V7QfVmZGRIkm677bYCz8mSpDp16uSqoaD6isLVmmNjY1W9enUtW7ZMAwYM0Pvvv68jR47k+rvgSv0X5Fwdlre+ooiOjtbevXu1YcMGvf/++0pKStLLL7+sHj166O233y729+MqAisAKEWxb8Qq+VSyR+cIqxSmpP6uvckEAAAApatWrVrq2LGj3nvvPcXGxqpWrVoX7dunTx/NmTNHH3zwgW655ZZSrLJwPj4+Cg8P1969e/Nd+/bbb0vUNzIyUikpKerUqVOx67vwhsNz584VOs6FFWV79uzRNddck+va3r17cx3wXhBXa76w9e/VV1/Vb7/9pmXLlik4OFi9evUqVv2uulTwVKlSJd1666269dZblZGRofHjx+v555/XRx99pLZt27q1joshsAKAUpR8Klm7hu3y6BzRi6I9Oj4AAADcIyEhQS1btsz3lri8HnroIS1dulQjRozQBx98kG/LV2pqqiZNmqRZs2Z5sNqL6969u+bNm6ctW7Zkn02VlpamF154oUR9Bw8
erKlTp+rNN99Uv379cl1LS0vTyZMnVbVq1UvW5ufnp1tvvVXLly/Xf/7zHzVv3jzX9dTUVKWnpys4OFhdu3aVr6+vZs+erV69emWvVHrrrbf0/fffF7hVsKQ1x8fHa968eVq6dKnWrFmjvn37KigoqFj1uyo4OFjHjx/P137s2LHsLYlSZhB3/fXXS1KRtxW6A4EVAAAAAABe0LJlS7Vs2bLQfnXr1tWbb76pvn37qkmTJoqPj1fTpk3l4+Ojr7/+WqtWrdLRo0e9FlhNmjRJK1euVM+ePTVmzBiFhoZq1apVOnv2rKTcK3lc6fvoo48qKSlJAwYMUFxcnFq0aKH09HTt27dPb7zxhp577jkNGjSo0PpmzpypTz75RG3atNEdd9yh6667TqdPn9aePXu0evVqrV+/Pvug9IkTJyoxMVGdO3dWv3799OOPP+qll15S48aNlZ6eXuhcrtbcqlUrRUREaNKkSTp58qTi4+OLXb+rmjdvrjfffFNTp05VgwYNVKVKFXXv3l1t27bVVVddpVatWiksLEz79+/X3LlzVatWLXXo0MHleYqLwAoAAAAAAIfr0KGDvvrqKz333HP65z//qaVLl8paq8jISN12220aM2aM12qrU6eOtmzZogceeECzZs1SpUqVdNttt2ngwIFq166dAgICitU3KChImzdv1owZM7Ry5UqtXr1agYGBioiI0LBhw4q8Na1mzZr6z3/+o2nTpmnNmjV69dVXVaVKFdWrV0/jx49Xo0aNsvs+9dRTqlKlil544QU9+OCDatKkiVavXq3XXntNX3zxRaFzuVqzMUZxcXGaNm2arrzyygK3/blSvytmzJihlJQUPfPMM/r9998VFRWl7t27689//rNWrVqlv//97zpx4oRCQ0PVp08fPfbYYwoJCSnWXMVhinroVnnTrFkz6+1XOAIoe6IXRZfKlkBPzwEAAOBuu3fvzndu0AWtn/5Ah35LLeWK8qt9RaA+ecRZZ0g52fLlyxUXF6ft27dnv9HOHX3hXJf6d3yBMWa7tbZZYWOxwgoAAAAA4GiERM6Xmpqa6y1958+f1+zZs1W9enVFR0cXuy/KLwIrAAAAAABQIjfddJNat26tmJgYnThxQqtWrdKOHTs0Z86cfG/Xc6Uvyi8CKwAAAAAAUCK9evXSW2+9paVLlyotLU2NGzfWokWLdPvtt5eoL8ovAisAAAAAAFAiU6dO1dSpU93eF+WXj7cLAAAAAAAAAHJihRUAlDFhlcIUvcjzh1WGVQpTUv8kj88DAAAAoPwhsAKAMqa0QqTSCMUAAAAAlE8EVgAcL/aNWCWfSvb4PKwYAgAAAABnILAC4HjJp5K1a9guj8/DiiEAAAAAcAYOXQcAAAAAAICjEFgBAAAAAADAUQisAAAAAAAA4CgEVgAAAAAAoFBnzpyRMUZPP/20t0spVY888ogCAgK8XUa5w6HrAAAAAABnmxUtnTjg7SqkkHBpXPFeBrR582Z16NBBkvTPf/5T3bp1y3V94cKFuuOOO/T++++rU6dOxb4nr4SEBD3++ONFqvGHH35QRESEK4+Fy9T8+fN17tw53Xfffd4u5aIIrAAAAAAAznbigJRwwttVSAkhbhnmscceyxc+eeIeSerXr58aNGiQq23o0KFq1aqVRo0alau9Ro0alxwrICBAqampqlixost1wFnmz5+v33//ncAKAAAAAABIN954o7Zv36633npLffv29dg9F8TExCgmJiZX29ChQxUZGakhQ4YUaYy0tDRlZGTIz8/vstkad/r0aQUFBXm7DJQAZ1gBAAAAAFBK7rrrLoWHh2vKlCmy1nrsnuLasGGDjDFavHixEhMTFRkZKX9/f33++ecXPcPq4MGDGjBggCpXrqyqVatqyJAhOnjwYIn7njt3Tk899ZQaNWokf39/XXnllRo2bJgOHz6cq9+gQYPk6+urgwcPqn///qpataqaNm2afT0lJUUPPfSQ6tWrJz8/P9WuXVt
jx45VSkpKvud/7bXX1KhRIwUEBKhx48Z6/fXXXfr+ilLzqFGj5Ofnp19//TXf/fPnz5cxRlu3bnW5/hYtWqhBgwb6/vvv1aNHDwUHB6t69eoaM2aMzp8/n90vNDRU27dv1zfffCNjjIwxuYLIFStWqHnz5goJCVHlypXVqFEjjR071qXvwR1YYQUAAAAAQCnx8/PTY489prvvvlsrV67UoEGDPHJPSU2fPl3WWo0aNUr+/v4X3S546tQp3XLLLdq/f79Gjx6thg0basOGDerVq1eJ+mZkZKhv377atGmT7rzzTl133XX66aefNHv2bH388cf6/PPPFRLyvy2a1lp16dJF1113nRITE7MDmtTUVLVv317fffedRo4cqYYNG2r37t2aN2+ePvvsM3300UfZWxxfeeUVjRgxQjfeeKMSExP122+/acyYMQoPDy/Sd1bUmuPj4/Xiiy9q9erVuueee3KNsWzZMtWtW1dt2rRxuX5JOnnypDp27KguXbpo5syZ2rp1q+bMmaPQ0FBNnjxZkjR37lxNnDhRZ86c0fTp0yVJFSpUkJR5VtrgwYPVsWNHTZs2Tb6+vvruu+/03nvvFek7cCtrLZ8CPk2bNrUAnOHahdeWmXlK61lKQ1l6FgAA4H1ff/31xS9OqVJ6hVxKCer48MMPrSS7YMECe/78edugQQMbFRVl09LSrLXWvvbaa1aSff/990t0T1FIsvHx8QVeW79+vZVka9eubU+ePJnrWmpqqpVkExMTs9umT59uJdlFixbl6jtgwIAS9V20aJGVZN97771cff/9739bY4x98skns9tuu+02K8nef//9+Z7n8ccftwEBAXbXrl252leuXGkl2SVLllhrrT1z5oytVq2ajYqKsqdPn87ut23bNmuMsf7+/gV+XzkVteaMjAx71VVX2bZt2+bqd/DgQevj42Mffvhhl+u31tqbbrrJSrLz5s3L1bdz5862bt26udqaNm1qo6Ki8j3Dvffea6tXr27T09MLfd6CXPLfcRZJ22wRchlWWAEAHCv2jVgln0r2+DxhlcKU1D/J4/MAAABIkq+vr6ZMmaKhQ4dq6dKlGjZsmEfuKYnbb79dwcHBhfZ79913Va1aNcXHx+dqHzt2rFavXl3svitWrFD9+vV1ww036OjRo9ntDRo0UJ06dbRp0yb95S9/yXXP6NGj89W3YsUKtWjRQqGhobnGadu2rXx9fbVp0yYNGTJE//73v3Xs2DFNnjxZgYGB2f2aNm2q9u3b61//+leh30VRazbGKC4uTk8//bQOHjyoOnXqSJKWL1+ujIyMXN9PUeu/oGLFihoxYkSuujp06KCNGzfq3Llz8vPzu+QzhISEKCUlRRs3blSXLl0KfWZPIrACADhW8qlk7RpWvFdHuyJ6UbTH5wAAAMgpLi5OiYmJeuKJJxQXF+exe4orMjKySP3279+vevXqZW8pu+Dqq68uUd89e/bohx9+uOhWxMqVK+f63RijiIiIXG3WWu3du1e7d+++6DhHjhzJrk2SoqKi8vWJiooqUmDlSs3x8fFKTEzU8uXLNXHiREmZ2wGvvfZaRUdHu1z/BbVr15avb+6op2rVqrLW6vjx46pZs+Yln2Hs2LFas2aNYmNjVbt2bbVv3149e/bUrbfemm9cTyOwAgAAAACglPn4+CghIUEDBw7Uq6++Kn9/f4/cU1w5VxldirVWxpgC20vSNyMjQ40aNdLs2bMLnLdSpUq5fvfx8cm3eujC1rJ27drlW411QfXq1XPVUFB9ReVKzU2aNFFMTIyWLVumiRMn6ptvvtGOHTuUmJhYrPovyBsG5lTQ95xXWFiYdu7cqU2bNikpKUnvvfeeXn/9dTVr1kxbt24t8t8LdyCwAgAAAADAC/r376/rrrtOU6dOvWgg4Y57PCkyMlL/93//p/T09Fxhybfffluivg0aNNCXX36pW265RT4+PsWqzcfHR5GRkUpJSVGnTp0KfQ4pc5VUt27dcl3
75ptvijSfqzXHx8fr4Ycf1u7du7V8+XIZYzR48OBi1e+qSwVzfn5+6tatW/b3MGvWLI0fP16rV6/W7bff7tY6LqV4f+oAAAAAAKBEjDF64okndPDgQc2fP99j93hS9+7ddezYMb3++uu52v/+97+XqO/gwYN19OhR/e1vf8t3zVqb6zynSxk8eLB27NihN998M9+1tLQ0HT9+XJLUsmVLVatWTfPnz1dqamp2n88//1ybN28u8lyu1Dx48GAZY7Rs2TItX75cN998s6666qpi1e+q4ODgAu89duxYvrYbbrhBkvTrr78Wa67iYoUVAAAAAABe0qtXL/3xj3/UZ5995tF7PGX06NF66aWXNGLECO3YsUMNGjTQhg0blJyc+eKcnCt5XOl7xx13aO3atZowYYK2bt2q9u3by8/PT99//73WrFmjESNG6JFHHim0vkcffVRJSUkaMGCA4uLi1KJFC6Wnp2vfvn1644039Nxzz2nQoEHy9/fXtGnTdM899+jmm2/W0KFDdfz4cc2ePVsxMTHas2dPoXO5WnPdunXVtm1bPf/88zp58qQefPDBYtfvqubNm2vz5s168MEHdeONN6pixYoaMGCAhgwZolOnTqlDhw4KDw/Xzz//rBdffFGVKlVS7969XZ6nJAisAAAAAADOFhIuJYR4u4rMOjzgySefVGxsrMfv8YTg4GBt3rxZDzzwgF5++WVVqFBBPXr00IwZM3TNNdcoICCgWH19fHz01ltvac6cOVq4cKEmTZokX19f1a1bV927d1ffvn2LVF9QUJA2b96sGTNmaOXKlVq9erUCAwMVERGhYcOGqW3bttl9R44cqQoVKuiZZ57Rww8/rHr16mn27NnatWtXkQKr4tQcHx+vLVu2ZAdGJanfFY8++qj279+vV155Rc8++6z8/f01YMAADR8+XK+99prmz5+vX3/9VdWrV1fr1q312GOPFfkgfncxRTl0y6MFGNNU0lBJt0iKlHRK0leSEq21G/P09ZX0qKQ7JdWStF/SHElzbZ4HMcZUkvSkpEGS/iBpt6RnrLUrilJXs2bN7LZt24r/YADcJnpRdKm9Kc7T85TWs5SGsvR9laU/FwAALle7d+/WNddc4+0y4Cb//ve/1apVK/3jH/9Qv3793NYXzlaUf8fGmO3W2maFjeWEM6wmSoqX9C9JEyRNl3SlpPeNMaPy9J0n6QlJ70u6T9JOSbMlPVbAuG9KGiPp9ayfxyQtN8aU3glhAAAAAACUcTnPfJIyz2uaNWuWKlasqDZt2hS7L8o3J2wJ/Luk4dbaMxcajDHzJH0h6SljzAJrbZox5jpJIyTNstaOz+r6sjFmtaRJWf0OZ93fW1IXSfdba/+e1faKMkOxGcaYldbas6X2hAAAAAAAlFF9+/ZVzZo11axZM50/f17r1q3LPh+pRo0axe6L8s3rgZW19l8FtKUaY96RNF5SqKSDkm7Luvx8nu7PS+ovqY8yV2Apq2+qpAU5xswwxsyRtESZ2w/Xu/ExAAAAAAAol7p3766FCxdq7dq1Sk1NVf369fXss89q3LhxJeqL8s3rgdUlhElKk3ThPYvNJP1srf0xT7/PJGVIapqjrZmkndba1Dx9P8362VQEVgAAAAAAlNjYsWM1duxYt/dF+eaEM6zyMcY0ltRP0tvW2lNZzWGSDuXta609p8zzqWrnaC6wr6TkrJ+1C7gmY8xIY8w2Y8y2X375pbjlAwAAAAAAoAQcF1gZY6pIWi3ptKScawIDJV3s3KkzWdcL63smx/V8rLXzrbXNrLXN2DsLAAAAAADgHY7aEmiMCZS0TlI9SV2ttQdyXE6V5H+RWwOyrhfWNyDHdQAAAAAAADiQY1ZYGWP8JL0lqaWkAdbaLXm6JCtzq19B91XT/7b7XbRvjrbkAq4BAAAAAADAARwRWBljfCWtktRZ0u3W2ncK6LZdUqgxJjxPe3NlPsf2PH1jjDEBefrelOM6AAAAAAAAHMjrgZUxxkf
SYkm9Jd1rrV1xka6rsn7mfZ3AWEnnJK3J0bZSUpCkEXnm+bOko5I+KHnlAAAAAAAA8AQnnGE1U9JgSVskpRpjhuS5/r619mdr7Q5jzKuSxhtjKkv6TFIXSQMlPW6tzbnNb62kTZKey1qR9W1Wv5aS7rTWnhEAAAAAAAAcyQmB1Y1ZP9tlffLqIOnnrP++V9IBSXdIGi5pv6T7Jc3OeYO11hpj+kiaKmmopKqS9kiKt9Yuc2/5AAAAAAAAcCevbwm01ra31ppLfDbn6HveWvu4tTbCWutvrY2y1v7dWmsLGPd3a+0D1tpa1toAa+31hFUAAAAAgPLgm2++UZcuXXTFFVfIGKOFCxd6u6TLWosWLdSgQYNC+w0aNEiNGjUqhYrKPiessAIAAAAA4KJi34hV8invv+w9rFKYkvonFevezZs3q0OHDhe9Hh8fr6lTpyoyMrJI402ZMkUJCQkXvT58+HDt3btXCQkJql69ulq1auVqyaUqISFBjz/+eJH6/vDDD4qIiPBsQfA6AisAAAAAgKMln0rWrmG7vF2GohdFl3iMu+66S+3bt8/XXq9ePdWoUUNLlizJ1T5//nx99NFHWrRokXx8/rdJKiYm5qJznDlzRp9++qn+/Oc/64EHHihxzaWhX79++VYwDR06VK1atdKoUaNytdeoUaM0S3PJkiVLVMAmMBQDgRUAAAAAAKWkRYsWGjIk77vG/ifvtY0bN+qjjz5SXFycfH2L9n/hjxw5Ikm64oorCu2bnp6u9PR0+fn5FWlsT4mJickXwg0dOlSRkZGX/L5ySktLU0ZGhlefpWLFil6bu6zx+hlWAAAAAADAPYYPH66rrrpKkvTUU0/JGCNjjKTMbYnGGC1YsEAzZ85U/fr15e/vr3/961+SJGutZs2apcaNG8vf3181a9bU8OHDlZycezvmwoULZYzRhg0b9Ne//lV16tRRpUqVFBsbqwMHDkiS5s6dq4YNGyogIEDNmzfX559/7tbn3LBhg4wxWrx4sRITExUZGSl/f//seZ5++mndfPPNqlGjhvz9/XX11VfrySefVFpaWr6xTp48qUmTJunqq6/Ofu5u3brp008/vWQNu3fvVp06dXTttddmf0cb0yIDAAAgAElEQVQFnWEVGhqqTp06aceOHWrXrp2CgoJUq1YtPfHEE/lWY1lrlZiYqPDwcAUGBqp58+bauHFjuTwbixVWAAAAAACUkt9//11Hjx7N1165cmX5+/uXePx77rlH119/vcaNG6fevXurf//++fo8//zzSktL08iRI7PDE0kaM2aM5s6dq86dO2v06NHav3+/5syZow8//FCff/65qlWrlmucyZMny9/fXw899JCSk5P17LPPqnfv3oqLi9PixYs1atQonT59Ws8884z69u2rffv2uX0F0vTp02Wt1ahRo+Tv75+9XXDGjBnq06ePbr31Vvn7+2vr1q2aMmWKkpOTNW/evOz7T506pTZt2mjnzp0aPHiw7r//fp0+fVoff/yxPv74Y7Vo0aLAebdt26Zu3bqpXr16Wr9+vf7whz9css4DBw6oW7duio+P1+DBg7V27VpNmTJF9evXV3x8fHa/yZMnKzExUbfccoseeugh/fTTT+rfv7/q1Knjhm/r8kJgBQAAAABAKRk3bpzGjRuXr/21117T8OHDSzx+y5YtVatWLY0bN07XXnttgdvpfvnlF+3du1chISHZbV999ZXmzp2rXr16ac2aNdmrstq0aaM+ffooMTFRM2fOzDWOj4+Ptm7dmr1VMT09XTNnztTx48f11VdfqVKlSpKkqlWr6r777tOGDRvUs2fPEj9jTr/99pv27Nmj4ODgXO0HDhzInl+SRo8ercjISM2cOVNPPPFEdrCVmJioL7/8Ui+//LLuuuuu7P4TJ0686FlUW7ZsUc+ePfXHP/5Ra9asyTd3Qb799lutX79eXbt2lSSNHDlSjRs31ksvvZQdWB0+fFgzZsxQx44d9d5772WfWda
2bVv16NFDUVFRLnwzlz8CKwBAsYRVCnPLwaOFzQEAAFCWjB8/Xt26dcvX3qRJk1KrIT4+PldYJUnr1q2TlBnUXAirJKl3796KiorS22+/nS+wuvvuu3Odq9W6dWvNnDlT8fHxucKi1q1bS5L27dvn9me5/fbbCwyMLsyfnp6ulJQUpaenq0OHDnr66af1xRdfqHPnzpKkVatWqWHDhrrzzjvzjZHze7hg3bp1GjhwoLp166bly5cXeVVcREREdlglZYZ97dq10zvvvJPdlpSUpLS0NI0ZMybXAft/+tOf8h1IXx4QWAEAiqW4r3QGAAAoz6655hp16tTJqzVERkbma9u/f7+kzPryuuaaa/Tuu+/ma79wVtYFFw55Dw8PL7D9119/LVa9l1LQs0jSO++8o6lTp2r79u35zq06fvy4pMzzor777jv16dOnwHAqr0OHDqlfv35q3769Vq9erQoVKhS5zoiIiHxtVatWzfWdXPgzaNiwYb6+DRs21Pfff1/k+coCAisAyMKKofKrtP7sCfkAAIATBAYGXvRaQcHNxbbGXSywuVj7xcYpiYKeZcuWLerVq5duvvlmvfDCCwoLC5O/v79++OEHjRw5UhkZGbn6FyWskqQaNWqoUaNG2rJli5KSktS9e/ci11mU7+TCf7vyZ1CWEVgBQBbChPKrNP7sPR2IAQAAlMSFFUBff/21br755lzX9uzZU+AKIadasWKFgoODtXHjRvn5+WW3r127Nlc/Y4zq16+vXbt2FWlcPz8/rV27Vj169FC/fv20bt267K2F7nBhtdjevXvzrXTbt2+fSyu6ygICKwCAc82Klk4c8Pw8IeHSuKL9DxUAAICyqGfPnnr00Uf17LPPqnXr1tmrfNatW6dvvvlG48eP93KFRVehQgX5+PgoPT09uy0tLU3PPvtsvr4DBw7UU089pYULF+Y79N5am2+1U2BgoNatW6du3bqpd+/eeuedd3TLLbe4pe4uXbqoQoUKmj17tnr27Jl9jtW7776rffv2ceg6AACOceKAlHDC8/MkhBTeBwAAwA0+/fRTBQQE5GuvVq1agYexl5YmTZpo9OjReuGFF9S1a1f17NlTP/74o+bMmaPw8HBNmjTJa7W5qk+fPpo7d646duyoIUOG6NSpU1q+fHmBW+0eeeQRrV27Vnfeeac2btyoVq1a6cyZM/r444/VunVrTZgwId89QUFBevfdd7O/p/Xr16tt27YlrjssLEwTJkzQ9OnT1aVLF/Xp00c//fST5s+fryZNmuQK4MoDAisAAAAAgKOVxnmTRa2jpF555RW98sor+dqbNm3q1cBKkmbPnq369etrwYIFmjBhgkJCQjRw4EBNmzZN1apV82ptrujUqZOWLFmip59+WhMmTFC1atU0aNAgxcfH68Ybb8zVNzg4WB9//LGmTp2qf/zjH1q1apWqVq2qZs2aqU2bNhedIzg4WOvXr1eXLl30pz/9SUlJSWrVqlWJa09MTFSVKlX04osv6sEHH9S1116rN954Q7NmzdKhQ4dKPP7lxJTHg7uKolmzZnbbtm3eLgOAMs/+2TWM7VrlUkJI6a2w8vA8/D0GAODSdu/eXeAb6gBIUVFRioqK0ttvv+3tUi6pKP+OjTHbrbXNChvLx21VAQAAAAAAoNhSU1Pztb3zzjvau3ev287KulywJRAAAAAAAMABVq5cqZdeekk9evRQ9erVtXPnTi1YsED16tXTXXfd5e3yShWBFQAAAAAAgAPExMToiiuu0Jw5c3Ts2DFVrVpVgwcP1rRp01S5cmVvl1eqCKwAAAAAAAAc4MYbb9T69eu9XYYjEFgBKJHYN2KVfCrZo3O4420sAAAAAIDLB4EVgBJJPpXMm88AAADgFtZaGWO8XQaAYrDWunU83hIIAAAAAPA6X19fpaWlebsMAMWUlpYmX1/3rYsisAIAAAAAeF1AQIB+//13b5cBoJhOnjypgIAAt41HYAUAAAAA8LoaNWrol19
+0enTp92+tQiA51hrdfr0aR09elQ1atRw27icYQUAAAAA8LqAgADVrFlT//3vf3X27FlvlwPABf7+/qpZs6ZbV1gRWAEAAAAAHCEkJEQhISHeLgOAAxBYAQAQEi4lePh/HEeGe3Z8AAAAoAwhsAIAYNwuz8+xKNrzcwAAAABlBIeuAwAAAAAAwFEIrAAAAAAAAOAoBFYAAAAAAABwFAIrAAAAAAAAOAqBFQAAAAAAAByFwAoAAAAAAACOQmAFAAAAAAAARyGwAgAAAAAAgKMQWAEAAAAAAMBRCKwAAAAAAADgKARWAAAAAAAAcBQCKwAAAAAAADgKgRUAAAAAAAAchcAKAAAAAAAAjkJgBQAAAAAAAEchsAIAAAAAAICjEFgBAAAAAADAUXy9XQAAwM1mRUsnDnh+npBwadwuz88DAAAAoNwhsAKAsubEASnhhOfnSQjx/BwAAAAAyiW2BAIAAAAAAMBRCKwAAAAAAADgKARWAAAAAAAAcBQCKwAAAAAAADgKgRUAAAAAAAAchcAKAAAAAAAAjkJgBQAAAAAAAEchsAIAAAAAAICjEFgBAAAAAADAUQisAAAAAAAA4Ci+3i4AAHCZCgmXEkI8PwcAAACAcofACgBQPON2ebsCAAAAAGUUWwIBAAAAAADgKKywAgCgFISdT1P0omjPzlEpTEn9kzw6BwAAAFAaCKwAACgFSQeTpYQTHp3D04EYAAAAUFrYEggAAAAAAABHIbACAAAAAACAo7AlEACA0hASLiWEeHaOyHDPjg8AAACUEgIrAABKw7hdnp+DM6wAAABQRrAlEAAAAAAAAI5CYAUAAAAAAABHIbACAAAAAACAoxBYAQAAAAAAwFEIrAAAAAAAAOAoBFYAAAAAAABwFAIrAAAAAAAAOAqBFQAAAAAAAByFwAoAAAAAAACOQmAFAAAAAAAARyGwAgAAAAAAgKMQWAEAAAAAAMBRCKwAAAAAAADgKARWAAAAAAAAcBQCKwAAAAAAADgKgRUAAAAAAAAchcAKAAAAAAAAjkJgBQAAAAAAAEchsAIAAAAAAICjEFgBAAAAAADAUQisAAAAAAAA4CgEVgAAAAAAAHAUAisAAAAAAAA4CoEVAAAAAAAAHIXACgAAAAAAAI7i6+0CAHhO7BuxSj6V7NE5wiqFeXR8AAAAAED5Q2AFlGHJp5K1a9gub5cBAAAAAIBL2BIIAAAAAAAARyGwAgAAAAAAgKMQWAEAAAAAAMBRCKwAAAAAAADgKARWAAAAAAAAcBQCKwAAAAAAADgKgRUAAAAAAAAchcAKAAAAAAAAjkJgBQAAAAAAAEchsAIAAAAAAICjEFgBAAAAAADAUQisAAAAAAAA4CgEVgAAAAAAAHAUAisAAAAAAAA4itcDK2NMsDEmwRizzhhz2BhjjTELC+gXkXWtoM/LBfT3NcY8Zoz5wRhzxhizxxhznzHGlMqDAQAAAAAAoFh8vV2ApOqSpkg6LGmbpB6F9F8r6Y08bfsK6DdP0ghJCyR9JqmLpNmS/iDpiRLUCwAAAAAAAA9yQmB1WFIda+0hY4yvpPOF9P8/a+3SS3UwxlynzLBqlrV2fFbzy8aY1ZImGWMWWGsPl7hyAAAAAAAAuJ3XAytr7VlJh1y5xxgTmHVv6kW63Jb18/k87c9L6i+pjzJXYAEAUGaEnU9T9KJoz89TKUxJ/ZM8Pg8AAADKL68HVsVwv6TJkmSM2Sfpb9bauXn6NJP0s7X2xzztn0nKkNTU41UCAFDKkg4mSwknPD5PaYRiAAAAKN8up8AqQ9ImSWsk/SgpTJnb/uYYYyKstRNz9A1TAau2rLXnjDHHJNUuhXoBAAAAAABQDJdNYGWtPSCpU862rLcDfiBpvDHmRWvtd1mXAiWlXGSoM1nX8zHGjJQ0UpLCw8PdUTYAAAAAAABc5OPtAkrCWpsuaaYyn6NjjkupkvwvcltA1vWCxptvrW1mrW1Wo0YNt9YKAAAAAACAormsA6ssF86
pqp6jLVmZ2wJzMcb4SaqWdR0AAAAAAAAOVBYCqwZZP4/kaNsuKdQYk3dfX3NlPvP20igMAAAAAAAArrtszrAyxvzBWvtrnrYASZMkpUl6L8elVZIekTRW0oM52sdKOqfMg9sBAChbQsKlhBDPzxPJOY8AAADwLEcEVsaY+yRdof+t+Ioxxvwl67/fttbulPRs1oqpTyT9JKmmpNslNZT0l6xD2SVJ1todxphXlXkYe2VJn0nqImmgpMettWwJBACUPeN2lc48i6JLZx4AAACUW44IrJS5CuqqHL/fkPWRpIOSdipzBdWFt/j9QdJpSTskPWKtfbOAMe+VdEDSHZKGS9ov6X5Js91ePQAAAAAAANzGEYGVtTaiCH2WS1ruwpjnJT2e9QEAAAAAAMBloiwcug4AAAAAAIAyxBErrACg3JgVLZ04UHi/kgjhQGwAAAAAlzcCKwAoTScOSAknvF0FAAAAADgaWwIBAAAAAADgKARWAAAAAAAAcBQCKwAAAAAAADgKgRUAAAAAAAAchcAKAAAAAAAAjkJgBQAAAAAAAEchsAIAAAAAAICjEFgBAAAAAADAUQisAAAAAAAA4CgEVgAAAAAAAHAUAisAAAAAAAA4CoEVAAAAAAAAHIXACgAAAAAAAI5CYAUAAAAAAABHKXZgZYzxM8aEGWOqurMgAAAAAAAAlG++Re1ojKksaZCkzpLaSqqR41qapJ2SPpD0prX2/7m5TgAAAAAAAJQThQZWxpjakh6TFC+pUlbzb5K+kfSrpEBJ1SRdL6mppAeNMV9ImmmtXe6JogEAAAAAAFB2XTKwMsY8LmmCJH9J70taIekTa+13BfQNkvRHSbHKDLdeN8bcL2mktXanuwsHAAAAAABA2VTYGVYTJc2XFG6t7W6tXVxQWCVJ1trT1trN1tpHJV0lqbekipL6uLViAAAAAAAAlGmFbQlsYK1NdnVQa62VtE7SOmNMaLEqAwAAAAAAQLl0yRVWxQmrChjjvyUdAwAAAAAAAOVHYVsCAQAAAAAAgFJFYAUAAAAAAABHcWtgZYyJMsakG2PS3DkuAAAAAAAAyo/CDl0vDpP1AQAAAAAAAFzm1sDKWvuN2GYIAAAAAACAEiBcAgAAAAAAgKN4YksgAAAow8LOpyl6UbRn56gUpqT+SR6dAwAAAM5FYAUAAFySdDBZSjjh0Tk8HYgBAADA2VwKrIwxGZJsEbpaay1hGAAAAAAAAFzmaqi0VQUHVldIulpSoKQvJf1WwroAAIBThYRLCSGenSMy3LPjAwAAwNFcCqyste0vds0YU1nSLEmtJPUrWVkAAMCxxu3y/BxsCQQAACjX3PaWQGvtSUkjJaVJespd4wIAAAAAAKB8cVtgJUnW2gxJH0rq485xAQAAAAAAUH544mD0AElVPTAuAACA28S+EavkU8kenyesUpiS+id5fB4AAICyxK2BlTGmkaQBkva5c1wAAAB3Sz6VrF3DPH8eVzTncQEAALjMpcDKGPPqJcapK6m1pAqSJpSwLgAAAAAAAJRTrq6wGl7I9T2SZlhrXyteOQAAAAAAACjvXA2sIi/SniHpuLX29xLWAwAAAAAAgHLOpcDKWvujpwoBypPSPOgXAAAAAIDLjSfeEgigEKV10C8AAAAAAJcjH28XAAAAAAAAAOTk1sDKGBNljEk3xqS5c1wAAAAAAACUH57YEmiyPgAAAAAAAIDL3BpYWWu/EdsMAQBACYWlS9GLoj07x/k0KSHEo3NIkiLDPT8HAABAGcOh6wAAwHGS7ixDL6bwcPAGAABQFrEaCgAAAAAAAI5CYAUAAAAAAABHcTmwMsbUMsbMNcbsM8akZr0VMO+HtwQCAAAAAACgWFw6w8oYU1vSZ5JqSvpKkr+kHyWdlVQva7wvJJ1wb5kAAAAAAAAoL1xdYfVXSaGSulprr8tqe81a20iZgVWSpEBJ/dxXIgAAAAAAAMoTVwOrWEkbrLUb816w1h6UNECZgdXjbqgNAAA
AAAAA5ZCrgVWoMrcCXpCuzIBKkmSt/V3S+5J6l7w0AAAAAAAAlEeuBlYpkvxy/H5cUu08fU5IqlGSogAAAAAAAFB+uRpY/Sipbo7fv5R0izEmSJKMMT6Sukg66J7yAAAAAAAAUN64GlhtktTBGFMx6/dFksIk/csYM0PSJ5KaSFrpvhIBAAAAAABQnvi62P8VZW4DrC7psLV2qTGmqaQxkmKy+qyQ9JT7SgQAAAAAAEB54lJgZa39VtIzedrGGWOmSaonab+19mc31gcAAAAAAIByxtUVVgWy1v4i6Rd3jAUAAAAAAIDyzdUzrAAAAAAAAACPumRgZYyZbYwJLe7gxpi+xpjBxb0fAAAAAAAA5U9hK6ziJX1njJlnjLmpKAMaY0KMMfcYYz6X9IakaiUtEgAAAAAAAOVHYWdY1Zf0pKSRkkYaY36S9ImkbZIOK/ONgQHKDKUaSWohqbkkf0m7JfWw1q73TOkAAAAAAAAoiy4ZWFlrj0u6zxjzjKR7JQ2XNDjrY/N0N5LSJW2S9IKkd6y1Ge4uGAAAAAAAAGVbkd4SaK39SdJkSZONMU0k3SwpXJkrq1IlHZG0U9JH1toUD9UKAAAAAACAcqBIgVVO1tqvJH3lgVoAAAAAAACAQg9dBwAAAAAAAEoVgRUAAAAAAAAchcAKAAAAAAAAjkJgBQAAAAAAAEchsAIAAAAAAICjEFgBAAAAAADAUQoNrIwxbY0x4aVRDAAAAAAAAFCUFVYfShru4ToAAAAAAAAASUULrIzHqwAAAAAAAACycIYVAAAAAAAAHIXACgAAAAAAAI5S1MDKerQKAAAAAAAAIEtRA6sEY0y6C580j1YNAAAAAACAMsu3iP1cPXidg9oBAAAAAABQLEUNrBKstU94tBIAAAAUS+wbsUo+lezROcIqhSmpf5JH5wAAALigqIEVAAAAHCr5VLJ2Ddvl0TmiF0V7dHwAAICceEsgAAAAAAAAHIUVVgAAAB4Udj7N46uTwiqFeXR8AACA0kZgBQAA4EFJB5OlhBPeLgMAAOCyUpTAKlLSb54uBAAAoEwKCZcSQjw/xzjPnmEFAABQmooSWA2TtFnS1gsNxpgrJYVaa3fm7WyM6S2pt7X2TncVCQAAcNkqjSDJ04EYAABAKStKYJWQ9dmao22UpL9KqlBA/+uVGXIRWAEAAJSG0ljFFRnu2fEBAABy4AwrAACAy11prOLy8MHxAAAAOfl4uwAAAAAAAAAgJwIrAAAAAAAAOAqBFQAAAAAAAByFwAoAAAAAAACOUtRD1yOMMW1z/i5Jxpg2kkzeviUvCwAAAAAAAOVVUQOrYVmfnIykzQX0NZJsCWoCAAAAAABAOVaUwGqrCKAAAAAAAABQSgoNrKy17UuhDgAAAAAAAEASh64DAAAAAADAYQisAAAAAAAA4CiX3BJojLm9uANbaxcX914AAAAAAACUX4WdYbVQuQ9cL8obAC/0IbACAAAAAACAywoLrO4ooK2fpJ6StkjaLOm/kkIldZDUVtLbkt5yX4kAAAAAAAAoTy4ZWFlrF/3/9u492rKqvhP994eoVSKpEkHllF1dROyO96bEtBiTkKGEGLgJ3a2miebVQW2bYbeKQWNGXR+xSKdzK60tQYzaYAz0Teeh+EBDvKUtSm7QyA3NFW6PaISkKKlDfGCqolgIyLx/7F1wPJz3OWvvtc/5fMZYY1WtNdecv82suefhd9aaa+bfq+qnkvxvSZ7bWvvIrOIXVtVzk7w3ybvWNEoAAAAANozlLrr++iQfnCNZlSRprV2V5ENJ3rjawAAAAADYmJabsDolyS2LlLklyVOXWmFVPbqqdlfVR6rqjqpqVXX5PGWPrqo3VtXfVtXdVfX5qnpFVdUcZY+pqrdW1fSw7I1V9bNLjQsAAACA8VhuwuqeDJJWCzklyb3LqPP4JG9K8vQkf7lI2Xcm+fUkH0/yiiQ3Jbkkc9/R9YEkr0zy34b7O5P84WrefAgAAABA95absPp
Ekp+a666mGnhlkp9M8t+XUecdSZ7YWptK8vz5ClXVKUlemuSi1tp5rbV3t9ZekOTKJK+rqhNnlH1ukjOTvKa19trW2mXDv382yZur6pHLiA8AAACAEVpuwmpXkr9PcnGSL1bV5VX1W8NH+L6Y5LeTfH1Ybklaa99urR1YQtEXDvcXzzp+cZJHJnnerLKHk1w2o537k7w9yeOSnLHU+AAAAAAYrQXfEjhba+3WqvqhJO9I8pwk3zuryMeTvLy19jdrFN9Mpyb5cmvttlnHr09yfwaPFM4se1Nr7fCssn8x3D89yUc7iBEAAACAVVpWwipJWmu3JDmzqrYl+YEkW5IcSnLjEu+UWqmpJA+pv7V2T1XdmWTbrLI3z1HH9HC/bY5zqarzkpyXJNu3b19VsAAAAACszKIJq6p6XpK9s+9WGianukxQzbY5yT/Mc+7u4fmZZb89T7nMKvuA1tqlSS5NklNPPbWtLEwAAAAAVmMpa1h9IMlXq+r9VfWLVbW166DmcTiDtarmsml4frGym2acBwAAAKCHlpKwemGSDyf58ST/NcmXq+pjVfWymW/mG4HpDB71+y5V9Ygkj82Dj/vNW3bGsek5zgEAAADQA4smrFpr72ut/XySE5KcneSKJDszWHj9S1X16ar6lao6udtQc0OSJ1TV7MWlnpHB57hhVtmnVtWmWWWfOeM8AAAAAD20lDuskiSttXtbax9trZ2XwZ1Kz0rytiSPT/Kfknyhqm6uqt1V9bQOYn3vcH/+rOPnJ7knyYdmHPvjJI9K8tIjB6rqqCQvT/K1JNd0EB8AAAAAa2DZbwlMktZaS/Lnw+3VVXVKkp9O8vwkv5bkjVW1P8kHWmuvWay+qnpFkq15MIH21Kp6w/DPH26t3dRau7Gq3jNs79gk1yc5M8kLklzYWpv5mN9VST6R5K3DO7K+OCz3w0le0lq7OwAAAAD00ooSVrO11j6X5HNJ3lRVT8qDyatXJVk0YZXkV5L84xl//4HhliS3J7lp+OeXJdmf5MVJXpRk37CNS2bF04ZvN/yNJP86yWOSfD7JL7TW/mB5nw4AAACAUVqThNVMrbVbk7w5yZur6glLvGbHEsvdm+TC4bZY2W8m+eXhBgAAAMCEWPIaVivRWvu7LusHAAAAYP1Z9A6rqvqzFdTbWmvPXsF1AAAAAGxwS3kk8EdXUG9bwTUAAAAAsKSE1UlLrOvUJP9HkpOTfGfFEQEAAACwoS2asGqt3bbQ+ar6R0l+M8nPZbAm1p8mee2aRAcAAADAhrPitwRW1bFJXp/k/CSbktyY5Fdaa59co9gAAAAA2ICWnbCqqocl+XdJfi3J8Um+lOQNrbX/c41jAwAAAGADWlbCqqqen2RPButUfSPJ65Jc1Fr7dgexAQAAALABLSlhVVXPTPKWJD+SwYLq70hyYWvtax3GBjBaF+1MDu3vto0t27utHwAAYB1YNGFVVX+U5GeGf70qya+21m7pNCqAcTi0P9l9aNxRAAAAbHhLucPqBUlakluSfDPJr1XVYte01tq5q4wNAAAAgA1oqWtYVZInD7elaEkkrAAAAABYtqUkrH6s8ygAAAAAYGjRhFVr7dpRBAIAAAAASXLUuAMAAAAAgJmWuoYVbBhnXXlWpu+a7rSNqWOmOq0fAAAAJtmCCauq+psV1ttaa09a4bUwVtN3Tefmc28edxgAAACwYS12h9VRGbzxb6ZHJDlx+Of7ktyZ5LEz6rojyT1rFSAAAAAAG8uCa1i11na01k46siU5JcmBJH+RwdsDN7XWTkyyKckZST6b5PYkT+02bAAAAADWq+Uuuv4fk2xNcnpr7drW2v1J0lq7v7X2qQySWMcNywEAAADAsi03YfX8JFe11uZ85K+1dneSq5L89GoDAwAAAGBjWm7C6rFJHr5ImYcPywEAAADAsi226PpstyY5p6re1Fo7NPtkVT0myTlJVvp2QQAAemjq3vuy84qd4w5jTUwdM5W95+wddxgAwAK
Wm7B6V5K3Jbm+qv5jkj9L8uUkj0/y7CSvT/KEWMMKAGBd2Xv7dLL7Ib+vnH1IfnwAAB8JSURBVEjrJfEGAOvZshJWrbW3V9WTk7wyye/NUaSSXNJae8daBAcAAADAxrPcO6zSWntVVf1Rkpck+YEkW5IcSvI/klzeWvv02oYIAMDYbdme7N4ymnYuuLn7dgCAXlt2wipJWmufSfKZNY4FAIC+GlUSaRRJMQCg95b7lkAAAAAA6JSEFQAAAAC9ImEFAAAAQK9IWAEAAADQKxJWAAAAAPTKit4SCAAAndiyvfs3BZ60vdv6AYBVk7ACAKA/Lri5+zau2Nl9GwDAqkhYAcAInLbnmhw4eLjTNrZt3Zzrdp3RaRsAADAKElYAMAIHDh7Ovj1nd9rGjl1Xd1o/AACMikXXAQAAAOgVCSsAAAAAekXCCgAAAIBekbACAAAAoFckrAAAAADoFQkrAAAAAHpFwgoAAACAXpGwAgAAAKBXjh53AAAAs52255ocOHh43GGsiW1bN+e6XWeMOwwAgIkiYQUA9M6Bg4ezb8/Z4w5jTezYdfW4QwAAmDgeCQQAAACgVySsAAAAAOgVCSsAAAAAesUaVgD01qgW3rYoNgAA9IuEFQC9NaqFty2KDQAA/eKRQAAAAAB6RcIKAAAAgF6RsAIAAACgV6xhBfTfRTuTQ/u7b2fL9u7bAAAAYFESVkD/Hdqf7D407igAAAAYEQkrAIAJd9qea3Lg4OFO29i2dXOu23VGp20AABwhYQUAMOEOHDycfXvO7rSNHbuu7rR+AICZLLoOAAAAQK9IWAEAAADQKxJWAAAAAPSKhBUAAAAAvSJhBQAAAECvSFgBAAAA0CsSVgAAAAD0ioQVAAAAAL1y9LgDAAAmy2l7rsmBg4c7bWPb1s2d1g8AQL9JWAEAy3Lg4OHs23P2uMMAAGAdk7ACgHVi29bN2bHr6pG0AwAAXZKwAoB14rpdZ4w7BAAAWBMSVgBseKO4M8ldSQAAsHQSVgBseO5MAgCAfjlq3AEAAAAAwEzusAIA6JBHTgEAlk/CCgCgQx45BQBYPo8EAgAAANArElYAAAAA9IqEFQAAAAC9Yg0rAAA2lKl778vOK3Z2384xU9l7zt7O2wGA9UjCCgCADWXv7dPJ7kOdtzOKpBgArFcSVgDrzGl7rsmBg4c7b2fb1s3efgasuVF8h31m0wk5cfeWTttIkpy0vfs2AGCdkrACWGcOHDycfXvO7rydHbuu7rwNYOMZxXfYjl0Zyfdk3GEFACtm0XUAAAAAekXCCgAAAIBekbACAAAAoFesYQXAimzburnzday2bd3caf3A0o1izB9pZ72Yuve+zt8UOHXMVPaes7fTNgBgHCSsAFgRbwiEjcWYX769t08nuw912kbXCTEAGBePBAIAAADQKxJWAAAAAPSKhBUAAAAAvSJhBQAAAECvSFgBAAAA0CveEggAABNq6pipzt8UOHXMVPaes7fTNgBgNgkrAACYUKNIJHWdEAOAuXgkEAAAAIBecYcVAAB0Ycv2ZPeW7tu44OZu2wCAMZCwAgCALowikdR1QgwAxkTCCgAAJtUo7uI6aXu39QPAHCSsAABgUo3iLi6LrgMwBhZdBwAAAKBXJKwAAAAA6BUJKwAAAAB6RcIKAAAAgF6RsAIAAACgVySsAAAAAOgVCSsAAAAAemWiElZVtaOq2jzbu2eVPbqq3lhVf1tVd1fV56vqFVVV44ofAAAAgMUdPe4AVuiqJFfOOnbLrL+/M8lLk1yW5PokZya5JMlxSX696wABAAAAWJlJTVj9f62135/vZFWdkkGy6qLW2quHh99dVe9L8rqquqy1dscoAgUAAABgeSbqkcCZqmpzVW2e5/QLh/uLZx2/OMkjkzyvs8AAAAAAWJVJTVi9Ksm3knyrqr5YVS+fdf7UJF9urd026/j1Se5P8vQRxAgAAADACkzaI4H3J/lEkg8luS3JVAaP/r29qna01l47LDeV5MD
si1tr91TVnUm2zVV5VZ2X5Lwk2b59+9pHz6qddeVZmb5rutM2po6Z6rR+AAAAYGETlbBqre1P8pyZx4ZvB7wmyaur6l2ttVuTbE7yD/NUc/fw/Fz1X5rk0iQ59dRT21rFzdqZvms6N59787jDAAAAADo0qY8EPqC19p0kb8ngs/z48PDhDNaqmsum4XkAAAAAemii7rBawJG1qo4f7qeT7JxdqKoekeSxw/MAANCZ0/ZckwMHu/096batm3PdrjM6bQMAxmG9JKxOHu6/MtzfkOQnqmr78DHCI56RwZ1YN4wyOAAANp4DBw9n356zO21jx66rO60fAMZloh4JrKrj5ji2KcnrktyX5GPDw+8d7s+fVfz8JPdksGg7AAAAAD00aXdY/eeq2p7kuiRfSvL4JL+U5MlJ3nDkbqrW2o1V9Z4MFmI/Nsn1Sc5M8oIkF7bWPBIIAAAA0FOTlrD6WJLzhttxSb6V5MYku1prH5hV9mVJ9id5cZIXJdmX5FVJLhlRrAAAAACswEQlrFprf5jkD5dY9t4kFw43AABYd7Zt3dz5OlbHPqXT6gFgThOVsAIAAB40ijcE7rxiV+dtAMBsE7XoOgAAAADrn4QVAAAAAL0iYQUAAABAr0hYAQAAANArElYAAAAA9IqEFQAAAAC9ImEFAAAAQK9IWAEAAADQKxJWAAAAAPSKhBUAAAAAvXL0uAMAJtxFO5ND+7ttY8v2busHAACgVySsgNU5tD/ZfWjcUQAAALCOSFgBjNBpe67JgYOHO21j29bNndYPAADQNQkrgBE6cPBw9u05e9xhAAAA9JqEFQAAG8q2rZuzY9fVI2kHAFgZCSsAADaU63adMe4QAIBFHDXuAAAAAABgJgkrAAAAAHpFwgoAAACAXpGwAgAAAKBXJKwAAAAA6BUJKwAAAAB65ehxBwAAAPTX1L33ZecVO7tv55ip7D1nb6dtnHXlWZm+a7rTNkbxOQA2AgkrAABgXntvn052H+q8nVEkxabvms7N597caRuj+BwAG4FHAgEAAADoFQkrAAAAAHpFwgoAAACAXrGGFQAAMHZTx0x1vv7T1DFTndYPwNqRsAIAAMbOm/UAmEnCCgAAGL+LdiaH9o87itU7afu4IwBYFySsAACA8Tu0P9l9aNxRrF7HjzUCbBQSVgAAwLxub8fnibu3dN/QFncmAfAgCSsAAGBeP/rtt2XfnrPHHQYAG8xR4w4AAAAAAGaSsAIAAACgVySsAAAAAOgVCSsAAAAAekXCCgAAAIBekbACAAAAoFeOHncAAAAAp+25JgcOHu60jW1bN+e6XWd02gYAa0PCCgAAGLsDBw9n356zO21jx66rO60fgLXjkUAAAAAAekXCCgAAAIBekbACAAAAoFckrAAAAADoFQkrAAAAAHpFwgoAAACAXpGwAgAAAKBXjh53AAAAQH9t27o5O3ZdPZJ2RtFG15/ln558X3ZesbPTNkZl6pip7D1n77jDADYoCSsAAGBe1+06Y9whrJmRfJbdP5/sPtR9OyOwXhJvwGTySCAAAAAAvSJhBQAAAECvSFgBAAAA0CsSVgAAAAD0ioQVAAAAAL0iYQUAAABAr0hYAQAAANArElYAAAAA9MrR4w4A6NBFO5ND+7ttY8v2busHAOChRvFz3kl+zgPGR8IK1rND+5Pdh8YdBQAAa20UP+ddsbPb+gEWIGEFAACwRm5vx+eJu7d039AI7nKfuve+7Ow4aTV1zFT2nrO30zaAySRhBQAAsEZ+9Ntvy749Z487jDWx9/bpzu/i6johBkwui64DAAAA0CvusAIAAFgj27Zuzo5dV4+knet2ndF5OwDjImEFAACwRkaVRBpFUgxgnCSsAIZO23NNDhw83Gkb27Zu7rR+AACA9UDCCmDowMHD62aRVAAAgEkmYQUAADBhRrFW1mc2nZATd2/ptI2ctL3b+oGJJWEFAAAwYUaxVtaOXen+7vMrdnZbPzCxjhp3AAAAAAAwkzusAAA
AeIhRPHZ47FM6rR6YYBJWAAAAPMQoHjvcecWuztsAJpNHAgEAAADoFQkrAAAAAHpFwgoAAACAXpGwAgAAAKBXJKwAAAAA6BVvCQQAAGAspu69Lzuv2DnuMCbK1DFT2XvO3nGHAZ2TsAIAAGAs9t4+new+NO4wJooEHxuFhBUAAACs1kU7k0P7u2/npO3dtwE9IGEFAAAAq3Vo/2juFnOHFRuERdcBAAAA6BV3WLEmzrryrEzfNd15O1PHTHXeBv1z2p5rcuDg4c7b2bZ1c+dtAAAweqP4eXLfpk6rhw1Hwoo1MX3XdG4+9+Zxh8E6deDg4ezbc/a4wwAAYEKN5OfJ3d1WDxuNhBUAAABjcUdOyIm7t3Tezmc2nZDEL0BhkkhYAQAAMBYn7r5lRO10nxQD1pZF1wEAAADoFQkrAAAAAHpFwgoAAACAXrGGFQAAAOvblu1Jx+tY3ZET8sO7ru60jSQ59imdNwG9IGEFAADA+nbBzZ03cWKSfZ23kuy8YtcIWoHx80ggAAAAAL0iYQUAAABAr0hYAQAAANArElYAAAAA9IqEFQAAAAC94i2BAAAAMCGm7r0vO6/Y2Wkbdd9xuenfXNtpG7AYCSsAAACYEHtvn052H+q0ja4TYrAUHgkEAAAAoFfcYQUAAACTYsv2ZPeWbts4aXu39cMSSFgBAADApLjg5u7b8EggPSBhBazKaXuuyYGDhzttY9vWzZ3WDwAAQL9IWAGrcuDg4ezbc/a4wwAAAGAdseg6AAAAAL3iDisAAADgAY+79/7s7Hgdq6nvJHtfMoL1uJhY6zZhVVVHJ/nfk7wkyYlJ9iV5e5Lfaa21MYYGAAAAvXXrLf+p82U/uk6IMfnWbcIqyTuTvDTJZUmuT3JmkkuSHJfk18cYFyQX7UwO7e++nS1eRwsAAMDkWZcJq6o6JYNk1UWttVcPD7+7qt6X5HVVdVlr7Y7xRciGd2h/svvQuKMAAACAXlqXCaskLxzuL551/OIk5yR5XgZ3YG0IZ115Vqbvmu60jaljpjqtfz3asevqcYewJrZt3TzuEAAAgDW0bevmzv9/5dindFr9SJ31np2Zfli3bWzENb/Wa8Lq1CRfbq3dNuv49UnuT/L00Yc0PtN3TefmczfWP+xJ0PUz4QAAACtx3a4zOm9j5xW7Om9jVKYfls7/n3sjrvl11LgD6MhUkgOzD7bW7klyZ5JtI48IAAAAgCWp9fjCvKq6NYM7rH5kjnP7k/xNa+30Oc6dl+S84V//aZIvdBnnCB2f5GvjDoKR0ucbi/7eePT5xqPPNxb9vfHo841Hn288+vxB/7i1dsJihdbrI4GHkzxynnObhucforV2aZJLuwpqXKrqL1trp447DkZHn28s+nvj0ecbjz7fWPT3xqPPNx59vvHo8+Vbr48ETmfwWOB3qapHJHns8DwAAAAAPbReE1Y3JHlCVW2fdfwZGXzmG0YfEgAAAABLsV4TVu8d7s+fdfz8JPck+dBowxm7dfeYI4vS5xuL/t549PnGo883Fv298ejzjUefbzz6fJnW5aLrSVJVv5vkxUkuS3J9kjOTvCDJha213WMMDQAAAIAFrOeE1cOTvC6DpNWJSfYl+Z0kl7T1+qEBAAAA1oF1m7ACAAAAYDKt1zWs1rWqOrqq3lhVf1tVd1fV56vqFVVVS7z+jKq6rqq+VVVfrarfq6oTuo6blamqp1fVb1fVTVX1jar6u6r6RFU9Z4nXX15VbZ7tiV3Hz/JV1Y4F+uzdS6zjaVX1seG/mYNV9YGq+t6uY2dlFhmnrapev4rrjfMxq6pHV9XuqvpIVd0x7JfL5ym7qjl+WId5foyW2t+rnd+HdRj7PbCMPl/1/D6sxxw/Zsvo81XN70uowzgfgeV8X5vH19bR4w6AFXlnkpfmu9fnuiTJcUl+faELq+rZSfYmuSnJa5I8brh/RlU9o7V2uMO4WZnXJvnxJO9P8vYkj87gUdePV9W/b629c4n1nJvk/lnHvr5
mUdKFq5JcOevYLYtdVFXfl+TPknwlyeuTbEpyQZI/r6ofaK19ea0DZdX+S5L/PsfxVyU5NclHl1iPcd5Pxyd5U5I7kvxlkn++QNkVz/GJeb4nltrfazW/J8b+uC1njCcrnN8Tc3yPLLXP12p+T4zzcVrO97V5fC211mwTtCU5JUlL8tZZx9+X5O4kJy5y/Y1J9id59IxjZw/rvGDcn882Z5/9SJJNs45tTvKFDCapoxe5/vJh/y5YztafLcmOYZ/9xgqv/2CSbyTZNuPYziTfSXLxuD+fbcn9+Kgk/5DkpiWUNc57vCV55JHxmMEvC1uSy+cot6o5fljWPD85/b2q+X1Y3tjvwbaMPl/V/D6swxzfg22pfT7PtUue34fljfPx9/eSvq/N42u/eSRw8rxwuL941vGLM/jifN58F1bVP0nytCS/21r75pHjrbWrk9ya5GfXNlTWQmvt0621u2cdO5zkT5I8JskTllhVVdX3VJVxP0GqanNVbV5G+Ucn+akkV7bWDhw53lq7OcknY5xPkucnOTbJFcu4xjjvodbat2eOxwWseI5PzPN9sdT+XsP5PTH2x2oZY/wBy53fh9eY43tiJX0+w0rm98Q4H5tlfF+bx9eYf+yT59QkX26t3Tbr+PUZ3CL69EWuTZLPznHuL5I8raoetvoQGZGpJPcl+fsllr8zyaEk36yq91fVkzqLjLXyqiTfSvKtqvpiVb18Cdc8NckjMv84f5z1DibGuRmM8d9fxjXG+WRbzRx/5PrEPD/plju/J8b+pFnJ/J6Y49eLlczviXHeR7O/r83ja8waVpNnKslDsvmttXuq6s4k2xa5NnNdn2Q6gwnw+CSefe+5qvpfkvx0kg+31u5apPjfJXlrkhuSfDvJDyV5ZZJnVdWpc3yhMn73J/lEkg8luS2DsfvSJG+vqh2ttdcucO1i4zwZfE/cvkax0oGq2pbBWgkfbUtbj8Q4Xx9WM8cfuT5z1RHz/ERY5vyeGPuTZjXze2KOn3grmN8T47yX5vm+No+vMQmrybM5g2ee53L38PxC1yaDL7q5rp1Zhp6qqu/J4Dnob2WwyOaCWmu7Zh16f1V9LMnHklyY5EVrHSOr01rbn+S73joyfHvQNUleXVXvaq3dOs/lxvn68K8zuAv68qUUNs7XjdXM8YnxP9GWO78nxv6kWeX8nhjj68Gy5vfEOO+jBb6vzeNrzCOBk+dwBs+/zmXT8PxC12ae6zfNKkMPDdc6+EiS703yvOEPPsvWWvt4kv+RwVsrmACtte8keUsG39s/vkBR43x9+KUMbi//yEorMM4n0mrm+MT4n1hrNb8nxv6kWcb8nhjj68Gq5/fEOB+nRb6vzeNrTMJq8kznwVsFH1BVj0jy2Dx4O/B812au64fH7knytdUGSDeGffzBJD+c5Gdaa9eussrbMrillMlx5JbvhfptsXE+sww9VFXPSPKUJH/YWpvrN2zLYZxPltXM8Yl5fiJ1ML8nxv6kWcr8npjjJ9oaz++JcT5yS/i+No+vMQmryXNDkidU1fZZx5+RQX/esMi1SfLMOc79YJLPDX/LQ89U1dFJ3pvkJ5L8UmvtT9ag2pOTfGUN6mF0Th7uF+q3m5Pcm7nH+TOH11rbot/OHe6X+/aguRjnk2U1c3xinp84Hc3vibE/aZYyvyfm+Em3lvN7YpyP1BK/r83ja0zCavK8d7g/f9bx8zPIuH4oSarqUVX1fVX1QNa9tfaFJJ9L8pKqOubI8ar6ySRPTvLHXQbOygxfXftfkzw3yctaa380T7mHD/v8xBnHHllVj5qj7DlJdia5uqOwWYWqOm6OY5uSvC6DN5F8bHjsIX3eWvtGkj9Ncs6sfwvfn+THkry3tdY6/gis0PA3cD+b5K9aa9fPcd44X9+WNMcn5vn1YKnz+7Cssb8OLHV+Hx43x68ji83vwzLGeU8t4/vaPL7GLLo+YVprN1bVezJYmPHYDF6ReWaSFyS5sLV25DbCH0zyyQw
W4ts9o4oLknw8ybXDRR4fl+Q1Sf4qyTtH8iFYrrck+bkk1yY5XFW/OOv8x4dvGdmWQT9ekQcXXzwxyWer6oNJ/jqDZ56fmcGCj1/Kd//boD/+8/A3M9dl0E+Pz2DNgycnecOMZ+Xn6vNk8IPvZ5P8WVVdksFz8Bck+WqS3xzFB2DF/nkGt4y/eZ7zxvmEqqpXJNmaB39Z+NSqesPwzx9urd20jDk+Mc/32lL6O0uf3xNjv/eW2OdLnd8Tc3zvLbHPj1hsfk+M8z5b0ve1ebwDrTXbhG1JHp7kTUn2ZfAGgS9kkLWtGWVOT9KS7J7j+uck+UwGX3h3ZvCl+Phxfy7bvP39qWFfzredPiy3Y/j3y2dcu3XYv59P8o0MMvu3JnlbkseN+7PZ5u3zn8tgAvu7YZ8dHP79p2eVe0ifzzj3zzKY7L6Z5FAGz9ufPO7PZlu0769K8p0kU/OcN84ndBvO2fN9j79oRrlF5/hhOfN8j7el9PdS5/dhWWO/59sS+3xJ8/t8fT7jnDm+B9tSv9eHZRec3+frc+O8H9syv6/N42u41fA/CAAAAAD0gjWsAAAAAOgVCSsAAAAAekXCCgAAAIBekbACAAAAoFckrAAAAADoFQkrAAAAAHpFwgoAAACAXpGwAgAAAKBXJKwAAHqsqn6sqlpV/cy4Y1muqnpNVd1bVd837lgAgMkiYQUAMI9homix7fSqetESyz6wLbH9o5JclORzSa6cp8xxVbWrqj5VVV+pqnuq6htV9T+r6veq6l9UVa3iv8FvDmP+rSWUvWxY9peHh96R5CtJ3rLS9gGAjalaW9LPSwAAG86MxNKFCxS7PMnWJM+bdXxHknOT3DYs811aa7uX0P7PJ/lvSX6htfYHc5z/l0muGLa/L8m1Se5I8ogkT0ry7OG5K1trK7pDq6pOSnJrkq8meWJr7d55yh0zo+1trbU7h8d/NclvJTmttfbplcQAAGw8ElYAAPM4krBqrS37DqWqOj3JJ5Nc21o7fYXtX5fk+5M8obV2eNa5M5LsTXJfklcmeU9r7f5ZZTYl+cUkZ7bWXrCSGIb1fCzJTyT5V621D8xT5t8keXeSP2it/cKM41NJ9if5o9baL640BgBgY/FIIABADw3XffqRJB+eI1n1sCTvSnJ0kvNba++enaxKktba3a21dyf5+Xna+Lmq+mRV/X1V3V1Vf1VVb6iqR84qeulw/28XCPnIuUtnHmytTSf5v5OcU1Xfs8D1AAAPkLACAOin5wz3fz7HudOTPDnJl5K8Z7GKWmv3zT5WVb+b5A+SnJzkA0l+J8nXk/yHJP9XVR09o/hVGaxFdWZVbZ+jru9P8swkf91au3aOEK5L8sgkz1osVgCAZPBbOQAAFlBVu+c5dXdrbU9Hzf7ocP+Xc5w7bbi/trX2neVWXFUvSvKSJB/MYH2swzPO7U7ypiQvT3JxkrTW7q2qy5P86vC63bOqPHJ31WXzNPn/DPfPSvIny40XANh4rGEFADCPJbzN71Brbes8156eVaxhVVWfTvLDGSxgPj3r3DuS/Lskv9Va2zXHtbvnqPK3W2sHh+dvzGBtrBOOHJtx7cOSfDnJ37TWfnDG8ZOT/HUGd3WddOQRxOHjg9NJHp3BouxfnSOeZyb5iyR/3Fr72aX9FwAANjJ3WAEALGIli66vgccO938/x7kj8cyXUHvTHMcuT3Kwqh6V5JQkX0vyy1VzfrRvJ3nKzAOttVuq6lNJfizJWUk+Ojz1r5Icl0Ey6iHJqqGvD/fHz3MeAOC7SFgBAPTTkcf0Ns348xF3DPfb5rpwZoKtqv48Dz5CmCSPySDhdULmTmwt5NIMElYvzYMJq5cO9/M9Dpgkm4f72Z8DAGBOFl0HAOinrwz3j53j3HXD/elVtdyf5w4N9ze21mqhbY5rP5DBnVn/oqoeX1VPymAB+FuTXLNAm0c+w1cWKAMA8AAJKwCAfrppuP++Oc59KsktSf5Rkhcvp9LW2jeT/M8k/2t
VHbfMa+9JckWShyc5N4O7qyrJZW3hhVGPfIb/dzntAQAbl4QVAEA/fWq4/6HZJ4ZvBnxZkvuSXFJVL57rTquqeniSR81R91uTPCLJe6rqIYvGV9VjquqfzRPXkUf//m2SFyW5N4P1sRZy5DN8cpFyAABJvCUQAGBeM94SeOECxT7UWnvInUNr8JbARyU5kOSW1toz5inz3AzueNqSZF+SazN4Y9+mJFNJnpPB43g3JXn2zDcCVtXvJPn3GSyIvjfJ/gwWTz8pybOS/F5r7WXztHvtsEySvL+1ds4Cn+OoYd3fbK3NdbcYAMBDWHQdAGBxCy1Ovi8dPOrWWvtWVV2ewZv8ntJa+6s5ylw1XEfqvCQ/meTsJFuT3J3k9iRXJ3lfkj9trd0/69qXV9VHM7hT6znD676eQXLpzUl+f4HwLs2DCatLF/koz8lgcfgLFikHAPAAd1gBAPRUVe1I8vkk/6W19qrxRrMyVfX+JM9O8qTW2qHFygMAJNawAgDordbaviRvS3JeVW0bczjLVlVPS/L8JLslqwCA5fBIIABAv/1GkruS7MhgTatJcmKSNyZ517gDAQAmi0cCAQAAAOgVjwQCAAAA0CsSVgAAAAD0ioQVAAAAAL0iYQUAAABAr0hYAQAAANArElYAAAAA9Mr/D+TCNSIxxQ/bAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "a_MC=plt.hist(triggered_true_MC/1000,bins=40,histtype='step', range=(0, max_true/1000), label='MC Triggered events')\n", + "a_NN=plt.hist(triggered_true_NN/1000,bins=40,histtype='step', range=(0, max_true/1000), label='NN Triggered events')\n", + "b=plt.hist(true_hist/1000,bins=40,histtype='step', range=(0, max_true/1000), label='ET from Tracking')\n", + "plt.tick_params(labelsize='xx-large')\n", + "plt.xlabel('ET (GeV)', fontsize=20)\n", + "plt.ylabel('dN/dET (a.u.)', fontsize=20)\n", + "plt.legend(fontsize='xx-large')\n", + "fig = plt.gcf()\n", + "fig.set_size_inches(20,10)\n", + "plt.savefig(PATH+'/eff_1.png',dpi=100)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(array([ 1., 12., 50., 90., 129., 159., 189., 210., 193., 215., 211.,\n", + " 194., 205., 193., 172., 151., 134., 127., 131., 111., 112., 87.,\n", + " 87., 64., 61., 58., 37., 35., 37., 26., 26., 21., 14.,\n", + " 14., 14., 6., 3., 0., 0., 1.]),\n", + " array([ 0. 
, 0.50833315, 1.01666631, 1.52499946, 2.03333262,\n", + " 2.54166577, 3.04999893, 3.55833208, 4.06666523, 4.57499839,\n", + " 5.08333154, 5.5916647 , 6.09999785, 6.60833101, 7.11666416,\n", + " 7.62499731, 8.13333047, 8.64166362, 9.14999678, 9.65832993,\n", + " 10.16666309, 10.67499624, 11.18332939, 11.69166255, 12.1999957 ,\n", + " 12.70832886, 13.21666201, 13.72499517, 14.23332832, 14.74166147,\n", + " 15.24999463, 15.75832778, 16.26666094, 16.77499409, 17.28332725,\n", + " 17.7916604 , 18.29999355, 18.80832671, 19.31665986, 19.82499302,\n", + " 20.33332617]),\n", + " )" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "a_MC\n", + "a_NN\n", + "b" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "eff_MC = np.zeros_like(b[0])\n", + "eff_NN = np.zeros_like(b[0])" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(len(b[0])):\n", + " if b[0][i]!=0:\n", + " eff_MC[i]=a_MC[0][i]/b[0][i]\n", + " eff_NN[i]=a_NN[0][i]/b[0][i]" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "eff_MC;" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABKcAAAJhCAYAAACU4dPuAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzs3X2cj2X+///HITKkwTKIUcNSUqp1tS7qEzZaXxV+i2W6MFQofpupttVqpa1tWbr6yPrStgxdsLKr2FIfVPp8tpRqd5XYLlxsohTZhE3m/P4xFx9jxsx7mHEyHvfbbW5v7+M8jvN8ne+ZsXnucRxniKIISZIkSZIkKQ6V4i5AkiRJkiRJJy7DKUmSJEmSJMXGcEqSJEmSJEmxMZySJEmSJElSbAynJEmSJEmSFBvDKUmSJEmSJMXGcEqSJEmSJEmxMZySJEmSJElSbAynJEmSJEmSFJvKcRdwLKhbt26UlpYWdxmSJEmSJEkVxptvvvl5FEUpJfUznALS0tJYtWpV3GVIkiRJkiRVGCGEjYn0c1mfJEmSJEmSYmM4JUmSJEmSpNgYTkmSJEmSJCk2hlOSJEmSJEmKjeGUJEmSJEmSYmM4JUmSJEmSpNgYTkmSJEmSJCk2leMu4Hixd+9etm3bxt69e/n222/jLkfSCa5KlSrUq1eP5OTkuEuRJEmSpCNiOJWAnTt38umnn5KSkkKDBg2oXLkyIYS4y5J0goqiiD179rB582YAAypJkiRJxzWX9SXg888/JzU1ldq1a1OlShWDKUmxCiFQvXp1GjVqxGeffRZ3OZIkSZJ0RAynEvDNN99QrVq1uMuQpAKqVavGvn374i5DkiRJko6I4VSCnC0l6Vjj30uSJEmSKgLDKUmSJEmSJMXGcEqSJEmSJEmxMZzSYVm5ciWdOnWiRo0ahBB46aWXAFi3bh09evSgVq1ahBCYNWsWL730UoE+pZGWlkZGRkaZ1n4s27BhQ/7nFpdZs2YRQmDDhg2x1XAoidaW9znecccdR6cwSZIkSdJhM5wSQH6AdKivq666Kr/vvn37GDBgAJ988gmTJ09mzpw5nH322QBkZGTw5ptvMn78eObMmcN//Md/xHVLikF2djbjx49n4cKFcZdy1OQFYSEEpk2bVuh43u/W7373uyMaI0mSJEkVVeW4CzjepY35c9wl5NswodcRn+Paa6+lS5cuhdqbNm2a/+cPP/yQTZs2MWnSJEaMGJHfvnfvXl577TVGjhzJ6NGj89vT0tLYs2cPJ598cqnrWbduHZUqnTgZ6hlnnMGePXuoUqVK3KUcluzsbO666y4GDx5Mnz594i7nqPvVr37FkCFDSEpKKtcxkiRJklSRGE6pgA4dOhSYJVWUzz77DIBatWol1F6pUqXD/od31apVD2vc8SqEYEhxnGrdujVvvfUW06ZNIzMzs9zGSJIkSVJFc+JMSVGZ6NKlCxdffDEA119/PSGE/H2hzjjjDCBnJkjekiXgkHtOff7559x00000adKEqlWrctppp9G3b1/efffd/D6H2nPqkUceoXXr1lSrVo1atWrRu3dv1qxZU6DP+PHjCSGwevVqMjMzqVevHtWrV6dnz55s3Lix0DlLqqdDhw6ceeaZRX4uGRkZVKtWjS+//LLYz2/69Omcf/751KhRg5o1a9KqVSvuvPPO/ONF7TmVt8/SkiVLGDduHKmpqZxyyilceumlbNq0CYCpU6fSvHlzkpKSaNeuHW+99VaRn8XBEt3j6pVXXiE9PZ0mTZqQlJRE3bp16devH//4xz8KnCtvxldWVlb+z8CBM/G+/fZbJk6cSMuWLfPPc9VVV/Hxxx8XuuYLL7xAmzZtSEpKIi0tjUmTJhFFUbF1FmXWrFm0aNGCpKQkWrZsyZNPPpl/bP/+/aSmptKjR48ix3bp0oXU1FSys7NLvE7Pnj3p0KEDEyZM4Ouvv06otsMZI0mSJEkVTewzp0IINYB
bgTZAW6ABkBVFUUYpznEB8BugI7AfWA7cGkXRR2VecAW3a9cuPv/880Ltp556KlWrVmXs2LF07tyZe++9N38JYI0aNahfvz4XXHABmZmZ9O7dm379+hV7nW3btvH973+fTZs2kZGRQZs2bfjyyy958cUXefPNNznnnHMOOXb06NFMmTKFgQMHct1117Fz506mTp1Kp06dWLVqFc2aNSvQf+jQodSuXZtx48axZcsW7r//fq666ipeeeWVUtUzdOhQhg8fzquvvkrHjh3zx+7evZsFCxbQp0+fQrPGDjRz5kxGjBhB796985dDrlu3jpdffrnYzyrP2LFjqVq1KrfddhuffPIJ9913H7179yY9PZ3Zs2dzww03sHv3biZOnEjfvn354IMPymx54B/+8Ac++eQTBg8eTKNGjdi4cSMzZszgoosu4p133iElJYWUlBSysrIYPHgwF110EcOGDQOgfv36AERRRP/+/Xn22WfJyMjgpptuYvPmzTz88MOsWLGCt99+mzp16gDw8ssv06tXL1JTU7nzzjsJIfDb3/622M+3KIsXL2bz5s2MHDmS5ORkZs2aRXp6OiEEBg4cyEknncQ111zDxIkT+fjjj0lNTc0fu3HjRlasWMHPfvazhJeW3n333XTv3p0pU6YwZsyYchsjSZIkSRVJ7OEUUBe4E9gCrAIuK83gEEILYAXwGTAWSAIygf8OIXwviqJPy7bcii0zM7PI5UUzZ84kIyOD7t27U6VKFe69995CSwBPO+00MjMzOffcc0tcGnj77bezfv16Fi9eTK9evQq0Fzc7ZuXKlTz00ENMmTKFUaNG5bdfffXVtGzZkvHjx/PYY48VGJOamsqf/vSn/Pd16tThlltuYc2aNbRs2TLhegYOHMjo0aPJysoqEE798Y9/ZNeuXSU+VfCZZ56hZcuWh71ZeKVKlVixYgWVK+f82u7fv5/JkyezY8cO3n33XU455RQAateuzahRo1iyZAmXX375YV3rYBMmTMg/f55rrrmGVq1a8eijjzJmzBhOOeUU0tPTGTx4ME2bNi30MzB//nwWLlzIokWLuOyy//0179u3L+3ateOBBx7gnnvuAeDWW2+levXqvPrqqzRo0ADImZ121llnlaru1atX87e//Y1zzz0XyJnt16pVK2699Vb69etH5cqVGTJkCL/+9a957LHHCoRDc+bMIYqiUj0t8pJLLqFLly5MmjSJG2+8keTk5HIZI0mSJEkVybGwrG8LkBpFUUOg72GM/zUQgIujKPrPKIp+A/QA6gM/L7syTww333wz//Vf/1Xo69JLLy2za2RnZ/PUU09x4YUXFgiC8hS1/CzPvHnzOPnkk+nbty+ff/55/lfecrZly5YVGnPDDTcUeN+1a1cgZ2P30tSTnJzMj370I+bNm8fevXvzj2dlZdGoUSO6d+9e7H3XrFmTzZs389prrxXb71Cuv/76/GAKoHPnzgBceeWVBYKjvPYPPvjgsK5TlAPPv2vXLr744gu+853vcNZZZ/HGG28kdI65c+eSmppKhw4dCnzvGjduTNOmTfO/d59++imrVq1i4MCB+cEUQIMGDbjyyitLVfcll1ySH0xBzgzAYcOGsXnz5vylj82bN+fCCy8kKyurwNjZs2fToUOHUgdid999N9u3b+eBBx4o1zGSJEmSVFHEHk5FUfTvKIo2H87Y3CWB/wd46sBzRFG0GngRGFg2VZ44zj77bC655JJCX6eddlqZXWPbtm3s3LmT8847r9Rj165dyzfffENqamr+UrK8r+XLl+dvyn6gvL2w8tSuXRuA7du3l7qeIUOG8OWXX/LMM88AsHnzZpYvX87VV19d4tKvMWPGkJycTMeOHWnatClDhw7lmWeeSXgfpYPvI2+J2+mnn15ke979lYUtW7YwePBgvvOd73DqqadSt25dUlJSWL16NTt27EjoHGvXruXjjz8u9H1LSUnh/fffz//erV+/HqDIUKi0QVFx58i7DuR
8X9euXcvrr78OwKuvvsr7779fqllTeS688EJ69OjBAw88kPD34HDGSJIkSVJFcSws6zsS5wEnAyuLOPYa8IMQQmoURYV3W1bsipshdSjZ2dlUr16dp59+OuExJ510UpHtB4dCidTTtWtX0tLSyMrKYsCAATz22GNkZ2cnFGK0aNGCdevW8fzzz/PCCy/w/PPPM3PmTHr06MGzzz57yDpLuo9E7u9Q97Z///4S687OzqZHjx5s3ryZzMxMzjnnHGrUqEGlSpUYPXp0QpuF552nadOmTJ8+vcjj1apVK1D34fx8HCzRcwwYMICf/OQnZGVl0b59e2bPnk1SUhI//vGPD+u699xzD+3bt2fSpEkJzzo8nDGSJEmSVBEc7+FUw9zXomZefZL72ggoFE6FEIYBw6DwzBOVr5SUFGrWrMnf//73Uo9t1qwZzz//PK1atcrfaPto1hNCICMjg7vvvputW7eWeulXtWrV6NOnD3369CGKIm6//XYmTpzIsmXLDvnEuLKQN1tsx44d+X8G+Oijkp8ZsHr1at555538fccOtH37durWrZv/vrgwqFmzZqxYsYIuXboUWJ54sKZNmwI5M60Otm7duhLrPVBx52jSpEl+W40aNejfvz9z585lwoQJzJs3r8QN7ovTrl07rrjiCqZMmcL3vve9chsjVTRpY/5cpufbMKHwUm1Jkiqc8TUT7LczoW7+73Hp+ZkdudiX9R2harmv/y7i2N6D+hQQRdGMKIraRlHUNiUlpVyKU9EqVapEv379eOWVV3juuecKHS9umdugQYMAGDduXJHHt23bVu71ZGRkkJ2dTWZmJmvWrEl46dcXX3xR4H0IgQsuuAAo2yV4RWnevDkAy5cvz2+LooiHHnqoxLF5M7MOniGVlZXFli1bCvVNSkoqcqnfoEGD+Oqrr/jNb35T6FgURflPiaxfvz5t2rRh7ty5bN26Nb/P1q1befzxx0us90BLly7lnXfeyX+/a9cuZsyYQcOGDWndunWBvkOHDmX79u0MGzaMHTt2HNaSvgP98pe/zH96YnmOkSRJkqTj3fE+c2pP7mvVIo4lHdRHCXjttddISkoq1F6nTh169uxZZte59957Wbp0KVdccQVDhgyhdevWfPXVVyxfvpxBgwZxzTXXFDmuc+fO3Hzzzdx///2sWbOGyy+/nFq1arFx40aee+45zjvvPGbNmlWu9Zxxxhl069aNuXPnlmrpV/fu3UlJSaFz5840atSIf/7zn0ydOpWUlJQSN1M/Ut27d+e73/0u1113He+99x41a9ZkwYIF7N69u8SxLVq0oEWLFtxyyy1s3LiRhg0bsnLlShYuXJg/y+lA7dq1Y+nSpUyePJnU1FTq1atHt27dSE9PZ+HChYwdO5a//OUvdOvWjWrVqrF+/XoWLlxIeno648ePB2DSpEl0796djh07MmLECKIoYvr06TRt2pS//vWvCd93q1at6Nq1K6NGjSI5OZmZM2eyceNGHn/88UKzty666CKaN2/O3LlzE9rgviTnn38+/fr1Y/78+eU6RpIkSZKOd8d7OJW3dK9hEccaHtRHCXj00Ud59NFHC7W3adOmTMOpevXqsXLlSu666y4WLVrErFmzqFu3Lh07dqRt27bFjr3vvvto27YtU6dO5e677yY7O5uGDRty4YUXMnz48KNSz9ChQ1m2bFmpln7dcMMNPPnkkzz88MPs3LmT+vXr06tXL37xi19Qp06dw6o7UZUrV2bhwoWMGjWKe+65h+TkZAYNGsTw4cM555xzShy7ePFiMjMzmTJlCvv27aNDhw4sX76c0aNHF+o/bdo0brzxRu688052797NxRdfTLdu3QghMG/ePKZNm8bvf/977rjjDk466SQaN25Mjx49GDBgQP45unbtyuLFi/n5z3/OuHHjaNCgASNHjiQlJYWhQ4cmfN+XXXYZzZo1Y8KECaxfv56mTZsyZ84c0tPTi+yfkZHB2LFjE9rgPhF33XUXCxY
sSHhfrsMdI0mSJEnHs5Dok8KOhhBCZWAfkBVFUUYC/U8FvgAei6Jo6EHH/oucDdMbRCXcZNu2baNVq1Yd8vh7773H2WefXfIN6IQxf/58BgwYwJIlS9y8ugKZNGkSt912G2vXri31kwHj4t9POhqO1j4K7tcgSdJhcM+p2PmZHVoI4c0oioqfgcJxNHMqhFAF+C6wM4qiLQBRFH0VQngW6BdCGJvXHkI4F+gKTCspmJIOx/Tp02ncuHG5L8fT0ZOdnc0jjzxCp06djptgStKxy/9IlSRJStwxEU6FEEYBtfjfDdrPCyHckfvnZ6Io+js5T917D8gCMg4Y/nNgJbAihDCFnP2nMoFtwL3lX71OFF9//TWLFi1i5cqVLFu2jAcffLBMln4pXp999hlLly5lyZIlvP/++0yaNCnuklRBGE6oovBnWZIklbdjIpwCbgXOOOD993K/AD4G/n6ogVEUrQkhXAxMJCeM2g8sB36aN5NKKgvbtm1j0KBBJCcnM3z4cEaOHBl3SSoDa9as4corr6ROnTqMGzeO3r17x12SJEmSJJ1QjolwKoqitAT6bADCIY69Bbi+SuUqLS0NV4lWPF26dPH7KkknCGeBSZJ0bHJNkiRJkiRJkmJzTMyckiRJh8eZIJIkSTreOXNKkiRJkiRJsTGckiRJkiRJUmwMpyRJkiRJkhQb95ySJEk6DrnfmCRJqigMpyRJkiTFxqBVkmQ4JUmSThj+I1iSJOnY455TOiwrV66kU6dO1KhRgxACL730EgDr1q2jR48e1KpVixACs2bN4qWXXirQpzTS0tLIyMgo09qPZRs2bMj/3OIya9YsQghs2LAhthoOJdHa8j7HO+644+gUJkmSJEk6bM6cOlLja8Zdwf8av/Owh7700kt07dr1kMevvPJKHnvsMQD27dvHgAEDCCEwefJkatSowdlnnw1ARkYG//jHPxg/fjx169alU6dObNq06bDr0vElOzubX/7yl1xwwQX06dMn7nKOig0bNtCkSRMAbrjhBn77298W6rNgwQL69esHwMyZMwsFrl9++SUPPfQQCxcu5MMPP+Sbb74hNTWVrl27cuONN/K9732v3O9DknR8cRagJKkiMZxSAddeey1dunQp1N60adP8P3/44Yds2rSJSZMmMWLEiPz2vXv38tprrzFy5EhGjx6d356WlsaePXs4+eSTS13PunXrqFTpxJngd8YZZ7Bnzx6qVKkSdymHJTs7m7vuuovBgwefMOFUnqSkJObNm8eDDz5Y6Gd99uzZJCUlsXfv3kLj3n33XXr27MmWLVvo378/1157LUlJSbz//vvMnz+fRx99lE2bNpGamnq0buWE4D/qJEmSpGOH4ZQK6NChA1dddVWxfT777DMAatWqlVB7pUqVSEpKOqx6qlateljjjlchhMP+rBSvyy+/nPnz5/Pss88WCOa++OILnnvuOfr27csf/vCHAmN27drFFVdcwddff83KlStp3bp1geO/+tWvmDRpElEUHZV7kCRJkqQ4nDhTUlQmunTpwsUXXwzA9ddfTwghf1+oM844A8j5B3UIgRACwCH3nPr888+56aabaNKkCVWrVuW0006jb9++vPvuu/l9DrXn1COPPELr1q2pVq0atWrVonfv3qxZs6ZAn/HjxxNCYPXq1WRmZlKvXj2qV69Oz5492bhxY6FzllRPhw4dOPPMM4v8XDIyMqhWrRpffvllsZ/f9OnTOf/886lRowY1a9akVatW3HnnnfnHi9pzKm+fpSVLljBu3DhSU1M55ZRTuPTSS/OXTE6dOpXmzZuTlJREu3bteOutt4r8LA6W6B5Xr7zyCunp6TRp0oSkpCTq1q1Lv379+Mc//lHgXHkzvrKysvJ/Bg6cifftt98yceJEWrZsmX+eq666io8//rjQNV944QXatGlDUlISaWlphx3SzJo1ixYtWpCUlETLli158skn84/t37+f1NRUevToUeTYLl26kJqaSnZ2don
XOfPMM2nfvj1z5swp0P7kk09SqVIl+vfvX2jMjBkz+Oijj5g8eXKhYAqgcuXK3H777TRu3LjE60uSJEnS8cqZUypg165dfP7554XaTz31VKpWrcrYsWPp3Lkz9957b/4SwBo1alC/fn0uuOACMjMz6d27d/7+Ooeybds2vv/977Np0yYyMjJo06YNX375JS+++CJvvvkm55xzziHHjh49milTpjBw4ECuu+46du7cydSpU+nUqROrVq2iWbNmBfoPHTqU2rVrM27cOLZs2cL999/PVVddxSuvvFKqeoYOHcrw4cN59dVX6dixY/7Y3bt3s2DBAvr06VNo1tiBZs6cyYgRI+jdu3f+csh169bx8ssvF/tZ5Rk7dixVq1bltttu45NPPuG+++6jd+/epKenM3v2bG644QZ2797NxIkT6du3Lx988EGZLQ/8wx/+wCeffMLgwYNp1KgRGzduZMaMGVx00UW88847pKSkkJKSQlZWFoMHD+aiiy5i2LBhANSvXx+AKIro378/zz77LBkZGdx0001s3ryZhx9+mBUrVvD2229Tp04dAF5++WV69epFamoqd955JyEEfvvb3xb7+RZl8eLFbN68mZEjR5KcnMysWbNIT08nhMDAgQM56aSTuOaaa5g4cSIff/xxgaVzGzduZMWKFfzsZz9LeGnp1VdfzS233MKOHTuoXbs2kLOk7/LLLy+y9j/96U9UrVqV9PT0Ut2XJEmSJFUkhlMqIDMzk8zMzELteZs4d+/enSpVqnDvvfcWWgJ42mmnkZmZybnnnlvi0sDbb7+d9evXs3jxYnr16lWgvbjZMStXruShhx5iypQpjBo1Kr/96quvpmXLlowfPz5/4/Y8qamp/OlPf8p/X6dOHW655RbWrFlDy5YtE65n4MCBjB49mqysrALh1B//+Ed27dpV4lMFn3nmGVq2bMnChQuL7XcolSpVYsWKFVSunPNru3//fiZPnsyOHTt49913OeWUUwCoXbs2o0aNYsmSJVx++eWHda2DTZgwIf/8ea655hpatWrFo48+ypgxYzjllFNIT09n8ODBNG3atNDPwPz581m4cCGLFi3isssuy2/v27cv7dq144EHHuCee+4B4NZbb6V69eq8+uqrNGjQAMiZnXbWWWeVqu7Vq1fzt7/9jXPPPRfIme3XqlUrbr31Vvr160flypUZMmQIv/71r3nssccYM2ZM/tg5c+YQRVGpnhY5cOBAbr75ZubNm8eIESNYt24db7zxBr/4xS+K7L9mzRrOOuusE275qiRJkiQdyHBKBdx888307NmzUHtxM5lKKzs7m6eeeooLL7ywQBCUp6jlZ3nmzZvHySefTN++fQvM8MpbzrZs2bJCY2644YYC7/OeSvjhhx/SsmXLhOtJTk7mRz/6Uf6m13l7Q2VlZdGoUSO6d+9e7H3XrFmTzZs389prr9GhQ4di+xbl+uuvzw+mADp37szkyZO58sorCwRHnTt3BuCDDz4o9TUO5cDz79q1i3//+9985zvf4ayzzuKNN95I6Bxz584lNTWVDh06FPjeNW7cmKZNm7Js2TLuuecePv30U1atWsWwYcPygymABg0acOWVVzJt2rSE677kkkvygynImQE4bNgwxo4dy1tvvUX79u1p3rw5F154IVlZWQXCqdmzZ9OhQ4dSBWJ169alZ8+ezJkzhxEjRjB79mzq1q3LD3/4wyJnyP3rX/8iOTk54fNLUkXlQwokSTqxGU6pgLPPPptLLrmkXK+xbds2du7cyXnnnVfqsWvXruWbb7455JPLilp+lbcXVp685Vbbt28vdT1Dhgzhscce45lnnmHAgAFs3ryZ5cuXc9ttt5W49GvMmDEsX76cjh070qRJE7p06UKfPn24/PLLiw3kDnUfecvETj/99CLb8+6vLGzZsoUxY8awaNEiduzYUeBY3bp1EzrH2rVr+fjjj0lJSSny+P79+wFYv349QJGhUGlnThV3jvXr19O+fXs
g5/t67bXX8vrrr9O+fXteffVV3n//fW655ZZSXQ9yZvH179+fDz74gMcff5yBAwcecnllcnIy//rXv0p9DUmSJEmqSAynFJtEApmDZWdnU716dZ5++umEx5x00klFth+8fDCRerp27UpaWhpZWVkMGDCAxx57jOzs7ISWfrVo0YJ169bx/PPP88ILL/D8888zc+ZMevTowbPPPnvIOku6j0Tu71D3lhcIFSc7O5sePXqwefNmMjMzOeecc6hRowaVKlVi9OjRCW0Wnneepk2bMn369CKPV6tWrUDdh/PzcbBEzzFgwAB+8pOfkJWVRfv27Zk9ezZJSUn8+Mc/LvU18/aXGj58OBs3buTqq68+ZN+WLVvyxhtvsHfvXp/SKEmSJOmEZTiloy4lJYWaNWvy97//vdRjmzVrxvPPP0+rVq3yN9o+mvWEEMjIyODuu+9m69atpV76Va1aNfr06UOfPn2Ioojbb7+diRMnsmzZskM+Ma4s5M0WO3CjboCPPvqoxLGrV6/mnXfeyd937EDbt28vMHOquDCoWbNmrFixgi5duhRYnniwpk2bAjkzrQ62bt26Eus9UHHnaNKkSX5bjRo16N+/P3PnzmXChAnMmzevxA3uD6Vq1aoMGDCAGTNmcNZZZ+XPzipKnz59+O///m+eeOIJhg4dWuprSZIkSVJFkNgjqKQyVKlSJfr168crr7zCc889V+h4cRuiDxo0CIBx48YVeXzbtm3lXk9GRgbZ2dlkZmayZs2ahDfM/uKLLwq8DyFwwQUXAGW7BK8ozZs3B2D58uX5bVEU8dBDD5U4Nm9m1sEzpLKystiyZUuhvklJSYWW/kHO9+6rr77iN7/5TaFjURTl70NVv3592rRpw9y5c9m6dWt+n61bt/L444+XWO+Bli5dyjvvvJP/fteuXcyYMYOGDRvSunXrAn2HDh3K9u3bGTZsGDt27CjVRugHGz16NHfeeScPPPBAsf2GDRtGWloaP/3pT3n77bcLHf/222/znyQoSZIkSRWVM6dUwGuvvVbk8qI6deoUuVH64br33ntZunQpV1xxBUOGDKF169Z89dVXLF++nEGDBnHNNdcUOa5z587cfPPN3H///axZsybA0sNmAAAgAElEQVR/CdXGjRt57rnnOO+885g1a1a51nPGGWfQrVs35s6dW6qlX927dyclJYXOnTvTqFEj/vnPfzJ16lRSUlJK3Ez9SHXv3p3vfve7XHfddbz33nvUrFmTBQsWsHv37hLHtmjRghYtWnDLLbewceNGGjZsyMqVK1m4cGH+LKcDtWvXjqVLlzJ58mRSU1OpV68e3bp1Iz09nYULFzJ27Fj+8pe/0K1bN6pVq8b69etZuHAh6enpjB8/HoBJkybRvXt3OnbsyIgRI4iiiOnTp9O0aVP++te/JnzfrVq1omvXrowaNYrk5GRmzpzJxo0befzxxwvN3rroooto3rw5c+fOTWiD++KcffbZ+fdSnFNPPZVnnnmGnj178v3vf5/+/fvTqVMnkpKS+OCDD3jqqaf46KOPSnz6pSRJkiQdzwynjtT4nXFXUKYeffRRHn300ULtbdq0KdNwql69eqxcuZK77rqLRYsWMWvWLOrWrUvHjh1p27ZtsWPvu+8+2rZty9SpU7n77rvJzs6mYcOGXHjhhQwfPvyo1DN06FCWLVtWqqVfN9xwA08++SQPP/wwO3fupH79+vTq1Ytf/OIX1KlT57DqTlTlypVZuHAho0aN4p577iE5OZlBgwYxfPjwEp/EWLlyZRYvXkxmZiZTpkxh3759dOjQgeXLlzN69OhC/adNm8aNN97InXfeye7du7n44ovp1q0bIQTmzZvHtGnT+P3vf88dd9zBSSedROPGjenRowcDBgzIP0fXrl1ZvHgxP//5zxk3bhwNGjRg5MiRpKSklGr522WXXUazZs2YMGEC69evp2nTpsyZM4f09PQi+2dkZDB27FiuvvrqEje4LyutWrVi9erVPPj
ggzz99NM8/fTT7Nu3j8aNG/ODH/yABQsW0KhRo6NSiyRJkiTFIRS3hOpE0bZt22jVqlWHPP7ee+9x9tlnH8WKdKybP38+AwYMYMmSJVx66aVxl6MyMmnSJG677TbWrl1b6icDxsW/nw7P0Xhs/dG4xtG6jvdSumscret4L6W7xtG8ztHgvRxanPcixWJ8zQT7JTaxwt/J0vMzO7QQwptRFBU/AwX3nJIOy/Tp02ncuHG5L8fT0ZOdnc0jjzxCp06djptgSpIkSZIqApf1SQn6+uuvWbRoEStXrmTZsmU8+OCDR23pl8rPZ599xtKlS1myZAnvv/8+kyZNirukE5r/r5MkSZJ04jGckhK0bds2Bg0aRHJyMsOHD2fkyJFxl6QysGbNGq688krq1KnDuHHj6N27d9wlSZKOY4bskiSVnuGUlKC0tDTco63i6dKli99XSZIkSYqRa5IkSZIkSZIUG2dOSZIkSZJ0BFzSKx0ZZ04lyGU/ko41/r0kSZIkqSIwnErAySefzJ49e+IuQ5IK2LNnD1WqVIm7DEmSJEk6IoZTCahbty4ff/wx27dvZ9++fc5WkBSrKIrYvXs3mzdvpl69enGXI0mSJElHxD2nElCzZk2qVq3Ktm3b+OKLL/j222/jLknSCa5KlSrUr1+f5OTkuEuRJEmSpCNiOJWgpKQkGjduHHcZkiRJkkrJzaol6djmsj5JkiRJkiTFxnBKkiRJkiRJsTGckiRJkiRJUmwMpyRJkiRJkhQbwylJkiRJkiTFxnBKkiRJkiRJsTGckiRJkiRJUmwMpyRJkiRJkhQbwylJkiRJkiTFxnBKkiRJkiRJsTGckiRJkiRJUmwMpyRJkiRJkhQbwylJkiRJkiTFpnLcBUiSjg9pY/5cpufbMKFXmZ5PklS2/HtfknS0OHNKkiRJkiRJsTGckiRJkiRJUmwMpyRJkiRJkhQbwylJkiRJkiTFxnBKkiRJkiRJsTGckiRJkiRJUmwMpyRJkiRJkhQbwylJkiRJkiTFxnBKkiRJkiRJsTGckiRJkiRJUmwMpyRJkiRJkhQbwylJkiRJkiTFxnBKkiRJkiRJsTGckiRJkiRJUmwMpyRJkiRJkhQbwylJkiRJkiTFxnBKkiRJkiRJsakcdwGSJEmSdKANSekJ9Uvb+0Q5V3LiShvz5zI934YJvcr0fEdsfM0E++0s3zokAc6ckiRJkiRJUowMpyRJkiRJkhQbwylJkiRJkiTFxnBKkiRJkiRJsTGckiRJkiRJUmx8Wp8kSZIkHScq/FP0JJ2QDKck6Tjnf6RKkiRJOp65rE+SJEmSJEmxMZySJEmSJElSbAynJEmSJEmSFBvDKUmSJEmSJMXGDdElSZIkSToOFPUgnA1JRzDWB+HoGOHMKUmSJEmSJMXGcEqSJEmSJEmxMZySJEmSJElSbAynJEmSJEmSFBvDKUmSJEmSJMXGcEqSJEmSJEmxMZySJEmSJElSbAynJEmSJEmSFBvDKUmSJEmSJMXGcEqSJEmSJEmxMZySJEmSJElSbCrHXUAIoTJwOzAUOA3YADwMTI2iKEpg7A3AdcB3gd3AO8BvoihaUo5lS5IkSceUDUnpCfVL2/tEOVciSVLpxB5OAdPICZceAV4HegBTgO8Av0xw7Dzgt0By7vvnQgj9oyh6qryKliRJkqQTiQGopPISazgVQjifnDDpgSiKbs5t/l0IYT7w8xDCI1EUbTnE2FOBIcCfoigaeED7LGBz7jHDKUmSJElHRdqYP5fp+TZM6FWm55OkY1Xce079OPf1oYPaHwKqAn2KGXsKcBLwyUHtXwB7ga/LokBJkiRJkiSVn7iX9bUFPo2iaONB7a8D2UCbQw2MomhrCGENMCSE8DrwEnAq8FNyQrf7yqViSZIkqRRcCiVJUvHiDqcakrMEr4Aoir4JIXwBNCphfD/gCSDrgLatwCVRFK0ssyolSZIkSZJULuJe1lcN+Pchju3NPV6cXeQ8ne9h4Efk7DP
1CTkboncobmAIYVgIYVUIYdW2bdtKV7UkSZIkSZLKRNzh1B5y9pYqSlLu8SLlboj+F+DjKIr+/yiK/hhF0SzgImAnML24C0dRNCOKorZRFLVNSUk5rOIlSZIkSZJ0ZOIOpz4hZ2lfASGEk4E6FN7s/ED9gFTgTwc2RlG0G3gOOC+EULPsSpUkSZIkSVJZizucehNoEEI4/aD2duTU9mYxYxvkvp5UxLG8vbSqHFl5kiRJkiRJKk9xh1N/yH39yUHtPwG+ARYChBCqhxBahBDqHtBnbe7rVQcODCHUBi4DNkZR9HnZlyxJkiRJkqSyEuvT+qIoejuE8Hvg5tw9pF4HegADgLuiKMpb1tceeBG4Cxif27YI+CtwYwjhNGApkAwMI2dW1dVH6z4kSZIkSZJ0eGINp3KNADaR86S9DGADcBMwpbhBURR9G0L4D+BWcp7U1wOIgLeB0VEUPVN+JUuSJEmSJKksxB5ORVG0j5wZUXcV0+clIBTR/hVwZ+6XJEmSJOk4kTbmz4XaNiSlJzZ27xOFx07odcQ1SYpH3HtOSZIkSZIk6QRmOCVJkiRJkqTYGE5JkiRJkiQpNoZTkiRJkiRJio3hlCRJkiRJkmJjOCVJkiRJkqTYGE5JkiRJkiQpNoZTkiRJkiRJik3luAuQJEmSpIpqQ1J6Qv3S9j5RzpVI0rHLmVOSJEmSJEmKjeGUJEmSJEmSYmM4JUmSJEmSpNgYTkmSJEmSJCk2hlOSJEmSJEmKjeGUJEmSJEmSYmM4JUmSJEmSpNgYTkmSJEmSJCk2hlOSJEmSJEmKjeGUJEmSJEmSYmM4JUmSJEmSpNgYTkmSJEmSJCk2hlOSJEmSJEmKjeGUJEmSJEmSYmM4JUmSJEmSpNgYTkmSJEmSJCk2hlOSJEmSJEmKjeGUJEmSJEmSYmM4JUmSJEmSpNgYTkmSJEmSJCk2hlOSJEmSJEmKjeGUJEmSJEmSYlM57gIkSZKkg21ISk+oX9reJ8q5EkmSVN6cOSVJkiRJkqTYGE5JkiRJkiQpNoZTkiRJkiRJio3hlCRJkiRJkmJjOCVJkiRJkqTYGE5JkiRJkiQpNoZTkiRJkiRJio3hlCRJkiRJkmJjOCVJkiRJkqTYGE5JkiRJkiQpNpXjLkCSJEnS8WFDUnpC/dL2PlHOlRy5inQvknS8M5ySJElSqfiPekmSVJZc1idJkiRJkqTYGE5JkiRJkiQpNoZTkiRJkiRJio17TkmSJEnSccx94CQd75w5JUmSJEmSpNgYTkmSJEmSJCk2hlOSJEmSJEmKjeGUJEmSJEmSYmM4JUmSJEmSpNgYTkmSJEmSJCk2leMuQJIqsrQxfy7T822Y0KtMzydJkiRJcXPmlCRJkiRJkmJjOCVJkiRJkqTYGE5JkiRJkiQpNoZTkiRJkiRJio3hlCRJkiRJkmJjOCVJkiRJkqTYGE5JkiRJkiQpNoZTkiRJkiRJio3hlCRJkiRJkmJjOCVJkiRJkqTYGE5JkiRJkiQpNoZTkiRJkiRJio3hlCRJkiRJkmJTOe4CJEmSVDY2JKUn1C9t7xPlXIkkSVLinDklSZIkSZKk2BhOSZIkSZIkKTaGU5IkSZIkSYqN4ZQkSZIkSZJi44bokiRJkqQKK23Mnwu1bUg6grETeiU01odUSIlz5pQkSZIkSZJiYzglSZIkSZKk2BhOSZIkSZIkKTaGU5IkSZIkSYqN4ZQkSZIkSZJiYzglSZIkSZKk2BhOSZIkSZIkKTaGU5IkSZIkSYpN5bgLkCRJkuKyISk9oX5pe58o50okSTpxOXNKkiRJkiRJsTGckiRJkiRJUmwMpyRJkiRJkhQbwylJkiRJkiTFxnBKkiRJkiRJsfFpfZIk6Zjlk9QkSZIqPmdOSZIkSZIkKTaxh1MhhMohhF+EENaHEPaGENaGEEaFEEKC40/K7f92CGF3CGFHCOF/QgiXlnftkiRJkiRJOjLHwrK+acB1wCPA60APYArwHeCXxQ0MIVQ
CngJ6AlnAVKA60BJoXH4lS5IkSZIkqSzEGk6FEM4nJ5h6IIqim3ObfxdCmA/8PITwSBRFW4o5xSjgMqBbFEWvlHO5kiRJkiRJKmNxL+v7ce7rQwe1PwRUBfocamDurKlbgKejKHolhFAphFCjfMqUJEmSJElSeSj1zKkQwqlAZ+B0oC6wB/gM+GsURe+W8nRtgU+jKNp4UPvrQDbQppixLXJrmBFCmAYMBqqFEDYB90RR9Egpa5EkSZIkSdJRllA4FUKoBgwCrgXa878zrvI2LY9y+30OLACmRVG0OoFTNwQ2H9wYRdE3IYQvgEbFjD0z93U0sAu4CfgKGEZOYHVSFEX/t5h7Gpbbl9NPPz2BUiVJkiRJklTWig2nQgiVgZ8AY4HawF7gNeANYCuwHagG1CFnJlMHYAQwPISwFLgliqJ3irlENeBfhzi2N/f4oeQt4asJtIuiaENuzfOBd4Bf5u5Ztb+owVEUzQBmALRt2zYq5jqSJEmSJEkqJyXNnFoLNAGWkPM0vKejKPp3cQNCCGcCGcA1wNshhGujKJp9iO57yNlbqihJuccPJe/Y/+QFUwBRFO0PIcwFxpPz1L5EZnBJkiRJkiQpBiWFU2uAH0VR9LdETxhF0T/IedLeeOBGip/99AnQ6uDGEMLJ5MzG+qSEsZAzg+tgn+a+1i6pXkmSdGLbkJSeUL+0vU8cF9eRJEk63hQbTkVRdMXhnjiKom+AB0vo9ibQPYRwehRFmw5ob0fOvlZvFjN2NfBvILWIY3lt2xIsV5IkSZIkSTGoVHKXcvWH3NefHNT+E+AbYCFACKF6CKFFCKFuXocoinYBi4BOIYRz89pzN2+/CthIzrJESZIkSZIkHaMSelpfeYmi6O0Qwu+Bm0MIpwKvAz2AAcBdURTlLd1rD7wI3EXOXlJ5bgd+ALwYQvhPcp7WlwE0BvpFUeRG55IkSZIkScewMg2nQgjJQB+AYjZBP9gIYBMwhJxgaQNwEzClpIFRFH0QQugMTABuIWdz9beB/xNF0fOlLF+SJEmSJElHWVnPnDoNmAVEQELhVBRF+8iZEXVXMX1eAsIhjr0H9C5lnZIkSZIkSToGlHU4tZOcUMrldJIkSZIkSSpRmYZTURRtJWdpniRJkiRJklSiuJ/WJ0mSJEmSpBOY4ZQkSZIkSZJiU6plfSGEaxLtW4qn9UmSJEmSJOkEVdo9p2ZR8mbngVI8rU+SJEmSJEknrtKGU0MO0V4LaAcMBBYAfz6SoiRJkiRJknRiKFU4FUVRVnHHQwgzyQmm/vNIipIkSZIkSdKJoUw3RI+iaBmwBPhlWZ5XkiRJkiRJFVN5PK3vH0DbcjivJEmSJEmSKpjyCKdaUvKm6ZIkSZIkSVKpN0QvUgihEtAYuB7oCTxXFueVJEmSJElSxVaqcCqEkE3xs6IC8AXw0yMpSpIkSZIkSSeG0s6cWkHR4VQ2sAN4HZgZRdG2Iy1MkiRJkiRJFV+pwqkoirqUUx2SJEmSJEk6AZXHhuiSJEmSJElSQgynJEmSJEmSFJsyeVpfnhBCMtAHIIqi2WV5bkmSJEmSJFU8ZRpOAacBs8jZNN1wSpIkSZIkScUq63BqJzmhVFFP9JMkSZIkSZIKKNNwKoqirUBGWZ5TkiRJkiRJFZcbokuSJEmSJCk2hlOSJEmSJEmKzWEt6wshVAXaAY2AqkX18Wl9kiRJkiRJKkmpw6kQwlDgN0DtQ3XBp/VJkiRJkiQpAaVa1hdC+CHwO2ALcCs5QdTTwFjgv3LfzweGlm2ZkiRJkiRJqohKu+fULcAXQKcoih7IbftrFEUToij6IXA98P8BH5ZhjZIkSZIkSaqgShtOtQYWRVH0VVHniKLoUeB/yJlJJUmSJEmSJBWrtOHUKeQs6cuzF0g+qM8q4PtHUpQkSZIkSZJODKUNp7YCKQe83wKcdVCfmsBJR1KUJEmSJEmSTgylDafepWAY9QrwgxDCRQAhhHOBAbn9JEmSJEmSpGKVNpx
6DugcQmiY+/43wH7gpRDCNuBvwKnAPWVXoiRJkiRJkiqq0oZT04FGwOcAURStAX5ATmj1OfAC0DOKomfLskhJkiRJkiRVTJVL0zmKon3Apwe1vQZcVpZFSZIkSZIk6cRQ2plTkiRJkiRJUpkpduZUCKFaFEV7juQCZXEOSZJ0bNmQlJ5Qv7S9T5RzJZIkSTrelTRzan0I4aYQQtXSnjiEcH4I4Wng1sMrTZIkSZIkSRVdSeHUC8D9wJYQwrQQQtcQQrVDdQ4hNA0h3BBCeBV4CzgfeLHsypUkSZIkSVJFUuyyviiKrgkh/CdwLzAs92t/COE9YAuwA0gC6gBnAXWBQM6m6WOBB6Io+nf5lS9JkiRJkqTjWYlP64uiaBXQI4TQHLgW+AFwAdDqoK7bgD8CC4AFuU/2kyRJkiRJkg6pxHAqTxRF7wNjAEII1YFG5MyY2gN8FkXRlnKpUJIkSZIkSRVWwuHUgaIo2g28n/slSZIkSZIkHZaSNkSXJEmSJEmSyo3hlCRJkiRJkmJjOCVJkiRJkqTYGE5JkiRJkiQpNoZTkiRJkiRJio3hlCRJkiRJkmJTbDgVQtgeQrjtgPfjQgj/Uf5lSZIkSZIk6URQ0sypWkDSAe/HA13KqxhJkiRJkiSdWEoKpz4FUo9GIZIkSZIkSTrxVC7h+GvA1SGE/cCW3LYuIYSSzhtFUXT3kRYnSZIkSZKkiq2kcOqnwJnA8APaulDy0r4IMJySdMxKG/PnMj3fhgm9yvR8kiRJknSiKDaciqLogxBCK6AJ0Ah4CZgFZJV7ZZIkSZIkSarwSpo5RRRF2cCHwIe5y/k2RFH0cnkXJkmSJEmSpIqv2HAqhPAW8H+jKJqR2zQEeLvcq5IkqQLakJSeUL+0vU+UcyWSJEnSsaOkp/VdADQ44P3vgT7lV44kSZIkSZJOJCWFU18AdQ94X+Jj+iRJkiRJkqRElbTn1F+Bq0MIm4EtuW0XhBCuKenEURTNPtLiJEmSJEmSVLGVFE6NAZ4Ffg1EuW29c78OJeT2NZySJEmSJElSsYoNp6IoejOE0AxoDzQCZgFP535JkiRJkiRJR6SkmVNEUfQVsAwghDAL+GsURVnlXJckSZIkSZJOACWGUwdpAnxZHoVIkiRJkiTpxFOqcCqKoo15fw4hnAKcCdSIouiVsi5MkiRJkiRJFV+l0g4IIaSGEBYAO4BVwIsHHLswhLAmhNCl7EqUJEmSJElSRVWqcCqEcBqwkpyn9S0GXiXn6Xx5VgL/r727j9atqusF/v3lETaoXRVRhLJjomlvWoL0cq9iETezFytDo26ilxhcJUysLllcwboNe1XCwgAV76hzzfc38ioVUFLKhWvSaIgv6IESRMAkCY6gzPvHszZsNs/e+3n2fvaZ++XzGWONxZlrzvXMh8lak/E9a83n4UmeM6sOAgAAALB1Tfvk1MszCp+Oaq39RJILFx5srd2Z5G+TfO9sugcAAADAVjZtOPVDSd7dWrt4mTrXJjl41T0CAAAAYNuYNpx6RJJPrlDnziQPWF13AAAAANhOpvq1viRfSPL1K9R5XJLPra47AMBa7Z47dqJ6O/fsWueeAADAyqZ9curSJD9aVQeNO1hVj03yg1nwC34AAAAAsJRpw6nfTTKX5JKqekaS/ZOkqh4w/Pk9Se5K8vsz7SUAAAAAW9JUr/W11j5cVSckeW2S9y449G/D/itJXtBa+6cZ9Q8AAACALWzaNafSWntDVX0wyQuTfFeSA5LckuRDSV7TWvv4bLsIAAAAwFY1dTiVJK21TyZ5yYz7AgAAAMA2M+2aUwAAAAAwM8IpAAAAALoRTgEAAADQjXAKAAAAgG6EUwAAAAB0I5wCAAAAoBvhFAAAAADdCKcAAAAA6EY4BQAAAEA3wikAAAAAuhFOAQAAANCNcAoAAACAboRTAAAAAHQjnAIAAACgm+7hVFXtqKrTquozVbWnqq6qqpOqqqY8z75V9YmqalX1m+vVXwAAAAB
mZ0fvDiQ5O8nxSc5NclmSo5OcleShSV4xxXn+e5KDZ947AAAAANZN1yenquqJGQVTr2qtndBaO6+1dkyStyZ5WVU9csLzPDrJrybxxBQAAADAJtL7tb7nDPszF5WfmWTfJM+a8DxnJflwkjfNqF8AAAAA7AW9X+s7LMkNrbVrFpVfluSuJE9e6QRV9eNJfjDJd8y+ewAAAACsp95PTh2c5LOLC1trdyS5OckhyzWuqv2TvDrJH7XW/nFdeggAAADAuukdTu2X5MtLHNszHF/O/0gyN+ynUlUnVNXlVXX5jTfeOG1zAAAAAGagdzh1e0ZrS40zNxwfq6qekOSUJL/aWrtl2g9urZ3TWjustXbYgQceOG1zAAAAAGagdzh1XUav9t1LVe2T5IDh+FJ+J8lnkvxtVR1aVYcm+Ybh2EOGsgfMusMAAAAAzE7vcOqKJAdV1aMWlR+eUd+uWKbt1yd5XJJPJPnksF08HHvh8OcfmGVnAQAAAJit3r/W9+YkpyY5OckvLSg/OckdSd6Z3L3w+aOS3NRau2mo80tJHrzofAcm+eMkb0vypox+9Q8AAACADaprONVa+0hVvT7JKVX1oIzCpKOTHJPkjNba/Gt9T0lyUZIzkpw+tP3Lxeerqp3DP17VWnvrunYeAAAAgDXr/eRUkpyY5Nokz09yXJLdSV6c5Kx+XQIAAABgb+geTrXW7szoiagzlqlzcZKa4Fy7J6kHAAAAwMbQe0F0AAAAALYx4RQAAAAA3QinAAAAAOhGOAUAAABAN8IpAAAAALoRTgEAAADQjXAKAAAAgG529O4AACxn99yxE9XbuWfXOvcEAABYD56cAgAAAKAb4RQAAAAA3QinAAAAAOhGOAUAAABAN8IpAAAAALoRTgEAAADQjXAKAAAAgG6EUwAAAAB0s6N3BwAW23nqBTM93+5XPnOm5wMAAGB2PDkFAAAAQDfCKQAAAAC68VofACTZPXfsRPV27tm1zj0BAIDtxZNTAAAAAHQjnAIAAACgG+EUAAAAAN0IpwAAAADoRjgFAAAAQDfCKQAAAAC62dG7AwAAAMDGtXvu2Inq7dyza517wlblySkAAAAAuhFOAQAAANCNcAoAAACAboRTAAAAAHQjnAIAAACgG+EUAAAAAN0IpwAAAADoRjgFAAAAQDfCKQAAAAC62dG7AwBsXrvnjp2o3s49u9a5JwAAwGblySkAAAAAuhFOAQAAANCNcAoAAACAboRTAAAAAHQjnAIAAACgG+EUAAAAAN0IpwAAAADoZkfvDgAwe7vnjp2o3s49u9a5JwAAAMvz5BQAAAAA3QinAAAAAOhGOAUAAABAN8IpAAAAALoRTgEAAADQjXAKAAAAgG6EUwAAAAB0I5wCAAAAoBvhFAAAAADdCKcAAAAA6EY4BQAAAEA3wikAAAAAuhFOAQAAANCNcAoAAACAboRTAAAAAHQjnAIAAACgG+EUAAAAAN0IpwAAAADoRjgFAAAAQDfCKQAAAAC6EU4BAAAA0I1wCgAAAIBuhFMAAAAAdCOcAgAAAKAb4RQAAAAA3QinAAAAAOhGOAUAAABAN8IpAAAAALoRTgEAAADQjXAKAAAAgG6EUwAAAAB0I5wCAAAAoBvhFAAAAADdCKcAAAAA6EY4BQAAAEA3wikAAAAAuhFOAQAAANCNcAoAAACAboRTAAAAAHSzo3cHgM1j56kXzPR8u1/5zJmeDwAAgM3Hk1MAAAAAdCOcAgAAAKAb4RQAAAAA3QinAAAAAOhGOAUAAABAN93DqaraUVWnVdVnqmpPVV1VVSdVVa3Q7iFVdUpV/XVVXV9Vt1bVlVX1q1U1t7f6DwAAAMDqdQ+nkpyd5BVJLkxyUpIrk5yV5LQV2h2R5HeS7Eny+0lOGdr+zyR/VVX3W68OAwAAADAbO3p+eFU9McnxSV7VWjtlKD6vqt6S5GVVdW5r7folml+V5HGttU8vKDunqj6dUbD1Y0nevl59BwAAAGDtej859Zx
hf+ai8jOT7JvkWUs1bK3tXhRMzXvLsP/mtXcPAAAAgPXUO5w6LMkNrbVrFjpOL0IAABY+SURBVJVfluSuJE9exTkPHvY3rqVjAAAAAKy/3uHUwUk+u7iwtXZHkpuTHDLNyarqa5L8WpLbkrxzhbonVNXlVXX5jTfKsQAAAAB66B1O7Zfky0sc2zMcn8YrkvynJKe21m5YrmJr7ZzW2mGttcMOPPDAKT8GAAAAgFnoHU7dntHaUuPMDccnUlUvyuipqbNba2fNoG8AAAAArLPe4dR1uWeNqLtV1T5JDhiOr6iqjktyVpI/S3LSDPsHAAAAwDrqHU5dkeSgqnrUovLDM+rbFSudoKqek+R1Sd6T5LjW2l0z7yUAAAAA62JH589/c5JTk5yc5JcWlJ+c5I4Mi5pX1f5JHpXkptbaTfOVqupHk/xpkouSHNNa+8pe6jfAqu2eO3aiejv37FrnngAAAPTXNZxqrX2kql6f5JSqelCSy5IcneSYJGe01uZf63tKRgHUGUlOT5KqOjyjcOv2JG9J8lNVtfD0V7fW/n5vfA8AAAAAVqf3k1NJcmKSa5M8P8lxSXYneXFGa0gt51syWkx93ySvHXP8jUmEUwAAAAAbWPdwqrV2Z0ZPRJ2xTJ2Lk9SisvOTnL+OXQMAAABgnfVeEB0AAACAbUw4BQAAAEA3wikAAAAAuhFOAQAAANCNcAoAAACAboRTAAAAAHQjnAIAAACgG+EUAAAAAN0IpwAAAADoRjgFAAAAQDfCKQAAAAC6EU4BAAAA0I1wCgAAAIBuhFMAAAAAdCOcAgAAAKAb4RQAAAAA3QinAAAAAOhGOAUAAABAN8IpAAAAALoRTgEAAADQjXAKAAAAgG6EUwAAAAB0I5wCAAAAoJsdvTsAzMbOUy+Y6fl2v/KZMz0fAAAAjOPJKQAAAAC6EU4BAAAA0I1wCgAAAIBuhFMAAAAAdCOcAgAAAKAb4RQAAAAA3QinAAAAAOhGOAUAAABAN8IpAAAAALoRTgEAAADQjXAKAAAAgG6EUwAAAAB0I5wCAAAAoBvhFAAAAADdCKcAAAAA6EY4BQAAAEA3wikAAAAAuhFOAQAAANCNcAoAAACAboRTAAAAAHSzo3cHADaK3XPHTlRv555d69wTAACA7cOTUwAAAAB0I5wCAAAAoBvhFAAAAADdCKcAAAAA6EY4BQAAAEA3wikAAAAAuhFOAQAAANCNcAoAAACAboRTAAAAAHQjnAIAAACgG+EUAAAAAN0IpwAAAADoRjgFAAAAQDfCKQAAAAC6EU4BAAAA0I1wCgAAAIBuhFMAAAAAdCOcAgAAAKAb4RQAAAAA3QinAAAAAOhmR+8OAExi99yxE9XbuWfXOvcEAACAWfLkFAAAAADdCKcAAAAA6EY4BQAAAEA3wikAAAAAuhFOAQAAANCNcAoAAACAboRTAAAAAHQjnAIAAACgG+EUAAAAAN0IpwAAAADoRjgFAAAAQDfCKQAAAAC6EU4BAAAA0I1wCgAAAIBuhFMAAAAAdCOcAgAAAKAb4RQAAAAA3QinAAAAAOhmR+8OAJvb7rljJ6q3c8+ude4JAAAAm5EnpwAAAADoRjgFAAAAQDde64N1tvPUC2Z6vt2vfOZMzwcAAAA9eXIKAAAAgG66h1NVtaOqTquqz1TVnqq6qqpOqqqasP33VdWlVXVbVd1YVW+oqgPXu98AAAAArF33cCrJ2UlekeTCJCcluTLJWUlOW6lhVT0tyfuTzCV5aZLXJPnJJBdV1X7r1WEAAAAAZqPrmlNV9cQkxyd5VWvtlKH4vKp6S5KXVdW5rbXrlznFq5Ncn+RprbVbh3NenuS9SU5M8qr16z2s3u65Yyeqt3PPrnXuCQAAAPTVe0H05wz7MxeVn5nk2UmeldGTVfdRVY9L8qQkp88HU0nSWrugqq5O8twIp9jmhGAAAABsdL1f6zssyQ2ttWsWlV+W5K4kT16hbZJ8eMyxDyV5UlXdb+1dBAAAAGC
99A6nDk7y2cWFrbU7ktyc5JAV2mZc+yTXJdknycPW2kEAAAAA1k+11vp9+Oj1uxtaa98z5ti1ST7dWjtyibanZbSQ+je11j6x6NgrMlpQ/dGttd1LtD8hyQnDH78pycdX+TU2o4clual3J9jrjPv2Y8y3J+O+/Rjz7ceYb0/Gffsx5tvTVhv3b2itHbhSpd5rTt2eZN8ljs0Nx5drmyXazy2qcx+ttXOSnLNSB7eiqrq8tXbYyjXZSoz79mPMtyfjvv0Y8+3HmG9Pxn37Mebb03Yd996v9V2Xe17Pu1tV7ZPkgOH4cm0zrv1Qdke2VtoIAAAAsOX0DqeuSHJQVT1qUfnhGfXtihXaJskRY449JclHW2tfXXsXAQAAAFgvvcOpNw/7kxeVn5zRk0/vTJKq2r+qHl9Vdy9w3lr7eJKPJnlBVT1gvryqnpHksUn+fD07vslty9cZMe7bkDHfnoz79mPMtx9jvj0Z9+3HmG9P23Lcuy6IniRV9bokz09ybpLLkhyd5JgkZ7TWTh/qHJnkooVlQ/nTk1yY5B+SnJfk4UlemtEv+B3WWrttb30PAAAAAKbXe0H0JDkxybUZBVTHJdmd5MVJzlqpYWvtoqr6wSS/keRVSW7L6GmrXxFMAQAAAGx83Z+cAgAAAGD76r3mFDNSVTuq6rSq+kxV7amqq6rqpKqqCdt/X1VdWlW3VdWNVfWGqjpwvfvN6lTVk6vq1VV1ZVV9qao+V1V/VVVHTdj+/KpqS2xft979Z3Wqaucy43behOd4UlV9YPjv5otV9faq+sb17jurs8K12qrq19bQ3rW+AVTVA6vq9Kp6T1VdP4zN+UvUXdNcP5zDfN/ZpGO+1rl+OId7wAYwxZiveZ4fzmOu3wCmGPc1zfUTnMO1vpdMc982p9/XRnitj9k4O8nxuffaXWcleWiSVyzXsKqeluT9Sa7MaM2u+bW7Dq+qw1trt69jv1mdX07y/UneluQ1SR6Y0auxF1bVC1trZ094nucluWtR2Rdm1kvWy7uSvHVR2adWalRVj0/yN0k+n+TXkswleUmSD1bVd7TWbph1R1mzP0nyl2PKX5zksCTvm/A8rvWN62FJXp7k+iSXJ/nhZequeq5PzPcbyKRjPqu5PnEP6G2a6zxZ5TyfmOs3mEnHfVZzfeJa722a+7Y5fbHWmm2Tb0memKQl+YNF5W9JsifJI1do/5GM1v164IKyZw7nfEnv72cbO2bfk2RuUdl+ST6e0QS0Y4X25w/ju2w928bakuwcxu03V9n+HUm+lOSQBWXfluSrSc7s/f1sE4/j/kn+LcmVE9R1rW/wLcm+89dkRn9p2JKcP6bemub6oa75fgNsU4z5mub6ob57wAbYphjzNc3zwznM9Rtkm3Tcl2g78Vw/1Hetb4Bt0vu2OX385rW+reE5w/7MReVnZnRTfNZSDavqcUmelOR1rbVb58tbaxckuTrJc2fbVWahtfZ3rbU9i8puT/LeJA9JctCEp6qq+tqqci/YZKpqv6rab4r6D0zyQ0ne2lr77Hx5a+0fM/o1VNf65vHjSR6U5I1TtHGtb1CttS8vvCaXseq5PjHfbySTjvkM5/rEPaCrKa7zu007zw9tzPUbyGrGfYHVzPWJa72rKe7b5vQx/Ee7NRyW5IbW2jWLyi/L6LHOJ6/QNkk+PObYh5I8qarut/YuspccnOQrSf51wvo3J7klya1V9baqesy69YxZenFGv056W1V9sqpeNEGbb0+yT5a+1h9uTYJN43kZXed/OkUb1/rmt5a5fr59Yr7fCqad6xP3gM1mNfN8Yq7fSlYz1yeu9Y1q8X3bnD6GNae2hoOT3CeVb63dUVU3JzlkhbYZ1z7JdRlNcA9L4v30Da6qvjnJTyR5d2vt31eo/rkkf5DkiiRfTvJdSX4hyVOr6rAxN0o2hruS/FWSdya5JqPr9/gkr6mqna21X16m7UrXejK6V/zLjPrKOqiqQzJay+B9bbJ1Q1zrW8da5vr
59hl3jpjvN40p5/rEPWCzWcs8n5jrt4RVzPWJa33DWuK+bU4fQzi1NeyX0TvJ4+wZji/XNhndxMa1XViHDaqqvjajd5Rvy2jRy2W11k5dVPS2qvpAkg8kOSPJcbPuI2vXWrs2yb1+7WP49Z6/TnJKVb22tXb1Es1d61vDf8noqefzJ6nsWt9S1jLXJ+4Bm960c33iHrDZrHGeT1znW8VUc33iWt+olrlvm9PH8Frf1nB7Ru+mjjM3HF+ubZZoP7eoDhvQsB7Be5J8Y5JnDf9jM7XW2oVJ/l9GvxTBJtFa+2qS38vofv79y1R1rW8NP5fRI+HvWe0JXOub1lrm+sQ9YFOb1VyfuAdsNlPM84nrfKtY81yfuNZ7W+G+bU4fQzi1NVyXex7tu1tV7ZPkgNzzGO9SbTOu/VB2R5Kb1tpB1scwxu9I8t1Jfqq1dskaT3lNRo+AsrnMP6q93NitdK0vrMMGVFWHJ3lCkv/dWhv3N2XTcK1vPmuZ6xPz/aa1DnN94h6w2Uwyzyfm+k1vxnN94lrvYoL7tjl9DOHU1nBFkoOq6lGLyg/PaIyvWKFtkhwx5thTknx0+BsbNpiq2pHkzUl+IMnPtdbeO4PTHprk8zM4D3vXocN+ubH7xyR3Zvy1fsTQ1hoUG9vzhv20v9wzjmt981nLXJ+Y7zeldZrrE/eAzWaSeT4x128Fs5zrE9f6XjfhfducPoZwamt487A/eVH5yRmlpu9Mkqrav6oeX1V3p+ettY8n+WiSF1TVA+bLq+oZSR6b5M/Xs+OszvDzsP8ryY8lObG19qYl6t1/GPNHLijbt6r2H1P32Um+LckF69Rt1qiqHjqmbC7JyzL6BZAPDGX3GffW2peS/EWSZy/67+Fbkzw9yZtba22dvwKrNPxN2nOTfKy1dtmY4671rW+iuT4x328Vk871Q133gC1g0nl+KDfXbzErzfVDHdf6BjbFfducPoYF0beA1tpHqur1GS2U+KCMfoLy6CTHJDmjtTb/2N9TklyU0aJ4py84xUuSXJjkkmHRxYcneWmSjyU5e698Cab1e0l+OsklSW6vqp9ddPzC4dc9DsloHN+YexZCfGSSD1fVO5J8IqP3kY/IaPHFf869/9tgY/n94W9YLs1orB6R0boEj03y6wveZR837snof24/nORvquqsjN5Tf0mSG5P81t74AqzaD2f0mPfvLnHctb6JVdVJSR6ce/7S8Nur6teHf353a+3KKeb6xHy/4U0y5pl8rk/cAza8Ccd80nk+MddvChOO+7yV5vrEtb7RTXTfNqcvobVm2wJbkvsneXmS3Rmt2v/xjJLXWlDnyCQtyelj2h+V5O8zupndnNEN7xG9v5dtyfG+eBjLpbYjh3o7hz+fv6Dtg4fxvSrJlzJK569O8odJHt77u9mWHfefzmhy+twwbl8c/vwTi+rdZ9wXHPvOjCayW5PcktH78If2/m62Fcf+XUm+muTgJY671jfxNszdS93Pj1tQb8W5fqhnvt/g2yRjPulcP9R1D9jg24RjPtE8v9SYLzhmrt8g26T396HusnP9UuPuWt8425T3bXP6oq2GLwUAAAAAe501pwAAAADoRjgFAAAAQDfCKQAAAAC6EU4BAAAA0I1wCgAAAIBuhFMAAAAAdCOcAgAAAKAb4RQAAAAA3QinAAA2gKp6elW1qvqp3n2ZVlW9tKrurKrH9+4LALD5CKcAgG1vCIVW2o6squMmrHv3NuHnf02SVyX5aJK3LlHnoVV1alVdXFWfr6o7qupLVfVPVfWGqvqRqqo1/Dv4raHPvz1B3XOHur84FP1xks8n+b3Vfj4AsH1VaxP9PxMAwJa1IEQ6Y5lq5yd5cJJnLSrfmeR5Sa4Z6txLa+30CT7/2CR/luRnWmu7xhz/0SRvHD5/d5JLklyfZJ8kj0nytOHYW1trq3ryqqoeneTqJDcm+brW2p1L1HvAgs8+pLV281D+K0l+O8n3ttb+bjV9AAC2J+EUALDtzYd
TrbWpnzyqqiOTXJTkktbakav8/EuTfGuSg1prty869n1J3p/kK0l+IcnrW2t3Laozl+RnkxzdWjtmNX0YzvOBJD+Q5Cdba29fos5/TXJekl2ttZ9ZUH5wkmuTvKm19rOr7QMAsP14rQ8AoKNhnabvSfLuMcHU/ZK8NsmOJCe31s5bHEwlSWttT2vtvCTHLvEZP11VF1XVv1bVnqr6WFX9elXtu6jqOcP+55fp8vyxcxYWttauS/K3SZ5dVV+7THsAgHsRTgEA9HXUsP/gmGNHJnlskn9O8vqVTtRa+8risqp6XZJdSQ5N8vYkf5TkC0l+I8n/qaodC6q/K6O1o46uqkeNOde3JjkiySdaa5eM6cKlSfZN8tSV+goAMG/HylUAALaHqjp9iUN7WmuvXKeP/Y/D/vIxx7532F/SWvvqtCeuquOSvCDJOzJaz+r2BcdOT/LyJC9KcmaStNburKrzk/zK0O70Raecf2rq3CU+8v8O+6cmee+0/QUAtidrTgEA294Ev6p3S2vtwUu0PTJrWHOqqv4uyXdntLj4dYuO/XGS/5bkt1trp45pe/qYU766tfbF4fhHMlrL6sD5sgVt75fkhiSfbq09ZUH5oUk+kdHTWo+ef41weAXwuiQPzGjB9BvH9OeIJB9K8uettedO9m8AANjuPDkFADBYzYLoM3DAsP/XMcfm+7NUePbyMWXnJ/liVe2f5IlJbkryi1Vjv9qXkzxhYUFr7VNVdXGSpyf5z0neNxz6ySQPzSh4uk8wNfjCsH/YEscBAO5DOAUA0Nf8q3ZzC/553vXD/pBxDReGaVX1wdzzGmCSPCSjcOvAjA+xlnNORuHU8bknnDp+2C/1Sl+S7DfsF38PAIAlWRAdAKCvzw/7A8Ycu3TYH1lV0/5/2y3D/iOttVpuG9P27Rk9cfUjVfWIqnpMRouzX53kr5f5zPnv8Pll6gAA3ItwCgCgryuH/ePHHLs4yaeSfH2S509z0tbarUn+Kcm3VNVDp2x7R5I3Jrl/kudl9NRUJTm3Lb9g6fx3+IdpPg8A2N6EUwAAfV087L9r8YHhF/pOTPKVJGdV1fPHPUFVVfdPsv+Yc/9Bkn2SvL6q7rOge1U9pKq+c4l+zb++9/NJjktyZ0brWS1n/jtctEI9AIC7+bU+AGDbW/BrfWcsU+2drbX7PBE0g1/r2z/JZ5N8qrV2+BJ1fiyjJ5n+Q5LdSS7J6Jfz5pIcnOSojF6puzLJ0xb+Ml9V/VGSF2a0WPn7k1yb0cLmj07y1CRvaK2duMTnXjLUSZK3tdaevcz3+Jrh3Le21sY9BQYAMJYF0QEA7rHcwuG7sw6vq7XWbquq8zP6Rb0ntNY+NqbOu4Z1n05I8owkz0zy4CR7kvxLkguSvCXJX7TW7lrU9kVV9b6MnsA6amj3hYyCpN9N8qfLdO+c3BNOnbPCVzkqo4XbX7JCPQCAe/HkFABAZ1W1M8lVSf6ktfbivr1Znap6W5KnJXlMa+2WleoDAMyz5hQAQGettd1J/jDJCVV1SOfuTK2qnpTkx5OcLpgCAKbltT4AgI3hN5P8e5KdGa1BtZk8MslpSV7buyMAwObjtT4AAAAAuvFaHwAAAADdCKcAAAAA6EY4BQAAAEA3wikAAAAAuhFOAQAAANCNcAoAAACAbv4/mk4NgGC0hR8AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "idx = np.arange(0,max_true/1000, step=(max_true/1000)/40)\n", + "\n", + "plt.bar(idx,eff_NN, width=0.4, label='Efficiency simulated by NN')\n", + "plt.bar(idx,eff_MC, width=0.2, label='Efficiency simulated by MC')\n", + "plt.legend(fontsize='xx-large')\n", + "plt.tick_params(labelsize='xx-large')\n", + "plt.xlabel('ET (GeV)', fontsize=20)\n", + "plt.ylabel('eff (a.u.)', fontsize=20)\n", + "fig = plt.gcf()\n", + "fig.set_size_inches(20,10)\n", + "plt.savefig(PATH+'/eff_2.png',dpi=100)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/true_root_to_df_DKpi.ipynb b/true_root_to_df_DKpi.ipynb new file mode 100644 index 0000000..8e21448 --- /dev/null +++ b/true_root_to_df_DKpi.ipynb @@ -0,0 +1,207 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/hep/davide/miniconda3/envs/root_env/lib/python2.7/site-packages/root_numpy/_tree.py:5: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility\n", + " from . 
import _librootnumpy\n" + ] + } + ], + "source": [ + "import root_numpy as rn\n", + "import pandas as pd\n", + "import numpy as np\n", + "import ROOT as r\n", + "import pickle\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "file_name='DplusMuNu_Full'\n", + "file_path='/disk/lhcb_data/davide/HCAL_project_full_event/'+file_name+'.root'\n", + "tree_name='ntuple/DecayTree'" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "f = r.TFile(file_path)\n", + "t = f.Get(tree_name)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def get_tracker_info(particle):\n", + " \n", + " t.SetBranchStatus(\"*\",0)\n", + " \n", + " t.SetBranchStatus(particle+\"_L0Calo_HCAL_xProjection\",1)\n", + " t.SetBranchStatus(particle+\"_L0Calo_HCAL_yProjection\",1)\n", + " t.SetBranchStatus(particle+\"_L0Calo_HCAL_realET\",1)\n", + " t.SetBranchStatus(particle+\"_L0Calo_HCAL_region\", 1)\n", + "\n", + " x_projections=[]\n", + " y_projections=[]\n", + " real_ET=[]\n", + " region=[]\n", + "\n", + " for i, event in enumerate(t):\n", + " \n", + " #if i > 2:\n", + " # break\n", + "\n", + " region.append(getattr(event,particle+\"_L0Calo_HCAL_region\"))\n", + " x_projections.append(getattr(event,particle+\"_L0Calo_HCAL_xProjection\"))\n", + " y_projections.append(getattr(event,particle+\"_L0Calo_HCAL_yProjection\"))\n", + " real_ET.append(getattr(event,particle+\"_L0Calo_HCAL_realET\"))\n", + " \n", + " true_events ={'true_x':np.array(x_projections),'true_y':np.array(y_projections),'true_ET':np.array(real_ET),'region':np.array(region)}\n", + " \n", + " return true_events" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "true_events_piplus = get_tracker_info(\"piplus\")\n", + "true_events_piplus0 = 
get_tracker_info(\"piplus0\")\n", + "true_events_Kminus = get_tracker_info(\"Kminus\")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(465693,)" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "true_events_piplus0['region'].shape" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(24401,)" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "true_events_piplus0['region'][np.where(true_events_piplus0['region']<0)].shape" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(1131.9902543165215, 507.3645231436275, 1)" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "true_events_Kminus['true_x'][0],true_events_Kminus['true_y'][0],true_events_Kminus['region'][0]" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "df = pd.DataFrame.from_dict(true_events_piplus)\n", + "df.to_csv('/disk/lhcb_data/davide/HCAL_project_full_event/csv/piplus/MCtrue_piplus.csv', index=False)\n", + "\n", + "df = pd.DataFrame.from_dict(true_events_piplus0)\n", + "df.to_csv('/disk/lhcb_data/davide/HCAL_project_full_event/csv/piplus0/MCtrue_piplus0.csv', index=False)\n", + "\n", + "df = pd.DataFrame.from_dict(true_events_Kminus)\n", + "df.to_csv('/disk/lhcb_data/davide/HCAL_project_full_event/csv/Kminus/MCtrue_Kminus.csv', index=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "b = r.TBrowser()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { 
+ "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.15" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}