diff --git a/1_to_1_multi_layer.ipynb b/1_to_1_multi_layer.ipynb index c850ffc..ad838a5 100644 --- a/1_to_1_multi_layer.ipynb +++ b/1_to_1_multi_layer.ipynb @@ -1,6 +1,13 @@ { "cells": [ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Import all packages" + ] + }, + { "cell_type": "code", "execution_count": 1, "metadata": {}, @@ -10,7 +17,8 @@ "output_type": "stream", "text": [ "c:\\users\\sa_li\\anaconda3\\envs\\rnn-tf-ker\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", - " from ._conv import register_converters as _register_converters\n" + " from ._conv import register_converters as _register_converters\n", + "Using TensorFlow backend.\n" ] } ], @@ -27,11 +35,37 @@ "from sklearn import preprocessing\n", "import pickle as pkl\n", "from pathlib import Path\n", + "from keras.datasets import imdb\n", + "from keras.models import Sequential\n", + "from keras.layers import Dense\n", + "from keras.layers import LSTM\n", + "from keras.layers import GRU\n", + "from keras.layers import Dropout, BatchNormalization\n", + "from keras.layers import ConvLSTM2D\n", + "from keras.layers import Conv1D\n", + "#from keras.layers.convolutional import Conv1D\n", + "#from keras.layers.convolutional import MaxPooling1D\n", + "from keras.layers.embeddings import Embedding\n", + "from keras.preprocessing import sequence\n", + "from keras.callbacks import History\n", + "from keras.callbacks import EarlyStopping\n", + "from keras.callbacks import ModelCheckpoint\n", + "from keras.models import load_model\n", + "\n", + "from xgboost import XGBClassifier\n", + "from sklearn.metrics import accuracy_score\n", "\n", "#import seaborn as sns" ] }, { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Import the dataset of the matched 8-hit tracks" + ] + }, + { "cell_type": "code", "execution_count": 2, "metadata": {}, @@ -45,22 +79,25 @@ ] }, { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Convert the data to an array (float32)" + ] + }, + { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], "source": [ - "#Check testset with arbitrary particle\n", + "#Convert the data\n", "\n", "tset = np.array(testset)\n", "tset = tset.astype('float32')\n", + "\n", + "#Check testset with arbitrary particle\n", + "\n", "#print(tset.shape)\n", "#for i in range(8):\n", " #print(tset[1,3*i:(3*i+3)])\n", @@ -68,12 +105,48 @@ ] }, { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Transformation between the original 2D-array and a 3D-array\n", + "\n", + "#### reshapor()\n", + "\n", + "Description:\n", + "\n", + "Transforms a 2D-array into a 3D-array\n", + "\n", + "Arguments:\n", + "\n", + "- arr_orig: Original 2D-array\n", + "- num_inputs: Number of inputs per timestep (default value = 3 for X,Y,Z coordinates)\n", + "\n", + "Returns:\n", + "\n", + "- arr: 3D-array of shape(particlenumber, timesteps, input = coordinates)\n", + "\n", + "#### reshapor_inv()\n", + "\n", + "Description:\n", + "\n", + "Inverse transformation from a 3D-array back to a 2D-array\n", + "\n", + "Arguments:\n", + "\n", + "- array_shaped: 3D-array of shape(particlenumber, timesteps, input = coordinates)\n", + "\n", + "Returns:\n", + "\n", + "- arr: 2D-array of shape(particlenumber, inputs)" + ] + },
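+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A quick, purely illustrative sketch of the two helpers documented above, on random toy data (the names below are hypothetical and not part of the original notebook; run the implementation cell underneath first):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Toy round trip through reshapor()/reshapor_inv(); illustration only\n", + "\n", + "toy = np.random.rand(5, 24).astype('float32') #5 particles, 8 timesteps * 3 coordinates\n", + "toy_3d = reshapor(toy) #shape (5, 8, 3)\n", + "assert reshapor_inv(toy_3d).shape == (5, 24)" + ] + }, + { "cell_type": 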
"code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ - "### Reshape original array into the shape (particlenumber, timesteps, input = coordinates)###\n", + "#Reshapes the 2D-array to a 3D-array\n", "\n", "def reshapor(arr_orig, num_inputs=3):\n", " timesteps = int(arr_orig.shape[1]/num_inputs)\n", @@ -86,7 +159,10 @@ " \n", " return arr\n", "\n", - "def reshapor_inv(array_shaped, num_inputs=3):\n", + "#The inverse transformation of the reshapor function (3D to 2D)\n", + "\n", + "def reshapor_inv(array_shaped):\n", + " num_inputs = array_shaped.shape[2]\n", " timesteps = int(array_shaped.shape[1])\n", " num_examples = int(array_shaped.shape[0])\n", " arr = np.zeros((num_examples, timesteps*num_inputs))\n", @@ -99,6 +175,37 @@ ] }, { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create random training and test sets from the data\n", + "\n", + "#### create_random_sets()\n", + "\n", + "Description:\n", + "\n", + "Splits an dataset into a train and a test set\n", + "\n", + "\n", + "Input:\n", + "\n", + "- dataset: The actual dataset with shape (particles, other dimensions)\n", + "- train_to_total_ratio: The ratio that the training-set should be out of the original set.\n", + " The remaining part will become the test-set\n", + " \n", + "\n", + "Returns:\n", + "\n", + "- train_set: The newly created training set (particles, other dimensions)\n", + "- test_set: The newly created test set (particles, other dimensions)\n", + " \n", + " \n", + "Additional comments:\n", + "\n", + "The data will be randomly shuffled before it gets split up" + ] + }, + { "cell_type": "code", "execution_count": 5, "metadata": {}, @@ -112,7 +219,7 @@ " p = np.random.permutation(num_examples)\n", " dataset = dataset[p,:]\n", " \n", - " #evaluate siye of training and test set and initialize them\n", + " #evaluate size of training and test set and initialize them\n", " train_set_size = np.int(num_examples*train_to_total_ratio)\n", " test_set_size = num_examples - train_set_size\n", " \n", @@ -137,6 +244,8 @@ "metadata": {}, "outputs": [], "source": [ + "#Create the training and test-sets\n", + "\n", "train_set, test_set = create_random_sets(tset, 0.9)\n", "\n", "#print(test_set.shape, train_set.shape, reshapor(tset).shape)\n", @@ -144,6 +253,68 @@ ] }, { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Normalization of the data\n", + "\n", + "### Normalization on a min_max_scaler from sklearn\n", + "\n", + "#### correct_array_steps()\n", + "\n", + "Description: As the scaler will be fixed on arrays of specific length this function returns an array padded with zeros with the correct shape\n", + "\n", + "Input:\n", + "\n", + "- arr: 3D-array of shape(particle_number, timesteps, num_inputs)\n", + "- steps: Required number of timesteps for the scaler (default value = 8)\n", + "- num_inputs: Number of inputs per timestep (default value = 3 for X,Y,Z coordinates)\n", + "\n", + "Returns:\n", + "\n", + "- arr: 3D array of shape(particle_number, steps, num_inputs)\n", + "\n", + "#### set_min_max_scaler()\n", + "\n", + "Description: Sets the min_max_scaler based on the dataset given (sklearn based)\n", + "\n", + "Input:\n", + "\n", + "- arr: 2D of shape(particle_number, inputs) or 3D-array of shape(particle_number, timesteps, num_inputs)\n", + "- feature_range: Tuple which defines the area to which the data should be scaled (default value = (-1,1))\n", + "\n", + "Returns:\n", + "\n", + "- min_max_scalor: min_max_scaler based of the data given\n", + "\n", + "#### 
min_max_scaler()\n", + "\n", + "Description: Transforms a 3D-array with a given min_max_scaler (sklearn based)\n", + "\n", + "Input:\n", + "\n", + "- arr: 3D-array of shape(particle_number, timesteps, num_inputs)\n", + "- min_max_scalor: The min_max_scaler used for the transformation (default value: min_max_scalor)\n", + "\n", + "Returns:\n", + "\n", + "- arr: Transformed 3D-array\n", + "\n", + "#### min_max_scaler_inv()\n", + "\n", + "Description: Transforms a 3D-array with a given min_max_scaler back to original form (sklearn based)\n", + "\n", + "Input:\n", + "\n", + "- arr: 3D-array of shape(particle_number, timesteps, num_inputs)\n", + "- min_max_scalor: The min_max_scaler used for the transformation (default value: min_max_scalor)\n", + "\n", + "Returns:\n", + "\n", + "- arr: Transformed 3D-array" + ] + }, + { "cell_type": "code", "execution_count": 7, "metadata": {}, @@ -171,19 +342,68 @@ "\n", "\n", "#transform data\n", - "def min_max_scaler(arr, min_max_scalor= min_max_scalor, num_inputs= 3):\n", + "def min_max_scaler(arr, min_max_scalor= min_max_scalor):\n", + "    num_inputs = arr.shape[2]\n", "    arr = correct_array_steps(arr)\n", - "    arr = reshapor(min_max_scalor.transform(reshapor_inv(arr)))\n", + "    arr = reshapor(min_max_scalor.transform(reshapor_inv(arr)), num_inputs=num_inputs)\n", "    return arr\n", " \n", "#inverse transformation\n", - "def min_max_scaler_inv(arr, min_max_scalor= min_max_scalor, num_inputs= 3):\n", + "def min_max_scaler_inv(arr, min_max_scalor= min_max_scalor):\n", + "    num_inputs = arr.shape[2]\n", "    arr = correct_array_steps(arr)\n", - "    arr = reshapor(min_max_scalor.inverse_transform(reshapor_inv(arr)))\n", + "    arr = reshapor(min_max_scalor.inverse_transform(reshapor_inv(arr)), num_inputs=num_inputs)\n", "    return arr" ] }, { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Normalization based on a standard_scaler from sklearn\n", + "\n", + "\n", + "#### set_std_scaler()\n", + "\n", + "Description: Sets the std_scaler based on the dataset given (sklearn based)\n", + "\n", + "Input:\n", + "\n", + "- arr: 2D-array of shape(particle_number, inputs) or 3D-array of shape(particle_number, timesteps, num_inputs)\n", + "- feature_range: Tuple which defines the area to which the data should be scaled (default value = (-1,1))\n", + "\n", + "Returns:\n", + "\n", + "- std_scaler: std_scaler based on the data given\n", + "\n", + "#### std_scaler()\n", + "\n", + "Description: Transforms a 3D-array with a given std_scaler (sklearn based)\n", + "\n", + "Input:\n", + "\n", + "- arr: 3D-array of shape(particle_number, timesteps, num_inputs)\n", + "- std_scaler: The std_scaler used for the transformation (default value: std_scaler)\n", + "\n", + "Returns:\n", + "\n", + "- arr: Transformed 3D-array\n", + "\n", + "#### std_scaler_inv()\n", + "\n", + "Description: Transforms a 3D-array with a given std_scaler back to original form (sklearn based)\n", + "\n", + "Input:\n", + "\n", + "- arr: 3D-array of shape(particle_number, timesteps, num_inputs)\n", + "- std_scaler: The std_scaler used for the transformation (default value: std_scaler)\n", + "\n", + "Returns:\n", + "\n", + "- arr: Transformed 3D-array" + ] + }, + { "cell_type": "code", "execution_count": 8, "metadata": {}, @@ -232,6 +452,13 @@ }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + },
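+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "A hedged usage sketch of the scaler pair defined above (variable names here are illustrative, not from the original notebook; it assumes min_max_scalor has already been fitted in the cells above):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Sketch: scale a few tracks and invert the transformation again\n", + "\n", + "sample = reshapor(tset[:3])\n", + "sample_scaled = min_max_scaler(sample)\n", + "sample_restored = min_max_scaler_inv(sample_scaled)\n", + "#print(np.allclose(sample, sample_restored, atol=1e-5))" + ] + }, + { + "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], @@ -505,7 +732,7 @@ "\n", " \n", " \n", - " def fit(self, minibatches, epochs, 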
print_step, validation_input, validation_output, checkpoint = 5, patience = 20, patience_trigger= 2./10**6):\n", + " def fit(self, minibatches, epochs, print_step, validation_input, validation_output, checkpoint = 5, patience = 20, patience_trigger= 1.5/10**6):\n", " patience_cnt = 0\n", " start = len(self.loss_list)\n", " epoche_save = start\n", @@ -710,7 +937,7 @@ "ninputs = 3\n", "\n", "#ncells as int or list of int\n", - "ncells = [100, 100, 100]\n", + "ncells = [150, 150, 150]\n", "activation = \"leaky_relu\"\n", "cell_type = \"lstm\"\n", "\n", @@ -735,7 +962,7 @@ "source": [ "tf.reset_default_graph()\n", "rnn = RNNPlacePrediction(time_steps=timesteps, future_steps=future_steps, ninputs=ninputs, \n", - " ncells=ncells, num_output=num_output, cell_type=\"lstm\", activation=\"leaky_relu\")" + " ncells=ncells, num_output=num_output, cell_type=cell_type, activation=activation)" ] }, { @@ -744,7 +971,7 @@ "metadata": {}, "outputs": [], "source": [ - "rnn.set_cost_and_functions()" + "#rnn.set_cost_and_functions()" ] }, { @@ -760,325 +987,10 @@ "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Epoch number 0\n", - "Cost: 316163.4969156842 e-6\n", - "Cost on validation_set: 317156.1284120692 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 0 \n", - "\n", - "Epoch number 10\n", - "Cost: 17620.41081349517 e-6\n", - "Cost on validation_set: 17808.353961553683 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 10 \n", - "\n", - "Epoch number 20\n", - "Cost: 12158.986289314058 e-6\n", - "Cost on validation_set: 12332.019157880959 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 20 \n", - "\n", - "Epoch number 30\n", - "Cost: 8429.908161159865 e-6\n", - "Cost on validation_set: 8557.763253880103 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 30 \n", - "\n", - "Epoch number 40\n", - "Cost: 6765.2886581802095 e-6\n", - "Cost on validation_set: 6839.355951998123 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 40 \n", - "\n", - "Epoch number 50\n", - "Cost: 5792.310645500588 e-6\n", - "Cost on validation_set: 5863.479143658349 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 50 \n", - "\n", - "Epoch number 60\n", - "Cost: 5376.994696467422 e-6\n", - "Cost on validation_set: 5446.046904855689 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 60 \n", - "\n", - "Epoch number 70\n", - "Cost: 4835.011047679324 e-6\n", - "Cost on validation_set: 4899.287358099638 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 70 \n", - "\n", - "Epoch number 80\n", - "Cost: 4453.0257474371165 e-6\n", - "Cost on validation_set: 4506.959038418393 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 80 \n", - "\n", - "Epoch number 90\n", - "Cost: 4041.3462069578645 e-6\n", - "Cost on validation_set: 4103.328707779563 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 90 \n", - "\n", - "Epoch number 100\n", - "Cost: 3746.72157949833 e-6\n", - "Cost on validation_set: 3810.4774051367544 e-6\n", - "Patience: 0 / 20\n", - "Last checkpoint at: Epoch 100 \n", - "\n", - "Epoch number 110\n", - "Cost: 3510.6311173199915 e-6\n", - "Cost on validation_set: 3572.610001144714 e-6\n", - "Patience: 1 / 20\n", - "Last checkpoint at: Epoch 110 \n", - "\n", - "Epoch number 120\n", - "Cost: 3328.6676720477817 e-6\n", - "Cost on validation_set: 3388.9460420712485 e-6\n", - "Patience: 1 / 20\n", - "Last checkpoint at: Epoch 120 \n", - "\n", - "Epoch number 130\n", - 
"Cost: 3178.6257574378055 e-6\n", - "Cost on validation_set: 3237.9631613662773 e-6\n", - "Patience: 1 / 20\n", - "Last checkpoint at: Epoch 130 \n", - "\n", - "Epoch number 140\n", - "Cost: 3045.4906938207705 e-6\n", - "Cost on validation_set: 3103.9184121804874 e-6\n", - "Patience: 1 / 20\n", - "Last checkpoint at: Epoch 140 \n", - "\n", - "Epoch number 150\n", - "Cost: 3014.904371094565 e-6\n", - "Cost on validation_set: 3073.1504057469065 e-6\n", - "Patience: 1 / 20\n", - "Last checkpoint at: Epoch 150 \n", - "\n", - "Epoch number 160\n", - "Cost: 2844.7661992855546 e-6\n", - "Cost on validation_set: 2901.79907257647 e-6\n", - "Patience: 1 / 20\n", - "Last checkpoint at: Epoch 160 \n", - "\n", - "Epoch number 170\n", - "Cost: 2818.9633171572245 e-6\n", - "Cost on validation_set: 2878.737646739843 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 165 \n", - "\n", - "Epoch number 180\n", - "Cost: 2705.8817240474527 e-6\n", - "Cost on validation_set: 2767.457837931985 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 180 \n", - "\n", - "Epoch number 190\n", - "Cost: 2802.5307259414085 e-6\n", - "Cost on validation_set: 2868.40574902504 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 185 \n", - "\n", - "Epoch number 200\n", - "Cost: 2473.8708554312243 e-6\n", - "Cost on validation_set: 2533.3558035971123 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 200 \n", - "\n", - "Epoch number 210\n", - "Cost: 2456.7929551351904 e-6\n", - "Cost on validation_set: 2513.4514421657764 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 210 \n", - "\n", - "Epoch number 220\n", - "Cost: 2420.2582480515853 e-6\n", - "Cost on validation_set: 2477.84735375019 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 220 \n", - "\n", - "Epoch number 230\n", - "Cost: 2906.6367897876476 e-6\n", - "Cost on validation_set: 2968.231827930309 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 225 \n", - "\n", - "Epoch number 240\n", - "Cost: 2734.680800850308 e-6\n", - "Cost on validation_set: 2792.3258075620547 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 225 \n", - "\n", - "Epoch number 250\n", - "Cost: 2242.8401929954457 e-6\n", - "Cost on validation_set: 2296.759985214056 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 250 \n", - "\n", - "Epoch number 260\n", - "Cost: 2807.354240483323 e-6\n", - "Cost on validation_set: 2874.5259948854527 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 250 \n", - "\n", - "Epoch number 270\n", - "Cost: 2101.4384015701535 e-6\n", - "Cost on validation_set: 2161.6989031953867 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 270 \n", - "\n", - "Epoch number 280\n", - "Cost: 2128.7833589540664 e-6\n", - "Cost on validation_set: 2190.027813708713 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 275 \n", - "\n", - "Epoch number 290\n", - "Cost: 2030.9363811298513 e-6\n", - "Cost on validation_set: 2090.106961902144 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 290 \n", - "\n", - "Epoch number 300\n", - "Cost: 2049.279973170785 e-6\n", - "Cost on validation_set: 2111.154213157851 e-6\n", - "Patience: 2 / 20\n", - "Last checkpoint at: Epoch 290 \n", - "\n", - "Epoch number 310\n", - "Cost: 1975.3764973693462 e-6\n", - "Cost on validation_set: 2037.5676658870868 e-6\n", - "Patience: 3 / 20\n", - "Last checkpoint at: Epoch 310 \n", - "\n", - "Epoch number 320\n", - "Cost: 2357.8102402605637 e-6\n", - "Cost on validation_set: 
2425.418484436218 e-6\n", - "Patience: 3 / 20\n", - "Last checkpoint at: Epoch 315 \n", - "\n", - "Epoch number 330\n", - "Cost: 1948.3688953440897 e-6\n", - "Cost on validation_set: 2011.0500397074013 e-6\n", - "Patience: 3 / 20\n", - "Last checkpoint at: Epoch 330 \n", - "\n", - "Epoch number 340\n", - "Cost: 1970.0811163327376 e-6\n", - "Cost on validation_set: 2031.9556784924378 e-6\n", - "Patience: 4 / 20\n", - "Last checkpoint at: Epoch 330 \n", - "\n", - "Epoch number 350\n", - "Cost: 1940.5714647714483 e-6\n", - "Cost on validation_set: 2004.524148719082 e-6\n", - "Patience: 5 / 20\n", - "Last checkpoint at: Epoch 350 \n", - "\n", - "Epoch number 360\n", - "Cost: 1968.3953304233592 e-6\n", - "Cost on validation_set: 2038.7437665661755 e-6\n", - "Patience: 5 / 20\n", - "Last checkpoint at: Epoch 355 \n", - "\n", - "Epoch number 370\n", - "Cost: 1976.2183642448033 e-6\n", - "Cost on validation_set: 2046.2597617446338 e-6\n", - "Patience: 5 / 20\n", - "Last checkpoint at: Epoch 355 \n", - "\n", - "Epoch number 380\n", - "Cost: 2104.1538024884326 e-6\n", - "Cost on validation_set: 2173.166846596571 e-6\n", - "Patience: 5 / 20\n", - "Last checkpoint at: Epoch 355 \n", - "\n", - "Epoch number 390\n", - "Cost: 1913.8040573357841 e-6\n", - "Cost on validation_set: 1984.506214665639 e-6\n", - "Patience: 6 / 20\n", - "Last checkpoint at: Epoch 385 \n", - "\n", - "Epoch number 400\n", - "Cost: 1854.3496478868778 e-6\n", - "Cost on validation_set: 1922.2521764594455 e-6\n", - "Patience: 6 / 20\n", - "Last checkpoint at: Epoch 400 \n", - "\n", - "Epoch number 410\n", - "Cost: 1861.1944058563472 e-6\n", - "Cost on validation_set: 1930.6607739350131 e-6\n", - "Patience: 6 / 20\n", - "Last checkpoint at: Epoch 400 \n", - "\n", - "Epoch number 420\n", - "Cost: 1866.6057852854908 e-6\n", - "Cost on validation_set: 1936.6092286830724 e-6\n", - "Patience: 6 / 20\n", - "Last checkpoint at: Epoch 400 \n", - "\n", - "Epoch number 430\n", - "Cost: 1881.5245793395957 e-6\n", - "Cost on validation_set: 1952.4438295859932 e-6\n", - "Patience: 6 / 20\n", - "Last checkpoint at: Epoch 400 \n", - "\n", - "Epoch number 440\n", - "Cost: 1911.4178085569727 e-6\n", - "Cost on validation_set: 1983.390877369878 e-6\n", - "Patience: 9 / 20\n", - "Last checkpoint at: Epoch 400 \n", - "\n", - "Epoch number 450\n", - "Cost: 1808.151681257715 e-6\n", - "Cost on validation_set: 1876.5825160973986 e-6\n", - "Patience: 9 / 20\n", - "Last checkpoint at: Epoch 450 \n", - "\n", - "Epoch number 460\n", - "Cost: 1807.536144719221 e-6\n", - "Cost on validation_set: 1876.1289024422335 e-6\n", - "Patience: 9 / 20\n", - "Last checkpoint at: Epoch 460 \n", - "\n", - "Epoch number 470\n", - "Cost: 1835.432499213967 e-6\n", - "Cost on validation_set: 1907.6892071901711 e-6\n", - "Patience: 9 / 20\n", - "Last checkpoint at: Epoch 460 \n", - "\n", - "Epoch number 480\n", - "Cost: 1824.4561668875258 e-6\n", - "Cost on validation_set: 1897.5317781401236 e-6\n", - "Patience: 13 / 20\n", - "Last checkpoint at: Epoch 460 \n", - "\n", - "\n", - " Early stopping at epoch 489 , difference: 5.850276021763991e-07\n", - "Cost: 0.0018202092466052882\n", - "Cost on valdiation_set: 0.0018940789727927294\n", - "INFO:tensorflow:Restoring parameters from ./rnn_model_lstm_leaky_relu_[100,100,100]c/rnn_basic\n", - "\n", - "\n", - "State of last checkpoint checkpoint at epoch 460 restored\n", - "Performance at last checkpoint is 0.6962442317938289 % better\n", - "\n", - "\n", - "Model saved in at: ./rnn_model_lstm_leaky_relu_[100,100,100]c/rnn_basic\n", - 
"Model saved at: ./rnn_model_lstm_leaky_relu_[100,100,100]c/rnn_basic\n", - "Remaining data saved as: rnn_model_lstm_leaky_relu_[100,100,100]c.pkl\n" - ] - } - ], + "outputs": [], "source": [ - "rnn.fit(minibatches, epochs = 5000, print_step=10, validation_input = test_input, validation_output= test_target)\n", - "full_save(rnn)" + "#rnn.fit(minibatches, epochs = 5000, print_step=10, validation_input = test_input, validation_output= test_target)\n", + "#full_save(rnn)" ] }, { @@ -1087,18 +999,7 @@ "metadata": { "scrolled": false }, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYsAAAEKCAYAAADjDHn2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzt3Xt8VPWd//HX58wlCQSQS/ACIlCxilyiDYjgj0qtitt6+VmtF7bq1l+1tRZXf1Wxbm2XrbVuL1Yfdddq6/a3Xau2tbqsYr3grbVWCUptuahAQVNQbpKES5K5fH5/zEkYkkkmQCYTkvfz8ZhHZr5zzpnvGYZ5z/f7Ped7zN0RERHpSFDsCoiISM+nsBARkbwUFiIikpfCQkRE8lJYiIhIXgoLERHJS2EhIiJ5KSxERCQvhYWIiOQVLXYFusqwYcN89OjRxa6GiMgBZcmSJZvdvSLfcr0mLEaPHk11dXWxqyEickAxs3WdWU7dUCIikpfCQkRE8lJYiIhIXr1mzEJEdkskEtTU1NDQ0FDsqkgPUVpaysiRI4nFYvu0vsJCpBeqqalhwIABjB49GjMrdnWkyNydLVu2UFNTw5gxY/ZpG+qGEumFGhoaGDp0qIJCADAzhg4dul8tTYWFSC+loJBs+/t5UFg0bofnboUanaMhItKePh8WH9bVwUv/yht/XFTsqoj0KuXl5cWuQodeeOEF/vCHP+z1etXV1cydOzfvctOnT9+Xau23b3/72wXZbp8PCywCQFMiUeSKiEh36igskslku+tVVVVx11135d3+vgRRV1BYFEgQCQ8IS6eLWxGRPmDdunWccsopTJo0iVNOOYV3330XgF/96ldMmDCByZMnM3PmTACWLVvG1KlTqaysZNKkSbzzzjtttvfggw8yceJEJkyYwI033thSXl5ezs0338zkyZOZNm0aH3zwwR7rrV27lnvuuYc77riDyspKfve733HZZZdx3XXXMWvWLG688UZee+01pk+fznHHHcf06dN56623gEzIfPrTnwbgm9/8Jp///Oc5+eSTGTt27B4h0tyyeuGFFzj55JM577zzOProo5kzZw7uDsDChQs5+uijOemkk5g7d27LdrO19z7813/9V0v5lVdeSSqVYt68eezatYvKykrmzJmzb/9I7ejzh84GkTAv06niVkSkQP75f5axfH1dl25z/GED+caZx+71eldffTWXXHIJl156Kffffz9z587lscceY/78+Tz11FOMGDGCbdu2AXDPPfdwzTXXMGfOHJqamkil9vw/un79em688UaWLFnC4MGDOe2003jsscc455xz2LFjB9OmTePWW2/lhhtu4L777uOf/umfWtYdPXo0X/ziFykvL+erX/0qAD/96U95++23efbZZ4lEItTV1fHSSy8RjUZ59tln+drXvsYjjzzSZp9WrlzJ888/T319PR/96Ef50pe+1OZchjfeeINly5Zx2GGHMWPGDF5++WWqqqq48soreemllxgzZgwXXXRRzvcs1/uwYsUKHn74YV5++WVisRhXXXUVDzzwAN/5znf40Y9+xNKlS/f63yafPt+yiIQtC3eFhUihvfLKK1x88cUAfO5zn+P3v/89ADNmzOCyyy7jvvvuawmFE088kW9/+9vcfvvtrFu3jrKysj22tXjxYk4++WQqKiqIRqPMmTOHl156CYB4PN7yK/1jH/sYa9eu7VT9zj//fCKRTNd0bW0t559/PhMmTODaa69l2bJlOdf51Kc+RUlJCcOGDWP48OFtWjEAU6dOZeTIkQRBQGVlJWvXrmXlypWMHTu25byH9sIi1/uwaNEilixZwpQpU6isrGTRokWsWbOmU/u4r/p8yyISbe6GUlhI77QvLYDu0nw45z333MOrr77KE088QWVlJUuXLuXiiy/mhBNO4IknnuD000/nJz/5CZ/4xCda1m3uysklFou1bDsSiXQ4BpGtf//+Lfe//vWvM2vWLB599FHWrl3LySefnHOdkpKSlvvtvVauZTqqf7Zc74O7c+mll3Lbbbd1ahtdQS2LQGEh0l2mT5/OQw89BMADDzzASSedBMDq1as54YQTmD9/PsOGDeO9995jzZo1jB07lrlz53LWWWfx5ptv7rGtE044gRdffJHNmzeTSqV48MEH+fjHP97pugwYMID6+vp2n6+trWXEiBEA/OxnP9vLPc3v6KOPZs2aNS2tnocffjjncrneh1NOOYVf//rXbNy4EYCtW7eybl1mpvFYLEaiAAfsFDQszGy2mb1lZqvMbF6O579oZn82s6Vm9nszG5/13E3hem+Z2emFqmNzk5N05355iEjn7Ny5k5EjR7bcfvCDH3DXXXfxH//xH0yaNImf//zn3HnnnQBcf/31LQPVM2fOZPLkyTz88MNMmDCByspKVq5cySWXXLLH9g899FBuu+02Zs2axeTJkzn++OM5++yzO12/M888k0cffbRlgLu1G264gZtuuokZM2a0GS/pCmVlZfzbv/0bs2fP5qSTTuLggw9m0KBBbZbL9T6MHz+eb33rW5x22mlMmjSJU089lQ0bNgBwxRVXMGnSpC4f4LbONoX2esNmEeBt4FSgBlgMXOTuy7OWGejudeH9s4Cr3H12GBoPAlOBw4BngaO8g4GFqqoq39eLHyW/MZjqkZcw7Qt37tP6Ij3NihUrOOaYY4pdDclj+/btlJeX4+58+ctfZty4cVx77bUFe71cnwszW+LuVfnWLWTLYiqwyt3XuHsT8BCwR+w3B0WoP9CcXGcDD7l7o7v/FVgVbq8g0gTgOnRWRLrXfffdR2VlJcceeyy1tbVceeWVxa5Suwo5wD0CeC/rcQ1wQuuFzOzLwHVAHGgevRoB/LHVuiMKU01IEYCOhhKRbnbttdcWtCXRlQrZssg1a1WbPi93v9vdPwLcCDQfCN2pdc3sCjOrNrPqTZs27X
NF0wSYBrhFRNpVyLCoAQ7PejwSWN/B8g8B5+zNuu5+r7tXuXtVRUXFPlc0pW4oEZEOFTIsFgPjzGyMmcWBC4EF2QuY2bish58Cms/nXwBcaGYlZjYGGAe8VqiKpi3A1A0lItKugo1ZuHvSzK4GngIiwP3uvszM5gPV7r4AuNrMPgkkgA+BS8N1l5nZL4HlQBL4ckdHQu2vNIHOsxAR6UBBz7Nw94XufpS7f8Tdbw3LbgmDAne/xt2PdfdKd5/l7suy1r01XO+j7v5kIeuZ1gC3SJfrrVOUQ2Yiwl/84hddUo8f/vCH7Ny5s0u2VUh9/gxuyIxZmMYsRPoUhcXeUVgQHg2lloVIwfXkKco3bdrEZz7zGaZMmcKUKVN4+eWXAXjxxReprKyksrKS4447jvr6eubNm8fvfvc7KisrueOOO/bY9oYNG5g5cyaVlZVMmDCh5ezwp59+mhNPPJHjjz+e888/n+3bt3PXXXexfv16Zs2axaxZs7rujS6Agp3B3d325wzuv/3zUWwYOImqa3/dxbUSKY49ztR9ch68/+eufYFDJsIZ3+lwkfLycrZv375H2Zlnnsl5553XMkX5ggULeOyxx5g4cSK//e1vW6YoP+igg/jKV77CtGnT9piaO3vm2fXr1zNt2rQ9piifO3cu55xzDmbGggULOPPMM7nhhhsYOHDgHlOUQ+ZaFNlTlF988cVcddVVnHTSSbz77rucfvrprFixgjPPPJN58+YxY8YMtm/fTmlpKb///e/53ve+x+OPP95mv7///e/T0NDAzTffTCqVYufOnTQ2NnLuuefy5JNP0r9/f26//XYaGxu55ZZbGD16NNXV1QwbNmxf/zU6bX/O4O7zs84CuLqhRLrFK6+8wm9+8xsgM0X5DTfcAOyeovyzn/0s5557LpCZmvvWW2+lpqaGc889l3Hjxu2xrewpyoGWKcrPOeecNlOUP/PMM3nr9uyzz7J8ectsRNTV1VFfX8+MGTO47rrrmDNnDueeey4jR47scDtTpkzh85//PIlEgnPOOYfKykpefPFFli9fzowZMwBoamrixBNP7Mxb1mMoLNDRUNLL5WkBFFNPmqI8nU7zyiuvtLluxrx58/jUpz7FwoULmTZtGs8++2yH25k5cyYvvfQSTzzxBJ/73Oe4/vrrGTx4MKeeeioPPvhg3nr0VBqzIDzPArUsRAqtJ09Rftppp/GjH/2o5XHz1eZWr17NxIkTufHGG6mqqmLlypUdTm++bt06hg8fzhe+8AUuv/xyXn/9daZNm8bLL7/MqlWrgMyMvG+//XbOevRUCgsgTUQD3CJd7ECbovyuu+6iurqaSZMmMX78eO655x4gc7RS8+B7WVkZZ5xxBpMmTSIajTJ58uQ2A9wvvPBCy2D4I488wjXXXENFRQU/+9nPuOiii5g0aRLTpk1j5cqVQGZK8TPOOEMD3N1lfwa4V//LcdSXHEzlDb/t4lqJFIemKJdceuoU5QeMFBECtSxERNqlsABcc0OJiHRIYUHzRIIa4Jbepbd0MUvX2N/Pg8ICcA1wSy9TWlrKli1bFBgCZIJiy5YtlJaW7vM2dJ4FmZZFRIfOSi8ycuRIampq2J+LgknvUlpamveEwo4oLGg+gzv/STsiB4pYLMaYMWOKXQ3pRdQNBbjpaCgRkY4oLAiPhlI3lIhIuxQWNLcsFBYiIu1RWNDcslA3lIhIexQWqGUhIpKPwgKNWYiI5KOwQC0LEZF8FBYAFhBozEJEpF0KC8KWhbqhRETaVdCwMLPZZvaWma0ys3k5nr/OzJab2ZtmtsjMjsh6LmVmS8PbgkLWU91QIiIdK9h0H2YWAe4GTgVqgMVmtsDdl2ct9gZQ5e47zexLwL8CF4TP7XL3ykLVL5tbhIi6oURE2lXIlsVUYJW7r3H3JuAhYI9rHrr78+6+M3z4R2DfZ7naD+qGEhHpWCHDYgTwXtbjmrCsPZcDT2Y9LjWzajP7o5mdk2sFM7siXKZ6v2bXDAKFhYhIBwo566zlKMs5ub6Z/T1QBXw8q3iUu683s7HAc2b2Z3dfvcfG3O8F7oXMNbj3vaZqWYiIdKSQLYsa4PCsxyOB9a0XMrNPAjcDZ7l7Y3O5u68P/64BXgCOK1RF3QIiGuAWEWlXIcNiMTDOzMaYWRy4ENjjqCYzOw74MZmg2JhVPtjMSsL7w4AZQPbAeNdSy0JEpEMF64Zy96SZXQ08BUSA+919mZnNB6rdfQHwXaAc+JWZAbzr7mcBxwA/NrM0mUD7TqujqLpWENGV8kREOlDQK+W5+0JgYauyW7Luf7Kd9f4ATCxk3fZ4PbUsREQ6pDO4AbMIUVNYiIi0R2EBEEQA8LROzBMRyUVhARBk3oZUMlnkioiI9EwKC2hpWaTSCgsRkVwUFgCWCYt0Ut1QIiK5KCxALQsRkTwUFtDSskil1LIQEclFYQFY89FQGuAWEclJYQEQZM5NTKUTRa6IiEjPpLCA3edZaIBbRCQnhQW7u6E0wC0ikpvCgsx0HwBpDXCLiOSksIDd3VAKCxGRnBQWgEUyb0Na3VAiIjkpLAAsPBpKLQsRkZwUFkAQaZ51Vi0LEZFcFBZkHQ2lQ2dFRHJSWJB1BrdaFiIiOSks2B0WaV38SEQkJ4UFYOF0H2pZiIjkprAgqxtKR0OJiOSksGD30VA6g1tEJLeChoWZzTazt8xslZnNy/H8dWa23MzeNLNFZnZE1nOXmtk74e3SgtYz7IbSSXkiIrkVLCwsM+HS3cAZwHjgIjMb32qxN4Aqd58E/Br413DdIcA3gBOAqcA3zGxwweoadkORThfqJUREDmiFbFlMBVa5+xp3bwIeAs7OXsDdn3f3neHDPwIjw/unA8+4+1Z3/xB4BphdqIq2dEOpZSEiklMhw2IE8F7W45qwrD2XA0/u47r7pbkbipTCQkQkl2gBt205yjzngmZ/D1QBH9+bdc3sCuAKgFGjRu1bLcme7kMD3CIiuRSyZVEDHJ71eCSwvvVCZvZJ4GbgLHdv3Jt13f1ed69y96qKiop9rujuAW6FhYhILoUMi8XAODMbY2Zx4EJgQfYCZnYc8GMyQbEx66mngNPMbHA4sH1aWFYQzS0LFBYiIjkVrBvK3ZNmdjWZL/kIcL+7LzOz+UC1uy8AvguUA78yM4B33f0sd99qZv9CJnAA5rv71kLVNYjoDG4RkY4UcswCd18ILGxVdkvW/U92sO79wP2Fq91uuycSVMtCRCQXncENRFpaFgoLEZFcFBZkd0MpLEREclFYoAFuEZF8FBZAEKhlISLSEYUFYJHM2+CusBARyUVhAUSap/tQy0JEJCeFBRCJqhtKRKQjCgvAwqOhTGEhIpKTwgKI6KQ8EZEOKSzY3Q2FBrhFRHJSWABBJJa5o5aFiEhOnQoLM/t5Z8oOVM3TfSgsRERy62zL4tjsB+H1tT/W9dUpjiAw0m7qhhIRaUeHYWFmN5lZPTDJzOrCWz2wEfjvbqlhNzAzUgTg6WJXRUSkR+owLNz9NncfAHzX3QeGtwHuPtTdb+qmOnaLNIG6oURE2tHZbqjHz
aw/ZK6XbWY/MLMjClivbpdpWSgsRERy6WxY/Duw08wmAzcA64D/LFitikAtCxGR9nU2LJLu7sDZwJ3uficwoHDV6n5pCzC1LEREcursZVXrzewm4HPA/wqPhooVrlrdTwPcIiLt62zL4gKgEfi8u78PjAC+W7BaFUEatSxERNrTqbAIA+IBYJCZfRpocPfeN2ahsBARyamzZ3B/FngNOB/4LPCqmZ1XyIp1tzQBllY3lIhILp3throZmOLul7r7JcBU4Ov5VjKz2Wb2lpmtMrN5OZ6faWavm1mydfiYWcrMloa3BZ2s5z5Ty0JEpH2dHeAO3H1j1uMt5D/7OwLcDZwK1ACLzWyBuy/PWuxd4DLgqzk2scvdKztZv/2WORpKLQsRkVw6Gxa/NbOngAfDxxcAC/OsMxVY5e5rAMzsITKH3raEhbuvDZ8r+rd0mogGuEVE2tFhWJjZkcDB7n69mZ0LnAQY8AqZAe+OjADey3pcA5ywF3UrNbNqIAl8x90f24t195rOsxARaV++lsUPga8BuPtvgN8AmFlV+NyZHaxrOcp8L+o2yt3Xm9lY4Dkz+7O7r97jBcyuAK4AGDVq1F5suq0UUYWFiEg78g1wj3b3N1sXuns1MDrPujXA4VmPRwLrO1sxd18f/l0DvAAcl2OZe929yt2rKioqOrvpnNKmbigRkfbkC4vSDp4ry7PuYmCcmY0xszhwIdCpo5rMbLCZlYT3hwEzyBrrKIS0RQjSyUK+hIjIAStfWCw2sy+0LjSzy4ElHa3o7kngauApYAXwS3dfZmbzzeyscDtTzKyGzPkbPzazZeHqxwDVZvYn4HkyYxaFDQsNcIuItCvfmMU/Ao+a2Rx2h0MVEAf+d76Nu/tCWh015e63ZN1fTKZ7qvV6fwAm5tt+V0pbhEBhISKSU4dh4e4fANPNbBYwISx+wt2fK3jNulnaIgQoLEREcunUeRbu/jyZ7qBeyy1CJN1Y7GqIiPRInZ3uo9dTN5SISPsUFqG0RRUWIiLtUFiEXGEhItIuhUXIAw1wi4i0R2ERSluUiFoWIiI5KSyaqWUhItIuhUXILaKWhYhIOxQWIQ+iRNSyEBHJSWER8iBChKJfg0lEpEdSWDQztSxERNqjsAh5oKOhRETao7BoFkTUshARaYfCoplFNWYhItIOhUWzIErMUuB7c5lwEZG+QWHRLMjM1p5KqStKRKQ1hUWzSASAZLKpyBUREel5FBbNmlsWyUSRKyIi0vMoLEIWhkVSYSEi0obColkYFumEwkJEpDWFRcgiYcsipbAQEWlNYdEsEgMgnUwWuSIiIj1PQcPCzGab2VtmtsrM5uV4fqaZvW5mSTM7r9Vzl5rZO+Ht0kLWE3aPWaTUshARaaNgYWFmEeBu4AxgPHCRmY1vtdi7wGXAL1qtOwT4BnACMBX4hpkNLlRdYXdYpDXALSLSRiFbFlOBVe6+xt2bgIeAs7MXcPe17v4mtJln43TgGXff6u4fAs8AswtY15Yxi1RK3VAiIq0VMixGAO9lPa4Jy7psXTO7wsyqzax606ZN+1xR2B0WalmIiLRVyLCwHGWdnXipU+u6+73uXuXuVRUVFXtVuTYvGGQGuFMa4BYRaaOQYVEDHJ71eCSwvhvW3SdBc8tCA9wiIm0UMiwWA+PMbIyZxYELgQWdXPcp4DQzGxwObJ8WlhXM7jELhYWISGsFCwt3TwJXk/mSXwH80t2Xmdl8MzsLwMymmFkNcD7wYzNbFq67FfgXMoGzGJgflhVMEJ5n4RrgFhFpI1rIjbv7QmBhq7Jbsu4vJtPFlGvd+4H7C1m/bC3dUBqzEBFpQ2dwh4Jo85iFwkJEpDWFRajl0NmUrmchItKawiIUjZUCkE40FrkmIiI9j8IiFC0pAyCd2FXkmoiI9DwKi1As3g9Qy0JEJBeFRShWkumG8kRDkWsiItLzKCxC8eZuqKRaFiIirSksQvGyTFigloWISBsKi1A8HoaFWhYiIm0oLELRWIyERyClsBARaU1hkSVBVC0LEZEcFBZZGi2OqWUhItKGwiJLghiBwkJEpA2FRZaExTDNDSUi0obCIkvC4gRphYWISGsKiyxJixNRN5SISBsKiyxJixNJKyxERFpTWGRJBnEC1zW4RURaU1hkSQVxohqzEBFpQ2GRJR3EibrCQkSkNYVFllSkhJhaFiIibSgssqSDODE0ZiEi0lpBw8LMZpvZW2a2yszm5Xi+xMweDp9/1cxGh+WjzWyXmS0Nb/cUsp7N0tFSSlxHQ4mItBYt1IbNLALcDZwK1ACLzWyBuy/PWuxy4EN3P9LMLgRuBy4In1vt7pWFql8u6dIhDPR60skkQbRgb42IyAGnkC2LqcAqd1/j7k3AQ8DZrZY5G/h/4f1fA6eYmRWwTh2y8uFEzKnftrFYVRAR6ZEKGRYjgPeyHteEZTmXcfckUAsMDZ8bY2ZvmNmLZva/CljPFtGBFQDUbt7QHS8nInLAKGRfS64WgndymQ3AKHffYmYfAx4zs2PdvW6Plc2uAK4AGDVq1H5XuHTQIQDs2KqwEBHJVsiWRQ1weNbjkcD69pYxsygwCNjq7o3uvgXA3ZcAq4GjWr+Au9/r7lXuXlVRUbHfFe4/JBMWDds+2O9tiYj0JoUMi8XAODMbY2Zx4EJgQatlFgCXhvfPA55zdzezinCAHDMbC4wD1hSwrgAMHHoYAIk6hYWISLaCdUO5e9LMrgaeAiLA/e6+zMzmA9XuvgD4KfBzM1sFbCUTKAAzgflmlgRSwBfdfWuh6trsoKHDSbmR3r6p0C8lInJAKejxoe6+EFjYquyWrPsNwPk51nsEeKSQdcslGo2y2QYR2bW5u19aRKRH0xncrdQFg4g1bCl2NUREehSFRSs7ooMpayp4j5eIyAFFYdFKQ8kQypMfFrsaIiI9isKilWTpUAZ5bbGrISLSoygsWvF+FZSzi6ZdO4pdFRGRHkNh0UowIHNy37bNfytyTUREeg6FRSslQzInnX+4YW1R6yEi0pMoLFoZPCIzq8iO91cVuSYiIj2HwqKVQ0aNI+1GYnPBZxcRETlgKCxaKS3rxwc2lGjtumJXRUSkx1BY5LAlPoKBO9YWuxoiIj2GwiKH2qGVjEmsomGHzrcQEQGFRU79PnoyUUuzpvrpYldFRKRHUFjkMK7qVLb4IOIvfx9PJYpdHRGRolNY5FBePoC/TLyRI5tWsOzH/4Anm4pdJRGRolJYtGPmZ67i6YrLmLDxf1j9vU/wwbKXil0lEZGiUVi0w8w49aofsuiYbzFs1xoO/tWZrPjBp3n/z8+De7GrJyLSrRQWHTAzTrngKzRc/SbPHPJ/OLT2dQ555BzW3jaFFY/fSWpXXbGrKCLSLcx7ya/kqqoqr66uLuhrfLBlC28+fg+j//oQ43iXHZSy+pC/49BPfJGKo04o6GuLiBSCmS1x96q8yyks9l4imaL65adJvHo/U3a8QJk18df4UewcfwGHTz2bgYeN65Z6iIjsL4VFN6nZsIGVT93HmLW/5CO8B8D6yGG8P/QEIodOZPDYj3HoUccTKxvY
7XUTEclHYdHNEskUK5a9wealTzLwby9yVOMyBtpOANJu7LRSwFhfMobtAz6C9xsGkTgWjUO0hCBWykFjjqO0fDDxsnJKyvoTBBFKBw7FIrGi7ZeI9G49IizMbDZwJxABfuLu32n1fAnwn8DHgC3ABe6+NnzuJuByIAXMdfenOnqtYodFa02JFO+ufYvN7yyhaf2bBA21kGpkaN1yDkmuZwA7iVo673YyQVNGI3HcjPrgIHYEAwjM8UgJHimBaCnJIE4QK4VoKRbLhE80XkYQLyOIlRKJl2LRUixWShDb/TcSPp+9fDReSjReSiQSwcy64d0SkWLpbFhEC1iBCHA3cCpQAyw2swXuvjxrscuBD939SDO7ELgduMDMxgMXAscChwHPmtlR7p4qVH27WjwW4chx4zly3Pg2z6XTTlMqzc5EgkRTI8mmRrbXbmHT6tdJN+0i3bgTT+zE0ynYsZmgqZ4g1Yilm+jX8AFxkriDNdUTS28hmm4ibk3EPUEJTZSQoNT2/8zzJo/QRJxGYjQRo8nipIiQtgAnCP9GWv66BeEtAln33QKwCB5kyrEIBBHcIgQ46UgJWEAQBCSjZVg6RUntGhqGTcRwYlvfpmnwOCyIQLIRSgfhu7Zi5RWkmxoIPIWVDiIoKcOiJSS3vgslAzAMTzYSKR+aeb1UAk8lAYgNGJY5mi0Swxu2Q9lBBP0Ooum91wksYOC46TTUbSKIlZJ0I2JO1CAo6UeyYQeRWAkAO977CyXDxzBg+BhSac/sRyTGzq3vQTpN47YNxPofxKDDxrH1r29y8DHTSCUSbH7nNYaMqaR+cw0Hj53Mzu3b2PiX5zls8mnsqN3IoOFHYAYfrF5KxZiJbN/6PuWDh1O36W8MHnEkqaYG0u7Ub67hiGOnE41GWb30JYaPPobaTes5ZOwEtn/4AbUbaxg+ejx1W95n2GGj2VG3jVSykZ21Wzj0yEp21W9h26b1jDiykl3bt7J1wzoOPXIy27e+z0EHj2brhtWUDhhCQ/1WBgw9jLpNNQw+ZDR1m//l0tIbAAAKOUlEQVTGoIOPILGrnm0b32PY4UeTTuxiR91Wyoccyo4PP2DAsJFYEJBq2E5TYwORWAnxfgNINmQuWRwt7b/nB849vKV333A8ncLTadLpFOm0E+s3ADyNRUvyf4jdIfsHT7IJovH9/r/RFxWsZWFmJwLfdPfTw8c3Abj7bVnLPBUu84qZRYH3gQpgXvay2cu193o9rWVRDOm005hMsyuRYkdDgp0NO2nYtZNEwy4STbsg0UA60QjJBjzZAImGzJdvKvybbCRINWCpRizZSJAKb+lGglQTkVQjeBLzNKRTmKcy/2k9hWX9xdMYzWXpzJc5aQJPE4TlAZkbQKk3YjjglNKUCRCMEst8sTd5hLhlfickPELMUiQ96FTLTAon+98g+34qDNdGYqSI0I+GlvIGKyHuCQwnaVEipDCcwJ3AOvddlHYjjZGwGFGSpIiQJErz2gZESRLxFDFLsZNSAAJSlJKglvJwOQ9vQHi/uSQdnlUQIZX1bBB+SttvbXv4SR7ADnZRRpIIEVIEpEkRwfCWbTtGnAQOJImStChkfuLgYf06ep1s7/f7KBOv/22n3r/Wit6yAEZAOOKbUQO0Pr60ZRl3T5pZLTA0LP9jq3VHFK6qvUMQGGXxCGXxCEP6x4H+edfpadJpJ5l2YgZb62qJl5bRLx7nw/o6Ssr6EQ0ibPpwM4MGD2XTti306z8At4DtdVtp2rWTpoYdDDnkCBKNu0gmEpT2K2f7tk14OkUkGieIxkglm9ixbTP9Bg4hmWigtN8gEju20bRjK1Y6kGSiiYbaLZQNPphU0y4CnGT4/zaxfSsl5UNINjWAGRaJ0bjtfcAJggA8jacSmRZYMkGkpJxUQy14mqB0EKkdmzCLYP2GkKrbQBAtIZ1owKIxPFaO139AUDoATyfwVIpIST9SO2uxIEI62YBF4pBOgkWIlA0gUlJO4/srAcPipXiiEYv3wxvqsNJBmRZeYz3E+0GiASs7KPM4lYBUE8T6YQbeUA/x/plf8431UDYYmrbjsXIssQMicUjsgHg5NNZBfADWWIvH+kMQhYZaPFaGBVGsqR6P9cca64A0HisP/3FTWGoXBDGcANKJlhZmpsXZ+maZr87weQvCL8jtH4BFId0EQTzzAyaV2KMBkSKKRWI0eUBJKtOScQsy44RN28PWhmWSpfl0s7Cs+UcQOG7RTHnY4jF2/0CxrC/szCKOe+ZrPB0pwZK7Ml/8QRQnwNJJdodSZgseKQEyP76CdPa0QrsjrLVcIZIaNCrv/639VciwyBW/rfeyvWU6sy5mdgVwBcCoUYV/s6TwgsCIh18KQwYPbikffNBBLfcrhh+c+VtxcEtZedlhrba0e/mhQ4bkeCUd3iyyNwp5BncNcHjW45HA+vaWCbuhBgFbO7ku7n6vu1e5e1VFRUUXVl1ERLIVMiwWA+PMbIyZxckMWC9otcwC4NLw/nnAc54ZRFkAXGhmJWY2hszPwNcKWFcREelAwbqhwjGIq4GnyBw6e7+7LzOz+UC1uy8Afgr83MxWkWlRXBiuu8zMfgksB5LAlw+kI6FERHobnZQnItKHdfZoKM06KyIieSksREQkL4WFiIjkpbAQEZG8es0At5ltAtbtxyaGAZu7qDoHEu1339JX9xv67r7n2+8j3D3viWq9Jiz2l5lVd+aIgN5G+9239NX9hr6771213+qGEhGRvBQWIiKSl8Jit3uLXYEi0X73LX11v6Hv7nuX7LfGLEREJC+1LEREJK8+HxZmNtvM3jKzVWY2r9j16Wpmdr+ZbTSzv2SVDTGzZ8zsnfDv4LDczOyu8L1408yOL17N952ZHW5mz5vZCjNbZmbXhOW9er8BzKzUzF4zsz+F+/7PYfkYM3s13PeHw5mgCWd2fjjc91fNbHQx67+/zCxiZm+Y2ePh416/32a21sz+bGZLzaw6LOvyz3qfDous64SfAYwHLgqv/92b/AyY3apsHrDI3ccBi8LHkHkfxoW3K4B/76Y6drUk8H/d/RhgGvDl8N+1t+83QCPwCXefDFQCs81sGpnr298R7vuHwOXh8pcDH7r7kcAd4XIHsmuAFVmP+8p+z3L3yqxDZLv+s958KcC+eANOBJ7KenwTcFOx61WA/RwN/CXr8VvAoeH9Q4G3wvs/Bi7KtdyBfAP+Gzi1D+53P+B1Mpcz3gxEw/KWzz2ZSwicGN6PhstZseu+j/s7Mvxi/ATwOJkrbvaF/V4LDGtV1uWf9T7dsiD3dcL7wrW+D3b3DQDh3+Fhea97P8LuheOAV+kj+x12xSwFNgLPAKuBbe6eDBfJ3r+WfQ+frwWGdm+Nu8wPgRug5ULZQ+kb++3A02a2JLzUNBTgs17Ia3AfCDp1re8+pFe9H2ZWDjwC/KO715nl2r3MojnKDtj99syFwirN7CDgUeCYXIuFf3vFvpvZp4GN7r7EzE5uLs6xaK/a79AMd19vZsOBZ8xsZQfL7vN+9/WWRaeu9d0LfWBmhwKEfzeG5b3m/TCzGJmgeMD
dfxMW9/r9zubu24AXyIzbHBRe5x723L+WfQ+fH0TmqpUHmhnAWWa2FniITFfUD+n9+427rw//biTz42AqBfis9/Ww6Mx1wnuj7GufX0qmT7+5/JLwiIlpQG1zU/ZAYpkmxE+BFe7+g6ynevV+A5hZRdiiwMzKgE+SGfB9nsx17qHtvje/J+cBz3nYmX0gcfeb3H2ku48m8//4OXefQy/fbzPrb2YDmu8DpwF/oRCf9WIPzhT7Bvwd8DaZft2bi12fAuzfg8AGIEHmV8XlZPpmFwHvhH+HhMsamaPDVgN/BqqKXf993OeTyDSt3wSWhre/6+37He7LJOCNcN//AtwSlo8FXgNWAb8CSsLy0vDxqvD5scXehy54D04GHu8L+x3u35/C27Lm77BCfNZ1BreIiOTV17uhRESkExQWIiKSl8JCRETyUliIiEheCgsREclLYSGyF8wsFc7u2XzrspmKzWy0Zc0OLNKT9PXpPkT21i53ryx2JUS6m1oWIl0gvKbA7eG1JF4zsyPD8iPMbFF47YBFZjYqLD/YzB4NrzvxJzObHm4qYmb3hdeieDo8C1uk6BQWInunrFU31AVZz9W5+1TgR2TmJSK8/5/uPgl4ALgrLL8LeNEz1504nszZt5C5zsDd7n4ssA34TIH3R6RTdAa3yF4ws+3uXp6jfC2Ziw6tCScxfN/dh5rZZjLXC0iE5RvcfZiZbQJGuntj1jZGA8945oI1mNmNQMzdv1X4PRPpmFoWIl3H27nf3jK5NGbdT6FxRekhFBYiXeeCrL+vhPf/QGYWVIA5wO/D+4uAL0HLxYoGdlclRfaFfrWI7J2y8Cp0zX7r7s2Hz5aY2atkfoRdFJbNBe43s+uBTcA/hOXXAPea2eVkWhBfIjM7sEiPpDELkS4QjllUufvmYtdFpBDUDSUiInmpZSEiInmpZSEiInkpLEREJC+FhYiI5KWwEBGRvBQWIiKSl8JCRETy+v+6HLRK//x40QAAAABJRU5ErkJggg==\n", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "#Plot the loss\n", "def plot_loss_list(loss_list = rnn.loss_list, loss_validation = rnn.loss_validation):\n", @@ -1109,7 +1010,7 @@ " plt.ylabel(\"Cost\")\n", " plt.show()\n", "\n", - "plot_loss_list(rnn.loss_list)" + "#plot_loss_list(rnn.loss_list)" ] }, { @@ -1123,11 +1024,21 @@ "cell_type": "code", "execution_count": 22, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Restoring parameters from ./rnn_model_lstm_leaky_relu_[150,150,150]c/rnn_basic\n", + "Model succesfully loaded\n", + "Minibatches (=training data) and test_input and test_target in data loaded\n" + ] + } + ], "source": [ "folder = get_rnn_folder(ncells = ncells, cell_type = cell_type, activation = activation)\n", - "#rnn, data = full_load(folder)\n", - "#minibatches, test_input, test_target = data" + "rnn, data = full_load(folder)\n", + "minibatches, test_input, test_target = data" ] }, { @@ -1189,15 +1100,15 @@ "name": "stdout", "output_type": "stream", "text": [ - "[[ 0.22876946 -1.05757492 0.05743555]\n", - " [ 0.4227805 -1.49878024 -0.45964226]\n", - " [ 1.57940168 -0.0964699 -0.29063407]\n", - " [ 8.23204061 -7.21131909 0.2346999 ]\n", - " [ 0.63359657 -1.23937142 0.23865187]\n", - " [ 3.5459507 -2.39105333 0.56137755]\n", - " [ 0.0832766 -0.32882895 0.49288811]\n", - " [ 0. 0. 0. ]]\n", - "Loss on test set: 0.0018604315\n" + "[[-3.18470338e-01 -1.90207179e-01 -1.40898075e-03]\n", + " [-1.59599343e+00 1.97988971e-01 -1.11545922e+00]\n", + " [-7.32249507e-02 4.83462188e-01 6.15174259e-01]\n", + " [ 3.13725194e+00 -6.94178227e-01 -3.67514878e+00]\n", + " [ 5.84199409e-01 4.98911750e-01 -4.66140907e-02]\n", + " [ 1.32929954e+00 5.03890275e-01 -7.67828468e-02]\n", + " [ 4.41919712e-01 -4.32863707e-01 6.22717514e-02]\n", + " [ 0.00000000e+00 0.00000000e+00 0.00000000e+00]]\n", + "Loss on test set: 0.0017876212\n" ] } ], @@ -1219,11 +1130,261 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 26, "metadata": { "scrolled": true }, "outputs": [], + "source": [ + "tset_matched = pd.read_pickle('matched_and_unmatched_8hittracks.pkl')\n", + "#test = pd.read_pickle('matched_and_unmatched_8hittracks2.pkl')\n", + "#tset_matched\n", + "#test" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "tset_matched = np.array(tset_matched)\n", + "tset_matched = tset_matched.astype('float32')\n", + "truth = tset_matched[:,-1]\n", + "tset_matched = scaler(reshapor(tset_matched[:,:-1]), scalerfunc = func, scalor= scalor)\n", + "#print(reshapor_inv(tset_matched).shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "tset_matched = reshapor_inv(tset_matched)" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [], + "source": [ + "#print(tset_matched.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "def tracks_to_particle(tset_matched, truth):\n", + " start = 0\n", + " start_points = [0]\n", + " converse = False\n", + " \n", + " if len(tset_matched.shape) == 3:\n", + " tset_matched = reshapor_inv(tset_matched)\n", + " converse = True\n", + " \n", + " for track in range(tset.shape[0]-1):\n", + " \n", + " for coord in range(12):\n", + " \n", + " if tset_matched[track, coord] != tset_matched[track+1, coord]:\n", 
+ " start = track + 1\n", + " \n", + " if start != start_points[-1]:\n", + " start_points.append(start)\n", + "\n", + " num_part = len(start_points)\n", + "\n", + " particle_tracks = []\n", + " track_truth = []\n", + "\n", + " if converse:\n", + " tset_matched = reshapor(tset_matched)\n", + " \n", + " for particle in range(num_part-1):\n", + " particle_tracks.append(reshapor(tset_matched[start_points[particle]:start_points[particle+1]]))\n", + " track_truth.append(truth[start_points[particle]:start_points[particle+1]])\n", + " \n", + " \n", + " return particle_tracks, track_truth" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "particle_tracks, track_truth = tracks_to_particle(tset_matched= tset_matched, truth= truth)" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "#print(particle_tracks[11])\n", + "#print(track_truth[11])" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "#particle_tracks[1][1]" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [], + "source": [ + "#num_particles = len(particle_tracks)\n", + "#num_tracks = len(particle_tracks[1])" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "def find_best_tracks(particle_tracks):\n", + "\n", + " generated_truth_list = []\n", + " loss_list = []\n", + " num_particles = len(particle_tracks)\n", + " \n", + " for particle in range(num_particles):\n", + " \n", + " num_tracks = len(particle_tracks[particle])\n", + " min_loss = 10\n", + " part_loss_list = np.zeros((num_tracks))\n", + " truth = np.zeros((num_tracks))\n", + " \n", + " for track in range(num_tracks):\n", + " inputt = np.zeros((1,7,3))\n", + " inputt[0,:,:] = particle_tracks[particle][track][:-1,:]\n", + " \n", + " true_pred = np.zeros((1,7,3))\n", + " true_pred[0,:,:] = particle_tracks[particle][track][1:,:]\n", + " loss = rnn.sess.run(rnn.cost, feed_dict={rnn.X:inputt, rnn.Y:true_pred})\n", + " if loss < min_loss:\n", + " min_loss = loss\n", + " part_loss_list[track] += loss\n", + " \n", + " #print(min_loss)\n", + " minIndex = np.where(part_loss_list == min_loss)[0]\n", + " truth[minIndex] += 1\n", + " generated_truth_list.append(truth)\n", + " loss_list.append(part_loss_list)\n", + " #print(minIndex)\n", + " \n", + " return generated_truth_list, loss_list" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [], + "source": [ + "#generated_truth, loss_list = find_best_tracks(particle_tracks=particle_tracks)\n", + "#print(generated_truth[1])" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [], + "source": [ + "def check_accuracy(generated_truth, track_truth= track_truth):\n", + " \n", + " num_particles = len(track_truth)\n", + " correct_list = []\n", + "\n", + " for particle in range(num_particles):\n", + " correct = True\n", + " num_tracks = len(particle_tracks[particle])\n", + " \n", + " for track in range(num_tracks):\n", + " if track_truth[particle][track] != generated_truth[particle][track]:\n", + " correct = False\n", + " \n", + " if correct:\n", + " correct_list.append(particle)\n", + " \n", + " accuracy = len(correct_list)/num_particles\n", + " \n", + " print(\"The right track was chosen:\", accuracy*100, \"% of the time\")\n", + " print(len(correct_list), \"particles correctly assigned to 
their path\")\n", + "\n", + " return correct_list\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [], + "source": [ + "#correct_list = check_accuracy(generated_truth)\n", + "generated_truth = pkl.load( open(\"generated_truth_\" + folder[2:-10] +\".pkl\" , \"rb\" ) )" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "4425\n" + ] + } + ], + "source": [ + "#Count tracks that have no 8track path\n", + "\n", + "num_particles = len(track_truth)\n", + "\n", + "counter = 0\n", + "\n", + "for particle in range(num_particles):\n", + " \n", + " if generated_truth[particle].all() == 0:\n", + " counter +=1\n", + " \n", + "print(counter)" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [], + "source": [ + "#pkl.dump( generated_truth, open(\"generated_truth_\" + folder[2:-10] +\".pkl\" , \"wb\" ) )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [] }, { @@ -1235,6 +1396,752 @@ }, { "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "def compare_Truth_to_Gen_truth(track_truth, generated_truth, loss_list):\n", + " \n", + "\n", + " for particle in range(15, 30):\n", + " print()\n", + " print(\"Particle: \", particle)\n", + " \n", + " num_tracks = len(particle_tracks[particle])\n", + " \n", + " for track in range(num_tracks):\n", + " print(\"Truth: \", track_truth[particle][track])\n", + " print(\"Gen_truth: \", generated_truth[particle][track])\n", + " print(\"Loss: \", loss_list[particle][track])\n" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[-0.71172575 0.69559685 0.27218047]\n", + " [-0.64037859 0.73127269 0.26876878]\n", + " [-0.43078365 0.90224016 0.24776119]\n", + " [-0.38153891 0.92340426 0.25317404]]\n", + "0.0\n" + ] + } + ], + "source": [ + "particle_start_array = np.zeros((num_particles,4,3))\n", + "\n", + "def create_track_exist_truth(particle_start_array, track_truth):\n", + " \n", + "\n", + " for particle in range(num_particles):\n", + " particle_start_array[particle,:,:] += particle_tracks[particle][0][:4,:]\n", + "\n", + " print(particle_start_array[11,:,:])\n", + "\n", + " track_exist_truth = np.zeros((num_particles))\n", + "\n", + " for particle in range(num_particles):\n", + " correct = False\n", + " num_tracks = len(track_truth[particle])\n", + " \n", + " for track in range(num_tracks):\n", + " if track_truth[particle][track] == 1:\n", + " correct = True\n", + " \n", + " if correct:\n", + " track_exist_truth[particle] += 1\n", + " \n", + " print(track_exist_truth[11])\n", + " \n", + " return track_exist_truth\n", + "\n", + "track_exist_truth = create_track_exist_truth(particle_start_array=particle_start_array, track_truth=track_truth)" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": {}, + "outputs": [], + "source": [ + "#Input: a = 3d array, b = 1d array\n", + "\n", + "def unison_shuffled_copies2(a, b):\n", + " assert a.shape[0] == b.shape[0]\n", + " p = np.random.permutation(a.shape[0])\n", + " return a[p,:,:], b[p]\n", + "\n", + "def create_random_sets2(particle_start_array= particle_start_array, track_exist_truth= track_exist_truth, train_to_total_ratio= 0.9):\n", + " #shuffle the dataset\n", + " num_examples = 
particle_start_array.shape[0]\n", + "    particle_start_array, track_exist_truth = unison_shuffled_copies2(particle_start_array, track_exist_truth)\n", + "    \n", + "    #evaluate size of training and test set and initialize them\n", + "    train_set_size = np.int(num_examples*train_to_total_ratio)\n", + "    test_set_size = num_examples - train_set_size\n", + "    \n", + "    train_part_start = np.zeros((train_set_size, particle_start_array.shape[1], particle_start_array.shape[2]))\n", + "    train_track_e_tr = np.zeros((train_set_size))\n", + "    test_part_start = np.zeros((test_set_size, particle_start_array.shape[1], particle_start_array.shape[2]))\n", + "    test_track_e_tr = np.zeros((test_set_size))\n", + "    \n", + "\n", + "    #fill train and test sets\n", + "    for i in range(num_examples):\n", + "        if train_set_size > i:\n", + "            train_part_start[i,:,:] += particle_start_array[i,:,:]\n", + "            train_track_e_tr[i] += track_exist_truth[i]\n", + "        else:\n", + "            test_part_start[i - train_set_size,:,:] += particle_start_array[i,:,:]\n", + "            test_track_e_tr[i - train_set_size] += track_exist_truth[i]\n", + "    \n", + "    return train_part_start, train_track_e_tr, test_part_start, test_track_e_tr" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[ 0.16609355  0.91964991 -0.27518795]\n", + " [ 0.31077846  0.97898671 -0.26426422]\n", + " [ 0.75010406  0.65960674 -0.20298509]\n", + " [ 0.83411023  0.55392795 -0.18595964]] 1.0\n" + ] + } + ], + "source": [ + "X_train, Y_train, X_test, Y_test = create_random_sets2()\n", + "print(X_test[1], Y_test[1])" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "_________________________________________________________________\n", + "Layer (type)                 Output Shape              Param #   \n", + "=================================================================\n", + "lstm_1 (LSTM)                (None, 4, 10)             560       \n", + "_________________________________________________________________\n", + "batch_normalization_1 (Batch (None, 4, 10)             40        \n", + "_________________________________________________________________\n", + "lstm_2 (LSTM)                (None, 10)                840       \n", + "_________________________________________________________________\n", + "batch_normalization_2 (Batch (None, 10)                40        \n", + "_________________________________________________________________\n", + "dense_1 (Dense)              (None, 100)               1100      \n", + "_________________________________________________________________\n", + "dense_2 (Dense)              (None, 1)                 101       \n", + "=================================================================\n", + "Total params: 2,681\n", + "Trainable params: 2,641\n", + "Non-trainable params: 40\n", + "_________________________________________________________________\n", + "None\n", + "Train on 15057 samples, validate on 1673 samples\n", + "Epoch 1/500\n", + "15057/15057 [==============================] - 13s 844us/step - loss: 0.5526 - acc: 0.7584 - val_loss: 0.5406 - val_acc: 0.7639\n", + "Epoch 2/500\n", + "15057/15057 [==============================] - 9s 619us/step - loss: 0.5449 - acc: 0.7638 - val_loss: 0.5408 - val_acc: 0.7639\n", + "Epoch 3/500\n", + "15057/15057 [==============================] - 9s 620us/step - loss: 0.5437 - acc: 0.7638 - val_loss: 0.5416 - val_acc: 0.7639\n", + "Epoch 4/500\n", + "15057/15057 [==============================] - 9s 582us/step - loss: 0.5435 - acc: 0.7638 - val_loss: 0.5403 - val_acc: 0.7639\n", + "Epoch 5/500\n", + 
"15057/15057 [==============================] - 10s 632us/step - loss: 0.5429 - acc: 0.7638 - val_loss: 0.5419 - val_acc: 0.7639\n", + "Epoch 6/500\n", + "15057/15057 [==============================] - 9s 608us/step - loss: 0.5425 - acc: 0.7638 - val_loss: 0.5392 - val_acc: 0.7639\n", + "Epoch 7/500\n", + "15057/15057 [==============================] - 9s 612us/step - loss: 0.5424 - acc: 0.7638 - val_loss: 0.5405 - val_acc: 0.7639\n", + "Epoch 8/500\n", + "15057/15057 [==============================] - 9s 615us/step - loss: 0.5416 - acc: 0.7638 - val_loss: 0.5409 - val_acc: 0.7639\n", + "Epoch 9/500\n", + "15057/15057 [==============================] - 9s 616us/step - loss: 0.5420 - acc: 0.7638 - val_loss: 0.5391 - val_acc: 0.7639\n", + "Epoch 10/500\n", + "15057/15057 [==============================] - 9s 596us/step - loss: 0.5414 - acc: 0.7638 - val_loss: 0.5413 - val_acc: 0.7639\n", + "Epoch 11/500\n", + "15057/15057 [==============================] - 10s 648us/step - loss: 0.5411 - acc: 0.7638 - val_loss: 0.5396 - val_acc: 0.7639\n", + "Epoch 12/500\n", + "15057/15057 [==============================] - 9s 607us/step - loss: 0.5413 - acc: 0.7638 - val_loss: 0.5407 - val_acc: 0.7639\n", + "Epoch 13/500\n", + "15057/15057 [==============================] - 9s 609us/step - loss: 0.5409 - acc: 0.7638 - val_loss: 0.5390 - val_acc: 0.7639\n", + "Epoch 14/500\n", + "15057/15057 [==============================] - 9s 617us/step - loss: 0.5405 - acc: 0.7638 - val_loss: 0.5402 - val_acc: 0.7639\n", + "Epoch 15/500\n", + "15057/15057 [==============================] - 10s 634us/step - loss: 0.5407 - acc: 0.7638 - val_loss: 0.5388 - val_acc: 0.7639\n", + "Epoch 16/500\n", + "15057/15057 [==============================] - 10s 635us/step - loss: 0.5401 - acc: 0.7638 - val_loss: 0.5377 - val_acc: 0.7639\n", + "Epoch 17/500\n", + "15057/15057 [==============================] - 9s 579us/step - loss: 0.5399 - acc: 0.7638 - val_loss: 0.5372 - val_acc: 0.7639\n", + "Epoch 18/500\n", + "15057/15057 [==============================] - 9s 601us/step - loss: 0.5402 - acc: 0.7638 - val_loss: 0.5386 - val_acc: 0.7639\n", + "Epoch 19/500\n", + "15057/15057 [==============================] - 10s 639us/step - loss: 0.5399 - acc: 0.7638 - val_loss: 0.5376 - val_acc: 0.7639\n", + "Epoch 20/500\n", + "15057/15057 [==============================] - 9s 604us/step - loss: 0.5398 - acc: 0.7638 - val_loss: 0.5376 - val_acc: 0.7639\n", + "Epoch 21/500\n", + "15057/15057 [==============================] - 9s 604us/step - loss: 0.5402 - acc: 0.7638 - val_loss: 0.5386 - val_acc: 0.7639\n", + "Epoch 22/500\n", + "15057/15057 [==============================] - 9s 628us/step - loss: 0.5396 - acc: 0.7638 - val_loss: 0.5410 - val_acc: 0.7639\n", + "Epoch 23/500\n", + "15057/15057 [==============================] - 9s 624us/step - loss: 0.5396 - acc: 0.7638 - val_loss: 0.5392 - val_acc: 0.7639\n", + "Epoch 24/500\n", + "15057/15057 [==============================] - 9s 625us/step - loss: 0.5395 - acc: 0.7638 - val_loss: 0.5382 - val_acc: 0.7639\n", + "Epoch 25/500\n", + "15057/15057 [==============================] - 9s 629us/step - loss: 0.5395 - acc: 0.7638 - val_loss: 0.5379 - val_acc: 0.7639\n", + "Epoch 26/500\n", + "15057/15057 [==============================] - 9s 628us/step - loss: 0.5391 - acc: 0.7638 - val_loss: 0.5369 - val_acc: 0.7639\n", + "Epoch 27/500\n", + "15057/15057 [==============================] - 9s 606us/step - loss: 0.5388 - acc: 0.7638 - val_loss: 0.5394 - val_acc: 0.7639\n", + "Epoch 28/500\n", + 
"15057/15057 [==============================] - 9s 600us/step - loss: 0.5389 - acc: 0.7638 - val_loss: 0.5406 - val_acc: 0.7639\n", + "Epoch 29/500\n", + "15057/15057 [==============================] - 9s 587us/step - loss: 0.5385 - acc: 0.7638 - val_loss: 0.5390 - val_acc: 0.7639\n", + "Epoch 30/500\n", + "15057/15057 [==============================] - 9s 627us/step - loss: 0.5389 - acc: 0.7638 - val_loss: 0.5394 - val_acc: 0.7639\n", + "Epoch 31/500\n", + "15057/15057 [==============================] - 9s 610us/step - loss: 0.5384 - acc: 0.7638 - val_loss: 0.5405 - val_acc: 0.7639\n", + "Epoch 32/500\n", + "15057/15057 [==============================] - 9s 625us/step - loss: 0.5385 - acc: 0.7638 - val_loss: 0.5384 - val_acc: 0.7639\n", + "Epoch 33/500\n", + "15057/15057 [==============================] - 9s 577us/step - loss: 0.5377 - acc: 0.7638 - val_loss: 0.5397 - val_acc: 0.7639\n", + "Epoch 34/500\n", + "15057/15057 [==============================] - 9s 618us/step - loss: 0.5382 - acc: 0.7638 - val_loss: 0.5413 - val_acc: 0.7639\n", + "Epoch 35/500\n", + "15057/15057 [==============================] - 9s 614us/step - loss: 0.5378 - acc: 0.7638 - val_loss: 0.5390 - val_acc: 0.7639\n", + "Epoch 36/500\n", + "15057/15057 [==============================] - 9s 610us/step - loss: 0.5377 - acc: 0.7638 - val_loss: 0.5380 - val_acc: 0.7639\n", + "Epoch 37/500\n", + "15057/15057 [==============================] - 10s 640us/step - loss: 0.5375 - acc: 0.7638 - val_loss: 0.5408 - val_acc: 0.7639\n", + "Epoch 38/500\n", + "15057/15057 [==============================] - 9s 588us/step - loss: 0.5376 - acc: 0.7638 - val_loss: 0.5401 - val_acc: 0.7639\n", + "Epoch 39/500\n", + "15057/15057 [==============================] - 10s 637us/step - loss: 0.5363 - acc: 0.7638 - val_loss: 0.5446 - val_acc: 0.7639\n", + "Epoch 40/500\n", + "15057/15057 [==============================] - 9s 618us/step - loss: 0.5367 - acc: 0.7638 - val_loss: 0.5400 - val_acc: 0.7639\n", + "Epoch 41/500\n", + "15057/15057 [==============================] - 11s 753us/step - loss: 0.5367 - acc: 0.7638 - val_loss: 0.5392 - val_acc: 0.7639\n", + "Epoch 42/500\n", + "15057/15057 [==============================] - 12s 783us/step - loss: 0.5356 - acc: 0.7638 - val_loss: 0.5452 - val_acc: 0.7639\n", + "Epoch 43/500\n", + "15057/15057 [==============================] - 11s 753us/step - loss: 0.5356 - acc: 0.7638 - val_loss: 0.5418 - val_acc: 0.7639\n", + "Epoch 44/500\n", + "15057/15057 [==============================] - 12s 767us/step - loss: 0.5353 - acc: 0.7638 - val_loss: 0.5392 - val_acc: 0.7639\n", + "Epoch 45/500\n", + "15057/15057 [==============================] - 11s 763us/step - loss: 0.5354 - acc: 0.7638 - val_loss: 0.5397 - val_acc: 0.7639\n", + "Epoch 46/500\n", + "15057/15057 [==============================] - 11s 741us/step - loss: 0.5346 - acc: 0.7638 - val_loss: 0.5435 - val_acc: 0.7639\n", + "Epoch 47/500\n", + "15057/15057 [==============================] - 11s 759us/step - loss: 0.5344 - acc: 0.7638 - val_loss: 0.5407 - val_acc: 0.7639\n", + "Epoch 48/500\n", + "15057/15057 [==============================] - 12s 773us/step - loss: 0.5343 - acc: 0.7637 - val_loss: 0.5413 - val_acc: 0.7639\n", + "Epoch 49/500\n", + "15057/15057 [==============================] - 11s 754us/step - loss: 0.5336 - acc: 0.7638 - val_loss: 0.5419 - val_acc: 0.7639\n", + "Epoch 50/500\n", + "15057/15057 [==============================] - 12s 782us/step - loss: 0.5337 - acc: 0.7638 - val_loss: 0.5349 - val_acc: 0.7639\n", + "Epoch 51/500\n" + ] 
+ }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "15057/15057 [==============================] - 12s 770us/step - loss: 0.5338 - acc: 0.7638 - val_loss: 0.5374 - val_acc: 0.7639\n", + "Epoch 52/500\n", + "15057/15057 [==============================] - 12s 773us/step - loss: 0.5330 - acc: 0.7638 - val_loss: 0.5371 - val_acc: 0.7639\n", + "Epoch 53/500\n", + "15057/15057 [==============================] - 12s 775us/step - loss: 0.5321 - acc: 0.7638 - val_loss: 0.5370 - val_acc: 0.7639\n", + "Epoch 54/500\n", + "15057/15057 [==============================] - 12s 765us/step - loss: 0.5321 - acc: 0.7638 - val_loss: 0.5296 - val_acc: 0.7639\n", + "Epoch 55/500\n", + "15057/15057 [==============================] - 11s 745us/step - loss: 0.5307 - acc: 0.7638 - val_loss: 0.5358 - val_acc: 0.7639\n", + "Epoch 56/500\n", + "15057/15057 [==============================] - 11s 698us/step - loss: 0.5296 - acc: 0.7638 - val_loss: 0.5308 - val_acc: 0.7639\n", + "Epoch 57/500\n", + "15057/15057 [==============================] - 12s 791us/step - loss: 0.5296 - acc: 0.7638 - val_loss: 0.5303 - val_acc: 0.7639\n", + "Epoch 58/500\n", + "15057/15057 [==============================] - 11s 745us/step - loss: 0.5281 - acc: 0.7638 - val_loss: 0.5307 - val_acc: 0.7639\n", + "Epoch 59/500\n", + "15057/15057 [==============================] - 11s 754us/step - loss: 0.5271 - acc: 0.7638 - val_loss: 0.5373 - val_acc: 0.7639\n", + "Epoch 60/500\n", + "15057/15057 [==============================] - 11s 754us/step - loss: 0.5269 - acc: 0.7638 - val_loss: 0.5294 - val_acc: 0.7639\n", + "Epoch 61/500\n", + "15057/15057 [==============================] - 11s 759us/step - loss: 0.5278 - acc: 0.7638 - val_loss: 0.5314 - val_acc: 0.7639\n", + "Epoch 62/500\n", + "15057/15057 [==============================] - 11s 731us/step - loss: 0.5255 - acc: 0.7638 - val_loss: 0.5341 - val_acc: 0.7639\n", + "Epoch 63/500\n", + "15057/15057 [==============================] - 11s 711us/step - loss: 0.5253 - acc: 0.7638 - val_loss: 0.5335 - val_acc: 0.7639\n", + "Epoch 64/500\n", + "15057/15057 [==============================] - 11s 757us/step - loss: 0.5256 - acc: 0.7638 - val_loss: 0.5340 - val_acc: 0.7639\n", + "Epoch 65/500\n", + "15057/15057 [==============================] - 11s 715us/step - loss: 0.5249 - acc: 0.7638 - val_loss: 0.5279 - val_acc: 0.7639\n", + "Epoch 66/500\n", + "15057/15057 [==============================] - 10s 667us/step - loss: 0.5244 - acc: 0.7638 - val_loss: 0.5304 - val_acc: 0.7639\n", + "Epoch 67/500\n", + "15057/15057 [==============================] - 11s 699us/step - loss: 0.5242 - acc: 0.7638 - val_loss: 0.5316 - val_acc: 0.7639\n", + "Epoch 68/500\n", + "15057/15057 [==============================] - 11s 749us/step - loss: 0.5224 - acc: 0.7638 - val_loss: 0.5336 - val_acc: 0.7639\n", + "Epoch 69/500\n", + "15057/15057 [==============================] - 11s 729us/step - loss: 0.5241 - acc: 0.7638 - val_loss: 0.5362 - val_acc: 0.7639\n", + "Epoch 70/500\n", + "15057/15057 [==============================] - 11s 733us/step - loss: 0.5220 - acc: 0.7638 - val_loss: 0.5322 - val_acc: 0.7639\n", + "Epoch 71/500\n", + "15057/15057 [==============================] - 11s 761us/step - loss: 0.5217 - acc: 0.7638 - val_loss: 0.5278 - val_acc: 0.7639\n", + "Epoch 72/500\n", + "15057/15057 [==============================] - 11s 725us/step - loss: 0.5227 - acc: 0.7638 - val_loss: 0.5290 - val_acc: 0.7639\n", + "Epoch 73/500\n", + "15057/15057 [==============================] - 11s 750us/step - loss: 
0.5232 - acc: 0.7638 - val_loss: 0.5322 - val_acc: 0.7639\n", + "Epoch 74/500\n", + "15057/15057 [==============================] - 11s 727us/step - loss: 0.5223 - acc: 0.7638 - val_loss: 0.5343 - val_acc: 0.7639\n", + "Epoch 75/500\n", + "15057/15057 [==============================] - 11s 723us/step - loss: 0.5210 - acc: 0.7638 - val_loss: 0.5329 - val_acc: 0.7639\n", + "Epoch 76/500\n", + "15057/15057 [==============================] - 11s 744us/step - loss: 0.5209 - acc: 0.7638 - val_loss: 0.5327 - val_acc: 0.7639\n", + "Epoch 77/500\n", + "15057/15057 [==============================] - 11s 715us/step - loss: 0.5215 - acc: 0.7638 - val_loss: 0.5358 - val_acc: 0.7639\n", + "Epoch 78/500\n", + "15057/15057 [==============================] - 10s 690us/step - loss: 0.5214 - acc: 0.7638 - val_loss: 0.5292 - val_acc: 0.7639\n", + "Epoch 79/500\n", + "15057/15057 [==============================] - 10s 690us/step - loss: 0.5197 - acc: 0.7638 - val_loss: 0.5287 - val_acc: 0.7639\n", + "Epoch 80/500\n", + "15057/15057 [==============================] - 11s 740us/step - loss: 0.5208 - acc: 0.7638 - val_loss: 0.5315 - val_acc: 0.7639\n", + "Epoch 81/500\n", + "15057/15057 [==============================] - 11s 720us/step - loss: 0.5204 - acc: 0.7638 - val_loss: 0.5313 - val_acc: 0.7639\n", + "Epoch 82/500\n", + "15057/15057 [==============================] - 11s 706us/step - loss: 0.5192 - acc: 0.7638 - val_loss: 0.5277 - val_acc: 0.7639\n", + "Epoch 83/500\n", + "15057/15057 [==============================] - 10s 678us/step - loss: 0.5189 - acc: 0.7638 - val_loss: 0.5321 - val_acc: 0.7639\n", + "Epoch 84/500\n", + "15057/15057 [==============================] - 10s 681us/step - loss: 0.5191 - acc: 0.7638 - val_loss: 0.5258 - val_acc: 0.7639\n", + "Epoch 85/500\n", + "15057/15057 [==============================] - 10s 684us/step - loss: 0.5202 - acc: 0.7638 - val_loss: 0.5232 - val_acc: 0.7639\n", + "Epoch 86/500\n", + "15057/15057 [==============================] - 11s 716us/step - loss: 0.5195 - acc: 0.7638 - val_loss: 0.5231 - val_acc: 0.7639\n", + "Epoch 87/500\n", + "15057/15057 [==============================] - 11s 731us/step - loss: 0.5194 - acc: 0.7638 - val_loss: 0.5297 - val_acc: 0.7639\n", + "Epoch 88/500\n", + "15057/15057 [==============================] - 11s 742us/step - loss: 0.5188 - acc: 0.7638 - val_loss: 0.5254 - val_acc: 0.7639\n", + "Epoch 89/500\n", + "15057/15057 [==============================] - 11s 720us/step - loss: 0.5197 - acc: 0.7638 - val_loss: 0.5249 - val_acc: 0.7639\n", + "Epoch 90/500\n", + "15057/15057 [==============================] - 12s 771us/step - loss: 0.5173 - acc: 0.7638 - val_loss: 0.5258 - val_acc: 0.7639\n", + "Epoch 91/500\n", + "15057/15057 [==============================] - 11s 742us/step - loss: 0.5178 - acc: 0.7637 - val_loss: 0.5333 - val_acc: 0.7639\n", + "Epoch 92/500\n", + "15057/15057 [==============================] - 11s 722us/step - loss: 0.5191 - acc: 0.7637 - val_loss: 0.5324 - val_acc: 0.7639\n", + "Epoch 93/500\n", + "15057/15057 [==============================] - 11s 756us/step - loss: 0.5173 - acc: 0.7638 - val_loss: 0.5207 - val_acc: 0.7639\n", + "Epoch 94/500\n", + "15057/15057 [==============================] - 11s 744us/step - loss: 0.5188 - acc: 0.7637 - val_loss: 0.5307 - val_acc: 0.7639\n", + "Epoch 95/500\n", + "15057/15057 [==============================] - 12s 775us/step - loss: 0.5174 - acc: 0.7640 - val_loss: 0.5255 - val_acc: 0.7639\n", + "Epoch 96/500\n", + "15057/15057 [==============================] - 11s 
759us/step - loss: 0.5171 - acc: 0.7638 - val_loss: 0.5247 - val_acc: 0.7639\n", + "Epoch 97/500\n", + "15057/15057 [==============================] - 12s 788us/step - loss: 0.5163 - acc: 0.7636 - val_loss: 0.5239 - val_acc: 0.7639\n", + "Epoch 98/500\n", + "15057/15057 [==============================] - 11s 740us/step - loss: 0.5179 - acc: 0.7638 - val_loss: 0.5213 - val_acc: 0.7639\n", + "Epoch 99/500\n", + "15057/15057 [==============================] - 11s 751us/step - loss: 0.5164 - acc: 0.7639 - val_loss: 0.5270 - val_acc: 0.7633\n", + "Epoch 100/500\n", + "15057/15057 [==============================] - 11s 748us/step - loss: 0.5174 - acc: 0.7639 - val_loss: 0.5255 - val_acc: 0.7639\n", + "Epoch 101/500\n", + "15057/15057 [==============================] - 11s 717us/step - loss: 0.5176 - acc: 0.7639 - val_loss: 0.5296 - val_acc: 0.7639\n", + "Epoch 102/500\n", + "15057/15057 [==============================] - 11s 709us/step - loss: 0.5167 - acc: 0.7638 - val_loss: 0.5216 - val_acc: 0.7639\n", + "Epoch 103/500\n", + "15057/15057 [==============================] - 10s 654us/step - loss: 0.5167 - acc: 0.7638 - val_loss: 0.5215 - val_acc: 0.7639\n", + "Epoch 104/500\n", + "15057/15057 [==============================] - 10s 652us/step - loss: 0.5158 - acc: 0.7638 - val_loss: 0.5316 - val_acc: 0.7639\n", + "Epoch 105/500\n", + "15057/15057 [==============================] - 10s 682us/step - loss: 0.5173 - acc: 0.7638 - val_loss: 0.5247 - val_acc: 0.7639\n", + "Epoch 106/500\n", + "15057/15057 [==============================] - 11s 717us/step - loss: 0.5162 - acc: 0.7640 - val_loss: 0.5270 - val_acc: 0.7639\n", + "Epoch 107/500\n", + "15057/15057 [==============================] - 10s 680us/step - loss: 0.5181 - acc: 0.7638 - val_loss: 0.5245 - val_acc: 0.7639\n", + "Epoch 108/500\n", + "15057/15057 [==============================] - 10s 672us/step - loss: 0.5165 - acc: 0.7638 - val_loss: 0.5243 - val_acc: 0.7639\n", + "Epoch 109/500\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "15057/15057 [==============================] - 10s 693us/step - loss: 0.5165 - acc: 0.7638 - val_loss: 0.5233 - val_acc: 0.7639\n", + "Epoch 110/500\n", + "15057/15057 [==============================] - 11s 730us/step - loss: 0.5146 - acc: 0.7639 - val_loss: 0.5233 - val_acc: 0.7639\n", + "Epoch 111/500\n", + "15057/15057 [==============================] - 11s 745us/step - loss: 0.5175 - acc: 0.7638 - val_loss: 0.5218 - val_acc: 0.7639\n", + "Epoch 112/500\n", + "15057/15057 [==============================] - 11s 742us/step - loss: 0.5159 - acc: 0.7640 - val_loss: 0.5273 - val_acc: 0.7639\n", + "Epoch 113/500\n", + "15057/15057 [==============================] - 11s 729us/step - loss: 0.5159 - acc: 0.7635 - val_loss: 0.5263 - val_acc: 0.7639\n", + "Epoch 114/500\n", + "15057/15057 [==============================] - 11s 718us/step - loss: 0.5151 - acc: 0.7638 - val_loss: 0.5267 - val_acc: 0.7639\n", + "Epoch 115/500\n", + "15057/15057 [==============================] - 10s 643us/step - loss: 0.5147 - acc: 0.7637 - val_loss: 0.5234 - val_acc: 0.7639\n", + "Epoch 116/500\n", + "15057/15057 [==============================] - 10s 658us/step - loss: 0.5153 - acc: 0.7636 - val_loss: 0.5237 - val_acc: 0.7639\n", + "Epoch 117/500\n", + "15057/15057 [==============================] - 10s 642us/step - loss: 0.5152 - acc: 0.7639 - val_loss: 0.5268 - val_acc: 0.7639\n", + "Epoch 118/500\n", + "15057/15057 [==============================] - 10s 688us/step - loss: 0.5162 - acc: 0.7638 - val_loss: 
0.5269 - val_acc: 0.7639\n", + "Epoch 119/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5155 - acc: 0.7640 - val_loss: 0.5214 - val_acc: 0.7639\n", + "Epoch 120/500\n", + "15057/15057 [==============================] - 17s 1ms/step - loss: 0.5150 - acc: 0.7640 - val_loss: 0.5276 - val_acc: 0.7639\n", + "Epoch 121/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5145 - acc: 0.7639 - val_loss: 0.5233 - val_acc: 0.7633\n", + "Epoch 122/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5155 - acc: 0.7637 - val_loss: 0.5242 - val_acc: 0.7639\n", + "Epoch 123/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5150 - acc: 0.7636 - val_loss: 0.5187 - val_acc: 0.7639\n", + "Epoch 124/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5154 - acc: 0.7638 - val_loss: 0.5243 - val_acc: 0.7639\n", + "Epoch 125/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5142 - acc: 0.7639 - val_loss: 0.5222 - val_acc: 0.7633\n", + "Epoch 126/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5150 - acc: 0.7638 - val_loss: 0.5274 - val_acc: 0.7633\n", + "Epoch 127/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5147 - acc: 0.7640 - val_loss: 0.5239 - val_acc: 0.7633\n", + "Epoch 128/500\n", + "15057/15057 [==============================] - 15s 1ms/step - loss: 0.5130 - acc: 0.7640 - val_loss: 0.5318 - val_acc: 0.7639\n", + "Epoch 129/500\n", + "15057/15057 [==============================] - 15s 1ms/step - loss: 0.5137 - acc: 0.7640 - val_loss: 0.5235 - val_acc: 0.7639\n", + "Epoch 130/500\n", + "15057/15057 [==============================] - 15s 1ms/step - loss: 0.5146 - acc: 0.7637 - val_loss: 0.5338 - val_acc: 0.7639\n", + "Epoch 131/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5136 - acc: 0.7637 - val_loss: 0.5272 - val_acc: 0.7633\n", + "Epoch 132/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5150 - acc: 0.7639 - val_loss: 0.5219 - val_acc: 0.7639\n", + "Epoch 133/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5138 - acc: 0.7638 - val_loss: 0.5301 - val_acc: 0.7639\n", + "Epoch 134/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5150 - acc: 0.7638 - val_loss: 0.5260 - val_acc: 0.7639\n", + "Epoch 135/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5139 - acc: 0.7637 - val_loss: 0.5257 - val_acc: 0.7639\n", + "Epoch 136/500\n", + "15057/15057 [==============================] - 15s 1ms/step - loss: 0.5139 - acc: 0.7638 - val_loss: 0.5249 - val_acc: 0.7633\n", + "Epoch 137/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5135 - acc: 0.7640 - val_loss: 0.5251 - val_acc: 0.7645\n", + "Epoch 138/500\n", + "15057/15057 [==============================] - 14s 914us/step - loss: 0.5134 - acc: 0.7638 - val_loss: 0.5211 - val_acc: 0.7639\n", + "Epoch 139/500\n", + "15057/15057 [==============================] - 15s 980us/step - loss: 0.5136 - acc: 0.7636 - val_loss: 0.5215 - val_acc: 0.7639\n", + "Epoch 140/500\n", + "15057/15057 [==============================] - 15s 975us/step - loss: 0.5139 - acc: 0.7632 - val_loss: 0.5236 - val_acc: 0.7639\n", + "Epoch 141/500\n", + "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5138 - acc: 0.7634 - val_loss: 0.5243 
+ "Epoch 142/500\n",
+ "15057/15057 [==============================] - 16s 1ms/step - loss: 0.5120 - acc: 0.7641 - val_loss: 0.5228 - val_acc: 0.7639\n",
+ "[... epochs 143-152 elided ...]\n",
+ "Epoch 153/500\n",
+ "15057/15057 [==============================] - 13s 869us/step - loss: 0.5118 - acc: 0.7634 - val_loss: 0.5300 - val_acc: 0.7633\n",
+ "Accuracy: 76.39%\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Train a binary track classifier on sequences of 4 hits with 3 coordinates each\n",
+ "filepath = \"./keras_model_classifier_LSTM_40_LSTM_40.h5\"\n",
+ "\n",
+ "callbacks = [\n",
+ "    EarlyStopping(monitor='val_loss', patience=30, min_delta=0),\n",
+ "    ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True),\n",
+ "    History()\n",
+ "]\n",
+ "\n",
+ "# create the model: two stacked LSTM layers with batch normalization,\n",
+ "# followed by a dense head with a sigmoid output for binary classification\n",
+ "model = Sequential()\n",
+ "model.add(LSTM(10, return_sequences=True, input_shape=(4,3), activation='relu'))\n",
+ "model.add(BatchNormalization())\n",
+ "model.add(LSTM(10, return_sequences=False, activation='relu'))\n",
+ "model.add(BatchNormalization())\n",
+ "model.add(Dense(100, activation='relu'))\n",
+ "model.add(Dense(1, activation='sigmoid'))\n",
+ "model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
+ "print(model.summary())\n",
+ "model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=500, batch_size=50, callbacks=callbacks)\n",
+ "\n",
+ "# reload the best checkpoint and evaluate it on the test set\n",
+ "model = load_model(filepath)\n",
+ "scores = model.evaluate(X_test, Y_test, verbose=0)\n",
+ "print(\"Accuracy: %.2f%%\" % (scores[1]*100))"
+ ]
+ },
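+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Build a configurable RNN classifier\n",
+ "\n",
+ "#### keras_track_classifier()\n",
+ "\n",
+ "Description:\n",
+ "\n",
+ "Builds, trains and evaluates a recurrent binary classifier with one LSTM or GRU layer per entry of kncells (e.g. kncells=[40], kcelltype=['GRU'] gives a single 40-cell GRU layer); the best checkpoint (lowest val_loss) is reloaded before the final evaluation\n",
+ "\n",
+ "Arguments:\n",
+ "\n",
+ "- X_train, Y_train, X_test, Y_test: training and test data\n",
+ "- kncells: list with the number of cells per recurrent layer\n",
+ "- kcelltype: list with the cell type ('LSTM' or 'GRU') per layer\n",
+ "- activation: 'tanh' (default) or 'relu'\n",
+ "- input_shape: shape of one sample (default (4,3): 4 timesteps, 3 coordinates)\n",
+ "- dropout_rate: dropout applied after each recurrent layer (0 disables dropout, default 0.5)\n",
+ "- epochs, batch_size: training parameters\n",
+ "\n",
+ "Returns:\n",
+ "\n",
+ "- model: the trained Keras model restored from the best checkpoint"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def keras_track_classifier(X_train, Y_train, X_test, Y_test, kncells, kcelltype, activation='tanh',\n",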
+ "                           input_shape=(4,3), dropout_rate=0.5, epochs=500, batch_size=50):\n",
+ "    \n",
+ "    filepath = \"keras_classifier_\" + str(kncells) + str(kcelltype) + \".h5\"\n",
+ "    \n",
+ "    callbacks = [\n",
+ "        EarlyStopping(monitor='val_loss', patience=30, min_delta=0),\n",
+ "        ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True),\n",
+ "        History()\n",
+ "    ]\n",
+ "    \n",
+ "    if activation != 'relu' and activation != 'tanh':\n",
+ "        raise ValueError(\"Unknown activation function: {}\".format(activation))\n",
+ "    \n",
+ "    model = Sequential()\n",
+ "    \n",
+ "    for layer in range(len(kncells)):\n",
+ "        \n",
+ "        # only intermediate layers return the full sequence; the last recurrent\n",
+ "        # layer must return a 2D tensor so the Dense output layer receives one\n",
+ "        # feature vector per track instead of one per timestep\n",
+ "        return_seq = layer < len(kncells) - 1\n",
+ "        \n",
+ "        if layer == 0:\n",
+ "            if kcelltype[layer] == \"LSTM\":\n",
+ "                model.add(LSTM(kncells[layer], return_sequences=return_seq, input_shape=input_shape,\n",
+ "                               activation=activation))\n",
+ "            elif kcelltype[layer] == \"GRU\":\n",
+ "                model.add(GRU(kncells[layer], return_sequences=return_seq, input_shape=input_shape,\n",
+ "                              activation=activation))\n",
+ "            else:\n",
+ "                raise ValueError(\"Unknown celltype: {}\".format(kcelltype[layer]))\n",
+ "        else:\n",
+ "            if kcelltype[layer] == \"LSTM\":\n",
+ "                model.add(LSTM(kncells[layer], return_sequences=return_seq,\n",
+ "                               activation=activation))\n",
+ "            elif kcelltype[layer] == \"GRU\":\n",
+ "                model.add(GRU(kncells[layer], return_sequences=return_seq,\n",
+ "                              activation=activation))\n",
+ "            else:\n",
+ "                raise ValueError(\"Unknown celltype: {}\".format(kcelltype[layer]))\n",
+ "        \n",
+ "        if dropout_rate != 0:\n",
+ "            model.add(Dropout(dropout_rate))\n",
+ "    \n",
+ "    model.add(Dense(1, activation='sigmoid'))\n",
+ "    \n",
+ "    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
+ "    \n",
+ "    print(model.summary())\n",
+ "    \n",
+ "    # validation data is required so the callbacks can monitor val_loss\n",
+ "    model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=epochs,\n",
+ "              batch_size=batch_size, callbacks=callbacks)\n",
+ "    \n",
+ "    # reload the best checkpoint for the final evaluation\n",
+ "    model = load_model(filepath)\n",
+ "    \n",
+ "    scores = model.evaluate(X_test, Y_test, verbose=0)\n",
+ "    \n",
+ "    print(\"Accuracy: %.2f%%\" % (scores[1]*100))\n",
+ "    \n",
+ "    return model"
+ ]
+ },
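+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Apply keras_track_classifier() to the track data\n",
+ "\n",
+ "One recurrent layer per entry of kncells: here a 100-cell LSTM followed by a 50-cell GRU"
+ ]
+ },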
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m model = keras_track_classifier(X_train= X_train, Y_train= Y_train, X_test= X_test, Y_test= Y_test,\n\u001b[1;32m----> 5\u001b[1;33m kncells= kncells, kcelltype= kcelltype)\n\u001b[0m", + "\u001b[1;32m\u001b[0m in \u001b[0;36mkeras_track_classifier\u001b[1;34m(X_train, Y_train, X_test, Y_test, kncells, kcelltype, activation, input_shape, dropout_rate, epochs, batch_size)\u001b[0m\n\u001b[0;32m 53\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msummary\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 54\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 55\u001b[1;33m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mY_train\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mepochs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[1;33m=\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 56\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 57\u001b[0m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mload_model\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mc:\\users\\sa_li\\anaconda3\\envs\\rnn-tf-ker\\lib\\site-packages\\keras\\models.py\u001b[0m in \u001b[0;36mfit\u001b[1;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)\u001b[0m\n\u001b[0;32m 961\u001b[0m \u001b[0minitial_epoch\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0minitial_epoch\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 962\u001b[0m \u001b[0msteps_per_epoch\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0msteps_per_epoch\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 963\u001b[1;33m validation_steps=validation_steps)\n\u001b[0m\u001b[0;32m 964\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 965\u001b[0m def evaluate(self, x=None, y=None,\n", + "\u001b[1;32mc:\\users\\sa_li\\anaconda3\\envs\\rnn-tf-ker\\lib\\site-packages\\keras\\engine\\training.py\u001b[0m in \u001b[0;36mfit\u001b[1;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)\u001b[0m\n\u001b[0;32m 1628\u001b[0m \u001b[0msample_weight\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0msample_weight\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1629\u001b[0m \u001b[0mclass_weight\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mclass_weight\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1630\u001b[1;33m batch_size=batch_size)\n\u001b[0m\u001b[0;32m 1631\u001b[0m \u001b[1;31m# Prepare validation data.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# save the fitted scaler so the same normalization can be applied later\n",
+ "with open(\"scalor.pkl\", \"wb\") as f:\n",
+ "    pkl.dump(scalor, f)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "filepath = \"./keras_model_classifier_LSTM_40_LSTM_40.h5\"\n",
+ "\n",
+ "model.save(filepath)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = load_model(filepath)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "scores = model.evaluate(X_test, Y_test, verbose=0)\n",
+ "print(\"Accuracy: %.2f%%\" % (scores[1]*100))"
+ ]
+ },
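+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### XGBoost baseline\n",
+ "\n",
+ "XGBoost expects 2D input, so the (particlenumber, 4, 3) tracks are flattened back to (particlenumber, 12) with reshapor_inv() before fitting the gradient-boosted trees"
+ ]
+ },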
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# flatten the sequences back to 2D for XGBoost\n",
+ "X_train = reshapor_inv(X_train)\n",
+ "\n",
+ "X_test = reshapor_inv(X_test)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# fit the model on the training data\n",
+ "model = XGBClassifier(max_depth=5, n_estimators=1000, learning_rate=0.05).fit(X_train, Y_train, verbose=0)\n",
+ "\n",
+ "predictions = model.predict(X_test)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# count the tracks predicted as class 1\n",
+ "c = 0\n",
+ "\n",
+ "for prediction in predictions:\n",
+ "    if prediction == 1:\n",
+ "        c += 1\n",
+ "\n",
+ "print(c)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "predictions = [round(value) for value in predictions]\n",
+ "\n",
+ "# evaluate predictions\n",
+ "accuracy = accuracy_score(Y_test, predictions)\n",
+ "print(\"Accuracy: %.2f%%\" % (accuracy * 100.0))"
+ ]
+ },
+ {
+ "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],