diff --git a/1_to_1_multi_compact.ipynb b/1_to_1_multi_compact.ipynb deleted file mode 100644 index 151a3d6..0000000 --- a/1_to_1_multi_compact.ipynb +++ /dev/null @@ -1,145 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\users\\sa_li\\anaconda3\\envs\\rnn-tf-ker\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", - " from ._conv import register_converters as _register_converters\n" - ] - } - ], - "source": [ - "#Here i do all the preprocessing of my data and define my functions and the RNNPlacePrediction class\n", - "\n", - "exec(open(\"requiremements.py\").read())" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "timesteps = 7\n", - "future_steps = 1\n", - "ninputs = 3\n", - "num_output = 3\n", - "\n", - "#ncells as int or list of int\n", - "ncells = [50, 40, 30, 20, 10]\n", - "\n", - "cell_type = \"lstm\"\n", - "activation = \"leaky_relu\"" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "tf.reset_default_graph()\n", - "rnn = RNNPlacePrediction(time_steps=timesteps, future_steps=future_steps, ninputs=ninputs, \n", - " ncells=ncells, num_output=num_output, cell_type=cell_type, activation=activation)\n", - "rnn.set_cost_and_functions()" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Epoch number 0\n", - "Cost: 373770.60410824226 e-6\n", - "Patience: 0 / 200\n", - "Last checkpoint at: Epoch 0 \n", - "\n", - "\n", - "\n", - "Model saved in at: ./rnn_model_lstm_leaky_relu_[50,40,30,20,10]c/rnn_basic\n", - "Model saved at: ./rnn_model_lstm_leaky_relu_[50,40,30,20,10]c/rnn_basic\n", - "Remaining data saved as: rnn_model_lstm_leaky_relu_[50,40,30,20,10]c.pkl\n" - ] - } - ], - "source": [ - "rnn.fit(minibatches, epochs = 5, print_step=5)\n", - "full_save(rnn)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "#plot_loss_list(loss_list = rnn.loss_list)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "#folder = get_rnn_folder(ncells = ncells, cell_type = \"lstm\", activation = \"leaky_relu\")\n", - "#rnn, data = full_load(folder)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[[ 0.24988278 -16.92240567 -11.00905584]\n", - " [ -0.95585176 -19.82122722 -9.62447234]\n", - " [ 2.90237107 -13.03493918 -11.99622082]\n", - " [ -2.20826846 4.92884641 12.53874474]\n", - " [-20.9477203 13.2462497 -1.09616262]\n", - " [-31.69245226 0.34849761 4.28013375]\n", - " [ 0.24281463 5.55824599 3.57549133]]\n", - "Loss on test set: 0.17207867\n" - ] - } - ], - "source": [ - "test_pred, test_loss = rnn_test(rnn = rnn)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - 
"pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Requiremements.ipynb b/Requiremements.ipynb deleted file mode 100644 index 9b3135e..0000000 --- a/Requiremements.ipynb +++ /dev/null @@ -1,730 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "c:\\users\\sa_li\\anaconda3\\envs\\rnn-tf-ker\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", - " from ._conv import register_converters as _register_converters\n" - ] - } - ], - "source": [ - "import pandas as pd\n", - "import numpy as np\n", - "import matplotlib as mpl\n", - "import random\n", - "import math\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "import tensorflow as tf\n", - "from tensorflow.python.framework import ops\n", - "from sklearn import preprocessing\n", - "import pickle as pkl\n", - "from pathlib import Path" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "### Reshape original array into the shape (particlenumber, timesteps, input = coordinates)###\n", - "\n", - "def reshapor(arr_orig):\n", - " timesteps = int(arr_orig.shape[1]/3)\n", - " number_examples = int(arr_orig.shape[0])\n", - " arr = np.zeros((number_examples, timesteps, 3))\n", - " \n", - " for i in range(number_examples):\n", - " for t in range(timesteps):\n", - " arr[i,t,0:3] = arr_orig[i,3*t:3*t+3]\n", - " \n", - " return arr\n", - "\n", - "def reshapor_inv(array_shaped):\n", - " timesteps = int(array_shaped.shape[1])\n", - " num_examples = int(array_shaped.shape[0])\n", - " arr = np.zeros((num_examples, timesteps*3))\n", - " \n", - " for i in range(num_examples):\n", - " for t in range(timesteps):\n", - " arr[i,3*t:3*t+3] = array_shaped[i,t,:]\n", - " \n", - " return arr" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "### create the training set and the test set###\n", - "\n", - "def create_random_sets(dataset, train_to_total_ratio):\n", - " #shuffle the dataset\n", - " num_examples = dataset.shape[0]\n", - " p = np.random.permutation(num_examples)\n", - " dataset = dataset[p,:]\n", - " \n", - " #evaluate siye of training and test set and initialize them\n", - " train_set_size = np.int(num_examples*train_to_total_ratio)\n", - " test_set_size = num_examples - train_set_size\n", - " \n", - " train_set = np.zeros((train_set_size, dataset.shape[1]))\n", - " test_set = np.zeros((test_set_size, dataset.shape[1]))\n", - " \n", - "\n", - " #fill train and test sets\n", - " for i in range(num_examples):\n", - " if train_set_size > i:\n", - " train_set[i,:] += dataset[i,:]\n", - " else:\n", - " test_set[i - train_set_size,:] += dataset[i,:]\n", - " \n", - " return train_set, test_set" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "testset = pd.read_pickle('matched_8hittracks.pkl')\n", - "tset = np.array(testset)\n", - "tset = tset.astype('float32')\n", - "train_set, test_set = create_random_sets(tset, 0.99)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "#Normalize the data advanced version with scikit learn\n", - "\n", - "#set the 
transormation based on training set\n", - "def set_min_max_scaler(arr, feature_range= (-1,1)):\n", - " min_max_scalor = preprocessing.MinMaxScaler(feature_range=feature_range)\n", - " if len(arr.shape) == 3:\n", - " arr = reshapor(min_max_scalor.fit_transform(reshapor_inv(arr))) \n", - " else:\n", - " arr = min_max_scalor.fit_transform(arr)\n", - " return min_max_scalor\n", - "\n", - "min_max_scalor = set_min_max_scaler(train_set)\n", - "\n", - "\n", - "#transform data\n", - "def min_max_scaler(arr, min_max_scalor= min_max_scalor):\n", - " \n", - " if len(arr.shape) == 3:\n", - " if arr.shape[1] == 8:\n", - " arr = reshapor(min_max_scalor.transform(reshapor_inv(arr)))\n", - " else: \n", - " arr_ = np.zeros((arr.shape[0],24))\n", - " arr = reshapor_inv(arr)\n", - " arr_[:,:arr.shape[1]] += arr\n", - " arr = min_max_scalor.transform(arr_)[:,:arr.shape[1]]\n", - " arr = reshapor(arr)\n", - " \n", - " else:\n", - " if arr.shape[1] == 24:\n", - " arr = min_max_scalor.transform(arr)\n", - " else:\n", - " arr_ = np.zeros((arr.shape[0],24))\n", - " arr_[:,:arr.shape[1]] += arr\n", - " arr = min_max_scalor.transform(arr_)[:,:arr.shape[1]]\n", - " \n", - " return arr\n", - " \n", - "#inverse transformation\n", - "def min_max_scaler_inv(arr, min_max_scalor= min_max_scalor):\n", - " \n", - " if len(arr.shape) == 3:\n", - " if arr.shape[1] == 8:\n", - " arr = reshapor(min_max_scalor.inverse_transform(reshapor_inv(arr)))\n", - " else: \n", - " arr_ = np.zeros((arr.shape[0],24))\n", - " arr = reshapor_inv(arr)\n", - " arr_[:,:arr.shape[1]] += arr\n", - " arr = min_max_scalor.inverse_transform(arr_)[:,:arr.shape[1]]\n", - " arr = reshapor(arr)\n", - " \n", - " else:\n", - " if arr.shape[1] == 24:\n", - " arr = min_max_scalor.inverse_transform(arr)\n", - " else:\n", - " arr_ = np.zeros((arr.shape[0],24))\n", - " arr_[:,:arr.shape[1]] += arr\n", - " arr = min_max_scalor.nverse_transform(arr_)[:,:arr.shape[1]]\n", - " \n", - " return arr" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "#Normalize the data advanced version with scikit learn - Standard scaler\n", - "\n", - "#set the transormation based on training set\n", - "def set_std_scaler(arr):\n", - " std_scalor = preprocessing.StandardScaler()\n", - " if len(arr.shape) == 3:\n", - " arr = reshapor(std_scalor.fit(reshapor_inv(arr))) \n", - " else:\n", - " arr = std_scalor.fit(arr)\n", - " return std_scalor\n", - "\n", - "std_scalor = set_std_scaler(train_set)\n", - "\n", - "#transform data\n", - "def std_scaler(arr, std_scalor= std_scalor):\n", - " \n", - " if len(arr.shape) == 3:\n", - " if arr.shape[1] == 8:\n", - " arr = reshapor(std_scalor.transform(reshapor_inv(arr)))\n", - " else: \n", - " arr_ = np.zeros((arr.shape[0],24))\n", - " arr = reshapor_inv(arr)\n", - " arr_[:,:arr.shape[1]] += arr\n", - " arr = std_scalor.transform(arr_)[:,:arr.shape[1]]\n", - " arr = reshapor(arr)\n", - " \n", - " else:\n", - " if arr.shape[1] == 24:\n", - " arr = std_scalor.transform(arr)\n", - " else:\n", - " arr_ = np.zeros((arr.shape[0],24))\n", - " arr_[:,:arr.shape[1]] += arr\n", - " arr = std_scalor.transform(arr_)[:,:arr.shape[1]]\n", - " \n", - " return arr\n", - " \n", - "#inverse transformation\n", - "def std_scaler_inv(arr, std_scalor= std_scalor):\n", - " \n", - " if len(arr.shape) == 3:\n", - " if arr.shape[1] == 8:\n", - " arr = reshapor(std_scalor.inverse_transform(reshapor_inv(arr)))\n", - " else: \n", - " arr_ = np.zeros((arr.shape[0],24))\n", - " arr = reshapor_inv(arr)\n", - " 
arr_[:,:arr.shape[1]] += arr\n", - " arr = std_scalor.inverse_transform(arr_)[:,:arr.shape[1]]\n", - " arr = reshapor(arr)\n", - " \n", - " else:\n", - " if arr.shape[1] == 24:\n", - " arr = std_scalor.inverse_transform(arr)\n", - " else:\n", - " arr_ = np.zeros((arr.shape[0],24))\n", - " arr_[:,:arr.shape[1]] += arr\n", - " arr = std_scalor.inverse_transform(arr_)[:,:arr.shape[1]]\n", - " \n", - " return arr\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "train_set = reshapor(train_set)\n", - "test_set = reshapor(test_set)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "#Scale data either with MinMax scaler or with Standard scaler\n", - "#Return scalor if fit = True and and scaled array otherwise\n", - "\n", - "def scaler(arr, std_scalor= std_scalor, min_max_scalor= min_max_scalor, scalerfunc= \"std\"):\n", - " \n", - " if scalerfunc == \"std\":\n", - " arr = std_scaler(arr, std_scalor= std_scalor)\n", - " return arr\n", - " \n", - " elif scalerfunc == \"minmax\":\n", - " arr = min_max_scaler(arr, min_max_scalor= min_max_scalor)\n", - " return arr\n", - " \n", - " else:\n", - " raise ValueError(\"Uknown scaler chosen: {}\".format(scalerfunc))\n", - "\n", - "def scaler_inv(arr, std_scalor= std_scalor, min_max_scalor= min_max_scalor, scalerfunc= \"std\"):\n", - "\n", - " if scalerfunc == \"std\":\n", - " arr = std_scaler_inv(arr, std_scalor= std_scalor)\n", - " return arr\n", - " \n", - " elif scalerfunc == \"minmax\":\n", - " arr = min_max_scaler_inv(arr, min_max_scalor= std_scalor)\n", - " return arr\n", - " \n", - " else:\n", - " raise ValueError(\"Uknown scaler chosen: {}\".format(scalerfunc))" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "#scale the data\n", - "\n", - "func = \"minmax\"\n", - "\n", - "train_set = scaler(train_set, scalerfunc = func)\n", - "test_set = scaler(test_set, scalerfunc = func)\n", - "\n", - "if func == \"minmax\":\n", - " scalor = min_max_scalor\n", - "elif func == \"std\":\n", - " scalor = std_scalor\n", - "\n", - "#print(train_set[0,:,:])" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "###create random mini_batches###\n", - "\n", - "\n", - "def unison_shuffled_copies(a, b):\n", - " assert a.shape[0] == b.shape[0]\n", - " p = np.random.permutation(a.shape[0])\n", - " return a[p,:,:], b[p,:,:]\n", - "\n", - "def random_mini_batches(inputt, target, minibatch_size = 500):\n", - " \n", - " num_examples = inputt.shape[0]\n", - " \n", - " \n", - " #Number of complete batches\n", - " \n", - " number_of_batches = int(num_examples/minibatch_size)\n", - " minibatches = []\n", - " \n", - " #shuffle particles\n", - " _i, _t = unison_shuffled_copies(inputt, target)\n", - " #print(_t.shape)\n", - " \n", - " \n", - " for i in range(number_of_batches):\n", - " \n", - " minibatch_train = _i[minibatch_size*i:minibatch_size*(i+1), :, :]\n", - " \n", - " minibatch_true = _t[minibatch_size*i:minibatch_size*(i+1), :, :]\n", - " \n", - " minibatches.append((minibatch_train, minibatch_true))\n", - " \n", - " \n", - " minibatches.append((_i[number_of_batches*minibatch_size:, :, :], _t[number_of_batches*minibatch_size:, :, :]))\n", - " \n", - " \n", - " return minibatches\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "#Create random 
minibatches of train and test set with input and target array\n", - "\n", - "\n", - "minibatches = random_mini_batches(train_set[:,:-1,:], train_set[:,1:,:], minibatch_size = 1000)\n", - "#_train, _target = minibatches[0]\n", - "test_input, test_target = test_set[:,:-1,:], test_set[:,1:,:]\n", - "#print(train[0,:,:], target[0,:,:])" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "class RNNPlacePrediction():\n", - " \n", - " \n", - " def __init__(self, time_steps, future_steps, ninputs, ncells, num_output, cell_type=\"basic_rnn\", activation=\"relu\", scalor= scalor):\n", - " \n", - " self.nsteps = time_steps\n", - " self.future_steps = future_steps\n", - " self.ninputs = ninputs\n", - " self.ncells = ncells\n", - " self.num_output = num_output\n", - " self._ = cell_type #later used to create folder name\n", - " self.__ = activation #later used to create folder name\n", - " self.loss_list = []\n", - " self.scalor = scalor\n", - " \n", - " #### The input is of shape (num_examples, time_steps, ninputs)\n", - " #### ninputs is the dimentionality (number of features) of the time series (here coordinates)\n", - " self.X = tf.placeholder(dtype=tf.float32, shape=(None, self.nsteps, ninputs))\n", - " self.Y = tf.placeholder(dtype=tf.float32, shape=(None, self.nsteps, ninputs))\n", - "\n", - " \n", - " #Check if activation function valid and set activation\n", - " if self.__==\"relu\":\n", - " self.activation = tf.nn.relu\n", - " \n", - " elif self.__==\"tanh\":\n", - " self.activation = tf.nn.tanh\n", - " \n", - " elif self.__==\"leaky_relu\":\n", - " self.activation = tf.nn.leaky_relu\n", - " \n", - " elif self.__==\"elu\":\n", - " self.activation = tf.nn.elu\n", - " \n", - " else:\n", - " raise ValueError(\"Wrong rnn avtivation function: {}\".format(self.__))\n", - " \n", - " \n", - " \n", - " #Check if cell type valid and set cell_type\n", - " if self._==\"basic_rnn\":\n", - " self.cell_type = tf.contrib.rnn.BasicRNNCell\n", - " \n", - " elif self._==\"lstm\":\n", - " self.cell_type = tf.contrib.rnn.BasicLSTMCell\n", - " \n", - " elif self._==\"GRU\":\n", - " self.cell_type = tf.contrib.rnn.GRUCell\n", - " \n", - " else:\n", - " raise ValueError(\"Wrong rnn cell type: {}\".format(self._))\n", - " \n", - " \n", - " #Check Input of ncells \n", - " if (type(self.ncells) == int):\n", - " self.ncells = [self.ncells]\n", - " \n", - " if (type(self.ncells) != list):\n", - " raise ValueError(\"Wrong type of Input for ncells\")\n", - " \n", - " for _ in range(len(self.ncells)):\n", - " if type(self.ncells[_]) != int:\n", - " raise ValueError(\"Wrong type of Input for ncells\")\n", - " \n", - " self.activationlist = []\n", - " for _ in range(len(self.ncells)-1):\n", - " self.activationlist.append(self.activation)\n", - " self.activationlist.append(tf.nn.tanh)\n", - " \n", - " self.cell = tf.contrib.rnn.MultiRNNCell([self.cell_type(num_units=self.ncells[layer], activation=self.activationlist[layer])\n", - " for layer in range(len(self.ncells))])\n", - " \n", - " \n", - " #### I now define the output\n", - " self.RNNCell = tf.contrib.rnn.OutputProjectionWrapper(self.cell, output_size= num_output)\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " self.sess = tf.Session()\n", - " \n", - " def set_cost_and_functions(self, LR=0.001):\n", - " #### I define here the function that unrolls the RNN cell\n", - " self.output, self.state = tf.nn.dynamic_rnn(self.RNNCell, self.X, dtype=tf.float32)\n", - " #### I define the cost function as the 
mean_squared_error (distance of predicted point to target)\n", - " self.cost = tf.reduce_mean(tf.losses.mean_squared_error(self.Y, self.output)) \n", - " \n", - " #### the rest proceed as usual\n", - " self.train = tf.train.AdamOptimizer(LR).minimize(self.cost)\n", - " #### Variable initializer\n", - " self.init = tf.global_variables_initializer()\n", - " self.saver = tf.train.Saver()\n", - " self.sess.run(self.init)\n", - " \n", - " \n", - " def save(self, rnn_folder=\"./rnn_model/rnn_basic\"):\n", - " self.saver.save(self.sess, rnn_folder) \n", - " \n", - " \n", - " def load(self, filename=\"./rnn_model/rnn_basic\"):\n", - " self.saver.restore(self.sess, filename)\n", - "\n", - " \n", - " \n", - " def fit(self, minibatches, epochs, print_step, checkpoint = 5, patience = 200):\n", - " patience_cnt = 0\n", - " start = len(self.loss_list)\n", - " epoche_save = start\n", - " \n", - " folder = \"./rnn_model_\" + str(self._)+ \"_\" + self.__ + \"_\" + str(self.ncells).replace(\" \",\"\") + \"c\" + \"_checkpoint/rnn_basic\"\n", - " \n", - " for iep in range(start, start + epochs):\n", - " loss = 0\n", - " \n", - " batches = len(minibatches)\n", - " #Here I iterate over the batches\n", - " for batch in range(batches):\n", - " #### Here I train the RNNcell\n", - " #### The X is the time series, the Y is shifted by 1 time step\n", - " train, target = minibatches[batch]\n", - " self.sess.run(self.train, feed_dict={self.X:train, self.Y:target})\n", - " \n", - " \n", - " loss += self.sess.run(self.cost, feed_dict={self.X:train, self.Y:target})\n", - " \n", - " #Normalize loss over number of batches and scale it back before normaliziation\n", - " loss /= batches\n", - " self.loss_list.append(loss)\n", - " \n", - " #print(loss)\n", - " \n", - " #Here I create the checkpoint if the perfomance is better\n", - " if iep > 1 and iep%checkpoint == 0 and self.loss_list[iep] < self.loss_list[epoche_save]:\n", - " #print(\"Checkpoint created at epoch: \", iep)\n", - " self.save(folder)\n", - " epoche_save = iep\n", - " \n", - " #early stopping with patience\n", - " if iep > 1 and abs(self.loss_list[iep]-self.loss_list[iep-1]) < 1.5/10**7:\n", - " patience_cnt += 1\n", - " #print(\"Patience now at: \", patience_cnt, \" of \", patience)\n", - " \n", - " if patience_cnt + 1 > patience:\n", - " print(\"\\n\", \"Early stopping at epoch \", iep, \", difference: \", abs(self.loss_list[iep]-self.loss_list[iep-1]))\n", - " print(\"Cost: \",loss)\n", - " break\n", - " \n", - " #Note that the loss here is multiplied with 1000 for easier reading\n", - " if iep%print_step==0:\n", - " print(\"Epoch number \",iep)\n", - " print(\"Cost: \",loss*10**6, \"e-6\")\n", - " print(\"Patience: \",patience_cnt, \"/\", patience)\n", - " print(\"Last checkpoint at: Epoch \", epoche_save, \"\\n\")\n", - " \n", - " #Set model back to the last checkpoint if performance was better\n", - " if self.loss_list[epoche_save] < self.loss_list[iep]:\n", - " self.load(folder)\n", - " print(\"\\n\")\n", - " print(\"State of last checkpoint checkpoint at epoch \", epoche_save, \" restored\")\n", - " print(\"Performance at last checkpoint is \" ,(self.loss_list[iep] - self.loss_list[epoche_save])/self.loss_list[iep]*100, \"% better\" )\n", - " \n", - " folder = \"./rnn_model_\" + str(self._)+ \"_\" + self.__ + \"_\" + str(self.ncells).replace(\" \",\"\") + \"c/rnn_basic\"\n", - " self.save(folder)\n", - " print(\"\\n\")\n", - " print(\"Model saved in at: \", folder)\n", - " \n", - " \n", - " \n", - " def predict(self, x):\n", - " return 
self.sess.run(self.output, feed_dict={self.X:x})\n", - " \n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "#saves the rnn model and all its parameters including the scaler used\n", - "#optional also saves the minibatches used to train and the test set\n", - "\n", - "def full_save(rnn, train= True, test= True):\n", - " folder = \"./rnn_model_\" + str(rnn._)+ \"_\" + rnn.__ + \"_\" + str(rnn.ncells).replace(\" \",\"\") + \"c/rnn_basic\"\n", - " rnn.save(folder)\n", - " pkl_name = folder[2:-10] + \".pkl\"\n", - " \n", - " \n", - " pkl_dic = {\"ncells\": rnn.ncells,\n", - " \"ninputs\": rnn.ninputs,\n", - " \"future_steps\": rnn.future_steps,\n", - " \"nsteps\": rnn.nsteps,\n", - " \"num_output\": rnn.num_output,\n", - " \"cell_type\": rnn._, #cell_type\n", - " \"activation\": rnn.__, #Activation\n", - " \"loss_list\": rnn.loss_list,\n", - " \"scalor\": rnn.scalor}\n", - " \n", - " if train == True:\n", - " pkl_dic[\"minibatches\"] = minibatches\n", - " \n", - " if test == True:\n", - " pkl_dic[\"test_input\"] = test_input\n", - " pkl_dic[\"test_target\"] = test_target\n", - " \n", - " pkl.dump( pkl_dic, open(pkl_name , \"wb\" ) )\n", - " \n", - " print(\"Model saved at: \", folder)\n", - " print(\"Remaining data saved as: {}\".format(pkl_name))\n", - "\n", - "\n", - "\n", - "#loads the rnn model with all its parameters including the scaler used\n", - "#Checks if the pkl data also contains the training or test sets an return them accordingly\n", - "def full_load(folder): \n", - " #returns state of rnn with all information and returns the train and test set used\n", - " \n", - " #Directory of pkl file\n", - " pkl_name = folder[2:-10] + \".pkl\"\n", - " \n", - " #Check if pkl file exists\n", - " my_file = Path(pkl_name)\n", - " if my_file.is_file() == False:\n", - " raise ValueError(\"There is no .pkl file with the name: {}\".format(pkl_name))\n", - " \n", - " pkl_dic = pkl.load( open(pkl_name , \"rb\" ) )\n", - " ncells = pkl_dic[\"ncells\"]\n", - " ninputs = pkl_dic[\"ninputs\"]\n", - " scalor = pkl_dic[\"scalor\"]\n", - " future_steps = pkl_dic[\"future_steps\"]\n", - " timesteps = pkl_dic[\"nsteps\"] \n", - " num_output = pkl_dic[\"num_output\"]\n", - " cell_type = pkl_dic[\"cell_type\"]\n", - " activation = pkl_dic[\"activation\"]\n", - " \n", - " #Check if test or trainng set in dictionary\n", - " batch = False\n", - " test = False\n", - " if \"minibatches\" in pkl_dic:\n", - " batch = True\n", - " minibatches = pkl_dic[\"minibatches\"]\n", - " if \"test_input\" in pkl_dic:\n", - " test = True\n", - " test_input = [\"test_input\"]\n", - " test_target = [\"test_target\"]\n", - " \n", - " #loads and initializes a new model with the exact same properties\n", - " \n", - " tf.reset_default_graph()\n", - " rnn = RNNPlacePrediction(time_steps=timesteps, future_steps=future_steps, ninputs=ninputs, \n", - " ncells=ncells, num_output=num_output, cell_type=cell_type, activation=activation, scalor=scalor)\n", - "\n", - " rnn.set_cost_and_functions()\n", - " \n", - " rnn.load(folder)\n", - " \n", - " rnn.loss_list = pkl_dic[\"loss_list\"]\n", - " \n", - " print(\"Model succesfully loaded\")\n", - " \n", - " if batch and test:\n", - " data = [minibatches, test_input, test_target]\n", - " print(\"Minibatches (=training data) and test_input and test_target in data loaded\")\n", - " return rnn, data\n", - " \n", - " elif batch:\n", - " data = [minibatches]\n", - " print(\"Minibatches (=training data) loaded in data\")\n", - " return 
rnn, data\n", - " \n", - " elif test:\n", - " data = [test_input, test_target]\n", - " print(\"test_input and test_target loaded in data\")\n", - " return rnn, data\n", - " \n", - " else:\n", - " data = []\n", - " print(\"Only Model restored, no trainig or test data found in {}\".format(pkl_name))\n", - " print(\"Returned data is empty!\")\n", - " return rnn, data\n", - " \n", - " \n", - "#returns the folder name used by full_save and full_load for a given architecture\n", - "def get_rnn_folder(ncells, cell_type, activation):\n", - " folder = \"./rnn_model_\" + cell_type + \"_\" + activation + \"_\" + str(ncells).replace(\" \",\"\") + \"c/rnn_basic\"\n", - " return folder" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "#Plot the loss\n", - "def plot_loss_list(loss_list):\n", - " plt.plot(loss_list)\n", - " plt.xlabel(\"Epoch\")\n", - " plt.ylabel(\"Cost\")\n", - " plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "def rnn_test(rnn, test_input= test_input, test_target= test_target):\n", - " \n", - " #Here I predict based on my test set\n", - " test_pred = rnn.predict(test_input)\n", - " \n", - " #Here i subtract a prediction (random particle) from the target to get an idea of the predictions\n", - " #scaler_inv(test_input, scalerfunc = func)[0,:,:]\n", - " diff = scaler_inv(test_pred, scalerfunc = func)-scaler_inv(test_target, scalerfunc = func )\n", - " print(diff[0,:,:])\n", - " \n", - " #Here I evaluate my model on the test set based on mean_squared_error\n", - " loss = rnn.sess.run(rnn.cost, feed_dict={rnn.X:test_input, rnn.Y:test_target})\n", - " print(\"Loss on test set:\", loss)\n", - " \n", - " return test_pred, loss" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/keras_model_classifier.py b/keras_model_classifier.py new file mode 100644 index 0000000..25aba58 --- /dev/null +++ b/keras_model_classifier.py @@ -0,0 +1,206 @@ +import pandas as pd +import numpy as np +import matplotlib as mpl +import random +import math +import numpy as np +import matplotlib.pyplot as plt +import tensorflow as tf +from tensorflow.python.framework import ops +from sklearn import preprocessing +import pickle as pkl +from pathlib import Path +from keras.datasets import imdb +from keras.models import Sequential +from keras.layers import Dense +from keras.layers import LSTM +from keras.layers import GRU +from keras.layers import Dropout +#from keras.layers.convolutional import Conv1D +#from keras.layers.convolutional import MaxPooling1D +from keras.layers.embeddings import Embedding +from keras.preprocessing import sequence +from keras.callbacks import History +from keras.callbacks import EarlyStopping +from keras.callbacks import ModelCheckpoint +from keras.models import load_model + +#import seaborn as sns + +### Reshape original array into the shape (particlenumber, timesteps, input = coordinates)### + +def reshapor(arr_orig, num_inputs=3): + timesteps = 
int(arr_orig.shape[1]/num_inputs) + number_examples = int(arr_orig.shape[0]) + arr = np.zeros((number_examples, timesteps, num_inputs)) + + for i in range(number_examples): + for t in range(timesteps): + arr[i,t,:] = arr_orig[i,num_inputs*t:num_inputs*t+num_inputs] + + return arr + +def reshapor_inv(array_shaped, num_inputs=3): + timesteps = int(array_shaped.shape[1]) + num_examples = int(array_shaped.shape[0]) + arr = np.zeros((num_examples, timesteps*num_inputs)) + + for i in range(num_examples): + for t in range(timesteps): + arr[i,num_inputs*t:num_inputs*t+num_inputs] = array_shaped[i,t,:] + + return arr + +timesteps = 7 +future_steps = 1 + +ninputs = 3 + +#ncells as int or list of int +ncells = [150, 150, 150] +activation = "leaky_relu" +cell_type = "lstm" + +num_output = 3 + +scalor = pd.read_pickle('scalor.pkl') + +tset_matched = pd.read_pickle('matched_and_unmatched_8hittracks.pkl') + + +tset_matched = np.array(tset_matched) +tset_matched = tset_matched.astype('float32') +truth = tset_matched[:,-1] +#tset_matched = scaler(reshapor(tset_matched[:,:-1]), scalerfunc = func, scalor= scalor) + + +#tset_matched = reshapor_inv(tset_matched) + + +def tracks_to_particle(tset_matched, truth): + start = 0 + start_points = [0] + converse = False + + if len(tset_matched.shape) == 3: + tset_matched = reshapor_inv(tset_matched) + converse = True + + for track in range(tset_matched.shape[0]-1): + + for coord in range(12): + + if tset_matched[track, coord] != tset_matched[track+1, coord]: + start = track + 1 + + if start != start_points[-1]: + start_points.append(start) + + num_part = len(start_points) + + particle_tracks = [] + track_truth = [] + + if converse: + tset_matched = reshapor(tset_matched) + + for particle in range(num_part-1): + particle_tracks.append(reshapor(tset_matched[start_points[particle]:start_points[particle+1]])) + track_truth.append(truth[start_points[particle]:start_points[particle+1]]) + + + return particle_tracks, track_truth + +particle_tracks, track_truth = tracks_to_particle(tset_matched= tset_matched, truth= truth) + +num_particles = len(track_truth) + +particle_start_array = np.zeros((num_particles,4,3)) + +for particle in range(num_particles): + particle_start_array[particle,:,:] += particle_tracks[particle][0][:4,:] + +print(particle_start_array[11,:,:]) + +track_exist_truth = np.zeros((num_particles)) + +for particle in range(num_particles): + correct = False + num_tracks = len(track_truth[particle]) + + for track in range(num_tracks): + if track_truth[particle][track] == 1: + correct = True + + if correct: + track_exist_truth[particle] += 1 + +#Input: a = 3d array, b = 1d array + +def unison_shuffled_copies2(a, b): + assert a.shape[0] == b.shape[0] + p = np.random.permutation(a.shape[0]) + return a[p,:,:], b[p] + +def create_random_sets2(particle_start_array= particle_start_array, track_exist_truth= track_exist_truth, train_to_total_ratio= 0.9): + #shuffle the dataset + num_examples = particle_start_array.shape[0] + particle_start_array, track_exist_truth = unison_shuffled_copies2(particle_start_array, track_exist_truth) + + #evaluate siye of training and test set and initialize them + train_set_size = np.int(num_examples*train_to_total_ratio) + test_set_size = num_examples - train_set_size + + train_part_start = np.zeros((train_set_size, particle_start_array.shape[1], particle_start_array.shape[2])) + train_track_e_tr = np.zeros((train_set_size)) + test_part_start = np.zeros((test_set_size, particle_start_array.shape[1], particle_start_array.shape[2])) + 
test_track_e_tr = np.zeros((test_set_size)) + + + #fill train and test sets + for i in range(num_examples): + if train_set_size > i: + train_part_start[i,:,:] += particle_start_array[i,:,:] + train_track_e_tr[i] += track_exist_truth[i] + else: + test_part_start[i - train_set_size,:,:] += particle_start_array[i,:,:] + test_track_e_tr[i - train_set_size] += track_exist_truth[i] + + return train_part_start, train_track_e_tr, test_part_start, test_track_e_tr + +X_train, Y_train, X_test, Y_test = create_random_sets2() + +# truncate and pad input sequences +max_review_length = 4 +filepath = "./keras_model_classifier.h5" + +callbacks = [ + EarlyStopping(monitor='val_loss', patience=30, min_delta=0), + ModelCheckpoint(filepath, monitor='val_loss', save_best_only=True), + History() +] + +# +# create the model +model = Sequential() +#model.add(Dense(12, input_shape=(4,3))) +model.add(LSTM(40, return_sequences=True, input_shape=(4,3), activation = 'relu')) +model.add(Dropout(0.5)) +model.add(LSTM(40, return_sequences=True, activation = 'relu')) +model.add(Dropout(0.5)) +model.add(LSTM(40, return_sequences=True, activation = 'relu')) +model.add(Dropout(0.5)) +model.add(LSTM(4, activation = 'relu')) +model.add(Dropout(0.5)) +model.add(Dense(1, activation='sigmoid')) +model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) +print(model.summary()) +model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=500, batch_size=50, callbacks= callbacks, verbose = 2) +model = load_model(filepath) +# Final evaluation of the model +scores = model.evaluate(X_test, Y_test, verbose=0) +print("Accuracy: %.2f%%" % (scores[1]*100)) + +model.save(filepath) + +#keras.models.load_model(filepath) \ No newline at end of file diff --git a/keras_model_classifier_LSTM_40_LSTM_40.h5 b/keras_model_classifier_LSTM_40_LSTM_40.h5 new file mode 100644 index 0000000..920ac57 --- /dev/null +++ b/keras_model_classifier_LSTM_40_LSTM_40.h5 Binary files differ diff --git a/matched_and_unmatched_8hittracks.pkl b/matched_and_unmatched_8hittracks.pkl new file mode 100644 index 0000000..7a1f5e2 --- /dev/null +++ b/matched_and_unmatched_8hittracks.pkl Binary files differ diff --git a/requiremements.py b/requiremements.py deleted file mode 100644 index 7741c59..0000000 --- a/requiremements.py +++ /dev/null @@ -1,645 +0,0 @@ - -# coding: utf-8 - -# In[1]: - - -import pandas as pd -import numpy as np -import matplotlib as mpl -import random -import math -import numpy as np -import matplotlib.pyplot as plt -import tensorflow as tf -from tensorflow.python.framework import ops -from sklearn import preprocessing -import pickle as pkl -from pathlib import Path - - -# In[2]: - - -### Reshape original array into the shape (particlenumber, timesteps, input = coordinates)### - -def reshapor(arr_orig): - timesteps = int(arr_orig.shape[1]/3) - number_examples = int(arr_orig.shape[0]) - arr = np.zeros((number_examples, timesteps, 3)) - - for i in range(number_examples): - for t in range(timesteps): - arr[i,t,0:3] = arr_orig[i,3*t:3*t+3] - - return arr - -def reshapor_inv(array_shaped): - timesteps = int(array_shaped.shape[1]) - num_examples = int(array_shaped.shape[0]) - arr = np.zeros((num_examples, timesteps*3)) - - for i in range(num_examples): - for t in range(timesteps): - arr[i,3*t:3*t+3] = array_shaped[i,t,:] - - return arr - - -# In[3]: - - -### create the training set and the test set### - -def create_random_sets(dataset, train_to_total_ratio): - #shuffle the dataset - num_examples = dataset.shape[0] - p = 
np.random.permutation(num_examples) - dataset = dataset[p,:] - - #evaluate siye of training and test set and initialize them - train_set_size = np.int(num_examples*train_to_total_ratio) - test_set_size = num_examples - train_set_size - - train_set = np.zeros((train_set_size, dataset.shape[1])) - test_set = np.zeros((test_set_size, dataset.shape[1])) - - - #fill train and test sets - for i in range(num_examples): - if train_set_size > i: - train_set[i,:] += dataset[i,:] - else: - test_set[i - train_set_size,:] += dataset[i,:] - - return train_set, test_set - - -# In[4]: - - -testset = pd.read_pickle('matched_8hittracks.pkl') -tset = np.array(testset) -tset = tset.astype('float32') -train_set, test_set = create_random_sets(tset, 0.99) - - -# In[5]: - - -#Normalize the data advanced version with scikit learn - -#set the transormation based on training set -def set_min_max_scaler(arr, feature_range= (-1,1)): - min_max_scalor = preprocessing.MinMaxScaler(feature_range=feature_range) - if len(arr.shape) == 3: - arr = reshapor(min_max_scalor.fit_transform(reshapor_inv(arr))) - else: - arr = min_max_scalor.fit_transform(arr) - return min_max_scalor - -min_max_scalor = set_min_max_scaler(train_set) - - -#transform data -def min_max_scaler(arr, min_max_scalor= min_max_scalor): - - if len(arr.shape) == 3: - if arr.shape[1] == 8: - arr = reshapor(min_max_scalor.transform(reshapor_inv(arr))) - else: - arr_ = np.zeros((arr.shape[0],24)) - arr = reshapor_inv(arr) - arr_[:,:arr.shape[1]] += arr - arr = min_max_scalor.transform(arr_)[:,:arr.shape[1]] - arr = reshapor(arr) - - else: - if arr.shape[1] == 24: - arr = min_max_scalor.transform(arr) - else: - arr_ = np.zeros((arr.shape[0],24)) - arr_[:,:arr.shape[1]] += arr - arr = min_max_scalor.transform(arr_)[:,:arr.shape[1]] - - return arr - -#inverse transformation -def min_max_scaler_inv(arr, min_max_scalor= min_max_scalor): - - if len(arr.shape) == 3: - if arr.shape[1] == 8: - arr = reshapor(min_max_scalor.inverse_transform(reshapor_inv(arr))) - else: - arr_ = np.zeros((arr.shape[0],24)) - arr = reshapor_inv(arr) - arr_[:,:arr.shape[1]] += arr - arr = min_max_scalor.inverse_transform(arr_)[:,:arr.shape[1]] - arr = reshapor(arr) - - else: - if arr.shape[1] == 24: - arr = min_max_scalor.inverse_transform(arr) - else: - arr_ = np.zeros((arr.shape[0],24)) - arr_[:,:arr.shape[1]] += arr - arr = min_max_scalor.nverse_transform(arr_)[:,:arr.shape[1]] - - return arr - - -# In[6]: - - -#Normalize the data advanced version with scikit learn - Standard scaler - -#set the transormation based on training set -def set_std_scaler(arr): - std_scalor = preprocessing.StandardScaler() - if len(arr.shape) == 3: - arr = reshapor(std_scalor.fit(reshapor_inv(arr))) - else: - arr = std_scalor.fit(arr) - return std_scalor - -std_scalor = set_std_scaler(train_set) - -#transform data -def std_scaler(arr, std_scalor= std_scalor): - - if len(arr.shape) == 3: - if arr.shape[1] == 8: - arr = reshapor(std_scalor.transform(reshapor_inv(arr))) - else: - arr_ = np.zeros((arr.shape[0],24)) - arr = reshapor_inv(arr) - arr_[:,:arr.shape[1]] += arr - arr = std_scalor.transform(arr_)[:,:arr.shape[1]] - arr = reshapor(arr) - - else: - if arr.shape[1] == 24: - arr = std_scalor.transform(arr) - else: - arr_ = np.zeros((arr.shape[0],24)) - arr_[:,:arr.shape[1]] += arr - arr = std_scalor.transform(arr_)[:,:arr.shape[1]] - - return arr - -#inverse transformation -def std_scaler_inv(arr, std_scalor= std_scalor): - - if len(arr.shape) == 3: - if arr.shape[1] == 8: - arr = 
reshapor(std_scalor.inverse_transform(reshapor_inv(arr))) - else: - arr_ = np.zeros((arr.shape[0],24)) - arr = reshapor_inv(arr) - arr_[:,:arr.shape[1]] += arr - arr = std_scalor.inverse_transform(arr_)[:,:arr.shape[1]] - arr = reshapor(arr) - - else: - if arr.shape[1] == 24: - arr = std_scalor.inverse_transform(arr) - else: - arr_ = np.zeros((arr.shape[0],24)) - arr_[:,:arr.shape[1]] += arr - arr = std_scalor.inverse_transform(arr_)[:,:arr.shape[1]] - - return arr - - -# In[7]: - - -train_set = reshapor(train_set) -test_set = reshapor(test_set) - - -# In[8]: - - -#Scale data either with MinMax scaler or with Standard scaler -#Return scalor if fit = True and and scaled array otherwise - -def scaler(arr, std_scalor= std_scalor, min_max_scalor= min_max_scalor, scalerfunc= "std"): - - if scalerfunc == "std": - arr = std_scaler(arr, std_scalor= std_scalor) - return arr - - elif scalerfunc == "minmax": - arr = min_max_scaler(arr, min_max_scalor= min_max_scalor) - return arr - - else: - raise ValueError("Uknown scaler chosen: {}".format(scalerfunc)) - -def scaler_inv(arr, std_scalor= std_scalor, min_max_scalor= min_max_scalor, scalerfunc= "std"): - - if scalerfunc == "std": - arr = std_scaler_inv(arr, std_scalor= std_scalor) - return arr - - elif scalerfunc == "minmax": - arr = min_max_scaler_inv(arr, min_max_scalor= std_scalor) - return arr - - else: - raise ValueError("Uknown scaler chosen: {}".format(scalerfunc)) - - -# In[9]: - - -#scale the data - -func = "minmax" - -train_set = scaler(train_set, scalerfunc = func) -test_set = scaler(test_set, scalerfunc = func) - -if func == "minmax": - scalor = min_max_scalor -elif func == "std": - scalor = std_scalor - -#print(train_set[0,:,:]) - - -# In[10]: - - -###create random mini_batches### - - -def unison_shuffled_copies(a, b): - assert a.shape[0] == b.shape[0] - p = np.random.permutation(a.shape[0]) - return a[p,:,:], b[p,:,:] - -def random_mini_batches(inputt, target, minibatch_size = 500): - - num_examples = inputt.shape[0] - - - #Number of complete batches - - number_of_batches = int(num_examples/minibatch_size) - minibatches = [] - - #shuffle particles - _i, _t = unison_shuffled_copies(inputt, target) - #print(_t.shape) - - - for i in range(number_of_batches): - - minibatch_train = _i[minibatch_size*i:minibatch_size*(i+1), :, :] - - minibatch_true = _t[minibatch_size*i:minibatch_size*(i+1), :, :] - - minibatches.append((minibatch_train, minibatch_true)) - - - minibatches.append((_i[number_of_batches*minibatch_size:, :, :], _t[number_of_batches*minibatch_size:, :, :])) - - - return minibatches - - - -# In[11]: - - -#Create random minibatches of train and test set with input and target array - - -minibatches = random_mini_batches(train_set[:,:-1,:], train_set[:,1:,:], minibatch_size = 1000) -#_train, _target = minibatches[0] -test_input, test_target = test_set[:,:-1,:], test_set[:,1:,:] -#print(train[0,:,:], target[0,:,:]) - - -# In[12]: - - -class RNNPlacePrediction(): - - - def __init__(self, time_steps, future_steps, ninputs, ncells, num_output, cell_type="basic_rnn", activation="relu", scalor= scalor): - - self.nsteps = time_steps - self.future_steps = future_steps - self.ninputs = ninputs - self.ncells = ncells - self.num_output = num_output - self._ = cell_type #later used to create folder name - self.__ = activation #later used to create folder name - self.loss_list = [] - self.scalor = scalor - - #### The input is of shape (num_examples, time_steps, ninputs) - #### ninputs is the dimentionality (number of features) of the time series 
(here coordinates) - self.X = tf.placeholder(dtype=tf.float32, shape=(None, self.nsteps, ninputs)) - self.Y = tf.placeholder(dtype=tf.float32, shape=(None, self.nsteps, ninputs)) - - - #Check if activation function valid and set activation - if self.__=="relu": - self.activation = tf.nn.relu - - elif self.__=="tanh": - self.activation = tf.nn.tanh - - elif self.__=="leaky_relu": - self.activation = tf.nn.leaky_relu - - elif self.__=="elu": - self.activation = tf.nn.elu - - else: - raise ValueError("Wrong rnn avtivation function: {}".format(self.__)) - - - - #Check if cell type valid and set cell_type - if self._=="basic_rnn": - self.cell_type = tf.contrib.rnn.BasicRNNCell - - elif self._=="lstm": - self.cell_type = tf.contrib.rnn.BasicLSTMCell - - elif self._=="GRU": - self.cell_type = tf.contrib.rnn.GRUCell - - else: - raise ValueError("Wrong rnn cell type: {}".format(self._)) - - - #Check Input of ncells - if (type(self.ncells) == int): - self.ncells = [self.ncells] - - if (type(self.ncells) != list): - raise ValueError("Wrong type of Input for ncells") - - for _ in range(len(self.ncells)): - if type(self.ncells[_]) != int: - raise ValueError("Wrong type of Input for ncells") - - self.activationlist = [] - for _ in range(len(self.ncells)-1): - self.activationlist.append(self.activation) - self.activationlist.append(tf.nn.tanh) - - self.cell = tf.contrib.rnn.MultiRNNCell([self.cell_type(num_units=self.ncells[layer], activation=self.activationlist[layer]) - for layer in range(len(self.ncells))]) - - - #### I now define the output - self.RNNCell = tf.contrib.rnn.OutputProjectionWrapper(self.cell, output_size= num_output) - - - - - - self.sess = tf.Session() - - def set_cost_and_functions(self, LR=0.001): - #### I define here the function that unrolls the RNN cell - self.output, self.state = tf.nn.dynamic_rnn(self.RNNCell, self.X, dtype=tf.float32) - #### I define the cost function as the mean_squared_error (distance of predicted point to target) - self.cost = tf.reduce_mean(tf.losses.mean_squared_error(self.Y, self.output)) - - #### the rest proceed as usual - self.train = tf.train.AdamOptimizer(LR).minimize(self.cost) - #### Variable initializer - self.init = tf.global_variables_initializer() - self.saver = tf.train.Saver() - self.sess.run(self.init) - - - def save(self, rnn_folder="./rnn_model/rnn_basic"): - self.saver.save(self.sess, rnn_folder) - - - def load(self, filename="./rnn_model/rnn_basic"): - self.saver.restore(self.sess, filename) - - - - def fit(self, minibatches, epochs, print_step, checkpoint = 5, patience = 200): - patience_cnt = 0 - start = len(self.loss_list) - epoche_save = start - - folder = "./rnn_model_" + str(self._)+ "_" + self.__ + "_" + str(self.ncells).replace(" ","") + "c" + "_checkpoint/rnn_basic" - - for iep in range(start, start + epochs): - loss = 0 - - batches = len(minibatches) - #Here I iterate over the batches - for batch in range(batches): - #### Here I train the RNNcell - #### The X is the time series, the Y is shifted by 1 time step - train, target = minibatches[batch] - self.sess.run(self.train, feed_dict={self.X:train, self.Y:target}) - - - loss += self.sess.run(self.cost, feed_dict={self.X:train, self.Y:target}) - - #Normalize loss over number of batches and scale it back before normaliziation - loss /= batches - self.loss_list.append(loss) - - #print(loss) - - #Here I create the checkpoint if the perfomance is better - if iep > 1 and iep%checkpoint == 0 and self.loss_list[iep] < self.loss_list[epoche_save]: - #print("Checkpoint created at epoch: 
", iep) - self.save(folder) - epoche_save = iep - - #early stopping with patience - if iep > 1 and abs(self.loss_list[iep]-self.loss_list[iep-1]) < 1.5/10**7: - patience_cnt += 1 - #print("Patience now at: ", patience_cnt, " of ", patience) - - if patience_cnt + 1 > patience: - print("\n", "Early stopping at epoch ", iep, ", difference: ", abs(self.loss_list[iep]-self.loss_list[iep-1])) - print("Cost: ",loss) - break - - #Note that the loss here is multiplied with 1000 for easier reading - if iep%print_step==0: - print("Epoch number ",iep) - print("Cost: ",loss*10**6, "e-6") - print("Patience: ",patience_cnt, "/", patience) - print("Last checkpoint at: Epoch ", epoche_save, "\n") - - #Set model back to the last checkpoint if performance was better - if self.loss_list[epoche_save] < self.loss_list[iep]: - self.load(folder) - print("\n") - print("State of last checkpoint checkpoint at epoch ", epoche_save, " restored") - print("Performance at last checkpoint is " ,(self.loss_list[iep] - self.loss_list[epoche_save])/self.loss_list[iep]*100, "% better" ) - - folder = "./rnn_model_" + str(self._)+ "_" + self.__ + "_" + str(self.ncells).replace(" ","") + "c/rnn_basic" - self.save(folder) - print("\n") - print("Model saved in at: ", folder) - - - - def predict(self, x): - return self.sess.run(self.output, feed_dict={self.X:x}) - - - - -# In[13]: - - -#saves the rnn model and all its parameters including the scaler used -#optional also saves the minibatches used to train and the test set - -def full_save(rnn, train= True, test= True): - folder = "./rnn_model_" + str(rnn._)+ "_" + rnn.__ + "_" + str(rnn.ncells).replace(" ","") + "c/rnn_basic" - rnn.save(folder) - pkl_name = folder[2:-10] + ".pkl" - - - pkl_dic = {"ncells": rnn.ncells, - "ninputs": rnn.ninputs, - "future_steps": rnn.future_steps, - "nsteps": rnn.nsteps, - "num_output": rnn.num_output, - "cell_type": rnn._, #cell_type - "activation": rnn.__, #Activation - "loss_list": rnn.loss_list, - "scalor": rnn.scalor} - - if train == True: - pkl_dic["minibatches"] = minibatches - - if test == True: - pkl_dic["test_input"] = test_input - pkl_dic["test_target"] = test_target - - pkl.dump( pkl_dic, open(pkl_name , "wb" ) ) - - print("Model saved at: ", folder) - print("Remaining data saved as: {}".format(pkl_name)) - - - -#loads the rnn model with all its parameters including the scaler used -#Checks if the pkl data also contains the training or test sets an return them accordingly -def full_load(folder): - #returns state of rnn with all information and returns the train and test set used - - #Directory of pkl file - pkl_name = folder[2:-10] + ".pkl" - - #Check if pkl file exists - my_file = Path(pkl_name) - if my_file.is_file() == False: - raise ValueError("There is no .pkl file with the name: {}".format(pkl_name)) - - pkl_dic = pkl.load( open(pkl_name , "rb" ) ) - ncells = pkl_dic["ncells"] - ninputs = pkl_dic["ninputs"] - scalor = pkl_dic["scalor"] - future_steps = pkl_dic["future_steps"] - timesteps = pkl_dic["nsteps"] - num_output = pkl_dic["num_output"] - cell_type = pkl_dic["cell_type"] - activation = pkl_dic["activation"] - - #Check if test or trainng set in dictionary - batch = False - test = False - if "minibatches" in pkl_dic: - batch = True - minibatches = pkl_dic["minibatches"] - if "test_input" in pkl_dic: - test = True - test_input = ["test_input"] - test_target = ["test_target"] - - #loads and initializes a new model with the exact same properties - - tf.reset_default_graph() - rnn = RNNPlacePrediction(time_steps=timesteps, 
future_steps=future_steps, ninputs=ninputs, - ncells=ncells, num_output=num_output, cell_type=cell_type, activation=activation, scalor=scalor) - - rnn.set_cost_and_functions() - - rnn.load(folder) - - rnn.loss_list = pkl_dic["loss_list"] - - print("Model succesfully loaded") - - if batch and test: - data = [minibatches, test_input, test_target] - print("Minibatches (=training data) and test_input and test_target in data loaded") - return rnn, data - - elif batch: - data = [minibatches] - print("Minibatches (=training data) loaded in data") - return rnn, data - - elif test: - data = [test_input, test_target] - print("test_input and test_target loaded in data") - return rnn, data - - else: - data = [] - print("Only Model restored, no trainig or test data found in {}".format(pkl_name)) - print("Returned data is empty!") - return rnn, data - - -#returns the folder name used by full_save and full_load for a given architecture -def get_rnn_folder(ncells, cell_type, activation): - folder = "./rnn_model_" + cell_type + "_" + activation + "_" + str(ncells).replace(" ","") + "c/rnn_basic" - return folder - - -# In[15]: - - -#Plot the loss -def plot_loss_list(loss_list): - plt.plot(loss_list) - plt.xlabel("Epoch") - plt.ylabel("Cost") - plt.show() - - -# In[17]: - - -def rnn_test(rnn, test_input= test_input, test_target= test_target): - - #Here I predict based on my test set - test_pred = rnn.predict(test_input) - - #Here i subtract a prediction (random particle) from the target to get an idea of the predictions - #scaler_inv(test_input, scalerfunc = func)[0,:,:] - diff = scaler_inv(test_pred, scalerfunc = func)-scaler_inv(test_target, scalerfunc = func ) - print(diff[random.randint(0,test_pred.shape[0]),:,:]) - - #Here I evaluate my model on the test set based on mean_squared_error - loss = rnn.sess.run(rnn.cost, feed_dict={rnn.X:test_input, rnn.Y:test_target}) - print("Loss on test set:", loss) - - return test_pred, loss - diff --git "a/rnn_model_lstm_leaky_relu_\133150,150,150\135c.pkl" "b/rnn_model_lstm_leaky_relu_\133150,150,150\135c.pkl" new file mode 100644 index 0000000..788eea5 --- /dev/null +++ "b/rnn_model_lstm_leaky_relu_\133150,150,150\135c.pkl" Binary files differ diff --git "a/rnn_model_lstm_leaky_relu_\133150,150,150\135c/checkpoint" "b/rnn_model_lstm_leaky_relu_\133150,150,150\135c/checkpoint" new file mode 100644 index 0000000..28fb53f --- /dev/null +++ "b/rnn_model_lstm_leaky_relu_\133150,150,150\135c/checkpoint" @@ -0,0 +1,2 @@ +model_checkpoint_path: "rnn_basic" +all_model_checkpoint_paths: "rnn_basic" diff --git "a/rnn_model_lstm_leaky_relu_\133150,150,150\135c/rnn_basic.data-00000-of-00001" "b/rnn_model_lstm_leaky_relu_\133150,150,150\135c/rnn_basic.data-00000-of-00001" new file mode 100644 index 0000000..429e8ac --- /dev/null +++ "b/rnn_model_lstm_leaky_relu_\133150,150,150\135c/rnn_basic.data-00000-of-00001" Binary files differ diff --git "a/rnn_model_lstm_leaky_relu_\133150,150,150\135c/rnn_basic.index" "b/rnn_model_lstm_leaky_relu_\133150,150,150\135c/rnn_basic.index" new file mode 100644 index 0000000..b21c996 --- /dev/null +++ "b/rnn_model_lstm_leaky_relu_\133150,150,150\135c/rnn_basic.index" Binary files differ diff --git "a/rnn_model_lstm_leaky_relu_\133150,150,150\135c/rnn_basic.meta" "b/rnn_model_lstm_leaky_relu_\133150,150,150\135c/rnn_basic.meta" new file mode 100644 index 0000000..394587e --- /dev/null +++ "b/rnn_model_lstm_leaky_relu_\133150,150,150\135c/rnn_basic.meta" Binary files differ diff --git a/scalor.pkl b/scalor.pkl new file mode 100644 index 
0000000..a5aafe6
--- /dev/null
+++ b/scalor.pkl
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c.pkl" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c.pkl"
deleted file mode 100644
index ad4f185..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/checkpoint" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/checkpoint"
deleted file mode 100644
index 22214f6..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_GRU_leaky_relu_[50,30,10]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index 840e891..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/rnn_basic.index" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/rnn_basic.index"
deleted file mode 100644
index b8a0a0b..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/rnn_basic.meta" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/rnn_basic.meta"
deleted file mode 100644
index 1fe2734..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,30,10\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c.pkl" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c.pkl"
deleted file mode 100644
index 06604bd..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/checkpoint" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/checkpoint"
deleted file mode 100644
index 49de70d..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_GRU_leaky_relu_[50,40,30,20,10,5]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index c286bf6..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.index" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.index"
deleted file mode 100644
index 4713c2d..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.meta" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.meta"
deleted file mode 100644
index 8fe9cc7..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/checkpoint" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/checkpoint"
deleted file mode 100644
index 49de70d..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_GRU_leaky_relu_[50,40,30,20,10,5]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index 92198f7..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.index" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.index"
deleted file mode 100644
index 0fc6408..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.meta" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.meta"
deleted file mode 100644
index 059ee8c..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_model_GRU_leaky_relu_\13350,40,30,20,10,5\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c.pkl" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c.pkl"
deleted file mode 100644
index 869c46c..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/checkpoint" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/checkpoint"
deleted file mode 100644
index 244346a..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_GRU_leaky_relu_[50,40,30,20,10]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index f7ed4e4..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.index" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.index"
deleted file mode 100644
index b6d8a5e..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.meta" "b/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.meta"
deleted file mode 100644
index 4b3fb3d..0000000
--- "a/trained_models/rnn_model_GRU_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c.pkl" "b/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c.pkl"
deleted file mode 100644
index c79dd15..0000000
--- "a/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/checkpoint" "b/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/checkpoint"
deleted file mode 100644
index 1dc4133..0000000
--- "a/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_basic_rnn_elu_[50,40,30,20,10,5]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index 11a46ca..0000000
--- "a/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/rnn_basic.index" "b/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/rnn_basic.index"
deleted file mode 100644
index e5310a0..0000000
--- "a/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/rnn_basic.meta" "b/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/rnn_basic.meta"
deleted file mode 100644
index 980712d..0000000
--- "a/trained_models/rnn_model_basic_rnn_elu_\13350,40,30,20,10,5\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c.pkl" "b/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c.pkl"
deleted file mode 100644
index 895ea15..0000000
--- "a/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/checkpoint" "b/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/checkpoint"
deleted file mode 100644
index f1ea1fa..0000000
--- "a/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_basic_rnn_tanh_[50,40,30,20,10,5]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index 85acc90..0000000
--- "a/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/rnn_basic.index" "b/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/rnn_basic.index"
deleted file mode 100644
index f5d8928..0000000
--- "a/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/rnn_basic.meta" "b/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/rnn_basic.meta"
deleted file mode 100644
index 1d3d55b..0000000
--- "a/trained_models/rnn_model_basic_rnn_tanh_\13350,40,30,20,10,5\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c.pkl" "b/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c.pkl"
deleted file mode 100644
index 2af84df..0000000
--- "a/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/checkpoint" "b/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/checkpoint"
deleted file mode 100644
index bc11feb..0000000
--- "a/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_lstm_elu_[50,40,30,10]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index 8145414..0000000
--- "a/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/rnn_basic.index" "b/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/rnn_basic.index"
deleted file mode 100644
index a6a390d..0000000
--- "a/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/rnn_basic.meta" "b/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/rnn_basic.meta"
deleted file mode 100644
index 9cc49fa..0000000
--- "a/trained_models/rnn_model_lstm_elu_\13350,40,30,10\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c.pkl" "b/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c.pkl"
deleted file mode 100644
index 2309df4..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/checkpoint" "b/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/checkpoint"
deleted file mode 100644
index 28fb53f..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/checkpoint"
+++ /dev/null
@@ -1,2 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index 7754745..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/rnn_basic.index" "b/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/rnn_basic.index"
deleted file mode 100644
index d91bcd2..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/rnn_basic.meta" "b/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/rnn_basic.meta"
deleted file mode 100644
index 6f58a35..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\133100,100,100\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c.pkl" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c.pkl"
deleted file mode 100644
index 54c812d..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/checkpoint" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/checkpoint"
deleted file mode 100644
index edbb4df..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_lstm_leaky_relu_[50,30,10]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index cc1e640..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.index" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.index"
deleted file mode 100644
index 76e8780..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.meta" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.meta"
deleted file mode 100644
index 367cd6c..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/checkpoint" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/checkpoint"
deleted file mode 100644
index edbb4df..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_lstm_leaky_relu_[50,30,10]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index df88f77..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.index" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.index"
deleted file mode 100644
index 22fd6d4..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.meta" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.meta"
deleted file mode 100644
index 95e8672..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_model_lstm_leaky_relu_\13350,30,10\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c.pkl" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c.pkl"
deleted file mode 100644
index de2ab59..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/checkpoint" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/checkpoint"
deleted file mode 100644
index 2e4ecda..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_lstm_leaky_relu_[50,40,30,10]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index efd8aa5..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/rnn_basic.index" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/rnn_basic.index"
deleted file mode 100644
index bbbfe93..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/rnn_basic.meta" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/rnn_basic.meta"
deleted file mode 100644
index 4162368..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,10\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c.pkl" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c.pkl"
deleted file mode 100644
index fd65421..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/checkpoint" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/checkpoint"
deleted file mode 100644
index c69cc9c..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_lstm_leaky_relu_[50,40,30,20,10]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index c9ccb7e..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.index" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.index"
deleted file mode 100644
index 6f922e1..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.meta" "b/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.meta"
deleted file mode 100644
index 87885a1..0000000
--- "a/trained_models/rnn_model_lstm_leaky_relu_\13350,40,30,20,10\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c.pkl" "b/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c.pkl"
deleted file mode 100644
index 645b9a6..0000000
--- "a/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/checkpoint" "b/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/checkpoint"
deleted file mode 100644
index 654b86f..0000000
--- "a/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_lstm_relu_[50,40,30,10]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index a7823c6..0000000
--- "a/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/rnn_basic.index" "b/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/rnn_basic.index"
deleted file mode 100644
index 6c96045..0000000
--- "a/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/rnn_basic.meta" "b/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/rnn_basic.meta"
deleted file mode 100644
index e72017c..0000000
--- "a/trained_models/rnn_model_lstm_relu_\13350,40,30,10\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c.pkl" "b/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c.pkl"
deleted file mode 100644
index 433b213..0000000
--- "a/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c.pkl"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/checkpoint" "b/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/checkpoint"
deleted file mode 100644
index a61faa0..0000000
--- "a/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/checkpoint"
+++ /dev/null
@@ -1,3 +0,0 @@
-model_checkpoint_path: "rnn_basic"
-all_model_checkpoint_paths: "../rnn_model_lstm_tanh_[50,40,30,10]c_checkpoint/rnn_basic"
-all_model_checkpoint_paths: "rnn_basic"
diff --git "a/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001" "b/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001"
deleted file mode 100644
index 509f2a9..0000000
--- "a/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/rnn_basic.data-00000-of-00001"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/rnn_basic.index" "b/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/rnn_basic.index"
deleted file mode 100644
index a6c3872..0000000
--- "a/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/rnn_basic.index"
+++ /dev/null
Binary files differ
diff --git "a/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/rnn_basic.meta" "b/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/rnn_basic.meta"
deleted file mode 100644
index a8ec286..0000000
--- "a/trained_models/rnn_model_lstm_tanh_\13350,40,30,10\135c/rnn_basic.meta"
+++ /dev/null
Binary files differ
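Every model directory removed above shares the same layout: a TensorFlow 1.x checkpoint written under the prefix "rnn_basic" (rnn_basic.meta, rnn_basic.index, rnn_basic.data-00000-of-00001, plus the small checkpoint index file), with the remaining Python state pickled in a matching .pkl file next to the directory. A minimal sketch of how one of these directories could be reloaded, assuming a TensorFlow 1.x environment and that the .pkl holds only auxiliary, non-graph state; the model_dir path and the aux name are illustrative:

import pickle
import tensorflow as tf  # assumes a TensorFlow 1.x environment

# Illustrative path; every deleted model directory follows the same layout.
model_dir = "./trained_models/rnn_model_lstm_tanh_[50,40,30,10]c"

# Rebuild the graph from the .meta file, then restore the weights from the
# "rnn_basic" prefix recorded in the checkpoint index file.
saver = tf.train.import_meta_graph(model_dir + "/rnn_basic.meta")
with tf.Session() as sess:
    saver.restore(sess, model_dir + "/rnn_basic")
    # The matching .pkl sidecar holds the remaining non-graph state.
    with open(model_dir + ".pkl", "rb") as f:
        aux = pickle.load(f)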