{"cells":[{"cell_type":"markdown","metadata":{"id":"Z5wWcly8oSsr"},"source":["# Import\n"]},{"cell_type":"code","execution_count":1,"metadata":{"executionInfo":{"elapsed":1483,"status":"ok","timestamp":1702412990188,"user":{"displayName":"Hilal ColabPro","userId":"01583771971134097379"},"user_tz":-180},"id":"u2JKlyqcoahg"},"outputs":[],"source":["import datetime as dt\n","import pandas as pd\n","\n","## CPU\n","import numpy as np\n","parallelType = np\n","\n","'''\n","## GPU\n","import cupy as cp\n","parallelType = cp\n","'''\n","from sklearn.model_selection import train_test_split\n","from sklearn.linear_model import LogisticRegression\n","from sklearn.preprocessing import StandardScaler, Normalizer, MaxAbsScaler, RobustScaler, MinMaxScaler, QuantileTransformer\n","from scipy.io import arff\n"]},{"cell_type":"code","execution_count":2,"metadata":{"colab":{"background_save":true,"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":29006,"status":"ok","timestamp":1702413019190,"user":{"displayName":"Hilal ColabPro","userId":"01583771971134097379"},"user_tz":-180},"id":"q_nNZVg8oiyj"},"outputs":[{"name":"stdout","output_type":"stream","text":["Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount(\"/content/gdrive\", force_remount=True).\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/gdrive')"]},{"cell_type":"markdown","metadata":{"id":"4ssDkNm0oIXj"},"source":["# ABC-ANN"]},{"cell_type":"markdown","metadata":{"id":"AlgGFMk-o1s0"},"source":["## ABC"]},{"cell_type":"code","execution_count":3,"metadata":{"executionInfo":{"elapsed":2976,"status":"ok","timestamp":1702413022158,"user":{"displayName":"Hilal ColabPro","userId":"01583771971134097379"},"user_tz":-180},"id":"_0F92BSJ8zeP"},"outputs":[],"source":["import tensorflow as tf\n","import keras as keras\n","from keras import initializers\n","class ABC_ANN_Binary:\n","  def __init__(self, inputX, target, hiddenLayerSize, P, limit, lb, ub, MR, 
parallelType, activation):\n","    self.comp = parallelType # This code line determines the parallel type using numpy or cupy\n","    self.FVS = inputX.shape[1]\n","    self.X = inputX\n","    # self.XwithBias = self.comp.append(self.comp.ones((self.X.shape[0], 1)), self.X, axis=1)\n","    self.y = target\n","    self.P = P  # P is population size\n","    self.limit = limit\n","    # D refers to dimension\n","    self.HLS = hiddenLayerSize\n","    self.D = (self.FVS + 1) * self.HLS + self.HLS + 1\n","    self.lb = lb  # lower bound for parameters\n","    self.ub = ub  # upper bound for parameters\n","    self.MR = MR  # modification rate\n","    self.evaluationNumber = 0\n","    self.activation = activation\n","    self.tmpID = [-1] * self.P\n","    #self.Foods = self.lb + self.comp.random.rand(self.P, self.D) * (self.ub - self.lb)\n","\n","    initializer = tf.keras.initializers.GlorotUniform()\n","    values = initializer(shape=(self.P, self.D))\n","    values = values.numpy()\n","    #self.Foods =  cp.array(values)\n","    self.Foods =  self.lb + parallelType.array(values)* (self.ub - self.lb)\n","\n","    # self.Foods = self.comp.random.uniform(self.lb, self.ub, size = (self.P, self.D))\n","    self.solution = self.comp.copy(self.Foods)\n","    self.f = self.calculateF(self.Foods)\n","    self.fitness = 1 / (1 + self.f)\n","    self.trial = self.comp.zeros(P)\n","    self.globalMin = self.f[0, 0]\n","    self.globalParams = self.comp.copy(self.Foods[0:1])  # 1st row\n","    self.scoutBeeCounts = 0\n","\n","  def create_new(self, index):\n","    new_sol = self.lb + self.comp.random.rand(1, self.D) * (self.ub - self.lb)\n","    # new_sol = self.comp.random.uniform(self.lb, self.ub, size = (1, self.D))\n","    self.Foods[index, :] = new_sol.flatten()\n","    self.solution[index, :] = self.comp.copy(new_sol.flatten())\n","    self.f[index] = self.calculateF(new_sol)[0]\n","    self.fitness[index] = 1 / (1 + self.f[index])\n","    self.trial[index] = 0\n","    
self.scoutBeeCounts += 1\n","\n","  def memorizeBestSource(self):\n","    index = self.comp.argmin(self.f)\n","    if self.f[index, 0] \u003c self.globalMin:\n","      self.globalMin = self.f[index, 0]\n","      self.globalParams = self.comp.copy(self.Foods[index: index + 1])\n","\n","  def calculateProbabilities(self):\n","    maxfit = self.comp.max(self.fitness)\n","    self.prob = (0.9 / maxfit * self.fitness) + 0.1\n","\n","  def sendEmployedBees(self):\n","    for i in range(self.P):  # for each clone\n","      ar = self.comp.random.rand(self.D)\n","      param2change = self.comp.where(ar \u003c self.MR)[0]\n","\n","      neighbour = self.comp.random.randint(0, self.P)\n","      while neighbour == i:\n","        neighbour = self.comp.random.randint(0, self.P)\n","\n","      self.solution[i, :] = self.comp.copy(self.Foods[i, :])\n","\n","      # random number generation between -1 and 1 values\n","      r = -1 + (1 + 1) * self.comp.random.rand()\n","      self.solution[i, param2change] = self.Foods[i, param2change] + r * (self.Foods[i, param2change] - self.Foods[neighbour, param2change])  # self.comp.copy ?\n","      self.solution[i, param2change] = self.comp.where(self.solution[i, param2change] \u003c self.lb, self.lb, self.solution[i, param2change])\n","      self.solution[i, param2change] = self.comp.where(self.solution[i, param2change] \u003e self.ub, self.ub, self.solution[i, param2change])\n","\n","  def sendOnLookerBees(self):\n","    i = 0\n","    t = 0\n","    while t \u003c self.P:\n","      if self.comp.random.rand() \u003c self.prob[i, 0]:\n","        ar = self.comp.random.rand(self.D)\n","        param2change = self.comp.where(ar \u003c self.MR)[0]\n","\n","        neighbour = self.comp.random.randint(self.P)\n","        while neighbour == i:\n","          neighbour = self.comp.random.randint(self.P)\n","\n","        self.solution[t, :] = self.comp.copy(self.Foods[i, :])\n","        # v_{ij} = x_{ij} + phi_{ij}*(x_{kj}-x_{ij})\n","        # random 
number generation between -1 and 1 values\n","        r = -1 + (1 + 1) * self.comp.random.rand()\n","        self.solution[t, param2change] = self.Foods[i, param2change] + r * (self.Foods[i, param2change] - self.Foods[neighbour, param2change])  # self.comp.copy ?\n","        self.tmpID[t] = i\n","\n","        self.solution[t, param2change] = self.comp.where(self.solution[t, param2change] \u003c self.lb, self.lb, self.solution[t, param2change])\n","        self.solution[t, param2change] = self.comp.where(self.solution[t, param2change] \u003e self.ub, self.ub, self.solution[t, param2change])\n","        t += 1\n","      i += 1\n","      if i \u003e= self.P:\n","        i = 0\n","\n","  def sendScoutBees(self):\n","    index = self.comp.argmax(self.trial)\n","    if self.trial[index] \u003e= self.limit:\n","      self.create_new(index)\n","\n","  def calculateF(self, foods):\n","    predictionMatrix = self.comp.zeros((self.X.shape[0], foods.shape[0]))\n","    predictionMatrix += foods[:,-1] # bias addition\n","    for i in range(0, self.HLS):\n","      W = foods[:, i*self.FVS : (i+1)*self.FVS].T\n","      b = foods[:, self.FVS * self.HLS + self.HLS + i]\n","\n","      if self.activation == 'sigmoid':\n","        z_i = self.sig(self.X.dot(W) + b) # Sigmoid\n","      if self.activation == 'tanh':\n","        z_i = self.comp.tanh(self.X.dot(W) + b) # TanH\n","      predictionMatrix += z_i * foods[:, self.FVS * self.HLS + i]\n","\n","    predictionMatrix = self.sig(predictionMatrix)\n","\n","    ## Mean Absolute Error - MAE\n","    f = self.comp.mean(self.comp.abs(predictionMatrix - self.y), axis=0, keepdims=True).T\n","    self.evaluationNumber += len(f)\n","    # print(f\"Eval Num: {self.evaluationNumber}\")\n","    return f\n","\n","  def ReLU(self, x):\n","    return self.comp.maximum(x, 0)\n","\n","  def sig(self, n):  # Sigmoid function\n","    return 1 / (1 + self.comp.exp(-n))"]},{"cell_type":"markdown","metadata":{"id":"Yru4pQaio6QY"},"source":["## Learn 
ABC"]},{"cell_type":"code","execution_count":4,"metadata":{"executionInfo":{"elapsed":9,"status":"ok","timestamp":1702413022158,"user":{"displayName":"Hilal ColabPro","userId":"01583771971134097379"},"user_tz":-180},"id":"b9un0cUjoL9r"},"outputs":[],"source":["class LearnABC:\n","  def __init__(self, inputX, target, hiddenLayerSize, P, limit, lb, ub, MR, parallelType, evaluationNumber, activation):\n","    self.comp = parallelType\n","    self.abc = ABC_ANN_Binary(inputX, target, hiddenLayerSize, P, limit, lb, ub, MR, parallelType, activation)\n","    self.total_numberof_evaluation = evaluationNumber\n","\n","  def learn(self):\n","    self.f_values = []\n","    self.f_values.append(self.comp.min(self.abc.f))\n","    self.abc.memorizeBestSource()\n","\n","      # sayac = 0\n","    while self.abc.evaluationNumber \u003c= self.total_numberof_evaluation:\n","      self.abc.sendEmployedBees()\n","      objValSol = self.abc.calculateF(self.abc.solution)\n","      fitnessSol = 1 / (1 + objValSol)\n","      # a greedy selection is applied between the current solution i and its mutant\n","      # If the mutant solution is better than the current solution i, replace the solution with the mutant and reset the trial counter of solution i\n","\n","      ind = self.comp.where(fitnessSol \u003e self.abc.fitness)[0]\n","      ind2 = self.comp.where(fitnessSol \u003c= self.abc.fitness)[0]\n","      self.abc.trial[ind] = 0\n","\n","      self.abc.Foods[ind, :] = self.abc.solution[ind, :]\n","      self.abc.f[ind] = objValSol[ind]\n","      self.abc.fitness[ind] = fitnessSol[ind]\n","      # if the solution i can not be improved, increase its trial counter\n","      self.abc.trial[ind2] += 1\n","\n","      self.abc.calculateProbabilities()\n","      self.abc.sendOnLookerBees()\n","\n","      objValSol = self.abc.calculateF(self.abc.solution)\n","      fitnessSol = 1 / (1 + objValSol)\n","\n","      for i in range(self.abc.P):\n","        t = self.abc.tmpID[i]\n","        if 
fitnessSol[i] \u003e self.abc.fitness[t]:\n","          self.abc.trial[t] = 0\n","          self.abc.Foods[t, :] = self.abc.solution[i, :]\n","          self.abc.f[t] = objValSol[i]\n","          self.abc.fitness[t] = fitnessSol[i]\n","        else:\n","          self.abc.trial[t] += 1\n","\n","      self.abc.memorizeBestSource()\n","      self.abc.sendScoutBees()\n","\n","      self.f_values.append(self.comp.min(self.abc.f))\n","      # sayac += 1;\n","      # if sayac % 5000 == 0: print(f\"Sayaç = {sayac}\")\n","\n","    self.net = self.abc.globalParams\n","    self.globalMin = self.abc.globalMin\n","    # print(f\"Evaluation Number: {self.abc.evaluationNumber}\")\n","    print(f\"The number of scout bees: {self.abc.scoutBeeCounts}\")"]},{"cell_type":"markdown","metadata":{"id":"7BU9f3Gz9KX5"},"source":["## ABC-ANN main"]},{"cell_type":"code","execution_count":5,"metadata":{"executionInfo":{"elapsed":9,"status":"ok","timestamp":1702413022159,"user":{"displayName":"Hilal ColabPro","userId":"01583771971134097379"},"user_tz":-180},"id":"OY4PhJSd9IWh"},"outputs":[],"source":["class ABC_LR_Model():\n","  #def __init__(self, hiddenLayerSize=3, lb=-32, ub=32, evaluationNumber=60000, limit=50, P=40, MR=0.1, thres=0.5, parallelType=None, activation='sigmoid'):\n","  def __init__(self, hiddenLayerSize, lb, ub, evaluationNumber, limit, P, MR, thres, parallelType, activation):\n","    '''\n","    lb is lower bound for parameters to be learned\n","    ub is upper bound for parameters to be learned\n","    limit determines whether a scout bee can be created.\n","    If a solution cannot be improved up to the limit number, a scout bee is created instead of the solution.\n","    '''\n","    self.lb = lb\n","    self.ub = ub\n","    self.evaluationNumber = evaluationNumber\n","    self.limit = limit\n","    self.P = P\n","    self.MR = MR\n","    self.parallelType = parallelType\n","    self.HLS = hiddenLayerSize\n","    self.thres = thres\n","    self.activation= 
activation\n","\n","  def fit(self, trainX, trainY):\n","    learn = LearnABC(trainX, trainY, self.HLS, self.P, self.limit, self.lb, self.ub, self.MR, self.parallelType, self.evaluationNumber, self.activation)\n","    learn.learn()\n","    self.net = learn.net\n","\n","  def __str__(self) -\u003e str:\n","      return f\"lb={self.lb}, ub={self.ub}, evaNumber={self.evaluationNumber}, limit={self.limit}, P={self.P}, MR={self.MR}, HLS={self.HLS}, act={self.activation}\"\n","\n","  def sig(self, x):\n","    return 1 / (1 + self.parallelType.exp(-x))\n","\n","  def ReLU(self, x):\n","    return self.parallelType.maximum(x, 0)\n","\n","  def score(self, X, y):\n","    D = X.shape[1]\n","    W1 = self.net[:, 0 : D * self.HLS].reshape((D, self.HLS), order='F')\n","    startIndexFirstBias = D * self.HLS + self.HLS\n","    endIndexFirstBias = startIndexFirstBias + self.HLS\n","    b1 = self.net[:, startIndexFirstBias : endIndexFirstBias]\n","\n","    if self.activation=='sigmoid':\n","    \tZ = self.sig(X.dot(W1) + b1) # Sigmoid\n","    if self.activation=='tanh':\n","    \tZ = self.parallelType.tanh(X.dot(W1) + b1) # TanH\n","    W2 = self.net[:, D * self.HLS : startIndexFirstBias].reshape((self.HLS, 1))\n","    b2 = self.net[:, -1]\n","    A = self.sig(Z.dot(W2) + b2)\n","    p = self.parallelType.where(A \u003e= self.thres, 1, 0) #prediction\n","    acc = self.parallelType.average(y == p)\n","    return [acc, p]\n","\n"]},{"cell_type":"markdown","metadata":{"id":"vyuLIfBCjyxv"},"source":["# Prepare Dataset"]},{"cell_type":"markdown","metadata":{"id":"lczZY-K0j_wr"},"source":["## Feature extraction and selection on UNSW-NB15"]},{"cell_type":"code","execution_count":6,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":47514,"status":"ok","timestamp":1702413069664,"user":{"displayName":"Hilal 
ColabPro","userId":"01583771971134097379"},"user_tz":-180},"id":"4LVUMEHmjyEw","outputId":"8e88ca98-e996-4d8f-c97f-97a7c439839d"},"outputs":[{"name":"stdout","output_type":"stream","text":["Sparsity ratio train set: 0.8614705467794591\n","Sparsity ratio test set: 0.8572446104225103\n"]}],"source":["#------------Load UNSW-NB15 Dataset---------------------------\n","trainData = pd.DataFrame(arff.loadarff('/content/gdrive/My Drive/federated/datasets/2015/Train.arff')[0])\n","testData = pd.DataFrame(arff.loadarff('/content/gdrive/My Drive/federated/datasets/2015/Test.arff')[0])\n","\n","trainLabels = trainData.iloc[:,-1].astype(\"int32\").values.reshape(-1, 1)\n","testLabels = testData.iloc[:,-1].astype(\"int32\").values.reshape(-1, 1)\n","\n","trainData = trainData.iloc[:,:-1].values\n","testData = testData.iloc[:,:-1].values\n","\n","# Sparcity ratios\n","print(f\"Sparsity ratio train set: {np.sum(trainData == 0) / (trainData.shape[0] * trainData.shape[1])}\")\n","print(f\"Sparsity ratio test set: {np.sum(testData == 0) / (testData.shape[0] * testData.shape[1])}\")\n","\n","# scaler = Normalizer()\n","# scaler = QuantileTransformer(random_state=0)\n","# scaler = MinMaxScaler()\n","# scaler = RobustScaler()\n","# scaler = StandardScaler()\n","scaler = MaxAbsScaler()\n","trainData = scaler.fit_transform(trainData)\n","testData = scaler.transform(testData)"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"S7f3VBE-NtOa","outputId":"8e61fb8c-a4e6-4e04-aab9-418b62ba36e6"},"outputs":[{"name":"stdout","output_type":"stream","text":["\r  0%|          | 0/100 [00:00\u003c?, ?trial/s, best loss=?]"]},{"name":"stderr","output_type":"stream","text":["WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.\n"]},{"name":"stdout","output_type":"stream","text":["Model: 
\"model\"\n","\n","_________________________________________________________________\n","\n"," Layer (type)                Output Shape              Param #   \n","\n","=================================================================\n","\n"," input_1 (InputLayer)        [(None, 196)]             0         \n","\n"," dense (Dense)               (None, 42)                8274      \n","\n"," batch_normalization (Batch  (None, 42)                168       \n","\n"," Normalization)                                                  \n","\n"," dropout (Dropout)           (None, 42)                0         \n","\n"," dense_1 (Dense)             (None, 92)                3956      \n","\n"," batch_normalization_1 (Bat  (None, 92)                368       \n","\n"," chNormalization)                                                \n","\n"," dense_2 (Dense)             (None, 42)                3906      \n","\n"," batch_normalization_2 (Bat  (None, 42)                168       \n","\n"," chNormalization)                                                \n","\n"," dropout_1 (Dropout)         (None, 42)                0         \n","\n"," dense_3 (Dense)             (None, 196)               8428      \n","\n","=================================================================\n","\n","Total params: 25268 (98.70 KB)\n","\n","Trainable params: 24916 (97.33 KB)\n","\n","Non-trainable params: 352 (1.38 KB)\n","\n","_________________________________________________________________\n","\n","Epoch 1/812\n","\n","  1/160 [..............................] - ETA: 22:44 - loss: 0.2615\n","  8/160 [\u003e.............................] - ETA: 1s - loss: 0.2545   \n"," 14/160 [=\u003e............................] - ETA: 1s - loss: 0.2497\n"," 21/160 [==\u003e...........................] - ETA: 1s - loss: 0.2449\n"," 27/160 [====\u003e.........................] - ETA: 1s - loss: 0.2417\n"," 34/160 [=====\u003e........................] 
- ETA: 1s - loss: 0.2384\n"," 40/160 [======\u003e.......................] - ETA: 1s - loss: 0.2360\n"," 46/160 [=======\u003e......................] - ETA: 0s - loss: 0.2336\n"," 53/160 [========\u003e.....................] - ETA: 0s - loss: 0.2307\n"," 60/160 [==========\u003e...................] - ETA: 0s - loss: 0.2277\n"," 67/160 [===========\u003e..................] - ETA: 0s - loss: 0.2243\n"," 74/160 [============\u003e.................] - ETA: 0s - loss: 0.2206\n"," 81/160 [==============\u003e...............] - ETA: 0s - loss: 0.2165\n"," 88/160 [===============\u003e..............] - ETA: 0s - loss: 0.2120\n"," 95/160 [================\u003e.............] - ETA: 0s - loss: 0.2071\n","103/160 [==================\u003e...........] - ETA: 0s - loss: 0.2011\n","110/160 [===================\u003e..........] - ETA: 0s - loss: 0.1956\n","117/160 [====================\u003e.........] - ETA: 0s - loss: 0.1899\n","124/160 [======================\u003e.......] - ETA: 0s - loss: 0.1841\n","130/160 [=======================\u003e......] - ETA: 0s - loss: 0.1791\n","137/160 [========================\u003e.....] - ETA: 0s - loss: 0.1734\n","144/160 [==========================\u003e...] - ETA: 0s - loss: 0.1677\n","150/160 [===========================\u003e..] - ETA: 0s - loss: 0.1630\n","157/160 [============================\u003e.] - ETA: 0s - loss: 0.1576\n","160/160 [==============================] - 10s 11ms/step - loss: 0.1557 - val_loss: 0.0477\n","\n","Epoch 2/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 0.0372\n","  8/160 [\u003e.............................] - ETA: 1s - loss: 0.0347\n"," 15/160 [=\u003e............................] - ETA: 1s - loss: 0.0326\n"," 22/160 [===\u003e..........................] - ETA: 1s - loss: 0.0308\n"," 29/160 [====\u003e.........................] - ETA: 0s - loss: 0.0291\n"," 36/160 [=====\u003e........................] - ETA: 0s - loss: 0.0276\n"," 43/160 [=======\u003e......................] 
- ETA: 0s - loss: 0.0262\n"," 51/160 [========\u003e.....................] - ETA: 0s - loss: 0.0248\n"," 58/160 [=========\u003e....................] - ETA: 0s - loss: 0.0236\n"," 63/160 [==========\u003e...................] - ETA: 0s - loss: 0.0229\n"," 70/160 [============\u003e.................] - ETA: 0s - loss: 0.0220\n"," 77/160 [=============\u003e................] - ETA: 0s - loss: 0.0211\n"," 84/160 [==============\u003e...............] - ETA: 0s - loss: 0.0203\n"," 91/160 [================\u003e.............] - ETA: 0s - loss: 0.0196\n"," 98/160 [=================\u003e............] - ETA: 0s - loss: 0.0189\n","105/160 [==================\u003e...........] - ETA: 0s - loss: 0.0182\n","112/160 [====================\u003e.........] - ETA: 0s - loss: 0.0176\n","120/160 [=====================\u003e........] - ETA: 0s - loss: 0.0170\n","127/160 [======================\u003e.......] - ETA: 0s - loss: 0.0165\n","134/160 [========================\u003e.....] - ETA: 0s - loss: 0.0160\n","141/160 [=========================\u003e....] - ETA: 0s - loss: 0.0156\n","147/160 [==========================\u003e...] - ETA: 0s - loss: 0.0152\n","153/160 [===========================\u003e..] - ETA: 0s - loss: 0.0149\n","160/160 [==============================] - ETA: 0s - loss: 0.0145\n","160/160 [==============================] - 1s 8ms/step - loss: 0.0145 - val_loss: 0.0145\n","\n","Epoch 3/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 0.0061\n","  8/160 [\u003e.............................] - ETA: 1s - loss: 0.0059\n"," 14/160 [=\u003e............................] - ETA: 1s - loss: 0.0058\n"," 21/160 [==\u003e...........................] - ETA: 1s - loss: 0.0057\n"," 28/160 [====\u003e.........................] - ETA: 1s - loss: 0.0056\n"," 35/160 [=====\u003e........................] - ETA: 0s - loss: 0.0055\n"," 41/160 [======\u003e.......................] - ETA: 0s - loss: 0.0054\n"," 48/160 [========\u003e.....................] 
- ETA: 0s - loss: 0.0053\n"," 55/160 [=========\u003e....................] - ETA: 0s - loss: 0.0052\n"," 61/160 [==========\u003e...................] - ETA: 0s - loss: 0.0051\n"," 68/160 [===========\u003e..................] - ETA: 0s - loss: 0.0050\n"," 74/160 [============\u003e.................] - ETA: 0s - loss: 0.0050\n"," 81/160 [==============\u003e...............] - ETA: 0s - loss: 0.0049\n"," 88/160 [===============\u003e..............] - ETA: 0s - loss: 0.0048\n"," 95/160 [================\u003e.............] - ETA: 0s - loss: 0.0047\n","102/160 [==================\u003e...........] - ETA: 0s - loss: 0.0047\n","109/160 [===================\u003e..........] - ETA: 0s - loss: 0.0046\n","116/160 [====================\u003e.........] - ETA: 0s - loss: 0.0045\n","123/160 [======================\u003e.......] - ETA: 0s - loss: 0.0045\n","130/160 [=======================\u003e......] - ETA: 0s - loss: 0.0044\n","137/160 [========================\u003e.....] - ETA: 0s - loss: 0.0043\n","144/160 [==========================\u003e...] - ETA: 0s - loss: 0.0043\n","149/160 [==========================\u003e...] - ETA: 0s - loss: 0.0042\n","155/160 [============================\u003e.] - ETA: 0s - loss: 0.0042\n","160/160 [==============================] - 1s 9ms/step - loss: 0.0042 - val_loss: 0.0067\n","\n","Epoch 4/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 0.0029\n","  8/160 [\u003e.............................] - ETA: 1s - loss: 0.0029\n"," 14/160 [=\u003e............................] - ETA: 1s - loss: 0.0029\n"," 20/160 [==\u003e...........................] - ETA: 1s - loss: 0.0028\n"," 26/160 [===\u003e..........................] - ETA: 1s - loss: 0.0028\n"," 33/160 [=====\u003e........................] - ETA: 1s - loss: 0.0028\n"," 40/160 [======\u003e.......................] - ETA: 0s - loss: 0.0027\n"," 46/160 [=======\u003e......................] - ETA: 0s - loss: 0.0027\n"," 53/160 [========\u003e.....................] 
- ETA: 0s - loss: 0.0027\n"," 60/160 [==========\u003e...................] - ETA: 0s - loss: 0.0027\n"," 67/160 [===========\u003e..................] - ETA: 0s - loss: 0.0026\n"," 74/160 [============\u003e.................] - ETA: 0s - loss: 0.0026\n"," 80/160 [==============\u003e...............] - ETA: 0s - loss: 0.0026\n"," 87/160 [===============\u003e..............] - ETA: 0s - loss: 0.0026\n"," 92/160 [================\u003e.............] - ETA: 0s - loss: 0.0025\n"," 98/160 [=================\u003e............] - ETA: 0s - loss: 0.0025\n","104/160 [==================\u003e...........] - ETA: 0s - loss: 0.0025\n","111/160 [===================\u003e..........] - ETA: 0s - loss: 0.0025\n","118/160 [=====================\u003e........] - ETA: 0s - loss: 0.0025\n","124/160 [======================\u003e.......] - ETA: 0s - loss: 0.0024\n","131/160 [=======================\u003e......] - ETA: 0s - loss: 0.0024\n","135/160 [========================\u003e.....] - ETA: 0s - loss: 0.0024\n","140/160 [=========================\u003e....] - ETA: 0s - loss: 0.0024\n","145/160 [==========================\u003e...] - ETA: 0s - loss: 0.0024\n","149/160 [==========================\u003e...] - ETA: 0s - loss: 0.0024\n","154/160 [===========================\u003e..] - ETA: 0s - loss: 0.0024\n","159/160 [============================\u003e.] - ETA: 0s - loss: 0.0023\n","160/160 [==============================] - 2s 10ms/step - loss: 0.0023 - val_loss: 0.0023\n","\n","Epoch 5/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 0.0020\n","  6/160 [\u003e.............................] - ETA: 1s - loss: 0.0019\n"," 11/160 [=\u003e............................] - ETA: 1s - loss: 0.0019\n"," 15/160 [=\u003e............................] - ETA: 1s - loss: 0.0019\n"," 19/160 [==\u003e...........................] - ETA: 1s - loss: 0.0019\n"," 24/160 [===\u003e..........................] - ETA: 1s - loss: 0.0019\n"," 29/160 [====\u003e.........................] 
- ETA: 1s - loss: 0.0018\n"," 34/160 [=====\u003e........................] - ETA: 1s - loss: 0.0018\n"," 39/160 [======\u003e.......................] - ETA: 1s - loss: 0.0018\n"," 45/160 [=======\u003e......................] - ETA: 1s - loss: 0.0018\n"," 51/160 [========\u003e.....................] - ETA: 1s - loss: 0.0018\n"," 57/160 [=========\u003e....................] - ETA: 1s - loss: 0.0018\n"," 62/160 [==========\u003e...................] - ETA: 1s - loss: 0.0018\n"," 67/160 [===========\u003e..................] - ETA: 0s - loss: 0.0018\n"," 72/160 [============\u003e.................] - ETA: 0s - loss: 0.0018\n"," 77/160 [=============\u003e................] - ETA: 0s - loss: 0.0018\n"," 82/160 [==============\u003e...............] - ETA: 0s - loss: 0.0018\n"," 87/160 [===============\u003e..............] - ETA: 0s - loss: 0.0017\n"," 92/160 [================\u003e.............] - ETA: 0s - loss: 0.0017\n"," 97/160 [=================\u003e............] - ETA: 0s - loss: 0.0017\n","102/160 [==================\u003e...........] - ETA: 0s - loss: 0.0017\n","107/160 [===================\u003e..........] - ETA: 0s - loss: 0.0017\n","112/160 [====================\u003e.........] - ETA: 0s - loss: 0.0017\n","117/160 [====================\u003e.........] - ETA: 0s - loss: 0.0017\n","121/160 [=====================\u003e........] - ETA: 0s - loss: 0.0017\n","126/160 [======================\u003e.......] - ETA: 0s - loss: 0.0017\n","132/160 [=======================\u003e......] - ETA: 0s - loss: 0.0017\n","138/160 [========================\u003e.....] - ETA: 0s - loss: 0.0017\n","144/160 [==========================\u003e...] - ETA: 0s - loss: 0.0017\n","150/160 [===========================\u003e..] - ETA: 0s - loss: 0.0017\n","156/160 [============================\u003e.] - ETA: 0s - loss: 0.0016\n","160/160 [==============================] - 2s 11ms/step - loss: 0.0016 - val_loss: 0.0013\n","\n","Epoch 6/812\n","\n","  1/160 [..............................] 
- ETA: 4s - loss: 0.0014\n","  6/160 [\u003e.............................] - ETA: 1s - loss: 0.0014\n"," 11/160 [=\u003e............................] - ETA: 1s - loss: 0.0014\n"," 16/160 [==\u003e...........................] - ETA: 1s - loss: 0.0014\n"," 21/160 [==\u003e...........................] - ETA: 1s - loss: 0.0014\n"," 26/160 [===\u003e..........................] - ETA: 1s - loss: 0.0014\n"," 31/160 [====\u003e.........................] - ETA: 1s - loss: 0.0014\n"," 36/160 [=====\u003e........................] - ETA: 1s - loss: 0.0014\n"," 41/160 [======\u003e.......................] - ETA: 1s - loss: 0.0014\n"," 46/160 [=======\u003e......................] - ETA: 1s - loss: 0.0014\n"," 50/160 [========\u003e.....................] - ETA: 1s - loss: 0.0014\n"," 54/160 [=========\u003e....................] - ETA: 1s - loss: 0.0014\n"," 59/160 [==========\u003e...................] - ETA: 1s - loss: 0.0014\n"," 64/160 [===========\u003e..................] - ETA: 1s - loss: 0.0014\n"," 69/160 [===========\u003e..................] - ETA: 0s - loss: 0.0014\n"," 74/160 [============\u003e.................] - ETA: 0s - loss: 0.0014\n"," 79/160 [=============\u003e................] - ETA: 0s - loss: 0.0014\n"," 84/160 [==============\u003e...............] - ETA: 0s - loss: 0.0014\n"," 89/160 [===============\u003e..............] - ETA: 0s - loss: 0.0014\n"," 94/160 [================\u003e.............] - ETA: 0s - loss: 0.0014\n","100/160 [=================\u003e............] - ETA: 0s - loss: 0.0013\n","105/160 [==================\u003e...........] - ETA: 0s - loss: 0.0013\n","110/160 [===================\u003e..........] - ETA: 0s - loss: 0.0013\n","115/160 [====================\u003e.........] - ETA: 0s - loss: 0.0013\n","120/160 [=====================\u003e........] - ETA: 0s - loss: 0.0013\n","126/160 [======================\u003e.......] - ETA: 0s - loss: 0.0013\n","131/160 [=======================\u003e......] 
- ETA: 0s - loss: 0.0013\n","136/160 [========================\u003e.....] - ETA: 0s - loss: 0.0013\n","140/160 [=========================\u003e....] - ETA: 0s - loss: 0.0013\n","144/160 [==========================\u003e...] - ETA: 0s - loss: 0.0013\n","148/160 [==========================\u003e...] - ETA: 0s - loss: 0.0013\n","153/160 [===========================\u003e..] - ETA: 0s - loss: 0.0013\n","158/160 [============================\u003e.] - ETA: 0s - loss: 0.0013\n","160/160 [==============================] - 2s 12ms/step - loss: 0.0013 - val_loss: 9.6043e-04\n","\n","Epoch 7/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 0.0012\n","  6/160 [\u003e.............................] - ETA: 1s - loss: 0.0012\n"," 12/160 [=\u003e............................] - ETA: 1s - loss: 0.0012\n"," 18/160 [==\u003e...........................] - ETA: 1s - loss: 0.0012\n"," 23/160 [===\u003e..........................] - ETA: 1s - loss: 0.0012\n"," 28/160 [====\u003e.........................] - ETA: 1s - loss: 0.0012\n"," 33/160 [=====\u003e........................] - ETA: 1s - loss: 0.0012\n"," 38/160 [======\u003e.......................] - ETA: 1s - loss: 0.0012\n"," 43/160 [=======\u003e......................] - ETA: 1s - loss: 0.0012\n"," 48/160 [========\u003e.....................] - ETA: 1s - loss: 0.0012\n"," 53/160 [========\u003e.....................] - ETA: 1s - loss: 0.0012\n"," 59/160 [==========\u003e...................] - ETA: 1s - loss: 0.0012\n"," 64/160 [===========\u003e..................] - ETA: 0s - loss: 0.0012\n"," 69/160 [===========\u003e..................] - ETA: 0s - loss: 0.0011\n"," 75/160 [=============\u003e................] - ETA: 0s - loss: 0.0011\n"," 80/160 [==============\u003e...............] - ETA: 0s - loss: 0.0011\n"," 85/160 [==============\u003e...............] - ETA: 0s - loss: 0.0011\n"," 89/160 [===============\u003e..............] - ETA: 0s - loss: 0.0011\n"," 93/160 [================\u003e.............] 
- ETA: 0s - loss: 0.0011\n"," 98/160 [=================\u003e............] - ETA: 0s - loss: 0.0011\n","103/160 [==================\u003e...........] - ETA: 0s - loss: 0.0011\n","108/160 [===================\u003e..........] - ETA: 0s - loss: 0.0011\n","113/160 [====================\u003e.........] - ETA: 0s - loss: 0.0011\n","118/160 [=====================\u003e........] - ETA: 0s - loss: 0.0011\n","123/160 [======================\u003e.......] - ETA: 0s - loss: 0.0011\n","128/160 [=======================\u003e......] - ETA: 0s - loss: 0.0011\n","132/160 [=======================\u003e......] - ETA: 0s - loss: 0.0011\n","137/160 [========================\u003e.....] - ETA: 0s - loss: 0.0011\n","143/160 [=========================\u003e....] - ETA: 0s - loss: 0.0011\n","148/160 [==========================\u003e...] - ETA: 0s - loss: 0.0011\n","153/160 [===========================\u003e..] - ETA: 0s - loss: 0.0011\n","158/160 [============================\u003e.] - ETA: 0s - loss: 0.0011\n","160/160 [==============================] - 2s 12ms/step - loss: 0.0011 - val_loss: 8.4831e-04\n","\n","Epoch 8/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 0.0011\n","  6/160 [\u003e.............................] - ETA: 1s - loss: 0.0010\n"," 12/160 [=\u003e............................] - ETA: 1s - loss: 0.0010\n"," 18/160 [==\u003e...........................] - ETA: 1s - loss: 0.0010\n"," 23/160 [===\u003e..........................] - ETA: 1s - loss: 0.0010\n"," 29/160 [====\u003e.........................] - ETA: 1s - loss: 0.0010\n"," 35/160 [=====\u003e........................] - ETA: 1s - loss: 0.0010\n"," 41/160 [======\u003e.......................] - ETA: 1s - loss: 0.0010\n"," 47/160 [=======\u003e......................] - ETA: 1s - loss: 0.0010\n"," 53/160 [========\u003e.....................] - ETA: 1s - loss: 0.0010\n"," 59/160 [==========\u003e...................] - ETA: 0s - loss: 0.0010\n"," 65/160 [===========\u003e..................] 
- ETA: 0s - loss: 0.0010\n"," 71/160 [============\u003e.................] - ETA: 0s - loss: 0.0010\n"," 76/160 [=============\u003e................] - ETA: 0s - loss: 0.0010\n"," 82/160 [==============\u003e...............] - ETA: 0s - loss: 0.0010\n"," 88/160 [===============\u003e..............] - ETA: 0s - loss: 0.0010\n"," 92/160 [================\u003e.............] - ETA: 0s - loss: 9.9858e-04\n"," 97/160 [=================\u003e............] - ETA: 0s - loss: 9.9583e-04\n","102/160 [==================\u003e...........] - ETA: 0s - loss: 9.9437e-04\n","108/160 [===================\u003e..........] - ETA: 0s - loss: 9.9333e-04\n","115/160 [====================\u003e.........] - ETA: 0s - loss: 9.9343e-04\n","121/160 [=====================\u003e........] - ETA: 0s - loss: 9.9060e-04\n","126/160 [======================\u003e.......] - ETA: 0s - loss: 9.8792e-04\n","131/160 [=======================\u003e......] - ETA: 0s - loss: 9.8373e-04\n","136/160 [========================\u003e.....] - ETA: 0s - loss: 9.8226e-04\n","140/160 [=========================\u003e....] - ETA: 0s - loss: 9.8027e-04\n","145/160 [==========================\u003e...] - ETA: 0s - loss: 9.7977e-04\n","149/160 [==========================\u003e...] - ETA: 0s - loss: 9.7844e-04\n","154/160 [===========================\u003e..] - ETA: 0s - loss: 9.7850e-04\n","159/160 [============================\u003e.] - ETA: 0s - loss: 9.7694e-04\n","160/160 [==============================] - 2s 11ms/step - loss: 9.7631e-04 - val_loss: 7.1551e-04\n","\n","Epoch 9/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 9.6665e-04\n","  5/160 [..............................] - ETA: 1s - loss: 9.1018e-04\n"," 11/160 [=\u003e............................] - ETA: 1s - loss: 8.9828e-04\n"," 15/160 [=\u003e............................] - ETA: 1s - loss: 9.0967e-04\n"," 21/160 [==\u003e...........................] - ETA: 1s - loss: 9.0519e-04\n"," 27/160 [====\u003e.........................] 
- ETA: 1s - loss: 9.0273e-04\n"," 33/160 [=====\u003e........................] - ETA: 1s - loss: 8.9979e-04\n"," 38/160 [======\u003e.......................] - ETA: 1s - loss: 9.0151e-04\n"," 45/160 [=======\u003e......................] - ETA: 1s - loss: 9.0439e-04\n"," 52/160 [========\u003e.....................] - ETA: 1s - loss: 9.0990e-04\n"," 59/160 [==========\u003e...................] - ETA: 0s - loss: 9.0454e-04\n"," 66/160 [===========\u003e..................] - ETA: 0s - loss: 9.0540e-04\n"," 73/160 [============\u003e.................] - ETA: 0s - loss: 9.0449e-04\n"," 79/160 [=============\u003e................] - ETA: 0s - loss: 9.0283e-04\n"," 85/160 [==============\u003e...............] - ETA: 0s - loss: 9.0198e-04\n"," 92/160 [================\u003e.............] - ETA: 0s - loss: 9.0065e-04\n"," 99/160 [=================\u003e............] - ETA: 0s - loss: 9.0061e-04\n","105/160 [==================\u003e...........] - ETA: 0s - loss: 8.9943e-04\n","111/160 [===================\u003e..........] - ETA: 0s - loss: 9.0016e-04\n","117/160 [====================\u003e.........] - ETA: 0s - loss: 9.0115e-04\n","124/160 [======================\u003e.......] - ETA: 0s - loss: 8.9985e-04\n","131/160 [=======================\u003e......] - ETA: 0s - loss: 8.9859e-04\n","135/160 [========================\u003e.....] - ETA: 0s - loss: 8.9765e-04\n","141/160 [=========================\u003e....] - ETA: 0s - loss: 8.9590e-04\n","147/160 [==========================\u003e...] - ETA: 0s - loss: 8.9351e-04\n","153/160 [===========================\u003e..] - ETA: 0s - loss: 8.9148e-04\n","159/160 [============================\u003e.] - ETA: 0s - loss: 8.9038e-04\n","160/160 [==============================] - 2s 10ms/step - loss: 8.9003e-04 - val_loss: 7.0306e-04\n","\n","Epoch 10/812\n","\n","  1/160 [..............................] - ETA: 4s - loss: 8.7476e-04\n","  7/160 [\u003e.............................] 
- ETA: 1s - loss: 8.6663e-04\n"," 14/160 [=\u003e............................] - ETA: 1s - loss: 8.7198e-04\n"," 21/160 [==\u003e...........................] - ETA: 1s - loss: 8.7063e-04\n"," 28/160 [====\u003e.........................] - ETA: 1s - loss: 8.6682e-04\n"," 35/160 [=====\u003e........................] - ETA: 0s - loss: 8.5709e-04\n"," 41/160 [======\u003e.......................] - ETA: 0s - loss: 8.5645e-04\n"," 47/160 [=======\u003e......................] - ETA: 0s - loss: 8.5103e-04\n"," 53/160 [========\u003e.....................] - ETA: 0s - loss: 8.5123e-04\n"," 60/160 [==========\u003e...................] - ETA: 0s - loss: 8.5083e-04\n"," 67/160 [===========\u003e..................] - ETA: 0s - loss: 8.4826e-04\n"," 74/160 [============\u003e.................] - ETA: 0s - loss: 8.4519e-04\n"," 79/160 [=============\u003e................] - ETA: 0s - loss: 8.4410e-04\n"," 86/160 [===============\u003e..............] - ETA: 0s - loss: 8.4324e-04\n"," 93/160 [================\u003e.............] - ETA: 0s - loss: 8.4402e-04\n","100/160 [=================\u003e............] - ETA: 0s - loss: 8.4343e-04\n","107/160 [===================\u003e..........] - ETA: 0s - loss: 8.4328e-04\n","114/160 [====================\u003e.........] - ETA: 0s - loss: 8.4034e-04\n","119/160 [=====================\u003e........] - ETA: 0s - loss: 8.4195e-04\n","126/160 [======================\u003e.......] - ETA: 0s - loss: 8.4040e-04\n","133/160 [=======================\u003e......] - ETA: 0s - loss: 8.3869e-04\n","139/160 [=========================\u003e....] - ETA: 0s - loss: 8.3728e-04\n","145/160 [==========================\u003e...] - ETA: 0s - loss: 8.3669e-04\n","151/160 [===========================\u003e..] - ETA: 0s - loss: 8.3608e-04\n","157/160 [============================\u003e.] 
- ETA: 0s - loss: 8.3512e-04\n","160/160 [==============================] - 1s 9ms/step - loss: 8.3437e-04 - val_loss: 6.4723e-04\n","\n","Epoch 11/812\n","\n","  1/160 [..............................] - ETA: 3s - loss: 7.6317e-04\n","  7/160 [\u003e.............................] - ETA: 1s - loss: 8.5616e-04\n"," 13/160 [=\u003e............................] - ETA: 1s - loss: 8.2692e-04\n"," 19/160 [==\u003e...........................] - ETA: 1s - loss: 8.1522e-04\n"," 24/160 [===\u003e..........................] - ETA: 1s - loss: 8.1714e-04\n"," 31/160 [====\u003e.........................] - ETA: 1s - loss: 8.1444e-04\n"," 38/160 [======\u003e.......................] - ETA: 1s - loss: 8.1429e-04\n"," 45/160 [=======\u003e......................] - ETA: 0s - loss: 8.1351e-04\n"," 52/160 [========\u003e.....................] - ETA: 0s - loss: 8.0945e-04\n"," 58/160 [=========\u003e....................] - ETA: 0s - loss: 8.0601e-04\n"," 65/160 [===========\u003e..................] - ETA: 0s - loss: 8.0756e-04\n"," 71/160 [============\u003e.................] - ETA: 0s - loss: 8.0785e-04\n"," 77/160 [=============\u003e................] - ETA: 0s - loss: 8.0697e-04\n"," 83/160 [==============\u003e...............] - ETA: 0s - loss: 8.0478e-04\n"," 89/160 [===============\u003e..............] - ETA: 0s - loss: 8.0243e-04\n"," 96/160 [=================\u003e............] - ETA: 0s - loss: 8.0098e-04\n","102/160 [==================\u003e...........] - ETA: 0s - loss: 7.9962e-04\n","108/160 [===================\u003e..........] - ETA: 0s - loss: 7.9899e-04\n","115/160 [====================\u003e.........] - ETA: 0s - loss: 7.9795e-04\n","122/160 [=====================\u003e........] - ETA: 0s - loss: 7.9813e-04\n","129/160 [=======================\u003e......] - ETA: 0s - loss: 7.9491e-04\n","136/160 [========================\u003e.....] - ETA: 0s - loss: 7.9433e-04\n","143/160 [=========================\u003e....] 
- ETA: 0s - loss: 7.9311e-04\n","148/160 [==========================\u003e...] - ETA: 0s - loss: 7.9072e-04\n","155/160 [============================\u003e.] - ETA: 0s - loss: 7.8891e-04\n","160/160 [==============================] - 1s 9ms/step - loss: 7.8828e-04 - val_loss: 5.8173e-04\n","\n","Epoch 12/812\n","\n","  1/160 [..............................] - ETA: 3s - loss: 7.2742e-04\n","  8/160 [\u003e.............................] - ETA: 1s - loss: 7.9269e-04\n"," 13/160 [=\u003e............................] - ETA: 1s - loss: 7.8145e-04\n"," 19/160 [==\u003e...........................] - ETA: 1s - loss: 7.7538e-04\n"," 26/160 [===\u003e..........................] - ETA: 1s - loss: 7.6689e-04\n"," 32/160 [=====\u003e........................] - ETA: 1s - loss: 7.6667e-04\n"," 39/160 [======\u003e.......................] - ETA: 1s - loss: 7.6577e-04\n"," 46/160 [=======\u003e......................] - ETA: 0s - loss: 7.6781e-04\n"," 52/160 [========\u003e.....................] - ETA: 0s - loss: 7.6207e-04\n"," 59/160 [==========\u003e...................] - ETA: 0s - loss: 7.5915e-04\n"," 66/160 [===========\u003e..................] - ETA: 0s - loss: 7.5621e-04\n"," 72/160 [============\u003e.................] - ETA: 0s - loss: 7.5494e-04\n"," 78/160 [=============\u003e................] - ETA: 0s - loss: 7.5595e-04\n"," 84/160 [==============\u003e...............] - ETA: 0s - loss: 7.5622e-04\n"," 91/160 [================\u003e.............] - ETA: 0s - loss: 7.5639e-04\n"," 96/160 [=================\u003e............] - ETA: 0s - loss: 7.5511e-04\n","102/160 [==================\u003e...........] - ETA: 0s - loss: 7.5588e-04\n","109/160 [===================\u003e..........] - ETA: 0s - loss: 7.5494e-04\n","116/160 [====================\u003e.........] - ETA: 0s - loss: 7.5426e-04\n","123/160 [======================\u003e.......] - ETA: 0s - loss: 7.5268e-04\n","130/160 [=======================\u003e......] 
- ETA: 0s - loss: 7.5341e-04\n","136/160 [========================\u003e.....] - ETA: 0s - loss: 7.5189e-04\n","143/160 [=========================\u003e....] - ETA: 0s - loss: 7.5127e-04\n","150/160 [===========================\u003e..] - ETA: 0s - loss: 7.5243e-04\n","157/160 [============================\u003e.] - ETA: 0s - loss: 7.5129e-04\n","160/160 [==============================] - 2s 9ms/step - loss: 7.5061e-04 - val_loss: 5.6035e-04\n","\n","Epoch 13/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 7.4227e-04\n","  7/160 [\u003e.............................] - ETA: 1s - loss: 7.5991e-04\n"," 13/160 [=\u003e............................] - ETA: 1s - loss: 7.3207e-04\n"," 20/160 [==\u003e...........................] - ETA: 1s - loss: 7.3241e-04\n"," 26/160 [===\u003e..........................] - ETA: 1s - loss: 7.3243e-04\n"," 30/160 [====\u003e.........................] - ETA: 1s - loss: 7.3298e-04\n"," 35/160 [=====\u003e........................] - ETA: 1s - loss: 7.3113e-04\n"," 41/160 [======\u003e.......................] - ETA: 1s - loss: 7.2634e-04\n"," 48/160 [========\u003e.....................] - ETA: 1s - loss: 7.2452e-04\n"," 54/160 [=========\u003e....................] - ETA: 0s - loss: 7.2599e-04\n"," 62/160 [==========\u003e...................] - ETA: 0s - loss: 7.2675e-04\n"," 68/160 [===========\u003e..................] - ETA: 0s - loss: 7.2522e-04\n"," 75/160 [=============\u003e................] - ETA: 0s - loss: 7.2345e-04\n"," 82/160 [==============\u003e...............] - ETA: 0s - loss: 7.2261e-04\n"," 89/160 [===============\u003e..............] - ETA: 0s - loss: 7.2342e-04\n"," 95/160 [================\u003e.............] - ETA: 0s - loss: 7.2244e-04\n","100/160 [=================\u003e............] - ETA: 0s - loss: 7.2293e-04\n","107/160 [===================\u003e..........] - ETA: 0s - loss: 7.2038e-04\n","113/160 [====================\u003e.........] 
- ETA: 0s - loss: 7.2218e-04\n","119/160 [=====================\u003e........] - ETA: 0s - loss: 7.2338e-04\n","125/160 [======================\u003e.......] - ETA: 0s - loss: 7.2426e-04\n","131/160 [=======================\u003e......] - ETA: 0s - loss: 7.2629e-04\n","137/160 [========================\u003e.....] - ETA: 0s - loss: 7.2569e-04\n","143/160 [=========================\u003e....] - ETA: 0s - loss: 7.2410e-04\n","150/160 [===========================\u003e..] - ETA: 0s - loss: 7.2244e-04\n","156/160 [============================\u003e.] - ETA: 0s - loss: 7.2236e-04\n","160/160 [==============================] - 1s 9ms/step - loss: 7.2126e-04 - val_loss: 5.5383e-04\n","\n","Epoch 14/812\n","\n","  1/160 [..............................] - ETA: 3s - loss: 8.5445e-04\n","  8/160 [\u003e.............................] - ETA: 1s - loss: 6.9664e-04\n"," 15/160 [=\u003e............................] - ETA: 1s - loss: 7.0535e-04\n"," 21/160 [==\u003e...........................] - ETA: 1s - loss: 7.1238e-04\n"," 28/160 [====\u003e.........................] - ETA: 1s - loss: 7.0840e-04\n"," 34/160 [=====\u003e........................] - ETA: 0s - loss: 7.0792e-04\n"," 41/160 [======\u003e.......................] - ETA: 0s - loss: 7.0814e-04\n"," 47/160 [=======\u003e......................] - ETA: 0s - loss: 7.0538e-04\n"," 53/160 [========\u003e.....................] - ETA: 0s - loss: 7.0608e-04\n"," 60/160 [==========\u003e...................] - ETA: 0s - loss: 7.1163e-04\n"," 66/160 [===========\u003e..................] - ETA: 0s - loss: 7.1107e-04\n"," 72/160 [============\u003e.................] - ETA: 0s - loss: 7.1030e-04\n"," 79/160 [=============\u003e................] - ETA: 0s - loss: 7.0816e-04\n"," 86/160 [===============\u003e..............] - ETA: 0s - loss: 7.0880e-04\n"," 92/160 [================\u003e.............] - ETA: 0s - loss: 7.1050e-04\n"," 98/160 [=================\u003e............] 
- ETA: 0s - loss: 7.0984e-04\n","103/160 [==================\u003e...........] - ETA: 0s - loss: 7.0945e-04\n","109/160 [===================\u003e..........] - ETA: 0s - loss: 7.0979e-04\n","116/160 [====================\u003e.........] - ETA: 0s - loss: 7.0920e-04\n","123/160 [======================\u003e.......] - ETA: 0s - loss: 7.0889e-04\n","130/160 [=======================\u003e......] - ETA: 0s - loss: 7.0744e-04\n","136/160 [========================\u003e.....] - ETA: 0s - loss: 7.0547e-04\n","142/160 [=========================\u003e....] - ETA: 0s - loss: 7.0479e-04\n","148/160 [==========================\u003e...] - ETA: 0s - loss: 7.0362e-04\n","154/160 [===========================\u003e..] - ETA: 0s - loss: 7.0231e-04\n","160/160 [==============================] - ETA: 0s - loss: 7.0277e-04\n","160/160 [==============================] - 2s 9ms/step - loss: 7.0277e-04 - val_loss: 5.1069e-04\n","\n","Epoch 15/812\n","\n","  1/160 [..............................] - ETA: 3s - loss: 7.0924e-04\n","  7/160 [\u003e.............................] - ETA: 1s - loss: 6.8974e-04\n"," 13/160 [=\u003e............................] - ETA: 1s - loss: 6.8797e-04\n"," 19/160 [==\u003e...........................] - ETA: 1s - loss: 6.9013e-04\n"," 26/160 [===\u003e..........................] - ETA: 1s - loss: 6.8756e-04\n"," 31/160 [====\u003e.........................] - ETA: 1s - loss: 6.9089e-04\n"," 35/160 [=====\u003e........................] - ETA: 1s - loss: 6.8764e-04\n"," 42/160 [======\u003e.......................] - ETA: 1s - loss: 6.9209e-04\n"," 49/160 [========\u003e.....................] - ETA: 1s - loss: 6.9095e-04\n"," 56/160 [=========\u003e....................] - ETA: 0s - loss: 6.8598e-04\n"," 63/160 [==========\u003e...................] - ETA: 0s - loss: 6.8803e-04\n"," 70/160 [============\u003e.................] - ETA: 0s - loss: 6.8539e-04\n"," 77/160 [=============\u003e................] 
- ETA: 0s - loss: 6.8537e-04\n"," 84/160 [==============\u003e...............] - ETA: 0s - loss: 6.8729e-04\n"," 91/160 [================\u003e.............] - ETA: 0s - loss: 6.8841e-04\n"," 98/160 [=================\u003e............] - ETA: 0s - loss: 6.8872e-04\n","105/160 [==================\u003e...........] - ETA: 0s - loss: 6.8594e-04\n","111/160 [===================\u003e..........] - ETA: 0s - loss: 6.8472e-04\n","116/160 [====================\u003e.........] - ETA: 0s - loss: 6.8351e-04\n","122/160 [=====================\u003e........] - ETA: 0s - loss: 6.8199e-04\n","128/160 [=======================\u003e......] - ETA: 0s - loss: 6.8045e-04\n","134/160 [========================\u003e.....] - ETA: 0s - loss: 6.8062e-04\n","140/160 [=========================\u003e....] - ETA: 0s - loss: 6.7917e-04\n","147/160 [==========================\u003e...] - ETA: 0s - loss: 6.8055e-04\n","154/160 [===========================\u003e..] - ETA: 0s - loss: 6.7901e-04\n","159/160 [============================\u003e.] - ETA: 0s - loss: 6.7962e-04\n","160/160 [==============================] - 2s 10ms/step - loss: 6.7961e-04 - val_loss: 5.0524e-04\n","\n","Epoch 16/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 7.5892e-04\n","  6/160 [\u003e.............................] - ETA: 1s - loss: 6.7488e-04\n"," 10/160 [\u003e.............................] - ETA: 1s - loss: 6.6929e-04\n"," 14/160 [=\u003e............................] - ETA: 1s - loss: 6.6205e-04\n"," 19/160 [==\u003e...........................] - ETA: 1s - loss: 6.6409e-04\n"," 25/160 [===\u003e..........................] - ETA: 1s - loss: 6.6175e-04\n"," 30/160 [====\u003e.........................] - ETA: 1s - loss: 6.6822e-04\n"," 35/160 [=====\u003e........................] - ETA: 1s - loss: 6.6968e-04\n"," 40/160 [======\u003e.......................] - ETA: 1s - loss: 6.6618e-04\n"," 44/160 [=======\u003e......................] 
- ETA: 1s - loss: 6.6535e-04\n"," 48/160 [========\u003e.....................] - ETA: 1s - loss: 6.6888e-04\n"," 53/160 [========\u003e.....................] - ETA: 1s - loss: 6.7216e-04\n"," 58/160 [=========\u003e....................] - ETA: 1s - loss: 6.7268e-04\n"," 63/160 [==========\u003e...................] - ETA: 1s - loss: 6.7120e-04\n"," 68/160 [===========\u003e..................] - ETA: 1s - loss: 6.7244e-04\n"," 73/160 [============\u003e.................] - ETA: 0s - loss: 6.7317e-04\n"," 78/160 [=============\u003e................] - ETA: 0s - loss: 6.7392e-04\n"," 83/160 [==============\u003e...............] - ETA: 0s - loss: 6.7064e-04\n"," 88/160 [===============\u003e..............] - ETA: 0s - loss: 6.6881e-04\n"," 93/160 [================\u003e.............] - ETA: 0s - loss: 6.6897e-04\n"," 98/160 [=================\u003e............] - ETA: 0s - loss: 6.6643e-04\n","103/160 [==================\u003e...........] - ETA: 0s - loss: 6.6568e-04\n","108/160 [===================\u003e..........] - ETA: 0s - loss: 6.6545e-04\n","113/160 [====================\u003e.........] - ETA: 0s - loss: 6.6314e-04\n","118/160 [=====================\u003e........] - ETA: 0s - loss: 6.6462e-04\n","123/160 [======================\u003e.......] - ETA: 0s - loss: 6.6522e-04\n","129/160 [=======================\u003e......] - ETA: 0s - loss: 6.6447e-04\n","134/160 [========================\u003e.....] - ETA: 0s - loss: 6.6397e-04\n","139/160 [=========================\u003e....] - ETA: 0s - loss: 6.6301e-04\n","144/160 [==========================\u003e...] - ETA: 0s - loss: 6.6169e-04\n","149/160 [==========================\u003e...] - ETA: 0s - loss: 6.6106e-04\n","154/160 [===========================\u003e..] - ETA: 0s - loss: 6.6104e-04\n","159/160 [============================\u003e.] 
- ETA: 0s - loss: 6.6069e-04\n","160/160 [==============================] - 2s 12ms/step - loss: 6.6069e-04 - val_loss: 5.2056e-04\n","\n","Epoch 17/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 6.3882e-04\n","  5/160 [..............................] - ETA: 2s - loss: 6.5447e-04\n"," 10/160 [\u003e.............................] - ETA: 1s - loss: 6.6326e-04\n"," 15/160 [=\u003e............................] - ETA: 1s - loss: 6.6421e-04\n"," 20/160 [==\u003e...........................] - ETA: 1s - loss: 6.5101e-04\n"," 26/160 [===\u003e..........................] - ETA: 1s - loss: 6.5534e-04\n"," 32/160 [=====\u003e........................] - ETA: 1s - loss: 6.5554e-04\n"," 38/160 [======\u003e.......................] - ETA: 1s - loss: 6.5439e-04\n"," 44/160 [=======\u003e......................] - ETA: 1s - loss: 6.5548e-04\n"," 50/160 [========\u003e.....................] - ETA: 1s - loss: 6.5903e-04\n"," 56/160 [=========\u003e....................] - ETA: 1s - loss: 6.5591e-04\n"," 62/160 [==========\u003e...................] - ETA: 0s - loss: 6.5058e-04\n"," 67/160 [===========\u003e..................] - ETA: 0s - loss: 6.4840e-04\n"," 73/160 [============\u003e.................] - ETA: 0s - loss: 6.5022e-04\n"," 78/160 [=============\u003e................] - ETA: 0s - loss: 6.5108e-04\n"," 84/160 [==============\u003e...............] - ETA: 0s - loss: 6.4966e-04\n"," 90/160 [===============\u003e..............] - ETA: 0s - loss: 6.4984e-04\n"," 96/160 [=================\u003e............] - ETA: 0s - loss: 6.4842e-04\n","101/160 [=================\u003e............] - ETA: 0s - loss: 6.4707e-04\n","105/160 [==================\u003e...........] - ETA: 0s - loss: 6.4715e-04\n","109/160 [===================\u003e..........] - ETA: 0s - loss: 6.4716e-04\n","114/160 [====================\u003e.........] - ETA: 0s - loss: 6.4680e-04\n","119/160 [=====================\u003e........] 
- ETA: 0s - loss: 6.4568e-04\n","124/160 [======================\u003e.......] - ETA: 0s - loss: 6.4497e-04\n","129/160 [=======================\u003e......] - ETA: 0s - loss: 6.4531e-04\n","134/160 [========================\u003e.....] - ETA: 0s - loss: 6.4400e-04\n","139/160 [=========================\u003e....] - ETA: 0s - loss: 6.4267e-04\n","144/160 [==========================\u003e...] - ETA: 0s - loss: 6.4235e-04\n","149/160 [==========================\u003e...] - ETA: 0s - loss: 6.4141e-04\n","154/160 [===========================\u003e..] - ETA: 0s - loss: 6.4156e-04\n","159/160 [============================\u003e.] - ETA: 0s - loss: 6.4167e-04\n","160/160 [==============================] - 2s 12ms/step - loss: 6.4144e-04 - val_loss: 4.9101e-04\n","\n","Epoch 18/812\n","\n","  1/160 [..............................] - ETA: 3s - loss: 5.8509e-04\n","  5/160 [..............................] - ETA: 1s - loss: 6.0420e-04\n","  9/160 [\u003e.............................] - ETA: 2s - loss: 6.0651e-04\n"," 14/160 [=\u003e............................] - ETA: 1s - loss: 6.2069e-04\n"," 18/160 [==\u003e...........................] - ETA: 1s - loss: 6.2542e-04\n"," 23/160 [===\u003e..........................] - ETA: 1s - loss: 6.3233e-04\n"," 28/160 [====\u003e.........................] - ETA: 1s - loss: 6.3420e-04\n"," 32/160 [=====\u003e........................] - ETA: 1s - loss: 6.2976e-04\n"," 37/160 [=====\u003e........................] - ETA: 1s - loss: 6.3439e-04\n"," 43/160 [=======\u003e......................] - ETA: 1s - loss: 6.3649e-04\n"," 49/160 [========\u003e.....................] - ETA: 1s - loss: 6.3545e-04\n"," 55/160 [=========\u003e....................] - ETA: 1s - loss: 6.3719e-04\n"," 60/160 [==========\u003e...................] - ETA: 1s - loss: 6.3921e-04\n"," 65/160 [===========\u003e..................] - ETA: 1s - loss: 6.3891e-04\n"," 71/160 [============\u003e.................] 
- ETA: 0s - loss: 6.4078e-04\n"," 76/160 [=============\u003e................] - ETA: 0s - loss: 6.4210e-04\n"," 82/160 [==============\u003e...............] - ETA: 0s - loss: 6.3972e-04\n"," 88/160 [===============\u003e..............] - ETA: 0s - loss: 6.3881e-04\n"," 93/160 [================\u003e.............] - ETA: 0s - loss: 6.3826e-04\n"," 98/160 [=================\u003e............] - ETA: 0s - loss: 6.3607e-04\n","103/160 [==================\u003e...........] - ETA: 0s - loss: 6.3426e-04\n","109/160 [===================\u003e..........] - ETA: 0s - loss: 6.3271e-04\n","114/160 [====================\u003e.........] - ETA: 0s - loss: 6.3240e-04\n","118/160 [=====================\u003e........] - ETA: 0s - loss: 6.3308e-04\n","122/160 [=====================\u003e........] - ETA: 0s - loss: 6.3199e-04\n","126/160 [======================\u003e.......] - ETA: 0s - loss: 6.3230e-04\n","131/160 [=======================\u003e......] - ETA: 0s - loss: 6.3159e-04\n","136/160 [========================\u003e.....] - ETA: 0s - loss: 6.3181e-04\n","140/160 [=========================\u003e....] - ETA: 0s - loss: 6.3124e-04\n","145/160 [==========================\u003e...] - ETA: 0s - loss: 6.3139e-04\n","149/160 [==========================\u003e...] - ETA: 0s - loss: 6.3042e-04\n","154/160 [===========================\u003e..] - ETA: 0s - loss: 6.2885e-04\n","158/160 [============================\u003e.] - ETA: 0s - loss: 6.2895e-04\n","160/160 [==============================] - 2s 12ms/step - loss: 6.2885e-04 - val_loss: 4.8039e-04\n","\n","Epoch 19/812\n","\n","  1/160 [..............................] - ETA: 2s - loss: 5.9257e-04\n","  6/160 [\u003e.............................] - ETA: 1s - loss: 6.2074e-04\n"," 11/160 [=\u003e............................] - ETA: 1s - loss: 6.1220e-04\n"," 17/160 [==\u003e...........................] - ETA: 1s - loss: 6.2269e-04\n"," 22/160 [===\u003e..........................] 
- ETA: 1s - loss: 6.2441e-04\n"," 27/160 [====\u003e.........................] - ETA: 1s - loss: 6.2403e-04\n"," 32/160 [=====\u003e........................] - ETA: 1s - loss: 6.2005e-04\n"," 35/160 [=====\u003e........................] - ETA: 1s - loss: 6.1737e-04\n"," 39/160 [======\u003e.......................] - ETA: 1s - loss: 6.2073e-04\n"," 44/160 [=======\u003e......................] - ETA: 1s - loss: 6.1886e-04\n"," 48/160 [========\u003e.....................] - ETA: 1s - loss: 6.2097e-04\n"," 52/160 [========\u003e.....................] - ETA: 1s - loss: 6.2179e-04\n"," 56/160 [=========\u003e....................] - ETA: 1s - loss: 6.2140e-04\n"," 60/160 [==========\u003e...................] - ETA: 1s - loss: 6.2005e-04\n"," 64/160 [===========\u003e..................] - ETA: 1s - loss: 6.1986e-04\n"," 69/160 [===========\u003e..................] - ETA: 1s - loss: 6.1748e-04\n"," 75/160 [=============\u003e................] - ETA: 1s - loss: 6.1718e-04\n"," 80/160 [==============\u003e...............] - ETA: 0s - loss: 6.1556e-04\n"," 86/160 [===============\u003e..............] - ETA: 0s - loss: 6.1658e-04\n"," 92/160 [================\u003e.............] - ETA: 0s - loss: 6.1601e-04\n"," 97/160 [=================\u003e............] - ETA: 0s - loss: 6.1488e-04\n","102/160 [==================\u003e...........] - ETA: 0s - loss: 6.1422e-04\n","106/160 [==================\u003e...........] - ETA: 0s - loss: 6.1345e-04\n","110/160 [===================\u003e..........] - ETA: 0s - loss: 6.1297e-04\n","114/160 [====================\u003e.........] - ETA: 0s - loss: 6.1287e-04\n","118/160 [=====================\u003e........] - ETA: 0s - loss: 6.1199e-04\n","122/160 [=====================\u003e........] - ETA: 0s - loss: 6.1102e-04\n","125/160 [======================\u003e.......] - ETA: 0s - loss: 6.1082e-04\n","129/160 [=======================\u003e......] - ETA: 0s - loss: 6.1040e-04\n","134/160 [========================\u003e.....] 
# ------------ Autoencoder with Bayesian (TPE) hyperparameter optimization ------------
# Searches learning rate, layer widths, dropout rates, batch size, epoch count
# and activation for a symmetric autoencoder; keeps the encoder sub-model of
# the trial with the lowest reconstruction error on the test set.

import sys
import time

import hyperopt
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import keras
from keras.layers import BatchNormalization, LeakyReLU, Dense
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import Model
from sklearn.metrics import f1_score

# Best (lowest) test-set reconstruction error seen so far across all trials.
# float('inf') is a safe sentinel; the original 999.0 would mis-rank a first
# trial whose error exceeded 999.
min_reconstr_error = float('inf')
da = []          # history of strictly-improving reconstruction errors
seed = 2
val_ratio = 0.2

# NOTE(review): this re-splits and OVERWRITES trainData/trainLabels; running
# the cell twice shrinks the training set again — TODO make idempotent.
trainData, valData, trainLabels, valLabels = train_test_split(
    trainData, trainLabels, test_size=val_ratio, random_state=33)
valData = parallelType.array(valData)
valLabels = parallelType.array(valLabels)


def objective(params):
    """Build and train one autoencoder from `params`; return its test MSE.

    Parameters
    ----------
    params : dict
        A sample from the hyperopt search space (eta, units1/2, dropout1/2,
        batch_size, epochs, activation).

    Returns
    -------
    dict with 'loss' (test-set reconstruction MSE), 'status', 'model',
    as required by hyperopt's fmin.

    Side effects: updates global `min_reconstr_error`, appends improvements
    to `da`, and saves the encoder sub-model of the best trial to disk.
    """
    global min_reconstr_error

    lr = params['eta']
    u1 = int(params['units1'])       # continuous sample -> integer layer width
    u2 = int(params['units2'])
    d1 = params['dropout1']
    d2 = params['dropout2']
    bs = int(params['batch_size'])
    ep = int(params['epochs'])
    act = params['activation']

    input_dim = trainData.shape[1]
    input_layer = keras.layers.Input(shape=(input_dim,))

    encoder = Dense(u1, kernel_initializer=tf.keras.initializers.LecunUniform(),
                    activation=act)(input_layer)
    encoder = BatchNormalization()(encoder)
    encoder = keras.layers.Dropout(d1)(encoder)

    # Bottleneck: the compressed representation we keep as the encoder output.
    bottleneck = Dense(u2, activation=act)(encoder)
    bottleneck = BatchNormalization()(bottleneck)

    decoder = Dense(u1, activation=act)(bottleneck)
    decoder = BatchNormalization()(decoder)
    decoder = keras.layers.Dropout(d2)(decoder)
    decoder = Dense(input_dim, activation=act)(decoder)

    autoencoder = Model(inputs=input_layer, outputs=decoder)
    # `learning_rate` replaces the deprecated `lr` keyword of Adam.
    autoencoder.compile(loss='mse',
                        optimizer=keras.optimizers.Adam(learning_rate=lr))
    autoencoder.summary()

    callback = EarlyStopping(monitor='val_loss', mode='min',
                             verbose=1, patience=10)
    autoencoder.fit(trainData, trainData,
                    epochs=ep,
                    batch_size=bs,
                    validation_data=(valData, valData),
                    callbacks=[callback])

    # Reconstruction error on the held-out test set drives model selection.
    reconstr_error = autoencoder.evaluate(testData, testData, verbose=0)
    print(reconstr_error)

    if reconstr_error < min_reconstr_error:
        min_reconstr_error = reconstr_error
        da.append(min_reconstr_error)  # was appended twice per improvement
        encoded_model = Model(inputs=input_layer, outputs=bottleneck)
        encoded_model.save('UNSW_AE_ENCODER_BEST_100iter.h5')

    sys.stdout.flush()
    return {'loss': reconstr_error, 'status': STATUS_OK, 'model': autoencoder}


def optimize(trial):
    """Run 100 TPE evaluations of `objective`; return the best parameters."""
    params = {
        # NOTE(review): learning rates are usually sampled log-uniformly;
        # kept uniform here to preserve the original search behavior.
        'eta': hp.uniform('eta', 10e-8, 10e-1),
        'units1': hp.uniform('units1', 30, 100),
        'units2': hp.uniform('units2', 30, 100),
        'dropout1': hp.uniform('dropout1', 0, .3),
        'dropout2': hp.uniform('dropout2', 0, .3),
        'batch_size': hp.uniform('batch_size', 1, 1024),
        'epochs': hp.uniform('epochs', 1, 1000),
        'activation': hp.choice('activation', ['tanh', 'sigmoid']),
    }
    best = fmin(fn=objective,
                space=params,
                algo=tpe.suggest,
                trials=trial,
                max_evals=100)
    return best


start_time = time.time()
trial = Trials()
trial.mybest = None
best = optimize(trial)
print("--- %s seconds for Bayesian optimization ---" % (time.time() - start_time))
start_time))\n","\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"1FksZnU7GBUD"},"outputs":[],"source":["#------------after Autoencoder-load encoded model---------------------------\n","from keras.models import load_model\n","from google.colab import drive\n","drive.mount('/content/gdrive')\n","encoder = load_model('/content/gdrive/My Drive/Colab Notebooks/h5files/UNSW_AE_ENCODER_BEST_100iter.h5')\n","encoder.summary()\n","\n","trainData_encoded=encoder.predict(trainData)\n","testData_encoded=encoder.predict(testData)\n","\n","trainData_all=np.concatenate((trainData,trainData_encoded),axis=1)\n","testData_all=np.concatenate((testData,testData_encoded),axis=1)"]},{"cell_type":"code","execution_count":null,"metadata":{"executionInfo":{"elapsed":7,"status":"aborted","timestamp":1702404892374,"user":{"displayName":"Hilal ColabPro","userId":"01583771971134097379"},"user_tz":-180},"id":"YxPhw_aNF1Et"},"outputs":[],"source":["# plot feature importance (XGBoost 5 fold cv)\n","from numpy import loadtxt\n","from xgboost import XGBClassifier\n","from matplotlib import pyplot as plt\n","from xgboost import plot_importance\n","from sklearn.model_selection import KFold\n","\n","\n","kf = KFold(n_splits=5)\n","import numpy\n","print(trainData_all.shape[1])\n","importances= numpy.zeros(trainData_all.shape[1])\n","for train_indices, test_indices in kf.split(trainData_all):\n","    # fit model no training data\n","    print(train_indices.shape)\n","    print(test_indices.shape)\n","    model = XGBClassifier() #weight\n","#    model = XGBClassifier(importance_type='cover')\n","#    model = XGBClassifier(importance_type='gain')\n","    model.fit(trainData_all[train_indices], trainLabels[train_indices])\n","    # feature importance\n","    print(model.feature_importances_)\n","    importances  = importances+ 
#------------select 30 features---------------------------
# Indices of the 30 features with the largest cross-validation-averaged
# XGBoost importance (argsort is ascending, so take the tail).
indices = np.argsort(importances)[-30:]

# Reduced train/test matrices restricted to the selected columns.
trainData_reduced = trainData_all[:, indices]
testData_reduced = testData_all[:, indices]

# Visualize the selected features' relative importance.
plt.figure()
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
plt.yticks(range(len(indices)), indices + 1)  # 1-based feature ids on the axis
plt.xlabel('Relative Importance (weight)')
plt.show()
def objective(params):
    """Hyperopt objective for the ABC-ANN model.

    Trains an ABC_LR_Model with the sampled hyperparameters on the reduced
    training set and scores it on the reduced test set; returns the negated
    test accuracy as the loss to minimize.

    params: dict sampled by hyperopt from the search space in optimize()
            (lb, ub, evaluationNumber, limit, P, MR, L2, HLS, thres, activation).
    Returns a hyperopt result dict: {'loss': -accuracy, 'status': STATUS_OK}.
    """
    global max_acc
    # NOTE(review): 'counter' is declared global but no initialization is
    # visible in this notebook chunk — confirm an earlier cell sets it,
    # otherwise 'counter=counter+1' below raises NameError on the first trial.
    global counter

    # Unpack the sampled hyperparameters; integer-valued ones are cast from
    # hyperopt's continuous samples.
    lb=params['lb']
    ub=params['ub']
    evaluationNumber=int(params['evaluationNumber'])
    limit=int(params['limit'])
    P=int(params['P'])
    MR=params['MR']
    L2=params['L2']
    FVS = trainData.shape[1]
    HLS=int(params['HLS'])
    thres = params['thres']
    activation=params['activation']

    start_time = dt.datetime.now()
    # learn = LearnABC(trainData, trainLabels, FVS, HLS, P, limit, lb, ub, MR,L2, parallelType, evaluationNumber)
    # learn.learn()
    model = ABC_LR_Model(hiddenLayerSize=HLS, lb=lb, ub=ub, evaluationNumber=evaluationNumber, limit=limit, P=P, MR=MR, thres=thres, parallelType=parallelType, activation=activation)
    print(model);


    # Fit on the feature-selected training data, score on the test split;
    # score() returns [accuracy, predictions].
    model.fit(trainData_reduced, trainLabels)
    [avgTest,p] = model.score(testData_reduced, testLabels)
    print(f"Test Result: {avgTest}")

    # NOTE(review): elapsed time is computed but never printed or returned;
    # the local name also shadows the 'time' module inside this function.
    time=dt.datetime.now()-start_time

    # Move predictions and labels from GPU (cupy) to host memory for sklearn.
    mat=cp.asnumpy(p)
    testLabels_cpu=cp.asnumpy(testLabels)
    # NOTE(review): con_matrix is computed but never used or printed here.
    con_matrix=confusion_matrix(testLabels_cpu, mat)

    # Track the best accuracy seen across trials.
    if avgTest > max_acc:
      max_acc = avgTest
      # NOTE(review): 'matrix' is not listed in the global statement above, so
      # this assignment is a dead local store — the best run's predictions are
      # discarded when the function returns. Likely intended to be global.
      matrix=p


    counter=counter+1
    sys.stdout.flush()

    # hyperopt minimizes loss, so return the negated accuracy.
    return {'loss': -1*avgTest, 'status': STATUS_OK}
#------------Run 20 times with best parameters---------------------------
# Re-train the ABC-ANN with the tuned hyperparameters 20 times to gauge
# run-to-run variance of the stochastic optimizer.
for run_idx in range(20):
    # Hyperparameters chosen by the Bayesian search.
    lb = -20
    ub = 20
    evaluationNumber = 60008
    # FVS = trainData.shape[1]
    limit = 50
    P = 40
    MR = 0.054
    hiddenLayerSize = 3
    thres = 0.5
    activation = 'sigmoid'
    parallelType = cp

    model = ABC_LR_Model(
        hiddenLayerSize=hiddenLayerSize,
        lb=lb,
        ub=ub,
        evaluationNumber=evaluationNumber,
        limit=limit,
        P=P,
        MR=MR,
        thres=thres,
        parallelType=parallelType,
        activation=activation,
    )
    print(model)
    start_time = dt.datetime.now()

    #trainLabels=cp.array(trainLabels)
    model.fit(trainData_reduced, trainLabels)
    [acc, p] = model.score(testData_reduced, testLabels)

    print(f"Run time: {dt.datetime.now()-start_time}")
    print(f"Result: {acc}")

    from sklearn.metrics import confusion_matrix
    from sklearn.metrics import classification_report

    # Move predictions/labels back to host memory for sklearn metrics.
    matrix = cp.asnumpy(p)
    testLabels_cpu = cp.asnumpy(testLabels)
    con_matrix = confusion_matrix(testLabels_cpu, matrix)
    print(f"best acc: {acc}")
    print(con_matrix)
    print(classification_report(testLabels_cpu, matrix))