{ "cells": [ { "metadata": { "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", "trusted": true }, "cell_type": "code", "source": "##################\n# Supplemental file for PeerJ by Iam Palatnik de Sousa\n# This script shows how one of the 10-fold cross-validation runs was performed, namely, the run for fold 4.\n# Sample output of a run of this script is provided below the code.\n# Different runs with this same script might generate different results, even if setting seeds for the random number generators. \n# This behavior is known for GPU runs in Keras https://github.com/keras-team/keras/issues/7937.\n# However the behavior on average is consistent to what has been shown on the manuscript and can be shown by running this script several times.\n##################\n\n\nimport numpy as np \nimport pandas as pd \nimport os\nprint(os.listdir(\"../input\"))\nfrom os import listdir, makedirs\nfrom os.path import join, exists, expanduser\nfrom __future__ import print_function\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras import backend as K\nfrom __future__ import absolute_import\nfrom __future__ import division\nimport warnings\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Activation\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import GlobalAveragePooling2D\nfrom keras.layers import GlobalMaxPooling2D\nfrom keras.engine import get_source_inputs\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras import backend as K\nfrom keras.applications.imagenet_utils import decode_predictions\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras.applications.imagenet_utils import _obtain_input_shape\nfrom keras.layers.advanced_activations import LeakyReLU \nfrom 
keras.preprocessing.image import ImageDataGenerator\n\n#MADbase dataset obtained in https://www.kaggle.com/mloey1/ahdd1/\n\nx_train = pd.read_csv('../input/ahdd1/csvTrainImages 60k x 784.csv',header=None)\ny_train = pd.read_csv('../input/ahdd1/csvTrainLabel 60k x 1.csv',header=None)\n\nx_test = pd.read_csv('../input/ahdd1/csvTestImages 10k x 784.csv',header=None)\ny_test = pd.read_csv('../input/ahdd1/csvTestLabel 10k x 1.csv',header=None)\n\nx_train = x_train.astype('float32')\ny_train = y_train.astype('int32')\nx_test = x_test.astype('float32')\ny_test = y_test.astype('int32')\n\n\n\ny_train = keras.utils.to_categorical(y_train,10)\ny_test = keras.utils.to_categorical(y_test,10)\n\nx_train = np.array(x_train)\nx_test = np.array(x_test)\ny_train = np.array(y_train)\ny_test = np.array(y_test)\nx_train = [np.stack((img,)*3, -1) for img in x_train]\nx_test = [np.stack((img,)*3, -1) for img in x_test]\nx_train = np.array(x_train)\nx_test = np.array(x_test)\n\n\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0],3, 28, 28)\n x_test = x_test.reshape(x_test.shape[0], 3, 28, 28)\n input_shape = (1, 28, 28)\nelse:\n x_train = x_train.reshape(x_train.shape[0], 28, 28,3)\n x_test = x_test.reshape(x_test.shape[0], 28, 28,3)\n input_shape = (28, 28, 3)\n\n#Preprocessing\n \nx_train /= 255\nx_test /= 255\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\n#########################\n\n#Creating 10 folds for 10-fold crossvalidation\n#Random state = 77 ensures the folds are the same for different runs of this code.\n\nfrom sklearn.model_selection import train_test_split\n\nx_train, x_fold1, y_train, y_fold1 = train_test_split(x_train, y_train, test_size=1/10, random_state=77, shuffle = True)\nx_train, x_fold2, y_train, y_fold2 = train_test_split(x_train, y_train, test_size=1/9, random_state=77, shuffle = True)\nx_train, x_fold3, y_train, y_fold3 = 
train_test_split(x_train, y_train, test_size=1/8, random_state=77, shuffle = True)\nx_train, x_fold4, y_train, y_fold4 = train_test_split(x_train, y_train, test_size=1/7, random_state=77, shuffle = True)\nx_train, x_fold5, y_train, y_fold5 = train_test_split(x_train, y_train, test_size=1/6, random_state=77, shuffle = True)\nx_train, x_fold6, y_train, y_fold6 = train_test_split(x_train, y_train, test_size=1/5, random_state=77, shuffle = True)\nx_train, x_fold7, y_train, y_fold7 = train_test_split(x_train, y_train, test_size=1/4, random_state=77, shuffle = True)\nx_train, x_fold8, y_train, y_fold8 = train_test_split(x_train, y_train, test_size=1/3, random_state=77, shuffle = True)\nx_train, x_fold9, y_train, y_fold9 = train_test_split(x_train, y_train, test_size=1/2, random_state=77, shuffle = True)\nx_fold10, y_fold10 = x_train, y_train \n\nx_val_1 = x_fold1\nx_val_2 = x_fold2\nx_val_3 = x_fold3\nx_val_4 = x_fold4\nx_val_5 = x_fold5\nx_val_6 = x_fold6\nx_val_7 = x_fold7\nx_val_8 = x_fold8\nx_val_9 = x_fold9\nx_val_10 = x_fold10\n\nx_train_1 = np.concatenate((x_fold2,x_fold3,x_fold4,x_fold5,x_fold6,x_fold7,x_fold8,x_fold9,x_fold10),axis=0)\nx_train_2 = np.concatenate((x_fold1,x_fold3,x_fold4,x_fold5,x_fold6,x_fold7,x_fold8,x_fold9,x_fold10),axis=0)\nx_train_3 = np.concatenate((x_fold2,x_fold1,x_fold4,x_fold5,x_fold6,x_fold7,x_fold8,x_fold9,x_fold10),axis=0)\nx_train_4 = np.concatenate((x_fold2,x_fold3,x_fold1,x_fold5,x_fold6,x_fold7,x_fold8,x_fold9,x_fold10),axis=0)\nx_train_5 = np.concatenate((x_fold2,x_fold3,x_fold4,x_fold1,x_fold6,x_fold7,x_fold8,x_fold9,x_fold10),axis=0)\nx_train_6 = np.concatenate((x_fold2,x_fold3,x_fold4,x_fold5,x_fold1,x_fold7,x_fold8,x_fold9,x_fold10),axis=0)\nx_train_7 = np.concatenate((x_fold2,x_fold3,x_fold4,x_fold5,x_fold6,x_fold1,x_fold8,x_fold9,x_fold10),axis=0)\nx_train_8 = np.concatenate((x_fold2,x_fold3,x_fold4,x_fold5,x_fold6,x_fold7,x_fold1,x_fold9,x_fold10),axis=0)\nx_train_9 = 
np.concatenate((x_fold2,x_fold3,x_fold4,x_fold5,x_fold6,x_fold7,x_fold8,x_fold1,x_fold10),axis=0)\nx_train_10 = np.concatenate((x_fold2,x_fold3,x_fold4,x_fold5,x_fold6,x_fold7,x_fold8,x_fold9,x_fold1),axis=0)\n\ny_val_1 = y_fold1\ny_val_2 = y_fold2\ny_val_3 = y_fold3\ny_val_4 = y_fold4\ny_val_5 = y_fold5\ny_val_6 = y_fold6\ny_val_7 = y_fold7\ny_val_8 = y_fold8\ny_val_9 = y_fold9\ny_val_10 = y_fold10\n\ny_train_1 = np.concatenate((y_fold2,y_fold3,y_fold4,y_fold5,y_fold6,y_fold7,y_fold8,y_fold9,y_fold10),axis=0)\ny_train_2 = np.concatenate((y_fold1,y_fold3,y_fold4,y_fold5,y_fold6,y_fold7,y_fold8,y_fold9,y_fold10),axis=0)\ny_train_3 = np.concatenate((y_fold2,y_fold1,y_fold4,y_fold5,y_fold6,y_fold7,y_fold8,y_fold9,y_fold10),axis=0)\ny_train_4 = np.concatenate((y_fold2,y_fold3,y_fold1,y_fold5,y_fold6,y_fold7,y_fold8,y_fold9,y_fold10),axis=0)\ny_train_5 = np.concatenate((y_fold2,y_fold3,y_fold4,y_fold1,y_fold6,y_fold7,y_fold8,y_fold9,y_fold10),axis=0)\ny_train_6 = np.concatenate((y_fold2,y_fold3,y_fold4,y_fold5,y_fold1,y_fold7,y_fold8,y_fold9,y_fold10),axis=0)\ny_train_7 = np.concatenate((y_fold2,y_fold3,y_fold4,y_fold5,y_fold6,y_fold1,y_fold8,y_fold9,y_fold10),axis=0)\ny_train_8 = np.concatenate((y_fold2,y_fold3,y_fold4,y_fold5,y_fold6,y_fold7,y_fold1,y_fold9,y_fold10),axis=0)\ny_train_9 = np.concatenate((y_fold2,y_fold3,y_fold4,y_fold5,y_fold6,y_fold7,y_fold8,y_fold1,y_fold10),axis=0)\ny_train_10 = np.concatenate((y_fold2,y_fold3,y_fold4,y_fold5,y_fold6,y_fold7,y_fold8,y_fold9,y_fold1),axis=0)\n\n# Clearing unused folds from memory, for this run.\n# Since we are using fold 4 for this run, folds 1,2,3,5,6,7,8,9,10 are cleared.\n\nx_val_2,x_val_3,x_val_1,x_val_5,x_val_6,x_val_7,x_val_8,x_val_9,x_val_10 = None,None,None,None,None,None,None,None,None \nx_train_2,x_train_3,x_train_1,x_train_5,x_train_6,x_train_7,x_train_8,x_train_9,x_train_10 = None,None,None,None,None,None,None,None,None \ny_val_2,y_val_3,y_val_1,y_val_5,y_val_6,y_val_7,y_val_8,y_val_9,y_val_10 = 
None,None,None,None,None,None,None,None,None \ny_train_2,y_train_3,y_train_1,y_train_5,y_train_6,y_train_7,y_train_8,y_train_9,y_train_10 = None,None,None,None,None,None,None,None,None \n\n#########################\n\ndef VGG16_like(pooling=None):\n\n img_input = Input(shape=input_shape)\n\n # Block 1\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n model = Model(img_input, x, name='vgg16_like')\n\n return model\nmodel_input = Input(shape=input_shape)\n\n#Model 1 corresponds to VGG12\n#Model 2 corresponds to REGU\n#Model 3 corresponds to VGG12_aug\n#Model 4 corresponds to REGU_aug\n\ndef model1(model_input):\n\n m1 = VGG16_like()(model_input)\n m1 = Flatten()(m1)\n m1 = Dense(512, activation='relu')(m1)\n m1 = Dropout(0.25)(m1)\n m1 = Dense(10, 
activation='softmax')(m1)\n\n model = Model(model_input, m1, name='model1')\n \n return model\n\ndef model2(model_input):\n\n m2 = Conv2D(32, (3, 3))(model_input)\n m2 = Activation('relu')(m2)\n m2 = Dropout(0.2)(m2)\n m2 = BatchNormalization(axis=-1)(m2)\n m2 = Conv2D(32, (3, 3))(m2)\n m2 = Activation('relu')(m2)\n m2 = MaxPooling2D(pool_size=(2,2))(m2)\n\n m2 = BatchNormalization(axis=-1)(m2)\n m2 = Conv2D(64,(3, 3))(m2)\n m2 = Activation('relu')(m2)\n m2 = Dropout(0.2)(m2)\n m2 = BatchNormalization(axis=-1)(m2)\n m2 = Conv2D(64, (3, 3))(m2)\n m2 = Activation('relu')(m2)\n m2 = MaxPooling2D(pool_size=(2,2))(m2)\n\n m2 = Flatten()(m2)\n\n m2 = BatchNormalization()(m2)\n m2 = Dense(512)(m2)\n m2 = Activation('relu')(m2)\n m2 = BatchNormalization()(m2)\n m2 = Dropout(0.2)(m2)\n m2 = Dense(10)(m2)\n\n #m2 = Convolution2D(10,3,3, border_mode='same')\n #m2 = GlobalAveragePooling2D()\n m2 = Activation('softmax')(m2)\n\n model = Model(model_input, m2, name='model2')\n \n return model\nmodel_1 = model1(model_input)\nmodel_2 = model2(model_input)\nfrom keras.callbacks import ReduceLROnPlateau\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=7, min_lr=0.0001, verbose = 1)\n\nmodel_1.compile(loss=keras.losses.categorical_crossentropy,\n optimizer='adam',\n metrics=['accuracy'])\n\nhistory_11 = model_1.fit(x_train_4, y_train_4,\n batch_size=256,\n epochs=20,\n verbose=1,\n #callbacks=[reduce_lr],\n validation_data=(x_val_4,y_val_4))\n\nmodel_1.compile(loss=keras.losses.categorical_crossentropy,\n optimizer='sgd',\n metrics=['accuracy'])\n\nhistory_12 = model_1.fit(x_train_4, y_train_4,\n batch_size=256,\n epochs=20,\n verbose=1,\n callbacks=[reduce_lr],\n validation_data=(x_val_4,y_val_4))\n\nscore = model_1.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\nfrom keras.callbacks import ReduceLROnPlateau\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=7, min_lr=0.0001, verbose = 
1)\n\nmodel_2.compile(loss=keras.losses.categorical_crossentropy,\n optimizer='adam',\n metrics=['accuracy'])\n\nhistory_21 = model_2.fit(x_train_4, y_train_4,\n batch_size=256,\n epochs=20,\n verbose=1,\n #callbacks=[reduce_lr],\n validation_data=(x_val_4,y_val_4))\n\nmodel_2.compile(loss=keras.losses.categorical_crossentropy,\n optimizer='sgd',\n metrics=['accuracy'])\n\nhistory_22 = model_2.fit(x_train_4, y_train_4,\n batch_size=256,\n epochs=20,\n verbose=1,\n callbacks=[reduce_lr],\n validation_data=(x_val_4,y_val_4))\n\n\n\nscore = model_2.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\ndef model3(model_input):\n\n m3 = VGG16_like()(model_input)\n m3 = Flatten()(m3)\n m3 = Dense(512, activation='relu')(m3)\n m3 = Dropout(0.25)(m3)\n m3 = Dense(10, activation='softmax')(m3)\n\n model = Model(model_input, m3, name='model3')\n \n return model\ndef model4(model_input):\n\n m4 = Conv2D(32, (3, 3))(model_input)\n m4 = Activation('relu')(m4)\n m4 = Dropout(0.2)(m4)\n m4 = BatchNormalization(axis=-1)(m4)\n m4 = Conv2D(32, (3, 3))(m4)\n m4 = Activation('relu')(m4)\n m4 = MaxPooling2D(pool_size=(2,2))(m4)\n\n m4 = BatchNormalization(axis=-1)(m4)\n m4 = Conv2D(64,(3, 3))(m4)\n m4 = Activation('relu')(m4)\n m4 = Dropout(0.2)(m4)\n m4 = BatchNormalization(axis=-1)(m4)\n m4 = Conv2D(64, (3, 3))(m4)\n m4 = Activation('relu')(m4)\n m4 = MaxPooling2D(pool_size=(2,2))(m4)\n\n m4 = Flatten()(m4)\n\n m4 = BatchNormalization()(m4)\n m4 = Dense(512)(m4)\n m4 = Activation('relu')(m4)\n m4 = BatchNormalization()(m4)\n m4 = Dropout(0.2)(m4)\n m4 = Dense(10)(m4)\n\n #m4 = Convolution2D(10,3,3, border_mode='same')\n #m4 = GlobalAveragePooling2D()\n m4 = Activation('softmax')(m4)\n\n model = Model(model_input, m4, name='model4')\n \n return model\nmodel_3 = model3(model_input)\nmodel_4 = model4(model_input)\nmodel_3.layers[1].name = 'vgg16_like2'\nmodel_3.compile(loss=keras.losses.categorical_crossentropy,\n optimizer='adam',\n 
metrics=['accuracy'])\n\ngen = ImageDataGenerator(zoom_range = 0.1,\n height_shift_range = 0.1,\n width_shift_range = 0.1)\n\ntest_gen = ImageDataGenerator()\n\ntrain_generator = gen.flow(x_train_4, y_train_4, batch_size=128)\nval_generator = test_gen.flow(x_val_4, y_val_4, batch_size=128)\n\nhistory_31 = model_3.fit_generator(train_generator, steps_per_epoch=60000/128, epochs=20, validation_data = val_generator, validation_steps = 10000/128)\n\nmodel_3.compile(loss=keras.losses.categorical_crossentropy,\n optimizer='sgd',\n metrics=['accuracy'])\n\nhistory_32 = model_3.fit_generator(train_generator, steps_per_epoch=60000/128, epochs=20, validation_data = val_generator, validation_steps = 10000/128)\n\nscore = model_3.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\nmodel_4.compile(loss=keras.losses.categorical_crossentropy,\n optimizer='adam',\n metrics=['accuracy'])\n\ngen = ImageDataGenerator(zoom_range = 0.1,\n height_shift_range = 0.1,\n width_shift_range = 0.1)\n\ntest_gen = ImageDataGenerator()\n\ntrain_generator = gen.flow(x_train_4, y_train_4, batch_size=128)\nval_generator = test_gen.flow(x_val_4, y_val_4, batch_size=128)\n\nhistory_41 = model_4.fit_generator(train_generator, steps_per_epoch=60000/128, epochs=20, validation_data = val_generator, validation_steps = 10000/128)\n\nmodel_4.compile(loss=keras.losses.categorical_crossentropy,\n optimizer='sgd',\n metrics=['accuracy'])\n\nhistory_42 = model_4.fit_generator(train_generator, steps_per_epoch=60000/128, epochs=20, validation_data = val_generator, validation_steps = 10000/128)\n\nscore = model_4.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\nfrom keras.layers.merge import Average\nfrom keras.layers.merge import Maximum\n\nmodels = [model_1, model_2, model_3, model_4]\n\ndef ensemble(models, model_input):\n \n outputs = [model.outputs[0] for model in models]\n y = Average()(outputs)\n \n model 
= Model(model_input, y, name='ensemble')\n \n return model\n\nmodel_ensemble = ensemble(models, model_input)\n\ndef evaluate_acc(model):\n \n pred = model.predict(x_test)\n pred = np.argmax(pred, axis=1)\n error = np.sum(np.not_equal(pred, np.argmax(y_test,axis=1))) / y_test.shape[0] \n \n return 1-error\n\nevaluate_acc(model_ensemble)\ndef evaluate_acc_val(model):\n \n pred = model.predict(x_val_4)\n pred = np.argmax(pred, axis=1)\n error = np.sum(np.not_equal(pred, np.argmax(y_val_4,axis=1))) / y_val_4.shape[0] \n \n return 1-error\n\nevaluate_acc_val(model_ensemble)\n\ntest1 = model_1.evaluate(x_test,y_test)[1]\ntest2 = model_2.evaluate(x_test,y_test)[1]\ntest3 = model_3.evaluate(x_test,y_test)[1]\ntest4 = model_4.evaluate(x_test,y_test)[1]\nval1 = model_1.evaluate(x_val_4,y_val_4)[1]\nval2 = model_2.evaluate(x_val_4,y_val_4)[1]\nval3 = model_3.evaluate(x_val_4,y_val_4)[1]\nval4 = model_4.evaluate(x_val_4,y_val_4)[1]\ntestens = evaluate_acc(model_ensemble)\nvalens = evaluate_acc_val(model_ensemble)\n\n\nresults = [val1,val2,val3,val4,valens,test1,test2,test3,test4,testens]\n##########[VGG12_val,REGU_val,VGG12_aug_val,_valREGU_aug_val,ENS4_val,VGG12_test,REGU_test,VGG12_aug_test,_valREGU_aug_test,ENS4_test]\nprint(results)\n", "execution_count": 1, "outputs": [ { "output_type": "stream", "text": "['ahdd1', 'keras-pretrained-models']\n", "name": "stdout" }, { "output_type": "stream", "text": "/opt/conda/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n", "name": "stderr" }, { "output_type": "stream", "text": "x_train shape: (60000, 28, 28, 3)\n60000 train samples\n10000 test samples\nTrain on 54000 samples, validate on 6000 samples\nEpoch 1/20\n54000/54000 [==============================] - 38s 702us/step - loss: 0.9172 - acc: 0.6546 - val_loss: 0.0631 - val_acc: 0.9830\nEpoch 2/20\n54000/54000 [==============================] - 33s 607us/step - loss: 0.0486 - acc: 0.9864 - val_loss: 0.0352 - val_acc: 0.9900\nEpoch 3/20\n54000/54000 [==============================] - 33s 608us/step - loss: 0.0321 - acc: 0.9908 - val_loss: 0.0385 - val_acc: 0.9898\nEpoch 4/20\n54000/54000 [==============================] - 33s 607us/step - loss: 0.0296 - acc: 0.9920 - val_loss: 0.0241 - val_acc: 0.9925\nEpoch 5/20\n54000/54000 [==============================] - 33s 607us/step - loss: 0.0226 - acc: 0.9939 - val_loss: 0.0202 - val_acc: 0.9942\nEpoch 6/20\n54000/54000 [==============================] - 33s 607us/step - loss: 0.0199 - acc: 0.9943 - val_loss: 0.0270 - val_acc: 0.9938\nEpoch 7/20\n54000/54000 [==============================] - 33s 606us/step - loss: 0.0163 - acc: 0.9953 - val_loss: 0.0229 - val_acc: 0.9935\nEpoch 8/20\n54000/54000 [==============================] - 33s 606us/step - loss: 0.0159 - acc: 0.9952 - val_loss: 0.0287 - val_acc: 0.9923\nEpoch 9/20\n54000/54000 [==============================] - 33s 603us/step - loss: 0.0112 - acc: 0.9968 - val_loss: 0.0230 - val_acc: 0.9935\nEpoch 10/20\n54000/54000 [==============================] - 33s 605us/step - loss: 0.0152 - acc: 0.9958 - val_loss: 0.0202 - val_acc: 0.9957\nEpoch 11/20\n54000/54000 [==============================] - 33s 605us/step - loss: 0.0139 - acc: 0.9963 - val_loss: 0.0319 - val_acc: 0.9930\nEpoch 12/20\n54000/54000 [==============================] - 33s 604us/step - loss: 0.0134 - acc: 
0.9963 - val_loss: 0.0287 - val_acc: 0.9953\nEpoch 13/20\n54000/54000 [==============================] - 33s 605us/step - loss: 0.0126 - acc: 0.9965 - val_loss: 0.0205 - val_acc: 0.9963\nEpoch 14/20\n54000/54000 [==============================] - 33s 604us/step - loss: 0.0097 - acc: 0.9971 - val_loss: 0.0197 - val_acc: 0.9965\nEpoch 15/20\n54000/54000 [==============================] - 33s 605us/step - loss: 0.0121 - acc: 0.9969 - val_loss: 0.0201 - val_acc: 0.9963\nEpoch 16/20\n54000/54000 [==============================] - 33s 604us/step - loss: 0.0106 - acc: 0.9973 - val_loss: 0.0233 - val_acc: 0.9942\nEpoch 17/20\n54000/54000 [==============================] - 33s 604us/step - loss: 0.0141 - acc: 0.9966 - val_loss: 0.0447 - val_acc: 0.9908\nEpoch 18/20\n54000/54000 [==============================] - 33s 604us/step - loss: 0.0116 - acc: 0.9971 - val_loss: 0.0244 - val_acc: 0.9958\nEpoch 19/20\n54000/54000 [==============================] - 33s 602us/step - loss: 0.0082 - acc: 0.9978 - val_loss: 0.0256 - val_acc: 0.9970\nEpoch 20/20\n54000/54000 [==============================] - 32s 601us/step - loss: 0.0079 - acc: 0.9980 - val_loss: 0.0155 - val_acc: 0.9967\nTrain on 54000 samples, validate on 6000 samples\nEpoch 1/20\n54000/54000 [==============================] - 31s 578us/step - loss: 0.0031 - acc: 0.9992 - val_loss: 0.0151 - val_acc: 0.9970\nEpoch 2/20\n54000/54000 [==============================] - 30s 565us/step - loss: 0.0025 - acc: 0.9993 - val_loss: 0.0150 - val_acc: 0.9968\nEpoch 3/20\n54000/54000 [==============================] - 31s 566us/step - loss: 0.0023 - acc: 0.9994 - val_loss: 0.0150 - val_acc: 0.9970\nEpoch 4/20\n54000/54000 [==============================] - 31s 565us/step - loss: 0.0021 - acc: 0.9994 - val_loss: 0.0150 - val_acc: 0.9972\nEpoch 5/20\n54000/54000 [==============================] - 31s 565us/step - loss: 0.0020 - acc: 0.9995 - val_loss: 0.0149 - val_acc: 0.9973\nEpoch 6/20\n54000/54000 [==============================] - 31s 
565us/step - loss: 0.0018 - acc: 0.9996 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 7/20\n54000/54000 [==============================] - 31s 565us/step - loss: 0.0017 - acc: 0.9995 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 8/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0016 - acc: 0.9996 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 9/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0016 - acc: 0.9996 - val_loss: 0.0149 - val_acc: 0.9973\nEpoch 10/20\n54000/54000 [==============================] - 30s 565us/step - loss: 0.0015 - acc: 0.9996 - val_loss: 0.0149 - val_acc: 0.9973\n\nEpoch 00010: ReduceLROnPlateau reducing learning rate to 0.0019999999552965165.\nEpoch 11/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0149 - val_acc: 0.9973\nEpoch 12/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9997 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 13/20\n54000/54000 [==============================] - 30s 565us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0149 - val_acc: 0.9973\nEpoch 14/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 15/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 16/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 17/20\n54000/54000 [==============================] - 30s 565us/step - loss: 0.0014 - acc: 0.9997 - val_loss: 0.0150 - val_acc: 0.9973\n\nEpoch 00017: ReduceLROnPlateau reducing learning rate to 0.0003999999724328518.\nEpoch 18/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9997 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 19/20\n54000/54000 
[==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9997 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 20/20\n54000/54000 [==============================] - 31s 565us/step - loss: 0.0014 - acc: 0.9997 - val_loss: 0.0150 - val_acc: 0.9973\nTest loss: 0.04246884689170474\nTest accuracy: 0.9917\nTrain on 54000 samples, validate on 6000 samples\nEpoch 1/20\n54000/54000 [==============================] - 12s 219us/step - loss: 0.0861 - acc: 0.9746 - val_loss: 0.1532 - val_acc: 0.9545\nEpoch 2/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0272 - acc: 0.9916 - val_loss: 0.0382 - val_acc: 0.9885\nEpoch 3/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0173 - acc: 0.9945 - val_loss: 0.0490 - val_acc: 0.9867\nEpoch 4/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0140 - acc: 0.9952 - val_loss: 0.0193 - val_acc: 0.9942\nEpoch 5/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0130 - acc: 0.9959 - val_loss: 0.0145 - val_acc: 0.9968\nEpoch 6/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0119 - acc: 0.9961 - val_loss: 0.0203 - val_acc: 0.9942\nEpoch 7/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0087 - acc: 0.9974 - val_loss: 0.0475 - val_acc: 0.9840\nEpoch 8/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0076 - acc: 0.9973 - val_loss: 0.0157 - val_acc: 0.9952\nEpoch 9/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0070 - acc: 0.9980 - val_loss: 0.0157 - val_acc: 0.9958\nEpoch 10/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0068 - acc: 0.9978 - val_loss: 0.0328 - val_acc: 0.9917\nEpoch 11/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0056 - acc: 0.9981 - val_loss: 0.0240 - val_acc: 0.9938\nEpoch 12/20\n54000/54000 [==============================] - 10s 187us/step - loss: 
0.0060 - acc: 0.9980 - val_loss: 0.0211 - val_acc: 0.9953\nEpoch 13/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0054 - acc: 0.9983 - val_loss: 0.0189 - val_acc: 0.9955\nEpoch 14/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0061 - acc: 0.9981 - val_loss: 0.0168 - val_acc: 0.9957\nEpoch 15/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0052 - acc: 0.9983 - val_loss: 0.0148 - val_acc: 0.9967\nEpoch 16/20\n", "name": "stdout" }, { "output_type": "stream", "text": "54000/54000 [==============================] - 10s 187us/step - loss: 0.0043 - acc: 0.9985 - val_loss: 0.0134 - val_acc: 0.9970\nEpoch 17/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0052 - acc: 0.9981 - val_loss: 0.0182 - val_acc: 0.9962\nEpoch 18/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0038 - acc: 0.9986 - val_loss: 0.0177 - val_acc: 0.9960\nEpoch 19/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0028 - acc: 0.9991 - val_loss: 0.0316 - val_acc: 0.9915\nEpoch 20/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0049 - acc: 0.9984 - val_loss: 0.0163 - val_acc: 0.9968\nTrain on 54000 samples, validate on 6000 samples\nEpoch 1/20\n54000/54000 [==============================] - 11s 196us/step - loss: 0.0037 - acc: 0.9989 - val_loss: 0.0148 - val_acc: 0.9968\nEpoch 2/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0030 - acc: 0.9992 - val_loss: 0.0141 - val_acc: 0.9972\nEpoch 3/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0024 - acc: 0.9990 - val_loss: 0.0135 - val_acc: 0.9973\nEpoch 4/20\n54000/54000 [==============================] - 9s 171us/step - loss: 0.0019 - acc: 0.9994 - val_loss: 0.0132 - val_acc: 0.9977\nEpoch 5/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0023 - acc: 0.9992 - val_loss: 0.0129 - val_acc: 
0.9973\nEpoch 6/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0019 - acc: 0.9994 - val_loss: 0.0129 - val_acc: 0.9977\nEpoch 7/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0019 - acc: 0.9994 - val_loss: 0.0128 - val_acc: 0.9975\nEpoch 8/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0017 - acc: 0.9995 - val_loss: 0.0127 - val_acc: 0.9977\nEpoch 9/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0015 - acc: 0.9995 - val_loss: 0.0127 - val_acc: 0.9977\nEpoch 10/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9977\nEpoch 11/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0015 - acc: 0.9996 - val_loss: 0.0127 - val_acc: 0.9975\nEpoch 12/20\n54000/54000 [==============================] - 9s 171us/step - loss: 0.0013 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9977\nEpoch 13/20\n54000/54000 [==============================] - 9s 171us/step - loss: 0.0017 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9977\nEpoch 14/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0013 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9973\nEpoch 15/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0015 - acc: 0.9996 - val_loss: 0.0127 - val_acc: 0.9973\nEpoch 16/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0012 - acc: 0.9997 - val_loss: 0.0127 - val_acc: 0.9973\nEpoch 17/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0013 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9975\nEpoch 18/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0013 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9977\nEpoch 19/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0013 - acc: 0.9997 - val_loss: 0.0126 - val_acc: 0.9975\n\nEpoch 00019: 
ReduceLROnPlateau reducing learning rate to 0.0019999999552965165.\nEpoch 20/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0013 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9975\nTest loss: 0.04305995555087147\nTest accuracy: 0.9911\nEpoch 1/20\n469/468 [==============================] - 52s 110ms/step - loss: 0.5602 - acc: 0.7981 - val_loss: 0.0356 - val_acc: 0.9922\nEpoch 2/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0531 - acc: 0.9866 - val_loss: 0.0330 - val_acc: 0.9912\nEpoch 3/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0391 - acc: 0.9903 - val_loss: 0.0455 - val_acc: 0.9904\nEpoch 4/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0376 - acc: 0.9911 - val_loss: 0.0226 - val_acc: 0.9935\nEpoch 5/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0342 - acc: 0.9918 - val_loss: 0.0391 - val_acc: 0.9887\nEpoch 6/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0318 - acc: 0.9925 - val_loss: 0.0228 - val_acc: 0.9948\nEpoch 7/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0318 - acc: 0.9925 - val_loss: 0.0183 - val_acc: 0.9948\nEpoch 8/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0268 - acc: 0.9933 - val_loss: 0.0186 - val_acc: 0.9964\nEpoch 9/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0268 - acc: 0.9931 - val_loss: 0.0220 - val_acc: 0.9945\nEpoch 10/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0258 - acc: 0.9940 - val_loss: 0.0249 - val_acc: 0.9942\nEpoch 11/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0288 - acc: 0.9927 - val_loss: 0.0228 - val_acc: 0.9948\nEpoch 12/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0319 - acc: 0.9928 - val_loss: 0.0209 - val_acc: 0.9956\nEpoch 13/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0212 - 
acc: 0.9950 - val_loss: 0.0186 - val_acc: 0.9954\nEpoch 14/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0244 - acc: 0.9944 - val_loss: 0.0428 - val_acc: 0.9899\nEpoch 15/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0197 - acc: 0.9951 - val_loss: 0.0285 - val_acc: 0.9936\nEpoch 16/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0197 - acc: 0.9953 - val_loss: 0.0395 - val_acc: 0.9919\nEpoch 17/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0271 - acc: 0.9936 - val_loss: 0.0179 - val_acc: 0.9971\nEpoch 18/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0203 - acc: 0.9951 - val_loss: 0.0210 - val_acc: 0.9960\nEpoch 19/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0205 - acc: 0.9952 - val_loss: 0.0262 - val_acc: 0.9935\nEpoch 20/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0203 - acc: 0.9954 - val_loss: 0.0312 - val_acc: 0.9936\nEpoch 1/20\n469/468 [==============================] - 45s 96ms/step - loss: 0.0134 - acc: 0.9965 - val_loss: 0.0215 - val_acc: 0.9948\nEpoch 2/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0111 - acc: 0.9967 - val_loss: 0.0210 - val_acc: 0.9947\nEpoch 3/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0093 - acc: 0.9973 - val_loss: 0.0184 - val_acc: 0.9954\nEpoch 4/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0090 - acc: 0.9972 - val_loss: 0.0172 - val_acc: 0.9956\nEpoch 5/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0095 - acc: 0.9972 - val_loss: 0.0169 - val_acc: 0.9955\nEpoch 6/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0098 - acc: 0.9972 - val_loss: 0.0172 - val_acc: 0.9957\nEpoch 7/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0077 - acc: 0.9978 - val_loss: 0.0148 - val_acc: 0.9962\nEpoch 8/20\n469/468 
[==============================] - 44s 93ms/step - loss: 0.0090 - acc: 0.9975 - val_loss: 0.0136 - val_acc: 0.9960\nEpoch 9/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0074 - acc: 0.9977 - val_loss: 0.0153 - val_acc: 0.9956\nEpoch 10/20\n469/468 [==============================] - 43s 93ms/step - loss: 0.0080 - acc: 0.9975 - val_loss: 0.0133 - val_acc: 0.9960\nEpoch 11/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0077 - acc: 0.9976 - val_loss: 0.0143 - val_acc: 0.9957\nEpoch 12/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0080 - acc: 0.9977 - val_loss: 0.0128 - val_acc: 0.9965\nEpoch 13/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0076 - acc: 0.9976 - val_loss: 0.0132 - val_acc: 0.9958\nEpoch 14/20\n", "name": "stdout" }, { "output_type": "stream", "text": "469/468 [==============================] - 44s 93ms/step - loss: 0.0068 - acc: 0.9979 - val_loss: 0.0129 - val_acc: 0.9963\nEpoch 15/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0074 - acc: 0.9977 - val_loss: 0.0144 - val_acc: 0.9962\nEpoch 16/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0073 - acc: 0.9979 - val_loss: 0.0144 - val_acc: 0.9962\nEpoch 17/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0086 - acc: 0.9977 - val_loss: 0.0139 - val_acc: 0.9961\nEpoch 18/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0069 - acc: 0.9978 - val_loss: 0.0128 - val_acc: 0.9963\nEpoch 19/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0069 - acc: 0.9979 - val_loss: 0.0121 - val_acc: 0.9960\nEpoch 20/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0072 - acc: 0.9978 - val_loss: 0.0125 - val_acc: 0.9962\nTest loss: 0.030220891384417293\nTest accuracy: 0.9939\nEpoch 1/20\n469/468 [==============================] - 35s 75ms/step - loss: 0.1125 - acc: 0.9661 - val_loss: 0.0260 - 
val_acc: 0.9922\nEpoch 2/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0406 - acc: 0.9879 - val_loss: 0.0166 - val_acc: 0.9942\nEpoch 3/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0311 - acc: 0.9906 - val_loss: 0.0301 - val_acc: 0.9907\nEpoch 4/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0271 - acc: 0.9912 - val_loss: 0.0223 - val_acc: 0.9929\nEpoch 5/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0246 - acc: 0.9929 - val_loss: 0.0113 - val_acc: 0.9966\nEpoch 6/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0239 - acc: 0.9928 - val_loss: 0.0168 - val_acc: 0.9951\nEpoch 7/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0215 - acc: 0.9933 - val_loss: 0.0139 - val_acc: 0.9965\nEpoch 8/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0201 - acc: 0.9935 - val_loss: 0.0154 - val_acc: 0.9955\nEpoch 9/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0205 - acc: 0.9933 - val_loss: 0.0131 - val_acc: 0.9971\nEpoch 10/20\n469/468 [==============================] - 34s 72ms/step - loss: 0.0191 - acc: 0.9942 - val_loss: 0.0185 - val_acc: 0.9946\nEpoch 11/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0192 - acc: 0.9941 - val_loss: 0.0134 - val_acc: 0.9959\nEpoch 12/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0154 - acc: 0.9951 - val_loss: 0.0101 - val_acc: 0.9976\nEpoch 13/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0154 - acc: 0.9952 - val_loss: 0.0108 - val_acc: 0.9971\nEpoch 14/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0158 - acc: 0.9950 - val_loss: 0.0107 - val_acc: 0.9970\nEpoch 15/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0158 - acc: 0.9949 - val_loss: 0.0126 - val_acc: 0.9961\nEpoch 16/20\n469/468 [==============================] - 33s 70ms/step - 
loss: 0.0150 - acc: 0.9951 - val_loss: 0.0142 - val_acc: 0.9953\nEpoch 17/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0133 - acc: 0.9955 - val_loss: 0.0101 - val_acc: 0.9970\nEpoch 18/20\n469/468 [==============================] - 34s 72ms/step - loss: 0.0145 - acc: 0.9953 - val_loss: 0.0106 - val_acc: 0.9973\nEpoch 19/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0123 - acc: 0.9960 - val_loss: 0.0153 - val_acc: 0.9968\nEpoch 20/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0139 - acc: 0.9957 - val_loss: 0.0119 - val_acc: 0.9962\nEpoch 1/20\n469/468 [==============================] - 35s 74ms/step - loss: 0.0105 - acc: 0.9967 - val_loss: 0.0112 - val_acc: 0.9969\nEpoch 2/20\n469/468 [==============================] - 32s 68ms/step - loss: 0.0101 - acc: 0.9968 - val_loss: 0.0106 - val_acc: 0.9977\nEpoch 3/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0099 - acc: 0.9968 - val_loss: 0.0090 - val_acc: 0.9971\nEpoch 4/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0090 - acc: 0.9969 - val_loss: 0.0103 - val_acc: 0.9969\nEpoch 5/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0091 - acc: 0.9969 - val_loss: 0.0088 - val_acc: 0.9973\nEpoch 6/20\n469/468 [==============================] - 32s 69ms/step - loss: 0.0088 - acc: 0.9972 - val_loss: 0.0090 - val_acc: 0.9968\nEpoch 7/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0090 - acc: 0.9973 - val_loss: 0.0100 - val_acc: 0.9974\nEpoch 8/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0083 - acc: 0.9975 - val_loss: 0.0089 - val_acc: 0.9973\nEpoch 9/20\n469/468 [==============================] - 32s 69ms/step - loss: 0.0083 - acc: 0.9970 - val_loss: 0.0096 - val_acc: 0.9973\nEpoch 10/20\n469/468 [==============================] - 32s 68ms/step - loss: 0.0082 - acc: 0.9973 - val_loss: 0.0083 - val_acc: 0.9971\nEpoch 11/20\n469/468 
[==============================] - 32s 69ms/step - loss: 0.0078 - acc: 0.9974 - val_loss: 0.0098 - val_acc: 0.9969\nEpoch 12/20\n469/468 [==============================] - 33s 69ms/step - loss: 0.0082 - acc: 0.9973 - val_loss: 0.0091 - val_acc: 0.9976\nEpoch 13/20\n469/468 [==============================] - 33s 69ms/step - loss: 0.0078 - acc: 0.9974 - val_loss: 0.0093 - val_acc: 0.9973\nEpoch 14/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0073 - acc: 0.9975 - val_loss: 0.0096 - val_acc: 0.9972\nEpoch 15/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0081 - acc: 0.9975 - val_loss: 0.0083 - val_acc: 0.9974\nEpoch 16/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0074 - acc: 0.9977 - val_loss: 0.0076 - val_acc: 0.9975\nEpoch 17/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0080 - acc: 0.9974 - val_loss: 0.0094 - val_acc: 0.9971\nEpoch 18/20\n469/468 [==============================] - 33s 69ms/step - loss: 0.0081 - acc: 0.9975 - val_loss: 0.0090 - val_acc: 0.9972\nEpoch 19/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0075 - acc: 0.9975 - val_loss: 0.0091 - val_acc: 0.9976\nEpoch 20/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0078 - acc: 0.9975 - val_loss: 0.0091 - val_acc: 0.9975\nTest loss: 0.024713177538345555\nTest accuracy: 0.9939\n10000/10000 [==============================] - 4s 377us/step\n10000/10000 [==============================] - 1s 133us/step\n10000/10000 [==============================] - 4s 377us/step\n10000/10000 [==============================] - 1s 138us/step\n6000/6000 [==============================] - 2s 375us/step\n6000/6000 [==============================] - 1s 137us/step\n6000/6000 [==============================] - 2s 379us/step\n6000/6000 [==============================] - 1s 139us/step\n", "name": "stdout" } ] }, { "metadata": { "trusted": true, "collapsed": true, "_uuid": 
"ada138653eb6e0c4067c8383b62fea43271df83b" }, "cell_type": "code", "source": "# Sample output of a run of this script\n# Should take between 1 and 2 hours to run on a Tesla K80\n'''\n['ahdd1', 'keras-pretrained-models']\n/opt/conda/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\nx_train shape: (60000, 28, 28, 3)\n60000 train samples\n10000 test samples\nTrain on 54000 samples, validate on 6000 samples\nEpoch 1/20\n54000/54000 [==============================] - 38s 702us/step - loss: 0.9172 - acc: 0.6546 - val_loss: 0.0631 - val_acc: 0.9830\nEpoch 2/20\n54000/54000 [==============================] - 33s 607us/step - loss: 0.0486 - acc: 0.9864 - val_loss: 0.0352 - val_acc: 0.9900\nEpoch 3/20\n54000/54000 [==============================] - 33s 608us/step - loss: 0.0321 - acc: 0.9908 - val_loss: 0.0385 - val_acc: 0.9898\nEpoch 4/20\n54000/54000 [==============================] - 33s 607us/step - loss: 0.0296 - acc: 0.9920 - val_loss: 0.0241 - val_acc: 0.9925\nEpoch 5/20\n54000/54000 [==============================] - 33s 607us/step - loss: 0.0226 - acc: 0.9939 - val_loss: 0.0202 - val_acc: 0.9942\nEpoch 6/20\n54000/54000 [==============================] - 33s 607us/step - loss: 0.0199 - acc: 0.9943 - val_loss: 0.0270 - val_acc: 0.9938\nEpoch 7/20\n54000/54000 [==============================] - 33s 606us/step - loss: 0.0163 - acc: 0.9953 - val_loss: 0.0229 - val_acc: 0.9935\nEpoch 8/20\n54000/54000 [==============================] - 33s 606us/step - loss: 0.0159 - acc: 0.9952 - val_loss: 0.0287 - val_acc: 0.9923\nEpoch 9/20\n54000/54000 [==============================] - 33s 603us/step - loss: 0.0112 - acc: 0.9968 - val_loss: 0.0230 - val_acc: 0.9935\nEpoch 10/20\n54000/54000 
[==============================] - 33s 605us/step - loss: 0.0152 - acc: 0.9958 - val_loss: 0.0202 - val_acc: 0.9957\nEpoch 11/20\n54000/54000 [==============================] - 33s 605us/step - loss: 0.0139 - acc: 0.9963 - val_loss: 0.0319 - val_acc: 0.9930\nEpoch 12/20\n54000/54000 [==============================] - 33s 604us/step - loss: 0.0134 - acc: 0.9963 - val_loss: 0.0287 - val_acc: 0.9953\nEpoch 13/20\n54000/54000 [==============================] - 33s 605us/step - loss: 0.0126 - acc: 0.9965 - val_loss: 0.0205 - val_acc: 0.9963\nEpoch 14/20\n54000/54000 [==============================] - 33s 604us/step - loss: 0.0097 - acc: 0.9971 - val_loss: 0.0197 - val_acc: 0.9965\nEpoch 15/20\n54000/54000 [==============================] - 33s 605us/step - loss: 0.0121 - acc: 0.9969 - val_loss: 0.0201 - val_acc: 0.9963\nEpoch 16/20\n54000/54000 [==============================] - 33s 604us/step - loss: 0.0106 - acc: 0.9973 - val_loss: 0.0233 - val_acc: 0.9942\nEpoch 17/20\n54000/54000 [==============================] - 33s 604us/step - loss: 0.0141 - acc: 0.9966 - val_loss: 0.0447 - val_acc: 0.9908\nEpoch 18/20\n54000/54000 [==============================] - 33s 604us/step - loss: 0.0116 - acc: 0.9971 - val_loss: 0.0244 - val_acc: 0.9958\nEpoch 19/20\n54000/54000 [==============================] - 33s 602us/step - loss: 0.0082 - acc: 0.9978 - val_loss: 0.0256 - val_acc: 0.9970\nEpoch 20/20\n54000/54000 [==============================] - 32s 601us/step - loss: 0.0079 - acc: 0.9980 - val_loss: 0.0155 - val_acc: 0.9967\nTrain on 54000 samples, validate on 6000 samples\nEpoch 1/20\n54000/54000 [==============================] - 31s 578us/step - loss: 0.0031 - acc: 0.9992 - val_loss: 0.0151 - val_acc: 0.9970\nEpoch 2/20\n54000/54000 [==============================] - 30s 565us/step - loss: 0.0025 - acc: 0.9993 - val_loss: 0.0150 - val_acc: 0.9968\nEpoch 3/20\n54000/54000 [==============================] - 31s 566us/step - loss: 0.0023 - acc: 0.9994 - val_loss: 0.0150 - 
val_acc: 0.9970\nEpoch 4/20\n54000/54000 [==============================] - 31s 565us/step - loss: 0.0021 - acc: 0.9994 - val_loss: 0.0150 - val_acc: 0.9972\nEpoch 5/20\n54000/54000 [==============================] - 31s 565us/step - loss: 0.0020 - acc: 0.9995 - val_loss: 0.0149 - val_acc: 0.9973\nEpoch 6/20\n54000/54000 [==============================] - 31s 565us/step - loss: 0.0018 - acc: 0.9996 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 7/20\n54000/54000 [==============================] - 31s 565us/step - loss: 0.0017 - acc: 0.9995 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 8/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0016 - acc: 0.9996 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 9/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0016 - acc: 0.9996 - val_loss: 0.0149 - val_acc: 0.9973\nEpoch 10/20\n54000/54000 [==============================] - 30s 565us/step - loss: 0.0015 - acc: 0.9996 - val_loss: 0.0149 - val_acc: 0.9973\n\nEpoch 00010: ReduceLROnPlateau reducing learning rate to 0.0019999999552965165.\nEpoch 11/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0149 - val_acc: 0.9973\nEpoch 12/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9997 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 13/20\n54000/54000 [==============================] - 30s 565us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0149 - val_acc: 0.9973\nEpoch 14/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 15/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 16/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 17/20\n54000/54000 [==============================] - 
30s 565us/step - loss: 0.0014 - acc: 0.9997 - val_loss: 0.0150 - val_acc: 0.9973\n\nEpoch 00017: ReduceLROnPlateau reducing learning rate to 0.0003999999724328518.\nEpoch 18/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9997 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 19/20\n54000/54000 [==============================] - 30s 564us/step - loss: 0.0014 - acc: 0.9997 - val_loss: 0.0150 - val_acc: 0.9973\nEpoch 20/20\n54000/54000 [==============================] - 31s 565us/step - loss: 0.0014 - acc: 0.9997 - val_loss: 0.0150 - val_acc: 0.9973\nTest loss: 0.04246884689170474\nTest accuracy: 0.9917\nTrain on 54000 samples, validate on 6000 samples\nEpoch 1/20\n54000/54000 [==============================] - 12s 219us/step - loss: 0.0861 - acc: 0.9746 - val_loss: 0.1532 - val_acc: 0.9545\nEpoch 2/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0272 - acc: 0.9916 - val_loss: 0.0382 - val_acc: 0.9885\nEpoch 3/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0173 - acc: 0.9945 - val_loss: 0.0490 - val_acc: 0.9867\nEpoch 4/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0140 - acc: 0.9952 - val_loss: 0.0193 - val_acc: 0.9942\nEpoch 5/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0130 - acc: 0.9959 - val_loss: 0.0145 - val_acc: 0.9968\nEpoch 6/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0119 - acc: 0.9961 - val_loss: 0.0203 - val_acc: 0.9942\nEpoch 7/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0087 - acc: 0.9974 - val_loss: 0.0475 - val_acc: 0.9840\nEpoch 8/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0076 - acc: 0.9973 - val_loss: 0.0157 - val_acc: 0.9952\nEpoch 9/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0070 - acc: 0.9980 - val_loss: 0.0157 - val_acc: 0.9958\nEpoch 10/20\n54000/54000 
[==============================] - 10s 187us/step - loss: 0.0068 - acc: 0.9978 - val_loss: 0.0328 - val_acc: 0.9917\nEpoch 11/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0056 - acc: 0.9981 - val_loss: 0.0240 - val_acc: 0.9938\nEpoch 12/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0060 - acc: 0.9980 - val_loss: 0.0211 - val_acc: 0.9953\nEpoch 13/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0054 - acc: 0.9983 - val_loss: 0.0189 - val_acc: 0.9955\nEpoch 14/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0061 - acc: 0.9981 - val_loss: 0.0168 - val_acc: 0.9957\nEpoch 15/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0052 - acc: 0.9983 - val_loss: 0.0148 - val_acc: 0.9967\nEpoch 16/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0043 - acc: 0.9985 - val_loss: 0.0134 - val_acc: 0.9970\nEpoch 17/20\n54000/54000 [==============================] - 10s 187us/step - loss: 0.0052 - acc: 0.9981 - val_loss: 0.0182 - val_acc: 0.9962\nEpoch 18/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0038 - acc: 0.9986 - val_loss: 0.0177 - val_acc: 0.9960\nEpoch 19/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0028 - acc: 0.9991 - val_loss: 0.0316 - val_acc: 0.9915\nEpoch 20/20\n54000/54000 [==============================] - 10s 186us/step - loss: 0.0049 - acc: 0.9984 - val_loss: 0.0163 - val_acc: 0.9968\nTrain on 54000 samples, validate on 6000 samples\nEpoch 1/20\n54000/54000 [==============================] - 11s 196us/step - loss: 0.0037 - acc: 0.9989 - val_loss: 0.0148 - val_acc: 0.9968\nEpoch 2/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0030 - acc: 0.9992 - val_loss: 0.0141 - val_acc: 0.9972\nEpoch 3/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0024 - acc: 0.9990 - val_loss: 0.0135 - val_acc: 
0.9973\nEpoch 4/20\n54000/54000 [==============================] - 9s 171us/step - loss: 0.0019 - acc: 0.9994 - val_loss: 0.0132 - val_acc: 0.9977\nEpoch 5/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0023 - acc: 0.9992 - val_loss: 0.0129 - val_acc: 0.9973\nEpoch 6/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0019 - acc: 0.9994 - val_loss: 0.0129 - val_acc: 0.9977\nEpoch 7/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0019 - acc: 0.9994 - val_loss: 0.0128 - val_acc: 0.9975\nEpoch 8/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0017 - acc: 0.9995 - val_loss: 0.0127 - val_acc: 0.9977\nEpoch 9/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0015 - acc: 0.9995 - val_loss: 0.0127 - val_acc: 0.9977\nEpoch 10/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0014 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9977\nEpoch 11/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0015 - acc: 0.9996 - val_loss: 0.0127 - val_acc: 0.9975\nEpoch 12/20\n54000/54000 [==============================] - 9s 171us/step - loss: 0.0013 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9977\nEpoch 13/20\n54000/54000 [==============================] - 9s 171us/step - loss: 0.0017 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9977\nEpoch 14/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0013 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9973\nEpoch 15/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0015 - acc: 0.9996 - val_loss: 0.0127 - val_acc: 0.9973\nEpoch 16/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0012 - acc: 0.9997 - val_loss: 0.0127 - val_acc: 0.9973\nEpoch 17/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0013 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9975\nEpoch 
18/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0013 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9977\nEpoch 19/20\n54000/54000 [==============================] - 9s 172us/step - loss: 0.0013 - acc: 0.9997 - val_loss: 0.0126 - val_acc: 0.9975\n\nEpoch 00019: ReduceLROnPlateau reducing learning rate to 0.0019999999552965165.\nEpoch 20/20\n54000/54000 [==============================] - 9s 173us/step - loss: 0.0013 - acc: 0.9996 - val_loss: 0.0126 - val_acc: 0.9975\nTest loss: 0.04305995555087147\nTest accuracy: 0.9911\nEpoch 1/20\n469/468 [==============================] - 52s 110ms/step - loss: 0.5602 - acc: 0.7981 - val_loss: 0.0356 - val_acc: 0.9922\nEpoch 2/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0531 - acc: 0.9866 - val_loss: 0.0330 - val_acc: 0.9912\nEpoch 3/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0391 - acc: 0.9903 - val_loss: 0.0455 - val_acc: 0.9904\nEpoch 4/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0376 - acc: 0.9911 - val_loss: 0.0226 - val_acc: 0.9935\nEpoch 5/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0342 - acc: 0.9918 - val_loss: 0.0391 - val_acc: 0.9887\nEpoch 6/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0318 - acc: 0.9925 - val_loss: 0.0228 - val_acc: 0.9948\nEpoch 7/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0318 - acc: 0.9925 - val_loss: 0.0183 - val_acc: 0.9948\nEpoch 8/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0268 - acc: 0.9933 - val_loss: 0.0186 - val_acc: 0.9964\nEpoch 9/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0268 - acc: 0.9931 - val_loss: 0.0220 - val_acc: 0.9945\nEpoch 10/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0258 - acc: 0.9940 - val_loss: 0.0249 - val_acc: 0.9942\nEpoch 11/20\n469/468 [==============================] - 48s 102ms/step - 
loss: 0.0288 - acc: 0.9927 - val_loss: 0.0228 - val_acc: 0.9948\nEpoch 12/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0319 - acc: 0.9928 - val_loss: 0.0209 - val_acc: 0.9956\nEpoch 13/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0212 - acc: 0.9950 - val_loss: 0.0186 - val_acc: 0.9954\nEpoch 14/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0244 - acc: 0.9944 - val_loss: 0.0428 - val_acc: 0.9899\nEpoch 15/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0197 - acc: 0.9951 - val_loss: 0.0285 - val_acc: 0.9936\nEpoch 16/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0197 - acc: 0.9953 - val_loss: 0.0395 - val_acc: 0.9919\nEpoch 17/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0271 - acc: 0.9936 - val_loss: 0.0179 - val_acc: 0.9971\nEpoch 18/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0203 - acc: 0.9951 - val_loss: 0.0210 - val_acc: 0.9960\nEpoch 19/20\n469/468 [==============================] - 48s 103ms/step - loss: 0.0205 - acc: 0.9952 - val_loss: 0.0262 - val_acc: 0.9935\nEpoch 20/20\n469/468 [==============================] - 48s 102ms/step - loss: 0.0203 - acc: 0.9954 - val_loss: 0.0312 - val_acc: 0.9936\nEpoch 1/20\n469/468 [==============================] - 45s 96ms/step - loss: 0.0134 - acc: 0.9965 - val_loss: 0.0215 - val_acc: 0.9948\nEpoch 2/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0111 - acc: 0.9967 - val_loss: 0.0210 - val_acc: 0.9947\nEpoch 3/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0093 - acc: 0.9973 - val_loss: 0.0184 - val_acc: 0.9954\nEpoch 4/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0090 - acc: 0.9972 - val_loss: 0.0172 - val_acc: 0.9956\nEpoch 5/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0095 - acc: 0.9972 - val_loss: 0.0169 - val_acc: 0.9955\nEpoch 
6/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0098 - acc: 0.9972 - val_loss: 0.0172 - val_acc: 0.9957\nEpoch 7/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0077 - acc: 0.9978 - val_loss: 0.0148 - val_acc: 0.9962\nEpoch 8/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0090 - acc: 0.9975 - val_loss: 0.0136 - val_acc: 0.9960\nEpoch 9/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0074 - acc: 0.9977 - val_loss: 0.0153 - val_acc: 0.9956\nEpoch 10/20\n469/468 [==============================] - 43s 93ms/step - loss: 0.0080 - acc: 0.9975 - val_loss: 0.0133 - val_acc: 0.9960\nEpoch 11/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0077 - acc: 0.9976 - val_loss: 0.0143 - val_acc: 0.9957\nEpoch 12/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0080 - acc: 0.9977 - val_loss: 0.0128 - val_acc: 0.9965\nEpoch 13/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0076 - acc: 0.9976 - val_loss: 0.0132 - val_acc: 0.9958\nEpoch 14/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0068 - acc: 0.9979 - val_loss: 0.0129 - val_acc: 0.9963\nEpoch 15/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0074 - acc: 0.9977 - val_loss: 0.0144 - val_acc: 0.9962\nEpoch 16/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0073 - acc: 0.9979 - val_loss: 0.0144 - val_acc: 0.9962\nEpoch 17/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0086 - acc: 0.9977 - val_loss: 0.0139 - val_acc: 0.9961\nEpoch 18/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0069 - acc: 0.9978 - val_loss: 0.0128 - val_acc: 0.9963\nEpoch 19/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0069 - acc: 0.9979 - val_loss: 0.0121 - val_acc: 0.9960\nEpoch 20/20\n469/468 [==============================] - 44s 93ms/step - loss: 0.0072 - acc: 
0.9978 - val_loss: 0.0125 - val_acc: 0.9962\nTest loss: 0.030220891384417293\nTest accuracy: 0.9939\nEpoch 1/20\n469/468 [==============================] - 35s 75ms/step - loss: 0.1125 - acc: 0.9661 - val_loss: 0.0260 - val_acc: 0.9922\nEpoch 2/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0406 - acc: 0.9879 - val_loss: 0.0166 - val_acc: 0.9942\nEpoch 3/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0311 - acc: 0.9906 - val_loss: 0.0301 - val_acc: 0.9907\nEpoch 4/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0271 - acc: 0.9912 - val_loss: 0.0223 - val_acc: 0.9929\nEpoch 5/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0246 - acc: 0.9929 - val_loss: 0.0113 - val_acc: 0.9966\nEpoch 6/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0239 - acc: 0.9928 - val_loss: 0.0168 - val_acc: 0.9951\nEpoch 7/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0215 - acc: 0.9933 - val_loss: 0.0139 - val_acc: 0.9965\nEpoch 8/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0201 - acc: 0.9935 - val_loss: 0.0154 - val_acc: 0.9955\nEpoch 9/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0205 - acc: 0.9933 - val_loss: 0.0131 - val_acc: 0.9971\nEpoch 10/20\n469/468 [==============================] - 34s 72ms/step - loss: 0.0191 - acc: 0.9942 - val_loss: 0.0185 - val_acc: 0.9946\nEpoch 11/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0192 - acc: 0.9941 - val_loss: 0.0134 - val_acc: 0.9959\nEpoch 12/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0154 - acc: 0.9951 - val_loss: 0.0101 - val_acc: 0.9976\nEpoch 13/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0154 - acc: 0.9952 - val_loss: 0.0108 - val_acc: 0.9971\nEpoch 14/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0158 - acc: 0.9950 - val_loss: 0.0107 - 
val_acc: 0.9970\nEpoch 15/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0158 - acc: 0.9949 - val_loss: 0.0126 - val_acc: 0.9961\nEpoch 16/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0150 - acc: 0.9951 - val_loss: 0.0142 - val_acc: 0.9953\nEpoch 17/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0133 - acc: 0.9955 - val_loss: 0.0101 - val_acc: 0.9970\nEpoch 18/20\n469/468 [==============================] - 34s 72ms/step - loss: 0.0145 - acc: 0.9953 - val_loss: 0.0106 - val_acc: 0.9973\nEpoch 19/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0123 - acc: 0.9960 - val_loss: 0.0153 - val_acc: 0.9968\nEpoch 20/20\n469/468 [==============================] - 33s 71ms/step - loss: 0.0139 - acc: 0.9957 - val_loss: 0.0119 - val_acc: 0.9962\nEpoch 1/20\n469/468 [==============================] - 35s 74ms/step - loss: 0.0105 - acc: 0.9967 - val_loss: 0.0112 - val_acc: 0.9969\nEpoch 2/20\n469/468 [==============================] - 32s 68ms/step - loss: 0.0101 - acc: 0.9968 - val_loss: 0.0106 - val_acc: 0.9977\nEpoch 3/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0099 - acc: 0.9968 - val_loss: 0.0090 - val_acc: 0.9971\nEpoch 4/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0090 - acc: 0.9969 - val_loss: 0.0103 - val_acc: 0.9969\nEpoch 5/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0091 - acc: 0.9969 - val_loss: 0.0088 - val_acc: 0.9973\nEpoch 6/20\n469/468 [==============================] - 32s 69ms/step - loss: 0.0088 - acc: 0.9972 - val_loss: 0.0090 - val_acc: 0.9968\nEpoch 7/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0090 - acc: 0.9973 - val_loss: 0.0100 - val_acc: 0.9974\nEpoch 8/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0083 - acc: 0.9975 - val_loss: 0.0089 - val_acc: 0.9973\nEpoch 9/20\n469/468 [==============================] - 32s 69ms/step - 
loss: 0.0083 - acc: 0.9970 - val_loss: 0.0096 - val_acc: 0.9973\nEpoch 10/20\n469/468 [==============================] - 32s 68ms/step - loss: 0.0082 - acc: 0.9973 - val_loss: 0.0083 - val_acc: 0.9971\nEpoch 11/20\n469/468 [==============================] - 32s 69ms/step - loss: 0.0078 - acc: 0.9974 - val_loss: 0.0098 - val_acc: 0.9969\nEpoch 12/20\n469/468 [==============================] - 33s 69ms/step - loss: 0.0082 - acc: 0.9973 - val_loss: 0.0091 - val_acc: 0.9976\nEpoch 13/20\n469/468 [==============================] - 33s 69ms/step - loss: 0.0078 - acc: 0.9974 - val_loss: 0.0093 - val_acc: 0.9973\nEpoch 14/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0073 - acc: 0.9975 - val_loss: 0.0096 - val_acc: 0.9972\nEpoch 15/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0081 - acc: 0.9975 - val_loss: 0.0083 - val_acc: 0.9974\nEpoch 16/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0074 - acc: 0.9977 - val_loss: 0.0076 - val_acc: 0.9975\nEpoch 17/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0080 - acc: 0.9974 - val_loss: 0.0094 - val_acc: 0.9971\nEpoch 18/20\n469/468 [==============================] - 33s 69ms/step - loss: 0.0081 - acc: 0.9975 - val_loss: 0.0090 - val_acc: 0.9972\nEpoch 19/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0075 - acc: 0.9975 - val_loss: 0.0091 - val_acc: 0.9976\nEpoch 20/20\n469/468 [==============================] - 33s 70ms/step - loss: 0.0078 - acc: 0.9975 - val_loss: 0.0091 - val_acc: 0.9975\nTest loss: 0.024713177538345555\nTest accuracy: 0.9939\n10000/10000 [==============================] - 4s 377us/step\n10000/10000 [==============================] - 1s 133us/step\n10000/10000 [==============================] - 4s 377us/step\n10000/10000 [==============================] - 1s 138us/step\n6000/6000 [==============================] - 2s 375us/step\n6000/6000 [==============================] - 1s 
137us/step\n6000/6000 [==============================] - 2s 379us/step\n6000/6000 [==============================] - 1s 139us/step\n\n[0.9973333333333333, 0.9975, 0.9963333333333333, 0.9975, 0.9983333333333333, 0.9917, 0.9911, 0.9939, 0.9939, 0.9949]\n'''", "execution_count": null, "outputs": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "name": "python", "version": "3.6.4", "mimetype": "text/x-python", "codemirror_mode": { "name": "ipython", "version": 3 }, "pygments_lexer": "ipython3", "nbconvert_exporter": "python", "file_extension": ".py" } }, "nbformat": 4, "nbformat_minor": 1 }