GitHub Repository: ethen8181/machine-learning
Path: blob/master/keras/cnn_image_keras.ipynb
Kernel: Python 3
# code for loading the format for the notebook
import os

# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', 'notebook_format'))
from formats import load_style
load_style(plot_style=False)
os.chdir(path)

# 1. magic to print version
# 2. magic so that the notebook will reload external python modules
%load_ext watermark
%load_ext autoreload
%autoreload 2

import numpy as np
import pandas as pd
import keras.backend as K
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Activation, Flatten

%watermark -a 'Ethen' -d -t -v -p numpy,pandas,keras
Using TensorFlow backend.
Ethen 2017-03-24 10:55:22

CPython 3.5.2
IPython 5.3.0

numpy 1.12.1
pandas 0.19.2
keras 2.0.2

Convolutional Network

# loading the mnist dataset as an example
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
X_train shape: (60000, 28, 28)
60000 train samples
10000 test samples
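Not part of the original notebook, but a quick way to eyeball a sample is to plot it; at this point X_train is still (60000, 28, 28), so each row can be shown directly as a 28x28 grayscale image. This sketch assumes matplotlib is installed.

# a minimal sketch (not in the original notebook) for visualizing one sample;
# y_train is still an integer label here, so the title prints the raw digit
import matplotlib.pyplot as plt

plt.imshow(X_train[0], cmap='gray')
plt.title('label: {}'.format(y_train[0]))
plt.show()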
# input image dimensions
img_rows, img_cols = 28, 28

# load the training data and do basic data normalization
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# the keras backend supports two different image data formats,
# channels first or channels last; we can detect which one is in use
# and reshape our raw data accordingly. If it's channels first, we add
# another dimension representing the depth (the color channels; it is 1
# here because mnist is grayscale) at the very beginning; if it's
# channels last, we add it at the end
if K.image_data_format() == 'channels_first':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

# pixels take values between 0 - 255, so we can normalize them
# to the 0 - 1 range by dividing every value by 255
X_train /= 255
X_test /= 255
print('train shape:', X_train.shape)
train shape: (60000, 28, 28, 1)
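The reshape above inserts the channel axis by hand; an equivalent idiom is np.expand_dims. This is a sketch, not from the original notebook, shown on a fresh copy of the raw data so it doesn't clash with the already-reshaped X_train.

# demonstrate the same channel-axis trick with np.expand_dims;
# axis=-1 appends the dimension (channels last), axis=1 inserts it
# right after the sample dimension (channels first)
(raw, _), _ = mnist.load_data()
print(np.expand_dims(raw, axis=-1).shape)  # (60000, 28, 28, 1)
print(np.expand_dims(raw, axis=1).shape)   # (60000, 1, 28, 28)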
# one-hot encode the class (target) vectors
n_class = 10
y_train = np_utils.to_categorical(y_train, n_class)
y_test = np_utils.to_categorical(y_test, n_class)
print('y_train shape:', y_train.shape)
y_train shape: (60000, 10)
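To make the encoding concrete (a small demo, not in the original notebook): to_categorical turns each integer label into a one-hot row of length n_class.

# e.g. the digit 3 becomes a length-10 vector with a single 1 at index 3
demo = np_utils.to_categorical([3], n_class)
print(demo)  # [[ 0.  0.  0.  1.  0.  0.  0.  0.  0.  0.]]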

The following code chunk takes A WHILE if you're running it on a laptop!!
model = Sequential()

# apply 32 3x3 filters for the first convolutional layer;
# we specify `padding` as 'same' so the output keeps the same
# width and height as the input (keras will zero-pad automatically),
# the default stride is 1, and since this is the first layer we
# need to specify the input shape of the image
model.add(Conv2D(32, kernel_size = (3, 3), padding = 'same', input_shape = input_shape))

# some activation function after the conv layer
model.add(Activation('relu'))
model.add(Conv2D(64, kernel_size = (3, 3), padding = 'same'))
model.add(Activation('relu'))

# pooling layer; we specify the size of the filters for the pooling layer,
# the default `strides` is None, which defaults to pool_size
model.add(MaxPooling2D(pool_size = (2, 2)))

# before calling the fully-connected layers, we'll have to flatten it
model.add(Flatten())
model.add(Dense(n_class))
model.add(Activation('softmax'))
model.compile(loss = 'categorical_crossentropy',
              optimizer = 'adam',
              metrics = ['accuracy'])

n_epoch = 12
batch_size = 2056
model.fit(X_train, y_train,
          batch_size = batch_size,
          epochs = n_epoch,
          verbose = 1,
          validation_data = (X_test, y_test))

# evaluate the score, the categorical cross entropy loss and accuracy
score = model.evaluate(X_test, y_test, verbose = 0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
Train on 60000 samples, validate on 10000 samples
Epoch 1/12
60000/60000 [==============================] - 67s - loss: 0.9482 - acc: 0.7719 - val_loss: 0.3632 - val_acc: 0.8982
Epoch 2/12
60000/60000 [==============================] - 65s - loss: 0.3059 - acc: 0.9115 - val_loss: 0.2427 - val_acc: 0.9294
Epoch 3/12
60000/60000 [==============================] - 71s - loss: 0.2126 - acc: 0.9388 - val_loss: 0.1648 - val_acc: 0.9513
Epoch 4/12
60000/60000 [==============================] - 67s - loss: 0.1419 - acc: 0.9589 - val_loss: 0.1065 - val_acc: 0.9704
Epoch 5/12
60000/60000 [==============================] - 68s - loss: 0.0985 - acc: 0.9719 - val_loss: 0.0786 - val_acc: 0.9766
Epoch 6/12
60000/60000 [==============================] - 65s - loss: 0.0752 - acc: 0.9784 - val_loss: 0.0665 - val_acc: 0.9802
Epoch 7/12
60000/60000 [==============================] - 73s - loss: 0.0636 - acc: 0.9816 - val_loss: 0.0605 - val_acc: 0.9813
Epoch 8/12
60000/60000 [==============================] - 69s - loss: 0.0560 - acc: 0.9838 - val_loss: 0.0578 - val_acc: 0.9815
Epoch 9/12
60000/60000 [==============================] - 64s - loss: 0.0519 - acc: 0.9848 - val_loss: 0.0517 - val_acc: 0.9831
Epoch 10/12
60000/60000 [==============================] - 66s - loss: 0.0463 - acc: 0.9864 - val_loss: 0.0511 - val_acc: 0.9834
Epoch 11/12
60000/60000 [==============================] - 63s - loss: 0.0429 - acc: 0.9872 - val_loss: 0.0512 - val_acc: 0.9834
Epoch 12/12
60000/60000 [==============================] - 68s - loss: 0.0402 - acc: 0.9884 - val_loss: 0.0490 - val_acc: 0.9844
Test score: 0.0489532888404
Test accuracy: 0.9844
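To check the shapes the comments claim (padding='same' with stride 1 keeps the 28x28 spatial size, and the 2x2 max-pool halves it to 14x14, so Flatten yields 14 * 14 * 64 = 12544 features), we can print the architecture. A quick sanity check on a single prediction follows; neither cell is in the original notebook.

# inspect layer output shapes and parameter counts
model.summary()

# predicted class for the first test image vs. its true label;
# y_test is one-hot encoded, so argmax recovers the digit
pred = model.predict(X_test[:1])
print('predicted digit:', np.argmax(pred), ', true digit:', np.argmax(y_test[0]))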
