Convolutional Neural Networks

Method 1: Use Deep Feedforward Neural Networks

In [1]:
import tensorflow as tf
from tensorflow import keras

import numpy as np
import matplotlib.pyplot as plt

import random
In [2]:
fashion = keras.datasets.fashion_mnist

(data_train,labels_train),(data_test,labels_test) = fashion.load_data()
In [3]:
data_train.shape
Out[3]:
(60000, 28, 28)
In [4]:
rnumb = random.randint(0,6000)
plt.imshow(data_train[rnumb])
Out[4]:
<matplotlib.image.AxesImage at 0xb37f7b320>
In [5]:
data_train = data_train/255.0
data_test = data_test/255.0
In [6]:
model = keras.Sequential()

model.add(keras.layers.Flatten(input_shape=(data_train.shape[1],data_train.shape[2])))
model.add(keras.layers.Dense(128,activation='relu'))
model.add(keras.layers.Dense(10,activation = 'softmax'))
In [7]:
model.compile(optimizer='adam', 
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

#model.compile(optimizer=tf.train.AdamOptimizer(),loss='categorical_crossentropy',metrics=['accuracy'])
In [8]:
model.fit(data_train,labels_train,epochs = 10)
Epoch 1/10
60000/60000 [==============================] - 7s 110us/step - loss: 0.4960 - acc: 0.8248
Epoch 2/10
60000/60000 [==============================] - 6s 102us/step - loss: 0.3747 - acc: 0.8646
Epoch 3/10
60000/60000 [==============================] - 6s 102us/step - loss: 0.3361 - acc: 0.8772
Epoch 4/10
60000/60000 [==============================] - 6s 101us/step - loss: 0.3112 - acc: 0.8850
Epoch 5/10
60000/60000 [==============================] - 6s 105us/step - loss: 0.2929 - acc: 0.8905
Epoch 6/10
60000/60000 [==============================] - 6s 103us/step - loss: 0.2779 - acc: 0.8960
Epoch 7/10
60000/60000 [==============================] - 6s 103us/step - loss: 0.2658 - acc: 0.9004
Epoch 8/10
60000/60000 [==============================] - 6s 103us/step - loss: 0.2561 - acc: 0.9038
Epoch 9/10
60000/60000 [==============================] - 6s 103us/step - loss: 0.2467 - acc: 0.9076
Epoch 10/10
60000/60000 [==============================] - 6s 103us/step - loss: 0.2363 - acc: 0.9104
Out[8]:
<tensorflow.python.keras.callbacks.History at 0xb37fcfb00>
In [9]:
model.evaluate(data_test,labels_test)
10000/10000 [==============================] - 0s 31us/step
Out[9]:
[0.3598747498512268, 0.8717]

Method 2: Use Convolutional Neural Networks

In [28]:
(data_train,labels_train),(data_test,labels_test) = fashion.load_data()

data_train = data_train.reshape(60000,28,28,1)
data_test = data_test.reshape(10000,28,28,1)
In [29]:
from tensorflow.keras.utils import to_categorical

labels_train = to_categorical(labels_train)
labels_test = to_categorical(labels_test)
print(labels_train.shape)
(60000, 10)
In [36]:
convmodel = keras.Sequential()

convmodel.add(
    keras.layers.Conv2D(filters = 64, 
                        kernel_size= 4, 
                        input_shape=(data_train.shape[1],data_train.shape[2],1),
                        padding='same' , 
                        activation='relu')
)
convmodel.add(keras.layers.MaxPooling2D(pool_size=2))
convmodel.add(
    keras.layers.Conv2D(filters = 32, 
                        kernel_size= 4, 
                        input_shape=(data_train.shape[1],data_train.shape[2],1),
                        padding='same' , 
                        kernel_regularizer = tf.keras.regularizers.l2(0.00001),
                        activation='relu')
)
convmodel.add(keras.layers.MaxPooling2D(pool_size=2))


convmodel.add(keras.layers.Flatten())
convmodel.add(keras.layers.Dense(10,activation='softmax'))
In [37]:
convmodel.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
In [38]:
convmodel.fit(data_train,labels_train,epochs = 5)
Epoch 1/5
60000/60000 [==============================] - 42s 702us/step - loss: 3.3606 - acc: 0.6901
Epoch 2/5
60000/60000 [==============================] - 42s 695us/step - loss: 0.3346 - acc: 0.8808
Epoch 3/5
60000/60000 [==============================] - 41s 690us/step - loss: 0.3024 - acc: 0.8892
Epoch 4/5
60000/60000 [==============================] - 42s 706us/step - loss: 0.2830 - acc: 0.8977
Epoch 5/5
60000/60000 [==============================] - 41s 688us/step - loss: 0.2714 - acc: 0.9013
Out[38]:
<tensorflow.python.keras.callbacks.History at 0xb7d5b8d68>
In [39]:
convmodel.evaluate(data_test,labels_test,batch_size=30)
10000/10000 [==============================] - 2s 225us/step
Out[39]:
[0.335212633792311, 0.8803999922871589]
In [ ]: