GitHub Repository: hackassin/learnopencv
Path: blob/master/CharClassification/train_model.py

# import required modules
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
import matplotlib.pyplot as plt

# import the model architecture defined in net.py
from net import Net
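
# Note: this script targets standalone Keras 2.x; under TensorFlow 2.x the
# same classes are available via tensorflow.keras, e.g.
#   from tensorflow.keras.preprocessing.image import ImageDataGenerator
#   from tensorflow.keras import optimizers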

# dimensions of our images
img_width, img_height = 32, 32

# 3-channel (RGB) images
no_of_channels = 3

# training data directory
train_data_dir = 'train/'
# validation data directory
validation_data_dir = 'test/'

epochs = 80
batch_size = 32

# initialize the model
model = Net.build(width=img_width, height=img_height, depth=no_of_channels)
print('model built')

# compile the model with categorical cross-entropy loss and the RMSprop optimizer
rms = optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)

model.compile(loss='categorical_crossentropy',
              optimizer=rms,
              metrics=['accuracy'])
print('model compiled')
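
# Note: newer Keras versions rename the lr argument to learning_rate, e.g.
#   rms = optimizers.RMSprop(learning_rate=0.001, rho=0.9)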

# this is the augmentation configuration used for training;
# horizontal_flip=False, since a mirrored character is no longer a valid character
train_datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rescale=1. / 255,
    shear_range=0.1,
    zoom_range=0.1,
    rotation_range=5,
    width_shift_range=0.05,
    height_shift_range=0.05,
    horizontal_flip=False)

# the configuration used for validation: only normalization and rescaling
test_datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rescale=1. / 255)
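
# Because featurewise_center / featurewise_std_normalization compute statistics
# over the dataset, Keras requires calling fit() on each generator with a
# sample of training images before use; otherwise it warns and skips the
# normalization. A minimal sketch, assuming a hypothetical NumPy array
# sample_images of shape (N, img_height, img_width, no_of_channels):
#   train_datagen.fit(sample_images)
#   test_datagen.fit(sample_images)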

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')
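
# flow_from_directory infers one class per subdirectory of the data directory
# (e.g. train/A/, train/B/, ...); the label mapping can be inspected via
# train_generator.class_indices.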

# fit the model; steps are integer counts of batches per epoch
history = model.fit_generator(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size)

# evaluate on the validation dataset and report the result
score = model.evaluate_generator(validation_generator)
print('validation loss and accuracy:', score)

# save the trained weights to a file
model.save_weights('trained_weights.h5')

print(history.history)
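
# save_weights stores only the parameters, not the architecture; to reload,
# rebuild the same network first. A minimal sketch:
#   model = Net.build(width=img_width, height=img_height, depth=no_of_channels)
#   model.load_weights('trained_weights.h5')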

# loss curves
plt.figure(figsize=[8, 6])
plt.plot(history.history['loss'], 'r', linewidth=3.0)
plt.plot(history.history['val_loss'], 'b', linewidth=3.0)
plt.legend(['Training Loss', 'Validation Loss'], fontsize=18)
plt.xlabel('Epochs', fontsize=16)
plt.ylabel('Loss', fontsize=16)
plt.title('Loss Curves', fontsize=16)

# accuracy curves; note that Keras 2.3+ logs these metrics under
# 'accuracy' / 'val_accuracy' instead of 'acc' / 'val_acc'
plt.figure(figsize=[8, 6])
plt.plot(history.history['acc'], 'r', linewidth=3.0)
plt.plot(history.history['val_acc'], 'b', linewidth=3.0)

plt.legend(['Training Accuracy', 'Validation Accuracy'], fontsize=18)
plt.xlabel('Epochs', fontsize=16)
plt.ylabel('Accuracy', fontsize=16)
plt.title('Accuracy Curves', fontsize=16)
plt.show()