-
Notifications
You must be signed in to change notification settings - Fork 5
/
model.py
121 lines (96 loc) · 3.91 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
# Optional TF1-style GPU memory capping (uncomment to limit GPU memory use):
# import tensorflow as tf
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.30
# session = tf.Session(config=config)
# from keras.models import model_from_yaml

import matplotlib.pyplot as plt
import numpy
import numpy as np

from keras import optimizers
from keras.layers import Activation, Dense, Flatten
from keras.layers.convolutional import Convolution2D, Conv2DTranspose
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.regularizers import l2

import data
class model:
    """CNN classifier for 28x28 single-channel images over 27 classes.

    On construction, loads the dataset via ``data.load_data()``, builds the
    network, and restores pre-trained weights from *weights_path*.

    NOTE(review): ``data.load_data()`` is assumed to return
    ``(train_images, train_labels, test_images, test_labels)`` in that
    order -- confirm against data.py.
    """

    def __init__(self, weights_path="89.1.h5"):
        """Load data, build the network, and restore saved weights.

        weights_path: HDF5 weights file to restore. Defaults to the
        checkpoint shipped with the project ("89.1" presumably refers to
        the saved test accuracy -- TODO confirm).
        """
        self.trainI, self.trainL, self.testI, self.testL = data.load_data()
        self.my_model = self.moodel()
        self.my_model.load_weights(weights_path)

    def moodel(self):
        """Build and return the (uncompiled) Sequential CNN.

        Architecture: three strided conv blocks (16 -> 8 -> 4 filters,
        each followed by BatchNorm + ReLU), then Flatten and dense layers
        8 -> 16 -> 27, ending in a softmax over the 27 classes.

        (Method name kept for backward compatibility with existing callers.)
        """
        reg = 0  # L2 activity-regularization strength; 0 disables it.
        net = Sequential()
        net.add(Convolution2D(16, (6, 6), padding='same', strides=(2, 2),
                              input_shape=(28, 28, 1),
                              activity_regularizer=l2(reg)))
        net.add(BatchNormalization(axis=-1))
        net.add(Activation('relu'))
        net.add(Convolution2D(8, (3, 3), padding='same', strides=(2, 2),
                              activity_regularizer=l2(reg)))
        net.add(BatchNormalization(axis=-1))
        net.add(Activation('relu'))
        net.add(Convolution2D(4, (2, 2), padding='same', strides=(2, 2),
                              activity_regularizer=l2(reg)))
        net.add(BatchNormalization(axis=-1))
        net.add(Activation('relu'))
        net.add(Flatten())
        net.add(Dense(8))
        net.add(Activation('relu'))
        net.add(Dense(16))
        net.add(Activation('relu'))
        net.add(Dense(27, activation='softmax'))
        return net

    def train(self, batch_size, epochs, learning_rate):
        """Compile and fit on the training split, then persist the result.

        Uses Adam with the given learning rate, sparse categorical
        cross-entropy (labels are integer class ids, not one-hot), and a
        10% validation split. Saves weights to 'weights.h5' and the
        architecture to 'model.yaml', and plots accuracy/loss curves.
        """
        self.my_model.summary()
        # optimizers.Adam is the canonical class; the old lowercase
        # `optimizers.adam` was just a legacy alias for it.
        self.adam = optimizers.Adam(lr=learning_rate)
        self.my_model.compile(loss='sparse_categorical_crossentropy',
                              optimizer=self.adam, metrics=['accuracy'])
        history = self.my_model.fit(self.trainI, self.trainL,
                                    batch_size=batch_size, epochs=epochs,
                                    shuffle=True, validation_split=0.1)
        self.plot_history(history)
        self.my_model.save_weights('weights.h5')
        model_yaml = self.my_model.to_yaml()
        with open("model.yaml", "w") as yaml_file:
            yaml_file.write(model_yaml)

    def evaluate(self):
        """Evaluate on the test split and print [loss, accuracy].

        Compiling is required before evaluate(); the learning rate is
        irrelevant here since no weight updates happen.
        """
        self.adam = optimizers.Adam(lr=5e-3)
        self.my_model.compile(loss='sparse_categorical_crossentropy',
                              optimizer=self.adam, metrics=['accuracy'])
        # batch_size=2056 kept as-is (possibly intended as 2048); it only
        # affects evaluation speed, not the reported metrics.
        hist = self.my_model.evaluate(self.testI, self.testL, batch_size=2056)
        print(hist)

    def predict(self, x):
        """Return the predicted class index for input batch *x*.

        NOTE(review): np.argmax is taken over the flattened output, so
        this is only meaningful when *x* contains a single sample
        (shape (1, 28, 28, 1)) -- confirm callers never pass batches.
        """
        y = self.my_model.predict(x)
        return np.argmax(y)

    def plot_history(self, history):
        """Plot and save training/validation accuracy and loss curves.

        Expects Keras-2 history keys ('acc'/'val_acc'); writes 'acc.png'
        and 'loss.png' and also shows each figure interactively.
        """
        print(history.history.keys())
        # Accuracy curves.
        plt.plot(history.history['acc'])
        plt.plot(history.history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.savefig("acc.png")
        plt.show()
        # Loss curves.
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.savefig("loss.png")
        plt.show()
# model = model()
# model.train(4, 20, 1e-5)
# print (model.testI.shape)
# model.evaluate()