From dd6fd99d20c86ab0790616bfee9d0c1a16760d09 Mon Sep 17 00:00:00 2001
From: orbxball
Date: Fri, 30 Jun 2017 01:31:56 +0800
Subject: [PATCH] hw3: ensemble

---
 hw3/ensemble1.py | 125 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 125 insertions(+)
 create mode 100644 hw3/ensemble1.py

diff --git a/hw3/ensemble1.py b/hw3/ensemble1.py
new file mode 100644
index 0000000..32706f9
--- /dev/null
+++ b/hw3/ensemble1.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+import sys, os
+import numpy as np
+from keras.utils import to_categorical
+from keras.preprocessing.image import ImageDataGenerator
+from keras.models import Sequential
+from keras.layers import Dense, Activation, Flatten, Dropout, LeakyReLU
+from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
+from keras.layers import BatchNormalization
+from keras import losses
+from keras import optimizers
+from keras.callbacks import CSVLogger, ModelCheckpoint
+
+# Parameters
+height = width = 48
+num_classes = 7
+input_shape = (height, width, 1)
+batch_size = 128
+epochs = 50
+zoom_range = 0.2
+model_name = 'en1.h5'
+isValid = 1  # hold out the last 3000 samples for validation
+
+# Read the training data: first column is the label, the rest are pixels
+with open(sys.argv[1], "r") as f:
+    line = f.read().strip().replace(',', ' ').split('\n')[1:]
+    raw_data = ' '.join(line)
+    length = width*height + 1  # +1 for the label column
+    data = np.array(raw_data.split()).astype('float').reshape(-1, length)
+    X = data[:, 1:]
+    Y = data[:, 0]
+    X /= 255
+    Y = Y.reshape(Y.shape[0], 1)
+    Y = to_categorical(Y, num_classes)
+
+# Reshape the flat pixel vectors into the CNN input format
+X = X.reshape(X.shape[0], height, width, 1)
+
+# Split the data
+if isValid:
+    valid_num = 3000
+    X_train, Y_train = X[:-valid_num], Y[:-valid_num]
+    X_valid, Y_valid = X[-valid_num:], Y[-valid_num:]
+
+else:
+    X_train, Y_train = X, Y
+
+# Construct the model
+model = Sequential()
+model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
+model.add(LeakyReLU(alpha=0.03))
+model.add(BatchNormalization())
+model.add(Conv2D(32, (3, 3), padding='same'))
+model.add(LeakyReLU(alpha=0.03))
+model.add(BatchNormalization())
+model.add(MaxPooling2D(pool_size=(2, 2)))
+model.add(Dropout(0.3))
+
+model.add(Conv2D(128, (3, 3), padding='same'))
+model.add(LeakyReLU(alpha=0.03))
+model.add(BatchNormalization())
+model.add(Conv2D(128, (3, 3), padding='same'))
+model.add(LeakyReLU(alpha=0.03))
+model.add(BatchNormalization())
+model.add(MaxPooling2D(pool_size=(2, 2)))
+model.add(Dropout(0.4))
+
+model.add(Conv2D(512, (3, 3), padding='same'))
+model.add(LeakyReLU(alpha=0.03))
+model.add(BatchNormalization())
+model.add(Conv2D(512, (3, 3), padding='same'))
+model.add(LeakyReLU(alpha=0.03))
+model.add(BatchNormalization())
+model.add(MaxPooling2D(pool_size=(2, 2)))
+model.add(Dropout(0.5))
+
+model.add(Flatten())
+
+model.add(Dense(128, activation='relu'))
+model.add(BatchNormalization())
+model.add(Dropout(0.5))
+model.add(Dense(num_classes))
+model.add(Activation('softmax'))
+
+model.summary()
+
+# Compile the model
+model.compile(loss='categorical_crossentropy',
+              optimizer='adam',
+              metrics=['accuracy'])
+
+# Image preprocessing: random augmentation of the training set
+train_gen = ImageDataGenerator(rotation_range=25,
+                               width_shift_range=0.1,
+                               height_shift_range=0.1,
+                               shear_range=0.1,
+                               zoom_range=[1-zoom_range, 1+zoom_range],
+                               horizontal_flip=True)
+train_gen.fit(X_train)
+
+# Callbacks (note: val_acc is only available when validation data is passed)
+os.makedirs('en1_ckpt', exist_ok=True)  # ModelCheckpoint cannot create missing directories
+callbacks = []
+modelcheckpoint = ModelCheckpoint('en1_ckpt/weights.{epoch:03d}-{val_acc:.5f}.h5', monitor='val_acc', save_best_only=True)
+callbacks.append(modelcheckpoint)
+csv_logger = CSVLogger('en1_log.csv', separator=',', append=False)
+callbacks.append(csv_logger)
+
+
+# Fit the model on augmented batches
+if isValid:
+    model.fit_generator(train_gen.flow(X_train, Y_train, batch_size=batch_size),
+                        steps_per_epoch=10*X_train.shape[0]//batch_size,  # ~10 augmented passes per epoch
+                        epochs=epochs,
+                        callbacks=callbacks,
+                        validation_data=(X_valid, Y_valid))
+else:
+    model.fit_generator(train_gen.flow(X_train, Y_train, batch_size=batch_size),
+                        steps_per_epoch=10*X_train.shape[0]//batch_size,
+                        epochs=epochs,
+                        callbacks=callbacks)
+
+
+# Save the final model
+model.save(model_name)
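
Note: this patch only trains and saves one ensemble member (en1.h5); the commit
subject suggests its predictions are meant to be combined with siblings. Below is
a minimal inference sketch of how such soft voting could work. It is not part of
the patch: the sibling weight files (en2.h5, en3.h5) and the test-file layout
(first column an id, the rest pixels, parsed the same way ensemble1.py parses
training data) are assumptions.

#!/usr/bin/env python
# Hypothetical ensemble-inference sketch: average the softmax outputs of
# several independently trained members. Only en1.h5 is produced by this
# patch; the other member file names are assumed.
import sys
import numpy as np
from keras.models import load_model

height = width = 48

# Parse the test file with the same scheme ensemble1.py uses for training,
# dropping the first column (the id).
with open(sys.argv[1], "r") as f:
    line = f.read().strip().replace(',', ' ').split('\n')[1:]
    raw_data = ' '.join(line)
    data = np.array(raw_data.split()).astype('float').reshape(-1, width*height + 1)
X = (data[:, 1:] / 255).reshape(-1, height, width, 1)

# Soft voting: mean of the per-class probabilities across members,
# then take the most probable class.
members = ['en1.h5', 'en2.h5', 'en3.h5']
probs = np.mean([load_model(m).predict(X) for m in members], axis=0)
pred = probs.argmax(axis=-1)

Averaging probabilities (soft voting) rather than majority-voting the hard labels
lets confident members outweigh uncertain ones and needs no tie-breaking.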