-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
1 changed file
with
203 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,203 @@ | ||
from keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint | ||
|
||
class MetricsCheckpoint(Callback):
    """Keras callback that accumulates per-epoch metrics and saves them to disk.

    After every epoch, the metric history collected so far is written to
    ``savepath`` via ``np.save`` (a pickled dict: metric name -> list of values).
    """
    def __init__(self, savepath):
        super(MetricsCheckpoint, self).__init__()
        self.savepath = savepath  # destination .npy file for the history dict
        self.history = {}         # metric name -> list of per-epoch values
    def on_epoch_end(self, epoch, logs=None):
        # BUG FIX: logs defaults to None; the original crashed on logs.items()
        # when Keras (or a manual call) passed no logs dict.
        for k, v in (logs or {}).items():
            self.history.setdefault(k, []).append(v)
        # Snapshot the full history after each epoch so a crash loses nothing.
        np.save(self.savepath, self.history)
|
||
def plotKerasLearningCurve():
    """Plot train/val learning curves from the metrics saved in 'logs.npy'.

    Expects 'logs.npy' to contain a pickled dict (as written by
    MetricsCheckpoint) mapping metric names to lists of per-epoch values.
    Train curves are drawn in red, validation curves in blue, and the best
    epoch of each curve is annotated.
    """
    plt.figure(figsize=(10, 5))
    # BUG FIX: since NumPy 1.16.3 np.load defaults to allow_pickle=False and
    # raises on pickled-object arrays; .item() unwraps the 0-d object array
    # (equivalent to the old `[()]` indexing trick).
    metrics = np.load('logs.npy', allow_pickle=True).item()
    filt = ['acc']  # try to add 'loss' to see the loss learning curve
    for k in filter(lambda x: np.any([kk in x for kk in filt]), metrics.keys()):
        l = np.array(metrics[k])
        plt.plot(l, c='r' if 'val' not in k else 'b',
                 label='val' if 'val' in k else 'train')
        # Best epoch: minimum for loss-like metrics, maximum for the rest.
        x = np.argmin(l) if 'loss' in k else np.argmax(l)
        y = l[x]
        plt.scatter(x, y, lw=0, alpha=0.25, s=100, c='r' if 'val' not in k else 'b')
        plt.text(x, y, '{} = {:.4f}'.format(x, y), size='15',
                 color='r' if 'val' not in k else 'b')
    plt.legend(loc=4)
    plt.axis([0, None, None, None])
    plt.grid()
    plt.xlabel('Number of epochs')
    plt.ylabel('Accuracy')
|
||
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization (row-wise, so each true class sums to 1) can be applied
    by setting `normalize=True`.
    """
    # BUG FIX: normalize BEFORE drawing. The original normalized after
    # imshow(), so the image showed raw counts while the cell annotations
    # and the text-colour threshold used normalized fractions.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.figure(figsize=(5, 5))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90)
    plt.yticks(tick_marks, classes)

    # Annotate every cell; use white text on cells darker than mid-scale.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
|
||
def plot_learning_curve(history):
    """Plot accuracy and loss curves from a Keras ``History`` object.

    Draws two side-by-side panels (accuracy, loss) and saves them to
    './accuracy_curve.png' and './loss_curve.png' respectively.
    """
    plt.figure(figsize=(8, 8))

    # Left panel: training vs. validation accuracy per epoch.
    plt.subplot(1, 2, 1)
    for metric in ('acc', 'val_acc'):
        plt.plot(history.history[metric])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('./accuracy_curve.png')

    # Right panel: training vs. validation loss per epoch.
    plt.subplot(1, 2, 2)
    for metric in ('loss', 'val_loss'):
        plt.plot(history.history[metric])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig('./loss_curve.png')
|
||
|
||
|
||
|
||
|
||
import os | ||
import shutil # for concatenation of dataframes | ||
import pandas as pd | ||
import pickle # for saving Python objects | ||
import numpy as np | ||
import matplotlib.pyplot as plt | ||
import cv2 | ||
from tqdm import tqdm | ||
import scipy | ||
from scipy import ndimage | ||
from keras.utils.np_utils import to_categorical | ||
from PIL import Image,ImageEnhance,ImageFilter | ||
from skimage import filters | ||
import keras | ||
from keras.models import Sequential | ||
from keras.layers import Dense, Activation, Dropout, Conv2D,MaxPooling2D,Flatten | ||
|
||
|
||
import sklearn | ||
|
||
def get_data(folder):
    """
    Load the data and labels from the given folder.

    `folder` is expected to contain one sub-directory per white-blood-cell
    type (NEUTROPHIL, EOSINOPHIL, MONOCYTE, LYMPHOCYTE, ...).

    Returns:
        X: float array of shape (n, 60, 80, 3), pixel values scaled to [0, 1]
        y: int array of fine-grained labels (1-4 for known types, 5 otherwise)
        z: int array of coarse binary labels (1 = polynuclear, 0 = mononuclear)
    """
    # (fine label, binary label) per known class folder; anything else -> (5, 0).
    label_map = {
        'NEUTROPHIL': (1, 1),
        'EOSINOPHIL': (2, 1),
        'MONOCYTE':   (3, 0),
        'LYMPHOCYTE': (4, 0),
    }
    X = []
    y = []
    z = []
    for wbc_type in os.listdir(folder):
        if wbc_type.startswith('.'):
            continue  # skip hidden entries such as .DS_Store
        label, label2 = label_map.get(wbc_type, (5, 0))
        class_dir = os.path.join(folder, wbc_type)
        for image_filename in tqdm(os.listdir(class_dir)):
            img_file = cv2.imread(os.path.join(class_dir, image_filename))
            if img_file is not None:
                # BUG FIX: scipy.misc.imresize was removed in SciPy >= 1.3;
                # cv2.resize is the drop-in replacement. Its dsize argument
                # is (width, height), so (80, 60) reproduces the original
                # (60, 80, 3) output shape.
                img_file = cv2.resize(img_file, (80, 60))
                X.append(np.asarray(img_file))
                y.append(label)
                z.append(label2)
    X = np.asarray(X) / 255.0  # scale uint8 pixels to [0, 1]
    y = np.asarray(y)
    z = np.asarray(z)
    return X, y, z
# Load train/test splits from the Kaggle blood-cell dataset directory layout.
X_train, y_train, z_train = get_data('../input/dataset2-master/dataset2-master/images/TRAIN/')
X_test, y_test, z_test = get_data('../input/dataset2-master/dataset2-master/images/TEST/')

# Encode labels to hot vectors (ex : 2 -> [0,0,1,0,0,0,0,0,0,0])
# NOTE(review): fine labels are 1-4, so class 0 of the 5 classes is never used.
from keras.utils.np_utils import to_categorical
y_trainHot = to_categorical(y_train, num_classes = 5)
y_testHot = to_categorical(y_test, num_classes = 5)
z_trainHot = to_categorical(z_train, num_classes = 2)
z_testHot = to_categorical(z_test, num_classes = 2)
|
||
import keras
# Index -> human-readable class name for the 4-way and 2-way label schemes.
dict_characters = {1:'NEUTROPHIL',2:'EOSINOPHIL',3:'MONOCYTE',4:'LYMPHOCYTE'}
dict_characters2 = {0:'Mononuclear',1:'Polynuclear'}
def runKerasCNNAugment(a,b,c,d,e):
    """Train a small CNN with data augmentation and report test metrics.

    Args:
        a: training images, shape (n, 60, 80, 3) -- TODO confirm vs. get_data
        b: one-hot training labels
        c: test images
        d: one-hot test labels
        e: stride of the first convolution

    Side effects: prints test accuracy and a classification report, and
    shows learning-curve and confusion-matrix plots.
    """
    # BUG FIX: batch_size was declared as 128 but 32 was hard-coded in the
    # flow()/steps_per_epoch calls below; the variable is now the single
    # source of truth at the value actually used.
    batch_size = 32
    num_classes = len(b[0])
    epochs = 50
    img_rows, img_cols = 60, 80
    input_shape = (img_rows, img_cols, 3)

    # Two conv layers -> max-pool -> dense head with dropout regularization.
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape, strides=e))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    # Light geometric augmentation only; no normalization layers are enabled
    # because the inputs are already scaled to [0, 1].
    datagen = keras.preprocessing.image.ImageDataGenerator(
        featurewise_center=False,             # set input mean to 0 over the dataset
        samplewise_center=False,              # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,   # divide each input by its std
        zca_whitening=False,                  # apply ZCA whitening
        rotation_range=10,                    # randomly rotate images (degrees, 0 to 180)
        width_shift_range=0.1,                # randomly shift images horizontally
        height_shift_range=0.1,               # randomly shift images vertically
        horizontal_flip=True,                 # randomly flip images
        vertical_flip=False)                  # randomly flip images

    history = model.fit_generator(datagen.flow(a, b, batch_size=batch_size),
                                  steps_per_epoch=len(a) / batch_size, epochs=epochs)
    score = model.evaluate(c, d, verbose=0)
    print('\nKeras CNN #1C - accuracy:', score[1], '\n')
    y_pred = model.predict(c)
    map_characters = dict_characters
    print('\n', sklearn.metrics.classification_report(np.where(d > 0)[1], np.argmax(y_pred, axis=1), target_names=list(map_characters.values())), sep='')
    Y_pred_classes = np.argmax(y_pred, axis=1)
    Y_true = np.argmax(d, axis=1)
    plotKerasLearningCurve()
    plt.show()
    plot_learning_curve(history)
    plt.show()
    # BUG FIX: confusion_matrix was never imported anywhere in this file
    # (NameError at runtime); use it via the sklearn.metrics namespace,
    # which IS imported.
    confusion_mtx = sklearn.metrics.confusion_matrix(Y_true, Y_pred_classes)
    plot_confusion_matrix(confusion_mtx, classes = list(dict_characters.values()))
    plt.show()
|
||
|
||
runKerasCNNAugment(X_train,y_trainHot,X_test,y_testHot,1) | ||
|