Commit bd95f26: "ML"
Tensor46 committed Jun 30, 2018 (0 parents, initial commit)
Showing 39 changed files with 2,982 additions and 0 deletions.
114 changes: 114 additions & 0 deletions .gitignore
@@ -0,0 +1,114 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
YellowFIN.py
Attentive*
test.jpeg
CIFAR10/
cifar-10-batches-py/
cifar-10-python.tar.gz
processed/
raw/
reinforcement*


# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
117 changes: 117 additions & 0 deletions Capsule.py
@@ -0,0 +1,117 @@
""" tensorMONK's :: Capsule Network """

from __future__ import print_function, division
import os
import sys
import timeit
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
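# Variable is a no-op wrapper from torch 0.4 onward (merged into Tensor);
# kept here for compatibility with older versions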
from core import *
import torch.optim as neuralOptimizer
#==============================================================================#


def trainMONK(args):
tensor_size = (1, 1, 28, 28)
trDataLoader, teDataLoader, n_labels = NeuralEssentials.MNIST(args.trainDataPath, tensor_size, args.BSZ, args.cpus)
file_name = "./models/" + args.Architecture.lower()
Model = NeuralEssentials.MakeCNN(file_name, tensor_size, n_labels,
embedding_net=NeuralArchitectures.CapsuleNet,
embedding_net_kwargs={"replicate_paper" : args.replicate_paper},
loss_net=NeuralLayers.CapsuleLoss, loss_net_kwargs={},
default_gpu=args.default_gpu, gpus=args.gpus,
ignore_trained=args.ignore_trained)
    # .parameters() returns generators, so materialize them as lists before concatenating
    params = list(Model.netEmbedding.parameters()) + list(Model.netLoss.parameters())
    if args.optimizer.lower() == "adam":
        Optimizer = neuralOptimizer.Adam(params)
    elif args.optimizer.lower() == "sgd":
        Optimizer = neuralOptimizer.SGD(params, lr=args.learningRate)
else:
raise NotImplementedError

# Usual training
for _ in range(args.Epochs):
Timer = timeit.default_timer()
Model.netEmbedding.train()
Model.netLoss.train()
        for i, (tensor, targets) in enumerate(trDataLoader):
Model.meterIterations += 1

# forward pass and parameter update
Model.netEmbedding.zero_grad()
Model.netLoss.zero_grad()
features, rec_tensor, rec_loss = Model.netEmbedding( (Variable(tensor), Variable(targets)) )
margin_loss, (top1, top5) = Model.netLoss( (features, Variable(targets)) )
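            # weight the reconstruction loss by 0.0005 (here averaged over the
            # batch), the scale used in Sabour et al., 2017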
loss = margin_loss + 0.0005*rec_loss/features.size(0)
# loss = margin_loss / features.size(0)
loss.backward()
Optimizer.step()

# updating all meters
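            # torch 0.4 returns 0-dim tensors that float() handles directly;
            # older versions return 1-element arrays, hence the [0] indexing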
Model.meterTop1.append(float(top1.cpu().data.numpy() if torch.__version__.startswith("0.4") else top1.cpu().data.numpy()[0]))
Model.meterTop5.append(float(top5.cpu().data.numpy() if torch.__version__.startswith("0.4") else top5.cpu().data.numpy()[0]))
Model.meterLoss.append(float(loss.cpu().data.numpy() if torch.__version__.startswith("0.4") else loss.cpu().data.numpy()[0]))

Model.meterSpeed.append(int(float(args.BSZ)/(timeit.default_timer()-Timer)))
Timer = timeit.default_timer()

print("... {:6d} :: Cost {:2.3f} :: Top1/Top5 - {:3.2f}/{:3.2f} :: {:4d} I/S ".format(Model.meterIterations,
Model.meterLoss[-1], Model.meterTop1[-1], Model.meterTop5[-1], Model.meterSpeed[-1]),end="\r")
sys.stdout.flush()

# save every epoch and print the average of epoch
print("... {:6d} :: Cost {:1.3f} :: Top1/Top5 - {:3.2f}/{:3.2f} :: {:4d} I/S ".format(Model.meterIterations,
np.mean(Model.meterLoss[-i:]), np.mean(Model.meterTop1[-i:]),
np.mean(Model.meterTop5[-i:]), int(np.mean(Model.meterSpeed[-i:]))))
NeuralEssentials.SaveModel(Model)

test_top1, test_top5 = [], []
Model.netEmbedding.eval()
Model.netLoss.eval()
        for i, (tensor, targets) in enumerate(teDataLoader):
Model.netEmbedding.zero_grad()
Model.netLoss.zero_grad()
features, rec_tensor, rec_loss = Model.netEmbedding( (Variable(tensor), Variable(targets)) )
margin_loss, (top1, top5) = Model.netLoss( (features, Variable(targets)) )

test_top1.append(float(top1.cpu().data.numpy() if torch.__version__.startswith("0.4") else top1.cpu().data.numpy()[0]))
test_top5.append(float(top5.cpu().data.numpy() if torch.__version__.startswith("0.4") else top5.cpu().data.numpy()[0]))
print("... Test accuracy - {:3.2f}/{:3.2f} ".format(np.mean(test_top1), np.mean(test_top5)))
Model.netEmbedding.train()
Model.netLoss.train()
Timer = timeit.default_timer()

print("\nDone with training")
return Model

# ============================================================================ #
def parse_args():
parser = argparse.ArgumentParser(description="CapsuleNet using tensorMONK!!!")
parser.add_argument("-A", "--Architecture", type=str, default="capsule")
parser.add_argument("-B", "--BSZ", type=int, default=32)
parser.add_argument("-E", "--Epochs", type=int, default=6)

parser.add_argument("--optimizer", type=str, default="adam", choices=["adam", "sgd",])
parser.add_argument("--learningRate", type=float, default=0.06)

parser.add_argument("--default_gpu", type=int, default=1)
parser.add_argument("--gpus", type=int, default=1)
parser.add_argument("--cpus", type=int, default=6)

parser.add_argument("--trainDataPath", type=str, default="./data")
parser.add_argument("--testDataPath", type=str, default="./data")

parser.add_argument("--replicate_paper", action="store_true")

parser.add_argument("-I", "--ignore_trained", action="store_true")

return parser.parse_args()


if __name__ == '__main__':
args = parse_args()
Model = trainMONK(args)
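A minimal invocation sketch, assuming MNIST downloads into the default ./data path and a ./models directory exists for checkpoints:

    python Capsule.py -A capsule -B 32 -E 6 --replicate_paper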
151 changes: 151 additions & 0 deletions ExVAE.py
@@ -0,0 +1,151 @@
""" tensorMONK's :: ExVAE """

from __future__ import print_function, division
import os
import sys
import timeit
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.utils as show_utils
from core import *
import torch.optim as neuralOptimizer
#==============================================================================#


def trainMONK(args):
if args.Project.lower() == "mnist":
tensor_size = (1, 1, 28, 28)
trDataLoader, teDataLoader, n_labels = NeuralEssentials.MNIST("./data/MNIST", tensor_size, args.BSZ, args.cpus)
elif args.Project.lower() == "cifar10":
tensor_size = (1, 3, 32, 32)
trDataLoader, teDataLoader, n_labels = NeuralEssentials.CIFAR10("./data/CIFAR10", tensor_size, args.BSZ, args.cpus)
file_name = "./models/" + args.Architecture.lower()

if args.Architecture.lower() == "cvae":
autoencoder_net = NeuralArchitectures.ConvolutionalVAE
autoencoder_net_kwargs = {"embedding_layers" : [(3, 32, 2), (3, 64, 2), (3, 128, 2),], "n_latent" : 64,
"decoder_final_activation" : "tanh", "pad" : True, "activation" : "relu", "batch_nm" : False}
elif args.Architecture.lower() == "lvae":
autoencoder_net = NeuralArchitectures.LinearVAE
autoencoder_net_kwargs = {"embedding_layers" : [1024, 512,], "n_latent" : 32,
"decoder_final_activation" : "tanh", "activation" : "relu", }
else:
raise NotImplementedError

Model = NeuralEssentials.MakeAE(file_name, tensor_size, n_labels,
autoencoder_net, autoencoder_net_kwargs,
default_gpu=args.default_gpu, gpus=args.gpus,
ignore_trained=args.ignore_trained)

if args.optimizer.lower() == "adam":
Optimizer = neuralOptimizer.Adam(Model.netAE.parameters())
elif args.optimizer.lower() == "sgd":
        Optimizer = neuralOptimizer.SGD(Model.netAE.parameters(), lr=args.learningRate)
else:
raise NotImplementedError

if args.meta_learning:
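        # assumption: ObfuscateDecolor corrupts the input (random occlusion /
        # desaturation) so the VAE learns to reconstruct the clean image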
transformer = NeuralLayers.ObfuscateDecolor(tensor_size, 0.4, 0.6, 0.5)

# Usual training
for _ in range(args.Epochs):
Timer = timeit.default_timer()
Model.netAE.train()
        for i, (tensor, targets) in enumerate(trDataLoader):
Model.meterIterations += 1

# forward pass and parameter update
Model.netAE.zero_grad()
if args.meta_learning:
org_tensor = Variable(tensor)
tensor = transformer(org_tensor)
encoded, mu, log_var, latent, decoded, kld, mse = Model.netAE((org_tensor, tensor))
else:
encoded, mu, log_var, latent, decoded, kld, mse = Model.netAE(Variable(tensor))
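            # down-weight the KL divergence (factor 0.1) against the
            # reconstruction error, in the spirit of a beta-VAE with beta < 1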
loss = kld * 0.1 + mse
loss.backward()
Optimizer.step()

# updating all meters
Model.meterLoss.append(float(loss.cpu().data.numpy() if torch.__version__.startswith("0.4") else loss.cpu().data.numpy()[0]))
kld = float(kld.cpu().data.numpy() if torch.__version__.startswith("0.4") else kld.cpu().data.numpy()[0])
mse = float(mse.cpu().data.numpy() if torch.__version__.startswith("0.4") else mse.cpu().data.numpy()[0])

Model.meterSpeed.append(int(float(args.BSZ)/(timeit.default_timer()-Timer)))
Timer = timeit.default_timer()

print("... {:6d} :: Cost {:2.3f}/{:2.3f}/{:2.3f} :: {:4d} I/S ".format(Model.meterIterations,
Model.meterLoss[-1], kld, mse, Model.meterSpeed[-1]),end="\r")
sys.stdout.flush()
            if i % 100 == 0:
                original = tensor[:min(32, tensor.size(0))].cpu()
                reconstructed = decoded[:min(32, tensor.size(0))].cpu().data

                if original.dim() != 4:
                    original = original.view(original.size(0), *tensor_size[1:])
                if reconstructed.dim() != 4:
                    reconstructed = reconstructed.view(reconstructed.size(0), *tensor_size[1:])

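                # per-image min-max normalization to [0, 1] before saving
                # the visualization grid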
original = (original - original.min(2, keepdim=True)[0].min(3, keepdim=True)[0]) / \
(original.max(2, keepdim=True)[0].max(3, keepdim=True)[0] - original.min(2, keepdim=True)[0].min(3, keepdim=True)[0])
reconstructed = (reconstructed - reconstructed.min(2, keepdim=True)[0].min(3, keepdim=True)[0]) / \
(reconstructed.max(2, keepdim=True)[0].max(3, keepdim=True)[0] - reconstructed.min(2, keepdim=True)[0].min(3, keepdim=True)[0])

show_utils.save_image(torch.cat([original, reconstructed], 0), "./models/CVAE_train.png", normalize=True)


        # save every epoch and print the stats of the last iteration
print("... {:6d} :: Cost {:2.3f}/{:2.3f}/{:2.3f} :: {:4d} I/S ".format(Model.meterIterations,
Model.meterLoss[-1], kld, mse, Model.meterSpeed[-1]))
NeuralEssentials.SaveModel(Model)

# test_top1, test_top5 = [], []
# Model.netAE.eval()
# for i,(tensor, targets) in enumerate(teDataLoader):
#
# Model.netEmbedding.zero_grad()
# Model.netLoss.zero_grad()
# encoded, mu, log_var, latent, decoded, kld, mse = Model.netAE(Variable(tensor))
#
#
#
# test_top1.append(float(top1.cpu().data.numpy() if torch.__version__.startswith("0.4") else top1.cpu().data.numpy()[0]))
# test_top5.append(float(top5.cpu().data.numpy() if torch.__version__.startswith("0.4") else top5.cpu().data.numpy()[0]))
# print("... Test accuracy - {:3.2f}/{:3.2f} ".format(np.mean(test_top1), np.mean(test_top5)))
# Model.netEmbedding.train()
# Model.netLoss.train()
Timer = timeit.default_timer()

print("\nDone with training")
return Model

# ============================================================================ #
def parse_args():
parser = argparse.ArgumentParser(description="VAEs using tensorMONK!!!")
parser.add_argument("-A", "--Architecture", type=str, default="cvae", choices=["cvae", "lvae",])
parser.add_argument("-P", "--Project", type=str, default="mnist", choices=["mnist", "cifar10",])

parser.add_argument("-B", "--BSZ", type=int, default=32)
parser.add_argument("-E", "--Epochs", type=int, default=6)

parser.add_argument("--optimizer", type=str, default="adam", choices=["adam", "sgd",])
parser.add_argument("--learningRate", type=float, default=0.01)

parser.add_argument("--meta_learning", action="store_true")

parser.add_argument("--default_gpu", type=int, default=1)
parser.add_argument("--gpus", type=int, default=1)
parser.add_argument("--cpus", type=int, default=6)

parser.add_argument("-I", "--ignore_trained", action="store_true")

return parser.parse_args()


if __name__ == '__main__':
args = parse_args()
Model = trainMONK(args)
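A minimal invocation sketch, assuming the datasets download into ./data/MNIST or ./data/CIFAR10 and a ./models directory exists for checkpoints:

    python ExVAE.py -A cvae -P cifar10 -B 32 -E 6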