Skip to content

Commit

Permalink
Commit with mem test
Browse files Browse the repository at this point in the history
  • Loading branch information
lvyiwei1 committed May 16, 2021
1 parent d4910c2 commit ae1abda
Show file tree
Hide file tree
Showing 39 changed files with 827 additions and 139 deletions.
Binary file modified datasets/avmnist/__pycache__/get_data.cpython-38.pyc
Binary file not shown.
6 changes: 5 additions & 1 deletion datasets/avmnist/get_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from torch.utils.data import DataLoader

#data dir is the avmnist folder
def get_dataloader(data_dir,batch_size=40,num_workers=8,train_shuffle=True,flatten_audio=False,flatten_image=False,unsqueeze_channel=True,generate_sample=False,normalize_image=True):
def get_dataloader(data_dir,batch_size=40,num_workers=8,train_shuffle=True,flatten_audio=False,flatten_image=False,unsqueeze_channel=True,generate_sample=False,normalize_image=True,normalize_audio=True):
trains=[np.load(data_dir+"/image/train_data.npy"),np.load(data_dir+"/audio/train_data.npy"),np.load(data_dir+"/train_labels.npy")]
tests=[np.load(data_dir+"/image/test_data.npy"),np.load(data_dir+"/audio/test_data.npy"),np.load(data_dir+"/test_labels.npy")]
if flatten_audio:
Expand All @@ -14,6 +14,9 @@ def get_dataloader(data_dir,batch_size=40,num_workers=8,train_shuffle=True,flatt
if normalize_image:
trains[0] /= 255.0
tests[0] /= 255.0
if normalize_audio:
trains[1] = trains[1]/255.0
tests[1] = tests[1]/255.0
if not flatten_image:
trains[0]=trains[0].reshape(60000,28,28)
tests[0]=tests[0].reshape(10000,28,28)
Expand All @@ -30,6 +33,7 @@ def get_dataloader(data_dir,batch_size=40,num_workers=8,train_shuffle=True,flatt
tests = DataLoader(testlist,shuffle=False,num_workers=num_workers,batch_size=batch_size)
trains = DataLoader(trainlist[0:55000],shuffle=train_shuffle,num_workers=num_workers,batch_size=batch_size)
return trains,valids,tests

# this function creates an image of 100 numbers in avmnist
def saveimg(outa):
from PIL import Image
Expand Down
30 changes: 30 additions & 0 deletions examples/avmnist_MFM.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
"""Example script: train and test an MFM (multimodal factorized model) on AV-MNIST.

Flat script with module-level side effects: loads the dataset, builds
per-modality encoders/decoders on the GPU, trains, then evaluates the
checkpoint saved by training. Requires CUDA and the project packages
(training_structures, fusions, unimodals, objective_functions, datasets).
"""
import sys
import os
sys.path.append(os.getcwd())  # allow project-root imports when run from repo root
from training_structures.MFM import train_MFM,test_MFM
from fusions.common_fusions import Concat
from unimodals.MVAE import LeNetEncoder,DeLeNet
from unimodals.common_models import MLP
from torch import nn
import torch
from objective_functions.recon import recon_weighted_sum,sigmloss1dcentercrop
from datasets.avmnist.get_data import get_dataloader



# NOTE(review): hard-coded machine-specific data path — adjust before running.
traindata, validdata, testdata = get_dataloader('/data/yiwei/avmnist/_MFAS/avmnist')
channels=6  # base channel count passed to the LeNet-style encoders/decoders

classes=10  # AV-MNIST digit classes
n_latent=200  # per-modality latent dimensionality
fuse=Concat()  # fuse modalities by concatenating latent representations

# One encoder/decoder pair per modality (image, audio); kernel sizes 3 and 5.
# twooutput=False presumably makes the encoder emit a single latent — confirm in unimodals.MVAE.
encoders=[LeNetEncoder(1,channels,3,n_latent,twooutput=False).cuda(),LeNetEncoder(1,channels,5,n_latent,twooutput=False).cuda()]
decoders=[DeLeNet(1,channels,3,n_latent).cuda(),DeLeNet(1,channels,5,n_latent).cuda()]

# Intermediate MLPs: one per modality plus one for the fused (2*n_latent) vector.
intermediates=[MLP(n_latent,n_latent//2,n_latent//2).cuda(),MLP(n_latent,n_latent//2,n_latent//2).cuda(),MLP(2*n_latent,n_latent,n_latent//2).cuda()]
head=MLP(n_latent//2,40,classes).cuda()  # classification head on the shared representation
# Weighted reconstruction loss; the (28,34)/(112,130) pairs look like input vs. decoder
# output sizes for center cropping — verify against sigmloss1dcentercrop.
recon_loss=recon_weighted_sum([sigmloss1dcentercrop(28,34),sigmloss1dcentercrop(112,130)],[1.0,1.0])
train_MFM(encoders,decoders,head,intermediates,fuse,recon_loss,traindata,validdata,25)  # 25 epochs
model=torch.load('best.pt')  # train_MFM presumably saves the best model to 'best.pt'
test_MFM(model,testdata)
28 changes: 28 additions & 0 deletions examples/avmnist_MVAE_mixed.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
"""Example script: train and test a mixed MVAE (multimodal VAE) on AV-MNIST.

Flat script with module-level side effects: loads data, builds encoders and
decoders on the GPU, trains with an ELBO objective, then evaluates the two
saved checkpoints (VAE body and classification head). Requires CUDA and the
project packages.
"""
import sys
import os
sys.path.append(os.getcwd())  # allow project-root imports when run from repo root
from training_structures.MVAE_mixed import train_MVAE,test_MVAE
from fusions.MVAE import ProductOfExperts
from unimodals.common_models import MLP
from unimodals.MVAE import LeNetEncoder,DeLeNet
from torch import nn
import torch
from objective_functions.recon import elbo_loss,sigmloss1dcentercrop
from datasets.avmnist.get_data import get_dataloader

# NOTE(review): hard-coded machine-specific data path — adjust before running.
traindata, validdata, testdata = get_dataloader('/data/yiwei/avmnist/_MFAS/avmnist')

classes=10  # AV-MNIST digit classes
n_latent=200  # latent dimensionality of the shared representation
# Product-of-experts fusion; the (1,40,n_latent) tuple's meaning is defined in
# fusions.MVAE — presumably a shape/config argument, confirm there.
fuse=ProductOfExperts((1,40,n_latent))


channels=6  # base channel count for the LeNet-style encoders/decoders
# One encoder/decoder pair per modality (image, audio); kernel sizes 3 and 5.
encoders=[LeNetEncoder(1,channels,3,n_latent).cuda(),LeNetEncoder(1,channels,5,n_latent).cuda()]
decoders=[DeLeNet(1,channels,3,n_latent).cuda(),DeLeNet(1,channels,5,n_latent).cuda()]
head=MLP(n_latent,40,classes).cuda()  # classifier on top of the fused latent
# ELBO with per-modality reconstruction terms, equal weights, annealing/KL factor 0.0.
elbo=elbo_loss([sigmloss1dcentercrop(28,34),sigmloss1dcentercrop(112,130)],[1.0,1.0],0.0)
train_MVAE(encoders,decoders,head,fuse,traindata,validdata,elbo,20)  # 20 epochs
mvae=torch.load('best1.pt')  # checkpoints presumably written by train_MVAE
head=torch.load('best2.pt')
test_MVAE(mvae,head,testdata)
14 changes: 14 additions & 0 deletions examples/avmnist_architecture_search_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
"""Example script: evaluate a previously searched architecture on AV-MNIST.

Loads the best model found by the architecture search (saved under
'temp/best.pt') and runs the test loop. Requires CUDA and the project
packages; no training happens here.
"""
import sys
import os
sys.path.append(os.getcwd())  # allow project-root imports when run from repo root
from training_structures.architecture_search import train,test
from fusions.common_fusions import Concat
from datasets.avmnist.get_data import get_dataloader
from unimodals.common_models import LeNet,MLP,Constant
from torch import nn
import torch
import utils.surrogate as surr

# NOTE(review): hard-coded machine-specific data path — adjust before running.
traindata, validdata, testdata = get_dataloader('/data/yiwei/avmnist/_MFAS/avmnist',batch_size=32)
model = torch.load('temp/best.pt').cuda()  # checkpoint produced by the search script
test(model,testdata)
26 changes: 26 additions & 0 deletions examples/avmnist_gradient_blend.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
"""Example script: train and test gradient-blend multimodal fusion on AV-MNIST.

Flat script with module-level side effects: builds one LeNet encoder per
modality, a multimodal head plus per-modality unimodal heads, trains with the
gradient-blending procedure, then reloads the saved model and evaluates it.
Requires CUDA and the project packages.
"""
import sys
import os
sys.path.append(os.getcwd())  # allow project-root imports when run from repo root
from training_structures.gradient_blend import train, test
from fusions.common_fusions import Concat
from datasets.avmnist.get_data import get_dataloader
from unimodals.common_models import LeNet,MLP,Constant
from torch import nn
import torch

filename='best3.pt'  # checkpoint path shared between train (savedir) and test
# NOTE(review): hard-coded machine-specific data path — adjust before running.
traindata, validdata, testdata = get_dataloader('/data/yiwei/avmnist/_MFAS/avmnist')
channels=6  # base channel count for the LeNet encoders
encoders=[LeNet(1,channels,3).cuda(),LeNet(1,channels,5).cuda()]  # image / audio encoders
mult_head=MLP(channels*40,100,10).cuda()  # head on the concatenated multimodal features
# Unimodal heads; input sizes presumably match each encoder's flattened output — confirm.
uni_head = [MLP(channels*8,100,10).cuda(),MLP(channels*32,100,10).cuda()]

fusion=Concat().cuda()  # simple concatenation fusion

# 300 total epochs, re-estimating blend weights every gb_epoch=10 epochs.
train(encoders,mult_head,uni_head,fusion,traindata,validdata,300,gb_epoch=10,optimtype=torch.optim.SGD,lr=0.01,savedir=filename)

print("Testing:")
model=torch.load(filename).cuda()  # reload the best checkpoint saved during training
test(model,testdata)


24 changes: 24 additions & 0 deletions examples/avmnist_low_rank_tensor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
"""Example script: late fusion with low-rank tensor fusion (LMF) on AV-MNIST.

Flat script with module-level side effects: builds one LeNet encoder per
modality, fuses with LowRankTensorFusion, trains via the simple late-fusion
training loop, then reloads the checkpoint and evaluates. Requires CUDA and
the project packages.
"""
import sys
import os
sys.path.append(os.getcwd())  # allow project-root imports when run from repo root
from training_structures.Simple_Late_Fusion import train, test
from fusions.common_fusions import LowRankTensorFusion
from datasets.avmnist.get_data import get_dataloader
from unimodals.common_models import LeNet,MLP,Constant
from torch import nn
import torch
filename = 'lowrank.pt'  # checkpoint path shared between train (save) and test
# NOTE(review): hard-coded machine-specific data path — adjust before running.
traindata, validdata, testdata = get_dataloader('/data/yiwei/avmnist/_MFAS/avmnist')
channels=6  # base channel count for the LeNet encoders
encoders=[LeNet(1,channels,3).cuda(),LeNet(1,channels,5).cuda()]  # image / audio encoders
head=MLP(channels*20,100,10).cuda()  # classifier on the fused representation

# Fuse modality features of size channels*8 and channels*32 into channels*20 dims, rank 40.
fusion=LowRankTensorFusion([channels*8,channels*32],channels*20,40).cuda()

train(encoders,fusion,head,traindata,validdata,30,optimtype=torch.optim.SGD,lr=0.05,weight_decay=0.0002,save=filename)  # 30 epochs

print("Testing:")
model=torch.load(filename).cuda()  # reload the best checkpoint saved during training
test(model,testdata)


26 changes: 26 additions & 0 deletions examples/avmnist_multi_interac_matrix.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
"""Example script: late fusion with multiplicative interactions (matrix) on AV-MNIST.

Flat script with module-level side effects: builds one LeNet encoder per
modality, fuses with MultiplicativeInteractions2Modal in 'matrix' mode,
trains via the simple late-fusion loop, then reloads the checkpoint and
evaluates. Requires CUDA and the project packages.
"""
import sys
import os
sys.path.append(os.getcwd())  # allow project-root imports when run from repo root
from training_structures.Simple_Late_Fusion import train, test
from fusions.common_fusions import Concat, MultiplicativeInteractions2Modal
from datasets.avmnist.get_data import get_dataloader
from unimodals.common_models import LeNet,MLP,Constant
from torch import nn
import torch

filename='bestmi.pt'  # checkpoint path shared between train (save) and test
# NOTE(review): hard-coded machine-specific data path — adjust before running.
traindata, validdata, testdata = get_dataloader('/data/yiwei/avmnist/_MFAS/avmnist')
channels=6  # base channel count for the LeNet encoders
encoders=[LeNet(1,channels,3).cuda(),LeNet(1,channels,5).cuda()]  # image / audio encoders
head=MLP(channels*40,100,10).cuda()  # classifier on the fused representation

#fusion=Concat().cuda()
# Matrix-style multiplicative interaction between the two modality features.
# NOTE(review): unlike the other modules above, this fusion is not moved to
# .cuda() here — confirm whether the training loop handles device placement.
fusion = MultiplicativeInteractions2Modal([channels*8,channels*32],channels*40,'matrix')

train(encoders,fusion,head,traindata,validdata,20,optimtype=torch.optim.SGD,lr=0.05,weight_decay=0.0001,save=filename)  # 20 epochs

print("Testing:")
model=torch.load(filename).cuda()  # reload the best checkpoint saved during training
test(model,testdata)


2 changes: 1 addition & 1 deletion examples/avmnist_simple_late_fusion.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
import torch

traindata, validdata, testdata = get_dataloader('/data/yiwei/avmnist/_MFAS/avmnist')
channels=3
channels=6
encoders=[LeNet(1,channels,3).cuda(),LeNet(1,channels,5).cuda()]
head=MLP(channels*40,100,10).cuda()

Expand Down
2 changes: 1 addition & 1 deletion examples/avmnist_unimodal_1.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@

modalnum=1
traindata, validdata, testdata = get_dataloader('/data/yiwei/avmnist/_MFAS/avmnist')
channels=3
channels=6
#encoders=[LeNet(1,channels,3).cuda(),LeNet(1,channels,5).cuda()]
encoder = LeNet(1,channels,5).cuda()
head=MLP(channels*32,100,10).cuda()
Expand Down
23 changes: 23 additions & 0 deletions examples/mimic_architecture_search.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
"""Example script: run multimodal architecture search on the MIMIC dataset.

Flat script with module-level side effects: loads the imputed MIMIC task-1
dataloaders, then runs the surrogate-guided architecture search over the two
pretrained unimodal encoders. The test phase is left commented out. Requires
CUDA and the project packages.
"""
import sys
import os
sys.path.append(os.getcwd())  # allow project-root imports when run from repo root
from training_structures.architecture_search import train
from fusions.common_fusions import Concat
from datasets.mimic.get_data import get_dataloader
from unimodals.common_models import LeNet,MLP,Constant,GRUWithLinear
from torch import nn
import torch
import utils.surrogate as surr

# Task id 1; imputed data pickle path is relative to the repo root.
traindata, validdata, testdata = get_dataloader(1, imputed_path='datasets/mimic/im.pk')


# Search over pretrained static and time-series encoders; the tuples
# (5,10,10)/(288,720,360) and (3,3,2) presumably describe layer sizes and the
# search space — confirm against training_structures.architecture_search.train.
s_data=train(['pretrained/mimic/static_encoder_mortality.pt','pretrained/mimic/ts_encoder_mortality.pt'],16,2,[(5,10,10),(288,720,360)],
traindata,validdata,surr.SimpleRecurrentSurrogate().cuda(),(3,3,2),epochs=6)

"""
print("Testing:")
model=torch.load('best.pt').cuda()
test(model,testdata)
"""

15 changes: 15 additions & 0 deletions examples/mimic_architecture_search_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
"""Example script: evaluate a searched architecture on the MIMIC dataset.

Loads the best model found by the architecture search (saved under
'temp/best.pt') and runs the test loop with AUPRC reporting. Requires CUDA
and the project packages; no training happens here.
"""
import sys
import os
sys.path.append(os.getcwd())  # allow project-root imports when run from repo root
from training_structures.architecture_search import train,test
from fusions.common_fusions import Concat
from datasets.mimic.get_data import get_dataloader
from unimodals.common_models import LeNet,MLP,Constant
from torch import nn
import torch
import utils.surrogate as surr

# Task id 1; imputed data pickle path is relative to the repo root.
traindata, validdata, testdata = get_dataloader(1, imputed_path='datasets/mimic/im.pk')

model = torch.load('temp/best.pt').cuda()  # checkpoint produced by the search script
test(model,testdata,auprc=True)
25 changes: 25 additions & 0 deletions examples/mimic_low_rank_tensor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
"""Example script: late fusion with low-rank tensor fusion (LMF) on MIMIC.

Flat script with module-level side effects: builds an MLP encoder for the
static features and a GRU for the time series, fuses with
LowRankTensorFusion, trains via the simple late-fusion loop, then reloads
'best.pt' and evaluates with AUPRC. Requires CUDA and the project packages.
"""
import sys
import os
sys.path.append(os.getcwd())  # allow project-root imports when run from repo root
from training_structures.Simple_Late_Fusion import train, test
from fusions.common_fusions import LowRankTensorFusion
from datasets.mimic.get_data import get_dataloader
from unimodals.common_models import MLP, GRU
from torch import nn
import torch

#get dataloader for icd9 classification task 7
# NOTE(review): the comment above says task 7 but the call passes task id 1 — confirm which is intended.
traindata, validdata, testdata = get_dataloader(1, imputed_path='datasets/mimic/im.pk')

#build encoders, head and fusion layer
encoders = [MLP(5, 10, 10,dropout=False).cuda(), GRU(12, 30,dropout=False).cuda()]  # static MLP + time-series GRU
head = MLP(100, 40, 2, dropout=False).cuda()  # binary classifier on the fused representation
fusion = LowRankTensorFusion([10,720],100,40).cuda()  # fuse 10-dim and 720-dim features into 100 dims, rank 40

#train
train(encoders, fusion, head, traindata, validdata, 50, auprc=True)  # 50 epochs

#test
print("Testing: ")
model = torch.load('best.pt').cuda()  # checkpoint presumably saved by train's default path
test(model, testdata, auprc=True)
6 changes: 3 additions & 3 deletions examples/mimic_unimodal_0.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,14 +13,14 @@
#build encoders, head and fusion layer
#encoders = [MLP(5, 10, 10,dropout=False).cuda(), GRU(12, 30,dropout=False).cuda()]
encoder = MLP(5,10,10).cuda()
head = MLP(10, 40, 6, dropout=False).cuda()
head = MLP(10, 40, 2, dropout=False).cuda()


#train
train(encoder, head, traindata, validdata, 20, auprc=True,modalnum=modalnum)
train(encoder, head, traindata, validdata, 20, auprc=False,modalnum=modalnum)

#test
print("Testing: ")
encoder = torch.load('encoder.pt').cuda()
head = torch.load('head.pt').cuda()
test(encoder,head , testdata, auprc=True, modalnum=modalnum)
test(encoder,head , testdata, auprc=False, modalnum=modalnum)
10 changes: 5 additions & 5 deletions examples/mimic_unimodal_1.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
sys.path.append(os.getcwd())
from training_structures.unimodal import train, test
from datasets.mimic.get_data import get_dataloader
from unimodals.common_models import MLP, GRU
from unimodals.common_models import MLP, GRUWithLinear
from torch import nn
import torch

Expand All @@ -12,15 +12,15 @@
modalnum =1
#build encoders, head and fusion layer
#encoders = [MLP(5, 10, 10,dropout=False).cuda(), GRU(12, 30,dropout=False).cuda()]
encoder = GRU(12,30,flatten=True).cuda()
head = MLP(720, 40, 6, dropout=False).cuda()
encoder = GRUWithLinear(12,30,15,flatten=True).cuda()
head = MLP(360, 40, 2, dropout=False).cuda()


#train
train(encoder, head, traindata, validdata, 20, auprc=True,modalnum=modalnum)
train(encoder, head, traindata, validdata, 20, auprc=False,modalnum=modalnum)

#test
print("Testing: ")
encoder = torch.load('encoder.pt').cuda()
head = torch.load('head.pt').cuda()
test(encoder,head , testdata, auprc=True, modalnum=modalnum)
test(encoder,head , testdata, auprc=False, modalnum=modalnum)
Binary file modified fusions/__pycache__/common_fusions.cpython-38.pyc
Binary file not shown.
17 changes: 10 additions & 7 deletions fusions/common_fusions.py
Original file line number Diff line number Diff line change
Expand Up @@ -182,37 +182,40 @@ def forward(self, modalities, training=False):

class LowRankTensorFusion(nn.Module):
# https://github.com/Justin1904/Low-rank-Multimodal-Fusion
def __init__(self, input_dims, output_dim, rank):
def __init__(self, input_dims, output_dim, rank, flatten=True):
super(LowRankTensorFusion, self).__init__()

# dimensions are specified in the order of audio, video and text
self.input_dims = input_dims
self.output_dim = output_dim
self.rank = rank
self.flatten = flatten

# low-rank factors
self.factors = []
for input_dim in input_dims:
factor = nn.Parameter(torch.Tensor(self.rank, input_dim+1, self.output_dim))
factor = nn.Parameter(torch.Tensor(self.rank, input_dim+1, self.output_dim)).cuda()
nn.init.xavier_normal(factor)
self.factors.append(factor)

self.fusion_weights = nn.Parameter(torch.Tensor(1, self.rank))
self.fusion_bias = nn.Parameter(torch.Tensor(1, self.output_dim))

self.fusion_weights = nn.Parameter(torch.Tensor(1, self.rank)).cuda()
self.fusion_bias = nn.Parameter(torch.Tensor(1, self.output_dim)).cuda()
# init the fusion weights
nn.init.xavier_normal(self.fusion_weights)
self.fusion_bias.data.fill_(0)

def forward(self, modalities, training=False):
batch_size = modalities[0].shape[0]

# next we perform low-rank multimodal fusion
# here is a more efficient implementation than the one the paper describes
# basically swapping the order of summation and elementwise product
fused_tensor = 1
for (modality, factor) in zip(modalities, self.factors):
modality_withones = torch.cat((Variable(torch.ones(batch_size, 1).type(modality.dtype), requires_grad=False), modality), dim=1)
ones = Variable(torch.ones(batch_size, 1).type(modality.dtype), requires_grad=False).cuda()
if self.flatten:
modality_withones = torch.cat((ones, torch.flatten(modality,start_dim=1)), dim=1)
else:
modality_withones = torch.cat((ones, modality), dim=1)
modality_factor = torch.matmul(modality_withones, factor)
fused_tensor = fused_tensor * modality_factor

Expand Down
1 change: 1 addition & 0 deletions fusions/searchable.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,7 @@ def forward(self, inputs, training=False):
aout = feats
if layer==0:
fused = torch.cat(aout,1)
#print(fused.size())
out = self.fusion_layers[layer](fused)
else:
aout.append(out)
Expand Down
Binary file modified objective_functions/__pycache__/recon.cpython-38.pyc
Binary file not shown.
Loading

0 comments on commit ae1abda

Please sign in to comment.