Affect bug fix
lvyiwei1 committed Sep 21, 2021
1 parent 1a12949 commit 3e5085f
Showing 22 changed files with 93 additions and 123 deletions.
4 changes: 4 additions & 0 deletions .gitignore
@@ -150,3 +150,7 @@ dmypy.json

# End of https://www.gitignore.io/api/macos,python,visualstudiocode.DS_Store
*.pt
*.pkl
*.hdf5
*.png
.vector_cache/*
4 changes: 2 additions & 2 deletions README.md
@@ -80,10 +80,10 @@ We welcome new contributions to MultiBench through new research areas, datasets,

We release the processed datasets [here](https://drive.google.com/drive/folders/1IXZAjOEWFOGLxAK9JKvwlG2D9LThK6c5?usp=sharing). The raw datasets are also publicly available at [MultimodalSDK](https://github.com/A2Zadeh/CMU-MultimodalSDK) for MOSI and MOSEI, [MUsTARD](https://github.com/soujanyaporia/MUStARD) and [UR-Funny](https://github.com/ROC-HCI/UR-FUNNY). You can obtain the processed data with `datasets/affect/get_data.py`; note that `sarcasm` refers to [MUsTARD](https://github.com/soujanyaporia/MUStARD) and `humor` to [UR-FUNNY](https://github.com/ROC-HCI/UR-FUNNY). Remember to set `task` to `regression` for MOSI and MOSEI and to `classification` for MUsTARD and UR-FUNNY.

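As an illustration of the `task` guidance above, a minimal sketch of calling `get_dataloader` (this snippet is not part of the diff; the pickle file names are placeholders, and the keyword usage follows the function signature shown further down in `datasets/affect/get_data.py`):

```
from datasets.affect.get_data import get_dataloader

# MOSI / MOSEI provide real-valued sentiment scores -> regression
traindata, validdata, testdata = get_dataloader('mosi_raw.pkl', task='regression')

# MUsTARD (sarcasm) and UR-FUNNY (humor) provide binary labels -> classification
traindata, validdata, testdata = get_dataloader('humor.pkl', task='classification')
```
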
There are several example scripts for running affect datasets under examples/affect/. For example, to run UR-Funny with simple late fusion, do
There are several example scripts for running affect datasets under examples/affect/. For example, to run affect datasets with simple late fusion, do

```
python3 examples/affect/humor_late_fusion.py
python3 examples/affect/affect_late_fusion.py
```

### Healthcare
2 changes: 1 addition & 1 deletion datasets/affect/get_data.py
@@ -196,7 +196,7 @@ def __len__(self):
def get_dataloader(
filepath: str, batch_size: int = 32, max_seq_len=50, train_shuffle: bool = True,
num_workers: int = 4, flatten_time_series: bool = False, task=None,
raw_path='/home/pliang/multibench/affect/mosi/mosi.hdf5') -> DataLoader:
raw_path='/home/paul/MultiBench/mosi.hdf5') -> DataLoader:
with open(filepath, "rb") as f:
alldata = pickle.load(f)

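The default `raw_path` above is a machine-specific location. When the raw `.hdf5` file lives elsewhere, it can be overridden per the signature shown; a sketch with a placeholder path (not from the commit):

```
from datasets.affect.get_data import get_dataloader

traindata, validdata, testdata = get_dataloader(
    'mosi_raw.pkl',
    task='regression',
    raw_path='path/to/mosi.hdf5')  # override the hard-coded default
```
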
@@ -1,4 +1,4 @@
import sys
impor:t sys
import os
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
@@ -16,6 +16,7 @@
traindata, validdata, testdata = get_dataloader('/home/pliang/multibench/affect/processed/mosi_raw.pkl')

# mosi/mosei

encoders = [Transformer(409, 512).cuda()]
head = MLP(512, 256, 1).cuda()

@@ -14,7 +14,7 @@
from private_test_scripts.all_in_one import all_in_one_train

# mosi_raw.pkl, mosei_raw.pkl, sarcasm.pkl, humor.pkl
traindata, validdata, testdata = get_dataloader('/home/pliang/multibench/affect/processed/mosi_raw.pkl')
traindata, validdata, testdata = get_dataloader('/home/paul/MultiBench/mosi_raw.pkl')

# mosi/mosei
encoders=[GRU(35,70,dropout=True,has_padding=True).cuda(), \
@@ -31,7 +31,7 @@

all_modules = [*encoders, head, *unimodal_heads]

fusion = Concat.cuda()
fusion = Concat().cuda()

def trainprocess():
train(encoders, head, unimodal_heads, fusion, traindata, validdata, 300, lr=0.005, AUPRC=False, savedir='gb.pt')
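
The change above replaces `fusion = Concat.cuda()` with `fusion = Concat().cuda()`. A short sketch of why this matters, assuming `Concat` is an `nn.Module` subclass as its instantiation elsewhere in this commit suggests:

```
from fusions.common_fusions import Concat

fusion = Concat().cuda()  # instantiate the fusion module, then move it to the GPU
# Concat.cuda() would call the unbound method on the class itself and raise a
# TypeError, since cuda() expects a module instance.
```
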
@@ -13,12 +13,12 @@
from private_test_scripts.all_in_one import all_in_one_train

# mosi_raw.pkl, mosei_raw.pkl, sarcasm.pkl, humor.pkl
traindata, validdata, testdata = get_dataloader('/home/pliang/multibench/affect/processed/mosi_raw.pkl')
traindata, validdata, testdata = get_dataloader('/home/paul/MultiBench/mosi_raw.pkl')

# mosi/mosei
encoders = [Transformer(74, 150).cuda(), Transformer(35, 70).cuda(), Transformer(300, 600).cuda()]
encoders = [Transformer(74, 150).cuda(), Transformer(35, 75).cuda(), Transformer(300, 600).cuda()]

head = MLP(820, 512, 1).cuda()
head = MLP(825, 512, 1).cuda()

# humor/sarcasm
# encoders = [Transformer().cuda()] * 3
@@ -30,11 +30,11 @@


def trainprocess():
train(encoders, fusion, head, traindata, validdata, 1000, task="regression", optimtype=torch.optim.AdamW,
train(encoders, fusion, head, traindata, validdata, 10, task="regression", optimtype=torch.optim.AdamW, is_packed=True,
lr=1e-4, save='mosi_lf_best.pt', weight_decay=0.01, objective=torch.nn.L1Loss())

all_in_one_train(trainprocess, all_modules)

print("Testing:")
model = torch.load('mosi_lf_best.pt').cuda()
test(model, testdata, True, torch.nn.L1Loss(), "regression")
test(model, testdata, 'affect',True, torch.nn.L1Loss(), "posneg-classification")
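
The encoder and head changes above keep the late-fusion dimensions consistent: assuming the fusion step concatenates the three Transformer outputs, as the head's input size suggests, the head input must equal the sum of the encoder output sizes. A quick check (not from the commit):

```
# Transformer output sizes after the fix: 150, 75, 600
encoder_out_dims = [150, 75, 600]
assert sum(encoder_out_dims) == 825  # matches head = MLP(825, 512, 1)
```
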
File renamed without changes.
File renamed without changes.
41 changes: 25 additions & 16 deletions deprecated_examples/affect/affect_mult.py
@@ -1,31 +1,40 @@
import sys
import os
sys.path.append(os.getcwd())

sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
import torch
from torch import nn
from fusions.mult import MULTModel
from training_structures.unimodal import train, test
from unimodals.common_models import MLP

from fusions.common_fusions import ConcatEarly
from datasets.affect.get_data import get_dataloader
from unimodals.common_models import MLP
from fusions.mult import MULTModel

from training_structures.Supervised_Learning import train, test

traindata, validdata, testdata = get_dataloader('../affect/processed/mosei_senti_data.pkl')
from private_test_scripts.all_in_one import all_in_one_train

#mosi
# encoders=GRU(325,512,dropout=True,has_padding=True).cuda()
# head=MLP(512,256, 1).cuda()
# mosi_raw.pkl, mosei_raw.pkl, sarcasm.pkl, humor.pkl
traindata, validdata, testdata = get_dataloader('/home/pliang/multibench/affect/processed/mosi_raw.pkl')

#mosei
encoders = MULTModel(3).cuda()
head = nn.Identity()
# mosi/mosei
encoders = [MULTModel(409, 512).cuda()]
head = MLP(512, 256, 1).cuda()

train(encoders, head, traindata, validdata, 1000, True, True, task="regression", optimtype=torch.optim.AdamW, lr=1e-5,
save='mosei_mult_best.pt', weight_decay=0.01,criterion=torch.nn.L1Loss(), regularization=False)
# humor/sarcasm
# encoders = [Transformer(early=True).cuda()]
# head = MLP(1128, 512, 1).cuda()

all_modules = [*encoders, head]

fusion = ConcatEarly().cuda()


def trainprocess():
train(encoders, fusion, head, traindata, validdata, 1000, task="regression", optimtype=torch.optim.AdamW,
lr=1e-4, save='mosi_ef_best.pt', weight_decay=0.01, objective=torch.nn.L1Loss())

all_in_one_train(trainprocess, all_modules)

print("Testing:")
model = torch.load('mosei_mult_best.pt').cuda()
model = torch.load('mosi_ef_best.pt').cuda()
test(model, testdata, True, torch.nn.L1Loss(), "regression")

@@ -14,7 +14,7 @@
from private_test_scripts.all_in_one import all_in_one_train

# mosi_raw.pkl, mosei_raw.pkl, sarcasm.pkl, humor.pkl
traindata, validdata, testdata = get_dataloader('/home/pliang/multibench/affect/processed/mosi_raw.pkl')
traindata, validdata, testdata = get_dataloader('/home/paul/MultiBench/mosi_raw.pkl')


modal_num = 2
10 changes: 5 additions & 5 deletions deprecated_examples/affect/humor_late_fusion.py
@@ -4,13 +4,13 @@

import torch

from training_structures.Simple_Late_Fusion import train, test
from training_structures.Supervised_Learning import train, test
from fusions.common_fusions import Concat
from get_data import get_dataloader
from datasets.affect.get_data import get_dataloader
from unimodals.common_models import GRU, MLP

# Support mosi/mosi_unaligned/mosei/mosei_unaligned
traindata, validdata, testdata = get_dataloader('../affect/processed/humor_data.pkl')
traindata, validdata, testdata = get_dataloader('/home/pliang/multibench/affect/processed/humor_data.pkl')

# humor 371 81 300
encoders = GRU(752, 1128, dropout=True, has_padding=True).cuda()
@@ -23,9 +23,9 @@
fusion = Concat().cuda()

# Support simple late_fusion and late_fusion with removing bias
train(encoders, fusion, head, traindata, validdata, 1000, True, True, \
train(encoders, fusion, head, traindata, validdata, 1000, is_packed=True, early_stop=True, \
task="classification", optimtype=torch.optim.AdamW, lr=1e-5, save='humor_lf_best.pt', \
weight_decay=0.01, criterion=torch.nn.MSELoss(), regularization=False)
weight_decay=0.01, objective=torch.nn.MSELoss())

print("Testing:")
model=torch.load('humor_lf_best.pt').cuda()
14 changes: 7 additions & 7 deletions examples/affect/affect_early_fusion.py
@@ -3,21 +3,21 @@
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
import torch

os.environ['CUDA_VISIBLE_DEVICES']='1'
from fusions.common_fusions import ConcatEarly
from datasets.affect.get_data import get_dataloader
from unimodals.common_models import GRU, MLP
from unimodals.common_models import GRU, MLP,Sequential,Identity

from training_structures.Supervised_Learning import train, test

from private_test_scripts.all_in_one import all_in_one_train

# mosi_raw.pkl, mosei_raw.pkl, sarcasm.pkl, humor.pkl
traindata, validdata, testdata = get_dataloader('/home/pliang/multibench/affect/processed/mosi_raw.pkl')
traindata, validdata, testdata = get_dataloader('/home/paul/MultiBench/mosi_raw.pkl')

# mosi/mosei
encoders = [GRU(409, 512, dropout=True, has_padding=True).cuda()]
head = MLP(512, 256, 1).cuda()
encoders = [Identity().cuda(),Identity().cuda(),Identity().cuda()]
head = Sequential(GRU(409,512,dropout=True,has_padding=True),MLP(512, 256, 1)).cuda()

# humor/sarcasm
# encoders = GRU(752, 1128, dropout=True, has_padding=True).cuda()
@@ -29,11 +29,11 @@


def trainprocess():
train(encoders, fusion, head, traindata, validdata, 1000, task="regression", optimtype=torch.optim.AdamW,
train(encoders, fusion, head, traindata, validdata, 1000, task="regression", optimtype=torch.optim.AdamW,is_packed=True,
lr=1e-4, save='mosi_ef_best.pt', weight_decay=0.01, objective=torch.nn.L1Loss())

all_in_one_train(trainprocess, all_modules)

print("Testing:")
model = torch.load('mosi_ef_best.pt').cuda()
test(model, testdata, True, torch.nn.L1Loss(), "regression")
test(model, testdata,'affect', is_packed=True, criterion=torch.nn.L1Loss(), task="posneg-classification")
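
In the updated early-fusion example above, the per-modality encoders are `Identity` and a single `GRU(409, 512)` sits inside the head, because the `ConcatEarly` fusion (imported above) joins the modality streams before any encoding. A minimal PyTorch sketch of that idea; the batch and sequence sizes and the modality-to-dimension mapping are assumptions for illustration:

```
import torch

# Three modality streams for one batch: (batch, seq_len, features)
m1 = torch.randn(8, 50, 74)
m2 = torch.randn(8, 50, 35)
m3 = torch.randn(8, 50, 300)

# Early fusion: concatenate along the feature axis before any encoder,
# giving 74 + 35 + 300 = 409 features per timestep -- hence GRU(409, 512).
fused = torch.cat([m1, m2, m3], dim=2)
print(fused.shape)  # torch.Size([8, 50, 409])
```
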
6 changes: 3 additions & 3 deletions examples/affect/affect_late_fusion.py
@@ -15,7 +15,7 @@

# mosi_raw.pkl, mosei_raw.pkl, sarcasm.pkl, humor.pkl
traindata, validdata, test_robust = \
get_dataloader('/home/pliang/leslie/test/MultiBench/datasets/affect/mosi_raw.pkl')
get_dataloader('/home/paul/MultiBench/mosi_raw.pkl')

# mosi/mosei
encoders=[GRU(35,70,dropout=True,has_padding=True).cuda(), \
@@ -34,7 +34,7 @@
fusion = Concat().cuda()

def trainprocess():
train(encoders, fusion, head, traindata, validdata, 1000, task="regression", optimtype=torch.optim.AdamW, is_packed=True,
train(encoders, fusion, head, traindata, validdata, 200, task="regression", optimtype=torch.optim.AdamW, is_packed=True,
early_stop=True, lr=1e-4, save='mosi_lf_best.pt', weight_decay=0.01, objective=torch.nn.L1Loss())


@@ -43,7 +43,7 @@ def trainprocess():
print("Testing:")
model = torch.load('mosi_lf_best.pt').cuda()

test(model=model, test_dataloaders_all=test_robust, dataset='mosi', is_packed=True, criterion=torch.nn.L1Loss(), task='regression')
test(model=model, test_dataloaders_all=test_robust, dataset='mosi', is_packed=True, criterion=torch.nn.L1Loss(), task='posneg-classification')



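Several test calls in this commit switch the evaluation task from `regression` to `posneg-classification`. A plausible reading (an assumption, not the library's code) is binary accuracy obtained by thresholding the real-valued sentiment predictions and labels at zero; a minimal sketch with a hypothetical helper:

```
import torch

def posneg_accuracy(preds, labels):
    # Hypothetical helper: binary accuracy from real-valued scores,
    # treating >= 0 as positive and < 0 as negative.
    return ((preds.view(-1) >= 0) == (labels.view(-1) >= 0)).float().mean().item()

preds = torch.tensor([0.7, -1.2, 2.3, -0.1])
labels = torch.tensor([1.0, -2.0, 1.5, 0.4])
print(posneg_accuracy(preds, labels))  # 0.75
```
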
40 changes: 0 additions & 40 deletions examples/affect/affect_mult.py

This file was deleted.

35 changes: 0 additions & 35 deletions examples/affect/humor_late_fusion.py

This file was deleted.

Binary file modified fusions/__pycache__/__init__.cpython-38.pyc
Binary file modified fusions/__pycache__/common_fusions.cpython-38.pyc
2 changes: 2 additions & 0 deletions objective_functions/objectives_for_supervised_learning.py
@@ -9,6 +9,8 @@ def criterioning(pred,truth,criterion):
return criterion(pred,truth.long().cuda())
elif type(criterion)==torch.nn.modules.loss.BCEWithLogitsLoss or type(criterion)==torch.nn.MSELoss:
return criterion(pred,truth.float().cuda())
elif type(criterion)==torch.nn.L1Loss:
return criterion(pred,truth.float().cuda())

# objective for MFM
# ce_weight: weight of simple supervised loss
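
The new branch above routes `torch.nn.L1Loss` through the same path as the MSE and BCE losses: targets are cast to float (and moved to the GPU in the source) before the loss is applied. A CPU-only usage sketch with made-up tensors:

```
import torch

pred = torch.tensor([0.2, -1.1, 0.9])   # stand-in model outputs
truth = torch.tensor([0, -1, 1])        # stand-in integer targets

# Mirrors the added branch: cast targets to float, then apply L1.
loss = torch.nn.L1Loss()(pred, truth.float())
print(loss.item())  # mean absolute error
```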