
Commit

add
WAMAWAMA committed Nov 7, 2022
1 parent 413808e commit ad02226
Showing 131 changed files with 10,281 additions and 13 deletions.
88 changes: 76 additions & 12 deletions README.md
@@ -21,24 +21,58 @@ pip install git+https://github.com/WAMAWAMA/wama_modules.git

Or you can directly copy the *wama_modules* folder to use


- 💧1.2 [`segmentation_models_pytorch`](https://github.com/qubvel/segmentation_models.pytorch) (*Optional*)

<details>
<summary> Introduction and installation command </summary>

`segmentation_models_pytorch` (called *smp*)
is a 2D CNN library that includes many backbones and decoders; installing it is highly recommended for use alongside this library.

Install with pip:
```
pip install segmentation-models-pytorch
```
Install the latest version from source:
```
pip install git+https://github.com/qubvel/segmentation_models.pytorch
```

</details>

- 💧1.3 [`transformers`](https://github.com/huggingface/transformers) (*Optional*)

<details>
<summary> Introduction and installation command </summary>

`transformers` is a library with abundant Transformer structures; installing it is highly recommended for use alongside this library.

Install with pip:
```
pip install transformers
```


</details>

- 💧1.4 [`timm`](https://github.com/rwightman/pytorch-image-models) (*Optional*)

<details>
<summary> Introduction and installation command </summary>

`timm` is a library with abundant CNN and Transformer structures; installing it is highly recommended for use alongside this library (a quick import check for these optional packages is shown after this list).

Install with pip:
```
pip install timm
```
Install the latest version:
```
pip install git+https://github.com/rwightman/pytorch-image-models.git
```
</details>
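
If the optional dependencies above are installed, the quick import check below should run without errors (a minimal sketch; the model names are only examples and are not part of this library):
```
# Sanity check for the optional dependencies (smp, transformers, timm)
import torch
import segmentation_models_pytorch as smp
import timm
from transformers import BertConfig, BertModel

unet = smp.Unet(encoder_name='resnet18', encoder_weights=None, in_channels=3, classes=2)  # 2D U-Net built on an smp encoder
backbone = timm.create_model('resnet18', pretrained=False)  # any timm backbone by name
bert = BertModel(BertConfig())  # randomly initialized BERT from transformers

print(unet(torch.ones([1, 3, 128, 128])).shape)  # expect torch.Size([1, 2, 128, 128])
```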


## 2. Update list
- 2022/11/5: Open the source code, version `v0.0.1-beta`
- ...
@@ -81,7 +115,7 @@ input = torch.ones([3,3,128,128])



Here are more demos shown below ↓ (Click to view codes, or visit the `demo` folder)



@@ -339,7 +373,37 @@ input = torch.ones([3,3,128,128])


## 5. Guideline 2: Use pretrained weights
How to use pretrained weights?

All pretrained weights come from third-party code or repositories.

Currently supported pretrained weights (TODO: add a table here listing where each set of weights comes from, how many weights there are, the pretraining data type, and whether they are 2D or 3D):
- 2D: smp, timm, radimagenet...
- 3D: medicalnet, 3D resnet, 3D densenet...
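
For example, loading one of the bundled 3D ResNet weights (kenshohara's ResNets3D, shipped under `wama_modules.thirdparty_lib`) roughly follows the snippet below, taken from `tmp.py` in this commit; the local weight path is only an example:
```
import torch
from wama_modules.thirdparty_lib.ResNets3D_kenshohara.models.resnet import generate_model

# Build a 3D ResNet-18; n_classes must match the pretrained checkpoint (1039 for the KM weights)
m = generate_model(18, n_classes=1039)

# Load a locally downloaded checkpoint (adjust the path to where the weights are stored)
ckpt = torch.load(r"D:\pretrainedweights\kenshohara_ResNets3D\weights\r3d18_KM_200ep.pth", map_location='cpu')
m.load_state_dict(ckpt['state_dict'])
```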


### 5.1 smp encoders `2D`
???
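
A minimal sketch (based on `tmp.py` in this commit) of building an smp encoder with pretrained weights and extracting multi-scale features; the encoder name and the `weights` tag are only examples and depend on the smp version:
```
import torch
from wama_modules.thirdparty_lib.SMP_qubvel.encoders import get_encoder

# ResNet-18 encoder with smp's semi-supervised ('ssl') pretrained weights
m = get_encoder('resnet18', in_channels=3, depth=5, weights='ssl')

# The encoder returns a list of feature maps at multiple scales
f_list = m(torch.ones([2, 3, 128, 128]))
_ = [print(i.shape) for i in f_list]
```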

### 5.2 timm encoders `2D`
???
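
A minimal sketch using timm's `features_only` mode to obtain multi-scale feature maps (based on `tmp.py` in this commit); set `pretrained=True` to download timm's pretrained weights:
```
import torch
import timm

# Create a timm backbone as a multi-scale feature extractor
m = timm.create_model(
    'adv_inception_v3',
    features_only=True,
    pretrained=False)  # pretrained=True downloads the pretrained weights

f_list = m(torch.ones([2, 3, 128, 128]))
_ = [print(i.shape) for i in f_list]
```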

### 5.3 radimagenet `2D` `medical image`
???


### 5.4 ??? `3D` `video`
???

### 5.5 ??? `3D` `video`
???

### 5.6 ??? `3D` `video`
???

### 5.7 ??? `3D` `medical image`
???





@@ -536,12 +600,12 @@ _ = [print(i.shape) for i in f_listB]
</details>


## 7. Acknowledgment 🥰
Thanks to these authors and their codes:
1) https://github.com/ZhugeKongan/torch-template-for-deep-learning
2) pytorch vit
3) SMP: https://github.com/qubvel/segmentation_models.pytorch
4) transformers
5) medicalnet
6) timm: https://github.com/rwightman/pytorch-image-models

24 changes: 24 additions & 0 deletions tmp.py
@@ -11,3 +11,27 @@
print(1)
print(1)
print(1)

# Load a 3D ResNet-18 from kenshohara's ResNets3D and its pretrained checkpoint
import torch
from wama_modules.thirdparty_lib.ResNets3D_kenshohara.models.resnet import generate_model
m = generate_model(18, n_classes=1039)  # n_classes must match the checkpoint
m.load_state_dict(torch.load(r"D:\pretrainedweights\kenshohara_ResNets3D\weights\r3d18_KM_200ep.pth", map_location='cpu')['state_dict'])




# Build an smp (qubvel) encoder with pretrained weights and extract multi-scale features
import torch
from wama_modules.thirdparty_lib.SMP_qubvel.encoders import get_encoder
m = get_encoder('resnet18', in_channels=3, depth=5, weights='ssl')
f_list = m(torch.ones([2,3,128,128]))
_ = [print(i.shape) for i in f_list]

# Build a timm model as a multi-scale feature extractor (set pretrained=True to load pretrained weights)
import timm
m = timm.create_model(
    'adv_inception_v3',
    features_only=True,
    pretrained=False)
f_list = m(torch.ones([2,3,128,128]))
_ = [print(i.shape) for i in f_list]
2 changes: 1 addition & 1 deletion wama_modules/Decoder.py
@@ -121,7 +121,7 @@ def forward(self, f_list):
# try this https://blog.csdn.net/m0_51436734/article/details/124073901


# NestedUNet (UNet++)



Empty file.
153 changes: 153 additions & 0 deletions wama_modules/thirdparty_lib/C3D_jfzhang95/c3d.py
@@ -0,0 +1,153 @@
import torch
import torch.nn as nn
from mypath import Path

class C3D(nn.Module):
"""
The C3D network.
"""

def __init__(self, num_classes, pretrained=False):
super(C3D, self).__init__()

self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))

self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))

self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))

self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))

self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1))

self.fc6 = nn.Linear(8192, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.fc8 = nn.Linear(4096, num_classes)

self.dropout = nn.Dropout(p=0.5)

self.relu = nn.ReLU()

self.__init_weight()

if pretrained:
self.__load_pretrained_weights()

def forward(self, x):

x = self.relu(self.conv1(x))
x = self.pool1(x)

x = self.relu(self.conv2(x))
x = self.pool2(x)

x = self.relu(self.conv3a(x))
x = self.relu(self.conv3b(x))
x = self.pool3(x)

x = self.relu(self.conv4a(x))
x = self.relu(self.conv4b(x))
x = self.pool4(x)

x = self.relu(self.conv5a(x))
x = self.relu(self.conv5b(x))
x = self.pool5(x)

x = x.view(-1, 8192)
x = self.relu(self.fc6(x))
x = self.dropout(x)
x = self.relu(self.fc7(x))
x = self.dropout(x)

logits = self.fc8(x)

return logits

def __load_pretrained_weights(self):
"""Initialiaze network."""
corresp_name = {
# Conv1
"features.0.weight": "conv1.weight",
"features.0.bias": "conv1.bias",
# Conv2
"features.3.weight": "conv2.weight",
"features.3.bias": "conv2.bias",
# Conv3a
"features.6.weight": "conv3a.weight",
"features.6.bias": "conv3a.bias",
# Conv3b
"features.8.weight": "conv3b.weight",
"features.8.bias": "conv3b.bias",
# Conv4a
"features.11.weight": "conv4a.weight",
"features.11.bias": "conv4a.bias",
# Conv4b
"features.13.weight": "conv4b.weight",
"features.13.bias": "conv4b.bias",
# Conv5a
"features.16.weight": "conv5a.weight",
"features.16.bias": "conv5a.bias",
# Conv5b
"features.18.weight": "conv5b.weight",
"features.18.bias": "conv5b.bias",
# fc6
"classifier.0.weight": "fc6.weight",
"classifier.0.bias": "fc6.bias",
# fc7
"classifier.3.weight": "fc7.weight",
"classifier.3.bias": "fc7.bias",
}

p_dict = torch.load(Path.model_dir())
s_dict = self.state_dict()
for name in p_dict:
if name not in corresp_name:
continue
s_dict[corresp_name[name]] = p_dict[name]
self.load_state_dict(s_dict)

def __init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()

def get_1x_lr_params(model):
"""
This generator returns all the parameters for conv and two fc layers of the net.
"""
b = [model.conv1, model.conv2, model.conv3a, model.conv3b, model.conv4a, model.conv4b,
model.conv5a, model.conv5b, model.fc6, model.fc7]
for i in range(len(b)):
for k in b[i].parameters():
if k.requires_grad:
yield k

def get_10x_lr_params(model):
"""
This generator returns all the parameters for the last fc layer of the net.
"""
b = [model.fc8]
for j in range(len(b)):
for k in b[j].parameters():
if k.requires_grad:
yield k

if __name__ == "__main__":
inputs = torch.rand(1, 3, 16, 112, 112)
net = C3D(num_classes=101, pretrained=True)

outputs = net.forward(inputs)
print(outputs.size())
Empty file.
72 changes: 72 additions & 0 deletions wama_modules/thirdparty_lib/C3D_yyuanad/c3d.py
@@ -0,0 +1,72 @@
# coding: utf-8

import torch.nn as nn


class C3D(nn.Module):
"""
nb_classes: nb_classes in classification task, 101 for UCF101 dataset
"""

def __init__(self, nb_classes):
super(C3D, self).__init__()

self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))

self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))

self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))

self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))

self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))
self.pool5 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1))

self.fc6 = nn.Linear(8192, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.fc8 = nn.Linear(4096, nb_classes)

self.dropout = nn.Dropout(p=0.5)

self.relu = nn.ReLU()

def forward(self, x, feature_layer):
h = self.relu(self.conv1(x))
h = self.pool1(h)
h = self.relu(self.conv2(h))
h = self.pool2(h)

h = self.relu(self.conv3a(h))
h = self.relu(self.conv3b(h))
h = self.pool3(h)

h = self.relu(self.conv4a(h))
h = self.relu(self.conv4b(h))
h = self.pool4(h)

h = self.relu(self.conv5a(h))
h = self.relu(self.conv5b(h))
h = self.pool5(h)

h = h.view(-1, 8192)
out = h if feature_layer == 5 else None
h = self.relu(self.fc6(h))
out = h if feature_layer == 6 and out == None else out
h = self.dropout(h)
h = self.relu(self.fc7(h))
out = h if feature_layer == 7 and out == None else out
h = self.dropout(h)
logits = self.fc8(h)
return logits, out





@@ -0,0 +1,2 @@
extends:
- "@commitlint/config-conventional"