GlobalAvgPool — Global average pooling
GlobalMaxPool — Global maximum pooling
GlobalMaxAvgPool — GlobalMaxAvgPool = (GlobalAvgPool + GlobalMaxPool) / 2.
Click here to see demo code
""" demo """
# import libs
import torch
from wama_modules.BaseModule import GlobalAvgPool, GlobalMaxPool, GlobalMaxAvgPool
# make tensor
inputs1D = torch.ones([3,12,13]) # 1D
inputs2D = torch.ones([3,12,13,13]) # 2D
inputs3D = torch.ones([3,12,13,13,13]) # 3D
# build layer
GAP = GlobalAvgPool()
GMP = GlobalMaxPool()
GAMP = GlobalMaxAvgPool()
# test GAP & GMP & GAMP
print(inputs1D.shape, GAP(inputs1D).shape)
print(inputs2D.shape, GAP(inputs2D).shape)
print(inputs3D.shape, GAP(inputs3D).shape)
print(inputs1D.shape, GMP(inputs1D).shape)
print(inputs2D.shape, GMP(inputs2D).shape)
print(inputs3D.shape, GMP(inputs3D).shape)
print(inputs1D.shape, GAMP(inputs1D).shape)
print(inputs2D.shape, GAMP(inputs2D).shape)
print(inputs3D.shape, GAMP(inputs3D).shape)
customLayerNorm — a custom implementation of layer normalization
MakeNorm — make a normalization layer, includes BN / GN / IN / LN
MakeActive — make an activation layer, includes ReLU / LeakyReLU
MakeConv — make a 1D / 2D / 3D convolutional layer
Click here to see demo code
""" demo """
ConvNormActive — 'Convolution→Normalization→Activation', used in VGG or ResNet
NormActiveConv — 'Normalization→Activation→Convolution', used in DenseNet
VGGBlock — the basic module in VGG
VGGStage — a VGGStage = a few VGGBlocks
ResBlock — the basic module in ResNet
ResStage — a ResStage = a few ResBlocks
DenseLayer — the basic module in DenseNet
DenseBlock — a DenseBlock = a few DenseLayers
Click here to see demo code
""" demo """
resizeTensor — scale a torch tensor, similar to scipy's zoom
tensor2array — transform a tensor to an ndarray
load_weights — load torch weights and print loading details (missed keys and matched keys)
Click here to see demo code
""" demo """
SCSEModule
NonLocal
Click here to see demo code
""" demo """
VGGEncoder
ResNetEncoder
DenseNetEncoder
???
Click here to see demo code
""" demo """
UNet_decoder
Click here to see demo code
""" demo """
FPN
Click here to see demo code
""" demo """
""" demo: FPN neck in both fusion directions """
import torch
from wama_modules.Neck import FPN

# multi-scale 3D feature maps: high-resolution/low-channel down to
# low-resolution/high-channel, as produced by a typical encoder
featuremaps = [
    torch.ones([3, 16, 32, 32, 32]),
    torch.ones([3, 32, 24, 24, 24]),
    torch.ones([3, 64, 16, 16, 16]),
    torch.ones([3, 128, 8, 8, 8]),
]

# settings shared by both FPN variants (all arguments passed by keyword)
shared_cfg = dict(
    in_channels_list=[16, 32, 64, 128],
    c1=128,
    c2=256,
    active='relu',
    norm='bn',
    gn_c=8,
    dim=3,
)
# fuse small-size features into big-size features
fpn_AddSmall2Big = FPN(mode='AddSmall2Big', **shared_cfg)
# fuse big-size features into small-size features, for classification
fpn_AddBig2Small = FPN(mode='AddBig2Small', **shared_cfg)

# forward
f_listA = fpn_AddSmall2Big(featuremaps)
f_listB = fpn_AddBig2Small(featuremaps)

# print the original and fused feature-map shapes
for fmap in featuremaps:
    print(fmap.shape)
for fmap in f_listA:
    print(fmap.shape)
for fmap in f_listB:
    print(fmap.shape)
FeedForward
MultiHeadAttention
TransformerEncoderLayer
TransformerDecoderLayer
Click here to see demo code
""" demo """