Commit
[Feature] Support Delving into High-Quality Synthetic Face Occlusion Segmentation Datasets (open-mmlab#2194)

add custom dataset

add face occlusion dataset

add config file for occlusion face

fix format

update prepare.md

formatting

formatting

fix typo error for doc

update downloading process

Update dataset_prepare.md

Fix PR to reference the original repository.
huajiangjiangLi committed Apr 11, 2023
1 parent 820f2ca commit e697cea
Showing 4 changed files with 166 additions and 1 deletion.
78 changes: 78 additions & 0 deletions configs/_base_/datasets/occlude_face.py
@@ -0,0 +1,78 @@
dataset_type = 'FaceOccludedDataset'
data_root = 'data/occlusion-aware-face-dataset'
crop_size = (512, 512)
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=(512, 512)),
    dict(type='RandomFlip', prob=0.5),
    dict(type='RandomRotate', degree=(-30, 30), prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(
        type='Normalize',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        to_rgb=True),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='ResizeToMultiple', size_divisor=32),
            dict(type='RandomFlip'),
            dict(
                type='Normalize',
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                to_rgb=True),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

dataset_train_A = dict(
    type=dataset_type,
    data_root=data_root,
    img_dir='NatOcc_hand_sot/img',
    ann_dir='NatOcc_hand_sot/mask',
    split='train.txt',
    pipeline=train_pipeline)

dataset_train_B = dict(
    type=dataset_type,
    data_root=data_root,
    img_dir='NatOcc_object/img',
    ann_dir='NatOcc_object/mask',
    split='train.txt',
    pipeline=train_pipeline)

dataset_train_C = dict(
    type=dataset_type,
    data_root=data_root,
    img_dir='RandOcc/img',
    ann_dir='RandOcc/mask',
    split='train.txt',
    pipeline=train_pipeline)

dataset_valid = dict(
    type=dataset_type,
    data_root=data_root,
    img_dir='RealOcc/image',
    ann_dir='RealOcc/mask',
    split='RealOcc/split/val.txt',
    pipeline=test_pipeline)

data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=[dataset_train_A, dataset_train_B, dataset_train_C],
    val=dataset_valid)
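Since data.train is a list of three dataset configs, MMSegmentation concatenates them into a single training set at build time. A minimal sketch of consuming this config follows (illustrative only, not part of the commit; it assumes MMSegmentation 0.x and that the datasets listed above have already been prepared under data/occlusion-aware-face-dataset):

# Illustrative sketch, not part of the commit; assumes MMSegmentation 0.x.
from mmcv import Config
from mmseg.datasets import build_dataset

cfg = Config.fromfile('configs/_base_/datasets/occlude_face.py')
# cfg.data.train is a list, so build_dataset returns a ConcatDataset chaining
# the three synthetic-occlusion training sets (NatOcc_hand_sot, NatOcc_object, RandOcc).
train_set = build_dataset(cfg.data.train)
# The validation set is the real-occlusion split (RealOcc).
val_set = build_dataset(cfg.data.val)
print(len(train_set), len(val_set))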
63 changes: 63 additions & 0 deletions
@@ -0,0 +1,63 @@
# +
_base_ = '../_base_/datasets/occlude_face.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet101_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='DepthwiseSeparableASPPHead',
        in_channels=2048,
        in_index=3,
        channels=512,
        dilations=(1, 12, 24, 36),
        c1_in_channels=256,
        c1_channels=48,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
log_config = dict(
    interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
lr_config = dict(policy='poly', power=0.9, min_lr=0.0001, by_epoch=False)
runner = dict(type='IterBasedRunner', max_iters=30000)
checkpoint_config = dict(by_epoch=False, interval=400)
evaluation = dict(
    interval=400, metric=['mIoU', 'mDice', 'mFscore'], pre_eval=True)
auto_resume = False
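As a rough usage sketch (again assuming MMSegmentation 0.x; the config path below is a placeholder because this view does not show the new file's name), the model section can be built directly from the config, while tools/train.py remains the usual entry point for the full training loop:

# Illustrative sketch; '<occluded_face_config>.py' is a placeholder path.
from mmcv import Config
from mmseg.models import build_segmentor

cfg = Config.fromfile('configs/<occluded_face_config>.py')
# train_cfg and test_cfg are already embedded inside cfg.model above.
model = build_segmentor(cfg.model)
print(type(model).__name__)  # EncoderDecoder
# Full training would normally go through the standard launcher, e.g.
#   python tools/train.py configs/<occluded_face_config>.py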
3 changes: 2 additions & 1 deletion mmseg/datasets/__init__.py
@@ -9,6 +9,7 @@
from .dataset_wrappers import MultiImageMixDataset
from .decathlon import DecathlonDataset
from .drive import DRIVEDataset
from .face import FaceOccludedDataset
from .hrf import HRFDataset
from .isaid import iSAIDDataset
from .isprs import ISPRSDataset
@@ -51,5 +52,5 @@
    'BioMedicalGaussianNoise', 'BioMedicalGaussianBlur',
    'BioMedicalRandomGamma', 'BioMedical3DPad', 'RandomRotFlip',
    'SynapseDataset', 'REFUGEDataset', 'MapillaryDataset_v1',
    'MapillaryDataset_v2'
    'MapillaryDataset_v2', 'FaceOccludedDataset'
]
23 changes: 23 additions & 0 deletions mmseg/datasets/face.py
@@ -0,0 +1,23 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class FaceOccludedDataset(CustomDataset):
"""Face Occluded dataset.
Args:
split (str): Split txt file for Pascal VOC.
"""

CLASSES = ('background', 'face')

PALETTE = [[0, 0, 0], [128, 0, 0]]

def __init__(self, split, **kwargs):
super(FaceOccludedDataset, self).__init__(
img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs)
assert osp.exists(self.img_dir) and self.split is not None
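For reference, a small hedged example of instantiating the new dataset class directly (illustrative only; the paths mirror the RealOcc validation split from the dataset config above, and the two-step pipeline is a minimal stand-in):

# Illustrative sketch; requires the RealOcc data to exist on disk.
from mmseg.datasets import FaceOccludedDataset

dataset = FaceOccludedDataset(
    data_root='data/occlusion-aware-face-dataset',
    img_dir='RealOcc/image',
    ann_dir='RealOcc/mask',
    split='RealOcc/split/val.txt',
    pipeline=[dict(type='LoadImageFromFile'), dict(type='LoadAnnotations')])
print(len(dataset), dataset.CLASSES)  # ('background', 'face')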
