From e9e196f3c9d07c6ad342f0ebc841c5ddb2f9630b Mon Sep 17 00:00:00 2001 From: jinwonkim93 Date: Tue, 20 Sep 2022 08:47:02 +0000 Subject: [PATCH 01/10] add custom dataset --- mmseg/datasets/__init__.py | 60 +++++++++++++++++++------------------- mmseg/datasets/face.py | 23 +++++++++++++++ 2 files changed, 53 insertions(+), 30 deletions(-) create mode 100755 mmseg/datasets/face.py diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py index 5d42a11c26..9933feaf05 100644 --- a/mmseg/datasets/__init__.py +++ b/mmseg/datasets/__init__.py @@ -1,30 +1,30 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .ade import ADE20KDataset -from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset -from .chase_db1 import ChaseDB1Dataset -from .cityscapes import CityscapesDataset -from .coco_stuff import COCOStuffDataset -from .custom import CustomDataset -from .dark_zurich import DarkZurichDataset -from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset, - RepeatDataset) -from .drive import DRIVEDataset -from .hrf import HRFDataset -from .isaid import iSAIDDataset -from .isprs import ISPRSDataset -from .loveda import LoveDADataset -from .night_driving import NightDrivingDataset -from .pascal_context import PascalContextDataset, PascalContextDataset59 -from .potsdam import PotsdamDataset -from .stare import STAREDataset -from .voc import PascalVOCDataset - -__all__ = [ - 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', - 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', - 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', - 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', - 'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset', - 'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset', - 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset' -] +# Copyright (c) OpenMMLab. All rights reserved. 
+from .ade import ADE20KDataset +from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset +from .chase_db1 import ChaseDB1Dataset +from .cityscapes import CityscapesDataset +from .coco_stuff import COCOStuffDataset +from .custom import CustomDataset +from .dark_zurich import DarkZurichDataset +from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset, + RepeatDataset) +from .drive import DRIVEDataset +from .hrf import HRFDataset +from .isaid import iSAIDDataset +from .isprs import ISPRSDataset +from .loveda import LoveDADataset +from .night_driving import NightDrivingDataset +from .pascal_context import PascalContextDataset, PascalContextDataset59 +from .potsdam import PotsdamDataset +from .stare import STAREDataset +from .voc import PascalVOCDataset +from .face import FaceOccluded + +__all__ = [ + 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', + 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', + 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', + 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', + 'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset', + 'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset', + 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset',"FaceOccluded"] diff --git a/mmseg/datasets/face.py b/mmseg/datasets/face.py new file mode 100755 index 0000000000..cd0dcd1216 --- /dev/null +++ b/mmseg/datasets/face.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class FaceOccluded(CustomDataset): + """Face Occluded dataset. + + Args: + split (str): Split txt file for Pascal VOC. 
+ """ + + CLASSES = ('background', 'face') + + PALETTE = [[0, 0, 0], [128, 0, 0]] + + def __init__(self, split, **kwargs): + super(FaceOccluded, self).__init__( + img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) + assert osp.exists(self.img_dir) and self.split is not None From 23c9fd73585e6505a3ce684d9622dafc1c3d6b4c Mon Sep 17 00:00:00 2001 From: jinwonkim93 Date: Sun, 16 Oct 2022 05:14:49 +0000 Subject: [PATCH 02/10] add config file for occlusion face --- configs/_base_/datasets/occlude_face.py | 82 +++++++++++++++++++ ...3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py | 66 +++++++++++++++ 2 files changed, 148 insertions(+) create mode 100644 configs/_base_/datasets/occlude_face.py create mode 100644 configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py diff --git a/configs/_base_/datasets/occlude_face.py b/configs/_base_/datasets/occlude_face.py new file mode 100644 index 0000000000..c0ef4fbaa8 --- /dev/null +++ b/configs/_base_/datasets/occlude_face.py @@ -0,0 +1,82 @@ +dataset_type = 'FaceOccluded' +data_root = 'data/occlusion-aware-dataset' +crop_size = (512, 512) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(512, 512)), + dict(type='RandomFlip', prob=0.5), + dict(type='RandomRotate', degree=(-30, 30), prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(512, 512), + img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=True, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + 
type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +dataset_train_A = dict( + type='FaceOccluded', + data_root=data_root, + img_dir='CelebAMask-HQ-original/image', + ann_dir='CelebAMask-HQ-original/mask_edited', + split='CelebAMask-HQ-original/split/train_ori.txt', + pipeline=train_pipeline) + +dataset_train_B = dict( + type='FaceOccluded', + data_root=data_root, + img_dir='NatOcc-SOT/image', + ann_dir='NatOcc-SOT/mask', + split='NatOcc-SOT/split/train.txt', + pipeline=train_pipeline) + + +dataset_valid = dict( + type='FaceOccluded', + data_root=data_root, + img_dir='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/image', + ann_dir='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/mask', + split='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/split/val.txt', + pipeline=test_pipeline) + +dataset_test = dict( + type='FaceOccluded', + data_root=data_root, + img_dir='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/image', + ann_dir='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/mask', + split='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/test.txt', + pipeline=test_pipeline) + +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=[ + dataset_train_A,dataset_train_B, + ], + val= dataset_valid, + test=dataset_test) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py b/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py new file mode 100644 index 0000000000..eb2c8e0073 --- /dev/null +++ b/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py @@ -0,0 +1,66 @@ +# + +_base_ = '../_base_/dataset/occlude_face.py' +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet101_v1c', + backbone=dict( + type='ResNetV1c', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 2, 4), + 
strides=(1, 2, 1, 1), + norm_cfg=dict(type='SyncBN', requires_grad=True), + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='DepthwiseSeparableASPPHead', + in_channels=2048, + in_index=3, + channels=512, + dilations=(1, 12, 24, 36), + c1_in_channels=256, + c1_channels=48, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=dict(type='SyncBN', requires_grad=True), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=2, + norm_cfg=dict(type='SyncBN', requires_grad=True), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + train_cfg=dict(), + test_cfg=dict(mode='whole')) +log_config = dict( + interval=50, hooks=[dict(type='TextLoggerHook', by_epoch=False)]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +lr_config = dict(policy='poly', power=0.9, min_lr=0.0001, by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=30000) +checkpoint_config = dict(by_epoch=False, interval=400) +evaluation = dict( + interval=400, metric=['mIoU', 'mDice', 'mFscore'], pre_eval=True) + +work_dir = './work_dirs/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT' +gpu_ids = range(0, 2) +auto_resume = False From e1cc8008a0252cb4175e9133df5b4719269850d8 Mon Sep 17 00:00:00 2001 From: jinwonkim93 Date: Sun, 16 Oct 2022 05:45:44 +0000 Subject: [PATCH 03/10] add face occlusion dataset --- docs/en/dataset_prepare.md | 56 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git 
a/docs/en/dataset_prepare.md b/docs/en/dataset_prepare.md index 4982ce1828..c671961445 100644 --- a/docs/en/dataset_prepare.md +++ b/docs/en/dataset_prepare.md @@ -1,3 +1,4 @@ + ## Prepare datasets It is recommended to symlink the dataset root to `$MMSEGMENTATION/data`. @@ -376,3 +377,58 @@ python tools/convert_datasets/isaid.py /path/to/iSAID ``` In our default setting (`patch_width`=896, `patch_height`=896, `overlap_area`=384), it will generate 33978 images for training and 11644 images for validation. + + +### Delving into High-Quality Synthetic Face Occlusion Segmentation Datasets + +The dataset is generated by two techniques, Naturalistic occlusion generation, Random occlusion generation. you must install face-occlusion-generation and dataset. see more guide in https://github.com/kennyvoo/face-occlusion-generation.git + + +## Dataset Preparation + +Please download the masks from this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing) and the images from [CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ), [11k Hands](https://sites.google.com/view/11khands) and [DTD](https://www.robots.ox.ac.uk/~vgg/data/dtd/). + +The extracted and upsampled COCO objects images and masks can be found in this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). + +Please extract CelebAMask-HQ and 11k Hands images based on the splits found in [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). 
+ +**Dataset Organization:** + +```none + +├── dataset +│ ├── CelebAMask-HQ-WO-Train_img +│ │ ├── {image}.jpg +│ ├── CelebAMask-HQ-WO-Train_mask +│ │ ├── {mask}.png +│ ├── DTD +│ │ ├── images +│ │ │ ├── {classA} +│ │ │ │ ├── {image}.jpg +│ │ │ ├── {classB} +│ │ │ │ ├── {image}.jpg +│ ├── 11k-hands_img +│ │ ├── {image}.jpg +│ ├── 11k-hands_mask +│ │ ├── {mask}.png +│ ├── object_image_sr +│ │ ├── {image}.jpg +│ ├── object_image_x4 +│ │ ├── {mask}.png + +``` + +## Data Generation + +Example script to generate NatOcc dataset + +bash NatOcc.sh + +Example script to generate RandOcc dataset + +bash RandOcc.sh + + +```python + +``` From 1ca1780f5a79254920a073f55b5a3462b5857816 Mon Sep 17 00:00:00 2001 From: jinwonkim93 Date: Mon, 17 Oct 2022 01:47:59 +0000 Subject: [PATCH 04/10] fix format --- configs/_base_/datasets/occlude_face.py | 28 ++++----- ...3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py | 2 +- mmseg/datasets/__init__.py | 60 +++++++++---------- mmseg/datasets/face.py | 46 +++++++------- 4 files changed, 68 insertions(+), 68 deletions(-) diff --git a/configs/_base_/datasets/occlude_face.py b/configs/_base_/datasets/occlude_face.py index c0ef4fbaa8..db6de35989 100644 --- a/configs/_base_/datasets/occlude_face.py +++ b/configs/_base_/datasets/occlude_face.py @@ -1,5 +1,5 @@ -dataset_type = 'FaceOccluded' -data_root = 'data/occlusion-aware-dataset' +dataset_type = 'FaceOccludedDataset' +data_root = 'data/occlusion-aware-face-dataset' crop_size = (512, 512) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) @@ -40,15 +40,15 @@ ] dataset_train_A = dict( - type='FaceOccluded', + type=dataset_type, data_root=data_root, img_dir='CelebAMask-HQ-original/image', ann_dir='CelebAMask-HQ-original/mask_edited', - split='CelebAMask-HQ-original/split/train_ori.txt', + split='CelebAMask-HQ-original/split/train.txt', pipeline=train_pipeline) dataset_train_B = dict( - type='FaceOccluded', + type=dataset_type, data_root=data_root, 
img_dir='NatOcc-SOT/image', ann_dir='NatOcc-SOT/mask', @@ -57,26 +57,26 @@ dataset_valid = dict( - type='FaceOccluded', + type=dataset_type, data_root=data_root, - img_dir='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/image', - ann_dir='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/mask', - split='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/split/val.txt', + img_dir='RealOcc/image', + ann_dir='RealOcc/mask', + split='RealOcc/split/val.txt', pipeline=test_pipeline) dataset_test = dict( - type='FaceOccluded', + type=dataset_type, data_root=data_root, - img_dir='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/image', - ann_dir='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/mask', - split='occlusion-aware-dataset/HQ-FO-dataset/RealOcc/test.txt', + img_dir='RealOcc/image', + ann_dir='RealOcc/mask', + split='RealOcc/test.txt', pipeline=test_pipeline) data = dict( samples_per_gpu=2, workers_per_gpu=2, train=[ - dataset_train_A,dataset_train_B, + dataset_train_A,dataset_train_B ], val= dataset_valid, test=dataset_test) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py b/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py index eb2c8e0073..7806d4dc21 100644 --- a/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py +++ b/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py @@ -1,5 +1,5 @@ # + -_base_ = '../_base_/dataset/occlude_face.py' +_base_ = '../_base_/datasets/occlude_face.py' norm_cfg = dict(type='SyncBN', requires_grad=True) model = dict( type='EncoderDecoder', diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py index 9933feaf05..3c5396fba4 100644 --- a/mmseg/datasets/__init__.py +++ b/mmseg/datasets/__init__.py @@ -1,30 +1,30 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .ade import ADE20KDataset -from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset -from .chase_db1 import ChaseDB1Dataset -from .cityscapes import CityscapesDataset -from .coco_stuff import COCOStuffDataset -from .custom import CustomDataset -from .dark_zurich import DarkZurichDataset -from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset, - RepeatDataset) -from .drive import DRIVEDataset -from .hrf import HRFDataset -from .isaid import iSAIDDataset -from .isprs import ISPRSDataset -from .loveda import LoveDADataset -from .night_driving import NightDrivingDataset -from .pascal_context import PascalContextDataset, PascalContextDataset59 -from .potsdam import PotsdamDataset -from .stare import STAREDataset -from .voc import PascalVOCDataset -from .face import FaceOccluded - -__all__ = [ - 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', - 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', - 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', - 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', - 'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset', - 'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset', - 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset',"FaceOccluded"] +# Copyright (c) OpenMMLab. All rights reserved. 
+from .ade import ADE20KDataset +from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset +from .chase_db1 import ChaseDB1Dataset +from .cityscapes import CityscapesDataset +from .coco_stuff import COCOStuffDataset +from .custom import CustomDataset +from .dark_zurich import DarkZurichDataset +from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset, + RepeatDataset) +from .drive import DRIVEDataset +from .hrf import HRFDataset +from .isaid import iSAIDDataset +from .isprs import ISPRSDataset +from .loveda import LoveDADataset +from .night_driving import NightDrivingDataset +from .pascal_context import PascalContextDataset, PascalContextDataset59 +from .potsdam import PotsdamDataset +from .stare import STAREDataset +from .voc import PascalVOCDataset +from .face import FaceOccludedDataset + +__all__ = [ + 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', + 'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset', + 'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset', + 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', + 'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset', + 'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset', + 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset', 'FaceOccludedDataset'] diff --git a/mmseg/datasets/face.py b/mmseg/datasets/face.py index cd0dcd1216..cbc2345b09 100755 --- a/mmseg/datasets/face.py +++ b/mmseg/datasets/face.py @@ -1,23 +1,23 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class FaceOccluded(CustomDataset): - """Face Occluded dataset. - - Args: - split (str): Split txt file for Pascal VOC. 
- """ - - CLASSES = ('background', 'face') - - PALETTE = [[0, 0, 0], [128, 0, 0]] - - def __init__(self, split, **kwargs): - super(FaceOccluded, self).__init__( - img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) - assert osp.exists(self.img_dir) and self.split is not None +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp + +from .builder import DATASETS +from .custom import CustomDataset + + +@DATASETS.register_module() +class FaceOccludedDataset(CustomDataset): + """Face Occluded dataset. + + Args: + split (str): Split txt file for Pascal VOC. + """ + + CLASSES = ('background', 'face') + + PALETTE = [[0, 0, 0], [128, 0, 0]] + + def __init__(self, split, **kwargs): + super(FaceOccludedDataset, self).__init__( + img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) + assert osp.exists(self.img_dir) and self.split is not None From 69b49cddbb641573a0af0f580086444030a0cd01 Mon Sep 17 00:00:00 2001 From: jinwonkim93 Date: Mon, 17 Oct 2022 15:30:23 +0000 Subject: [PATCH 05/10] update prepare.md --- configs/_base_/datasets/occlude_face.py | 33 +++-- ...3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py | 2 - docs/en/dataset_prepare.md | 116 ++++++++++++++++-- 3 files changed, 125 insertions(+), 26 deletions(-) diff --git a/configs/_base_/datasets/occlude_face.py b/configs/_base_/datasets/occlude_face.py index db6de35989..efa90129fe 100644 --- a/configs/_base_/datasets/occlude_face.py +++ b/configs/_base_/datasets/occlude_face.py @@ -42,20 +42,28 @@ dataset_train_A = dict( type=dataset_type, data_root=data_root, - img_dir='CelebAMask-HQ-original/image', - ann_dir='CelebAMask-HQ-original/mask_edited', - split='CelebAMask-HQ-original/split/train.txt', + img_dir='NatOcc_hand_sot/img', + ann_dir='NatOcc_hand_sot/mask', + split='train.txt', pipeline=train_pipeline) dataset_train_B = dict( type=dataset_type, data_root=data_root, - img_dir='NatOcc-SOT/image', - ann_dir='NatOcc-SOT/mask', - split='NatOcc-SOT/split/train.txt', + 
img_dir='NatOcc_object/img', + ann_dir='NatOcc_object/mask', + split='train.txt', pipeline=train_pipeline) +dataset_train_C = dict( + type=dataset_type, + data_root=data_root, + img_dir='RandOcc/img', + ann_dir='RandOcc/mask', + split='train.txt', + pipeline=train_pipeline) + dataset_valid = dict( type=dataset_type, data_root=data_root, @@ -64,19 +72,10 @@ split='RealOcc/split/val.txt', pipeline=test_pipeline) -dataset_test = dict( - type=dataset_type, - data_root=data_root, - img_dir='RealOcc/image', - ann_dir='RealOcc/mask', - split='RealOcc/test.txt', - pipeline=test_pipeline) - data = dict( samples_per_gpu=2, workers_per_gpu=2, train=[ - dataset_train_A,dataset_train_B + dataset_train_A, dataset_train_B, dataset_train_C ], - val= dataset_valid, - test=dataset_test) + val= dataset_valid) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py b/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py index 7806d4dc21..ad7085a7d2 100644 --- a/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py +++ b/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py @@ -61,6 +61,4 @@ evaluation = dict( interval=400, metric=['mIoU', 'mDice', 'mFscore'], pre_eval=True) -work_dir = './work_dirs/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT' -gpu_ids = range(0, 2) auto_resume = False diff --git a/docs/en/dataset_prepare.md b/docs/en/dataset_prepare.md index c671961445..46317bf870 100644 --- a/docs/en/dataset_prepare.md +++ b/docs/en/dataset_prepare.md @@ -392,11 +392,58 @@ The extracted and upsampled COCO objects images and masks can be found in this [ Please extract CelebAMask-HQ and 11k Hands images based on the splits found in [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). 
+download file to ./data_materials +```none +CelebAMask-HQ.zip +CelebAMask-HQ-masks_corrected.7z +RealOcc.7z +RealOcc-Wild.7z +11k-hands_mask.7z +11k-hands_image.7z +coco_object.7z +dtd-r1.0.1.tar.gz +``` +--- + +```bash +apt-get install p7zip-full + +cd data_materials + +#extract celebAMask-HQ and split by train-set +unzip CelebAMask-HQ.zip +7za x CelebAMask-HQ-masks_corrected.7z -o./CelebAMask-HQ +#suggest better code if you have +rsync -a ./CelebAMask-HQ/CelebA-HQ-img/ --files-from=./CelebAMask-HQ-WO-train.txt ./CelebAMask-HQ-WO-Train_img +basename -s .jpg ./CelebAMask-HQ-train/* > train.txt +xargs -n 1 -i echo {}.png < train.txt > mask_train.txt +rsync -a ./CelebAMask-HQ/CelebAMask-HQ-masks_corrected/ --files-from=./mask_train.txt ./CelebAMask-HQ-WO-Train_mask +mv train.txt ../data/occlusion-aware-face-dataset + +#extact DTD +tar -zxvf dtd-r1.0.1.tar.gz +mv dtd DTD + +#extract hands dataset and split by 200 samples +7za x 11k-hands_masks.7z -o. +unzip Hands.zip +rsync -a ./Hands/ --files-from=./11k_hands_sample.txt ./11k-hands_img + +#extract upscaled coco object +7za x coco_object.7z -o. +mv coco_object/* . 
+ +#extract validation set +7za x RealOcc.7z -o../data/occlusion-aware-face-dataset + +``` + + **Dataset Organization:** ```none -├── dataset +├── data_materials │ ├── CelebAMask-HQ-WO-Train_img │ │ ├── {image}.jpg │ ├── CelebAMask-HQ-WO-Train_mask @@ -413,20 +460,75 @@ Please extract CelebAMask-HQ and 11k Hands images based on the splits found in [ │ │ ├── {mask}.png │ ├── object_image_sr │ │ ├── {image}.jpg -│ ├── object_image_x4 +│ ├── object_mask_x4 │ │ ├── {mask}.png +├── data +│ ├── occlusion-aware-face-dataset +│ │ ├── train.txt +│ │ ├── NatOcc_hand_sot +│ │ │ ├── img +│ │ │ │ ├── {image}.jpg +│ │ │ ├── mask +│ │ │ │ ├── {mask}.png +│ │ ├── NatOcc_object +│ │ │ ├── img +│ │ │ │ ├── {image}.jpg +│ │ │ ├── mask +│ │ │ │ ├── {mask}.png +│ │ ├── RandOcc +│ │ │ ├── img +│ │ │ │ ├── {image}.jpg +│ │ │ ├── mask +│ │ │ │ ├── {mask}.png +│ │ ├── RandOcc +│ │ │ ├── img +│ │ │ │ ├── {image}.jpg +│ │ │ ├── mask +│ │ │ │ ├── {mask}.png +│ │ │ ├── split +│ │ │ │ ├── val.txt ``` ## Data Generation -Example script to generate NatOcc dataset - -bash NatOcc.sh - +```bash +git clone https://github.com/jinwonkim93/face-occlusion-generation.git +cd face_occlusion-generation +``` +Example script to generate NatOcc hand dataset + +```bash +CUDA_VISIBLE_DEVICES=0 NUM_WORKERS=4 python main.py \ +--config ./configs/natocc_hand.yaml \ +--opts OUTPUT_PATH "path/to/mmsegmentation/data/occlusion-aware-face-dataset/NatOcc_hand_sot"\ +AUGMENTATION.SOT True \ +SOURCE_DATASET.IMG_DIR "path/to/data_materials/CelebAMask-HQ-WO-Train_img" \ +SOURCE_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_mask" \ +OCCLUDER_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/11k-hands_img" \ +OCCLUDER_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/11k-hands_masks" +``` +Example script to generate NatOcc object dataset + +```bash +CUDA_VISIBLE_DEVICES=0 NUM_WORKERS=4 python main.py \ +--config ./configs/natocc_objects.yaml \ +--opts OUTPUT_PATH 
"path/to/mmsegmentation/data/occlusion-aware-face-dataset/NatOcc_object" \ +SOURCE_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_img" \ +SOURCE_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_mask" \ +OCCLUDER_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/object_image_sr" \ +OCCLUDER_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/object_mask_x4" +``` Example script to generate RandOcc dataset -bash RandOcc.sh +```bash +CUDA_VISIBLE_DEVICES=0 NUM_WORKERS=4 python main.py \ +--config ./configs/randocc.yaml \ +--opts OUTPUT_PATH "path/to/mmsegmentation/data/occlusion-aware-face-dataset/RandOcc" \ +SOURCE_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_img/" \ +SOURCE_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_mask" \ +OCCLUDER_DATASET.IMG_DIR "path/to/jw93/mmsegmentation/data_materials/DTD/images" +``` ```python From e4a9dd790a8b540635345d4ca2f3c06fcdd9d66f Mon Sep 17 00:00:00 2001 From: jinwonkim93 Date: Tue, 18 Oct 2022 00:45:58 +0900 Subject: [PATCH 06/10] formatting --- configs/_base_/datasets/occlude_face.py | 19 +++++------ docs/en/dataset_prepare.md | 43 +++++++++++++++++-------- 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/configs/_base_/datasets/occlude_face.py b/configs/_base_/datasets/occlude_face.py index efa90129fe..a15df84cf3 100644 --- a/configs/_base_/datasets/occlude_face.py +++ b/configs/_base_/datasets/occlude_face.py @@ -55,7 +55,6 @@ split='train.txt', pipeline=train_pipeline) - dataset_train_C = dict( type=dataset_type, data_root=data_root, @@ -65,17 +64,15 @@ pipeline=train_pipeline) dataset_valid = dict( - type=dataset_type, - data_root=data_root, - img_dir='RealOcc/image', - ann_dir='RealOcc/mask', - split='RealOcc/split/val.txt', - pipeline=test_pipeline) + type=dataset_type, + data_root=data_root, + img_dir='RealOcc/image', + ann_dir='RealOcc/mask', + 
split='RealOcc/split/val.txt', + pipeline=test_pipeline) data = dict( samples_per_gpu=2, workers_per_gpu=2, - train=[ - dataset_train_A, dataset_train_B, dataset_train_C - ], - val= dataset_valid) + train=[dataset_train_A, dataset_train_B, dataset_train_C], + val=dataset_valid) diff --git a/docs/en/dataset_prepare.md b/docs/en/dataset_prepare.md index 46317bf870..8c6a372259 100644 --- a/docs/en/dataset_prepare.md +++ b/docs/en/dataset_prepare.md @@ -1,4 +1,5 @@ + ## Prepare datasets It is recommended to symlink the dataset root to `$MMSEGMENTATION/data`. @@ -139,6 +140,21 @@ mmsegmentation │ │ ├── ann_dir │ │ │ ├── train │ │ │ ├── val +│ ├── occlusion-aware-face-dataset +│ │ ├── train.txt +│ │ ├── NatOcc_hand_sot +│ │ │ ├── img +│ │ │ ├── mask +│ │ ├── NatOcc_object +│ │ │ ├── img +│ │ │ ├── mask +│ │ ├── RandOcc +│ │ │ ├── img +│ │ │ ├── mask +│ │ ├── RealOcc +│ │ │ ├── img +│ │ │ ├── mask +│ │ │ ├── split ``` ### Cityscapes @@ -378,21 +394,20 @@ python tools/convert_datasets/isaid.py /path/to/iSAID In our default setting (`patch_width`=896, `patch_height`=896, `overlap_area`=384), it will generate 33978 images for training and 11644 images for validation. - ### Delving into High-Quality Synthetic Face Occlusion Segmentation Datasets The dataset is generated by two techniques, Naturalistic occlusion generation, Random occlusion generation. you must install face-occlusion-generation and dataset. see more guide in https://github.com/kennyvoo/face-occlusion-generation.git - ## Dataset Preparation -Please download the masks from this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing) and the images from [CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ), [11k Hands](https://sites.google.com/view/11khands) and [DTD](https://www.robots.ox.ac.uk/~vgg/data/dtd/). 
+Please download the masks from this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing) and the images from [CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ), [11k Hands](https://sites.google.com/view/11khands) and [DTD](https://www.robots.ox.ac.uk/~vgg/data/dtd/). The extracted and upsampled COCO objects images and masks can be found in this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). -Please extract CelebAMask-HQ and 11k Hands images based on the splits found in [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). +Please extract CelebAMask-HQ and 11k Hands images based on the splits found in [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). download file to ./data_materials + ```none CelebAMask-HQ.zip CelebAMask-HQ-masks_corrected.7z @@ -403,7 +418,8 @@ RealOcc-Wild.7z coco_object.7z dtd-r1.0.1.tar.gz ``` ---- + +______________________________________________________________________ ```bash apt-get install p7zip-full @@ -420,7 +436,7 @@ xargs -n 1 -i echo {}.png < train.txt > mask_train.txt rsync -a ./CelebAMask-HQ/CelebAMask-HQ-masks_corrected/ --files-from=./mask_train.txt ./CelebAMask-HQ-WO-Train_mask mv train.txt ../data/occlusion-aware-face-dataset -#extact DTD +#extract DTD tar -zxvf dtd-r1.0.1.tar.gz mv dtd DTD @@ -438,7 +454,6 @@ mv coco_object/* . ``` - **Dataset Organization:** ```none @@ -481,7 +496,7 @@ mv coco_object/* . │ │ │ │ ├── {image}.jpg │ │ │ ├── mask │ │ │ │ ├── {mask}.png -│ │ ├── RandOcc +│ │ ├── RealOcc │ │ │ ├── img │ │ │ │ ├── {image}.jpg │ │ │ ├── mask @@ -496,7 +511,8 @@ mv coco_object/* . 
git clone https://github.com/jinwonkim93/face-occlusion-generation.git cd face_occlusion-generation ``` -Example script to generate NatOcc hand dataset + +Example script to generate NatOcc hand dataset ```bash CUDA_VISIBLE_DEVICES=0 NUM_WORKERS=4 python main.py \ @@ -508,7 +524,8 @@ SOURCE_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO- OCCLUDER_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/11k-hands_img" \ OCCLUDER_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/11k-hands_masks" ``` -Example script to generate NatOcc object dataset + +Example script to generate NatOcc object dataset ```bash CUDA_VISIBLE_DEVICES=0 NUM_WORKERS=4 python main.py \ @@ -519,6 +536,7 @@ SOURCE_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO- OCCLUDER_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/object_image_sr" \ OCCLUDER_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/object_mask_x4" ``` + Example script to generate RandOcc dataset ```bash @@ -529,8 +547,5 @@ SOURCE_DATASET.IMG_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-T SOURCE_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO-Train_mask" \ OCCLUDER_DATASET.IMG_DIR "path/to/jw93/mmsegmentation/data_materials/DTD/images" ``` - -```python - -``` + From 1fc898c7df7ed82393532e82453005a04b7301d7 Mon Sep 17 00:00:00 2001 From: jinwonkim93 Date: Tue, 18 Oct 2022 08:49:07 +0900 Subject: [PATCH 07/10] formatting --- .../deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py | 1 - mmseg/datasets/__init__.py | 5 +++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py b/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py index ad7085a7d2..c94385c7e4 100644 --- a/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py +++ b/configs/deeplabv3plus/deeplabv3plus_r101_512x512_C-CM+C-WO-NatOcc-SOT.py @@ -60,5 
+60,4 @@ checkpoint_config = dict(by_epoch=False, interval=400) evaluation = dict( interval=400, metric=['mIoU', 'mDice', 'mFscore'], pre_eval=True) - auto_resume = False diff --git a/mmseg/datasets/__init__.py b/mmseg/datasets/__init__.py index 3c5396fba4..9060564c0d 100644 --- a/mmseg/datasets/__init__.py +++ b/mmseg/datasets/__init__.py @@ -9,6 +9,7 @@ from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset, RepeatDataset) from .drive import DRIVEDataset +from .face import FaceOccludedDataset from .hrf import HRFDataset from .isaid import iSAIDDataset from .isprs import ISPRSDataset @@ -18,7 +19,6 @@ from .potsdam import PotsdamDataset from .stare import STAREDataset from .voc import PascalVOCDataset -from .face import FaceOccludedDataset __all__ = [ 'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', @@ -27,4 +27,5 @@ 'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset', 'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset', 'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset', - 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset', 'FaceOccludedDataset'] + 'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset', 'FaceOccludedDataset' +] From 2aaa757960c38358f695b66fcbd00bd7806089a5 Mon Sep 17 00:00:00 2001 From: jinwonkim93 Date: Tue, 1 Nov 2022 13:38:51 +0000 Subject: [PATCH 08/10] fix typo error for doc --- docs/en/dataset_prepare.md | 72 +++++++++++++++++++++++--------------- 1 file changed, 44 insertions(+), 28 deletions(-) diff --git a/docs/en/dataset_prepare.md b/docs/en/dataset_prepare.md index 8c6a372259..eca248db1a 100644 --- a/docs/en/dataset_prepare.md +++ b/docs/en/dataset_prepare.md @@ -406,6 +406,7 @@ The extracted and upsampled COCO objects images and masks can be found in this [ Please extract CelebAMask-HQ and 11k Hands images based on the splits found in [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). 
+mkdir data_materials download file to ./data_materials ```none @@ -426,13 +427,19 @@ apt-get install p7zip-full cd data_materials +#make occlusion-aware-face-dataset folder +mkdir path-to-mmsegmentation/data/occlusion-aware-face-dataset + #extract celebAMask-HQ and split by train-set unzip CelebAMask-HQ.zip 7za x CelebAMask-HQ-masks_corrected.7z -o./CelebAMask-HQ -#suggest better code if you have +#copy training data to train-image-folder rsync -a ./CelebAMask-HQ/CelebA-HQ-img/ --files-from=./CelebAMask-HQ-WO-train.txt ./CelebAMask-HQ-WO-Train_img -basename -s .jpg ./CelebAMask-HQ-train/* > train.txt +#create a file-name txt file for copying mask +basename -s .jpg ./CelebAMask-HQ-WO-Train_img/* > train.txt +#add .png to file-name txt file xargs -n 1 -i echo {}.png < train.txt > mask_train.txt +#copy training data to train-mask-folder rsync -a ./CelebAMask-HQ/CelebAMask-HQ-masks_corrected/ --files-from=./mask_train.txt ./CelebAMask-HQ-WO-Train_mask mv train.txt ../data/occlusion-aware-face-dataset @@ -454,7 +461,7 @@ mv coco_object/* . ``` -**Dataset Organization:** +**Dataset Material Organization:** ```none @@ -478,31 +485,6 @@ mv coco_object/* .
│ ├── object_mask_x4 │ │ ├── {mask}.png -├── data -│ ├── occlusion-aware-face-dataset -│ │ ├── train.txt -│ │ ├── NatOcc_hand_sot -│ │ │ ├── img -│ │ │ │ ├── {image}.jpg -│ │ │ ├── mask -│ │ │ │ ├── {mask}.png -│ │ ├── NatOcc_object -│ │ │ ├── img -│ │ │ │ ├── {image}.jpg -│ │ │ ├── mask -│ │ │ │ ├── {mask}.png -│ │ ├── RandOcc -│ │ │ ├── img -│ │ │ │ ├── {image}.jpg -│ │ │ ├── mask -│ │ │ │ ├── {mask}.png -│ │ ├── RealOcc -│ │ │ ├── img -│ │ │ │ ├── {image}.jpg -│ │ │ ├── mask -│ │ │ │ ├── {mask}.png -│ │ │ ├── split -│ │ │ │ ├── val.txt ``` ## Data Generation @@ -548,4 +530,38 @@ SOURCE_DATASET.MASK_DIR "path/to/mmsegmentation/data_materials/CelebAMask-HQ-WO- OCCLUDER_DATASET.IMG_DIR "path/to/jw93/mmsegmentation/data_materials/DTD/images" ``` +**Dataset Organization:** + +```none +├── data +│ ├── occlusion-aware-face-dataset +│ │ ├── train.txt +│ │ ├── NatOcc_hand_sot +│ │ │ ├── img +│ │ │ │ ├── {image}.jpg +│ │ │ ├── mask +│ │ │ │ ├── {mask}.png +│ │ ├── NatOcc_object +│ │ │ ├── img +│ │ │ │ ├── {image}.jpg +│ │ │ ├── mask +│ │ │ │ ├── {mask}.png +│ │ ├── RandOcc +│ │ │ ├── img +│ │ │ │ ├── {image}.jpg +│ │ │ ├── mask +│ │ │ │ ├── {mask}.png +│ │ ├── RealOcc +│ │ │ ├── img +│ │ │ │ ├── {image}.jpg +│ │ │ ├── mask +│ │ │ │ ├── {mask}.png +│ │ │ ├── split +│ │ │ │ ├── val.txt +``` + + +```python + +``` From dd59be35833accfc0ae1d8bed872475ae733e1ed Mon Sep 17 00:00:00 2001 From: jinwonkim93 Date: Wed, 2 Nov 2022 05:02:39 +0000 Subject: [PATCH 09/10] update downloading process --- docs/en/dataset_prepare.md | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/docs/en/dataset_prepare.md b/docs/en/dataset_prepare.md index eca248db1a..2340dcffc9 100644 --- a/docs/en/dataset_prepare.md +++ b/docs/en/dataset_prepare.md @@ -400,22 +400,37 @@ The dataset is generated by two techniques, Naturalistic occlusion generation, R ## Dataset Preparation -Please download the masks from this 
[drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing) and the images from [CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ), [11k Hands](https://sites.google.com/view/11khands) and [DTD](https://www.robots.ox.ac.uk/~vgg/data/dtd/). +step 1 -The extracted and upsampled COCO objects images and masks can be found in this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). - -Please extract CelebAMask-HQ and 11k Hands images based on the splits found in [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). +Create a folder for data generation materials in the mmsegmentation folder. +```shell mkdir data_materials +``` + +step 2 + +Please download the masks (11k-hands_mask.7z, CelebAMask-HQ-masks_corrected.7z) from this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). + +Please download the images from [CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ), [11k Hands.zip](https://sites.google.com/view/11khands) and [dtd-r1.0.1.tar.gz](https://www.robots.ox.ac.uk/~vgg/data/dtd/). + +step 3 + +Download the upsampled COCO object images and masks (coco_object.7z). The files can be found in this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing). + +Download the CelebAMask-HQ and 11k Hands image split txt files (11k_hands_sample.txt, CelebAMask-HQ-WO-train.txt) found in this [drive](https://drive.google.com/drive/folders/15nZETWlGMdcKY6aHbchRsWkUI42KTNs5?usp=sharing).
+ download file to ./data_materials ```none CelebAMask-HQ.zip CelebAMask-HQ-masks_corrected.7z +CelebAMask-HQ-WO-train.txt RealOcc.7z RealOcc-Wild.7z 11k-hands_mask.7z -11k-hands_image.7z +11k Hands.zip +11k_hands_sample.txt coco_object.7z dtd-r1.0.1.tar.gz ``` From 70b2853c62a1b6aa87efaf6eb120dde355deb678 Mon Sep 17 00:00:00 2001 From: whooray Date: Thu, 10 Nov 2022 22:30:12 +0900 Subject: [PATCH 10/10] Update dataset_prepare.md PR fix version to original repository. change to original repository. --- docs/en/dataset_prepare.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/dataset_prepare.md b/docs/en/dataset_prepare.md index 2340dcffc9..a4878d0d83 100644 --- a/docs/en/dataset_prepare.md +++ b/docs/en/dataset_prepare.md @@ -505,7 +505,7 @@ mv coco_object/* . ## Data Generation ```bash -git clone https://github.com/jinwonkim93/face-occlusion-generation.git +git clone https://github.com/kennyvoo/face-occlusion-generation.git cd face_occlusion-generation ```