From cb7933cf82e1d12394a6757b9b526e829a256257 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 11 Nov 2020 20:57:09 +0800 Subject: [PATCH 001/210] update `category` to `class` --- dygraph/paddleseg/core/val.py | 10 +++++----- dygraph/paddleseg/utils/metrics.py | 14 +++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/dygraph/paddleseg/core/val.py b/dygraph/paddleseg/core/val.py index 07952d22ee..d7f605def8 100644 --- a/dygraph/paddleseg/core/val.py +++ b/dygraph/paddleseg/core/val.py @@ -132,13 +132,13 @@ def evaluate(model, eval_dataset=None, iter_id=None, num_workers=0): if local_rank == 0: progbar_val.update(iter + 1) - category_iou, miou = metrics.mean_iou(intersect_area_all, pred_area_all, - label_area_all) - category_acc, acc = metrics.accuracy(intersect_area_all, pred_area_all) + class_iou, miou = metrics.mean_iou(intersect_area_all, pred_area_all, + label_area_all) + class_acc, acc = metrics.accuracy(intersect_area_all, pred_area_all) kappa = metrics.kappa(intersect_area_all, pred_area_all, label_area_all) logger.info("[EVAL] #Images={} mIoU={:.4f} Acc={:.4f} Kappa={:.4f} ".format( len(eval_dataset), miou, acc, kappa)) - logger.info("[EVAL] Category IoU: \n" + str(np.round(category_iou, 4))) - logger.info("[EVAL] Category Acc: \n" + str(np.round(category_acc, 4))) + logger.info("[EVAL] Class IoU: \n" + str(np.round(class_iou, 4))) + logger.info("[EVAL] Class Acc: \n" + str(np.round(class_acc, 4))) return miou, acc diff --git a/dygraph/paddleseg/utils/metrics.py b/dygraph/paddleseg/utils/metrics.py index 7106d8e3c0..ad5b3c9758 100644 --- a/dygraph/paddleseg/utils/metrics.py +++ b/dygraph/paddleseg/utils/metrics.py @@ -88,15 +88,15 @@ def mean_iou(intersect_area, pred_area, label_area): pred_area = pred_area.numpy() label_area = label_area.numpy() union = pred_area + label_area - intersect_area - category_iou = [] + class_iou = [] for i in range(len(intersect_area)): if union[i] == 0: iou = 0 else: iou = intersect_area[i] / union[i] - category_iou.append(iou) - miou = np.mean(category_iou) - return np.array(category_iou), miou + class_iou.append(iou) + miou = np.mean(class_iou) + return np.array(class_iou), miou def accuracy(intersect_area, pred_area): @@ -113,15 +113,15 @@ def accuracy(intersect_area, pred_area): """ intersect_area = intersect_area.numpy() pred_area = pred_area.numpy() - category_acc = [] + class_acc = [] for i in range(len(intersect_area)): if pred_area[i] == 0: acc = 0 else: acc = intersect_area[i] / pred_area[i] - category_acc.append(acc) + class_acc.append(acc) macc = np.sum(intersect_area) / np.sum(pred_area) - return np.array(category_acc), macc + return np.array(class_acc), macc def kappa(intersect_area, pred_area, label_area): From ed0e1a3052e4960debe2c26d51b81107b1400e23 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 13 Nov 2020 10:41:40 +0800 Subject: [PATCH 002/210] add voc and ade20k config --- dygraph/configs/_base_/ade20k.yml | 6 ++- dygraph/configs/_base_/pascal_voc12.yml | 47 +++++++++++++++++++ dygraph/configs/_base_/pascal_voc12aug.yml | 41 +--------------- .../fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml | 14 ++++++ 4 files changed, 67 insertions(+), 41 deletions(-) create mode 100644 dygraph/configs/_base_/pascal_voc12.yml create mode 100644 dygraph/configs/fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/_base_/ade20k.yml b/dygraph/configs/_base_/ade20k.yml index e5ad008d66..115a9371ba 100644 --- a/dygraph/configs/_base_/ade20k.yml +++ b/dygraph/configs/_base_/ade20k.yml @@ -1,5 +1,5 @@ 
batch_size: 4 -iters: 160000 +iters: 80000 train_dataset: type: ADE20K @@ -12,6 +12,10 @@ train_dataset: - type: RandomPaddingCrop crop_size: [512, 512] - type: RandomHorizontalFlip + - type: RandomDistort + brightness_range: 0.4 + contrast_range: 0.4 + saturation_range: 0.4 - type: Normalize mode: train diff --git a/dygraph/configs/_base_/pascal_voc12.yml b/dygraph/configs/_base_/pascal_voc12.yml new file mode 100644 index 0000000000..d307a77721 --- /dev/null +++ b/dygraph/configs/_base_/pascal_voc12.yml @@ -0,0 +1,47 @@ +batch_size: 4 +iters: 40000 + +train_dataset: + type: PascalVOC + dataset_root: data/VOCdevkit/ + transforms: + - type: ResizeStepScaling + min_scale_factor: 0.5 + max_scale_factor: 2.0 + scale_step_size: 0.25 + - type: RandomPaddingCrop + crop_size: [512, 512] + - type: RandomHorizontalFlip + - type: RandomDistort + brightness_range: 0.4 + contrast_range: 0.4 + saturation_range: 0.4 + - type: Normalize + mode: train + +val_dataset: + type: PascalVOC + dataset_root: data/VOCdevkit/ + transforms: + - type: Padding + target_size: [512, 512] + - type: Normalize + mode: val + + +optimizer: + type: sgd + momentum: 0.9 + weight_decay: 4.0e-5 + +learning_rate: + value: 0.01 + decay: + type: poly + power: 0.9 + end_lr: 0.0 + +loss: + types: + - type: CrossEntropyLoss + coef: [1] diff --git a/dygraph/configs/_base_/pascal_voc12aug.yml b/dygraph/configs/_base_/pascal_voc12aug.yml index d85700b02e..66fec2a5e2 100644 --- a/dygraph/configs/_base_/pascal_voc12aug.yml +++ b/dygraph/configs/_base_/pascal_voc12aug.yml @@ -1,43 +1,4 @@ -batch_size: 4 -iters: 40000 +_base_: './pascal_voc12.yml' train_dataset: - type: PascalVOC - dataset_root: data/VOCdevkit/ - transforms: - - type: ResizeStepScaling - min_scale_factor: 0.5 - max_scale_factor: 2.0 - scale_step_size: 0.25 - - type: RandomPaddingCrop - crop_size: [512, 512] - - type: RandomHorizontalFlip - - type: Normalize mode: trainaug - -val_dataset: - type: PascalVOC - dataset_root: data/VOCdevkit/ - transforms: - - type: Padding - target_size: [512, 512] - - type: Normalize - mode: val - - -optimizer: - type: sgd - momentum: 0.9 - weight_decay: 4.0e-5 - -learning_rate: - value: 0.01 - decay: - type: poly - power: 0.9 - end_lr: 0.0 - -loss: - types: - - type: CrossEntropyLoss - coef: [1] diff --git a/dygraph/configs/fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml b/dygraph/configs/fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml new file mode 100644 index 0000000000..4003814df2 --- /dev/null +++ b/dygraph/configs/fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml @@ -0,0 +1,14 @@ +_base_: '../_base_/pascal_voc12aug.yml' + +model: + type: FCN + backbone: + type: HRNet_W18 + align_corners: False + pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz + num_classes: 21 + pretrained: Null + backbone_indices: [-1] + +optimizer: + weight_decay: 0.0005 From c6a0e903f4592eccf0789c2868b9bb7ea883a84e Mon Sep 17 00:00:00 2001 From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com> Date: Fri, 13 Nov 2020 10:44:02 +0800 Subject: [PATCH 003/210] add ms_flip and sliding window (#576) --- dygraph/configs/ann/README.md | 8 +- dygraph/configs/deeplabv3/README.md | 8 +- dygraph/configs/deeplabv3p/README.md | 8 +- ...p_resnet101_os8_cityscapes_769x769_80k.yml | 18 ++ dygraph/configs/fastscnn/README.md | 6 +- dygraph/configs/fcn/README.md | 8 +- dygraph/configs/gcnet/README.md | 8 +- dygraph/configs/ocrnet/README.md | 8 +- dygraph/configs/pspnet/README.md | 8 +- dygraph/configs/unet/README.md | 6 +- dygraph/paddleseg/core/__init__.py | 3 +- 
dygraph/paddleseg/core/infer.py | 233 ++++++++++++++++++ dygraph/paddleseg/core/predict.py | 60 +++-- dygraph/paddleseg/core/train.py | 2 +- dygraph/paddleseg/core/val.py | 79 +++--- dygraph/predict.py | 63 ++++- dygraph/val.py | 60 ++++- 17 files changed, 481 insertions(+), 105 deletions(-) create mode 100644 dygraph/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml create mode 100644 dygraph/paddleseg/core/infer.py diff --git a/dygraph/configs/ann/README.md b/dygraph/configs/ann/README.md index 847d03e631..959e7d2b70 100644 --- a/dygraph/configs/ann/README.md +++ b/dygraph/configs/ann/README.md @@ -8,7 +8,7 @@ ### Cityscapes -| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (multi-scale) | Links | -|:-:|:-:|:-:|:-:|:-:|:-:|:-:| -|ANN|ResNet50_OS8|1024x512|80000|79.09%|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/ann_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/ann_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=b849c8e06b6ccd33514d436635b9e102)| -|ANN|ResNet101_OS8|1024x512|80000|80.61%|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/ann_resnet101_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/ann_resnet101_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=ed1cb9321385f1480dda418db71bd4c0)| +| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links | +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +|ANN|ResNet50_OS8|1024x512|80000|79.09%|79.31%|79.56%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/ann_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/ann_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=b849c8e06b6ccd33514d436635b9e102)| +|ANN|ResNet101_OS8|1024x512|80000|80.61%|80.98%|81.25%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/ann_resnet101_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/ann_resnet101_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=ed1cb9321385f1480dda418db71bd4c0)| diff --git a/dygraph/configs/deeplabv3/README.md b/dygraph/configs/deeplabv3/README.md index 52eead50fb..2949ea40a7 100644 --- a/dygraph/configs/deeplabv3/README.md +++ b/dygraph/configs/deeplabv3/README.md @@ -8,7 +8,7 @@ ### Cityscapes -| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (multi-scale) | Links | -|:-:|:-:|:-:|:-:|:-:|:-:|:-:| -|DeepLabV3|ResNet50_OS8|1024x512|80000|79.90%|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=7e30d1cb34cd94400e1e1266538dfb6c)| -|DeepLabV3|ResNet101_OS8|1024x512|80000|80.85%|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3_resnet101_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3_resnet101_os8_cityscapes_1024x512_80k/train.log) \| 
[vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=1ff25b7f3c5e88a051b9dd273625f942)| +| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links | +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +|DeepLabV3|ResNet50_OS8|1024x512|80000|79.90%|80.22%|80.47%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=7e30d1cb34cd94400e1e1266538dfb6c)| +|DeepLabV3|ResNet101_OS8|1024x512|80000|80.85%|81.09%|81.54%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3_resnet101_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3_resnet101_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=1ff25b7f3c5e88a051b9dd273625f942)| diff --git a/dygraph/configs/deeplabv3p/README.md b/dygraph/configs/deeplabv3p/README.md index 65b3467890..13f03c59ae 100644 --- a/dygraph/configs/deeplabv3p/README.md +++ b/dygraph/configs/deeplabv3p/README.md @@ -8,6 +8,8 @@ ### Cityscapes -| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (multi-scale) | Links | -|:-:|:-:|:-:|:-:|:-:|:-:|:-:| -|DeepLabV3P|ResNet50_OS8|1024x512|80000|80.36%|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=860bd0049ba5495d629a96d5aaf1bf75)| +| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links | +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +|DeepLabV3P|ResNet50_OS8|1024x512|80000|80.36%|80.57%|80.81%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=860bd0049ba5495d629a96d5aaf1bf75)| +|DeepLabV3P|ResNet101_OS8|1024x512|80000|81.10%|81.38%|81.24%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3p_resnet101_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3p_resnet101_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=8b11e75b8977a0fd74180145350c27de)| +|DeepLabV3P|ResNet101_OS8|769x769|80000|81.53%|81.88%|82.12%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3p_resnet101_os8_cityscapes_769x769_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/deeplabv3p_resnet101_os8_cityscapes_769x769_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=420039406361cbc3cf7ec14c1084d886)| diff --git a/dygraph/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml b/dygraph/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml new file mode 100644 index 0000000000..752a3d926f --- /dev/null +++ b/dygraph/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml @@ -0,0 +1,18 @@ +_base_: '../_base_/cityscapes_769x769.yml' + +batch_size: 
2 +iters: 80000 + +model: + type: DeepLabV3P + backbone: + type: ResNet101_vd + output_stride: 8 + multi_grid: [1, 2, 4] + pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz + num_classes: 19 + backbone_indices: [0, 3] + aspp_ratios: [1, 12, 24, 36] + aspp_out_channels: 256 + align_corners: True + pretrained: null diff --git a/dygraph/configs/fastscnn/README.md b/dygraph/configs/fastscnn/README.md index 6ded1f0ce5..541fea2a88 100644 --- a/dygraph/configs/fastscnn/README.md +++ b/dygraph/configs/fastscnn/README.md @@ -8,6 +8,6 @@ ### Cityscapes -| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (multi-scale) | Links | -|:-:|:-:|:-:|:-:|:-:|:-:|:-:| -|Fast SCNN|-|1024x1024|160000|69.31%|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fastscnn_cityscapes_1024x1024_160k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fastscnn_cityscapes_1024x1024_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app/scalar?id=3b4c3f01c9213cac14e53c69d262a337)| +| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links | +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +|Fast SCNN|-|1024x1024|160000|69.31%|-|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fastscnn_cityscapes_1024x1024_160k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fastscnn_cityscapes_1024x1024_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app/scalar?id=3b4c3f01c9213cac14e53c69d262a337)| diff --git a/dygraph/configs/fcn/README.md b/dygraph/configs/fcn/README.md index bcacfdca18..8fbe94e976 100644 --- a/dygraph/configs/fcn/README.md +++ b/dygraph/configs/fcn/README.md @@ -7,7 +7,7 @@ ### Cityscapes -| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (multi-scale) | Links | -|:-:|:-:|:-:|:-:|:-:|:-:|:-:| -|FCN|HRNet_W18|1024x512|80000|80.34%|-|[model](https://paddleseg.bj.bcebos.com/dygraph/fcn_hrnetw18_cityscapes_1024x512_80k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/fcn_hrnetw18_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=141ed1c7aa77474ec2a2d063713570f9)| -|FCN|HRNet_W48|1024x512|80000|81.17%|-|[model](https://paddleseg.bj.bcebos.com/dygraph/fcn_hrnetw48_cityscapes_1024x512_80k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/fcn_hrnetw48_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=6f219d4b9bab266385ab6023ea097aa6)| +| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links | +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +|FCN|HRNet_W18|1024x512|80000|78.97%|79.49%|79.74%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw18_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw18_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=bebec8e1a3802c4babd3c69e1bf50d51)| +|FCN|HRNet_W48|1024x512|80000|80.70%|81.24%|81.56%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw48_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw48_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=ae1cb76014cdc54406c36f1e3dc2a530)| diff --git a/dygraph/configs/gcnet/README.md b/dygraph/configs/gcnet/README.md index 
f49ce03442..06899ed741 100644
--- a/dygraph/configs/gcnet/README.md
+++ b/dygraph/configs/gcnet/README.md
@@ -8,7 +8,7 @@
 
 ### Cityscapes
 
-| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (multi-scale) | Links |
-|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
-|GCNet|ResNet50_OS8|1024x512|80000|79.50%|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=e3801edb9a6f5b33eb890f5a1ae6ed7b)|
-|GCNet|ResNet101_OS8|1024x512|80000|81.01%|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet101_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet101_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://www.paddlepaddle.org.cn/paddle/visualdl/service/app/scalar?id=aa88e7980f4d6839537662a3a3d18851)|
+| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links |
+|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
+|GCNet|ResNet50_OS8|1024x512|80000|79.50%|79.77%|79.69%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=e3801edb9a6f5b33eb890f5a1ae6ed7b)|
+|GCNet|ResNet101_OS8|1024x512|80000|81.01%|81.30%|81.64%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet101_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet101_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://www.paddlepaddle.org.cn/paddle/visualdl/service/app/scalar?id=aa88e7980f4d6839537662a3a3d18851)|
diff --git a/dygraph/configs/ocrnet/README.md b/dygraph/configs/ocrnet/README.md
index 8710390c48..51a224c616 100644
--- a/dygraph/configs/ocrnet/README.md
+++ b/dygraph/configs/ocrnet/README.md
@@ -8,7 +8,7 @@
 
 ### CityScapes
 
-| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (multi-scale) | Links |
-|-|-|-|-|-|-|-|
-|OCRNet|HRNet_w18|1024x512|160000|80.67%||[model](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw18_cityscapes_1024x512_160k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw18_cityscapes_1024x512_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=901a5d0a78b71ca56f06002f05547837)|
-|OCRNet|HRNet_w48|1024x512|160000|82.15%||[model](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw48_cityscapes_1024x512_160k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw48_cityscapes_1024x512_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=176bf6ca4d89957ffe62ac7c30fcd039) |
+| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links |
+|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
+|OCRNet|HRNet_w18|1024x512|160000|80.67%|81.21%|81.30%|[model](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw18_cityscapes_1024x512_160k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw18_cityscapes_1024x512_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=901a5d0a78b71ca56f06002f05547837)|
+|OCRNet|HRNet_w48|1024x512|160000|82.15%|82.59%|82.85%|[model](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw48_cityscapes_1024x512_160k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw48_cityscapes_1024x512_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=176bf6ca4d89957ffe62ac7c30fcd039) |
diff --git a/dygraph/configs/pspnet/README.md b/dygraph/configs/pspnet/README.md
index ac27272939..d6a4651985 100644
--- a/dygraph/configs/pspnet/README.md
+++ b/dygraph/configs/pspnet/README.md
@@ -8,7 +8,7 @@
 
 ### Cityscapes
 
-| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (multi-scale) | Links |
-|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
-|PSPNet|ResNet50_OS8|1024x512|80000|78.83%|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/pspnet_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/pspnet_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=2758d49b826d614abc53fb79562ebd10)|
-|PSPNet|ResNet101_OS8|1024x512|80000|80.48%|-|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/pspnet_resnet101_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/pspnet_resnet101_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=899c080f0c38e0f5481e0dd28038bb6f)|
+| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links |
+|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
+|PSPNet|ResNet50_OS8|1024x512|80000|78.83%|79.03%|79.32%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/pspnet_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/pspnet_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=2758d49b826d614abc53fb79562ebd10)|
+|PSPNet|ResNet101_OS8|1024x512|80000|80.48%|80.74%|81.04%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/pspnet_resnet101_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/pspnet_resnet101_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=899c080f0c38e0f5481e0dd28038bb6f)|
diff --git a/dygraph/configs/unet/README.md b/dygraph/configs/unet/README.md
index f8d7adf2c5..84c9091e52 100644
--- a/dygraph/configs/unet/README.md
+++ b/dygraph/configs/unet/README.md
@@ -7,6 +7,6 @@
 
 ### Cityscapes
 
-| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (multi-scale) | Links |
-|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
-|UNet|-|1024x512|160000|62.20%|-|[model](https://paddleseg.bj.bcebos.com/dygraph/unet_cityscapes_1024x512_160k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/unet_cityscapes_1024x512_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=cbf444781f46612a30dbab5efc4d6715)|
+| Model | Backbone | Resolution | Training Iters | Batch Size | mIoU | mIoU (flip) | mIoU (ms+flip) | Links |
+|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
+|UNet|-|1024x512|160000|4|65.00%|66.02%|66.89%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/unet_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/unet_cityscapes_1024x512_80k/train.log) \|
[vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=67b3338de34ad09f0cb5e7c6856305cc)|
diff --git a/dygraph/paddleseg/core/__init__.py b/dygraph/paddleseg/core/__init__.py
index c6c1775d6d..35189064a6 100644
--- a/dygraph/paddleseg/core/__init__.py
+++ b/dygraph/paddleseg/core/__init__.py
@@ -15,5 +15,6 @@
 from .train import train
 from .val import evaluate
 from .predict import predict
+from . import infer
 
-__all__ = ['train', 'evaluate', 'infer']
+__all__ = ['train', 'evaluate', 'predict']
diff --git a/dygraph/paddleseg/core/infer.py b/dygraph/paddleseg/core/infer.py
new file mode 100644
index 0000000000..816106a6e9
--- /dev/null
+++ b/dygraph/paddleseg/core/infer.py
@@ -0,0 +1,233 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from itertools import combinations
+
+import numpy as np
+import paddle
+import paddle.nn.functional as F
+
+
+def get_reverse_list(ori_shape, transforms):
+    """
+    Get the reverse list of transforms.
+
+    Args:
+        ori_shape (list): Original shape of the image.
+        transforms (list): List of transforms.
+
+    Returns:
+        list: List of tuples, in two formats:
+            ('resize', (h, w)) The image shape before resize,
+            ('padding', (h, w)) The image shape before padding.
+    """
+    reverse_list = []
+    h, w = ori_shape[0], ori_shape[1]
+    for op in transforms:
+        if op.__class__.__name__ in ['Resize', 'ResizeByLong']:
+            reverse_list.append(('resize', (h, w)))
+            h, w = op.target_size[0], op.target_size[1]
+        if op.__class__.__name__ in ['Padding']:
+            reverse_list.append(('padding', (h, w)))
+            w, h = op.target_size[0], op.target_size[1]
+    return reverse_list
+
+
+def reverse_transform(pred, ori_shape, transforms):
+    """Recover the prediction to the original shape."""
+    reverse_list = get_reverse_list(ori_shape, transforms)
+    for item in reverse_list[::-1]:
+        if item[0] == 'resize':
+            h, w = item[1][0], item[1][1]
+            pred = F.interpolate(pred, (h, w), mode='nearest')
+        elif item[0] == 'padding':
+            h, w = item[1][0], item[1][1]
+            pred = pred[:, :, 0:h, 0:w]
+        else:
+            raise Exception("Unexpected info '{}' in im_info".format(item[0]))
+    return pred
+
+
+def flip_combination(flip_horizontal=False, flip_vertical=False):
+    """
+    Get the flip combination.
+
+    Args:
+        flip_horizontal (bool): Whether to flip horizontally. Default: False.
+        flip_vertical (bool): Whether to flip vertically. Default: False.
+
+    Returns:
+        list: List of tuples. The first element of each tuple is whether to flip horizontally,
+            and the second is whether to flip vertically.
+    """
+
+    flip_comb = [(False, False)]
+    if flip_horizontal:
+        flip_comb.append((True, False))
+    if flip_vertical:
+        flip_comb.append((False, True))
+        if flip_horizontal:
+            flip_comb.append((True, True))
+    return flip_comb
+
+
+def tensor_flip(x, flip):
+    """Flip a tensor according to the given directions."""
+    if flip[0]:
+        x = x[:, :, :, ::-1]
+    if flip[1]:
+        x = x[:, :, ::-1, :]
+    return x
+
+
+def slide_inference(model, im, crop_size, stride):
+    """
+    Infer by sliding window.
+
+    Args:
+        model (paddle.nn.Layer): model to get logits of image.
+        im (Tensor): the input image.
+        crop_size (tuple|list): The size of sliding window, (w, h).
+        stride (tuple|list): The size of stride, (w, h).
+
+    Returns:
+        Tensor: The logit of input image.
+    """
+    h_im, w_im = im.shape[-2:]
+    w_crop, h_crop = crop_size
+    w_stride, h_stride = stride
+    # calculate the number of sliding-window rows and columns
+    rows = np.int(np.ceil(1.0 * (h_im - h_crop) / h_stride)) + 1
+    cols = np.int(np.ceil(1.0 * (w_im - w_crop) / w_stride)) + 1
+    # TODO: 'Tensor' object does not support item assignment. Once it does, use a tensor for this calculation.
+    final_logit = None
+    count = np.zeros([1, 1, h_im, w_im])
+    for r in range(rows):
+        for c in range(cols):
+            h1 = r * h_stride
+            w1 = c * w_stride
+            h2 = min(h1 + h_crop, h_im)
+            w2 = min(w1 + w_crop, w_im)
+            h1 = max(h2 - h_crop, 0)
+            w1 = max(w2 - w_crop, 0)
+            im_crop = im[:, :, h1:h2, w1:w2]
+            im_pad = F.pad(im_crop, [0, w_crop, 0, h_crop])
+            logit = model(im_crop)[0].numpy()
+            if final_logit is None:
+                final_logit = np.zeros([1, logit.shape[1], h_im, w_im])
+            final_logit[:, :, h1:h2, w1:w2] += logit[:, :, :h2 - h1, :w2 - w1]
+            count[:, :, h1:h2, w1:w2] += 1
+    if np.sum(count == 0) != 0:
+        raise RuntimeError(
+            'There are pixels not predicted. It is possible that the stride is greater than the crop size.'
+        )
+    final_logit = final_logit / count
+    final_logit = paddle.to_tensor(final_logit)
+    return final_logit
+
+
+def inference(model,
+              im,
+              ori_shape=None,
+              transforms=None,
+              is_slide=False,
+              stride=None,
+              crop_size=None):
+    """
+    Inference for image.
+
+    Args:
+        model (paddle.nn.Layer): model to get logits of image.
+        im (Tensor): the input image.
+        ori_shape (list): Original shape of the image.
+        transforms (list): Transforms for the image.
+        is_slide (bool): Whether to infer by sliding window. Default: False.
+        crop_size (tuple|list): The size of sliding window, (w, h). It should be provided if is_slide is True.
+        stride (tuple|list): The size of stride, (w, h). It should be provided if is_slide is True.
+
+    Returns:
+        Tensor: If ori_shape is not None, a prediction with shape (1, 1, h, w) is returned.
+            If ori_shape is None, a logit with shape (1, num_classes, h, w) is returned.
+    """
+    if not is_slide:
+        logits = model(im)
+        logit = logits[0]
+    else:
+        logit = slide_inference(model, im, crop_size=crop_size, stride=stride)
+    if ori_shape is not None:
+        pred = paddle.argmax(logit, axis=1, keepdim=True, dtype='int32')
+        pred = reverse_transform(pred, ori_shape, transforms)
+        return pred
+    else:
+        return logit
+
+
+def aug_inference(model,
+                  im,
+                  ori_shape,
+                  transforms,
+                  scales=1.0,
+                  flip_horizontal=False,
+                  flip_vertical=False,
+                  is_slide=False,
+                  stride=None,
+                  crop_size=None):
+    """
+    Infer with augmentation.
+
+    Args:
+        model (paddle.nn.Layer): model to get logits of image.
+        im (Tensor): the input image.
+        ori_shape (list): Original shape of the image.
+        transforms (list): Transforms for the image.
+        scales (float|tuple|list): Scales for resize. Default: 1.
+        flip_horizontal (bool): Whether to flip horizontally. Default: False.
+        flip_vertical (bool): Whether to flip vertically. Default: False.
+        is_slide (bool): Whether to infer by sliding window. Default: False.
+        crop_size (tuple|list): The size of sliding window, (w, h). It should be provided if is_slide is True.
+        stride (tuple|list): The size of stride, (w, h). It should be provided if is_slide is True.
+
+    Returns:
+        Tensor: Prediction of image with shape (1, 1, h, w) is returned.
+    """
+    if isinstance(scales, float):
+        scales = [scales]
+    elif not isinstance(scales, (tuple, list)):
+        raise TypeError(
+            '`scales` expects float/tuple/list type, but received {}'.format(
+                type(scales)))
+    final_logit = 0
+    h_input, w_input = im.shape[-2], im.shape[-1]
+    flip_comb = flip_combination(flip_horizontal, flip_vertical)
+    for scale in scales:
+        h = int(h_input * scale + 0.5)
+        w = int(w_input * scale + 0.5)
+        im = F.interpolate(im, (h, w), mode='bilinear')
+        for flip in flip_comb:
+            im_flip = tensor_flip(im, flip)
+            logit = inference(
+                model,
+                im_flip,
+                is_slide=is_slide,
+                crop_size=crop_size,
+                stride=stride)
+            logit = tensor_flip(logit, flip)
+            logit = F.interpolate(logit, (h_input, w_input), mode='bilinear')
+
+            logit = F.softmax(logit, axis=1)
+            final_logit = final_logit + logit
+
+    pred = paddle.argmax(final_logit, axis=1, keepdim=True, dtype='int32')
+    pred = reverse_transform(pred, ori_shape, transforms)
+    return pred
diff --git a/dygraph/paddleseg/core/predict.py b/dygraph/paddleseg/core/predict.py
index 4fc38e8161..0aa424a37f 100644
--- a/dygraph/paddleseg/core/predict.py
+++ b/dygraph/paddleseg/core/predict.py
@@ -17,10 +17,10 @@
 import cv2
 import numpy as np
 import paddle
-import tqdm
 
 from paddleseg import utils
-import paddleseg.utils.logger as logger
+from paddleseg.core import infer
+from paddleseg.utils import logger, progbar
 
 
 def mkdir(path):
@@ -34,7 +34,14 @@ def predict(model,
             transforms,
             image_list,
             image_dir=None,
-            save_dir='output'):
+            save_dir='output',
+            aug_pred=False,
+            scales=1.0,
+            flip_horizontal=True,
+            flip_vertical=False,
+            is_slide=False,
+            stride=None,
+            crop_size=None):
     """
     predict and visualize the image_list.
 
@@ -55,24 +62,37 @@ def predict(model,
     pred_saved_dir = os.path.join(save_dir, 'pseudo_color_prediction')
 
     logger.info("Start to predict...")
-    for im_path in tqdm.tqdm(image_list):
-        im, im_info, _ = transforms(im_path)
+    progbar_pred = progbar.Progbar(target=len(image_list), verbose=1)
+    for i, im_path in enumerate(image_list):
+        im = cv2.imread(im_path)
+        ori_shape = im.shape[:2]
+        im, _ = transforms(im)
         im = im[np.newaxis, ...]
         im = paddle.to_tensor(im)
-        logits = model(im)
-        pred = paddle.argmax(logits[0], axis=1)
-        pred = pred.numpy()
-        pred = np.squeeze(pred).astype('uint8')
-        for info in im_info[::-1]:
-            if info[0] == 'resize':
-                h, w = info[1][0], info[1][1]
-                pred = cv2.resize(pred, (w, h), cv2.INTER_NEAREST)
-            elif info[0] == 'padding':
-                h, w = info[1][0], info[1][1]
-                pred = pred[0:h, 0:w]
-            else:
-                raise ValueError("Unexpected info '{}' in im_info".format(
-                    info[0]))
+
+        if aug_pred:
+            pred = infer.aug_inference(
+                model,
+                im,
+                ori_shape=ori_shape,
+                transforms=transforms.transforms,
+                scales=scales,
+                flip_horizontal=flip_horizontal,
+                flip_vertical=flip_vertical,
+                is_slide=is_slide,
+                stride=stride,
+                crop_size=crop_size)
+        else:
+            pred = infer.inference(
+                model,
+                im,
+                ori_shape=ori_shape,
+                transforms=transforms.transforms,
+                is_slide=is_slide,
+                stride=stride,
+                crop_size=crop_size)
+        pred = paddle.squeeze(pred)
+        pred = pred.numpy().astype('uint8')
 
         # get the saved name
         if image_dir is not None:
@@ -93,3 +113,5 @@
         pred_saved_path = os.path.join(pred_saved_dir, im_file)
         mkdir(pred_saved_path)
         cv2.imwrite(pred_saved_path, pred_im)
+
+        progbar_pred.update(i + 1)
diff --git a/dygraph/paddleseg/core/train.py b/dygraph/paddleseg/core/train.py
index beb8161415..f0b4179abe 100644
--- a/dygraph/paddleseg/core/train.py
+++ b/dygraph/paddleseg/core/train.py
@@ -152,7 +152,7 @@
         if (iter % save_interval == 0
                 or iter == iters) and (val_dataset is not None):
             mean_iou, acc = evaluate(
-                model, val_dataset, iter_id=iter, num_workers=num_workers)
+                model, val_dataset, num_workers=num_workers)
             model.train()
 
         if (iter % save_interval == 0 or iter == iters) and local_rank == 0:
diff --git a/dygraph/paddleseg/core/val.py b/dygraph/paddleseg/core/val.py
index d7f605def8..5455038912 100644
--- a/dygraph/paddleseg/core/val.py
+++ b/dygraph/paddleseg/core/val.py
@@ -19,51 +19,21 @@
 import paddle.nn.functional as F
 
 from paddleseg.utils import metrics, Timer, calculate_eta, logger, progbar
+from paddleseg.core import infer
 
 np.set_printoptions(suppress=True)
 
 
-def get_reverse_list(ori_label, transforms):
-    """
-    get reverse list of transform.
-
-    Args:
-        ori_label (Tensor): Origin label
-        transforms (List): List of transform.
-
-    Returns:
-        list: List of tuple, there are two format:
-            ('resize', (h, w)) The image shape before resize,
-            ('padding', (h, w)) The image shape before padding.
-    """
-    reverse_list = []
-    h, w = ori_label.shape[-2], ori_label.shape[-1]
-    for op in transforms:
-        if op.__class__.__name__ in ['Resize', 'ResizeByLong']:
-            reverse_list.append(('resize', (h, w)))
-            h, w = op.target_size[0], op.target_size[1]
-        if op.__class__.__name__ in ['Padding']:
-            reverse_list.append(('padding', (h, w)))
-            w, h = op.target_size[0], op.target_size[1]
-    return reverse_list
-
-
-def reverse_transform(pred, ori_label, transforms):
-    """recover pred to origin shape"""
-    reverse_list = get_reverse_list(ori_label, transforms)
-    for item in reverse_list[::-1]:
-        if item[0] == 'resize':
-            h, w = item[1][0], item[1][1]
-            pred = F.interpolate(pred, (h, w), mode='nearest')
-        elif item[0] == 'padding':
-            h, w = item[1][0], item[1][1]
-            pred = pred[:, :, 0:h, 0:w]
-        else:
-            raise Exception("Unexpected info '{}' in im_info".format(item[0]))
-    return pred
-
-
-def evaluate(model, eval_dataset=None, iter_id=None, num_workers=0):
+def evaluate(model,
+             eval_dataset,
+             aug_eval=False,
+             scales=1.0,
+             flip_horizontal=True,
+             flip_vertical=False,
+             is_slide=False,
+             stride=None,
+             crop_size=None,
+             num_workers=0):
     model.eval()
     nranks = paddle.distributed.ParallelEnv().nranks
     local_rank = paddle.distributed.ParallelEnv().local_rank
@@ -92,11 +62,28 @@
     for iter, (im, label) in enumerate(loader):
         label = label.astype('int64')
 
-        logits = model(im)
-        pred = logits[0]
-        pred = paddle.argmax(pred, axis=1, keepdim=True, dtype='int32')
-        pred = reverse_transform(pred, label,
-                                 eval_dataset.transforms.transforms)
+        ori_shape = label.shape[-2:]
+        if aug_eval:
+            pred = infer.aug_inference(
+                model,
+                im,
+                ori_shape=ori_shape,
+                transforms=eval_dataset.transforms.transforms,
+                scales=scales,
+                flip_horizontal=flip_horizontal,
+                flip_vertical=flip_vertical,
+                is_slide=is_slide,
+                stride=stride,
+                crop_size=crop_size)
+        else:
+            pred = infer.inference(
+                model,
+                im,
+                ori_shape=ori_shape,
+                transforms=eval_dataset.transforms.transforms,
+                is_slide=is_slide,
+                stride=stride,
+                crop_size=crop_size)
 
         intersect_area, pred_area, label_area = metrics.calculate_area(
             pred,
diff --git a/dygraph/predict.py b/dygraph/predict.py
index 792f6275d5..d262f04ad2 100644
--- a/dygraph/predict.py
+++ b/dygraph/predict.py
@@ -23,15 +23,15 @@
 
 
 def parse_args():
-    parser = argparse.ArgumentParser(description='Model evaluation')
+    parser = argparse.ArgumentParser(description='Model prediction')
 
-    # params of evaluate
+    # params of prediction
    parser.add_argument(
        "--config", dest="cfg", help="The config file.", default=None, type=str)
    parser.add_argument(
        '--model_path',
        dest='model_path',
-        help='The path of model for evaluation',
+        help='The path of model for prediction',
        type=str,
        default=None)
    parser.add_argument(
@@ -48,6 +48,53 @@ def parse_args():
         type=str,
         default='./output/result')
 
+    # augment for prediction
+    parser.add_argument(
+        '--aug_pred',
+        dest='aug_pred',
+        help='Whether to use multi-scale and flip augmentation for prediction',
+        action='store_true')
+    parser.add_argument(
+        '--scales',
+        dest='scales',
+        nargs='+',
+        help='Scales for augmentation',
+        type=float,
+        default=1.0)
+    parser.add_argument(
+        '--flip_horizontal',
+        dest='flip_horizontal',
+        help='Whether to use horizontal flip augmentation',
+        action='store_true')
+    parser.add_argument(
+        '--flip_vertical',
+        dest='flip_vertical',
+        help='Whether to use vertical flip augmentation',
+        action='store_true')
+
+    # sliding window prediction
+    parser.add_argument(
+        '--is_slide',
+        dest='is_slide',
+        help='Whether to predict by sliding window',
+        action='store_true')
+    parser.add_argument(
+        '--crop_size',
+        dest='crop_size',
+        nargs=2,
+        help=
+        'The crop size of sliding window, the first is width and the second is height.',
+        type=int,
+        default=None)
+    parser.add_argument(
+        '--stride',
+        dest='stride',
+        nargs=2,
+        help=
+        'The stride of sliding window, the first is width and the second is height.',
+        type=int,
+        default=None)
+
     return parser.parse_args()
 
 
@@ -109,7 +156,15 @@ def main(args):
         transforms=transforms,
         image_list=image_list,
         image_dir=image_dir,
-        save_dir=args.save_dir)
+        save_dir=args.save_dir,
+        aug_pred=args.aug_pred,
+        scales=args.scales,
+        flip_horizontal=args.flip_horizontal,
+        flip_vertical=args.flip_vertical,
+        is_slide=args.is_slide,
+        crop_size=args.crop_size,
+        stride=args.stride,
+    )
 
 
 if __name__ == '__main__':
diff --git a/dygraph/val.py b/dygraph/val.py
index a8a934dc53..9f02a0f52e 100644
--- a/dygraph/val.py
+++ b/dygraph/val.py
@@ -41,6 +41,53 @@ def parse_args():
         type=int,
         default=0)
 
+    # augment for evaluation
+    parser.add_argument(
+        '--aug_eval',
+        dest='aug_eval',
+        help='Whether to use multi-scale and flip augmentation for evaluation',
+        action='store_true')
+    parser.add_argument(
+        '--scales',
+        dest='scales',
+        nargs='+',
+        help='Scales for augmentation',
+        type=float,
+        default=1.0)
+    parser.add_argument(
+        '--flip_horizontal',
+        dest='flip_horizontal',
+        help='Whether to use horizontal flip augmentation',
+        action='store_true')
+    parser.add_argument(
+        '--flip_vertical',
+        dest='flip_vertical',
+        help='Whether to use vertical flip augmentation',
+        action='store_true')
+
+    # sliding window evaluation
+    parser.add_argument(
+        '--is_slide',
+        dest='is_slide',
+        help='Whether to evaluate by sliding window',
+        action='store_true')
+    parser.add_argument(
+        '--crop_size',
+        dest='crop_size',
+        nargs=2,
+        help=
+        'The crop size of sliding window, the first is width and the second is height.',
+        type=int,
+        default=None)
+    parser.add_argument(
+        '--stride',
+        dest='stride',
+        nargs=2,
+        help=
+        'The stride of sliding window, the first is width and the second is height.',
+        type=int,
+        default=None)
+
     return parser.parse_args()
 
 
@@ -70,7 +117,18 @@ def main(args):
     model.set_dict(para_state_dict)
     logger.info('Loaded trained params of model successfully')
 
-    evaluate(model, val_dataset, num_workers=args.num_workers)
+    evaluate(
+        model,
+        val_dataset,
+        aug_eval=args.aug_eval,
+        scales=args.scales,
+        flip_horizontal=args.flip_horizontal,
+        flip_vertical=args.flip_vertical,
+        is_slide=args.is_slide,
+        crop_size=args.crop_size,
+        stride=args.stride,
+        num_workers=args.num_workers,
+    )
 
 
 if __name__ == '__main__':

From 946ef0ff088fc7f7c2ca8a2cb12ccc4adbbb9434 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Fri, 13 Nov 2020 11:45:07 +0800
Subject: [PATCH 004/210] add result of hrnetw18 on voc

---
 dygraph/configs/fcn/README.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/dygraph/configs/fcn/README.md b/dygraph/configs/fcn/README.md
index 8fbe94e976..726cdd7ebd 100644
--- a/dygraph/configs/fcn/README.md
+++ b/dygraph/configs/fcn/README.md
@@ -11,3 +11,9 @@
 |:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
 |FCN|HRNet_W18|1024x512|80000|78.97%|79.49%|79.74%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw18_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw18_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=bebec8e1a3802c4babd3c69e1bf50d51)|
|FCN|HRNet_W48|1024x512|80000|80.70%|81.24%|81.56%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw48_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw48_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=ae1cb76014cdc54406c36f1e3dc2a530)| + +### Pascal VOC 2012 + Aug + +| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links | +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +|FCN|HRNet_W18|512x512|40000|75.39%|76.04%|77.09%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw18_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw18_voc12aug_512x512_40k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=fbe6caaca0f7d7ea1dba1c60b8db2a7e)| From 9bc06efeece9027bdde07d4471e4f5f433a88df6 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 13 Nov 2020 12:44:00 +0800 Subject: [PATCH 005/210] update README.md --- dygraph/configs/fcn/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dygraph/configs/fcn/README.md b/dygraph/configs/fcn/README.md index 726cdd7ebd..3546e95a21 100644 --- a/dygraph/configs/fcn/README.md +++ b/dygraph/configs/fcn/README.md @@ -16,4 +16,4 @@ | Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links | |:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| -|FCN|HRNet_W18|512x512|40000|75.39%|76.04%|77.09%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw18_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/fcn_hrnetw18_voc12aug_512x512_40k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=fbe6caaca0f7d7ea1dba1c60b8db2a7e)| +|FCN|HRNet_W18|512x512|40000|75.39%|76.04%|77.09%|[model](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/fcn_hrnetw18_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/fcn_hrnetw18_voc12aug_512x512_40k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=fbe6caaca0f7d7ea1dba1c60b8db2a7e)| From 3dd47736ec4b8af4ea48064351c8c08c7512b91e Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 16 Nov 2020 10:59:54 +0800 Subject: [PATCH 006/210] add results of hrnetw48 on voc12aug --- dygraph/configs/fcn/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/dygraph/configs/fcn/README.md b/dygraph/configs/fcn/README.md index 3546e95a21..8bb9786c53 100644 --- a/dygraph/configs/fcn/README.md +++ b/dygraph/configs/fcn/README.md @@ -17,3 +17,4 @@ | Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links | |:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| |FCN|HRNet_W18|512x512|40000|75.39%|76.04%|77.09%|[model](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/fcn_hrnetw18_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/fcn_hrnetw18_voc12aug_512x512_40k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=fbe6caaca0f7d7ea1dba1c60b8db2a7e)| +|FCN|HRNet_W48|512x512|40000|78.72%|79.52%|80.10%|[model](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/fcn_hrnetw48_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/fcn_hrnetw48_voc12aug_512x512_40k/train.log) \| 
[vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=20b404212fcbb5b7b329ab0c16124553)| From 3b474b7043ce48a221efa6b2571c352ccb002adf Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 17 Nov 2020 10:28:15 +0800 Subject: [PATCH 007/210] update progbar.py --- dygraph/paddleseg/utils/progbar.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/dygraph/paddleseg/utils/progbar.py b/dygraph/paddleseg/utils/progbar.py index 26a1753a3f..563cc5ebae 100644 --- a/dygraph/paddleseg/utils/progbar.py +++ b/dygraph/paddleseg/utils/progbar.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import sys import time @@ -22,7 +23,7 @@ class Progbar(object): """ Displays a progress bar. It refers to https://github.com/keras-team/keras/blob/keras-2/keras/utils/generic_utils.py - + Args: target (int): Total number of steps expected, None if unknown. width (int): Progress bar width on screen. @@ -51,8 +52,8 @@ def __init__(self, else: self.stateful_metrics = set() - self._dynamic_display = ((hasattr(sys.stdout, 'isatty') - and sys.stdout.isatty()) + self._dynamic_display = ((hasattr(sys.stderr, 'isatty') + and sys.stderr.isatty()) or 'ipykernel' in sys.modules or 'posix' in sys.modules or 'PYCHARM_HOSTED' in os.environ) @@ -114,10 +115,10 @@ def update(self, current, values=None, finalize=None): prev_total_width = self._total_width if self._dynamic_display: - sys.stdout.write('\b' * prev_total_width) - sys.stdout.write('\r') + sys.stderr.write('\b' * prev_total_width) + sys.stderr.write('\r') else: - sys.stdout.write('\n') + sys.stderr.write('\n') if self.target is not None: numdigits = int(np.log10(self.target)) + 1 @@ -136,7 +137,7 @@ def update(self, current, values=None, finalize=None): bar = '%7d/Unknown' % current self._total_width = len(bar) - sys.stdout.write(bar) + sys.stderr.write(bar) if current: time_per_unit = (now - self._start) / current @@ -181,8 +182,8 @@ def update(self, current, values=None, finalize=None): if finalize: info += '\n' - sys.stdout.write(info) - sys.stdout.flush() + sys.stderr.write(info) + sys.stderr.flush() elif self.verbose == 2: if finalize: @@ -199,8 +200,8 @@ def update(self, current, values=None, finalize=None): info += ' %.4e' % avg info += '\n' - sys.stdout.write(info) - sys.stdout.flush() + sys.stderr.write(info) + sys.stderr.flush() self._last_update = now From c61c178590183098e1f49a122492b1d0cc5584ae Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Thu, 19 Nov 2020 17:06:42 +0800 Subject: [PATCH 008/210] update BatchNorm to BatchNorm2D --- dygraph/paddleseg/models/layers/layer_libs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dygraph/paddleseg/models/layers/layer_libs.py b/dygraph/paddleseg/models/layers/layer_libs.py index dae6a26f54..1ce869f5e2 100644 --- a/dygraph/paddleseg/models/layers/layer_libs.py +++ b/dygraph/paddleseg/models/layers/layer_libs.py @@ -18,9 +18,9 @@ def SyncBatchNorm(*args, **kwargs): - """In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm instead""" + """In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm2D instead""" if paddle.get_device() == 'cpu': - return nn.BatchNorm(*args, **kwargs) + return nn.BatchNorm2D(*args, **kwargs) else: return nn.SyncBatchNorm(*args, **kwargs) From 6eee23fc53a28604081d0bae726a11f00a492f58 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 20 Nov 2020 11:53:00 +0800 Subject: [PATCH 009/210] 
add ms_flip and sliding window docs

---
 dygraph/docs/quick_start.md | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/dygraph/docs/quick_start.md b/dygraph/docs/quick_start.md
index dc529a7ce6..94f9275638 100644
--- a/dygraph/docs/quick_start.md
+++ b/dygraph/docs/quick_start.md
@@ -93,8 +93,29 @@ python val.py \
     --model_path output/iter_1000/model.pdparams
 ```
 
+To run multi-scale and flip evaluation, enable it by passing `--aug_eval`, then pass the scales with `--scales`,
+enable horizontal flipping with `--flip_horizontal` and vertical flipping with `--flip_vertical`. For example:
+```shell
+python val.py \
+    --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml \
+    --model_path output/iter_1000/model.pdparams 、
+    --aug_eval \
+    --scales 0.75 1.0 1.25 \
+    --flip_horizontal
+```
+
+To run sliding-window evaluation, enable it by passing `--is_slide`, set the window size with `--crop_size` and the step size with `--stride`. For example:
+```shell
+python val.py \
+    --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml \
+    --model_path output/iter_1000/model.pdparams 、
+    --is_slide \
+    --crop_size 256 256 \
+    --stride 128 128
+```
+
 ## Visualizing the results
 
-After the model is saved, you can visualize the model's predictions with the script provided by PaddleSeg and check the segmentation quality
+After the model is saved, you can visualize the model's predictions with the script provided by PaddleSeg and check the segmentation quality.
 ```shell
 python predict.py \
     --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml \
@@ -105,3 +126,5 @@ python predict.py \
 `image_path` can also be a directory, in which case all images in the directory are predicted and the visualized results are saved. The result looks like this:
 
 ![](images/quick_start_predict.jpg)
+Similarly, multi-scale and flip prediction can be enabled with `--aug_pred`, and sliding-window prediction with `--is_slide`.
+

From de7efab04267155b760cf015946e91da31889696 Mon Sep 17 00:00:00 2001
From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com>
Date: Fri, 20 Nov 2020 11:54:35 +0800
Subject: [PATCH 010/210] Update quick_start.md

---
 dygraph/docs/quick_start.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dygraph/docs/quick_start.md b/dygraph/docs/quick_start.md
index 94f9275638..2696ac93fb 100644
--- a/dygraph/docs/quick_start.md
+++ b/dygraph/docs/quick_start.md
@@ -98,7 +98,7 @@ python val.py \
 ```shell
 python val.py \
     --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml \
-    --model_path output/iter_1000/model.pdparams 、
+    --model_path output/iter_1000/model.pdparams \
     --aug_eval \
     --scales 0.75 1.0 1.25 \
     --flip_horizontal
@@ -108,7 +108,7 @@ python val.py \
 ```shell
 python val.py \
     --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml \
-    --model_path output/iter_1000/model.pdparams 、
+    --model_path output/iter_1000/model.pdparams \
     --is_slide \
     --crop_size 256 256 \
     --stride 128 128

From 57db636a051fad43dd0a1441a3f648ff6419324b Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Fri, 20 Nov 2020 14:34:11 +0800
Subject: [PATCH 011/210] update hrnet.py

---
 dygraph/paddleseg/models/backbones/hrnet.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dygraph/paddleseg/models/backbones/hrnet.py b/dygraph/paddleseg/models/backbones/hrnet.py
index dd6e374a9c..2f6ecadf93 100644
--- a/dygraph/paddleseg/models/backbones/hrnet.py
+++ b/dygraph/paddleseg/models/backbones/hrnet.py
@@ -658,10 +658,10 @@ def HRNet_W18_Small_V2(**kwargs):
         stage2_num_modules=1,
         stage2_num_blocks=[2, 2],
         stage2_num_channels=[18, 36],
-        stage3_num_modules=1,
+        stage3_num_modules=3,
         stage3_num_blocks=[2, 2, 2],
         stage3_num_channels=[18, 36, 72],
-        stage4_num_modules=1,
+        stage4_num_modules=2,
         stage4_num_blocks=[2, 2, 2, 2],
         stage4_num_channels=[18, 36, 72, 144],
         **kwargs)

From 890f1328695e90aa3e0ba1c2d9191987cdaf93fc Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Mon, 23 Nov 2020 11:03:24 +0800
Subject: [PATCH 012/210] limit num_workers for evaluation while training

---
dygraph/paddleseg/core/train.py | 1 + dygraph/paddleseg/core/val.py | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/dygraph/paddleseg/core/train.py b/dygraph/paddleseg/core/train.py index f0b4179abe..b9d4030a9d 100644 --- a/dygraph/paddleseg/core/train.py +++ b/dygraph/paddleseg/core/train.py @@ -151,6 +151,7 @@ def train(model, if (iter % save_interval == 0 or iter == iters) and (val_dataset is not None): + num_workers = 1 if num_workers > 0 else 0 mean_iou, acc = evaluate( model, val_dataset, num_workers=num_workers) model.train() diff --git a/dygraph/paddleseg/core/val.py b/dygraph/paddleseg/core/val.py index 5455038912..11f14cbc51 100644 --- a/dygraph/paddleseg/core/val.py +++ b/dygraph/paddleseg/core/val.py @@ -59,7 +59,9 @@ def evaluate(model, logger.info("Start evaluating (total_samples={}, total_iters={})...".format( len(eval_dataset), total_iters)) progbar_val = progbar.Progbar(target=total_iters, verbose=1) + timer = Timer() for iter, (im, label) in enumerate(loader): + reader_cost = timer.elapsed_time() label = label.astype('int64') ori_shape = label.shape[-2:] @@ -115,9 +117,12 @@ def evaluate(model, intersect_area_all = intersect_area_all + intersect_area pred_area_all = pred_area_all + pred_area label_area_all = label_area_all + label_area + batch_cost = timer.elapsed_time() + timer.restart() if local_rank == 0: - progbar_val.update(iter + 1) + progbar_val.update(iter + 1, [('batch_cost', batch_cost), + ('reader cost', reader_cost)]) class_iou, miou = metrics.mean_iou(intersect_area_all, pred_area_all, label_area_all) From ed282423244528130b56fea72be9c71eedae45f8 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 23 Nov 2020 11:43:26 +0800 Subject: [PATCH 013/210] update hrnet.py --- dygraph/paddleseg/models/backbones/hrnet.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/dygraph/paddleseg/models/backbones/hrnet.py b/dygraph/paddleseg/models/backbones/hrnet.py index 2f6ecadf93..5de97497e5 100644 --- a/dygraph/paddleseg/models/backbones/hrnet.py +++ b/dygraph/paddleseg/models/backbones/hrnet.py @@ -434,7 +434,7 @@ class SELayer(nn.Layer): def __init__(self, num_channels, num_filters, reduction_ratio, name=None): super(SELayer, self).__init__() - self.pool2d_gap = nn.AdaptiveAvgPool2d(1) + self.pool2d_gap = nn.AdaptiveAvgPool2D(1) self._num_channels = num_channels @@ -443,23 +443,23 @@ def __init__(self, num_channels, num_filters, reduction_ratio, name=None): self.squeeze = nn.Linear( num_channels, med_ch, - act="relu", - param_attr=paddle.ParamAttr( + weight_attr=paddle.ParamAttr( initializer=nn.initializer.Uniform(-stdv, stdv))) stdv = 1.0 / math.sqrt(med_ch * 1.0) self.excitation = nn.Linear( med_ch, num_filters, - act="sigmoid", - param_attr=paddle.ParamAttr( + weight_attr=paddle.ParamAttr( initializer=nn.initializer.Uniform(-stdv, stdv))) def forward(self, x): pool = self.pool2d_gap(x) pool = paddle.reshape(pool, shape=[-1, self._num_channels]) squeeze = self.squeeze(pool) + squeeze = F.relu(squeeze) excitation = self.excitation(squeeze) + excitation = F.sigmoid(excitation) excitation = paddle.reshape( excitation, shape=[-1, self._num_channels, 1, 1]) out = x * excitation From 9f78ca4f266f532ce0315b84344dc2fe4c21a726 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 24 Nov 2020 16:16:33 +0800 Subject: [PATCH 014/210] rm name argument --- .../paddleseg/models/backbones/resnet_vd.py | 45 ++++++------------- 1 file changed, 13 insertions(+), 32 deletions(-) diff --git 
a/dygraph/paddleseg/models/backbones/resnet_vd.py b/dygraph/paddleseg/models/backbones/resnet_vd.py index 3d953c2f62..36673e1ad2 100644 --- a/dygraph/paddleseg/models/backbones/resnet_vd.py +++ b/dygraph/paddleseg/models/backbones/resnet_vd.py @@ -36,7 +36,6 @@ def __init__( groups=1, is_vd_mode=False, act=None, - name=None, ): super(ConvBNLayer, self).__init__() @@ -73,16 +72,14 @@ def __init__(self, stride, shortcut=True, if_first=False, - dilation=1, - name=None): + dilation=1): super(BottleneckBlock, self).__init__() self.conv0 = ConvBNLayer( in_channels=in_channels, out_channels=out_channels, kernel_size=1, - act='relu', - name=name + "_branch2a") + act='relu') self.dilation = dilation @@ -92,14 +89,12 @@ def __init__(self, kernel_size=3, stride=stride, act='relu', - dilation=dilation, - name=name + "_branch2b") + dilation=dilation) self.conv2 = ConvBNLayer( in_channels=out_channels, out_channels=out_channels * 4, kernel_size=1, - act=None, - name=name + "_branch2c") + act=None) if not shortcut: self.short = ConvBNLayer( @@ -107,8 +102,7 @@ def __init__(self, out_channels=out_channels * 4, kernel_size=1, stride=1, - is_vd_mode=False if if_first or stride == 1 else True, - name=name + "_branch1") + is_vd_mode=False if if_first or stride == 1 else True) self.shortcut = shortcut @@ -142,8 +136,7 @@ def __init__(self, out_channels, stride, shortcut=True, - if_first=False, - name=None): + if_first=False): super(BasicBlock, self).__init__() self.stride = stride self.conv0 = ConvBNLayer( @@ -151,14 +144,12 @@ def __init__(self, out_channels=out_channels, kernel_size=3, stride=stride, - act='relu', - name=name + "_branch2a") + act='relu') self.conv1 = ConvBNLayer( in_channels=out_channels, out_channels=out_channels, kernel_size=3, - act=None, - name=name + "_branch2b") + act=None) if not shortcut: self.short = ConvBNLayer( @@ -166,8 +157,7 @@ def __init__(self, out_channels=out_channels, kernel_size=1, stride=1, - is_vd_mode=False if if_first else True, - name=name + "_branch1") + is_vd_mode=False if if_first else True) self.shortcut = shortcut @@ -225,26 +215,19 @@ def __init__(self, dilation_dict = {3: 2} self.conv1_1 = ConvBNLayer( - in_channels=3, - out_channels=32, - kernel_size=3, - stride=2, - act='relu', - name="conv1_1") + in_channels=3, out_channels=32, kernel_size=3, stride=2, act='relu') self.conv1_2 = ConvBNLayer( in_channels=32, out_channels=32, kernel_size=3, stride=1, - act='relu', - name="conv1_2") + act='relu') self.conv1_3 = ConvBNLayer( in_channels=32, out_channels=64, kernel_size=3, stride=1, - act='relu', - name="conv1_3") + act='relu') self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) # self.block_list = [] @@ -283,7 +266,6 @@ def __init__(self, and dilation_rate == 1 else 1, shortcut=shortcut, if_first=block == i == 0, - name=conv_name, dilation=dilation_rate)) block_list.append(bottleneck_block) @@ -303,8 +285,7 @@ def __init__(self, out_channels=num_filters[block], stride=2 if i == 0 and block != 0 else 1, shortcut=shortcut, - if_first=block == i == 0, - name=conv_name)) + if_first=block == i == 0)) block_list.append(basic_block) shortcut = True self.stage_list.append(block_list) From 3c18d6d6d314b33ca971a592ba95f19e14e76e2e Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 24 Nov 2020 16:30:28 +0800 Subject: [PATCH 015/210] update resnet_vd.py --- dygraph/paddleseg/models/backbones/resnet_vd.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dygraph/paddleseg/models/backbones/resnet_vd.py 
b/dygraph/paddleseg/models/backbones/resnet_vd.py
index 36673e1ad2..64a01842be 100644
--- a/dygraph/paddleseg/models/backbones/resnet_vd.py
+++ b/dygraph/paddleseg/models/backbones/resnet_vd.py
@@ -169,7 +169,8 @@ def forward(self, inputs):
             short = inputs
         else:
             short = self.short(inputs)
-        y = paddle.elementwise_add(x=short, y=conv1, act='relu')
+        y = paddle.add(x=short, y=conv1)
+        y = F.relu(y)
         return y
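Patches 013 and 015 above follow the same Paddle 2.0 migration pattern: layers such as `nn.Linear` no longer take an `act=...` argument (and `param_attr` has become `weight_attr`), and fused ops like `paddle.elementwise_add(..., act='relu')` are gone, so activations are now applied explicitly through `paddle.nn.functional`. A small self-contained illustration of the pattern, not code taken from the repository:

```python
import paddle
import paddle.nn.functional as F

short = paddle.randn([1, 64, 56, 56])
conv1 = paddle.randn([1, 64, 56, 56])

# Paddle 1.x style (removed): paddle.elementwise_add(x=short, y=conv1, act='relu')
y = F.relu(paddle.add(short, conv1))  # add first, then activate explicitly

linear = paddle.nn.Linear(64, 16)     # the 2.0 Linear has no `act` parameter
z = F.sigmoid(linear(paddle.randn([8, 64])))
```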
From af249ac25c3052ae4e0a7228ab7bcc8947cd9878 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Mon, 30 Nov 2020 18:08:23 +0800
Subject: [PATCH 016/210] add gcnet and ocrnet result of voc12

---
 .../fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml |  6 +++++
 dygraph/configs/gcnet/README.md               |  5 ++++
 ...net_resnet101_os8_voc12aug_512x512_40k.yml |  6 +++++
 ...cnet_resnet50_os8_voc12aug_512x512_40k.yml | 22 ++++++++++++++++
 dygraph/configs/ocrnet/README.md              | 11 ++++++--
 .../ocrnet_hrnetw18_voc12aug_512x512_40k.yml  | 25 +++++++++++++++++++
 .../ocrnet_hrnetw48_voc12aug_512x512_40k.yml  |  6 +++++
 7 files changed, 79 insertions(+), 2 deletions(-)
 create mode 100644 dygraph/configs/fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml
 create mode 100644 dygraph/configs/gcnet/gcnet_resnet101_os8_voc12aug_512x512_40k.yml
 create mode 100644 dygraph/configs/gcnet/gcnet_resnet50_os8_voc12aug_512x512_40k.yml
 create mode 100644 dygraph/configs/ocrnet/ocrnet_hrnetw18_voc12aug_512x512_40k.yml
 create mode 100644 dygraph/configs/ocrnet/ocrnet_hrnetw48_voc12aug_512x512_40k.yml

diff --git a/dygraph/configs/fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml b/dygraph/configs/fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml
new file mode 100644
index 0000000000..9315453f11
--- /dev/null
+++ b/dygraph/configs/fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml
@@ -0,0 +1,6 @@
+_base_: './fcn_hrnetw18_voc12aug_512x512_40k.yml'
+
+model:
+  backbone:
+    type: HRNet_W48
+    pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz
diff --git a/dygraph/configs/gcnet/README.md b/dygraph/configs/gcnet/README.md
index 06899ed741..ee49e31d2a 100644
--- a/dygraph/configs/gcnet/README.md
+++ b/dygraph/configs/gcnet/README.md
@@ -12,3 +12,8 @@
 |:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
 |GCNet|ResNet50_OS8|1024x512|80000|79.50%|79.77%|79.69%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=e3801edb9a6f5b33eb890f5a1ae6ed7b)|
 |GCNet|ResNet101_OS8|1024x512|80000|81.01%|81.30%|81.64%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet101_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gcnet_resnet101_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://www.paddlepaddle.org.cn/paddle/visualdl/service/app/scalar?id=aa88e7980f4d6839537662a3a3d18851)|
+
+### Pascal VOC 2012 + Aug
+
+|GCNet|ResNet50_OS8|512x512|40000|80.32%|80.39%|80.54%|[model](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/gcnet_renet50_os8_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/gcnet_renet50_os8_voc12aug_512x512_40k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=86cbaac3fe98fdbb635e246c2c02e87b)|
+|GCNet|ResNet101_OS8|512x512|40000|79.64%|79.59%|79.94%|[model](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/gcnet_renet101_os8_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/gcnet_renet101_os8_voc12aug_512x512_40k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=73f0484b034f6c27bf481c7a3b05e9ae)|
diff --git a/dygraph/configs/gcnet/gcnet_resnet101_os8_voc12aug_512x512_40k.yml b/dygraph/configs/gcnet/gcnet_resnet101_os8_voc12aug_512x512_40k.yml
new file mode 100644
index 0000000000..8411307f53
--- /dev/null
+++ b/dygraph/configs/gcnet/gcnet_resnet101_os8_voc12aug_512x512_40k.yml
@@ -0,0 +1,6 @@
+_base_: './gcnet_resnet50_os8_voc12aug_512x512_40k.yml'
+
+model:
+  backbone:
+    type: ResNet101_vd
+    pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet101_vd_ssld.tar.gz
diff --git a/dygraph/configs/gcnet/gcnet_resnet50_os8_voc12aug_512x512_40k.yml b/dygraph/configs/gcnet/gcnet_resnet50_os8_voc12aug_512x512_40k.yml
new file mode 100644
index 0000000000..912de526e6
--- /dev/null
+++ b/dygraph/configs/gcnet/gcnet_resnet50_os8_voc12aug_512x512_40k.yml
@@ -0,0 +1,22 @@
+_base_: '../_base_/pascal_voc12aug.yml'
+
+learning_rate:
+  decay:
+    end_lr: 1.0e-5
+
+loss:
+  types:
+    - type: CrossEntropyLoss
+  coef: [1, 0.4]
+
+model:
+  type: GCNet
+  backbone:
+    type: ResNet50_vd
+    output_stride: 8
+    pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
+  gc_channels: 512
+  ratio: 0.25
+  enable_auxiliary_loss: True
+  align_corners: False
+  pretrained: null
diff --git a/dygraph/configs/ocrnet/README.md b/dygraph/configs/ocrnet/README.md
index 51a224c616..1fbac14b52 100644
--- a/dygraph/configs/ocrnet/README.md
+++ b/dygraph/configs/ocrnet/README.md
@@ -10,5 +10,12 @@
 | Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links |
 |:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
-|OCRNet|HRNet_w18|1024x512|160000|80.67%|81.21%|81.30|[model](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw18_cityscapes_1024x512_160k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw18_cityscapes_1024x512_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=901a5d0a78b71ca56f06002f05547837)|
-|OCRNet|HRNet_w48|1024x512|160000|82.15%|82.59%|82.85|[model](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw48_cityscapes_1024x512_160k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw48_cityscapes_1024x512_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=176bf6ca4d89957ffe62ac7c30fcd039) |
+|OCRNet|HRNet_w18|1024x512|160000|80.67%|81.21%|81.30%|[model](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw18_cityscapes_1024x512_160k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw18_cityscapes_1024x512_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=901a5d0a78b71ca56f06002f05547837)|
+|OCRNet|HRNet_w48|1024x512|160000|82.15%|82.59%|82.85%|[model](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw48_cityscapes_1024x512_160k/model.pdparams) \| [log](https://paddleseg.bj.bcebos.com/dygraph/ocrnet_hrnetw48_cityscapes_1024x512_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=176bf6ca4d89957ffe62ac7c30fcd039) |
+
+### Pascal VOC 2012 + Aug
+
+| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links |
+|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
+|OCRNet|HRNet_w18|512x512|40000|75.76%|76.39%|77.95%|[model](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/ocrnet_hrnetw18_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/ocrnet_hrnetw18_voc12aug_512x512_40k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=74707b83bc14b7d236146ac4ceaf6c9c)|
+|OCRNet|HRNet_w48|512x512|40000|79.98%|80.47%|81.02%|[model](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/ocrnet_hrnetw48_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/ocrnet_hrnetw48_voc12aug_512x512_40k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=8f695743c799f8966a72973f3259fad4) |
diff --git a/dygraph/configs/ocrnet/ocrnet_hrnetw18_voc12aug_512x512_40k.yml b/dygraph/configs/ocrnet/ocrnet_hrnetw18_voc12aug_512x512_40k.yml
new file mode 100644
index 0000000000..f625bcc028
--- /dev/null
+++ b/dygraph/configs/ocrnet/ocrnet_hrnetw18_voc12aug_512x512_40k.yml
@@ -0,0 +1,25 @@
+_base_: '../_base_/pascal_voc12aug.yml'
+
+model:
+  type: OCRNet
+  backbone:
+    type: HRNet_W18
+    pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz
+  num_classes: 19
+  backbone_indices: [0]
+
+optimizer:
+  type: sgd
+
+learning_rate:
+  value: 0.01
+  decay:
+    type: poly
+    power: 0.9
+
+
+loss:
+  types:
+    - type: CrossEntropyLoss
+    - type: CrossEntropyLoss
+  coef: [1, 1]
diff --git a/dygraph/configs/ocrnet/ocrnet_hrnetw48_voc12aug_512x512_40k.yml b/dygraph/configs/ocrnet/ocrnet_hrnetw48_voc12aug_512x512_40k.yml
new file mode 100644
index 0000000000..2599f5d338
--- /dev/null
+++ b/dygraph/configs/ocrnet/ocrnet_hrnetw48_voc12aug_512x512_40k.yml
@@ -0,0 +1,6 @@
+_base_: './ocrnet_hrnetw18_voc12aug_512x512_40k.yml'
+
+model:
+  backbone:
+    type: HRNet_W48
+    pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz
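The `mIoU (ms+flip)` columns in these tables correspond to the multi-scale and flip evaluation documented in quick_start.md earlier in this series (`--aug_eval` with `--scales` and `--flip_horizontal`); sliding-window evaluation (`--is_slide`) aggregates in a similar way. A minimal sketch of the sliding-window idea — predict overlapping windows separately and average their logits — assuming only a `predict` callable that maps an `(N, C, h, w)` array to per-class logits (names are illustrative, not PaddleSeg's exact API):

```python
import numpy as np

def slide_inference(predict, image, crop_size, stride, num_classes):
    """Average logits over overlapping windows, in the spirit of --is_slide."""
    n, c, H, W = image.shape
    cw, ch = crop_size  # e.g. --crop_size 256 256
    sw, sh = stride     # e.g. --stride 128 128
    logits = np.zeros((n, num_classes, H, W), dtype=np.float32)
    counts = np.zeros((1, 1, H, W), dtype=np.float32)
    rows = max(int(np.ceil((H - ch) / sh)) + 1, 1)
    cols = max(int(np.ceil((W - cw) / sw)) + 1, 1)
    for r in range(rows):
        for k in range(cols):
            y1 = min(r * sh, max(H - ch, 0))   # clamp the last row/column so
            x1 = min(k * sw, max(W - cw, 0))   # windows never leave the image
            y2, x2 = min(y1 + ch, H), min(x1 + cw, W)
            logits[:, :, y1:y2, x1:x2] += predict(image[:, :, y1:y2, x1:x2])
            counts[:, :, y1:y2, x1:x2] += 1
    return logits / counts  # every pixel is covered by at least one window
```

Averaging logits rather than hard labels keeps predictions smooth across window seams.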
From f0a4ce9031bebf867a7c02e69172d48b3e9e8c53 Mon Sep 17 00:00:00 2001
From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com>
Date: Mon, 30 Nov 2020 19:36:24 +0800
Subject: [PATCH 017/210] Update README.md

---
 dygraph/configs/gcnet/README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/dygraph/configs/gcnet/README.md b/dygraph/configs/gcnet/README.md
index ee49e31d2a..e330abdc6a 100644
--- a/dygraph/configs/gcnet/README.md
+++ b/dygraph/configs/gcnet/README.md
@@ -15,5 +15,7 @@
 ### Pascal VOC 2012 + Aug
 
+| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links |
+|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
 |GCNet|ResNet50_OS8|512x512|40000|80.32%|80.39%|80.54%|[model](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/gcnet_renet50_os8_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/gcnet_renet50_os8_voc12aug_512x512_40k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=86cbaac3fe98fdbb635e246c2c02e87b)|
 |GCNet|ResNet101_OS8|512x512|40000|79.64%|79.59%|79.94%|[model](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/gcnet_renet101_os8_voc12aug_512x512_40k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pascal_voc12/gcnet_renet101_os8_voc12aug_512x512_40k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=73f0484b034f6c27bf481c7a3b05e9ae)|

From 96f5777670bb22524c90c975b28d2583060b484f Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Fri, 4 Dec 2020 11:05:40 +0800
Subject: [PATCH 018/210] make dygraph as home page

---
 README.md | 122 +++-----
 README_CN.md | 260 ++++-------------
 .../benchmark => benchmark}/deeplabv3p.yml | 0
 {dygraph/benchmark => benchmark}/hrnet.yml | 0
 {dygraph/configs =>
configs}/README.md | 0 .../configs => configs}/_base_/ade20k.yml | 0 .../configs => configs}/_base_/cityscapes.yml | 0 .../_base_/cityscapes_1024x1024.yml | 0 .../_base_/cityscapes_769x769.yml | 0 .../_base_/pascal_voc12.yml | 0 .../_base_/pascal_voc12aug.yml | 0 {dygraph/configs => configs}/ann/README.md | 0 ..._resnet101_os8_cityscapes_1024x512_80k.yml | 0 ...ann_resnet101_os8_voc12aug_512x512_40k.yml | 0 ...n_resnet50_os8_cityscapes_1024x512_80k.yml | 0 .../ann_resnet50_os8_voc12aug_512x512_40k.yml | 0 .../configs => configs}/bisenet/README.md | 0 .../bisenet_cityscapes_1024x1024_160k.yml | 0 {dygraph/configs => configs}/danet/README.md | 0 ..._resnet101_os8_cityscapes_1024x512_80k.yml | 0 ...t_resnet50_os8_cityscapes_1024x512_80k.yml | 0 .../configs => configs}/deeplabv3/README.md | 0 ..._resnet101_os8_cityscapes_1024x512_80k.yml | 0 ...bv3_resnet101_os8_voc12aug_512x512_40k.yml | 0 ...3_resnet50_os8_cityscapes_1024x512_80k.yml | 0 ...abv3_resnet50_os8_voc12aug_512x512_40k.yml | 0 .../configs => configs}/deeplabv3p/README.md | 0 ..._resnet101_os8_cityscapes_1024x512_80k.yml | 0 ...p_resnet101_os8_cityscapes_769x769_80k.yml | 0 ...v3p_resnet101_os8_voc12aug_512x512_40k.yml | 0 ...p_resnet50_os8_cityscapes_1024x512_80k.yml | 0 ...bv3p_resnet50_os8_voc12aug_512x512_40k.yml | 0 .../configs => configs}/fastscnn/README.md | 0 .../fastscnn_cityscapes_1024x1024_160k.yml | 0 {dygraph/configs => configs}/fcn/README.md | 0 .../fcn_hrnetw18_cityscapes_1024x512_80k.yml | 0 .../fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml | 0 .../fcn_hrnetw48_cityscapes_1024x512_80k.yml | 0 .../fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml | 0 {dygraph/configs => configs}/gcnet/README.md | 0 ..._resnet101_os8_cityscapes_1024x512_80k.yml | 0 ...net_resnet101_os8_voc12aug_512x512_40k.yml | 0 ...t_resnet50_os8_cityscapes_1024x512_80k.yml | 0 ...cnet_resnet50_os8_voc12aug_512x512_40k.yml | 0 {dygraph/configs => configs}/ocrnet/README.md | 0 ...rnet_hrnetw18_cityscapes_1024x512_160k.yml | 0 .../ocrnet_hrnetw18_voc12aug_512x512_40k.yml | 0 ...rnet_hrnetw48_cityscapes_1024x512_160k.yml | 0 .../ocrnet_hrnetw48_voc12aug_512x512_40k.yml | 0 {dygraph/configs => configs}/pspnet/README.md | 0 ..._resnet101_os8_cityscapes_1024x512_80k.yml | 0 ...net_resnet101_os8_voc12aug_512x512_40k.yml | 0 ...t_resnet50_os8_cityscapes_1024x512_80k.yml | 0 ...pnet_resnet50_os8_voc12aug_512x512_40k.yml | 0 .../bisenet_optic_disc_512x512_1k.yml | 0 {dygraph/configs => configs}/unet/README.md | 0 .../unet/unet_cityscapes_1024x512_160k.yml | 0 .../remote_sensing/README.md | 0 .../fcn_hrnetw48_ccf_256x256_160k.yml | 0 .../ocrnet_hrnetw48_ccf_256x256_80k.yml | 0 {dygraph/docs => docs}/add_new_model.md | 0 {dygraph/docs => docs}/apis/README.md | 0 {dygraph/docs => docs}/apis/backbones.md | 0 {dygraph/docs => docs}/apis/core.md | 0 {dygraph/docs => docs}/apis/cvlibs.md | 0 {dygraph/docs => docs}/apis/datasets.md | 0 {dygraph/docs => docs}/apis/models.md | 0 {dygraph/docs => docs}/apis/transforms.md | 0 {dygraph/docs => docs}/apis/utils.md | 0 docs/data_prepare.md | 275 +++++++----------- .../images/quick_start_predict.jpg | Bin .../docs => docs}/images/quick_start_vdl.jpg | Bin {dygraph/docs => docs}/quick_start.md | 0 dygraph/README.md | 85 ------ dygraph/README_CN.md | 74 ----- dygraph/docs/data_prepare.md | 100 ------- legacy/README.md | 115 ++++++++ legacy/README_CN.md | 230 +++++++++++++++ .../configs}/cityscape_fast_scnn.yaml | 0 .../configs}/deepglobe_road_extraction.yaml | 0 .../deeplabv3p_mobilenet-1-0_pet.yaml | 0 
.../deeplabv3p_mobilenetv2_cityscapes.yaml | 0 ...eplabv3p_mobilenetv3_large_cityscapes.yaml | 0 .../deeplabv3p_resnet50_vd_cityscapes.yaml | 0 .../deeplabv3p_xception65_cityscapes.yaml | 0 .../configs}/deeplabv3p_xception65_optic.yaml | 0 .../configs}/fast_scnn_pet.yaml | 0 {configs => legacy/configs}/hrnet_optic.yaml | 0 {configs => legacy/configs}/icnet_optic.yaml | 0 ...ovasz_hinge_deeplabv3p_mobilenet_road.yaml | 0 ...z_softmax_deeplabv3p_mobilenet_pascal.yaml | 0 .../configs}/ocrnet_w18_bn_cityscapes.yaml | 0 {configs => legacy/configs}/pspnet_optic.yaml | 0 {configs => legacy/configs}/unet_optic.yaml | 0 {contrib => legacy/contrib}/ACE2P/README.md | 0 {contrib => legacy/contrib}/ACE2P/__init__.py | 0 {contrib => legacy/contrib}/ACE2P/config.py | 0 .../contrib}/ACE2P/download_ACE2P.py | 0 .../contrib}/ACE2P/imgs/117676_2149260.jpg | Bin .../contrib}/ACE2P/imgs/117676_2149260.png | Bin .../contrib}/ACE2P/imgs/net.jpg | Bin .../contrib}/ACE2P/imgs/result.jpg | Bin {contrib => legacy/contrib}/ACE2P/infer.py | 0 {contrib => legacy/contrib}/ACE2P/reader.py | 0 .../contrib}/ACE2P/utils/__init__.py | 0 .../contrib}/ACE2P/utils/palette.py | 0 .../contrib}/ACE2P/utils/util.py | 0 .../contrib}/HumanSeg/README.md | 0 .../contrib}/HumanSeg/bg_replace.py | 0 .../contrib}/HumanSeg/data/background.jpg | Bin .../contrib}/HumanSeg/data/download_data.py | 0 .../contrib}/HumanSeg/data/human_image.jpg | Bin .../contrib}/HumanSeg/datasets/__init__.py | 0 .../contrib}/HumanSeg/datasets/dataset.py | 0 .../datasets/shared_queue/__init__.py | 0 .../HumanSeg/datasets/shared_queue/queue.py | 0 .../datasets/shared_queue/sharedmemory.py | 0 .../contrib}/HumanSeg/export.py | 0 {contrib => legacy/contrib}/HumanSeg/infer.py | 0 .../contrib}/HumanSeg/models/__init__.py | 0 .../contrib}/HumanSeg/models/humanseg.py | 0 .../contrib}/HumanSeg/models/load_model.py | 0 .../contrib}/HumanSeg/nets/__init__.py | 0 .../HumanSeg/nets/backbone/__init__.py | 0 .../HumanSeg/nets/backbone/mobilenet_v2.py | 0 .../HumanSeg/nets/backbone/xception.py | 0 .../contrib}/HumanSeg/nets/deeplabv3p.py | 0 .../contrib}/HumanSeg/nets/hrnet.py | 0 .../contrib}/HumanSeg/nets/libs.py | 0 .../contrib}/HumanSeg/nets/seg_modules.py | 0 .../contrib}/HumanSeg/nets/shufflenet_slim.py | 0 .../download_pretrained_weights.py | 0 .../contrib}/HumanSeg/quant_offline.py | 0 .../contrib}/HumanSeg/quant_online.py | 0 .../contrib}/HumanSeg/requirements.txt | 0 {contrib => legacy/contrib}/HumanSeg/train.py | 0 .../contrib}/HumanSeg/transforms/__init__.py | 0 .../HumanSeg/transforms/functional.py | 0 .../HumanSeg/transforms/transforms.py | 0 .../contrib}/HumanSeg/utils/__init__.py | 0 .../HumanSeg/utils/humanseg_postprocess.py | 0 .../contrib}/HumanSeg/utils/logging.py | 0 .../contrib}/HumanSeg/utils/metrics.py | 0 .../HumanSeg/utils/post_quantization.py | 0 .../contrib}/HumanSeg/utils/utils.py | 0 {contrib => legacy/contrib}/HumanSeg/val.py | 0 .../contrib}/HumanSeg/video_infer.py | 0 {contrib => legacy/contrib}/LaneNet/README.md | 0 .../contrib}/LaneNet/configs/lanenet.yaml | 0 .../contrib}/LaneNet/data_aug.py | 0 .../LaneNet/dataset/download_tusimple.py | 0 {contrib => legacy/contrib}/LaneNet/eval.py | 0 .../LaneNet/imgs/0005_pred_binary.png | Bin .../LaneNet/imgs/0005_pred_instance.png | Bin .../contrib}/LaneNet/imgs/0005_pred_lane.png | Bin {contrib => legacy/contrib}/LaneNet/loss.py | 0 .../contrib}/LaneNet/models/__init__.py | 0 .../contrib}/LaneNet/models/model_builder.py | 0 .../LaneNet/models/modeling/__init__.py | 0 
.../LaneNet/models/modeling/lanenet.py | 0 {contrib => legacy/contrib}/LaneNet/reader.py | 0 .../contrib}/LaneNet/requirements.txt | 0 {contrib => legacy/contrib}/LaneNet/train.py | 0 .../contrib}/LaneNet/utils/__init__.py | 0 .../contrib}/LaneNet/utils/config.py | 0 .../contrib}/LaneNet/utils/dist_utils.py | 0 .../utils/generate_tusimple_dataset.py | 0 .../LaneNet/utils/lanenet_postprocess.py | 0 .../LaneNet/utils/load_model_utils.py | 0 {contrib => legacy/contrib}/LaneNet/vis.py | 0 ...download_mini_mechanical_industry_meter.py | 0 ...download_unet_mechanical_industry_meter.py | 0 .../imgs/1560143028.5_IMG_3091.JPG | Bin .../imgs/1560143028.5_IMG_3091.png | Bin .../unet_mechanical_meter.yaml | 0 {contrib => legacy/contrib}/README.md | 0 .../contrib}/RemoteSensing/README.md | 0 .../contrib}/RemoteSensing/__init__.py | 0 .../docs/data_analyse_and_check.md | 0 .../RemoteSensing/docs/data_prepare.md | 0 .../docs/imgs/data_distribution.png | Bin .../RemoteSensing/docs/imgs/dataset.png | Bin .../contrib}/RemoteSensing/docs/imgs/vis.png | Bin .../RemoteSensing/docs/imgs/visualdl.png | Bin .../contrib}/RemoteSensing/docs/transforms.md | 0 .../contrib}/RemoteSensing/models/__init__.py | 0 .../contrib}/RemoteSensing/models/base.py | 0 .../contrib}/RemoteSensing/models/hrnet.py | 0 .../RemoteSensing/models/load_model.py | 0 .../contrib}/RemoteSensing/models/unet.py | 0 .../RemoteSensing/models/utils/visualize.py | 0 .../contrib}/RemoteSensing/nets/__init__.py | 0 .../contrib}/RemoteSensing/nets/hrnet.py | 0 .../contrib}/RemoteSensing/nets/libs.py | 0 .../contrib}/RemoteSensing/nets/loss.py | 0 .../contrib}/RemoteSensing/nets/unet.py | 0 .../contrib}/RemoteSensing/predict_demo.py | 0 .../RemoteSensing/readers/__init__.py | 0 .../contrib}/RemoteSensing/readers/base.py | 0 .../contrib}/RemoteSensing/readers/reader.py | 0 .../contrib}/RemoteSensing/requirements.txt | 0 .../RemoteSensing/tools/cal_norm_coef.py | 0 .../tools/create_dataset_list.py | 0 .../tools/data_analyse_and_check.py | 0 .../tools/data_distribution_vis.py | 0 .../RemoteSensing/tools/split_dataset_list.py | 0 .../contrib}/RemoteSensing/train_demo.py | 0 .../RemoteSensing/transforms/__init__.py | 0 .../contrib}/RemoteSensing/transforms/ops.py | 0 .../RemoteSensing/transforms/transforms.py | 0 .../contrib}/RemoteSensing/utils/__init__.py | 0 .../contrib}/RemoteSensing/utils/logging.py | 0 .../contrib}/RemoteSensing/utils/metrics.py | 0 .../RemoteSensing/utils/pretrain_weights.py | 0 .../contrib}/RemoteSensing/utils/utils.py | 0 .../contrib}/RemoteSensing/visualize_demo.py | 0 .../contrib}/SpatialEmbeddings/README.md | 0 .../contrib}/SpatialEmbeddings/config.py | 0 .../data/kitti/0007/kitti_0007_000512.png | Bin .../data/kitti/0007/kitti_0007_000518.png | Bin .../contrib}/SpatialEmbeddings/data/test.txt | 0 .../download_SpatialEmbeddings_kitti.py | 0 .../imgs/kitti_0007_000518_ori.png | Bin .../imgs/kitti_0007_000518_pred.png | Bin .../contrib}/SpatialEmbeddings/infer.py | 0 .../contrib}/SpatialEmbeddings/models.py | 0 .../SpatialEmbeddings/utils/__init__.py | 0 .../SpatialEmbeddings/utils/data_util.py | 0 .../SpatialEmbeddings/utils/palette.py | 0 .../contrib}/SpatialEmbeddings/utils/util.py | 0 {dataset => legacy/dataset}/README.md | 0 .../dataset}/convert_voc2012.py | 0 .../dataset}/download_and_convert_voc2012.py | 0 .../dataset}/download_cityscapes.py | 0 ...download_mini_deepglobe_road_extraction.py | 0 {dataset => legacy/dataset}/download_optic.py | 0 {dataset => legacy/dataset}/download_pet.py | 0 {deploy => 
legacy/deploy}/README.md | 0 {deploy => legacy/deploy}/cpp/CMakeLists.txt | 0 .../deploy}/cpp/CMakeSettings.json | 0 {deploy => legacy/deploy}/cpp/INSTALL.md | 0 {deploy => legacy/deploy}/cpp/LICENSE | 0 {deploy => legacy/deploy}/cpp/README.md | 0 .../deploy}/cpp/conf/humanseg.yaml | 0 {deploy => legacy/deploy}/cpp/demo.cpp | 0 {deploy => legacy/deploy}/cpp/docs/demo.jpg | Bin .../deploy}/cpp/docs/demo_jpg.png | Bin .../deploy}/cpp/docs/linux_build.md | 0 {deploy => legacy/deploy}/cpp/docs/vis.md | 0 .../deploy}/cpp/docs/vis_result.png | Bin .../deploy}/cpp/docs/windows_vs2015_build.md | 0 .../deploy}/cpp/docs/windows_vs2019_build.md | 0 .../deploy}/cpp/external-cmake/yaml-cpp.cmake | 0 .../deploy}/cpp/images/humanseg/demo1.jpeg | Bin .../deploy}/cpp/images/humanseg/demo2.jpeg | Bin .../cpp/images/humanseg/demo2.jpeg_result.png | Bin .../images/humanseg/demo2_jpeg_recover.png | Bin .../deploy}/cpp/images/humanseg/demo3.jpeg | Bin .../deploy}/cpp/predictor/seg_predictor.cpp | 0 .../deploy}/cpp/predictor/seg_predictor.h | 0 .../deploy}/cpp/preprocessor/preprocessor.cpp | 0 .../deploy}/cpp/preprocessor/preprocessor.h | 0 .../cpp/preprocessor/preprocessor_seg.cpp | 0 .../cpp/preprocessor/preprocessor_seg.h | 0 .../deploy}/cpp/tools/visualize.py | 0 .../deploy}/cpp/utils/seg_conf_parser.h | 0 {deploy => legacy/deploy}/cpp/utils/utils.h | 0 {deploy => legacy/deploy}/lite/README.md | 0 .../deploy}/lite/example/human_1.png | Bin .../deploy}/lite/example/human_2.png | Bin .../deploy}/lite/example/human_3.png | Bin .../lite/human_segmentation_demo/.gitignore | 0 .../human_segmentation_demo/app/.gitignore | 0 .../human_segmentation_demo/app/build.gradle | 0 .../app/gradle/wrapper/gradle-wrapper.jar | Bin .../gradle/wrapper/gradle-wrapper.properties | 0 .../lite/human_segmentation_demo/app/gradlew | 0 .../human_segmentation_demo/app/gradlew.bat | 0 .../app/local.properties | 0 .../app/proguard-rules.pro | 0 .../lite/demo/ExampleInstrumentedTest.java | 0 .../app/src/main/AndroidManifest.xml | 0 .../image_segmentation/images/human.jpg | Bin .../image_segmentation/labels/label_list | 0 .../AppCompatPreferenceActivity.java | 0 .../lite/demo/segmentation/MainActivity.java | 0 .../lite/demo/segmentation/Predictor.java | 0 .../demo/segmentation/SettingsActivity.java | 0 .../paddle/lite/demo/segmentation/Utils.java | 0 .../lite/demo/segmentation/config/Config.java | 0 .../segmentation/preprocess/Preprocess.java | 0 .../demo/segmentation/visual/Visualize.java | 0 .../drawable-v24/ic_launcher_foreground.xml | 0 .../res/drawable/ic_launcher_background.xml | 0 .../app/src/main/res/layout/activity_main.xml | 0 .../src/main/res/menu/menu_action_options.xml | 0 .../res/mipmap-anydpi-v26/ic_launcher.xml | 0 .../mipmap-anydpi-v26/ic_launcher_round.xml | 0 .../src/main/res/mipmap-hdpi/ic_launcher.png | Bin .../res/mipmap-hdpi/ic_launcher_round.png | Bin .../src/main/res/mipmap-mdpi/ic_launcher.png | Bin .../res/mipmap-mdpi/ic_launcher_round.png | Bin .../src/main/res/mipmap-xhdpi/ic_launcher.png | Bin .../res/mipmap-xhdpi/ic_launcher_round.png | Bin .../main/res/mipmap-xxhdpi/ic_launcher.png | Bin .../res/mipmap-xxhdpi/ic_launcher_round.png | Bin .../main/res/mipmap-xxxhdpi/ic_launcher.png | Bin .../res/mipmap-xxxhdpi/ic_launcher_round.png | Bin .../app/src/main/res/values/arrays.xml | 0 .../app/src/main/res/values/colors.xml | 0 .../app/src/main/res/values/strings.xml | 0 .../app/src/main/res/values/styles.xml | 0 .../app/src/main/res/xml/settings.xml | 0 .../paddle/lite/demo/ExampleUnitTest.java | 0 
.../lite/human_segmentation_demo/build.gradle | 0 .../human_segmentation_demo/gradle.properties | 0 .../gradle/wrapper/gradle-wrapper.jar | Bin .../gradle/wrapper/gradle-wrapper.properties | 0 .../lite/human_segmentation_demo/gradlew | 0 .../lite/human_segmentation_demo/gradlew.bat | 0 .../human_segmentation_demo/settings.gradle | 0 .../deploy}/paddle-serving/README.md | 0 .../deploy}/paddle-serving/postprocess.py | 0 .../deploy}/paddle-serving/seg_client.py | 0 {deploy => legacy/deploy}/python/README.md | 0 .../python/docs/PaddleSeg_Infer_Benchmark.md | 0 .../docs/compile_paddle_with_tensorrt.md | 0 {deploy => legacy/deploy}/python/infer.py | 0 .../deploy}/python/requirements.txt | 0 .../deploy}/serving/COMPILE_GUIDE.md | 0 {deploy => legacy/deploy}/serving/README.md | 0 {deploy => legacy/deploy}/serving/UBUNTU.md | 0 .../deploy}/serving/requirements.txt | 0 .../serving/seg-serving/CMakeLists.txt | 0 .../serving/seg-serving/conf/gflags.conf | 0 .../seg-serving/conf/model_toolkit.prototxt | 0 .../seg-serving/conf/resource.prototxt | 0 .../serving/seg-serving/conf/seg_conf.yaml | 0 .../serving/seg-serving/conf/seg_conf2.yaml | 0 .../serving/seg-serving/conf/service.prototxt | 0 .../seg-serving/conf/workflow.prototxt | 0 .../data/model/paddle/fluid_reload_flag | 0 .../data/model/paddle/fluid_time_file | 0 .../serving/seg-serving/op/CMakeLists.txt | 0 .../serving/seg-serving/op/image_seg_op.cpp | 0 .../serving/seg-serving/op/image_seg_op.h | 0 .../serving/seg-serving/op/reader_op.cpp | 0 .../serving/seg-serving/op/reader_op.h | 0 .../serving/seg-serving/op/seg_conf.cpp | 0 .../deploy}/serving/seg-serving/op/seg_conf.h | 0 .../serving/seg-serving/op/write_json_op.cpp | 0 .../serving/seg-serving/op/write_json_op.h | 0 .../serving/seg-serving/proto/CMakeLists.txt | 0 .../serving/seg-serving/proto/image_seg.proto | 0 .../serving/seg-serving/scripts/start.sh | 0 .../deploy}/serving/tools/image_seg_client.py | 0 .../deploy}/serving/tools/images/1.jpg | Bin .../deploy}/serving/tools/images/2.jpg | Bin .../deploy}/serving/tools/images/3.jpg | Bin .../cityscapes_demo_dataset.yaml | 0 ...art_000021_000019_gtFine_labelTrainIds.png | Bin ...art_000072_000019_gtFine_labelTrainIds.png | Bin ...urt_000001_062250_gtFine_labelTrainIds.png | Bin ...urt_000001_063045_gtFine_labelTrainIds.png | Bin .../stuttgart_000021_000019_leftImg8bit.png | Bin .../stuttgart_000072_000019_leftImg8bit.png | Bin .../frankfurt_000001_062250_leftImg8bit.png | Bin .../frankfurt_000001_063045_leftImg8bit.png | Bin .../annotation/cityscapes_demo/train_list.txt | 0 .../annotation/cityscapes_demo/val_list.txt | 0 .../docs}/annotation/jingling2seg.md | 0 .../annotation/jingling_demo/jingling.jpg | Bin .../outputs/annotations/jingling.png | Bin .../jingling_demo/outputs/class_names.txt | 0 .../jingling_demo/outputs/jingling.json | 0 .../docs}/annotation/labelme2seg.md | 0 .../annotation/labelme_demo/2011_000025.jpg | Bin .../annotation/labelme_demo/2011_000025.json | 0 .../annotation/labelme_demo/class_names.txt | 0 {docs => legacy/docs}/check.md | 0 {docs => legacy/docs}/config.md | 0 {docs => legacy/docs}/configs/.gitkeep | 0 {docs => legacy/docs}/configs/basic_group.md | 0 .../docs}/configs/dataloader_group.md | 0 .../docs}/configs/dataset_group.md | 0 {docs => legacy/docs}/configs/freeze_group.md | 0 .../docs}/configs/model_deeplabv3p_group.md | 0 {docs => legacy/docs}/configs/model_group.md | 0 .../docs}/configs/model_hrnet_group.md | 0 .../docs}/configs/model_icnet_group.md | 0 .../docs}/configs/model_pspnet_group.md | 0 
.../docs}/configs/model_unet_group.md | 0 {docs => legacy/docs}/configs/solver_group.md | 0 {docs => legacy/docs}/configs/test_group.md | 0 {docs => legacy/docs}/configs/train_group.md | 0 {docs => legacy/docs}/data_aug.md | 0 legacy/docs/data_prepare.md | 175 +++++++++++ {docs => legacy/docs}/deploy.md | 0 {docs => legacy/docs}/dice_loss.md | 0 {docs => legacy/docs}/faq.md | 0 {docs => legacy/docs}/imgs/VOC2012.png | Bin .../docs}/imgs/annotation/image-1.png | Bin .../docs}/imgs/annotation/image-10.jpg | Bin .../docs}/imgs/annotation/image-11.png | Bin .../docs}/imgs/annotation/image-2.png | Bin .../docs}/imgs/annotation/image-3.png | Bin .../docs}/imgs/annotation/image-4-1.png | Bin .../docs}/imgs/annotation/image-4-2.png | Bin .../docs}/imgs/annotation/image-5.png | Bin .../docs}/imgs/annotation/image-6-2.png | Bin .../docs}/imgs/annotation/image-6.png | Bin .../docs}/imgs/annotation/image-7.png | Bin .../docs}/imgs/annotation/jingling-1.png | Bin .../docs}/imgs/annotation/jingling-2.png | Bin .../docs}/imgs/annotation/jingling-3.png | Bin .../docs}/imgs/annotation/jingling-4.png | Bin .../docs}/imgs/annotation/jingling-5.png | Bin {docs => legacy/docs}/imgs/aug_method.png | Bin {docs => legacy/docs}/imgs/cityscapes.png | Bin .../docs}/imgs/cosine_decay_example.png | Bin .../docs}/imgs/data_aug_example.png | Bin .../docs}/imgs/data_aug_flip_mirror.png | Bin {docs => legacy/docs}/imgs/data_aug_flow.png | Bin {docs => legacy/docs}/imgs/deepglobe.png | Bin {docs => legacy/docs}/imgs/deeplabv3p.png | Bin {docs => legacy/docs}/imgs/dice.png | Bin {docs => legacy/docs}/imgs/dice2.png | Bin {docs => legacy/docs}/imgs/dice3.png | Bin {docs => legacy/docs}/imgs/fast-scnn.png | Bin {docs => legacy/docs}/imgs/file_list.png | Bin {docs => legacy/docs}/imgs/file_list2.png | Bin {docs => legacy/docs}/imgs/gn.png | Bin {docs => legacy/docs}/imgs/hrnet.png | Bin {docs => legacy/docs}/imgs/icnet.png | Bin .../docs}/imgs/loss_comparison.png | Bin .../docs}/imgs/lovasz-hinge-vis.png | Bin {docs => legacy/docs}/imgs/lovasz-hinge.png | Bin {docs => legacy/docs}/imgs/lovasz-softmax.png | Bin .../docs}/imgs/piecewise_decay_example.png | Bin .../docs}/imgs/poly_decay_example.png | Bin {docs => legacy/docs}/imgs/pspnet.png | Bin {docs => legacy/docs}/imgs/pspnet2.png | Bin {docs => legacy/docs}/imgs/qq_group2.png | Bin {docs => legacy/docs}/imgs/rangescale.png | Bin {docs => legacy/docs}/imgs/seg_news_icon.png | Bin {docs => legacy/docs}/imgs/softmax_loss.png | Bin {docs => legacy/docs}/imgs/unet.png | Bin {docs => legacy/docs}/imgs/usage_vis_demo.jpg | Bin {docs => legacy/docs}/imgs/visualdl_image.png | Bin .../docs}/imgs/visualdl_scalar.png | Bin .../imgs/warmup_with_poly_decay_example.png | Bin {docs => legacy/docs}/loss_select.md | 0 {docs => legacy/docs}/lovasz_loss.md | 0 {docs => legacy/docs}/model_export.md | 0 {docs => legacy/docs}/model_zoo.md | 0 {docs => legacy/docs}/models.md | 0 ...le_gpus_train_and_mixed_precision_train.md | 0 {docs => legacy/docs}/release_notes.md | 0 {docs => legacy/docs}/usage.md | 0 {pdseg => legacy/pdseg}/__init__.py | 0 {pdseg => legacy/pdseg}/check.py | 0 {pdseg => legacy/pdseg}/data_aug.py | 0 {pdseg => legacy/pdseg}/data_utils.py | 0 {pdseg => legacy/pdseg}/eval.py | 0 {pdseg => legacy/pdseg}/export_model.py | 0 .../pdseg}/export_serving_model.py | 0 {pdseg => legacy/pdseg}/loss.py | 0 {pdseg => legacy/pdseg}/lovasz_losses.py | 0 {pdseg => legacy/pdseg}/metrics.py | 0 {pdseg => legacy/pdseg}/models/__init__.py | 0 .../pdseg}/models/backbone/__init__.py | 0 
.../pdseg}/models/backbone/mobilenet_v2.py | 0 .../pdseg}/models/backbone/mobilenet_v3.py | 0 .../pdseg}/models/backbone/resnet.py | 0 .../pdseg}/models/backbone/resnet_vd.py | 0 .../pdseg}/models/backbone/vgg.py | 0 .../pdseg}/models/backbone/xception.py | 0 .../pdseg}/models/libs/__init__.py | 0 .../pdseg}/models/libs/model_libs.py | 0 .../pdseg}/models/model_builder.py | 0 .../pdseg}/models/modeling/__init__.py | 0 .../pdseg}/models/modeling/deeplab.py | 0 .../pdseg}/models/modeling/fast_scnn.py | 0 .../pdseg}/models/modeling/hrnet.py | 0 .../pdseg}/models/modeling/icnet.py | 0 .../pdseg}/models/modeling/ocrnet.py | 0 .../pdseg}/models/modeling/pspnet.py | 0 .../pdseg}/models/modeling/unet.py | 0 {pdseg => legacy/pdseg}/reader.py | 0 {pdseg => legacy/pdseg}/solver.py | 0 {pdseg => legacy/pdseg}/tools/__init__.py | 0 .../pdseg}/tools/create_dataset_list.py | 0 .../pdseg}/tools/gray2pseudo_color.py | 0 {pdseg => legacy/pdseg}/tools/jingling2seg.py | 0 {pdseg => legacy/pdseg}/tools/labelme2seg.py | 0 {pdseg => legacy/pdseg}/train.py | 0 {pdseg => legacy/pdseg}/utils/__init__.py | 0 {pdseg => legacy/pdseg}/utils/collect.py | 0 {pdseg => legacy/pdseg}/utils/config.py | 0 {pdseg => legacy/pdseg}/utils/dist_utils.py | 0 {pdseg => legacy/pdseg}/utils/fp16_utils.py | 0 .../pdseg}/utils/load_model_utils.py | 0 {pdseg => legacy/pdseg}/utils/paddle_utils.py | 0 {pdseg => legacy/pdseg}/utils/timer.py | 0 {pdseg => legacy/pdseg}/vis.py | 0 .../pretrained_model}/download_model.py | 0 {dygraph => legacy}/requirements.txt | 3 - {slim => legacy/slim}/distillation/README.md | 0 .../slim}/distillation/cityscape.yaml | 0 .../slim}/distillation/cityscape_teacher.yaml | 0 .../slim}/distillation/model_builder.py | 0 .../slim}/distillation/train_distill.py | 0 {slim => legacy/slim}/nas/README.md | 0 {slim => legacy/slim}/nas/deeplab.py | 0 {slim => legacy/slim}/nas/eval_nas.py | 0 .../slim}/nas/mobilenetv2_search_space.py | 0 {slim => legacy/slim}/nas/model_builder.py | 0 {slim => legacy/slim}/nas/train_nas.py | 0 {slim => legacy/slim}/prune/README.md | 0 {slim => legacy/slim}/prune/eval_prune.py | 0 {slim => legacy/slim}/prune/train_prune.py | 0 {slim => legacy/slim}/quantization/README.md | 0 .../slim}/quantization/deploy/README.md | 0 .../slim}/quantization/deploy/infer.py | 0 .../slim}/quantization/eval_quant.py | 0 .../slim}/quantization/export_model.py | 0 .../quantization/images/ConvertToInt8Pass.png | Bin .../slim}/quantization/images/FreezePass.png | Bin .../images/TransformForMobilePass.png | Bin .../quantization/images/TransformPass.png | Bin .../slim}/quantization/train_quant.py | 0 {test => legacy/test}/ci/check_code_style.sh | 0 .../test}/ci/test_download_dataset.sh | 0 .../deeplabv3p_xception65_cityscapes.yaml | 0 {test => legacy/test}/configs/unet_pet.yaml | 0 .../test}/local_test_cityscapes.py | 0 {test => legacy/test}/local_test_pet.py | 0 {test => legacy/test}/test_utils.py | 0 .../tutorial}/finetune_deeplabv3plus.md | 0 .../tutorial}/finetune_fast_scnn.md | 0 .../tutorial}/finetune_hrnet.md | 0 .../tutorial}/finetune_icnet.md | 0 .../tutorial}/finetune_ocrnet.md | 0 .../tutorial}/finetune_pspnet.md | 0 .../tutorial}/finetune_unet.md | 0 {tutorial => legacy/tutorial}/imgs/optic.png | Bin .../tutorial}/imgs/optic_deeplab.png | Bin .../tutorial}/imgs/optic_hrnet.png | Bin .../tutorial}/imgs/optic_icnet.png | Bin .../tutorial}/imgs/optic_pspnet.png | Bin .../tutorial}/imgs/optic_unet.png | Bin {dygraph/paddleseg => paddleseg}/__init__.py | 0 .../paddleseg => paddleseg}/core/__init__.py | 0 
.../paddleseg => paddleseg}/core/infer.py | 0 .../paddleseg => paddleseg}/core/predict.py | 0 .../paddleseg => paddleseg}/core/train.py | 0 {dygraph/paddleseg => paddleseg}/core/val.py | 0 .../cvlibs/__init__.py | 0 .../cvlibs/callbacks.py | 0 .../paddleseg => paddleseg}/cvlibs/config.py | 0 .../paddleseg => paddleseg}/cvlibs/manager.py | 0 .../cvlibs/param_init.py | 0 .../datasets/__init__.py | 0 .../paddleseg => paddleseg}/datasets/ade.py | 0 .../datasets/cityscapes.py | 0 .../datasets/dataset.py | 0 .../datasets/optic_disc_seg.py | 0 .../paddleseg => paddleseg}/datasets/voc.py | 0 .../models/__init__.py | 0 .../paddleseg => paddleseg}/models/ann.py | 0 .../models/backbones/__init__.py | 0 .../models/backbones/hrnet.py | 0 .../models/backbones/mobilenetv3.py | 0 .../models/backbones/resnet_vd.py | 0 .../models/backbones/xception_deeplab.py | 0 .../paddleseg => paddleseg}/models/bisenet.py | 0 .../paddleseg => paddleseg}/models/danet.py | 0 .../paddleseg => paddleseg}/models/deeplab.py | 0 .../models/fast_scnn.py | 0 .../paddleseg => paddleseg}/models/fcn.py | 0 .../paddleseg => paddleseg}/models/gcnet.py | 0 .../models/layers/__init__.py | 0 .../models/layers/activation.py | 0 .../models/layers/layer_libs.py | 0 .../models/layers/pyramid_pool.py | 0 .../models/losses/__init__.py | 0 .../models/losses/cross_entroy_loss.py | 0 .../paddleseg => paddleseg}/models/ocrnet.py | 0 .../paddleseg => paddleseg}/models/pspnet.py | 0 .../paddleseg => paddleseg}/models/unet.py | 0 .../transforms/__init__.py | 0 .../transforms/functional.py | 0 .../transforms/transforms.py | 0 .../paddleseg => paddleseg}/utils/__init__.py | 0 .../paddleseg => paddleseg}/utils/download.py | 0 .../utils/env/__init__.py | 0 .../utils/env/seg_env.py | 0 .../utils/env/sys_env.py | 0 .../paddleseg => paddleseg}/utils/logger.py | 0 .../paddleseg => paddleseg}/utils/metrics.py | 0 .../paddleseg => paddleseg}/utils/progbar.py | 0 .../paddleseg => paddleseg}/utils/timer.py | 0 .../paddleseg => paddleseg}/utils/utils.py | 0 .../utils/visualize.py | 0 dygraph/predict.py => predict.py | 0 requirements.txt | 3 + .../tools => tools}/convert_cityscapes.py | 0 {dygraph/tools => tools}/voc_augment.py | 0 dygraph/train.py => train.py | 0 dygraph/val.py => val.py | 0 610 files changed, 721 insertions(+), 721 deletions(-) rename {dygraph/benchmark => benchmark}/deeplabv3p.yml (100%) rename {dygraph/benchmark => benchmark}/hrnet.yml (100%) rename {dygraph/configs => configs}/README.md (100%) rename {dygraph/configs => configs}/_base_/ade20k.yml (100%) rename {dygraph/configs => configs}/_base_/cityscapes.yml (100%) rename {dygraph/configs => configs}/_base_/cityscapes_1024x1024.yml (100%) rename {dygraph/configs => configs}/_base_/cityscapes_769x769.yml (100%) rename {dygraph/configs => configs}/_base_/pascal_voc12.yml (100%) rename {dygraph/configs => configs}/_base_/pascal_voc12aug.yml (100%) rename {dygraph/configs => configs}/ann/README.md (100%) rename {dygraph/configs => configs}/ann/ann_resnet101_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/ann/ann_resnet101_os8_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/ann/ann_resnet50_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/ann/ann_resnet50_os8_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/bisenet/README.md (100%) rename {dygraph/configs => configs}/bisenet/bisenet_cityscapes_1024x1024_160k.yml (100%) rename {dygraph/configs => configs}/danet/README.md (100%) rename {dygraph/configs => 
configs}/danet/danet_resnet101_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/danet/danet_resnet50_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/deeplabv3/README.md (100%) rename {dygraph/configs => configs}/deeplabv3/deeplabv3_resnet101_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/deeplabv3/deeplabv3_resnet101_os8_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/deeplabv3/deeplabv3_resnet50_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/deeplabv3/deeplabv3_resnet50_os8_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/deeplabv3p/README.md (100%) rename {dygraph/configs => configs}/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml (100%) rename {dygraph/configs => configs}/deeplabv3p/deeplabv3p_resnet101_os8_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/deeplabv3p/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/fastscnn/README.md (100%) rename {dygraph/configs => configs}/fastscnn/fastscnn_cityscapes_1024x1024_160k.yml (100%) rename {dygraph/configs => configs}/fcn/README.md (100%) rename {dygraph/configs => configs}/fcn/fcn_hrnetw18_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/fcn/fcn_hrnetw48_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/gcnet/README.md (100%) rename {dygraph/configs => configs}/gcnet/gcnet_resnet101_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/gcnet/gcnet_resnet101_os8_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/gcnet/gcnet_resnet50_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/gcnet/gcnet_resnet50_os8_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/ocrnet/README.md (100%) rename {dygraph/configs => configs}/ocrnet/ocrnet_hrnetw18_cityscapes_1024x512_160k.yml (100%) rename {dygraph/configs => configs}/ocrnet/ocrnet_hrnetw18_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/ocrnet/ocrnet_hrnetw48_cityscapes_1024x512_160k.yml (100%) rename {dygraph/configs => configs}/ocrnet/ocrnet_hrnetw48_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/pspnet/README.md (100%) rename {dygraph/configs => configs}/pspnet/pspnet_resnet101_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/pspnet/pspnet_resnet101_os8_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/pspnet/pspnet_resnet50_os8_cityscapes_1024x512_80k.yml (100%) rename {dygraph/configs => configs}/pspnet/pspnet_resnet50_os8_voc12aug_512x512_40k.yml (100%) rename {dygraph/configs => configs}/quick_start/bisenet_optic_disc_512x512_1k.yml (100%) rename {dygraph/configs => configs}/unet/README.md (100%) rename {dygraph/configs => configs}/unet/unet_cityscapes_1024x512_160k.yml (100%) rename {dygraph/contrib => contrib}/remote_sensing/README.md (100%) rename {dygraph/contrib => contrib}/remote_sensing/fcn_hrnetw48_ccf_256x256_160k.yml (100%) rename {dygraph/contrib => 
contrib}/remote_sensing/ocrnet_hrnetw48_ccf_256x256_80k.yml (100%) rename {dygraph/docs => docs}/add_new_model.md (100%) rename {dygraph/docs => docs}/apis/README.md (100%) rename {dygraph/docs => docs}/apis/backbones.md (100%) rename {dygraph/docs => docs}/apis/core.md (100%) rename {dygraph/docs => docs}/apis/cvlibs.md (100%) rename {dygraph/docs => docs}/apis/datasets.md (100%) rename {dygraph/docs => docs}/apis/models.md (100%) rename {dygraph/docs => docs}/apis/transforms.md (100%) rename {dygraph/docs => docs}/apis/utils.md (100%) rename {dygraph/docs => docs}/images/quick_start_predict.jpg (100%) rename {dygraph/docs => docs}/images/quick_start_vdl.jpg (100%) rename {dygraph/docs => docs}/quick_start.md (100%) delete mode 100644 dygraph/README.md delete mode 100644 dygraph/README_CN.md delete mode 100644 dygraph/docs/data_prepare.md create mode 100644 legacy/README.md create mode 100644 legacy/README_CN.md rename {configs => legacy/configs}/cityscape_fast_scnn.yaml (100%) rename {configs => legacy/configs}/deepglobe_road_extraction.yaml (100%) rename {configs => legacy/configs}/deeplabv3p_mobilenet-1-0_pet.yaml (100%) rename {configs => legacy/configs}/deeplabv3p_mobilenetv2_cityscapes.yaml (100%) rename {configs => legacy/configs}/deeplabv3p_mobilenetv3_large_cityscapes.yaml (100%) rename {configs => legacy/configs}/deeplabv3p_resnet50_vd_cityscapes.yaml (100%) rename {configs => legacy/configs}/deeplabv3p_xception65_cityscapes.yaml (100%) rename {configs => legacy/configs}/deeplabv3p_xception65_optic.yaml (100%) rename {configs => legacy/configs}/fast_scnn_pet.yaml (100%) rename {configs => legacy/configs}/hrnet_optic.yaml (100%) rename {configs => legacy/configs}/icnet_optic.yaml (100%) rename {configs => legacy/configs}/lovasz_hinge_deeplabv3p_mobilenet_road.yaml (100%) rename {configs => legacy/configs}/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml (100%) rename {configs => legacy/configs}/ocrnet_w18_bn_cityscapes.yaml (100%) rename {configs => legacy/configs}/pspnet_optic.yaml (100%) rename {configs => legacy/configs}/unet_optic.yaml (100%) rename {contrib => legacy/contrib}/ACE2P/README.md (100%) rename {contrib => legacy/contrib}/ACE2P/__init__.py (100%) rename {contrib => legacy/contrib}/ACE2P/config.py (100%) rename {contrib => legacy/contrib}/ACE2P/download_ACE2P.py (100%) rename {contrib => legacy/contrib}/ACE2P/imgs/117676_2149260.jpg (100%) rename {contrib => legacy/contrib}/ACE2P/imgs/117676_2149260.png (100%) rename {contrib => legacy/contrib}/ACE2P/imgs/net.jpg (100%) rename {contrib => legacy/contrib}/ACE2P/imgs/result.jpg (100%) rename {contrib => legacy/contrib}/ACE2P/infer.py (100%) rename {contrib => legacy/contrib}/ACE2P/reader.py (100%) rename {contrib => legacy/contrib}/ACE2P/utils/__init__.py (100%) rename {contrib => legacy/contrib}/ACE2P/utils/palette.py (100%) rename {contrib => legacy/contrib}/ACE2P/utils/util.py (100%) rename {contrib => legacy/contrib}/HumanSeg/README.md (100%) rename {contrib => legacy/contrib}/HumanSeg/bg_replace.py (100%) rename {contrib => legacy/contrib}/HumanSeg/data/background.jpg (100%) rename {contrib => legacy/contrib}/HumanSeg/data/download_data.py (100%) rename {contrib => legacy/contrib}/HumanSeg/data/human_image.jpg (100%) rename {contrib => legacy/contrib}/HumanSeg/datasets/__init__.py (100%) rename {contrib => legacy/contrib}/HumanSeg/datasets/dataset.py (100%) rename {contrib => legacy/contrib}/HumanSeg/datasets/shared_queue/__init__.py (100%) rename {contrib => 
legacy/contrib}/HumanSeg/datasets/shared_queue/queue.py (100%) rename {contrib => legacy/contrib}/HumanSeg/datasets/shared_queue/sharedmemory.py (100%) rename {contrib => legacy/contrib}/HumanSeg/export.py (100%) rename {contrib => legacy/contrib}/HumanSeg/infer.py (100%) rename {contrib => legacy/contrib}/HumanSeg/models/__init__.py (100%) rename {contrib => legacy/contrib}/HumanSeg/models/humanseg.py (100%) rename {contrib => legacy/contrib}/HumanSeg/models/load_model.py (100%) rename {contrib => legacy/contrib}/HumanSeg/nets/__init__.py (100%) rename {contrib => legacy/contrib}/HumanSeg/nets/backbone/__init__.py (100%) rename {contrib => legacy/contrib}/HumanSeg/nets/backbone/mobilenet_v2.py (100%) rename {contrib => legacy/contrib}/HumanSeg/nets/backbone/xception.py (100%) rename {contrib => legacy/contrib}/HumanSeg/nets/deeplabv3p.py (100%) rename {contrib => legacy/contrib}/HumanSeg/nets/hrnet.py (100%) rename {contrib => legacy/contrib}/HumanSeg/nets/libs.py (100%) rename {contrib => legacy/contrib}/HumanSeg/nets/seg_modules.py (100%) rename {contrib => legacy/contrib}/HumanSeg/nets/shufflenet_slim.py (100%) rename {contrib => legacy/contrib}/HumanSeg/pretrained_weights/download_pretrained_weights.py (100%) rename {contrib => legacy/contrib}/HumanSeg/quant_offline.py (100%) rename {contrib => legacy/contrib}/HumanSeg/quant_online.py (100%) rename {contrib => legacy/contrib}/HumanSeg/requirements.txt (100%) rename {contrib => legacy/contrib}/HumanSeg/train.py (100%) rename {contrib => legacy/contrib}/HumanSeg/transforms/__init__.py (100%) rename {contrib => legacy/contrib}/HumanSeg/transforms/functional.py (100%) rename {contrib => legacy/contrib}/HumanSeg/transforms/transforms.py (100%) rename {contrib => legacy/contrib}/HumanSeg/utils/__init__.py (100%) rename {contrib => legacy/contrib}/HumanSeg/utils/humanseg_postprocess.py (100%) rename {contrib => legacy/contrib}/HumanSeg/utils/logging.py (100%) rename {contrib => legacy/contrib}/HumanSeg/utils/metrics.py (100%) rename {contrib => legacy/contrib}/HumanSeg/utils/post_quantization.py (100%) rename {contrib => legacy/contrib}/HumanSeg/utils/utils.py (100%) rename {contrib => legacy/contrib}/HumanSeg/val.py (100%) rename {contrib => legacy/contrib}/HumanSeg/video_infer.py (100%) rename {contrib => legacy/contrib}/LaneNet/README.md (100%) rename {contrib => legacy/contrib}/LaneNet/configs/lanenet.yaml (100%) rename {contrib => legacy/contrib}/LaneNet/data_aug.py (100%) rename {contrib => legacy/contrib}/LaneNet/dataset/download_tusimple.py (100%) rename {contrib => legacy/contrib}/LaneNet/eval.py (100%) rename {contrib => legacy/contrib}/LaneNet/imgs/0005_pred_binary.png (100%) rename {contrib => legacy/contrib}/LaneNet/imgs/0005_pred_instance.png (100%) rename {contrib => legacy/contrib}/LaneNet/imgs/0005_pred_lane.png (100%) rename {contrib => legacy/contrib}/LaneNet/loss.py (100%) rename {contrib => legacy/contrib}/LaneNet/models/__init__.py (100%) rename {contrib => legacy/contrib}/LaneNet/models/model_builder.py (100%) rename {contrib => legacy/contrib}/LaneNet/models/modeling/__init__.py (100%) rename {contrib => legacy/contrib}/LaneNet/models/modeling/lanenet.py (100%) rename {contrib => legacy/contrib}/LaneNet/reader.py (100%) rename {contrib => legacy/contrib}/LaneNet/requirements.txt (100%) rename {contrib => legacy/contrib}/LaneNet/train.py (100%) rename {contrib => legacy/contrib}/LaneNet/utils/__init__.py (100%) rename {contrib => legacy/contrib}/LaneNet/utils/config.py (100%) rename {contrib => 
legacy/contrib}/LaneNet/utils/dist_utils.py (100%) rename {contrib => legacy/contrib}/LaneNet/utils/generate_tusimple_dataset.py (100%) rename {contrib => legacy/contrib}/LaneNet/utils/lanenet_postprocess.py (100%) rename {contrib => legacy/contrib}/LaneNet/utils/load_model_utils.py (100%) rename {contrib => legacy/contrib}/LaneNet/vis.py (100%) rename {contrib => legacy/contrib}/MechanicalIndustryMeter/download_mini_mechanical_industry_meter.py (100%) rename {contrib => legacy/contrib}/MechanicalIndustryMeter/download_unet_mechanical_industry_meter.py (100%) rename {contrib => legacy/contrib}/MechanicalIndustryMeter/imgs/1560143028.5_IMG_3091.JPG (100%) rename {contrib => legacy/contrib}/MechanicalIndustryMeter/imgs/1560143028.5_IMG_3091.png (100%) rename {contrib => legacy/contrib}/MechanicalIndustryMeter/unet_mechanical_meter.yaml (100%) rename {contrib => legacy/contrib}/README.md (100%) rename {contrib => legacy/contrib}/RemoteSensing/README.md (100%) rename {contrib => legacy/contrib}/RemoteSensing/__init__.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/docs/data_analyse_and_check.md (100%) rename {contrib => legacy/contrib}/RemoteSensing/docs/data_prepare.md (100%) rename {contrib => legacy/contrib}/RemoteSensing/docs/imgs/data_distribution.png (100%) rename {contrib => legacy/contrib}/RemoteSensing/docs/imgs/dataset.png (100%) rename {contrib => legacy/contrib}/RemoteSensing/docs/imgs/vis.png (100%) rename {contrib => legacy/contrib}/RemoteSensing/docs/imgs/visualdl.png (100%) rename {contrib => legacy/contrib}/RemoteSensing/docs/transforms.md (100%) rename {contrib => legacy/contrib}/RemoteSensing/models/__init__.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/models/base.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/models/hrnet.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/models/load_model.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/models/unet.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/models/utils/visualize.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/nets/__init__.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/nets/hrnet.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/nets/libs.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/nets/loss.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/nets/unet.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/predict_demo.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/readers/__init__.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/readers/base.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/readers/reader.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/requirements.txt (100%) rename {contrib => legacy/contrib}/RemoteSensing/tools/cal_norm_coef.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/tools/create_dataset_list.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/tools/data_analyse_and_check.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/tools/data_distribution_vis.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/tools/split_dataset_list.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/train_demo.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/transforms/__init__.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/transforms/ops.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/transforms/transforms.py (100%) rename {contrib => 
legacy/contrib}/RemoteSensing/utils/__init__.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/utils/logging.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/utils/metrics.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/utils/pretrain_weights.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/utils/utils.py (100%) rename {contrib => legacy/contrib}/RemoteSensing/visualize_demo.py (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/README.md (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/config.py (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/data/kitti/0007/kitti_0007_000512.png (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/data/kitti/0007/kitti_0007_000518.png (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/data/test.txt (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/download_SpatialEmbeddings_kitti.py (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/imgs/kitti_0007_000518_ori.png (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/imgs/kitti_0007_000518_pred.png (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/infer.py (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/models.py (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/utils/__init__.py (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/utils/data_util.py (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/utils/palette.py (100%) rename {contrib => legacy/contrib}/SpatialEmbeddings/utils/util.py (100%) rename {dataset => legacy/dataset}/README.md (100%) rename {dataset => legacy/dataset}/convert_voc2012.py (100%) rename {dataset => legacy/dataset}/download_and_convert_voc2012.py (100%) rename {dataset => legacy/dataset}/download_cityscapes.py (100%) rename {dataset => legacy/dataset}/download_mini_deepglobe_road_extraction.py (100%) rename {dataset => legacy/dataset}/download_optic.py (100%) rename {dataset => legacy/dataset}/download_pet.py (100%) rename {deploy => legacy/deploy}/README.md (100%) rename {deploy => legacy/deploy}/cpp/CMakeLists.txt (100%) rename {deploy => legacy/deploy}/cpp/CMakeSettings.json (100%) rename {deploy => legacy/deploy}/cpp/INSTALL.md (100%) rename {deploy => legacy/deploy}/cpp/LICENSE (100%) rename {deploy => legacy/deploy}/cpp/README.md (100%) rename {deploy => legacy/deploy}/cpp/conf/humanseg.yaml (100%) rename {deploy => legacy/deploy}/cpp/demo.cpp (100%) rename {deploy => legacy/deploy}/cpp/docs/demo.jpg (100%) rename {deploy => legacy/deploy}/cpp/docs/demo_jpg.png (100%) rename {deploy => legacy/deploy}/cpp/docs/linux_build.md (100%) rename {deploy => legacy/deploy}/cpp/docs/vis.md (100%) rename {deploy => legacy/deploy}/cpp/docs/vis_result.png (100%) rename {deploy => legacy/deploy}/cpp/docs/windows_vs2015_build.md (100%) rename {deploy => legacy/deploy}/cpp/docs/windows_vs2019_build.md (100%) rename {deploy => legacy/deploy}/cpp/external-cmake/yaml-cpp.cmake (100%) rename {deploy => legacy/deploy}/cpp/images/humanseg/demo1.jpeg (100%) rename {deploy => legacy/deploy}/cpp/images/humanseg/demo2.jpeg (100%) rename {deploy => legacy/deploy}/cpp/images/humanseg/demo2.jpeg_result.png (100%) rename {deploy => legacy/deploy}/cpp/images/humanseg/demo2_jpeg_recover.png (100%) rename {deploy => legacy/deploy}/cpp/images/humanseg/demo3.jpeg (100%) rename {deploy => legacy/deploy}/cpp/predictor/seg_predictor.cpp (100%) rename {deploy => legacy/deploy}/cpp/predictor/seg_predictor.h (100%) rename {deploy => 
legacy/deploy}/cpp/preprocessor/preprocessor.cpp (100%) rename {deploy => legacy/deploy}/cpp/preprocessor/preprocessor.h (100%) rename {deploy => legacy/deploy}/cpp/preprocessor/preprocessor_seg.cpp (100%) rename {deploy => legacy/deploy}/cpp/preprocessor/preprocessor_seg.h (100%) rename {deploy => legacy/deploy}/cpp/tools/visualize.py (100%) rename {deploy => legacy/deploy}/cpp/utils/seg_conf_parser.h (100%) rename {deploy => legacy/deploy}/cpp/utils/utils.h (100%) rename {deploy => legacy/deploy}/lite/README.md (100%) rename {deploy => legacy/deploy}/lite/example/human_1.png (100%) rename {deploy => legacy/deploy}/lite/example/human_2.png (100%) rename {deploy => legacy/deploy}/lite/example/human_3.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/.gitignore (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/.gitignore (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/build.gradle (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/gradle/wrapper/gradle-wrapper.jar (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/gradle/wrapper/gradle-wrapper.properties (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/gradlew (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/gradlew.bat (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/local.properties (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/proguard-rules.pro (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/androidTest/java/com/baidu/paddle/lite/demo/ExampleInstrumentedTest.java (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/AndroidManifest.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/images/human.jpg (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/labels/label_list (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/AppCompatPreferenceActivity.java (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/MainActivity.java (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/Predictor.java (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/SettingsActivity.java (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/Utils.java (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/config/Config.java (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/preprocess/Preprocess.java (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/visual/Visualize.java (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/drawable-v24/ic_launcher_foreground.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/drawable/ic_launcher_background.xml (100%) rename {deploy => 
legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/layout/activity_main.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/menu/menu_action_options.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/values/arrays.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/values/colors.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/values/strings.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/values/styles.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/main/res/xml/settings.xml (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/app/src/test/java/com/baidu/paddle/lite/demo/ExampleUnitTest.java (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/build.gradle (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/gradle.properties (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.jar (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.properties (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/gradlew (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/gradlew.bat (100%) rename {deploy => legacy/deploy}/lite/human_segmentation_demo/settings.gradle (100%) rename {deploy => legacy/deploy}/paddle-serving/README.md (100%) rename {deploy => legacy/deploy}/paddle-serving/postprocess.py (100%) rename {deploy => legacy/deploy}/paddle-serving/seg_client.py (100%) rename {deploy => legacy/deploy}/python/README.md (100%) rename {deploy => legacy/deploy}/python/docs/PaddleSeg_Infer_Benchmark.md (100%) rename {deploy => legacy/deploy}/python/docs/compile_paddle_with_tensorrt.md (100%) rename {deploy => legacy/deploy}/python/infer.py (100%) rename {deploy => legacy/deploy}/python/requirements.txt (100%) rename {deploy => legacy/deploy}/serving/COMPILE_GUIDE.md (100%) rename {deploy => 
legacy/deploy}/serving/README.md (100%) rename {deploy => legacy/deploy}/serving/UBUNTU.md (100%) rename {deploy => legacy/deploy}/serving/requirements.txt (100%) rename {deploy => legacy/deploy}/serving/seg-serving/CMakeLists.txt (100%) rename {deploy => legacy/deploy}/serving/seg-serving/conf/gflags.conf (100%) rename {deploy => legacy/deploy}/serving/seg-serving/conf/model_toolkit.prototxt (100%) rename {deploy => legacy/deploy}/serving/seg-serving/conf/resource.prototxt (100%) rename {deploy => legacy/deploy}/serving/seg-serving/conf/seg_conf.yaml (100%) rename {deploy => legacy/deploy}/serving/seg-serving/conf/seg_conf2.yaml (100%) rename {deploy => legacy/deploy}/serving/seg-serving/conf/service.prototxt (100%) rename {deploy => legacy/deploy}/serving/seg-serving/conf/workflow.prototxt (100%) rename {deploy => legacy/deploy}/serving/seg-serving/data/model/paddle/fluid_reload_flag (100%) rename {deploy => legacy/deploy}/serving/seg-serving/data/model/paddle/fluid_time_file (100%) rename {deploy => legacy/deploy}/serving/seg-serving/op/CMakeLists.txt (100%) rename {deploy => legacy/deploy}/serving/seg-serving/op/image_seg_op.cpp (100%) rename {deploy => legacy/deploy}/serving/seg-serving/op/image_seg_op.h (100%) rename {deploy => legacy/deploy}/serving/seg-serving/op/reader_op.cpp (100%) rename {deploy => legacy/deploy}/serving/seg-serving/op/reader_op.h (100%) rename {deploy => legacy/deploy}/serving/seg-serving/op/seg_conf.cpp (100%) rename {deploy => legacy/deploy}/serving/seg-serving/op/seg_conf.h (100%) rename {deploy => legacy/deploy}/serving/seg-serving/op/write_json_op.cpp (100%) rename {deploy => legacy/deploy}/serving/seg-serving/op/write_json_op.h (100%) rename {deploy => legacy/deploy}/serving/seg-serving/proto/CMakeLists.txt (100%) rename {deploy => legacy/deploy}/serving/seg-serving/proto/image_seg.proto (100%) rename {deploy => legacy/deploy}/serving/seg-serving/scripts/start.sh (100%) rename {deploy => legacy/deploy}/serving/tools/image_seg_client.py (100%) rename {deploy => legacy/deploy}/serving/tools/images/1.jpg (100%) rename {deploy => legacy/deploy}/serving/tools/images/2.jpg (100%) rename {deploy => legacy/deploy}/serving/tools/images/3.jpg (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/cityscapes_demo_dataset.yaml (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/gtFine/train/stuttgart/stuttgart_000021_000019_gtFine_labelTrainIds.png (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/gtFine/train/stuttgart/stuttgart_000072_000019_gtFine_labelTrainIds.png (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/gtFine/val/frankfurt/frankfurt_000001_062250_gtFine_labelTrainIds.png (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/gtFine/val/frankfurt/frankfurt_000001_063045_gtFine_labelTrainIds.png (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/leftImg8bit/train/stuttgart/stuttgart_000021_000019_leftImg8bit.png (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/leftImg8bit/train/stuttgart/stuttgart_000072_000019_leftImg8bit.png (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/leftImg8bit/val/frankfurt/frankfurt_000001_062250_leftImg8bit.png (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/leftImg8bit/val/frankfurt/frankfurt_000001_063045_leftImg8bit.png (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/train_list.txt (100%) rename {docs => legacy/docs}/annotation/cityscapes_demo/val_list.txt (100%) rename {docs => 
legacy/docs}/annotation/jingling2seg.md (100%) rename {docs => legacy/docs}/annotation/jingling_demo/jingling.jpg (100%) rename {docs => legacy/docs}/annotation/jingling_demo/outputs/annotations/jingling.png (100%) rename {docs => legacy/docs}/annotation/jingling_demo/outputs/class_names.txt (100%) rename {docs => legacy/docs}/annotation/jingling_demo/outputs/jingling.json (100%) rename {docs => legacy/docs}/annotation/labelme2seg.md (100%) rename {docs => legacy/docs}/annotation/labelme_demo/2011_000025.jpg (100%) rename {docs => legacy/docs}/annotation/labelme_demo/2011_000025.json (100%) rename {docs => legacy/docs}/annotation/labelme_demo/class_names.txt (100%) rename {docs => legacy/docs}/check.md (100%) rename {docs => legacy/docs}/config.md (100%) rename {docs => legacy/docs}/configs/.gitkeep (100%) rename {docs => legacy/docs}/configs/basic_group.md (100%) rename {docs => legacy/docs}/configs/dataloader_group.md (100%) rename {docs => legacy/docs}/configs/dataset_group.md (100%) rename {docs => legacy/docs}/configs/freeze_group.md (100%) rename {docs => legacy/docs}/configs/model_deeplabv3p_group.md (100%) rename {docs => legacy/docs}/configs/model_group.md (100%) rename {docs => legacy/docs}/configs/model_hrnet_group.md (100%) rename {docs => legacy/docs}/configs/model_icnet_group.md (100%) rename {docs => legacy/docs}/configs/model_pspnet_group.md (100%) rename {docs => legacy/docs}/configs/model_unet_group.md (100%) rename {docs => legacy/docs}/configs/solver_group.md (100%) rename {docs => legacy/docs}/configs/test_group.md (100%) rename {docs => legacy/docs}/configs/train_group.md (100%) rename {docs => legacy/docs}/data_aug.md (100%) create mode 100644 legacy/docs/data_prepare.md rename {docs => legacy/docs}/deploy.md (100%) rename {docs => legacy/docs}/dice_loss.md (100%) rename {docs => legacy/docs}/faq.md (100%) rename {docs => legacy/docs}/imgs/VOC2012.png (100%) rename {docs => legacy/docs}/imgs/annotation/image-1.png (100%) rename {docs => legacy/docs}/imgs/annotation/image-10.jpg (100%) rename {docs => legacy/docs}/imgs/annotation/image-11.png (100%) rename {docs => legacy/docs}/imgs/annotation/image-2.png (100%) rename {docs => legacy/docs}/imgs/annotation/image-3.png (100%) rename {docs => legacy/docs}/imgs/annotation/image-4-1.png (100%) rename {docs => legacy/docs}/imgs/annotation/image-4-2.png (100%) rename {docs => legacy/docs}/imgs/annotation/image-5.png (100%) rename {docs => legacy/docs}/imgs/annotation/image-6-2.png (100%) rename {docs => legacy/docs}/imgs/annotation/image-6.png (100%) rename {docs => legacy/docs}/imgs/annotation/image-7.png (100%) rename {docs => legacy/docs}/imgs/annotation/jingling-1.png (100%) rename {docs => legacy/docs}/imgs/annotation/jingling-2.png (100%) rename {docs => legacy/docs}/imgs/annotation/jingling-3.png (100%) rename {docs => legacy/docs}/imgs/annotation/jingling-4.png (100%) rename {docs => legacy/docs}/imgs/annotation/jingling-5.png (100%) rename {docs => legacy/docs}/imgs/aug_method.png (100%) rename {docs => legacy/docs}/imgs/cityscapes.png (100%) rename {docs => legacy/docs}/imgs/cosine_decay_example.png (100%) rename {docs => legacy/docs}/imgs/data_aug_example.png (100%) rename {docs => legacy/docs}/imgs/data_aug_flip_mirror.png (100%) rename {docs => legacy/docs}/imgs/data_aug_flow.png (100%) rename {docs => legacy/docs}/imgs/deepglobe.png (100%) rename {docs => legacy/docs}/imgs/deeplabv3p.png (100%) rename {docs => legacy/docs}/imgs/dice.png (100%) rename {docs => legacy/docs}/imgs/dice2.png (100%) rename {docs => 
legacy/docs}/imgs/dice3.png (100%) rename {docs => legacy/docs}/imgs/fast-scnn.png (100%) rename {docs => legacy/docs}/imgs/file_list.png (100%) rename {docs => legacy/docs}/imgs/file_list2.png (100%) rename {docs => legacy/docs}/imgs/gn.png (100%) rename {docs => legacy/docs}/imgs/hrnet.png (100%) rename {docs => legacy/docs}/imgs/icnet.png (100%) rename {docs => legacy/docs}/imgs/loss_comparison.png (100%) rename {docs => legacy/docs}/imgs/lovasz-hinge-vis.png (100%) rename {docs => legacy/docs}/imgs/lovasz-hinge.png (100%) rename {docs => legacy/docs}/imgs/lovasz-softmax.png (100%) rename {docs => legacy/docs}/imgs/piecewise_decay_example.png (100%) rename {docs => legacy/docs}/imgs/poly_decay_example.png (100%) rename {docs => legacy/docs}/imgs/pspnet.png (100%) rename {docs => legacy/docs}/imgs/pspnet2.png (100%) rename {docs => legacy/docs}/imgs/qq_group2.png (100%) rename {docs => legacy/docs}/imgs/rangescale.png (100%) rename {docs => legacy/docs}/imgs/seg_news_icon.png (100%) rename {docs => legacy/docs}/imgs/softmax_loss.png (100%) rename {docs => legacy/docs}/imgs/unet.png (100%) rename {docs => legacy/docs}/imgs/usage_vis_demo.jpg (100%) rename {docs => legacy/docs}/imgs/visualdl_image.png (100%) rename {docs => legacy/docs}/imgs/visualdl_scalar.png (100%) rename {docs => legacy/docs}/imgs/warmup_with_poly_decay_example.png (100%) rename {docs => legacy/docs}/loss_select.md (100%) rename {docs => legacy/docs}/lovasz_loss.md (100%) rename {docs => legacy/docs}/model_export.md (100%) rename {docs => legacy/docs}/model_zoo.md (100%) rename {docs => legacy/docs}/models.md (100%) rename {docs => legacy/docs}/multiple_gpus_train_and_mixed_precision_train.md (100%) rename {docs => legacy/docs}/release_notes.md (100%) rename {docs => legacy/docs}/usage.md (100%) rename {pdseg => legacy/pdseg}/__init__.py (100%) rename {pdseg => legacy/pdseg}/check.py (100%) rename {pdseg => legacy/pdseg}/data_aug.py (100%) rename {pdseg => legacy/pdseg}/data_utils.py (100%) rename {pdseg => legacy/pdseg}/eval.py (100%) rename {pdseg => legacy/pdseg}/export_model.py (100%) rename {pdseg => legacy/pdseg}/export_serving_model.py (100%) rename {pdseg => legacy/pdseg}/loss.py (100%) rename {pdseg => legacy/pdseg}/lovasz_losses.py (100%) rename {pdseg => legacy/pdseg}/metrics.py (100%) rename {pdseg => legacy/pdseg}/models/__init__.py (100%) rename {pdseg => legacy/pdseg}/models/backbone/__init__.py (100%) rename {pdseg => legacy/pdseg}/models/backbone/mobilenet_v2.py (100%) rename {pdseg => legacy/pdseg}/models/backbone/mobilenet_v3.py (100%) rename {pdseg => legacy/pdseg}/models/backbone/resnet.py (100%) rename {pdseg => legacy/pdseg}/models/backbone/resnet_vd.py (100%) rename {pdseg => legacy/pdseg}/models/backbone/vgg.py (100%) rename {pdseg => legacy/pdseg}/models/backbone/xception.py (100%) rename {pdseg => legacy/pdseg}/models/libs/__init__.py (100%) rename {pdseg => legacy/pdseg}/models/libs/model_libs.py (100%) rename {pdseg => legacy/pdseg}/models/model_builder.py (100%) rename {pdseg => legacy/pdseg}/models/modeling/__init__.py (100%) rename {pdseg => legacy/pdseg}/models/modeling/deeplab.py (100%) rename {pdseg => legacy/pdseg}/models/modeling/fast_scnn.py (100%) rename {pdseg => legacy/pdseg}/models/modeling/hrnet.py (100%) rename {pdseg => legacy/pdseg}/models/modeling/icnet.py (100%) rename {pdseg => legacy/pdseg}/models/modeling/ocrnet.py (100%) rename {pdseg => legacy/pdseg}/models/modeling/pspnet.py (100%) rename {pdseg => legacy/pdseg}/models/modeling/unet.py (100%) rename {pdseg => 
legacy/pdseg}/reader.py (100%) rename {pdseg => legacy/pdseg}/solver.py (100%) rename {pdseg => legacy/pdseg}/tools/__init__.py (100%) rename {pdseg => legacy/pdseg}/tools/create_dataset_list.py (100%) rename {pdseg => legacy/pdseg}/tools/gray2pseudo_color.py (100%) rename {pdseg => legacy/pdseg}/tools/jingling2seg.py (100%) rename {pdseg => legacy/pdseg}/tools/labelme2seg.py (100%) rename {pdseg => legacy/pdseg}/train.py (100%) rename {pdseg => legacy/pdseg}/utils/__init__.py (100%) rename {pdseg => legacy/pdseg}/utils/collect.py (100%) rename {pdseg => legacy/pdseg}/utils/config.py (100%) rename {pdseg => legacy/pdseg}/utils/dist_utils.py (100%) rename {pdseg => legacy/pdseg}/utils/fp16_utils.py (100%) rename {pdseg => legacy/pdseg}/utils/load_model_utils.py (100%) rename {pdseg => legacy/pdseg}/utils/paddle_utils.py (100%) rename {pdseg => legacy/pdseg}/utils/timer.py (100%) rename {pdseg => legacy/pdseg}/vis.py (100%) rename {pretrained_model => legacy/pretrained_model}/download_model.py (100%) rename {dygraph => legacy}/requirements.txt (69%) rename {slim => legacy/slim}/distillation/README.md (100%) rename {slim => legacy/slim}/distillation/cityscape.yaml (100%) rename {slim => legacy/slim}/distillation/cityscape_teacher.yaml (100%) rename {slim => legacy/slim}/distillation/model_builder.py (100%) rename {slim => legacy/slim}/distillation/train_distill.py (100%) rename {slim => legacy/slim}/nas/README.md (100%) rename {slim => legacy/slim}/nas/deeplab.py (100%) rename {slim => legacy/slim}/nas/eval_nas.py (100%) rename {slim => legacy/slim}/nas/mobilenetv2_search_space.py (100%) rename {slim => legacy/slim}/nas/model_builder.py (100%) rename {slim => legacy/slim}/nas/train_nas.py (100%) rename {slim => legacy/slim}/prune/README.md (100%) rename {slim => legacy/slim}/prune/eval_prune.py (100%) rename {slim => legacy/slim}/prune/train_prune.py (100%) rename {slim => legacy/slim}/quantization/README.md (100%) rename {slim => legacy/slim}/quantization/deploy/README.md (100%) rename {slim => legacy/slim}/quantization/deploy/infer.py (100%) rename {slim => legacy/slim}/quantization/eval_quant.py (100%) rename {slim => legacy/slim}/quantization/export_model.py (100%) rename {slim => legacy/slim}/quantization/images/ConvertToInt8Pass.png (100%) rename {slim => legacy/slim}/quantization/images/FreezePass.png (100%) rename {slim => legacy/slim}/quantization/images/TransformForMobilePass.png (100%) rename {slim => legacy/slim}/quantization/images/TransformPass.png (100%) rename {slim => legacy/slim}/quantization/train_quant.py (100%) rename {test => legacy/test}/ci/check_code_style.sh (100%) rename {test => legacy/test}/ci/test_download_dataset.sh (100%) rename {test => legacy/test}/configs/deeplabv3p_xception65_cityscapes.yaml (100%) rename {test => legacy/test}/configs/unet_pet.yaml (100%) rename {test => legacy/test}/local_test_cityscapes.py (100%) rename {test => legacy/test}/local_test_pet.py (100%) rename {test => legacy/test}/test_utils.py (100%) rename {tutorial => legacy/tutorial}/finetune_deeplabv3plus.md (100%) rename {tutorial => legacy/tutorial}/finetune_fast_scnn.md (100%) rename {tutorial => legacy/tutorial}/finetune_hrnet.md (100%) rename {tutorial => legacy/tutorial}/finetune_icnet.md (100%) rename {tutorial => legacy/tutorial}/finetune_ocrnet.md (100%) rename {tutorial => legacy/tutorial}/finetune_pspnet.md (100%) rename {tutorial => legacy/tutorial}/finetune_unet.md (100%) rename {tutorial => legacy/tutorial}/imgs/optic.png (100%) rename {tutorial => 
legacy/tutorial}/imgs/optic_deeplab.png (100%) rename {tutorial => legacy/tutorial}/imgs/optic_hrnet.png (100%) rename {tutorial => legacy/tutorial}/imgs/optic_icnet.png (100%) rename {tutorial => legacy/tutorial}/imgs/optic_pspnet.png (100%) rename {tutorial => legacy/tutorial}/imgs/optic_unet.png (100%) rename {dygraph/paddleseg => paddleseg}/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/core/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/core/infer.py (100%) rename {dygraph/paddleseg => paddleseg}/core/predict.py (100%) rename {dygraph/paddleseg => paddleseg}/core/train.py (100%) rename {dygraph/paddleseg => paddleseg}/core/val.py (100%) rename {dygraph/paddleseg => paddleseg}/cvlibs/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/cvlibs/callbacks.py (100%) rename {dygraph/paddleseg => paddleseg}/cvlibs/config.py (100%) rename {dygraph/paddleseg => paddleseg}/cvlibs/manager.py (100%) rename {dygraph/paddleseg => paddleseg}/cvlibs/param_init.py (100%) rename {dygraph/paddleseg => paddleseg}/datasets/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/datasets/ade.py (100%) rename {dygraph/paddleseg => paddleseg}/datasets/cityscapes.py (100%) rename {dygraph/paddleseg => paddleseg}/datasets/dataset.py (100%) rename {dygraph/paddleseg => paddleseg}/datasets/optic_disc_seg.py (100%) rename {dygraph/paddleseg => paddleseg}/datasets/voc.py (100%) rename {dygraph/paddleseg => paddleseg}/models/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/models/ann.py (100%) rename {dygraph/paddleseg => paddleseg}/models/backbones/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/models/backbones/hrnet.py (100%) rename {dygraph/paddleseg => paddleseg}/models/backbones/mobilenetv3.py (100%) rename {dygraph/paddleseg => paddleseg}/models/backbones/resnet_vd.py (100%) rename {dygraph/paddleseg => paddleseg}/models/backbones/xception_deeplab.py (100%) rename {dygraph/paddleseg => paddleseg}/models/bisenet.py (100%) rename {dygraph/paddleseg => paddleseg}/models/danet.py (100%) rename {dygraph/paddleseg => paddleseg}/models/deeplab.py (100%) rename {dygraph/paddleseg => paddleseg}/models/fast_scnn.py (100%) rename {dygraph/paddleseg => paddleseg}/models/fcn.py (100%) rename {dygraph/paddleseg => paddleseg}/models/gcnet.py (100%) rename {dygraph/paddleseg => paddleseg}/models/layers/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/models/layers/activation.py (100%) rename {dygraph/paddleseg => paddleseg}/models/layers/layer_libs.py (100%) rename {dygraph/paddleseg => paddleseg}/models/layers/pyramid_pool.py (100%) rename {dygraph/paddleseg => paddleseg}/models/losses/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/models/losses/cross_entroy_loss.py (100%) rename {dygraph/paddleseg => paddleseg}/models/ocrnet.py (100%) rename {dygraph/paddleseg => paddleseg}/models/pspnet.py (100%) rename {dygraph/paddleseg => paddleseg}/models/unet.py (100%) rename {dygraph/paddleseg => paddleseg}/transforms/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/transforms/functional.py (100%) rename {dygraph/paddleseg => paddleseg}/transforms/transforms.py (100%) rename {dygraph/paddleseg => paddleseg}/utils/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/utils/download.py (100%) rename {dygraph/paddleseg => paddleseg}/utils/env/__init__.py (100%) rename {dygraph/paddleseg => paddleseg}/utils/env/seg_env.py (100%) rename {dygraph/paddleseg => paddleseg}/utils/env/sys_env.py (100%) rename {dygraph/paddleseg => 
paddleseg}/utils/logger.py (100%) rename {dygraph/paddleseg => paddleseg}/utils/metrics.py (100%) rename {dygraph/paddleseg => paddleseg}/utils/progbar.py (100%) rename {dygraph/paddleseg => paddleseg}/utils/timer.py (100%) rename {dygraph/paddleseg => paddleseg}/utils/utils.py (100%) rename {dygraph/paddleseg => paddleseg}/utils/visualize.py (100%) rename dygraph/predict.py => predict.py (100%) rename {dygraph/tools => tools}/convert_cityscapes.py (100%) rename {dygraph/tools => tools}/voc_augment.py (100%) rename dygraph/train.py => train.py (100%) rename dygraph/val.py => val.py (100%) diff --git a/README.md b/README.md index 963c23fc3a..0d3460bab8 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ English | [简体中文](README_CN.md) -# PaddleSeg +# PaddleSeg (Dynamic Graph) [![Build Status](https://travis-ci.org/PaddlePaddle/PaddleSeg.svg?branch=master)](https://travis-ci.org/PaddlePaddle/PaddleSeg) [![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) @@ -8,108 +8,78 @@ English | [简体中文](README_CN.md) ![python version](https://img.shields.io/badge/python-3.6+-orange.svg) ![support os](https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-yellow.svg) - *[2020-12-02] PaddleSeg has released the [dynamic graph](./dygraph) version, which supports PaddlePaddle 2.0rc. For the static graph, we only fix bugs without adding new features. See detailed [release notes](./docs/release_notes.md).* +Welcome to the dynamic graph version! PaddleSeg is the first development kit that supports PaddlePaddle 2.0. Currently, we provide an experimental version that gives developers a full-featured experience of the dynamic graph. In the near future, the dynamic graph version will become the default, and the static graph version will be moved to the "legacy" directory. -## Introduction +Full, detailed documentation and tutorials are coming soon. For now, we provide minimal tutorials that help you enjoy the strengths of the dynamic graph version. -PaddleSeg is an end-to-end image segmentation development kit based on PaddlePaddle, which aims to help developers in the whole process of training models, optimizing performance and inference speed, and deploying models. Currently PaddleSeg supports seven efficient segmentation models, including DeepLabv3+, U-Net, ICNet, PSPNet, HRNet, Fast-SCNN, and OCRNet, which are extensively used in both academia and industry. Enjoy your Seg journey! +## Model Zoo -![demo](./docs/imgs/cityscapes.png) +|Model\Backbone|ResNet50|ResNet101|HRNetw18|HRNetw48| +|-|-|-|-|-| +|[ANN](./configs/ann)|✔|✔||| +|[BiSeNetv2](./configs/bisenet)|-|-|-|-| +|[DANet](./configs/danet)|✔|✔||| +|[Deeplabv3](./configs/deeplabv3)|✔|✔||| +|[Deeplabv3P](./configs/deeplabv3p)|✔|✔||| +|[Fast-SCNN](./configs/fastscnn)|-|-|-|-| +|[FCN](./configs/fcn)|||✔|✔| +|[GCNet](./configs/gcnet)|✔|✔||| +|[OCRNet](./configs/ocrnet/)|||✔|✔| +|[PSPNet](./configs/pspnet)|✔|✔||| +|[UNet](./configs/unet)|-|-|-|-| -## Main Features +## Dataset -- **Practical Data Augmentation Techniques** - -PaddleSeg provides 10+ data augmentation techniques, which are developed from the product-level applications in Baidu. The techniques are able to help developers improve the generalization and robustness ability of their customized models. - -- **Modular Design** - -PaddleSeg supports seven popular segmentation models, including U-Net, DeepLabv3+, ICNet, PSPNet, HRNet, Fast-SCNN, and OCRNet.
Combing with different components, such as pre-trained models, adjustable backbone architectures and loss functions, developer can easily build an efficient segmentation model according to their practical performance requirements. - -- **High Performance** - -PaddleSeg supports the efficient acceleration strategies, such as multi-processing I/O operations, and multi-GPUs parallel training. Moreover, integrating GPU memory optimization techniques in the PaddlePaddle framework, PaddleSeg significantly reduces training overhead of the segmentation models, which helps developers complete the segmentation tasks in a high-efficient way. - -- **Industry-Level Deployment** - -PaddleSeg supports the industry-level deployment in both **server** and **mobile devices** with the high-performance inference engine and image processing ability, which helps developers achieve the high-performance deployment and integration of segmentation model efficiently. Particularly, using another paddle tool [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite), the segmentation models trained in PaddleSeg are able to be deployed on mobile/embedded devices quickly and easily. - -- **Rich Practical Cases** - -PaddleSeg provides rich practical cases in industry, such as human segmentation, mechanical meter segmentation, lane segmentation, remote sensing image segmentation, human parsing, and industry inspection, etc. The practical cases allow developers to get a closer look at the image segmentation area, and get more hand-on experiences on the real practice. +- [x] Cityscapes +- [x] Pascal VOC +- [x] ADE20K +- [ ] Pascal Context +- [ ] COCO stuff ## Installation -### 1. Install PaddlePaddle +1. Install PaddlePaddle System Requirements: -* PaddlePaddle >= 1.7.0 and < 2.0 -* Python >= 3.5+ +* PaddlePaddle >= 2.0.0rc +* Python >= 3.6+ -> Note: the above requirements are for the **static** graph version. If you intent to use the dynamic one, please refers to [here](./dygraph). +> Note: the above requirements are for the **dynamic** graph version. If you intend to use the static one, please refer to [here](../README.md). We highly recommend installing the GPU version of PaddlePaddle: segmentation models have a large memory overhead, and you may run out of memory when running them on CPU. -For more detailed installation tutorials, please refer to the official website of [PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick)。 +For more detailed installation tutorials, please refer to the official website of [PaddlePaddle](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-beta/install/index_cn.html). -### 2. Download PaddleSeg + +### Download PaddleSeg ``` git clone https://github.com/PaddlePaddle/PaddleSeg ``` -### 3. Install Dependencies +### Install Dependencies Install the Python dependencies via the following commands, and please make sure to execute them at least once in your branch. -``` -cd PaddleSeg +```shell +cd PaddleSeg/dygraph +export PYTHONPATH=`pwd` +# Run the following one on Windows +# set PYTHONPATH=%cd% pip install -r requirements.txt ``` -## Tutorials - -For a better understanding of PaddleSeg, we provide comprehensive tutorials to show the whole process of using PaddleSeg on model training, evaluation and deployment. Besides the basic usages of PaddleSeg, the design insights will be also mentioned in the tutorials.
- -### Quick Start - -* [PaddleSeg Start](./docs/usage.md) - -### Basic Usages - -* [Customized Data Preparation](./docs/data_prepare.md) -* [Scripts and Config Guide](./docs/config.md) -* [Data and Config Verification](./docs/check.md) -* [Segmentation Models](./docs/models.md) -* [Pretrained Models](./docs/model_zoo.md) -* [DeepLabv3+ Tutorial](./tutorial/finetune_deeplabv3plus.md) - -### Inference and Deployment - -* [Model Export](./docs/model_export.md) -* [Python Inference](./deploy/python/) -* [C++ Inference](./deploy/cpp/) -* [Paddle-Lite Mobile Inference & Deployment](./deploy/lite/) -* [PaddleServing Inference & Deployment](./deploy/paddle-serving) - - -### Advanced features - -* [Data Augmentation](./docs/data_aug.md) -* [Loss Functions](./docs/loss_select.md) -* [Practical Cases](./contrib) -* [Multiprocessing and Mixed-Precision Training](./docs/multiple_gpus_train_and_mixed_precision_train.md) -* Model Compression ([Quantization](./slim/quantization/README.md), [Distillation](./slim/distillation/README.md), [Pruning](./slim/prune/README.md), [NAS](./slim/nas/README.md)) - +## Quick Training +```shell +python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml +``` -### Online Tutorials +## Tutorials -We further provide a few online tutorials in Baidu AI Studio:[Get Started](https://aistudio.baidu.com/aistudio/projectdetail/100798), [U-Net](https://aistudio.baidu.com/aistudio/projectDetail/102889), [DeepLabv3+](https://aistudio.baidu.com/aistudio/projectDetail/226703), [Industry Inspection](https://aistudio.baidu.com/aistudio/projectdetail/184392), [HumanSeg](https://aistudio.baidu.com/aistudio/projectdetail/475345), [More](https://aistudio.baidu.com/aistudio/projectdetail/226710). +* [Get Started](./docs/quick_start.md) +* [Data Preparation](./docs/data_prepare.md) +* [Training Configuration](./configs/) +* [Add New Components](./docs/add_new_model.md) ## Feedbacks and Contact -* If your question is not answered properly in [FAQ](./docs/faq.md) or you have an idea on PaddleSeg, please report an issue via [Github Issues](https://github.com/PaddlePaddle/PaddleSeg/issues). +* The dynamic version is still under development; if you find any issue or have an idea for new features, please don't hesitate to contact us via [GitHub Issues](https://github.com/PaddlePaddle/PaddleSeg/issues). * PaddleSeg User Group (QQ): 850378321 or 793114768 - - -## Contributing - -All contributions and suggestions are welcomed. If you want to contribute to PaddleSeg,please summit an issue or create a pull request directly.
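After the quick-start training command above finishes, the usual next step is evaluating the saved weights with val.py, which this patch series moves to the repository root. Below is a minimal sketch, assuming val.py accepts --config and --model_path flags analogous to train.py, and that train.py wrote its checkpoints under output/ (both are assumptions about this revision rather than anything the patch confirms; point the paths at your actual checkpoint):

```shell
# Sanity check: with PYTHONPATH set as in the install step, both packages should import
python -c "import paddle, paddleseg; print(paddle.__version__)"

# Evaluate the quick-start weights; the flag names and checkpoint path below are
# assumptions about this revision, not confirmed by the patch itself
python val.py \
    --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml \
    --model_path output/iter_1000/model.pdparams
```

If the run succeeds, the script should print the evaluation metrics for the optic disc validation set.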
diff --git a/README_CN.md b/README_CN.md index c953b3cca0..084f5ad903 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,230 +1,74 @@ 简体中文 | [English](README.md) -# PaddleSeg +# PaddleSeg(动态图版本) + +本目录提供了PaddleSeg的动态图版本,目前已经完成了模型训练、评估、数据处理等功能,在未来的版本中,PaddleSeg将会默认启用动态图模式。目前该目录处于实验阶段,如果您在使用过程中遇到任何问题,请通过issue反馈给我们,我们将会在第一时间跟进处理。 + +## 模型库 + +|模型\骨干网络|ResNet50|ResNet101|HRNetw18|HRNetw48| +|-|-|-|-|-| +|[ANN](./configs/ann)|✔|✔||| +|[BiSeNetv2](./configs/bisenet)|-|-|-|-| +|[DANet](./configs/danet)|✔|✔||| +|[Deeplabv3](./configs/deeplabv3)|✔|✔||| +|[Deeplabv3P](./configs/deeplabv3p)|✔|✔||| +|[Fast-SCNN](./configs/fastscnn)|-|-|-|-| +|[FCN](./configs/fcn)|||✔|✔| +|[GCNet](./configs/gcnet)|✔|✔||| +|[OCRNet](./configs/ocrnet/)|||✔|✔| +|[PSPNet](./configs/pspnet)|✔|✔||| +|[UNet](./configs/unet)|-|-|-|-| + +## 数据集 + +- [x] Cityscapes +- [x] Pascal VOC +- [x] ADE20K +- [ ] Pascal Context +- [ ] COCO stuff -[![Build Status](https://travis-ci.org/PaddlePaddle/PaddleSeg.svg?branch=master)](https://travis-ci.org/PaddlePaddle/PaddleSeg) -[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) -[![Version](https://img.shields.io/github/release/PaddlePaddle/PaddleSeg.svg)](https://github.com/PaddlePaddle/PaddleSeg/releases) -![python version](https://img.shields.io/badge/python-3.6+-orange.svg) -![support os](https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-yellow.svg) - - *[2020-12-02] PaddleSeg已经发布了全新的[动态图版本](./dygraph),全面适配 PaddlePaddle 2.0rc, 静态图版本只作维护不再添加新功能,更多信息请查看详细[更新日志](./docs/release_notes.md).* - -## 简介 - -PaddleSeg是基于[PaddlePaddle](https://www.paddlepaddle.org.cn)开发的端到端图像分割开发套件,覆盖了DeepLabv3+, U-Net, ICNet, PSPNet, HRNet, Fast-SCNN等主流分割网络。通过模块化的设计,以配置化方式驱动模型组合,帮助开发者更便捷地完成从训练到部署的全流程图像分割应用。 - -- [特点](#特点) -- [安装](#安装) -- [使用教程](#使用教程) - - [快速入门](#快速入门) - - [基础功能](#基础功能) - - [预测部署](#预测部署) - - [高级功能](#高级功能) -- [在线体验](#在线体验) -- [FAQ](#FAQ) -- [交流与反馈](#交流与反馈) -- [更新日志](#更新日志) -- [贡献代码](#贡献代码) - -## 特点 - -- **丰富的数据增强** - -基于百度视觉技术部的实际业务经验,内置10+种数据增强策略,可结合实际业务场景进行定制组合,提升模型泛化能力和鲁棒性。 - -- **模块化设计** - -支持U-Net, DeepLabv3+, ICNet, PSPNet, HRNet, Fast-SCNN六种主流分割网络,结合预训练模型和可调节的骨干网络,满足不同性能和精度的要求;选择不同的损失函数如Dice Loss, Lovasz Loss等方式可以强化小目标和不均衡样本场景下的分割精度。 - -- **高性能** - -PaddleSeg支持多进程I/O、多卡并行等训练加速策略,结合飞桨核心框架的显存优化功能,可大幅度减少分割模型的显存开销,让开发者更低成本、更高效地完成图像分割训练。 - -- **工业级部署** - -全面提供**服务端**和**移动端**的工业级部署能力,依托飞桨高性能推理引擎和高性能图像处理实现,开发者可以轻松完成高性能的分割模型部署和集成。通过[Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite),可以在移动设备或者嵌入式设备上完成轻量级、高性能的人像分割模型部署。 +## 安装 -- **产业实践案例** +1. 安装PaddlePaddle -PaddleSeg提供丰富地产业实践案例,如[人像分割](./contrib/HumanSeg)、[工业表计检测](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib#%E5%B7%A5%E4%B8%9A%E8%A1%A8%E7%9B%98%E5%88%86%E5%89%B2)、[遥感分割](./contrib/RemoteSensing)、[人体解析](contrib/ACE2P),[工业质检](https://aistudio.baidu.com/aistudio/projectdetail/184392)等产业实践案例,助力开发者更便捷地落地图像分割技术。 +版本要求 -## 安装 +* PaddlePaddle >= 2.0.0rc -### 1. 安装PaddlePaddle +* Python >= 3.6+ -版本要求 -* PaddlePaddle >= 1.7.0 and < 2.0 -* Python >= 3.5+ +由于图像分割模型计算开销大,推荐在GPU版本的PaddlePaddle下使用PaddleSeg。推荐安装CUDA 10.0以上版本的环境。 -由于图像分割模型计算开销大,推荐在GPU版本的PaddlePaddle下使用PaddleSeg. -安装教程请见[PaddlePaddle官网](https://www.paddlepaddle.org.cn/install/quick)。 +安装教程请见[PaddlePaddle官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-beta/install/index_cn.html)。 -### 2. 下载PaddleSeg代码 -``` +2. 下载PaddleSeg代码 +```shell git clone https://github.com/PaddlePaddle/PaddleSeg ``` -### 3. 安装PaddleSeg依赖 +3. 
安装PaddleSeg依赖 通过以下命令安装python包依赖,请确保在该分支上至少执行过一次以下命令: -``` -cd PaddleSeg -pip install -r requirements.txt -``` - -## 使用教程 - -我们提供了一系列的使用教程,来说明如何使用PaddleSeg完成语义分割模型的训练、评估、部署。 - -这一系列的文档被分为**快速入门**、**基础功能**、**预测部署**、**高级功能**四个部分,四个教程由浅至深地介绍PaddleSeg的设计思路和使用方法。 - -### 快速入门 - -* [PaddleSeg快速入门](./docs/usage.md) - -### 基础功能 - -* [自定义数据的标注与准备](./docs/data_prepare.md) -* [脚本使用和配置说明](./docs/config.md) -* [数据和配置校验](./docs/check.md) -* [分割模型介绍](./docs/models.md) -* [预训练模型下载](./docs/model_zoo.md) -* [DeepLabv3+模型使用教程](./tutorial/finetune_deeplabv3plus.md) -* [U-Net模型使用教程](./tutorial/finetune_unet.md) -* [ICNet模型使用教程](./tutorial/finetune_icnet.md) -* [PSPNet模型使用教程](./tutorial/finetune_pspnet.md) -* [HRNet模型使用教程](./tutorial/finetune_hrnet.md) -* [Fast-SCNN模型使用教程](./tutorial/finetune_fast_scnn.md) -* [OCRNet模型使用教程](./tutorial/finetune_ocrnet.md) - -### 预测部署 - -* [模型导出](./docs/model_export.md) -* [Python预测](./deploy/python/) -* [C++预测](./deploy/cpp/) -* [Paddle-Lite移动端预测部署](./deploy/lite/) -* [PaddleServing预测部署](./deploy/paddle-serving) - - -### 高级功能 - -* [PaddleSeg的数据增强](./docs/data_aug.md) -* [PaddleSeg的loss选择](./docs/loss_select.md) -* [PaddleSeg产业实践](./contrib) -* [多进程训练和混合精度训练](./docs/multiple_gpus_train_and_mixed_precision_train.md) -* 使用PaddleSlim进行分割模型压缩([量化](./slim/quantization/README.md), [蒸馏](./slim/distillation/README.md), [剪枝](./slim/prune/README.md), [搜索](./slim/nas/README.md)) -## 在线体验 - -我们在AI Studio平台上提供了在线体验的教程,欢迎体验: -|在线教程|链接| -|-|-| -|快速开始|[点击体验](https://aistudio.baidu.com/aistudio/projectdetail/100798)| -|U-Net图像分割|[点击体验](https://aistudio.baidu.com/aistudio/projectDetail/102889)| -|DeepLabv3+图像分割|[点击体验](https://aistudio.baidu.com/aistudio/projectDetail/226703)| -|工业质检(零件瑕疵检测)|[点击体验](https://aistudio.baidu.com/aistudio/projectdetail/184392)| -|人像分割|[点击体验](https://aistudio.baidu.com/aistudio/projectdetail/475345)| -|PaddleSeg特色垂类模型|[点击体验](https://aistudio.baidu.com/aistudio/projectdetail/226710)| -## FAQ - -#### Q: 安装requirements.txt指定的依赖包时,部分包提示找不到? - -A: 可能是pip源的问题,这种情况下建议切换为官方源,或者通过`pip install -r requirements.txt -i `指定其他源地址。 - -#### Q:图像分割的数据增强如何配置,Unpadding, StepScaling, RangeScaling的原理是什么? - -A: 更详细数据增强文档可以参考[数据增强](./docs/data_aug.md) - -#### Q: 训练时因为某些原因中断了,如何恢复训练? - -A: 启动训练脚本时通过命令行覆盖TRAIN.RESUME_MODEL_DIR配置为模型checkpoint目录即可, 以下代码示例第100轮重新恢复训练: -``` -python pdseg/train.py --cfg xxx.yaml TRAIN.RESUME_MODEL_DIR /PATH/TO/MODEL_CKPT/100 +```shell +cd PaddleSeg/dygraph +export PYTHONPATH=`pwd` +# windows下请执行以下命令 +# set PYTHONPATH=%cd% +pip install -r requirements.txt ``` -#### Q: 预测时图片过大,导致显存不足如何处理? - -A: 降低Batch size,使用Group Norm策略;请注意训练过程中当`DEFAULT_NORM_TYPE`选择`bn`时,为了Batch Norm计算稳定性,batch size需要满足>=2 - - -## 交流与反馈 -* 欢迎您通过[Github Issues](https://github.com/PaddlePaddle/PaddleSeg/issues)来提交问题、报告与建议 -* 微信公众号:飞桨PaddlePaddle -* QQ群: 703252161 - -
- [二维码图片:微信公众号 / 官方技术交流QQ群]
- -## 更新日志 - -* 2020.10.28 - - **`v0.7.0`** - * 全面支持Paddle2.0-rc动态图模式,推出PaddleSeg[动态图体验版](./dygraph/) - * 发布大量动态图模型,支持11个分割模型,4个骨干网络,3个数据集: - * 分割模型:ANN, BiSeNetV2, DANet, DeeplabV3, DeeplabV3+, FCN, FastSCNN, GCNet, OCRNet, PSPNet, UNet - * 骨干网络:ResNet, HRNet, MobileNetV3, Xception - * 数据集:Cityscapes, ADE20K, Pascal VOC - - * 提供高精度骨干网络预训练模型以及基于Cityscapes数据集的语义分割[预训练模型](./dygraph/configs/)。Cityscapes精度超过**82%**。 - - -* 2020.08.31 - - **`v0.6.0`** - * 丰富Deeplabv3p网络结构,新增ResNet-vd、MobileNetv3两种backbone,满足高性能与高精度场景,并提供基于Cityscapes和ImageNet的[预训练模型](./docs/model_zoo.md)4个。 - * 新增高精度分割模型OCRNet,支持以HRNet作为backbone,提供基于Cityscapes的[预训练模型](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/docs/model_zoo.md#cityscapes%E9%A2%84%E8%AE%AD%E7%BB%83%E6%A8%A1%E5%9E%8B),mIoU超过80%。 - * 新增proposal free的实例分割模型[Spatial Embedding](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib/SpatialEmbeddings),性能与精度均超越MaskRCNN。提供了基于kitti的预训练模型。 - -* 2020.05.12 - - **`v0.5.0`** - * 全面升级[HumanSeg人像分割模型](./contrib/HumanSeg),新增超轻量级人像分割模型HumanSeg-lite支持移动端实时人像分割处理,并提供基于光流的视频分割后处理提升分割流畅性。 - * 新增[气象遥感分割方案](./contrib/RemoteSensing),支持积雪识别、云检测等气象遥感场景。 - * 新增[Lovasz Loss](docs/lovasz_loss.md),解决数据类别不均衡问题。 - * 使用VisualDL 2.0作为训练可视化工具 - -* 2020.02.25 - - **`v0.4.0`** - * 新增适用于实时场景且不需要预训练模型的分割网络Fast-SCNN,提供基于Cityscapes的[预训练模型](./docs/model_zoo.md)1个 - * 新增LaneNet车道线检测网络,提供[预训练模型](https://github.com/PaddlePaddle/PaddleSeg/tree/release/v0.4.0/contrib/LaneNet#%E4%B8%83-%E5%8F%AF%E8%A7%86%E5%8C%96)一个 - * 新增基于PaddleSlim的分割库压缩策略([量化](./slim/quantization/README.md), [蒸馏](./slim/distillation/README.md), [剪枝](./slim/prune/README.md), [搜索](./slim/nas/README.md)) - - -* 2019.12.15 - - **`v0.3.0`** - * 新增HRNet分割网络,提供基于cityscapes和ImageNet的[预训练模型](./docs/model_zoo.md)8个 - * 支持使用[伪彩色标签](./docs/data_prepare.md#%E7%81%B0%E5%BA%A6%E6%A0%87%E6%B3%A8vs%E4%BC%AA%E5%BD%A9%E8%89%B2%E6%A0%87%E6%B3%A8)进行训练/评估/预测,提升训练体验,并提供将灰度标注图转为伪彩色标注图的脚本 - * 新增[学习率warmup](./docs/configs/solver_group.md#lr_warmup)功能,支持与不同的学习率Decay策略配合使用 - * 新增图像归一化操作的GPU化实现,进一步提升预测速度。 - * 新增Python部署方案,更低成本完成工业级部署。 - * 新增Paddle-Lite移动端部署方案,支持人像分割模型的移动端部署。 - * 新增不同分割模型的预测[性能数据Benchmark](./deploy/python/docs/PaddleSeg_Infer_Benchmark.md), 便于开发者提供模型选型性能参考。 - - -* 2019.11.04 - - **`v0.2.0`** - * 新增PSPNet分割网络,提供基于COCO和cityscapes数据集的[预训练模型](./docs/model_zoo.md)4个。 - * 新增Dice Loss、BCE Loss以及组合Loss配置,支持样本不均衡场景下的[模型优化](./docs/loss_select.md)。 - * 支持[FP16混合精度训练](./docs/multiple_gpus_train_and_mixed_precision_train.md)以及动态Loss Scaling,在不损耗精度的情况下,训练速度提升30%+。 - * 支持[PaddlePaddle多卡多进程训练](./docs/multiple_gpus_train_and_mixed_precision_train.md),多卡训练时训练速度提升15%+。 - * 发布基于UNet的[工业标记表盘分割模型](./contrib#%E5%B7%A5%E4%B8%9A%E7%94%A8%E8%A1%A8%E5%88%86%E5%89%B2)。 - -* 2019.09.10 - - **`v0.1.0`** - * PaddleSeg分割库初始版本发布,包含DeepLabv3+, U-Net, ICNet三类分割模型, 其中DeepLabv3+支持Xception, MobileNet v2两种可调节的骨干网络。 - * CVPR19 LIP人体部件分割比赛冠军预测模型发布[ACE2P](./contrib/ACE2P)。 - * 预置基于DeepLabv3+网络的[人像分割](./contrib/HumanSeg/)和[车道线分割](./contrib/RoadLine)预测模型发布。 - -
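在执行完上面的安装步骤之后、运行训练之前,可以先做一个简单的导入自检,确认 paddle 与 paddleseg 均可正常加载。下面只是一个示意写法,假设 PYTHONPATH 已按上文设置好:

```shell
# 自检:确认 paddle 与 paddleseg 均可正常导入(假设 PYTHONPATH 已按上文设置)
python -c "import paddle, paddleseg; print(paddle.__version__)"
```

若能正常打印出PaddlePaddle的版本号,说明环境安装成功,可以继续执行下面的训练命令。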
+## 训练 +```shell +python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml +``` -## 贡献代码 +## 使用教程 -我们非常欢迎您为PaddleSeg贡献代码或者提供使用建议。如果您可以修复某个issue或者增加一个新功能,欢迎给我们提交Pull Requests. +* [快速入门](./docs/quick_start.md) +* [数据集准备](./docs/data_prepare.md) +* [配置项](./configs/) +* [Add New Components](./docs/add_new_model.md) diff --git a/dygraph/benchmark/deeplabv3p.yml b/benchmark/deeplabv3p.yml similarity index 100% rename from dygraph/benchmark/deeplabv3p.yml rename to benchmark/deeplabv3p.yml diff --git a/dygraph/benchmark/hrnet.yml b/benchmark/hrnet.yml similarity index 100% rename from dygraph/benchmark/hrnet.yml rename to benchmark/hrnet.yml diff --git a/dygraph/configs/README.md b/configs/README.md similarity index 100% rename from dygraph/configs/README.md rename to configs/README.md diff --git a/dygraph/configs/_base_/ade20k.yml b/configs/_base_/ade20k.yml similarity index 100% rename from dygraph/configs/_base_/ade20k.yml rename to configs/_base_/ade20k.yml diff --git a/dygraph/configs/_base_/cityscapes.yml b/configs/_base_/cityscapes.yml similarity index 100% rename from dygraph/configs/_base_/cityscapes.yml rename to configs/_base_/cityscapes.yml diff --git a/dygraph/configs/_base_/cityscapes_1024x1024.yml b/configs/_base_/cityscapes_1024x1024.yml similarity index 100% rename from dygraph/configs/_base_/cityscapes_1024x1024.yml rename to configs/_base_/cityscapes_1024x1024.yml diff --git a/dygraph/configs/_base_/cityscapes_769x769.yml b/configs/_base_/cityscapes_769x769.yml similarity index 100% rename from dygraph/configs/_base_/cityscapes_769x769.yml rename to configs/_base_/cityscapes_769x769.yml diff --git a/dygraph/configs/_base_/pascal_voc12.yml b/configs/_base_/pascal_voc12.yml similarity index 100% rename from dygraph/configs/_base_/pascal_voc12.yml rename to configs/_base_/pascal_voc12.yml diff --git a/dygraph/configs/_base_/pascal_voc12aug.yml b/configs/_base_/pascal_voc12aug.yml similarity index 100% rename from dygraph/configs/_base_/pascal_voc12aug.yml rename to configs/_base_/pascal_voc12aug.yml diff --git a/dygraph/configs/ann/README.md b/configs/ann/README.md similarity index 100% rename from dygraph/configs/ann/README.md rename to configs/ann/README.md diff --git a/dygraph/configs/ann/ann_resnet101_os8_cityscapes_1024x512_80k.yml b/configs/ann/ann_resnet101_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/ann/ann_resnet101_os8_cityscapes_1024x512_80k.yml rename to configs/ann/ann_resnet101_os8_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/ann/ann_resnet101_os8_voc12aug_512x512_40k.yml b/configs/ann/ann_resnet101_os8_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/ann/ann_resnet101_os8_voc12aug_512x512_40k.yml rename to configs/ann/ann_resnet101_os8_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/ann/ann_resnet50_os8_cityscapes_1024x512_80k.yml b/configs/ann/ann_resnet50_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/ann/ann_resnet50_os8_cityscapes_1024x512_80k.yml rename to configs/ann/ann_resnet50_os8_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/ann/ann_resnet50_os8_voc12aug_512x512_40k.yml b/configs/ann/ann_resnet50_os8_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/ann/ann_resnet50_os8_voc12aug_512x512_40k.yml rename to configs/ann/ann_resnet50_os8_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/bisenet/README.md b/configs/bisenet/README.md similarity index 100% rename from 
dygraph/configs/bisenet/README.md rename to configs/bisenet/README.md diff --git a/dygraph/configs/bisenet/bisenet_cityscapes_1024x1024_160k.yml b/configs/bisenet/bisenet_cityscapes_1024x1024_160k.yml similarity index 100% rename from dygraph/configs/bisenet/bisenet_cityscapes_1024x1024_160k.yml rename to configs/bisenet/bisenet_cityscapes_1024x1024_160k.yml diff --git a/dygraph/configs/danet/README.md b/configs/danet/README.md similarity index 100% rename from dygraph/configs/danet/README.md rename to configs/danet/README.md diff --git a/dygraph/configs/danet/danet_resnet101_os8_cityscapes_1024x512_80k.yml b/configs/danet/danet_resnet101_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/danet/danet_resnet101_os8_cityscapes_1024x512_80k.yml rename to configs/danet/danet_resnet101_os8_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/danet/danet_resnet50_os8_cityscapes_1024x512_80k.yml b/configs/danet/danet_resnet50_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/danet/danet_resnet50_os8_cityscapes_1024x512_80k.yml rename to configs/danet/danet_resnet50_os8_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/deeplabv3/README.md b/configs/deeplabv3/README.md similarity index 100% rename from dygraph/configs/deeplabv3/README.md rename to configs/deeplabv3/README.md diff --git a/dygraph/configs/deeplabv3/deeplabv3_resnet101_os8_cityscapes_1024x512_80k.yml b/configs/deeplabv3/deeplabv3_resnet101_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/deeplabv3/deeplabv3_resnet101_os8_cityscapes_1024x512_80k.yml rename to configs/deeplabv3/deeplabv3_resnet101_os8_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/deeplabv3/deeplabv3_resnet101_os8_voc12aug_512x512_40k.yml b/configs/deeplabv3/deeplabv3_resnet101_os8_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/deeplabv3/deeplabv3_resnet101_os8_voc12aug_512x512_40k.yml rename to configs/deeplabv3/deeplabv3_resnet101_os8_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/deeplabv3/deeplabv3_resnet50_os8_cityscapes_1024x512_80k.yml b/configs/deeplabv3/deeplabv3_resnet50_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/deeplabv3/deeplabv3_resnet50_os8_cityscapes_1024x512_80k.yml rename to configs/deeplabv3/deeplabv3_resnet50_os8_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/deeplabv3/deeplabv3_resnet50_os8_voc12aug_512x512_40k.yml b/configs/deeplabv3/deeplabv3_resnet50_os8_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/deeplabv3/deeplabv3_resnet50_os8_voc12aug_512x512_40k.yml rename to configs/deeplabv3/deeplabv3_resnet50_os8_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/deeplabv3p/README.md b/configs/deeplabv3p/README.md similarity index 100% rename from dygraph/configs/deeplabv3p/README.md rename to configs/deeplabv3p/README.md diff --git a/dygraph/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_1024x512_80k.yml b/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_1024x512_80k.yml rename to configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml b/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml similarity index 100% rename from 
dygraph/configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml rename to configs/deeplabv3p/deeplabv3p_resnet101_os8_cityscapes_769x769_80k.yml diff --git a/dygraph/configs/deeplabv3p/deeplabv3p_resnet101_os8_voc12aug_512x512_40k.yml b/configs/deeplabv3p/deeplabv3p_resnet101_os8_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/deeplabv3p/deeplabv3p_resnet101_os8_voc12aug_512x512_40k.yml rename to configs/deeplabv3p/deeplabv3p_resnet101_os8_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/deeplabv3p/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k.yml b/configs/deeplabv3p/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/deeplabv3p/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k.yml rename to configs/deeplabv3p/deeplabv3p_resnet50_os8_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml b/configs/deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml rename to configs/deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/fastscnn/README.md b/configs/fastscnn/README.md similarity index 100% rename from dygraph/configs/fastscnn/README.md rename to configs/fastscnn/README.md diff --git a/dygraph/configs/fastscnn/fastscnn_cityscapes_1024x1024_160k.yml b/configs/fastscnn/fastscnn_cityscapes_1024x1024_160k.yml similarity index 100% rename from dygraph/configs/fastscnn/fastscnn_cityscapes_1024x1024_160k.yml rename to configs/fastscnn/fastscnn_cityscapes_1024x1024_160k.yml diff --git a/dygraph/configs/fcn/README.md b/configs/fcn/README.md similarity index 100% rename from dygraph/configs/fcn/README.md rename to configs/fcn/README.md diff --git a/dygraph/configs/fcn/fcn_hrnetw18_cityscapes_1024x512_80k.yml b/configs/fcn/fcn_hrnetw18_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/fcn/fcn_hrnetw18_cityscapes_1024x512_80k.yml rename to configs/fcn/fcn_hrnetw18_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml b/configs/fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml rename to configs/fcn/fcn_hrnetw18_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/fcn/fcn_hrnetw48_cityscapes_1024x512_80k.yml b/configs/fcn/fcn_hrnetw48_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/fcn/fcn_hrnetw48_cityscapes_1024x512_80k.yml rename to configs/fcn/fcn_hrnetw48_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml b/configs/fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml rename to configs/fcn/fcn_hrnetw48_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/gcnet/README.md b/configs/gcnet/README.md similarity index 100% rename from dygraph/configs/gcnet/README.md rename to configs/gcnet/README.md diff --git a/dygraph/configs/gcnet/gcnet_resnet101_os8_cityscapes_1024x512_80k.yml b/configs/gcnet/gcnet_resnet101_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/gcnet/gcnet_resnet101_os8_cityscapes_1024x512_80k.yml rename to configs/gcnet/gcnet_resnet101_os8_cityscapes_1024x512_80k.yml diff --git 
a/dygraph/configs/gcnet/gcnet_resnet101_os8_voc12aug_512x512_40k.yml b/configs/gcnet/gcnet_resnet101_os8_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/gcnet/gcnet_resnet101_os8_voc12aug_512x512_40k.yml rename to configs/gcnet/gcnet_resnet101_os8_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/gcnet/gcnet_resnet50_os8_cityscapes_1024x512_80k.yml b/configs/gcnet/gcnet_resnet50_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/gcnet/gcnet_resnet50_os8_cityscapes_1024x512_80k.yml rename to configs/gcnet/gcnet_resnet50_os8_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/gcnet/gcnet_resnet50_os8_voc12aug_512x512_40k.yml b/configs/gcnet/gcnet_resnet50_os8_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/gcnet/gcnet_resnet50_os8_voc12aug_512x512_40k.yml rename to configs/gcnet/gcnet_resnet50_os8_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/ocrnet/README.md b/configs/ocrnet/README.md similarity index 100% rename from dygraph/configs/ocrnet/README.md rename to configs/ocrnet/README.md diff --git a/dygraph/configs/ocrnet/ocrnet_hrnetw18_cityscapes_1024x512_160k.yml b/configs/ocrnet/ocrnet_hrnetw18_cityscapes_1024x512_160k.yml similarity index 100% rename from dygraph/configs/ocrnet/ocrnet_hrnetw18_cityscapes_1024x512_160k.yml rename to configs/ocrnet/ocrnet_hrnetw18_cityscapes_1024x512_160k.yml diff --git a/dygraph/configs/ocrnet/ocrnet_hrnetw18_voc12aug_512x512_40k.yml b/configs/ocrnet/ocrnet_hrnetw18_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/ocrnet/ocrnet_hrnetw18_voc12aug_512x512_40k.yml rename to configs/ocrnet/ocrnet_hrnetw18_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/ocrnet/ocrnet_hrnetw48_cityscapes_1024x512_160k.yml b/configs/ocrnet/ocrnet_hrnetw48_cityscapes_1024x512_160k.yml similarity index 100% rename from dygraph/configs/ocrnet/ocrnet_hrnetw48_cityscapes_1024x512_160k.yml rename to configs/ocrnet/ocrnet_hrnetw48_cityscapes_1024x512_160k.yml diff --git a/dygraph/configs/ocrnet/ocrnet_hrnetw48_voc12aug_512x512_40k.yml b/configs/ocrnet/ocrnet_hrnetw48_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/ocrnet/ocrnet_hrnetw48_voc12aug_512x512_40k.yml rename to configs/ocrnet/ocrnet_hrnetw48_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/pspnet/README.md b/configs/pspnet/README.md similarity index 100% rename from dygraph/configs/pspnet/README.md rename to configs/pspnet/README.md diff --git a/dygraph/configs/pspnet/pspnet_resnet101_os8_cityscapes_1024x512_80k.yml b/configs/pspnet/pspnet_resnet101_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/pspnet/pspnet_resnet101_os8_cityscapes_1024x512_80k.yml rename to configs/pspnet/pspnet_resnet101_os8_cityscapes_1024x512_80k.yml diff --git a/dygraph/configs/pspnet/pspnet_resnet101_os8_voc12aug_512x512_40k.yml b/configs/pspnet/pspnet_resnet101_os8_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/pspnet/pspnet_resnet101_os8_voc12aug_512x512_40k.yml rename to configs/pspnet/pspnet_resnet101_os8_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/pspnet/pspnet_resnet50_os8_cityscapes_1024x512_80k.yml b/configs/pspnet/pspnet_resnet50_os8_cityscapes_1024x512_80k.yml similarity index 100% rename from dygraph/configs/pspnet/pspnet_resnet50_os8_cityscapes_1024x512_80k.yml rename to configs/pspnet/pspnet_resnet50_os8_cityscapes_1024x512_80k.yml diff --git 
a/dygraph/configs/pspnet/pspnet_resnet50_os8_voc12aug_512x512_40k.yml b/configs/pspnet/pspnet_resnet50_os8_voc12aug_512x512_40k.yml similarity index 100% rename from dygraph/configs/pspnet/pspnet_resnet50_os8_voc12aug_512x512_40k.yml rename to configs/pspnet/pspnet_resnet50_os8_voc12aug_512x512_40k.yml diff --git a/dygraph/configs/quick_start/bisenet_optic_disc_512x512_1k.yml b/configs/quick_start/bisenet_optic_disc_512x512_1k.yml similarity index 100% rename from dygraph/configs/quick_start/bisenet_optic_disc_512x512_1k.yml rename to configs/quick_start/bisenet_optic_disc_512x512_1k.yml diff --git a/dygraph/configs/unet/README.md b/configs/unet/README.md similarity index 100% rename from dygraph/configs/unet/README.md rename to configs/unet/README.md diff --git a/dygraph/configs/unet/unet_cityscapes_1024x512_160k.yml b/configs/unet/unet_cityscapes_1024x512_160k.yml similarity index 100% rename from dygraph/configs/unet/unet_cityscapes_1024x512_160k.yml rename to configs/unet/unet_cityscapes_1024x512_160k.yml diff --git a/dygraph/contrib/remote_sensing/README.md b/contrib/remote_sensing/README.md similarity index 100% rename from dygraph/contrib/remote_sensing/README.md rename to contrib/remote_sensing/README.md diff --git a/dygraph/contrib/remote_sensing/fcn_hrnetw48_ccf_256x256_160k.yml b/contrib/remote_sensing/fcn_hrnetw48_ccf_256x256_160k.yml similarity index 100% rename from dygraph/contrib/remote_sensing/fcn_hrnetw48_ccf_256x256_160k.yml rename to contrib/remote_sensing/fcn_hrnetw48_ccf_256x256_160k.yml diff --git a/dygraph/contrib/remote_sensing/ocrnet_hrnetw48_ccf_256x256_80k.yml b/contrib/remote_sensing/ocrnet_hrnetw48_ccf_256x256_80k.yml similarity index 100% rename from dygraph/contrib/remote_sensing/ocrnet_hrnetw48_ccf_256x256_80k.yml rename to contrib/remote_sensing/ocrnet_hrnetw48_ccf_256x256_80k.yml diff --git a/dygraph/docs/add_new_model.md b/docs/add_new_model.md similarity index 100% rename from dygraph/docs/add_new_model.md rename to docs/add_new_model.md diff --git a/dygraph/docs/apis/README.md b/docs/apis/README.md similarity index 100% rename from dygraph/docs/apis/README.md rename to docs/apis/README.md diff --git a/dygraph/docs/apis/backbones.md b/docs/apis/backbones.md similarity index 100% rename from dygraph/docs/apis/backbones.md rename to docs/apis/backbones.md diff --git a/dygraph/docs/apis/core.md b/docs/apis/core.md similarity index 100% rename from dygraph/docs/apis/core.md rename to docs/apis/core.md diff --git a/dygraph/docs/apis/cvlibs.md b/docs/apis/cvlibs.md similarity index 100% rename from dygraph/docs/apis/cvlibs.md rename to docs/apis/cvlibs.md diff --git a/dygraph/docs/apis/datasets.md b/docs/apis/datasets.md similarity index 100% rename from dygraph/docs/apis/datasets.md rename to docs/apis/datasets.md diff --git a/dygraph/docs/apis/models.md b/docs/apis/models.md similarity index 100% rename from dygraph/docs/apis/models.md rename to docs/apis/models.md diff --git a/dygraph/docs/apis/transforms.md b/docs/apis/transforms.md similarity index 100% rename from dygraph/docs/apis/transforms.md rename to docs/apis/transforms.md diff --git a/dygraph/docs/apis/utils.md b/docs/apis/utils.md similarity index 100% rename from dygraph/docs/apis/utils.md rename to docs/apis/utils.md diff --git a/docs/data_prepare.md b/docs/data_prepare.md index de1fd7965c..b05665301f 100644 --- a/docs/data_prepare.md +++ b/docs/data_prepare.md @@ -1,175 +1,100 @@ -# PaddleSeg 数据准备 - -## 数据标注 - -### 标注协议 -PaddleSeg采用单通道的标注图片,每一种像素值代表一种类别,像素标注类别需要从0开始递增,例如0,1,2,3表示有4种类别。 - 
-**NOTE:** 标注图像请使用PNG无损压缩格式的图片。标注类别最多为256类。
-
-### 灰度标注vs伪彩色标注
-一般的分割库使用单通道灰度图作为标注图片,往往显示出来是全黑的效果。灰度标注图的弊端:
-1. 对图像标注后,无法直接观察标注是否正确。
-2. 模型测试过程无法直接判断分割的实际效果。
-
-**PaddleSeg支持伪彩色图作为标注图片,在原来的单通道图片基础上,注入调色板。在基本不增加图片大小的基础上,却可以显示出彩色的效果。**
-
-同时PaddleSeg也兼容灰度图标注,用户原来的灰度数据集可以不做修改,直接使用。
-![](./imgs/annotation/image-11.png)
-
-### 灰度标注转换为伪彩色标注
-如果用户需要转换成伪彩色标注图,可使用我们的转换工具。适用于以下两种常见的情况:
-1. 如果您希望将指定目录下的所有灰度标注图转换为伪彩色标注图,则执行以下命令,指定灰度标注所在的目录即可。
-```buildoutcfg
-python pdseg/tools/gray2pseudo_color.py <dir_or_file> <output_dir>
-```
-
-|参数|用途|
-|-|-|
-|dir_or_file|指定灰度标注所在目录|
-|output_dir|彩色标注图片的输出目录|
-
-2. 如果您仅希望将指定数据集中的部分灰度标注图转换为伪彩色标注图,则执行以下命令,需要已有文件列表,按列表读取指定图片。
-```buildoutcfg
-python pdseg/tools/gray2pseudo_color.py <dir_or_file> <output_dir> --dataset_dir <dataset_dir> --file_separator <file_separator>
-```
-|参数|用途|
-|-|-|
-|dir_or_file|指定文件列表路径|
-|output_dir|彩色标注图片的输出目录|
-|--dataset_dir|数据集所在根目录|
-|--file_separator|文件列表分隔符|
-
-### 标注教程
-用户需预先采集好用于训练、评估和测试的图片,然后使用数据标注工具完成数据标注。
-
-PaddleSeg已支持2种标注工具:LabelMe、精灵数据标注工具。标注教程如下:
-
-- [LabelMe标注教程](annotation/labelme2seg.md)
-- [精灵数据标注工具教程](annotation/jingling2seg.md)
-
-
-## 文件列表
-
-### 文件列表规范
-
-PaddleSeg采用通用的文件列表方式组织训练集、验证集和测试集。在训练、评估、可视化过程前必须准备好相应的文件列表。
-
-文件列表组织形式如下
-```
-原始图片路径 [SEP] 标注图片路径
-```
-
-其中`[SEP]`是文件路径分割符,可以在`DATASET.SEPARATOR`配置项中修改, 默认为空格。文件列表的路径以数据集根目录作为相对路径起始点,`DATASET.DATA_DIR`即为数据集根目录。
-
-如下图所示,左边为原图的图片路径,右边为图片对应的标注路径。
-
-![cityscapes_filelist](./imgs/file_list.png)
-
-**注意事项**
-
-* 务必保证分隔符在文件列表中每行只存在一次, 如文件名中存在空格,请使用"|"等文件名不可用字符进行切分
-
-* 文件列表请使用**UTF-8**格式保存, PaddleSeg默认使用UTF-8编码读取file_list文件
-
-若数据集缺少标注图片,则文件列表不用包含分隔符和标注图片路径,如下图所示。
-
-![cityscapes_filelist](./imgs/file_list2.png)
-
-**注意事项**
-
-此时的文件列表仅可在调用`pdseg/vis.py`进行可视化展示时使用,
-即仅可在`DATASET.TEST_FILE_LIST`和`DATASET.VIS_FILE_LIST`配置项中使用。
-不可在`DATASET.TRAIN_FILE_LIST`和`DATASET.VAL_FILE_LIST`配置项中使用。
-
-
-**符合规范的文件列表是什么样的呢?**
-
-请参考目录[`./docs/annotation/cityscapes_demo`](../docs/annotation/cityscapes_demo/)。
-
-### 数据集目录结构整理
-
-如果用户想要生成数据集的文件列表,需要整理成如下的目录结构(类似于Cityscapes数据集):
-
-```
-./dataset/   # 数据集根目录
-├── annotations   # 标注目录
-│   ├── test
-│   │   ├── ...
-│   │   └── ...
-│   ├── train
-│   │   ├── ...
-│   │   └── ...
-│   └── val
-│       ├── ...
-│       └── ...
-└── images   # 原图目录
-    ├── test
-    │   ├── ...
-    │   └── ...
-    ├── train
-    │   ├── ...
-    │   └── ...
-    └── val
-        ├── ...
-        └── ...
-Note:以上目录名可任意
-```
-
-### 文件列表生成
-PaddleSeg提供了生成文件列表的使用脚本,可适用于自定义数据集或cityscapes数据集,并支持通过不同的Flags来开启特定功能。
-```
-python pdseg/tools/create_dataset_list.py <your/dataset/dir> ${FLAGS}
-```
-运行后将在数据集根目录下生成训练/验证/测试集的文件列表(文件主名与`--second_folder`一致,扩展名为`.txt`)。
-
-**Note:** 生成文件列表要求:要么原图和标注图片数量一致,要么只有原图,没有标注图片。若数据集缺少标注图片,仍可自动生成不含分隔符和标注图片路径的文件列表。
-
-#### 命令行FLAGS列表
-
-|FLAG|用途|默认值|参数数目|
-|-|-|-|-|
-|--type|指定数据集类型,`cityscapes`或`自定义`|`自定义`|1|
-|--separator|文件列表分隔符|"|"|1|
-|--folder|图片和标签集的文件夹名|"images" "annotations"|2|
-|--second_folder|训练/验证/测试集的文件夹名|"train" "val" "test"|若干|
-|--format|图片和标签集的数据格式|"jpg" "png"|2|
-|--postfix|按文件主名(无扩展名)是否包含指定后缀对图片和标签集进行筛选|"" ""(2个空字符)|2|
-
-#### 使用示例
-- **对于自定义数据集**
-
-若您已经按上述说明整理好了数据集目录结构,可以运行下面的命令生成文件列表。
-
-```
-# 生成文件列表,其分隔符为空格,图片和标签集的数据格式都为png
-python pdseg/tools/create_dataset_list.py <your/dataset/dir> --separator " " --format png png
-```
-```
-# 生成文件列表,其图片和标签集的文件夹名为img和gt,训练和验证集的文件夹名为training和validation,不生成测试集列表
-python pdseg/tools/create_dataset_list.py <your/dataset/dir> \
-    --folder img gt --second_folder training validation
-```
-**Note:** 必须指定自定义数据集目录,可以按需要设定FLAG。无需指定`--type`。
-
-- **对于cityscapes数据集**
-
-若您使用的是cityscapes数据集,可以运行下面的命令生成文件列表。
-
-```
-# 生成cityscapes文件列表,其分隔符为逗号
-python pdseg/tools/create_dataset_list.py <your/cityscapes/dir> --type cityscapes --separator ","
-```
-**Note:**
-
-必须指定cityscapes数据集目录,`--type`必须为`cityscapes`。
-
-在cityscapes类型下,部分FLAG将被重新设定,无需手动指定,具体如下:
-
-|FLAG|固定值|
-|-|-|
-|--folder|"leftImg8bit" "gtFine"|
-|--format|"png" "png"|
-|--postfix|"_leftImg8bit" "_gtFine_labelTrainIds"|
-
-其余FLAG可以按需要设定。
+# 数据集准备
+
+PaddleSeg目前支持CityScapes、ADE20K、Pascal VOC等数据集的加载。在加载数据集时,如若本地不存在对应数据,则会自动触发下载(Cityscapes数据集除外)。
+
+## 关于CityScapes数据集
+Cityscapes是关于城市街道场景的语义理解图片数据集。它主要包含来自50个不同城市的街道场景,
+拥有5000张(2048 x 1024)城市驾驶场景的高质量像素级注释图像,包含19个类别。其中训练集2975张、验证集500张、测试集1525张。
+
+由于协议限制,请自行前往[CityScapes官网](https://www.cityscapes-dataset.com/)下载数据集,
+我们建议您将数据集存放于`PaddleSeg/dygraph/data`中,以便与我们的配置文件完全兼容。数据集下载后请组织成如下结构:
+
+    cityscapes
+    |
+    |--leftImg8bit
+    |  |--train
+    |  |--val
+    |  |--test
+    |
+    |--gtFine
+    |  |--train
+    |  |--val
+    |  |--test
+
+运行下列命令进行标签转换:
+```shell
+pip install cityscapesscripts
+python tools/convert_cityscapes.py --cityscapes_path data/cityscapes --num_workers 8
+```
+其中`cityscapes_path`应根据实际数据集路径进行调整。`num_workers`决定启动的进程数,可根据实际情况调整大小。
+
+## 关于Pascal VOC 2012数据集
+[Pascal VOC 2012](http://host.robots.ox.ac.uk/pascal/VOC/)数据集以对象分割为主,包含20个类别和背景类,其中训练集1464张,验证集1449张。
+通常情况下会利用[SBD(Semantic Boundaries Dataset)](http://home.bharathh.info/pubs/codes/SBD/download.html)进行扩充,扩充后训练集10582张。
+运行下列命令下载SBD数据集并进行扩充:
+```shell
+python tools/voc_augment.py --voc_path data/VOCdevkit --num_workers 8
+```
+其中`voc_path`应根据实际数据集路径进行调整。
+
+**注意** 运行前请确保在dygraph目录下执行过下列命令:
+```shell
+export PYTHONPATH=`pwd`
+# Windows下请执行下面的命令
+# set PYTHONPATH=%cd%
+```
+
+## 关于ADE20K数据集
+[ADE20K](http://sceneparsing.csail.mit.edu/)是由MIT发布的、可用于场景感知、分割和多物体识别等多种任务的数据集。
+其涵盖150个语义类别,其中训练集20210张,验证集2000张。
+
+## 自定义数据集
+
+如果您需要使用自定义数据集进行训练,请按照以下步骤准备数据。
+
+1.推荐整理成如下结构
+
+    custom_dataset
+    |
+    |--images
+    |  |--image1.jpg
+    |  |--image2.jpg
+    |  |--...
+    |
+    |--labels
+    |  |--label1.png
+    |  |--label2.png
+    |  |--...
+    |
+    |--train.txt
+    |
+    |--val.txt
+    |
+    |--test.txt
+
+其中train.txt和val.txt的内容如下所示:
+
+    images/image1.jpg labels/label1.png
+    images/image2.jpg labels/label2.png
+    ...
+ +2.标注图像的标签从0,1依次取值,不可间隔。若有需要忽略的像素,则按255进行标注。 + +可按如下方式对自定义数据集进行配置: +```yaml +train_dataset: + type: Dataset + dataset_root: custom_dataset + train_path: custom_dataset/train.txt + num_classes: 2 + transforms: + - type: ResizeStepScaling + min_scale_factor: 0.5 + max_scale_factor: 2.0 + scale_step_size: 0.25 + - type: RandomPaddingCrop + crop_size: [512, 512] + - type: RandomHorizontalFlip + - type: Normalize + mode: train +``` diff --git a/dygraph/docs/images/quick_start_predict.jpg b/docs/images/quick_start_predict.jpg similarity index 100% rename from dygraph/docs/images/quick_start_predict.jpg rename to docs/images/quick_start_predict.jpg diff --git a/dygraph/docs/images/quick_start_vdl.jpg b/docs/images/quick_start_vdl.jpg similarity index 100% rename from dygraph/docs/images/quick_start_vdl.jpg rename to docs/images/quick_start_vdl.jpg diff --git a/dygraph/docs/quick_start.md b/docs/quick_start.md similarity index 100% rename from dygraph/docs/quick_start.md rename to docs/quick_start.md diff --git a/dygraph/README.md b/dygraph/README.md deleted file mode 100644 index 0d3460bab8..0000000000 --- a/dygraph/README.md +++ /dev/null @@ -1,85 +0,0 @@ -English | [简体中文](README_CN.md) - -# PaddleSeg(Dynamic Graph) - -[![Build Status](https://travis-ci.org/PaddlePaddle/PaddleSeg.svg?branch=master)](https://travis-ci.org/PaddlePaddle/PaddleSeg) -[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) -[![Version](https://img.shields.io/github/release/PaddlePaddle/PaddleSeg.svg)](https://github.com/PaddlePaddle/PaddleSeg/releases) -![python version](https://img.shields.io/badge/python-3.6+-orange.svg) -![support os](https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-yellow.svg) - -Welcome to the dynamic version! PaddleSeg is the first development kit which supports PaddlePaddle 2.0. Currently, we provide an experimental version that allows developers to have full-featured experience on dynamic graph. In the near future, the dynamic version will be set as default, and the static one will be moved to "legacy" directory. - -The full-detailed documents and tutorials are coming soon. So far there are minimum tutorials that help you to enjoy the strengths of dynamic version. - -## Model Zoo - -|Model\Backbone|ResNet50|ResNet101|HRNetw18|HRNetw48| -|-|-|-|-|-| -|[ANN](./configs/ann)|✔|✔||| -|[BiSeNetv2](./configs/bisenet)|-|-|-|-| -|[DANet](./configs/danet)|✔|✔||| -|[Deeplabv3](./configs/deeplabv3)|✔|✔||| -|[Deeplabv3P](./configs/deeplabv3p)|✔|✔||| -|[Fast-SCNN](./configs/fastscnn)|-|-|-|-| -|[FCN](./configs/fcn)|||✔|✔| -|[GCNet](./configs/gcnet)|✔|✔||| -|[OCRNet](./configs/ocrnet/)|||✔|✔| -|[PSPNet](./configs/pspnet)|✔|✔||| -|[UNet](./configs/unet)|-|-|-|-| - -## Dataset - -- [x] Cityscapes -- [x] Pascal VOC -- [x] ADE20K -- [ ] Pascal Context -- [ ] COCO stuff - -## Installation - -1. Install PaddlePaddle - -System Requirements: -* PaddlePaddle >= 2.0.0rc -* Python >= 3.6+ - -> Note: the above requirements are for the **dynamic** graph version. If you intent to use the static one, please refers to [here](../README.md). - -Highly recommend you install the GPU version of PaddlePaddle, due to large overhead of segmentation models, otherwise it could be out of memory while running the models. 
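For the custom-dataset layout described in `docs/data_prepare.md` above, the `train.txt`/`val.txt` file lists can be produced with a few lines of Python. The following is a minimal sketch, not a script shipped with PaddleSeg: the helper name, the 9:1 split, and the assumption that every `images/xxx.jpg` has a matching `labels/xxx.png` are all illustrative.

```python
import os
import random

def write_file_lists(root, img_dir="images", lbl_dir="labels", val_ratio=0.1):
    # Pair every image with the label PNG that shares its file stem,
    # e.g. images/image1.jpg <-> labels/image1.png.
    pairs = []
    for name in sorted(os.listdir(os.path.join(root, img_dir))):
        label = os.path.splitext(name)[0] + ".png"
        if os.path.exists(os.path.join(root, lbl_dir, label)):
            # One "image label" line per sample, separated by a single space,
            # matching the list format shown in data_prepare.md.
            pairs.append(f"{img_dir}/{name} {lbl_dir}/{label}")
    random.shuffle(pairs)
    n_val = int(len(pairs) * val_ratio)
    for filename, subset in (("val.txt", pairs[:n_val]), ("train.txt", pairs[n_val:])):
        with open(os.path.join(root, filename), "w") as f:
            f.write("\n".join(subset) + "\n")

write_file_lists("custom_dataset")
```

The paths written are relative to the dataset root, which matches the `dataset_root`/`train_path` pairing in the YAML example from `data_prepare.md`.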
- -For more detailed installation tutorials, please refer to the official website of [PaddlePaddle](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-beta/install/index_cn.html)。 - - -### Download PaddleSeg - -``` -git clone https://github.com/PaddlePaddle/PaddleSeg -``` - -### Install Dependencies -Install the python dependencies via the following commands,and please make sure execute it at least once in your branch. -```shell -cd PaddleSeg/dygraph -export PYTHONPATH=`pwd` -# Run the following one on Windows -# set PYTHONPATH=%cd% -pip install -r requirements.txt -``` - -## Quick Training -```shell -python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml -``` - -## Tutorials - -* [Get Started](./docs/quick_start.md) -* [Data Preparation](./docs/data_prepare.md) -* [Training Configuration](./configs/) -* [Add New Components](./docs/add_new_model.md) - - -## Feedbacks and Contact -* The dynamic version is still under development, if you find any issue or have an idea on new features, please don't hesitate to contact us via [GitHub Issues](https://github.com/PaddlePaddle/PaddleSeg/issues). -* PaddleSeg User Group (QQ): 850378321 or 793114768 diff --git a/dygraph/README_CN.md b/dygraph/README_CN.md deleted file mode 100644 index 084f5ad903..0000000000 --- a/dygraph/README_CN.md +++ /dev/null @@ -1,74 +0,0 @@ -简体中文 | [English](README.md) - -# PaddleSeg(动态图版本) - -本目录提供了PaddleSeg的动态图版本,目前已经完成了模型训练、评估、数据处理等功能,在未来的版本中,PaddleSeg将会启动默认的动态图模式。目前该目录处于实验阶段,如果您在使用过程中遇到任何问题,请通过issue反馈给我们,我们将会在第一时间跟进处理。 - -## 模型库 - -|模型\骨干网络|ResNet50|ResNet101|HRNetw18|HRNetw48| -|-|-|-|-|-| -|[ANN](./configs/ann)|✔|✔||| -|[BiSeNetv2](./configs/bisenet)|-|-|-|-| -|[DANet](./configs/danet)|✔|✔||| -|[Deeplabv3](./configs/deeplabv3)|✔|✔||| -|[Deeplabv3P](./configs/deeplabv3p)|✔|✔||| -|[Fast-SCNN](./configs/fastscnn)|-|-|-|-| -|[FCN](./configs/fcn)|||✔|✔| -|[GCNet](./configs/gcnet)|✔|✔||| -|[OCRNet](./configs/ocrnet/)|||✔|✔| -|[PSPNet](./configs/pspnet)|✔|✔||| -|[UNet](./configs/unet)|-|-|-|-| - -## 数据集 - -- [x] Cityscapes -- [x] Pascal VOC -- [x] ADE20K -- [ ] Pascal Context -- [ ] COCO stuff - -## 安装 - -1. 安装PaddlePaddle - -版本要求 - -* PaddlePaddle >= 2.0.0rc - -* Python >= 3.6+ - -由于图像分割模型计算开销大,推荐在GPU版本的PaddlePaddle下使用PaddleSeg。推荐安装10.0以上的CUDA环境。 - - -安装教程请见[PaddlePaddle官网](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-beta/install/index_cn.html)。 - - -2. 下载PaddleSeg代码 -```shell -git clone https://github.com/PaddlePaddle/PaddleSeg -``` - -3. 安装PaddleSeg依赖 -通过以下命令安装python包依赖,请确保在该分支上至少执行过一次以下命令: - - -```shell -cd PaddleSeg/dygraph -export PYTHONPATH=`pwd` -# windows下请执行以下命令 -# set PYTHONPATH=%cd% -pip install -r requirements.txt -``` - -## 训练 -```shell -python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml -``` - -## 使用教程 - -* [快速入门](./docs/quick_start.md) -* [数据集准备](./docs/data_prepare.md) -* [配置项](./configs/) -* [Add New Components](./docs/add_new_model.md) diff --git a/dygraph/docs/data_prepare.md b/dygraph/docs/data_prepare.md deleted file mode 100644 index b05665301f..0000000000 --- a/dygraph/docs/data_prepare.md +++ /dev/null @@ -1,100 +0,0 @@ -# 数据集准备 - -PaddleSeg目前支持CityScapes、ADE20K、Pascal VOC等数据集的加载,在加载数据集时,如若本地不存在对应数据,则会自动触发下载(除Cityscapes数据集). 
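Complementing the label conventions in `data_prepare.md` (annotation values start at 0 and increase consecutively; 255 marks pixels to ignore), a small check can catch malformed label maps early. This is a minimal illustrative sketch, not part of PaddleSeg; it only verifies the value range per image:

```python
import numpy as np
from PIL import Image

def check_label_values(path, num_classes):
    # Valid annotation values are 0 .. num_classes-1, plus 255 for
    # ignored pixels; anything else indicates a broken label map.
    values = np.unique(np.asarray(Image.open(path)))
    bad = values[(values >= num_classes) & (values != 255)]
    if bad.size > 0:
        raise ValueError(f"{path}: unexpected label values {bad.tolist()}")

check_label_values("custom_dataset/labels/label1.png", num_classes=2)
```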
- -## 关于CityScapes数据集 -Cityscapes是关于城市街道场景的语义理解图片数据集。它主要包含来自50个不同城市的街道场景, -拥有5000张(2048 x 1024)城市驾驶场景的高质量像素级注释图像,包含19个类别。其中训练集2975张, 验证集500张和测试集1525张。 - -由于协议限制,请自行前往[CityScapes官网](https://www.cityscapes-dataset.com/)下载数据集, -我们建议您将数据集存放于`PaddleSeg/dygraph/data`中,以便与我们配置文件完全兼容。数据集下载后请组织成如下结构: - - cityscapes - | - |--leftImg8bit - | |--train - | |--val - | |--test - | - |--gtFine - | |--train - | |--val - | |--test - -运行下列命令进行标签转换: -```shell -pip install cityscapesscripts -python tools/convert_cityscapes.py --cityscapes_path data/cityscapes --num_workers 8 -``` -其中`cityscapes_path`应根据实际数据集路径进行调整。 `num_workers`决定启动的进程数,可根据实际情况进行调整大小。 - -## 关于Pascal VOC 2012数据集 -[Pascal VOC 2012](http://host.robots.ox.ac.uk/pascal/VOC/)数据集以对象分割为主,包含20个类别和背景类,其中训练集1464张,验证集1449张。 -通常情况下会利用[SBD(Semantic Boundaries Dataset)](http://home.bharathh.info/pubs/codes/SBD/download.html)进行扩充,扩充后训练集10582张。 -运行下列命令进行SBD数据集下载并进行扩充: -```shell -python tools/voc_augment.py --voc_path data/VOCdevkit --num_workers 8 -``` -其中`voc_path`应根据实际数据集路径进行调整。 - -**注意** 运行前请确保在dygraph目录下执行过下列命令: -```shell -export PYTHONPATH=`pwd` -# windows下请执行相面的命令 -# set PYTHONPATH=%cd% -``` - -## 关于ADE20K数据集 -[ADE20K](http://sceneparsing.csail.mit.edu/)由MIT发布的可用于场景感知、分割和多物体识别等多种任务的数据集。 -其涵盖了150个语义类别,包括训练集20210张,验证集2000张。 - -## 自定义数据集 - -如果您需要使用自定义数据集进行训练,请按照以下步骤准备数据. - -1.推荐整理成如下结构 - - custom_dataset - | - |--images - | |--image1.jpg - | |--image2.jpg - | |--... - | - |--labels - | |--label1.jpg - | |--label2.png - | |--... - | - |--train.txt - | - |--val.txt - | - |--test.txt - -其中train.txt和val.txt的内容如下所示: - - images/image1.jpg labels/label1.png - images/image2.jpg labels/label2.png - ... - -2.标注图像的标签从0,1依次取值,不可间隔。若有需要忽略的像素,则按255进行标注。 - -可按如下方式对自定义数据集进行配置: -```yaml -train_dataset: - type: Dataset - dataset_root: custom_dataset - train_path: custom_dataset/train.txt - num_classes: 2 - transforms: - - type: ResizeStepScaling - min_scale_factor: 0.5 - max_scale_factor: 2.0 - scale_step_size: 0.25 - - type: RandomPaddingCrop - crop_size: [512, 512] - - type: RandomHorizontalFlip - - type: Normalize - mode: train -``` diff --git a/legacy/README.md b/legacy/README.md new file mode 100644 index 0000000000..963c23fc3a --- /dev/null +++ b/legacy/README.md @@ -0,0 +1,115 @@ +English | [简体中文](README_CN.md) + +# PaddleSeg + +[![Build Status](https://travis-ci.org/PaddlePaddle/PaddleSeg.svg?branch=master)](https://travis-ci.org/PaddlePaddle/PaddleSeg) +[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) +[![Version](https://img.shields.io/github/release/PaddlePaddle/PaddleSeg.svg)](https://github.com/PaddlePaddle/PaddleSeg/releases) +![python version](https://img.shields.io/badge/python-3.6+-orange.svg) +![support os](https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-yellow.svg) + + *[2020-12-02] PaddleSeg has released the [dynamic graph](./dygraph) version, which supports PaddlePaddle 2.0rc. For the static graph, we only fix bugs without adding new features. See detailed [release notes](./docs/release_notes.md).* + +## Introduction + +PaddleSeg is an end-to-end image segmentation development kit based on PaddlePaddle, which aims to help developers in the whole process of training models, optimizing performance and inference speed, and deploying models. Currently PaddleSeg supports seven efficient segmentation models, including DeepLabv3+, U-Net, ICNet, PSPNet, HRNet, Fast-SCNN, and OCRNet, which are extensively used in both academia and industry. Enjoy your Seg journey! 
+
+![demo](./docs/imgs/cityscapes.png)
+
+## Main Features
+
+- **Practical Data Augmentation Techniques**
+
+PaddleSeg provides 10+ data augmentation techniques, which are developed from the product-level applications in Baidu. These techniques help developers improve the generalization and robustness of their customized models.
+
+- **Modular Design**
+
+PaddleSeg supports seven popular segmentation models, including U-Net, DeepLabv3+, ICNet, PSPNet, HRNet, Fast-SCNN, and OCRNet. Combining different components, such as pre-trained models, adjustable backbone architectures and loss functions, developers can easily build an efficient segmentation model according to their practical performance requirements.
+
+- **High Performance**
+
+PaddleSeg supports efficient acceleration strategies such as multi-process I/O and multi-GPU parallel training. Moreover, by integrating the GPU memory optimization techniques of the PaddlePaddle framework, PaddleSeg significantly reduces the training overhead of segmentation models, which helps developers complete their segmentation tasks in a highly efficient way.
+
+- **Industry-Level Deployment**
+
+PaddleSeg supports industry-level deployment on both **servers** and **mobile devices** with a high-performance inference engine and image processing ability, which helps developers achieve high-performance deployment and integration of segmentation models efficiently. In particular, using another Paddle tool, [Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite), the segmentation models trained in PaddleSeg can be deployed on mobile/embedded devices quickly and easily.
+
+- **Rich Practical Cases**
+
+PaddleSeg provides rich practical cases in industry, such as human segmentation, mechanical meter segmentation, lane segmentation, remote sensing image segmentation, human parsing, and industrial inspection. These cases allow developers to take a closer look at the image segmentation field and gain hands-on experience with real practice.
+
+## Installation
+
+### 1. Install PaddlePaddle
+
+System Requirements:
+* PaddlePaddle >= 1.7.0 and < 2.0
+* Python >= 3.5+
+
+> Note: the above requirements are for the **static** graph version. If you intend to use the dynamic one, please refer to [here](./dygraph).
+
+We highly recommend installing the GPU version of PaddlePaddle: due to the large overhead of segmentation models, they could otherwise run out of memory.
+
+For more detailed installation tutorials, please refer to the official website of [PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick).
+
+### 2. Download PaddleSeg
+
+```
+git clone https://github.com/PaddlePaddle/PaddleSeg
+```
+
+### 3. Install Dependencies
+Install the python dependencies via the following commands, and please make sure to execute them at least once in your branch.
+```
+cd PaddleSeg
+pip install -r requirements.txt
+```
+
+## Tutorials
+
+For a better understanding of PaddleSeg, we provide comprehensive tutorials showing the whole process of using PaddleSeg for model training, evaluation and deployment. Besides the basic usage of PaddleSeg, the design insights will also be covered in the tutorials.
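Before moving on to the tutorials, it may help to confirm that the installed PaddlePaddle matches the static-graph requirement above (>= 1.7.0 and < 2.0). This is a minimal sketch; `paddle.__version__` is the standard version attribute, and the digit parsing below is only illustrative:

```python
import re

import paddle

# Extract the leading "major.minor" digits, tolerating suffixes like "rc".
major, minor = (int(n) for n in re.findall(r"\d+", paddle.__version__)[:2])
assert (1, 7) <= (major, minor) < (2, 0), (
    f"PaddleSeg (static graph) expects 1.7 <= paddle < 2.0, "
    f"got {paddle.__version__}")
```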
+
+### Quick Start
+
+* [PaddleSeg Start](./docs/usage.md)
+
+### Basic Usages
+
+* [Customized Data Preparation](./docs/data_prepare.md)
+* [Scripts and Config Guide](./docs/config.md)
+* [Data and Config Verification](./docs/check.md)
+* [Segmentation Models](./docs/models.md)
+* [Pretrained Models](./docs/model_zoo.md)
+* [DeepLabv3+ Tutorial](./tutorial/finetune_deeplabv3plus.md)
+
+### Inference and Deployment
+
+* [Model Export](./docs/model_export.md)
+* [Python Inference](./deploy/python/)
+* [C++ Inference](./deploy/cpp/)
+* [Paddle-Lite Mobile Inference & Deployment](./deploy/lite/)
+* [PaddleServing Inference & Deployment](./deploy/paddle-serving)
+
+
+### Advanced Features
+
+* [Data Augmentation](./docs/data_aug.md)
+* [Loss Functions](./docs/loss_select.md)
+* [Practical Cases](./contrib)
+* [Multiprocessing and Mixed-Precision Training](./docs/multiple_gpus_train_and_mixed_precision_train.md)
+* Model Compression ([Quantization](./slim/quantization/README.md), [Distillation](./slim/distillation/README.md), [Pruning](./slim/prune/README.md), [NAS](./slim/nas/README.md))
+
+
+### Online Tutorials
+
+We further provide a few online tutorials in Baidu AI Studio: [Get Started](https://aistudio.baidu.com/aistudio/projectdetail/100798), [U-Net](https://aistudio.baidu.com/aistudio/projectDetail/102889), [DeepLabv3+](https://aistudio.baidu.com/aistudio/projectDetail/226703), [Industry Inspection](https://aistudio.baidu.com/aistudio/projectdetail/184392), [HumanSeg](https://aistudio.baidu.com/aistudio/projectdetail/475345), [More](https://aistudio.baidu.com/aistudio/projectdetail/226710).
+
+
+## Feedbacks and Contact
+* If your question is not answered properly in [FAQ](./docs/faq.md) or you have an idea on PaddleSeg, please report an issue via [Github Issues](https://github.com/PaddlePaddle/PaddleSeg/issues).
+* PaddleSeg User Group (QQ): 850378321 or 793114768
+
+
+## Contributing
+
+All contributions and suggestions are welcome. If you want to contribute to PaddleSeg, please submit an issue or create a pull request directly.
diff --git a/legacy/README_CN.md b/legacy/README_CN.md
new file mode 100644
index 0000000000..c953b3cca0
--- /dev/null
+++ b/legacy/README_CN.md
@@ -0,0 +1,230 @@
+简体中文 | [English](README.md)
+
+# PaddleSeg
+
+[![Build Status](https://travis-ci.org/PaddlePaddle/PaddleSeg.svg?branch=master)](https://travis-ci.org/PaddlePaddle/PaddleSeg)
+[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE)
+[![Version](https://img.shields.io/github/release/PaddlePaddle/PaddleSeg.svg)](https://github.com/PaddlePaddle/PaddleSeg/releases)
+![python version](https://img.shields.io/badge/python-3.6+-orange.svg)
+![support os](https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-yellow.svg)
+
+ *[2020-12-02] PaddleSeg已经发布了全新的[动态图版本](./dygraph),全面适配 PaddlePaddle 2.0rc,静态图版本只作维护不再添加新功能,更多信息请查看详细[更新日志](./docs/release_notes.md)。*
+
+## 简介
+
+PaddleSeg是基于[PaddlePaddle](https://www.paddlepaddle.org.cn)开发的端到端图像分割开发套件,覆盖了DeepLabv3+, U-Net, ICNet, PSPNet, HRNet, Fast-SCNN等主流分割网络。通过模块化的设计,以配置化方式驱动模型组合,帮助开发者更便捷地完成从训练到部署的全流程图像分割应用。
+
+- [特点](#特点)
+- [安装](#安装)
+- [使用教程](#使用教程)
+  - [快速入门](#快速入门)
+  - [基础功能](#基础功能)
+  - [预测部署](#预测部署)
+  - [高级功能](#高级功能)
+- [在线体验](#在线体验)
+- [FAQ](#FAQ)
+- [交流与反馈](#交流与反馈)
+- [更新日志](#更新日志)
+- [贡献代码](#贡献代码)
+
+## 特点
+
+- **丰富的数据增强**
+
+基于百度视觉技术部的实际业务经验,内置10+种数据增强策略,可结合实际业务场景进行定制组合,提升模型泛化能力和鲁棒性。
+
+- **模块化设计**
+
+支持U-Net, DeepLabv3+, ICNet, PSPNet, HRNet, Fast-SCNN六种主流分割网络,结合预训练模型和可调节的骨干网络,满足不同性能和精度的要求;选择不同的损失函数如Dice Loss, Lovasz Loss等方式可以强化小目标和不均衡样本场景下的分割精度。
+
+- **高性能**
+
+PaddleSeg支持多进程I/O、多卡并行等训练加速策略,结合飞桨核心框架的显存优化功能,可大幅度减少分割模型的显存开销,让开发者更低成本、更高效地完成图像分割训练。
+
+- **工业级部署**
+
+全面提供**服务端**和**移动端**的工业级部署能力,依托飞桨高性能推理引擎和高性能图像处理实现,开发者可以轻松完成高性能的分割模型部署和集成。通过[Paddle-Lite](https://github.com/PaddlePaddle/Paddle-Lite),可以在移动设备或者嵌入式设备上完成轻量级、高性能的人像分割模型部署。
+
+- **产业实践案例**
+
+PaddleSeg提供丰富的产业实践案例,如[人像分割](./contrib/HumanSeg)、[工业表计检测](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib#%E5%B7%A5%E4%B8%9A%E8%A1%A8%E7%9B%98%E5%88%86%E5%89%B2)、[遥感分割](./contrib/RemoteSensing)、[人体解析](contrib/ACE2P)、[工业质检](https://aistudio.baidu.com/aistudio/projectdetail/184392)等,助力开发者更便捷地落地图像分割技术。
+
+## 安装
+
+### 1. 安装PaddlePaddle
+
+版本要求
+* PaddlePaddle >= 1.7.0 and < 2.0
+* Python >= 3.5+
+
+由于图像分割模型计算开销大,推荐在GPU版本的PaddlePaddle下使用PaddleSeg。
+
+安装教程请见[PaddlePaddle官网](https://www.paddlepaddle.org.cn/install/quick)。
+
+### 2. 下载PaddleSeg代码
+
+```
+git clone https://github.com/PaddlePaddle/PaddleSeg
+```
+
+### 3. 安装PaddleSeg依赖
+通过以下命令安装python包依赖,请确保在该分支上至少执行过一次以下命令:
+```
+cd PaddleSeg
+pip install -r requirements.txt
+```
+
+## 使用教程
+
+我们提供了一系列的使用教程,来说明如何使用PaddleSeg完成语义分割模型的训练、评估、部署。
+
+这一系列的文档被分为**快速入门**、**基础功能**、**预测部署**、**高级功能**四个部分,四个教程由浅至深地介绍PaddleSeg的设计思路和使用方法。
+
+### 快速入门
+
+* [PaddleSeg快速入门](./docs/usage.md)
+
+### 基础功能
+
+* [自定义数据的标注与准备](./docs/data_prepare.md)
+* [脚本使用和配置说明](./docs/config.md)
+* [数据和配置校验](./docs/check.md)
+* [分割模型介绍](./docs/models.md)
+* [预训练模型下载](./docs/model_zoo.md)
+* [DeepLabv3+模型使用教程](./tutorial/finetune_deeplabv3plus.md)
+* [U-Net模型使用教程](./tutorial/finetune_unet.md)
+* [ICNet模型使用教程](./tutorial/finetune_icnet.md)
+* [PSPNet模型使用教程](./tutorial/finetune_pspnet.md)
+* [HRNet模型使用教程](./tutorial/finetune_hrnet.md)
+* [Fast-SCNN模型使用教程](./tutorial/finetune_fast_scnn.md)
+* [OCRNet模型使用教程](./tutorial/finetune_ocrnet.md)
+
+### 预测部署
+
+* [模型导出](./docs/model_export.md)
+* [Python预测](./deploy/python/)
+* [C++预测](./deploy/cpp/)
+* [Paddle-Lite移动端预测部署](./deploy/lite/)
+* [PaddleServing预测部署](./deploy/paddle-serving)
+
+
+### 高级功能
+
+* [PaddleSeg的数据增强](./docs/data_aug.md)
+* [PaddleSeg的loss选择](./docs/loss_select.md)
+* [PaddleSeg产业实践](./contrib)
+* [多进程训练和混合精度训练](./docs/multiple_gpus_train_and_mixed_precision_train.md)
+* 使用PaddleSlim进行分割模型压缩([量化](./slim/quantization/README.md), [蒸馏](./slim/distillation/README.md), [剪枝](./slim/prune/README.md), [搜索](./slim/nas/README.md))
+
+## 在线体验
+
+我们在AI Studio平台上提供了在线体验的教程,欢迎体验:
+
+|在线教程|链接|
+|-|-|
+|快速开始|[点击体验](https://aistudio.baidu.com/aistudio/projectdetail/100798)|
+|U-Net图像分割|[点击体验](https://aistudio.baidu.com/aistudio/projectDetail/102889)|
+|DeepLabv3+图像分割|[点击体验](https://aistudio.baidu.com/aistudio/projectDetail/226703)|
+|工业质检(零件瑕疵检测)|[点击体验](https://aistudio.baidu.com/aistudio/projectdetail/184392)|
+|人像分割|[点击体验](https://aistudio.baidu.com/aistudio/projectdetail/475345)|
+|PaddleSeg特色垂类模型|[点击体验](https://aistudio.baidu.com/aistudio/projectdetail/226710)|
+
+## FAQ
+
+#### Q: 安装requirements.txt指定的依赖包时,部分包提示找不到?
+
+A: 可能是pip源的问题,这种情况下建议切换为官方源,或者通过`pip install -r requirements.txt -i `指定其他源地址。
+
+#### Q:图像分割的数据增强如何配置,Unpadding, StepScaling, RangeScaling的原理是什么?
+
+A: 更详细的数据增强文档可以参考[数据增强](./docs/data_aug.md)。
+
+#### Q: 训练时因为某些原因中断了,如何恢复训练?
+
+A: 启动训练脚本时通过命令行覆盖TRAIN.RESUME_MODEL_DIR配置为模型checkpoint目录即可,以下代码示例为从第100轮的checkpoint恢复训练:
+```
+python pdseg/train.py --cfg xxx.yaml TRAIN.RESUME_MODEL_DIR /PATH/TO/MODEL_CKPT/100
+```
+
+#### Q: 预测时图片过大,导致显存不足如何处理?
+
+A: 降低Batch size,使用Group Norm策略;请注意训练过程中当`DEFAULT_NORM_TYPE`选择`bn`时,为了Batch Norm计算稳定性,batch size需满足>=2。
+
+
+## 交流与反馈
+* 欢迎您通过[Github Issues](https://github.com/PaddlePaddle/PaddleSeg/issues)来提交问题、报告与建议
+* 微信公众号:飞桨PaddlePaddle
+* QQ群: 703252161
+

     

+(二维码图片:微信公众号 | 官方技术交流QQ群)
+
+## 更新日志
+
+* 2020.10.28
+
+  **`v0.7.0`**
+  * 全面支持Paddle2.0-rc动态图模式,推出PaddleSeg[动态图体验版](./dygraph/)
+  * 发布大量动态图模型,支持11个分割模型,4个骨干网络,3个数据集:
+    * 分割模型:ANN, BiSeNetV2, DANet, DeeplabV3, DeeplabV3+, FCN, FastSCNN, GCNet, OCRNet, PSPNet, UNet
+    * 骨干网络:ResNet, HRNet, MobileNetV3, Xception
+    * 数据集:Cityscapes, ADE20K, Pascal VOC
+
+  * 提供高精度骨干网络预训练模型以及基于Cityscapes数据集的语义分割[预训练模型](./dygraph/configs/)。Cityscapes精度超过**82%**。
+
+
+* 2020.08.31
+
+  **`v0.6.0`**
+  * 丰富Deeplabv3p网络结构,新增ResNet-vd、MobileNetv3两种backbone,满足高性能与高精度场景,并提供基于Cityscapes和ImageNet的[预训练模型](./docs/model_zoo.md)4个。
+  * 新增高精度分割模型OCRNet,支持以HRNet作为backbone,提供基于Cityscapes的[预训练模型](https://github.com/PaddlePaddle/PaddleSeg/blob/develop/docs/model_zoo.md#cityscapes%E9%A2%84%E8%AE%AD%E7%BB%83%E6%A8%A1%E5%9E%8B),mIoU超过80%。
+  * 新增proposal free的实例分割模型[Spatial Embedding](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib/SpatialEmbeddings),性能与精度均超越MaskRCNN。提供了基于kitti的预训练模型。
+
+* 2020.05.12
+
+  **`v0.5.0`**
+  * 全面升级[HumanSeg人像分割模型](./contrib/HumanSeg),新增超轻量级人像分割模型HumanSeg-lite支持移动端实时人像分割处理,并提供基于光流的视频分割后处理提升分割流畅性。
+  * 新增[气象遥感分割方案](./contrib/RemoteSensing),支持积雪识别、云检测等气象遥感场景。
+  * 新增[Lovasz Loss](docs/lovasz_loss.md),解决数据类别不均衡问题。
+  * 使用VisualDL 2.0作为训练可视化工具
+
+* 2020.02.25
+
+  **`v0.4.0`**
+  * 新增适用于实时场景且不需要预训练模型的分割网络Fast-SCNN,提供基于Cityscapes的[预训练模型](./docs/model_zoo.md)1个
+  * 新增LaneNet车道线检测网络,提供[预训练模型](https://github.com/PaddlePaddle/PaddleSeg/tree/release/v0.4.0/contrib/LaneNet#%E4%B8%83-%E5%8F%AF%E8%A7%86%E5%8C%96)1个
+  * 新增基于PaddleSlim的分割库压缩策略([量化](./slim/quantization/README.md), [蒸馏](./slim/distillation/README.md), [剪枝](./slim/prune/README.md), [搜索](./slim/nas/README.md))
+
+
+* 2019.12.15
+
+  **`v0.3.0`**
+  * 新增HRNet分割网络,提供基于Cityscapes和ImageNet的[预训练模型](./docs/model_zoo.md)8个
+  * 支持使用[伪彩色标签](./docs/data_prepare.md#%E7%81%B0%E5%BA%A6%E6%A0%87%E6%B3%A8vs%E4%BC%AA%E5%BD%A9%E8%89%B2%E6%A0%87%E6%B3%A8)进行训练/评估/预测,提升训练体验,并提供将灰度标注图转为伪彩色标注图的脚本
+  * 新增[学习率warmup](./docs/configs/solver_group.md#lr_warmup)功能,支持与不同的学习率Decay策略配合使用
+  * 新增图像归一化操作的GPU化实现,进一步提升预测速度。
+  * 新增Python部署方案,更低成本完成工业级部署。
+  * 新增Paddle-Lite移动端部署方案,支持人像分割模型的移动端部署。
+  * 新增不同分割模型的预测[性能数据Benchmark](./deploy/python/docs/PaddleSeg_Infer_Benchmark.md),为开发者模型选型提供性能参考。
+
+
+* 2019.11.04
+
+  **`v0.2.0`**
+  * 新增PSPNet分割网络,提供基于COCO和Cityscapes数据集的[预训练模型](./docs/model_zoo.md)4个。
+  * 新增Dice Loss、BCE Loss以及组合Loss配置,支持样本不均衡场景下的[模型优化](./docs/loss_select.md)。
+  * 支持[FP16混合精度训练](./docs/multiple_gpus_train_and_mixed_precision_train.md)以及动态Loss Scaling,在不损耗精度的情况下,训练速度提升30%+。
+  * 支持[PaddlePaddle多卡多进程训练](./docs/multiple_gpus_train_and_mixed_precision_train.md),多卡训练时训练速度提升15%+。
+  * 发布基于UNet的[工业标记表盘分割模型](./contrib#%E5%B7%A5%E4%B8%9A%E7%94%A8%E8%A1%A8%E5%88%86%E5%89%B2)。
+
+* 2019.09.10
+
+  **`v0.1.0`**
+  * PaddleSeg分割库初始版本发布,包含DeepLabv3+, U-Net, ICNet三类分割模型, 其中DeepLabv3+支持Xception, MobileNet v2两种可调节的骨干网络。
+  * CVPR19 LIP人体部件分割比赛冠军预测模型发布[ACE2P](./contrib/ACE2P)。
+  * 预置基于DeepLabv3+网络的[人像分割](./contrib/HumanSeg/)和[车道线分割](./contrib/RoadLine)预测模型发布。
+
+ +## 贡献代码 + +我们非常欢迎您为PaddleSeg贡献代码或者提供使用建议。如果您可以修复某个issue或者增加一个新功能,欢迎给我们提交Pull Requests. diff --git a/configs/cityscape_fast_scnn.yaml b/legacy/configs/cityscape_fast_scnn.yaml similarity index 100% rename from configs/cityscape_fast_scnn.yaml rename to legacy/configs/cityscape_fast_scnn.yaml diff --git a/configs/deepglobe_road_extraction.yaml b/legacy/configs/deepglobe_road_extraction.yaml similarity index 100% rename from configs/deepglobe_road_extraction.yaml rename to legacy/configs/deepglobe_road_extraction.yaml diff --git a/configs/deeplabv3p_mobilenet-1-0_pet.yaml b/legacy/configs/deeplabv3p_mobilenet-1-0_pet.yaml similarity index 100% rename from configs/deeplabv3p_mobilenet-1-0_pet.yaml rename to legacy/configs/deeplabv3p_mobilenet-1-0_pet.yaml diff --git a/configs/deeplabv3p_mobilenetv2_cityscapes.yaml b/legacy/configs/deeplabv3p_mobilenetv2_cityscapes.yaml similarity index 100% rename from configs/deeplabv3p_mobilenetv2_cityscapes.yaml rename to legacy/configs/deeplabv3p_mobilenetv2_cityscapes.yaml diff --git a/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml b/legacy/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml similarity index 100% rename from configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml rename to legacy/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml diff --git a/configs/deeplabv3p_resnet50_vd_cityscapes.yaml b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml similarity index 100% rename from configs/deeplabv3p_resnet50_vd_cityscapes.yaml rename to legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml diff --git a/configs/deeplabv3p_xception65_cityscapes.yaml b/legacy/configs/deeplabv3p_xception65_cityscapes.yaml similarity index 100% rename from configs/deeplabv3p_xception65_cityscapes.yaml rename to legacy/configs/deeplabv3p_xception65_cityscapes.yaml diff --git a/configs/deeplabv3p_xception65_optic.yaml b/legacy/configs/deeplabv3p_xception65_optic.yaml similarity index 100% rename from configs/deeplabv3p_xception65_optic.yaml rename to legacy/configs/deeplabv3p_xception65_optic.yaml diff --git a/configs/fast_scnn_pet.yaml b/legacy/configs/fast_scnn_pet.yaml similarity index 100% rename from configs/fast_scnn_pet.yaml rename to legacy/configs/fast_scnn_pet.yaml diff --git a/configs/hrnet_optic.yaml b/legacy/configs/hrnet_optic.yaml similarity index 100% rename from configs/hrnet_optic.yaml rename to legacy/configs/hrnet_optic.yaml diff --git a/configs/icnet_optic.yaml b/legacy/configs/icnet_optic.yaml similarity index 100% rename from configs/icnet_optic.yaml rename to legacy/configs/icnet_optic.yaml diff --git a/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml b/legacy/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml similarity index 100% rename from configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml rename to legacy/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml diff --git a/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml b/legacy/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml similarity index 100% rename from configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml rename to legacy/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml diff --git a/configs/ocrnet_w18_bn_cityscapes.yaml b/legacy/configs/ocrnet_w18_bn_cityscapes.yaml similarity index 100% rename from configs/ocrnet_w18_bn_cityscapes.yaml rename to legacy/configs/ocrnet_w18_bn_cityscapes.yaml diff --git a/configs/pspnet_optic.yaml b/legacy/configs/pspnet_optic.yaml similarity index 100% rename from configs/pspnet_optic.yaml rename to 
legacy/configs/pspnet_optic.yaml diff --git a/configs/unet_optic.yaml b/legacy/configs/unet_optic.yaml similarity index 100% rename from configs/unet_optic.yaml rename to legacy/configs/unet_optic.yaml diff --git a/contrib/ACE2P/README.md b/legacy/contrib/ACE2P/README.md similarity index 100% rename from contrib/ACE2P/README.md rename to legacy/contrib/ACE2P/README.md diff --git a/contrib/ACE2P/__init__.py b/legacy/contrib/ACE2P/__init__.py similarity index 100% rename from contrib/ACE2P/__init__.py rename to legacy/contrib/ACE2P/__init__.py diff --git a/contrib/ACE2P/config.py b/legacy/contrib/ACE2P/config.py similarity index 100% rename from contrib/ACE2P/config.py rename to legacy/contrib/ACE2P/config.py diff --git a/contrib/ACE2P/download_ACE2P.py b/legacy/contrib/ACE2P/download_ACE2P.py similarity index 100% rename from contrib/ACE2P/download_ACE2P.py rename to legacy/contrib/ACE2P/download_ACE2P.py diff --git a/contrib/ACE2P/imgs/117676_2149260.jpg b/legacy/contrib/ACE2P/imgs/117676_2149260.jpg similarity index 100% rename from contrib/ACE2P/imgs/117676_2149260.jpg rename to legacy/contrib/ACE2P/imgs/117676_2149260.jpg diff --git a/contrib/ACE2P/imgs/117676_2149260.png b/legacy/contrib/ACE2P/imgs/117676_2149260.png similarity index 100% rename from contrib/ACE2P/imgs/117676_2149260.png rename to legacy/contrib/ACE2P/imgs/117676_2149260.png diff --git a/contrib/ACE2P/imgs/net.jpg b/legacy/contrib/ACE2P/imgs/net.jpg similarity index 100% rename from contrib/ACE2P/imgs/net.jpg rename to legacy/contrib/ACE2P/imgs/net.jpg diff --git a/contrib/ACE2P/imgs/result.jpg b/legacy/contrib/ACE2P/imgs/result.jpg similarity index 100% rename from contrib/ACE2P/imgs/result.jpg rename to legacy/contrib/ACE2P/imgs/result.jpg diff --git a/contrib/ACE2P/infer.py b/legacy/contrib/ACE2P/infer.py similarity index 100% rename from contrib/ACE2P/infer.py rename to legacy/contrib/ACE2P/infer.py diff --git a/contrib/ACE2P/reader.py b/legacy/contrib/ACE2P/reader.py similarity index 100% rename from contrib/ACE2P/reader.py rename to legacy/contrib/ACE2P/reader.py diff --git a/contrib/ACE2P/utils/__init__.py b/legacy/contrib/ACE2P/utils/__init__.py similarity index 100% rename from contrib/ACE2P/utils/__init__.py rename to legacy/contrib/ACE2P/utils/__init__.py diff --git a/contrib/ACE2P/utils/palette.py b/legacy/contrib/ACE2P/utils/palette.py similarity index 100% rename from contrib/ACE2P/utils/palette.py rename to legacy/contrib/ACE2P/utils/palette.py diff --git a/contrib/ACE2P/utils/util.py b/legacy/contrib/ACE2P/utils/util.py similarity index 100% rename from contrib/ACE2P/utils/util.py rename to legacy/contrib/ACE2P/utils/util.py diff --git a/contrib/HumanSeg/README.md b/legacy/contrib/HumanSeg/README.md similarity index 100% rename from contrib/HumanSeg/README.md rename to legacy/contrib/HumanSeg/README.md diff --git a/contrib/HumanSeg/bg_replace.py b/legacy/contrib/HumanSeg/bg_replace.py similarity index 100% rename from contrib/HumanSeg/bg_replace.py rename to legacy/contrib/HumanSeg/bg_replace.py diff --git a/contrib/HumanSeg/data/background.jpg b/legacy/contrib/HumanSeg/data/background.jpg similarity index 100% rename from contrib/HumanSeg/data/background.jpg rename to legacy/contrib/HumanSeg/data/background.jpg diff --git a/contrib/HumanSeg/data/download_data.py b/legacy/contrib/HumanSeg/data/download_data.py similarity index 100% rename from contrib/HumanSeg/data/download_data.py rename to legacy/contrib/HumanSeg/data/download_data.py diff --git a/contrib/HumanSeg/data/human_image.jpg 
b/legacy/contrib/HumanSeg/data/human_image.jpg similarity index 100% rename from contrib/HumanSeg/data/human_image.jpg rename to legacy/contrib/HumanSeg/data/human_image.jpg diff --git a/contrib/HumanSeg/datasets/__init__.py b/legacy/contrib/HumanSeg/datasets/__init__.py similarity index 100% rename from contrib/HumanSeg/datasets/__init__.py rename to legacy/contrib/HumanSeg/datasets/__init__.py diff --git a/contrib/HumanSeg/datasets/dataset.py b/legacy/contrib/HumanSeg/datasets/dataset.py similarity index 100% rename from contrib/HumanSeg/datasets/dataset.py rename to legacy/contrib/HumanSeg/datasets/dataset.py diff --git a/contrib/HumanSeg/datasets/shared_queue/__init__.py b/legacy/contrib/HumanSeg/datasets/shared_queue/__init__.py similarity index 100% rename from contrib/HumanSeg/datasets/shared_queue/__init__.py rename to legacy/contrib/HumanSeg/datasets/shared_queue/__init__.py diff --git a/contrib/HumanSeg/datasets/shared_queue/queue.py b/legacy/contrib/HumanSeg/datasets/shared_queue/queue.py similarity index 100% rename from contrib/HumanSeg/datasets/shared_queue/queue.py rename to legacy/contrib/HumanSeg/datasets/shared_queue/queue.py diff --git a/contrib/HumanSeg/datasets/shared_queue/sharedmemory.py b/legacy/contrib/HumanSeg/datasets/shared_queue/sharedmemory.py similarity index 100% rename from contrib/HumanSeg/datasets/shared_queue/sharedmemory.py rename to legacy/contrib/HumanSeg/datasets/shared_queue/sharedmemory.py diff --git a/contrib/HumanSeg/export.py b/legacy/contrib/HumanSeg/export.py similarity index 100% rename from contrib/HumanSeg/export.py rename to legacy/contrib/HumanSeg/export.py diff --git a/contrib/HumanSeg/infer.py b/legacy/contrib/HumanSeg/infer.py similarity index 100% rename from contrib/HumanSeg/infer.py rename to legacy/contrib/HumanSeg/infer.py diff --git a/contrib/HumanSeg/models/__init__.py b/legacy/contrib/HumanSeg/models/__init__.py similarity index 100% rename from contrib/HumanSeg/models/__init__.py rename to legacy/contrib/HumanSeg/models/__init__.py diff --git a/contrib/HumanSeg/models/humanseg.py b/legacy/contrib/HumanSeg/models/humanseg.py similarity index 100% rename from contrib/HumanSeg/models/humanseg.py rename to legacy/contrib/HumanSeg/models/humanseg.py diff --git a/contrib/HumanSeg/models/load_model.py b/legacy/contrib/HumanSeg/models/load_model.py similarity index 100% rename from contrib/HumanSeg/models/load_model.py rename to legacy/contrib/HumanSeg/models/load_model.py diff --git a/contrib/HumanSeg/nets/__init__.py b/legacy/contrib/HumanSeg/nets/__init__.py similarity index 100% rename from contrib/HumanSeg/nets/__init__.py rename to legacy/contrib/HumanSeg/nets/__init__.py diff --git a/contrib/HumanSeg/nets/backbone/__init__.py b/legacy/contrib/HumanSeg/nets/backbone/__init__.py similarity index 100% rename from contrib/HumanSeg/nets/backbone/__init__.py rename to legacy/contrib/HumanSeg/nets/backbone/__init__.py diff --git a/contrib/HumanSeg/nets/backbone/mobilenet_v2.py b/legacy/contrib/HumanSeg/nets/backbone/mobilenet_v2.py similarity index 100% rename from contrib/HumanSeg/nets/backbone/mobilenet_v2.py rename to legacy/contrib/HumanSeg/nets/backbone/mobilenet_v2.py diff --git a/contrib/HumanSeg/nets/backbone/xception.py b/legacy/contrib/HumanSeg/nets/backbone/xception.py similarity index 100% rename from contrib/HumanSeg/nets/backbone/xception.py rename to legacy/contrib/HumanSeg/nets/backbone/xception.py diff --git a/contrib/HumanSeg/nets/deeplabv3p.py b/legacy/contrib/HumanSeg/nets/deeplabv3p.py similarity index 100% rename 
from contrib/HumanSeg/nets/deeplabv3p.py rename to legacy/contrib/HumanSeg/nets/deeplabv3p.py diff --git a/contrib/HumanSeg/nets/hrnet.py b/legacy/contrib/HumanSeg/nets/hrnet.py similarity index 100% rename from contrib/HumanSeg/nets/hrnet.py rename to legacy/contrib/HumanSeg/nets/hrnet.py diff --git a/contrib/HumanSeg/nets/libs.py b/legacy/contrib/HumanSeg/nets/libs.py similarity index 100% rename from contrib/HumanSeg/nets/libs.py rename to legacy/contrib/HumanSeg/nets/libs.py diff --git a/contrib/HumanSeg/nets/seg_modules.py b/legacy/contrib/HumanSeg/nets/seg_modules.py similarity index 100% rename from contrib/HumanSeg/nets/seg_modules.py rename to legacy/contrib/HumanSeg/nets/seg_modules.py diff --git a/contrib/HumanSeg/nets/shufflenet_slim.py b/legacy/contrib/HumanSeg/nets/shufflenet_slim.py similarity index 100% rename from contrib/HumanSeg/nets/shufflenet_slim.py rename to legacy/contrib/HumanSeg/nets/shufflenet_slim.py diff --git a/contrib/HumanSeg/pretrained_weights/download_pretrained_weights.py b/legacy/contrib/HumanSeg/pretrained_weights/download_pretrained_weights.py similarity index 100% rename from contrib/HumanSeg/pretrained_weights/download_pretrained_weights.py rename to legacy/contrib/HumanSeg/pretrained_weights/download_pretrained_weights.py diff --git a/contrib/HumanSeg/quant_offline.py b/legacy/contrib/HumanSeg/quant_offline.py similarity index 100% rename from contrib/HumanSeg/quant_offline.py rename to legacy/contrib/HumanSeg/quant_offline.py diff --git a/contrib/HumanSeg/quant_online.py b/legacy/contrib/HumanSeg/quant_online.py similarity index 100% rename from contrib/HumanSeg/quant_online.py rename to legacy/contrib/HumanSeg/quant_online.py diff --git a/contrib/HumanSeg/requirements.txt b/legacy/contrib/HumanSeg/requirements.txt similarity index 100% rename from contrib/HumanSeg/requirements.txt rename to legacy/contrib/HumanSeg/requirements.txt diff --git a/contrib/HumanSeg/train.py b/legacy/contrib/HumanSeg/train.py similarity index 100% rename from contrib/HumanSeg/train.py rename to legacy/contrib/HumanSeg/train.py diff --git a/contrib/HumanSeg/transforms/__init__.py b/legacy/contrib/HumanSeg/transforms/__init__.py similarity index 100% rename from contrib/HumanSeg/transforms/__init__.py rename to legacy/contrib/HumanSeg/transforms/__init__.py diff --git a/contrib/HumanSeg/transforms/functional.py b/legacy/contrib/HumanSeg/transforms/functional.py similarity index 100% rename from contrib/HumanSeg/transforms/functional.py rename to legacy/contrib/HumanSeg/transforms/functional.py diff --git a/contrib/HumanSeg/transforms/transforms.py b/legacy/contrib/HumanSeg/transforms/transforms.py similarity index 100% rename from contrib/HumanSeg/transforms/transforms.py rename to legacy/contrib/HumanSeg/transforms/transforms.py diff --git a/contrib/HumanSeg/utils/__init__.py b/legacy/contrib/HumanSeg/utils/__init__.py similarity index 100% rename from contrib/HumanSeg/utils/__init__.py rename to legacy/contrib/HumanSeg/utils/__init__.py diff --git a/contrib/HumanSeg/utils/humanseg_postprocess.py b/legacy/contrib/HumanSeg/utils/humanseg_postprocess.py similarity index 100% rename from contrib/HumanSeg/utils/humanseg_postprocess.py rename to legacy/contrib/HumanSeg/utils/humanseg_postprocess.py diff --git a/contrib/HumanSeg/utils/logging.py b/legacy/contrib/HumanSeg/utils/logging.py similarity index 100% rename from contrib/HumanSeg/utils/logging.py rename to legacy/contrib/HumanSeg/utils/logging.py diff --git a/contrib/HumanSeg/utils/metrics.py 
b/legacy/contrib/HumanSeg/utils/metrics.py similarity index 100% rename from contrib/HumanSeg/utils/metrics.py rename to legacy/contrib/HumanSeg/utils/metrics.py diff --git a/contrib/HumanSeg/utils/post_quantization.py b/legacy/contrib/HumanSeg/utils/post_quantization.py similarity index 100% rename from contrib/HumanSeg/utils/post_quantization.py rename to legacy/contrib/HumanSeg/utils/post_quantization.py diff --git a/contrib/HumanSeg/utils/utils.py b/legacy/contrib/HumanSeg/utils/utils.py similarity index 100% rename from contrib/HumanSeg/utils/utils.py rename to legacy/contrib/HumanSeg/utils/utils.py diff --git a/contrib/HumanSeg/val.py b/legacy/contrib/HumanSeg/val.py similarity index 100% rename from contrib/HumanSeg/val.py rename to legacy/contrib/HumanSeg/val.py diff --git a/contrib/HumanSeg/video_infer.py b/legacy/contrib/HumanSeg/video_infer.py similarity index 100% rename from contrib/HumanSeg/video_infer.py rename to legacy/contrib/HumanSeg/video_infer.py diff --git a/contrib/LaneNet/README.md b/legacy/contrib/LaneNet/README.md similarity index 100% rename from contrib/LaneNet/README.md rename to legacy/contrib/LaneNet/README.md diff --git a/contrib/LaneNet/configs/lanenet.yaml b/legacy/contrib/LaneNet/configs/lanenet.yaml similarity index 100% rename from contrib/LaneNet/configs/lanenet.yaml rename to legacy/contrib/LaneNet/configs/lanenet.yaml diff --git a/contrib/LaneNet/data_aug.py b/legacy/contrib/LaneNet/data_aug.py similarity index 100% rename from contrib/LaneNet/data_aug.py rename to legacy/contrib/LaneNet/data_aug.py diff --git a/contrib/LaneNet/dataset/download_tusimple.py b/legacy/contrib/LaneNet/dataset/download_tusimple.py similarity index 100% rename from contrib/LaneNet/dataset/download_tusimple.py rename to legacy/contrib/LaneNet/dataset/download_tusimple.py diff --git a/contrib/LaneNet/eval.py b/legacy/contrib/LaneNet/eval.py similarity index 100% rename from contrib/LaneNet/eval.py rename to legacy/contrib/LaneNet/eval.py diff --git a/contrib/LaneNet/imgs/0005_pred_binary.png b/legacy/contrib/LaneNet/imgs/0005_pred_binary.png similarity index 100% rename from contrib/LaneNet/imgs/0005_pred_binary.png rename to legacy/contrib/LaneNet/imgs/0005_pred_binary.png diff --git a/contrib/LaneNet/imgs/0005_pred_instance.png b/legacy/contrib/LaneNet/imgs/0005_pred_instance.png similarity index 100% rename from contrib/LaneNet/imgs/0005_pred_instance.png rename to legacy/contrib/LaneNet/imgs/0005_pred_instance.png diff --git a/contrib/LaneNet/imgs/0005_pred_lane.png b/legacy/contrib/LaneNet/imgs/0005_pred_lane.png similarity index 100% rename from contrib/LaneNet/imgs/0005_pred_lane.png rename to legacy/contrib/LaneNet/imgs/0005_pred_lane.png diff --git a/contrib/LaneNet/loss.py b/legacy/contrib/LaneNet/loss.py similarity index 100% rename from contrib/LaneNet/loss.py rename to legacy/contrib/LaneNet/loss.py diff --git a/contrib/LaneNet/models/__init__.py b/legacy/contrib/LaneNet/models/__init__.py similarity index 100% rename from contrib/LaneNet/models/__init__.py rename to legacy/contrib/LaneNet/models/__init__.py diff --git a/contrib/LaneNet/models/model_builder.py b/legacy/contrib/LaneNet/models/model_builder.py similarity index 100% rename from contrib/LaneNet/models/model_builder.py rename to legacy/contrib/LaneNet/models/model_builder.py diff --git a/contrib/LaneNet/models/modeling/__init__.py b/legacy/contrib/LaneNet/models/modeling/__init__.py similarity index 100% rename from contrib/LaneNet/models/modeling/__init__.py rename to 
legacy/contrib/LaneNet/models/modeling/__init__.py diff --git a/contrib/LaneNet/models/modeling/lanenet.py b/legacy/contrib/LaneNet/models/modeling/lanenet.py similarity index 100% rename from contrib/LaneNet/models/modeling/lanenet.py rename to legacy/contrib/LaneNet/models/modeling/lanenet.py diff --git a/contrib/LaneNet/reader.py b/legacy/contrib/LaneNet/reader.py similarity index 100% rename from contrib/LaneNet/reader.py rename to legacy/contrib/LaneNet/reader.py diff --git a/contrib/LaneNet/requirements.txt b/legacy/contrib/LaneNet/requirements.txt similarity index 100% rename from contrib/LaneNet/requirements.txt rename to legacy/contrib/LaneNet/requirements.txt diff --git a/contrib/LaneNet/train.py b/legacy/contrib/LaneNet/train.py similarity index 100% rename from contrib/LaneNet/train.py rename to legacy/contrib/LaneNet/train.py diff --git a/contrib/LaneNet/utils/__init__.py b/legacy/contrib/LaneNet/utils/__init__.py similarity index 100% rename from contrib/LaneNet/utils/__init__.py rename to legacy/contrib/LaneNet/utils/__init__.py diff --git a/contrib/LaneNet/utils/config.py b/legacy/contrib/LaneNet/utils/config.py similarity index 100% rename from contrib/LaneNet/utils/config.py rename to legacy/contrib/LaneNet/utils/config.py diff --git a/contrib/LaneNet/utils/dist_utils.py b/legacy/contrib/LaneNet/utils/dist_utils.py similarity index 100% rename from contrib/LaneNet/utils/dist_utils.py rename to legacy/contrib/LaneNet/utils/dist_utils.py diff --git a/contrib/LaneNet/utils/generate_tusimple_dataset.py b/legacy/contrib/LaneNet/utils/generate_tusimple_dataset.py similarity index 100% rename from contrib/LaneNet/utils/generate_tusimple_dataset.py rename to legacy/contrib/LaneNet/utils/generate_tusimple_dataset.py diff --git a/contrib/LaneNet/utils/lanenet_postprocess.py b/legacy/contrib/LaneNet/utils/lanenet_postprocess.py similarity index 100% rename from contrib/LaneNet/utils/lanenet_postprocess.py rename to legacy/contrib/LaneNet/utils/lanenet_postprocess.py diff --git a/contrib/LaneNet/utils/load_model_utils.py b/legacy/contrib/LaneNet/utils/load_model_utils.py similarity index 100% rename from contrib/LaneNet/utils/load_model_utils.py rename to legacy/contrib/LaneNet/utils/load_model_utils.py diff --git a/contrib/LaneNet/vis.py b/legacy/contrib/LaneNet/vis.py similarity index 100% rename from contrib/LaneNet/vis.py rename to legacy/contrib/LaneNet/vis.py diff --git a/contrib/MechanicalIndustryMeter/download_mini_mechanical_industry_meter.py b/legacy/contrib/MechanicalIndustryMeter/download_mini_mechanical_industry_meter.py similarity index 100% rename from contrib/MechanicalIndustryMeter/download_mini_mechanical_industry_meter.py rename to legacy/contrib/MechanicalIndustryMeter/download_mini_mechanical_industry_meter.py diff --git a/contrib/MechanicalIndustryMeter/download_unet_mechanical_industry_meter.py b/legacy/contrib/MechanicalIndustryMeter/download_unet_mechanical_industry_meter.py similarity index 100% rename from contrib/MechanicalIndustryMeter/download_unet_mechanical_industry_meter.py rename to legacy/contrib/MechanicalIndustryMeter/download_unet_mechanical_industry_meter.py diff --git a/contrib/MechanicalIndustryMeter/imgs/1560143028.5_IMG_3091.JPG b/legacy/contrib/MechanicalIndustryMeter/imgs/1560143028.5_IMG_3091.JPG similarity index 100% rename from contrib/MechanicalIndustryMeter/imgs/1560143028.5_IMG_3091.JPG rename to legacy/contrib/MechanicalIndustryMeter/imgs/1560143028.5_IMG_3091.JPG diff --git 
a/contrib/MechanicalIndustryMeter/imgs/1560143028.5_IMG_3091.png b/legacy/contrib/MechanicalIndustryMeter/imgs/1560143028.5_IMG_3091.png similarity index 100% rename from contrib/MechanicalIndustryMeter/imgs/1560143028.5_IMG_3091.png rename to legacy/contrib/MechanicalIndustryMeter/imgs/1560143028.5_IMG_3091.png diff --git a/contrib/MechanicalIndustryMeter/unet_mechanical_meter.yaml b/legacy/contrib/MechanicalIndustryMeter/unet_mechanical_meter.yaml similarity index 100% rename from contrib/MechanicalIndustryMeter/unet_mechanical_meter.yaml rename to legacy/contrib/MechanicalIndustryMeter/unet_mechanical_meter.yaml diff --git a/contrib/README.md b/legacy/contrib/README.md similarity index 100% rename from contrib/README.md rename to legacy/contrib/README.md diff --git a/contrib/RemoteSensing/README.md b/legacy/contrib/RemoteSensing/README.md similarity index 100% rename from contrib/RemoteSensing/README.md rename to legacy/contrib/RemoteSensing/README.md diff --git a/contrib/RemoteSensing/__init__.py b/legacy/contrib/RemoteSensing/__init__.py similarity index 100% rename from contrib/RemoteSensing/__init__.py rename to legacy/contrib/RemoteSensing/__init__.py diff --git a/contrib/RemoteSensing/docs/data_analyse_and_check.md b/legacy/contrib/RemoteSensing/docs/data_analyse_and_check.md similarity index 100% rename from contrib/RemoteSensing/docs/data_analyse_and_check.md rename to legacy/contrib/RemoteSensing/docs/data_analyse_and_check.md diff --git a/contrib/RemoteSensing/docs/data_prepare.md b/legacy/contrib/RemoteSensing/docs/data_prepare.md similarity index 100% rename from contrib/RemoteSensing/docs/data_prepare.md rename to legacy/contrib/RemoteSensing/docs/data_prepare.md diff --git a/contrib/RemoteSensing/docs/imgs/data_distribution.png b/legacy/contrib/RemoteSensing/docs/imgs/data_distribution.png similarity index 100% rename from contrib/RemoteSensing/docs/imgs/data_distribution.png rename to legacy/contrib/RemoteSensing/docs/imgs/data_distribution.png diff --git a/contrib/RemoteSensing/docs/imgs/dataset.png b/legacy/contrib/RemoteSensing/docs/imgs/dataset.png similarity index 100% rename from contrib/RemoteSensing/docs/imgs/dataset.png rename to legacy/contrib/RemoteSensing/docs/imgs/dataset.png diff --git a/contrib/RemoteSensing/docs/imgs/vis.png b/legacy/contrib/RemoteSensing/docs/imgs/vis.png similarity index 100% rename from contrib/RemoteSensing/docs/imgs/vis.png rename to legacy/contrib/RemoteSensing/docs/imgs/vis.png diff --git a/contrib/RemoteSensing/docs/imgs/visualdl.png b/legacy/contrib/RemoteSensing/docs/imgs/visualdl.png similarity index 100% rename from contrib/RemoteSensing/docs/imgs/visualdl.png rename to legacy/contrib/RemoteSensing/docs/imgs/visualdl.png diff --git a/contrib/RemoteSensing/docs/transforms.md b/legacy/contrib/RemoteSensing/docs/transforms.md similarity index 100% rename from contrib/RemoteSensing/docs/transforms.md rename to legacy/contrib/RemoteSensing/docs/transforms.md diff --git a/contrib/RemoteSensing/models/__init__.py b/legacy/contrib/RemoteSensing/models/__init__.py similarity index 100% rename from contrib/RemoteSensing/models/__init__.py rename to legacy/contrib/RemoteSensing/models/__init__.py diff --git a/contrib/RemoteSensing/models/base.py b/legacy/contrib/RemoteSensing/models/base.py similarity index 100% rename from contrib/RemoteSensing/models/base.py rename to legacy/contrib/RemoteSensing/models/base.py diff --git a/contrib/RemoteSensing/models/hrnet.py b/legacy/contrib/RemoteSensing/models/hrnet.py similarity index 100% rename 
from contrib/RemoteSensing/models/hrnet.py rename to legacy/contrib/RemoteSensing/models/hrnet.py diff --git a/contrib/RemoteSensing/models/load_model.py b/legacy/contrib/RemoteSensing/models/load_model.py similarity index 100% rename from contrib/RemoteSensing/models/load_model.py rename to legacy/contrib/RemoteSensing/models/load_model.py diff --git a/contrib/RemoteSensing/models/unet.py b/legacy/contrib/RemoteSensing/models/unet.py similarity index 100% rename from contrib/RemoteSensing/models/unet.py rename to legacy/contrib/RemoteSensing/models/unet.py diff --git a/contrib/RemoteSensing/models/utils/visualize.py b/legacy/contrib/RemoteSensing/models/utils/visualize.py similarity index 100% rename from contrib/RemoteSensing/models/utils/visualize.py rename to legacy/contrib/RemoteSensing/models/utils/visualize.py diff --git a/contrib/RemoteSensing/nets/__init__.py b/legacy/contrib/RemoteSensing/nets/__init__.py similarity index 100% rename from contrib/RemoteSensing/nets/__init__.py rename to legacy/contrib/RemoteSensing/nets/__init__.py diff --git a/contrib/RemoteSensing/nets/hrnet.py b/legacy/contrib/RemoteSensing/nets/hrnet.py similarity index 100% rename from contrib/RemoteSensing/nets/hrnet.py rename to legacy/contrib/RemoteSensing/nets/hrnet.py diff --git a/contrib/RemoteSensing/nets/libs.py b/legacy/contrib/RemoteSensing/nets/libs.py similarity index 100% rename from contrib/RemoteSensing/nets/libs.py rename to legacy/contrib/RemoteSensing/nets/libs.py diff --git a/contrib/RemoteSensing/nets/loss.py b/legacy/contrib/RemoteSensing/nets/loss.py similarity index 100% rename from contrib/RemoteSensing/nets/loss.py rename to legacy/contrib/RemoteSensing/nets/loss.py diff --git a/contrib/RemoteSensing/nets/unet.py b/legacy/contrib/RemoteSensing/nets/unet.py similarity index 100% rename from contrib/RemoteSensing/nets/unet.py rename to legacy/contrib/RemoteSensing/nets/unet.py diff --git a/contrib/RemoteSensing/predict_demo.py b/legacy/contrib/RemoteSensing/predict_demo.py similarity index 100% rename from contrib/RemoteSensing/predict_demo.py rename to legacy/contrib/RemoteSensing/predict_demo.py diff --git a/contrib/RemoteSensing/readers/__init__.py b/legacy/contrib/RemoteSensing/readers/__init__.py similarity index 100% rename from contrib/RemoteSensing/readers/__init__.py rename to legacy/contrib/RemoteSensing/readers/__init__.py diff --git a/contrib/RemoteSensing/readers/base.py b/legacy/contrib/RemoteSensing/readers/base.py similarity index 100% rename from contrib/RemoteSensing/readers/base.py rename to legacy/contrib/RemoteSensing/readers/base.py diff --git a/contrib/RemoteSensing/readers/reader.py b/legacy/contrib/RemoteSensing/readers/reader.py similarity index 100% rename from contrib/RemoteSensing/readers/reader.py rename to legacy/contrib/RemoteSensing/readers/reader.py diff --git a/contrib/RemoteSensing/requirements.txt b/legacy/contrib/RemoteSensing/requirements.txt similarity index 100% rename from contrib/RemoteSensing/requirements.txt rename to legacy/contrib/RemoteSensing/requirements.txt diff --git a/contrib/RemoteSensing/tools/cal_norm_coef.py b/legacy/contrib/RemoteSensing/tools/cal_norm_coef.py similarity index 100% rename from contrib/RemoteSensing/tools/cal_norm_coef.py rename to legacy/contrib/RemoteSensing/tools/cal_norm_coef.py diff --git a/contrib/RemoteSensing/tools/create_dataset_list.py b/legacy/contrib/RemoteSensing/tools/create_dataset_list.py similarity index 100% rename from contrib/RemoteSensing/tools/create_dataset_list.py rename to 
legacy/contrib/RemoteSensing/tools/create_dataset_list.py diff --git a/contrib/RemoteSensing/tools/data_analyse_and_check.py b/legacy/contrib/RemoteSensing/tools/data_analyse_and_check.py similarity index 100% rename from contrib/RemoteSensing/tools/data_analyse_and_check.py rename to legacy/contrib/RemoteSensing/tools/data_analyse_and_check.py diff --git a/contrib/RemoteSensing/tools/data_distribution_vis.py b/legacy/contrib/RemoteSensing/tools/data_distribution_vis.py similarity index 100% rename from contrib/RemoteSensing/tools/data_distribution_vis.py rename to legacy/contrib/RemoteSensing/tools/data_distribution_vis.py diff --git a/contrib/RemoteSensing/tools/split_dataset_list.py b/legacy/contrib/RemoteSensing/tools/split_dataset_list.py similarity index 100% rename from contrib/RemoteSensing/tools/split_dataset_list.py rename to legacy/contrib/RemoteSensing/tools/split_dataset_list.py diff --git a/contrib/RemoteSensing/train_demo.py b/legacy/contrib/RemoteSensing/train_demo.py similarity index 100% rename from contrib/RemoteSensing/train_demo.py rename to legacy/contrib/RemoteSensing/train_demo.py diff --git a/contrib/RemoteSensing/transforms/__init__.py b/legacy/contrib/RemoteSensing/transforms/__init__.py similarity index 100% rename from contrib/RemoteSensing/transforms/__init__.py rename to legacy/contrib/RemoteSensing/transforms/__init__.py diff --git a/contrib/RemoteSensing/transforms/ops.py b/legacy/contrib/RemoteSensing/transforms/ops.py similarity index 100% rename from contrib/RemoteSensing/transforms/ops.py rename to legacy/contrib/RemoteSensing/transforms/ops.py diff --git a/contrib/RemoteSensing/transforms/transforms.py b/legacy/contrib/RemoteSensing/transforms/transforms.py similarity index 100% rename from contrib/RemoteSensing/transforms/transforms.py rename to legacy/contrib/RemoteSensing/transforms/transforms.py diff --git a/contrib/RemoteSensing/utils/__init__.py b/legacy/contrib/RemoteSensing/utils/__init__.py similarity index 100% rename from contrib/RemoteSensing/utils/__init__.py rename to legacy/contrib/RemoteSensing/utils/__init__.py diff --git a/contrib/RemoteSensing/utils/logging.py b/legacy/contrib/RemoteSensing/utils/logging.py similarity index 100% rename from contrib/RemoteSensing/utils/logging.py rename to legacy/contrib/RemoteSensing/utils/logging.py diff --git a/contrib/RemoteSensing/utils/metrics.py b/legacy/contrib/RemoteSensing/utils/metrics.py similarity index 100% rename from contrib/RemoteSensing/utils/metrics.py rename to legacy/contrib/RemoteSensing/utils/metrics.py diff --git a/contrib/RemoteSensing/utils/pretrain_weights.py b/legacy/contrib/RemoteSensing/utils/pretrain_weights.py similarity index 100% rename from contrib/RemoteSensing/utils/pretrain_weights.py rename to legacy/contrib/RemoteSensing/utils/pretrain_weights.py diff --git a/contrib/RemoteSensing/utils/utils.py b/legacy/contrib/RemoteSensing/utils/utils.py similarity index 100% rename from contrib/RemoteSensing/utils/utils.py rename to legacy/contrib/RemoteSensing/utils/utils.py diff --git a/contrib/RemoteSensing/visualize_demo.py b/legacy/contrib/RemoteSensing/visualize_demo.py similarity index 100% rename from contrib/RemoteSensing/visualize_demo.py rename to legacy/contrib/RemoteSensing/visualize_demo.py diff --git a/contrib/SpatialEmbeddings/README.md b/legacy/contrib/SpatialEmbeddings/README.md similarity index 100% rename from contrib/SpatialEmbeddings/README.md rename to legacy/contrib/SpatialEmbeddings/README.md diff --git a/contrib/SpatialEmbeddings/config.py 
b/legacy/contrib/SpatialEmbeddings/config.py similarity index 100% rename from contrib/SpatialEmbeddings/config.py rename to legacy/contrib/SpatialEmbeddings/config.py diff --git a/contrib/SpatialEmbeddings/data/kitti/0007/kitti_0007_000512.png b/legacy/contrib/SpatialEmbeddings/data/kitti/0007/kitti_0007_000512.png similarity index 100% rename from contrib/SpatialEmbeddings/data/kitti/0007/kitti_0007_000512.png rename to legacy/contrib/SpatialEmbeddings/data/kitti/0007/kitti_0007_000512.png diff --git a/contrib/SpatialEmbeddings/data/kitti/0007/kitti_0007_000518.png b/legacy/contrib/SpatialEmbeddings/data/kitti/0007/kitti_0007_000518.png similarity index 100% rename from contrib/SpatialEmbeddings/data/kitti/0007/kitti_0007_000518.png rename to legacy/contrib/SpatialEmbeddings/data/kitti/0007/kitti_0007_000518.png diff --git a/contrib/SpatialEmbeddings/data/test.txt b/legacy/contrib/SpatialEmbeddings/data/test.txt similarity index 100% rename from contrib/SpatialEmbeddings/data/test.txt rename to legacy/contrib/SpatialEmbeddings/data/test.txt diff --git a/contrib/SpatialEmbeddings/download_SpatialEmbeddings_kitti.py b/legacy/contrib/SpatialEmbeddings/download_SpatialEmbeddings_kitti.py similarity index 100% rename from contrib/SpatialEmbeddings/download_SpatialEmbeddings_kitti.py rename to legacy/contrib/SpatialEmbeddings/download_SpatialEmbeddings_kitti.py diff --git a/contrib/SpatialEmbeddings/imgs/kitti_0007_000518_ori.png b/legacy/contrib/SpatialEmbeddings/imgs/kitti_0007_000518_ori.png similarity index 100% rename from contrib/SpatialEmbeddings/imgs/kitti_0007_000518_ori.png rename to legacy/contrib/SpatialEmbeddings/imgs/kitti_0007_000518_ori.png diff --git a/contrib/SpatialEmbeddings/imgs/kitti_0007_000518_pred.png b/legacy/contrib/SpatialEmbeddings/imgs/kitti_0007_000518_pred.png similarity index 100% rename from contrib/SpatialEmbeddings/imgs/kitti_0007_000518_pred.png rename to legacy/contrib/SpatialEmbeddings/imgs/kitti_0007_000518_pred.png diff --git a/contrib/SpatialEmbeddings/infer.py b/legacy/contrib/SpatialEmbeddings/infer.py similarity index 100% rename from contrib/SpatialEmbeddings/infer.py rename to legacy/contrib/SpatialEmbeddings/infer.py diff --git a/contrib/SpatialEmbeddings/models.py b/legacy/contrib/SpatialEmbeddings/models.py similarity index 100% rename from contrib/SpatialEmbeddings/models.py rename to legacy/contrib/SpatialEmbeddings/models.py diff --git a/contrib/SpatialEmbeddings/utils/__init__.py b/legacy/contrib/SpatialEmbeddings/utils/__init__.py similarity index 100% rename from contrib/SpatialEmbeddings/utils/__init__.py rename to legacy/contrib/SpatialEmbeddings/utils/__init__.py diff --git a/contrib/SpatialEmbeddings/utils/data_util.py b/legacy/contrib/SpatialEmbeddings/utils/data_util.py similarity index 100% rename from contrib/SpatialEmbeddings/utils/data_util.py rename to legacy/contrib/SpatialEmbeddings/utils/data_util.py diff --git a/contrib/SpatialEmbeddings/utils/palette.py b/legacy/contrib/SpatialEmbeddings/utils/palette.py similarity index 100% rename from contrib/SpatialEmbeddings/utils/palette.py rename to legacy/contrib/SpatialEmbeddings/utils/palette.py diff --git a/contrib/SpatialEmbeddings/utils/util.py b/legacy/contrib/SpatialEmbeddings/utils/util.py similarity index 100% rename from contrib/SpatialEmbeddings/utils/util.py rename to legacy/contrib/SpatialEmbeddings/utils/util.py diff --git a/dataset/README.md b/legacy/dataset/README.md similarity index 100% rename from dataset/README.md rename to legacy/dataset/README.md diff --git 
a/dataset/convert_voc2012.py b/legacy/dataset/convert_voc2012.py similarity index 100% rename from dataset/convert_voc2012.py rename to legacy/dataset/convert_voc2012.py diff --git a/dataset/download_and_convert_voc2012.py b/legacy/dataset/download_and_convert_voc2012.py similarity index 100% rename from dataset/download_and_convert_voc2012.py rename to legacy/dataset/download_and_convert_voc2012.py diff --git a/dataset/download_cityscapes.py b/legacy/dataset/download_cityscapes.py similarity index 100% rename from dataset/download_cityscapes.py rename to legacy/dataset/download_cityscapes.py diff --git a/dataset/download_mini_deepglobe_road_extraction.py b/legacy/dataset/download_mini_deepglobe_road_extraction.py similarity index 100% rename from dataset/download_mini_deepglobe_road_extraction.py rename to legacy/dataset/download_mini_deepglobe_road_extraction.py diff --git a/dataset/download_optic.py b/legacy/dataset/download_optic.py similarity index 100% rename from dataset/download_optic.py rename to legacy/dataset/download_optic.py diff --git a/dataset/download_pet.py b/legacy/dataset/download_pet.py similarity index 100% rename from dataset/download_pet.py rename to legacy/dataset/download_pet.py diff --git a/deploy/README.md b/legacy/deploy/README.md similarity index 100% rename from deploy/README.md rename to legacy/deploy/README.md diff --git a/deploy/cpp/CMakeLists.txt b/legacy/deploy/cpp/CMakeLists.txt similarity index 100% rename from deploy/cpp/CMakeLists.txt rename to legacy/deploy/cpp/CMakeLists.txt diff --git a/deploy/cpp/CMakeSettings.json b/legacy/deploy/cpp/CMakeSettings.json similarity index 100% rename from deploy/cpp/CMakeSettings.json rename to legacy/deploy/cpp/CMakeSettings.json diff --git a/deploy/cpp/INSTALL.md b/legacy/deploy/cpp/INSTALL.md similarity index 100% rename from deploy/cpp/INSTALL.md rename to legacy/deploy/cpp/INSTALL.md diff --git a/deploy/cpp/LICENSE b/legacy/deploy/cpp/LICENSE similarity index 100% rename from deploy/cpp/LICENSE rename to legacy/deploy/cpp/LICENSE diff --git a/deploy/cpp/README.md b/legacy/deploy/cpp/README.md similarity index 100% rename from deploy/cpp/README.md rename to legacy/deploy/cpp/README.md diff --git a/deploy/cpp/conf/humanseg.yaml b/legacy/deploy/cpp/conf/humanseg.yaml similarity index 100% rename from deploy/cpp/conf/humanseg.yaml rename to legacy/deploy/cpp/conf/humanseg.yaml diff --git a/deploy/cpp/demo.cpp b/legacy/deploy/cpp/demo.cpp similarity index 100% rename from deploy/cpp/demo.cpp rename to legacy/deploy/cpp/demo.cpp diff --git a/deploy/cpp/docs/demo.jpg b/legacy/deploy/cpp/docs/demo.jpg similarity index 100% rename from deploy/cpp/docs/demo.jpg rename to legacy/deploy/cpp/docs/demo.jpg diff --git a/deploy/cpp/docs/demo_jpg.png b/legacy/deploy/cpp/docs/demo_jpg.png similarity index 100% rename from deploy/cpp/docs/demo_jpg.png rename to legacy/deploy/cpp/docs/demo_jpg.png diff --git a/deploy/cpp/docs/linux_build.md b/legacy/deploy/cpp/docs/linux_build.md similarity index 100% rename from deploy/cpp/docs/linux_build.md rename to legacy/deploy/cpp/docs/linux_build.md diff --git a/deploy/cpp/docs/vis.md b/legacy/deploy/cpp/docs/vis.md similarity index 100% rename from deploy/cpp/docs/vis.md rename to legacy/deploy/cpp/docs/vis.md diff --git a/deploy/cpp/docs/vis_result.png b/legacy/deploy/cpp/docs/vis_result.png similarity index 100% rename from deploy/cpp/docs/vis_result.png rename to legacy/deploy/cpp/docs/vis_result.png diff --git a/deploy/cpp/docs/windows_vs2015_build.md 
b/legacy/deploy/cpp/docs/windows_vs2015_build.md similarity index 100% rename from deploy/cpp/docs/windows_vs2015_build.md rename to legacy/deploy/cpp/docs/windows_vs2015_build.md diff --git a/deploy/cpp/docs/windows_vs2019_build.md b/legacy/deploy/cpp/docs/windows_vs2019_build.md similarity index 100% rename from deploy/cpp/docs/windows_vs2019_build.md rename to legacy/deploy/cpp/docs/windows_vs2019_build.md diff --git a/deploy/cpp/external-cmake/yaml-cpp.cmake b/legacy/deploy/cpp/external-cmake/yaml-cpp.cmake similarity index 100% rename from deploy/cpp/external-cmake/yaml-cpp.cmake rename to legacy/deploy/cpp/external-cmake/yaml-cpp.cmake diff --git a/deploy/cpp/images/humanseg/demo1.jpeg b/legacy/deploy/cpp/images/humanseg/demo1.jpeg similarity index 100% rename from deploy/cpp/images/humanseg/demo1.jpeg rename to legacy/deploy/cpp/images/humanseg/demo1.jpeg diff --git a/deploy/cpp/images/humanseg/demo2.jpeg b/legacy/deploy/cpp/images/humanseg/demo2.jpeg similarity index 100% rename from deploy/cpp/images/humanseg/demo2.jpeg rename to legacy/deploy/cpp/images/humanseg/demo2.jpeg diff --git a/deploy/cpp/images/humanseg/demo2.jpeg_result.png b/legacy/deploy/cpp/images/humanseg/demo2.jpeg_result.png similarity index 100% rename from deploy/cpp/images/humanseg/demo2.jpeg_result.png rename to legacy/deploy/cpp/images/humanseg/demo2.jpeg_result.png diff --git a/deploy/cpp/images/humanseg/demo2_jpeg_recover.png b/legacy/deploy/cpp/images/humanseg/demo2_jpeg_recover.png similarity index 100% rename from deploy/cpp/images/humanseg/demo2_jpeg_recover.png rename to legacy/deploy/cpp/images/humanseg/demo2_jpeg_recover.png diff --git a/deploy/cpp/images/humanseg/demo3.jpeg b/legacy/deploy/cpp/images/humanseg/demo3.jpeg similarity index 100% rename from deploy/cpp/images/humanseg/demo3.jpeg rename to legacy/deploy/cpp/images/humanseg/demo3.jpeg diff --git a/deploy/cpp/predictor/seg_predictor.cpp b/legacy/deploy/cpp/predictor/seg_predictor.cpp similarity index 100% rename from deploy/cpp/predictor/seg_predictor.cpp rename to legacy/deploy/cpp/predictor/seg_predictor.cpp diff --git a/deploy/cpp/predictor/seg_predictor.h b/legacy/deploy/cpp/predictor/seg_predictor.h similarity index 100% rename from deploy/cpp/predictor/seg_predictor.h rename to legacy/deploy/cpp/predictor/seg_predictor.h diff --git a/deploy/cpp/preprocessor/preprocessor.cpp b/legacy/deploy/cpp/preprocessor/preprocessor.cpp similarity index 100% rename from deploy/cpp/preprocessor/preprocessor.cpp rename to legacy/deploy/cpp/preprocessor/preprocessor.cpp diff --git a/deploy/cpp/preprocessor/preprocessor.h b/legacy/deploy/cpp/preprocessor/preprocessor.h similarity index 100% rename from deploy/cpp/preprocessor/preprocessor.h rename to legacy/deploy/cpp/preprocessor/preprocessor.h diff --git a/deploy/cpp/preprocessor/preprocessor_seg.cpp b/legacy/deploy/cpp/preprocessor/preprocessor_seg.cpp similarity index 100% rename from deploy/cpp/preprocessor/preprocessor_seg.cpp rename to legacy/deploy/cpp/preprocessor/preprocessor_seg.cpp diff --git a/deploy/cpp/preprocessor/preprocessor_seg.h b/legacy/deploy/cpp/preprocessor/preprocessor_seg.h similarity index 100% rename from deploy/cpp/preprocessor/preprocessor_seg.h rename to legacy/deploy/cpp/preprocessor/preprocessor_seg.h diff --git a/deploy/cpp/tools/visualize.py b/legacy/deploy/cpp/tools/visualize.py similarity index 100% rename from deploy/cpp/tools/visualize.py rename to legacy/deploy/cpp/tools/visualize.py diff --git a/deploy/cpp/utils/seg_conf_parser.h 
b/legacy/deploy/cpp/utils/seg_conf_parser.h similarity index 100% rename from deploy/cpp/utils/seg_conf_parser.h rename to legacy/deploy/cpp/utils/seg_conf_parser.h diff --git a/deploy/cpp/utils/utils.h b/legacy/deploy/cpp/utils/utils.h similarity index 100% rename from deploy/cpp/utils/utils.h rename to legacy/deploy/cpp/utils/utils.h diff --git a/deploy/lite/README.md b/legacy/deploy/lite/README.md similarity index 100% rename from deploy/lite/README.md rename to legacy/deploy/lite/README.md diff --git a/deploy/lite/example/human_1.png b/legacy/deploy/lite/example/human_1.png similarity index 100% rename from deploy/lite/example/human_1.png rename to legacy/deploy/lite/example/human_1.png diff --git a/deploy/lite/example/human_2.png b/legacy/deploy/lite/example/human_2.png similarity index 100% rename from deploy/lite/example/human_2.png rename to legacy/deploy/lite/example/human_2.png diff --git a/deploy/lite/example/human_3.png b/legacy/deploy/lite/example/human_3.png similarity index 100% rename from deploy/lite/example/human_3.png rename to legacy/deploy/lite/example/human_3.png diff --git a/deploy/lite/human_segmentation_demo/.gitignore b/legacy/deploy/lite/human_segmentation_demo/.gitignore similarity index 100% rename from deploy/lite/human_segmentation_demo/.gitignore rename to legacy/deploy/lite/human_segmentation_demo/.gitignore diff --git a/deploy/lite/human_segmentation_demo/app/.gitignore b/legacy/deploy/lite/human_segmentation_demo/app/.gitignore similarity index 100% rename from deploy/lite/human_segmentation_demo/app/.gitignore rename to legacy/deploy/lite/human_segmentation_demo/app/.gitignore diff --git a/deploy/lite/human_segmentation_demo/app/build.gradle b/legacy/deploy/lite/human_segmentation_demo/app/build.gradle similarity index 100% rename from deploy/lite/human_segmentation_demo/app/build.gradle rename to legacy/deploy/lite/human_segmentation_demo/app/build.gradle diff --git a/deploy/lite/human_segmentation_demo/app/gradle/wrapper/gradle-wrapper.jar b/legacy/deploy/lite/human_segmentation_demo/app/gradle/wrapper/gradle-wrapper.jar similarity index 100% rename from deploy/lite/human_segmentation_demo/app/gradle/wrapper/gradle-wrapper.jar rename to legacy/deploy/lite/human_segmentation_demo/app/gradle/wrapper/gradle-wrapper.jar diff --git a/deploy/lite/human_segmentation_demo/app/gradle/wrapper/gradle-wrapper.properties b/legacy/deploy/lite/human_segmentation_demo/app/gradle/wrapper/gradle-wrapper.properties similarity index 100% rename from deploy/lite/human_segmentation_demo/app/gradle/wrapper/gradle-wrapper.properties rename to legacy/deploy/lite/human_segmentation_demo/app/gradle/wrapper/gradle-wrapper.properties diff --git a/deploy/lite/human_segmentation_demo/app/gradlew b/legacy/deploy/lite/human_segmentation_demo/app/gradlew similarity index 100% rename from deploy/lite/human_segmentation_demo/app/gradlew rename to legacy/deploy/lite/human_segmentation_demo/app/gradlew diff --git a/deploy/lite/human_segmentation_demo/app/gradlew.bat b/legacy/deploy/lite/human_segmentation_demo/app/gradlew.bat similarity index 100% rename from deploy/lite/human_segmentation_demo/app/gradlew.bat rename to legacy/deploy/lite/human_segmentation_demo/app/gradlew.bat diff --git a/deploy/lite/human_segmentation_demo/app/local.properties b/legacy/deploy/lite/human_segmentation_demo/app/local.properties similarity index 100% rename from deploy/lite/human_segmentation_demo/app/local.properties rename to legacy/deploy/lite/human_segmentation_demo/app/local.properties diff --git 
a/deploy/lite/human_segmentation_demo/app/proguard-rules.pro b/legacy/deploy/lite/human_segmentation_demo/app/proguard-rules.pro similarity index 100% rename from deploy/lite/human_segmentation_demo/app/proguard-rules.pro rename to legacy/deploy/lite/human_segmentation_demo/app/proguard-rules.pro diff --git a/deploy/lite/human_segmentation_demo/app/src/androidTest/java/com/baidu/paddle/lite/demo/ExampleInstrumentedTest.java b/legacy/deploy/lite/human_segmentation_demo/app/src/androidTest/java/com/baidu/paddle/lite/demo/ExampleInstrumentedTest.java similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/androidTest/java/com/baidu/paddle/lite/demo/ExampleInstrumentedTest.java rename to legacy/deploy/lite/human_segmentation_demo/app/src/androidTest/java/com/baidu/paddle/lite/demo/ExampleInstrumentedTest.java diff --git a/deploy/lite/human_segmentation_demo/app/src/main/AndroidManifest.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/AndroidManifest.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/AndroidManifest.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/AndroidManifest.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/images/human.jpg b/legacy/deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/images/human.jpg similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/images/human.jpg rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/images/human.jpg diff --git a/deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/labels/label_list b/legacy/deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/labels/label_list similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/labels/label_list rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/assets/image_segmentation/labels/label_list diff --git a/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/AppCompatPreferenceActivity.java b/legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/AppCompatPreferenceActivity.java similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/AppCompatPreferenceActivity.java rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/AppCompatPreferenceActivity.java diff --git a/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/MainActivity.java b/legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/MainActivity.java similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/MainActivity.java rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/MainActivity.java diff --git a/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/Predictor.java b/legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/Predictor.java similarity index 100% rename from 
deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/Predictor.java rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/Predictor.java diff --git a/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/SettingsActivity.java b/legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/SettingsActivity.java similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/SettingsActivity.java rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/SettingsActivity.java diff --git a/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/Utils.java b/legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/Utils.java similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/Utils.java rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/Utils.java diff --git a/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/config/Config.java b/legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/config/Config.java similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/config/Config.java rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/config/Config.java diff --git a/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/preprocess/Preprocess.java b/legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/preprocess/Preprocess.java similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/preprocess/Preprocess.java rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/preprocess/Preprocess.java diff --git a/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/visual/Visualize.java b/legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/visual/Visualize.java similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/visual/Visualize.java rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/java/com/baidu/paddle/lite/demo/segmentation/visual/Visualize.java diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/drawable-v24/ic_launcher_foreground.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/drawable-v24/ic_launcher_foreground.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/drawable-v24/ic_launcher_foreground.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/drawable-v24/ic_launcher_foreground.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/drawable/ic_launcher_background.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/drawable/ic_launcher_background.xml similarity index 100% 
rename from deploy/lite/human_segmentation_demo/app/src/main/res/drawable/ic_launcher_background.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/drawable/ic_launcher_background.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/layout/activity_main.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/layout/activity_main.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/layout/activity_main.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/layout/activity_main.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/menu/menu_action_options.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/menu/menu_action_options.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/menu/menu_action_options.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/menu/menu_action_options.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher.png diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-hdpi/ic_launcher_round.png diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher.png diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-mdpi/ic_launcher_round.png diff --git 
a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher.png diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher.png diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/values/arrays.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/values/arrays.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/values/arrays.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/values/arrays.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/values/colors.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/values/colors.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/values/colors.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/values/colors.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/values/strings.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/values/strings.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/values/strings.xml rename to 
legacy/deploy/lite/human_segmentation_demo/app/src/main/res/values/strings.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/values/styles.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/values/styles.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/values/styles.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/values/styles.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/main/res/xml/settings.xml b/legacy/deploy/lite/human_segmentation_demo/app/src/main/res/xml/settings.xml similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/main/res/xml/settings.xml rename to legacy/deploy/lite/human_segmentation_demo/app/src/main/res/xml/settings.xml diff --git a/deploy/lite/human_segmentation_demo/app/src/test/java/com/baidu/paddle/lite/demo/ExampleUnitTest.java b/legacy/deploy/lite/human_segmentation_demo/app/src/test/java/com/baidu/paddle/lite/demo/ExampleUnitTest.java similarity index 100% rename from deploy/lite/human_segmentation_demo/app/src/test/java/com/baidu/paddle/lite/demo/ExampleUnitTest.java rename to legacy/deploy/lite/human_segmentation_demo/app/src/test/java/com/baidu/paddle/lite/demo/ExampleUnitTest.java diff --git a/deploy/lite/human_segmentation_demo/build.gradle b/legacy/deploy/lite/human_segmentation_demo/build.gradle similarity index 100% rename from deploy/lite/human_segmentation_demo/build.gradle rename to legacy/deploy/lite/human_segmentation_demo/build.gradle diff --git a/deploy/lite/human_segmentation_demo/gradle.properties b/legacy/deploy/lite/human_segmentation_demo/gradle.properties similarity index 100% rename from deploy/lite/human_segmentation_demo/gradle.properties rename to legacy/deploy/lite/human_segmentation_demo/gradle.properties diff --git a/deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.jar b/legacy/deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.jar similarity index 100% rename from deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.jar rename to legacy/deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.jar diff --git a/deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.properties b/legacy/deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.properties similarity index 100% rename from deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.properties rename to legacy/deploy/lite/human_segmentation_demo/gradle/wrapper/gradle-wrapper.properties diff --git a/deploy/lite/human_segmentation_demo/gradlew b/legacy/deploy/lite/human_segmentation_demo/gradlew similarity index 100% rename from deploy/lite/human_segmentation_demo/gradlew rename to legacy/deploy/lite/human_segmentation_demo/gradlew diff --git a/deploy/lite/human_segmentation_demo/gradlew.bat b/legacy/deploy/lite/human_segmentation_demo/gradlew.bat similarity index 100% rename from deploy/lite/human_segmentation_demo/gradlew.bat rename to legacy/deploy/lite/human_segmentation_demo/gradlew.bat diff --git a/deploy/lite/human_segmentation_demo/settings.gradle b/legacy/deploy/lite/human_segmentation_demo/settings.gradle similarity index 100% rename from deploy/lite/human_segmentation_demo/settings.gradle rename to legacy/deploy/lite/human_segmentation_demo/settings.gradle diff --git a/deploy/paddle-serving/README.md b/legacy/deploy/paddle-serving/README.md similarity index 100% rename from deploy/paddle-serving/README.md rename to 
legacy/deploy/paddle-serving/README.md diff --git a/deploy/paddle-serving/postprocess.py b/legacy/deploy/paddle-serving/postprocess.py similarity index 100% rename from deploy/paddle-serving/postprocess.py rename to legacy/deploy/paddle-serving/postprocess.py diff --git a/deploy/paddle-serving/seg_client.py b/legacy/deploy/paddle-serving/seg_client.py similarity index 100% rename from deploy/paddle-serving/seg_client.py rename to legacy/deploy/paddle-serving/seg_client.py diff --git a/deploy/python/README.md b/legacy/deploy/python/README.md similarity index 100% rename from deploy/python/README.md rename to legacy/deploy/python/README.md diff --git a/deploy/python/docs/PaddleSeg_Infer_Benchmark.md b/legacy/deploy/python/docs/PaddleSeg_Infer_Benchmark.md similarity index 100% rename from deploy/python/docs/PaddleSeg_Infer_Benchmark.md rename to legacy/deploy/python/docs/PaddleSeg_Infer_Benchmark.md diff --git a/deploy/python/docs/compile_paddle_with_tensorrt.md b/legacy/deploy/python/docs/compile_paddle_with_tensorrt.md similarity index 100% rename from deploy/python/docs/compile_paddle_with_tensorrt.md rename to legacy/deploy/python/docs/compile_paddle_with_tensorrt.md diff --git a/deploy/python/infer.py b/legacy/deploy/python/infer.py similarity index 100% rename from deploy/python/infer.py rename to legacy/deploy/python/infer.py diff --git a/deploy/python/requirements.txt b/legacy/deploy/python/requirements.txt similarity index 100% rename from deploy/python/requirements.txt rename to legacy/deploy/python/requirements.txt diff --git a/deploy/serving/COMPILE_GUIDE.md b/legacy/deploy/serving/COMPILE_GUIDE.md similarity index 100% rename from deploy/serving/COMPILE_GUIDE.md rename to legacy/deploy/serving/COMPILE_GUIDE.md diff --git a/deploy/serving/README.md b/legacy/deploy/serving/README.md similarity index 100% rename from deploy/serving/README.md rename to legacy/deploy/serving/README.md diff --git a/deploy/serving/UBUNTU.md b/legacy/deploy/serving/UBUNTU.md similarity index 100% rename from deploy/serving/UBUNTU.md rename to legacy/deploy/serving/UBUNTU.md diff --git a/deploy/serving/requirements.txt b/legacy/deploy/serving/requirements.txt similarity index 100% rename from deploy/serving/requirements.txt rename to legacy/deploy/serving/requirements.txt diff --git a/deploy/serving/seg-serving/CMakeLists.txt b/legacy/deploy/serving/seg-serving/CMakeLists.txt similarity index 100% rename from deploy/serving/seg-serving/CMakeLists.txt rename to legacy/deploy/serving/seg-serving/CMakeLists.txt diff --git a/deploy/serving/seg-serving/conf/gflags.conf b/legacy/deploy/serving/seg-serving/conf/gflags.conf similarity index 100% rename from deploy/serving/seg-serving/conf/gflags.conf rename to legacy/deploy/serving/seg-serving/conf/gflags.conf diff --git a/deploy/serving/seg-serving/conf/model_toolkit.prototxt b/legacy/deploy/serving/seg-serving/conf/model_toolkit.prototxt similarity index 100% rename from deploy/serving/seg-serving/conf/model_toolkit.prototxt rename to legacy/deploy/serving/seg-serving/conf/model_toolkit.prototxt diff --git a/deploy/serving/seg-serving/conf/resource.prototxt b/legacy/deploy/serving/seg-serving/conf/resource.prototxt similarity index 100% rename from deploy/serving/seg-serving/conf/resource.prototxt rename to legacy/deploy/serving/seg-serving/conf/resource.prototxt diff --git a/deploy/serving/seg-serving/conf/seg_conf.yaml b/legacy/deploy/serving/seg-serving/conf/seg_conf.yaml similarity index 100% rename from deploy/serving/seg-serving/conf/seg_conf.yaml rename 
to legacy/deploy/serving/seg-serving/conf/seg_conf.yaml diff --git a/deploy/serving/seg-serving/conf/seg_conf2.yaml b/legacy/deploy/serving/seg-serving/conf/seg_conf2.yaml similarity index 100% rename from deploy/serving/seg-serving/conf/seg_conf2.yaml rename to legacy/deploy/serving/seg-serving/conf/seg_conf2.yaml diff --git a/deploy/serving/seg-serving/conf/service.prototxt b/legacy/deploy/serving/seg-serving/conf/service.prototxt similarity index 100% rename from deploy/serving/seg-serving/conf/service.prototxt rename to legacy/deploy/serving/seg-serving/conf/service.prototxt diff --git a/deploy/serving/seg-serving/conf/workflow.prototxt b/legacy/deploy/serving/seg-serving/conf/workflow.prototxt similarity index 100% rename from deploy/serving/seg-serving/conf/workflow.prototxt rename to legacy/deploy/serving/seg-serving/conf/workflow.prototxt diff --git a/deploy/serving/seg-serving/data/model/paddle/fluid_reload_flag b/legacy/deploy/serving/seg-serving/data/model/paddle/fluid_reload_flag similarity index 100% rename from deploy/serving/seg-serving/data/model/paddle/fluid_reload_flag rename to legacy/deploy/serving/seg-serving/data/model/paddle/fluid_reload_flag diff --git a/deploy/serving/seg-serving/data/model/paddle/fluid_time_file b/legacy/deploy/serving/seg-serving/data/model/paddle/fluid_time_file similarity index 100% rename from deploy/serving/seg-serving/data/model/paddle/fluid_time_file rename to legacy/deploy/serving/seg-serving/data/model/paddle/fluid_time_file diff --git a/deploy/serving/seg-serving/op/CMakeLists.txt b/legacy/deploy/serving/seg-serving/op/CMakeLists.txt similarity index 100% rename from deploy/serving/seg-serving/op/CMakeLists.txt rename to legacy/deploy/serving/seg-serving/op/CMakeLists.txt diff --git a/deploy/serving/seg-serving/op/image_seg_op.cpp b/legacy/deploy/serving/seg-serving/op/image_seg_op.cpp similarity index 100% rename from deploy/serving/seg-serving/op/image_seg_op.cpp rename to legacy/deploy/serving/seg-serving/op/image_seg_op.cpp diff --git a/deploy/serving/seg-serving/op/image_seg_op.h b/legacy/deploy/serving/seg-serving/op/image_seg_op.h similarity index 100% rename from deploy/serving/seg-serving/op/image_seg_op.h rename to legacy/deploy/serving/seg-serving/op/image_seg_op.h diff --git a/deploy/serving/seg-serving/op/reader_op.cpp b/legacy/deploy/serving/seg-serving/op/reader_op.cpp similarity index 100% rename from deploy/serving/seg-serving/op/reader_op.cpp rename to legacy/deploy/serving/seg-serving/op/reader_op.cpp diff --git a/deploy/serving/seg-serving/op/reader_op.h b/legacy/deploy/serving/seg-serving/op/reader_op.h similarity index 100% rename from deploy/serving/seg-serving/op/reader_op.h rename to legacy/deploy/serving/seg-serving/op/reader_op.h diff --git a/deploy/serving/seg-serving/op/seg_conf.cpp b/legacy/deploy/serving/seg-serving/op/seg_conf.cpp similarity index 100% rename from deploy/serving/seg-serving/op/seg_conf.cpp rename to legacy/deploy/serving/seg-serving/op/seg_conf.cpp diff --git a/deploy/serving/seg-serving/op/seg_conf.h b/legacy/deploy/serving/seg-serving/op/seg_conf.h similarity index 100% rename from deploy/serving/seg-serving/op/seg_conf.h rename to legacy/deploy/serving/seg-serving/op/seg_conf.h diff --git a/deploy/serving/seg-serving/op/write_json_op.cpp b/legacy/deploy/serving/seg-serving/op/write_json_op.cpp similarity index 100% rename from deploy/serving/seg-serving/op/write_json_op.cpp rename to legacy/deploy/serving/seg-serving/op/write_json_op.cpp diff --git 
a/deploy/serving/seg-serving/op/write_json_op.h b/legacy/deploy/serving/seg-serving/op/write_json_op.h similarity index 100% rename from deploy/serving/seg-serving/op/write_json_op.h rename to legacy/deploy/serving/seg-serving/op/write_json_op.h diff --git a/deploy/serving/seg-serving/proto/CMakeLists.txt b/legacy/deploy/serving/seg-serving/proto/CMakeLists.txt similarity index 100% rename from deploy/serving/seg-serving/proto/CMakeLists.txt rename to legacy/deploy/serving/seg-serving/proto/CMakeLists.txt diff --git a/deploy/serving/seg-serving/proto/image_seg.proto b/legacy/deploy/serving/seg-serving/proto/image_seg.proto similarity index 100% rename from deploy/serving/seg-serving/proto/image_seg.proto rename to legacy/deploy/serving/seg-serving/proto/image_seg.proto diff --git a/deploy/serving/seg-serving/scripts/start.sh b/legacy/deploy/serving/seg-serving/scripts/start.sh similarity index 100% rename from deploy/serving/seg-serving/scripts/start.sh rename to legacy/deploy/serving/seg-serving/scripts/start.sh diff --git a/deploy/serving/tools/image_seg_client.py b/legacy/deploy/serving/tools/image_seg_client.py similarity index 100% rename from deploy/serving/tools/image_seg_client.py rename to legacy/deploy/serving/tools/image_seg_client.py diff --git a/deploy/serving/tools/images/1.jpg b/legacy/deploy/serving/tools/images/1.jpg similarity index 100% rename from deploy/serving/tools/images/1.jpg rename to legacy/deploy/serving/tools/images/1.jpg diff --git a/deploy/serving/tools/images/2.jpg b/legacy/deploy/serving/tools/images/2.jpg similarity index 100% rename from deploy/serving/tools/images/2.jpg rename to legacy/deploy/serving/tools/images/2.jpg diff --git a/deploy/serving/tools/images/3.jpg b/legacy/deploy/serving/tools/images/3.jpg similarity index 100% rename from deploy/serving/tools/images/3.jpg rename to legacy/deploy/serving/tools/images/3.jpg diff --git a/docs/annotation/cityscapes_demo/cityscapes_demo_dataset.yaml b/legacy/docs/annotation/cityscapes_demo/cityscapes_demo_dataset.yaml similarity index 100% rename from docs/annotation/cityscapes_demo/cityscapes_demo_dataset.yaml rename to legacy/docs/annotation/cityscapes_demo/cityscapes_demo_dataset.yaml diff --git a/docs/annotation/cityscapes_demo/gtFine/train/stuttgart/stuttgart_000021_000019_gtFine_labelTrainIds.png b/legacy/docs/annotation/cityscapes_demo/gtFine/train/stuttgart/stuttgart_000021_000019_gtFine_labelTrainIds.png similarity index 100% rename from docs/annotation/cityscapes_demo/gtFine/train/stuttgart/stuttgart_000021_000019_gtFine_labelTrainIds.png rename to legacy/docs/annotation/cityscapes_demo/gtFine/train/stuttgart/stuttgart_000021_000019_gtFine_labelTrainIds.png diff --git a/docs/annotation/cityscapes_demo/gtFine/train/stuttgart/stuttgart_000072_000019_gtFine_labelTrainIds.png b/legacy/docs/annotation/cityscapes_demo/gtFine/train/stuttgart/stuttgart_000072_000019_gtFine_labelTrainIds.png similarity index 100% rename from docs/annotation/cityscapes_demo/gtFine/train/stuttgart/stuttgart_000072_000019_gtFine_labelTrainIds.png rename to legacy/docs/annotation/cityscapes_demo/gtFine/train/stuttgart/stuttgart_000072_000019_gtFine_labelTrainIds.png diff --git a/docs/annotation/cityscapes_demo/gtFine/val/frankfurt/frankfurt_000001_062250_gtFine_labelTrainIds.png b/legacy/docs/annotation/cityscapes_demo/gtFine/val/frankfurt/frankfurt_000001_062250_gtFine_labelTrainIds.png similarity index 100% rename from docs/annotation/cityscapes_demo/gtFine/val/frankfurt/frankfurt_000001_062250_gtFine_labelTrainIds.png 
rename to legacy/docs/annotation/cityscapes_demo/gtFine/val/frankfurt/frankfurt_000001_062250_gtFine_labelTrainIds.png diff --git a/docs/annotation/cityscapes_demo/gtFine/val/frankfurt/frankfurt_000001_063045_gtFine_labelTrainIds.png b/legacy/docs/annotation/cityscapes_demo/gtFine/val/frankfurt/frankfurt_000001_063045_gtFine_labelTrainIds.png similarity index 100% rename from docs/annotation/cityscapes_demo/gtFine/val/frankfurt/frankfurt_000001_063045_gtFine_labelTrainIds.png rename to legacy/docs/annotation/cityscapes_demo/gtFine/val/frankfurt/frankfurt_000001_063045_gtFine_labelTrainIds.png diff --git a/docs/annotation/cityscapes_demo/leftImg8bit/train/stuttgart/stuttgart_000021_000019_leftImg8bit.png b/legacy/docs/annotation/cityscapes_demo/leftImg8bit/train/stuttgart/stuttgart_000021_000019_leftImg8bit.png similarity index 100% rename from docs/annotation/cityscapes_demo/leftImg8bit/train/stuttgart/stuttgart_000021_000019_leftImg8bit.png rename to legacy/docs/annotation/cityscapes_demo/leftImg8bit/train/stuttgart/stuttgart_000021_000019_leftImg8bit.png diff --git a/docs/annotation/cityscapes_demo/leftImg8bit/train/stuttgart/stuttgart_000072_000019_leftImg8bit.png b/legacy/docs/annotation/cityscapes_demo/leftImg8bit/train/stuttgart/stuttgart_000072_000019_leftImg8bit.png similarity index 100% rename from docs/annotation/cityscapes_demo/leftImg8bit/train/stuttgart/stuttgart_000072_000019_leftImg8bit.png rename to legacy/docs/annotation/cityscapes_demo/leftImg8bit/train/stuttgart/stuttgart_000072_000019_leftImg8bit.png diff --git a/docs/annotation/cityscapes_demo/leftImg8bit/val/frankfurt/frankfurt_000001_062250_leftImg8bit.png b/legacy/docs/annotation/cityscapes_demo/leftImg8bit/val/frankfurt/frankfurt_000001_062250_leftImg8bit.png similarity index 100% rename from docs/annotation/cityscapes_demo/leftImg8bit/val/frankfurt/frankfurt_000001_062250_leftImg8bit.png rename to legacy/docs/annotation/cityscapes_demo/leftImg8bit/val/frankfurt/frankfurt_000001_062250_leftImg8bit.png diff --git a/docs/annotation/cityscapes_demo/leftImg8bit/val/frankfurt/frankfurt_000001_063045_leftImg8bit.png b/legacy/docs/annotation/cityscapes_demo/leftImg8bit/val/frankfurt/frankfurt_000001_063045_leftImg8bit.png similarity index 100% rename from docs/annotation/cityscapes_demo/leftImg8bit/val/frankfurt/frankfurt_000001_063045_leftImg8bit.png rename to legacy/docs/annotation/cityscapes_demo/leftImg8bit/val/frankfurt/frankfurt_000001_063045_leftImg8bit.png diff --git a/docs/annotation/cityscapes_demo/train_list.txt b/legacy/docs/annotation/cityscapes_demo/train_list.txt similarity index 100% rename from docs/annotation/cityscapes_demo/train_list.txt rename to legacy/docs/annotation/cityscapes_demo/train_list.txt diff --git a/docs/annotation/cityscapes_demo/val_list.txt b/legacy/docs/annotation/cityscapes_demo/val_list.txt similarity index 100% rename from docs/annotation/cityscapes_demo/val_list.txt rename to legacy/docs/annotation/cityscapes_demo/val_list.txt diff --git a/docs/annotation/jingling2seg.md b/legacy/docs/annotation/jingling2seg.md similarity index 100% rename from docs/annotation/jingling2seg.md rename to legacy/docs/annotation/jingling2seg.md diff --git a/docs/annotation/jingling_demo/jingling.jpg b/legacy/docs/annotation/jingling_demo/jingling.jpg similarity index 100% rename from docs/annotation/jingling_demo/jingling.jpg rename to legacy/docs/annotation/jingling_demo/jingling.jpg diff --git a/docs/annotation/jingling_demo/outputs/annotations/jingling.png 
b/legacy/docs/annotation/jingling_demo/outputs/annotations/jingling.png similarity index 100% rename from docs/annotation/jingling_demo/outputs/annotations/jingling.png rename to legacy/docs/annotation/jingling_demo/outputs/annotations/jingling.png diff --git a/docs/annotation/jingling_demo/outputs/class_names.txt b/legacy/docs/annotation/jingling_demo/outputs/class_names.txt similarity index 100% rename from docs/annotation/jingling_demo/outputs/class_names.txt rename to legacy/docs/annotation/jingling_demo/outputs/class_names.txt diff --git a/docs/annotation/jingling_demo/outputs/jingling.json b/legacy/docs/annotation/jingling_demo/outputs/jingling.json similarity index 100% rename from docs/annotation/jingling_demo/outputs/jingling.json rename to legacy/docs/annotation/jingling_demo/outputs/jingling.json diff --git a/docs/annotation/labelme2seg.md b/legacy/docs/annotation/labelme2seg.md similarity index 100% rename from docs/annotation/labelme2seg.md rename to legacy/docs/annotation/labelme2seg.md diff --git a/docs/annotation/labelme_demo/2011_000025.jpg b/legacy/docs/annotation/labelme_demo/2011_000025.jpg similarity index 100% rename from docs/annotation/labelme_demo/2011_000025.jpg rename to legacy/docs/annotation/labelme_demo/2011_000025.jpg diff --git a/docs/annotation/labelme_demo/2011_000025.json b/legacy/docs/annotation/labelme_demo/2011_000025.json similarity index 100% rename from docs/annotation/labelme_demo/2011_000025.json rename to legacy/docs/annotation/labelme_demo/2011_000025.json diff --git a/docs/annotation/labelme_demo/class_names.txt b/legacy/docs/annotation/labelme_demo/class_names.txt similarity index 100% rename from docs/annotation/labelme_demo/class_names.txt rename to legacy/docs/annotation/labelme_demo/class_names.txt diff --git a/docs/check.md b/legacy/docs/check.md similarity index 100% rename from docs/check.md rename to legacy/docs/check.md diff --git a/docs/config.md b/legacy/docs/config.md similarity index 100% rename from docs/config.md rename to legacy/docs/config.md diff --git a/docs/configs/.gitkeep b/legacy/docs/configs/.gitkeep similarity index 100% rename from docs/configs/.gitkeep rename to legacy/docs/configs/.gitkeep diff --git a/docs/configs/basic_group.md b/legacy/docs/configs/basic_group.md similarity index 100% rename from docs/configs/basic_group.md rename to legacy/docs/configs/basic_group.md diff --git a/docs/configs/dataloader_group.md b/legacy/docs/configs/dataloader_group.md similarity index 100% rename from docs/configs/dataloader_group.md rename to legacy/docs/configs/dataloader_group.md diff --git a/docs/configs/dataset_group.md b/legacy/docs/configs/dataset_group.md similarity index 100% rename from docs/configs/dataset_group.md rename to legacy/docs/configs/dataset_group.md diff --git a/docs/configs/freeze_group.md b/legacy/docs/configs/freeze_group.md similarity index 100% rename from docs/configs/freeze_group.md rename to legacy/docs/configs/freeze_group.md diff --git a/docs/configs/model_deeplabv3p_group.md b/legacy/docs/configs/model_deeplabv3p_group.md similarity index 100% rename from docs/configs/model_deeplabv3p_group.md rename to legacy/docs/configs/model_deeplabv3p_group.md diff --git a/docs/configs/model_group.md b/legacy/docs/configs/model_group.md similarity index 100% rename from docs/configs/model_group.md rename to legacy/docs/configs/model_group.md diff --git a/docs/configs/model_hrnet_group.md b/legacy/docs/configs/model_hrnet_group.md similarity index 100% rename from docs/configs/model_hrnet_group.md rename 
to legacy/docs/configs/model_hrnet_group.md diff --git a/docs/configs/model_icnet_group.md b/legacy/docs/configs/model_icnet_group.md similarity index 100% rename from docs/configs/model_icnet_group.md rename to legacy/docs/configs/model_icnet_group.md diff --git a/docs/configs/model_pspnet_group.md b/legacy/docs/configs/model_pspnet_group.md similarity index 100% rename from docs/configs/model_pspnet_group.md rename to legacy/docs/configs/model_pspnet_group.md diff --git a/docs/configs/model_unet_group.md b/legacy/docs/configs/model_unet_group.md similarity index 100% rename from docs/configs/model_unet_group.md rename to legacy/docs/configs/model_unet_group.md diff --git a/docs/configs/solver_group.md b/legacy/docs/configs/solver_group.md similarity index 100% rename from docs/configs/solver_group.md rename to legacy/docs/configs/solver_group.md diff --git a/docs/configs/test_group.md b/legacy/docs/configs/test_group.md similarity index 100% rename from docs/configs/test_group.md rename to legacy/docs/configs/test_group.md diff --git a/docs/configs/train_group.md b/legacy/docs/configs/train_group.md similarity index 100% rename from docs/configs/train_group.md rename to legacy/docs/configs/train_group.md diff --git a/docs/data_aug.md b/legacy/docs/data_aug.md similarity index 100% rename from docs/data_aug.md rename to legacy/docs/data_aug.md
diff --git a/legacy/docs/data_prepare.md b/legacy/docs/data_prepare.md
new file mode 100644
index 0000000000..de1fd7965c
--- /dev/null
+++ b/legacy/docs/data_prepare.md
@@ -0,0 +1,175 @@
+# PaddleSeg Data Preparation
+
+## Data Annotation
+
+### Annotation Protocol
+PaddleSeg uses single-channel annotation images in which each pixel value denotes a class. Class labels must start at 0 and increase consecutively; for example, 0, 1, 2, 3 indicate four classes.
+
+**NOTE:** Please save annotation images in the lossless PNG format. At most 256 classes are supported.
+
+### Grayscale vs. Pseudo-color Annotation
+Most segmentation libraries use single-channel grayscale images as annotations, which usually render as almost entirely black. Grayscale annotations have two drawbacks:
+1. After annotating an image, you cannot directly check whether the annotation is correct.
+2. During model testing, you cannot directly judge the actual segmentation quality.
+
+**PaddleSeg supports pseudo-color annotation images: a color palette is injected into the original single-channel image, so the annotation renders in color while the file size barely grows.**
+
+PaddleSeg also remains compatible with grayscale annotations, so existing grayscale datasets can be used as-is without modification.
+![](./imgs/annotation/image-11.png)
+
+### Converting Grayscale Annotations to Pseudo-color
+If you need to convert grayscale annotations to pseudo-color, you can use our conversion tool. It covers the following two common cases:
+1. To convert all grayscale annotation images under a given directory, run the following command and point it at the directory containing the grayscale annotations.
+```buildoutcfg
+python pdseg/tools/gray2pseudo_color.py <dir_or_file> <output_dir>
+```
+
+|Parameter|Purpose|
+|-|-|
+|dir_or_file|Directory containing the grayscale annotations|
+|output_dir|Output directory for the pseudo-color annotation images|
+
+2. To convert only part of the grayscale annotation images of a dataset, run the following command; an existing file list is required, and the specified images are read from it.
+```buildoutcfg
+python pdseg/tools/gray2pseudo_color.py <dir_or_file> <output_dir> --dataset_dir <dataset_dir> --file_separator <file_separator>
+```
+|Parameter|Purpose|
+|-|-|
+|dir_or_file|Path to the file list|
+|output_dir|Output directory for the pseudo-color annotation images|
+|--dataset_dir|Root directory of the dataset|
+|--file_separator|Separator used in the file list|
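+
+The conversion itself is lightweight: it only attaches a color palette to the existing single-channel annotation. As a rough sketch of the idea, assuming PIL is installed (this is an illustration, not the actual `pdseg/tools/gray2pseudo_color.py` implementation, and the palette formula below is arbitrary):
+```python
+from PIL import Image
+
+def gray_to_pseudo_color(input_path, output_path):
+    # Load the single-channel ("L" or "P") annotation; each pixel value
+    # is a class id.
+    img = Image.open(input_path)
+    # Build a 256-entry flat RGB palette: entry i is the display color of
+    # class i (illustrative values only).
+    palette = []
+    for i in range(256):
+        palette.extend([(i * 37) % 256, (i * 67) % 256, (i * 97) % 256])
+    # Attaching a palette turns an "L" image into a "P" image in place,
+    # keeping every pixel value (class id) as a palette index.
+    img.putpalette(palette)
+    # PNG keeps the result lossless; the palette adds almost no file size.
+    img.save(output_path, format="PNG")
+```
+Using one fixed palette for every image keeps the colors consistent, so the same class id always displays as the same color across the whole dataset.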
+
+As shown below, the left side is the path of the original image and the right side is the path of its annotation.
+
+![cityscapes_filelist](./imgs/file_list.png)
+
+**Notes**
+
+* Make sure the separator appears exactly once per line in the file list. If a file name contains spaces, use a character that cannot occur in file names, such as "|", as the separator.
+
+* Save the file list in **UTF-8** format. PaddleSeg reads file_list files with UTF-8 encoding by default.
+
+If the dataset has no annotation images, the file list does not need to contain the separator or the annotation path, as shown below.
+
+![cityscapes_filelist](./imgs/file_list2.png)
+
+**Notes**
+
+Such a file list can only be used for visualization with `pdseg/vis.py`,
+i.e. only in the `DATASET.TEST_FILE_LIST` and `DATASET.VIS_FILE_LIST` configuration options.
+It cannot be used in `DATASET.TRAIN_FILE_LIST` or `DATASET.VAL_FILE_LIST`.
+
+
+**What does a compliant file list look like?**
+
+See the directory [`./docs/annotation/cityscapes_demo`](../docs/annotation/cityscapes_demo/).
+
+### Dataset Directory Structure
+
+To generate file lists for your own dataset, organize it into the following directory structure (similar to the Cityscapes dataset):
+
+```
+./dataset/           # dataset root directory
+├── annotations      # annotation directory
+│   ├── test
+│   │   ├── ...
+│   │   └── ...
+│   ├── train
+│   │   ├── ...
+│   │   └── ...
+│   └── val
+│       ├── ...
+│       └── ...
+└── images           # original image directory
+    ├── test
+    │   ├── ...
+    │   └── ...
+    ├── train
+    │   ├── ...
+    │   └── ...
+    └── val
+        ├── ...
+        └── ...
+Note: the directory names above can be arbitrary
+```
+
+### File List Generation
+PaddleSeg provides a script for generating file lists. It works for custom datasets as well as the Cityscapes dataset, and specific features can be enabled through different flags.
+```
+python pdseg/tools/create_dataset_list.py <your/dataset/dir> ${FLAGS}
+```
+After running the script, file lists for the training/validation/test sets are generated under the dataset root directory (the file stems match `--second_folder`, with the extension `.txt`).
+
+**Note:** Generating file lists requires that either the numbers of original and annotation images match, or there are only original images without annotations. If the dataset lacks annotation images, a file list without separators and annotation paths can still be generated automatically.
+
+#### Command-line FLAGS
+
+|FLAG|Purpose|Default|Number of arguments|
+|-|-|-|-|
+|--type|Dataset type, `cityscapes` or `custom`|`custom`|1|
+|--separator|Separator of the file list|"&#124;"|1|
+|--folder|Folder names of the images and annotations|"images" "annotations"|2|
+|--second_folder|Folder names of the training/validation/test sets|"train" "val" "test"|variable|
+|--format|Data formats of the images and annotations|"jpg" "png"|2|
+|--postfix|Filter the images and annotations by whether the file stem (without extension) contains the given postfix|"" "" (two empty strings)|2|
+
+#### Examples
+- **For a custom dataset**
+
+If you have organized the dataset directory as described above, run the following commands to generate the file lists.
+
+```
+# Generate file lists with a space as the separator; both images and annotations are in png format
+python pdseg/tools/create_dataset_list.py <your/dataset/dir> --separator " " --format png png
+```
+```
+# Generate file lists where the image and annotation folders are named img and gt,
+# and the training and validation folders are named training and validation; no test list is generated
+python pdseg/tools/create_dataset_list.py <your/dataset/dir> \
+    --folder img gt --second_folder training validation
+```
+**Note:** The custom dataset directory must be specified; the other FLAGS can be set as needed. `--type` does not need to be specified.
+
+- **For the Cityscapes dataset**
+
+If you are using the Cityscapes dataset, run the following command to generate the file lists.
+
+```
+# Generate Cityscapes file lists with a comma as the separator
+python pdseg/tools/create_dataset_list.py <your/cityscapes/dir> --type cityscapes --separator ","
+```
+**Note:**
+
+The Cityscapes dataset directory must be specified, and `--type` must be `cityscapes`.
+
+With the cityscapes type, some FLAGS are preset and need not be given manually:
+
+|FLAG|Fixed value|
+|-|-|
+|--folder|"leftImg8bit" "gtFine"|
+|--format|"png" "png"|
+|--postfix|"_leftImg8bit" "_gtFine_labelTrainIds"|
+
+The remaining FLAGS can be set as needed.
diff --git a/docs/deploy.md b/legacy/docs/deploy.md
similarity index 100%
rename from docs/deploy.md
rename to legacy/docs/deploy.md
diff --git a/docs/dice_loss.md b/legacy/docs/dice_loss.md
similarity index 100%
rename from docs/dice_loss.md
rename to legacy/docs/dice_loss.md
diff --git a/docs/faq.md b/legacy/docs/faq.md
similarity index 100%
rename from
docs/faq.md rename to legacy/docs/faq.md diff --git a/docs/imgs/VOC2012.png b/legacy/docs/imgs/VOC2012.png similarity index 100% rename from docs/imgs/VOC2012.png rename to legacy/docs/imgs/VOC2012.png diff --git a/docs/imgs/annotation/image-1.png b/legacy/docs/imgs/annotation/image-1.png similarity index 100% rename from docs/imgs/annotation/image-1.png rename to legacy/docs/imgs/annotation/image-1.png diff --git a/docs/imgs/annotation/image-10.jpg b/legacy/docs/imgs/annotation/image-10.jpg similarity index 100% rename from docs/imgs/annotation/image-10.jpg rename to legacy/docs/imgs/annotation/image-10.jpg diff --git a/docs/imgs/annotation/image-11.png b/legacy/docs/imgs/annotation/image-11.png similarity index 100% rename from docs/imgs/annotation/image-11.png rename to legacy/docs/imgs/annotation/image-11.png diff --git a/docs/imgs/annotation/image-2.png b/legacy/docs/imgs/annotation/image-2.png similarity index 100% rename from docs/imgs/annotation/image-2.png rename to legacy/docs/imgs/annotation/image-2.png diff --git a/docs/imgs/annotation/image-3.png b/legacy/docs/imgs/annotation/image-3.png similarity index 100% rename from docs/imgs/annotation/image-3.png rename to legacy/docs/imgs/annotation/image-3.png diff --git a/docs/imgs/annotation/image-4-1.png b/legacy/docs/imgs/annotation/image-4-1.png similarity index 100% rename from docs/imgs/annotation/image-4-1.png rename to legacy/docs/imgs/annotation/image-4-1.png diff --git a/docs/imgs/annotation/image-4-2.png b/legacy/docs/imgs/annotation/image-4-2.png similarity index 100% rename from docs/imgs/annotation/image-4-2.png rename to legacy/docs/imgs/annotation/image-4-2.png diff --git a/docs/imgs/annotation/image-5.png b/legacy/docs/imgs/annotation/image-5.png similarity index 100% rename from docs/imgs/annotation/image-5.png rename to legacy/docs/imgs/annotation/image-5.png diff --git a/docs/imgs/annotation/image-6-2.png b/legacy/docs/imgs/annotation/image-6-2.png similarity index 100% rename from docs/imgs/annotation/image-6-2.png rename to legacy/docs/imgs/annotation/image-6-2.png diff --git a/docs/imgs/annotation/image-6.png b/legacy/docs/imgs/annotation/image-6.png similarity index 100% rename from docs/imgs/annotation/image-6.png rename to legacy/docs/imgs/annotation/image-6.png diff --git a/docs/imgs/annotation/image-7.png b/legacy/docs/imgs/annotation/image-7.png similarity index 100% rename from docs/imgs/annotation/image-7.png rename to legacy/docs/imgs/annotation/image-7.png diff --git a/docs/imgs/annotation/jingling-1.png b/legacy/docs/imgs/annotation/jingling-1.png similarity index 100% rename from docs/imgs/annotation/jingling-1.png rename to legacy/docs/imgs/annotation/jingling-1.png diff --git a/docs/imgs/annotation/jingling-2.png b/legacy/docs/imgs/annotation/jingling-2.png similarity index 100% rename from docs/imgs/annotation/jingling-2.png rename to legacy/docs/imgs/annotation/jingling-2.png diff --git a/docs/imgs/annotation/jingling-3.png b/legacy/docs/imgs/annotation/jingling-3.png similarity index 100% rename from docs/imgs/annotation/jingling-3.png rename to legacy/docs/imgs/annotation/jingling-3.png diff --git a/docs/imgs/annotation/jingling-4.png b/legacy/docs/imgs/annotation/jingling-4.png similarity index 100% rename from docs/imgs/annotation/jingling-4.png rename to legacy/docs/imgs/annotation/jingling-4.png diff --git a/docs/imgs/annotation/jingling-5.png b/legacy/docs/imgs/annotation/jingling-5.png similarity index 100% rename from docs/imgs/annotation/jingling-5.png rename to 
legacy/docs/imgs/annotation/jingling-5.png diff --git a/docs/imgs/aug_method.png b/legacy/docs/imgs/aug_method.png similarity index 100% rename from docs/imgs/aug_method.png rename to legacy/docs/imgs/aug_method.png diff --git a/docs/imgs/cityscapes.png b/legacy/docs/imgs/cityscapes.png similarity index 100% rename from docs/imgs/cityscapes.png rename to legacy/docs/imgs/cityscapes.png diff --git a/docs/imgs/cosine_decay_example.png b/legacy/docs/imgs/cosine_decay_example.png similarity index 100% rename from docs/imgs/cosine_decay_example.png rename to legacy/docs/imgs/cosine_decay_example.png diff --git a/docs/imgs/data_aug_example.png b/legacy/docs/imgs/data_aug_example.png similarity index 100% rename from docs/imgs/data_aug_example.png rename to legacy/docs/imgs/data_aug_example.png diff --git a/docs/imgs/data_aug_flip_mirror.png b/legacy/docs/imgs/data_aug_flip_mirror.png similarity index 100% rename from docs/imgs/data_aug_flip_mirror.png rename to legacy/docs/imgs/data_aug_flip_mirror.png diff --git a/docs/imgs/data_aug_flow.png b/legacy/docs/imgs/data_aug_flow.png similarity index 100% rename from docs/imgs/data_aug_flow.png rename to legacy/docs/imgs/data_aug_flow.png diff --git a/docs/imgs/deepglobe.png b/legacy/docs/imgs/deepglobe.png similarity index 100% rename from docs/imgs/deepglobe.png rename to legacy/docs/imgs/deepglobe.png diff --git a/docs/imgs/deeplabv3p.png b/legacy/docs/imgs/deeplabv3p.png similarity index 100% rename from docs/imgs/deeplabv3p.png rename to legacy/docs/imgs/deeplabv3p.png diff --git a/docs/imgs/dice.png b/legacy/docs/imgs/dice.png similarity index 100% rename from docs/imgs/dice.png rename to legacy/docs/imgs/dice.png diff --git a/docs/imgs/dice2.png b/legacy/docs/imgs/dice2.png similarity index 100% rename from docs/imgs/dice2.png rename to legacy/docs/imgs/dice2.png diff --git a/docs/imgs/dice3.png b/legacy/docs/imgs/dice3.png similarity index 100% rename from docs/imgs/dice3.png rename to legacy/docs/imgs/dice3.png diff --git a/docs/imgs/fast-scnn.png b/legacy/docs/imgs/fast-scnn.png similarity index 100% rename from docs/imgs/fast-scnn.png rename to legacy/docs/imgs/fast-scnn.png diff --git a/docs/imgs/file_list.png b/legacy/docs/imgs/file_list.png similarity index 100% rename from docs/imgs/file_list.png rename to legacy/docs/imgs/file_list.png diff --git a/docs/imgs/file_list2.png b/legacy/docs/imgs/file_list2.png similarity index 100% rename from docs/imgs/file_list2.png rename to legacy/docs/imgs/file_list2.png diff --git a/docs/imgs/gn.png b/legacy/docs/imgs/gn.png similarity index 100% rename from docs/imgs/gn.png rename to legacy/docs/imgs/gn.png diff --git a/docs/imgs/hrnet.png b/legacy/docs/imgs/hrnet.png similarity index 100% rename from docs/imgs/hrnet.png rename to legacy/docs/imgs/hrnet.png diff --git a/docs/imgs/icnet.png b/legacy/docs/imgs/icnet.png similarity index 100% rename from docs/imgs/icnet.png rename to legacy/docs/imgs/icnet.png diff --git a/docs/imgs/loss_comparison.png b/legacy/docs/imgs/loss_comparison.png similarity index 100% rename from docs/imgs/loss_comparison.png rename to legacy/docs/imgs/loss_comparison.png diff --git a/docs/imgs/lovasz-hinge-vis.png b/legacy/docs/imgs/lovasz-hinge-vis.png similarity index 100% rename from docs/imgs/lovasz-hinge-vis.png rename to legacy/docs/imgs/lovasz-hinge-vis.png diff --git a/docs/imgs/lovasz-hinge.png b/legacy/docs/imgs/lovasz-hinge.png similarity index 100% rename from docs/imgs/lovasz-hinge.png rename to legacy/docs/imgs/lovasz-hinge.png diff --git 
a/docs/imgs/lovasz-softmax.png b/legacy/docs/imgs/lovasz-softmax.png similarity index 100% rename from docs/imgs/lovasz-softmax.png rename to legacy/docs/imgs/lovasz-softmax.png diff --git a/docs/imgs/piecewise_decay_example.png b/legacy/docs/imgs/piecewise_decay_example.png similarity index 100% rename from docs/imgs/piecewise_decay_example.png rename to legacy/docs/imgs/piecewise_decay_example.png diff --git a/docs/imgs/poly_decay_example.png b/legacy/docs/imgs/poly_decay_example.png similarity index 100% rename from docs/imgs/poly_decay_example.png rename to legacy/docs/imgs/poly_decay_example.png diff --git a/docs/imgs/pspnet.png b/legacy/docs/imgs/pspnet.png similarity index 100% rename from docs/imgs/pspnet.png rename to legacy/docs/imgs/pspnet.png diff --git a/docs/imgs/pspnet2.png b/legacy/docs/imgs/pspnet2.png similarity index 100% rename from docs/imgs/pspnet2.png rename to legacy/docs/imgs/pspnet2.png diff --git a/docs/imgs/qq_group2.png b/legacy/docs/imgs/qq_group2.png similarity index 100% rename from docs/imgs/qq_group2.png rename to legacy/docs/imgs/qq_group2.png diff --git a/docs/imgs/rangescale.png b/legacy/docs/imgs/rangescale.png similarity index 100% rename from docs/imgs/rangescale.png rename to legacy/docs/imgs/rangescale.png diff --git a/docs/imgs/seg_news_icon.png b/legacy/docs/imgs/seg_news_icon.png similarity index 100% rename from docs/imgs/seg_news_icon.png rename to legacy/docs/imgs/seg_news_icon.png diff --git a/docs/imgs/softmax_loss.png b/legacy/docs/imgs/softmax_loss.png similarity index 100% rename from docs/imgs/softmax_loss.png rename to legacy/docs/imgs/softmax_loss.png diff --git a/docs/imgs/unet.png b/legacy/docs/imgs/unet.png similarity index 100% rename from docs/imgs/unet.png rename to legacy/docs/imgs/unet.png diff --git a/docs/imgs/usage_vis_demo.jpg b/legacy/docs/imgs/usage_vis_demo.jpg similarity index 100% rename from docs/imgs/usage_vis_demo.jpg rename to legacy/docs/imgs/usage_vis_demo.jpg diff --git a/docs/imgs/visualdl_image.png b/legacy/docs/imgs/visualdl_image.png similarity index 100% rename from docs/imgs/visualdl_image.png rename to legacy/docs/imgs/visualdl_image.png diff --git a/docs/imgs/visualdl_scalar.png b/legacy/docs/imgs/visualdl_scalar.png similarity index 100% rename from docs/imgs/visualdl_scalar.png rename to legacy/docs/imgs/visualdl_scalar.png diff --git a/docs/imgs/warmup_with_poly_decay_example.png b/legacy/docs/imgs/warmup_with_poly_decay_example.png similarity index 100% rename from docs/imgs/warmup_with_poly_decay_example.png rename to legacy/docs/imgs/warmup_with_poly_decay_example.png diff --git a/docs/loss_select.md b/legacy/docs/loss_select.md similarity index 100% rename from docs/loss_select.md rename to legacy/docs/loss_select.md diff --git a/docs/lovasz_loss.md b/legacy/docs/lovasz_loss.md similarity index 100% rename from docs/lovasz_loss.md rename to legacy/docs/lovasz_loss.md diff --git a/docs/model_export.md b/legacy/docs/model_export.md similarity index 100% rename from docs/model_export.md rename to legacy/docs/model_export.md diff --git a/docs/model_zoo.md b/legacy/docs/model_zoo.md similarity index 100% rename from docs/model_zoo.md rename to legacy/docs/model_zoo.md diff --git a/docs/models.md b/legacy/docs/models.md similarity index 100% rename from docs/models.md rename to legacy/docs/models.md diff --git a/docs/multiple_gpus_train_and_mixed_precision_train.md b/legacy/docs/multiple_gpus_train_and_mixed_precision_train.md similarity index 100% rename from 
docs/multiple_gpus_train_and_mixed_precision_train.md rename to legacy/docs/multiple_gpus_train_and_mixed_precision_train.md diff --git a/docs/release_notes.md b/legacy/docs/release_notes.md similarity index 100% rename from docs/release_notes.md rename to legacy/docs/release_notes.md diff --git a/docs/usage.md b/legacy/docs/usage.md similarity index 100% rename from docs/usage.md rename to legacy/docs/usage.md diff --git a/pdseg/__init__.py b/legacy/pdseg/__init__.py similarity index 100% rename from pdseg/__init__.py rename to legacy/pdseg/__init__.py diff --git a/pdseg/check.py b/legacy/pdseg/check.py similarity index 100% rename from pdseg/check.py rename to legacy/pdseg/check.py diff --git a/pdseg/data_aug.py b/legacy/pdseg/data_aug.py similarity index 100% rename from pdseg/data_aug.py rename to legacy/pdseg/data_aug.py diff --git a/pdseg/data_utils.py b/legacy/pdseg/data_utils.py similarity index 100% rename from pdseg/data_utils.py rename to legacy/pdseg/data_utils.py diff --git a/pdseg/eval.py b/legacy/pdseg/eval.py similarity index 100% rename from pdseg/eval.py rename to legacy/pdseg/eval.py diff --git a/pdseg/export_model.py b/legacy/pdseg/export_model.py similarity index 100% rename from pdseg/export_model.py rename to legacy/pdseg/export_model.py diff --git a/pdseg/export_serving_model.py b/legacy/pdseg/export_serving_model.py similarity index 100% rename from pdseg/export_serving_model.py rename to legacy/pdseg/export_serving_model.py diff --git a/pdseg/loss.py b/legacy/pdseg/loss.py similarity index 100% rename from pdseg/loss.py rename to legacy/pdseg/loss.py diff --git a/pdseg/lovasz_losses.py b/legacy/pdseg/lovasz_losses.py similarity index 100% rename from pdseg/lovasz_losses.py rename to legacy/pdseg/lovasz_losses.py diff --git a/pdseg/metrics.py b/legacy/pdseg/metrics.py similarity index 100% rename from pdseg/metrics.py rename to legacy/pdseg/metrics.py diff --git a/pdseg/models/__init__.py b/legacy/pdseg/models/__init__.py similarity index 100% rename from pdseg/models/__init__.py rename to legacy/pdseg/models/__init__.py diff --git a/pdseg/models/backbone/__init__.py b/legacy/pdseg/models/backbone/__init__.py similarity index 100% rename from pdseg/models/backbone/__init__.py rename to legacy/pdseg/models/backbone/__init__.py diff --git a/pdseg/models/backbone/mobilenet_v2.py b/legacy/pdseg/models/backbone/mobilenet_v2.py similarity index 100% rename from pdseg/models/backbone/mobilenet_v2.py rename to legacy/pdseg/models/backbone/mobilenet_v2.py diff --git a/pdseg/models/backbone/mobilenet_v3.py b/legacy/pdseg/models/backbone/mobilenet_v3.py similarity index 100% rename from pdseg/models/backbone/mobilenet_v3.py rename to legacy/pdseg/models/backbone/mobilenet_v3.py diff --git a/pdseg/models/backbone/resnet.py b/legacy/pdseg/models/backbone/resnet.py similarity index 100% rename from pdseg/models/backbone/resnet.py rename to legacy/pdseg/models/backbone/resnet.py diff --git a/pdseg/models/backbone/resnet_vd.py b/legacy/pdseg/models/backbone/resnet_vd.py similarity index 100% rename from pdseg/models/backbone/resnet_vd.py rename to legacy/pdseg/models/backbone/resnet_vd.py diff --git a/pdseg/models/backbone/vgg.py b/legacy/pdseg/models/backbone/vgg.py similarity index 100% rename from pdseg/models/backbone/vgg.py rename to legacy/pdseg/models/backbone/vgg.py diff --git a/pdseg/models/backbone/xception.py b/legacy/pdseg/models/backbone/xception.py similarity index 100% rename from pdseg/models/backbone/xception.py rename to legacy/pdseg/models/backbone/xception.py 
diff --git a/pdseg/models/libs/__init__.py b/legacy/pdseg/models/libs/__init__.py similarity index 100% rename from pdseg/models/libs/__init__.py rename to legacy/pdseg/models/libs/__init__.py diff --git a/pdseg/models/libs/model_libs.py b/legacy/pdseg/models/libs/model_libs.py similarity index 100% rename from pdseg/models/libs/model_libs.py rename to legacy/pdseg/models/libs/model_libs.py diff --git a/pdseg/models/model_builder.py b/legacy/pdseg/models/model_builder.py similarity index 100% rename from pdseg/models/model_builder.py rename to legacy/pdseg/models/model_builder.py diff --git a/pdseg/models/modeling/__init__.py b/legacy/pdseg/models/modeling/__init__.py similarity index 100% rename from pdseg/models/modeling/__init__.py rename to legacy/pdseg/models/modeling/__init__.py diff --git a/pdseg/models/modeling/deeplab.py b/legacy/pdseg/models/modeling/deeplab.py similarity index 100% rename from pdseg/models/modeling/deeplab.py rename to legacy/pdseg/models/modeling/deeplab.py diff --git a/pdseg/models/modeling/fast_scnn.py b/legacy/pdseg/models/modeling/fast_scnn.py similarity index 100% rename from pdseg/models/modeling/fast_scnn.py rename to legacy/pdseg/models/modeling/fast_scnn.py diff --git a/pdseg/models/modeling/hrnet.py b/legacy/pdseg/models/modeling/hrnet.py similarity index 100% rename from pdseg/models/modeling/hrnet.py rename to legacy/pdseg/models/modeling/hrnet.py diff --git a/pdseg/models/modeling/icnet.py b/legacy/pdseg/models/modeling/icnet.py similarity index 100% rename from pdseg/models/modeling/icnet.py rename to legacy/pdseg/models/modeling/icnet.py diff --git a/pdseg/models/modeling/ocrnet.py b/legacy/pdseg/models/modeling/ocrnet.py similarity index 100% rename from pdseg/models/modeling/ocrnet.py rename to legacy/pdseg/models/modeling/ocrnet.py diff --git a/pdseg/models/modeling/pspnet.py b/legacy/pdseg/models/modeling/pspnet.py similarity index 100% rename from pdseg/models/modeling/pspnet.py rename to legacy/pdseg/models/modeling/pspnet.py diff --git a/pdseg/models/modeling/unet.py b/legacy/pdseg/models/modeling/unet.py similarity index 100% rename from pdseg/models/modeling/unet.py rename to legacy/pdseg/models/modeling/unet.py diff --git a/pdseg/reader.py b/legacy/pdseg/reader.py similarity index 100% rename from pdseg/reader.py rename to legacy/pdseg/reader.py diff --git a/pdseg/solver.py b/legacy/pdseg/solver.py similarity index 100% rename from pdseg/solver.py rename to legacy/pdseg/solver.py diff --git a/pdseg/tools/__init__.py b/legacy/pdseg/tools/__init__.py similarity index 100% rename from pdseg/tools/__init__.py rename to legacy/pdseg/tools/__init__.py diff --git a/pdseg/tools/create_dataset_list.py b/legacy/pdseg/tools/create_dataset_list.py similarity index 100% rename from pdseg/tools/create_dataset_list.py rename to legacy/pdseg/tools/create_dataset_list.py diff --git a/pdseg/tools/gray2pseudo_color.py b/legacy/pdseg/tools/gray2pseudo_color.py similarity index 100% rename from pdseg/tools/gray2pseudo_color.py rename to legacy/pdseg/tools/gray2pseudo_color.py diff --git a/pdseg/tools/jingling2seg.py b/legacy/pdseg/tools/jingling2seg.py similarity index 100% rename from pdseg/tools/jingling2seg.py rename to legacy/pdseg/tools/jingling2seg.py diff --git a/pdseg/tools/labelme2seg.py b/legacy/pdseg/tools/labelme2seg.py similarity index 100% rename from pdseg/tools/labelme2seg.py rename to legacy/pdseg/tools/labelme2seg.py diff --git a/pdseg/train.py b/legacy/pdseg/train.py similarity index 100% rename from pdseg/train.py rename to 
legacy/pdseg/train.py diff --git a/pdseg/utils/__init__.py b/legacy/pdseg/utils/__init__.py similarity index 100% rename from pdseg/utils/__init__.py rename to legacy/pdseg/utils/__init__.py diff --git a/pdseg/utils/collect.py b/legacy/pdseg/utils/collect.py similarity index 100% rename from pdseg/utils/collect.py rename to legacy/pdseg/utils/collect.py diff --git a/pdseg/utils/config.py b/legacy/pdseg/utils/config.py similarity index 100% rename from pdseg/utils/config.py rename to legacy/pdseg/utils/config.py diff --git a/pdseg/utils/dist_utils.py b/legacy/pdseg/utils/dist_utils.py similarity index 100% rename from pdseg/utils/dist_utils.py rename to legacy/pdseg/utils/dist_utils.py diff --git a/pdseg/utils/fp16_utils.py b/legacy/pdseg/utils/fp16_utils.py similarity index 100% rename from pdseg/utils/fp16_utils.py rename to legacy/pdseg/utils/fp16_utils.py diff --git a/pdseg/utils/load_model_utils.py b/legacy/pdseg/utils/load_model_utils.py similarity index 100% rename from pdseg/utils/load_model_utils.py rename to legacy/pdseg/utils/load_model_utils.py diff --git a/pdseg/utils/paddle_utils.py b/legacy/pdseg/utils/paddle_utils.py similarity index 100% rename from pdseg/utils/paddle_utils.py rename to legacy/pdseg/utils/paddle_utils.py diff --git a/pdseg/utils/timer.py b/legacy/pdseg/utils/timer.py similarity index 100% rename from pdseg/utils/timer.py rename to legacy/pdseg/utils/timer.py diff --git a/pdseg/vis.py b/legacy/pdseg/vis.py similarity index 100% rename from pdseg/vis.py rename to legacy/pdseg/vis.py diff --git a/pretrained_model/download_model.py b/legacy/pretrained_model/download_model.py similarity index 100% rename from pretrained_model/download_model.py rename to legacy/pretrained_model/download_model.py diff --git a/dygraph/requirements.txt b/legacy/requirements.txt similarity index 69% rename from dygraph/requirements.txt rename to legacy/requirements.txt index 237cf295bd..6200a94b71 100644 --- a/dygraph/requirements.txt +++ b/legacy/requirements.txt @@ -3,6 +3,3 @@ yapf == 0.26.0 flake8 pyyaml >= 5.1 visualdl >= 2.0.0 -opencv-python -tqdm -filelock diff --git a/slim/distillation/README.md b/legacy/slim/distillation/README.md similarity index 100% rename from slim/distillation/README.md rename to legacy/slim/distillation/README.md diff --git a/slim/distillation/cityscape.yaml b/legacy/slim/distillation/cityscape.yaml similarity index 100% rename from slim/distillation/cityscape.yaml rename to legacy/slim/distillation/cityscape.yaml diff --git a/slim/distillation/cityscape_teacher.yaml b/legacy/slim/distillation/cityscape_teacher.yaml similarity index 100% rename from slim/distillation/cityscape_teacher.yaml rename to legacy/slim/distillation/cityscape_teacher.yaml diff --git a/slim/distillation/model_builder.py b/legacy/slim/distillation/model_builder.py similarity index 100% rename from slim/distillation/model_builder.py rename to legacy/slim/distillation/model_builder.py diff --git a/slim/distillation/train_distill.py b/legacy/slim/distillation/train_distill.py similarity index 100% rename from slim/distillation/train_distill.py rename to legacy/slim/distillation/train_distill.py diff --git a/slim/nas/README.md b/legacy/slim/nas/README.md similarity index 100% rename from slim/nas/README.md rename to legacy/slim/nas/README.md diff --git a/slim/nas/deeplab.py b/legacy/slim/nas/deeplab.py similarity index 100% rename from slim/nas/deeplab.py rename to legacy/slim/nas/deeplab.py diff --git a/slim/nas/eval_nas.py b/legacy/slim/nas/eval_nas.py similarity index 100% rename 
from slim/nas/eval_nas.py rename to legacy/slim/nas/eval_nas.py diff --git a/slim/nas/mobilenetv2_search_space.py b/legacy/slim/nas/mobilenetv2_search_space.py similarity index 100% rename from slim/nas/mobilenetv2_search_space.py rename to legacy/slim/nas/mobilenetv2_search_space.py diff --git a/slim/nas/model_builder.py b/legacy/slim/nas/model_builder.py similarity index 100% rename from slim/nas/model_builder.py rename to legacy/slim/nas/model_builder.py diff --git a/slim/nas/train_nas.py b/legacy/slim/nas/train_nas.py similarity index 100% rename from slim/nas/train_nas.py rename to legacy/slim/nas/train_nas.py diff --git a/slim/prune/README.md b/legacy/slim/prune/README.md similarity index 100% rename from slim/prune/README.md rename to legacy/slim/prune/README.md diff --git a/slim/prune/eval_prune.py b/legacy/slim/prune/eval_prune.py similarity index 100% rename from slim/prune/eval_prune.py rename to legacy/slim/prune/eval_prune.py diff --git a/slim/prune/train_prune.py b/legacy/slim/prune/train_prune.py similarity index 100% rename from slim/prune/train_prune.py rename to legacy/slim/prune/train_prune.py diff --git a/slim/quantization/README.md b/legacy/slim/quantization/README.md similarity index 100% rename from slim/quantization/README.md rename to legacy/slim/quantization/README.md diff --git a/slim/quantization/deploy/README.md b/legacy/slim/quantization/deploy/README.md similarity index 100% rename from slim/quantization/deploy/README.md rename to legacy/slim/quantization/deploy/README.md diff --git a/slim/quantization/deploy/infer.py b/legacy/slim/quantization/deploy/infer.py similarity index 100% rename from slim/quantization/deploy/infer.py rename to legacy/slim/quantization/deploy/infer.py diff --git a/slim/quantization/eval_quant.py b/legacy/slim/quantization/eval_quant.py similarity index 100% rename from slim/quantization/eval_quant.py rename to legacy/slim/quantization/eval_quant.py diff --git a/slim/quantization/export_model.py b/legacy/slim/quantization/export_model.py similarity index 100% rename from slim/quantization/export_model.py rename to legacy/slim/quantization/export_model.py diff --git a/slim/quantization/images/ConvertToInt8Pass.png b/legacy/slim/quantization/images/ConvertToInt8Pass.png similarity index 100% rename from slim/quantization/images/ConvertToInt8Pass.png rename to legacy/slim/quantization/images/ConvertToInt8Pass.png diff --git a/slim/quantization/images/FreezePass.png b/legacy/slim/quantization/images/FreezePass.png similarity index 100% rename from slim/quantization/images/FreezePass.png rename to legacy/slim/quantization/images/FreezePass.png diff --git a/slim/quantization/images/TransformForMobilePass.png b/legacy/slim/quantization/images/TransformForMobilePass.png similarity index 100% rename from slim/quantization/images/TransformForMobilePass.png rename to legacy/slim/quantization/images/TransformForMobilePass.png diff --git a/slim/quantization/images/TransformPass.png b/legacy/slim/quantization/images/TransformPass.png similarity index 100% rename from slim/quantization/images/TransformPass.png rename to legacy/slim/quantization/images/TransformPass.png diff --git a/slim/quantization/train_quant.py b/legacy/slim/quantization/train_quant.py similarity index 100% rename from slim/quantization/train_quant.py rename to legacy/slim/quantization/train_quant.py diff --git a/test/ci/check_code_style.sh b/legacy/test/ci/check_code_style.sh similarity index 100% rename from test/ci/check_code_style.sh rename to 
legacy/test/ci/check_code_style.sh diff --git a/test/ci/test_download_dataset.sh b/legacy/test/ci/test_download_dataset.sh similarity index 100% rename from test/ci/test_download_dataset.sh rename to legacy/test/ci/test_download_dataset.sh diff --git a/test/configs/deeplabv3p_xception65_cityscapes.yaml b/legacy/test/configs/deeplabv3p_xception65_cityscapes.yaml similarity index 100% rename from test/configs/deeplabv3p_xception65_cityscapes.yaml rename to legacy/test/configs/deeplabv3p_xception65_cityscapes.yaml diff --git a/test/configs/unet_pet.yaml b/legacy/test/configs/unet_pet.yaml similarity index 100% rename from test/configs/unet_pet.yaml rename to legacy/test/configs/unet_pet.yaml diff --git a/test/local_test_cityscapes.py b/legacy/test/local_test_cityscapes.py similarity index 100% rename from test/local_test_cityscapes.py rename to legacy/test/local_test_cityscapes.py diff --git a/test/local_test_pet.py b/legacy/test/local_test_pet.py similarity index 100% rename from test/local_test_pet.py rename to legacy/test/local_test_pet.py diff --git a/test/test_utils.py b/legacy/test/test_utils.py similarity index 100% rename from test/test_utils.py rename to legacy/test/test_utils.py diff --git a/tutorial/finetune_deeplabv3plus.md b/legacy/tutorial/finetune_deeplabv3plus.md similarity index 100% rename from tutorial/finetune_deeplabv3plus.md rename to legacy/tutorial/finetune_deeplabv3plus.md diff --git a/tutorial/finetune_fast_scnn.md b/legacy/tutorial/finetune_fast_scnn.md similarity index 100% rename from tutorial/finetune_fast_scnn.md rename to legacy/tutorial/finetune_fast_scnn.md diff --git a/tutorial/finetune_hrnet.md b/legacy/tutorial/finetune_hrnet.md similarity index 100% rename from tutorial/finetune_hrnet.md rename to legacy/tutorial/finetune_hrnet.md diff --git a/tutorial/finetune_icnet.md b/legacy/tutorial/finetune_icnet.md similarity index 100% rename from tutorial/finetune_icnet.md rename to legacy/tutorial/finetune_icnet.md diff --git a/tutorial/finetune_ocrnet.md b/legacy/tutorial/finetune_ocrnet.md similarity index 100% rename from tutorial/finetune_ocrnet.md rename to legacy/tutorial/finetune_ocrnet.md diff --git a/tutorial/finetune_pspnet.md b/legacy/tutorial/finetune_pspnet.md similarity index 100% rename from tutorial/finetune_pspnet.md rename to legacy/tutorial/finetune_pspnet.md diff --git a/tutorial/finetune_unet.md b/legacy/tutorial/finetune_unet.md similarity index 100% rename from tutorial/finetune_unet.md rename to legacy/tutorial/finetune_unet.md diff --git a/tutorial/imgs/optic.png b/legacy/tutorial/imgs/optic.png similarity index 100% rename from tutorial/imgs/optic.png rename to legacy/tutorial/imgs/optic.png diff --git a/tutorial/imgs/optic_deeplab.png b/legacy/tutorial/imgs/optic_deeplab.png similarity index 100% rename from tutorial/imgs/optic_deeplab.png rename to legacy/tutorial/imgs/optic_deeplab.png diff --git a/tutorial/imgs/optic_hrnet.png b/legacy/tutorial/imgs/optic_hrnet.png similarity index 100% rename from tutorial/imgs/optic_hrnet.png rename to legacy/tutorial/imgs/optic_hrnet.png diff --git a/tutorial/imgs/optic_icnet.png b/legacy/tutorial/imgs/optic_icnet.png similarity index 100% rename from tutorial/imgs/optic_icnet.png rename to legacy/tutorial/imgs/optic_icnet.png diff --git a/tutorial/imgs/optic_pspnet.png b/legacy/tutorial/imgs/optic_pspnet.png similarity index 100% rename from tutorial/imgs/optic_pspnet.png rename to legacy/tutorial/imgs/optic_pspnet.png diff --git a/tutorial/imgs/optic_unet.png 
b/legacy/tutorial/imgs/optic_unet.png similarity index 100% rename from tutorial/imgs/optic_unet.png rename to legacy/tutorial/imgs/optic_unet.png diff --git a/dygraph/paddleseg/__init__.py b/paddleseg/__init__.py similarity index 100% rename from dygraph/paddleseg/__init__.py rename to paddleseg/__init__.py diff --git a/dygraph/paddleseg/core/__init__.py b/paddleseg/core/__init__.py similarity index 100% rename from dygraph/paddleseg/core/__init__.py rename to paddleseg/core/__init__.py diff --git a/dygraph/paddleseg/core/infer.py b/paddleseg/core/infer.py similarity index 100% rename from dygraph/paddleseg/core/infer.py rename to paddleseg/core/infer.py diff --git a/dygraph/paddleseg/core/predict.py b/paddleseg/core/predict.py similarity index 100% rename from dygraph/paddleseg/core/predict.py rename to paddleseg/core/predict.py diff --git a/dygraph/paddleseg/core/train.py b/paddleseg/core/train.py similarity index 100% rename from dygraph/paddleseg/core/train.py rename to paddleseg/core/train.py diff --git a/dygraph/paddleseg/core/val.py b/paddleseg/core/val.py similarity index 100% rename from dygraph/paddleseg/core/val.py rename to paddleseg/core/val.py diff --git a/dygraph/paddleseg/cvlibs/__init__.py b/paddleseg/cvlibs/__init__.py similarity index 100% rename from dygraph/paddleseg/cvlibs/__init__.py rename to paddleseg/cvlibs/__init__.py diff --git a/dygraph/paddleseg/cvlibs/callbacks.py b/paddleseg/cvlibs/callbacks.py similarity index 100% rename from dygraph/paddleseg/cvlibs/callbacks.py rename to paddleseg/cvlibs/callbacks.py diff --git a/dygraph/paddleseg/cvlibs/config.py b/paddleseg/cvlibs/config.py similarity index 100% rename from dygraph/paddleseg/cvlibs/config.py rename to paddleseg/cvlibs/config.py diff --git a/dygraph/paddleseg/cvlibs/manager.py b/paddleseg/cvlibs/manager.py similarity index 100% rename from dygraph/paddleseg/cvlibs/manager.py rename to paddleseg/cvlibs/manager.py diff --git a/dygraph/paddleseg/cvlibs/param_init.py b/paddleseg/cvlibs/param_init.py similarity index 100% rename from dygraph/paddleseg/cvlibs/param_init.py rename to paddleseg/cvlibs/param_init.py diff --git a/dygraph/paddleseg/datasets/__init__.py b/paddleseg/datasets/__init__.py similarity index 100% rename from dygraph/paddleseg/datasets/__init__.py rename to paddleseg/datasets/__init__.py diff --git a/dygraph/paddleseg/datasets/ade.py b/paddleseg/datasets/ade.py similarity index 100% rename from dygraph/paddleseg/datasets/ade.py rename to paddleseg/datasets/ade.py diff --git a/dygraph/paddleseg/datasets/cityscapes.py b/paddleseg/datasets/cityscapes.py similarity index 100% rename from dygraph/paddleseg/datasets/cityscapes.py rename to paddleseg/datasets/cityscapes.py diff --git a/dygraph/paddleseg/datasets/dataset.py b/paddleseg/datasets/dataset.py similarity index 100% rename from dygraph/paddleseg/datasets/dataset.py rename to paddleseg/datasets/dataset.py diff --git a/dygraph/paddleseg/datasets/optic_disc_seg.py b/paddleseg/datasets/optic_disc_seg.py similarity index 100% rename from dygraph/paddleseg/datasets/optic_disc_seg.py rename to paddleseg/datasets/optic_disc_seg.py diff --git a/dygraph/paddleseg/datasets/voc.py b/paddleseg/datasets/voc.py similarity index 100% rename from dygraph/paddleseg/datasets/voc.py rename to paddleseg/datasets/voc.py diff --git a/dygraph/paddleseg/models/__init__.py b/paddleseg/models/__init__.py similarity index 100% rename from dygraph/paddleseg/models/__init__.py rename to paddleseg/models/__init__.py diff --git a/dygraph/paddleseg/models/ann.py 
b/paddleseg/models/ann.py similarity index 100% rename from dygraph/paddleseg/models/ann.py rename to paddleseg/models/ann.py diff --git a/dygraph/paddleseg/models/backbones/__init__.py b/paddleseg/models/backbones/__init__.py similarity index 100% rename from dygraph/paddleseg/models/backbones/__init__.py rename to paddleseg/models/backbones/__init__.py diff --git a/dygraph/paddleseg/models/backbones/hrnet.py b/paddleseg/models/backbones/hrnet.py similarity index 100% rename from dygraph/paddleseg/models/backbones/hrnet.py rename to paddleseg/models/backbones/hrnet.py diff --git a/dygraph/paddleseg/models/backbones/mobilenetv3.py b/paddleseg/models/backbones/mobilenetv3.py similarity index 100% rename from dygraph/paddleseg/models/backbones/mobilenetv3.py rename to paddleseg/models/backbones/mobilenetv3.py diff --git a/dygraph/paddleseg/models/backbones/resnet_vd.py b/paddleseg/models/backbones/resnet_vd.py similarity index 100% rename from dygraph/paddleseg/models/backbones/resnet_vd.py rename to paddleseg/models/backbones/resnet_vd.py diff --git a/dygraph/paddleseg/models/backbones/xception_deeplab.py b/paddleseg/models/backbones/xception_deeplab.py similarity index 100% rename from dygraph/paddleseg/models/backbones/xception_deeplab.py rename to paddleseg/models/backbones/xception_deeplab.py diff --git a/dygraph/paddleseg/models/bisenet.py b/paddleseg/models/bisenet.py similarity index 100% rename from dygraph/paddleseg/models/bisenet.py rename to paddleseg/models/bisenet.py diff --git a/dygraph/paddleseg/models/danet.py b/paddleseg/models/danet.py similarity index 100% rename from dygraph/paddleseg/models/danet.py rename to paddleseg/models/danet.py diff --git a/dygraph/paddleseg/models/deeplab.py b/paddleseg/models/deeplab.py similarity index 100% rename from dygraph/paddleseg/models/deeplab.py rename to paddleseg/models/deeplab.py diff --git a/dygraph/paddleseg/models/fast_scnn.py b/paddleseg/models/fast_scnn.py similarity index 100% rename from dygraph/paddleseg/models/fast_scnn.py rename to paddleseg/models/fast_scnn.py diff --git a/dygraph/paddleseg/models/fcn.py b/paddleseg/models/fcn.py similarity index 100% rename from dygraph/paddleseg/models/fcn.py rename to paddleseg/models/fcn.py diff --git a/dygraph/paddleseg/models/gcnet.py b/paddleseg/models/gcnet.py similarity index 100% rename from dygraph/paddleseg/models/gcnet.py rename to paddleseg/models/gcnet.py diff --git a/dygraph/paddleseg/models/layers/__init__.py b/paddleseg/models/layers/__init__.py similarity index 100% rename from dygraph/paddleseg/models/layers/__init__.py rename to paddleseg/models/layers/__init__.py diff --git a/dygraph/paddleseg/models/layers/activation.py b/paddleseg/models/layers/activation.py similarity index 100% rename from dygraph/paddleseg/models/layers/activation.py rename to paddleseg/models/layers/activation.py diff --git a/dygraph/paddleseg/models/layers/layer_libs.py b/paddleseg/models/layers/layer_libs.py similarity index 100% rename from dygraph/paddleseg/models/layers/layer_libs.py rename to paddleseg/models/layers/layer_libs.py diff --git a/dygraph/paddleseg/models/layers/pyramid_pool.py b/paddleseg/models/layers/pyramid_pool.py similarity index 100% rename from dygraph/paddleseg/models/layers/pyramid_pool.py rename to paddleseg/models/layers/pyramid_pool.py diff --git a/dygraph/paddleseg/models/losses/__init__.py b/paddleseg/models/losses/__init__.py similarity index 100% rename from dygraph/paddleseg/models/losses/__init__.py rename to paddleseg/models/losses/__init__.py diff --git 
a/dygraph/paddleseg/models/losses/cross_entroy_loss.py b/paddleseg/models/losses/cross_entroy_loss.py similarity index 100% rename from dygraph/paddleseg/models/losses/cross_entroy_loss.py rename to paddleseg/models/losses/cross_entroy_loss.py diff --git a/dygraph/paddleseg/models/ocrnet.py b/paddleseg/models/ocrnet.py similarity index 100% rename from dygraph/paddleseg/models/ocrnet.py rename to paddleseg/models/ocrnet.py diff --git a/dygraph/paddleseg/models/pspnet.py b/paddleseg/models/pspnet.py similarity index 100% rename from dygraph/paddleseg/models/pspnet.py rename to paddleseg/models/pspnet.py diff --git a/dygraph/paddleseg/models/unet.py b/paddleseg/models/unet.py similarity index 100% rename from dygraph/paddleseg/models/unet.py rename to paddleseg/models/unet.py diff --git a/dygraph/paddleseg/transforms/__init__.py b/paddleseg/transforms/__init__.py similarity index 100% rename from dygraph/paddleseg/transforms/__init__.py rename to paddleseg/transforms/__init__.py diff --git a/dygraph/paddleseg/transforms/functional.py b/paddleseg/transforms/functional.py similarity index 100% rename from dygraph/paddleseg/transforms/functional.py rename to paddleseg/transforms/functional.py diff --git a/dygraph/paddleseg/transforms/transforms.py b/paddleseg/transforms/transforms.py similarity index 100% rename from dygraph/paddleseg/transforms/transforms.py rename to paddleseg/transforms/transforms.py diff --git a/dygraph/paddleseg/utils/__init__.py b/paddleseg/utils/__init__.py similarity index 100% rename from dygraph/paddleseg/utils/__init__.py rename to paddleseg/utils/__init__.py diff --git a/dygraph/paddleseg/utils/download.py b/paddleseg/utils/download.py similarity index 100% rename from dygraph/paddleseg/utils/download.py rename to paddleseg/utils/download.py diff --git a/dygraph/paddleseg/utils/env/__init__.py b/paddleseg/utils/env/__init__.py similarity index 100% rename from dygraph/paddleseg/utils/env/__init__.py rename to paddleseg/utils/env/__init__.py diff --git a/dygraph/paddleseg/utils/env/seg_env.py b/paddleseg/utils/env/seg_env.py similarity index 100% rename from dygraph/paddleseg/utils/env/seg_env.py rename to paddleseg/utils/env/seg_env.py diff --git a/dygraph/paddleseg/utils/env/sys_env.py b/paddleseg/utils/env/sys_env.py similarity index 100% rename from dygraph/paddleseg/utils/env/sys_env.py rename to paddleseg/utils/env/sys_env.py diff --git a/dygraph/paddleseg/utils/logger.py b/paddleseg/utils/logger.py similarity index 100% rename from dygraph/paddleseg/utils/logger.py rename to paddleseg/utils/logger.py diff --git a/dygraph/paddleseg/utils/metrics.py b/paddleseg/utils/metrics.py similarity index 100% rename from dygraph/paddleseg/utils/metrics.py rename to paddleseg/utils/metrics.py diff --git a/dygraph/paddleseg/utils/progbar.py b/paddleseg/utils/progbar.py similarity index 100% rename from dygraph/paddleseg/utils/progbar.py rename to paddleseg/utils/progbar.py diff --git a/dygraph/paddleseg/utils/timer.py b/paddleseg/utils/timer.py similarity index 100% rename from dygraph/paddleseg/utils/timer.py rename to paddleseg/utils/timer.py diff --git a/dygraph/paddleseg/utils/utils.py b/paddleseg/utils/utils.py similarity index 100% rename from dygraph/paddleseg/utils/utils.py rename to paddleseg/utils/utils.py diff --git a/dygraph/paddleseg/utils/visualize.py b/paddleseg/utils/visualize.py similarity index 100% rename from dygraph/paddleseg/utils/visualize.py rename to paddleseg/utils/visualize.py diff --git a/dygraph/predict.py b/predict.py similarity index 100% 
rename from dygraph/predict.py rename to predict.py diff --git a/requirements.txt b/requirements.txt index 6200a94b71..237cf295bd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,3 +3,6 @@ yapf == 0.26.0 flake8 pyyaml >= 5.1 visualdl >= 2.0.0 +opencv-python +tqdm +filelock diff --git a/dygraph/tools/convert_cityscapes.py b/tools/convert_cityscapes.py similarity index 100% rename from dygraph/tools/convert_cityscapes.py rename to tools/convert_cityscapes.py diff --git a/dygraph/tools/voc_augment.py b/tools/voc_augment.py similarity index 100% rename from dygraph/tools/voc_augment.py rename to tools/voc_augment.py diff --git a/dygraph/train.py b/train.py similarity index 100% rename from dygraph/train.py rename to train.py diff --git a/dygraph/val.py b/val.py similarity index 100% rename from dygraph/val.py rename to val.py From d98663a8ea02afad9b062c0c38d10b53e833d15a Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 7 Dec 2020 15:23:30 +0800 Subject: [PATCH 019/210] add gscnn --- configs/gscnn/README.md | 13 + ...n_resnet50_os8_cityscapes_1024x512_80k.yml | 29 ++ paddleseg/core/train.py | 48 +-- paddleseg/datasets/ade.py | 12 +- paddleseg/datasets/cityscapes.py | 4 +- paddleseg/datasets/dataset.py | 13 +- paddleseg/datasets/optic_disc_seg.py | 8 +- paddleseg/datasets/voc.py | 4 +- paddleseg/models/__init__.py | 1 + paddleseg/models/backbones/resnet_vd.py | 2 + paddleseg/models/gscnn.py | 344 ++++++++++++++++++ paddleseg/models/losses/__init__.py | 3 + .../models/losses/binary_cross_entroy_loss.py | 158 ++++++++ paddleseg/models/losses/dual_task_loss.py | 139 +++++++ .../models/losses/edge_attention_loss.py | 75 ++++ paddleseg/transforms/functional.py | 66 ++++ 16 files changed, 890 insertions(+), 29 deletions(-) create mode 100644 configs/gscnn/README.md create mode 100644 configs/gscnn/gscnn_resnet50_os8_cityscapes_1024x512_80k.yml create mode 100644 paddleseg/models/gscnn.py create mode 100644 paddleseg/models/losses/binary_cross_entroy_loss.py create mode 100644 paddleseg/models/losses/dual_task_loss.py create mode 100644 paddleseg/models/losses/edge_attention_loss.py diff --git a/configs/gscnn/README.md b/configs/gscnn/README.md new file mode 100644 index 0000000000..a3806879f9 --- /dev/null +++ b/configs/gscnn/README.md @@ -0,0 +1,13 @@ +# Gated-scnn: Gated shape cnns for semantic segmentation + +## Reference + +> Takikawa T, Acuna D, Jampani V, et al. Gated-scnn: Gated shape cnns for semantic segmentation[C]//Proceedings of the IEEE International Conference on Computer Vision. 2019: 5229-5238. 
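+
+## Training
+
+A run with this config can be launched through PaddleSeg's standard entry point. The command below is illustrative; check `python train.py --help` for the exact flags available in your version:
+
+```
+python train.py \
+       --config configs/gscnn/gscnn_resnet50_os8_cityscapes_1024x512_80k.yml \
+       --do_eval \
+       --save_interval 1000
+```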
+
+## Performance
+
+### Cityscapes
+
+| Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links |
+|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
+|GSCNN|ResNet50_OS8|1024x512|80000|80.67%|80.88%|80.88%|[model](https://bj.bcebos.com/paddleseg/dygraph/cutyscapes/gscnn_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cutyscapes/gscnn_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=11b79b6a2899739c0d009b1ce34bad77)|
diff --git a/configs/gscnn/gscnn_resnet50_os8_cityscapes_1024x512_80k.yml b/configs/gscnn/gscnn_resnet50_os8_cityscapes_1024x512_80k.yml
new file mode 100644
index 0000000000..eacde21a0f
--- /dev/null
+++ b/configs/gscnn/gscnn_resnet50_os8_cityscapes_1024x512_80k.yml
@@ -0,0 +1,29 @@
+_base_: '../../configs/_base_/cityscapes.yml'
+
+batch_size: 2
+iters: 80000
+
+model:
+  type: GSCNN
+  backbone:
+    type: ResNet50_vd
+    output_stride: 8
+    multi_grid: [1, 2, 4]
+    pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
+  num_classes: 19
+  backbone_indices: [0, 1, 2, 3]
+  aspp_ratios: [1, 12, 24, 36]
+  aspp_out_channels: 256
+  align_corners: False
+  pretrained: null
+
+loss:
+  types:
+    - type: CrossEntropyLoss
+    - type: EdgeAttentionLoss
+    - type: BCELoss
+    - type: DualTaskLoss
+  coef: [1, 1, 20, 1]
+
+train_dataset:
+  edge: True
diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py
index b9d4030a9d..4c2e33087e 100644
--- a/paddleseg/core/train.py
+++ b/paddleseg/core/train.py
@@ -22,29 +22,25 @@ from paddleseg.core.val import evaluate
 
-def check_logits_losses(logits, losses):
-    len_logits = len(logits)
+def check_logits_losses(logits_list, losses):
+    len_logits = len(logits_list)
     len_losses = len(losses['types'])
     if len_logits != len_losses:
         raise RuntimeError(
-            'The length of logits should equal to the types of loss config: {} != {}.'
+            'The length of logits_list should equal the number of loss types in config: {} != {}.'
            .format(len_logits, len_losses))
 
-def loss_computation(logits, label, losses):
-    check_logits_losses(logits, losses)
+def loss_computation(logits_list, labels, losses, edges=None):
+    check_logits_losses(logits_list, losses)
     loss = 0
-    for i in range(len(logits)):
-        logit = logits[i]
-        if logit.shape[-2:] != label.shape[-2:]:
-            logit = F.interpolate(
-                logit,
-                label.shape[-2:],
-                mode='bilinear',
-                align_corners=True,
-                align_mode=1)
-        loss_i = losses['types'][i](logit, label)
-        loss += losses['coef'][i] * loss_i
+    for i in range(len(logits_list)):
+        logits = logits_list[i]
+        loss_i = losses['types'][i]
+        # Edge-style losses (BCELoss) are supervised with edge masks instead of labels.
+        if loss_i.__class__.__name__ in ('BCELoss', ):
+            target = edges
+        else:
+            target = labels
+        loss += losses['coef'][i] * loss_i(logits, target)
     return loss
 
@@ -111,14 +107,21 @@ def train(model,
             train_reader_cost += timer.elapsed_time()
             images = data[0]
             labels = data[1].astype('int64')
+            edges = None
+            if len(data) == 3:
+                edges = data[2].astype('int64')
+
             if nranks > 1:
-                logits = ddp_model(images)
-                loss = loss_computation(logits, labels, losses)
-                loss.backward()
+                logits_list = ddp_model(images)
             else:
-                logits = model(images)
-                loss = loss_computation(logits, labels, losses)
-                loss.backward()
+                logits_list = model(images)
+            loss = loss_computation(
+                logits_list=logits_list,
+                labels=labels,
+                losses=losses,
+                edges=edges)
+            loss.backward()
+
             optimizer.step()
             lr = optimizer.get_lr()
             if isinstance(optimizer._learning_rate,
@@ -127,6 +130,7 @@ def train(model,
             model.clear_gradients()
             avg_loss += loss.numpy()[0]
             train_batch_cost += timer.elapsed_time()
+
             if (iter) % log_iters == 0 and local_rank == 0:
                 avg_loss /= log_iters
                 avg_train_reader_cost = train_reader_cost / log_iters
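To make the edge-routing convention in `loss_computation` concrete, here is a minimal, self-contained sketch; the loss classes are toy stand-ins (only their class names matter), not PaddleSeg's real loss layers:

```python
# Toy stand-ins for the real loss layers; only the class names matter here.
class CrossEntropyLoss:
    def __call__(self, logits, target):
        return 0.5  # placeholder loss value


class BCELoss:
    def __call__(self, logits, target):
        assert target == 'edge_mask', 'BCELoss should receive the edge mask'
        return 0.25  # placeholder loss value


def loss_computation(logits_list, labels, losses, edges=None):
    loss = 0
    for i, loss_i in enumerate(losses['types']):
        # Edge-style losses supervise the shape stream, so they get edge masks.
        target = edges if loss_i.__class__.__name__ in ('BCELoss', ) else labels
        loss += losses['coef'][i] * loss_i(logits_list[i], target)
    return loss


losses = {'types': [CrossEntropyLoss(), BCELoss()], 'coef': [1, 20]}
print(loss_computation(['seg_logit', 'edge_logit'], 'label_mask', losses,
                       edges='edge_mask'))  # 1 * 0.5 + 20 * 0.25 = 5.5
```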
diff --git a/paddleseg/datasets/ade.py b/paddleseg/datasets/ade.py
index b1a445285d..72620d2d87 100644
--- a/paddleseg/datasets/ade.py
+++ b/paddleseg/datasets/ade.py
@@ -22,6 +22,7 @@
 from paddleseg.utils import seg_env
 from paddleseg.cvlibs import manager
 from paddleseg.transforms import Compose
+import paddleseg.transforms.functional as F
 
 URL = "http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip"
 
@@ -35,9 +36,10 @@ class ADE20K(Dataset):
         transforms (list): A list of image transformations.
         dataset_root (str, optional): The ADE20K dataset directory. Default: None.
         mode (str, optional): A subset of the entire dataset. It should be one of ('train', 'val'). Default: 'train'.
+        edge (bool): Whether to compute edge masks while training. Default: False.
     """
 
-    def __init__(self, transforms, dataset_root=None, mode='train'):
+    def __init__(self, transforms, dataset_root=None, mode='train', edge=False):
         self.dataset_root = dataset_root
         self.transforms = Compose(transforms)
         mode = mode.lower()
@@ -45,6 +47,7 @@ def __init__(self, transforms, dataset_root=None, mode='train'):
         self.file_list = list()
         self.num_classes = 150
         self.ignore_index = 255
+        self.edge = edge
 
         if mode not in ['train', 'val']:
             raise ValueError(
@@ -97,4 +100,9 @@ def __getitem__(self, idx):
         else:
             im, label = self.transforms(im=image_path, label=label_path)
             label = label - 1
-            return im, label
+            if self.edge:
+                edge_mask = F.mask_to_binary_edge(
+                    label, radius=2, num_classes=self.num_classes)
+                return im, label, edge_mask
+            else:
+                return im, label
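The dataset changes delegate edge-mask generation to `paddleseg.transforms.functional.mask_to_binary_edge`. As a rough illustration of what such a conversion computes (this is not PaddleSeg's implementation; it approximates the idea with a morphological gradient), consider:

```python
import cv2
import numpy as np


def mask_to_binary_edge_sketch(label, radius=2, num_classes=2):
    """Mark pixels within `radius` of a class boundary of `label` as 1."""
    kernel = np.ones((2 * radius + 1, 2 * radius + 1), np.uint8)
    edge = np.zeros_like(label, dtype=np.uint8)
    for c in range(num_classes):
        class_mask = (label == c).astype(np.uint8)
        # Morphological gradient (dilation minus erosion) highlights the boundary band.
        edge |= cv2.morphologyEx(class_mask, cv2.MORPH_GRADIENT, kernel)
    return edge


label = np.zeros((8, 8), dtype=np.uint8)
label[2:6, 2:6] = 1  # a 4x4 square of class 1
print(mask_to_binary_edge_sketch(label, radius=1))
```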
Default: False.
     """
 
-    def __init__(self, transforms, dataset_root, mode='train'):
+    def __init__(self, transforms, dataset_root, mode='train', edge=False):
         self.dataset_root = dataset_root
         self.transforms = Compose(transforms)
         self.file_list = list()
@@ -54,6 +55,7 @@ def __init__(self, transforms, dataset_root, mode='train'):
         self.mode = mode
         self.num_classes = 19
         self.ignore_index = 255
+        self.edge = edge
 
         if mode not in ['train', 'val', 'test']:
             raise ValueError(
diff --git a/paddleseg/datasets/dataset.py b/paddleseg/datasets/dataset.py
index 25e8a9035e..10decc1c05 100644
--- a/paddleseg/datasets/dataset.py
+++ b/paddleseg/datasets/dataset.py
@@ -20,6 +20,7 @@
 
 from paddleseg.cvlibs import manager
 from paddleseg.transforms import Compose
+import paddleseg.transforms.functional as F
 
 
 @manager.DATASETS.add_component
@@ -41,6 +42,7 @@ class Dataset(paddle.io.Dataset):
         test_path (str): The test dataset file. When mode is 'test', test_path is necessary.
             The annotation file is not necessary in test_path file.
         separator (str): The separator of dataset list. Default: ' '.
+        edge (bool): Whether to compute edge masks while training. Default: False.
 
         Examples:
 
@@ -68,7 +70,8 @@ def __init__(self,
                  val_path=None,
                  test_path=None,
                  separator=' ',
-                 ignore_index=255):
+                 ignore_index=255,
+                 edge=False):
         self.dataset_root = dataset_root
         self.transforms = Compose(transforms)
         self.file_list = list()
@@ -76,6 +79,7 @@ def __init__(self,
         self.mode = mode
         self.num_classes = num_classes
         self.ignore_index = ignore_index
+        self.edge = edge
 
         if mode.lower() not in ['train', 'val', 'test']:
             raise ValueError(
@@ -149,7 +153,12 @@ def __getitem__(self, idx):
             return im, label
         else:
             im, label = self.transforms(im=image_path, label=label_path)
-            return im, label
+            if self.edge:
+                edge_mask = F.mask_to_binary_edge(
+                    label, radius=2, num_classes=self.num_classes)
+                return im, label, edge_mask
+            else:
+                return im, label
 
     def __len__(self):
         return len(self.file_list)
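For reference, this is how the new flag changes what a generic `Dataset` sample yields. A minimal sketch follows; the paths, file list and transform choice are hypothetical and should be adapted to your data layout:

```python
from paddleseg.datasets import Dataset
from paddleseg.transforms import Normalize

# Hypothetical dataset root and file list; adjust to your data.
train_dataset = Dataset(
    transforms=[Normalize()],
    dataset_root='data/my_dataset',
    train_path='data/my_dataset/train_list.txt',
    num_classes=2,
    mode='train',
    edge=True)

# With edge=True each sample is a triple; with edge=False it is a pair.
im, label, edge_mask = train_dataset[0]
```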
Default: False.
     """
 
-    def __init__(self, transforms, dataset_root=None, mode='train'):
+    def __init__(self, transforms, dataset_root=None, mode='train', edge=False):
         self.dataset_root = dataset_root
         self.transforms = Compose(transforms)
         mode = mode.lower()
@@ -44,6 +45,7 @@ def __init__(self, transforms, dataset_root=None, mode='train'):
         self.file_list = list()
         self.num_classes = 21
         self.ignore_index = 255
+        self.edge = edge
 
         if mode not in ['train', 'trainval', 'trainaug', 'val']:
             raise ValueError(
diff --git a/paddleseg/models/__init__.py b/paddleseg/models/__init__.py
index 29212be0b2..2f525e8b2d 100644
--- a/paddleseg/models/__init__.py
+++ b/paddleseg/models/__init__.py
@@ -25,3 +25,4 @@
 from .ocrnet import *
 from .pspnet import *
 from .unet import UNet
+from .gscnn import GSCNN
diff --git a/paddleseg/models/backbones/resnet_vd.py b/paddleseg/models/backbones/resnet_vd.py
index 64a01842be..f521942e19 100644
--- a/paddleseg/models/backbones/resnet_vd.py
+++ b/paddleseg/models/backbones/resnet_vd.py
@@ -184,6 +184,7 @@ def __init__(self,
                  pretrained=None):
         super(ResNet_vd, self).__init__()
+        self.conv1_logit = None  # for gscnn shape stream
         self.layers = layers
         self.lr_mult_list = lr_mult_list
         supported_layers = [18, 34, 50, 101, 152, 200]
@@ -298,6 +299,7 @@ def forward(self, inputs):
         y = self.conv1_1(inputs)
         y = self.conv1_2(y)
         y = self.conv1_3(y)
+        self.conv1_logit = y.clone()
         y = self.pool2d_max(y)
 
         # A feature list saves the output feature map of each stage.
diff --git a/paddleseg/models/gscnn.py b/paddleseg/models/gscnn.py
new file mode 100644
index 0000000000..401c5092c7
--- /dev/null
+++ b/paddleseg/models/gscnn.py
@@ -0,0 +1,344 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cv2
+import numpy as np
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+from paddleseg.models import layers
+from paddleseg.models.backbones import resnet_vd
+from paddleseg.models import deeplab
+from paddleseg.utils import utils
+
+
+@manager.MODELS.add_component
+class GSCNN(nn.Layer):
+    """
+    The GSCNN implementation based on PaddlePaddle.
+
+    The original article refers to
+    Towaki Takikawa, et al. "Gated-SCNN: Gated Shape CNNs for Semantic Segmentation"
+    (https://arxiv.org/pdf/1907.05740.pdf)
+
+    Args:
+        num_classes (int): The unique number of target classes.
+        backbone (paddle.nn.Layer): Backbone network, currently supports ResNet50_vd/ResNet101_vd.
+        backbone_indices (tuple, optional): Four values in the tuple indicate the indices of outputs of backbone.
+            Default: (0, 1, 2, 3).
+        aspp_ratios (tuple, optional): The dilation rates used in the ASPP module.
+            If output_stride=16, aspp_ratios should be set as (1, 6, 12, 18).
+            If output_stride=8, aspp_ratios is (1, 12, 24, 36).
+            Default: (1, 6, 12, 18).
+        aspp_out_channels (int, optional): The output channels of the ASPP module. Default: 256.
+        align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+            e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+        pretrained (str, optional): The path or url of pretrained model. Default: None.
+    """
+
+    def __init__(self,
+                 num_classes,
+                 backbone,
+                 backbone_indices=(0, 1, 2, 3),
+                 aspp_ratios=(1, 6, 12, 18),
+                 aspp_out_channels=256,
+                 align_corners=False,
+                 pretrained=None):
+        super().__init__()
+        self.backbone = backbone
+        backbone_channels = self.backbone.feat_channels
+        self.head = GSCNNHead(num_classes, backbone_indices, backbone_channels,
+                              aspp_ratios, aspp_out_channels, align_corners)
+        self.align_corners = align_corners
+        self.pretrained = pretrained
+        self.init_weight()
+
+    def forward(self, x):
+        feat_list = self.backbone(x)
+        logit_list = self.head(x, feat_list, self.backbone.conv1_logit)
+        seg_logit, edge_logit = [
+            F.interpolate(
+                logit,
+                x.shape[2:],
+                mode='bilinear',
+                align_corners=self.align_corners) for logit in logit_list
+        ]
+        return [seg_logit, (seg_logit, edge_logit), edge_logit, seg_logit]
+
+    def init_weight(self):
+        if self.pretrained is not None:
+            utils.load_entire_model(self, self.pretrained)
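As a quick smoke test, the model can be instantiated the way the config above wires it. A small sketch (dygraph mode, random input, no pretrained weights; argument values mirror the yml, and the output shapes follow from the forward method):

```python
import paddle
from paddleseg.models import GSCNN
from paddleseg.models.backbones import ResNet50_vd

# Arguments mirror gscnn_resnet50_os8_cityscapes_1024x512_80k.yml.
backbone = ResNet50_vd(output_stride=8, multi_grid=(1, 2, 4))
model = GSCNN(num_classes=19, backbone=backbone, aspp_ratios=(1, 12, 24, 36))

x = paddle.rand([1, 3, 512, 512])
seg_logit, _, edge_logit, _ = model(x)  # returns [seg, (seg, edge), edge, seg]
print(seg_logit.shape)   # [1, 19, 512, 512]
print(edge_logit.shape)  # [1, 1, 512, 512]
```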
It should be set to False when the feature size is even, + e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. + pretrained (str, optional): The path or url of pretrained model. Default: None. + """ + + def __init__(self, + num_classes, + backbone, + backbone_indices=(0, 1, 2, 3), + aspp_ratios=(1, 6, 12, 18), + aspp_out_channels=256, + align_corners=False, + pretrained=None): + super().__init__() + self.backbone = backbone + backbone_channels = self.backbone.feat_channels + self.head = GSCNNHead(num_classes, backbone_indices, backbone_channels, + aspp_ratios, aspp_out_channels, align_corners) + self.align_corners = align_corners + self.pretrained = pretrained + self.init_weight() + + def forward(self, x): + feat_list = self.backbone(x) + logit_list = self.head(x, feat_list, self.backbone.conv1_logit) + seg_logit, edge_logit = [ + F.interpolate( + logit, + x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) for logit in logit_list + ] + return [seg_logit, (seg_logit, edge_logit), edge_logit, seg_logit] + + def init_weight(self): + if self.pretrained is not None: + utils.load_entire_model(self, self.pretrained) + + +class GSCNNHead(nn.Layer): + """ + The GSCNNHead implementation based on PaddlePaddle. + + Args: + num_classes (int): The unique number of target classes. + backbone_indices (tuple): Two values in the tuple indicate the indices of output of backbone. + the first index will be taken as a low-level feature in Decoder component; + the last one will be taken as input of ASPP component; the second to fourth + will be taken as input for GCL component. + Usually backbone consists of four downsampling stage, and return an output of + each stage. If we set it as (0, 1, 2, 3), it means taking feature map of the first + stage in backbone as low-level feature used in Decoder, feature map of the fourth + stage as input of ASPP, and the feature map of the second to fourth stage as input of GCL. + backbone_channels (tuple): The same length with "backbone_indices". It indicates the channels of corresponding index. + aspp_ratios (tuple): The dilation rates using in ASSP module. + aspp_out_channels (int): The output channels of ASPP module. + align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature + is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. 
+ """ + + def __init__(self, num_classes, backbone_indices, backbone_channels, + aspp_ratios, aspp_out_channels, align_corners): + super().__init__() + self.backbone_indices = backbone_indices + self.align_corners = align_corners + + self.dsn1 = nn.Conv2D( + backbone_channels[backbone_indices[1]], 1, kernel_size=1) + self.dsn2 = nn.Conv2D( + backbone_channels[backbone_indices[2]], 1, kernel_size=1) + self.dsn3 = nn.Conv2D( + backbone_channels[backbone_indices[3]], 1, kernel_size=1) + + self.res1 = resnet_vd.BasicBlock(64, 64, stride=1) + self.d1 = nn.Conv2D(64, 32, kernel_size=1) + self.gate1 = GatedSpatailConv2d(32, 32) + self.res2 = resnet_vd.BasicBlock(32, 32, stride=1) + self.d2 = nn.Conv2D(32, 16, kernel_size=1) + self.gate2 = GatedSpatailConv2d(16, 16) + self.res3 = resnet_vd.BasicBlock(16, 16, stride=1) + self.d3 = nn.Conv2D(16, 8, kernel_size=1) + self.gate3 = GatedSpatailConv2d(8, 8) + self.fuse = nn.Conv2D(8, 1, kernel_size=1, bias_attr=False) + + self.cw = nn.Conv2D(2, 1, kernel_size=1, bias_attr=False) + + self.aspp = ASPPModule( + aspp_ratios=aspp_ratios, + in_channels=backbone_channels[-1], + out_channels=aspp_out_channels, + align_corners=self.align_corners, + image_pooling=True) + + self.decoder = deeplab.Decoder( + num_classes=num_classes, + in_channels=backbone_channels[0], + align_corners=self.align_corners) + + def forward(self, x, feat_list, s_input): + input_size = x.shape[-2:] + m1f = F.interpolate( + s_input, + input_size, + mode='bilinear', + align_corners=self.align_corners) + + l1, l2, l3 = [ + feat_list[self.backbone_indices[i]] + for i in range(1, len(self.backbone_indices)) + ] + s1 = F.interpolate( + self.dsn1(l1), + input_size, + mode='bilinear', + align_corners=self.align_corners) + s2 = F.interpolate( + self.dsn2(l2), + input_size, + mode='bilinear', + align_corners=self.align_corners) + s3 = F.interpolate( + self.dsn3(l3), + input_size, + mode='bilinear', + align_corners=self.align_corners) + + # Get image gradient + im_arr = x.numpy().transpose((0, 2, 3, 1)) + im_arr = ((im_arr * 0.5 + 0.5) * 255).astype(np.uint8) + canny = np.zeros((x.shape[0], 1, input_size[0], input_size[1])) + for i in range(x.shape[0]): + canny[i] = cv2.Canny(im_arr[i], 10, 100) + canny = canny / 255 + canny = paddle.to_tensor(canny).astype('float32') + canny.stop_gradient = True + + cs = self.res1(m1f) + cs = F.interpolate( + cs, input_size, mode='bilinear', align_corners=self.align_corners) + cs = self.d1(cs) + cs = self.gate1(cs, s1) + + cs = self.res2(cs) + cs = F.interpolate( + cs, input_size, mode='bilinear', align_corners=self.align_corners) + cs = self.d2(cs) + cs = self.gate2(cs, s2) + + cs = self.res3(cs) + cs = F.interpolate( + cs, input_size, mode='bilinear', align_corners=self.align_corners) + cs = self.d3(cs) + cs = self.gate3(cs, s3) + + cs = self.fuse(cs) + cs = F.interpolate( + cs, input_size, mode='bilinear', align_corners=self.align_corners) + edge_out = F.sigmoid(cs) # Ouput of shape stream + + cat = paddle.concat([edge_out, canny], axis=1) + acts = self.cw(cat) + acts = F.sigmoid(acts) # Input of fusion module + + x = self.aspp(l3, acts) + + low_level_feat = feat_list[self.backbone_indices[0]] + logit = self.decoder(x, low_level_feat) + logit_list = [logit, edge_out] + return logit_list + + +class GatedSpatailConv2d(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + dilation=1, + groups=1, + bias_attr=False): + super().__init__() + self._gate_conv = nn.Sequential( + layers.SyncBatchNorm(in_channels + 1), + 
nn.Conv2D(in_channels + 1, in_channels + 1, kernel_size=1), + nn.ReLU(), nn.Conv2D(in_channels + 1, 1, kernel_size=1), + layers.SyncBatchNorm(1), nn.Sigmoid()) + self.conv = nn.Conv2D( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias_attr=bias_attr) + + def forward(self, input_features, gating_features): + cat = paddle.concat([input_features, gating_features], axis=1) + alphas = self._gate_conv(cat) + x = input_features * (alphas + 1) + x = self.conv(x) + return x + + +class ASPPModule(nn.Layer): + """ + Atrous Spatial Pyramid Pooling. + + Args: + aspp_ratios (tuple): The dilation rate using in ASSP module. + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature + is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. + use_sep_conv (bool, optional): If using separable conv in ASPP module. Default: False. + image_pooling (bool, optional): If augmented with image-level features. Default: False + """ + + def __init__(self, + aspp_ratios, + in_channels, + out_channels, + align_corners, + use_sep_conv=False, + image_pooling=False): + super().__init__() + + self.align_corners = align_corners + self.aspp_blocks = nn.LayerList() + + for ratio in aspp_ratios: + if use_sep_conv and ratio > 1: + conv_func = layers.SeparableConvBNReLU + else: + conv_func = layers.ConvBNReLU + + block = conv_func( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1 if ratio == 1 else 3, + dilation=ratio, + padding=0 if ratio == 1 else ratio) + self.aspp_blocks.append(block) + + out_size = len(self.aspp_blocks) + + if image_pooling: + self.global_avg_pool = nn.Sequential( + nn.AdaptiveAvgPool2D(output_size=(1, 1)), + layers.ConvBNReLU( + in_channels, out_channels, kernel_size=1, bias_attr=False)) + out_size += 1 + self.image_pooling = image_pooling + + self.edge_conv = layers.ConvBNReLU( + 1, out_channels, kernel_size=1, bias_attr=False) + out_size += 1 + + self.conv_bn_relu = layers.ConvBNReLU( + in_channels=out_channels * out_size, + out_channels=out_channels, + kernel_size=1) + + self.dropout = nn.Dropout(p=0.1) # drop rate + + def forward(self, x, edge): + outputs = [] + for block in self.aspp_blocks: + y = block(x) + y = F.interpolate( + y, + x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + outputs.append(y) + + if self.image_pooling: + img_avg = self.global_avg_pool(x) + img_avg = F.interpolate( + img_avg, + x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + outputs.append(img_avg) + + edge_features = F.interpolate( + edge, + size=x.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + edge_features = self.edge_conv(edge_features) + outputs.append(edge_features) + + x = paddle.concat(outputs, axis=1) + x = self.conv_bn_relu(x) + x = self.dropout(x) + return x diff --git a/paddleseg/models/losses/__init__.py b/paddleseg/models/losses/__init__.py index f58a9fe1dc..f566a58511 100644 --- a/paddleseg/models/losses/__init__.py +++ b/paddleseg/models/losses/__init__.py @@ -13,3 +13,6 @@ # limitations under the License. 
from .cross_entroy_loss import CrossEntropyLoss +from .binary_cross_entroy_loss import BCELoss +from .dual_task_loss import DualTaskLoss +from .edge_attention_loss import EdgeAttentionLoss diff --git a/paddleseg/models/losses/binary_cross_entroy_loss.py b/paddleseg/models/losses/binary_cross_entroy_loss.py new file mode 100644 index 0000000000..5af84b0a80 --- /dev/null +++ b/paddleseg/models/losses/binary_cross_entroy_loss.py @@ -0,0 +1,158 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddleseg.cvlibs import manager + + +@manager.LOSSES.add_component +class BCELoss(nn.Layer): + """ + This operator combines the sigmoid layer and the :ref:`api_nn_loss_BCELoss` layer. + Also, we can see it as the combine of ``sigmoid_cross_entropy_with_logits`` + layer and some reduce operations. + This measures the element-wise probability error in classification tasks + in which each class is independent. + This can be thought of as predicting labels for a data-point, where labels + are not mutually exclusive. For example, a news article can be about + politics, technology or sports at the same time or none of these. + First this operator calculate loss function as follows: + .. math:: + Out = -Labels * \\log(\\sigma(Logit)) - (1 - Labels) * \\log(1 - \\sigma(Logit)) + We know that :math:`\\sigma(Logit) = \\frac{1}{1 + \\e^{-Logit}}`. By substituting this we get: + .. math:: + Out = Logit - Logit * Labels + \\log(1 + \\e^{-Logit}) + For stability and to prevent overflow of :math:`\\e^{-Logit}` when Logit < 0, + we reformulate the loss as follows: + .. math:: + Out = \\max(Logit, 0) - Logit * Labels + \\log(1 + \\e^{-\|Logit\|}) + Then, if ``weight`` or ``pos_weight`` is not None, this operator multiply the + weight tensor on the loss `Out`. The ``weight`` tensor will attach different + weight on every items in the batch. The ``pos_weight`` will attach different + weight on the positive label of each class. + Finally, this operator applies reduce operation on the loss. + If :attr:`reduction` set to ``'none'``, the operator will return the original loss `Out`. + If :attr:`reduction` set to ``'mean'``, the reduced mean loss is :math:`Out = MEAN(Out)`. + If :attr:`reduction` set to ``'sum'``, the reduced sum loss is :math:`Out = SUM(Out)`. + Note that the target labels ``label`` should be numbers between 0 and 1. + Args: + weight (Tensor | str, optional): A manual rescaling weight given to the loss of each + batch element. If given, it has to be a 1D Tensor whose size is `[N, ]`, + The data type is float32, float64. If type is str, it should equal to 'dynamic'. + It will compute weight dynamically in every step. + Default is ``'None'``. + pos_weight (float|str, optional): A weight of positive examples. If type is str, + it should equal to 'dynamic'. It will compute weight dynamically in every step. + Default is ``'None'``. 
+ ignore_index (int64): Specifies a target value that is ignored + and does not contribute to the input gradient. Default ``255``. + Shapes: + logit (Tensor): The input predications tensor. 2-D tensor with shape: [N, *], + N is batch_size, `*` means number of additional dimensions. The ``logit`` + is usually the output of Linear layer. Available dtype is float32, float64. + label (Tensor): The target labels tensor. 2-D tensor with the same shape as + ``logit``. The target labels which values should be numbers between 0 and 1. + Available dtype is float32, float64. + Returns: + A callable object of BCEWithLogitsLoss. + Examples: + .. code-block:: python + import paddle + paddle.disable_static() + logit = paddle.to_tensor([5.0, 1.0, 3.0], dtype="float32") + label = paddle.to_tensor([1.0, 0.0, 1.0], dtype="float32") + bce_logit_loss = paddle.nn.BCEWithLogitsLoss() + output = bce_logit_loss(logit, label) + print(output.numpy()) # [0.45618808] + """ + + def __init__(self, weight=None, pos_weight=None, ignore_index=255): + super().__init__() + self.weight = weight + self.pos_weight = pos_weight + self.ignore_index = ignore_index + + if self.weight is not None: + if isinstance(self.weight, str): + if self.weight != 'dynamic': + raise ValueError( + "if type of `weight` is str, it should equal to 'dynamic', but it is {}" + .format(self.weight)) + elif isinstance(self.weight, paddle.VarBase): + raise TypeError( + 'The type of `weight` is wrong, it should be Tensor or str, but it is {}' + .format(type(self.pos_weight))) + + if self.pos_weight is not None: + if isinstance(self.pos_weight, str): + if self.pos_weight != 'dynamic': + raise ValueError( + "if type of `pos_weight` is str, it should equal to 'dynamic', but it is {}" + .format(self.pos_weight)) + elif isinstance(self.pos_weight, float): + self.pos_weight = paddle.to_tensor( + self.pos_weight, dtype='float32') + else: + raise TypeError( + 'The type of `pos_weight` is wrong, it should be float or str, but it is {}' + .format(type(self.pos_weight))) + + def forward(self, logit, label): + """ + Forward computation. + + Args: + logit (Tensor): Logit tensor, the data type is float32, float64. Shape is + (N, C), where C is number of classes, and if shape is more than 2D, this + is (N, C, D1, D2,..., Dk), k >= 1. + label (Tensor): Label tensor, the data type is int64. Shape is (N, C), where each + value is 0 or 1, and if shape is more than 2D, this is + (N, C, D1, D2,..., Dk), k >= 1. 
+        """
+        eps = 1e-6
+        if len(label.shape) != len(logit.shape):
+            label = paddle.unsqueeze(label, 1)
+        mask = (label != self.ignore_index).astype('float32')
+        if isinstance(self.weight, str):
+            pos_index = (label == 1)
+            neg_index = (label == 0)
+            pos_num = paddle.sum(pos_index.astype('float32'))
+            neg_num = paddle.sum(neg_index.astype('float32'))
+            sum_num = pos_num + neg_num
+            weight_pos = 2 * neg_num / (sum_num + eps)
+            weight_neg = 2 * pos_num / (sum_num + eps)
+            self.weight = weight_pos * label + weight_neg * (1 - label)
+        if isinstance(self.pos_weight, str):
+            pos_index = (label == 1)
+            neg_index = (label == 0)
+            pos_num = paddle.sum(pos_index.astype('float32'))
+            neg_num = paddle.sum(neg_index.astype('float32'))
+            sum_num = pos_num + neg_num
+            self.pos_weight = 2 * neg_num / (sum_num + eps)
+        label = label.astype('float32')
+        loss = paddle.nn.functional.binary_cross_entropy_with_logits(
+            logit,
+            label,
+            weight=self.weight,
+            reduction='none',
+            pos_weight=self.pos_weight)
+        loss = loss * mask
+        loss = paddle.mean(loss) / paddle.mean(mask)
+        label.stop_gradient = True
+        mask.stop_gradient = True
+
+        return loss
diff --git a/paddleseg/models/losses/dual_task_loss.py b/paddleseg/models/losses/dual_task_loss.py
new file mode 100644
index 0000000000..964cecc82b
--- /dev/null
+++ b/paddleseg/models/losses/dual_task_loss.py
@@ -0,0 +1,142 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+
+
+@manager.LOSSES.add_component
+class DualTaskLoss(nn.Layer):
+    """
+    The dual task loss implementation of GSCNN.
+
+    Args:
+        ignore_index (int64): Specifies a target value that is ignored
+            and does not contribute to the input gradient. Default ``255``.
+        tau (float): The tau of the Gumbel-Softmax sampling. Default: 0.5.
+    """
+
+    def __init__(self, ignore_index=255, tau=0.5):
+        super().__init__()
+        self.ignore_index = ignore_index
+        self.tau = tau
+
+    def _gumbel_softmax_sample(self, logit, tau=1, eps=1e-10):
+        """
+        Draw a sample from the Gumbel-Softmax distribution
+
+        based on
+        https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
+        (MIT license)
+        """
+        gumbel_noise = paddle.rand(logit.shape)
+        gumbel_noise = -paddle.log(eps - paddle.log(gumbel_noise + eps))
+        logit = logit + gumbel_noise
+        return F.softmax(logit / tau, axis=1)
+
+    def compute_grad_mag(self, x):
+        eps = 1e-6
+        n, c, h, w = x.shape
+        if h <= 1 or w <= 1:
+            raise ValueError(
+                'The width and height of tensor to compute grad must be greater than 1, but the shape is {}.'
+ .format(x.shape)) + + x = self.conv_tri(x, r=4) + kernel = [[-1, 0, 1]] + kernel = paddle.to_tensor(kernel).astype('float32') + kernel = 0.5 * kernel + + kernel_x = paddle.concat([kernel.unsqueeze((0, 1))] * c, axis=0) + grad_x = F.conv2d(x, kernel_x, padding='same', groups=c) + kernel_y = paddle.concat([kernel.t().unsqueeze((0, 1))] * c, axis=0) + grad_y = F.conv2d(x, kernel_y, padding='same', groups=c) + mag = paddle.sqrt(grad_x * grad_x + grad_y * grad_y + eps) + + return mag / mag.max() + + def conv_tri(self, input, r): + """ + Convolves an image by a 2D triangle filter (the 1D triangle filter f is + [1:r r+1 r:-1:1]/(r+1)^2, the 2D version is simply conv2(f,f')) + """ + if r <= 1: + raise ValueError( + '`r` should be greater than 1, but it is {}.'.format(r)) + + kernel = [ + list(range(1, r + 1)) + [r + 1] + list(reversed(range(1, r + 1))) + ] + kernel = paddle.to_tensor(kernel).astype('float32') + kernel = kernel / (r + 1)**2 + input_ = F.pad(input, [1, 1, 0, 0], mode='replicate') + input_ = F.pad(input_, [r, r, 0, 0], mode='reflect') + input_ = [input_[:, :, :, :r], input, input_[:, :, :, -r:]] + input_ = paddle.concat(input_, axis=3) + tem = input_.clone() + + input_ = F.pad(input_, [0, 0, 1, 1], mode='replicate') + input_ = F.pad(input_, [0, 0, r, r], mode='reflect') + input_ = [input_[:, :, :r, :], tem, input_[:, :, -r:, :]] + input_ = paddle.concat(input_, axis=2) + + c = input.shape[1] + kernel_x = paddle.concat([kernel.unsqueeze((0, 1))] * c, axis=0) + output = F.conv2d(input_, kernel_x, padding=0, groups=c) + kernel_y = paddle.concat([kernel.t().unsqueeze((0, 1))] * c, axis=0) + output = F.conv2d(output, kernel_y, padding=0, groups=c) + return output + + def forward(self, logit, labels): + # import pdb; pdb.set_trace() + n, c, h, w = logit.shape + th = 1e-8 + eps = 1e-10 + mask = (labels != self.ignore_index) + mask.stop_gradient = True + logit = logit * mask + + labels = labels * mask + if len(labels.shape) == 4: + labels = labels.squeeze(1) + labels.stop_gradient = True + labels = F.one_hot(labels, logit.shape[1]).transpose((0, 3, 1, 2)) + labels.stop_gradient = True + + g = self._gumbel_softmax_sample(logit, tau=self.tau) + g = self.compute_grad_mag(g) + g_hat = self.compute_grad_mag(labels) + loss = F.l1_loss(g, g_hat, reduction='none') + loss = loss * mask + + g_mask = (g > th).astype('float32') + g_mask.stop_gradient = True + g_mask_sum = paddle.sum(g_mask) + loss_g = paddle.sum(loss * g_mask) + if g_mask_sum > eps: + loss_g = loss_g / g_mask_sum + + g_hat_mask = (g_hat > th).astype('float32') + g_hat_mask.stop_gradient = True + g_hat_mask_sum = paddle.sum(g_hat_mask) + loss_g_hat = paddle.sum(loss * g_hat_mask) + if g_hat_mask_sum > eps: + loss_g_hat = loss_g_hat / g_hat_mask_sum + + total_loss = 0.5 * loss_g + 0.5 * loss_g_hat + + return total_loss diff --git a/paddleseg/models/losses/edge_attention_loss.py b/paddleseg/models/losses/edge_attention_loss.py new file mode 100644 index 0000000000..1fe8b8d8f6 --- /dev/null +++ b/paddleseg/models/losses/edge_attention_loss.py @@ -0,0 +1,75 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +from paddle import nn +import paddle.nn.functional as F + +from paddleseg.cvlibs import manager +from paddleseg.models import losses + + +@manager.LOSSES.add_component +class EdgeAttentionLoss(nn.Layer): + """ + Implements the cross entropy loss function. It only compute the edge part. + + Args: + edge_threshold (float): The pixels greater edge_threshold as edges. + ignore_index (int64): Specifies a target value that is ignored + and does not contribute to the input gradient. Default ``255``. + """ + + def __init__(self, edge_threshold=0.8, ignore_index=255): + super().__init__() + self.edge_threshold = edge_threshold + self.ignore_index = ignore_index + self.EPS = 1e-5 + + def forward(self, logits, label): + """ + Forward computation. + + Args: + logits (tuple|list): (seg_logit, edge_logit) Tensor, the data type is float32, float64. Shape is + (N, C), where C is number of classes, and if shape is more than 2D, this + is (N, C, D1, D2,..., Dk), k >= 1. C =1 of edge_logit . + label (Tensor): Label tensor, the data type is int64. Shape is (N, C), where each + value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is + (N, C, D1, D2,..., Dk), k >= 1. + """ + seg_logit, edge_logit = logits[0], logits[1] + if len(label.shape) != len(seg_logit.shape): + label = paddle.unsqueeze(label, 1) + if edge_logit.shape != label.shape: + raise ValueError( + 'The shape of edge_logit should equal to the label, but they are {} != {}' + .format(edge_logit.shape, label.shape)) + + filler = paddle.ones_like(label) * self.ignore_index + label = paddle.where(edge_logit > self.edge_threshold, label, filler) + + seg_logit = paddle.transpose(seg_logit, [0, 2, 3, 1]) + label = paddle.transpose(label, [0, 2, 3, 1]) + loss = F.softmax_with_cross_entropy( + seg_logit, label, ignore_index=self.ignore_index, axis=-1) + + mask = label != self.ignore_index + mask = paddle.cast(mask, 'float32') + loss = loss * mask + avg_loss = paddle.mean(loss) / (paddle.mean(mask) + self.EPS) + + label.stop_gradient = True + mask.stop_gradient = True + return avg_loss diff --git a/paddleseg/transforms/functional.py b/paddleseg/transforms/functional.py index d3a9c2d3fe..d53fa8b84f 100644 --- a/paddleseg/transforms/functional.py +++ b/paddleseg/transforms/functional.py @@ -15,6 +15,7 @@ import cv2 import numpy as np from PIL import Image, ImageEnhance +from scipy.ndimage.morphology import distance_transform_edt def normalize(im, mean, std): @@ -92,3 +93,68 @@ def rotate(im, rotate_lower, rotate_upper): rotate_delta = np.random.uniform(rotate_lower, rotate_upper) im = im.rotate(int(rotate_delta)) return im + + +def mask_to_onehot(mask, num_classes): + """ + Convert a mask (H, W) to onehot (K, H, W). + + Args: + mask (np.ndarray): Label mask with shape (H, W) + num_classes (int): Number of classes. + + Returns: + np.ndarray: Onehot mask with shape(K, H, W). + """ + _mask = [mask == i for i in range(num_classes)] + _mask = np.array(_mask).astype(np.uint8) + return _mask + + +def onehot_to_binary_edge(mask, radius): + """ + Convert a onehot mask (K, H, W) to a edge mask. 
+
+    Args:
+        mask (np.ndarray): Onehot mask with shape (K, H, W)
+        radius (int|float): Radius of edge.
+
+    Returns:
+        np.ndarray: Edge mask with shape (1, H, W).
+    """
+    if radius < 1:
+        raise ValueError('`radius` should be greater than or equal to 1')
+    num_classes = mask.shape[0]
+
+    edge = np.zeros(mask.shape[1:])
+    # pad borders
+    mask = np.pad(
+        mask, ((0, 0), (1, 1), (1, 1)), mode='constant', constant_values=0)
+    for i in range(num_classes):
+        dist = distance_transform_edt(
+            mask[i, :]) + distance_transform_edt(1.0 - mask[i, :])
+        dist = dist[1:-1, 1:-1]
+        dist[dist > radius] = 0
+        edge += dist
+
+    edge = np.expand_dims(edge, axis=0)
+    edge = (edge > 0).astype(np.uint8)
+    return edge
+
+
+def mask_to_binary_edge(mask, radius, num_classes):
+    """
+    Convert a semantic segmentation mask (H, W) to a binary edge mask (1, H, W).
+
+    Args:
+        mask (np.ndarray): Label mask with shape (H, W)
+        radius (int|float): Radius of edge.
+        num_classes (int): Number of classes.
+
+    Returns:
+        np.ndarray: Edge mask with shape (1, H, W).
+    """
+    mask = mask.squeeze()
+    onehot = mask_to_onehot(mask, num_classes)
+    edge = onehot_to_binary_edge(onehot, radius)
+    return edge
From 431b218a591cbd56d331bccec1b66ca5723b4dfe Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Mon, 7 Dec 2020 15:47:55 +0800
Subject: [PATCH 020/210] add gscnn

---
 README.md                            | 1 +
 README_CN.md                         | 1 +
 configs/gscnn/README.md              | 2 +-
 paddleseg/datasets/ade.py            | 2 +-
 paddleseg/datasets/cityscapes.py     | 2 +-
 paddleseg/datasets/dataset.py        | 2 +-
 paddleseg/datasets/optic_disc_seg.py | 2 +-
 paddleseg/datasets/voc.py            | 2 +-
 8 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 4e3b056e38..2f09f2502f 100644
--- a/README.md
+++ b/README.md
@@ -24,6 +24,7 @@ The full-detailed documents and tutorials are coming soon.
So far there are mini |[Fast-SCNN](./configs/fastscnn)|-|-|-|-| |[FCN](./configs/fcn)|||✔|✔| |[GCNet](./configs/gcnet)|✔|✔||| +|[GSCNN](./configs/gscnn)|✔|✔||| |[OCRNet](./configs/ocrnet/)|||✔|✔| |[PSPNet](./configs/pspnet)|✔|✔||| |[UNet](./configs/unet)|-|-|-|-| diff --git a/README_CN.md b/README_CN.md index 7f8a8b1e15..01e2507831 100644 --- a/README_CN.md +++ b/README_CN.md @@ -16,6 +16,7 @@ |[Fast-SCNN](./configs/fastscnn)|-|-|-|-| |[FCN](./configs/fcn)|||✔|✔| |[GCNet](./configs/gcnet)|✔|✔||| +|[GSCNN](./configs/gscnn)|✔|✔||| |[OCRNet](./configs/ocrnet/)|||✔|✔| |[PSPNet](./configs/pspnet)|✔|✔||| |[UNet](./configs/unet)|-|-|-|-| diff --git a/configs/gscnn/README.md b/configs/gscnn/README.md index a3806879f9..81e99c7c96 100644 --- a/configs/gscnn/README.md +++ b/configs/gscnn/README.md @@ -10,4 +10,4 @@ | Model | Backbone | Resolution | Training Iters | mIoU | mIoU (flip) | mIoU (ms+flip) | Links | |:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| -|GSCNN|ResNet50_OS8|1024x512|80000|80.67%|80.88%|80.88%|[model](https://bj.bcebos.com/paddleseg/dygraph/cutyscapes/gscnn_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cutyscapes/gscnn_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=11b79b6a2899739c0d009b1ce34bad77)| +|GSCNN|ResNet50_OS8|1024x512|80000|80.67%|80.88%|80.88%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gscnn_resnet50_os8_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/gscnn_resnet50_os8_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=11b79b6a2899739c0d009b1ce34bad77)| diff --git a/paddleseg/datasets/ade.py b/paddleseg/datasets/ade.py index 72620d2d87..5ace6a3e56 100644 --- a/paddleseg/datasets/ade.py +++ b/paddleseg/datasets/ade.py @@ -36,7 +36,7 @@ class ADE20K(Dataset): transforms (list): A list of image transformations. dataset_root (str, optional): The ADK20K dataset directory. Default: None. mode (str, optional): A subset of the entire dataset. It should be one of ('train', 'val'). Default: 'train'. - edge (bool): Whether to compute edge while training. Defualt: False + edge (bool): Whether to compute edge while training. Default: False """ def __init__(self, transforms, dataset_root=None, mode='train', edge=False): diff --git a/paddleseg/datasets/cityscapes.py b/paddleseg/datasets/cityscapes.py index af7c122426..7d3659bd6f 100644 --- a/paddleseg/datasets/cityscapes.py +++ b/paddleseg/datasets/cityscapes.py @@ -44,7 +44,7 @@ class Cityscapes(Dataset): transforms (list): Transforms for image. dataset_root (str): Cityscapes dataset directory. mode (str): Which part of dataset to use. it is one of ('train', 'val', 'test'). Default: 'train'. - edge (bool): Whether to compute edge while training. Defualt: False + edge (bool): Whether to compute edge while training. Default: False """ def __init__(self, transforms, dataset_root, mode='train', edge=False): diff --git a/paddleseg/datasets/dataset.py b/paddleseg/datasets/dataset.py index 10decc1c05..7ac6098693 100644 --- a/paddleseg/datasets/dataset.py +++ b/paddleseg/datasets/dataset.py @@ -42,7 +42,7 @@ class Dataset(paddle.io.Dataset): test_path (str): The test dataset file. When mode is 'test', test_path is necessary. The annotation file is not necessary in test_path file. separator (str): The separator of dataset list. Default: ' '. - edge (bool): Whether to compute edge while training. 
Defualt: False
+        edge (bool): Whether to compute edge while training. Default: False
     """
 
     def __init__(self, transforms, dataset_root=None, mode='train', edge=False):
From 0e5f8936fb1b0cc7ca1763e30a66069ae2dff705 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Wed, 16 Dec 2020 11:51:08 +0800
Subject: [PATCH 021/210] add apis docs

---
 docs/apis/backbones.md                        | 206 ++++++++
 docs/apis/core.md                             | 111 ++++
 docs/apis/cvlibs.md                           | 179 +++++++
 docs/apis/datasets.md                         | 120 ++++
 docs/apis/models.md                           | 446 +++++++++++++++--
 docs/apis/transforms.md                       | 183 +++++++
 paddleseg/core/predict.py                     |  15 +-
 paddleseg/core/train.py                       |  19 +
 paddleseg/core/val.py                         |  21 +
 paddleseg/cvlibs/param_init.py                |   3 +
 paddleseg/datasets/ade.py                     |   2 +-
 paddleseg/datasets/cityscapes.py              |   4 +-
 paddleseg/datasets/dataset.py                 |  12 +-
 paddleseg/datasets/optic_disc_seg.py          |   4 +-
 paddleseg/datasets/voc.py                     |   4 +-
 paddleseg/models/backbones/hrnet.py           |  44 +-
 paddleseg/models/backbones/mobilenetv3.py     |  16 +-
 paddleseg/models/backbones/resnet_vd.py       |  35 +-
 .../models/backbones/xception_deeplab.py      |  21 +-
 paddleseg/models/u2net.py                     | 452 +++++++++---------
 20 files changed, 1562 insertions(+), 335 deletions(-)

diff --git a/docs/apis/backbones.md b/docs/apis/backbones.md
index e69de29bb2..632ffac587 100644
--- a/docs/apis/backbones.md
+++ b/docs/apis/backbones.md
@@ -0,0 +1,206 @@
+# paddleseg.models.backbone
+
+The models subpackage contains backbones extracting features for semantic segmentation models.
+
+- [ResNet_vd](#ResNet_vd)
+- [HRNet](#HRNet)
+- [MobileNetV3](#MobileNetV3)
+- [Xception_deeplab](#Xception_deeplab)
+
+
+## [ResNet_vd](../../paddleseg/models/backbones/resnet_vd.py)
+ResNet_vd backbone from ["Bag of Tricks for Image Classification with Convolutional Neural Networks"](https://arxiv.org/pdf/1812.01187.pdf)
+
+> CLASS paddleseg.models.backbones.ResNet_vd(layers=50, output_stride=None, multi_grid=(1, 1, 1), lr_mult_list=(0.1, 0.1, 0.2, 0.2), pretrained=None)
+
+> > Args
+> > > - **layers** (int, optional): The layers of ResNet_vd. The supported layers are [18, 34, 50, 101, 152, 200]. Default: 50.
+> > > - **output_stride** (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 8.
+> > > - **multi_grid** (tuple|list, optional): The grid of stage4. Default: (1, 1, 1).
+> > > - **lr_mult_list** (tuple|list, optional): The learning rate multipliers of the four stages. Default: (0.1, 0.1, 0.2, 0.2).
+> > > - **pretrained** (str, optional): The path of pretrained model.
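+
+A minimal usage sketch (runnable once PaddlePaddle and PaddleSeg are installed; the input size is an arbitrary example):
+
+```python
+import paddle
+from paddleseg.models.backbones import ResNet50_vd
+
+# Build a dilated ResNet50_vd backbone (output_stride=8), as commonly paired with DeepLabV3+.
+backbone = ResNet50_vd(output_stride=8)
+feat_list = backbone(paddle.rand([1, 3, 512, 512]))  # one feature map per stage
+print([f.shape for f in feat_list])
+```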
+
+> paddleseg.models.backbones.ResNet18_vd(**args)
+
+    Return an object of the ResNet_vd class with 18 layers.
+
+> paddleseg.models.backbones.ResNet34_vd(**args)
+
+    Return an object of the ResNet_vd class with 34 layers.
+
+> paddleseg.models.backbones.ResNet50_vd(**args)
+
+    Return an object of the ResNet_vd class with 50 layers.
+
+> paddleseg.models.backbones.ResNet101_vd(**args)
+
+    Return an object of the ResNet_vd class with 101 layers.
+
+> paddleseg.models.backbones.ResNet152_vd(**args)
+
+    Return an object of the ResNet_vd class with 152 layers.
+
+> paddleseg.models.backbones.ResNet200_vd(**args)
+
+    Return an object of the ResNet_vd class with 200 layers.
+
+## [HRNet](../../paddleseg/models/backbones/hrnet.py)
+HRNet backbone from ["HRNet: Deep High-Resolution Representation Learning for Visual Recognition"](https://arxiv.org/pdf/1908.07919.pdf)
+
+> CLASS paddleseg.models.backbones.HRNet(pretrained=None, stage1_num_modules=1, stage1_num_blocks=(4,), stage1_num_channels=(64,), stage2_num_modules=1, stage2_num_blocks=(4, 4), stage2_num_channels=(18, 36), stage3_num_modules=4, stage3_num_blocks=(4, 4, 4), stage3_num_channels=(18, 36, 72), stage4_num_modules=3, stage4_num_blocks=(4, 4, 4, 4), stage4_num_channels=(18, 36, 72, 144), has_se=False, align_corners=False)
+
+> > Args
+> > > - **pretrained** (str, optional): The path of pretrained model.
+> > > - **stage1_num_modules** (int, optional): Number of modules for stage1. Default 1.
+> > > - **stage1_num_blocks** (list, optional): Number of blocks per module for stage1. Default (4,).
+> > > - **stage1_num_channels** (list, optional): Number of channels per branch for stage1. Default (64,).
+> > > - **stage2_num_modules** (int, optional): Number of modules for stage2. Default 1.
+> > > - **stage2_num_blocks** (list, optional): Number of blocks per module for stage2. Default (4, 4).
+> > > - **stage2_num_channels** (list, optional): Number of channels per branch for stage2. Default (18, 36).
+> > > - **stage3_num_modules** (int, optional): Number of modules for stage3. Default 4.
+> > > - **stage3_num_blocks** (list, optional): Number of blocks per module for stage3. Default (4, 4, 4).
+> > > - **stage3_num_channels** (list, optional): Number of channels per branch for stage3. Default (18, 36, 72).
+> > > - **stage4_num_modules** (int, optional): Number of modules for stage4. Default 3.
+> > > - **stage4_num_blocks** (list, optional): Number of blocks per module for stage4. Default (4, 4, 4, 4).
+> > > - **stage4_num_channels** (list, optional): Number of channels per branch for stage4. Default (18, 36, 72, 144).
+> > > - **has_se** (bool, optional): Whether to use Squeeze-and-Excitation module. Default False.
+> > > - **align_corners** (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+        e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
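+
+A similar hedged sketch for HRNet (the input size is again an arbitrary example):
+
+```python
+import paddle
+from paddleseg.models.backbones import HRNet_W18
+
+backbone = HRNet_W18()
+feat_list = backbone(paddle.rand([1, 3, 512, 512]))
+print([f.shape for f in feat_list])  # a single feature map fusing all resolution branches
+```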
+
+> paddleseg.models.backbones.HRNet_W18_Small_V1(**kwargs)
+
+    Return an object of the HRNet class with width 18; it is smaller than HRNet_W18_Small_V2.
+
+> paddleseg.models.backbones.HRNet_W18_Small_V2(**kwargs)
+
+    Return an object of the HRNet class with width 18; it is smaller than HRNet_W18.
+
+> paddleseg.models.backbones.HRNet_W18(**kwargs)
+
+    Return an object of the HRNet class with width 18.
+
+> paddleseg.models.backbones.HRNet_W30(**kwargs)
+
+    Return an object of the HRNet class with width 30.
+
+> paddleseg.models.backbones.HRNet_W32(**kwargs)
+
+    Return an object of the HRNet class with width 32.
+
+> paddleseg.models.backbones.HRNet_W40(**kwargs)
+
+    Return an object of the HRNet class with width 40.
+
+> paddleseg.models.backbones.HRNet_W44(**kwargs)
+
+    Return an object of the HRNet class with width 44.
+
+> paddleseg.models.backbones.HRNet_W48(**kwargs)
+
+    Return an object of the HRNet class with width 48.
+
+> paddleseg.models.backbones.HRNet_W60(**kwargs)
+
+    Return an object of the HRNet class with width 60.
+
+> paddleseg.models.backbones.HRNet_W64(**kwargs)
+
+    Return an object of the HRNet class with width 64.
+
+
+
+## [MobileNetV3](../../paddleseg/models/backbones/mobilenetv3.py)
+MobileNetV3 backbone from ["Searching for MobileNetV3"](https://arxiv.org/pdf/1905.02244.pdf).
+
+> CLASS paddleseg.models.backbones.MobileNetV3(pretrained=None, scale=1.0, model_name="small", output_stride=None)
+> > Args
+> > > - **pretrained** (str, optional): The path of pretrained model.
+> > > - **scale** (float, optional): The scale of channels. Default: 1.0.
+> > > - **model_name** (str, optional): Model name. It determines the type of MobileNetV3. The value is 'small' or 'large'. Default: 'small'.
+> > > - **output_stride** (int, optional): The stride of output features compared to input images. The value should be one of [2, 4, 8, 16, 32]. Default: None.
+
+> paddleseg.models.backbones.MobileNetV3_small_x0_35(**args)
+
+    Return an object of the MobileNetV3 class with scale 0.35 and model_name 'small'.
+
+> paddleseg.models.backbones.MobileNetV3_small_x0_5(**args)
+
+    Return an object of the MobileNetV3 class with scale 0.5 and model_name 'small'.
+
+> paddleseg.models.backbones.MobileNetV3_small_x0_75(**args)
+
+    Return an object of the MobileNetV3 class with scale 0.75 and model_name 'small'.
+
+> paddleseg.models.backbones.MobileNetV3_small_x1_0(**args)
+
+    Return an object of the MobileNetV3 class with scale 1.0 and model_name 'small'.
+
+> paddleseg.models.backbones.MobileNetV3_small_x1_25(**args)
+
+    Return an object of the MobileNetV3 class with scale 1.25 and model_name 'small'.
+
+> paddleseg.models.backbones.MobileNetV3_large_x0_35(**args)
+
+    Return an object of the MobileNetV3 class with scale 0.35 and model_name 'large'.
+
+> paddleseg.models.backbones.MobileNetV3_large_x0_5(**args)
+
+    Return an object of the MobileNetV3 class with scale 0.5 and model_name 'large'.
+
+> paddleseg.models.backbones.MobileNetV3_large_x0_75(**args)
+
+    Return an object of the MobileNetV3 class with scale 0.75 and model_name 'large'.
+
+> paddleseg.models.backbones.MobileNetV3_large_x1_0(**args)
+
+    Return an object of the MobileNetV3 class with scale 1.0 and model_name 'large'.
+
+> paddleseg.models.backbones.MobileNetV3_large_x1_25(**args)
+
+    Return an object of the MobileNetV3 class with scale 1.25 and model_name 'large'.
+
+
+## [XceptionDeeplab](../../paddleseg/models/backbones/xception_deeplab.py)
+Xception backbone of DeepLabV3+ from ["Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation"](https://arxiv.org/abs/1802.02611)
+
+> CLASS paddleseg.models.backbones.XceptionDeeplab(backbone, pretrained=None, output_stride=16)
+
+> > Args
+> > > - **backbone** (str): Which type of Xception_DeepLab to select. It should be one of ('xception_41', 'xception_65', 'xception_71').
+> > > - **pretrained** (str, optional): The path of pretrained model.
+> > > - **output_stride** (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 16.
+
+> paddleseg.models.backbones.Xception41_deeplab(**args)
+
+    Return an object of the XceptionDeeplab class with 41 layers.
+
+> paddleseg.models.backbones.Xception65_deeplab(**args)
+
+    Return an object of the XceptionDeeplab class with 65 layers.
+
+> paddleseg.models.backbones.Xception71_deeplab(**args)
+
+    Return an object of the XceptionDeeplab class with 71 layers.
diff --git a/docs/apis/core.md b/docs/apis/core.md
index e69de29bb2..86acd380e8 100644
--- a/docs/apis/core.md
+++ b/docs/apis/core.md
@@ -0,0 +1,111 @@
+# paddleseg.core
+
+The interface for training, evaluation and prediction.
+- [Training](#Training)
+- [Evaluation](#Evaluation)
+- [Prediction](#Prediction)
+
+## [Training](../../paddleseg/core/train.py)
+> paddleseg.core.train(model, train_dataset, val_dataset=None, optimizer=None, save_dir='output', iters=10000, batch_size=2, resume_model=None, save_interval=1000, log_iters=10, num_workers=0, use_vdl=False, losses=None)
+
+    Launch training.
+
+> Args
+> > - **model** (nn.Layer): A semantic segmentation model.
+> > - **train_dataset** (paddle.io.Dataset): Used to read and process training datasets.
+> > - **val_dataset** (paddle.io.Dataset, optional): Used to read and process validation datasets.
+> > - **optimizer** (paddle.optimizer.Optimizer): The optimizer.
+> > - **save_dir** (str, optional): The directory for saving the model snapshot. Default: 'output'.
+> > - **iters** (int, optional): How many iters to train the model. Default: 10000.
+> > - **batch_size** (int, optional): Mini batch size of one gpu or cpu. Default: 2.
+> > - **resume_model** (str, optional): The path of the model to resume from.
+> > - **save_interval** (int, optional): How many iters to save a model snapshot once during training. Default: 1000.
+> > - **log_iters** (int, optional): Display logging information every log_iters. Default: 10.
+> > - **num_workers** (int, optional): Num workers for data loader. Default: 0.
+> > - **use_vdl** (bool, optional): Whether to record the data to VisualDL during training. Default: False.
+> > - **losses** (dict): A dict including 'types' and 'coef'. The length of coef should equal to 1 or len(losses['types']).
+        The 'types' item is a list of objects of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficients.
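+
+A minimal end-to-end sketch (the dataset, optimizer, and loss settings below are illustrative assumptions, not the only valid configuration):
+
+```python
+import paddle
+import paddleseg.transforms as T
+from paddleseg.core import train
+from paddleseg.datasets import OpticDiscSeg
+from paddleseg.models import FCN
+from paddleseg.models.backbones import HRNet_W18
+from paddleseg.models.losses import CrossEntropyLoss
+
+# OpticDiscSeg downloads automatically when dataset_root is None.
+train_dataset = OpticDiscSeg(transforms=[T.Resize(target_size=(512, 512)), T.Normalize()], mode='train')
+model = FCN(num_classes=2, backbone=HRNet_W18(), backbone_indices=(-1,))
+optimizer = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9, parameters=model.parameters())
+losses = {'types': [CrossEntropyLoss()], 'coef': [1]}
+
+train(model=model, train_dataset=train_dataset, optimizer=optimizer,
+      save_dir='output', iters=1000, batch_size=4, losses=losses)
+```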
+
+## [Evaluation](../../paddleseg/core/val.py)
+> paddleseg.core.evaluate(model, eval_dataset, aug_eval=False, scales=1.0, flip_horizontal=True, flip_vertical=False, is_slide=False, stride=None, crop_size=None, num_workers=0)
+
+    Launch evaluation.
+
+> Args
+> > - **model** (nn.Layer): A semantic segmentation model.
+> > - **eval_dataset** (paddle.io.Dataset): Used to read and process validation datasets.
+> > - **aug_eval** (bool, optional): Whether to use multi-scale and flip augmentation for evaluation. Default: False.
+> > - **scales** (list|float, optional): Scales for augmentation. It is valid when `aug_eval` is True. Default: 1.0.
+> > - **flip_horizontal** (bool, optional): Whether to use horizontal flip augmentation. It is valid when `aug_eval` is True. Default: True.
+> > - **flip_vertical** (bool, optional): Whether to use vertical flip augmentation. It is valid when `aug_eval` is True. Default: False.
+> > - **is_slide** (bool, optional): Whether to evaluate by sliding window. Default: False.
+> > - **stride** (tuple|list, optional): The stride of sliding window, the first is width and the second is height.
+        It should be provided when `is_slide` is True.
+> > - **crop_size** (tuple|list, optional): The crop size of sliding window, the first is width and the second is height.
+        It should be provided when `is_slide` is True.
+> > - **num_workers** (int, optional): Num workers for data loader. Default: 0.
+
+> Returns
+> > - **float**: The mIoU of validation datasets.
+> > - **float**: The accuracy of validation datasets.
+
+## [Prediction](../../paddleseg/core/predict.py)
+
+> paddleseg.core.predict(model, model_path, transforms, image_list, image_dir=None, save_dir='output', aug_pred=False, scales=1.0, flip_horizontal=True, flip_vertical=False, is_slide=False, stride=None, crop_size=None)
+
+    Launch prediction and visualize the results.
+
+> Args
+> > - **model** (nn.Layer): Used to predict for input image.
+> > - **model_path** (str): The path of pretrained model.
+> > - **transforms** (transform.Compose): Preprocessing for the input image.
+> > - **image_list** (list): A list of image paths to be predicted.
+> > - **image_dir** (str, optional): The root directory of the images to be predicted. Default: None.
+> > - **save_dir** (str, optional): The directory for saving the predicted results. Default: 'output'.
+> > - **aug_pred** (bool, optional): Whether to use multi-scale and flip augmentation for prediction. Default: False.
+> > - **scales** (list|float, optional): Scales for augmentation. It is valid when `aug_pred` is True. Default: 1.0.
+> > - **flip_horizontal** (bool, optional): Whether to use horizontal flip augmentation. It is valid when `aug_pred` is True. Default: True.
+> > - **flip_vertical** (bool, optional): Whether to use vertical flip augmentation. It is valid when `aug_pred` is True. Default: False.
+> > - **is_slide** (bool, optional): Whether to predict by sliding window. Default: False.
+> > - **stride** (tuple|list, optional): The stride of sliding window, the first is width and the second is height.
+        It should be provided when `is_slide` is True.
+> > - **crop_size** (tuple|list, optional): The crop size of sliding window, the first is width and the second is height.
+        It should be provided when `is_slide` is True.
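+
+A hedged prediction sketch (the checkpoint and image paths are placeholders; substitute your own):
+
+```python
+import paddleseg.transforms as T
+from paddleseg.core import predict
+from paddleseg.models import FCN
+from paddleseg.models.backbones import HRNet_W18
+
+model = FCN(num_classes=2, backbone=HRNet_W18(), backbone_indices=(-1,))
+transforms = T.Compose([T.Resize(target_size=(512, 512)), T.Normalize()])
+predict(model,
+        model_path='output/best_model/model.pdparams',
+        transforms=transforms,
+        image_list=['docs/images/demo.jpg'],
+        save_dir='output/result')
+```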
diff --git a/docs/apis/cvlibs.md b/docs/apis/cvlibs.md
index e69de29bb2..fb73819bc0 100644
--- a/docs/apis/cvlibs.md
+++ b/docs/apis/cvlibs.md
@@ -0,0 +1,179 @@
+# paddleseg.cvlibs
+
+- [Manager](#Manager)
+- [Parameters Initialization](#Parameters-Initialization)
+- [Config](#Config)
+
+
+## [Manager](../../paddleseg/cvlibs/manager.py)
+> CLASS paddleseg.cvlibs.manager.ComponentManager(name)
+
+    Implement a manager class to add the new component properly.
+    The component can be added as either class or function type.
+
+> > Args
+> > > - **name** (str): The name of component.
+
+> > Examples 1
+
+```python
+from paddleseg.cvlibs.manager import ComponentManager
+
+model_manager = ComponentManager()
+
+class AlexNet: ...
+class ResNet: ...
+
+model_manager.add_component(AlexNet)
+model_manager.add_component(ResNet)
+
+# Alternatively, pass a sequence:
+model_manager.add_component([AlexNet, ResNet])
+print(model_manager.components_dict)
+# {'AlexNet': <class 'AlexNet'>, 'ResNet': <class 'ResNet'>}
+```
+
+> > Examples 2
+
+```python
+# An easier way is to use it as a Python decorator, adding it above the class declaration.
+from paddleseg.cvlibs.manager import ComponentManager
+
+model_manager = ComponentManager()
+
+@model_manager.add_component
+class AlexNet: ...
+
+@model_manager.add_component
+class ResNet: ...
+
+print(model_manager.components_dict)
+# {'AlexNet': <class 'AlexNet'>, 'ResNet': <class 'ResNet'>}
+```
+
+> > add_component(components)
+
+    Add component(s) into the corresponding manager.
+
+> > > Args
+> > > > - **components** (function|class|list|tuple): Support four types of components.
+> > Returns
+> > > > - **components** (function|class|list|tuple): The same as the input components.
+
+## [Parameters Initialization](../../paddleseg/cvlibs/param_init.py)
+Initialization methods.
+
+### Constant Initialization
+
+> constant_init(param, **kwargs)
+
+    Initialize the `param` with constants.
+
+> > Args
+> > > - **param** (Tensor): Tensor that needs to be initialized.
+
+> > Examples
+
+```python
+from paddleseg.cvlibs import param_init
+import paddle.nn as nn
+
+linear = nn.Linear(2, 4)
+param_init.constant_init(linear.weight, value=2.0)
+print(linear.weight.numpy())
+# result is [[2. 2. 2. 2.], [2. 2. 2. 2.]]
+```
+
+### Normal Initialization
+> normal_init(param, **kwargs)
+
+    Initialize the `param` with a Normal distribution.
+
+> > Args
+> > > - **param** (Tensor): Tensor that needs to be initialized.
+
+> > Examples
+
+```python
+from paddleseg.cvlibs import param_init
+import paddle.nn as nn
+
+linear = nn.Linear(2, 4)
+param_init.normal_init(linear.weight, loc=0.0, scale=1.0)
+```
+
+### Kaiming Normal Initialization
+> kaiming_normal_init(param, **kwargs)
+
+    Initialize the input tensor with Kaiming Normal initialization.
+
+    This function implements the `param` initialization from the paper
+    `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
+    ImageNet Classification <https://arxiv.org/abs/1502.01852>`
+    by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a
+    robust initialization method that particularly considers the rectifier
+    nonlinearities. In case of Uniform distribution, the range is [-x, x], where
+    .. math::
+        x = \sqrt{\frac{6.0}{fan\_in}}
+    In case of Normal distribution, the mean is 0 and the standard deviation
+    is
+    .. math::
+        \sqrt{\frac{2.0}{fan\_in}}
+
+> > Args
+> > > - **param** (Tensor): Tensor that needs to be initialized.
+
+> > Examples
+
+```python
+from paddleseg.cvlibs import param_init
+import paddle.nn as nn
+
+linear = nn.Linear(2, 4)
+# uniform is used to decide whether to use uniform or normal distribution
+param_init.kaiming_normal_init(linear.weight)
+```
+
+## [Config](../../paddleseg/cvlibs/config.py)
+> CLASS paddleseg.cvlibs.Config(path, learning_rate, batch_size, iters)
+
+    Training configuration parsing. Only yaml/yml files are supported.
+
+    The following hyper-parameters are available in the config file:
+        batch_size: The number of samples per GPU.
+        iters: The total training steps.
+        train_dataset: A training data config including type/data_root/transforms/mode.
+            For data type, please refer to paddleseg.datasets.
+            For specific transforms, please refer to paddleseg.transforms.transforms.
+        val_dataset: A validation data config including type/data_root/transforms/mode.
+        optimizer: An optimizer config, but currently PaddleSeg only supports sgd with momentum in the config file.
+            In addition, weight_decay could be set as a regularization.
+        learning_rate: A learning rate config. If decay is configured, the learning_rate value is the starting learning rate,
+             where only poly decay is supported using the config file. In addition, decay power and end_lr are tuned experimentally.
+        loss: A loss config. Multi-loss config is available. The loss type order is consistent with the seg model outputs,
+            where the coef term indicates the weight of the corresponding loss. Note that the number of coef must be the same as the number of
+            model outputs, and there could be only one loss type if using the same loss type among the outputs, otherwise the number of
+            loss types must be consistent with coef.
+        model: A model config including type/backbone and model-dependent arguments.
+            For model type, please refer to paddleseg.models.
+            For backbone, please refer to paddleseg.models.backbones.
+
+> > Args
+> > > - **path** (str): The path of config file, supports yaml format only.
+
+> > Examples
+
+```python
+from paddleseg.cvlibs.config import Config
+
+# Create a cfg object with yaml file path.
+cfg = Config(yaml_cfg_path)
+
+# Parsing the argument when its property is used.
+train_dataset = cfg.train_dataset
+
+# the argument of model should be parsed after dataset,
+# since the model builder uses some properties in dataset.
+model = cfg.model
+...
+```
diff --git a/docs/apis/datasets.md b/docs/apis/datasets.md
index e69de29bb2..1ad6917dda 100644
--- a/docs/apis/datasets.md
+++ b/docs/apis/datasets.md
@@ -0,0 +1,120 @@
+# paddleseg.datasets
+- [Custom Dataset](#Custom)
+- [Cityscapes](#Cityscapes)
+- [PascalVOC](#PascalVOC)
+- [ADE20K](#ADE20K)
+- [OpticDiscSeg](#OpticDiscSeg)
+
+## [Custom Dataset](../../paddleseg/datasets/dataset.py)
+> CLASS paddleseg.datasets.Dataset(transforms, dataset_root, num_classes, mode='train', train_path=None, val_path=None, test_path=None, separator=' ', ignore_index=255, edge=False)
+
+    Pass in a custom dataset that conforms to the format.
+
+> > Args
+> > > - **transforms** (list): Transforms for image.
+> > > - **dataset_root** (str): The dataset directory.
+> > > - **num_classes** (int): Number of classes.
+> > > - **mode** (str, optional): Which part of dataset to use. It is one of ('train', 'val', 'test'). Default: 'train'.
+> > > - **train_path** (str, optional): The train dataset file. When mode is 'train', train_path is necessary.
+        The contents of train_path file are as follows:
+        image1.jpg ground_truth1.png
+        image2.jpg ground_truth2.png
+> > > - **val_path** (str, optional): The evaluation dataset file. When mode is 'val', val_path is necessary.
+        The contents are the same as in train_path.
+> > > - **test_path** (str, optional): The test dataset file. When mode is 'test', test_path is necessary.
+        The annotation file is not necessary in test_path file.
+> > > - **separator** (str, optional): The separator of dataset list. Default: ' '.
+> > > - **edge** (bool, optional): Whether to compute edge while training. Default: False
+
+> > Examples
+
+```python
+import paddleseg.transforms as T
+from paddleseg.datasets import Dataset
+
+transforms = [T.RandomPaddingCrop(crop_size=(512,512)), T.Normalize()]
+dataset_root = 'dataset_root_path'
+train_path = 'train_path'
+num_classes = 2
+dataset = Dataset(transforms = transforms,
+                  dataset_root = dataset_root,
+                  num_classes = 2,
+                  train_path = train_path,
+                  mode = 'train')
+```
+
+## [Cityscapes](../../paddleseg/datasets/cityscapes.py)
+> CLASS paddleseg.datasets.Cityscapes(transforms, dataset_root, mode='train', edge=False)
+
+    Cityscapes dataset `https://www.cityscapes-dataset.com/`.
+    The folder structure is as follows:
+
+        cityscapes
+        |
+        |--leftImg8bit
+        |  |--train
+        |  |--val
+        |  |--test
+        |
+        |--gtFine
+        |  |--train
+        |  |--val
+        |  |--test
+
+    Make sure there are **labelTrainIds.png in gtFine directory. If not, please run the convert_cityscapes.py in tools.
+
+> > Args
+> > > - **transforms** (list): Transforms for image.
+> > > - **dataset_root** (str): Cityscapes dataset directory.
+> > > - **mode** (str, optional): Which part of dataset to use. It is one of ('train', 'val', 'test'). Default: 'train'.
+> > > - **edge** (bool, optional): Whether to compute edge while training. Default: False
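+
+> > Examples
+
+A minimal instantiation sketch (the `data/cityscapes` path is an assumption; point `dataset_root` at your own copy):
+
+```python
+import paddleseg.transforms as T
+from paddleseg.datasets import Cityscapes
+
+transforms = [T.RandomHorizontalFlip(), T.Normalize()]
+dataset = Cityscapes(transforms=transforms,
+                     dataset_root='data/cityscapes',
+                     mode='train')
+```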
+
+
+## [PascalVOC](../../paddleseg/datasets/voc.py)
+> CLASS paddleseg.datasets.PascalVOC(transforms, dataset_root=None, mode='train', edge=False)
+
+    PascalVOC2012 dataset `http://host.robots.ox.ac.uk/pascal/VOC/`.
+    If you want to augment the dataset, please run the voc_augment.py in tools.
+
+> > Args
+> > > - **transforms** (list): Transforms for image.
+> > > - **dataset_root** (str): The dataset directory. Default: None
+> > > - **mode** (str, optional): Which part of dataset to use. It is one of ('train', 'trainval', 'trainaug', 'val').
+        If you want to set mode to 'trainaug', please make sure the dataset has been augmented. Default: 'train'.
+> > > - **edge** (bool, optional): Whether to compute edge while training. Default: False
+
+## [ADE20K](../../paddleseg/datasets/ade.py)
+> CLASS paddleseg.datasets.ADE20K(transforms, dataset_root=None, mode='train', edge=False)
+
+    ADE20K dataset `http://sceneparsing.csail.mit.edu/`.
+
+> > Args
+> > > - **transforms** (list): A list of image transformations.
+> > > - **dataset_root** (str, optional): The ADE20K dataset directory. Default: None.
+> > > - **mode** (str, optional): A subset of the entire dataset. It should be one of ('train', 'val'). Default: 'train'.
+> > > - **edge** (bool, optional): Whether to compute edge while training. Default: False
+
+## [OpticDiscSeg](../../paddleseg/datasets/optic_disc_seg.py)
+> CLASS paddleseg.datasets.OpticDiscSeg(dataset_root=None, transforms=None, mode='train', edge=False)
+
+    OpticDiscSeg dataset is extracted from iChallenge-AMD `https://ai.baidu.com/broad/subordinate?dataset=amd`.
+
+> > Args
+> > > - **transforms** (list): Transforms for image.
+> > > - **dataset_root** (str): The dataset directory. Default: None
+> > > - **mode** (str, optional): Which part of dataset to use. It is one of ('train', 'val', 'test'). Default: 'train'.
+> > > - **edge** (bool, optional): Whether to compute edge while training. Default: False
diff --git a/docs/apis/models.md b/docs/apis/models.md
index e97f7c7834..54b5138afd 100644
--- a/docs/apis/models.md
+++ b/docs/apis/models.md

# API References
-----

```python
class paddleseg.models.OCRNet(
    num_classes,
    backbone,
    backbone_indices=None,
    ocr_mid_channels=512,
    ocr_key_channels=256,
    pretrained=None
)
```

The OCRNet implementation based on PaddlePaddle. The original article refers to [Yuan, Yuhui, et al. "Object-Contextual Representations for Semantic Segmentation"](https://arxiv.org/pdf/1909.11065.pdf)

> **Parameters**
>
> *num_classes(int)*: the unique number of target classes.
>
> *backbone(Paddle.nn.Layer)*: backbone network.
>
> *backbone_indices(tuple)*: two values in the tuple indicate the indices of output of backbone. the first index will be taken as a deep-supervision feature in auxiliary layer; the second one will be taken as input of pixel representation.
>
> *ocr_mid_channels(int)*: the number of middle channels in OCRHead.
>
> *ocr_key_channels(int)*: the number of key channels in ObjectAttentionBlock.
>
> *pretrained(str)*: the path or url of pretrained model. Default to None.
diff --git a/docs/apis/models.md b/docs/apis/models.md
index e97f7c7834..54b5138afd 100644
--- a/docs/apis/models.md
+++ b/docs/apis/models.md
@@ -1,53 +1,379 @@
+# paddleseg.models
-# API References
-----
-
-```python
-class paddleseg.models.OCRNet(
-    num_classes,
-    backbone,
-    backbone_indices=None,
-    ocr_mid_channels=512,
-    ocr_key_channels=256,
-    pretrained=None
-)
-```
-
-The OCRNet implementation based on PaddlePaddle. The original article refers to [Yuan, Yuhui, et al. "Object-Contextual Representations for Semantic Segmentation"](https://arxiv.org/pdf/1909.11065.pdf)
-
-> **Parameters**
->
-> *num_classes(int)*: the unique number of target classes.
->
-> *backbone(Paddle.nn.Layer)*: backbone network.
->
-> *backbone_indices(tuple)*: two values in the tuple indicate the indices of output of backbone. the first index will be taken as a deep-supervision feature in auxiliary layer; the second one will be taken as input of pixel representation.
->
-> *ocr_mid_channels(int)*: the number of middle channels in OCRHead.
->
-> *ocr_key_channels(int)*: the number of key channels in ObjectAttentionBlock.
->
-> *pretrained(str)*: the path or url of pretrained model. Default to None.
-
-----
-
-```python
-class paddleseg.models.BiSeNetv2(
-    num_classes,
-    backbone,
-    backbone_indices=None,
-    ocr_mid_channels=512,
-    ocr_key_channels=256,
-    pretrained=None
-)
-```
-
-The BiSeNet V2 implementation based on PaddlePaddle. The original article refers to [Yu, Changqian, et al. "BiSeNet V2: Bilateral Network with Guided Aggregation for Real-time Semantic Segmentation"](https://arxiv.org/abs/2004.02147)
-
-> **Parameters**
->
-> *num_classes(int)*: the unique number of target classes.
->
-> *lambd(float)*: factor for controlling the size of semantic branch channels. Default to 0.25.
->
-> *pretrained(str)*: the path or url of pretrained model. Default to None.
+The models subpackage contains the following models for image semantic segmentation.
+- [DeepLabV3+](#DeepLabV3+)
+- [DeepLabV3](#DeepLabV3)
+- [FCN](#FCN)
+- [OCRNet](#OCRNet)
+- [PSPNet](#PSPNet)
+- [ANN](#ANN)
+- [BiSeNetV2](#BiSeNetV2)
+- [DANet](#DANet)
+- [FastSCNN](#FastSCNN)
+- [GCNet](#GCNet)
+- [GSCNN](#GSCNN)
+- [HarDNet](#HarDNet)
+- [UNet](#UNet)
+- [U2Net](#U2Net)
+- [U2Net+](#U2Net+)
+- [AttentionUNet](#AttentionUNet)
+
+## [DeepLabV3+](../../paddleseg/models/deeplab.py)
+> CLASS paddleseg.models.DeepLabV3P(num_classes, backbone, backbone_indices=(0, 3), aspp_ratios=(1, 6, 12, 18), aspp_out_channels=256, align_corners=False, pretrained=None)
+
+    The DeepLabV3Plus implementation based on PaddlePaddle.
+
+    The original article refers to
+    Liang-Chieh Chen, et al. "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation"
+    (https://arxiv.org/abs/1802.02611)
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **backbone** (paddle.nn.Layer): Backbone network, currently supports Resnet50_vd/Resnet101_vd/Xception65.
+> > > - **backbone_indices** (tuple, optional): Two values in the tuple indicate the indices of output of backbone.
+    Default: (0, 3).
+> > > - **aspp_ratios** (tuple, optional): The dilation rates used in the ASPP module.
+    If output_stride=16, aspp_ratios should be set as (1, 6, 12, 18).
+    If output_stride=8, aspp_ratios is (1, 12, 24, 36).
+    Default: (1, 6, 12, 18).
+> > > - **aspp_out_channels** (int, optional): The output channels of ASPP module. Default: 256.
+> > > - **align_corners** (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+    e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
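+
+> > Examples
+
+A minimal construction sketch (ResNet50_vd is one of the supported backbones above; the output_stride value is illustrative and must agree with aspp_ratios as noted):
+
+```python
+from paddleseg.models import DeepLabV3P
+from paddleseg.models.backbones import ResNet50_vd
+
+# output_stride=8 pairs with aspp_ratios=(1, 12, 24, 36).
+backbone = ResNet50_vd(output_stride=8)
+model = DeepLabV3P(num_classes=19,
+                   backbone=backbone,
+                   backbone_indices=(0, 3),
+                   aspp_ratios=(1, 12, 24, 36))
+```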
+
+## [DeepLabV3](../../paddleseg/models/deeplab.py)
+> CLASS paddleseg.models.DeepLabV3(num_classes, backbone, backbone_indices=(3, ), aspp_ratios=(1, 6, 12, 18), aspp_out_channels=256, align_corners=False, pretrained=None)
+
+    The DeepLabV3 implementation based on PaddlePaddle.
+
+    The original article refers to
+    Liang-Chieh Chen, et al. "Rethinking Atrous Convolution for Semantic Image Segmentation"
+    (https://arxiv.org/pdf/1706.05587.pdf).
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **backbone** (paddle.nn.Layer): Backbone network, currently supports Resnet50_vd/Resnet101_vd/Xception65.
+> > > - **backbone_indices** (tuple, optional): Two values in the tuple indicate the indices of output of backbone.
+    Default: (3, ).
+> > > - **aspp_ratios** (tuple, optional): The dilation rates used in the ASPP module.
+    If output_stride=16, aspp_ratios should be set as (1, 6, 12, 18).
+    If output_stride=8, aspp_ratios is (1, 12, 24, 36).
+    Default: (1, 6, 12, 18).
+> > > - **aspp_out_channels** (int, optional): The output channels of ASPP module. Default: 256.
+> > > - **align_corners** (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+    e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
+
+## [FCN](../../paddleseg/models/fcn.py)
+> CLASS paddleseg.models.FCN(num_classes,
+                backbone,
+                backbone_indices=(-1, ),
+                backbone_channels=(270, ),
+                channels=None,
+                align_corners=False,
+                pretrained=None)
+
+    A simple implementation for FCN based on PaddlePaddle.
+
+    The original article refers to
+    Evan Shelhamer, et al. "Fully Convolutional Networks for Semantic Segmentation"
+    (https://arxiv.org/abs/1411.4038).
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **backbone** (paddle.nn.Layer): Backbone network.
+> > > - **backbone_indices** (tuple, optional): The values in the tuple indicate the indices of output of backbone.
+    Default: (-1, ).
+> > > - **backbone_channels** (tuple, optional): The numbers of channels of the backbone outputs selected by backbone_indices. Default: (270, ).
+> > > - **channels** (int, optional): The channels between conv layer and the last layer of FCNHead.
+    If None, it will be the number of channels of input features. Default: None.
+> > > - **align_corners** (bool): An argument of F.interpolate. It should be set to False when the output size of feature
+    is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
+
+## [OCRNet](../../paddleseg/models/ocrnet.py)
+> CLASS paddleseg.models.OCRNet(num_classes,
+                backbone,
+                backbone_indices,
+                ocr_mid_channels=512,
+                ocr_key_channels=256,
+                align_corners=False,
+                pretrained=None)
+
+    The OCRNet implementation based on PaddlePaddle.
+    The original article refers to
+    Yuan, Yuhui, et al. "Object-Contextual Representations for Semantic Segmentation"
+    (https://arxiv.org/pdf/1909.11065.pdf)
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **backbone** (Paddle.nn.Layer): Backbone network.
+> > > - **backbone_indices** (tuple): A tuple that indicates the indices of output of backbone.
+    It can be either one or two values. If two values, the first index will be taken as
+    a deep-supervision feature in auxiliary layer and the second one will be taken as
+    input of pixel representation; if one value, it is used for both.
+> > > - **ocr_mid_channels** (int, optional): The number of middle channels in OCRHead. Default: 512.
+> > > - **ocr_key_channels** (int, optional): The number of key channels in ObjectAttentionBlock. Default: 256.
+> > > - **align_corners** (bool): An argument of F.interpolate. It should be set to False when the output size of feature
+    is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
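+
+> > Examples
+
+A minimal construction sketch, assuming the HRNet_W18 backbone from paddleseg.models.backbones; with a single backbone output, one index serves both the auxiliary and pixel branches:
+
+```python
+from paddleseg.models import OCRNet
+from paddleseg.models.backbones import HRNet_W18
+
+backbone = HRNet_W18()
+model = OCRNet(num_classes=19, backbone=backbone, backbone_indices=(0, ))
+```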
+
+## [PSPNet](../../paddleseg/models/pspnet.py)
+> CLASS paddleseg.models.PSPNet(num_classes,
+                backbone,
+                backbone_indices=(2, 3),
+                pp_out_channels=1024,
+                bin_sizes=(1, 2, 3, 6),
+                enable_auxiliary_loss=True,
+                align_corners=False,
+                pretrained=None)
+
+    The PSPNet implementation based on PaddlePaddle.
+
+    The original article refers to
+    Zhao, Hengshuang, et al. "Pyramid Scene Parsing Network"
+    (https://openaccess.thecvf.com/content_cvpr_2017/papers/Zhao_Pyramid_Scene_Parsing_CVPR_2017_paper.pdf).
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **backbone** (Paddle.nn.Layer): Backbone network, currently supports Resnet50/101.
+> > > - **backbone_indices** (tuple, optional): Two values in the tuple indicate the indices of output of backbone.
+> > > - **pp_out_channels** (int, optional): The output channels after Pyramid Pooling Module. Default: 1024.
+> > > - **bin_sizes** (tuple, optional): The out size of pooled feature maps. Default: (1, 2, 3, 6).
+> > > - **enable_auxiliary_loss** (bool, optional): A bool value that indicates whether to add auxiliary loss. Default: True.
+> > > - **align_corners** (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+    e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
+
+## [ANN](../../paddleseg/models/ann.py)
+> CLASS paddleseg.models.ANN(num_classes,
+                backbone,
+                backbone_indices=(2, 3),
+                key_value_channels=256,
+                inter_channels=512,
+                psp_size=(1, 3, 6, 8),
+                enable_auxiliary_loss=True,
+                align_corners=False,
+                pretrained=None)
+
+    The ANN implementation based on PaddlePaddle.
+
+    The original article refers to
+    Zhen, Zhu, et al. "Asymmetric Non-local Neural Networks for Semantic Segmentation"
+    (https://arxiv.org/pdf/1908.07678.pdf).
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **backbone** (Paddle.nn.Layer): Backbone network, currently supports Resnet50/101.
+> > > - **backbone_indices** (tuple, optional): Two values in the tuple indicate the indices of output of backbone.
+> > > - **key_value_channels** (int, optional): The key and value channels of self-attention map in both AFNB and APNB modules.
+    Default: 256.
+> > > - **inter_channels** (int, optional): Both input and output channels of APNB modules. Default: 512.
+> > > - **psp_size** (tuple, optional): The out size of pooled feature maps. Default: (1, 3, 6, 8).
+> > > - **enable_auxiliary_loss** (bool, optional): A bool value that indicates whether to add auxiliary loss. Default: True.
+> > > - **align_corners** (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+    e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
+
+## [BiSeNetV2](../../paddleseg/models/bisenet.py)
+> CLASS paddleseg.models.BiSeNetV2(num_classes,
+                lambd=0.25,
+                align_corners=False,
+                pretrained=None)
+
+    The BiSeNet V2 implementation based on PaddlePaddle.
+
+    The original article refers to
+    Yu, Changqian, et al. "BiSeNet V2: Bilateral Network with Guided Aggregation for Real-time Semantic Segmentation"
+    (https://arxiv.org/abs/2004.02147)
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **lambd** (float, optional): A factor for controlling the size of semantic branch channels. Default: 0.25.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
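+
+> > Examples
+
+BiSeNetV2 builds its own two-branch trunk, so no backbone argument is needed; a minimal sketch (num_classes=19 is just the Cityscapes setting):
+
+```python
+from paddleseg.models import BiSeNetV2
+
+model = BiSeNetV2(num_classes=19)
+```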
+
+## [DANet](../../paddleseg/models/danet.py)
+> CLASS paddleseg.models.DANet(num_classes,
+                backbone,
+                backbone_indices,
+                align_corners=False,
+                pretrained=None)
+
+    The DANet implementation based on PaddlePaddle.
+
+    The original article refers to
+    Fu, Jun, et al. "Dual Attention Network for Scene Segmentation"
+    (https://arxiv.org/pdf/1809.02983.pdf)
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **backbone** (Paddle.nn.Layer): A backbone network.
+> > > - **backbone_indices** (tuple): The values in the tuple indicate the indices of
+    output of backbone.
+> > > - **align_corners** (bool): An argument of F.interpolate. It should be set to False when the output size of feature
+    is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
+
+## [FastSCNN](../../paddleseg/models/fast_scnn.py)
+> CLASS paddleseg.models.FastSCNN(num_classes,
+                enable_auxiliary_loss=True,
+                align_corners=False,
+                pretrained=None)
+
+    The FastSCNN implementation based on PaddlePaddle.
+    As mentioned in the original paper, FastSCNN is a real-time segmentation algorithm (123.5fps)
+    even for high resolution images (1024x2048).
+    The original article refers to
+    Poudel, Rudra PK, et al. "Fast-SCNN: Fast Semantic Segmentation Network"
+    (https://arxiv.org/pdf/1902.04502.pdf).
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **enable_auxiliary_loss** (bool, optional): A bool value that indicates whether to add auxiliary loss.
+    If True, auxiliary loss will be added after LearningToDownsample module. Default: True.
+> > > - **align_corners** (bool): An argument of F.interpolate. It should be set to False when the output size of feature
+    is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
+
+## [GCNet](../../paddleseg/models/gcnet.py)
+> CLASS paddleseg.models.GCNet(num_classes,
+                backbone,
+                backbone_indices=(2, 3),
+                gc_channels=512,
+                ratio=0.25,
+                enable_auxiliary_loss=True,
+                align_corners=False,
+                pretrained=None)
+
+    The GCNet implementation based on PaddlePaddle.
+
+    The original article refers to
+    Cao, Yue, et al. "GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond"
+    (https://arxiv.org/pdf/1904.11492.pdf).
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **backbone** (Paddle.nn.Layer): Backbone network, currently supports Resnet50/101.
+> > > - **backbone_indices** (tuple, optional): Two values in the tuple indicate the indices of output of backbone.
+> > > - **gc_channels** (int, optional): The input channels to Global Context Block. Default: 512.
+> > > - **ratio** (float, optional): It indicates the ratio of attention channels and gc_channels. Default: 0.25.
+> > > - **enable_auxiliary_loss** (bool, optional): A bool value that indicates whether to add auxiliary loss. Default: True.
+> > > - **align_corners** (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+    e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
+
+
+## [GSCNN](../../paddleseg/models/gscnn.py)
+> CLASS paddleseg.models.GSCNN(num_classes,
+                backbone,
+                backbone_indices=(0, 1, 2, 3),
+                aspp_ratios=(1, 6, 12, 18),
+                aspp_out_channels=256,
+                align_corners=False,
+                pretrained=None)
+
+    The GSCNN implementation based on PaddlePaddle.
+
+    The original article refers to
+    Towaki Takikawa, et al. "Gated-SCNN: Gated Shape CNNs for Semantic Segmentation"
+    (https://arxiv.org/pdf/1907.05740.pdf)
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **backbone** (paddle.nn.Layer): Backbone network, currently supports Resnet50_vd/Resnet101_vd.
+> > > - **backbone_indices** (tuple, optional): Two values in the tuple indicate the indices of output of backbone.
+    Default: (0, 1, 2, 3).
+> > > - **aspp_ratios** (tuple, optional): The dilation rates used in the ASPP module.
+    If output_stride=16, aspp_ratios should be set as (1, 6, 12, 18).
+    If output_stride=8, aspp_ratios is (1, 12, 24, 36).
+    Default: (1, 6, 12, 18).
+> > > - **aspp_out_channels** (int, optional): The output channels of ASPP module. Default: 256.
+> > > - **align_corners** (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+    e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
+
+## [HarDNet](../../paddleseg/models/hardnet.py)
+> CLASS paddleseg.models.HarDNet(num_classes,
+                stem_channels=(16, 24, 32, 48),
+                ch_list=(64, 96, 160, 224, 320),
+                grmul=1.7,
+                gr=(10, 16, 18, 24, 32),
+                n_layers=(4, 4, 8, 8, 8),
+                align_corners=False,
+                pretrained=None)
+
+    [Real Time] The FC-HarDNet 70 implementation based on PaddlePaddle.
+    The original article refers to
+    Chao, Ping, et al. "HarDNet: A Low Memory Traffic Network"
+    (https://arxiv.org/pdf/1909.00948.pdf)
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **stem_channels** (tuple|list, optional): The number of channels before the encoder. Default: (16, 24, 32, 48).
+> > > - **ch_list** (tuple|list, optional): The number of channels at each block in the encoder. Default: (64, 96, 160, 224, 320).
+> > > - **grmul** (float, optional): The channel multiplying factor in HarDBlock, which is m in the paper. Default: 1.7.
+> > > - **gr** (tuple|list, optional): The growth rate in each HarDBlock, which is k in the paper. Default: (10, 16, 18, 24, 32).
+> > > - **n_layers** (tuple|list, optional): The number of layers in each HarDBlock. Default: (4, 4, 8, 8, 8).
+> > > - **align_corners** (bool): An argument of F.interpolate. It should be set to False when the output size of feature
+    is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
+
+## [UNet](../../paddleseg/models/unet.py)
+> CLASS paddleseg.models.UNet(num_classes,
+                align_corners=False,
+                use_deconv=False,
+                pretrained=None)
+
+    The UNet implementation based on PaddlePaddle.
+
+    The original article refers to
+    Olaf Ronneberger, et al. "U-Net: Convolutional Networks for Biomedical Image Segmentation"
+    (https://arxiv.org/abs/1505.04597).
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **align_corners** (bool): An argument of F.interpolate. It should be set to False when the output size of feature
+    is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **use_deconv** (bool, optional): A bool value that indicates whether to use deconvolution in upsampling.
+    If False, use resize_bilinear. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model for fine tuning. Default: None.
+
+## [U2Net](../../paddleseg/models/u2net.py)
+> CLASS paddleseg.models.U2Net(num_classes, in_ch=3, pretrained=None)
+
+    The U^2-Net implementation based on PaddlePaddle.
+
+    The original article refers to
+    Xuebin Qin, et al. "U^2-Net: Going Deeper with Nested U-Structure for Salient Object Detection"
+    (https://arxiv.org/abs/2005.09007).
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **in_ch** (int, optional): Input channels. Default: 3.
+> > > - **pretrained** (str, optional): The path or url of pretrained model for fine tuning. Default: None.
+
+## [U2Net+](../../paddleseg/models/u2net.py)
+> CLASS paddleseg.models.U2Netp(num_classes, in_ch=3, pretrained=None)
+
+    The U^2-Netp implementation based on PaddlePaddle.
+
+    The original article refers to
+    Xuebin Qin, et al. "U^2-Net: Going Deeper with Nested U-Structure for Salient Object Detection"
+    (https://arxiv.org/abs/2005.09007).
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **in_ch** (int, optional): Input channels. Default: 3.
+> > > - **pretrained** (str, optional): The path or url of pretrained model for fine tuning. Default: None.
+
+## [AttentionUNet](../../paddleseg/models/attention_unet.py)
+> CLASS paddleseg.models.AttentionUNet(num_classes, pretrained=None)
+
+    The Attention-UNet implementation based on PaddlePaddle.
+    As mentioned in the original paper, the authors propose a novel attention gate (AG)
+    that automatically learns to focus on target structures of varying shapes and sizes.
+    Models trained with AGs implicitly learn to suppress irrelevant regions in an input image while
+    highlighting salient features useful for a specific task.
+
+    The original article refers to
+    Oktay, O., et al. "Attention U-Net: Learning Where to Look for the Pancreas"
+    (https://arxiv.org/pdf/1804.03999.pdf).
+
+> > Args
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
diff --git a/docs/apis/transforms.md b/docs/apis/transforms.md
index e69de29bb2..956a2ac7d7 100644
--- a/docs/apis/transforms.md
+++ b/docs/apis/transforms.md
@@ -0,0 +1,183 @@
+# [paddleseg.transforms](../../paddleseg/transforms/transforms.py)
+
+## Compose
+> CLASS paddleseg.transforms.Compose(transforms, to_rgb=True)
+
+    Do transformation on input data with corresponding pre-processing and augmentation operations.
+    The shape of input data to all operations is [height, width, channels].
+
+> > Args
+> > > - **transforms** (list): A list contains data pre-processing or augmentation.
+> > > - **to_rgb** (bool, optional): Whether to convert image to RGB color space. Default: True.
+
+> > Raises
+> > > - TypeError: When 'transforms' is not a list.
+> > > - ValueError: When the length of 'transforms' is less than 1.
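+
+> > Examples
+
+A small pipeline sketch using ops documented below; the dataset classes wrap a plain transform list in Compose in the same way:
+
+```python
+import paddleseg.transforms as T
+
+transforms = T.Compose([
+    T.RandomHorizontalFlip(),
+    T.Resize(target_size=(512, 512)),
+    T.Normalize()
+])
+```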
+
+## RandomHorizontalFlip
+
+> CLASS paddleseg.transforms.RandomHorizontalFlip(prob=0.5)
+
+    Flip an image horizontally with a certain probability.
+
+> > Args
+> > > - **prob** (float, optional): A probability of horizontally flipping. Default: 0.5.
+
+## RandomVerticalFlip
+
+> CLASS paddleseg.transforms.RandomVerticalFlip(prob=0.1)
+
+    Flip an image vertically with a certain probability.
+
+> > Args
+> > > - **prob** (float, optional): A probability of vertical flipping. Default: 0.1.
+
+## Resize
+> CLASS paddleseg.transforms.Resize(target_size=(512, 512), interp='LINEAR')
+
+    Resize an image.
+
+> > Args
+> > > - **target_size** (list|tuple, optional): The target size of image. Default: (512, 512).
+> > > - **interp** (str, optional): The interpolation mode of resize is consistent with opencv.
+    ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM']. Note that when it is
+    'RANDOM', a random interpolation mode would be specified. Default: "LINEAR".
+
+> > Raises
+> > > - TypeError: When 'target_size' type is neither list nor tuple.
+> > > - ValueError: When "interp" is out of pre-defined methods ('NEAREST', 'LINEAR', 'CUBIC',
+    'AREA', 'LANCZOS4', 'RANDOM').
+
+## ResizeByLong
+> CLASS paddleseg.transforms.ResizeByLong(long_size)
+
+    Resize the long side of an image to given size, and then scale the other side proportionally.
+
+> > Args
+> > > - **long_size** (int): The target size of long side.
+
+## ResizeRangeScaling
+> CLASS paddleseg.transforms.ResizeRangeScaling(min_value=400, max_value=600)
+
+    Resize the long side of an image into a range, and then scale the other side proportionally.
+
+> > Args
+> > > - **min_value** (int, optional): The minimum value of long side after resize. Default: 400.
+> > > - **max_value** (int, optional): The maximum value of long side after resize. Default: 600.
+
+## ResizeStepScaling
+> CLASS paddleseg.transforms.ResizeStepScaling(min_scale_factor=0.75,
+                max_scale_factor=1.25,
+                scale_step_size=0.25)
+
+    Scale an image proportionally within a range.
+
+> > Args
+> > > - **min_scale_factor** (float, optional): The minimum scale. Default: 0.75.
+> > > - **max_scale_factor** (float, optional): The maximum scale. Default: 1.25.
+> > > - **scale_step_size** (float, optional): The scale interval. Default: 0.25.
+
+> > Raises
+> > > - ValueError: When min_scale_factor is greater than max_scale_factor.
+
+## Normalize
+> CLASS paddleseg.transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
+
+    Normalize an image.
+
+> > Args
+> > > - **mean** (list, optional): The mean value of a data set. Default: [0.5, 0.5, 0.5].
+> > > - **std** (list, optional): The standard deviation of a data set. Default: [0.5, 0.5, 0.5].
+
+> > Raises
+> > > - ValueError: When mean/std is not list or any value in std is 0.
+
+## Padding
+> CLASS paddleseg.transforms.Padding(target_size,
+                im_padding_value=(127.5, 127.5, 127.5),
+                label_padding_value=255)
+
+    Add bottom-right padding to a raw image or annotation image.
+
+> > Args
+> > > - **target_size** (list|tuple): The target size after padding.
+> > > - **im_padding_value** (list, optional): The padding value of raw image.
+    Default: [127.5, 127.5, 127.5].
+> > > - **label_padding_value** (int, optional): The padding value of annotation image. Default: 255.
+
+> > Raises
+> > > - TypeError: When target_size is neither list nor tuple.
+> > > - ValueError: When the length of target_size is not 2.
+
+## RandomPaddingCrop
+> CLASS paddleseg.transforms.RandomPaddingCrop(crop_size=(512, 512),
+                im_padding_value=(127.5, 127.5, 127.5),
+                label_padding_value=255)
+
+    Crop a sub-image from a raw image and annotation image randomly. If the target cropping size
+    is larger than original image, then the bottom-right padding will be added.
+
+> > Args
+> > > - **crop_size** (tuple, optional): The target cropping size. Default: (512, 512).
+> > > - **im_padding_value** (list, optional): The padding value of raw image.
+    Default: [127.5, 127.5, 127.5].
+> > > - **label_padding_value** (int, optional): The padding value of annotation image. Default: 255.
+
+> > Raises
+> > > - TypeError: When crop_size is neither list nor tuple.
+> > > - ValueError: When the length of crop_size is not 2.
+
+
+## RandomBlur
+> CLASS paddleseg.transforms.RandomBlur(prob=0.1)
+
+    Blur an image with a Gaussian filter with a certain probability.
+
+> > Args
+> > > - **prob** (float, optional): A probability of blurring an image. Default: 0.1.
+
+## RandomRotation
+> CLASS paddleseg.transforms.RandomRotation(max_rotation=15,
+                im_padding_value=(127.5, 127.5, 127.5),
+                label_padding_value=255)
+
+    Rotate an image randomly with padding.
+
+> > Args
+> > > - **max_rotation** (float, optional): The maximum rotation degree. Default: 15.
+> > > - **im_padding_value** (list, optional): The padding value of raw image.
+    Default: [127.5, 127.5, 127.5].
+> > > - **label_padding_value** (int, optional): The padding value of annotation image. Default: 255.
+
+## RandomScaleAspect
+> CLASS paddleseg.transforms.RandomScaleAspect(min_scale=0.5, aspect_ratio=0.33)
+
+    Crop a sub-image from an original image with a range of area ratio and aspect ratio,
+    and then scale the sub-image back to the size of the original image.
+
+> > Args
+> > > - **min_scale** (float, optional): The minimum area ratio of cropped image to the original image. Default: 0.5.
+> > > - **aspect_ratio** (float, optional): The minimum aspect ratio. Default: 0.33.
+
+
+## RandomDistort
+> CLASS paddleseg.transforms.RandomDistort(brightness_range=0.5,
+                brightness_prob=0.5,
+                contrast_range=0.5,
+                contrast_prob=0.5,
+                saturation_range=0.5,
+                saturation_prob=0.5,
+                hue_range=18,
+                hue_prob=0.5)
+
+    Distort an image with random configurations.
+
+> > Args
+> > > - **brightness_range** (float, optional): A range of brightness. Default: 0.5.
+> > > - **brightness_prob** (float, optional): A probability of adjusting brightness. Default: 0.5.
+> > > - **contrast_range** (float, optional): A range of contrast. Default: 0.5.
+> > > - **contrast_prob** (float, optional): A probability of adjusting contrast. Default: 0.5.
+> > > - **saturation_range** (float, optional): A range of saturation. Default: 0.5.
+> > > - **saturation_prob** (float, optional): A probability of adjusting saturation. Default: 0.5.
+> > > - **hue_range** (int, optional): A range of hue. Default: 18.
+> > > - **hue_prob** (float, optional): A probability of adjusting hue. Default: 0.5.
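+
+> > Examples
+
+A typical train-time pipeline sketch combining the ops above (the values are illustrative, not recommended settings):
+
+```python
+import paddleseg.transforms as T
+
+train_transforms = [
+    T.ResizeStepScaling(min_scale_factor=0.75, max_scale_factor=1.25, scale_step_size=0.25),
+    T.RandomPaddingCrop(crop_size=(512, 512)),
+    T.RandomHorizontalFlip(),
+    T.RandomDistort(),
+    T.Normalize()
+]
+```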
diff --git a/paddleseg/core/predict.py b/paddleseg/core/predict.py
index 6eaa5453af..c95c45c493 100644
--- a/paddleseg/core/predict.py
+++ b/paddleseg/core/predict.py
@@ -49,9 +49,18 @@ def predict(model,
         model (nn.Layer): Used to predict for input image.
         model_path (str): The path of pretrained model.
         transforms (transform.Compose): Preprocess for input image.
-        image_list (list): A list of images to be predicted.
-        image_dir (str): The directory of the images to be predicted. Default: None.
-        save_dir (str): The directory to save the visualized results. Default: 'output'.
+        image_list (list): A list of image paths to be predicted.
+        image_dir (str, optional): The root directory of the images to be predicted. Default: None.
+        save_dir (str, optional): The directory to save the visualized results. Default: 'output'.
+        aug_pred (bool, optional): Whether to use multi-scale and flip augmentation for prediction. Default: False.
+        scales (list|float, optional): Scales for augmentation. It is valid when `aug_pred` is True. Default: 1.0.
+        flip_horizontal (bool, optional): Whether to use horizontal flip augmentation. It is valid when `aug_pred` is True. Default: True.
+        flip_vertical (bool, optional): Whether to use vertical flip augmentation. It is valid when `aug_pred` is True. Default: False.
+        is_slide (bool, optional): Whether to predict by sliding window. Default: False.
+        stride (tuple|list, optional): The stride of sliding window, the first is width and the second is height.
+            It should be provided when `is_slide` is True.
+        crop_size (tuple|list, optional): The crop size of sliding window, the first is width and the second is height.
+            It should be provided when `is_slide` is True.
     """
     para_state_dict = paddle.load(model_path)
diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py
index 4c2e33087e..d3cc4b42af 100644
--- a/paddleseg/core/train.py
+++ b/paddleseg/core/train.py
@@ -57,6 +57,25 @@ def train(model,
           num_workers=0,
           use_vdl=False,
           losses=None):
+    """
+    Launch training.
+
+    Args:
+        model (nn.Layer): A semantic segmentation model.
+        train_dataset (paddle.io.Dataset): Used to read and process training datasets.
+        val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets.
+        optimizer (paddle.optimizer.Optimizer): The optimizer.
+        save_dir (str, optional): The directory for saving the model snapshot. Default: 'output'.
+        iters (int, optional): How many iters to train the model. Default: 10000.
+        batch_size (int, optional): Mini batch size of one gpu or cpu. Default: 2.
+        resume_model (str, optional): The path of the model to resume training from.
+        save_interval (int, optional): How many iters to save a model snapshot once during training. Default: 1000.
+        log_iters (int, optional): Display logging information at every log_iters. Default: 10.
+        num_workers (int, optional): Num workers for data loader. Default: 0.
+        use_vdl (bool, optional): Whether to record the data to VisualDL during training. Default: False.
+        losses (dict): A dict including 'types' and 'coef'. The length of coef should be 1 or len(losses['types']).
+            The 'types' item is a list of objects of paddleseg.models.losses, while the 'coef' item is a list of the corresponding coefficients.
+    """
     nranks = paddle.distributed.ParallelEnv().nranks
     local_rank = paddle.distributed.ParallelEnv().local_rank
diff --git a/paddleseg/core/val.py b/paddleseg/core/val.py
index 11f14cbc51..c5447cbcb8 100644
--- a/paddleseg/core/val.py
+++ b/paddleseg/core/val.py
@@ -34,6 +34,27 @@ def evaluate(model,
             stride=None,
             crop_size=None,
             num_workers=0):
+    """
+    Launch evaluation.
+
+    Args:
+        model (nn.Layer): A semantic segmentation model.
+        eval_dataset (paddle.io.Dataset): Used to read and process validation datasets.
+        aug_eval (bool, optional): Whether to use multi-scale and flip augmentation for evaluation. Default: False.
+        scales (list|float, optional): Scales for augmentation. It is valid when `aug_eval` is True. Default: 1.0.
+        flip_horizontal (bool, optional): Whether to use horizontal flip augmentation. It is valid when `aug_eval` is True. Default: True.
+        flip_vertical (bool, optional): Whether to use vertical flip augmentation. It is valid when `aug_eval` is True. Default: False.
+        is_slide (bool, optional): Whether to evaluate by sliding window. Default: False.
+        stride (tuple|list, optional): The stride of sliding window, the first is width and the second is height.
+            It should be provided when `is_slide` is True.
+        crop_size (tuple|list, optional): The crop size of sliding window, the first is width and the second is height.
+            It should be provided when `is_slide` is True.
+        num_workers (int, optional): Num workers for data loader. Default: 0.
+
+    Returns:
+        float: The mIoU of validation datasets.
+        float: The accuracy of validation datasets.
+ """ model.eval() nranks = paddle.distributed.ParallelEnv().nranks local_rank = paddle.distributed.ParallelEnv().local_rank diff --git a/paddleseg/cvlibs/param_init.py b/paddleseg/cvlibs/param_init.py index 106dc8f628..335281242e 100644 --- a/paddleseg/cvlibs/param_init.py +++ b/paddleseg/cvlibs/param_init.py @@ -74,6 +74,9 @@ def kaiming_normal_init(param, **kwargs): .. math:: \sqrt{\\frac{2.0}{fan\_in}} + Args: + param (Tensor): Tensor that needs to be initialized. + Examples: from paddleseg.cvlibs import param_init diff --git a/paddleseg/datasets/ade.py b/paddleseg/datasets/ade.py index 5ace6a3e56..dae7de4360 100644 --- a/paddleseg/datasets/ade.py +++ b/paddleseg/datasets/ade.py @@ -36,7 +36,7 @@ class ADE20K(Dataset): transforms (list): A list of image transformations. dataset_root (str, optional): The ADK20K dataset directory. Default: None. mode (str, optional): A subset of the entire dataset. It should be one of ('train', 'val'). Default: 'train'. - edge (bool): Whether to compute edge while training. Default: False + edge (bool, optional): Whether to compute edge while training. Default: False """ def __init__(self, transforms, dataset_root=None, mode='train', edge=False): diff --git a/paddleseg/datasets/cityscapes.py b/paddleseg/datasets/cityscapes.py index 7d3659bd6f..a501722098 100644 --- a/paddleseg/datasets/cityscapes.py +++ b/paddleseg/datasets/cityscapes.py @@ -43,8 +43,8 @@ class Cityscapes(Dataset): Args: transforms (list): Transforms for image. dataset_root (str): Cityscapes dataset directory. - mode (str): Which part of dataset to use. it is one of ('train', 'val', 'test'). Default: 'train'. - edge (bool): Whether to compute edge while training. Default: False + mode (str, optional): Which part of dataset to use. it is one of ('train', 'val', 'test'). Default: 'train'. + edge (bool, optional): Whether to compute edge while training. Default: False """ def __init__(self, transforms, dataset_root, mode='train', edge=False): diff --git a/paddleseg/datasets/dataset.py b/paddleseg/datasets/dataset.py index 7ac6098693..913f3df10c 100644 --- a/paddleseg/datasets/dataset.py +++ b/paddleseg/datasets/dataset.py @@ -32,17 +32,17 @@ class Dataset(paddle.io.Dataset): transforms (list): Transforms for image. dataset_root (str): The dataset directory. num_classes (int): Number of classes. - mode (str): which part of dataset to use. it is one of ('train', 'val', 'test'). Default: 'train'. - train_path (str): The train dataset file. When mode is 'train', train_path is necessary. + mode (str, optional): which part of dataset to use. it is one of ('train', 'val', 'test'). Default: 'train'. + train_path (str, optional): The train dataset file. When mode is 'train', train_path is necessary. The contents of train_path file are as follow: image1.jpg ground_truth1.png image2.jpg ground_truth2.png - val_path (str): The evaluation dataset file. When mode is 'val', val_path is necessary. + val_path (str. optional): The evaluation dataset file. When mode is 'val', val_path is necessary. The contents is the same as train_path - test_path (str): The test dataset file. When mode is 'test', test_path is necessary. + test_path (str, optional): The test dataset file. When mode is 'test', test_path is necessary. The annotation file is not necessary in test_path file. - separator (str): The separator of dataset list. Default: ' '. - edge (bool): Whether to compute edge while training. Default: False + separator (str, optional): The separator of dataset list. Default: ' '. 
+ edge (bool, optional): Whether to compute edge while training. Default: False Examples: diff --git a/paddleseg/datasets/optic_disc_seg.py b/paddleseg/datasets/optic_disc_seg.py index cad32a3307..12f8bafd3c 100644 --- a/paddleseg/datasets/optic_disc_seg.py +++ b/paddleseg/datasets/optic_disc_seg.py @@ -32,8 +32,8 @@ class OpticDiscSeg(Dataset): Args: transforms (list): Transforms for image. dataset_root (str): The dataset directory. Default: None - mode (str): Which part of dataset to use. it is one of ('train', 'val', 'test'). Default: 'train'. - edge (bool): Whether to compute edge while training. Default: False + mode (str, optional): Which part of dataset to use. it is one of ('train', 'val', 'test'). Default: 'train'. + edge (bool, optional): Whether to compute edge while training. Default: False """ def __init__(self, diff --git a/paddleseg/datasets/voc.py b/paddleseg/datasets/voc.py index dc5bf6c8b1..91589b3775 100644 --- a/paddleseg/datasets/voc.py +++ b/paddleseg/datasets/voc.py @@ -32,9 +32,9 @@ class PascalVOC(Dataset): Args: transforms (list): Transforms for image. dataset_root (str): The dataset directory. Default: None - mode (str): Which part of dataset to use. it is one of ('train', 'trainval', 'trainaug', 'val'). + mode (str, optional): Which part of dataset to use. it is one of ('train', 'trainval', 'trainaug', 'val'). If you want to set mode to 'trainaug', please make sure the dataset have been augmented. Default: 'train'. - edge (bool): Whether to compute edge while training. Default: False + edge (bool, optional): Whether to compute edge while training. Default: False """ def __init__(self, transforms, dataset_root=None, mode='train', edge=False): diff --git a/paddleseg/models/backbones/hrnet.py b/paddleseg/models/backbones/hrnet.py index 5de97497e5..40ed660d9d 100644 --- a/paddleseg/models/backbones/hrnet.py +++ b/paddleseg/models/backbones/hrnet.py @@ -37,20 +37,20 @@ class HRNet(nn.Layer): (https://arxiv.org/pdf/1908.07919.pdf). Args: - pretrained (str): The path of pretrained model. - stage1_num_modules (int): Number of modules for stage1. Default 1. - stage1_num_blocks (list): Number of blocks per module for stage1. Default [4]. - stage1_num_channels (list): Number of channels per branch for stage1. Default [64]. - stage2_num_modules (int): Number of modules for stage2. Default 1. - stage2_num_blocks (list): Number of blocks per module for stage2. Default [4, 4] - stage2_num_channels (list): Number of channels per branch for stage2. Default [18, 36]. - stage3_num_modules (int): Number of modules for stage3. Default 4. - stage3_num_blocks (list): Number of blocks per module for stage3. Default [4, 4, 4] - stage3_num_channels (list): Number of channels per branch for stage3. Default [18, 36, 72]. - stage4_num_modules (int): Number of modules for stage4. Default 3. - stage4_num_blocks (list): Number of blocks per module for stage4. Default [4, 4, 4, 4] - stage4_num_channels (list): Number of channels per branch for stage4. Default [18, 36, 72. 144]. - has_se (bool): Whether to use Squeeze-and-Excitation module. Default False. + pretrained (str, optional): The path of pretrained model. + stage1_num_modules (int, optional): Number of modules for stage1. Default 1. + stage1_num_blocks (list, optional): Number of blocks per module for stage1. Default (4). + stage1_num_channels (list, optional): Number of channels per branch for stage1. Default (64). + stage2_num_modules (int, optional): Number of modules for stage2. Default 1. 
+        stage2_num_blocks (list, optional): Number of blocks per module for stage2. Default (4, 4).
+        stage2_num_channels (list, optional): Number of channels per branch for stage2. Default (18, 36).
+        stage3_num_modules (int, optional): Number of modules for stage3. Default 4.
+        stage3_num_blocks (list, optional): Number of blocks per module for stage3. Default (4, 4, 4).
+        stage3_num_channels (list, optional): Number of channels per branch for stage3. Default (18, 36, 72).
+        stage4_num_modules (int, optional): Number of modules for stage4. Default 3.
+        stage4_num_blocks (list, optional): Number of blocks per module for stage4. Default (4, 4, 4, 4).
+        stage4_num_channels (list, optional): Number of channels per branch for stage4. Default (18, 36, 72, 144).
+        has_se (bool, optional): Whether to use Squeeze-and-Excitation module. Default False.
         align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
             e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
     """
@@ -58,17 +58,17 @@ def __init__(self,
                  pretrained=None,
                  stage1_num_modules=1,
-                 stage1_num_blocks=[4],
-                 stage1_num_channels=[64],
+                 stage1_num_blocks=(4, ),
+                 stage1_num_channels=(64, ),
                  stage2_num_modules=1,
-                 stage2_num_blocks=[4, 4],
-                 stage2_num_channels=[18, 36],
+                 stage2_num_blocks=(4, 4),
+                 stage2_num_channels=(18, 36),
                  stage3_num_modules=4,
-                 stage3_num_blocks=[4, 4, 4],
-                 stage3_num_channels=[18, 36, 72],
+                 stage3_num_blocks=(4, 4, 4),
+                 stage3_num_channels=(18, 36, 72),
                  stage4_num_modules=3,
-                 stage4_num_blocks=[4, 4, 4, 4],
-                 stage4_num_channels=[18, 36, 72, 144],
+                 stage4_num_blocks=(4, 4, 4, 4),
+                 stage4_num_channels=(18, 36, 72, 144),
                  has_se=False,
                  align_corners=False):
         super(HRNet, self).__init__()
diff --git a/paddleseg/models/backbones/mobilenetv3.py b/paddleseg/models/backbones/mobilenetv3.py
index b01199264e..7a83f04c00 100644
--- a/paddleseg/models/backbones/mobilenetv3.py
+++ b/paddleseg/models/backbones/mobilenetv3.py
@@ -39,11 +39,25 @@ def make_divisible(v, divisor=8, min_value=None):
 
 
 class MobileNetV3(nn.Layer):
+    """
+    The MobileNetV3 implementation based on PaddlePaddle.
+
+    The original article refers to
+    Andrew Howard, et al. "Searching for MobileNetV3"
+    (https://arxiv.org/pdf/1905.02244.pdf).
+
+    Args:
+        pretrained (str, optional): The path of pretrained model.
+        scale (float, optional): The scale of channels. Default: 1.0.
+        model_name (str, optional): Model name. It determines the type of MobileNetV3. The value is 'small' or 'large'. Default: 'small'.
+        output_stride (int, optional): The stride of output features compared to input images. The value should be one of (2, 4, 8, 16, 32). Default: None.
+
+    """
+
     def __init__(self,
                  pretrained=None,
                  scale=1.0,
                  model_name="small",
-                 class_dim=1000,
                  output_stride=None):
         super(MobileNetV3, self).__init__()
diff --git a/paddleseg/models/backbones/resnet_vd.py b/paddleseg/models/backbones/resnet_vd.py
index f521942e19..068a7e2b00 100644
--- a/paddleseg/models/backbones/resnet_vd.py
+++ b/paddleseg/models/backbones/resnet_vd.py
@@ -176,17 +176,30 @@ def forward(self, inputs):
 
 
 class ResNet_vd(nn.Layer):
+    """
+    The ResNet_vd implementation based on PaddlePaddle.
+
+    The original article refers to
+    Tong He, et al. "Bag of Tricks for Image Classification with Convolutional Neural Networks"
+    (https://arxiv.org/pdf/1812.01187.pdf).
+
+    Args:
+        layers (int, optional): The layers of ResNet_vd. The supported layers are (18, 34, 50, 101, 152, 200). Default: 50.
+        output_stride (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 8.
+        multi_grid (tuple|list, optional): The grid of stage4. Default: (1, 1, 1).
+        pretrained (str, optional): The path of pretrained model.
+
+    """
+
     def __init__(self,
                  layers=50,
-                 output_stride=None,
+                 output_stride=8,
                  multi_grid=(1, 1, 1),
-                 lr_mult_list=(0.1, 0.1, 0.2, 0.2),
                  pretrained=None):
         super(ResNet_vd, self).__init__()
 
         self.conv1_logit = None  # for gscnn shape stream
         self.layers = layers
-        self.lr_mult_list = lr_mult_list
         supported_layers = [18, 34, 50, 101, 152, 200]
         assert layers in supported_layers, \
             "supported layers are {} but input layer is {}".format(
@@ -314,22 +327,6 @@ def forward(self, inputs):
     def init_weight(self):
         utils.load_pretrained_model(self, self.pretrained)
 
-        # for idx, stage in enumerate(self.stage_list):
-        #     for layer in stage:
-        #         for sublayer in layer.sublayers():
-        #             if isinstance(sublayer, nn.Conv2D):
-        #                 sublayer.weight.optimize_attr[
-        #                     'learning_rate'] = self.lr_mult_list[idx]
-        #                 if sublayer.bias:
-        #                     sublayer.bias.optimize_attr[
-        #                         'learning_rate'] = self.lr_mult_list[idx]
-
-        #             if isinstance(sublayer, nn.SyncBatchNorm):
-        #                 sublayer.weight.optimize_attr[
-        #                     'learning_rate'] = self.lr_mult_list[idx]
-        #                 sublayer.bias.optimize_attr[
-        #                     'learning_rate'] = self.lr_mult_list[idx]
-
 
 @manager.BACKBONES.add_component
 def ResNet18_vd(**args):
diff --git a/paddleseg/models/backbones/xception_deeplab.py b/paddleseg/models/backbones/xception_deeplab.py
index ad42e993c3..b83caec51d 100644
--- a/paddleseg/models/backbones/xception_deeplab.py
+++ b/paddleseg/models/backbones/xception_deeplab.py
@@ -247,14 +247,21 @@ def forward(self, inputs):
 
 
 class XceptionDeeplab(nn.Layer):
+    """
+    The Xception backbone of the DeepLabv3+ implementation based on PaddlePaddle.
 
-    # def __init__(self, backbone, class_dim=1000):
-    # add output_stride
-    def __init__(self,
-                 backbone,
-                 pretrained=None,
-                 output_stride=16,
-                 class_dim=1000):
+    The original article refers to
+    Liang-Chieh Chen, et al. "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation"
+    (https://arxiv.org/abs/1802.02611)
+
+    Args:
+        backbone (str): Which type of Xception_DeepLab to select. It should be one of ('xception_41', 'xception_65', 'xception_71').
+        pretrained (str, optional): The path of pretrained model.
+        output_stride (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 16.
+ + """ + + def __init__(self, backbone, pretrained=None, output_stride=16): super(XceptionDeeplab, self).__init__() diff --git a/paddleseg/models/u2net.py b/paddleseg/models/u2net.py index 83d017c4a5..50dd3d94cb 100644 --- a/paddleseg/models/u2net.py +++ b/paddleseg/models/u2net.py @@ -6,64 +6,66 @@ __all__ = ['U2Net', 'U2Netp'] + class REBNCONV(nn.Layer): - def __init__(self,in_ch=3,out_ch=3,dirate=1): - super(REBNCONV,self).__init__() + def __init__(self, in_ch=3, out_ch=3, dirate=1): + super(REBNCONV, self).__init__() - self.conv_s1 = nn.Conv2D(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate) + self.conv_s1 = nn.Conv2D( + in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate) self.bn_s1 = nn.BatchNorm2D(out_ch) self.relu_s1 = nn.ReLU() - def forward(self,x): + def forward(self, x): hx = x xout = self.relu_s1(self.bn_s1(self.conv_s1(hx))) return xout + ## upsample tensor 'src' to have the same spatial size with tensor 'tar' -def _upsample_like(src,tar): +def _upsample_like(src, tar): - src = F.upsample(src,size=tar.shape[2:],mode='bilinear') + src = F.upsample(src, size=tar.shape[2:], mode='bilinear') return src ### RSU-7 ### -class RSU7(nn.Layer):#UNet07DRES(nn.Layer): - +class RSU7(nn.Layer): #UNet07DRES(nn.Layer): def __init__(self, in_ch=3, mid_ch=12, out_ch=3): - super(RSU7,self).__init__() + super(RSU7, self).__init__() - self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1) + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) - self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1) - self.pool1 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1) - self.pool2 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1) - self.pool3 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1) - self.pool4 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool4 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1) - self.pool5 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool5 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1) + self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1) - self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2) + self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2) - self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1) + self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) - def forward(self,x): + def forward(self, x): hx = x hxin = self.rebnconvin(hx) @@ -87,56 +89,56 @@ def 
forward(self,x): hx7 = self.rebnconv7(hx6) - hx6d = self.rebnconv6d(paddle.concat((hx7,hx6),1)) - hx6dup = _upsample_like(hx6d,hx5) + hx6d = self.rebnconv6d(paddle.concat((hx7, hx6), 1)) + hx6dup = _upsample_like(hx6d, hx5) - hx5d = self.rebnconv5d(paddle.concat((hx6dup,hx5),1)) - hx5dup = _upsample_like(hx5d,hx4) + hx5d = self.rebnconv5d(paddle.concat((hx6dup, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) - hx4d = self.rebnconv4d(paddle.concat((hx5dup,hx4),1)) - hx4dup = _upsample_like(hx4d,hx3) + hx4d = self.rebnconv4d(paddle.concat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) - hx3d = self.rebnconv3d(paddle.concat((hx4dup,hx3),1)) - hx3dup = _upsample_like(hx3d,hx2) + hx3d = self.rebnconv3d(paddle.concat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) - hx2d = self.rebnconv2d(paddle.concat((hx3dup,hx2),1)) - hx2dup = _upsample_like(hx2d,hx1) + hx2d = self.rebnconv2d(paddle.concat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) - hx1d = self.rebnconv1d(paddle.concat((hx2dup,hx1),1)) + hx1d = self.rebnconv1d(paddle.concat((hx2dup, hx1), 1)) return hx1d + hxin -### RSU-6 ### -class RSU6(nn.Layer):#UNet06DRES(nn.Layer): +### RSU-6 ### +class RSU6(nn.Layer): #UNet06DRES(nn.Layer): def __init__(self, in_ch=3, mid_ch=12, out_ch=3): - super(RSU6,self).__init__() + super(RSU6, self).__init__() - self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1) + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) - self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1) - self.pool1 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1) - self.pool2 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1) - self.pool3 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1) - self.pool4 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool4 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1) + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) - self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2) + self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2) - self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1) + self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) - def forward(self,x): + def forward(self, x): hx = x @@ -158,50 +160,49 @@ def forward(self,x): hx6 = self.rebnconv6(hx5) + hx5d = self.rebnconv5d(paddle.concat((hx6, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) - hx5d = self.rebnconv5d(paddle.concat((hx6,hx5),1)) - hx5dup = _upsample_like(hx5d,hx4) - - hx4d = self.rebnconv4d(paddle.concat((hx5dup,hx4),1)) - hx4dup = _upsample_like(hx4d,hx3) + hx4d = self.rebnconv4d(paddle.concat((hx5dup, hx4), 1)) + 
hx4dup = _upsample_like(hx4d, hx3) - hx3d = self.rebnconv3d(paddle.concat((hx4dup,hx3),1)) - hx3dup = _upsample_like(hx3d,hx2) + hx3d = self.rebnconv3d(paddle.concat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) - hx2d = self.rebnconv2d(paddle.concat((hx3dup,hx2),1)) - hx2dup = _upsample_like(hx2d,hx1) + hx2d = self.rebnconv2d(paddle.concat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) - hx1d = self.rebnconv1d(paddle.concat((hx2dup,hx1),1)) + hx1d = self.rebnconv1d(paddle.concat((hx2dup, hx1), 1)) return hx1d + hxin -### RSU-5 ### -class RSU5(nn.Layer):#UNet05DRES(nn.Layer): +### RSU-5 ### +class RSU5(nn.Layer): #UNet05DRES(nn.Layer): def __init__(self, in_ch=3, mid_ch=12, out_ch=3): - super(RSU5,self).__init__() + super(RSU5, self).__init__() - self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1) + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) - self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1) - self.pool1 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1) - self.pool2 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1) - self.pool3 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1) + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) - self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2) + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2) - self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1) - self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1) + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) - def forward(self,x): + def forward(self, x): hx = x @@ -220,42 +221,42 @@ def forward(self,x): hx5 = self.rebnconv5(hx4) - hx4d = self.rebnconv4d(paddle.concat((hx5,hx4),1)) - hx4dup = _upsample_like(hx4d,hx3) + hx4d = self.rebnconv4d(paddle.concat((hx5, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) - hx3d = self.rebnconv3d(paddle.concat((hx4dup,hx3),1)) - hx3dup = _upsample_like(hx3d,hx2) + hx3d = self.rebnconv3d(paddle.concat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) - hx2d = self.rebnconv2d(paddle.concat((hx3dup,hx2),1)) - hx2dup = _upsample_like(hx2d,hx1) + hx2d = self.rebnconv2d(paddle.concat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) - hx1d = self.rebnconv1d(paddle.concat((hx2dup,hx1),1)) + hx1d = self.rebnconv1d(paddle.concat((hx2dup, hx1), 1)) return hx1d + hxin -### RSU-4 ### -class RSU4(nn.Layer):#UNet04DRES(nn.Layer): +### RSU-4 ### +class RSU4(nn.Layer): #UNet04DRES(nn.Layer): def __init__(self, in_ch=3, mid_ch=12, out_ch=3): - super(RSU4,self).__init__() + super(RSU4, self).__init__() - self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1) + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) - self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1) - self.pool1 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2D(2, stride=2, 
ceil_mode=True)
 
-        self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
-        self.pool2 = nn.MaxPool2D(2,stride=2,ceil_mode=True)
+        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
+        self.pool2 = nn.MaxPool2D(2, stride=2, ceil_mode=True)
 
-        self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
+        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
 
-        self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)
+        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2)
 
-        self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
-        self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
-        self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
+        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
+        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
+        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)
 
-    def forward(self,x):
+    def forward(self, x):
 
         hx = x
@@ -271,35 +272,35 @@ def forward(self,x):
 
         hx4 = self.rebnconv4(hx3)
 
-        hx3d = self.rebnconv3d(paddle.concat((hx4,hx3),1))
-        hx3dup = _upsample_like(hx3d,hx2)
+        hx3d = self.rebnconv3d(paddle.concat((hx4, hx3), 1))
+        hx3dup = _upsample_like(hx3d, hx2)
 
-        hx2d = self.rebnconv2d(paddle.concat((hx3dup,hx2),1))
-        hx2dup = _upsample_like(hx2d,hx1)
+        hx2d = self.rebnconv2d(paddle.concat((hx3dup, hx2), 1))
+        hx2dup = _upsample_like(hx2d, hx1)
 
-        hx1d = self.rebnconv1d(paddle.concat((hx2dup,hx1),1))
+        hx1d = self.rebnconv1d(paddle.concat((hx2dup, hx1), 1))
 
         return hx1d + hxin
 
-### RSU-4F ###
-class RSU4F(nn.Layer):#UNet04FRES(nn.Layer):
+
+### RSU-4F ###
+class RSU4F(nn.Layer):  #UNet04FRES(nn.Layer):
     def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
-        super(RSU4F,self).__init__()
+        super(RSU4F, self).__init__()
 
-        self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
+        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
 
-        self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
-        self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)
-        self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)
+        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
+        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2)
+        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4)
 
-        self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)
+        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8)
 
-        self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)
-        self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)
-        self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
+        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4)
+        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2)
+        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)
 
-    def forward(self,x):
+    def forward(self, x):
 
         hx = x
@@ -311,9 +312,9 @@ def forward(self,x):
 
         hx4 = self.rebnconv4(hx3)
 
-        hx3d = self.rebnconv3d(paddle.concat((hx4,hx3),1))
-        hx2d = self.rebnconv2d(paddle.concat((hx3d,hx2),1))
-        hx1d = self.rebnconv1d(paddle.concat((hx2d,hx1),1))
+        hx3d = self.rebnconv3d(paddle.concat((hx4, hx3), 1))
+        hx2d = self.rebnconv2d(paddle.concat((hx3d, hx2), 1))
+        hx1d = self.rebnconv1d(paddle.concat((hx2d, hx1), 1))
 
         return hx1d + hxin
 
@@ -321,46 +322,60 @@ def forward(self,x):
 ##### U^2-Net ####
 @manager.MODELS.add_component
 class U2Net(nn.Layer):
-    def __init__(self,in_ch=3,num_classes=1,pretrained=None):
-        super(U2Net,self).__init__()
+    """
+    The U^2-Net implementation based on PaddlePaddle.
+
+    The original article refers to
+    Xuebin Qin, et al. "U^2-Net: Going Deeper with Nested U-Structure for Salient Object Detection"
+    (https://arxiv.org/abs/2005.09007).
+
+    Args:
+        num_classes (int): The unique number of target classes.
+        in_ch (int, optional): Input channels. Default: 3.
+ pretrained (str, optional): The path or url of pretrained model for fine tuning. Default: None. - self.stage1 = RSU7(in_ch,32,64) - self.pool12 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + """ - self.stage2 = RSU6(64,32,128) - self.pool23 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + def __init__(self, num_classes, in_ch=3, pretrained=None): + super(U2Net, self).__init__() - self.stage3 = RSU5(128,64,256) - self.pool34 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.stage1 = RSU7(in_ch, 32, 64) + self.pool12 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.stage4 = RSU4(256,128,512) - self.pool45 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.stage2 = RSU6(64, 32, 128) + self.pool23 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.stage5 = RSU4F(512,256,512) - self.pool56 = nn.MaxPool2D(2,stride=2,ceil_mode=True) + self.stage3 = RSU5(128, 64, 256) + self.pool34 = nn.MaxPool2D(2, stride=2, ceil_mode=True) - self.stage6 = RSU4F(512,256,512) + self.stage4 = RSU4(256, 128, 512) + self.pool45 = nn.MaxPool2D(2, stride=2, ceil_mode=True) + + self.stage5 = RSU4F(512, 256, 512) + self.pool56 = nn.MaxPool2D(2, stride=2, ceil_mode=True) + + self.stage6 = RSU4F(512, 256, 512) # decoder - self.stage5d = RSU4F(1024,256,512) - self.stage4d = RSU4(1024,128,256) - self.stage3d = RSU5(512,64,128) - self.stage2d = RSU6(256,32,64) - self.stage1d = RSU7(128,16,64) + self.stage5d = RSU4F(1024, 256, 512) + self.stage4d = RSU4(1024, 128, 256) + self.stage3d = RSU5(512, 64, 128) + self.stage2d = RSU6(256, 32, 64) + self.stage1d = RSU7(128, 16, 64) - self.side1 = nn.Conv2D(64,num_classes,3,padding=1) - self.side2 = nn.Conv2D(64,num_classes,3,padding=1) - self.side3 = nn.Conv2D(128,num_classes,3,padding=1) - self.side4 = nn.Conv2D(256,num_classes,3,padding=1) - self.side5 = nn.Conv2D(512,num_classes,3,padding=1) - self.side6 = nn.Conv2D(512,num_classes,3,padding=1) + self.side1 = nn.Conv2D(64, num_classes, 3, padding=1) + self.side2 = nn.Conv2D(64, num_classes, 3, padding=1) + self.side3 = nn.Conv2D(128, num_classes, 3, padding=1) + self.side4 = nn.Conv2D(256, num_classes, 3, padding=1) + self.side5 = nn.Conv2D(512, num_classes, 3, padding=1) + self.side6 = nn.Conv2D(512, num_classes, 3, padding=1) - self.outconv = nn.Conv2D(6*num_classes,num_classes,1) + self.outconv = nn.Conv2D(6 * num_classes, num_classes, 1) self.pretrained = pretrained self.init_weight() - - def forward(self,x): + + def forward(self, x): hx = x @@ -386,93 +401,95 @@ def forward(self,x): #stage 6 hx6 = self.stage6(hx) - hx6up = _upsample_like(hx6,hx5) + hx6up = _upsample_like(hx6, hx5) #-------------------- decoder -------------------- - hx5d = self.stage5d(paddle.concat((hx6up,hx5),1)) - hx5dup = _upsample_like(hx5d,hx4) + hx5d = self.stage5d(paddle.concat((hx6up, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) - hx4d = self.stage4d(paddle.concat((hx5dup,hx4),1)) - hx4dup = _upsample_like(hx4d,hx3) + hx4d = self.stage4d(paddle.concat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) - hx3d = self.stage3d(paddle.concat((hx4dup,hx3),1)) - hx3dup = _upsample_like(hx3d,hx2) + hx3d = self.stage3d(paddle.concat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) - hx2d = self.stage2d(paddle.concat((hx3dup,hx2),1)) - hx2dup = _upsample_like(hx2d,hx1) - - hx1d = self.stage1d(paddle.concat((hx2dup,hx1),1)) + hx2d = self.stage2d(paddle.concat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + hx1d = self.stage1d(paddle.concat((hx2dup, hx1), 1)) #side output d1 = self.side1(hx1d) d2 = self.side2(hx2d) - d2 = _upsample_like(d2,d1) + 
d2 = _upsample_like(d2, d1)

         d3 = self.side3(hx3d)
-        d3 = _upsample_like(d3,d1)
+        d3 = _upsample_like(d3, d1)

         d4 = self.side4(hx4d)
-        d4 = _upsample_like(d4,d1)
+        d4 = _upsample_like(d4, d1)

         d5 = self.side5(hx5d)
-        d5 = _upsample_like(d5,d1)
+        d5 = _upsample_like(d5, d1)

         d6 = self.side6(hx6)
-        d6 = _upsample_like(d6,d1)
+        d6 = _upsample_like(d6, d1)

-        d0 = self.outconv(paddle.concat((d1,d2,d3,d4,d5,d6),1))
+        d0 = self.outconv(paddle.concat((d1, d2, d3, d4, d5, d6), 1))

-        return [d0,d1,d2,d3,d4,d5,d6]
+        return [d0, d1, d2, d3, d4, d5, d6]

     def init_weight(self):
         if self.pretrained is not None:
             utils.load_entire_model(self, self.pretrained)
+
 ### U^2-Net small ###
 @manager.MODELS.add_component
 class U2Netp(nn.Layer):
-    def __init__(self,in_ch=3,num_classes=1,pretrained=None):
-        super(U2Netp,self).__init__()
+    """Please refer to U2Net above."""
+
+    def __init__(self, num_classes, in_ch=3, pretrained=None):
+        super(U2Netp, self).__init__()

-        self.stage1 = RSU7(in_ch,16,64)
-        self.pool12 = nn.MaxPool2D(2,stride=2,ceil_mode=True)
+        self.stage1 = RSU7(in_ch, 16, 64)
+        self.pool12 = nn.MaxPool2D(2, stride=2, ceil_mode=True)

-        self.stage2 = RSU6(64,16,64)
-        self.pool23 = nn.MaxPool2D(2,stride=2,ceil_mode=True)
+        self.stage2 = RSU6(64, 16, 64)
+        self.pool23 = nn.MaxPool2D(2, stride=2, ceil_mode=True)

-        self.stage3 = RSU5(64,16,64)
-        self.pool34 = nn.MaxPool2D(2,stride=2,ceil_mode=True)
+        self.stage3 = RSU5(64, 16, 64)
+        self.pool34 = nn.MaxPool2D(2, stride=2, ceil_mode=True)

-        self.stage4 = RSU4(64,16,64)
-        self.pool45 = nn.MaxPool2D(2,stride=2,ceil_mode=True)
+        self.stage4 = RSU4(64, 16, 64)
+        self.pool45 = nn.MaxPool2D(2, stride=2, ceil_mode=True)

-        self.stage5 = RSU4F(64,16,64)
-        self.pool56 = nn.MaxPool2D(2,stride=2,ceil_mode=True)
+        self.stage5 = RSU4F(64, 16, 64)
+        self.pool56 = nn.MaxPool2D(2, stride=2, ceil_mode=True)

-        self.stage6 = RSU4F(64,16,64)
+        self.stage6 = RSU4F(64, 16, 64)

         # decoder
-        self.stage5d = RSU4F(128,16,64)
-        self.stage4d = RSU4(128,16,64)
-        self.stage3d = RSU5(128,16,64)
-        self.stage2d = RSU6(128,16,64)
-        self.stage1d = RSU7(128,16,64)
+        self.stage5d = RSU4F(128, 16, 64)
+        self.stage4d = RSU4(128, 16, 64)
+        self.stage3d = RSU5(128, 16, 64)
+        self.stage2d = RSU6(128, 16, 64)
+        self.stage1d = RSU7(128, 16, 64)

-        self.side1 = nn.Conv2D(64,num_classes,3,padding=1)
-        self.side2 = nn.Conv2D(64,num_classes,3,padding=1)
-        self.side3 = nn.Conv2D(64,num_classes,3,padding=1)
-        self.side4 = nn.Conv2D(64,num_classes,3,padding=1)
-        self.side5 = nn.Conv2D(64,num_classes,3,padding=1)
-        self.side6 = nn.Conv2D(64,num_classes,3,padding=1)
+        self.side1 = nn.Conv2D(64, num_classes, 3, padding=1)
+        self.side2 = nn.Conv2D(64, num_classes, 3, padding=1)
+        self.side3 = nn.Conv2D(64, num_classes, 3, padding=1)
+        self.side4 = nn.Conv2D(64, num_classes, 3, padding=1)
+        self.side5 = nn.Conv2D(64, num_classes, 3, padding=1)
+        self.side6 = nn.Conv2D(64, num_classes, 3, padding=1)

-        self.outconv = nn.Conv2D(6*num_classes,num_classes,1)
+        self.outconv = nn.Conv2D(6 * num_classes, num_classes, 1)

         self.pretrained = pretrained
         self.init_weight()

-    def forward(self,x):
+    def forward(self, x):

         hx = x
@@ -498,46 +515,45 @@ def forward(self,x):
         #stage 6
         hx6 = self.stage6(hx)
-        hx6up = _upsample_like(hx6,hx5)
+        hx6up = _upsample_like(hx6, hx5)

         #decoder
-        hx5d = self.stage5d(paddle.concat((hx6up,hx5),1))
-        hx5dup = _upsample_like(hx5d,hx4)
+        hx5d = self.stage5d(paddle.concat((hx6up, hx5), 1))
+        hx5dup = _upsample_like(hx5d, hx4)

-        hx4d = self.stage4d(paddle.concat((hx5dup,hx4),1))
-        hx4dup = _upsample_like(hx4d,hx3)
+        hx4d = self.stage4d(paddle.concat((hx5dup, hx4), 1))
+        hx4dup = _upsample_like(hx4d, hx3)

-        hx3d = self.stage3d(paddle.concat((hx4dup,hx3),1))
-        hx3dup = _upsample_like(hx3d,hx2)
+        hx3d = self.stage3d(paddle.concat((hx4dup, hx3), 1))
+        hx3dup = _upsample_like(hx3d, hx2)

-        hx2d = self.stage2d(paddle.concat((hx3dup,hx2),1))
-        hx2dup = _upsample_like(hx2d,hx1)
-
-        hx1d = self.stage1d(paddle.concat((hx2dup,hx1),1))
+        hx2d = self.stage2d(paddle.concat((hx3dup, hx2), 1))
+        hx2dup = _upsample_like(hx2d, hx1)
+        hx1d = self.stage1d(paddle.concat((hx2dup, hx1), 1))

         #side output
         d1 = self.side1(hx1d)

         d2 = self.side2(hx2d)
-        d2 = _upsample_like(d2,d1)
+        d2 = _upsample_like(d2, d1)

         d3 = self.side3(hx3d)
-        d3 = _upsample_like(d3,d1)
+        d3 = _upsample_like(d3, d1)

         d4 = self.side4(hx4d)
-        d4 = _upsample_like(d4,d1)
+        d4 = _upsample_like(d4, d1)

         d5 = self.side5(hx5d)
-        d5 = _upsample_like(d5,d1)
+        d5 = _upsample_like(d5, d1)

         d6 = self.side6(hx6)
-        d6 = _upsample_like(d6,d1)
+        d6 = _upsample_like(d6, d1)

-        d0 = self.outconv(paddle.concat((d1,d2,d3,d4,d5,d6),1))
+        d0 = self.outconv(paddle.concat((d1, d2, d3, d4, d5, d6), 1))

-        return [d0,d1,d2,d3,d4,d5,d6]
+        return [d0, d1, d2, d3, d4, d5, d6]

     def init_weight(self):
         if self.pretrained is not None:
-            utils.load_entire_model(self, self.pretrained)
\ No newline at end of file
+            utils.load_entire_model(self, self.pretrained)

From e488c14ea2efd32964cd00476d4158ffbce9e3c9 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Wed, 16 Dec 2020 21:26:33 +0800
Subject: [PATCH 022/210] add unet++

---
 docs/apis/models.md | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/docs/apis/models.md b/docs/apis/models.md
index 54b5138afd..51e63d1a34 100644
--- a/docs/apis/models.md
+++ b/docs/apis/models.md
@@ -17,6 +17,7 @@ The models subpackage contains the following model for image sementic segmentaio
 - [U2Net](#U2Net)
 - [U2Net+](#U2Net+)
 - [AttentionUNet](#AttentionUNet)
+- [UNet++](#UNet++)

 ## [DeepLabV3+](../../paddleseg/models/deeplab.py)
 > CLASS paddleseg.models.DeepLabV3P(num_classes, backbone, backbone_indices=(0, 3), aspp_ratios=(1, 6, 12, 18), aspp_out_channels=256, align_corners=False, pretrained=None)
@@ -377,3 +378,27 @@ The models subpackage contains the following model for image sementic segmentaio
 > > Args
 > > > - **num_classes** (int): The unique number of target classes.
 > > > - **pretrained** (str, optional): The path or url of pretrained model. Default: None.
+
+## [UNet++](../../paddleseg/models/unet_plusplus.py)
+> class UNetPlusPlus(in_channels,
+                     num_classes,
+                     use_deconv=False,
+                     align_corners=False,
+                     pretrained=None,
+                     is_ds=True)
+
+    The UNet++ implementation based on PaddlePaddle.
+
+    The original article refers to
+    Zongwei Zhou, et al. "UNet++: A Nested U-Net Architecture for Medical Image Segmentation"
+    (https://arxiv.org/abs/1807.10165).
+
+> > Args
+> > > - **in_channels** (int): The channel number of the input image.
+> > > - **num_classes** (int): The unique number of target classes.
+> > > - **use_deconv** (bool, optional): A bool value that indicates whether to use deconvolution in upsampling.
+    If False, use resize_bilinear. Default: False.
+> > > - **align_corners** (bool): An argument of F.interpolate. It should be set to False when the output size of the feature
+    is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+> > > - **pretrained** (str, optional): The path or url of pretrained model for fine tuning. Default: None.
+> > > - **is_ds** (bool): Whether to use deep supervision. Default: True
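
[Editor's note] Since the UNet++ section added above only lists the constructor arguments, a short usage sketch may help. This is not part of the patch: the `paddleseg.models` import path is assumed from the `CLASS paddleseg.models.*` convention used elsewhere in this file, and the input shape is an arbitrary example.

```python
import paddle
from paddleseg.models import UNetPlusPlus  # import path assumed from this doc's conventions

# Build UNet++ for 3-channel images and 2 target classes.
model = UNetPlusPlus(in_channels=3, num_classes=2)

x = paddle.rand([1, 3, 512, 512])  # NCHW input
logit_list = model(x)  # with is_ds=True, one logit tensor per supervision branch is expected
print([l.shape for l in logit_list])
```
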
From 063e3a887ede42cf8fd582dc3593aa07e9b4b1c2 Mon Sep 17 00:00:00 2001
From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com>
Date: Wed, 16 Dec 2020 21:37:43 +0800
Subject: [PATCH 023/210] Update models.md

---
 docs/apis/models.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/apis/models.md b/docs/apis/models.md
index 51e63d1a34..4511f7e3bd 100644
--- a/docs/apis/models.md
+++ b/docs/apis/models.md
@@ -1,8 +1,8 @@
 # paddleseg.models
 The models subpackage contains the following model for image sementic segmentaio.
-- [DeepLabV3+](#DeepLabV3+)
-- [DeepLabV3](#DeepLabV3)
+- [DeepLabV3+](#DeepLabV3)
+- [DeepLabV3](#DeepLabV3-1)
 - [FCN](#FCN)
 - [OCRNet](#OCRNet)
 - [PSPNet](#PSPNet)
@@ -14,10 +14,10 @@ The models subpackage contains the following model for image sementic segmentaio
 - [GSCNN](#GSCNN)
 - [HarDNet](#HarDNet)
 - [UNet](#UNet)
-- [U2Net](#U2Net)
-- [U2Net+](#U2Net+)
+- [U2Net](#U2Net)
+- [U2Net+](#U2Net-1)
 - [AttentionUNet](#AttentionUNet)
-- [UNet++](#UNet++)
+- [UNet++](#UNet-1)

 ## [DeepLabV3+](../../paddleseg/models/deeplab.py)
 > CLASS paddleseg.models.DeepLabV3P(num_classes, backbone, backbone_indices=(0, 3), aspp_ratios=(1, 6, 12, 18), aspp_out_channels=256, align_corners=False, pretrained=None)

From a58530ce9bac4d9566f15ee645f7945537af47a2 Mon Sep 17 00:00:00 2001
From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com>
Date: Wed, 16 Dec 2020 21:39:08 +0800
Subject: [PATCH 024/210] Update backbones.md

---
 docs/apis/backbones.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/apis/backbones.md b/docs/apis/backbones.md
index 632ffac587..e7eaeebc86 100644
--- a/docs/apis/backbones.md
+++ b/docs/apis/backbones.md
@@ -5,7 +5,7 @@ The models subpackage contains backbones extracting features for sementic segmen
 - [ResNet_vd](#ResNet_vd)
 - [HRNet](#HRNet)
 - [MobileNetV3](#MobileNetV3)
-- [Xception_deeplab](Xception_deeplab)
+- [XceptionDeeplab](XceptionDeeplab)

 ## [ResNet_vd](../../paddleseg/models/backbones/resnet_vd.py)

From f5712fc192c2ce717d1dc8634d0685ba773a9ee6 Mon Sep 17 00:00:00 2001
From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com>
Date: Wed, 16 Dec 2020 21:41:47 +0800
Subject: [PATCH 025/210] Update backbones.md

---
 docs/apis/backbones.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/apis/backbones.md b/docs/apis/backbones.md
index e7eaeebc86..45b863b305 100644
--- a/docs/apis/backbones.md
+++ b/docs/apis/backbones.md
@@ -5,7 +5,7 @@ The models subpackage contains backbones extracting features for sementic segmen
 - [ResNet_vd](#ResNet_vd)
 - [HRNet](#HRNet)
 - [MobileNetV3](#MobileNetV3)
-- [XceptionDeeplab](XceptionDeeplab)
+- [XceptionDeeplab](#xceptiondeeplab)

 ## [ResNet_vd](../../paddleseg/models/backbones/resnet_vd.py)

From fb09b9c21c53deceb090ad642cfb96a991ad561a Mon Sep 17 00:00:00 2001
From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com>
Date: Wed, 16 Dec 2020 21:44:03 +0800
Subject: [PATCH 026/210] Update cvlibs.md

---
 docs/apis/cvlibs.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/apis/cvlibs.md b/docs/apis/cvlibs.md
index fb73819bc0..9ea3ad9343 100644
--- a/docs/apis/cvlibs.md
+++ b/docs/apis/cvlibs.md
@@ -1,7 +1,7 @@
 # paddleseg.cvlibs

 - [Manager](#Manager)
-- [Parameters Initialization](#Parameters Initialization)
+- [Parameters Initialization](#parameters-initialization)
 - [Config](#Config)

From fb09b9c21c53deceb090ad642cfb96a991ad561a Mon Sep 17 00:00:00 2001
From: wuyefeilin
<30919197+wuyefeilin@users.noreply.github.com> Date: Wed, 16 Dec 2020 21:45:11 +0800 Subject: [PATCH 027/210] Update datasets.md --- docs/apis/datasets.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/apis/datasets.md b/docs/apis/datasets.md index 1ad6917dda..bef2aa7a87 100644 --- a/docs/apis/datasets.md +++ b/docs/apis/datasets.md @@ -1,5 +1,5 @@ # paddleseg.datasets -- [Custom Dataset](#Custom) +- [Custom Dataset](#custom-dataset) - [Cityscapes](#Cityscapes) - [PascalVOC](#PascalVOC) - [ADE20K](#ADE20K) From 751193a6092f344c014ee276e73b8b162de294ac Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 18 Dec 2020 10:30:13 +0800 Subject: [PATCH 028/210] add api tutorial --- README.md | 1 + README_CN.md | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index ae4bdbfe78..0513545a76 100644 --- a/README.md +++ b/README.md @@ -80,6 +80,7 @@ python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml ## Tutorials * [Get Started](./docs/quick_start.md) +* [API Tutorial](https://aistudio.baidu.com/aistudio/projectdetail/1339458) * [Data Preparation](./docs/data_prepare.md) * [Training Configuration](./configs/) * [Add New Components](./docs/add_new_model.md) diff --git a/README_CN.md b/README_CN.md index 522d21041b..33211a0dea 100644 --- a/README_CN.md +++ b/README_CN.md @@ -74,6 +74,7 @@ python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml ## 使用教程 * [快速入门](./docs/quick_start.md) +* [API使用教程](https://aistudio.baidu.com/aistudio/projectdetail/1339458) * [数据集准备](./docs/data_prepare.md) * [配置项](./configs/) * [Add New Components](./docs/add_new_model.md) From 8b25d0672e84ef07e797aaea5bf17337d9ff17b9 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 18 Dec 2020 11:58:35 +0800 Subject: [PATCH 029/210] fix bce loss bug --- ...gscnn_resnet50_os8_cityscapes_1024x512_80k.yml | 1 + paddleseg/core/train.py | 3 ++- paddleseg/models/losses/__init__.py | 2 +- .../models/losses/binary_cross_entroy_loss.py | 15 +++++++++++++-- ...{dual_task_loss.py => gscnn_dual_task_loss.py} | 0 5 files changed, 17 insertions(+), 4 deletions(-) rename paddleseg/models/losses/{dual_task_loss.py => gscnn_dual_task_loss.py} (100%) diff --git a/configs/gscnn/gscnn_resnet50_os8_cityscapes_1024x512_80k.yml b/configs/gscnn/gscnn_resnet50_os8_cityscapes_1024x512_80k.yml index eacde21a0f..35a5f32c3a 100644 --- a/configs/gscnn/gscnn_resnet50_os8_cityscapes_1024x512_80k.yml +++ b/configs/gscnn/gscnn_resnet50_os8_cityscapes_1024x512_80k.yml @@ -22,6 +22,7 @@ loss: - type: CrossEntropyLoss - type: EdgeAttentionLoss - type: BCELoss + edge_label: True - type: DualTaskLoss coef: [1, 1, 20, 1] diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index d3cc4b42af..06d003caf9 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -39,7 +39,8 @@ def loss_computation(logits_list, labels, losses, edges=None): loss_i = losses['types'][i] # Whether to use edges as labels According to loss type . 
if loss_i.__class__.__name__ in ('BCELoss', ): - labels = edges + if loss_i.edge_label: + labels = edges loss += losses['coef'][i] * loss_i(logits, labels) return loss diff --git a/paddleseg/models/losses/__init__.py b/paddleseg/models/losses/__init__.py index 110ff3fe02..c62b560770 100644 --- a/paddleseg/models/losses/__init__.py +++ b/paddleseg/models/losses/__init__.py @@ -14,7 +14,7 @@ from .cross_entroy_loss import CrossEntropyLoss from .binary_cross_entroy_loss import BCELoss -from .dual_task_loss import DualTaskLoss +from .gscnn_dual_task_loss import DualTaskLoss from .edge_attention_loss import EdgeAttentionLoss from .bootstrapped_cross_entropy import BootstrappedCrossEntropyLoss from .dice_loss import DiceLoss diff --git a/paddleseg/models/losses/binary_cross_entroy_loss.py b/paddleseg/models/losses/binary_cross_entroy_loss.py index 49e11f57f0..cb1f2ce8d8 100644 --- a/paddleseg/models/losses/binary_cross_entroy_loss.py +++ b/paddleseg/models/losses/binary_cross_entroy_loss.py @@ -58,8 +58,9 @@ class BCELoss(nn.Layer): pos_weight (float|str, optional): A weight of positive examples. If type is str, it should equal to 'dynamic'. It will compute weight dynamically in every step. Default is ``'None'``. - ignore_index (int64): Specifies a target value that is ignored + ignore_index (int64, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Default ``255``. + edge_label (bool, optional): Whether to use edge label. Default: False Shapes: logit (Tensor): The input predications tensor. 2-D tensor with shape: [N, *], N is batch_size, `*` means number of additional dimensions. The ``logit`` @@ -80,11 +81,16 @@ class BCELoss(nn.Layer): print(output.numpy()) # [0.45618808] """ - def __init__(self, weight=None, pos_weight=None, ignore_index=255): + def __init__(self, + weight=None, + pos_weight=None, + ignore_index=255, + edge_label=False): super().__init__() self.weight = weight self.pos_weight = pos_weight self.ignore_index = ignore_index + self.edge_label = edge_label if self.weight is not None: if isinstance(self.weight, str): @@ -126,6 +132,11 @@ def forward(self, logit, label): eps = 1e-6 if len(label.shape) != len(logit.shape): label = paddle.unsqueeze(label, 1) + # label.shape should equal to the logit.shape + if label.shape[1] != logit.shape[1]: + label = label.squeeze(1) + label = F.one_hot(label, logit.shape[1]) + label = label.transpose((0, 3, 1, 2)) mask = (label != self.ignore_index) mask = paddle.cast(mask, 'float32') if isinstance(self.weight, str): diff --git a/paddleseg/models/losses/dual_task_loss.py b/paddleseg/models/losses/gscnn_dual_task_loss.py similarity index 100% rename from paddleseg/models/losses/dual_task_loss.py rename to paddleseg/models/losses/gscnn_dual_task_loss.py From 160b5c457eaa131d2c607bb2cbe7aa1070d65a02 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 21 Dec 2020 17:27:25 +0800 Subject: [PATCH 030/210] update paddle-2.0rc1 --- paddleseg/core/predict.py | 117 ++++++++++++++++++----------------- paddleseg/core/val.py | 127 +++++++++++++++++++------------------- 2 files changed, 123 insertions(+), 121 deletions(-) diff --git a/paddleseg/core/predict.py b/paddleseg/core/predict.py index c95c45c493..1f7a16a0a6 100644 --- a/paddleseg/core/predict.py +++ b/paddleseg/core/predict.py @@ -72,61 +72,62 @@ def predict(model, logger.info("Start to predict...") progbar_pred = progbar.Progbar(target=len(image_list), verbose=1) - for i, im_path in enumerate(image_list): - im = cv2.imread(im_path) - ori_shape = 
im.shape[:2] - im, _ = transforms(im) - im = im[np.newaxis, ...] - im = paddle.to_tensor(im) - - if aug_pred: - pred = infer.aug_inference( - model, - im, - ori_shape=ori_shape, - transforms=transforms.transforms, - scales=scales, - flip_horizontal=flip_horizontal, - flip_vertical=flip_vertical, - is_slide=is_slide, - stride=stride, - crop_size=crop_size) - else: - pred = infer.inference( - model, - im, - ori_shape=ori_shape, - transforms=transforms.transforms, - is_slide=is_slide, - stride=stride, - crop_size=crop_size) - pred = paddle.squeeze(pred) - pred = pred.numpy().astype('uint8') - - # get the saved name - if image_dir is not None: - im_file = im_path.replace(image_dir, '') - else: - im_file = os.path.basename(im_path) - if im_file[0] == '/': - im_file = im_file[1:] - - # save added image - added_image = utils.visualize.visualize(im_path, pred, weight=0.6) - added_image_path = os.path.join(added_saved_dir, im_file) - mkdir(added_image_path) - cv2.imwrite(added_image_path, added_image) - - # save pseudo color prediction - pred_mask = utils.visualize.get_pseudo_color_map(pred) - pred_saved_path = os.path.join(pred_saved_dir, - im_file.rsplit(".")[0] + ".png") - mkdir(pred_saved_path) - pred_mask.save(pred_saved_path) - - # pred_im = utils.visualize(im_path, pred, weight=0.0) - # pred_saved_path = os.path.join(pred_saved_dir, im_file) - # mkdir(pred_saved_path) - # cv2.imwrite(pred_saved_path, pred_im) - - progbar_pred.update(i + 1) + with paddle.no_grad(): + for i, im_path in enumerate(image_list): + im = cv2.imread(im_path) + ori_shape = im.shape[:2] + im, _ = transforms(im) + im = im[np.newaxis, ...] + im = paddle.to_tensor(im) + + if aug_pred: + pred = infer.aug_inference( + model, + im, + ori_shape=ori_shape, + transforms=transforms.transforms, + scales=scales, + flip_horizontal=flip_horizontal, + flip_vertical=flip_vertical, + is_slide=is_slide, + stride=stride, + crop_size=crop_size) + else: + pred = infer.inference( + model, + im, + ori_shape=ori_shape, + transforms=transforms.transforms, + is_slide=is_slide, + stride=stride, + crop_size=crop_size) + pred = paddle.squeeze(pred) + pred = pred.numpy().astype('uint8') + + # get the saved name + if image_dir is not None: + im_file = im_path.replace(image_dir, '') + else: + im_file = os.path.basename(im_path) + if im_file[0] == '/': + im_file = im_file[1:] + + # save added image + added_image = utils.visualize.visualize(im_path, pred, weight=0.6) + added_image_path = os.path.join(added_saved_dir, im_file) + mkdir(added_image_path) + cv2.imwrite(added_image_path, added_image) + + # save pseudo color prediction + pred_mask = utils.visualize.get_pseudo_color_map(pred) + pred_saved_path = os.path.join(pred_saved_dir, + im_file.rsplit(".")[0] + ".png") + mkdir(pred_saved_path) + pred_mask.save(pred_saved_path) + + # pred_im = utils.visualize(im_path, pred, weight=0.0) + # pred_saved_path = os.path.join(pred_saved_dir, im_file) + # mkdir(pred_saved_path) + # cv2.imwrite(pred_saved_path, pred_im) + + progbar_pred.update(i + 1) diff --git a/paddleseg/core/val.py b/paddleseg/core/val.py index c5447cbcb8..cdf0a348b9 100644 --- a/paddleseg/core/val.py +++ b/paddleseg/core/val.py @@ -81,69 +81,70 @@ def evaluate(model, len(eval_dataset), total_iters)) progbar_val = progbar.Progbar(target=total_iters, verbose=1) timer = Timer() - for iter, (im, label) in enumerate(loader): - reader_cost = timer.elapsed_time() - label = label.astype('int64') - - ori_shape = label.shape[-2:] - if aug_eval: - pred = infer.aug_inference( - model, - im, - 
ori_shape=ori_shape, - transforms=eval_dataset.transforms.transforms, - scales=scales, - flip_horizontal=flip_horizontal, - flip_vertical=flip_vertical, - is_slide=is_slide, - stride=stride, - crop_size=crop_size) - else: - pred = infer.inference( - model, - im, - ori_shape=ori_shape, - transforms=eval_dataset.transforms.transforms, - is_slide=is_slide, - stride=stride, - crop_size=crop_size) - - intersect_area, pred_area, label_area = metrics.calculate_area( - pred, - label, - eval_dataset.num_classes, - ignore_index=eval_dataset.ignore_index) - - # Gather from all ranks - if nranks > 1: - intersect_area_list = [] - pred_area_list = [] - label_area_list = [] - paddle.distributed.all_gather(intersect_area_list, intersect_area) - paddle.distributed.all_gather(pred_area_list, pred_area) - paddle.distributed.all_gather(label_area_list, label_area) - - # Some image has been evaluated and should be eliminated in last iter - if (iter + 1) * nranks > len(eval_dataset): - valid = len(eval_dataset) - iter * nranks - intersect_area_list = intersect_area_list[:valid] - pred_area_list = pred_area_list[:valid] - label_area_list = label_area_list[:valid] - - for i in range(len(intersect_area_list)): - intersect_area_all = intersect_area_all + intersect_area_list[i] - pred_area_all = pred_area_all + pred_area_list[i] - label_area_all = label_area_all + label_area_list[i] - else: - intersect_area_all = intersect_area_all + intersect_area - pred_area_all = pred_area_all + pred_area - label_area_all = label_area_all + label_area - batch_cost = timer.elapsed_time() - timer.restart() - - if local_rank == 0: - progbar_val.update(iter + 1, [('batch_cost', batch_cost), - ('reader cost', reader_cost)]) + with paddle.no_grad(): + for iter, (im, label) in enumerate(loader): + reader_cost = timer.elapsed_time() + label = label.astype('int64') + + ori_shape = label.shape[-2:] + if aug_eval: + pred = infer.aug_inference( + model, + im, + ori_shape=ori_shape, + transforms=eval_dataset.transforms.transforms, + scales=scales, + flip_horizontal=flip_horizontal, + flip_vertical=flip_vertical, + is_slide=is_slide, + stride=stride, + crop_size=crop_size) + else: + pred = infer.inference( + model, + im, + ori_shape=ori_shape, + transforms=eval_dataset.transforms.transforms, + is_slide=is_slide, + stride=stride, + crop_size=crop_size) + + intersect_area, pred_area, label_area = metrics.calculate_area( + pred, + label, + eval_dataset.num_classes, + ignore_index=eval_dataset.ignore_index) + + # Gather from all ranks + if nranks > 1: + intersect_area_list = [] + pred_area_list = [] + label_area_list = [] + paddle.distributed.all_gather(intersect_area_list, intersect_area) + paddle.distributed.all_gather(pred_area_list, pred_area) + paddle.distributed.all_gather(label_area_list, label_area) + + # Some image has been evaluated and should be eliminated in last iter + if (iter + 1) * nranks > len(eval_dataset): + valid = len(eval_dataset) - iter * nranks + intersect_area_list = intersect_area_list[:valid] + pred_area_list = pred_area_list[:valid] + label_area_list = label_area_list[:valid] + + for i in range(len(intersect_area_list)): + intersect_area_all = intersect_area_all + intersect_area_list[i] + pred_area_all = pred_area_all + pred_area_list[i] + label_area_all = label_area_all + label_area_list[i] + else: + intersect_area_all = intersect_area_all + intersect_area + pred_area_all = pred_area_all + pred_area + label_area_all = label_area_all + label_area + batch_cost = timer.elapsed_time() + timer.restart() + + if local_rank == 
0: + progbar_val.update(iter + 1, [('batch_cost', batch_cost), + ('reader cost', reader_cost)]) class_iou, miou = metrics.mean_iou(intersect_area_all, pred_area_all, label_area_all) From 3ab11b0851383cd9189d87e731f3bea94d091694 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 22 Dec 2020 11:48:07 +0800 Subject: [PATCH 031/210] only save latest model and best model --- paddleseg/core/train.py | 14 +++++++++++--- paddleseg/utils/utils.py | 18 ++++++++++++++---- train.py | 8 +++++++- 3 files changed, 32 insertions(+), 8 deletions(-) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index 06d003caf9..2749c183f6 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -57,7 +57,8 @@ def train(model, log_iters=10, num_workers=0, use_vdl=False, - losses=None): + losses=None, + save_latest_only=False): """ Launch training. @@ -181,14 +182,21 @@ def train(model, model.train() if (iter % save_interval == 0 or iter == iters) and local_rank == 0: - current_save_dir = os.path.join(save_dir, - "iter_{}".format(iter)) + if save_latest_only: + current_save_dir = os.path.join(save_dir, 'latest_model') + else: + current_save_dir = os.path.join(save_dir, + "iter_{}".format(iter)) if not os.path.isdir(current_save_dir): os.makedirs(current_save_dir) paddle.save(model.state_dict(), os.path.join(current_save_dir, 'model.pdparams')) paddle.save(optimizer.state_dict(), os.path.join(current_save_dir, 'model.pdopt')) + if save_latest_only: + with open(os.path.join(current_save_dir, 'iter.txt'), + 'w') as f: + f.write(str(iter)) if val_dataset is not None: if mean_iou > best_mean_iou: diff --git a/paddleseg/utils/utils.py b/paddleseg/utils/utils.py index 69bf8ecf8d..20a461be64 100644 --- a/paddleseg/utils/utils.py +++ b/paddleseg/utils/utils.py @@ -108,10 +108,20 @@ def resume(model, optimizer, resume_model): opti_state_dict = paddle.load(ckpt_path) model.set_state_dict(para_state_dict) optimizer.set_state_dict(opti_state_dict) - epoch = resume_model.split('_')[-1] - if epoch.isdigit(): - epoch = int(epoch) - return epoch + + iter_txt = os.path.join(resume_model, 'iter.txt') + if os.path.exists(iter_txt): + with open(iter_txt, 'r') as f: + iter = f.read() + iter = int(iter) + elif 'iter' in resume_model: + iter = resume_model.split('_')[-1] + iter = int(iter) + else: + raise ValueError( + 'Can not get iter. Please check your resume_model: {}'. + format(resume_model)) + return iter else: raise ValueError( 'Directory of the model needed to resume is not Found: {}'. 
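
[Editor's note] To make the save/resume contract of this patch easier to follow, here is a minimal standalone sketch of the logic the two hunks above implement: `train.py` writes an `iter.txt` marker next to the latest weights, and `resume()` reads it back, falling back to the `iter_<N>` directory naming of regular checkpoints. The helper name is hypothetical and not part of the patch.

```python
import os

def read_resume_iter(resume_model):
    # Hypothetical helper mirroring resume() in the diff above: prefer the
    # iter.txt marker written when --save_latest_only is set, otherwise fall
    # back to parsing the iteration out of an iter_<N> checkpoint directory.
    iter_txt = os.path.join(resume_model, 'iter.txt')
    if os.path.exists(iter_txt):
        with open(iter_txt, 'r') as f:
            return int(f.read())
    if 'iter' in resume_model:
        return int(resume_model.split('_')[-1])
    raise ValueError(
        'Can not get iter. Please check your resume_model: {}'.format(
            resume_model))

# Usage: start_iter = read_resume_iter('output/latest_model')
```
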
diff --git a/train.py b/train.py index 177b0bb265..c940c4c2c1 100644 --- a/train.py +++ b/train.py @@ -62,6 +62,11 @@ def parse_args(): help='The directory for saving the model snapshot', type=str, default='./output') + parser.add_argument( + '--save_latest_only', + dest='save_latest_only', + help='Save latest model only', + action='store_true') parser.add_argument( '--num_workers', dest='num_workers', @@ -133,7 +138,8 @@ def main(args): log_iters=args.log_iters, num_workers=args.num_workers, use_vdl=args.use_vdl, - losses=losses) + losses=losses, + save_latest_only=args.save_latest_only) if __name__ == '__main__': From d7bc1e65ddabbe46d788abe66f38f455600951e5 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 29 Dec 2020 11:35:29 +0800 Subject: [PATCH 032/210] add keep_checkpoint_max --- docs/quick_start.md | 1 + paddleseg/core/train.py | 22 +++++++++++----------- paddleseg/utils/utils.py | 14 ++------------ train.py | 11 ++++++----- 4 files changed, 20 insertions(+), 28 deletions(-) diff --git a/docs/quick_start.md b/docs/quick_start.md index 30f14b1c17..b4d4a232ea 100644 --- a/docs/quick_start.md +++ b/docs/quick_start.md @@ -40,6 +40,7 @@ python train.py \ |do_eval|是否在保存模型时启动评估, 启动时将会根据mIoU保存最佳模型至best_model|否|否| |log_iters|打印日志的间隔步数|否|10| |resume_model|恢复训练模型路径,如:`output/iter_1000`|否|None| +|keep_checkpoint_max|最新模型保存个数|否|5| **注意**:如果想要使用多卡训练的话,需要将环境变量CUDA_VISIBLE_DEVICES指定为多卡(不指定时默认使用所有的gpu),并使用paddle.distributed.launch启动训练脚本(windows下由于不支持nccl,无法使用多卡训练): diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index 8331829fa8..46b14a991f 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -14,6 +14,8 @@ import os import time +from collections import deque +import shutil import paddle import paddle.nn.functional as F @@ -58,7 +60,7 @@ def train(model, num_workers=0, use_vdl=False, losses=None, - save_latest_only=False): + keep_checkpoint_max=5): """ Launch training. @@ -77,7 +79,7 @@ def train(model, use_vdl (bool, optional): Whether to record the data to VisualDL during training. Default: False. losses (dict): A dict including 'types' and 'coef'. The length of coef should equal to 1 or len(losses['types']). The 'types' item is a list of object of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficient. - save_latest_only (bool, optional): Save latest model only. Default: False. + keep_checkpoint_max (int, optional): The max checkpoints to save. Default: 5. 
""" nranks = paddle.distributed.ParallelEnv().nranks local_rank = paddle.distributed.ParallelEnv().local_rank @@ -118,6 +120,7 @@ def train(model, best_model_iter = -1 train_reader_cost = 0.0 train_batch_cost = 0.0 + save_models = deque() timer.start() iter = start_iter @@ -183,21 +186,18 @@ def train(model, model.train() if (iter % save_interval == 0 or iter == iters) and local_rank == 0: - if save_latest_only: - current_save_dir = os.path.join(save_dir, 'latest_model') - else: - current_save_dir = os.path.join(save_dir, - "iter_{}".format(iter)) + current_save_dir = os.path.join(save_dir, + "iter_{}".format(iter)) if not os.path.isdir(current_save_dir): os.makedirs(current_save_dir) paddle.save(model.state_dict(), os.path.join(current_save_dir, 'model.pdparams')) paddle.save(optimizer.state_dict(), os.path.join(current_save_dir, 'model.pdopt')) - if save_latest_only: - with open(os.path.join(current_save_dir, 'iter.txt'), - 'w') as f: - f.write(str(iter)) + save_models.append(current_save_dir) + if len(save_models) > keep_checkpoint_max > 0: + model_to_remove = save_models.popleft() + shutil.rmtree(model_to_remove) if val_dataset is not None: if mean_iou > best_mean_iou: diff --git a/paddleseg/utils/utils.py b/paddleseg/utils/utils.py index 20a461be64..73a298d196 100644 --- a/paddleseg/utils/utils.py +++ b/paddleseg/utils/utils.py @@ -109,18 +109,8 @@ def resume(model, optimizer, resume_model): model.set_state_dict(para_state_dict) optimizer.set_state_dict(opti_state_dict) - iter_txt = os.path.join(resume_model, 'iter.txt') - if os.path.exists(iter_txt): - with open(iter_txt, 'r') as f: - iter = f.read() - iter = int(iter) - elif 'iter' in resume_model: - iter = resume_model.split('_')[-1] - iter = int(iter) - else: - raise ValueError( - 'Can not get iter. Please check your resume_model: {}'. 
- format(resume_model)) + iter = resume_model.split('_')[-1] + iter = int(iter) return iter else: raise ValueError( diff --git a/train.py b/train.py index c940c4c2c1..58319f7051 100644 --- a/train.py +++ b/train.py @@ -63,10 +63,11 @@ def parse_args(): type=str, default='./output') parser.add_argument( - '--save_latest_only', - dest='save_latest_only', - help='Save latest model only', - action='store_true') + '--keep_checkpoint_max', + dest='keep_checkpoint_max', + help='The max checkpoints to save', + type=int, + default=5) parser.add_argument( '--num_workers', dest='num_workers', @@ -139,7 +140,7 @@ def main(args): num_workers=args.num_workers, use_vdl=args.use_vdl, losses=losses, - save_latest_only=args.save_latest_only) + keep_checkpoint_max=args.keep_checkpoint_max) if __name__ == '__main__': From 25852a0d0c29a64a1b08d637290829334927df02 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 4 Jan 2021 11:11:12 +0800 Subject: [PATCH 033/210] update unet.py --- paddleseg/models/unet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddleseg/models/unet.py b/paddleseg/models/unet.py index 2c9eaa9e50..0dc8cf75e8 100644 --- a/paddleseg/models/unet.py +++ b/paddleseg/models/unet.py @@ -127,7 +127,7 @@ def __init__(self, self.use_deconv = use_deconv if self.use_deconv: - self.deconv = nn.ConvTranspose2D( + self.deconv = nn.Conv2DTranspose( in_channels, out_channels // 2, kernel_size=2, From ca8d5da8dd16c15d3d72efab24a33f377e6904dd Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 8 Jan 2021 15:24:23 +0800 Subject: [PATCH 034/210] add flops calculation --- paddleseg/core/train.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index 881e1da4cf..dfb8eb6653 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -216,6 +216,20 @@ def train(model, log_writer.add_scalar('Evaluate/Acc', acc, iter) timer.restart() + # Calculate flops. + if local_rank == 0: + + def count_syncbn(m, x, y): + x = x[0] + nelements = x.numel() + m.total_ops += int(2 * nelements) + + _, c, h, w = images.shape + flops = paddle.flops( + model, [1, c, h, w], + custom_ops={paddle.nn.SyncBatchNorm: count_syncbn}) + logger.info(flops) + # Sleep for half a second to let dataloader release resources. time.sleep(0.5) if use_vdl: From e559aa2c6d36ced5ba8eebc65f078c889e34cccc Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 13 Jan 2021 17:59:32 +0800 Subject: [PATCH 035/210] rm prepare_context --- paddleseg/core/train.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index dfb8eb6653..746abc8948 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -96,8 +96,7 @@ def train(model, if nranks > 1: # Initialize parallel training environment. 
paddle.distributed.init_parallel_env() - strategy = paddle.distributed.prepare_context() - ddp_model = paddle.DataParallel(model, strategy) + ddp_model = paddle.DataParallel(model) batch_sampler = paddle.io.DistributedBatchSampler( train_dataset, batch_size=batch_size, shuffle=True, drop_last=True) From 47d39e7094fcdbb2e7c49c6b62b3f5d51dcbc404 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 18 Jan 2021 15:01:27 +0800 Subject: [PATCH 036/210] add amp --- paddleseg/core/train.py | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index edcdc8afc8..27f312f14e 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -108,6 +108,10 @@ def train(model, return_list=True, ) + # use amp + logger.info('use amp to train') + scaler = paddle.amp.GradScaler(init_loss_scaling=1024) + if use_vdl: from visualdl import LogWriter log_writer = LogWriter(save_dir) @@ -136,19 +140,24 @@ def train(model, if len(data) == 3: edges = data[2].astype('int64') - if nranks > 1: - logits_list = ddp_model(images) - else: - logits_list = model(images) - loss_list = loss_computation( - logits_list=logits_list, - labels=labels, - losses=losses, - edges=edges) - loss = sum(loss_list) - loss.backward() - - optimizer.step() + with paddle.amp.auto_cast(enable=True): + if nranks > 1: + logits_list = ddp_model(images) + else: + logits_list = model(images) + loss_list = loss_computation( + logits_list=logits_list, + labels=labels, + losses=losses, + edges=edges) + loss = sum(loss_list) + # loss.backward() + # optimizer.step() + + scaled = scaler.scale(loss) # scale the loss + scaled.backward() # do backward + scaler.minimize(optimizer, scaled) # update parameters + lr = optimizer.get_lr() if isinstance(optimizer._learning_rate, paddle.optimizer.lr.LRScheduler): From c03e3435ff6a98645a4adc7e4fbc670816b5ecb5 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 20 Jan 2021 17:41:06 +0800 Subject: [PATCH 037/210] update static api to paddle2.0 --- legacy/pdseg/eval.py | 27 +- legacy/pdseg/loss.py | 151 +----- legacy/pdseg/models/backbone/mobilenet_v2.py | 315 ------------ legacy/pdseg/models/backbone/mobilenet_v3.py | 363 -------------- legacy/pdseg/models/backbone/resnet.py | 341 ------------- legacy/pdseg/models/backbone/resnet_vd.py | 59 +-- legacy/pdseg/models/backbone/vgg.py | 82 --- legacy/pdseg/models/backbone/xception.py | 317 ------------ legacy/pdseg/models/libs/model_libs.py | 69 ++- legacy/pdseg/models/model_builder.py | 77 +-- legacy/pdseg/models/modeling/deeplab.py | 148 ++---- legacy/pdseg/models/modeling/fast_scnn.py | 304 ------------ legacy/pdseg/models/modeling/hrnet.py | 66 +-- legacy/pdseg/models/modeling/icnet.py | 197 -------- legacy/pdseg/models/modeling/ocrnet.py | 493 ------------------- legacy/pdseg/models/modeling/pspnet.py | 115 ----- legacy/pdseg/models/modeling/unet.py | 135 ----- legacy/pdseg/reader.py | 1 - legacy/pdseg/solver.py | 62 +-- legacy/pdseg/train.py | 62 +-- legacy/pdseg/utils/dist_utils.py | 2 +- 21 files changed, 230 insertions(+), 3156 deletions(-) delete mode 100644 legacy/pdseg/models/backbone/mobilenet_v2.py delete mode 100644 legacy/pdseg/models/backbone/mobilenet_v3.py delete mode 100644 legacy/pdseg/models/backbone/resnet.py delete mode 100644 legacy/pdseg/models/backbone/vgg.py delete mode 100644 legacy/pdseg/models/backbone/xception.py delete mode 100644 legacy/pdseg/models/modeling/fast_scnn.py delete mode 100644 legacy/pdseg/models/modeling/icnet.py delete 
mode 100644 legacy/pdseg/models/modeling/ocrnet.py delete mode 100644 legacy/pdseg/models/modeling/pspnet.py delete mode 100644 legacy/pdseg/models/modeling/unet.py diff --git a/legacy/pdseg/eval.py b/legacy/pdseg/eval.py index 4fce36baee..b54058a52f 100644 --- a/legacy/pdseg/eval.py +++ b/legacy/pdseg/eval.py @@ -26,7 +26,7 @@ import pprint import numpy as np import paddle -import paddle.fluid as fluid +import paddle.static as static from utils import paddle_utils from utils.config import cfg @@ -74,11 +74,16 @@ def parse_args(): return parser.parse_args() -def evaluate(cfg, ckpt_dir=None, use_gpu=False, use_xpu=False, use_mpio=False, **kwargs): +def evaluate(cfg, + ckpt_dir=None, + use_gpu=False, + use_xpu=False, + use_mpio=False, + **kwargs): np.set_printoptions(precision=5, suppress=True) - startup_prog = fluid.Program() - test_prog = fluid.Program() + startup_prog = static.Program() + test_prog = static.Program() dataset = SegDataset( file_list=cfg.DATASET.VAL_FILE_LIST, mode=ModelPhase.EVAL, @@ -104,17 +109,17 @@ def data_generator(): # Get device environment if use_gpu: - places = fluid.cuda_places() + places = static.cuda_places() elif use_xpu: xpu_id = int(os.environ.get('FLAGS_selected_xpus', 0)) - places = [fluid.XPUPlace(xpu_id)] + places = [paddle.XPUPlace(xpu_id)] else: - places = fluid.cpu_places() + places = static.cpu_places() place = places[0] dev_count = len(places) print("#Device count: {}".format(dev_count)) - exe = fluid.Executor(place) + exe = static.Executor(place) exe.run(startup_prog) test_prog = test_prog.clone(for_test=True) @@ -127,9 +132,9 @@ def data_generator(): if ckpt_dir is not None: print('load test model:', ckpt_dir) try: - fluid.load(test_prog, os.path.join(ckpt_dir, 'model'), exe) + static.load(test_prog, os.path.join(ckpt_dir, 'model'), exe) except: - fluid.io.load_params(exe, ckpt_dir, main_program=test_prog) + paddle.fluid.io.load_params(exe, ckpt_dir, main_program=test_prog) # Use streaming confusion matrix to calculate mean_iou np.set_printoptions( @@ -163,7 +168,7 @@ def data_generator(): calculate_eta(all_step - step, speed))) timer.restart() sys.stdout.flush() - except fluid.core.EOFException: + except paddle.fluid.core.EOFException: break category_iou, avg_iou = conf_mat.mean_iou() diff --git a/legacy/pdseg/loss.py b/legacy/pdseg/loss.py index c5ea306d4d..4b8d757abd 100644 --- a/legacy/pdseg/loss.py +++ b/legacy/pdseg/loss.py @@ -14,7 +14,9 @@ # limitations under the License. 
import sys -import paddle.fluid as fluid + +import paddle +import paddle.nn.functional as F import numpy as np import importlib from utils.config import cfg @@ -25,104 +27,29 @@ def softmax_with_loss(logit, ignore_mask=None, num_classes=2, weight=None): - ignore_mask = fluid.layers.cast(ignore_mask, 'float32') - label = fluid.layers.elementwise_min( - label, fluid.layers.assign(np.array([num_classes - 1], dtype=np.int32))) - logit = fluid.layers.transpose(logit, [0, 2, 3, 1]) - logit = fluid.layers.reshape(logit, [-1, num_classes]) - label = fluid.layers.reshape(label, [-1, 1]) - label = fluid.layers.cast(label, 'int64') - ignore_mask = fluid.layers.reshape(ignore_mask, [-1, 1]) - if weight is None: - loss, probs = fluid.layers.softmax_with_cross_entropy( - logit, - label, - ignore_index=cfg.DATASET.IGNORE_INDEX, - return_softmax=True) - else: - label = fluid.layers.squeeze(label, axes=[-1]) - label_one_hot = fluid.one_hot(input=label, depth=num_classes) - if isinstance(weight, list): - assert len( - weight - ) == num_classes, "weight length must equal num of classes" - weight = fluid.layers.assign(np.array([weight], dtype='float32')) - elif isinstance(weight, str): - assert weight.lower( - ) == 'dynamic', 'if weight is string, must be dynamic!' - tmp = [] - total_num = fluid.layers.cast( - fluid.layers.shape(label)[0], 'float32') - for i in range(num_classes): - cls_pixel_num = fluid.layers.reduce_sum(label_one_hot[:, i]) - ratio = total_num / (cls_pixel_num + 1) - tmp.append(ratio) - weight = fluid.layers.concat(tmp) - weight = weight / fluid.layers.reduce_sum(weight) * num_classes - elif isinstance(weight, fluid.layers.Variable): - pass - else: - raise ValueError( - 'Expect weight is a list, string or Variable, but receive {}'. - format(type(weight))) - weight = fluid.layers.reshape(weight, [1, num_classes]) - weighted_label_one_hot = fluid.layers.elementwise_mul( - label_one_hot, weight) - probs = fluid.layers.softmax(logit) - loss = fluid.layers.cross_entropy( - probs, - weighted_label_one_hot, - soft_label=True, - ignore_index=cfg.DATASET.IGNORE_INDEX) - weighted_label_one_hot.stop_gradient = True + ignore_mask = paddle.cast(ignore_mask, 'float32') + label = paddle.minimum( + label, paddle.assign(np.array([num_classes - 1], dtype=np.int32))) + logit = paddle.transpose(logit, [0, 2, 3, 1]) + logit = paddle.reshape(logit, [-1, num_classes]) + label = paddle.reshape(label, [-1, 1]) + label = paddle.cast(label, 'int64') + ignore_mask = paddle.reshape(ignore_mask, [-1, 1]) + loss, probs = F.softmax_with_cross_entropy( + logit, + label, + ignore_index=cfg.DATASET.IGNORE_INDEX, + return_softmax=True) loss = loss * ignore_mask - avg_loss = fluid.layers.mean(loss) / (fluid.layers.mean(ignore_mask) + cfg.MODEL.DEFAULT_EPSILON) + avg_loss = paddle.mean(loss) / ( + paddle.mean(ignore_mask) + cfg.MODEL.DEFAULT_EPSILON) label.stop_gradient = True ignore_mask.stop_gradient = True return avg_loss -# to change, how to appicate ignore index and ignore mask -def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001): - if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1: - raise Exception( - "dice loss is only applicable to one channel classfication") - ignore_mask = fluid.layers.cast(ignore_mask, 'float32') - logit = fluid.layers.transpose(logit, [0, 2, 3, 1]) - label = fluid.layers.transpose(label, [0, 2, 3, 1]) - label = fluid.layers.cast(label, 'int64') - ignore_mask = fluid.layers.transpose(ignore_mask, [0, 2, 3, 1]) - logit = fluid.layers.sigmoid(logit) - logit = logit 
* ignore_mask - label = label * ignore_mask - reduce_dim = list(range(1, len(logit.shape))) - inse = fluid.layers.reduce_sum(logit * label, dim=reduce_dim) - dice_denominator = fluid.layers.reduce_sum( - logit, dim=reduce_dim) + fluid.layers.reduce_sum( - label, dim=reduce_dim) - dice_score = 1 - inse * 2 / (dice_denominator + epsilon) - label.stop_gradient = True - ignore_mask.stop_gradient = True - return fluid.layers.reduce_mean(dice_score) - - -def bce_loss(logit, label, ignore_mask=None): - if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1: - raise Exception("bce loss is only applicable to binary classfication") - label = fluid.layers.cast(label, 'float32') - loss = fluid.layers.sigmoid_cross_entropy_with_logits( - x=logit, - label=label, - ignore_index=cfg.DATASET.IGNORE_INDEX, - normalize=True) # or False - loss = fluid.layers.reduce_sum(loss) - label.stop_gradient = True - ignore_mask.stop_gradient = True - return loss - - def multi_softmax_with_loss(logits, label, ignore_mask=None, @@ -133,50 +60,16 @@ def multi_softmax_with_loss(logits, for i, logit in enumerate(logits): if label.shape[2] != logit.shape[2] or label.shape[ 3] != logit.shape[3]: - logit_label = fluid.layers.resize_nearest(label, logit.shape[2:]) + logit_label = F.interpolate( + label, logit.shape[2:], mode='nearest', align_corners=True) else: logit_label = label logit_mask = (logit_label.astype('int32') != cfg.DATASET.IGNORE_INDEX).astype('int32') - loss = softmax_with_loss(logit, logit_label, logit_mask, num_classes, weight=weight) + loss = softmax_with_loss( + logit, logit_label, logit_mask, num_classes, weight=weight) avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss else: avg_loss = softmax_with_loss( logits, label, ignore_mask, num_classes, weight=weight) return avg_loss - - -def multi_dice_loss(logits, label, ignore_mask=None): - if isinstance(logits, tuple): - avg_loss = 0 - for i, logit in enumerate(logits): - if label.shape[2] != logit.shape[2] or label.shape[ - 3] != logit.shape[3]: - logit_label = fluid.layers.resize_nearest(label, logit.shape[2:]) - else: - logit_label = label - logit_mask = (logit_label.astype('int32') != - cfg.DATASET.IGNORE_INDEX).astype('int32') - loss = dice_loss(logit, logit_label, logit_mask) - avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss - else: - avg_loss = dice_loss(logits, label, ignore_mask) - return avg_loss - - -def multi_bce_loss(logits, label, ignore_mask=None): - if isinstance(logits, tuple): - avg_loss = 0 - for i, logit in enumerate(logits): - if label.shape[2] != logit.shape[2] or label.shape[ - 3] != logit.shape[3]: - logit_label = fluid.layers.resize_nearest(label, logit.shape[2:]) - else: - logit_label = label - logit_mask = (logit_label.astype('int32') != - cfg.DATASET.IGNORE_INDEX).astype('int32') - loss = bce_loss(logit, logit_label, logit_mask) - avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss - else: - avg_loss = bce_loss(logits, label, ignore_mask) - return avg_loss diff --git a/legacy/pdseg/models/backbone/mobilenet_v2.py b/legacy/pdseg/models/backbone/mobilenet_v2.py deleted file mode 100644 index eefeba8d15..0000000000 --- a/legacy/pdseg/models/backbone/mobilenet_v2.py +++ /dev/null @@ -1,315 +0,0 @@ -# coding: utf8 -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import paddle.fluid as fluid -from paddle.fluid.initializer import MSRA -from paddle.fluid.param_attr import ParamAttr -from utils.config import cfg - -__all__ = [ - 'MobileNetV2', 'MobileNetV2_x0_25', 'MobileNetV2_x0_5', 'MobileNetV2_x1_0', - 'MobileNetV2_x1_5', 'MobileNetV2_x2_0', 'MobileNetV2_scale' -] - -train_parameters = { - "input_size": [3, 224, 224], - "input_mean": [0.485, 0.456, 0.406], - "input_std": [0.229, 0.224, 0.225], - "learning_strategy": { - "name": "piecewise_decay", - "batch_size": 256, - "epochs": [30, 60, 90], - "steps": [0.1, 0.01, 0.001, 0.0001] - } -} - - -class MobileNetV2(): - def __init__(self, scale=1.0, change_depth=False, output_stride=None): - self.params = train_parameters - self.scale = scale - self.change_depth = change_depth - self.bottleneck_params_list = [ - (1, 16, 1, 1), - (6, 24, 2, 2), - (6, 32, 3, 2), - (6, 64, 4, 2), - (6, 96, 3, 1), - (6, 160, 3, 2), - (6, 320, 1, 1), - ] if change_depth == False else [ - (1, 16, 1, 1), - (6, 24, 2, 2), - (6, 32, 5, 2), - (6, 64, 7, 2), - (6, 96, 5, 1), - (6, 160, 3, 2), - (6, 320, 1, 1), - ] - self.modify_bottle_params(output_stride) - - def modify_bottle_params(self, output_stride=None): - if output_stride is not None and output_stride % 2 != 0: - raise Exception("output stride must to be even number") - if output_stride is None: - return - else: - stride = 2 - for i, layer_setting in enumerate(self.bottleneck_params_list): - t, c, n, s = layer_setting - stride = stride * s - if stride > output_stride: - s = 1 - self.bottleneck_params_list[i] = (t, c, n, s) - - def net(self, input, class_dim=1000, end_points=None, decode_points=None): - scale = self.scale - change_depth = self.change_depth - #if change_depth is True, the new depth is 1.4 times as deep as before. 
- bottleneck_params_list = self.bottleneck_params_list - decode_ends = dict() - - def check_points(count, points): - if points is None: - return False - else: - if isinstance(points, list): - return (True if count in points else False) - else: - return (True if count == points else False) - - #conv1 - input = self.conv_bn_layer( - input, - num_filters=int(32 * scale), - filter_size=3, - stride=2, - padding=1, - if_act=True, - name='conv1_1') - layer_count = 1 - - #print("node test:", layer_count, input.shape) - - if check_points(layer_count, decode_points): - decode_ends[layer_count] = input - - if check_points(layer_count, end_points): - return input, decode_ends - - # bottleneck sequences - i = 1 - in_c = int(32 * scale) - for layer_setting in bottleneck_params_list: - t, c, n, s = layer_setting - i += 1 - input, depthwise_output = self.invresi_blocks( - input=input, - in_c=in_c, - t=t, - c=int(c * scale), - n=n, - s=s, - name='conv' + str(i)) - in_c = int(c * scale) - layer_count += n - - #print("node test:", layer_count, input.shape) - if check_points(layer_count, decode_points): - decode_ends[layer_count] = depthwise_output - - if check_points(layer_count, end_points): - return input, decode_ends - - #last_conv - input = self.conv_bn_layer( - input=input, - num_filters=int(1280 * scale) if scale > 1.0 else 1280, - filter_size=1, - stride=1, - padding=0, - if_act=True, - name='conv9') - - input = fluid.layers.pool2d( - input=input, - pool_size=7, - pool_stride=1, - pool_type='avg', - global_pooling=True) - - output = fluid.layers.fc( - input=input, - size=class_dim, - param_attr=ParamAttr(name='fc10_weights'), - bias_attr=ParamAttr(name='fc10_offset')) - return output - - def conv_bn_layer(self, - input, - filter_size, - num_filters, - stride, - padding, - channels=None, - num_groups=1, - if_act=True, - name=None, - use_cudnn=True): - conv = fluid.layers.conv2d( - input=input, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=padding, - groups=num_groups, - act=None, - use_cudnn=use_cudnn, - param_attr=ParamAttr(name=name + '_weights'), - bias_attr=False) - bn_name = name + '_bn' - bn = fluid.layers.batch_norm( - input=conv, - param_attr=ParamAttr(name=bn_name + "_scale"), - bias_attr=ParamAttr(name=bn_name + "_offset"), - moving_mean_name=bn_name + '_mean', - moving_variance_name=bn_name + '_variance') - if if_act: - return fluid.layers.relu6(bn) - else: - return bn - - def shortcut(self, input, data_residual): - return fluid.layers.elementwise_add(input, data_residual) - - def inverted_residual_unit(self, - input, - num_in_filter, - num_filters, - ifshortcut, - stride, - filter_size, - padding, - expansion_factor, - name=None): - num_expfilter = int(round(num_in_filter * expansion_factor)) - - channel_expand = self.conv_bn_layer( - input=input, - num_filters=num_expfilter, - filter_size=1, - stride=1, - padding=0, - num_groups=1, - if_act=True, - name=name + '_expand') - - bottleneck_conv = self.conv_bn_layer( - input=channel_expand, - num_filters=num_expfilter, - filter_size=filter_size, - stride=stride, - padding=padding, - num_groups=num_expfilter, - if_act=True, - name=name + '_dwise', - use_cudnn=False) - - depthwise_output = bottleneck_conv - - linear_out = self.conv_bn_layer( - input=bottleneck_conv, - num_filters=num_filters, - filter_size=1, - stride=1, - padding=0, - num_groups=1, - if_act=False, - name=name + '_linear') - - if ifshortcut: - out = self.shortcut(input=input, data_residual=linear_out) - return out, depthwise_output - else: - 
return linear_out, depthwise_output - - def invresi_blocks(self, input, in_c, t, c, n, s, name=None): - first_block, depthwise_output = self.inverted_residual_unit( - input=input, - num_in_filter=in_c, - num_filters=c, - ifshortcut=False, - stride=s, - filter_size=3, - padding=1, - expansion_factor=t, - name=name + '_1') - - last_residual_block = first_block - last_c = c - - for i in range(1, n): - last_residual_block, depthwise_output = self.inverted_residual_unit( - input=last_residual_block, - num_in_filter=last_c, - num_filters=c, - ifshortcut=True, - stride=1, - filter_size=3, - padding=1, - expansion_factor=t, - name=name + '_' + str(i + 1)) - return last_residual_block, depthwise_output - - -def MobileNetV2_x0_25(): - model = MobileNetV2(scale=0.25) - return model - - -def MobileNetV2_x0_5(): - model = MobileNetV2(scale=0.5) - return model - - -def MobileNetV2_x1_0(): - model = MobileNetV2(scale=1.0) - return model - - -def MobileNetV2_x1_5(): - model = MobileNetV2(scale=1.5) - return model - - -def MobileNetV2_x2_0(): - model = MobileNetV2(scale=2.0) - return model - - -def MobileNetV2_scale(): - model = MobileNetV2(scale=1.2, change_depth=True) - return model - - -if __name__ == '__main__': - image_shape = [-1, 3, 224, 224] - image = fluid.data(name='image', shape=image_shape, dtype='float32') - model = MobileNetV2_x1_0() - logit, decode_ends = model.net(image) - #print("logit:", logit.shape) diff --git a/legacy/pdseg/models/backbone/mobilenet_v3.py b/legacy/pdseg/models/backbone/mobilenet_v3.py deleted file mode 100644 index e0a6a8df3c..0000000000 --- a/legacy/pdseg/models/backbone/mobilenet_v3.py +++ /dev/null @@ -1,363 +0,0 @@ -# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import paddle.fluid as fluid -from paddle.fluid.param_attr import ParamAttr - -__all__ = [ - 'MobileNetV3', 'MobileNetV3_small_x0_35', 'MobileNetV3_small_x0_5', - 'MobileNetV3_small_x0_75', 'MobileNetV3_small_x1_0', - 'MobileNetV3_small_x1_25', 'MobileNetV3_large_x0_35', - 'MobileNetV3_large_x0_5', 'MobileNetV3_large_x0_75', - 'MobileNetV3_large_x1_0', 'MobileNetV3_large_x1_25' -] - - -class MobileNetV3(): - def __init__(self, - scale=1.0, - model_name='small', - lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], - output_stride=None): - self.scale = scale - self.inplanes = 16 - - self.lr_mult_list = lr_mult_list - assert len(self.lr_mult_list) == 5, \ - "lr_mult_list length in MobileNetV3 must be 5 but got {}!!".format( - len(self.lr_mult_list)) - self.curr_stage = 0 - self.decode_point = None - self.end_point = None - - if model_name == "large": - self.cfg = [ - # k, exp, c, se, nl, s, - [3, 16, 16, False, 'relu', 1], - [3, 64, 24, False, 'relu', 2], - [3, 72, 24, False, 'relu', 1], - [5, 72, 40, True, 'relu', 2], - [5, 120, 40, True, 'relu', 1], - [5, 120, 40, True, 'relu', 1], - [3, 240, 80, False, 'hard_swish', 2], - [3, 200, 80, False, 'hard_swish', 1], - [3, 184, 80, False, 'hard_swish', 1], - [3, 184, 80, False, 'hard_swish', 1], - [3, 480, 112, True, 'hard_swish', 1], - [3, 672, 112, True, 'hard_swish', 1], - # The number of channels in the last 4 stages is reduced by a - # factor of 2 compared to the standard implementation. - [5, 336, 80, True, 'hard_swish', 2], - [5, 480, 80, True, 'hard_swish', 1], - [5, 480, 80, True, 'hard_swish', 1], - ] - self.cls_ch_squeeze = 480 - self.cls_ch_expand = 1280 - self.lr_interval = 3 - elif model_name == "small": - self.cfg = [ - # k, exp, c, se, nl, s, - [3, 16, 16, True, 'relu', 2], - [3, 72, 24, False, 'relu', 2], - [3, 88, 24, False, 'relu', 1], - [5, 96, 40, True, 'hard_swish', 2], - [5, 240, 40, True, 'hard_swish', 1], - [5, 240, 40, True, 'hard_swish', 1], - [5, 120, 48, True, 'hard_swish', 1], - [5, 144, 48, True, 'hard_swish', 1], - # The number of channels in the last 4 stages is reduced by a - # factor of 2 compared to the standard implementation. 
- [5, 144, 48, True, 'hard_swish', 2], - [5, 288, 48, True, 'hard_swish', 1], - [5, 288, 48, True, 'hard_swish', 1], - ] - self.cls_ch_squeeze = 288 - self.cls_ch_expand = 1280 - self.lr_interval = 2 - else: - raise NotImplementedError( - "mode[{}_model] is not implemented!".format(model_name)) - - self.modify_bottle_params(output_stride) - - def modify_bottle_params(self, output_stride=None): - if output_stride is not None and output_stride % 2 != 0: - raise Exception("output stride must to be even number") - if output_stride is None: - return - else: - stride = 2 - for i, _cfg in enumerate(self.cfg): - stride = stride * _cfg[-1] - if stride > output_stride: - s = 1 - self.cfg[i][-1] = s - - def net(self, input, class_dim=1000, end_points=None, decode_points=None): - scale = self.scale - inplanes = self.inplanes - cfg = self.cfg - cls_ch_squeeze = self.cls_ch_squeeze - cls_ch_expand = self.cls_ch_expand - - # conv1 - conv = self.conv_bn_layer( - input, - filter_size=3, - num_filters=self.make_divisible(inplanes * scale), - stride=2, - padding=1, - num_groups=1, - if_act=True, - act='hard_swish', - name='conv1') - - i = 0 - inplanes = self.make_divisible(inplanes * scale) - for layer_cfg in cfg: - conv = self.residual_unit( - input=conv, - num_in_filter=inplanes, - num_mid_filter=self.make_divisible(scale * layer_cfg[1]), - num_out_filter=self.make_divisible(scale * layer_cfg[2]), - act=layer_cfg[4], - stride=layer_cfg[5], - filter_size=layer_cfg[0], - use_se=layer_cfg[3], - name='conv' + str(i + 2)) - inplanes = self.make_divisible(scale * layer_cfg[2]) - i += 1 - self.curr_stage = i - - conv = self.conv_bn_layer( - input=conv, - filter_size=1, - num_filters=self.make_divisible(scale * cls_ch_squeeze), - stride=1, - padding=0, - num_groups=1, - if_act=True, - act='hard_swish', - name='conv_last') - - return conv, self.decode_point - - conv = fluid.layers.pool2d( - input=conv, pool_type='avg', global_pooling=True, use_cudnn=False) - conv = fluid.layers.conv2d( - input=conv, - num_filters=cls_ch_expand, - filter_size=1, - stride=1, - padding=0, - act=None, - param_attr=ParamAttr(name='last_1x1_conv_weights'), - bias_attr=False) - conv = fluid.layers.hard_swish(conv) - drop = fluid.layers.dropout(x=conv, dropout_prob=0.2) - out = fluid.layers.fc( - input=drop, - size=class_dim, - param_attr=ParamAttr(name='fc_weights'), - bias_attr=ParamAttr(name='fc_offset')) - return out - - def conv_bn_layer(self, - input, - filter_size, - num_filters, - stride, - padding, - num_groups=1, - if_act=True, - act=None, - name=None, - use_cudnn=True, - res_last_bn_init=False): - lr_idx = self.curr_stage // self.lr_interval - lr_idx = min(lr_idx, len(self.lr_mult_list) - 1) - lr_mult = self.lr_mult_list[lr_idx] - - conv = fluid.layers.conv2d( - input=input, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=padding, - groups=num_groups, - act=None, - use_cudnn=use_cudnn, - param_attr=ParamAttr(name=name + '_weights', learning_rate=lr_mult), - bias_attr=False) - bn_name = name + '_bn' - bn = fluid.layers.batch_norm( - input=conv, - param_attr=ParamAttr( - name=bn_name + "_scale", - regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.0)), - bias_attr=ParamAttr( - name=bn_name + "_offset", - regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.0)), - moving_mean_name=bn_name + '_mean', - moving_variance_name=bn_name + '_variance') - if if_act: - if act == 'relu': - bn = fluid.layers.relu(bn) - elif act == 'hard_swish': - bn = 
fluid.layers.hard_swish(bn) - return bn - - def make_divisible(self, v, divisor=8, min_value=None): - if min_value is None: - min_value = divisor - new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) - if new_v < 0.9 * v: - new_v += divisor - return new_v - - def se_block(self, input, num_out_filter, ratio=4, name=None): - lr_idx = self.curr_stage // self.lr_interval - lr_idx = min(lr_idx, len(self.lr_mult_list) - 1) - lr_mult = self.lr_mult_list[lr_idx] - - num_mid_filter = num_out_filter // ratio - pool = fluid.layers.pool2d( - input=input, pool_type='avg', global_pooling=True, use_cudnn=False) - conv1 = fluid.layers.conv2d( - input=pool, - filter_size=1, - num_filters=num_mid_filter, - act='relu', - param_attr=ParamAttr( - name=name + '_1_weights', learning_rate=lr_mult), - bias_attr=ParamAttr(name=name + '_1_offset', learning_rate=lr_mult)) - conv2 = fluid.layers.conv2d( - input=conv1, - filter_size=1, - num_filters=num_out_filter, - act='hard_sigmoid', - param_attr=ParamAttr( - name=name + '_2_weights', learning_rate=lr_mult), - bias_attr=ParamAttr(name=name + '_2_offset', learning_rate=lr_mult)) - scale = fluid.layers.elementwise_mul(x=input, y=conv2, axis=0) - return scale - - def residual_unit(self, - input, - num_in_filter, - num_mid_filter, - num_out_filter, - stride, - filter_size, - act=None, - use_se=False, - name=None): - - conv0 = self.conv_bn_layer( - input=input, - filter_size=1, - num_filters=num_mid_filter, - stride=1, - padding=0, - if_act=True, - act=act, - name=name + '_expand') - - conv1 = self.conv_bn_layer( - input=conv0, - filter_size=filter_size, - num_filters=num_mid_filter, - stride=stride, - padding=int((filter_size - 1) // 2), - if_act=True, - act=act, - num_groups=num_mid_filter, - use_cudnn=False, - name=name + '_depthwise') - - if self.curr_stage == 5: - self.decode_point = conv1 - if use_se: - conv1 = self.se_block( - input=conv1, num_out_filter=num_mid_filter, name=name + '_se') - - conv2 = self.conv_bn_layer( - input=conv1, - filter_size=1, - num_filters=num_out_filter, - stride=1, - padding=0, - if_act=False, - name=name + '_linear', - res_last_bn_init=True) - if num_in_filter != num_out_filter or stride != 1: - return conv2 - else: - return fluid.layers.elementwise_add(x=input, y=conv2, act=None) - - -def MobileNetV3_small_x0_35(): - model = MobileNetV3(model_name='small', scale=0.35) - return model - - -def MobileNetV3_small_x0_5(): - model = MobileNetV3(model_name='small', scale=0.5) - return model - - -def MobileNetV3_small_x0_75(): - model = MobileNetV3(model_name='small', scale=0.75) - return model - - -def MobileNetV3_small_x1_0(**args): - model = MobileNetV3(model_name='small', scale=1.0, **args) - return model - - -def MobileNetV3_small_x1_25(): - model = MobileNetV3(model_name='small', scale=1.25) - return model - - -def MobileNetV3_large_x0_35(): - model = MobileNetV3(model_name='large', scale=0.35) - return model - - -def MobileNetV3_large_x0_5(): - model = MobileNetV3(model_name='large', scale=0.5) - return model - - -def MobileNetV3_large_x0_75(): - model = MobileNetV3(model_name='large', scale=0.75) - return model - - -def MobileNetV3_large_x1_0(**args): - model = MobileNetV3(model_name='large', scale=1.0, **args) - return model - - -def MobileNetV3_large_x1_25(): - model = MobileNetV3(model_name='large', scale=1.25) - return model diff --git a/legacy/pdseg/models/backbone/resnet.py b/legacy/pdseg/models/backbone/resnet.py deleted file mode 100644 index 60a7bc5dcc..0000000000 --- a/legacy/pdseg/models/backbone/resnet.py +++ 
/dev/null @@ -1,341 +0,0 @@ -# coding: utf8 -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -import numpy as np -import paddle.fluid as fluid -from paddle.fluid.param_attr import ParamAttr - -__all__ = [ - "ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152" -] - -train_parameters = { - "input_size": [3, 224, 224], - "input_mean": [0.485, 0.456, 0.406], - "input_std": [0.229, 0.224, 0.225], - "learning_strategy": { - "name": "piecewise_decay", - "batch_size": 256, - "epochs": [30, 60, 90], - "steps": [0.1, 0.01, 0.001, 0.0001] - } -} - - -class ResNet(): - def __init__(self, layers=50, scale=1.0, stem=None): - self.params = train_parameters - self.layers = layers - self.scale = scale - self.stem = stem - - def net(self, - input, - class_dim=1000, - end_points=None, - decode_points=None, - resize_points=None, - dilation_dict=None): - layers = self.layers - supported_layers = [18, 34, 50, 101, 152] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format(supported_layers, layers) - - decode_ends = dict() - - def check_points(count, points): - if points is None: - return False - else: - if isinstance(points, list): - return (True if count in points else False) - else: - return (True if count == points else False) - - def get_dilated_rate(dilation_dict, idx): - if dilation_dict is None or idx not in dilation_dict: - return 1 - else: - return dilation_dict[idx] - - if layers == 18: - depth = [2, 2, 2, 2] - elif layers == 34 or layers == 50: - depth = [3, 4, 6, 3] - elif layers == 101: - depth = [3, 4, 23, 3] - elif layers == 152: - depth = [3, 8, 36, 3] - num_filters = [64, 128, 256, 512] - - if self.stem == 'icnet' or self.stem == 'pspnet': - conv = self.conv_bn_layer( - input=input, - num_filters=int(64 * self.scale), - filter_size=3, - stride=2, - act='relu', - name="conv1_1") - conv = self.conv_bn_layer( - input=conv, - num_filters=int(64 * self.scale), - filter_size=3, - stride=1, - act='relu', - name="conv1_2") - conv = self.conv_bn_layer( - input=conv, - num_filters=int(128 * self.scale), - filter_size=3, - stride=1, - act='relu', - name="conv1_3") - else: - conv = self.conv_bn_layer( - input=input, - num_filters=int(64 * self.scale), - filter_size=7, - stride=2, - act='relu', - name="conv1") - - conv = fluid.layers.pool2d( - input=conv, - pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') - - layer_count = 1 - if check_points(layer_count, decode_points): - decode_ends[layer_count] = conv - - if check_points(layer_count, end_points): - return conv, decode_ends - - if layers >= 50: - for block in range(len(depth)): - for i in range(depth[block]): - if layers in [101, 152] and block == 2: - if i == 0: - conv_name = "res" + str(block + 2) + "a" - else: - conv_name = "res" + str(block + 2) + "b" + str(i) - else: - conv_name = "res" + 
str(block + 2) + chr(97 + i) - dilation_rate = get_dilated_rate(dilation_dict, block) - - conv = self.bottleneck_block( - input=conv, - num_filters=int(num_filters[block] * self.scale), - stride=2 - if i == 0 and block != 0 and dilation_rate == 1 else 1, - name=conv_name, - dilation=dilation_rate) - layer_count += 3 - - if check_points(layer_count, decode_points): - decode_ends[layer_count] = conv - - if check_points(layer_count, end_points): - return conv, decode_ends - - if check_points(layer_count, resize_points): - conv = self.interp( - conv, - np.ceil( - np.array(conv.shape[2:]).astype('int32') / 2)) - - pool = fluid.layers.pool2d( - input=conv, pool_size=7, pool_type='avg', global_pooling=True) - stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) - out = fluid.layers.fc( - input=pool, - size=class_dim, - param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv))) - else: - for block in range(len(depth)): - for i in range(depth[block]): - conv_name = "res" + str(block + 2) + chr(97 + i) - conv = self.basic_block( - input=conv, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - is_first=block == i == 0, - name=conv_name) - layer_count += 2 - if check_points(layer_count, decode_points): - decode_ends[layer_count] = conv - - if check_points(layer_count, end_points): - return conv, decode_ends - - pool = fluid.layers.pool2d( - input=conv, pool_size=7, pool_type='avg', global_pooling=True) - stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) - out = fluid.layers.fc( - input=pool, - size=class_dim, - param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv))) - return out - - def zero_padding(self, input, padding): - return fluid.layers.pad( - input, [0, 0, 0, 0, padding, padding, padding, padding]) - - def interp(self, input, out_shape): - out_shape = list(out_shape.astype("int32")) - return fluid.layers.resize_bilinear(input, out_shape=out_shape) - - def conv_bn_layer(self, - input, - num_filters, - filter_size, - stride=1, - dilation=1, - groups=1, - act=None, - name=None): - - if self.stem == 'pspnet': - bias_attr = ParamAttr(name=name + "_biases") - else: - bias_attr = False - - conv = fluid.layers.conv2d( - input=input, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2 if dilation == 1 else 0, - dilation=dilation, - groups=groups, - act=None, - param_attr=ParamAttr(name=name + "_weights"), - bias_attr=bias_attr, - name=name + '.conv2d.output.1') - - if name == "conv1": - bn_name = "bn_" + name - else: - bn_name = "bn" + name[3:] - return fluid.layers.batch_norm( - input=conv, - act=act, - name=bn_name + '.output.1', - param_attr=ParamAttr(name=bn_name + '_scale'), - bias_attr=ParamAttr(bn_name + '_offset'), - moving_mean_name=bn_name + '_mean', - moving_variance_name=bn_name + '_variance', - ) - - def shortcut(self, input, ch_out, stride, is_first, name): - ch_in = input.shape[1] - if ch_in != ch_out or stride != 1 or is_first == True: - return self.conv_bn_layer(input, ch_out, 1, stride, name=name) - else: - return input - - def bottleneck_block(self, input, num_filters, stride, name, dilation=1): - if self.stem == 'pspnet' and self.layers == 101: - strides = [1, stride] - else: - strides = [stride, 1] - - conv0 = self.conv_bn_layer( - input=input, - num_filters=num_filters, - filter_size=1, - dilation=1, - stride=strides[0], - act='relu', - name=name + "_branch2a") - if dilation > 1: - conv0 = self.zero_padding(conv0, dilation) - conv1 = 
self.conv_bn_layer( - input=conv0, - num_filters=num_filters, - filter_size=3, - dilation=dilation, - stride=strides[1], - act='relu', - name=name + "_branch2b") - conv2 = self.conv_bn_layer( - input=conv1, - num_filters=num_filters * 4, - dilation=1, - filter_size=1, - act=None, - name=name + "_branch2c") - - short = self.shortcut( - input, - num_filters * 4, - stride, - is_first=False, - name=name + "_branch1") - - return fluid.layers.elementwise_add( - x=short, y=conv2, act='relu', name=name + ".add.output.5") - - def basic_block(self, input, num_filters, stride, is_first, name): - conv0 = self.conv_bn_layer( - input=input, - num_filters=num_filters, - filter_size=3, - act='relu', - stride=stride, - name=name + "_branch2a") - conv1 = self.conv_bn_layer( - input=conv0, - num_filters=num_filters, - filter_size=3, - act=None, - name=name + "_branch2b") - short = self.shortcut( - input, num_filters, stride, is_first, name=name + "_branch1") - return fluid.layers.elementwise_add(x=short, y=conv1, act='relu') - - -def ResNet18(): - model = ResNet(layers=18) - return model - - -def ResNet34(): - model = ResNet(layers=34) - return model - - -def ResNet50(): - model = ResNet(layers=50) - return model - - -def ResNet101(): - model = ResNet(layers=101) - return model - - -def ResNet152(): - model = ResNet(layers=152) - return model diff --git a/legacy/pdseg/models/backbone/resnet_vd.py b/legacy/pdseg/models/backbone/resnet_vd.py index 4197fe9aa5..8d405eeb2f 100644 --- a/legacy/pdseg/models/backbone/resnet_vd.py +++ b/legacy/pdseg/models/backbone/resnet_vd.py @@ -20,8 +20,10 @@ import math import numpy as np -import paddle.fluid as fluid -from paddle.fluid.param_attr import ParamAttr +import paddle +import paddle.static as static +import paddle.nn.functional as F +from paddle import ParamAttr __all__ = [ "ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152" @@ -128,12 +130,7 @@ def get_dilated_rate(dilation_dict, idx): act='relu', name="conv1") - conv = fluid.layers.pool2d( - input=conv, - pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max') + conv = F.max_pool2d(conv, kernel_size=3, stride=2, padding=1) layer_count = 1 if check_points(layer_count, decode_points): @@ -179,14 +176,13 @@ def get_dilated_rate(dilation_dict, idx): np.ceil( np.array(conv.shape[2:]).astype('int32') / 2)) - pool = fluid.layers.pool2d( - input=conv, pool_size=7, pool_type='avg', global_pooling=True) + pool = F.adaptive_avg_pool2d(conv, output_size=(1, 1)) stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) - out = fluid.layers.fc( + out = static.nn.fc( input=pool, size=class_dim, - param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv))) + param_attr=ParamAttr( + initializer=paddle.nn.initializer.Uniform(-stdv, stdv))) else: for block in range(len(depth)): self.curr_stage += 1 @@ -205,23 +201,22 @@ def get_dilated_rate(dilation_dict, idx): if check_points(layer_count, end_points): return conv, decode_ends - pool = fluid.layers.pool2d( - input=conv, pool_size=7, pool_type='avg', global_pooling=True) + pool = F.adaptive_avg_pool2d(conv, output_size=(1, 1)) stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) - out = fluid.layers.fc( + out = static.nn.fc( input=pool, size=class_dim, - param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.Uniform(-stdv, stdv))) + param_attr=ParamAttr( + initializer=paddle.nn.initializer.Uniform(-stdv, stdv))) return out def zero_padding(self, input, padding): - return fluid.layers.pad( - input, [0, 0, 0, 0, padding, padding, 
padding, padding]) + return F.pad(input, [padding, padding, padding, padding]) def interp(self, input, out_shape): out_shape = list(out_shape.astype("int32")) - return fluid.layers.resize_bilinear(input, out_shape=out_shape) + return F.interpolate( + input, out_shape, mode='bilinear', align_corners=True) def conv_bn_layer(self, input, @@ -239,7 +234,7 @@ def conv_bn_layer(self, else: bias_attr = False - conv = fluid.layers.conv2d( + conv = static.nn.conv2d( input=input, num_filters=num_filters, filter_size=filter_size, @@ -256,7 +251,7 @@ def conv_bn_layer(self, bn_name = "bn_" + name else: bn_name = "bn" + name[3:] - return fluid.layers.batch_norm( + return static.nn.batch_norm( input=conv, act=act, name=bn_name + '.output.1', @@ -276,15 +271,10 @@ def conv_bn_layer_new(self, act=None, name=None): lr_mult = self.lr_mult_list[self.curr_stage] - pool = fluid.layers.pool2d( - input=input, - pool_size=2, - pool_stride=2, - pool_padding=0, - pool_type='avg', - ceil_mode=True) + pool = F.avg_pool2d( + input, kernel_size=2, stride=2, padding=0, ceil_mode=True) - conv = fluid.layers.conv2d( + conv = static.nn.conv2d( input=pool, num_filters=num_filters, filter_size=filter_size, @@ -298,7 +288,7 @@ def conv_bn_layer_new(self, bn_name = "bn_" + name else: bn_name = "bn" + name[3:] - return fluid.layers.batch_norm( + return static.nn.batch_norm( input=conv, act=act, param_attr=ParamAttr( @@ -360,8 +350,7 @@ def bottleneck_block(self, is_first=is_first, name=name + "_branch1") - return fluid.layers.elementwise_add( - x=short, y=conv2, act='relu', name=name + ".add.output.5") + return F.relu(short + conv2) def basic_block(self, input, num_filters, stride, is_first, name): conv0 = self.conv_bn_layer( @@ -379,7 +368,7 @@ def basic_block(self, input, num_filters, stride, is_first, name): name=name + "_branch2b") short = self.shortcut( input, num_filters, stride, is_first, name=name + "_branch1") - return fluid.layers.elementwise_add(x=short, y=conv1, act='relu') + return F.relu(short + conv1) def ResNet18(): diff --git a/legacy/pdseg/models/backbone/vgg.py b/legacy/pdseg/models/backbone/vgg.py deleted file mode 100644 index 443fa43556..0000000000 --- a/legacy/pdseg/models/backbone/vgg.py +++ /dev/null @@ -1,82 +0,0 @@ -# coding: utf8 -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import paddle -import paddle.fluid as fluid -from paddle.fluid import ParamAttr - -__all__ = ["VGGNet"] - - -def check_points(count, points): - if points is None: - return False - else: - if isinstance(points, list): - return (True if count in points else False) - else: - return (True if count == points else False) - - -class VGGNet(): - def __init__(self, layers=16): - self.layers = layers - - def net(self, input, class_dim=1000, end_points=None, decode_points=None): - short_cuts = dict() - layers_count = 0 - layers = self.layers - vgg_spec = { - 11: ([1, 1, 2, 2, 2]), - 13: ([2, 2, 2, 2, 2]), - 16: ([2, 2, 3, 3, 3]), - 19: ([2, 2, 4, 4, 4]) - } - assert layers in vgg_spec.keys(), \ - "supported layers are {} but input layer is {}".format(vgg_spec.keys(), layers) - - nums = vgg_spec[layers] - channels = [64, 128, 256, 512, 512] - conv = input - for i in range(len(nums)): - conv = self.conv_block( - conv, channels[i], nums[i], name="conv" + str(i + 1) + "_") - layers_count += nums[i] - if check_points(layers_count, decode_points): - short_cuts[layers_count] = conv - if check_points(layers_count, end_points): - return conv, short_cuts - - return conv - - def conv_block(self, input, num_filter, groups, name=None): - conv = input - for i in range(groups): - conv = fluid.layers.conv2d( - input=conv, - num_filters=num_filter, - filter_size=3, - stride=1, - padding=1, - act='relu', - param_attr=fluid.param_attr.ParamAttr( - name=name + str(i + 1) + "_weights"), - bias_attr=False) - return fluid.layers.pool2d( - input=conv, pool_size=2, pool_type='max', pool_stride=2) diff --git a/legacy/pdseg/models/backbone/xception.py b/legacy/pdseg/models/backbone/xception.py deleted file mode 100644 index d45adc21af..0000000000 --- a/legacy/pdseg/models/backbone/xception.py +++ /dev/null @@ -1,317 +0,0 @@ -# coding: utf8 -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import contextlib -import paddle -import math -import paddle.fluid as fluid -from models.libs.model_libs import scope, name_scope -from models.libs.model_libs import bn, bn_relu, relu -from models.libs.model_libs import conv -from models.libs.model_libs import separate_conv - -__all__ = ['xception_65', 'xception_41', 'xception_71'] - - -def check_data(data, number): - if type(data) == int: - return [data] * number - assert len(data) == number - return data - - -def check_stride(s, os): - if s <= os: - return True - else: - return False - - -def check_points(count, points): - if points is None: - return False - else: - if isinstance(points, list): - return (True if count in points else False) - else: - return (True if count == points else False) - - -class Xception(): - def __init__(self, backbone="xception_65"): - self.bottleneck_params = self.gen_bottleneck_params(backbone) - self.backbone = backbone - - def gen_bottleneck_params(self, backbone='xception_65'): - if backbone == 'xception_65': - bottleneck_params = { - "entry_flow": (3, [2, 2, 2], [128, 256, 728]), - "middle_flow": (16, 1, 728), - "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, - 2048]]) - } - elif backbone == 'xception_41': - bottleneck_params = { - "entry_flow": (3, [2, 2, 2], [128, 256, 728]), - "middle_flow": (8, 1, 728), - "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, - 2048]]) - } - elif backbone == 'xception_71': - bottleneck_params = { - "entry_flow": (5, [2, 1, 2, 1, 2], [128, 256, 256, 728, 728]), - "middle_flow": (16, 1, 728), - "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, - 2048]]) - } - else: - raise Exception( - "xception backbont only support xception_41/xception_65/xception_71" - ) - return bottleneck_params - - def net(self, - input, - output_stride=32, - num_classes=1000, - end_points=None, - decode_points=None): - self.stride = 2 - self.block_point = 0 - self.output_stride = output_stride - self.decode_points = decode_points - self.short_cuts = dict() - with scope(self.backbone): - # Entry flow - data = self.entry_flow(input) - if check_points(self.block_point, end_points): - return data, self.short_cuts - - # Middle flow - data = self.middle_flow(data) - if check_points(self.block_point, end_points): - return data, self.short_cuts - - # Exit flow - data = self.exit_flow(data) - if check_points(self.block_point, end_points): - return data, self.short_cuts - - data = fluid.layers.reduce_mean(data, [2, 3], keep_dim=True) - data = fluid.layers.dropout(data, 0.5) - stdv = 1.0 / math.sqrt(data.shape[1] * 1.0) - with scope("logit"): - out = fluid.layers.fc( - input=data, - size=num_classes, - act='softmax', - param_attr=fluid.param_attr.ParamAttr( - name='weights', - initializer=fluid.initializer.Uniform(-stdv, stdv)), - bias_attr=fluid.param_attr.ParamAttr(name='bias')) - - return out - - def entry_flow(self, data): - param_attr = fluid.ParamAttr( - name=name_scope + 'weights', - regularizer=None, - initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.09)) - with scope("entry_flow"): - with scope("conv1"): - data = bn_relu( - conv( - data, 32, 3, stride=2, padding=1, - param_attr=param_attr)) - with scope("conv2"): - data = bn_relu( - conv( - data, 64, 3, stride=1, padding=1, - param_attr=param_attr)) - - # get entry flow params - block_num = self.bottleneck_params["entry_flow"][0] - strides = self.bottleneck_params["entry_flow"][1] - chns = 
self.bottleneck_params["entry_flow"][2] - strides = check_data(strides, block_num) - chns = check_data(chns, block_num) - - # params to control your flow - s = self.stride - block_point = self.block_point - output_stride = self.output_stride - with scope("entry_flow"): - for i in range(block_num): - block_point = block_point + 1 - with scope("block" + str(i + 1)): - stride = strides[i] if check_stride(s * strides[i], - output_stride) else 1 - data, short_cuts = self.xception_block( - data, chns[i], [1, 1, stride]) - s = s * stride - if check_points(block_point, self.decode_points): - self.short_cuts[block_point] = short_cuts[1] - - self.stride = s - self.block_point = block_point - return data - - def middle_flow(self, data): - block_num = self.bottleneck_params["middle_flow"][0] - strides = self.bottleneck_params["middle_flow"][1] - chns = self.bottleneck_params["middle_flow"][2] - strides = check_data(strides, block_num) - chns = check_data(chns, block_num) - - # params to control your flow - s = self.stride - block_point = self.block_point - output_stride = self.output_stride - with scope("middle_flow"): - for i in range(block_num): - block_point = block_point + 1 - with scope("block" + str(i + 1)): - stride = strides[i] if check_stride(s * strides[i], - output_stride) else 1 - data, short_cuts = self.xception_block( - data, chns[i], [1, 1, strides[i]], skip_conv=False) - s = s * stride - if check_points(block_point, self.decode_points): - self.short_cuts[block_point] = short_cuts[1] - - self.stride = s - self.block_point = block_point - return data - - def exit_flow(self, data): - block_num = self.bottleneck_params["exit_flow"][0] - strides = self.bottleneck_params["exit_flow"][1] - chns = self.bottleneck_params["exit_flow"][2] - strides = check_data(strides, block_num) - chns = check_data(chns, block_num) - - assert (block_num == 2) - # params to control your flow - s = self.stride - block_point = self.block_point - output_stride = self.output_stride - with scope("exit_flow"): - with scope('block1'): - block_point += 1 - stride = strides[0] if check_stride(s * strides[0], - output_stride) else 1 - data, short_cuts = self.xception_block(data, chns[0], - [1, 1, stride]) - s = s * stride - if check_points(block_point, self.decode_points): - self.short_cuts[block_point] = short_cuts[1] - with scope('block2'): - block_point += 1 - stride = strides[1] if check_stride(s * strides[1], - output_stride) else 1 - data, short_cuts = self.xception_block( - data, - chns[1], [1, 1, stride], - dilation=2, - has_skip=False, - activation_fn_in_separable_conv=True) - s = s * stride - if check_points(block_point, self.decode_points): - self.short_cuts[block_point] = short_cuts[1] - - self.stride = s - self.block_point = block_point - return data - - def xception_block(self, - input, - channels, - strides=1, - filters=3, - dilation=1, - skip_conv=True, - has_skip=True, - activation_fn_in_separable_conv=False): - repeat_number = 3 - channels = check_data(channels, repeat_number) - filters = check_data(filters, repeat_number) - strides = check_data(strides, repeat_number) - data = input - results = [] - for i in range(repeat_number): - with scope('separable_conv' + str(i + 1)): - if not activation_fn_in_separable_conv: - data = relu(data) - data = separate_conv( - data, - channels[i], - strides[i], - filters[i], - dilation=dilation) - else: - data = separate_conv( - data, - channels[i], - strides[i], - filters[i], - dilation=dilation, - act=relu) - results.append(data) - if not has_skip: - return data, 
results - if skip_conv: - param_attr = fluid.ParamAttr( - name=name_scope + 'weights', - regularizer=None, - initializer=fluid.initializer.TruncatedNormal( - loc=0.0, scale=0.09)) - with scope('shortcut'): - skip = bn( - conv( - input, - channels[-1], - 1, - strides[-1], - groups=1, - padding=0, - param_attr=param_attr)) - else: - skip = input - return data + skip, results - - -def xception_65(): - model = Xception("xception_65") - return model - - -def xception_41(): - model = Xception("xception_41") - return model - - -def xception_71(): - model = Xception("xception_71") - return model - - -if __name__ == '__main__': - image_shape = [-1, 3, 224, 224] - image = fluid.data(name='image', shape=image_shape, dtype='float32') - model = xception_65() - logit = model.net(image) diff --git a/legacy/pdseg/models/libs/model_libs.py b/legacy/pdseg/models/libs/model_libs.py index ade921d75b..f41f593b85 100644 --- a/legacy/pdseg/models/libs/model_libs.py +++ b/legacy/pdseg/models/libs/model_libs.py @@ -17,11 +17,12 @@ from __future__ import division from __future__ import print_function import paddle -import paddle.fluid as fluid +import paddle.static as static +import paddle.nn.functional as F from utils.config import cfg import contextlib -bn_regularizer = fluid.regularizer.L2DecayRegularizer(regularization_coeff=0.0) +bn_regularizer = paddle.regularizer.L2Decay(coeff=0.0) name_scope = "" @@ -35,22 +36,13 @@ def scope(name): def max_pool(input, kernel, stride, padding): - data = fluid.layers.pool2d( - input, - pool_size=kernel, - pool_type='max', - pool_stride=stride, - pool_padding=padding) + data = F.max_pool2d( + input, kernel_size=kernel, stride=stride, padding=padding) return data def avg_pool(input, kernel, stride, padding=0): - data = fluid.layers.pool2d( - input, - pool_size=kernel, - pool_type='avg', - pool_stride=stride, - pool_padding=padding) + data = F.avg_pool(input, kernel_size=kernel, stride=stride, padding=padding) return data @@ -68,7 +60,7 @@ def group_norm(input, G, eps=1e-5, param_attr=None, bias_attr=None): # print "use group size:", G break assert C % G == 0 - x = fluid.layers.group_norm( + x = static.nn.group_norm( input, groups=G, param_attr=param_attr, @@ -80,13 +72,13 @@ def group_norm(input, G, eps=1e-5, param_attr=None, bias_attr=None): def bn(*args, **kargs): if cfg.MODEL.DEFAULT_NORM_TYPE == 'bn': with scope('BatchNorm'): - return fluid.layers.batch_norm( + return static.nn.batch_norm( *args, epsilon=cfg.MODEL.DEFAULT_EPSILON, momentum=cfg.MODEL.BN_MOMENTUM, - param_attr=fluid.ParamAttr( + param_attr=paddle.ParamAttr( name=name_scope + 'gamma', regularizer=bn_regularizer), - bias_attr=fluid.ParamAttr( + bias_attr=paddle.ParamAttr( name=name_scope + 'beta', regularizer=bn_regularizer), moving_mean_name=name_scope + 'moving_mean', moving_variance_name=name_scope + 'moving_variance', @@ -97,36 +89,36 @@ def bn(*args, **kargs): args[0], cfg.MODEL.DEFAULT_GROUP_NUMBER, eps=cfg.MODEL.DEFAULT_EPSILON, - param_attr=fluid.ParamAttr( + param_attr=paddle.ParamAttr( name=name_scope + 'gamma', regularizer=bn_regularizer), - bias_attr=fluid.ParamAttr( + bias_attr=paddle.ParamAttr( name=name_scope + 'beta', regularizer=bn_regularizer)) else: raise Exception("Unsupport norm type:" + cfg.MODEL.DEFAULT_NORM_TYPE) def bn_relu(data): - return fluid.layers.relu(bn(data)) + return F.relu(bn(data)) def qsigmoid(data): - return fluid.layers.relu6(data + 3) * 0.16667 + return F.relu6(data + 3) * 0.16667 def relu(data): - return fluid.layers.relu(data) + return F.relu(data) def conv(*args, 
**kargs): kargs['param_attr'] = name_scope + 'weights' if 'bias_attr' in kargs and kargs['bias_attr']: - kargs['bias_attr'] = fluid.ParamAttr( + kargs['bias_attr'] = paddle.ParamAttr( name=name_scope + 'biases', regularizer=None, - initializer=fluid.initializer.ConstantInitializer(value=0.0)) + initializer=paddle.nn.initializer.Constant(value=0.0)) else: kargs['bias_attr'] = False - return fluid.layers.conv2d(*args, **kargs) + return static.nn.conv2d(*args, **kargs) def deconv(*args, **kargs): @@ -135,15 +127,14 @@ def deconv(*args, **kargs): kargs['bias_attr'] = name_scope + 'biases' else: kargs['bias_attr'] = False - return fluid.layers.conv2d_transpose(*args, **kargs) + return static.nn.conv2d_transpose(*args, **kargs) def separate_conv(input, channel, stride, filter, dilation=1, act=None): - param_attr = fluid.ParamAttr( + param_attr = paddle.ParamAttr( name=name_scope + 'weights', - regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.0), - initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.33)) + regularizer=paddle.regularizer.L2Decay(coeff=0.0), + initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=0.06)) with scope('depthwise'): input = conv( input, @@ -158,10 +149,10 @@ def separate_conv(input, channel, stride, filter, dilation=1, act=None): input = bn(input) if act: input = act(input) - param_attr = fluid.ParamAttr( + param_attr = paddle.ParamAttr( name=name_scope + 'weights', regularizer=None, - initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.06)) + initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=0.33)) with scope('pointwise'): input = conv( input, channel, 1, 1, groups=1, padding=0, param_attr=param_attr) @@ -180,7 +171,7 @@ def conv_bn_layer(input, if_act=True, name=None, use_cudnn=True): - conv = fluid.layers.conv2d( + conv = static.nn.conv2d( input=input, num_filters=num_filters, filter_size=filter_size, @@ -189,16 +180,16 @@ def conv_bn_layer(input, groups=num_groups, act=None, use_cudnn=use_cudnn, - param_attr=fluid.ParamAttr(name=name + '_weights'), + param_attr=paddle.ParamAttr(name=name + '_weights'), bias_attr=False) bn_name = name + '_bn' - bn = fluid.layers.batch_norm( + bn = static.nn.batch_norm( input=conv, - param_attr=fluid.ParamAttr(name=bn_name + "_scale"), - bias_attr=fluid.ParamAttr(name=bn_name + "_offset"), + param_attr=paddle.ParamAttr(name=bn_name + "_scale"), + bias_attr=paddle.ParamAttr(name=bn_name + "_offset"), moving_mean_name=bn_name + '_mean', moving_variance_name=bn_name + '_variance') if if_act: - return fluid.layers.relu6(bn) + return F.relu6(bn) else: return bn diff --git a/legacy/pdseg/models/model_builder.py b/legacy/pdseg/models/model_builder.py index 49650295af..80566218d1 100644 --- a/legacy/pdseg/models/model_builder.py +++ b/legacy/pdseg/models/model_builder.py @@ -15,17 +15,16 @@ import struct -import paddle.fluid as fluid +import paddle +import paddle.static as static +import paddle.nn.functional as F import numpy as np from paddle.fluid.proto.framework_pb2 import VarType import solver from utils.config import cfg from loss import multi_softmax_with_loss -from loss import multi_dice_loss -from loss import multi_bce_loss -from lovasz_losses import multi_lovasz_hinge_loss, multi_lovasz_softmax_loss -from models.modeling import deeplab, unet, icnet, pspnet, hrnet, fast_scnn, ocrnet +from models.modeling import deeplab, hrnet class ModelPhase(object): @@ -72,20 +71,10 @@ def is_valid_phase(phase): def seg_model(image, class_num): model_name = cfg.MODEL.MODEL_NAME 
- if model_name == 'unet': - logits = unet.unet(image, class_num) - elif model_name == 'deeplabv3p': + if model_name == 'deeplabv3p': logits = deeplab.deeplabv3p(image, class_num) - elif model_name == 'icnet': - logits = icnet.icnet(image, class_num) - elif model_name == 'pspnet': - logits = pspnet.pspnet(image, class_num) elif model_name == 'hrnet': logits = hrnet.hrnet(image, class_num) - elif model_name == 'fast_scnn': - logits = fast_scnn.fast_scnn(image, class_num) - elif model_name == 'ocrnet': - logits = ocrnet.ocrnet(image, class_num) else: raise Exception( "unknow model name, only support unet, deeplabv3p, icnet, pspnet, hrnet, fast_scnn" @@ -94,9 +83,9 @@ def seg_model(image, class_num): def softmax(logit): - logit = fluid.layers.transpose(logit, [0, 2, 3, 1]) - logit = fluid.layers.softmax(logit) - logit = fluid.layers.transpose(logit, [0, 3, 1, 2]) + logit = paddle.transpose(logit, [0, 2, 3, 1]) + logit = F.softmax(logit) + logit = paddle.transpose(logit, [0, 3, 1, 2]) return logit @@ -104,11 +93,11 @@ def sigmoid_to_softmax(logit): """ one channel to two channel """ - logit = fluid.layers.transpose(logit, [0, 2, 3, 1]) - logit = fluid.layers.sigmoid(logit) + logit = paddle.transpose(logit, [0, 2, 3, 1]) + logit = F.sigmoid(logit) logit_back = 1 - logit - logit = fluid.layers.concat([logit_back, logit], axis=-1) - logit = fluid.layers.transpose(logit, [0, 3, 1, 2]) + logit = paddle.concat([logit_back, logit], axis=-1) + logit = paddle.transpose(logit, [0, 3, 1, 2]) return logit @@ -126,17 +115,18 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN): grt_shape = [-1, 1, height, width] class_num = cfg.DATASET.NUM_CLASSES - with fluid.program_guard(main_prog, start_prog): - with fluid.unique_name.guard(): + with static.program_guard(main_prog, start_prog): + with paddle.utils.unique_name.guard(): # 在导出模型的时候,增加图像标准化预处理,减小预测部署时图像的处理流程 # 预测部署时只须对输入图像增加batch_size维度即可 - image = fluid.data(name='image', shape=image_shape, dtype='float32') - label = fluid.data(name='label', shape=grt_shape, dtype='int32') - mask = fluid.data(name='mask', shape=grt_shape, dtype='int32') + image = static.data( + name='image', shape=image_shape, dtype='float32') + label = static.data(name='label', shape=grt_shape, dtype='int32') + mask = static.data(name='mask', shape=grt_shape, dtype='int32') # use DataLoader when doing traning and evaluation if ModelPhase.is_train(phase) or ModelPhase.is_eval(phase): - data_loader = fluid.io.DataLoader.from_generator( + data_loader = paddle.io.DataLoader.from_generator( feed_list=[image, label, mask], capacity=cfg.DATALOADER.BUF_SIZE, iterable=False, @@ -178,24 +168,6 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN): weight)) loss_valid = True valid_loss.append("softmax_loss") - if "dice_loss" in loss_type: - avg_loss_list.append(multi_dice_loss(logits, label, mask)) - loss_valid = True - valid_loss.append("dice_loss") - if "bce_loss" in loss_type: - avg_loss_list.append(multi_bce_loss(logits, label, mask)) - loss_valid = True - valid_loss.append("bce_loss") - if "lovasz_hinge_loss" in loss_type: - avg_loss_list.append( - multi_lovasz_hinge_loss(logits, label, mask)) - loss_valid = True - valid_loss.append("lovasz_hinge_loss") - if "lovasz_softmax_loss" in loss_type: - avg_loss_list.append( - multi_lovasz_softmax_loss(logits, label, mask)) - loss_valid = True - valid_loss.append("lovasz_softmax_loss") if not loss_valid: raise Exception( "SOLVER.LOSS: {} is set wrong. 
it should " @@ -222,7 +194,8 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN): logit = logits if logit.shape[2:] != label.shape[2:]: - logit = fluid.layers.resize_bilinear(logit, label.shape[2:]) + logit = F.interpolate( + logit, label.shape[2:], mode='bilinear', align_corners=True) # return image input and logit output for inference graph prune if ModelPhase.is_predict(phase): @@ -236,12 +209,12 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN): if class_num == 1: out = sigmoid_to_softmax(logit) - out = fluid.layers.transpose(out, [0, 2, 3, 1]) + out = paddle.transpose(out, [0, 2, 3, 1]) else: - out = fluid.layers.transpose(logit, [0, 2, 3, 1]) + out = paddle.transpose(logit, [0, 2, 3, 1]) - pred = fluid.layers.argmax(out, axis=3) - pred = fluid.layers.unsqueeze(pred, axes=[3]) + pred = paddle.argmax(out, axis=3) + pred = paddle.unsqueeze(pred, axis=[3]) if ModelPhase.is_visual(phase): if class_num == 1: logit = sigmoid_to_softmax(logit) diff --git a/legacy/pdseg/models/modeling/deeplab.py b/legacy/pdseg/models/modeling/deeplab.py index 4809036a2c..c3eb46151a 100644 --- a/legacy/pdseg/models/modeling/deeplab.py +++ b/legacy/pdseg/models/modeling/deeplab.py @@ -18,15 +18,14 @@ from __future__ import print_function import contextlib import paddle -import paddle.fluid as fluid +import paddle.static as static +import paddle.static.nn as nn +import paddle.nn.functional as F from utils.config import cfg from models.libs.model_libs import scope, name_scope from models.libs.model_libs import bn, bn_relu, relu, qsigmoid from models.libs.model_libs import conv from models.libs.model_libs import separate_conv -from models.backbone.mobilenet_v2 import MobileNetV2 as mobilenet_v2_backbone -from models.backbone.mobilenet_v3 import MobileNetV3 as mobilenet_v3_backbone -from models.backbone.xception import Xception as xception_backbone from models.backbone.resnet_vd import ResNet as resnet_vd_backbone @@ -46,29 +45,27 @@ def encoder(input): else: aspp_ratios = cfg.MODEL.DEEPLAB.ENCODER.ASPP_RATIOS - param_attr = fluid.ParamAttr( + param_attr = paddle.ParamAttr( name=name_scope + 'weights', regularizer=None, - initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.06)) + initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=0.06)) concat_logits = [] with scope('encoder'): channel = cfg.MODEL.DEEPLAB.ENCODER.ASPP_CONVS_FILTERS with scope("image_pool"): if not cfg.MODEL.DEEPLAB.ENCODER.POOLING_CROP_SIZE: - image_avg = fluid.layers.reduce_mean( - input, [2, 3], keep_dim=True) + image_avg = paddle.mean(input, [2, 3], keepdim=True) else: pool_w = int((cfg.MODEL.DEEPLAB.ENCODER.POOLING_CROP_SIZE[0] - 1.0) / cfg.MODEL.DEEPLAB.OUTPUT_STRIDE + 1.0) pool_h = int((cfg.MODEL.DEEPLAB.ENCODER.POOLING_CROP_SIZE[1] - 1.0) / cfg.MODEL.DEEPLAB.OUTPUT_STRIDE + 1.0) - image_avg = fluid.layers.pool2d( + iamge_avg = F.avg_pool2d( input, - pool_size=(pool_h, pool_w), - pool_stride=cfg.MODEL.DEEPLAB.ENCODER.POOLING_STRIDE, - pool_type='avg', - pool_padding='VALID') + kernel_size=(pool_h, pool_w), + stride=cfg.MODEL.DEEPLAB.ENCODER.POOLING_STRIDE, + padding='VALID') act = qsigmoid if cfg.MODEL.DEEPLAB.ENCODER.SE_USE_QSIGMOID else bn_relu image_avg = act( @@ -80,7 +77,8 @@ def encoder(input): groups=1, padding=0, param_attr=param_attr)) - image_avg = fluid.layers.resize_bilinear(image_avg, input.shape[2:]) + image_avg = F.interpolate( + image_avg, input.shape[2:], mode='bilinear', align_corners=True) if cfg.MODEL.DEEPLAB.ENCODER.ADD_IMAGE_LEVEL_FEATURE: 
concat_logits.append(image_avg) @@ -144,7 +142,7 @@ def encoder(input): concat_logits.append(aspp3) with scope("concat"): - data = fluid.layers.concat(concat_logits, axis=1) + data = paddle.concat(concat_logits, axis=1) if cfg.MODEL.DEEPLAB.ENCODER.ASPP_WITH_CONCAT_PROJECTION: data = bn_relu( conv( @@ -155,7 +153,7 @@ def encoder(input): groups=1, padding=0, param_attr=param_attr)) - data = fluid.layers.dropout(data, 0.9) + data = F.dropout(data, 0.1, mode='downscale_in_infer') if cfg.MODEL.DEEPLAB.ENCODER.ASPP_WITH_SE: data = data * image_avg @@ -163,8 +161,11 @@ def encoder(input): def _decoder_with_sum_merge(encode_data, decode_shortcut, param_attr): - encode_data = fluid.layers.resize_bilinear(encode_data, - decode_shortcut.shape[2:]) + encode_data = F.interpolate( + encode_data, + decode_shortcut.shape[2:], + mode='bilinear', + align_corners=True) encode_data = conv( encode_data, cfg.MODEL.DEEPLAB.DECODER.CONV_FILTERS, @@ -199,10 +200,12 @@ def _decoder_with_concat(encode_data, decode_shortcut, param_attr): padding=0, param_attr=param_attr)) - encode_data = fluid.layers.resize_bilinear(encode_data, - decode_shortcut.shape[2:]) - encode_data = fluid.layers.concat([encode_data, decode_shortcut], - axis=1) + encode_data = F.interpolate( + encode_data, + decode_shortcut.shape[2:], + mode='bilinear', + align_corners=True) + encode_data = paddle.concat([encode_data, decode_shortcut], axis=1) if cfg.MODEL.DEEPLAB.DECODER_USE_SEP_CONV: with scope("separable_conv1"): encode_data = separate_conv( @@ -249,10 +252,10 @@ def decoder(encode_data, decode_shortcut): # encode_data:编码器输出 # decode_shortcut: 从backbone引出的分支, resize后与encode_data concat # DECODER_USE_SEP_CONV: 默认为真,则concat后连接两个可分离卷积,否则为普通卷积 - param_attr = fluid.ParamAttr( + param_attr = paddle.ParamAttr( name=name_scope + 'weights', regularizer=None, - initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.06)) + initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=0.06)) with scope('decoder'): if cfg.MODEL.DEEPLAB.DECODER.USE_SUM_MERGE: return _decoder_with_sum_merge(encode_data, decode_shortcut, @@ -261,78 +264,6 @@ def decoder(encode_data, decode_shortcut): return _decoder_with_concat(encode_data, decode_shortcut, param_attr) -def mobilenet(input): - if 'v3' in cfg.MODEL.DEEPLAB.BACKBONE: - model_name = 'large' if 'large' in cfg.MODEL.DEEPLAB.BACKBONE else 'small' - return _mobilenetv3(input, model_name) - return _mobilenetv2(input) - - -def _mobilenetv3(input, model_name='large'): - # Backbone: mobilenetv3结构配置 - # DEPTH_MULTIPLIER: mobilenetv3的scale设置,默认1.0 - # OUTPUT_STRIDE:下采样倍数 - scale = cfg.MODEL.DEEPLAB.DEPTH_MULTIPLIER - output_stride = cfg.MODEL.DEEPLAB.OUTPUT_STRIDE - lr_mult_list = cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST - if lr_mult_list is None: - lr_mult_list = [1.0, 1.0, 1.0, 1.0, 1.0] - model = mobilenet_v3_backbone( - scale=scale, - output_stride=output_stride, - model_name=model_name, - lr_mult_list=lr_mult_list) - data, decode_shortcut = model.net(input) - return data, decode_shortcut - - -def _mobilenetv2(input): - # Backbone: mobilenetv2结构配置 - # DEPTH_MULTIPLIER: mobilenetv2的scale设置,默认1.0 - # OUTPUT_STRIDE:下采样倍数 - # end_points: mobilenetv2的block数 - # decode_point: 从mobilenetv2中引出分支所在block数, 作为decoder输入 - if cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST is not None: - print( - 'mobilenetv2 backbone do not support BACKBONE_LR_MULT_LIST setting') - - scale = cfg.MODEL.DEEPLAB.DEPTH_MULTIPLIER - output_stride = cfg.MODEL.DEEPLAB.OUTPUT_STRIDE - model = mobilenet_v2_backbone(scale=scale, 
output_stride=output_stride) - end_points = 18 - decode_point = 4 - data, decode_shortcuts = model.net( - input, end_points=end_points, decode_points=decode_point) - decode_shortcut = decode_shortcuts[decode_point] - return data, decode_shortcut - - -def xception(input): - # Backbone: Xception结构配置, xception_65, xception_41, xception_71三种可选 - # decode_point: 从Xception中引出分支所在block数,作为decoder输入 - # end_point:Xception的block数 - cfg.MODEL.DEFAULT_EPSILON = 1e-3 - model = xception_backbone(cfg.MODEL.DEEPLAB.BACKBONE) - backbone = cfg.MODEL.DEEPLAB.BACKBONE - output_stride = cfg.MODEL.DEEPLAB.OUTPUT_STRIDE - if '65' in backbone: - decode_point = 2 - end_points = 21 - if '41' in backbone: - decode_point = 2 - end_points = 13 - if '71' in backbone: - decode_point = 3 - end_points = 23 - data, decode_shortcuts = model.net( - input, - output_stride=output_stride, - end_points=end_points, - decode_points=decode_point) - decode_shortcut = decode_shortcuts[decode_point] - return data, decode_shortcut - - def resnet_vd(input): # backbone: resnet_vd, 可选resnet50_vd, resnet101_vd # end_points: resnet终止层数 @@ -370,19 +301,10 @@ def resnet_vd(input): def deeplabv3p(img, num_classes): # Backbone设置:xception 或 mobilenetv2 - if 'xception' in cfg.MODEL.DEEPLAB.BACKBONE: - data, decode_shortcut = xception(img) - if cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST is not None: - print( - 'xception backbone do not support BACKBONE_LR_MULT_LIST setting' - ) - elif 'mobilenet' in cfg.MODEL.DEEPLAB.BACKBONE: - data, decode_shortcut = mobilenet(img) - elif 'resnet' in cfg.MODEL.DEEPLAB.BACKBONE: + if 'resnet' in cfg.MODEL.DEEPLAB.BACKBONE: data, decode_shortcut = resnet_vd(img) else: - raise Exception( - "deeplab only support xception, mobilenet, and resnet_vd backbone") + raise Exception("deeplab only support resnet_vd backbone") # 编码器解码器设置 cfg.MODEL.DEFAULT_EPSILON = 1e-5 @@ -392,15 +314,14 @@ def deeplabv3p(img, num_classes): data = decoder(data, decode_shortcut) # 根据类别数设置最后一个卷积层输出,并resize到图片原始尺寸 - param_attr = fluid.ParamAttr( + param_attr = paddle.ParamAttr( name=name_scope + 'weights', - regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.0), - initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.01)) + regularizer=paddle.regularizer.L2Decay(coeff=0.0), + initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=0.01)) if not cfg.MODEL.DEEPLAB.DECODER.OUTPUT_IS_LOGITS: with scope('logit'): - with fluid.name_scope('last_conv'): + with static.name_scope('last_conv'): logit = conv( data, num_classes, @@ -412,5 +333,6 @@ def deeplabv3p(img, num_classes): else: logit = data - logit = fluid.layers.resize_bilinear(logit, img.shape[2:]) + logit = F.interpolate( + logit, img.shape[2:], mode='bilinear', align_corners=True) return logit diff --git a/legacy/pdseg/models/modeling/fast_scnn.py b/legacy/pdseg/models/modeling/fast_scnn.py deleted file mode 100644 index 99019dc6e0..0000000000 --- a/legacy/pdseg/models/modeling/fast_scnn.py +++ /dev/null @@ -1,304 +0,0 @@ -# coding: utf8 -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import paddle.fluid as fluid -from models.libs.model_libs import scope -from models.libs.model_libs import bn, bn_relu, relu, conv_bn_layer -from models.libs.model_libs import conv, avg_pool -from models.libs.model_libs import separate_conv -from utils.config import cfg - - -def learning_to_downsample(x, dw_channels1=32, dw_channels2=48, - out_channels=64): - x = relu(bn(conv(x, dw_channels1, 3, 2))) - with scope('dsconv1'): - x = separate_conv( - x, dw_channels2, stride=2, filter=3, act=fluid.layers.relu) - with scope('dsconv2'): - x = separate_conv( - x, out_channels, stride=2, filter=3, act=fluid.layers.relu) - return x - - -def shortcut(input, data_residual): - return fluid.layers.elementwise_add(input, data_residual) - - -def dropout2d(input, prob, is_train=False): - if not is_train: - return input - channels = input.shape[1] - keep_prob = 1.0 - prob - shape = fluid.layers.shape(input) - random_tensor = keep_prob + fluid.layers.uniform_random( - [shape[0], channels, 1, 1], min=0., max=1.) - binary_tensor = fluid.layers.floor(random_tensor) - output = input / keep_prob * binary_tensor - return output - - -def inverted_residual_unit(input, - num_in_filter, - num_filters, - ifshortcut, - stride, - filter_size, - padding, - expansion_factor, - name=None): - num_expfilter = int(round(num_in_filter * expansion_factor)) - - channel_expand = conv_bn_layer( - input=input, - num_filters=num_expfilter, - filter_size=1, - stride=1, - padding=0, - num_groups=1, - if_act=True, - name=name + '_expand') - - bottleneck_conv = conv_bn_layer( - input=channel_expand, - num_filters=num_expfilter, - filter_size=filter_size, - stride=stride, - padding=padding, - num_groups=num_expfilter, - if_act=True, - name=name + '_dwise', - use_cudnn=False) - - depthwise_output = bottleneck_conv - - linear_out = conv_bn_layer( - input=bottleneck_conv, - num_filters=num_filters, - filter_size=1, - stride=1, - padding=0, - num_groups=1, - if_act=False, - name=name + '_linear') - - if ifshortcut: - out = shortcut(input=input, data_residual=linear_out) - return out, depthwise_output - else: - return linear_out, depthwise_output - - -def inverted_blocks(input, in_c, t, c, n, s, name=None): - first_block, depthwise_output = inverted_residual_unit( - input=input, - num_in_filter=in_c, - num_filters=c, - ifshortcut=False, - stride=s, - filter_size=3, - padding=1, - expansion_factor=t, - name=name + '_1') - - last_residual_block = first_block - last_c = c - - for i in range(1, n): - last_residual_block, depthwise_output = inverted_residual_unit( - input=last_residual_block, - num_in_filter=last_c, - num_filters=c, - ifshortcut=True, - stride=1, - filter_size=3, - padding=1, - expansion_factor=t, - name=name + '_' + str(i + 1)) - return last_residual_block, depthwise_output - - -def psp_module(input, out_features): - - cat_layers = [] - sizes = (1, 2, 3, 6) - for size in sizes: - psp_name = "psp" + str(size) - with scope(psp_name): - pool = fluid.layers.adaptive_pool2d( - input, - pool_size=[size, size], - pool_type='avg', - name=psp_name + '_adapool') - data = conv( - pool, - out_features, - filter_size=1, - bias_attr=False, - name=psp_name + '_conv') - data_bn = bn(data, act='relu') - interp = fluid.layers.resize_bilinear( - data_bn, - out_shape=input.shape[2:], - name=psp_name + '_interp', - align_mode=0) - 
cat_layers.append(interp) - cat_layers = [input] + cat_layers - out = fluid.layers.concat(cat_layers, axis=1, name='psp_cat') - - return out - - -class FeatureFusionModule: - """Feature fusion module""" - - def __init__(self, - higher_in_channels, - lower_in_channels, - out_channels, - scale_factor=4): - self.higher_in_channels = higher_in_channels - self.lower_in_channels = lower_in_channels - self.out_channels = out_channels - self.scale_factor = scale_factor - - def net(self, higher_res_feature, lower_res_feature): - h, w = higher_res_feature.shape[2:] - lower_res_feature = fluid.layers.resize_bilinear( - lower_res_feature, [h, w], align_mode=0) - - with scope('dwconv'): - lower_res_feature = relu( - bn(conv(lower_res_feature, self.out_channels, - 1))) #(lower_res_feature) - with scope('conv_lower_res'): - lower_res_feature = bn( - conv(lower_res_feature, self.out_channels, 1, bias_attr=True)) - with scope('conv_higher_res'): - higher_res_feature = bn( - conv(higher_res_feature, self.out_channels, 1, bias_attr=True)) - out = higher_res_feature + lower_res_feature - - return relu(out) - - -class GlobalFeatureExtractor(): - """Global feature extractor module""" - - def __init__(self, - in_channels=64, - block_channels=(64, 96, 128), - out_channels=128, - t=6, - num_blocks=(3, 3, 3)): - self.in_channels = in_channels - self.block_channels = block_channels - self.out_channels = out_channels - self.t = t - self.num_blocks = num_blocks - - def net(self, x): - x, _ = inverted_blocks(x, self.in_channels, self.t, - self.block_channels[0], self.num_blocks[0], 2, - 'inverted_block_1') - x, _ = inverted_blocks(x, self.block_channels[0], self.t, - self.block_channels[1], self.num_blocks[1], 2, - 'inverted_block_2') - x, _ = inverted_blocks(x, self.block_channels[1], self.t, - self.block_channels[2], self.num_blocks[2], 1, - 'inverted_block_3') - x = psp_module(x, self.block_channels[2] // 4) - with scope('out'): - x = relu(bn(conv(x, self.out_channels, 1))) - return x - - -class Classifier: - """Classifier""" - - def __init__(self, dw_channels, num_classes, stride=1): - self.dw_channels = dw_channels - self.num_classes = num_classes - self.stride = stride - - def net(self, x): - with scope('dsconv1'): - x = separate_conv( - x, - self.dw_channels, - stride=self.stride, - filter=3, - act=fluid.layers.relu) - with scope('dsconv2'): - x = separate_conv( - x, - self.dw_channels, - stride=self.stride, - filter=3, - act=fluid.layers.relu) - - x = dropout2d(x, 0.1, is_train=cfg.PHASE == 'train') - x = conv(x, self.num_classes, 1, bias_attr=True) - return x - - -def aux_layer(x, num_classes): - x = relu(bn(conv(x, 32, 3, padding=1))) - x = dropout2d(x, 0.1, is_train=(cfg.PHASE == 'train')) - with scope('logit'): - x = conv(x, num_classes, 1, bias_attr=True) - return x - - -def fast_scnn(img, num_classes): - size = img.shape[2:] - classifier = Classifier(128, num_classes) - - global_feature_extractor = GlobalFeatureExtractor(64, [64, 96, 128], 128, 6, - [3, 3, 3]) - feature_fusion = FeatureFusionModule(64, 128, 128) - - with scope('learning_to_downsample'): - higher_res_features = learning_to_downsample(img, 32, 48, 64) - with scope('global_feature_extractor'): - lower_res_feature = global_feature_extractor.net(higher_res_features) - with scope('feature_fusion'): - x = feature_fusion.net(higher_res_features, lower_res_feature) - with scope('classifier'): - logit = classifier.net(x) - logit = fluid.layers.resize_bilinear(logit, size, align_mode=0) - - if len(cfg.MODEL.MULTI_LOSS_WEIGHT) == 3: - with 
scope('aux_layer_higher'): - higher_logit = aux_layer(higher_res_features, num_classes) - higher_logit = fluid.layers.resize_bilinear( - higher_logit, size, align_mode=0) - with scope('aux_layer_lower'): - lower_logit = aux_layer(lower_res_feature, num_classes) - lower_logit = fluid.layers.resize_bilinear( - lower_logit, size, align_mode=0) - return logit, higher_logit, lower_logit - elif len(cfg.MODEL.MULTI_LOSS_WEIGHT) == 2: - with scope('aux_layer_higher'): - higher_logit = aux_layer(higher_res_features, num_classes) - higher_logit = fluid.layers.resize_bilinear( - higher_logit, size, align_mode=0) - return logit, higher_logit - - return logit diff --git a/legacy/pdseg/models/modeling/hrnet.py b/legacy/pdseg/models/modeling/hrnet.py index 19d687ab02..a79f88fc15 100644 --- a/legacy/pdseg/models/modeling/hrnet.py +++ b/legacy/pdseg/models/modeling/hrnet.py @@ -18,9 +18,8 @@ from __future__ import print_function import paddle -import paddle.fluid as fluid -from paddle.fluid.initializer import MSRA -from paddle.fluid.param_attr import ParamAttr +import paddle.static.nn as nn +import paddle.nn.functional as F from utils.config import cfg @@ -33,7 +32,7 @@ def conv_bn_layer(input, num_groups=1, if_act=True, name=None): - conv = fluid.layers.conv2d( + conv = nn.conv2d( input=input, num_filters=num_filters, filter_size=filter_size, @@ -41,21 +40,23 @@ def conv_bn_layer(input, padding=(filter_size - 1) // 2, groups=num_groups, act=None, - param_attr=ParamAttr(initializer=MSRA(), name=name + '_weights'), + param_attr=paddle.ParamAttr( + initializer=paddle.nn.initializer.KaimingUniform(), + name=name + '_weights'), bias_attr=False) bn_name = name + '_bn' - bn = fluid.layers.batch_norm( + bn = nn.batch_norm( input=conv, - param_attr=ParamAttr( + param_attr=paddle.ParamAttr( name=bn_name + "_scale", - initializer=fluid.initializer.Constant(1.0)), - bias_attr=ParamAttr( + initializer=paddle.nn.initializer.Constant(1.0)), + bias_attr=paddle.ParamAttr( name=bn_name + "_offset", - initializer=fluid.initializer.Constant(0.0)), + initializer=paddle.nn.initializer.Constant(0.0)), moving_mean_name=bn_name + '_mean', moving_variance_name=bn_name + '_variance') if if_act: - bn = fluid.layers.relu(bn) + bn = F.relu(bn) return bn @@ -80,7 +81,7 @@ def basic_block(input, num_filters, stride=1, downsample=False, name=None): num_filters=num_filters, if_act=False, name=name + '_downsample') - return fluid.layers.elementwise_add(x=residual, y=conv, act='relu') + return F.relu(residual + conv) def bottleneck_block(input, num_filters, stride=1, downsample=False, name=None): @@ -109,7 +110,7 @@ def bottleneck_block(input, num_filters, stride=1, downsample=False, name=None): num_filters=num_filters * 4, if_act=False, name=name + '_downsample') - return fluid.layers.elementwise_add(x=residual, y=conv, act='relu') + return F.relu(residual + conv) def fuse_layers(x, channels, multi_scale_output=True, name=None): @@ -127,10 +128,12 @@ def fuse_layers(x, channels, multi_scale_output=True, name=None): num_filters=channels[i], if_act=False, name=name + '_layer_' + str(i + 1) + '_' + str(j + 1)) - y = fluid.layers.resize_bilinear( - input=y, out_shape=[height, width]) - residual = fluid.layers.elementwise_add( - x=residual, y=y, act=None) + y = F.interpolate( + y, + size=[height, width], + mode='bilinear', + align_corners=True) + residual = residual + y elif j < i: y = x[j] for k in range(i - j): @@ -151,10 +154,9 @@ def fuse_layers(x, channels, multi_scale_output=True, name=None): stride=2, name=name + '_layer_' + str(i + 1) + 
'_' + str(j + 1) + '_' + str(k + 1)) - residual = fluid.layers.elementwise_add( - x=residual, y=y, act=None) + residual = residual + y - residual = fluid.layers.relu(residual) + residual = F.relu(residual) out.append(residual) return out @@ -268,11 +270,14 @@ def high_resolution_net(input, num_classes): # upsample shape = st4[0].shape height, width = shape[-2], shape[-1] - st4[1] = fluid.layers.resize_bilinear(st4[1], out_shape=[height, width]) - st4[2] = fluid.layers.resize_bilinear(st4[2], out_shape=[height, width]) - st4[3] = fluid.layers.resize_bilinear(st4[3], out_shape=[height, width]) - - out = fluid.layers.concat(st4, axis=1) + st4[1] = F.interpolate( + st4[1], size=[height, width], mode='bilinear', align_corners=True) + st4[2] = F.interpolate( + st4[2], size=[height, width], mode='bilinear', align_corners=True) + st4[3] = F.interpolate( + st4[3], size=[height, width], mode='bilinear', align_corners=True) + + out = paddle.concat(st4, axis=1) last_channels = sum(channels_4) out = conv_bn_layer( @@ -282,17 +287,20 @@ def high_resolution_net(input, num_classes): stride=1, if_act=True, name='conv-2') - out = fluid.layers.conv2d( + out = nn.conv2d( input=out, num_filters=num_classes, filter_size=1, stride=1, padding=0, act=None, - param_attr=ParamAttr(initializer=MSRA(), name='conv-1_weights'), + param_attr=paddle.ParamAttr( + initializer=paddle.nn.initializer.KaimingUniform(), + name='conv-1_weights'), bias_attr=False) - out = fluid.layers.resize_bilinear(out, input.shape[2:]) + out = F.interpolate( + out, size=input.shape[2:], mode='bilinear', align_corners=True) return out @@ -304,6 +312,6 @@ def hrnet(input, num_classes): if __name__ == '__main__': image_shape = [-1, 3, 769, 769] - image = fluid.data(name='image', shape=image_shape, dtype='float32') + image = nn.data(name='image', shape=image_shape, dtype='float32') logit = hrnet(image, 4) print("logit:", logit.shape) diff --git a/legacy/pdseg/models/modeling/icnet.py b/legacy/pdseg/models/modeling/icnet.py deleted file mode 100644 index aee0461459..0000000000 --- a/legacy/pdseg/models/modeling/icnet.py +++ /dev/null @@ -1,197 +0,0 @@ -# coding: utf8 -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import paddle.fluid as fluid -from utils.config import cfg -from models.libs.model_libs import scope -from models.libs.model_libs import bn, avg_pool, conv -from models.backbone.resnet import ResNet as resnet_backbone -import numpy as np - - -def interp(input, out_shape): - out_shape = list(out_shape.astype("int32")) - return fluid.layers.resize_bilinear(input, out_shape=out_shape) - - -def pyramis_pooling(input, input_shape): - shape = np.ceil(input_shape / 32).astype("int32") - h, w = shape - pool1 = avg_pool(input, [h, w], [h, w]) - pool1_interp = interp(pool1, shape) - pool2 = avg_pool(input, [h // 2, w // 2], [h // 2, w // 2]) - pool3 = avg_pool(input, [h // 3, w // 3], [h // 3, w // 3]) - pool4 = avg_pool(input, [h // 4, w // 4], [h // 4, w // 4]) - # official caffe repo eval use following hyparam - # pool2 = avg_pool(input, [17, 33], [16, 32]) - # pool3 = avg_pool(input, [13, 25], [10, 20]) - # pool4 = avg_pool(input, [8, 15], [5, 10]) - pool2_interp = interp(pool2, shape) - pool3_interp = interp(pool3, shape) - pool4_interp = interp(pool4, shape) - conv5_3_sum = input + pool4_interp + pool3_interp + pool2_interp + pool1_interp - return conv5_3_sum - - -def zero_padding(input, padding): - return fluid.layers.pad(input, - [0, 0, 0, 0, padding, padding, padding, padding]) - - -def sub_net_4(input, input_shape): - tmp = pyramis_pooling(input, input_shape) - with scope("conv5_4_k1"): - tmp = conv(tmp, 256, 1, 1) - tmp = bn(tmp, act='relu') - tmp = interp(tmp, out_shape=np.ceil(input_shape / 16)) - return tmp - - -def sub_net_2(input): - with scope("conv3_1_sub2_proj"): - tmp = conv(input, 128, 1, 1) - tmp = bn(tmp) - return tmp - - -def sub_net_1(input): - with scope("conv1_sub1"): - tmp = conv(input, 32, 3, 2, padding=1) - tmp = bn(tmp, act='relu') - with scope("conv2_sub1"): - tmp = conv(tmp, 32, 3, 2, padding=1) - tmp = bn(tmp, act='relu') - with scope("conv3_sub1"): - tmp = conv(tmp, 64, 3, 2, padding=1) - tmp = bn(tmp, act='relu') - with scope("conv3_sub1_proj"): - tmp = conv(tmp, 128, 1, 1) - tmp = bn(tmp) - return tmp - - -def CCF24(sub2_out, sub4_out, input_shape): - with scope("conv_sub4"): - tmp = conv(sub4_out, 128, 3, dilation=2, padding=2) - tmp = bn(tmp) - tmp = tmp + sub2_out - tmp = fluid.layers.relu(tmp) - tmp = interp(tmp, np.ceil(input_shape / 8)) - return tmp - - -def CCF124(sub1_out, sub24_out, input_shape): - tmp = zero_padding(sub24_out, padding=2) - with scope("conv_sub2"): - tmp = conv(tmp, 128, 3, dilation=2) - tmp = bn(tmp) - tmp = tmp + sub1_out - tmp = fluid.layers.relu(tmp) - tmp = interp(tmp, input_shape // 4) - return tmp - - -def resnet(input): - # ICNET backbone: resnet, 默认resnet50 - # end_points: resnet终止层数 - # decode_point: backbone引出分支所在层数 - # resize_point:backbone所在的该层卷积尺寸缩小至1/2 - # dilation_dict: resnet block数及对应的膨胀卷积尺度 - scale = cfg.MODEL.ICNET.DEPTH_MULTIPLIER - layers = cfg.MODEL.ICNET.LAYERS - model = resnet_backbone(scale=scale, layers=layers, stem='icnet') - end_points = 49 - decode_point = 13 - resize_point = 13 - dilation_dict = {2: 2, 3: 4} - data, decode_shortcuts = model.net( - input, - end_points=end_points, - decode_points=decode_point, - resize_points=resize_point, - dilation_dict=dilation_dict) - return data, decode_shortcuts[decode_point] - - -def encoder(data13, data49, input, input_shape): - # ICENT encoder配置 - # sub_net_4:对resnet49层数据进行pyramis_pooling操作 - # sub_net_2:对resnet13层数据进行卷积操作 - # sub_net_1: 
对原始尺寸图像进行3次下采样卷积操作 - sub4_out = sub_net_4(data49, input_shape) - sub2_out = sub_net_2(data13) - sub1_out = sub_net_1(input) - return sub1_out, sub2_out, sub4_out - - -def decoder(sub1_out, sub2_out, sub4_out, input_shape): - # ICENT decoder配置 - # CCF: Cascade Feature Fusion 级联特征融合 - sub24_out = CCF24(sub2_out, sub4_out, input_shape) - sub124_out = CCF124(sub1_out, sub24_out, input_shape) - return sub24_out, sub124_out - - -def get_logit(data, num_classes, name="logit"): - param_attr = fluid.ParamAttr( - name=name + 'weights', - regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.0), - initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.01)) - - with scope(name): - data = conv( - data, - num_classes, - 1, - stride=1, - padding=0, - param_attr=param_attr, - bias_attr=True) - return data - - -def icnet(input, num_classes): - # Backbone resnet: 输入 image_sub2: 图片尺寸缩小至1/2 - # 输出 data49: resnet第49层数据,原始尺寸1/32 - # data13:resnet第13层数据, 原始尺寸1/16 - input_shape = input.shape[2:] - input_shape = np.array(input_shape).astype("float32") - image_sub2 = interp(input, out_shape=np.ceil(input_shape * 0.5)) - data49, data13 = resnet(image_sub2) - - # encoder:输入:input, data13, data49,分别进行下采样,卷积和金字塔pooling操作 - # 输出:分别对应sub1_out, sub2_out, sub4_out - sub1_out, sub2_out, sub4_out = encoder(data13, data49, input, input_shape) - - # decoder: 对编码器三个分支结果进行级联特征融合 - sub24_out, sub124_out = decoder(sub1_out, sub2_out, sub4_out, input_shape) - - # get_logit: 根据类别数决定最后一层卷积输出 - logit124 = get_logit(sub124_out, num_classes, "logit124") - logit4 = get_logit(sub4_out, num_classes, "logit4") - logit24 = get_logit(sub24_out, num_classes, "logit24") - return logit124, logit24, logit4 - - -if __name__ == '__main__': - image_shape = [-1, 3, 320, 320] - image = fluid.data(name='image', shape=image_shape, dtype='float32') - logit = icnet(image, 4) - print("logit:", logit.shape) diff --git a/legacy/pdseg/models/modeling/ocrnet.py b/legacy/pdseg/models/modeling/ocrnet.py deleted file mode 100644 index 8ab8925eb5..0000000000 --- a/legacy/pdseg/models/modeling/ocrnet.py +++ /dev/null @@ -1,493 +0,0 @@ -# coding: utf8 -# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import paddle -import paddle.fluid as fluid -from paddle.fluid.initializer import MSRA -from paddle.fluid.param_attr import ParamAttr - -from utils.config import cfg - - -def conv_bn_layer(input, - filter_size, - num_filters, - stride=1, - padding=1, - num_groups=1, - if_act=True, - name=None): - conv = fluid.layers.conv2d( - input=input, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) // 2, - groups=num_groups, - act=None, - # param_attr=ParamAttr(initializer=MSRA(), learning_rate=1.0, name=name + '_weights'), - param_attr=ParamAttr( - initializer=fluid.initializer.Normal(scale=0.001), - learning_rate=1.0, - name=name + '_weights'), - bias_attr=False) - bn_name = name + '_bn' - bn = fluid.layers.batch_norm( - input=conv, - param_attr=ParamAttr( - name=bn_name + "_scale", - initializer=fluid.initializer.Constant(1.0)), - bias_attr=ParamAttr( - name=bn_name + "_offset", - initializer=fluid.initializer.Constant(0.0)), - moving_mean_name=bn_name + '_mean', - moving_variance_name=bn_name + '_variance') - if if_act: - bn = fluid.layers.relu(bn) - return bn - - -def basic_block(input, num_filters, stride=1, downsample=False, name=None): - residual = input - conv = conv_bn_layer( - input=input, - filter_size=3, - num_filters=num_filters, - stride=stride, - name=name + '_conv1') - conv = conv_bn_layer( - input=conv, - filter_size=3, - num_filters=num_filters, - if_act=False, - name=name + '_conv2') - if downsample: - residual = conv_bn_layer( - input=input, - filter_size=1, - num_filters=num_filters, - if_act=False, - name=name + '_downsample') - return fluid.layers.elementwise_add(x=residual, y=conv, act='relu') - - -def bottleneck_block(input, num_filters, stride=1, downsample=False, name=None): - residual = input - conv = conv_bn_layer( - input=input, - filter_size=1, - num_filters=num_filters, - name=name + '_conv1') - conv = conv_bn_layer( - input=conv, - filter_size=3, - num_filters=num_filters, - stride=stride, - name=name + '_conv2') - conv = conv_bn_layer( - input=conv, - filter_size=1, - num_filters=num_filters * 4, - if_act=False, - name=name + '_conv3') - if downsample: - residual = conv_bn_layer( - input=input, - filter_size=1, - num_filters=num_filters * 4, - if_act=False, - name=name + '_downsample') - return fluid.layers.elementwise_add(x=residual, y=conv, act='relu') - - -def fuse_layers(x, channels, multi_scale_output=True, name=None): - out = [] - for i in range(len(channels) if multi_scale_output else 1): - residual = x[i] - shape = residual.shape - width = shape[-1] - height = shape[-2] - for j in range(len(channels)): - if j > i: - y = conv_bn_layer( - x[j], - filter_size=1, - num_filters=channels[i], - if_act=False, - name=name + '_layer_' + str(i + 1) + '_' + str(j + 1)) - y = fluid.layers.resize_bilinear( - input=y, out_shape=[height, width]) - residual = fluid.layers.elementwise_add( - x=residual, y=y, act=None) - elif j < i: - y = x[j] - for k in range(i - j): - if k == i - j - 1: - y = conv_bn_layer( - y, - filter_size=3, - num_filters=channels[i], - stride=2, - if_act=False, - name=name + '_layer_' + str(i + 1) + '_' + - str(j + 1) + '_' + str(k + 1)) - else: - y = conv_bn_layer( - y, - filter_size=3, - num_filters=channels[j], - stride=2, - name=name + '_layer_' + str(i + 1) + '_' + - str(j + 1) + '_' + str(k + 1)) - residual = fluid.layers.elementwise_add( - x=residual, y=y, act=None) - - residual = 
fluid.layers.relu(residual) - out.append(residual) - return out - - -def branches(x, block_num, channels, name=None): - out = [] - for i in range(len(channels)): - residual = x[i] - for j in range(block_num): - residual = basic_block( - residual, - channels[i], - name=name + '_branch_layer_' + str(i + 1) + '_' + str(j + 1)) - out.append(residual) - return out - - -def high_resolution_module(x, channels, multi_scale_output=True, name=None): - residual = branches(x, 4, channels, name=name) - out = fuse_layers( - residual, channels, multi_scale_output=multi_scale_output, name=name) - return out - - -def transition_layer(x, in_channels, out_channels, name=None): - num_in = len(in_channels) - num_out = len(out_channels) - out = [] - for i in range(num_out): - if i < num_in: - if in_channels[i] != out_channels[i]: - residual = conv_bn_layer( - x[i], - filter_size=3, - num_filters=out_channels[i], - name=name + '_layer_' + str(i + 1)) - out.append(residual) - else: - out.append(x[i]) - else: - residual = conv_bn_layer( - x[-1], - filter_size=3, - num_filters=out_channels[i], - stride=2, - name=name + '_layer_' + str(i + 1)) - out.append(residual) - return out - - -def stage(x, num_modules, channels, multi_scale_output=True, name=None): - out = x - for i in range(num_modules): - if i == num_modules - 1 and multi_scale_output == False: - out = high_resolution_module( - out, - channels, - multi_scale_output=False, - name=name + '_' + str(i + 1)) - else: - out = high_resolution_module( - out, channels, name=name + '_' + str(i + 1)) - - return out - - -def layer1(input, name=None): - conv = input - for i in range(4): - conv = bottleneck_block( - conv, - num_filters=64, - downsample=True if i == 0 else False, - name=name + '_' + str(i + 1)) - return conv - - -def aux_head(input, last_inp_channels, num_classes): - x = conv_bn_layer( - input=input, - filter_size=1, - num_filters=last_inp_channels, - stride=1, - padding=0, - name='aux_head_conv1') - x = fluid.layers.conv2d( - input=x, - num_filters=num_classes, - filter_size=1, - stride=1, - padding=0, - act=None, - # param_attr=ParamAttr(initializer=MSRA(), learning_rate=1.0, name='aux_head_conv2_weights'), - param_attr=ParamAttr( - initializer=fluid.initializer.Normal(scale=0.001), - learning_rate=1.0, - name='aux_head_conv2_weights'), - bias_attr=ParamAttr( - initializer=fluid.initializer.Constant(0.0), - name="aux_head_conv2_bias")) - return x - - -def conv3x3_ocr(input, ocr_mid_channels): - x = conv_bn_layer( - input=input, - filter_size=3, - num_filters=ocr_mid_channels, - stride=1, - padding=1, - name='conv3x3_ocr') - return x - - -def f_pixel(input, key_channels): - x = conv_bn_layer( - input=input, - filter_size=1, - num_filters=key_channels, - stride=1, - padding=0, - name='f_pixel_conv1') - x = conv_bn_layer( - input=x, - filter_size=1, - num_filters=key_channels, - stride=1, - padding=0, - name='f_pixel_conv2') - return x - - -def f_object(input, key_channels): - x = conv_bn_layer( - input=input, - filter_size=1, - num_filters=key_channels, - stride=1, - padding=0, - name='f_object_conv1') - x = conv_bn_layer( - input=x, - filter_size=1, - num_filters=key_channels, - stride=1, - padding=0, - name='f_object_conv2') - return x - - -def f_down(input, key_channels): - x = conv_bn_layer( - input=input, - filter_size=1, - num_filters=key_channels, - stride=1, - padding=0, - name='f_down_conv') - return x - - -def f_up(input, in_channels): - x = conv_bn_layer( - input=input, - filter_size=1, - num_filters=in_channels, - stride=1, - padding=0, - 
name='f_up_conv') - return x - - -def object_context_block(x, proxy, in_channels, key_channels, scale): - batch_size, _, h, w = x.shape - if scale > 1: - x = fluid.layers.pool2d(x, pool_size=[scale, scale], pool_type='max') - - query = f_pixel(x, key_channels) - query = fluid.layers.reshape( - query, - shape=[batch_size, key_channels, query.shape[2] * query.shape[3]]) - query = fluid.layers.transpose(query, perm=[0, 2, 1]) - - key = f_object(proxy, key_channels) - key = fluid.layers.reshape( - key, shape=[batch_size, key_channels, key.shape[2] * key.shape[3]]) - - value = f_down(proxy, key_channels) - value = fluid.layers.reshape( - value, - shape=[batch_size, key_channels, value.shape[2] * value.shape[3]]) - value = fluid.layers.transpose(value, perm=[0, 2, 1]) - - sim_map = fluid.layers.matmul(query, key) - sim_map = (key_channels**-.5) * sim_map - sim_map = fluid.layers.softmax(sim_map, axis=-1) - - context = fluid.layers.matmul(sim_map, value) - context = fluid.layers.transpose(context, perm=[0, 2, 1]) - context = fluid.layers.reshape( - context, shape=[batch_size, key_channels, x.shape[2], x.shape[3]]) - context = f_up(context, in_channels) - - if scale > 1: - context = fluid.layers.resize_bilinear(context, out_shape=[h, w]) - - return context - - -def ocr_gather_head(feats, probs, scale=1): - feats = fluid.layers.reshape( - feats, - shape=[feats.shape[0], feats.shape[1], feats.shape[2] * feats.shape[3]]) - feats = fluid.layers.transpose(feats, perm=[0, 2, 1]) - probs = fluid.layers.reshape( - probs, - shape=[probs.shape[0], probs.shape[1], probs.shape[2] * probs.shape[3]]) - probs = fluid.layers.softmax(scale * probs, axis=2) - ocr_context = fluid.layers.matmul(probs, feats) - ocr_context = fluid.layers.transpose(ocr_context, perm=[0, 2, 1]) - ocr_context = fluid.layers.unsqueeze(ocr_context, axes=[3]) - return ocr_context - - -def ocr_distri_head(feats, - proxy_feats, - ocr_mid_channels, - ocr_key_channels, - scale=1, - dropout=0.05): - context = object_context_block(feats, proxy_feats, ocr_mid_channels, - ocr_key_channels, scale) - x = fluid.layers.concat([context, feats], axis=1) - x = conv_bn_layer( - input=x, - filter_size=1, - num_filters=ocr_mid_channels, - stride=1, - padding=0, - name='spatial_ocr_conv') - x = fluid.layers.dropout(x, dropout_prob=dropout) - return x - - -def cls_head(input, num_classes): - x = fluid.layers.conv2d( - input=input, - num_filters=num_classes, - filter_size=1, - stride=1, - padding=0, - act=None, - # param_attr=ParamAttr(initializer=MSRA(), learning_rate=1.0, name='cls_head_conv_weights'), - param_attr=ParamAttr( - initializer=fluid.initializer.Normal(scale=0.001), - learning_rate=1.0, - name='cls_head_conv_weights'), - bias_attr=ParamAttr( - initializer=fluid.initializer.Constant(0.0), - name="cls_head_conv_bias")) - return x - - -def ocr_module(input, last_inp_channels, num_classes, ocr_mid_channels, - ocr_key_channels): - out_aux = aux_head(input, last_inp_channels, num_classes) - feats = conv3x3_ocr(input, ocr_mid_channels) - context = ocr_gather_head(feats, out_aux) - feats = ocr_distri_head(feats, context, ocr_mid_channels, ocr_key_channels) - out = cls_head(feats, num_classes) - return out, out_aux - - -def high_resolution_ocr_net(input, num_classes): - - channels_2 = cfg.MODEL.HRNET.STAGE2.NUM_CHANNELS - channels_3 = cfg.MODEL.HRNET.STAGE3.NUM_CHANNELS - channels_4 = cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS - - num_modules_2 = cfg.MODEL.HRNET.STAGE2.NUM_MODULES - num_modules_3 = cfg.MODEL.HRNET.STAGE3.NUM_MODULES - num_modules_4 = 
cfg.MODEL.HRNET.STAGE4.NUM_MODULES - - ocr_mid_channels = cfg.MODEL.OCR.OCR_MID_CHANNELS - ocr_key_channels = cfg.MODEL.OCR.OCR_KEY_CHANNELS - - last_inp_channels = sum(channels_4) - - x = conv_bn_layer( - input=input, - filter_size=3, - num_filters=64, - stride=2, - if_act=True, - name='layer1_1') - x = conv_bn_layer( - input=x, - filter_size=3, - num_filters=64, - stride=2, - if_act=True, - name='layer1_2') - - la1 = layer1(x, name='layer2') - tr1 = transition_layer([la1], [256], channels_2, name='tr1') - st2 = stage(tr1, num_modules_2, channels_2, name='st2') - tr2 = transition_layer(st2, channels_2, channels_3, name='tr2') - st3 = stage(tr2, num_modules_3, channels_3, name='st3') - tr3 = transition_layer(st3, channels_3, channels_4, name='tr3') - st4 = stage(tr3, num_modules_4, channels_4, name='st4') - - # upsample - shape = st4[0].shape - height, width = shape[-2], shape[-1] - st4[1] = fluid.layers.resize_bilinear(st4[1], out_shape=[height, width]) - st4[2] = fluid.layers.resize_bilinear(st4[2], out_shape=[height, width]) - st4[3] = fluid.layers.resize_bilinear(st4[3], out_shape=[height, width]) - - feats = fluid.layers.concat(st4, axis=1) - - out, out_aux = ocr_module(feats, last_inp_channels, num_classes, - ocr_mid_channels, ocr_key_channels) - - out = fluid.layers.resize_bilinear(out, input.shape[2:]) - out_aux = fluid.layers.resize_bilinear(out_aux, input.shape[2:]) - - return out, out_aux - - -def ocrnet(input, num_classes): - logit = high_resolution_ocr_net(input, num_classes) - return logit diff --git a/legacy/pdseg/models/modeling/pspnet.py b/legacy/pdseg/models/modeling/pspnet.py deleted file mode 100644 index f8c2943af4..0000000000 --- a/legacy/pdseg/models/modeling/pspnet.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf8 -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import paddle.fluid as fluid -from paddle.fluid.param_attr import ParamAttr -from models.libs.model_libs import scope, name_scope -from models.libs.model_libs import avg_pool, conv, bn -from models.backbone.resnet import ResNet as resnet_backbone -from utils.config import cfg - - -def get_logit_interp(input, num_classes, out_shape, name="logit"): - # 根据类别数决定最后一层卷积输出, 并插值回原始尺寸 - param_attr = fluid.ParamAttr( - name=name + 'weights', - regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.0), - initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.01)) - - with scope(name): - logit = conv( - input, - num_classes, - filter_size=1, - param_attr=param_attr, - bias_attr=True, - name=name + '_conv') - logit_interp = fluid.layers.resize_bilinear( - logit, out_shape=out_shape, name=name + '_interp') - return logit_interp - - -def psp_module(input, out_features): - # Pyramid Scene Parsing 金字塔池化模块 - # 输入:backbone输出的特征 - # 输出:对输入进行不同尺度pooling, 卷积操作后插值回原始尺寸,并concat - # 最后进行一个卷积及BN操作 - - cat_layers = [] - sizes = (1, 2, 3, 6) - for size in sizes: - psp_name = "psp" + str(size) - with scope(psp_name): - pool = fluid.layers.adaptive_pool2d( - input, - pool_size=[size, size], - pool_type='avg', - name=psp_name + '_adapool') - data = conv( - pool, - out_features, - filter_size=1, - bias_attr=True, - name=psp_name + '_conv') - data_bn = bn(data, act='relu') - interp = fluid.layers.resize_bilinear( - data_bn, out_shape=input.shape[2:], name=psp_name + '_interp') - cat_layers.append(interp) - cat_layers = [input] + cat_layers[::-1] - cat = fluid.layers.concat(cat_layers, axis=1, name='psp_cat') - - psp_end_name = "psp_end" - with scope(psp_end_name): - data = conv( - cat, - out_features, - filter_size=3, - padding=1, - bias_attr=True, - name=psp_end_name) - out = bn(data, act='relu') - - return out - - -def resnet(input): - # PSPNET backbone: resnet, 默认resnet50 - # end_points: resnet终止层数 - # dilation_dict: resnet block数及对应的膨胀卷积尺度 - scale = cfg.MODEL.PSPNET.DEPTH_MULTIPLIER - layers = cfg.MODEL.PSPNET.LAYERS - end_points = layers - 1 - dilation_dict = {2: 2, 3: 4} - model = resnet_backbone(layers, scale, stem='pspnet') - data, _ = model.net( - input, end_points=end_points, dilation_dict=dilation_dict) - - return data - - -def pspnet(input, num_classes): - # Backbone: ResNet - res = resnet(input) - # PSP模块 - psp = psp_module(res, 512) - dropout = fluid.layers.dropout(psp, dropout_prob=0.1, name="dropout") - # 根据类别数决定最后一层卷积输出, 并插值回原始尺寸 - logit = get_logit_interp(dropout, num_classes, input.shape[2:]) - return logit diff --git a/legacy/pdseg/models/modeling/unet.py b/legacy/pdseg/models/modeling/unet.py deleted file mode 100644 index be9b8e5bb2..0000000000 --- a/legacy/pdseg/models/modeling/unet.py +++ /dev/null @@ -1,135 +0,0 @@ -# coding: utf8 -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import contextlib -import paddle -import paddle.fluid as fluid -from utils.config import cfg -from models.libs.model_libs import scope, name_scope -from models.libs.model_libs import bn, bn_relu, relu -from models.libs.model_libs import conv, max_pool, deconv - - -def double_conv(data, out_ch): - param_attr = fluid.ParamAttr( - name='weights', - regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.0), - initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.33)) - with scope("conv0"): - data = bn_relu( - conv(data, out_ch, 3, stride=1, padding=1, param_attr=param_attr)) - with scope("conv1"): - data = bn_relu( - conv(data, out_ch, 3, stride=1, padding=1, param_attr=param_attr)) - return data - - -def down(data, out_ch): - # 下采样:max_pool + 2个卷积 - with scope("down"): - data = max_pool(data, 2, 2, 0) - data = double_conv(data, out_ch) - return data - - -def up(data, short_cut, out_ch): - # 上采样:data上采样(resize或deconv), 并与short_cut concat - param_attr = fluid.ParamAttr( - name='weights', - regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.0), - initializer=fluid.initializer.XavierInitializer(), - ) - with scope("up"): - if cfg.MODEL.UNET.UPSAMPLE_MODE == 'bilinear': - data = fluid.layers.resize_bilinear(data, short_cut.shape[2:]) - else: - data = deconv( - data, - out_ch // 2, - filter_size=2, - stride=2, - padding=0, - param_attr=param_attr) - data = fluid.layers.concat([data, short_cut], axis=1) - data = double_conv(data, out_ch) - return data - - -def encode(data): - # 编码器设置 - short_cuts = [] - with scope("encode"): - with scope("block1"): - data = double_conv(data, 64) - short_cuts.append(data) - with scope("block2"): - data = down(data, 128) - short_cuts.append(data) - with scope("block3"): - data = down(data, 256) - short_cuts.append(data) - with scope("block4"): - data = down(data, 512) - short_cuts.append(data) - with scope("block5"): - data = down(data, 512) - return data, short_cuts - - -def decode(data, short_cuts): - # 解码器设置,与编码器对称 - with scope("decode"): - with scope("decode1"): - data = up(data, short_cuts[3], 256) - with scope("decode2"): - data = up(data, short_cuts[2], 128) - with scope("decode3"): - data = up(data, short_cuts[1], 64) - with scope("decode4"): - data = up(data, short_cuts[0], 64) - return data - - -def get_logit(data, num_classes): - # 根据类别数设置最后一个卷积层输出 - param_attr = fluid.ParamAttr( - name='weights', - regularizer=fluid.regularizer.L2DecayRegularizer( - regularization_coeff=0.0), - initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.01)) - with scope("logit"): - data = conv( - data, num_classes, 3, stride=1, padding=1, param_attr=param_attr) - return data - - -def unet(input, num_classes): - # UNET网络配置,对称的编码器解码器 - encode_data, short_cuts = encode(input) - decode_data = decode(encode_data, short_cuts) - logit = get_logit(decode_data, num_classes) - return logit - - -if __name__ == '__main__': - image_shape = [-1, 3, 320, 320] - image = fluid.data(name='image', shape=image_shape, dtype='float32') - logit = unet(image, 4) - print("logit:", logit.shape) diff --git a/legacy/pdseg/reader.py b/legacy/pdseg/reader.py index ac3757de65..61320ffb69 100644 --- a/legacy/pdseg/reader.py +++ b/legacy/pdseg/reader.py @@ -25,7 +25,6 @@ import numpy as np import paddle -import paddle.fluid as fluid import cv2 from PIL import Image diff --git a/legacy/pdseg/solver.py b/legacy/pdseg/solver.py index 
43efcd5b64..6e7008fece 100644 --- a/legacy/pdseg/solver.py +++ b/legacy/pdseg/solver.py @@ -14,9 +14,9 @@ # limitations under the License. import sys -import paddle.fluid as fluid + +import paddle import numpy as np -import importlib from utils.config import cfg try: from paddle.fluid.contrib.mixed_precision.decorator import OptimizerWithMixedPrecison, decorate, AutoMixedPrecisionLists @@ -36,53 +36,11 @@ def __init__(self, main_prog, start_prog): self.total_step = cfg.SOLVER.NUM_EPOCHS * self.step_per_epoch self.main_prog = main_prog self.start_prog = start_prog - self.warmup_step = cfg.SOLVER.LR_WARMUP_STEPS if cfg.SOLVER.LR_WARMUP else -1 - self.decay_step = self.total_step - self.warmup_step - self.decay_epochs = cfg.SOLVER.NUM_EPOCHS - self.warmup_step / self.step_per_epoch - - def lr_warmup(self, learning_rate, start_lr, end_lr): - linear_step = end_lr - start_lr - lr = fluid.layers.tensor.create_global_var( - shape=[1], - value=0.0, - dtype='float32', - persistable=True, - name="learning_rate_warmup") - - global_step = fluid.layers.learning_rate_scheduler._decay_step_counter() - warmup_counter = fluid.layers.autoincreased_step_counter( - counter_name='@LR_DECAY_COUNTER_WARMUP_IN_SEG@', begin=1, step=1) - global_counter = fluid.default_main_program().global_block( - ).vars['@LR_DECAY_COUNTER@'] - warmup_counter = fluid.layers.cast(warmup_counter, 'float32') - - with fluid.layers.control_flow.Switch() as switch: - with switch.case(warmup_counter <= self.warmup_step): - decayed_lr = start_lr + linear_step * ( - warmup_counter / self.warmup_step) - fluid.layers.tensor.assign(decayed_lr, lr) - # hold the global_step to 0 during the warm-up phase - fluid.layers.increment(global_counter, value=-1) - with switch.default(): - fluid.layers.tensor.assign(learning_rate, lr) - return lr - - def piecewise_decay(self): - gamma = cfg.SOLVER.GAMMA - bd = [self.step_per_epoch * e for e in cfg.SOLVER.DECAY_EPOCH] - lr = [cfg.SOLVER.LR * (gamma**i) for i in range(len(bd) + 1)] - decayed_lr = fluid.layers.piecewise_decay(boundaries=bd, values=lr) - return decayed_lr def poly_decay(self): power = cfg.SOLVER.POWER - decayed_lr = fluid.layers.polynomial_decay( - cfg.SOLVER.LR, self.decay_step, end_learning_rate=0, power=power) - return decayed_lr - - def cosine_decay(self): - decayed_lr = fluid.layers.cosine_decay( - cfg.SOLVER.LR, self.step_per_epoch, self.decay_epochs) + decayed_lr = paddle.optimizer.lr.PolynomialDecay( + cfg.SOLVER.LR, self.total_step, end_lr=0, power=power) return decayed_lr def get_lr(self, lr_policy): @@ -96,17 +54,14 @@ def get_lr(self, lr_policy): raise Exception( "unsupport learning decay policy! 
only support poly,piecewise,cosine" ) - - decayed_lr = self.lr_warmup(decayed_lr, 0, cfg.SOLVER.LR) return decayed_lr def sgd_optimizer(self, lr_policy, loss): decayed_lr = self.get_lr(lr_policy) - optimizer = fluid.optimizer.Momentum( + optimizer = paddle.optimizer.Momentum( learning_rate=decayed_lr, momentum=self.momentum, - regularization=fluid.regularizer.L2Decay( - regularization_coeff=self.weight_decay), + weight_decay=self.weight_decay, ) if cfg.MODEL.FP16: if cfg.MODEL.MODEL_NAME in ["pspnet"]: @@ -139,12 +94,11 @@ def sgd_optimizer(self, lr_policy, loss): def adam_optimizer(self, lr_policy, loss): decayed_lr = self.get_lr(lr_policy) - optimizer = fluid.optimizer.Adam( + optimizer = paddle.optimizer.Adam( learning_rate=decayed_lr, beta1=self.momentum, beta2=self.momentum2, - regularization=fluid.regularizer.L2Decay( - regularization_coeff=self.weight_decay), + weight_decay=self.weight_decay, ) optimizer.minimize(loss) return decayed_lr diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index 3df4d74fb5..fc3d6d7455 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -27,9 +27,9 @@ import random import shutil -import paddle import numpy as np -import paddle.fluid as fluid +import paddle +import paddle.static as static from paddle.fluid import profiler from utils.config import cfg @@ -133,7 +133,7 @@ def save_checkpoint(program, ckpt_name): if not os.path.isdir(ckpt_dir): os.makedirs(ckpt_dir) - fluid.save(program, os.path.join(ckpt_dir, 'model')) + static.save(program, os.path.join(ckpt_dir, 'model')) return ckpt_dir @@ -147,7 +147,7 @@ def load_checkpoint(exe, program): if not os.path.exists(model_path): raise ValueError( "TRAIN.PRETRAIN_MODEL {} not exist!".format(model_path)) - fluid.load(program, os.path.join(model_path, 'model'), exe) + static.load(program, os.path.join(model_path, 'model'), exe) # Check is path ended by path spearator if model_path[-1] == os.sep: @@ -188,9 +188,9 @@ def print_info(*msg): def train(cfg): - startup_prog = fluid.Program() - train_prog = fluid.Program() - test_prog = fluid.Program() + startup_prog = static.Program() + train_prog = static.Program() + test_prog = static.Program() if args.enable_ce: startup_prog.random_seed = 1000 train_prog.random_seed = 1000 @@ -227,14 +227,14 @@ def data_generator(): gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0)) xpu_id = int(os.environ.get('FLAGS_selected_xpus', 0)) if args.use_gpu: - place = fluid.CUDAPlace(gpu_id) - places = fluid.cuda_places() + place = paddle.CUDAPlace(gpu_id) + places = static.cuda_places() elif args.use_xpu: - place = fluid.XPUPlace(xpu_id) + place = paddle.XPUPlace(xpu_id) places = [place] else: - place = fluid.CPUPlace() - places = fluid.cpu_places() + place = paddle.CPUPlace() + places = static.cpu_places() # Get number of GPU dev_count = cfg.NUM_TRAINERS if cfg.NUM_TRAINERS > 1 else len(places) @@ -250,19 +250,19 @@ def data_generator(): data_loader, avg_loss, lr, pred, grts, masks = build_model( train_prog, startup_prog, phase=ModelPhase.TRAIN) - build_model(test_prog, fluid.Program(), phase=ModelPhase.EVAL) + build_model(test_prog, static.Program(), phase=ModelPhase.EVAL) data_loader.set_sample_generator( data_generator, batch_size=batch_size_per_dev, drop_last=drop_last) - exe = fluid.Executor(place) + exe = static.Executor(place) exe.run(startup_prog) - exec_strategy = fluid.ExecutionStrategy() + exec_strategy = static.ExecutionStrategy() # Clear temporary variables every 100 iteration if args.use_gpu: - exec_strategy.num_threads = 
fluid.core.get_cuda_device_count()
+        exec_strategy.num_threads = len(paddle.get_cuda_rng_state())
         exec_strategy.num_iteration_per_drop_scope = 100
-    build_strategy = fluid.BuildStrategy()
+    build_strategy = static.BuildStrategy()
 
     if cfg.NUM_TRAINERS > 1 and args.use_gpu:
         dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog)
@@ -280,10 +280,11 @@ def data_generator():
     if args.use_xpu:
         compiled_train_prog = train_prog
     else:
-        compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel(
-            loss_name=avg_loss.name,
-            exec_strategy=exec_strategy,
-            build_strategy=build_strategy)
+        compiled_train_prog = static.CompiledProgram(
+            train_prog).with_data_parallel(
+                loss_name=avg_loss.name,
+                exec_strategy=exec_strategy,
+                build_strategy=build_strategy)
 
     # Resume training
     begin_epoch = cfg.SOLVER.BEGIN_EPOCH
@@ -297,7 +298,7 @@ def data_generator():
             'Pretrained model dir {} not exists, training from scratch...'.
             format(cfg.TRAIN.PRETRAINED_MODEL_DIR))
 
-    fetch_list = [avg_loss.name, lr.name]
+    fetch_list = [avg_loss.name]
     if args.debug:
         # Fetch more variable info and use streaming confusion matrix to
         # calculate IoU results if in debug mode
@@ -344,7 +345,7 @@ def data_generator():
                 if args.debug:
                     # Print category IoU and accuracy to check whether the
                     # traning process is corresponed to expectation
-                    loss, lr, pred, grts, masks = exe.run(
+                    loss, pred, grts, masks = exe.run(
                         program=compiled_train_prog,
                         fetch_list=fetch_list,
                         return_numpy=True)
@@ -360,7 +361,7 @@ def data_generator():
 
                         print_info((
                             "epoch={} step={} lr={:.5f} loss={:.4f} acc={:.5f} mIoU={:.5f} step/sec={:.3f} | ETA {}"
-                        ).format(epoch, step, lr[0], avg_loss, mean_acc,
+                        ).format(epoch, step, lr.get_lr(), avg_loss, mean_acc,
                                  mean_iou, speed,
                                  calculate_eta(all_step - step, speed)))
                         print_info("Category IoU: ", category_iou)
@@ -371,7 +372,7 @@ def data_generator():
 
                         log_writer.add_scalar('Train/mean_acc', mean_acc, step)
                         log_writer.add_scalar('Train/loss', avg_loss, step)
-                        log_writer.add_scalar('Train/lr', lr[0], step)
+                        log_writer.add_scalar('Train/lr', lr.get_lr(), step)
                         log_writer.add_scalar('Train/step/sec', speed, step)
                         sys.stdout.flush()
                         avg_loss = 0.0
@@ -379,7 +380,7 @@ def data_generator():
                         timer.restart()
                 else:
                     # If not in debug mode, avoid unnessary log and calculate
-                    loss, lr = exe.run(
+                    loss = exe.run(
                         program=compiled_train_prog,
                         fetch_list=fetch_list,
                         return_numpy=True)
@@ -391,11 +392,11 @@ def data_generator():
                     speed = args.log_steps / timer.elapsed_time()
                     print((
                         "epoch={} step={} lr={:.5f} loss={:.4f} step/sec={:.3f} | ETA {}"
-                    ).format(epoch, step, lr[0], avg_loss, speed,
+                    ).format(epoch, step, lr.get_lr(), avg_loss, speed,
                              calculate_eta(all_step - step, speed)))
                     if args.use_vdl:
                         log_writer.add_scalar('Train/loss', avg_loss, step)
-                        log_writer.add_scalar('Train/lr', lr[0], step)
+                        log_writer.add_scalar('Train/lr', lr.get_lr(), step)
                         log_writer.add_scalar('Train/speed', speed, step)
                     sys.stdout.flush()
                     avg_loss = 0.0
@@ -407,8 +408,9 @@ def data_generator():
             elif args.is_profiler and epoch == 1 and step == args.log_steps + 5:
                 profiler.stop_profiler("total", args.profiler_path)
                 return
+            lr.step()
 
-        except fluid.core.EOFException:
+        except paddle.fluid.core.EOFException:
             data_loader.reset()
             break
         except Exception as e:
@@ -475,7 +477,7 @@ def main(args):
 
 if __name__ == '__main__':
     paddle_utils.enable_static()
     args = parse_args()
-    if fluid.core.is_compiled_with_cuda() != True and args.use_gpu == True:
+    if paddle.is_compiled_with_cuda() != True and args.use_gpu == True:
         print(
             "You can not set use_gpu = True in the model because you are using paddlepaddle-cpu."
         )
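Note: after this hunk the learning rate is no longer a fetched graph variable but the scheduler object built by Solver.poly_decay() above (a paddle.optimizer.lr.PolynomialDecay), which is why the loop now reads it with lr.get_lr() and advances it with lr.step() once per iteration. Below is a minimal, self-contained sketch of that pattern, in dynamic-graph mode and with illustrative values (a stand-in model, not part of the patch):

    import paddle

    # Stand-in model; only the scheduler/optimizer wiring is the point here.
    linear = paddle.nn.Linear(4, 1)
    # Mirrors poly_decay(): lr decays from the base value to end_lr over
    # decay_steps iterations with the given power.
    scheduler = paddle.optimizer.lr.PolynomialDecay(
        learning_rate=0.05, decay_steps=1000, end_lr=0.0, power=0.9)
    optimizer = paddle.optimizer.Momentum(
        learning_rate=scheduler, momentum=0.9, parameters=linear.parameters())

    for step in range(3):
        loss = linear(paddle.rand([2, 4])).mean()
        loss.backward()
        optimizer.step()
        optimizer.clear_grad()
        print(step, scheduler.get_lr())  # the value logged as lr above
        scheduler.step()                 # advance the schedule one iteration

In the static-graph script above, the same scheduler instance is handed to paddle.optimizer.Momentum in solver.py and stepped manually at the end of each training iteration.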
diff --git a/legacy/pdseg/utils/dist_utils.py b/legacy/pdseg/utils/dist_utils.py
index 9954e1a1b9..f046b74790 100755
--- a/legacy/pdseg/utils/dist_utils.py
+++ b/legacy/pdseg/utils/dist_utils.py
@@ -80,8 +80,8 @@ def prepare_for_multi_process(exe, build_strategy, train_prog):
     # prepare for multi-process
     trainer_id = int(os.environ.get('PADDLE_TRAINER_ID', 0))
     num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
+    print(num_trainers, '***************')
     if num_trainers < 2:
         return
-    build_strategy.num_trainers = num_trainers
     build_strategy.trainer_id = trainer_id
 
     # NOTE(zcd): use multi processes to train the model,

From c95b82ff11cbcceafb54e8d714d124734be5155c Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Thu, 14 Jan 2021 10:18:06 +0800
Subject: [PATCH 038/210] add LimitLong transform

---
 paddleseg/core/infer.py            | 17 ++++++++
 paddleseg/core/val.py              |  6 ++-
 paddleseg/transforms/transforms.py | 65 ++++++++++++++++++++++++++++++
 3 files changed, 86 insertions(+), 2 deletions(-)

diff --git a/paddleseg/core/infer.py b/paddleseg/core/infer.py
index bfb3888909..9d6df78b8a 100644
--- a/paddleseg/core/infer.py
+++ b/paddleseg/core/infer.py
@@ -42,6 +42,23 @@ def get_reverse_list(ori_shape, transforms):
         if op.__class__.__name__ in ['Padding']:
             reverse_list.append(('padding', (h, w)))
             w, h = op.target_size[0], op.target_size[1]
+        if op.__class__.__name__ in ['LimitLong']:
+            long_edge = max(h, w)
+            short_edge = min(h, w)
+            if ((op.max_long is not None) and (long_edge > op.max_long)):
+                reverse_list.append(('resize', (h, w)))
+                short_edge = int(round(short_edge * op.max_long / long_edge))
+                long_edge = op.max_long
+            elif ((op.min_long is not None) and (long_edge < op.min_long)):
+                reverse_list.append(('resize', (h, w)))
+                short_edge = int(round(short_edge * op.min_long / long_edge))
+                long_edge = op.min_long
+            if h > w:
+                h = long_edge
+                w = short_edge
+            else:
+                w = long_edge
+                h = short_edge
     return reverse_list
 
 
diff --git a/paddleseg/core/val.py b/paddleseg/core/val.py
index cdf0a348b9..a1a4526b2e 100644
--- a/paddleseg/core/val.py
+++ b/paddleseg/core/val.py
@@ -120,7 +120,8 @@ def evaluate(model,
             intersect_area_list = []
             pred_area_list = []
             label_area_list = []
-            paddle.distributed.all_gather(intersect_area_list, intersect_area)
+            paddle.distributed.all_gather(intersect_area_list,
+                                          intersect_area)
             paddle.distributed.all_gather(pred_area_list, pred_area)
             paddle.distributed.all_gather(label_area_list, label_area)
 
@@ -132,7 +133,8 @@ def evaluate(model,
                 label_area_list = label_area_list[:valid]
 
             for i in range(len(intersect_area_list)):
-                intersect_area_all = intersect_area_all + intersect_area_list[i]
+                intersect_area_all = intersect_area_all + intersect_area_list[
+                    i]
                 pred_area_all = pred_area_all + pred_area_list[i]
                 label_area_all = label_area_all + label_area_list[i]
         else:
diff --git a/paddleseg/transforms/transforms.py b/paddleseg/transforms/transforms.py
index 7f285ed340..52ba7a29f7 100644
--- a/paddleseg/transforms/transforms.py
+++ b/paddleseg/transforms/transforms.py
@@ -228,6 +228,71 @@ def __call__(self, im, label=None):
         return (im, label)
 
 
+@manager.TRANSFORMS.add_component
+class LimitLong:
+    """
+    Limit the long edge of image.
+
+    If the long edge is larger than max_long, resize the long edge
+    to max_long, while scaling the short edge proportionally.
+
+    If the long edge is smaller than min_long, resize the long edge
+    to min_long, while scaling the short edge proportionally.
+
+    Args:
+        max_long (int, optional): If the long edge of image is larger than max_long,
+            it will be resized to max_long. Default: None.
+        min_long (int, optional): If the long edge of image is smaller than min_long,
+            it will be resized to min_long. Default: None.
+    """
+
+    def __init__(self, max_long=None, min_long=None):
+        if max_long is not None:
+            if not isinstance(max_long, int):
+                raise TypeError(
+                    "Type of `max_long` is invalid. It should be int, but it is {}"
+                    .format(type(max_long)))
+        if min_long is not None:
+            if not isinstance(min_long, int):
+                raise TypeError(
+                    "Type of `min_long` is invalid. It should be int, but it is {}"
+                    .format(type(min_long)))
+        if (max_long is not None) and (min_long is not None):
+            if min_long > max_long:
+                raise ValueError(
+                    '`max_long` should not be smaller than `min_long`, but they are {} and {}'
+                    .format(max_long, min_long))
+        self.max_long = max_long
+        self.min_long = min_long
+
+    def __call__(self, im, label=None):
+        """
+        Args:
+            im (np.ndarray): The image data.
+            label (np.ndarray, optional): The label data. Default: None.
+
+        Returns:
+            (tuple). When label is None, it returns (im, ), otherwise it returns (im, label).
+        """
+        h, w = im.shape[0], im.shape[1]
+        long_edge = max(h, w)
+        target = long_edge
+        if (self.max_long is not None) and (long_edge > self.max_long):
+            target = self.max_long
+        elif (self.min_long is not None) and (long_edge < self.min_long):
+            target = self.min_long
+
+        if target != long_edge:
+            im = functional.resize_long(im, target)
+            if label is not None:
+                label = functional.resize_long(label, target, cv2.INTER_NEAREST)
+
+        if label is None:
+            return (im, )
+        else:
+            return (im, label)
+
+
 @manager.TRANSFORMS.add_component
 class ResizeRangeScaling:
     """
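Note: a quick worked example of the LimitLong semantics added in this patch (a sketch with illustrative sizes; it assumes the package re-exports the class as paddleseg.transforms.LimitLong):

    import numpy as np

    from paddleseg.transforms import LimitLong

    # A 1000 x 2000 (H x W) image: the long edge (2000) exceeds max_long=1024,
    # so it is resized to 1024 and the short edge is scaled by the same
    # ratio: round(1000 * 1024 / 2000) = 512.
    im = np.zeros((1000, 2000, 3), dtype='float32')
    im, = LimitLong(max_long=1024)(im)
    print(im.shape)  # expected: (512, 1024, 3)

The matching change in infer.py records the pre-resize shape in reverse_list, so a prediction made at (512, 1024) can be mapped back to the original (1000, 2000) afterwards.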
From 3a2ca98b5c7d020a5c2808a26ee7ab86a8ed5ef0 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Wed, 20 Jan 2021 22:26:04 +0800
Subject: [PATCH 039/210] align the dygraph structure

---
 .../deeplabv3p_resnet50_vd_cityscapes.yaml    | 16 ++++----
 legacy/pdseg/models/libs/model_libs.py        | 14 +++++--
 legacy/pdseg/models/modeling/deeplab.py       | 37 +++++++++----------
 legacy/pdseg/models/modeling/hrnet.py         | 10 +++--
 4 files changed, 42 insertions(+), 35 deletions(-)

diff --git a/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml
index 41b39ee978..366d9cab32 100644
--- a/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml
+++ b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml
@@ -1,5 +1,5 @@
-EVAL_CROP_SIZE: (2049, 1025) # (width, height), for unpadding rangescaling and stepscaling
-TRAIN_CROP_SIZE: (769, 769) # (width, height), for unpadding rangescaling and stepscaling
+EVAL_CROP_SIZE: (2048, 1024) # (width, height), for unpadding rangescaling and stepscaling
+TRAIN_CROP_SIZE: (1024, 512) # (width, height), for unpadding rangescaling and stepscaling
 AUG:
     AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling
     FIX_RESIZE_SIZE: (2048, 1024) # (width, height), for unpadding
@@ -11,7 +11,7 @@ AUG:
     SCALE_STEP_SIZE: 0.25 # for stepscaling
     MIRROR: True
     TO_RGB: True
-BATCH_SIZE: 16
+BATCH_SIZE: 4
 DATASET:
     DATA_DIR: "./dataset/cityscapes/"
     IMAGE_TYPE: "rgb" # choice rgb or rgba
@@ -23,8 +23,8 @@ DATASET:
     IGNORE_INDEX: 255
     SEPARATOR: " "
 FREEZE:
-    MODEL_FILENAME: "model"
-    PARAMS_FILENAME: "params"
+    MODEL_FILENAME: "__model__"
+    PARAMS_FILENAME: "__params__"
 MODEL:
     DEFAULT_NORM_TYPE: "bn"
     MODEL_NAME: "deeplabv3p"
@@ -32,11 +32,11 @@ MODEL:
         ASPP_WITH_SEP_CONV: True
         DECODER_USE_SEP_CONV: True
        BACKBONE: 
"resnet_vd_50" - BACKBONE_LR_MULT_LIST: [0.1, 0.1, 0.2, 0.2, 1.0] + OUTPUT_STRIDE: 8 TRAIN: PRETRAINED_MODEL_DIR: u"pretrained_model/resnet50_vd_imagenet" MODEL_SAVE_DIR: "saved_model/deeplabv3p_resnet50_vd_bn_cityscapes" - SNAPSHOT_EPOCH: 10 + SNAPSHOT_EPOCH: 1 SYNC_BATCH_NORM: True TEST: TEST_MODEL: "saved_model/deeplabv3p_resnet50_vd_bn_cityscapes/final" @@ -44,4 +44,4 @@ SOLVER: LR: 0.05 LR_POLICY: "poly" OPTIMIZER: "sgd" - NUM_EPOCHS: 700 + NUM_EPOCHS: 3 diff --git a/legacy/pdseg/models/libs/model_libs.py b/legacy/pdseg/models/libs/model_libs.py index f41f593b85..834361a4af 100644 --- a/legacy/pdseg/models/libs/model_libs.py +++ b/legacy/pdseg/models/libs/model_libs.py @@ -116,7 +116,7 @@ def conv(*args, **kargs): name=name_scope + 'biases', regularizer=None, initializer=paddle.nn.initializer.Constant(value=0.0)) - else: + elif 'bias_attr' not in kargs: kargs['bias_attr'] = False return static.nn.conv2d(*args, **kargs) @@ -145,7 +145,8 @@ def separate_conv(input, channel, stride, filter, dilation=1, act=None): padding=(filter // 2) * dilation, dilation=dilation, use_cudnn=False, - param_attr=param_attr) + param_attr=param_attr, + bias_attr=None) input = bn(input) if act: input = act(input) @@ -155,7 +156,14 @@ def separate_conv(input, channel, stride, filter, dilation=1, act=None): initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=0.33)) with scope('pointwise'): input = conv( - input, channel, 1, 1, groups=1, padding=0, param_attr=param_attr) + input, + channel, + 1, + 1, + groups=1, + padding=0, + param_attr=param_attr, + bias_attr=None) input = bn(input) if act: input = act(input) return input diff --git a/legacy/pdseg/models/modeling/deeplab.py b/legacy/pdseg/models/modeling/deeplab.py index c3eb46151a..19bcf68a10 100644 --- a/legacy/pdseg/models/modeling/deeplab.py +++ b/legacy/pdseg/models/modeling/deeplab.py @@ -54,19 +54,7 @@ def encoder(input): with scope('encoder'): channel = cfg.MODEL.DEEPLAB.ENCODER.ASPP_CONVS_FILTERS with scope("image_pool"): - if not cfg.MODEL.DEEPLAB.ENCODER.POOLING_CROP_SIZE: - image_avg = paddle.mean(input, [2, 3], keepdim=True) - else: - pool_w = int((cfg.MODEL.DEEPLAB.ENCODER.POOLING_CROP_SIZE[0] - - 1.0) / cfg.MODEL.DEEPLAB.OUTPUT_STRIDE + 1.0) - pool_h = int((cfg.MODEL.DEEPLAB.ENCODER.POOLING_CROP_SIZE[1] - - 1.0) / cfg.MODEL.DEEPLAB.OUTPUT_STRIDE + 1.0) - iamge_avg = F.avg_pool2d( - input, - kernel_size=(pool_h, pool_w), - stride=cfg.MODEL.DEEPLAB.ENCODER.POOLING_STRIDE, - padding='VALID') - + image_avg = F.adaptive_avg_pool2d(input, output_size=(1, 1)) act = qsigmoid if cfg.MODEL.DEEPLAB.ENCODER.SE_USE_QSIGMOID else bn_relu image_avg = act( conv( @@ -91,7 +79,10 @@ def encoder(input): 1, groups=1, padding=0, - param_attr=param_attr)) + param_attr=param_attr, + bias_attr=None)) + aspp0 = F.interpolate( + aspp0, input.shape[2:], mode='bilinear', align_corners=True) concat_logits.append(aspp0) if aspp_ratios: @@ -109,6 +100,8 @@ def encoder(input): dilation=aspp_ratios[0], padding=aspp_ratios[0], param_attr=param_attr)) + aspp1 = F.interpolate( + aspp1, input.shape[2:], mode='bilinear', align_corners=True) concat_logits.append(aspp1) with scope("aspp2"): if cfg.MODEL.DEEPLAB.ASPP_WITH_SEP_CONV: @@ -124,6 +117,8 @@ def encoder(input): dilation=aspp_ratios[1], padding=aspp_ratios[1], param_attr=param_attr)) + aspp2 = F.interpolate( + aspp2, input.shape[2:], mode='bilinear', align_corners=True) concat_logits.append(aspp2) with scope("aspp3"): if cfg.MODEL.DEEPLAB.ASPP_WITH_SEP_CONV: @@ -139,6 +134,8 @@ def encoder(input): 
dilation=aspp_ratios[2], padding=aspp_ratios[2], param_attr=param_attr)) + aspp3 = F.interpolate( + aspp3, input.shape[2:], mode='bilinear', align_corners=True) concat_logits.append(aspp3) with scope("concat"): @@ -152,7 +149,8 @@ def encoder(input): 1, groups=1, padding=0, - param_attr=param_attr)) + param_attr=param_attr, + bias_attr=None)) data = F.dropout(data, 0.1, mode='downscale_in_infer') if cfg.MODEL.DEEPLAB.ENCODER.ASPP_WITH_SE: @@ -198,7 +196,8 @@ def _decoder_with_concat(encode_data, decode_shortcut, param_attr): 1, groups=1, padding=0, - param_attr=param_attr)) + param_attr=param_attr, + bias_attr=None)) encode_data = F.interpolate( encode_data, @@ -213,16 +212,14 @@ def _decoder_with_concat(encode_data, decode_shortcut, param_attr): cfg.MODEL.DEEPLAB.DECODER.CONV_FILTERS, 1, 3, - dilation=1, - act=relu) + dilation=1) with scope("separable_conv2"): encode_data = separate_conv( encode_data, cfg.MODEL.DEEPLAB.DECODER.CONV_FILTERS, 1, 3, - dilation=1, - act=relu) + dilation=1) else: with scope("decoder_conv1"): encode_data = bn_relu( diff --git a/legacy/pdseg/models/modeling/hrnet.py b/legacy/pdseg/models/modeling/hrnet.py index a79f88fc15..dde1644079 100644 --- a/legacy/pdseg/models/modeling/hrnet.py +++ b/legacy/pdseg/models/modeling/hrnet.py @@ -31,7 +31,8 @@ def conv_bn_layer(input, padding=1, num_groups=1, if_act=True, - name=None): + name=None, + bias_attr=False): conv = nn.conv2d( input=input, num_filters=num_filters, @@ -43,7 +44,7 @@ def conv_bn_layer(input, param_attr=paddle.ParamAttr( initializer=paddle.nn.initializer.KaimingUniform(), name=name + '_weights'), - bias_attr=False) + bias_attr=bias_attr) bn_name = name + '_bn' bn = nn.batch_norm( input=conv, @@ -286,7 +287,8 @@ def high_resolution_net(input, num_classes): num_filters=last_channels, stride=1, if_act=True, - name='conv-2') + name='conv-2', + bias_attr=None) out = nn.conv2d( input=out, num_filters=num_classes, @@ -297,7 +299,7 @@ def high_resolution_net(input, num_classes): param_attr=paddle.ParamAttr( initializer=paddle.nn.initializer.KaimingUniform(), name='conv-1_weights'), - bias_attr=False) + bias_attr=None) out = F.interpolate( out, size=input.shape[2:], mode='bilinear', align_corners=True) From e423131d8ce3e38f25d0a04d02eeabe7f8fbdc38 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 22 Jan 2021 11:30:54 +0800 Subject: [PATCH 040/210] add static amp --- legacy/pdseg/solver.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/legacy/pdseg/solver.py b/legacy/pdseg/solver.py index 6e7008fece..941ec0567a 100644 --- a/legacy/pdseg/solver.py +++ b/legacy/pdseg/solver.py @@ -18,10 +18,7 @@ import paddle import numpy as np from utils.config import cfg -try: - from paddle.fluid.contrib.mixed_precision.decorator import OptimizerWithMixedPrecison, decorate, AutoMixedPrecisionLists -except: - from paddle.fluid.contrib.mixed_precision.decorator import OptimizerWithMixedPrecision, decorate, AutoMixedPrecisionLists +from paddle.static.amp import decorate, AutoMixedPrecisionLists class Solver(object): @@ -64,10 +61,8 @@ def sgd_optimizer(self, lr_policy, loss): weight_decay=self.weight_decay, ) if cfg.MODEL.FP16: - if cfg.MODEL.MODEL_NAME in ["pspnet"]: - custom_black_list = {"pool2d"} - else: - custom_black_list = {} + print('use amp') + custom_black_list = {} amp_lists = AutoMixedPrecisionLists( custom_black_list=custom_black_list) assert isinstance(cfg.MODEL.SCALE_LOSS, float) or isinstance(cfg.MODEL.SCALE_LOSS, str), \ From dc3032c50f3e4629220acaa010dc80c133734abd Mon Sep 17 
From dc3032c50f3e4629220acaa010dc80c133734abd Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Fri, 22 Jan 2021 11:31:47 +0800
Subject: [PATCH 041/210] dygraph add parameters to control amp

---
 paddleseg/core/train.py | 37 +++++++++++++++++++++++++++----------
 train.py                |  5 ++++-
 2 files changed, 31 insertions(+), 11 deletions(-)

diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py
index 27f312f14e..c6acd3db7f 100644
--- a/paddleseg/core/train.py
+++ b/paddleseg/core/train.py
@@ -60,7 +60,8 @@ def train(model,
           num_workers=0,
           use_vdl=False,
           losses=None,
-          keep_checkpoint_max=5):
+          keep_checkpoint_max=5,
+          fp16=False):
     """
     Launch training.

@@ -80,6 +81,7 @@ def train(model,
         losses (dict): A dict including 'types' and 'coef'. The length of coef should equal to 1 or len(losses['types']). The 'types' item is a list of object of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficient.
         keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5.
+        fp16 (bool, optional): Whether to use automatic mixed precision (amp) training. Default: False.
     """
     nranks = paddle.distributed.ParallelEnv().nranks
     local_rank = paddle.distributed.ParallelEnv().local_rank
@@ -109,8 +111,9 @@ def train(model,
         )

     # use amp
-    logger.info('use amp to train')
-    scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
+    if fp16:
+        logger.info('use amp to train')
+        scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

     if use_vdl:
         from visualdl import LogWriter
@@ -140,7 +143,25 @@ def train(model,
             if len(data) == 3:
                 edges = data[2].astype('int64')

-            with paddle.amp.auto_cast(enable=True):
+            if fp16:
+                with paddle.amp.auto_cast(enable=True):
+                    if nranks > 1:
+                        logits_list = ddp_model(images)
+                    else:
+                        logits_list = model(images)
+                    loss_list = loss_computation(
+                        logits_list=logits_list,
+                        labels=labels,
+                        losses=losses,
+                        edges=edges)
+                    loss = sum(loss_list)
+                    # loss.backward()
+                    # optimizer.step()
+
+                scaled = scaler.scale(loss)  # scale the loss
+                scaled.backward()  # do backward
+                scaler.minimize(optimizer, scaled)  # update parameters
+            else:
                 if nranks > 1:
                     logits_list = ddp_model(images)
                 else:
                     logits_list = model(images)
                 loss_list = loss_computation(
                     logits_list=logits_list,
                     labels=labels,
                     losses=losses,
                     edges=edges)
                 loss = sum(loss_list)
-                # loss.backward()
-                # optimizer.step()
-
-                scaled = scaler.scale(loss)  # scale the loss
-                scaled.backward()  # do backward
-                scaler.minimize(optimizer, scaled)  # update parameters
+                loss.backward()
+                optimizer.step()

             lr = optimizer.get_lr()
             if isinstance(optimizer._learning_rate,
diff --git a/train.py b/train.py
index f9f4465d96..c809d6e952 100644
--- a/train.py
+++ b/train.py
@@ -90,6 +90,8 @@ def parse_args():
         dest='use_vdl',
         help='Whether to record the data to VisualDL during training',
         action='store_true')
+    parser.add_argument(
+        '--fp16', dest='fp16', help='Whether to use amp', action='store_true')

     return parser.parse_args()

@@ -140,7 +142,8 @@ def main(args):
         num_workers=args.num_workers,
         use_vdl=args.use_vdl,
         losses=losses,
-        keep_checkpoint_max=args.keep_checkpoint_max)
+        keep_checkpoint_max=args.keep_checkpoint_max,
+        fp16=args.fp16)


 if __name__ == '__main__':
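The fp16 branch introduced in PATCH 041 is the standard PaddlePaddle dynamic-graph AMP recipe: run the forward pass and loss under `auto_cast`, then scale the loss, backpropagate, and update through a `GradScaler`. As a rough illustration, the same pattern in isolation looks like the sketch below; the linear model, random data, and hyperparameters are placeholders for illustration, not code from these patches:

```python
import paddle

# Placeholder network and optimizer; any dygraph model trains the same way.
model = paddle.nn.Linear(10, 2)
optimizer = paddle.optimizer.Momentum(
    learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

for _ in range(10):
    x = paddle.rand([4, 10])
    with paddle.amp.auto_cast(enable=True):  # FP16-safe ops run in half precision
        loss = model(x).mean()
    scaled = scaler.scale(loss)         # multiply the loss by the current scale
    scaled.backward()                   # gradients come out scaled as well
    scaler.minimize(optimizer, scaled)  # unscale, skip step on inf/nan, adjust scale
    optimizer.clear_grad()
```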
From f1cf052badfb09e03f1e7b1af64540568cdd8f12 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Fri, 22 Jan 2021 17:21:19 +0800
Subject: [PATCH 042/210] update time calculation

---
 legacy/pdseg/eval.py        |  24 +++++---
 legacy/pdseg/train.py       | 110 ++++++++++++++----------------------
 legacy/pdseg/utils/timer.py |  55 ++++++++----------
 3 files changed, 80 insertions(+), 109 deletions(-)

diff --git a/legacy/pdseg/eval.py b/legacy/pdseg/eval.py
index b54058a52f..fbd3607f51 100644
--- a/legacy/pdseg/eval.py
+++ b/legacy/pdseg/eval.py
@@ -24,13 +24,15 @@
 import sys
 import argparse
 import pprint
+import time
+
 import numpy as np
 import paddle
 import paddle.static as static

 from utils import paddle_utils
 from utils.config import cfg
-from utils.timer import Timer, calculate_eta
+from utils.timer import TimeAverager, calculate_eta
 from models.model_builder import build_model
 from models.model_builder import ModelPhase
 from reader import SegDataset
@@ -144,11 +146,13 @@ def data_generator():
     num_images = 0
     step = 0
     all_step = cfg.DATASET.TEST_TOTAL_IMAGES // cfg.BATCH_SIZE + 1
-    timer = Timer()
-    timer.start()
+    reader_cost_averager = TimeAverager()
+    batch_cost_averager = TimeAverager()
+    batch_start = time.time()
     data_loader.start()
     while True:
         try:
+            reader_cost_averager.record(time.time() - batch_start)
             step += 1
             loss, pred, grts, masks = exe.run(
                 test_prog, fetch_list=fetch_list, return_numpy=True)
@@ -160,13 +164,15 @@ def data_generator():
             _, iou = conf_mat.mean_iou()
             _, acc = conf_mat.accuracy()
-            speed = 1.0 / timer.elapsed_time()
-
+            batch_cost_averager.record(
+                time.time() - batch_start, num_samples=cfg.BATCH_SIZE)
+            batch_cost = batch_cost_averager.get_average()
+            reader_cost = reader_cost_averager.get_average()
+            eta = calculate_eta(all_step - step, batch_cost)
             print(
-                "[EVAL]step={} loss={:.5f} acc={:.4f} IoU={:.4f} step/sec={:.2f} | ETA {}"
-                .format(step, loss, acc, iou, speed,
-                        calculate_eta(all_step - step, speed)))
-            timer.restart()
+                "[EVAL]step={} loss={:.5f} acc={:.4f} IoU={:.4f} batch_cost={:.4f}, reader_cost={:.5f} | ETA {}"
+                .format(step, loss, acc, iou, batch_cost, reader_cost, eta))
+            batch_start = time.time()
             sys.stdout.flush()
         except paddle.fluid.core.EOFException:
             break
diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py
index fc3d6d7455..5c53ffe8d2 100644
--- a/legacy/pdseg/train.py
+++ b/legacy/pdseg/train.py
@@ -26,6 +26,7 @@
 import pprint
 import random
 import shutil
+import time

 import numpy as np
 import paddle
@@ -33,7 +34,7 @@
 from paddle.fluid import profiler

 from utils.config import cfg
-from utils.timer import Timer, calculate_eta
+from utils.timer import TimeAverager, calculate_eta
 from metrics import ConfusionMatrix
 from reader import SegDataset
 from models.model_builder import build_model
@@ -325,9 +326,10 @@ def data_generator():

     avg_loss = 0.0
     best_mIoU = 0.0
+    reader_cost_averager = TimeAverager()
+    batch_cost_averager = TimeAverager()

-    timer = Timer()
-    timer.start()
+    batch_start = time.time()
     if begin_epoch > cfg.SOLVER.NUM_EPOCHS:
         raise ValueError(
             ("begin epoch[{}] is larger than cfg.SOLVER.NUM_EPOCHS[{}]").format(
@@ -342,72 +344,42 @@ def data_generator():
         data_loader.start()
         while True:
             try:
-                if args.debug:
-                    # Print category IoU and accuracy to check whether the
-                    # traning process is corresponed to expectation
-                    loss, pred, grts, masks = exe.run(
-                        program=compiled_train_prog,
-                        fetch_list=fetch_list,
-                        return_numpy=True)
-                    cm.calculate(pred, grts, masks)
-                    avg_loss += np.mean(np.array(loss))
-                    step += 1
-
-                    if step % args.log_steps == 0:
-                        speed = args.log_steps / timer.elapsed_time()
-                        avg_loss /= args.log_steps
-                        category_acc, mean_acc = cm.accuracy()
-                        category_iou, mean_iou = cm.mean_iou()
-
-                        print_info((
-                            "epoch={} step={} lr={:.5f} loss={:.4f} acc={:.5f} mIoU={:.5f} step/sec={:.3f} | ETA {}"
-                        ).format(epoch, step, lr.get_lr(), avg_loss, mean_acc,
-                                 mean_iou, speed,
-                                 calculate_eta(all_step - step, speed)))
-                        print_info("Category IoU: ", category_iou)
-                        print_info("Category Acc: ", category_acc)
-                        if args.use_vdl:
-                            log_writer.add_scalar('Train/mean_iou', mean_iou,
step) - log_writer.add_scalar('Train/mean_acc', mean_acc, - step) - log_writer.add_scalar('Train/loss', avg_loss, step) - log_writer.add_scalar('Train/lr', lr.ger_lr(), step) - log_writer.add_scalar('Train/step/sec', speed, step) - sys.stdout.flush() - avg_loss = 0.0 - cm.zero_matrix() - timer.restart() - else: - # If not in debug mode, avoid unnessary log and calculate - loss = exe.run( - program=compiled_train_prog, - fetch_list=fetch_list, - return_numpy=True) - avg_loss += np.mean(np.array(loss)) - step += 1 - - if step % args.log_steps == 0 and cfg.TRAINER_ID == 0: - avg_loss /= args.log_steps - speed = args.log_steps / timer.elapsed_time() - print(( - "epoch={} step={} lr={:.5f} loss={:.4f} step/sec={:.3f} | ETA {}" - ).format(epoch, step, lr.get_lr(), avg_loss, speed, - calculate_eta(all_step - step, speed))) - if args.use_vdl: - log_writer.add_scalar('Train/loss', avg_loss, step) - log_writer.add_scalar('Train/lr', lr.get_lr(), step) - log_writer.add_scalar('Train/speed', speed, step) - sys.stdout.flush() - avg_loss = 0.0 - timer.restart() - - # NOTE : used for benchmark, profiler tools - if args.is_profiler and epoch == 1 and step == args.log_steps: - profiler.start_profiler("All") - elif args.is_profiler and epoch == 1 and step == args.log_steps + 5: - profiler.stop_profiler("total", args.profiler_path) - return + reader_cost_averager.record(time.time() - batch_start) + loss = exe.run( + program=compiled_train_prog, + fetch_list=fetch_list, + return_numpy=True) + avg_loss += np.mean(np.array(loss)) + step += 1 + batch_cost_averager.record( + time.time() - batch_start, num_samples=cfg.BATCH_SIZE) + + if step % args.log_steps == 0 and cfg.TRAINER_ID == 0: + avg_train_batch_cost = batch_cost_averager.get_average() + avg_train_reader_cost = reader_cost_averager.get_average() + eta = calculate_eta(all_step - step, avg_train_batch_cost) + print( + "epoch={} step={} lr={:.5f} loss={:.4f} batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}" + .format(epoch, step, lr.get_lr(), avg_loss, + avg_train_batch_cost, avg_train_reader_cost, + batch_cost_averager.get_ips_average(), eta)) + if args.use_vdl: + log_writer.add_scalar('Train/loss', avg_loss, step) + log_writer.add_scalar('Train/lr', lr.get_lr(), step) + log_writer.add_scalar('Train/batch_cost', + avg_train_batch_cost, step) + log_writer.add_scalar('Train/reader_cost', + avg_train_reader_cost, step) + sys.stdout.flush() + avg_loss = 0.0 + batch_start = time.time() + + # NOTE : used for benchmark, profiler tools + if args.is_profiler and epoch == 1 and step == args.log_steps: + profiler.start_profiler("All") + elif args.is_profiler and epoch == 1 and step == args.log_steps + 5: + profiler.stop_profiler("total", args.profiler_path) + return lr.step() except paddle.fluid.core.EOFException: diff --git a/legacy/pdseg/utils/timer.py b/legacy/pdseg/utils/timer.py index ce58c367d1..1bba0ac7ab 100644 --- a/legacy/pdseg/utils/timer.py +++ b/legacy/pdseg/utils/timer.py @@ -19,7 +19,7 @@ def calculate_eta(remaining_step, speed): if remaining_step < 0: remaining_step = 0 - remaining_time = int(remaining_step / speed) + remaining_time = int(remaining_step * speed) result = "{:0>2}:{:0>2}:{:0>2}" arr = [] for i in range(2, -1, -1): @@ -28,34 +28,27 @@ def calculate_eta(remaining_step, speed): return result.format(*arr) -class Timer(object): - """ Simple timer class for measuring time consuming """ - +class TimeAverager(object): def __init__(self): - self._start_time = 0.0 - self._end_time = 0.0 - self._elapsed_time = 0.0 - 
self._is_running = False - - def start(self): - self._is_running = True - self._start_time = time.time() - - def restart(self): - self.start() - - def stop(self): - self._is_running = False - self._end_time = time.time() - - def elapsed_time(self): - self._end_time = time.time() - self._elapsed_time = self._end_time - self._start_time - if not self.is_running: - return 0.0 - - return self._elapsed_time - - @property - def is_running(self): - return self._is_running + self.reset() + + def reset(self): + self._cnt = 0 + self._total_time = 0 + self._total_samples = 0 + + def record(self, usetime, num_samples=None): + self._cnt += 1 + self._total_time += usetime + if num_samples: + self._total_samples += num_samples + + def get_average(self): + if self._cnt == 0: + return 0 + return self._total_time / float(self._cnt) + + def get_ips_average(self): + if not self._total_samples or self._cnt == 0: + return 0 + return float(self._total_samples) / self._total_time From 2f49ebfe4502575e0f946212d9c53f3545cc0f52 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 22 Jan 2021 18:14:06 +0800 Subject: [PATCH 043/210] add instruction --- README.md | 139 ++++-------------- .../deeplabv3p_resnet50_vd_cityscapes.yaml | 2 +- 2 files changed, 28 insertions(+), 113 deletions(-) diff --git a/README.md b/README.md index 09f88e7e7a..2226498935 100644 --- a/README.md +++ b/README.md @@ -1,124 +1,39 @@ -English | [简体中文](README_CN.md) +# PaddleSeg Benchmark with AMP -# PaddleSeg +## 动态图 -[![Build Status](https://travis-ci.org/PaddlePaddle/PaddleSeg.svg?branch=master)](https://travis-ci.org/PaddlePaddle/PaddleSeg) -[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) -[![Version](https://img.shields.io/github/release/PaddlePaddle/PaddleSeg.svg)](https://github.com/PaddlePaddle/PaddleSeg/releases) -![python version](https://img.shields.io/badge/python-3.6+-orange.svg) -![support os](https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-yellow.svg) +通过**--fp16**开启amp训练。 -![demo](./docs/images/cityscapes.gif) - -Welcome to PaddleSeg! PaddleSeg is an end-to-end image segmentation development kit developed based on [PaddlePaddle](https://www.paddlepaddle.org.cn), which covers a large number of high-quality segmentation models in different directions such as *high-performance* and *lightweight*. With the help of modular design, we provide two application methods: *Configuration Drive* and *API Calling*. So one can conveniently complete the entire image segmentation application from training to deployment through configuration calls or API calls. - -## Core features - -**High performance model**: Based on the high-performance backbone trained by Baidu's self-developed [semi-supervised label knowledge distillation scheme (SSLD)](https://paddleclas.readthedocs.io/zh_CN/latest/advanced_tutorials/distillation/distillation.html#ssld), combined with the state of the art segmentation technology, we provides 50+ high-quality pre-training models, which are better than other open source implementations. - -**Modular design**: PaddleSeg support 15+ mainstream *segmentation networks*, developers can start based on actual application scenarios and assemble diversified training configurations combined with modular design of *data enhancement strategies*, *backbone networks*, *loss functions* and other different components to meet different performance and accuracy requirements. 
- -**High efficiency**: PaddleSeg provides multi-process asynchronous I/O, multi-card parallel training, evaluation and other acceleration strategies, combined with the memory optimization function of the PaddlePaddle, which can greatly reduce the training overhead of the segmentation model, all this allowing developers to lower cost and more efficiently train image segmentation model. - -## Model Zoo - -|Model\Backbone|ResNet50|ResNet101|HRNetw18|HRNetw48| -|-|-|-|-|-| -|[ANN](./configs/ann)|✔|✔||| -|[BiSeNetv2](./configs/bisenet)|-|-|-|-| -|[DANet](./configs/danet)|✔|✔||| -|[Deeplabv3](./configs/deeplabv3)|✔|✔||| -|[Deeplabv3P](./configs/deeplabv3p)|✔|✔||| -|[Fast-SCNN](./configs/fastscnn)|-|-|-|-| -|[FCN](./configs/fcn)|||✔|✔| -|[GCNet](./configs/gcnet)|✔|✔||| -|[GSCNN](./configs/gscnn)|✔|✔||| -|[HarDNet](./configs/hardnet)|-|-|-|-| -|[OCRNet](./configs/ocrnet/)|||✔|✔| -|[PSPNet](./configs/pspnet)|✔|✔||| -|[U-Net](./configs/unet)|-|-|-|-| -|[U2-Net](./configs/u2net)|-|-|-|-| -|[Att U-Net](./configs/attention_unet)|-|-|-|-| -|[U-Net++](./configs/unet_plusplus)|-|-|-|-| -|[DecoupledSegNet](./configs/decoupled_segnet)|✔|✔||| - -## Dataset - -- [x] Cityscapes -- [x] Pascal VOC -- [x] ADE20K -- [x] Pascal Context -- [ ] COCO stuff - -## Installation - -#### step 1. Install PaddlePaddle - -System Requirements: -* PaddlePaddle >= 2.0.0rc -* Python >= 3.6+ - -Highly recommend you install the GPU version of PaddlePaddle, due to large overhead of segmentation models, otherwise it could be out of memory while running the models. For more detailed installation tutorials, please refer to the official website of [PaddlePaddle](https://www.paddlepaddle.org.cn/documentation/docs/zh/2.0-beta/install/index_cn.html)。 - - -#### step 2. Install PaddleSeg -Support to construct a customized segmentation framework with *API Calling* method for flexible development. - -```shell -pip install paddleseg +单机单卡使用如下命令进行训练: ``` - - -#### step 3. Download PaddleSeg repo -Support to complete the whole process segmentation application with *Configuration Drive* method, simple and fast. - -```shell -git clone https://github.com/PaddlePaddle/PaddleSeg +export CUDA_VISIBLE_DEVICES=0 +python train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 ``` -#### step 4. Verify installation -Run the following command. If you can train normally, you have installed it successfully. - -```shell -python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml +单机多卡使用如下命令进行训练: +``` +export CUDA_VISIBLE_DEVICES=0,1 +python -m paddle.distributed.launch train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 ``` -## Tutorials - -* [Get Started](./docs/quick_start.md) -* [API Tutorial](https://aistudio.baidu.com/aistudio/projectdetail/1339458) -* [Data Preparation](./docs/data_prepare.md) -* [Training Configuration](./configs/) -* [API References](./docs/apis) -* [Add New Components](./docs/add_new_model.md) - - -## Feedbacks and Contact -* The dynamic version is still under development, if you find any issue or have an idea on new features, please don't hesitate to contact us via [GitHub Issues](https://github.com/PaddlePaddle/PaddleSeg/issues). -* PaddleSeg User Group (QQ): 850378321 or 793114768 - -## Acknowledgement -* Thanks [jm12138](https://github.com/jm12138) for contributing U2-Net. -* Thanks [zjhellofss](https://github.com/zjhellofss) (Fu Shenshen) for contributing Attention U-Net, and Dice Loss. -* Thanks [liuguoyu666](https://github.com/liguoyu666) for contributing U-Net++. 
+deeplabv3p 模型的配置文件为: +benchmark/deeplabv3p.yml -## Citation -If you find our project useful in your research, please consider citing: +## 静态图 -```latex -@misc{liu2021paddleseg, - title={PaddleSeg: A High-Efficient Development Toolkit for Image Segmentation}, - author={Yi Liu and Lutao Chu and Guowei Chen and Zewu Wu and Zeyu Chen and Baohua Lai and Yuying Hao}, - year={2021}, - eprint={2101.06175}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} +**MODEL.FP16 True**开启amp训练 +单机单卡使用如下命令进行训练: +``` +cd legacy +export CUDA_VISIBLE_DEVICES=0 +python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_500.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 2 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True +``` -@misc{paddleseg2019, - title={PaddleSeg, End-to-end image segmentation kit based on PaddlePaddle}, - author={PaddlePaddle Authors}, - howpublished = {\url{https://github.com/PaddlePaddle/PaddleSeg}}, - year={2019} -} +单机单卡使用如下命令进行训练: +``` +export CUDA_VISIBLE_DEVICES=0,1 +python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_500.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True ``` + +deeplabv3p模型的配置文件为: +configs/deeplabv3p_resnet50_vd_cityscapes.yaml diff --git a/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml index 366d9cab32..440e22339f 100644 --- a/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml +++ b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml @@ -41,7 +41,7 @@ TRAIN: TEST: TEST_MODEL: "saved_model/deeplabv3p_resnet50_vd_bn_cityscapes/final" SOLVER: - LR: 0.05 + LR: 0.01 LR_POLICY: "poly" OPTIMIZER: "sgd" NUM_EPOCHS: 3 From 4080e5eabd16a54b4080b13946d60289d802cec6 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 27 Jan 2021 14:49:46 +0800 Subject: [PATCH 044/210] update dataset raise information --- paddleseg/cvlibs/config.py | 6 +++--- train.py | 6 +++++- val.py | 6 +++++- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/paddleseg/cvlibs/config.py b/paddleseg/cvlibs/config.py index 892e601dfe..f2224f09fa 100644 --- a/paddleseg/cvlibs/config.py +++ b/paddleseg/cvlibs/config.py @@ -157,7 +157,7 @@ def optimizer(self) -> paddle.optimizer.Optimizer: lr = self.learning_rate args = self.optimizer_args optimizer_type = args.pop('type') - + if optimizer_type == 'sgd': return paddle.optimizer.Momentum( lr, parameters=self.model.parameters(), **args) @@ -235,14 +235,14 @@ def model(self) -> paddle.nn.Layer: @property def train_dataset(self) -> paddle.io.Dataset: - _train_dataset = self.dic.get('train_dataset').copy() + _train_dataset = self.dic.get('train_dataset', {}).copy() if not _train_dataset: return None return self._load_object(_train_dataset) @property def val_dataset(self) -> paddle.io.Dataset: - _val_dataset = self.dic.get('val_dataset').copy() + _val_dataset = self.dic.get('val_dataset', {}).copy() if not _val_dataset: return None return self._load_object(_val_dataset) diff --git a/train.py b/train.py index f9f4465d96..950a6f8fe9 100644 --- a/train.py +++ b/train.py @@ -115,9 +115,13 @@ def main(args): batch_size=args.batch_size) train_dataset = cfg.train_dataset - if not train_dataset: + if train_dataset is None: raise RuntimeError( 'The training dataset is not specified in the configuration file.') + elif len(train_dataset) == 0: + raise ValueError( + 'The length of train_dataset is 0. 
Please check if your dataset is valid' + ) val_dataset = cfg.val_dataset if args.do_eval else None losses = cfg.loss diff --git a/val.py b/val.py index cbc49d63cb..39826ffc6a 100644 --- a/val.py +++ b/val.py @@ -102,10 +102,14 @@ def main(args): cfg = Config(args.cfg) val_dataset = cfg.val_dataset - if not val_dataset: + if val_dataset is None: raise RuntimeError( 'The verification dataset is not specified in the configuration file.' ) + elif len(val_dataset) == 0: + raise ValueError( + 'The length of val_dataset is 0. Please check if your dataset is valid' + ) msg = '\n---------------Config Information---------------\n' msg += str(cfg) From 552fd5c688cd6d7a9800c7821bde5b94d46d4525 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 27 Jan 2021 16:34:33 +0800 Subject: [PATCH 045/210] rm unused config files --- legacy/configs/cityscape_fast_scnn.yaml | 53 ----------------- legacy/configs/deepglobe_road_extraction.yaml | 45 -------------- .../configs/deeplabv3p_mobilenet-1-0_pet.yaml | 47 --------------- .../deeplabv3p_mobilenetv2_cityscapes.yaml | 47 --------------- ...eplabv3p_mobilenetv3_large_cityscapes.yaml | 58 ------------------- .../deeplabv3p_xception65_cityscapes.yaml | 44 -------------- .../configs/deeplabv3p_xception65_optic.yaml | 34 ----------- .../deeplabv3p_xception65_optic_kunlun.yaml | 34 ----------- legacy/configs/fast_scnn_pet.yaml | 43 -------------- legacy/configs/hrnet_optic.yaml | 39 ------------- legacy/configs/icnet_optic.yaml | 35 ----------- ...ovasz_hinge_deeplabv3p_mobilenet_road.yaml | 50 ---------------- ...z_softmax_deeplabv3p_mobilenet_pascal.yaml | 49 ---------------- legacy/configs/ocrnet_w18_bn_cityscapes.yaml | 54 ----------------- legacy/configs/pspnet_optic.yaml | 35 ----------- legacy/configs/unet_optic.yaml | 32 ---------- 16 files changed, 699 deletions(-) delete mode 100644 legacy/configs/cityscape_fast_scnn.yaml delete mode 100644 legacy/configs/deepglobe_road_extraction.yaml delete mode 100644 legacy/configs/deeplabv3p_mobilenet-1-0_pet.yaml delete mode 100644 legacy/configs/deeplabv3p_mobilenetv2_cityscapes.yaml delete mode 100644 legacy/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml delete mode 100644 legacy/configs/deeplabv3p_xception65_cityscapes.yaml delete mode 100644 legacy/configs/deeplabv3p_xception65_optic.yaml delete mode 100644 legacy/configs/deeplabv3p_xception65_optic_kunlun.yaml delete mode 100644 legacy/configs/fast_scnn_pet.yaml delete mode 100644 legacy/configs/hrnet_optic.yaml delete mode 100644 legacy/configs/icnet_optic.yaml delete mode 100644 legacy/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml delete mode 100755 legacy/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml delete mode 100644 legacy/configs/ocrnet_w18_bn_cityscapes.yaml delete mode 100644 legacy/configs/pspnet_optic.yaml delete mode 100644 legacy/configs/unet_optic.yaml diff --git a/legacy/configs/cityscape_fast_scnn.yaml b/legacy/configs/cityscape_fast_scnn.yaml deleted file mode 100644 index 34bd76be31..0000000000 --- a/legacy/configs/cityscape_fast_scnn.yaml +++ /dev/null @@ -1,53 +0,0 @@ -EVAL_CROP_SIZE: (2048, 1024) # (width, height), for unpadding rangescaling and stepscaling -TRAIN_CROP_SIZE: (1024, 1024) # (width, height), for unpadding rangescaling and stepscaling -AUG: - AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling - FIX_RESIZE_SIZE: (640, 640) # (width, height), for unpadding - INF_RESIZE_VALUE: 500 # for rangescaling - MAX_RESIZE_VALUE: 600 # for rangescaling - MIN_RESIZE_VALUE: 400 # for rangescaling 
- MAX_SCALE_FACTOR: 2.0 # for stepscaling - MIN_SCALE_FACTOR: 0.5 # for stepscaling - SCALE_STEP_SIZE: 0.25 # for stepscaling - MIRROR: True - FLIP: False - FLIP_RATIO: 0.2 - RICH_CROP: - ENABLE: True - ASPECT_RATIO: 0.0 - BLUR: False - BLUR_RATIO: 0.1 - MAX_ROTATION: 0 - MIN_AREA_RATIO: 0.0 - BRIGHTNESS_JITTER_RATIO: 0.4 - CONTRAST_JITTER_RATIO: 0.4 - SATURATION_JITTER_RATIO: 0.4 -BATCH_SIZE: 12 -MEAN: [0.5, 0.5, 0.5] -STD: [0.5, 0.5, 0.5] -DATASET: - DATA_DIR: "./dataset/cityscapes/" - IMAGE_TYPE: "rgb" # choice rgb or rgba - NUM_CLASSES: 19 - TEST_FILE_LIST: "dataset/cityscapes/val.list" - TRAIN_FILE_LIST: "dataset/cityscapes/train.list" - VAL_FILE_LIST: "dataset/cityscapes/val.list" - VIS_FILE_LIST: "dataset/cityscapes/val.list" - IGNORE_INDEX: 255 -FREEZE: - MODEL_FILENAME: "model" - PARAMS_FILENAME: "params" -MODEL: - DEFAULT_NORM_TYPE: "bn" - MODEL_NAME: "fast_scnn" - -TEST: - TEST_MODEL: "snapshots/cityscape_fast_scnn/final/" -TRAIN: - MODEL_SAVE_DIR: "snapshots/cityscape_fast_scnn/" - SNAPSHOT_EPOCH: 10 -SOLVER: - LR: 0.001 - LR_POLICY: "poly" - OPTIMIZER: "sgd" - NUM_EPOCHS: 100 diff --git a/legacy/configs/deepglobe_road_extraction.yaml b/legacy/configs/deepglobe_road_extraction.yaml deleted file mode 100644 index d6770287a3..0000000000 --- a/legacy/configs/deepglobe_road_extraction.yaml +++ /dev/null @@ -1,45 +0,0 @@ -EVAL_CROP_SIZE: (1025, 1025) # (width, height), for unpadding rangescaling and stepscaling -TRAIN_CROP_SIZE: (769, 769) # (width, height), for unpadding rangescaling and stepscaling -AUG: - AUG_METHOD: u"stepscaling" # choice unpadding rangescaling and stepscaling - FIX_RESIZE_SIZE: (640, 640) # (width, height), for unpadding - INF_RESIZE_VALUE: 500 # for rangescaling - MAX_RESIZE_VALUE: 600 # for rangescaling - MIN_RESIZE_VALUE: 400 # for rangescaling - MAX_SCALE_FACTOR: 2.0 # for stepscaling - MIN_SCALE_FACTOR: 0.5 # for stepscaling - SCALE_STEP_SIZE: 0.25 # for stepscaling -BATCH_SIZE: 8 -DATASET: - DATA_DIR: "./dataset/MiniDeepGlobeRoadExtraction/" - IMAGE_TYPE: "rgb" # choice rgb or rgba - NUM_CLASSES: 2 - TEST_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt" - TRAIN_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/train.txt" - VAL_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt" - VIS_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt" - IGNORE_INDEX: 255 - SEPARATOR: '|' -FREEZE: - MODEL_FILENAME: "model" - PARAMS_FILENAME: "params" - SAVE_DIR: "freeze_model" -MODEL: - DEFAULT_NORM_TYPE: "bn" - MODEL_NAME: "deeplabv3p" - DEEPLAB: - BACKBONE: "mobilenetv2" - DEPTH_MULTIPLIER: 1.0 - ENCODER_WITH_ASPP: False - ENABLE_DECODER: False -TEST: - TEST_MODEL: "./saved_model/deeplabv3p_mobilenetv2-1-0_bn_deepglobe_road_extraction/final" -TRAIN: - MODEL_SAVE_DIR: "./saved_model/deeplabv3p_mobilenetv2-1-0_bn_deepglobe_road_extraction/" - PRETRAINED_MODEL_DIR: "./pretrained_model/deeplabv3p_mobilenetv2-1-0_bn_coco/" - SNAPSHOT_EPOCH: 10 -SOLVER: - LR: 0.001 - LR_POLICY: "poly" - OPTIMIZER: "adam" - NUM_EPOCHS: 300 diff --git a/legacy/configs/deeplabv3p_mobilenet-1-0_pet.yaml b/legacy/configs/deeplabv3p_mobilenet-1-0_pet.yaml deleted file mode 100644 index 7578034ddc..0000000000 --- a/legacy/configs/deeplabv3p_mobilenet-1-0_pet.yaml +++ /dev/null @@ -1,47 +0,0 @@ -TRAIN_CROP_SIZE: (512, 512) # (width, height), for unpadding rangescaling and stepscaling -EVAL_CROP_SIZE: (512, 512) # (width, height), for unpadding rangescaling and stepscaling -AUG: - AUG_METHOD: "unpadding" # choice unpadding rangescaling and stepscaling - FIX_RESIZE_SIZE: (512, 512) # 
(width, height), for unpadding - - INF_RESIZE_VALUE: 500 # for rangescaling - MAX_RESIZE_VALUE: 600 # for rangescaling - MIN_RESIZE_VALUE: 400 # for rangescaling - - MAX_SCALE_FACTOR: 1.25 # for stepscaling - MIN_SCALE_FACTOR: 0.75 # for stepscaling - SCALE_STEP_SIZE: 0.25 # for stepscaling - MIRROR: True -BATCH_SIZE: 4 -DATASET: - DATA_DIR: "./dataset/mini_pet/" - IMAGE_TYPE: "rgb" # choice rgb or rgba - NUM_CLASSES: 3 - TEST_FILE_LIST: "./dataset/mini_pet/file_list/test_list.txt" - TRAIN_FILE_LIST: "./dataset/mini_pet/file_list/train_list.txt" - VAL_FILE_LIST: "./dataset/mini_pet/file_list/val_list.txt" - VIS_FILE_LIST: "./dataset/mini_pet/file_list/test_list.txt" - IGNORE_INDEX: 255 - SEPARATOR: " " -FREEZE: - MODEL_FILENAME: "__model__" - PARAMS_FILENAME: "__params__" -MODEL: - MODEL_NAME: "deeplabv3p" - DEFAULT_NORM_TYPE: "bn" - DEEPLAB: - BACKBONE: "mobilenetv2" - DEPTH_MULTIPLIER: 1.0 - ENCODER_WITH_ASPP: False - ENABLE_DECODER: False -TRAIN: - PRETRAINED_MODEL_DIR: "./pretrained_model/deeplabv3p_mobilenetv2-1-0_bn_cityscapes/" - MODEL_SAVE_DIR: "./saved_model/deeplabv3p_mobilenetv2-1-0_bn_pet/" - SNAPSHOT_EPOCH: 10 -TEST: - TEST_MODEL: "./saved_model/deeplabv3p_mobilenetv2-1-0_bn_pet/final" -SOLVER: - NUM_EPOCHS: 100 - LR: 0.005 - LR_POLICY: "poly" - OPTIMIZER: "sgd" diff --git a/legacy/configs/deeplabv3p_mobilenetv2_cityscapes.yaml b/legacy/configs/deeplabv3p_mobilenetv2_cityscapes.yaml deleted file mode 100644 index 8a7808525d..0000000000 --- a/legacy/configs/deeplabv3p_mobilenetv2_cityscapes.yaml +++ /dev/null @@ -1,47 +0,0 @@ -EVAL_CROP_SIZE: (2049, 1025) # (width, height), for unpadding rangescaling and stepscaling -TRAIN_CROP_SIZE: (769, 769) # (width, height), for unpadding rangescaling and stepscaling -AUG: - AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling - FIX_RESIZE_SIZE: (2048, 1024) # (width, height), for unpadding - INF_RESIZE_VALUE: 500 # for rangescaling - MAX_RESIZE_VALUE: 600 # for rangescaling - MIN_RESIZE_VALUE: 400 # for rangescaling - MAX_SCALE_FACTOR: 2.0 # for stepscaling - MIN_SCALE_FACTOR: 0.5 # for stepscaling - SCALE_STEP_SIZE: 0.25 # for stepscaling - MIRROR: True -BATCH_SIZE: 4 -DATASET: - DATA_DIR: "./dataset/cityscapes/" - IMAGE_TYPE: "rgb" # choice rgb or rgba - NUM_CLASSES: 19 - TEST_FILE_LIST: "dataset/cityscapes/val.list" - TRAIN_FILE_LIST: "dataset/cityscapes/train.list" - VAL_FILE_LIST: "dataset/cityscapes/val.list" - VIS_FILE_LIST: "dataset/cityscapes/val.list" - IGNORE_INDEX: 255 - SEPARATOR: " " -FREEZE: - MODEL_FILENAME: "model" - PARAMS_FILENAME: "params" -MODEL: - DEFAULT_NORM_TYPE: "bn" - MODEL_NAME: "deeplabv3p" - DEEPLAB: - BACKBONE: "mobilenetv2" - ASPP_WITH_SEP_CONV: True - DECODER_USE_SEP_CONV: True - ENCODER_WITH_ASPP: False - ENABLE_DECODER: False -TRAIN: - PRETRAINED_MODEL_DIR: u"pretrained_model/deeplabv3p_mobilenetv2-1-0_bn_coco" - MODEL_SAVE_DIR: "saved_model/deeplabv3p_mobilenetv2_cityscapes" - SNAPSHOT_EPOCH: 10 - SYNC_BATCH_NORM: True -TEST: - TEST_MODEL: "saved_model/deeplabv3p_mobilenetv2_cityscapes/final" -SOLVER: - LR: 0.01 - LR_POLICY: "poly" - OPTIMIZER: "sgd" - NUM_EPOCHS: 100 diff --git a/legacy/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml b/legacy/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml deleted file mode 100644 index a844e28c19..0000000000 --- a/legacy/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml +++ /dev/null @@ -1,58 +0,0 @@ -EVAL_CROP_SIZE: (2049, 1025) # (width, height), for unpadding rangescaling and stepscaling -TRAIN_CROP_SIZE: (769, 769) # (width, 
height), for unpadding rangescaling and stepscaling -AUG: - AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling - MAX_SCALE_FACTOR: 2.0 # for stepscaling - MIN_SCALE_FACTOR: 0.5 # for stepscaling - SCALE_STEP_SIZE: 0.25 # for stepscaling - MIRROR: True -BATCH_SIZE: 32 -DATASET: - DATA_DIR: "./dataset/cityscapes/" - IMAGE_TYPE: "rgb" # choice rgb or rgba - NUM_CLASSES: 19 - TEST_FILE_LIST: "dataset/cityscapes/val.list" - TRAIN_FILE_LIST: "dataset/cityscapes/train.list" - VAL_FILE_LIST: "dataset/cityscapes/val.list" - VIS_FILE_LIST: "dataset/cityscapes/val.list" - IGNORE_INDEX: 255 - SEPARATOR: " " -FREEZE: - MODEL_FILENAME: "model" - PARAMS_FILENAME: "params" -MODEL: - DEFAULT_NORM_TYPE: "bn" - MODEL_NAME: "deeplabv3p" - DEEPLAB: - BACKBONE: "mobilenetv3_large" - ASPP_WITH_SEP_CONV: True - DECODER_USE_SEP_CONV: True - ENCODER_WITH_ASPP: True - ENABLE_DECODER: True - OUTPUT_STRIDE: 32 - BACKBONE_LR_MULT_LIST: [0.15,0.35,0.65,0.85,1] - ENCODER: - POOLING_STRIDE: (4, 5) - POOLING_CROP_SIZE: (769, 769) - ASPP_WITH_SE: True - SE_USE_QSIGMOID: True - ASPP_CONVS_FILTERS: 128 - ASPP_WITH_CONCAT_PROJECTION: False - ADD_IMAGE_LEVEL_FEATURE: False - DECODER: - USE_SUM_MERGE: True - CONV_FILTERS: 19 - OUTPUT_IS_LOGITS: True - -TRAIN: - PRETRAINED_MODEL_DIR: u"pretrained_model/mobilenetv3-1-0_large_bn_imagenet" - MODEL_SAVE_DIR: "saved_model/deeplabv3p_mobilenetv3_large_cityscapes" - SNAPSHOT_EPOCH: 1 - SYNC_BATCH_NORM: True -TEST: - TEST_MODEL: "saved_model/deeplabv3p_mobilenetv3_large_cityscapes/final" -SOLVER: - LR: 0.2 - LR_POLICY: "poly" - OPTIMIZER: "sgd" - NUM_EPOCHS: 850 diff --git a/legacy/configs/deeplabv3p_xception65_cityscapes.yaml b/legacy/configs/deeplabv3p_xception65_cityscapes.yaml deleted file mode 100644 index 1dce747745..0000000000 --- a/legacy/configs/deeplabv3p_xception65_cityscapes.yaml +++ /dev/null @@ -1,44 +0,0 @@ -EVAL_CROP_SIZE: (2049, 1025) # (width, height), for unpadding rangescaling and stepscaling -TRAIN_CROP_SIZE: (769, 769) # (width, height), for unpadding rangescaling and stepscaling -AUG: - AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling - FIX_RESIZE_SIZE: (2048, 1024) # (width, height), for unpadding - INF_RESIZE_VALUE: 500 # for rangescaling - MAX_RESIZE_VALUE: 600 # for rangescaling - MIN_RESIZE_VALUE: 400 # for rangescaling - MAX_SCALE_FACTOR: 2.0 # for stepscaling - MIN_SCALE_FACTOR: 0.5 # for stepscaling - SCALE_STEP_SIZE: 0.25 # for stepscaling - MIRROR: True -BATCH_SIZE: 4 -DATASET: - DATA_DIR: "./dataset/cityscapes/" - IMAGE_TYPE: "rgb" # choice rgb or rgba - NUM_CLASSES: 19 - TEST_FILE_LIST: "dataset/cityscapes/val.list" - TRAIN_FILE_LIST: "dataset/cityscapes/train.list" - VAL_FILE_LIST: "dataset/cityscapes/val.list" - VIS_FILE_LIST: "dataset/cityscapes/val.list" - IGNORE_INDEX: 255 - SEPARATOR: " " -FREEZE: - MODEL_FILENAME: "model" - PARAMS_FILENAME: "params" -MODEL: - DEFAULT_NORM_TYPE: "bn" - MODEL_NAME: "deeplabv3p" - DEEPLAB: - ASPP_WITH_SEP_CONV: True - DECODER_USE_SEP_CONV: True -TRAIN: - PRETRAINED_MODEL_DIR: u"pretrained_model/deeplabv3p_xception65_bn_coco" - MODEL_SAVE_DIR: "saved_model/deeplabv3p_xception65_bn_cityscapes" - SNAPSHOT_EPOCH: 10 - SYNC_BATCH_NORM: True -TEST: - TEST_MODEL: "saved_model/deeplabv3p_xception65_bn_cityscapes/final" -SOLVER: - LR: 0.01 - LR_POLICY: "poly" - OPTIMIZER: "sgd" - NUM_EPOCHS: 100 diff --git a/legacy/configs/deeplabv3p_xception65_optic.yaml b/legacy/configs/deeplabv3p_xception65_optic.yaml deleted file mode 100644 index 7ec86926db..0000000000 --- 
a/legacy/configs/deeplabv3p_xception65_optic.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# 数据集配置 -DATASET: - DATA_DIR: "./dataset/optic_disc_seg/" - NUM_CLASSES: 2 - TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt" - VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt" - VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - -# 预训练模型配置 -MODEL: - MODEL_NAME: "deeplabv3p" - DEFAULT_NORM_TYPE: "bn" - DEEPLAB: - BACKBONE: "xception_65" - -# 其他配置 -TRAIN_CROP_SIZE: (512, 512) -EVAL_CROP_SIZE: (512, 512) -AUG: - AUG_METHOD: "unpadding" - FIX_RESIZE_SIZE: (512, 512) -BATCH_SIZE: 4 -TRAIN: - PRETRAINED_MODEL_DIR: "./pretrained_model/deeplabv3p_xception65_bn_coco/" - MODEL_SAVE_DIR: "./saved_model/deeplabv3p_xception65_bn_optic/" - SNAPSHOT_EPOCH: 5 -TEST: - TEST_MODEL: "./saved_model/deeplabv3p_xception65_bn_optic/final" -SOLVER: - NUM_EPOCHS: 10 - LR: 0.001 - LR_POLICY: "poly" - OPTIMIZER: "adam" \ No newline at end of file diff --git a/legacy/configs/deeplabv3p_xception65_optic_kunlun.yaml b/legacy/configs/deeplabv3p_xception65_optic_kunlun.yaml deleted file mode 100644 index bad5c9b04c..0000000000 --- a/legacy/configs/deeplabv3p_xception65_optic_kunlun.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# 数据集配置 -DATASET: - DATA_DIR: "./dataset/optic_disc_seg/" - NUM_CLASSES: 2 - TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt" - VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt" - VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - -# 预训练模型配置 -MODEL: - MODEL_NAME: "deeplabv3p" - DEFAULT_NORM_TYPE: "bn" - DEEPLAB: - BACKBONE: "xception_65" - -# 其他配置 -TRAIN_CROP_SIZE: (512, 512) -EVAL_CROP_SIZE: (512, 512) -AUG: - AUG_METHOD: "unpadding" - FIX_RESIZE_SIZE: (512, 512) -BATCH_SIZE: 1 -TRAIN: - PRETRAINED_MODEL_DIR: "./pretrained_model/deeplabv3p_xception65_bn_coco/" - MODEL_SAVE_DIR: "./saved_model/deeplabv3p_xception65_bn_optic/" - SNAPSHOT_EPOCH: 2 -TEST: - TEST_MODEL: "./saved_model/deeplabv3p_xception65_bn_optic/final" -SOLVER: - NUM_EPOCHS: 20 - LR: 0.001 - LR_POLICY: "poly" - OPTIMIZER: "adam" diff --git a/legacy/configs/fast_scnn_pet.yaml b/legacy/configs/fast_scnn_pet.yaml deleted file mode 100644 index 2b9b659f18..0000000000 --- a/legacy/configs/fast_scnn_pet.yaml +++ /dev/null @@ -1,43 +0,0 @@ -TRAIN_CROP_SIZE: (512, 512) # (width, height), for unpadding rangescaling and stepscaling -EVAL_CROP_SIZE: (512, 512) # (width, height), for unpadding rangescaling and stepscaling -AUG: - AUG_METHOD: "unpadding" # choice unpadding rangescaling and stepscaling - FIX_RESIZE_SIZE: (512, 512) # (width, height), for unpadding - - INF_RESIZE_VALUE: 500 # for rangescaling - MAX_RESIZE_VALUE: 600 # for rangescaling - MIN_RESIZE_VALUE: 400 # for rangescaling - - MAX_SCALE_FACTOR: 1.25 # for stepscaling - MIN_SCALE_FACTOR: 0.75 # for stepscaling - SCALE_STEP_SIZE: 0.25 # for stepscaling - MIRROR: True -BATCH_SIZE: 4 -DATASET: - DATA_DIR: "./dataset/mini_pet/" - IMAGE_TYPE: "rgb" # choice rgb or rgba - NUM_CLASSES: 3 - TEST_FILE_LIST: "./dataset/mini_pet/file_list/test_list.txt" - TRAIN_FILE_LIST: "./dataset/mini_pet/file_list/train_list.txt" - VAL_FILE_LIST: "./dataset/mini_pet/file_list/val_list.txt" - VIS_FILE_LIST: "./dataset/mini_pet/file_list/test_list.txt" - IGNORE_INDEX: 255 - SEPARATOR: " " -FREEZE: - MODEL_FILENAME: "__model__" - PARAMS_FILENAME: "__params__" -MODEL: - MODEL_NAME: "fast_scnn" - DEFAULT_NORM_TYPE: "bn" - -TRAIN: - PRETRAINED_MODEL_DIR: 
"./pretrained_model/fast_scnn_cityscapes/" - MODEL_SAVE_DIR: "./saved_model/fast_scnn_pet/" - SNAPSHOT_EPOCH: 10 -TEST: - TEST_MODEL: "./saved_model/fast_scnn_pet/final" -SOLVER: - NUM_EPOCHS: 100 - LR: 0.005 - LR_POLICY: "poly" - OPTIMIZER: "sgd" diff --git a/legacy/configs/hrnet_optic.yaml b/legacy/configs/hrnet_optic.yaml deleted file mode 100644 index 7154bceeea..0000000000 --- a/legacy/configs/hrnet_optic.yaml +++ /dev/null @@ -1,39 +0,0 @@ -# 数据集配置 -DATASET: - DATA_DIR: "./dataset/optic_disc_seg/" - NUM_CLASSES: 2 - TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt" - VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt" - VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - -# 预训练模型配置 -MODEL: - MODEL_NAME: "hrnet" - DEFAULT_NORM_TYPE: "bn" - HRNET: - STAGE2: - NUM_CHANNELS: [18, 36] - STAGE3: - NUM_CHANNELS: [18, 36, 72] - STAGE4: - NUM_CHANNELS: [18, 36, 72, 144] - -# 其他配置 -TRAIN_CROP_SIZE: (512, 512) -EVAL_CROP_SIZE: (512, 512) -AUG: - AUG_METHOD: "unpadding" - FIX_RESIZE_SIZE: (512, 512) -BATCH_SIZE: 4 -TRAIN: - PRETRAINED_MODEL_DIR: "./pretrained_model/hrnet_w18_bn_cityscapes/" - MODEL_SAVE_DIR: "./saved_model/hrnet_optic/" - SNAPSHOT_EPOCH: 5 -TEST: - TEST_MODEL: "./saved_model/hrnet_optic/final" -SOLVER: - NUM_EPOCHS: 10 - LR: 0.001 - LR_POLICY: "poly" - OPTIMIZER: "adam" diff --git a/legacy/configs/icnet_optic.yaml b/legacy/configs/icnet_optic.yaml deleted file mode 100644 index 0f2742e6cf..0000000000 --- a/legacy/configs/icnet_optic.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# 数据集配置 -DATASET: - DATA_DIR: "./dataset/optic_disc_seg/" - NUM_CLASSES: 2 - TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt" - VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt" - VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - -# 预训练模型配置 -MODEL: - MODEL_NAME: "icnet" - DEFAULT_NORM_TYPE: "bn" - MULTI_LOSS_WEIGHT: "[1.0, 0.4, 0.16]" - ICNET: - DEPTH_MULTIPLIER: 0.5 - -# 其他配置 -TRAIN_CROP_SIZE: (512, 512) -EVAL_CROP_SIZE: (512, 512) -AUG: - AUG_METHOD: "unpadding" - FIX_RESIZE_SIZE: (512, 512) -BATCH_SIZE: 4 -TRAIN: - PRETRAINED_MODEL_DIR: "./pretrained_model/icnet_bn_cityscapes/" - MODEL_SAVE_DIR: "./saved_model/icnet_optic/" - SNAPSHOT_EPOCH: 5 -TEST: - TEST_MODEL: "./saved_model/icnet_optic/final" -SOLVER: - NUM_EPOCHS: 10 - LR: 0.001 - LR_POLICY: "poly" - OPTIMIZER: "adam" diff --git a/legacy/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml b/legacy/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml deleted file mode 100644 index 45f5fc724a..0000000000 --- a/legacy/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml +++ /dev/null @@ -1,50 +0,0 @@ -EVAL_CROP_SIZE: (1025, 1025) # (width, height), for unpadding rangescaling and stepscaling -TRAIN_CROP_SIZE: (769, 769) # (width, height), for unpadding rangescaling and stepscaling -AUG: - AUG_METHOD: u"stepscaling" # choice unpadding rangescaling and stepscaling - FIX_RESIZE_SIZE: (640, 640) # (width, height), for unpadding - INF_RESIZE_VALUE: 500 # for rangescaling - MAX_RESIZE_VALUE: 600 # for rangescaling - MIN_RESIZE_VALUE: 400 # for rangescaling - MAX_SCALE_FACTOR: 2.0 # for stepscaling - MIN_SCALE_FACTOR: 0.5 # for stepscaling - SCALE_STEP_SIZE: 0.25 # for stepscaling - FLIP: True -BATCH_SIZE: 24 -DATASET: - DATA_DIR: "./dataset/MiniDeepGlobeRoadExtraction/" - IMAGE_TYPE: "rgb" # choice rgb or rgba - NUM_CLASSES: 2 - TEST_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt" - TRAIN_FILE_LIST: 
"dataset/MiniDeepGlobeRoadExtraction/train.txt" - VAL_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt" - VIS_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt" - IGNORE_INDEX: 255 - SEPARATOR: '|' -FREEZE: - MODEL_FILENAME: "model" - PARAMS_FILENAME: "params" - SAVE_DIR: "freeze_model" -MODEL: - DEFAULT_NORM_TYPE: "bn" - MODEL_NAME: "deeplabv3p" - DEEPLAB: - BACKBONE: "mobilenetv2" - DEPTH_MULTIPLIER: 1.0 - ENCODER_WITH_ASPP: False - ENABLE_DECODER: False -TEST: - TEST_MODEL: "./saved_model/lovasz_hinge_deeplabv3p_mobilenet_road/final" -TRAIN: - MODEL_SAVE_DIR: "./saved_model/lovasz_hinge_deeplabv3p_mobilenet_road/" - PRETRAINED_MODEL_DIR: "./pretrained_model/deeplabv3p_mobilenetv2-1-0_bn_coco/" - SNAPSHOT_EPOCH: 10 -SOLVER: - LR: 0.1 - LR_POLICY: "poly" - OPTIMIZER: "sgd" - NUM_EPOCHS: 300 - LOSS: ["lovasz_hinge_loss","bce_loss"] - LOSS_WEIGHT: - LOVASZ_HINGE_LOSS: 0.5 - BCE_LOSS: 0.5 diff --git a/legacy/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml b/legacy/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml deleted file mode 100755 index b1c6ff7990..0000000000 --- a/legacy/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml +++ /dev/null @@ -1,49 +0,0 @@ -TRAIN_CROP_SIZE: (500, 500) # (width, height), for unpadding rangescaling and stepscaling #训练时图像裁剪尺寸(宽,高) -EVAL_CROP_SIZE: (500, 500) # (width, height), for unpadding rangescaling and stepscaling #验证时图像裁剪尺寸(宽,高) -AUG: - AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling - FIX_RESIZE_SIZE: (500, 500) # (width, height), for unpadding - - INF_RESIZE_VALUE: 500 # for rangescaling - MAX_RESIZE_VALUE: 600 # for rangescaling - MIN_RESIZE_VALUE: 400 # for rangescaling - - MAX_SCALE_FACTOR: 1.25 # for stepscaling - MIN_SCALE_FACTOR: 0.75 # for stepscaling - SCALE_STEP_SIZE: 0.05 # for stepscaling - MIRROR: True - FLIP: True -BATCH_SIZE: 16 #批处理大小 -DATASET: - DATA_DIR: "./dataset/VOCtrainval_11-May-2012/VOC2012/" #图片路径 - IMAGE_TYPE: "rgb" # choice rgb or rgba #图片类别“RGB” - NUM_CLASSES: 21 #类别数(包括背景类别) - TEST_FILE_LIST: "dataset/VOCtrainval_11-May-2012/VOC2012/ImageSets/Segmentation/val.list" - TRAIN_FILE_LIST: "dataset/VOCtrainval_11-May-2012/VOC2012/ImageSets/Segmentation/train.list" - VAL_FILE_LIST: "dataset/VOCtrainval_11-May-2012/VOC2012/ImageSets/Segmentation/val.list" - VIS_FILE_LIST: "dataset/VOCtrainval_11-May-2012/VOC2012/ImageSets/Segmentation/val.list" - IGNORE_INDEX: 255 - SEPARATOR: " " -MODEL: - MODEL_NAME: "deeplabv3p" - DEFAULT_NORM_TYPE: "bn" #指定norm的类型,此处提供bn和gn(默认)两种选择,分别指batch norm和group norm。 - DEEPLAB: - BACKBONE: "mobilenetv2" - DEPTH_MULTIPLIER: 1.0 - ENCODER_WITH_ASPP: False - ENABLE_DECODER: False -TRAIN: - PRETRAINED_MODEL_DIR: "./pretrained_model/deeplabv3p_mobilenetv2-1-0_bn_coco/" - MODEL_SAVE_DIR: "./saved_model/lovasz-softmax-voc" #模型保存路径 - SNAPSHOT_EPOCH: 10 -TEST: - TEST_MODEL: "./saved_model/lovasz-softmax-voc/final" #为测试模型路径 -SOLVER: - NUM_EPOCHS: 100 #训练epoch数,正整数 - LR: 0.0001 #初始学习率 - LR_POLICY: "poly" #学习率下降方法, 选项为poly、piecewise和cosine - OPTIMIZER: "sgd" #优化算法, 选项为sgd和adam - LOSS: ["lovasz_softmax_loss","softmax_loss"] - LOSS_WEIGHT: - LOVASZ_SOFTMAX_LOSS: 0.2 - SOFTMAX_LOSS: 0.8 diff --git a/legacy/configs/ocrnet_w18_bn_cityscapes.yaml b/legacy/configs/ocrnet_w18_bn_cityscapes.yaml deleted file mode 100644 index 15fb92ad5a..0000000000 --- a/legacy/configs/ocrnet_w18_bn_cityscapes.yaml +++ /dev/null @@ -1,54 +0,0 @@ -EVAL_CROP_SIZE: (2048, 1024) # (width, height), for unpadding rangescaling and stepscaling -TRAIN_CROP_SIZE: (1024, 512) # (width, height), 
for unpadding rangescaling and stepscaling -AUG: -# AUG_METHOD: "unpadding" # choice unpadding rangescaling and stepscaling - AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling - FIX_RESIZE_SIZE: (1024, 512) # (width, height), for unpadding - INF_RESIZE_VALUE: 500 # for rangescaling - MAX_RESIZE_VALUE: 600 # for rangescaling - MIN_RESIZE_VALUE: 400 # for rangescaling - MAX_SCALE_FACTOR: 2.0 # for stepscaling - MIN_SCALE_FACTOR: 0.5 # for stepscaling - SCALE_STEP_SIZE: 0.25 # for stepscaling - MIRROR: True -BATCH_SIZE: 4 -#BATCH_SIZE: 4 -DATASET: - DATA_DIR: "./dataset/cityscapes/" - IMAGE_TYPE: "rgb" # choice rgb or rgba - NUM_CLASSES: 19 - TEST_FILE_LIST: "./dataset/cityscapes/val.list" - TRAIN_FILE_LIST: "./dataset/cityscapes/train.list" - VAL_FILE_LIST: "./dataset/cityscapes/val.list" - VIS_FILE_LIST: "./dataset/cityscapes/val.list" - IGNORE_INDEX: 255 - SEPARATOR: " " -FREEZE: - MODEL_FILENAME: "model" - PARAMS_FILENAME: "params" -MODEL: - MODEL_NAME: "ocrnet" - DEFAULT_NORM_TYPE: "bn" - HRNET: - STAGE2: - NUM_CHANNELS: [18, 36] - STAGE3: - NUM_CHANNELS: [18, 36, 72] - STAGE4: - NUM_CHANNELS: [18, 36, 72, 144] - OCR: - OCR_MID_CHANNELS: 512 - OCR_KEY_CHANNELS: 256 - MULTI_LOSS_WEIGHT: [1.0, 1.0] -TRAIN: - PRETRAINED_MODEL_DIR: u"./pretrained_model/ocrnet_w18_cityscape/best_model" - MODEL_SAVE_DIR: "output/ocrnet_w18_bn_cityscapes" - SNAPSHOT_EPOCH: 1 - SYNC_BATCH_NORM: True -TEST: - TEST_MODEL: "output/ocrnet_w18_bn_cityscapes/first" -SOLVER: - LR: 0.01 - LR_POLICY: "poly" - OPTIMIZER: "sgd" - NUM_EPOCHS: 500 diff --git a/legacy/configs/pspnet_optic.yaml b/legacy/configs/pspnet_optic.yaml deleted file mode 100644 index 589e2b53cc..0000000000 --- a/legacy/configs/pspnet_optic.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# 数据集配置 -DATASET: - DATA_DIR: "./dataset/optic_disc_seg/" - NUM_CLASSES: 2 - TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt" - VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt" - VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - -# 预训练模型配置 -MODEL: - MODEL_NAME: "pspnet" - DEFAULT_NORM_TYPE: "bn" - PSPNET: - DEPTH_MULTIPLIER: 1 - LAYERS: 50 - -# 其他配置 -TRAIN_CROP_SIZE: (512, 512) -EVAL_CROP_SIZE: (512, 512) -AUG: - AUG_METHOD: "unpadding" - FIX_RESIZE_SIZE: (512, 512) -BATCH_SIZE: 4 -TRAIN: - PRETRAINED_MODEL_DIR: "./pretrained_model/pspnet50_bn_cityscapes/" - MODEL_SAVE_DIR: "./saved_model/pspnet_optic/" - SNAPSHOT_EPOCH: 5 -TEST: - TEST_MODEL: "./saved_model/pspnet_optic/final" -SOLVER: - NUM_EPOCHS: 10 - LR: 0.001 - LR_POLICY: "poly" - OPTIMIZER: "adam" diff --git a/legacy/configs/unet_optic.yaml b/legacy/configs/unet_optic.yaml deleted file mode 100644 index cd564817c7..0000000000 --- a/legacy/configs/unet_optic.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# 数据集配置 -DATASET: - DATA_DIR: "./dataset/optic_disc_seg/" - NUM_CLASSES: 2 - TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt" - VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt" - VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt" - -# 预训练模型配置 -MODEL: - MODEL_NAME: "unet" - DEFAULT_NORM_TYPE: "bn" - -# 其他配置 -TRAIN_CROP_SIZE: (512, 512) -EVAL_CROP_SIZE: (512, 512) -AUG: - AUG_METHOD: "unpadding" - FIX_RESIZE_SIZE: (512, 512) -BATCH_SIZE: 4 -TRAIN: - PRETRAINED_MODEL_DIR: "./pretrained_model/unet_bn_coco/" - MODEL_SAVE_DIR: "./saved_model/unet_optic/" - SNAPSHOT_EPOCH: 5 -TEST: - TEST_MODEL: "./saved_model/unet_optic/final" -SOLVER: - NUM_EPOCHS: 10 - 
LR: 0.001 - LR_POLICY: "poly" - OPTIMIZER: "adam" From a9b241e91b05e106ccad139a8846e368a1b752ce Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Thu, 28 Jan 2021 14:25:30 +0800 Subject: [PATCH 046/210] update align corners True to False --- legacy/pdseg/loss.py | 2 +- legacy/pdseg/models/backbone/resnet_vd.py | 2 +- legacy/pdseg/models/model_builder.py | 5 +++- legacy/pdseg/models/modeling/deeplab.py | 28 ++++++++++++++++------- legacy/pdseg/models/modeling/hrnet.py | 10 ++++---- 5 files changed, 31 insertions(+), 16 deletions(-) diff --git a/legacy/pdseg/loss.py b/legacy/pdseg/loss.py index 4b8d757abd..5f657b0ba7 100644 --- a/legacy/pdseg/loss.py +++ b/legacy/pdseg/loss.py @@ -61,7 +61,7 @@ def multi_softmax_with_loss(logits, if label.shape[2] != logit.shape[2] or label.shape[ 3] != logit.shape[3]: logit_label = F.interpolate( - label, logit.shape[2:], mode='nearest', align_corners=True) + label, logit.shape[2:], mode='nearest', align_corners=False) else: logit_label = label logit_mask = (logit_label.astype('int32') != diff --git a/legacy/pdseg/models/backbone/resnet_vd.py b/legacy/pdseg/models/backbone/resnet_vd.py index 8d405eeb2f..f5f50f7f10 100644 --- a/legacy/pdseg/models/backbone/resnet_vd.py +++ b/legacy/pdseg/models/backbone/resnet_vd.py @@ -216,7 +216,7 @@ def zero_padding(self, input, padding): def interp(self, input, out_shape): out_shape = list(out_shape.astype("int32")) return F.interpolate( - input, out_shape, mode='bilinear', align_corners=True) + input, out_shape, mode='bilinear', align_corners=False) def conv_bn_layer(self, input, diff --git a/legacy/pdseg/models/model_builder.py b/legacy/pdseg/models/model_builder.py index 80566218d1..97a811add7 100644 --- a/legacy/pdseg/models/model_builder.py +++ b/legacy/pdseg/models/model_builder.py @@ -195,7 +195,10 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN): if logit.shape[2:] != label.shape[2:]: logit = F.interpolate( - logit, label.shape[2:], mode='bilinear', align_corners=True) + logit, + label.shape[2:], + mode='bilinear', + align_corners=False) # return image input and logit output for inference graph prune if ModelPhase.is_predict(phase): diff --git a/legacy/pdseg/models/modeling/deeplab.py b/legacy/pdseg/models/modeling/deeplab.py index 19bcf68a10..b98f3b1fe8 100644 --- a/legacy/pdseg/models/modeling/deeplab.py +++ b/legacy/pdseg/models/modeling/deeplab.py @@ -66,7 +66,10 @@ def encoder(input): padding=0, param_attr=param_attr)) image_avg = F.interpolate( - image_avg, input.shape[2:], mode='bilinear', align_corners=True) + image_avg, + input.shape[2:], + mode='bilinear', + align_corners=False) if cfg.MODEL.DEEPLAB.ENCODER.ADD_IMAGE_LEVEL_FEATURE: concat_logits.append(image_avg) @@ -82,7 +85,7 @@ def encoder(input): param_attr=param_attr, bias_attr=None)) aspp0 = F.interpolate( - aspp0, input.shape[2:], mode='bilinear', align_corners=True) + aspp0, input.shape[2:], mode='bilinear', align_corners=False) concat_logits.append(aspp0) if aspp_ratios: @@ -101,7 +104,10 @@ def encoder(input): padding=aspp_ratios[0], param_attr=param_attr)) aspp1 = F.interpolate( - aspp1, input.shape[2:], mode='bilinear', align_corners=True) + aspp1, + input.shape[2:], + mode='bilinear', + align_corners=False) concat_logits.append(aspp1) with scope("aspp2"): if cfg.MODEL.DEEPLAB.ASPP_WITH_SEP_CONV: @@ -118,7 +124,10 @@ def encoder(input): padding=aspp_ratios[1], param_attr=param_attr)) aspp2 = F.interpolate( - aspp2, input.shape[2:], mode='bilinear', align_corners=True) + aspp2, + input.shape[2:], + mode='bilinear', + 
align_corners=False)
             concat_logits.append(aspp2)
         with scope("aspp3"):
             if cfg.MODEL.DEEPLAB.ASPP_WITH_SEP_CONV:
@@ -135,7 +144,10 @@ def encoder(input):
                             dilation=aspp_ratios[2],
                             padding=aspp_ratios[2],
                             param_attr=param_attr))
-            aspp3 = F.interpolate(
-                aspp3, input.shape[2:], mode='bilinear', align_corners=True)
+            aspp3 = F.interpolate(
+                aspp3,
+                input.shape[2:],
+                mode='bilinear',
+                align_corners=False)
             concat_logits.append(aspp3)

     with scope("concat"):
@@ -163,7 +175,7 @@ def _decoder_with_sum_merge(encode_data, decode_shortcut, param_attr):
         encode_data,
         decode_shortcut.shape[2:],
         mode='bilinear',
-        align_corners=True)
+        align_corners=False)
     encode_data = conv(
         encode_data,
         cfg.MODEL.DEEPLAB.DECODER.CONV_FILTERS,
@@ -203,7 +215,7 @@ def _decoder_with_concat(encode_data, decode_shortcut, param_attr):
         encode_data,
         decode_shortcut.shape[2:],
         mode='bilinear',
-        align_corners=True)
+        align_corners=False)
     encode_data = paddle.concat([encode_data, decode_shortcut], axis=1)
     if cfg.MODEL.DEEPLAB.DECODER_USE_SEP_CONV:
         with scope("separable_conv1"):
@@ -331,5 +343,5 @@ def deeplabv3p(img, num_classes):
         logit = data

     logit = F.interpolate(
-        logit, img.shape[2:], mode='bilinear', align_corners=True)
+        logit, img.shape[2:], mode='bilinear', align_corners=False)
     return logit
diff --git a/legacy/pdseg/models/modeling/hrnet.py b/legacy/pdseg/models/modeling/hrnet.py
index dde1644079..49b5d7f1ea 100644
--- a/legacy/pdseg/models/modeling/hrnet.py
+++ b/legacy/pdseg/models/modeling/hrnet.py
@@ -133,7 +133,7 @@ def fuse_layers(x, channels, multi_scale_output=True, name=None):
                     y,
                     size=[height, width],
                     mode='bilinear',
-                    align_corners=True)
+                    align_corners=False)
                 residual = residual + y
             elif j < i:
                 y = x[j]
@@ -272,11 +272,11 @@ def high_resolution_net(input, num_classes):
     shape = st4[0].shape
     height, width = shape[-2], shape[-1]
     st4[1] = F.interpolate(
-        st4[1], size=[height, width], mode='bilinear', align_corners=True)
+        st4[1], size=[height, width], mode='bilinear', align_corners=False)
     st4[2] = F.interpolate(
-        st4[2], size=[height, width], mode='bilinear', align_corners=True)
+        st4[2], size=[height, width], mode='bilinear', align_corners=False)
     st4[3] = F.interpolate(
-        st4[3], size=[height, width], mode='bilinear', align_corners=True)
+        st4[3], size=[height, width], mode='bilinear', align_corners=False)

     out = paddle.concat(st4, axis=1)
     last_channels = sum(channels_4)
@@ -302,7 +302,7 @@ def high_resolution_net(input, num_classes):
             name='conv-1_weights'),
         bias_attr=None)

     out = F.interpolate(
-        out, size=input.shape[2:], mode='bilinear', align_corners=True)
+        out, size=input.shape[2:], mode='bilinear', align_corners=False)

     return out

From 0c93c273502158a168d5cbeb7c0662320abf10e5 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Thu, 28 Jan 2021 14:27:15 +0800
Subject: [PATCH 047/210] update hrnetw18 config file

---
 .../hrnetw18_cityscapes_1024x512_215.yaml     | 51 +++++++++++++++++++
 1 file changed, 51 insertions(+)
 create mode 100644 legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml

diff --git a/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml b/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml
new file mode 100644
index 0000000000..9b08458c53
--- /dev/null
+++ b/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml
@@ -0,0 +1,53 @@
+EVAL_CROP_SIZE: (2048, 1024) # (width, height), for unpadding rangescaling and stepscaling
+TRAIN_CROP_SIZE: (1024, 512) # (width, height), for unpadding rangescaling and stepscaling
+AUG:
+# AUG_METHOD: "unpadding" # choice unpadding rangescaling and stepscaling
+  AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling
+ 
FIX_RESIZE_SIZE: (1024, 512) # (width, height), for unpadding + INF_RESIZE_VALUE: 500 # for rangescaling + MAX_RESIZE_VALUE: 600 # for rangescaling + MIN_RESIZE_VALUE: 400 # for rangescaling + MAX_SCALE_FACTOR: 2.0 # for stepscaling + MIN_SCALE_FACTOR: 0.5 # for stepscaling + SCALE_STEP_SIZE: 0.25 # for stepscaling + MIRROR: True +BATCH_SIZE: 8 + +DATASET: + DATA_DIR: "./dataset/cityscapes/" + IMAGE_TYPE: "rgb" # choice rgb or rgba + NUM_CLASSES: 19 + TEST_FILE_LIST: "./dataset/cityscapes/val.list" + TRAIN_FILE_LIST: "./dataset/cityscapes/train.list" + VAL_FILE_LIST: "./dataset/cityscapes/val.list" + VIS_FILE_LIST: "./dataset/cityscapes/val.list" + IGNORE_INDEX: 255 + SEPARATOR: " " + +MODEL: + MODEL_NAME: "hrnet" + DEFAULT_NORM_TYPE: "bn" + HRNET: + STAGE2: + NUM_CHANNELS: [18, 36] + STAGE3: + NUM_CHANNELS: [18, 36, 72] + STAGE4: + NUM_CHANNELS: [18, 36, 72, 144] + +TRAIN: + PRETRAINED_MODEL_DIR: u"./pretrained_model/hrnet_w18_ssld" + MODEL_SAVE_DIR: "output/hrnetw18_bn_cityscapes" + SNAPSHOT_EPOCH: 10 + SYNC_BATCH_NORM: True + +TEST: + TEST_MODEL: "output/hrnetw18_bn_cityscapes/best_model" + +SOLVER: + LR: 0.01 + LR_POLICY: "poly" + OPTIMIZER: "sgd" + NUM_EPOCHS: 215 From 7487e5c82c3959370820d054168a334055c5d6c8 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 29 Jan 2021 17:51:14 +0800 Subject: [PATCH 048/210] update train.py --- legacy/pdseg/train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index 5c53ffe8d2..61d843d634 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -358,6 +358,7 @@ def data_generator(): avg_train_batch_cost = batch_cost_averager.get_average() avg_train_reader_cost = reader_cost_averager.get_average() eta = calculate_eta(all_step - step, avg_train_batch_cost) + avg_loss /= args.log_steps print( "epoch={} step={} lr={:.5f} loss={:.4f} batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}" .format(epoch, step, lr.get_lr(), avg_loss, From 0e6153c7ae8dfb5b9669a05c5dbc88474f87e42c Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 29 Jan 2021 17:53:13 +0800 Subject: [PATCH 049/210] update deeplabv3p_resnet50_vd_cityscapes.yaml --- legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml index 440e22339f..1d49929600 100644 --- a/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml +++ b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml @@ -11,7 +11,7 @@ AUG: SCALE_STEP_SIZE: 0.25 # for stepscaling MIRROR: True TO_RGB: True -BATCH_SIZE: 4 +BATCH_SIZE: 8 DATASET: DATA_DIR: "./dataset/cityscapes/" IMAGE_TYPE: "rgb" # choice rgb or rgba @@ -35,13 +35,13 @@ MODEL: OUTPUT_STRIDE: 8 TRAIN: PRETRAINED_MODEL_DIR: u"pretrained_model/resnet50_vd_imagenet" - MODEL_SAVE_DIR: "saved_model/deeplabv3p_resnet50_vd_bn_cityscapes" - SNAPSHOT_EPOCH: 1 + MODEL_SAVE_DIR: "output/deeplabv3p_resnet50_vd_bn_cityscapes" + SNAPSHOT_EPOCH: 10 SYNC_BATCH_NORM: True TEST: - TEST_MODEL: "saved_model/deeplabv3p_resnet50_vd_bn_cityscapes/final" + TEST_MODEL: "output/deeplabv3p_resnet50_vd_bn_cityscapes/final" SOLVER: LR: 0.01 LR_POLICY: "poly" OPTIMIZER: "sgd" - NUM_EPOCHS: 3 + NUM_EPOCHS: 215 From 8d0847807ef4db1bf092b821adb01ba343cbc7ef Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 29 Jan 2021 18:19:53 +0800 Subject: [PATCH 050/210] add bias config to fcn head for hrnet --- benchmark/hrnet.yml | 1 + 
.../hrnetw18_cityscapes_1024x512_215.yaml | 2 ++ legacy/pdseg/models/modeling/hrnet.py | 4 ++-- legacy/pdseg/utils/config.py | 2 ++ paddleseg/models/fcn.py | 20 +++++++++++++------ 5 files changed, 21 insertions(+), 8 deletions(-) diff --git a/benchmark/hrnet.yml b/benchmark/hrnet.yml index 6bbfda73c1..f7e0528856 100644 --- a/benchmark/hrnet.yml +++ b/benchmark/hrnet.yml @@ -29,6 +29,7 @@ model: type: HRNet_W18 num_classes: 19 backbone_indices: [-1] + bias: False optimizer: type: sgd diff --git a/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml b/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml index 9b08458c53..83f272bc30 100644 --- a/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml +++ b/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml @@ -34,6 +34,8 @@ MODEL: NUM_CHANNELS: [18, 36, 72] STAGE4: NUM_CHANNELS: [18, 36, 72, 144] + BIAS: + False TRAIN: PRETRAINED_MODEL_DIR: u"./pretrained_model/hrnet_w18_ssld" diff --git a/legacy/pdseg/models/modeling/hrnet.py b/legacy/pdseg/models/modeling/hrnet.py index 49b5d7f1ea..88ad31360d 100644 --- a/legacy/pdseg/models/modeling/hrnet.py +++ b/legacy/pdseg/models/modeling/hrnet.py @@ -288,7 +288,7 @@ def high_resolution_net(input, num_classes): stride=1, if_act=True, name='conv-2', - bias_attr=None) + bias_attr=cfg.MODEL.HRNET.BIAS) out = nn.conv2d( input=out, num_filters=num_classes, @@ -299,7 +299,7 @@ def high_resolution_net(input, num_classes): param_attr=paddle.ParamAttr( initializer=paddle.nn.initializer.KaimingUniform(), name='conv-1_weights'), - bias_attr=None) + bias_attr=cfg.MODEL.HRNET.BIAS) out = F.interpolate( out, size=input.shape[2:], mode='bilinear', align_corners=False) diff --git a/legacy/pdseg/utils/config.py b/legacy/pdseg/utils/config.py index 39540fcbf4..0df689e37b 100644 --- a/legacy/pdseg/utils/config.py +++ b/legacy/pdseg/utils/config.py @@ -248,6 +248,8 @@ # HRNET STAGE4 设置 cfg.MODEL.HRNET.STAGE4.NUM_MODULES = 3 cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS = [40, 80, 160, 320] +# FCN Head的卷积是否用bias +cfg.MODEL.HRNET.BIAS = None ########################## OCNET模型配置 ###################################### cfg.MODEL.OCR.OCR_MID_CHANNELS = 512 diff --git a/paddleseg/models/fcn.py b/paddleseg/models/fcn.py index d774f252b5..4d2915976c 100644 --- a/paddleseg/models/fcn.py +++ b/paddleseg/models/fcn.py @@ -47,7 +47,8 @@ def __init__(self, backbone_indices=(-1, ), channels=None, align_corners=False, - pretrained=None): + pretrained=None, + bias=True): super(FCN, self).__init__() self.backbone = backbone @@ -55,8 +56,12 @@ def __init__(self, backbone.feat_channels[i] for i in backbone_indices ] - self.head = FCNHead(num_classes, backbone_indices, backbone_channels, - channels) + self.head = FCNHead( + num_classes, + backbone_indices, + backbone_channels, + channels, + bias=bias) self.align_corners = align_corners self.pretrained = pretrained @@ -95,7 +100,8 @@ def __init__(self, num_classes, backbone_indices=(-1, ), backbone_channels=(270, ), - channels=None): + channels=None, + bias=True): super(FCNHead, self).__init__() self.num_classes = num_classes @@ -108,13 +114,15 @@ def __init__(self, out_channels=channels, kernel_size=1, padding='same', - stride=1) + stride=1, + bias_attr=bias) self.cls = nn.Conv2D( in_channels=channels, out_channels=self.num_classes, kernel_size=1, stride=1, - padding=0) + padding=0, + bias_attr=bias) self.init_weight() def forward(self, feat_list): From a451ff8acc1e6c8e40276e2e9bd5932237112fb3 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 1 Feb 2021 11:10:59 +0800 Subject: [PATCH 051/210] 
using BatchNorm in one gpu --- paddleseg/models/layers/layer_libs.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/paddleseg/models/layers/layer_libs.py b/paddleseg/models/layers/layer_libs.py index 1ce869f5e2..84746a6913 100644 --- a/paddleseg/models/layers/layer_libs.py +++ b/paddleseg/models/layers/layer_libs.py @@ -21,6 +21,9 @@ def SyncBatchNorm(*args, **kwargs): """In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm2D instead""" if paddle.get_device() == 'cpu': return nn.BatchNorm2D(*args, **kwargs) + elif paddle.distributed.ParallelEnv().nranks == 1: + print('using batch norm') + return nn.BatchNorm2D(*args, **kwargs) else: return nn.SyncBatchNorm(*args, **kwargs) From ad006be7b59f8de14db4348e736601846bd37a8a Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 1 Feb 2021 11:21:59 +0800 Subject: [PATCH 052/210] update README.md --- README.md | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 2226498935..55e721ef9a 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ## 动态图 -通过**--fp16**开启amp训练。 +通过 **--fp16** 开启amp训练。 单机单卡使用如下命令进行训练: ``` @@ -16,24 +16,23 @@ export CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 ``` -deeplabv3p 模型的配置文件为: -benchmark/deeplabv3p.yml - ## 静态图 -**MODEL.FP16 True**开启amp训练 +通过 **MODEL.FP16 True** 开启amp训练 单机单卡使用如下命令进行训练: ``` cd legacy export CUDA_VISIBLE_DEVICES=0 -python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_500.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 2 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True +python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 2 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True ``` 单机单卡使用如下命令进行训练: ``` export CUDA_VISIBLE_DEVICES=0,1 -python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_500.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True +python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True ``` -deeplabv3p模型的配置文件为: -configs/deeplabv3p_resnet50_vd_cityscapes.yaml +## 竞品 +竞品为[mmsegmentation](https://github.com/open-mmlab/mmsegmentation) +对应竞品配置文件为:configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py +相关执行方式其参考官方仓库。 From dde3ea8b78e375346526f1289a25762319de8966 Mon Sep 17 00:00:00 2001 From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com> Date: Mon, 1 Feb 2021 20:02:23 +0800 Subject: [PATCH 053/210] Update README.md --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 55e721ef9a..ebaa0b18ef 100644 --- a/README.md +++ b/README.md @@ -34,5 +34,7 @@ python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_ ## 竞品 竞品为[mmsegmentation](https://github.com/open-mmlab/mmsegmentation) + 对应竞品配置文件为:configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py -相关执行方式其参考官方仓库。 + +相关执行方式请参考其官方仓库。 From 84dff5d45bd428841df67533cbaef1a9f3e1520c Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 1 Feb 2021 21:56:53 +0800 Subject: [PATCH 054/210] add config check module --- .../bisenet_optic_disc_512x512_1k.yml | 1 - paddleseg/cvlibs/config.py | 12 ++++- paddleseg/utils/__init__.py | 1 + paddleseg/utils/config_check.py | 51 +++++++++++++++++++ train.py | 4 +- val.py | 4 +- 6 files changed, 68 insertions(+), 5 deletions(-) create mode 100644 
paddleseg/utils/config_check.py diff --git a/configs/quick_start/bisenet_optic_disc_512x512_1k.yml b/configs/quick_start/bisenet_optic_disc_512x512_1k.yml index d04b1056e1..181bdf0941 100644 --- a/configs/quick_start/bisenet_optic_disc_512x512_1k.yml +++ b/configs/quick_start/bisenet_optic_disc_512x512_1k.yml @@ -39,5 +39,4 @@ loss: model: type: BiSeNetV2 - num_classes: 2 pretrained: Null diff --git a/paddleseg/cvlibs/config.py b/paddleseg/cvlibs/config.py index f2224f09fa..755e0263f0 100644 --- a/paddleseg/cvlibs/config.py +++ b/paddleseg/cvlibs/config.py @@ -225,10 +225,18 @@ def loss(self) -> dict: @property def model(self) -> paddle.nn.Layer: model_cfg = self.dic.get('model').copy() - model_cfg['num_classes'] = self.train_dataset.num_classes - if not model_cfg: raise RuntimeError('No model specified in the configuration file.') + if not 'num_classes' in model_cfg: + if self.train_dataset: + model_cfg['num_classes'] = self.train_dataset.num_classes + elif self.val_dataset: + model_cfg['num_classes'] = self.val_dataset.num_classes + else: + raise ValueError( + '`num_classes` is not found. Please set it in model, train_dataset or val_dataset' + ) + if not self._model: self._model = self._load_object(model_cfg) return self._model diff --git a/paddleseg/utils/__init__.py b/paddleseg/utils/__init__.py index d621193545..b11c17d4d8 100644 --- a/paddleseg/utils/__init__.py +++ b/paddleseg/utils/__init__.py @@ -19,3 +19,4 @@ from .utils import * from .timer import TimeAverager, calculate_eta from . import visualize +from .config_check import config_check diff --git a/paddleseg/utils/config_check.py b/paddleseg/utils/config_check.py new file mode 100644 index 0000000000..e570efe5a8 --- /dev/null +++ b/paddleseg/utils/config_check.py @@ -0,0 +1,51 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np + + +def config_check(cfg): + """ + To check config。 + + Args: + cfg (paddleseg.cvlibs.Config): An object of paddleseg.cvlibs.Config. + """ + + num_classes_check(cfg) + + +def num_classes_check(cfg): + """" + Check that the num_classes in model, train_dataset and val_dataset is consistent. + """ + num_classes_set = set() + if cfg.train_dataset and hasattr(cfg.train_dataset, 'num_classes'): + num_classes_set.add(cfg.train_dataset.num_classes) + if cfg.val_dataset and hasattr(cfg.val_dataset, 'num_classes'): + num_classes_set.add(cfg.val_dataset.num_classes) + if cfg.dic.get('model', None) and cfg.dic['model'].get('num_classes', None): + num_classes_set.add(cfg.dic['model'].get('num_classes')) + if (not cfg.train_dataset) and (not cfg.val_dataset): + raise ValueError( + 'One of `train_dataset` or `val_dataset should be given, but there are none.' + ) + if len(num_classes_set) == 0: + raise ValueError( + '`num_classes` is not found. Please set it in model, train_dataset or val_dataset' + ) + elif len(num_classes_set) > 1: + raise ValueError( + '`num_classes` is not consistent: {}. 
Please set it consistently in model or train_dataset or val_dataset' + .format(num_classes_set)) diff --git a/train.py b/train.py index 950a6f8fe9..7300f9cb06 100644 --- a/train.py +++ b/train.py @@ -17,7 +17,7 @@ import paddle from paddleseg.cvlibs import manager, Config -from paddleseg.utils import get_sys_env, logger +from paddleseg.utils import get_sys_env, logger, config_check from paddleseg.core import train @@ -130,6 +130,8 @@ def main(args): msg += '------------------------------------------------' logger.info(msg) + config_check(cfg) + train( cfg.model, train_dataset, diff --git a/val.py b/val.py index 39826ffc6a..ac3b66e16e 100644 --- a/val.py +++ b/val.py @@ -19,7 +19,7 @@ from paddleseg.cvlibs import manager, Config from paddleseg.core import evaluate -from paddleseg.utils import get_sys_env, logger +from paddleseg.utils import get_sys_env, logger, config_check def parse_args(): @@ -122,6 +122,8 @@ def main(args): model.set_dict(para_state_dict) logger.info('Loaded trained params of model successfully') + config_check(cfg) + evaluate( model, val_dataset, From eb5877039f7cec4f18b21f2587718292d8233450 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 2 Feb 2021 10:35:06 +0800 Subject: [PATCH 055/210] update config.py --- paddleseg/cvlibs/config.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/paddleseg/cvlibs/config.py b/paddleseg/cvlibs/config.py index 755e0263f0..5ab1d29872 100644 --- a/paddleseg/cvlibs/config.py +++ b/paddleseg/cvlibs/config.py @@ -228,9 +228,10 @@ def model(self) -> paddle.nn.Layer: if not model_cfg: raise RuntimeError('No model specified in the configuration file.') if not 'num_classes' in model_cfg: - if self.train_dataset: + if self.train_dataset and hasattr(self.train_dataset, + 'num_classes'): model_cfg['num_classes'] = self.train_dataset.num_classes - elif self.val_dataset: + elif self.val_dataset and hasattr(self.val_dataset, 'num_classes'): model_cfg['num_classes'] = self.val_dataset.num_classes else: raise ValueError( From 20f73d060fe9cb46f0145aabc3effc13418291dc Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 2 Feb 2021 11:04:00 +0800 Subject: [PATCH 056/210] update num_classes check --- paddleseg/core/train.py | 7 +++---- paddleseg/utils/config_check.py | 22 +++++++++++++++------- train.py | 2 +- val.py | 2 +- 4 files changed, 20 insertions(+), 13 deletions(-) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index 31ea673e86..942e4aa970 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -160,8 +160,7 @@ def train(model, for i in range(len(loss_list)): avg_loss_list[i] += loss_list[i] batch_cost_averager.record( - time.time() - batch_start, - num_samples=batch_size) + time.time() - batch_start, num_samples=batch_size) if (iter) % log_iters == 0 and local_rank == 0: avg_loss /= log_iters @@ -176,7 +175,8 @@ def train(model, "[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}" .format((iter - 1) // iters_per_epoch + 1, iter, iters, avg_loss, lr, avg_train_batch_cost, - avg_train_reader_cost, batch_cost_averager.get_ips_average(), eta)) + avg_train_reader_cost, + batch_cost_averager.get_ips_average(), eta)) if use_vdl: log_writer.add_scalar('Train/loss', avg_loss, iter) # Record all losses if there are more than 2 losses. 
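PATCH 054 through 056 above build up the `config_check` module; its core is the set-based `num_classes` test quoted in the diffs. A condensed, self-contained sketch of that logic, where the plain arguments stand in for the real `paddleseg.cvlibs.Config` and dataset objects:

```
def check_num_classes(model_cfg, train_num_classes=None, val_num_classes=None):
    # Collect every place num_classes may be declared; a set collapses agreement.
    declared = {n for n in (model_cfg.get('num_classes'),
                            train_num_classes, val_num_classes)
                if n is not None}
    if not declared:
        raise ValueError('`num_classes` is not found. Please set it in '
                         'model, train_dataset or val_dataset')
    if len(declared) > 1:
        raise ValueError('`num_classes` is not consistent: {}'.format(declared))
    return declared.pop()

print(check_num_classes({'num_classes': 19}, 19, 19))  # 19
```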
@@ -248,7 +248,6 @@ def count_syncbn(m, x, y): flops = paddle.flops( model, [1, c, h, w], custom_ops={paddle.nn.SyncBatchNorm: count_syncbn}) - logger.info(flops) # Sleep for half a second to let dataloader release resources. time.sleep(0.5) diff --git a/paddleseg/utils/config_check.py b/paddleseg/utils/config_check.py index e570efe5a8..47a7049823 100644 --- a/paddleseg/utils/config_check.py +++ b/paddleseg/utils/config_check.py @@ -15,26 +15,28 @@ import numpy as np -def config_check(cfg): +def config_check(cfg, train_dataset=None, val_dataset=None): """ To check config。 Args: cfg (paddleseg.cvlibs.Config): An object of paddleseg.cvlibs.Config. + train_dataset (paddle.io.Dataset): Used to read and process training datasets. + val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets. """ - num_classes_check(cfg) + num_classes_check(cfg, train_dataset, val_dataset) -def num_classes_check(cfg): +def num_classes_check(cfg, train_dataset, val_dataset): """" Check that the num_classes in model, train_dataset and val_dataset is consistent. """ num_classes_set = set() - if cfg.train_dataset and hasattr(cfg.train_dataset, 'num_classes'): - num_classes_set.add(cfg.train_dataset.num_classes) - if cfg.val_dataset and hasattr(cfg.val_dataset, 'num_classes'): - num_classes_set.add(cfg.val_dataset.num_classes) + if train_dataset and hasattr(train_dataset, 'num_classes'): + num_classes_set.add(train_dataset.num_classes) + if val_dataset and hasattr(val_dataset, 'num_classes'): + num_classes_set.add(val_dataset.num_classes) if cfg.dic.get('model', None) and cfg.dic['model'].get('num_classes', None): num_classes_set.add(cfg.dic['model'].get('num_classes')) if (not cfg.train_dataset) and (not cfg.val_dataset): @@ -49,3 +51,9 @@ def num_classes_check(cfg): raise ValueError( '`num_classes` is not consistent: {}. 
Please set it consistently in model or train_dataset or val_dataset' .format(num_classes_set)) + else: + num_classes = num_classes_set.pop() + if train_dataset: + train_dataset.num_classes = num_classes + if val_dataset: + val_dataset.num_classes = num_classes diff --git a/train.py b/train.py index 7300f9cb06..76be634c7c 100644 --- a/train.py +++ b/train.py @@ -130,7 +130,7 @@ def main(args): msg += '------------------------------------------------' logger.info(msg) - config_check(cfg) + config_check(cfg, train_dataset=train_dataset, val_dataset=val_dataset) train( cfg.model, diff --git a/val.py b/val.py index ac3b66e16e..8a3f9c328b 100644 --- a/val.py +++ b/val.py @@ -122,7 +122,7 @@ def main(args): model.set_dict(para_state_dict) logger.info('Loaded trained params of model successfully') - config_check(cfg) + config_check(cfg, val_dataset=val_dataset) evaluate( model, From 7e28e99a2ebecc144751b8ff79add537bfa2b984 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 2 Feb 2021 11:09:32 +0800 Subject: [PATCH 057/210] add config check to predict.py --- predict.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/predict.py b/predict.py index d262f04ad2..8ac2bb3b6d 100644 --- a/predict.py +++ b/predict.py @@ -18,7 +18,7 @@ import paddle from paddleseg.cvlibs import manager, Config -from paddleseg.utils import get_sys_env, logger +from paddleseg.utils import get_sys_env, logger, config_check from paddleseg.core import predict @@ -150,6 +150,8 @@ def main(args): transforms = val_dataset.transforms image_list, image_dir = get_image_list(args.image_path) + config_check(cfg, val_dataset=val_dataset) + predict( model, model_path=args.model_path, From d5805bd110c651b39eb5dddfdcbc0640760aa28c Mon Sep 17 00:00:00 2001 From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com> Date: Tue, 2 Feb 2021 15:03:45 +0800 Subject: [PATCH 058/210] Update README.md --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index ebaa0b18ef..0accd77b3c 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,9 @@ export CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 ``` +deeplabv3p 模型的配置文件为: +benchmark/deeplabv3p.yml + ## 静态图 通过 **MODEL.FP16 True** 开启amp训练 @@ -32,6 +35,9 @@ export CUDA_VISIBLE_DEVICES=0,1 python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True ``` +deeplabv3p模型的配置文件为: +configs/deeplabv3p_resnet50_vd_cityscapes.yaml + ## 竞品 竞品为[mmsegmentation](https://github.com/open-mmlab/mmsegmentation) From 939109d23c514490aa963512ca2433044ae983e1 Mon Sep 17 00:00:00 2001 From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com> Date: Tue, 2 Feb 2021 16:33:01 +0800 Subject: [PATCH 059/210] Update layer_libs.py --- paddleseg/models/layers/layer_libs.py | 1 - 1 file changed, 1 deletion(-) diff --git a/paddleseg/models/layers/layer_libs.py b/paddleseg/models/layers/layer_libs.py index 84746a6913..b9913cd945 100644 --- a/paddleseg/models/layers/layer_libs.py +++ b/paddleseg/models/layers/layer_libs.py @@ -22,7 +22,6 @@ def SyncBatchNorm(*args, **kwargs): if paddle.get_device() == 'cpu': return nn.BatchNorm2D(*args, **kwargs) elif paddle.distributed.ParallelEnv().nranks == 1: - print('using batch norm') return nn.BatchNorm2D(*args, **kwargs) else: return nn.SyncBatchNorm(*args, **kwargs) From dba30d564568c3f8697fb89ab5fbc9d81c808659 Mon Sep 17 00:00:00 2001 From: 
chenguowei01 Date: Sat, 6 Feb 2021 13:54:42 +0800 Subject: [PATCH 060/210] paddleseg/core/train.py --- paddleseg/core/train.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index b6f29e8071..18dc77fa7b 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -186,18 +186,16 @@ def train(model, model.clear_gradients() avg_loss += loss.numpy()[0] if not avg_loss_list: - avg_loss_list = [l for l in loss_list] + avg_loss_list = [l.numpy() for l in loss_list] else: for i in range(len(loss_list)): - avg_loss_list[i] += loss_list[i] + avg_loss_list[i] += loss_list[i].numpy() batch_cost_averager.record( time.time() - batch_start, num_samples=batch_size) if (iter) % log_iters == 0 and local_rank == 0: avg_loss /= log_iters - avg_loss_list = [ - l.numpy()[0] / log_iters for l in avg_loss_list - ] + avg_loss_list = [l[0] / log_iters for l in avg_loss_list] remain_iters = iters - iter avg_train_batch_cost = batch_cost_averager.get_average() avg_train_reader_cost = reader_cost_averager.get_average() From e0c1c851b30194b597baf86f914071c475f13d2a Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Sun, 7 Feb 2021 15:53:16 +0800 Subject: [PATCH 061/210] update README.md --- deploy/python/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/python/README.md b/deploy/python/README.md index 6861ecff4b..b5c89535e4 100644 --- a/deploy/python/README.md +++ b/deploy/python/README.md @@ -20,7 +20,7 @@ python deploy/python/infer.py --config /path/to/deploy.yaml --image_path 参数说明如下: |参数名|用途|是否必选项|默认值| |-|-|-|-| -|config|配置文件|是|-| +|config|**导出模型时生成的配置文件**, 而非configs目录下的配置文件|是|-| |image_path|预测图片的路径或者目录|是|-| |batch_size|单卡batch size|否|配置文件中指定值| |save_dir|保存预测结果的目录|否|output| From 519abd539e38bfd190fb4b525fd13a4cd228bd80 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 22 Feb 2021 20:04:10 +0800 Subject: [PATCH 062/210] add dygraph fleet --- paddleseg/core/train.py | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index 18dc77fa7b..a1c25a9f27 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -96,14 +96,19 @@ def train(model, os.remove(save_dir) os.makedirs(save_dir) + # if nranks > 1: + # # Initialize parallel environment if not done. + # if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized( + # ): + # paddle.distributed.init_parallel_env() + # ddp_model = paddle.DataParallel(model) + # else: + # ddp_model = paddle.DataParallel(model) + if nranks > 1: - # Initialize parallel environment if not done. 
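PATCH 062 replaces the `paddle.DataParallel` path being commented out here with the fleet collective API (the new lines follow just below). A sketch of the three-step wiring the diff performs, assuming the process was launched with `python -m paddle.distributed.launch` or `fleetrun`:

```
import paddle
import paddle.distributed.fleet as fleet

def wrap_for_fleet(model, optimizer):
    # Mirror the patch: only wrap when more than one rank is running.
    if paddle.distributed.ParallelEnv().nranks > 1:
        fleet.init(is_collective=True)
        # distributed_optimizer returns a Fleet object wrapping the optimizer.
        optimizer = fleet.distributed_optimizer(optimizer)
        model = fleet.distributed_model(model)
    return model, optimizer
```

A later hunk in the same patch shows the consequence: because `optimizer` may now be a `Fleet` object, the LR scheduler has to be read from `optimizer.user_defined_optimizer._learning_rate`.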
- if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized( - ): - paddle.distributed.init_parallel_env() - ddp_model = paddle.DataParallel(model) - else: - ddp_model = paddle.DataParallel(model) + paddle.distributed.fleet.init(is_collective=True) + optimizer = paddle.distributed.fleet.distributed_optimizer(optimizer) + ddp_model = paddle.distributed.fleet.distributed_model(model) batch_sampler = paddle.io.DistributedBatchSampler( train_dataset, batch_size=batch_size, shuffle=True, drop_last=True) @@ -159,8 +164,6 @@ def train(model, losses=losses, edges=edges) loss = sum(loss_list) - # loss.backward() - # optimizer.step() scaled = scaler.scale(loss) # scale the loss scaled.backward() # do backward @@ -180,9 +183,15 @@ def train(model, optimizer.step() lr = optimizer.get_lr() - if isinstance(optimizer._learning_rate, - paddle.optimizer.lr.LRScheduler): - optimizer._learning_rate.step() + + # update lr + if isinstance(optimizer, paddle.distributed.fleet.Fleet): + lr_sche = optimizer.user_defined_optimizer._learning_rate + else: + lr_sche = optimizer._learning_rate + if isinstance(lr_sche, paddle.optimizer.lr.LRScheduler): + lr_sche.step() + model.clear_gradients() avg_loss += loss.numpy()[0] if not avg_loss_list: From bc02429ddd63e3c88d671efcb0aa440a7c6836ca Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 23 Feb 2021 17:25:44 +0800 Subject: [PATCH 063/210] fix padding to symmetry --- paddleseg/models/backbones/hrnet.py | 23 +++++++++-------------- paddleseg/models/fcn.py | 2 -- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/paddleseg/models/backbones/hrnet.py b/paddleseg/models/backbones/hrnet.py index 40ed660d9d..52cec1718c 100644 --- a/paddleseg/models/backbones/hrnet.py +++ b/paddleseg/models/backbones/hrnet.py @@ -94,7 +94,7 @@ def __init__(self, out_channels=64, kernel_size=3, stride=2, - padding='same', + padding=1, bias_attr=False) self.conv_layer1_2 = layers.ConvBNReLU( @@ -102,7 +102,7 @@ def __init__(self, out_channels=64, kernel_size=3, stride=2, - padding='same', + padding=1, bias_attr=False) self.la1 = Layer1( @@ -243,7 +243,7 @@ def __init__(self, in_channels, out_channels, name=None): in_channels=in_channels[i], out_channels=out_channels[i], kernel_size=3, - padding='same', + padding=1, bias_attr=False)) else: residual = self.add_sublayer( @@ -253,7 +253,7 @@ def __init__(self, in_channels, out_channels, name=None): out_channels=out_channels[i], kernel_size=3, stride=2, - padding='same', + padding=1, bias_attr=False)) self.conv_bn_func_list.append(residual) @@ -322,7 +322,6 @@ def __init__(self, in_channels=num_channels, out_channels=num_filters, kernel_size=1, - padding='same', bias_attr=False) self.conv2 = layers.ConvBNReLU( @@ -330,14 +329,13 @@ def __init__(self, out_channels=num_filters, kernel_size=3, stride=stride, - padding='same', + padding=1, bias_attr=False) self.conv3 = layers.ConvBN( in_channels=num_filters, out_channels=num_filters * 4, kernel_size=1, - padding='same', bias_attr=False) if self.downsample: @@ -345,7 +343,6 @@ def __init__(self, in_channels=num_channels, out_channels=num_filters * 4, kernel_size=1, - padding='same', bias_attr=False) if self.has_se: @@ -390,13 +387,13 @@ def __init__(self, out_channels=num_filters, kernel_size=3, stride=stride, - padding='same', + padding=1, bias_attr=False) self.conv2 = layers.ConvBN( in_channels=num_filters, out_channels=num_filters, kernel_size=3, - padding='same', + padding=1, bias_attr=False) if self.downsample: @@ -404,7 +401,6 @@ def __init__(self, 
in_channels=num_channels, out_channels=num_filters, kernel_size=1, - padding='same', bias_attr=False) if self.has_se: @@ -567,7 +563,6 @@ def __init__(self, in_channels=in_channels[j], out_channels=out_channels[i], kernel_size=1, - padding='same', bias_attr=False)) self.residual_func_list.append(residual_func) elif j < i: @@ -582,7 +577,7 @@ def __init__(self, out_channels=out_channels[i], kernel_size=3, stride=2, - padding='same', + padding=1, bias_attr=False)) pre_num_filters = out_channels[i] else: @@ -594,7 +589,7 @@ def __init__(self, out_channels=out_channels[j], kernel_size=3, stride=2, - padding='same', + padding=1, bias_attr=False)) pre_num_filters = out_channels[j] self.residual_func_list.append(residual_func) diff --git a/paddleseg/models/fcn.py b/paddleseg/models/fcn.py index 4d2915976c..921c4d827a 100644 --- a/paddleseg/models/fcn.py +++ b/paddleseg/models/fcn.py @@ -113,7 +113,6 @@ def __init__(self, in_channels=backbone_channels[0], out_channels=channels, kernel_size=1, - padding='same', stride=1, bias_attr=bias) self.cls = nn.Conv2D( @@ -121,7 +120,6 @@ def __init__(self, out_channels=self.num_classes, kernel_size=1, stride=1, - padding=0, bias_attr=bias) self.init_weight() From 19c5702b7c909eadc8cdc108b71fddfb6edd1e08 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 24 Feb 2021 15:55:40 +0800 Subject: [PATCH 064/210] add fleet to static --- legacy/pdseg/models/model_builder.py | 4 ++-- legacy/pdseg/solver.py | 4 ++-- legacy/pdseg/train.py | 14 +++++++++++--- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/legacy/pdseg/models/model_builder.py b/legacy/pdseg/models/model_builder.py index 97a811add7..9ef816afb0 100644 --- a/legacy/pdseg/models/model_builder.py +++ b/legacy/pdseg/models/model_builder.py @@ -230,12 +230,12 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN): if ModelPhase.is_train(phase): optimizer = solver.Solver(main_prog, start_prog) - decayed_lr = optimizer.optimise(avg_loss) + decayed_lr, optimizer_ = optimizer.optimise(avg_loss) if class_num == 1: logit = sigmoid_to_softmax(logit) else: logit = softmax(logit) - return data_loader, avg_loss, decayed_lr, pred, label, mask + return data_loader, avg_loss, decayed_lr, pred, label, mask, optimizer_ def to_int(string, dest="I"): diff --git a/legacy/pdseg/solver.py b/legacy/pdseg/solver.py index 941ec0567a..73c5ccb2d3 100644 --- a/legacy/pdseg/solver.py +++ b/legacy/pdseg/solver.py @@ -85,7 +85,7 @@ def sgd_optimizer(self, lr_policy, loss): use_dynamic_loss_scaling=True) optimizer.minimize(loss) - return decayed_lr + return decayed_lr, optimizer def adam_optimizer(self, lr_policy, loss): decayed_lr = self.get_lr(lr_policy) @@ -96,7 +96,7 @@ def adam_optimizer(self, lr_policy, loss): weight_decay=self.weight_decay, ) optimizer.minimize(loss) - return decayed_lr + return decayed_lr, optimizer def optimise(self, loss): lr_policy = cfg.SOLVER.LR_POLICY diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index 61d843d634..f63bc1d14a 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -32,6 +32,7 @@ import paddle import paddle.static as static from paddle.fluid import profiler +import paddle.distributed.fleet as fleet from utils.config import cfg from utils.timer import TimeAverager, calculate_eta @@ -249,7 +250,7 @@ def data_generator(): batch_size_per_dev = cfg.BATCH_SIZE // dev_count print_info("batch_size_per_dev: {}".format(batch_size_per_dev)) - data_loader, avg_loss, lr, pred, grts, masks = build_model( + data_loader, avg_loss, lr, pred, grts, masks, 
optimizer = build_model( train_prog, startup_prog, phase=ModelPhase.TRAIN) build_model(test_prog, static.Program(), phase=ModelPhase.EVAL) data_loader.set_sample_generator( @@ -266,8 +267,14 @@ def data_generator(): build_strategy = static.BuildStrategy() if cfg.NUM_TRAINERS > 1 and args.use_gpu: - dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog) - exec_strategy.num_threads = 1 + strategy = fleet.DistributedStrategy() + strategy.execution_strategy = exec_strategy + strategy.build_strategy = build_strategy + fleet.init(is_collective=True, strategy=strategy) + optimizer = paddle.distributed.fleet.distributed_optimizer(optimizer) + # if cfg.NUM_TRAINERS > 1 and args.use_gpu: + # dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog) + # exec_strategy.num_threads = 1 if cfg.TRAIN.SYNC_BATCH_NORM and args.use_gpu: if dev_count > 1: @@ -441,6 +448,7 @@ def main(args): cfg.TRAINER_ID = int(os.getenv("PADDLE_TRAINER_ID", 0)) cfg.NUM_TRAINERS = int(os.environ.get('PADDLE_TRAINERS_NUM', 1)) + print('************NUM_TRAINERS**********', cfg.NUM_TRAINERS) cfg.check_and_infer() print_info(pprint.pformat(cfg)) From 7ecb72f94b00480ca056932b143f3d4a5b305fde Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Thu, 25 Feb 2021 14:55:34 +0800 Subject: [PATCH 065/210] update README.md --- README.md | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 0accd77b3c..fb2094d060 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # PaddleSeg Benchmark with AMP ## 动态图 +数据集cityscapes 放置于data目录下, 下载链接:https://paddleseg.bj.bcebos.com/dataset/cityscapes.tar 通过 **--fp16** 开启amp训练。 @@ -14,12 +15,12 @@ python train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 ``` export CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 +# fleet开启多卡训练 +fleetrun train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 ``` -deeplabv3p 模型的配置文件为: -benchmark/deeplabv3p.yml - ## 静态图 +数据集cityscapes 放置于legacy/dataset目录下 通过 **MODEL.FP16 True** 开启amp训练 单机单卡使用如下命令进行训练: @@ -29,14 +30,16 @@ export CUDA_VISIBLE_DEVICES=0 python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 2 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True ``` -单机单卡使用如下命令进行训练: +单机多卡使用如下命令进行训练: ``` export CUDA_VISIBLE_DEVICES=0,1 python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True ``` -deeplabv3p模型的配置文件为: -configs/deeplabv3p_resnet50_vd_cityscapes.yaml +分布式训练 +``` +fleetrun pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True +``` ## 竞品 竞品为[mmsegmentation](https://github.com/open-mmlab/mmsegmentation) From 638a544bc5727112442ba8de0498e866cdbe304a Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Thu, 25 Feb 2021 16:59:39 +0800 Subject: [PATCH 066/210] fix fp16 to fleet and add interpolate to black list --- paddleseg/core/train.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index a1c25a9f27..faf2503d11 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -107,7 +107,8 @@ def train(model, if nranks > 1: paddle.distributed.fleet.init(is_collective=True) - optimizer = paddle.distributed.fleet.distributed_optimizer(optimizer) 
+ optimizer = paddle.distributed.fleet.distributed_optimizer( + optimizer) # The return is Fleet object ddp_model = paddle.distributed.fleet.distributed_model(model) batch_sampler = paddle.io.DistributedBatchSampler( @@ -153,7 +154,8 @@ def train(model, edges = data[2].astype('int64') if fp16: - with paddle.amp.auto_cast(enable=True): + with paddle.amp.auto_cast( + enable=True, custom_black_list={'bilinear_interp_v2'}): if nranks > 1: logits_list = ddp_model(images) else: @@ -167,7 +169,10 @@ def train(model, scaled = scaler.scale(loss) # scale the loss scaled.backward() # do backward - scaler.minimize(optimizer, scaled) # update parameters + if isinstance(optimizer, paddle.distributed.fleet.Fleet): + scaler.minimize(optimizer.user_defined_optimizer, scaled) + else: + scaler.minimize(optimizer, scaled) # update parameters else: if nranks > 1: logits_list = ddp_model(images) From 7078655c0204dc0c103ec3647b1eebc64829bb6c Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 24 Feb 2021 15:55:40 +0800 Subject: [PATCH 067/210] add fleet to static --- legacy/pdseg/models/model_builder.py | 4 ++-- legacy/pdseg/solver.py | 4 ++-- legacy/pdseg/train.py | 14 +++++++++++--- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/legacy/pdseg/models/model_builder.py b/legacy/pdseg/models/model_builder.py index 97a811add7..9ef816afb0 100644 --- a/legacy/pdseg/models/model_builder.py +++ b/legacy/pdseg/models/model_builder.py @@ -230,12 +230,12 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN): if ModelPhase.is_train(phase): optimizer = solver.Solver(main_prog, start_prog) - decayed_lr = optimizer.optimise(avg_loss) + decayed_lr, optimizer_ = optimizer.optimise(avg_loss) if class_num == 1: logit = sigmoid_to_softmax(logit) else: logit = softmax(logit) - return data_loader, avg_loss, decayed_lr, pred, label, mask + return data_loader, avg_loss, decayed_lr, pred, label, mask, optimizer_ def to_int(string, dest="I"): diff --git a/legacy/pdseg/solver.py b/legacy/pdseg/solver.py index 941ec0567a..73c5ccb2d3 100644 --- a/legacy/pdseg/solver.py +++ b/legacy/pdseg/solver.py @@ -85,7 +85,7 @@ def sgd_optimizer(self, lr_policy, loss): use_dynamic_loss_scaling=True) optimizer.minimize(loss) - return decayed_lr + return decayed_lr, optimizer def adam_optimizer(self, lr_policy, loss): decayed_lr = self.get_lr(lr_policy) @@ -96,7 +96,7 @@ def adam_optimizer(self, lr_policy, loss): weight_decay=self.weight_decay, ) optimizer.minimize(loss) - return decayed_lr + return decayed_lr, optimizer def optimise(self, loss): lr_policy = cfg.SOLVER.LR_POLICY diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index 61d843d634..f63bc1d14a 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -32,6 +32,7 @@ import paddle import paddle.static as static from paddle.fluid import profiler +import paddle.distributed.fleet as fleet from utils.config import cfg from utils.timer import TimeAverager, calculate_eta @@ -249,7 +250,7 @@ def data_generator(): batch_size_per_dev = cfg.BATCH_SIZE // dev_count print_info("batch_size_per_dev: {}".format(batch_size_per_dev)) - data_loader, avg_loss, lr, pred, grts, masks = build_model( + data_loader, avg_loss, lr, pred, grts, masks, optimizer = build_model( train_prog, startup_prog, phase=ModelPhase.TRAIN) build_model(test_prog, static.Program(), phase=ModelPhase.EVAL) data_loader.set_sample_generator( @@ -266,8 +267,14 @@ def data_generator(): build_strategy = static.BuildStrategy() if cfg.NUM_TRAINERS > 1 and args.use_gpu: - 
dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog) - exec_strategy.num_threads = 1 + strategy = fleet.DistributedStrategy() + strategy.execution_strategy = exec_strategy + strategy.build_strategy = build_strategy + fleet.init(is_collective=True, strategy=strategy) + optimizer = paddle.distributed.fleet.distributed_optimizer(optimizer) + # if cfg.NUM_TRAINERS > 1 and args.use_gpu: + # dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog) + # exec_strategy.num_threads = 1 if cfg.TRAIN.SYNC_BATCH_NORM and args.use_gpu: if dev_count > 1: @@ -441,6 +448,7 @@ def main(args): cfg.TRAINER_ID = int(os.getenv("PADDLE_TRAINER_ID", 0)) cfg.NUM_TRAINERS = int(os.environ.get('PADDLE_TRAINERS_NUM', 1)) + print('************NUM_TRAINERS**********', cfg.NUM_TRAINERS) cfg.check_and_infer() print_info(pprint.pformat(cfg)) From abafbb6b1301537d6c802b0b80e6095e4ec5fdb0 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Thu, 25 Feb 2021 14:55:34 +0800 Subject: [PATCH 068/210] update README.md --- README.md | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 0accd77b3c..fb2094d060 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # PaddleSeg Benchmark with AMP ## 动态图 +数据集cityscapes 放置于data目录下, 下载链接:https://paddleseg.bj.bcebos.com/dataset/cityscapes.tar 通过 **--fp16** 开启amp训练。 @@ -14,12 +15,12 @@ python train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 ``` export CUDA_VISIBLE_DEVICES=0,1 python -m paddle.distributed.launch train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 +# fleet开启多卡训练 +fleetrun train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 ``` -deeplabv3p 模型的配置文件为: -benchmark/deeplabv3p.yml - ## 静态图 +数据集cityscapes 放置于legacy/dataset目录下 通过 **MODEL.FP16 True** 开启amp训练 单机单卡使用如下命令进行训练: @@ -29,14 +30,16 @@ export CUDA_VISIBLE_DEVICES=0 python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 2 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True ``` -单机单卡使用如下命令进行训练: +单机多卡使用如下命令进行训练: ``` export CUDA_VISIBLE_DEVICES=0,1 python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True ``` -deeplabv3p模型的配置文件为: -configs/deeplabv3p_resnet50_vd_cityscapes.yaml +分布式训练 +``` +fleetrun pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True +``` ## 竞品 竞品为[mmsegmentation](https://github.com/open-mmlab/mmsegmentation) From ead484f1d92eada213a4c161840fb5616b0b9366 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Thu, 25 Feb 2021 16:59:39 +0800 Subject: [PATCH 069/210] fix fp16 to fleet and add interpolate to black list --- paddleseg/core/train.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index a1c25a9f27..faf2503d11 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -107,7 +107,8 @@ def train(model, if nranks > 1: paddle.distributed.fleet.init(is_collective=True) - optimizer = paddle.distributed.fleet.distributed_optimizer(optimizer) + optimizer = paddle.distributed.fleet.distributed_optimizer( + optimizer) # The return is Fleet object ddp_model = paddle.distributed.fleet.distributed_model(model) batch_sampler = paddle.io.DistributedBatchSampler( @@ -153,7 +154,8 @@ def train(model, edges = data[2].astype('int64') if fp16: - 
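PATCH 069 (a re-application of PATCH 066) rewrites the hunk continued just below so that `bilinear_interp_v2` lands on the auto-cast black list, keeping interpolation in FP32 under AMP. A minimal sketch of that training-step pattern; the model, data and loss function are placeholders:

```
import paddle

scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

def amp_step(model, images, labels, loss_fn, optimizer):
    # Ops on the black list (here bilinear upsampling) keep running in FP32.
    with paddle.amp.auto_cast(enable=True,
                              custom_black_list={'bilinear_interp_v2'}):
        logits = model(images)
        loss = loss_fn(logits, labels)
    scaled = scaler.scale(loss)  # scale the loss to avoid FP16 underflow
    scaled.backward()
    scaler.minimize(optimizer, scaled)  # unscale gradients, update parameters
    optimizer.clear_grad()
    return loss
```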
with paddle.amp.auto_cast(enable=True): + with paddle.amp.auto_cast( + enable=True, custom_black_list={'bilinear_interp_v2'}): if nranks > 1: logits_list = ddp_model(images) else: @@ -167,7 +169,10 @@ def train(model, scaled = scaler.scale(loss) # scale the loss scaled.backward() # do backward - scaler.minimize(optimizer, scaled) # update parameters + if isinstance(optimizer, paddle.distributed.fleet.Fleet): + scaler.minimize(optimizer.user_defined_optimizer, scaled) + else: + scaler.minimize(optimizer, scaled) # update parameters else: if nranks > 1: logits_list = ddp_model(images) From 9b820d1ad8156f2f1430cb9e37658b1ba33564b1 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 26 Feb 2021 10:46:56 +0800 Subject: [PATCH 070/210] update hrnet config --- benchmark/hrnet.yml | 4 ++-- benchmark/hrnet48.yml | 50 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 benchmark/hrnet48.yml diff --git a/benchmark/hrnet.yml b/benchmark/hrnet.yml index f7e0528856..362eb2009d 100644 --- a/benchmark/hrnet.yml +++ b/benchmark/hrnet.yml @@ -1,5 +1,5 @@ -batch_size: 2 -iters: 500 +batch_size: 8 +iters: 80000 train_dataset: type: Cityscapes diff --git a/benchmark/hrnet48.yml b/benchmark/hrnet48.yml new file mode 100644 index 0000000000..e5751bf2e0 --- /dev/null +++ b/benchmark/hrnet48.yml @@ -0,0 +1,50 @@ +batch_size: 8 +iters: 80000 + +train_dataset: + type: Cityscapes + dataset_root: data/cityscapes + transforms: + - type: ResizeStepScaling + min_scale_factor: 0.5 + max_scale_factor: 2.0 + scale_step_size: 0.25 + - type: RandomPaddingCrop + crop_size: [1024, 512] + - type: RandomHorizontalFlip + - type: RandomDistort + - type: Normalize + mode: train + +val_dataset: + type: Cityscapes + dataset_root: data/cityscapes + transforms: + - type: Normalize + mode: val + +model: + type: FCN + backbone: + type: HRNet_W48 + pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz + num_classes: 19 + backbone_indices: [-1] + bias: False + +optimizer: + type: sgd + weight_decay: 0.0005 + +learning_rate: + value: 0.01 + decay: + type: poly + power: 0.9 + end_lr: 0.0 + +loss: + types: + - type: CrossEntropyLoss + ignore_index: 255 + coef: [1] From 45129e906aad87a5732155c65ad3565809e60fe6 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 26 Feb 2021 11:33:38 +0800 Subject: [PATCH 071/210] update some --- benchmark/hrnet.yml | 2 +- benchmark/hrnet48.yml | 2 +- legacy/pdseg/train.py | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/benchmark/hrnet.yml b/benchmark/hrnet.yml index 362eb2009d..54a837d108 100644 --- a/benchmark/hrnet.yml +++ b/benchmark/hrnet.yml @@ -1,4 +1,4 @@ -batch_size: 8 +batch_size: 2 iters: 80000 train_dataset: diff --git a/benchmark/hrnet48.yml b/benchmark/hrnet48.yml index e5751bf2e0..702caf4c41 100644 --- a/benchmark/hrnet48.yml +++ b/benchmark/hrnet48.yml @@ -1,4 +1,4 @@ -batch_size: 8 +batch_size: 2 iters: 80000 train_dataset: diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index f63bc1d14a..94c46b4f0e 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -359,7 +359,8 @@ def data_generator(): avg_loss += np.mean(np.array(loss)) step += 1 batch_cost_averager.record( - time.time() - batch_start, num_samples=cfg.BATCH_SIZE) + time.time() - batch_start, + num_samples=cfg.BATCH_SIZE / dev_count) if step % args.log_steps == 0 and cfg.TRAINER_ID == 0: avg_train_batch_cost = batch_cost_averager.get_average() From 60d47bac318c54c5cf990549726902734400e9fa Mon Sep 17 00:00:00 
2001 From: chenguowei01 Date: Sat, 27 Feb 2021 14:34:01 +0800 Subject: [PATCH 072/210] update hrnetw18_cityscapes_1024x512_215.yaml --- legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml b/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml index 83f272bc30..5a5d8c0b83 100644 --- a/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml +++ b/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml @@ -20,7 +20,6 @@ DATASET: TEST_FILE_LIST: "./dataset/cityscapes/val.list" TRAIN_FILE_LIST: "./dataset/cityscapes/train.list" VAL_FILE_LIST: "./dataset/cityscapes/val.list" - VIS_FILE_LIST: "./dataset/cityscapes/val.list" IGNORE_INDEX: 255 SEPARATOR: " " From 92dce23f5d4b693eb88f6830b481271d6d95a1f0 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Sun, 28 Feb 2021 18:47:55 +0800 Subject: [PATCH 073/210] update hrnet.yml --- benchmark/hrnet.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/benchmark/hrnet.yml b/benchmark/hrnet.yml index 54a837d108..04c0767e80 100644 --- a/benchmark/hrnet.yml +++ b/benchmark/hrnet.yml @@ -27,6 +27,7 @@ model: type: FCN backbone: type: HRNet_W18 + pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz num_classes: 19 backbone_indices: [-1] bias: False From a2dfcc34ae36205108571606bbf9b3fd80683b42 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 1 Mar 2021 10:57:30 +0800 Subject: [PATCH 074/210] update train.py --- legacy/pdseg/train.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index 94c46b4f0e..2f11d36409 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -403,6 +403,8 @@ def data_generator(): save_infer_program(test_prog, ckpt_dir) if args.do_eval: + tmp = cfg.BATCH_SIZE + cfg.BATCH_SIZE = batch_size_per_dev print("Evaluation start") _, mean_iou, _, mean_acc = evaluate( cfg=cfg, @@ -421,6 +423,7 @@ def data_generator(): ckpt_dir, os.path.join(cfg.TRAIN.MODEL_SAVE_DIR, 'best_model'), mean_iou)) + cfg.BATCH_SIZE = tmp # Use VisualDL to visualize results if args.use_vdl and cfg.DATASET.VIS_FILE_LIST is not None: From 72123af267a511ca98959a33dd36daefff93384c Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 2 Mar 2021 16:27:15 +0800 Subject: [PATCH 075/210] fix fleetrun bug --- legacy/pdseg/solver.py | 4 ++-- legacy/pdseg/train.py | 43 ++++++++++++++++++++++++++---------------- 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/legacy/pdseg/solver.py b/legacy/pdseg/solver.py index 73c5ccb2d3..b2ceca0500 100644 --- a/legacy/pdseg/solver.py +++ b/legacy/pdseg/solver.py @@ -84,7 +84,7 @@ def sgd_optimizer(self, lr_policy, loss): amp_lists=amp_lists, use_dynamic_loss_scaling=True) - optimizer.minimize(loss) + #optimizer.minimize(loss) return decayed_lr, optimizer def adam_optimizer(self, lr_policy, loss): @@ -95,7 +95,7 @@ def adam_optimizer(self, lr_policy, loss): beta2=self.momentum2, weight_decay=self.weight_decay, ) - optimizer.minimize(loss) + #optimizer.minimize(loss) return decayed_lr, optimizer def optimise(self, loss): diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index 2f11d36409..ebd30d8f94 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -190,8 +190,9 @@ def print_info(*msg): def train(cfg): - startup_prog = static.Program() - train_prog = static.Program() + # Use the default program for fleetrun + startup_prog = static.default_startup_program() + train_prog = static.default_main_program() test_prog = 
static.Program() if args.enable_ce: startup_prog.random_seed = 1000 @@ -256,9 +257,6 @@ def data_generator(): data_loader.set_sample_generator( data_generator, batch_size=batch_size_per_dev, drop_last=drop_last) - exe = static.Executor(place) - exe.run(startup_prog) - exec_strategy = static.ExecutionStrategy() # Clear temporary variables every 100 iteration if args.use_gpu: @@ -266,16 +264,6 @@ def data_generator(): exec_strategy.num_iteration_per_drop_scope = 100 build_strategy = static.BuildStrategy() - if cfg.NUM_TRAINERS > 1 and args.use_gpu: - strategy = fleet.DistributedStrategy() - strategy.execution_strategy = exec_strategy - strategy.build_strategy = build_strategy - fleet.init(is_collective=True, strategy=strategy) - optimizer = paddle.distributed.fleet.distributed_optimizer(optimizer) - # if cfg.NUM_TRAINERS > 1 and args.use_gpu: - # dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog) - # exec_strategy.num_threads = 1 - if cfg.TRAIN.SYNC_BATCH_NORM and args.use_gpu: if dev_count > 1: # Apply sync batch norm strategy @@ -285,7 +273,27 @@ def data_generator(): print_info( "Sync BatchNorm strategy will not be effective if GPU device" " count <= 1") - if args.use_xpu: + + if cfg.NUM_TRAINERS > 1 and args.use_gpu: + strategy = fleet.DistributedStrategy() + exec_strategy.num_threads = 1 + strategy.execution_strategy = exec_strategy + strategy.build_strategy = build_strategy + strategy.cudnn_exhaustive_search = False + strategy.cudnn_batchnorm_spatial_persistent = False + strategy.conv_workspace_size_limit = 512 + fleet.init(is_collective=True, strategy=strategy) + optimizer = paddle.distributed.fleet.distributed_optimizer(optimizer) + optimizer.minimize(avg_loss) + # if cfg.NUM_TRAINERS > 1 and args.use_gpu: + # dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog) + # exec_strategy.num_threads = 1 + + with open("train_prog_{}".format(cfg.NUM_TRAINERS), "w") as f: + if cfg.TRAINER_ID == 0: + f.writelines(str(train_prog)) + + if args.use_xpu or (cfg.NUM_TRAINERS > 1 and args.use_gpu): compiled_train_prog = train_prog else: compiled_train_prog = static.CompiledProgram( @@ -294,6 +302,9 @@ def data_generator(): exec_strategy=exec_strategy, build_strategy=build_strategy) + exe = static.Executor(place) + exe.run(startup_prog) + # Resume training begin_epoch = cfg.SOLVER.BEGIN_EPOCH if cfg.TRAIN.RESUME_MODEL_DIR: From b8a29bbb8a7d0417dd1c816e48538ab970534516 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 10 Mar 2021 10:30:34 +0800 Subject: [PATCH 076/210] fix syncbn in fleet --- legacy/pdseg/train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index ebd30d8f94..0b363ea4a2 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -276,6 +276,7 @@ def data_generator(): if cfg.NUM_TRAINERS > 1 and args.use_gpu: strategy = fleet.DistributedStrategy() + strategy.sync_batch_norm = True exec_strategy.num_threads = 1 strategy.execution_strategy = exec_strategy strategy.build_strategy = build_strategy From dcd91fd2470a15a7d7521bf11cb53bde1c046e6d Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 10 Mar 2021 20:15:47 +0800 Subject: [PATCH 077/210] update deeplabv3p_resnet101_os8_voc12aug_512x512_40k.yml --- .../deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/configs/deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml b/configs/deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml index 4db67b1e40..af5999e8aa 100644 --- 
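PATCH 075 and 076 above assemble the static-graph fleet configuration. A condensed sketch of the strategy object those hunks build; the numeric values are the ones visible in the diff, not tuning advice:

```
import paddle.static as static
import paddle.distributed.fleet as fleet

def build_fleet_strategy():
    exec_strategy = static.ExecutionStrategy()
    exec_strategy.num_threads = 1
    strategy = fleet.DistributedStrategy()
    strategy.sync_batch_norm = True  # the one-line fix added by PATCH 076
    strategy.execution_strategy = exec_strategy
    strategy.build_strategy = static.BuildStrategy()
    strategy.cudnn_exhaustive_search = False
    strategy.cudnn_batchnorm_spatial_persistent = False
    strategy.conv_workspace_size_limit = 512
    return strategy

# fleet.init(is_collective=True, strategy=build_fleet_strategy())
```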
a/configs/deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml +++ b/configs/deeplabv3p/deeplabv3p_resnet50_os8_voc12aug_512x512_40k.yml @@ -7,7 +7,6 @@ model: output_stride: 8 multi_grid: [1, 2, 4] pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz - num_classes: 19 backbone_indices: [0, 3] aspp_ratios: [1, 12, 24, 36] aspp_out_channels: 256 From dc3a6911d0770a4de1660955f5529098aa75a1c7 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 12 Mar 2021 19:15:37 +0800 Subject: [PATCH 078/210] fix variable confict --- legacy/pdseg/models/model_builder.py | 5 +++-- legacy/pdseg/solver.py | 2 +- legacy/pdseg/train.py | 6 ++++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/legacy/pdseg/models/model_builder.py b/legacy/pdseg/models/model_builder.py index 9ef816afb0..2ccb0b98bf 100644 --- a/legacy/pdseg/models/model_builder.py +++ b/legacy/pdseg/models/model_builder.py @@ -116,7 +116,8 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN): class_num = cfg.DATASET.NUM_CLASSES with static.program_guard(main_prog, start_prog): - with paddle.utils.unique_name.guard(): + _new_generator = paddle.utils.unique_name.UniqueNameGenerator() + with paddle.utils.unique_name.guard(_new_generator): # 在导出模型的时候,增加图像标准化预处理,减小预测部署时图像的处理流程 # 预测部署时只须对输入图像增加batch_size维度即可 image = static.data( @@ -235,7 +236,7 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN): logit = sigmoid_to_softmax(logit) else: logit = softmax(logit) - return data_loader, avg_loss, decayed_lr, pred, label, mask, optimizer_ + return data_loader, avg_loss, decayed_lr, pred, label, mask, optimizer_, _new_generator def to_int(string, dest="I"): diff --git a/legacy/pdseg/solver.py b/legacy/pdseg/solver.py index b2ceca0500..a519910f2f 100644 --- a/legacy/pdseg/solver.py +++ b/legacy/pdseg/solver.py @@ -84,7 +84,7 @@ def sgd_optimizer(self, lr_policy, loss): amp_lists=amp_lists, use_dynamic_loss_scaling=True) - #optimizer.minimize(loss) + # optimizer.minimize(loss) return decayed_lr, optimizer def adam_optimizer(self, lr_policy, loss): diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index 0b363ea4a2..24b57c5d54 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -251,7 +251,7 @@ def data_generator(): batch_size_per_dev = cfg.BATCH_SIZE // dev_count print_info("batch_size_per_dev: {}".format(batch_size_per_dev)) - data_loader, avg_loss, lr, pred, grts, masks, optimizer = build_model( + data_loader, avg_loss, lr, pred, grts, masks, optimizer, _new_generator = build_model( train_prog, startup_prog, phase=ModelPhase.TRAIN) build_model(test_prog, static.Program(), phase=ModelPhase.EVAL) data_loader.set_sample_generator( @@ -285,7 +285,9 @@ def data_generator(): strategy.conv_workspace_size_limit = 512 fleet.init(is_collective=True, strategy=strategy) optimizer = paddle.distributed.fleet.distributed_optimizer(optimizer) - optimizer.minimize(avg_loss) + + with paddle.utils.unique_name.guard(_new_generator): + optimizer.minimize(avg_loss) # if cfg.NUM_TRAINERS > 1 and args.use_gpu: # dist_utils.prepare_for_multi_process(exe, build_strategy, train_prog) # exec_strategy.num_threads = 1 From 9ea41f090eeb666d8efead86ffe7e2dd3f3e061a Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 15 Mar 2021 11:29:36 +0800 Subject: [PATCH 079/210] update time calculation --- legacy/pdseg/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index 24b57c5d54..38cb92fcba 100644 --- a/legacy/pdseg/train.py +++ 
b/legacy/pdseg/train.py @@ -395,6 +395,8 @@ def data_generator(): avg_train_reader_cost, step) sys.stdout.flush() avg_loss = 0.0 + reader_cost_averager.reset() + batch_cost_averager.reset() batch_start = time.time() # NOTE : used for benchmark, profiler tools From 0b9fcb39c5818512389d706e415adc63512b4933 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 15 Mar 2021 19:59:22 +0800 Subject: [PATCH 080/210] update logger format --- legacy/pdseg/eval.py | 11 ++++++++--- legacy/pdseg/train.py | 13 +++++++------ paddleseg/core/train.py | 2 +- paddleseg/core/val.py | 4 ++-- 4 files changed, 18 insertions(+), 12 deletions(-) diff --git a/legacy/pdseg/eval.py b/legacy/pdseg/eval.py index 4fce36baee..d2c1d80c3a 100644 --- a/legacy/pdseg/eval.py +++ b/legacy/pdseg/eval.py @@ -74,7 +74,12 @@ def parse_args(): return parser.parse_args() -def evaluate(cfg, ckpt_dir=None, use_gpu=False, use_xpu=False, use_mpio=False, **kwargs): +def evaluate(cfg, + ckpt_dir=None, + use_gpu=False, + use_xpu=False, + use_mpio=False, + **kwargs): np.set_printoptions(precision=5, suppress=True) startup_prog = fluid.Program() @@ -158,7 +163,7 @@ def data_generator(): speed = 1.0 / timer.elapsed_time() print( - "[EVAL]step={} loss={:.5f} acc={:.4f} IoU={:.4f} step/sec={:.2f} | ETA {}" + "[EVAL]step: {} loss: {:.5f} acc: {:.4f} IoU: {:.4f} step/sec: {:.2f} | ETA {}" .format(step, loss, acc, iou, speed, calculate_eta(all_step - step, speed))) timer.restart() @@ -168,7 +173,7 @@ def data_generator(): category_iou, avg_iou = conf_mat.mean_iou() category_acc, avg_acc = conf_mat.accuracy() - print("[EVAL]#image={} acc={:.4f} IoU={:.4f}".format( + print("[EVAL]#image: {} acc: {:.4f} IoU: {:.4f}".format( num_images, avg_acc, avg_iou)) print("[EVAL]Category IoU:", category_iou) print("[EVAL]Category Acc:", category_acc) diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index 3df4d74fb5..144f632f8d 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -280,10 +280,11 @@ def data_generator(): if args.use_xpu: compiled_train_prog = train_prog else: - compiled_train_prog = fluid.CompiledProgram(train_prog).with_data_parallel( - loss_name=avg_loss.name, - exec_strategy=exec_strategy, - build_strategy=build_strategy) + compiled_train_prog = fluid.CompiledProgram( + train_prog).with_data_parallel( + loss_name=avg_loss.name, + exec_strategy=exec_strategy, + build_strategy=build_strategy) # Resume training begin_epoch = cfg.SOLVER.BEGIN_EPOCH @@ -359,7 +360,7 @@ def data_generator(): category_iou, mean_iou = cm.mean_iou() print_info(( - "epoch={} step={} lr={:.5f} loss={:.4f} acc={:.5f} mIoU={:.5f} step/sec={:.3f} | ETA {}" + "epoch: {} step: {} lr: {:.5f} loss: {:.4f} acc: {:.5f} mIoU: {:.5f} step/sec: {:.3f} | ETA {}" ).format(epoch, step, lr[0], avg_loss, mean_acc, mean_iou, speed, calculate_eta(all_step - step, speed))) @@ -390,7 +391,7 @@ def data_generator(): avg_loss /= args.log_steps speed = args.log_steps / timer.elapsed_time() print(( - "epoch={} step={} lr={:.5f} loss={:.4f} step/sec={:.3f} | ETA {}" + "epoch: {} step: {} lr: {:.5f} loss: {:.4f} step/sec: {:.3f} | ETA {}" ).format(epoch, step, lr[0], avg_loss, speed, calculate_eta(all_step - step, speed))) if args.use_vdl: diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index 6ffc51d96c..4e3a5f2b77 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -175,7 +175,7 @@ def train(model, avg_train_reader_cost = reader_cost_averager.get_average() eta = calculate_eta(remain_iters, avg_train_batch_cost) logger.info( - "[TRAIN] 
epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}" + "[TRAIN] epoch: {}, iter: {}/{}, loss: {:.4f}, lr: {:.6f}, batch_cost: {:.4f}, reader_cost: {:.5f}, ips: {:.4f} samples/sec | ETA {}" .format((iter - 1) // iters_per_epoch + 1, iter, iters, avg_loss, lr, avg_train_batch_cost, avg_train_reader_cost, diff --git a/paddleseg/core/val.py b/paddleseg/core/val.py index 606f6afe0b..60c52bff64 100644 --- a/paddleseg/core/val.py +++ b/paddleseg/core/val.py @@ -82,7 +82,7 @@ def evaluate(model, if print_detail: logger.info( - "Start evaluating (total_samples={}, total_iters={})...".format( + "Start evaluating (total_samples: {}, total_iters: {})...".format( len(eval_dataset), total_iters)) progbar_val = progbar.Progbar(target=total_iters, verbose=1) reader_cost_averager = TimeAverager() @@ -167,7 +167,7 @@ def evaluate(model, if print_detail: logger.info( - "[EVAL] #Images={} mIoU={:.4f} Acc={:.4f} Kappa={:.4f} ".format( + "[EVAL] #Images: {} mIoU: {:.4f} Acc: {:.4f} Kappa: {:.4f} ".format( len(eval_dataset), miou, acc, kappa)) logger.info("[EVAL] Class IoU: \n" + str(np.round(class_iou, 4))) logger.info("[EVAL] Class Acc: \n" + str(np.round(class_acc, 4))) From a5b8bb6d6b211c8c1348e5d880a99140355033d8 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 15 Mar 2021 20:18:47 +0800 Subject: [PATCH 081/210] update logger format --- legacy/pdseg/eval.py | 4 ++-- legacy/pdseg/train.py | 2 +- paddleseg/core/train.py | 2 +- paddleseg/core/val.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/legacy/pdseg/eval.py b/legacy/pdseg/eval.py index fbd3607f51..1b428ec90d 100644 --- a/legacy/pdseg/eval.py +++ b/legacy/pdseg/eval.py @@ -170,7 +170,7 @@ def data_generator(): reader_cost = reader_cost_averager.get_average() eta = calculate_eta(all_step - step, batch_cost) print( - "[EVAL]step={} loss={:.5f} acc={:.4f} IoU={:.4f} batch_cost={:.4f}, reader_cost={:.5f} | ETA {}" + "[EVAL]step: {} loss: {:.5f} acc: {:.4f} IoU: {:.4f} batch_cost: {:.4f}, reader_cost: {:.5f} | ETA {}" .format(step, loss, acc, iou, batch_cost, batch_cost, eta)) batch_start = time.time() sys.stdout.flush() @@ -179,7 +179,7 @@ def data_generator(): category_iou, avg_iou = conf_mat.mean_iou() category_acc, avg_acc = conf_mat.accuracy() - print("[EVAL]#image={} acc={:.4f} IoU={:.4f}".format( + print("[EVAL]#image: {} acc: {:.4f} IoU: {:.4f}".format( num_images, avg_acc, avg_iou)) print("[EVAL]Category IoU:", category_iou) print("[EVAL]Category Acc:", category_acc) diff --git a/legacy/pdseg/train.py b/legacy/pdseg/train.py index 38cb92fcba..3c564579f5 100644 --- a/legacy/pdseg/train.py +++ b/legacy/pdseg/train.py @@ -382,7 +382,7 @@ def data_generator(): eta = calculate_eta(all_step - step, avg_train_batch_cost) avg_loss /= args.log_steps print( - "epoch={} step={} lr={:.5f} loss={:.4f} batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}" + "epoch: {} step: {} lr: {:.5f} loss: {:.4f} batch_cost: {:.4f}, reader_cost: {:.5f}, ips: {:.4f} samples/sec | ETA {}" .format(epoch, step, lr.get_lr(), avg_loss, avg_train_batch_cost, avg_train_reader_cost, batch_cost_averager.get_ips_average(), eta)) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index faf2503d11..361cd605b6 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -215,7 +215,7 @@ def train(model, avg_train_reader_cost = reader_cost_averager.get_average() eta = calculate_eta(remain_iters, avg_train_batch_cost) logger.info( - "[TRAIN] epoch={}, 
iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}"
+            "[TRAIN] epoch: {}, iter: {}/{}, loss: {:.4f}, lr: {:.6f}, batch_cost: {:.4f}, reader_cost: {:.5f}, ips: {:.4f} samples/sec | ETA {}"
             .format((iter - 1) // iters_per_epoch + 1, iter, iters,
                     avg_loss, lr, avg_train_batch_cost,
                     avg_train_reader_cost,
diff --git a/paddleseg/core/val.py b/paddleseg/core/val.py
index 606f6afe0b..60c52bff64 100644
--- a/paddleseg/core/val.py
+++ b/paddleseg/core/val.py
@@ -82,7 +82,7 @@ def evaluate(model,
     if print_detail:
         logger.info(
-            "Start evaluating (total_samples={}, total_iters={})...".format(
+            "Start evaluating (total_samples: {}, total_iters: {})...".format(
                 len(eval_dataset), total_iters))
     progbar_val = progbar.Progbar(target=total_iters, verbose=1)
     reader_cost_averager = TimeAverager()
@@ -167,7 +167,7 @@ def evaluate(model,
     if print_detail:
         logger.info(
-            "[EVAL] #Images={} mIoU={:.4f} Acc={:.4f} Kappa={:.4f} ".format(
+            "[EVAL] #Images: {} mIoU: {:.4f} Acc: {:.4f} Kappa: {:.4f} ".format(
                 len(eval_dataset), miou, acc, kappa))
         logger.info("[EVAL] Class IoU: \n" + str(np.round(class_iou, 4)))
         logger.info("[EVAL] Class Acc: \n" + str(np.round(class_acc, 4)))

From 38d2cbaf58a4657d93b0235a65c31af814d0fb36 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Wed, 17 Mar 2021 17:55:29 +0800
Subject: [PATCH 082/210] update deeplabv3p.yml

---
 benchmark/deeplabv3p.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/benchmark/deeplabv3p.yml b/benchmark/deeplabv3p.yml
index bf9db712eb..e600a7867e 100644
--- a/benchmark/deeplabv3p.yml
+++ b/benchmark/deeplabv3p.yml
@@ -1,5 +1,5 @@
 batch_size: 2
-iters: 500
+iters: 80000

 train_dataset:
   type: Cityscapes
@@ -29,6 +29,7 @@ model:
     type: ResNet50_vd
     output_stride: 8
     multi_grid: [1, 2, 4]
+    pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz
   num_classes: 19
   backbone_indices: [0, 3]
   aspp_ratios: [1, 12, 24, 36]

From fd967f91b00fc1f795f899798cfaced693433390 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Wed, 17 Mar 2021 20:25:04 +0800
Subject: [PATCH 083/210] fix ade label bug

---
 paddleseg/datasets/ade.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/paddleseg/datasets/ade.py b/paddleseg/datasets/ade.py
index dae7de4360..d0db901e02 100644
--- a/paddleseg/datasets/ade.py
+++ b/paddleseg/datasets/ade.py
@@ -100,6 +100,8 @@ def __getitem__(self, idx):
         else:
             im, label = self.transforms(im=image_path, label=label_path)
             label = label - 1
+            # Recover the ignore pixels added by transform
+            label[label == 254] = 255
         if self.edge:
             edge_mask = F.mask_to_binary_edge(
                 label, radius=2, num_classes=self.num_classes)

From 64d6eb8621b762bcadec8bc206cd5bcfed721b41 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Fri, 19 Mar 2021 14:52:01 +0800
Subject: [PATCH 084/210] support NHWC data format for DeepLabv3+

---
 README.md                                     | 27 ++++--
 .../deeplabv3p_resnet50_vd_cityscapes.yaml    |  1 -
 paddleseg/core/infer.py                       |  4 +
 paddleseg/core/train.py                       |  4 +-
 paddleseg/models/backbones/resnet_vd.py       | 91 ++++++++++-------
 paddleseg/models/deeplab.py                   | 87 ++++++++++++----
 paddleseg/models/layers/layer_libs.py         | 22 ++++-
 paddleseg/models/layers/pyramid_pool.py       | 34 +++++--
 paddleseg/models/losses/cross_entropy_loss.py | 15 ++-
 train.py                                      | 20 ++++
 val.py                                        | 13 +++
 11 files changed, 244 insertions(+), 74 deletions(-)

diff --git a/README.md b/README.md
index fb2094d060..5078391f28 100644
--- a/README.md
+++ b/README.md
@@ -19,6 +19,16 @@ python -m paddle.distributed.launch train.py --config benchmark/hrnet.yml --iter
 fleetrun train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16
 ```
+The config file for the DeepLabv3+ model is:
+benchmark/deeplabv3p.yml
+
+**Note**
+
+* In dygraph mode, batch_size is the batch size per card.
+* DeepLabv3+ supports training in the 'NHWC' data format by passing **--data_format NHWC**.
+
+
+
 ## Static graph

 Place the cityscapes dataset under the legacy/dataset directory
@@ -33,17 +43,22 @@ python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_
 Use the following command for single-machine multi-card training:
 ```
 export CUDA_VISIBLE_DEVICES=0,1
-python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True
-```
-
-Distributed training
-```
 fleetrun pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True
 ```
+The config file for the deeplabv3p model is:
+configs/deeplabv3p_resnet50_vd_cityscapes.yaml
+
+**Note**
+In static graph mode, BATCH_SIZE is the total batch size.
+
 ## Competitor
 The competitor is [mmsegmentation](https://github.com/open-mmlab/mmsegmentation)
-The corresponding competitor config file is: configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py
+The corresponding competitor config files are:
+
+configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py
+
+configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py

 Please refer to its official repository for how to run them.
diff --git a/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml
index 1d49929600..76214f1f31 100644
--- a/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml
+++ b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml
@@ -19,7 +19,6 @@ DATASET:
     TEST_FILE_LIST: "dataset/cityscapes/val.list"
     TRAIN_FILE_LIST: "dataset/cityscapes/train.list"
     VAL_FILE_LIST: "dataset/cityscapes/val.list"
-    VIS_FILE_LIST: "dataset/cityscapes/val.list"
     IGNORE_INDEX: 255
     SEPARATOR: " "
 FREEZE:
diff --git a/paddleseg/core/infer.py b/paddleseg/core/infer.py
index 9d6df78b8a..f509ca4697 100644
--- a/paddleseg/core/infer.py
+++ b/paddleseg/core/infer.py
@@ -182,6 +182,8 @@ def inference(model,
         Tensor: If ori_shape is not None, a prediction with shape (1, 1, h, w) is returned. If ori_shape is None, a logit with shape (1, num_classes, h, w) is returned.
     """
+    if model.data_format == 'NHWC':
+        im = im.transpose((0, 2, 3, 1))
     if not is_slide:
         logits = model(im)
         if not isinstance(logits, collections.abc.Sequence):
@@ -191,6 +193,8 @@ def inference(model,
             logit = logits[0]
     else:
         logit = slide_inference(model, im, crop_size=crop_size, stride=stride)
+    if model.data_format == 'NHWC':
+        logit = logit.transpose((0, 3, 1, 2))
     if ori_shape is not None:
         pred = paddle.argmax(logit, axis=1, keepdim=True, dtype='int32')
         pred = reverse_transform(pred, ori_shape, transforms)
diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py
index 361cd605b6..4202e02d91 100644
--- a/paddleseg/core/train.py
+++ b/paddleseg/core/train.py
@@ -81,7 +81,7 @@ def train(model,
         losses (dict): A dict including 'types' and 'coef'. The length of coef should equal to 1 or len(losses['types']). The 'types' item is a list of object of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficient.
         keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5.
-        fp16: Whther to use amp.
+        fp16 (bool, optional): Whether to use amp.
""" model.train() nranks = paddle.distributed.ParallelEnv().nranks @@ -152,6 +152,8 @@ def train(model, edges = None if len(data) == 3: edges = data[2].astype('int64') + if model.data_format == 'NHWC': + images = images.transpose((0, 2, 3, 1)) if fp16: with paddle.amp.auto_cast( diff --git a/paddleseg/models/backbones/resnet_vd.py b/paddleseg/models/backbones/resnet_vd.py index 068a7e2b00..46a5f85e28 100644 --- a/paddleseg/models/backbones/resnet_vd.py +++ b/paddleseg/models/backbones/resnet_vd.py @@ -26,22 +26,25 @@ class ConvBNLayer(nn.Layer): - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - dilation=1, - groups=1, - is_vd_mode=False, - act=None, - ): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + dilation=1, + groups=1, + is_vd_mode=False, + act=None, + data_format='NCHW'): super(ConvBNLayer, self).__init__() self.is_vd_mode = is_vd_mode self._pool2d_avg = nn.AvgPool2D( - kernel_size=2, stride=2, padding=0, ceil_mode=True) + kernel_size=2, + stride=2, + padding=0, + ceil_mode=True, + data_format=data_format) self._conv = nn.Conv2D( in_channels=in_channels, out_channels=out_channels, @@ -50,9 +53,11 @@ def __init__( padding=(kernel_size - 1) // 2 if dilation == 1 else 0, dilation=dilation, groups=groups, - bias_attr=False) + bias_attr=False, + data_format=data_format) - self._batch_norm = layers.SyncBatchNorm(out_channels) + self._batch_norm = layers.SyncBatchNorm( + out_channels, data_format=data_format) self._act_op = layers.Activation(act=act) def forward(self, inputs): @@ -72,14 +77,17 @@ def __init__(self, stride, shortcut=True, if_first=False, - dilation=1): + dilation=1, + data_format='NCHW'): super(BottleneckBlock, self).__init__() + self.data_format = data_format self.conv0 = ConvBNLayer( in_channels=in_channels, out_channels=out_channels, kernel_size=1, - act='relu') + act='relu', + data_format=data_format) self.dilation = dilation @@ -89,12 +97,14 @@ def __init__(self, kernel_size=3, stride=stride, act='relu', - dilation=dilation) + dilation=dilation, + data_format=data_format) self.conv2 = ConvBNLayer( in_channels=out_channels, out_channels=out_channels * 4, kernel_size=1, - act=None) + act=None, + data_format=data_format) if not shortcut: self.short = ConvBNLayer( @@ -102,7 +112,8 @@ def __init__(self, out_channels=out_channels * 4, kernel_size=1, stride=1, - is_vd_mode=False if if_first or stride == 1 else True) + is_vd_mode=False if if_first or stride == 1 else True, + data_format=data_format) self.shortcut = shortcut @@ -114,7 +125,9 @@ def forward(self, inputs): # The performance drops down without the follow padding. 
if self.dilation > 1: padding = self.dilation - y = F.pad(y, [padding, padding, padding, padding]) + y = F.pad( + y, [padding, padding, padding, padding], + data_format=self.data_format) ##################################################################### conv1 = self.conv1(y) @@ -136,7 +149,8 @@ def __init__(self, out_channels, stride, shortcut=True, - if_first=False): + if_first=False, + data_format='NCHW'): super(BasicBlock, self).__init__() self.stride = stride self.conv0 = ConvBNLayer( @@ -144,12 +158,14 @@ def __init__(self, out_channels=out_channels, kernel_size=3, stride=stride, - act='relu') + act='relu', + data_format=data_format) self.conv1 = ConvBNLayer( in_channels=out_channels, out_channels=out_channels, kernel_size=3, - act=None) + act=None, + data_format=data_format) if not shortcut: self.short = ConvBNLayer( @@ -157,7 +173,8 @@ def __init__(self, out_channels=out_channels, kernel_size=1, stride=1, - is_vd_mode=False if if_first else True) + is_vd_mode=False if if_first else True, + data_format=data_format) self.shortcut = shortcut @@ -195,9 +212,11 @@ def __init__(self, layers=50, output_stride=8, multi_grid=(1, 1, 1), - pretrained=None): + pretrained=None, + data_format='NCHW'): super(ResNet_vd, self).__init__() + self.data_format = data_format self.conv1_logit = None # for gscnn shape stream self.layers = layers supported_layers = [18, 34, 50, 101, 152, 200] @@ -230,20 +249,28 @@ def __init__(self, dilation_dict = {3: 2} self.conv1_1 = ConvBNLayer( - in_channels=3, out_channels=32, kernel_size=3, stride=2, act='relu') + in_channels=3, + out_channels=32, + kernel_size=3, + stride=2, + act='relu', + data_format=data_format) self.conv1_2 = ConvBNLayer( in_channels=32, out_channels=32, kernel_size=3, stride=1, - act='relu') + act='relu', + data_format=data_format) self.conv1_3 = ConvBNLayer( in_channels=32, out_channels=64, kernel_size=3, stride=1, - act='relu') - self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) + act='relu', + data_format=data_format) + self.pool2d_max = nn.MaxPool2D( + kernel_size=3, stride=2, padding=1, data_format=data_format) # self.block_list = [] self.stage_list = [] @@ -281,7 +308,8 @@ def __init__(self, and dilation_rate == 1 else 1, shortcut=shortcut, if_first=block == i == 0, - dilation=dilation_rate)) + dilation=dilation_rate, + data_format=data_format)) block_list.append(bottleneck_block) shortcut = True @@ -300,7 +328,8 @@ def __init__(self, out_channels=num_filters[block], stride=2 if i == 0 and block != 0 else 1, shortcut=shortcut, - if_first=block == i == 0)) + if_first=block == i == 0, + data_format=data_format)) block_list.append(basic_block) shortcut = True self.stage_list.append(block_list) diff --git a/paddleseg/models/deeplab.py b/paddleseg/models/deeplab.py index 826cae688b..84d83bdaff 100644 --- a/paddleseg/models/deeplab.py +++ b/paddleseg/models/deeplab.py @@ -45,6 +45,7 @@ class DeepLabV3P(nn.Layer): align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. pretrained (str, optional): The path or url of pretrained model. Default: None. + data_format(str, optional): Data format that specifies the layout of input. It can be "NCHW" or "NHWC". Default: "NCHW". 
""" def __init__(self, @@ -54,7 +55,8 @@ def __init__(self, aspp_ratios=(1, 6, 12, 18), aspp_out_channels=256, align_corners=False, - pretrained=None): + pretrained=None, + data_format="NCHW"): super().__init__() self.backbone = backbone @@ -62,23 +64,34 @@ def __init__(self, backbone.feat_channels[i] for i in backbone_indices ] - self.head = DeepLabV3PHead(num_classes, backbone_indices, - backbone_channels, aspp_ratios, - aspp_out_channels, align_corners) + self.head = DeepLabV3PHead( + num_classes, + backbone_indices, + backbone_channels, + aspp_ratios, + aspp_out_channels, + align_corners, + data_format=data_format) self.align_corners = align_corners self.pretrained = pretrained + self.data_format = data_format self.init_weight() def forward(self, x): feat_list = self.backbone(x) logit_list = self.head(feat_list) + if self.data_format == 'NCHW': + ori_shape = x.shape[2:] + else: + ori_shape = x.shape[1:3] return [ F.interpolate( logit, - x.shape[2:], + ori_shape, mode='bilinear', - align_corners=self.align_corners) for logit in logit_list + align_corners=self.align_corners, + data_format=self.data_format) for logit in logit_list ] def init_weight(self): @@ -104,10 +117,17 @@ class DeepLabV3PHead(nn.Layer): aspp_out_channels (int): The output channels of ASPP module. align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. + data_format(str, optional): Data format that specifies the layout of input. It can be "NCHW" or "NHWC". Default: "NCHW". """ - def __init__(self, num_classes, backbone_indices, backbone_channels, - aspp_ratios, aspp_out_channels, align_corners): + def __init__(self, + num_classes, + backbone_indices, + backbone_channels, + aspp_ratios, + aspp_out_channels, + align_corners, + data_format='NCHW'): super().__init__() self.aspp = layers.ASPPModule( @@ -116,8 +136,13 @@ def __init__(self, num_classes, backbone_indices, backbone_channels, aspp_out_channels, align_corners, use_sep_conv=True, - image_pooling=True) - self.decoder = Decoder(num_classes, backbone_channels[0], align_corners) + image_pooling=True, + data_format=data_format) + self.decoder = Decoder( + num_classes, + backbone_channels[0], + align_corners, + data_format=data_format) self.backbone_indices = backbone_indices def forward(self, feat_list): @@ -228,29 +253,55 @@ class Decoder(nn.Layer): in_channels (int): The number of input channels in decoder module. 
""" - def __init__(self, num_classes, in_channels, align_corners): + def __init__(self, + num_classes, + in_channels, + align_corners, + data_format='NCHW'): super(Decoder, self).__init__() + self.data_format = data_format self.conv_bn_relu1 = layers.ConvBNReLU( - in_channels=in_channels, out_channels=48, kernel_size=1) + in_channels=in_channels, + out_channels=48, + kernel_size=1, + data_format=data_format) self.conv_bn_relu2 = layers.SeparableConvBNReLU( - in_channels=304, out_channels=256, kernel_size=3, padding=1) + in_channels=304, + out_channels=256, + kernel_size=3, + padding=1, + data_format=data_format) self.conv_bn_relu3 = layers.SeparableConvBNReLU( - in_channels=256, out_channels=256, kernel_size=3, padding=1) + in_channels=256, + out_channels=256, + kernel_size=3, + padding=1, + data_format=data_format) self.conv = nn.Conv2D( - in_channels=256, out_channels=num_classes, kernel_size=1) + in_channels=256, + out_channels=num_classes, + kernel_size=1, + data_format=data_format) self.align_corners = align_corners def forward(self, x, low_level_feat): low_level_feat = self.conv_bn_relu1(low_level_feat) + if self.data_format == 'NCHW': + low_level_shape = low_level_feat.shape[-2:] + axis = 1 + else: + low_level_shape = low_level_feat.shape[1:3] + axis = -1 x = F.interpolate( x, - low_level_feat.shape[2:], + low_level_shape, mode='bilinear', - align_corners=self.align_corners) - x = paddle.concat([x, low_level_feat], axis=1) + align_corners=self.align_corners, + data_format=self.data_format) + x = paddle.concat([x, low_level_feat], axis=axis) x = self.conv_bn_relu2(x) x = self.conv_bn_relu3(x) x = self.conv(x) diff --git a/paddleseg/models/layers/layer_libs.py b/paddleseg/models/layers/layer_libs.py index 9a1ccad16b..aabbcb8b7e 100644 --- a/paddleseg/models/layers/layer_libs.py +++ b/paddleseg/models/layers/layer_libs.py @@ -41,7 +41,11 @@ def __init__(self, self._conv = nn.Conv2D( in_channels, out_channels, kernel_size, padding=padding, **kwargs) - self._batch_norm = SyncBatchNorm(out_channels) + if 'data_format' in kwargs: + data_format = kwargs['data_format'] + else: + data_format = 'NCHW' + self._batch_norm = SyncBatchNorm(out_channels, data_format=data_format) def forward(self, x): x = self._conv(x) @@ -60,7 +64,11 @@ def __init__(self, super().__init__() self._conv = nn.Conv2D( in_channels, out_channels, kernel_size, padding=padding, **kwargs) - self._batch_norm = SyncBatchNorm(out_channels) + if 'data_format' in kwargs: + data_format = kwargs['data_format'] + else: + data_format = 'NCHW' + self._batch_norm = SyncBatchNorm(out_channels, data_format=data_format) def forward(self, x): x = self._conv(x) @@ -101,8 +109,16 @@ def __init__(self, padding=padding, groups=in_channels, **kwargs) + if 'data_format' in kwargs: + data_format = kwargs['data_format'] + else: + data_format = 'NCHW' self.piontwise_conv = ConvBNReLU( - in_channels, out_channels, kernel_size=1, groups=1) + in_channels, + out_channels, + kernel_size=1, + groups=1, + data_format=data_format) def forward(self, x): x = self.depthwise_conv(x) diff --git a/paddleseg/models/layers/pyramid_pool.py b/paddleseg/models/layers/pyramid_pool.py index 0fb50bc608..dc98107629 100644 --- a/paddleseg/models/layers/pyramid_pool.py +++ b/paddleseg/models/layers/pyramid_pool.py @@ -39,10 +39,12 @@ def __init__(self, out_channels, align_corners, use_sep_conv=False, - image_pooling=False): + image_pooling=False, + data_format='NCHW'): super().__init__() self.align_corners = align_corners + self.data_format = data_format self.aspp_blocks 
= nn.LayerList() for ratio in aspp_ratios: @@ -56,36 +58,49 @@ def __init__(self, out_channels=out_channels, kernel_size=1 if ratio == 1 else 3, dilation=ratio, - padding=0 if ratio == 1 else ratio) + padding=0 if ratio == 1 else ratio, + data_format=data_format) self.aspp_blocks.append(block) out_size = len(self.aspp_blocks) if image_pooling: self.global_avg_pool = nn.Sequential( - nn.AdaptiveAvgPool2D(output_size=(1, 1)), + nn.AdaptiveAvgPool2D( + output_size=(1, 1), data_format=data_format), layers.ConvBNReLU( - in_channels, out_channels, kernel_size=1, bias_attr=False)) + in_channels, + out_channels, + kernel_size=1, + bias_attr=False, + data_format=data_format)) out_size += 1 self.image_pooling = image_pooling self.conv_bn_relu = layers.ConvBNReLU( in_channels=out_channels * out_size, out_channels=out_channels, - kernel_size=1) + kernel_size=1, + data_format=data_format) self.dropout = nn.Dropout(p=0.1) # drop rate def forward(self, x): outputs = [] - interpolate_shape = x.shape[2:] + if self.data_format == 'NCHW': + interpolate_shape = x.shape[2:] + axis = 1 + else: + interpolate_shape = x.shape[1:3] + axis = -1 for block in self.aspp_blocks: y = block(x) y = F.interpolate( y, interpolate_shape, mode='bilinear', - align_corners=self.align_corners) + align_corners=self.align_corners, + data_format=self.data_format) outputs.append(y) if self.image_pooling: @@ -94,10 +109,11 @@ def forward(self, x): img_avg, interpolate_shape, mode='bilinear', - align_corners=self.align_corners) + align_corners=self.align_corners, + data_format=self.data_format) outputs.append(img_avg) - x = paddle.concat(outputs, axis=1) + x = paddle.concat(outputs, axis=axis) x = self.conv_bn_relu(x) x = self.dropout(x) diff --git a/paddleseg/models/losses/cross_entropy_loss.py b/paddleseg/models/losses/cross_entropy_loss.py index da71cb3461..5144137d10 100644 --- a/paddleseg/models/losses/cross_entropy_loss.py +++ b/paddleseg/models/losses/cross_entropy_loss.py @@ -29,10 +29,11 @@ class CrossEntropyLoss(nn.Layer): and does not contribute to the input gradient. Default ``255``. """ - def __init__(self, ignore_index=255): + def __init__(self, ignore_index=255, data_format='NCHW'): super(CrossEntropyLoss, self).__init__() self.ignore_index = ignore_index self.EPS = 1e-5 + self.data_format = data_format def forward(self, logit, label): """ @@ -46,13 +47,17 @@ def forward(self, logit, label): value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is (N, D1, D2,..., Dk), k >= 1. """ + if self.data_format == 'NCHW': + axis = 1 + else: + axis = -1 if len(label.shape) != len(logit.shape): - label = paddle.unsqueeze(label, 1) + label = paddle.unsqueeze(label, axis) - logit = paddle.transpose(logit, [0, 2, 3, 1]) - label = paddle.transpose(label, [0, 2, 3, 1]) + # logit = paddle.transpose(logit, [0, 2, 3, 1]) + # label = paddle.transpose(label, [0, 2, 3, 1]) loss = F.softmax_with_cross_entropy( - logit, label, ignore_index=self.ignore_index, axis=-1) + logit, label, ignore_index=self.ignore_index, axis=axis) mask = label != self.ignore_index mask = paddle.cast(mask, 'float32') diff --git a/train.py b/train.py index a951745e07..9d784e3112 100644 --- a/train.py +++ b/train.py @@ -92,11 +92,25 @@ def parse_args(): action='store_true') parser.add_argument( '--fp16', dest='fp16', help='Whther to use amp', action='store_true') + parser.add_argument( + '--data_format', + dest='data_format', + help= + 'Data format that specifies the layout of input. It can be "NCHW" or "NHWC". 
Default: "NCHW".',
+        type=str,
+        default='NCHW')

     return parser.parse_args()


 def main(args):
+    # import numpy as np
+    # from paddle.fluid import core
+    # np.random.seed(0)
+    # core.default_cuda_generator(0).manual_seed(256)
+    # core.default_cpu_generator().manual_seed(256)
+    # print(paddle.rand((1,)))

     env_info = get_sys_env()
     info = ['{}: {}'.format(k, v) for k, v in env_info.items()]
     info = '\n'.join(['', format('Environment Information', '-^48s')] + info +
@@ -116,6 +130,12 @@ def main(args):
         iters=args.iters,
         batch_size=args.batch_size)

+    cfg.dic['model']['data_format'] = args.data_format
+    cfg.dic['model']['backbone']['data_format'] = args.data_format
+    loss_len = len(cfg.dic['loss']['types'])
+    for i in range(loss_len):
+        cfg.dic['loss']['types'][i]['data_format'] = args.data_format
+
     train_dataset = cfg.train_dataset
     if train_dataset is None:
         raise RuntimeError(
diff --git a/val.py b/val.py
index 556ff89b77..2807953fea 100644
--- a/val.py
+++ b/val.py
@@ -87,6 +87,13 @@ def parse_args():
         'The stride of sliding window, the first is width and the second is height.',
         type=int,
         default=None)
+    parser.add_argument(
+        '--data_format',
+        dest='data_format',
+        help=
+        'Data format that specifies the layout of input. It can be "NCHW" or "NHWC". Default: "NCHW".',
+        type=str,
+        default='NCHW')

     return parser.parse_args()
@@ -101,6 +108,12 @@ def main(args):
         raise RuntimeError('No configuration file specified.')
     cfg = Config(args.cfg)

+    cfg.dic['model']['data_format'] = args.data_format
+    cfg.dic['model']['backbone']['data_format'] = args.data_format
+    loss_len = len(cfg.dic['loss']['types'])
+    for i in range(loss_len):
+        cfg.dic['loss']['types'][i]['data_format'] = args.data_format
+
     val_dataset = cfg.val_dataset
     if val_dataset is None:
         raise RuntimeError(

From b3ce899a8f1e58423e71ab8e8bd8dabbf1288e31 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Mon, 22 Mar 2021 14:53:47 +0800
Subject: [PATCH 085/210] fix the data_format bug

---
 paddleseg/models/fcn.py |  6 +++++-
 train.py                | 15 ++++++++++-----
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/paddleseg/models/fcn.py b/paddleseg/models/fcn.py
index 921c4d827a..5b4bf6e306 100644
--- a/paddleseg/models/fcn.py
+++ b/paddleseg/models/fcn.py
@@ -48,9 +48,12 @@ def __init__(self,
                  channels=None,
                  align_corners=False,
                  pretrained=None,
-                 bias=True):
+                 bias=True,
+                 data_format="NCHW"):
         super(FCN, self).__init__()

+        if data_format != 'NCHW':
+            raise ValueError('fcn only supports the NCHW data format')
         self.backbone = backbone
         backbone_channels = [
             backbone.feat_channels[i] for i in backbone_indices
@@ -65,6 +68,7 @@ def __init__(self,

         self.align_corners = align_corners
         self.pretrained = pretrained
+        self.data_format = data_format
         self.init_weight()

     def forward(self, x):
diff --git a/train.py b/train.py
index 9d784e3112..9041b3c385 100644
--- a/train.py
+++ b/train.py
@@ -130,11 +130,16 @@ def main(args):
         iters=args.iters,
         batch_size=args.batch_size)

-    cfg.dic['model']['data_format'] = args.data_format
-    cfg.dic['model']['backbone']['data_format'] = args.data_format
-    loss_len = len(cfg.dic['loss']['types'])
-    for i in range(loss_len):
-        cfg.dic['loss']['types'][i]['data_format'] = args.data_format
+    # Only supported for the DeepLabv3+ model
+    if args.data_format == 'NHWC':
+        if cfg.dic['model'] != 'DeepLabV3P':
+            raise ValueError(
+                'The "NHWC" data format only supports the DeepLabV3P model!')
+        cfg.dic['model']['data_format'] = args.data_format
+        cfg.dic['model']['backbone']['data_format'] = args.data_format
+        loss_len = len(cfg.dic['loss']['types'])
+        for i in range(loss_len):
+            cfg.dic['loss']['types'][i]['data_format'] = args.data_format

     train_dataset = cfg.train_dataset
     if train_dataset is None:
         raise RuntimeError(

From cc8b349090d5b83ffdfb77a0bf17cfcb397640f9 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Tue, 23 Mar 2021 14:36:54 +0800
Subject: [PATCH 086/210] fix the data_format bug

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 9041b3c385..ebce2f29b0 100644
--- a/train.py
+++ b/train.py
@@ -132,7 +132,7 @@ def main(args):

     # Only supported for the DeepLabv3+ model
     if args.data_format == 'NHWC':
-        if cfg.dic['model'] != 'DeepLabV3P':
+        if cfg.dic['model']['type'] != 'DeepLabV3P':
             raise ValueError(
                 'The "NHWC" data format only supports the DeepLabV3P model!')

From 33e3cc5b34f39e4426c03b08710336b45ecbbd65 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Mon, 29 Mar 2021 14:59:47 +0800
Subject: [PATCH 087/210] fix data_format for evaluation

---
 val.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/val.py b/val.py
index 2807953fea..ef90c25ae6 100644
--- a/val.py
+++ b/val.py
@@ -108,11 +108,16 @@ def main(args):
         raise RuntimeError('No configuration file specified.')
     cfg = Config(args.cfg)

-    cfg.dic['model']['data_format'] = args.data_format
-    cfg.dic['model']['backbone']['data_format'] = args.data_format
-    loss_len = len(cfg.dic['loss']['types'])
-    for i in range(loss_len):
-        cfg.dic['loss']['types'][i]['data_format'] = args.data_format
+    # Only supported for the DeepLabv3+ model
+    if args.data_format == 'NHWC':
+        if cfg.dic['model']['type'] != 'DeepLabV3P':
+            raise ValueError(
+                'The "NHWC" data format only supports the DeepLabV3P model!')
+        cfg.dic['model']['data_format'] = args.data_format
+        cfg.dic['model']['backbone']['data_format'] = args.data_format
+        loss_len = len(cfg.dic['loss']['types'])
+        for i in range(loss_len):
+            cfg.dic['loss']['types'][i]['data_format'] = args.data_format

     val_dataset = cfg.val_dataset
     if val_dataset is None:
         raise RuntimeError(

From d7dfcf785b3d71c3a87513e473a359bb42677ec7 Mon Sep 17 00:00:00 2001
From: zhangting2020
Date: Tue, 30 Mar 2021 06:50:25 +0000
Subject: [PATCH 088/210] add script for fp16 training

---
 paddleseg/core/train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py
index 4202e02d91..151a95adeb 100644
--- a/paddleseg/core/train.py
+++ b/paddleseg/core/train.py
@@ -157,7 +157,7 @@ def train(model,

             if fp16:
                 with paddle.amp.auto_cast(
-                        enable=True, custom_black_list={'bilinear_interp_v2'}):
+                        enable=True, custom_white_list={"elementwise_add", "batch_norm"}, custom_black_list={'bilinear_interp_v2'}):
                     if nranks > 1:
                         logits_list = ddp_model(images)
                     else:

From c44de62e2d8620f0f8c0d766ac2ff3d094292e58 Mon Sep 17 00:00:00 2001
From: zhangting2020
Date: Tue, 30 Mar 2021 07:15:18 +0000
Subject: [PATCH 089/210] add script for fp32 training

---
 script/run_fp16.sh | 12 ++++++++++++
 script/run_fp32.sh | 11 +++++++++++
 2 files changed, 23 insertions(+)
 create mode 100755 script/run_fp16.sh
 create mode 100755 script/run_fp32.sh

diff --git a/script/run_fp16.sh b/script/run_fp16.sh
new file mode 100755
index 0000000000..2551785583
--- /dev/null
+++ b/script/run_fp16.sh
@@ -0,0 +1,12 @@
+export FLAGS_conv_workspace_size_limit=2000 #MB
+export FLAGS_cudnn_exhaustive_search=1
+export FLAGS_cudnn_batchnorm_spatial_persistent=1
+
+python train.py --config benchmark/deeplabv3p.yml \
+       --iters=500 \
+       --batch_size 4 \
+       --learning_rate 0.01 \
+       --num_workers 8 \
+       --log_iters 20 \
+       --data_format NHWC \
+       --fp16
diff --git a/script/run_fp32.sh b/script/run_fp32.sh
new file mode 100755
index 0000000000..bf216b81aa
--- /dev/null
+++ b/script/run_fp32.sh
@@ -0,0 +1,11 @@
+export FLAGS_conv_workspace_size_limit=2000 #MB
+export FLAGS_cudnn_exhaustive_search=1
+export FLAGS_cudnn_batchnorm_spatial_persistent=1
+
+python train.py --config benchmark/deeplabv3p.yml \
+       --iters=500 \
+       --batch_size 2 \
+       --learning_rate 0.01 \
+       --num_workers 8 \
+       --log_iters 20 \
+       --data_format NCHW \

From a598693a2d2793e33c2f20d261c92bdf4033f4c3 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Fri, 16 Apr 2021 17:47:48 +0800
Subject: [PATCH 090/210] fix reverse for ResizeByLong

---
 paddleseg/core/infer.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/paddleseg/core/infer.py b/paddleseg/core/infer.py
index 20e97ba0a7..b68b1e6cb7 100644
--- a/paddleseg/core/infer.py
+++ b/paddleseg/core/infer.py
@@ -36,9 +36,21 @@ def get_reverse_list(ori_shape, transforms):
     reverse_list = []
     h, w = ori_shape[0], ori_shape[1]
     for op in transforms:
-        if op.__class__.__name__ in ['Resize', 'ResizeByLong']:
+        if op.__class__.__name__ in ['Resize']:
             reverse_list.append(('resize', (h, w)))
             h, w = op.target_size[0], op.target_size[1]
+        if op.__class__.__name__ in ['ResizeByLong']:
+            reverse_list.append(('resize', (h, w)))
+            long_edge = max(h, w)
+            short_edge = min(h, w)
+            short_edge = int(round(short_edge * op.long_size / long_edge))
+            long_edge = op.long_size
+            if h > w:
+                h = long_edge
+                w = short_edge
+            else:
+                w = long_edge
+                h = short_edge
         if op.__class__.__name__ in ['Padding']:
            reverse_list.append(('padding', (h, w)))
            w, h = op.target_size[0], op.target_size[1]

From 1a544d73951a84513c1642e087ffbdcac7787796 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Tue, 20 Apr 2021 16:34:47 +0800
Subject: [PATCH 091/210] fix decoupled_segnet grad bug

---
 paddleseg/models/decoupled_segnet.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/paddleseg/models/decoupled_segnet.py b/paddleseg/models/decoupled_segnet.py
index 74f9dfe19d..cb74e25b19 100644
--- a/paddleseg/models/decoupled_segnet.py
+++ b/paddleseg/models/decoupled_segnet.py
@@ -213,6 +213,7 @@ def forward(self, x):
     def flow_warp(self, input, flow, size):
         input_shape = paddle.shape(input)
         norm = size[::-1].reshape([1, 1, 1, -1])
+        norm.stop_gradient = True
         h_grid = paddle.linspace(-1.0, 1.0, size[0]).reshape([-1, 1])
         h_grid = h_grid.tile([size[1]])
         w_grid = paddle.linspace(-1.0, 1.0, size[1]).reshape([-1, 1])

From 3b3aaa854dc58ace450c8b1de4a93887cf1c9e28 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Tue, 27 Apr 2021 19:45:03 +0800
Subject: [PATCH 092/210] add panoptic segmentation

---
 contrib/PanopticDeepLab/README.md             | 135 +++
 .../configs/_base_/cityscapes_panoptic.yml    |  55 ++
 ...32_cityscapes_1025x513_bs8_90k_lr00005.yml |  19 +
 ...2_cityscapes_2049x1025_bs1_90k_lr00005.yml |  23 +
 .../PanopticDeepLab/docs/panoptic_deeplab.jpg | Bin 0 -> 117991 bytes
 .../docs/visualization_instance.png           | Bin 0 -> 33748 bytes
 .../docs/visualization_instance_added.jpg     | Bin 0 -> 68681 bytes
 .../docs/visualization_panoptic.png           | Bin 0 -> 79483 bytes
 .../docs/visualization_panoptic_added.jpg     | Bin 0 -> 67547 bytes
 .../docs/visualization_semantic.png           | Bin 0 -> 74300 bytes
 .../docs/visualization_semantic_added.jpg     | Bin 0 -> 75334 bytes
 contrib/PanopticDeepLab/paddleseg/__init__.py |  17 +
 .../PanopticDeepLab/paddleseg/core/__init__.py |  20 +
 .../PanopticDeepLab/paddleseg/core/infer.py   | 349 +++
.../PanopticDeepLab/paddleseg/core/predict.py | 188 ++++ .../PanopticDeepLab/paddleseg/core/train.py | 315 +++++++ contrib/PanopticDeepLab/paddleseg/core/val.py | 181 ++++ .../paddleseg/cvlibs/__init__.py | 17 + .../paddleseg/cvlibs/callbacks.py | 279 ++++++ .../paddleseg/cvlibs/config.py | 297 ++++++ .../paddleseg/cvlibs/manager.py | 143 +++ .../paddleseg/cvlibs/param_init.py | 91 ++ .../paddleseg/datasets/__init__.py | 15 + .../paddleseg/datasets/cityscapes_panoptic.py | 192 ++++ .../paddleseg/models/__init__.py | 18 + .../paddleseg/models/backbones/__init__.py | 18 + .../paddleseg/models/backbones/hrnet.py | 820 ++++++++++++++++ .../paddleseg/models/backbones/mobilenetv3.py | 364 +++++++ .../paddleseg/models/backbones/resnet_vd.py | 361 +++++++ .../models/backbones/xception_deeplab.py | 415 ++++++++ .../paddleseg/models/layers/__init__.py | 19 + .../paddleseg/models/layers/activation.py | 73 ++ .../paddleseg/models/layers/attention.py | 143 +++ .../paddleseg/models/layers/layer_libs.py | 165 ++++ .../paddleseg/models/layers/nonlocal2d.py | 154 +++ .../paddleseg/models/layers/pyramid_pool.py | 185 ++++ .../paddleseg/models/losses/__init__.py | 17 + .../models/losses/cross_entropy_loss.py | 74 ++ .../paddleseg/models/losses/l1_loss.py | 72 ++ .../models/losses/mean_square_error_loss.py | 60 ++ .../paddleseg/models/panoptic_deeplab.py | 339 +++++++ .../paddleseg/transforms/__init__.py | 17 + .../paddleseg/transforms/functional.py | 160 ++++ .../paddleseg/transforms/target_transforms.py | 281 ++++++ .../paddleseg/transforms/transforms.py | 888 ++++++++++++++++++ .../paddleseg/utils/__init__.py | 23 + .../paddleseg/utils/config_check.py | 59 ++ .../paddleseg/utils/download.py | 163 ++++ .../paddleseg/utils/evaluation/__init__.py | 17 + .../paddleseg/utils/evaluation/instance.py | 345 +++++++ .../paddleseg/utils/evaluation/panoptic.py | 220 +++++ .../paddleseg/utils/evaluation/semantic.py | 85 ++ .../PanopticDeepLab/paddleseg/utils/logger.py | 49 + .../paddleseg/utils/metrics.py | 146 +++ .../PanopticDeepLab/paddleseg/utils/paddle.py | 125 +++ .../paddleseg/utils/progbar.py | 209 +++++ .../PanopticDeepLab/paddleseg/utils/timer.py | 53 ++ .../PanopticDeepLab/paddleseg/utils/utils.py | 120 +++ .../paddleseg/utils/visualize.py | 195 ++++ contrib/PanopticDeepLab/predict.py | 147 +++ contrib/PanopticDeepLab/train.py | 176 ++++ contrib/PanopticDeepLab/val.py | 109 +++ 62 files changed, 9220 insertions(+) create mode 100644 contrib/PanopticDeepLab/README.md create mode 100644 contrib/PanopticDeepLab/configs/_base_/cityscapes_panoptic.yml create mode 100644 contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml create mode 100644 contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml create mode 100644 contrib/PanopticDeepLab/docs/panoptic_deeplab.jpg create mode 100644 contrib/PanopticDeepLab/docs/visualization_instance.png create mode 100644 contrib/PanopticDeepLab/docs/visualization_instance_added.jpg create mode 100644 contrib/PanopticDeepLab/docs/visualization_panoptic.png create mode 100644 contrib/PanopticDeepLab/docs/visualization_panoptic_added.jpg create mode 100644 contrib/PanopticDeepLab/docs/visualization_semantic.png create mode 100644 contrib/PanopticDeepLab/docs/visualization_semantic_added.jpg create mode 100644 contrib/PanopticDeepLab/paddleseg/__init__.py create mode 100644 contrib/PanopticDeepLab/paddleseg/core/__init__.py create mode 100644 
contrib/PanopticDeepLab/paddleseg/core/infer.py create mode 100644 contrib/PanopticDeepLab/paddleseg/core/predict.py create mode 100644 contrib/PanopticDeepLab/paddleseg/core/train.py create mode 100644 contrib/PanopticDeepLab/paddleseg/core/val.py create mode 100644 contrib/PanopticDeepLab/paddleseg/cvlibs/__init__.py create mode 100644 contrib/PanopticDeepLab/paddleseg/cvlibs/callbacks.py create mode 100644 contrib/PanopticDeepLab/paddleseg/cvlibs/config.py create mode 100644 contrib/PanopticDeepLab/paddleseg/cvlibs/manager.py create mode 100644 contrib/PanopticDeepLab/paddleseg/cvlibs/param_init.py create mode 100644 contrib/PanopticDeepLab/paddleseg/datasets/__init__.py create mode 100644 contrib/PanopticDeepLab/paddleseg/datasets/cityscapes_panoptic.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/__init__.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/backbones/__init__.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/backbones/hrnet.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/backbones/mobilenetv3.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/backbones/resnet_vd.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/backbones/xception_deeplab.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/layers/__init__.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/layers/activation.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/layers/attention.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/layers/layer_libs.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/layers/nonlocal2d.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/layers/pyramid_pool.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/losses/__init__.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/losses/cross_entropy_loss.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/losses/l1_loss.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/losses/mean_square_error_loss.py create mode 100644 contrib/PanopticDeepLab/paddleseg/models/panoptic_deeplab.py create mode 100644 contrib/PanopticDeepLab/paddleseg/transforms/__init__.py create mode 100644 contrib/PanopticDeepLab/paddleseg/transforms/functional.py create mode 100644 contrib/PanopticDeepLab/paddleseg/transforms/target_transforms.py create mode 100644 contrib/PanopticDeepLab/paddleseg/transforms/transforms.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/__init__.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/config_check.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/download.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/evaluation/__init__.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/evaluation/instance.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/evaluation/panoptic.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/evaluation/semantic.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/logger.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/metrics.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/paddle.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/progbar.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/timer.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/utils.py create mode 100644 contrib/PanopticDeepLab/paddleseg/utils/visualize.py create mode 100644 
contrib/PanopticDeepLab/predict.py
 create mode 100644 contrib/PanopticDeepLab/train.py
 create mode 100644 contrib/PanopticDeepLab/val.py

diff --git a/contrib/PanopticDeepLab/README.md b/contrib/PanopticDeepLab/README.md
new file mode 100644
index 0000000000..f829a277f6
--- /dev/null
+++ b/contrib/PanopticDeepLab/README.md
@@ -0,0 +1,135 @@
+
+# Panoptic DeepLab
+
+A Paddle implementation of the [Panoptic DeepLab](https://arxiv.org/abs/1911.10194) panoptic segmentation algorithm.
+
+Panoptic DeepLab was the first work to show that a bottom-up method can reach state-of-the-art results. It predicts three outputs: semantic segmentation, center prediction, and center regression. Pixels of instance classes are grouped to their nearest instance center to obtain the instance segmentation result, and the semantic and instance segmentation results are then fused by a majority-vote rule to produce the final panoptic segmentation.
+In other words, segmentation is achieved by assigning every pixel to a class or an instance; a small illustrative sketch of this post-processing is given after the prediction section below.
+![](./docs/panoptic_deeplab.jpg)
+
+## Model Baselines
+
+### Cityscapes
+| Backbone | Batch Size | Resolution | Training Iters | PQ | SQ | RQ | AP | mIoU | Links |
+|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
+|ResNet50_OS32| 8 | 2049x1025|90000|100%|100%|100%|100%|100%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/train.log)|
+|ResNet50_OS32| 64 | 1025x513|90000|100%|100%|100%|100%|100%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/train.log)|
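+
+The PQ/SQ/RQ columns are the panoptic, segmentation and recognition quality metrics of Kirillov et al. (2019); for reference, PQ decomposes as follows (this formula is the standard definition, restated here, not a detail specific to this repo):
+
+```latex
+% TP: predicted/ground-truth segment pairs matched with IoU > 0.5;
+% FP and FN: unmatched predicted and ground-truth segments.
+PQ = \underbrace{\frac{\sum_{(p,g) \in \mathit{TP}} \mathrm{IoU}(p,g)}{|\mathit{TP}|}}_{\text{SQ}}
+     \times
+     \underbrace{\frac{|\mathit{TP}|}{|\mathit{TP}| + \tfrac{1}{2}|\mathit{FP}| + \tfrac{1}{2}|\mathit{FN}|}}_{\text{RQ}}
+```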
+## Environment Setup
+
+1. System requirements
+* PaddlePaddle >= 2.0.0
+* Python >= 3.6+
+The GPU build of PaddlePaddle is recommended. For detailed installation instructions, see the official [PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/windows-pip.html) site.
+
+2. Clone the PaddleSeg repo
+```shell
+git clone https://github.com/PaddlePaddle/PaddleSeg
+```
+
+3. Enter the PaddleSeg/contrib/PanopticDeepLab directory
+```shell
+cd PaddleSeg/contrib/PanopticDeepLab
+```
+
+4. Add the current directory to PYTHONPATH
+```shell
+export PYTHONPATH=`pwd`:$PYTHONPATH
+```
+
+## Dataset Preparation
+
+Place datasets under the `data` directory.
+
+### Cityscapes
+
+Dataset directory structure:
+```
+cityscapes/
+|--gtFine/
+| |--train/
+| | |--aachen/
+| | | |--*_color.png, *_instanceIds.png, *_labelIds.png, *_polygons.json,
+| | | |--*_labelTrainIds.png
+| | | |--...
+| |--val/
+| |--test/
+| |--cityscapes_panoptic_train_trainId.json
+| |--cityscapes_panoptic_train_trainId/
+| | |-- *_panoptic.png
+| |--cityscapes_panoptic_val_trainId.json
+| |--cityscapes_panoptic_val_trainId/
+| | |-- *_panoptic.png
+|--leftImg8bit/
+| |--train/
+| |--val/
+| |--test/
+
+```
+
+Install CityscapesScripts:
+```shell
+pip install git+https://github.com/mcordts/cityscapesScripts.git
+```
+
+Command to generate the *_panoptic.png files:
+```shell
+python /path/to/cityscapesscripts/preparation/createPanopticImgs.py --dataset-folder data/cityscapes/gtFine/ --output-folder data/cityscapes/gtFine/ --use-train-id
+```
+
+## Training
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # set the GPU ids according to your machine
+python -m paddle.distributed.launch train.py \
+    --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
+    --do_eval \
+    --use_vdl \
+    --save_interval 5000 \
+    --save_dir output
+```
+
+**note:** Using --do_eval slows down training and increases GPU memory usage; turn it on or off as needed.
+
+Run the following command to see more parameter options:
+```shell
+python train.py --help
+```
+
+## Evaluation
+```shell
+python val.py \
+    --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
+    --model_path output/iter_90000/model.pdparams
+```
+Run the following command to see more parameter options:
+```shell
+python val.py --help
+```
+
+## Prediction and Saving Visualized Results
+```shell
+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # set the GPU ids according to your machine
+python -m paddle.distributed.launch predict.py \
+    --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
+    --model_path output/iter_90000/model.pdparams \
+    --image_path data/cityscapes/leftImg8bit/val/ \
+    --save_dir ./output/result
+```
+Run the following command to see more parameter options:
+```shell
+python predict.py --help
+```
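+
+As a rough illustration of the grouping-and-fusion post-processing described at the top of this README, here is a deliberately minimal NumPy sketch. The function name, argument names and array shapes are assumptions made for illustration only; the actual implementation in this contrib's paddleseg/core/infer.py is more involved (e.g. thresholding and NMS on the center heatmap):
+
+```python
+import numpy as np
+
+def toy_panoptic_fusion(semantic, centers, offsets, thing_ids, label_divisor=1000):
+    """semantic: (H, W) per-pixel class ids from the semantic head.
+    centers: (K, 2) array of (y, x) instance centers, K >= 1, kept after
+             thresholding/NMS on the center-prediction heatmap.
+    offsets: (2, H, W) predicted (dy, dx) from each pixel to its center.
+    thing_ids: class ids treated as instances ("things")."""
+    h, w = semantic.shape
+    ys, xs = np.mgrid[0:h, 0:w]
+    # Center regression: every pixel votes for a center location.
+    voted = np.stack([ys + offsets[0], xs + offsets[1]], axis=-1)  # (H, W, 2)
+    # Group each pixel to its nearest predicted center.
+    dist = np.linalg.norm(voted[:, :, None, :] - centers[None, None], axis=-1)
+    instance = dist.argmin(-1) + 1  # instance ids 1..K
+    instance[~np.isin(semantic, list(thing_ids))] = 0  # stuff pixels carry no instance
+    # Majority-vote fusion: each instance takes its most frequent semantic class.
+    panoptic = semantic * label_divisor  # stuff pixels keep the class id only
+    for k in range(1, len(centers) + 1):
+        mask = instance == k
+        if mask.any():
+            cls = np.bincount(semantic[mask]).argmax()
+            panoptic[mask] = cls * label_divisor + k
+    return panoptic
+```
+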
+Panoptic segmentation results:
+
+![](./docs/visualization_panoptic.png)
+
+![](./docs/visualization_panoptic_added.jpg)
+
+Semantic segmentation results:
+
+![](./docs/visualization_semantic.png)
+
+![](./docs/visualization_semantic_added.jpg)
+
+Instance segmentation results:
+
+![](./docs/visualization_instance.png)
+
+![](./docs/visualization_instance_added.jpg)
diff --git a/contrib/PanopticDeepLab/configs/_base_/cityscapes_panoptic.yml b/contrib/PanopticDeepLab/configs/_base_/cityscapes_panoptic.yml new file mode 100644 index 0000000000..aa9466ac47 --- /dev/null +++ b/contrib/PanopticDeepLab/configs/_base_/cityscapes_panoptic.yml @@ -0,0 +1,55 @@ +train_dataset: + type: CityscapesPanoptic + dataset_root: data/cityscapes + transforms: + - type: ResizeStepScaling + min_scale_factor: 0.5 + max_scale_factor: 2.0 + scale_step_size: 0.25 + - type: RandomPaddingCrop + crop_size: [2049, 1025] + label_padding_value: [0, 0, 0] + - type: RandomHorizontalFlip + - type: RandomDistort + brightness_range: 0.4 + contrast_range: 0.4 + saturation_range: 0.4 + - type: Normalize + mode: train + ignore_stuff_in_offset: True + small_instance_area: 4096 + small_instance_weight: 3 + +val_dataset: + type: CityscapesPanoptic + dataset_root: data/cityscapes + transforms: + - type: Padding + target_size: [2049, 1025] + label_padding_value: [0, 0, 0] + - type: Normalize + mode: val + ignore_stuff_in_offset: True + small_instance_area: 4096 + small_instance_weight: 3 + + +optimizer: + type: adam + +learning_rate: + value: 0.00005 + decay: + type: poly + power: 0.9 + end_lr: 0.0 + +loss: + types: + - type: CrossEntropyLoss + top_k_percent_pixels: 0.2 + - type: MSELoss + reduction: "none" + - type: L1Loss + reduction: "none" + coef: [1, 200, 0.001] diff --git a/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml b/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml new file mode 100644 index 0000000000..445b11fbdb --- /dev/null +++ b/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml @@ -0,0 +1,19 @@ +_base_: ./panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml + +batch_size: 8 + +train_dataset: + transforms: + - type: ResizeStepScaling + min_scale_factor: 0.5 + max_scale_factor: 2.0 + scale_step_size: 0.25 + - type: RandomPaddingCrop + crop_size: [1025, 513] + label_padding_value: [0, 0, 0] + - type: RandomHorizontalFlip + - type: RandomDistort + brightness_range: 0.4 + contrast_range: 0.4 + saturation_range: 0.4 + - type: Normalize diff --git a/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml b/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml new file mode 100644 index 0000000000..d35e90d98c --- /dev/null +++ b/contrib/PanopticDeepLab/configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005.yml @@ -0,0 +1,23 @@ +_base_: ../_base_/cityscapes_panoptic.yml + +batch_size: 1 +iters: 90000 + +model: + type: PanopticDeepLab + backbone: + type: ResNet50_vd + output_stride: 32 + pretrained: https://bj.bcebos.com/paddleseg/dygraph/resnet50_vd_ssld_v2.tar.gz + backbone_indices: [2,1,0,3] + aspp_ratios: [1, 3, 6, 9] + aspp_out_channels: 256 + decoder_channels: 256 + low_level_channels_projects: [128, 64, 32] + align_corners: True + instance_aspp_out_channels: 256 + instance_decoder_channels: 128 + instance_low_level_channels_projects: [64, 32, 16] + instance_num_classes: [1, 2] + instance_head_channels: 32 + instance_class_key: ["center", "offset"] diff --git a/contrib/PanopticDeepLab/docs/panoptic_deeplab.jpg b/contrib/PanopticDeepLab/docs/panoptic_deeplab.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..ace44918e4ab3877f23d4767db57c42f857add10
GIT binary patch
literal 117991
[base85-encoded binary data for docs/panoptic_deeplab.jpg omitted]
z6kMDAg?iScgEqnlngq~i4sJXRthw0Fd_MXh)$2;(T)i0?#T;sRA|MKEouS*e>6GXbZNxXtmwnWW#~oyx6?t%S0y`!!@Z$lj z-ss1*&>0G_%C~@F2PTs6H14I{C&>B!4|(=={pGt(h_D(iuDYESqZffiU_9ITce35!c;vsdGk^JC2IhaeeIA_umH{bV6Ep#-`~1Wb40o1v z*SEhyod$bnx7>5UU9Z+cVKXpNp|myMYD2X2QQJ z!GCtk5#AHGX}X+K8tXtFs`1D*0L~tL{ULo$>UHgI7{&%a05^h zbc-|xS-4FcJUhJR+&teNZ<58Cy~BtBLmp(WOB85bc?dgDxIQz+0@WL=5a5@+#^>`B z!&8&+B0^IA@e|xsZ3jGGl)_S4?(}*xU+TURcs`(KTW2To)ea|0_=SV5fbZA?RPo^U zILP`3SkQ#gQRYa1hb)=NeSy{qfV}Hx?&EOfZfZyvZEDbF*fz=Oeizj?Vm!v=J&=-r zfx5h=5~O~1X?*4s0L(5xYL5jdyUBMS!QHZMgJgw;X}qEh?~uf0-S#T7O_r6v+s+!- zS8|c2pUJC|he8!?kqQpdIxGQ%KB4Hdg^FurfEu_b=**P2ViilS>E! zR^9O)iQdaVzWxc2rE}25^y9<;6AWrgqv0HW9tekc{)cGze~{>j{~3v%cW<*_U091_ zbZ!t=#XM!8GV>epnLiL9{z(b=s{Q=1HJ{yiRj>hw&T<4a!J2hqD}N=4Ol!Vn4>!v9{My}WRkW>)|?zLS>wy| zani%gMYK?muD`ME&g=A z7I(deoU&1tk$tgKFL%dfcqv2ZOoH9K6*W$|1eAb9)@X??@TAa^iky{*IalPE#u$W# zK~s8ijSnNkcpzKpnW}6*rs&ZO>XGHGy^j@Gn-x+A^AvgNK^wQ}gMnUrmRvoehOD{RstAs!c|(27fA~&V64E+)<6$HHl+zS_aL6LB{x(a+lL-hK)|mxU9=5tq`1Gi+op#|h>*`%AA=5__qW zh>Oyr6CwfqC0n1pOOMn;p@-xrBqI!Qm>ergJH%DBJH2&JvCpnjDNc@`p8EvpOK-RJ zBbLSsab?Duk;$k|#=L3*b&t@xoR5OR_>=r<%mvK2XLEDvjgEiUME<|O<-3<5a zvAz+z#BFh;BVVeq8{MG8W>oe3Hs5gTEJ}E4>rvSx$2)&EbfY!XBS$Tk5Xr-*p7+@p z-Mg z`nt&eyS6_F*q>Bp)Z>4P#{8d^ucOQFMwd>nU^!ZQ>&BaFa{zlUFLAbKu(_ zT8c5QQhMA~1?#jzr`qlwiL#Abg&_9?8ZoE90>xH0NfA@7r6ZA0j^a>C7A&+fP}P#% z@nBppZ-a0_dhGpsZY`JqV@J9Zv)r%KJ+L06NKAk^LrVoXRqT5CDohitWin|NSgNpa z>Ff*l9(CmPY9_OwHB-f*k@OJs1(KNWm7#y4O?|S@zjYSO`vU}Bfp7URH)WR9)sgl? zi3@-s))fd#4J-CxpV!w6%AKh`K}z$Tv-yxNV&+EFmP2%|1DK86S@xAPljczs5BuVd z`>h)5s3Th#TKY#4QdTmqFopX|D>6KrI%gru$?6_389&*)JFXlh)Rrg#mO9rxd)@lr zx2DzpAB5FEAs!p{#A7I`#vt4t#P}MAMwe9ZIi5@0BTzdGyC;n(l=jNO$vdhoAt1`AWHEe{1 zJ49TD%^tb`*$5BY_2+NX!@M025hYJ1UvaK-nU}+k=So|2svI$sIu_by^)lG!!9!!O zwN_r^2CI{oTZ0u(wf6Kj(4Q$707RciA1XjMwMRa+4|tb>a=pj%2e|ycvHk&x3KYaW z2Lre_zzRwQS!EMow0bqus-`0MI7dF!L5|;Ff~S|wrt}$I0J=VBnpH1f-sd&4oaZrK zW^NHHB*S|k_LM){HSHm(Ia(5D zRdY0=g?!+6rCc;(;U~i^U+vFcy``7fkaJUUL9|e*?s0}k!lmnHr94d< zE3WdhJg{%qy`>^gCR?xlArNcIZ9RR&{H8Qx)_UFY203shcjey{OUGzmbX7TXJ`(4f z$F;^7-PI`tyMXu$Y6AQbvHFihB=92rg);K4&%Rhs4(5_Qe}UA_tOJxrl+=-0EKNS)+x~v1g{ewZuXy6(|dX~Oj_7=ONB@!mj;k1SXx4e@dCVn3G+{g zgWo;oS2A?9x^nj%b6-&ScdpA*P?*zhn{a8n6GVyOxF^-PRTKyl6GNV3%058~YytfO z^CaqN;5>cZlFTOHzax?VJNu`c_&Wz&is7~X?MpQO-DCdVMgDB3{erW$2pZ{Ns z4=E`TPIPp4b~tzVT$=(s`Z!C7W8)?)?MLJHoA2BS);?>r44@rQDk%1=y`u3I+4!UQ zd9%bAv8Y16tYxpywXt(s0k>wrzl6`K_dxwJ9cmzbjX7KsQ||P3i$a&$VOw>#=+Y`o z6>a!}(X)EJZxIuB+CytCn@V;4&^+P&8l_pwQR{~z#qg{f7SL?&WYOidj0bN;X*5M) zrw=lX*%E5fs>SF&B?t&N=1xO;xAzrCz;+`DvFw|^^uaoWZ%ud9pRtyYm{{gAR2A@( zD6FRAJ>d286f2Dkm9k^tDi6}H<4lS!kS6CFemTn#4zu-Iq~`V_msA2h*s6=tuaiQO zJWJ+#!y2%L(DG<>m|= z=@;6uqS}0n+JweAwWHZ+5i6+_qTK{J^EPE90fXUjc_o>dN(5?gOna?lpjiI}Y>$h~SJ`!_cjUoUWc1 z0(jlRSb9Lb1v09w-_|xe(AI!-_C6qWQ6i{e{i}+eaIg8E?cyubB zNJx6gBTZwLZNx6h9yCtGXyq0y;1JRg_(ECU8j>lVmCY_TlNx*AXwbOWnn})6Y1u}8 zlelsQ7vMrOSEqG=Y-G`tQlc8`>vo-RzBx6!RjbEqy`o89RaD=>8z)cf$`8@3d0@k+ z0Z85Deh1$FM^^Pevd<4I{~z!^NliT!uELpk9>-u-#y+pm=<^>DwRKTR2P`rleMC%Q za3+Cb$e|eZRh9b0gl^U+o^{rNJ)J?!!NfAo}35f=coH^Ecd3tPbdEQ zCxZ700iUPMAB~jd&JD{SLp+_-Ki8G4Ag0JR0ad;|36w#O= zM6`@``;`OfcTZnnq&w*PZvThmvZ?cH^$2_Svb=HNBcFTHeCV@|`h zE-SsS@Y#k*<>eL;V!JqZs7j}9HqSiuNlK=-3CGp?(K%8lrB87i&NZbmg5^j?5?#6f zG0BwBG-pB@NI8w%*YZ+(9HyJwd;4SV;ob*&2c!VAobF|@=~mr`?hJQ(?)too(@^6~ z__(F+;ntTOXs=&lj_fdxo=TMWLU*d(&r=31q~Ib)4VAm#`oTzo`%2J^UgApHAoeB42@o|I4ATSzgPxjkh}no@`ER{ zQ>;(y4i{?9B@j>(CAC_Ga7DJBYEOWiM8B(Pjk~6wju__GrGH>=exY~N`_%7oPvOI29i3L!kN__-v)+Ly8kB{!5=g=l~AEpFgc+ z1DDix-|Wl^hcK^Y;obuZ@oDP~nXZW7+tXWn+}gh{H;5S1F(aeJ`lLhDPu74p$gmm$ 
zp%;I=nrFMBkZ_>3*DSQ{x^Q6HVQWWe7Xz8lP3D)Nx`8;g9beSqaUBye9o#8YLTN5q z1244VPJdtappl+|J-u;V)eU~fG|17T66S({0ntXOUe7X&IbC+A-dOq67IyS?vY`yA!Wpq zP2kXh^C`!cX3AL+;k?$>){YP2@Mn5(k};F~2iOADxTr@6Oy_iiEIO%%T@*wduA|z( zsp(%@dzZ08#o__4bBrA81J|Y}>aORtIJ2~OJyX}`T|xsMd8-aeisj*)Umm8jZ=@CL zffiN8KRnk7-*F(C>dNQjguDP{N@4!o53Z?f9uKc(n9Q~at!i1fYkC)GBHTfwpgvJo ze0Sj{p^S0!E_=*fYU)GR%55?#W`bv8vIrmAW|BVP9^NK~bfDL}OpXXqg$^d~`0M7m ztw{TuSaQh`IF@`M)#+Rud5h0*?^OJ5Cccu~*xfsv*;muw?xScY;jGmnCcl(W+CDc$ z%G}Y7Lg=?FazBhP1U%g0ZUGS;^!*t{}Gkt_5>h!E3}7yV4P zvaPf7b{I6x!`_xF{eDyO`r&d`p~f^*3&&@NW80iBEi}l|&S|ZY3ZIm-eRysxb-Wy} zh%X1ebiVyea5+t5tpz?dKs%k_`Rte*L$5%f;5&kQFp%qgx zQ+aZ@*Z8~1;*4gza;6Vl<)6r831%+l9qA*CuDk2BCYlpnR$kGYZaI04MX-I93R<(9 zVH#h1c_#oNToEF}vE=VfuHvY3RjR0wz8J8$>*)=Py+3&Uisr6N4aAvBMc5HkiEgp} zaPH$<9K+RR?3{vyO|r&)(*R{hjZ%83JI#Pg)v8!+L!oJP4`1iRrAmozQ3Sp?Bdj-7 zz&mgBI8v}2+UI8YPs=hrp;Qc<@F-m{8oa@6plZ4KP$^SK>3R0pC}=^Rs``7 z!D&cp)mmFTM>2?P0LOB-GzCk~%Dl@&|5Euu9REYp^qT2y zjcx{>9Yu((Oy+CZ>qVX(A92nma%1=KwA9-#1#xn%DqKjF)=|!L)L)nyIBIMy)E>EU zJMoP;<0}?F^aE~|;GL3SoqLxh_@=>|nctyxB;E1W@?hNAkjiyY3;f)Ot*#@3Y~Ett zpsy|Y>D;@ z!Ya1HDGNcohw&7MQ>9r)@0m97LFvcoY?TS>G(cKU0+vjntIQQwHlu3G!Tk@eW%e1^0dy^SVgww+t|bmzs~V>}zM%omn__tk~S5S)?#-TV-FL znbfK05aCdDJ+(r$@>Zhh1I$s`F+M)MLvy8#;WN9VD7RoIYnF=3+T&S#&}7(iO24PG zqmrs3sa6I}D#D?U^E7ews~^hGwYhkI^w#SP@44o?2+9u6iPH!Vbt;@Pr`CEJH|A|X z;2Pn@Pk3b-abJrkTc^={x-i5Q?Y->w zyNHsNr~>Xh@IAN`uTos%z1pKfxF!J#nljJDv9Gox!c}_h{8NpOZ7+3f5vN>Dr44Pb zHeGQnaR&3#Iz7eg=oc}Q=e55n*OE^`wc>t}K%S${y6f4Qek0S0i35$@qV*7`#TmLt z2P(4E_v~4GyzDoIb!1fcvuGIXUAZyM@!TOrH_`%|Lj20w5@LaF-Sp{$H6YX)ZEUM*8XX)_)nZNFvKdHIy>1jZFRgd@71=#Qmdk{2G|7pS1wAu49 z0MJkR9j{t)NvVa*u8Mm9(M7+viR(A(skXV`kNkbThq5nJ;!DZ_7g}48ocz zV)f)x1lzlE`qF5qcFTi#)2m=1ol$s7XVs2Z++Q3HRnBmN#mi%J=)3OZEW7*|IX~y?sn?^*l6~H1C>z3UbEZE4+UPwlsjn+}3i`A{wSwc%5aeEsQI&U+7&E=febpbl=MZh-dDIGb>M za<_?Vi^sbbb?JRJl~oW?c|k_6&$P$IZ3V&c6MLMN3ZahIp2I~&$@|J!Dg=bBudi!Zr45YCi zG5!)qz+u3QlK}a7kL#i{OY8Qk+1w$HlmYBow@~j-PXb-F(nbJxp~!=-DrTZ&bd$Jw z)e6$vQ_5TjzepSGEA0lI(^=NDiZpEtQS}VqmE9%eV01aB7Zyvansl3G6}#;|XgN+2 zAnb79oreYf@r6=7GcJbO%Z)5?0ISvm*g#*ShJl(mYGu!6&S^R+V*la8K9vr^7$mL< z=LA_mElI8NcRq(a5MPD0Sp)0+baGw*V$FJ$3Nqf&&xt`b?j$ZUPjY69k~&nK-a$3v z#!=H3Jca3(KCZ;>HsCZKyulTNn?qr3hjT=8Gi!AAGlx4?F+M+2lEt}?%a5|#52O`B zoHd&DKS7$VfFa5Xut;lu;BD&d9W#-KddG_xp@OF6qZ%=&T10S;t}N1+X%sc3YBaE$ z_Zq`|!=VnT9b@*fgs^Xp13I<^?lBXS<5ARYyTs441`FoKo+?*>1q}&12m?}4h!d46 z$#_CZ3B4(-9@-kyX#eHcQ0=l1_P(vt*R~ILbP(VSD|f+*5C9$&)2~f~ z(q66%i5f4&^&2}`E3=wG$u@7=H`L#C5Sj%fy6+oH5I^HsG1ZsaU)KYwbC@dDR6aV@ z#j;;mK>687&fMrMzlZBUr6Gm0M^Q4ba|{!4FGM<8hwK3 zQEz8#_A7jTL{t20B5!ZMI#zbHw&YCCCYq2)ttkm#6%0N*YM4JU=@TR~be2={ryt;W z30dCR()8)noBbNvbge11gB4X7`p%7}A2l9wX&x{=C_->us9o}72VspP-AAS?Hlwie zHW_cj&&C;&F;Pa>`EQi`dsT7aoZ;Lo?A260~bR;))TpszHz zKmg2W7clq2@f|8{^l%q=P>)gZ&ipT}Mn4>Why$7xe%ALYTP~@>!vyzC>=xD~yMu>? 
zf`|%wKeGHVi@)sSnR=1!3sc+AKBN{%g^Y3f`7xDNu3MuP$zOQ#AWCe((gG5%K5qv1 zGSG-7WL{%GK`Ia&kmtD0O5FRMtPn$GUYa2<@=`mGn9*(T5806g7b51HJelD5#9=$EoAC->a0nZ=_AG%p!6omSZH+AwnHx+*ff z^2J(*TS`2^gp>&GJINP@yr9`(4Ct};5Z@4uRs;@V43>hd-mk6VtoMZ}6(CFHZpvSj ztN1?^35@jD$?zLnsNvhbgK2}jQ;g*HI`<^+CPv8Mca=>i5g!?_!4{6BIJ%Yd-$ujn zXcyj8Du%+1brh2yD``sOcYEtOsSrBXMq1lGd2evfWwOKqhpbOSF)i4pX%4L{tS5j3pOnic7BM*YWd%rw_lzH5S#*(RiA6v)Cg>D8&BoZy`e+D?gS3#)Mox(eR7ZeLw508ds_DtlfW&ZXq&U_5 zd{;9u>yaVHR{qtwtBDI-@tZ{XX@(%9EW^>*#(6~+P)6J%%tMOPpkAp=?V94k(`?M_ ztPl>5K)p4q-Y;Y7mwZQ)mZi5bwUCn4;U{aJMnxS;mB7pj4S;G;N$46VVJPBae@y z;NS1Lj}el|ZF96*rT8l3-Q~Bro#G^Deqa6vVzYk49#ks(;H%p=GPdQXZnCYd<&=Q@ zwsKvAn(ocG6s9!y?)j3B^AD*^>I?e^@;k+0w$~OGAFo8RIk9@{q)2+ZT=L%n1y?Yh zaFCy=d-DXOLe=5=O@zO8d3nG950%b<(9}pe#`9>x{ybKm7rdk(ENA?rTFi938x0O( zjbKYSOS#)O)}z@MG=NtyMcUllssm=j%S5@-s0bBP(_z%pQaE^G9Sc1Z!%=QdI1MZn zk9dN52vaTT z7Q`Xz8G9h~r=zIGcXdlz&R?M%U_l|&0P-_4==-(tH`-8()HLiNuW$LPaDr9ZQx)YB$f3L#(}5{k&#k?975w1TeU~D^D__-1M*eoMzl1Zp zM~t1k*h_(oIbA$uUBL0EiFQ4wetF&8=(YLkuGL9tzXf~rK=#VB$vfU&>_aE-X>MG~ z?kJw_g_ef7X?!&O5NdaO^_&!)gKG7~l4b^JpLQ#ct0O!^l;yfI8af{uCS+WK7ELc< z&?zmp+hBpbB=LPDM2qhe^1S1RlJ5!;QM9RyB4>KWhb9RJofj14jOFNU3d@|69H;qBiK zbN$oT(^v$I8n|$ioMBXB^)4KrUJ0n)$+($3-^v9MoY=kX^eVa^qy?u>E!d+OHIg@5 zp||vLCvtbTEAlx>4#*?h3-JEKJ0-}X6nxQ*Yd=}4Xv@Rg)3X+v+2@NUZg5OFNjeg( zxv1sYt6F|HFYjV_6xofOmw3?j6WP+$am1au+0EF)Dh3xJWc#Gw=ftgMS>&o&t6TLp zslud(<9d3Kk~WJ6_B2ZIpLEkdJx>IAsFqJ+rKD6trLsztXqbHCHL{-EpV7>A<=3bV z?K|IQctP2g^OD@0jw9swHMhRP5O-RW<4(C!AhsQ6{PX>PdjWs@RqXgn>dbyG%YUo= zL;U!se2%{bD{Fn{f2+LD7hWYDS9|cReV`;7GL@MZxDBnF0;CbOsIAp|;6 z3Jq}Hnm#*z&!`V0%=iko1VYYzNhG?Q8YS+Xoo8414|JcF(T-i+tI3rtrDH7;w%u9& z4wZJHQ#SF&;We5hLZmb)&B-Ky9Y15~5(>&s>YOPSXH=h1A|t{|6xNVaK?c~bTb{9i zVEhgbfVC^@FuwG{us!k*1%Ut5o1rR+D@BYjx20qq1qI3ym&m8YnSAS z1Yp^3iN7d8N;y;f1mD2{4cSOjum*ry&Lde&Q>qaI^ z;7R+B3=9j(_IT`*WtHfZnE-k}C4M@5pw}rcV$o`}uy?RI50A%n^&b`Vqcgukz z^EWoApt|U4rbO4%L;iKAQ0Fy$Y9ENiwYiYDbP&S0$y8O;weZ@w#m zJ{=1v(Xu`fd}Zv7JhO`Rv8jvEWS2!6GbC|TP&GM=Bl8EAn;y4g<#P3+Y+bY8FxUs` za$U1EuDLIJ*D>+22d9xfWB||oOcI$in*Bn^b&(D{r0$s+tx$(Up6=z$yQr?xq>ix5 zZJX;x%SmLWL|uN&@{3xR3Xy@Qm>pZ$yH6?a6j4E+b{hCq@uvia!aXW+&fb}p?N=o6 zE6GuAkQNL&VPHfd`YwA9&dc+d?1u3!FT-}hc@`F zJOu;ib4E8OM>9Yfif|9$_EDE%S#KT;e#gEdEA~snz0i68ZU%5sSJ$ zcB9OCv5`LOh(U^}&D+kSLG)f{6$9XJ=}lEq20Vp$Fa`8&P*BM8*5gWjtr<*yf-EQg4F~&3=Y%pOmm$7?TCMWQn1%5E5{8%?> zYe*HO$jdR#5B)NsPn*1rkX%V3iFj4DC|uAO4YK4lXqz->PvpP0>^3|n^TIoaZjS5P z&zS3@t<2;%o{nvi3jOK&~QwJ{QpV1*Iy-;e{T{V)i}vU z($y)xj~3Q8M=?Cu#B}nqA5Y_@eqr;lxCkM!Nl#IdqJUJjOThuT2;r4co_0+{bUKBy zp#^^DGft8$q8ViK`7_`hzg(yL=4f?Ad6UnJz5KoIp#n?o%IUM>jEMsjv+8=5yi^nS zS66pt_%Hg;C$_Lbn^>;KB<00O-EZe@9vlTHK+d>g#`x2khl~W zBv!_d5U~wMeX}0I0oS{qAP2^01~58|(y;qmbdXgmULxJG7~FDCiu`jr&sO%?#uYy3 z^i8IcD{lSqn?GJARF0bCw}yo`S)YVMRUqA&>dH z&`@!TK`Nb&rt1Fl`B+zbsYA!i6jL*4Kn!nMMS5}NOtaxWXTG2Pi_q|#ria%K-zO+| zjL_}!^A%TrXIg4HmsR!JXdAmT7wh$5a>!xQaatG zLOxIA99e?aXGtN>SMc_W&c&mTcjxb5?}R8nt_)cFA^3RtyI zuOuCO`A3nBYIQNPTuj;lwDy5*m5B?nOG_YzAi}}vk9%DU;slw$Z19UP6{BDN8~DGS z{x<|K{+}I!@{-uByf>~yQ^4!1qGJeWE z`*Tnz=qY=?xK*aon}=15Em>*&WlF^g!t!ir_9~=>czv~w{`8h3w_HT2=iOGeNV1Y!T7E+#7=l-7B^Bvvm9UJM`ew{HC#<6R)ceSxvyK zY$)DFE6gt~XtMgKfvOl5$*l!Q**jyJ6F2xZ#+_z4rP2189fBv!kXi`@wsCa1%fikK znw>G`N?eBlwEc?D?<~9B|L5&91d+0-RJ1^`Kf5+=r!BWvwNKlA(zjQ zg6LFNg?upOT%HzXFY97Edn(|-fa8;T2`PwC-6QIv19&!%>w&WMgO6>dguMD>Ulqrs z6A?7^f_+EdME$#S^#u1s+6duzy;Nj3fk*#4Q~2zf6shS6canZ`gN96d(~}kT*HCg6_%KdYD_T&Z1x@5bby`< zxP>A&I|#+NciDDg{0z3_Hu;@CFHfFAU%t^)uZDiGnwbU`5cTbl+ltbk_pAbDNmTu4 zYGNqzVVxY}L|#I0?F`667qLG7)QSE90f2QfI;?|?iVulPT0iRYv;+A&H{@2C+bYlt zYepa6hfX8n?SFUH|NIvGN55-nC7~Jt8mMk}pF?=>H1qO2NW1t1kr1Z`E|G7J$A5#H 
zMd@(mbb3$&$jUjsn*PX!_L(bY`cwlD63?lip=b_mEhq26m`fJ5goJ>mtg`j#pB7EO zI(-P`2U)pvM6ET=xM=Z;ve_Rx+4%=EM5T-{dHi(8>(6q0PPEwtDL0(Y1A817q~fGSrro`I9b@dxZMhghN|(MT z>c1fK=5LsA{-8JJ2l=C-L>WW)kQjM>9^Y5U&!3f^g=1+i4XN#`umSmb&B z@)6Sj3w-&QyZ~8MGU2xsa`}skh(1~SX+aoQL<(0(5yqFqu@&N^+gbmBDOHQ>s{Oj& z-KY=HsTnoH*|UZ|TF?vYFo0rf=;v}_;JnJn8Ojy{P_#b^k?#e{*EU4bhr^^{(^_&11G+EUFZV6P3lo`9A&+6r}&ZD;hP|@Q*R|@q1@wq-_9(S3VL~X zD1pR(v-@^8Aoc?#2KY&-paKK7h^ds@=2b66RR?6%FTF9M5n9VThlb3oJ)Aa#5?rDw zY;LrgglDjjyis-(XLNaJTd4b>4y}YD>n%h+qj*N~Jf7;Zde{%Ixdy4io)=7EXLk2@ z+y<;3-2k@kah!;6MtOWcS>Pyo;%ph?&XL|-9lY!b)bCQ3of zf+U`#VlQNm-Y}0k>gQNiy6$_+_lBlj)mbfLzoeJW{9-&^=xC)0_^(B0zh3=sw!o;g zbYj%L?F5A(fvwqhv?{bib@IZbv=1zHJ=()ktW#hkbCT?eWv7m`rSdYJB-_0l=C%L;;LgnT z06a*%9u||-LItvnis}(;E*=+9`8pvo(g3{zRmsTg(MfK}DSlo+{)PQ?fxMkM(C0Q9E%hlL@Rm6sy(!D=h?Q%r}Y1VJNr(#`NN~}2akb$r$*_g zIlB?cg)$%KJ6yf%UtuB?D3ax)L*})pLR1Ush^YT*b_FmZtASG?MsFw2KzciVMKuD zG8KGFf0$Ht3aOzuGL(Alqmhl zLNi8Ls%Lw$%&b!(N~o36@zAtfdThx&sS4nJ<2Kh9xczXei@t0glZ}+0*Ka9Y&+^Es zvxx0W#yl(l>~CS8Acs@Ar?U74*!TZd0Qg_|&7eh?QfY;a`8i4$XaMj89;=p|+3WRHMy`z(zIsa-O|yx=wxDe1%Xm@ZRDT@#lYBMgTjKb=zTSHxXnW=d0V= zE!sw2!-lUi5JNJ4h7LV*r?*wIV&ad#uj{##cldaN= z`T(a7S2rJ2lnQTu(=WLgdP5!au5HC z#qQvimwficdXoz#j=^=SU#B}}WiEq4znSIT5y35UPnPdxz+ctQ;Th4d-(WKiM9p;N zAjRp72Kq-qDLu&@j=LevoRKbU36s%M5W}(_TbmowF91yU11F}e&t{S)cEe`twsT`= zoKq6H3<-^yk`&i<&5iOS89e8RGVESd(c9Rvi|O4d*a`sf3|&tFP7!s)^t0VHoU@04 zzU_tmqV-p26PM=^qefHWbez8tmHJ)_{c1iV^=rOpp*=r90RN|jerI?6Ff%HT*Y>1G zW9~dWq>OJT@lkzyZB!a81gNK;!d9Hof$EP+gJj%N1MR)Hu3M)BTnr}&j>~Z%-7CEM zI}0F3Qf-+fu8n4?ndZr?=(6~cX-q-sO*_^rhinfF%;i3=#wglYMsM3)Qp4`jvkoH{#}0@v z0aDMo4#L*&wevsHh9Q@*DY5g5&&9+W$kn>zG^)O767bCQ7?B$-MRD*Qw=M~L*o&J% zaem{;%x#`0Fu}#%K{y^C22T$Ce z+#hOkuySFLhsS7QQ!_3m#h@XmslsMFxEpJt9t2Pa${Y7qlE@`Lr&&MT6GFenJfycX zW)${>5C}K?s$xzo0I0|WVkGZJ$L876ui*sP;B4LUo0AzIS-Q& z^ZjGVVvqcGt#ym|Nzbv5TOL&@BD=A)mRX`#7IV%^G9+Fs4C=Os<2MQwy2{k7AB#8|DVM~Oo)+DrAlxJ{O%As?(p(=8q zNyL>h;B5PhL8uHv61B=mbe<}_d)uP%PPx1NU~&@aOB+@Ctc&p61MT`QTXZqt6fEgd zCtcGKSvdy?;I79Y9^$!Ns70BzB+}_CloyLKSp2oy0&nc9Y&CphY*h02T!0@@of~Ahv`V4kQ4ayhcpOR|)c-~I86V7{ z7;ZphW%4P1=3-!;(Q$iqVi|9Z1CFmbr&V5Va$lfeE`acM-5V9jyo-&LN0GB+sK>o? zoHSSykR>P6qWmE{rao8Esyo|rCP2nM22x}1VP``VJSgI;PUfg%H2BPgKp$1p{-JSB zqvWB4!=>(>rZ|X)TfcnAg;`{LOF#X$V&5b zhUu+sGOu@C7mJ^URQ6(%j#KiUnG5h<&bG{Fzjj`msqy4mJwm?F$5`)yC-;!wIxKHl zuc?|*-Fr2OpsdPwM&SAFq%{~@(SB1)*(PKh@x&G8*D>f#8TfVkd3Pm!N|X9fYwP0#q|mANk|Mhc6WyOXcz@pRt(o+NvFj4FFd2#X`!~p0Zawv+&9YKyw_SgcOV1kW zcl~zhk@&1{!r zVrgY?!+cX~)|ykkyYI@jM$>S`4#?v_D^B)pI_IeNtCUxZsD#@|YVKs-Z|Dq6bBsy5 zUs^w$-voU})>udd?bf`e8%J@eBS`BqTd1G?$DVZoW~(wCWy5>3oM8dOd8LGRuOFni z?d5sSy#JWw{h;znFwT^!gL7x(pRf_~y00k)rXk^82X zWzNc?`ZBhkZFUeP=l=#k`};^MpoaS#X_fjtcR@v|=_ca8`O7#)}v4JnqA@E^&*_9@{@BmwvZf&&SXUK5#xbu9vRTGBGyssZV9N z^1TZs=)*4pdR|mIdEeXNh$qPu$pIK>*(3~FCAX3IEJ%jZRS?p?0vOGAeU+Dp18!{K zg~ROTRks4$Fzz?iGIuJ|(-T#WV!ZYX@@aqvji0a&LZumYM3O#zC;L^IrxOxzV8CD< zo5w;s_MFfixkU&8HLgsQX@=C+{{+dqRPOCuRnH1YdR%PEVnv(Ril}TN@zhN zz3fJ630N^1<1nNjQr-Mpgh=h_R(FV4b^Y^>KL>u7vp?6O7aVyBblG-Io!sgFE zJa0M))LDO%!DjAYr)G``y)`CGDYIU=`U_49cP4}`q`vo5NOO?%^PH>RgRov*Fwf(R z)>=>;@A7foXSrG*57r%(7{6m+lfQGb{u4wW=!+zbl%H9HodEWCmNTLg0my`%5iMG3 zfc$P_>pm_W3qL9Ic-9yfu4M=?#55cH8KxkJr?STb5)Lu_=+RG+ahuf1`Q?zVzGXSW zvXmfs$9~=;dtfeTfqwkj#-JDAr?RPp9uS4(nM%^ZO0Fet5L>^VmU2F8 zeO(km3;_=P{@0g+lV%C62ImTGc86>d*9-Qk7ZoS0&n-l<9*kRQP`)ghiJx{K<}We7 z&!H351a7o;v@vlM})wbpIvuvJ??$I{pxf*Q;?^^AaWx;x$Qb`j`g$DFW{(tO! 
zXIK>5wrxQZ1PLNJgCL;+l_;48B`C^?CMN)pLYl1L5$ zk|p{I&)yzSy6?RE-nnPr_uU_ST~%F8RjpcUuDRwMV@&W+B#2CN-OfeIEIvOqh|ai1 z3T1j_I3Fnzji@&^@_t4|&9UBlKQB$Jt&}MaYoO0qrR0XbOL+Mw=W~TVrXei{RD1a_ z&U8JH>oeGAfMJa}Mt3TOzS}RgW=H)afw;p~#`ZZ%o8_}b9{OB7fcH=A?VT~9#hCemG36Cvo zj#H3Qa&ZT_`Sv%*&j(zdmNTPS-KCf$2&%MYl&2fn3Raj`_QAg9F;yE-EiUy|AiS?S z@Et-P6e_h{CCX}UURRV64?c1y@}NIWJoS3+%h#PrR>M<8a~OcY0FF!PoAciRY~kc$ zMBC0SFcqVA159sR{YG{g|Km_#6G9wVfrsXST~Xi!3c_r_g?8&LG@#pym>v2Ke3EH5 zoVQw%G;KUuJRqFNATb8TuRIYACs3CQkjG{(Cr_Z{;Mi+IjGe|e`ogPSX_A8nndM>aw*f!WuLX@iTcVI^#D~u0Nb1dfPzg>{IH0l}CSiU5Xd0wr;_p$=_9T7+ z7n+29^%hmSTwfB49kr@&Sp=XHTZ0F3rSM-2SG_>Y_TSpq0Br4V+{McHGk?D)HfWZ#j)_efvV7b4x=Le!tYO*R$>v8v1E zu$cL!)BCe}6**&1KCQy&>Uxgv$$*H=Nclg#j3U1pl#9G;G=TX`;M!TKq(Yb|{=~$-amoP}!LAz_x zC~!P3z!kn@mX#QGuFWq%cJ4jN)%Y`i4*5&yAG^3T_2bQ5EF3QkUFhAr3-#PaehS(4 zJ(;?oK*xY~4x3PgE__funM~T-i|d{^+o>!;i^Sf9CDkOT2OhMo>X}fGrMP}9itiE+mtb?G>>s9t zuyIc1X;WA!;)-J17c(SJ+}2T3CUtcH&Wg=kFjr)My!n2MhpB2VK}eS-xJ~t4Nz(O~ z&)Ho&u1@H}cT*dsZGxj*x$j!EZ|xmtH`oLCs-tp!XA75>U*6D$PWuDd;NhICpxl8v z!E;boe?7`-yV04FiTv{meqO~(kDCvNHAN({jgsCC>ZsQA2;;4dI?8WhQ^BVNSbFi`{r(L1hoK8doy+!PI zM4EH;TM69iDKF4sV)@_zxfXTy!^ zrR-e?7be-D>;ETy=6@pZ|I;%Du~94r_$**aQ?Nn6zmafui!v@tf4UIfHEXAii@__n zpoGtb!n6!Jb8m0~1S=E+eiQ_PlIkRB1Jh?1GLtWBHcmYkV^S9Axv-5 zLGh~(_kvGsZpm8^UU~&B!J=2Xh1MAqn!VrN{ALZuU2tR*!)}pJ-HQdga5=TusrHg~ z0IWy|Os5FXs!-oW(J-}w#Bo_h%qzY+t|6^_N`3<=MF;x~01!R9eM5f<6MBz#IcanH zeJu>~ci|NNpn%ekhtRXQC|lWN8Q?IejcZ$0AzynNrc`ONe71n&K8Ir&1`-=7HNu~Z zUCRx8P+V9T#s&C;s#)hWv83Egybp7z^=KHqBgX58F*X*<7bf^)*h(XB$-}4v-`b^4 z>Dn#v2%6MA;X8f*(AoLz%fe3|lMmubgE)C{XT+|*m-$6}~ zZ!@>Ph}Y`?&iIr#*w;Yp^94YhNViV`;27$56+CNtu-WM?a zfYp?NyazVOISP_siVE=4KuepLk3t(;peVdpfYe(1GP?LKljxB0`q@h&EZIva6Is4w zD4k<~;=+2u;8xN;$hAW;T__Hg49~#Mh#4L;oMeZ$4WiOe036iiV#9(h;c|hm{|D8R zRU4G>1WzPvv?}kuAJ@}gj6553U`Q^iV;1a z0$%`PPq@hb83D<7!o-2(?(7!j9S}BD{ItPvs3epy=gP>bEsXKFNs-&%2kF7UElWA^+i zxC&#Wq@);7a-mJo3!!{0EHR$j5@$}bnEsU+11hQ-Nu3n<-W_dt8n(-T*ljeO47ES2 zQ}aFvh(V$K11Mv#k4u-b8*I_*MbbBELZ>DWx{CIY%&eEQLmxdD`+eQ9qSHb#%OjWo zdI>IzJO*vNaLQ2kFIyohI z(egx)*0zYW&X=sc`+yyz1sOwL(NVB9gOf_lj+G&)B|#0)p{#OYp8e8u2J+55x8Nm* zIpx>mt(g;>+$V!Fi0gP#oYb@{U3T(Ar4vtlDeGOt&EJ(P_2py3p{esbhRWx++Oi^Y z%VLi7YYx_CumaNuqha*r7fbo^aUdq*!tX}N+6VY=pMwa~t%S*&O>rj7fwud;%xU4R z{WFnT&{DWX7EG~!kbEOottoz|{#=fUwFz}L(a2>dpw_Vlb`Z(4i<8vfyYBOD_=B&e zQg1Kbu3lr!5nquiX;*ImeBML6BHYV|G0q}VM(-3Gd1>H9Np8LJTr?^zJMwEuNv(af z_3*2T=_|l-h^I#n<10lI5+H*hzvLd=XkX*>2{oj4ad%qVOJdxxWzO6$Lgrmy1y*mL zj=Yc$*^}F1hQ&u~8hkW2o;LJ2zd;Jym)xIR#k&8AbAG4IrAG#BRyx?f{Hg%2LxQAj z8Aj>lU$C|iZsnf&Nc3=cg&~8pzqY~WRGRe=VY*DdezjK{l3#2VZgzL(y#BUXCb-@n ze*9jW7D?n6)T@mHMmYWg{PlAZ2nS_b6O!hjf=oy{V8QA4OlGa}r_MN)u<3#=-jD;A#xV$dMZrjnY?mBNm7H^*YwBVhgepjcw7q3c{PsnBsOX6|@zcS6-j{8( z+#~%3{Jd&RJ9~%d3?AB`jx`w z2`Jt}HB@f;ngd@np;BD~95E=vAJuTUk(tjmWm`0_rTq$DOI14fr7tXJ$ zXhojfK5+)PBTceAED3O**aE5L$$YpcoJFhLE*wpR+az5ncrqi!Bi1aUbX8C&zxY+UX*>s4j@M3?d@f1s zl5vuDu>GpOg9`gxzhQKExHUa4r~vo!tdUF7<-oO~@1J68WBnTn6<0#e+m^TWCtdPq z)S!$#DNNt)YUEeiD@62&9JlC1V-#$@1Ms}#ZI=%^zkLa#8#L7V?F8nr0#I;dd>bt1 z_?u3c(r;ZZ`~HnBexFQxR=)!)Szs`u zU&*nEX0W)4$ZZ?3C-j=-J77S4)LI2bk#y5#VKkp?hGiCCf?>DDBf8$#&)14UgwA6? 
zhP>rwp=lVOR7&&2}_=#CVdbe}ZP zT@$r*bvrbyAgBFuBd>_HxXayZYxtw4NrdtqhQ~gP>O8sEIRvOS0g(@4e~xyG368{g z#dp~=``vm|X9~<$cBoCDU%M6xeoN|mq&qPEObuYAd$_4r`#s4aHT{oEh;~W2CT3?(37J3;?L9Hx7t{YUVtT8@5h_azW zXCU%pK?Z=)uGsK-rH*E+pxr`HF#d_m6gZix>4m8k7TaYz;nn5$S|IZsc1xX+OrHs=(=P4YMFoX<^bF-{rFQYzY#r{ud7W=RM%Xq%4+^ za$MMJ?sZ|e-yB@IJxyK{ba?7DWW-b0vDf*o@=*!{gGUD#yaI-Yxvi*K=q7t+T_r3#@6hP;k2_L)RPH`6y z>pa`nh<3C6#4bSBKs>bMjiwI4?8kd0#Q+{YcCB*q7x8)mQ6qM{ zj}Z!9o>=m_Rc22O+YjPw11nQMgVACzT=|Me^YpV5=6Odm+xc}z!M9*nMT1>c8y~dj z+W5jbh&C_+rFYz=bWPm3@G=AYBhDqi(@KtL5D-0ZqO~oYS#0LJhaTBbYCrVJASTE) zwsJ_$3LBBizA-|5@s?UgH~!#mVKw!_%o`qC88K0pjOByflBDhl({XmTd|Da3VdS;8 z<44J>C%B%vi4UC?t?!8Ss2hpP%t?N_wFC|D9hIkx2&v7$N8B})f1Dww@7W3*bH}VC zRBOK?=hk=|w%xOXk)XfFT78QPQiE+aoSc7>>SVE~?J8}4xYq*MEfKZSUf97Q`!-}Y zAJd~teYZX;O6gkwyzSBHT%*p3qyEI07Iy`^*g6QNXM3t&_ALBJE8ER5hU+?Qw!y@t zu}`7d=F|DN9 z0PLOw(!mEKm-X2Eq~uUs`Qq6B!AyaR6Wn5g&n9e`6aLcGPrt%_#=+x?ZLJiAj<#>+ zF9Zn)yQsWAd~ogI;gIBrH!Ye`V;O_5qrfU!lDk%rQSk`P@mX2ZQEbrgJOL4S5%fm=Q?(0`8X_+d33SDQN z30)U0or7LTe(uH@5iorD~?SNiQ(OWS%4DSr)2kdM}$B)goB zEGq3eH6Gncwb#+N0f`ECK)!3`LEoRgGNsVQL;UG$=8sk1fwe%dOi&)~Z_mOt=KUO0 zo$Ik-nE#iUy1$5gas6?>lleo^8GoPBpB0*@`8_VZAQGdgDU*qziOYdw28r~RevDuu zH4J-2qkXwk`Jo#qUeg??uHr3c~+gl z2cX0^p5k*W;b;$Ty?RCxL&iWwca1#pj3`UmFsCVn6{wI|nG?;fx~`_nQc~6Aqd4 z+&;X(10jt*eQN&@G6IUr74!IYkD8u=03-6IkX+`=+Zut@%0~8Wx*lzx&CY@@?@Q1O zHcaP)dpgkOX6?2wV`;x=l7fiRQui9wuD8?b$9%&u3M!8*5%UHj9W}6kNPW? zGVoOBI_VW>p>OeO`|?((bg9am-Up_f$j;Q6azP+pOv9J4clVSg6=kAk&}9QQm9HF5 zEgoI~*wYY%V_8T6FEv`18@DfIJA1@N)a(go6AHOzT{i$2^eeP+^fF_p6jl`R;1SR5 z_~JBUqSQq0iLLEW<|zQ$uU|&&Dh3)c(v|fy-Wl0UqH6o5Y13dowQQaP~OTmu!- z@uk8&uRa4!^8Loy6KG39;%3)gQSK+s#h&3RP%1t^O)F{Z#ywPca4p**-H1*Iut!k= zcDd}_G~mfB~P$uWz5g13Gk>w zZ0<6T92#kucm14)eznZMFbe+p==?7>imy*&Dt(GMm40B*&|6ZCe-NA$a4A5Q?OI`j z5{8pL6q>3tjQ&cW$!lvWqBhZ@TMm)E1FrVY$?{jUWDVddHrw=UXeHAY6Z&QoV{dR0 zap1zBbtEI4eV-fWo{?k*@9yXM8p(V*f!oUr@oDJL`KU}(izK$cg#wEl>FUuaR;#gO zqR<-?0ggBN@8_5_sMu!}HS?&YHn}a!$7rx#Mt4#e_}W3oFW3xRpCnNotV8W_%O))4 z-%eDOI+tbgP<}9N$b4nu&Ad;kFr{9=fIB|LT#ZDPsz_42ffq9yFDTcV`D+LGBW~mW z0f?*rC=UC>W%$*7_;p+Vs*4Nj#AXq*E>=N)23{eQI3G;aqQGjT|MF)3>4fIwgFqW_ z6|tT^4`W#Seb5^Tf*uL8&ItnTIr!@%V5R<~jK=&Qdym+`{V2y2?o5LEOAQn{rYRg5 zVnD=BrJUGDya^lq(8UObPoEe323%A#51|02+c<5V9&eX+i|E?MPN9OIH zI8N9ASAFbN05>TX0bQ!-zp);_3{{U`#;~5E$LIvemkxV#<|t=?*>!!m--E>yo&18Y zWtnv>5!g#KB7#~+f@hVW(_7KOC%S_}WN4~{koP$y?Y1-zBjHXj4qvpQCR=9F?`(N1k~ZOaf_Up_UX`|DNUi z10BQv1F#zZVZ`XyG4pTR*7raZAi#2J1yHfZ!A7v`7YI50Ijo01d;~q(LBy?P3*oD{Piy%y#zHy-F%SZBM6QKC|6d6*nhN| z{J;GU00%ELAyxnf+|O|0DnXA=!KS zYC>A6V67P$x`cwn{_9xxCOlx|Qt!yBMqG)7Cd~?(-Sl5LjkdEhN(`B>5{-kIHJ2(g zM31Z%^);plVvh~7wO)fIzKO3BZb>lv$`9xFQwZ(GF@ll&2=VUM`mo1hQky1Ol$;xG z3hz7-PL-TYmR+Kg<1dV`Yjb$W8B(X>e7{V1W=(_NRkYr3IbII@2v@w{Y(7JB-Ns{8 zodv9(=gw8b$T^N7#a1|8(W(TI*Y}O2F%0i4t%r*#+gqq1c5BxV_jLnuNjFe!1i>kN zl3^bQTp(KAY%Qm=kMBU)>;-{MPu2=PHH* z+!gV$zHh6n;;l#Usd~SnX3v-ACC+g z_gk*7B{W^Mp<#4nk*jIB8dC$_lJQN`xNXVi>fz@nnYII6x4O%3aN+T{aH06eo`&_E z_Lg@`4lTqU$#sfu8*LYV2lzs8ss51y?hOx&MMi+@(?k>owJlQj(w z$i*g@5}k>xXm0BkD}F^prie=o(Kc_oN5XyMYg!ms>s~Kw$zEOYMJn@t2xmH#TaBSZ zeg`mCvkVYYvC^rlRp!q)zL(s$r&CrFYro3MSN zV(FvW1qp3x;p3%4XUg+aW3A&i74Z9uP#q2D7Z#A$`?rR+%V2K9Nmsx0s2FXR1dh|h zGEWwMgRH0Y;#(}Xgss!$P+y3SdQ)a}V)R*s`LGmbK5}$J>N=rTrEH1|ZSfrz5w7T?BFmhz^QbH|k2b7>;IG&wfHMY63cG4!)uYyB5vtkc% zaKSKqX4i##CkTH|BpMOP;;Qp1FrA;QCJRHT8bRDjFuk@DByQ@|!(pyv++a=GMfRxo zHO^vP3Uu+&0h5TttXT=rdM5t#c>W6)YjSd7Nsgk7V!~MLDM@H@?jf}sQYqDp1v3gQ z@(KoFl!~bx$1VAj)p}f+Eb)pvpx30w@~<>z+$;BTwMn=Ivc7cK=dM%KQiMB)WW`=e zuIaN9W0yw3WYHNupec`fbLNfJ1)M*Xn)@LQvcQwq+jtgO{|;p2hAo!Xf;D1jL;eZ3 z@Moj{LC{t%Tx+1 
zCDw?vG@)b4qeRLqd&263enreRxY)csQnfVPJ*6`Rv={8`^HWK_FzDvC$Or?1jg-)A zcZq@HAx7#X!_@MDbW^~&7iMrYNNFQ=Z$s0hnPKpa{iqXO6j^a}a2wOErCJ+BUpXVL z634l*Jeo`_nIs3vfi?X;b7cW{L7k4b)x(8Nw*-=61k94&cRMN$o^EVr5F{zchYCG~ z@{bWKi90h2>d1wdU*uOnHt0OZhkKAkz=`aVV4C4+_IJj(i!=x&)*cQ~SMK@j;?sS_)$v+#Y$g zqaM=>R<#A!OT@C8-7lK^)}J#cLNH1i;nxlp${W=#Ez3 zg(y7`*&g69iWi(zl|q;=D(obNc7nMT#$Gd=2!MX1)b-@ON1Ji(Vc{fJp7UOA8$Gr= z)X=cG32jNHsmYJ>mKeCLg!61e&l<&x%Ur3HVmbWKoE-JxNN`bf!_+OpA?SAbx+yAfEb;5O9r4<5r7p z%5R+)YhmS`gqmW}(_ZcoCKu7fC3>|j`zHMv2H>Fann6ENt3&!-6FY`qIA7hR3;o<1 zHtP>}3<*otUwOX&75iu+h07GU9awk? zS!f)Tp^)CXih|HJp%sNZ*T29X{KD(`&DRzD;p_eqhWqch|8J=`@xMdc8OgZutZhxq zg-VXdnh*8ScS|`ypn`ZTo-H!=Upy4~wMbygNpf`DFQNy8HY6iN2Ja|!!&(3%iE<;% z+J5jWkFhMQ(pO<5Ljq5L_WPvpD>xc-*>|2JmCWg?q95V&#YoTZ>G>s=-KPB_Nxd4{ zH@tpB?RY~iD$%Pjwn}QOIY<`=kf*0ICSZ?!$#=E?0oJOr=H6Nh4_HqpszWIwkAbO@ zrbR-i=^8gZT7qSOpiSspq$3lM*NQe1%}`CNcn$A!<(PWs4t?FalXp<0lvJ^fJsqK( zzsH2sJ08l}HjIDshKsEdX2~7{TPXG|QduZiBBsWc;jY4MQ#CMRAk*45>g`*IlvijH zVOF)s19ZS`r6eh038d4d+^|h;-`B0^$0;5PO;Lk(7|B7R(Yv=3Nwcy)t$`XXaF~jU z`lmIO#<4`>n3M)MgyrTd?Q#xZNp0L-N0Ft|JxuZDMG7XZN`qP@2Z4+f8Mu~vUd-W){>7{XDigq}2mv}PGUl=Ve=)O9)_z)9Jh%A%i>uScL$S& z-^u6WWz2#L@D=`MW2$DC0=bm`ud-P#$!mJ-PO2(5=PLAXx>Pc`RBG`E@0G5rDabFR zy@_fcJTOi*j_ot(EZxxP5OR-Mz4Ev+N}|sI?s+%Nhe9--@R>I=h%!qGk!M|`(baSgkAXoy7TY&tyo{~b-)U*EIL9{@=Fw=SKLpx4{$!*%cl=hxc-_T;SIgjmiyz3qNvsnZ`Ni2|svJeu248YPt?3Mx6K<}_W2eVV z+t9`3PG{^DJeDGcM8uWgT3_g)Bx9Jx%Zt0(1_7b4;#jaCp(iHE_*jd&>#u+ z$-9^@KiWs}t#l}osf`$Ab(;jrwvZjy#xk%!cl;I)N$plJJx6ZvL@Gpsz;+r%b5Z?6 z24y~#?mLJfyY?)8UZor>aHI`+5Hyu27#k7Kb0L_s@`gEoTP1{3S9uY2H?s7+pWY)? zTKNLK-hRmSwTlVU(}uYMxcWy>k_8O^*sB5Tg7(|EqSNGGp!^*jR>P^hC<3MoJu+yS z9nG4$rAfQygG?0zP2+*%T|-`poeTm(fv;1$`t5HU)URMMi*>=?`ltdm*-&D^E20z$ z^WE~E1*!|kOWsi|mj3;U>s_J3>`xP$_VeSF#VD}s9CmXo<#uxuETxuxE+UgGg?*@K zg}uZL6c!hX9NKfRlk!|!G?ma6!lw4e-GV4feG}U3JL^zZiGAU_W+8I5tLjw2M}xr2 za%naFIX9g`Uba|ebBJzJKI2<3BpqT#bgt`qrwYmJa27iW)@%cRg41K4J3Grd6ZHhH z1eEY`yJvAUJsABucC`@(y$kPrSj)6jpZfbH8BBc~n-m|Ae0#D245%M4hlmAEAL@}` zf~2cPOHUtaVUX`tEpGgJEa`}lW!{&TXJ z|H#1pH#iRY1E50xCbMTG=#ko$u4f_aab2=JKm*$vz@?z`=j`fYVb#{9Rr?Az1KvQI z7M=wTgw15dx^G8F1;9n7A9w^S6C6+MFHSuR1inCP$D1Wl79a6l;J{1c{5DhfD8Jpp z4{sri4)TmAVjD2>C{oj4LQ)2U5YaiqFfye}2`qs^SP^7eoRn@xka^Lpc`C`E2x0?j zUK0MPRAeo$NvzmT{uoOe`@o%A)=+%-qcyE1?jY+9NGAGxb=X4TNkId*bZ=WF%T)&b zyG=eVhDY*D{OVU%VIA$WZ>LbhAnMomYl;Vs}#+Y)*_6L#QrD@@4*1>aLq{#Nc zTw@>OCgh$;4qc5^zue{rQzcVUO%iM|pntroX8+;+c!X~*ZW&x~I1NtC>dTQxl9jnl zJsim|sxeoj^ zhg`^)k0^$n!u?14jUg7u+bqzE^(=LP(8B@j^N4!Rqe^3g!FbnEm}vg-&>CGY?&fLE z33ul{et$Opwb(T+ts3b#U|_%CZGQfot)^MSCGpaksEaxpGg$%|V8k_UC$}|F)h@B* z9A08sVHlkc_F2RL*`)8*9^Rjspm33BC*wv$bIwd!x^G}sLA}UfgqSi{ZMXTEeGC78 zY~No0v~QRG(!TxCR)i}%q~G=sXA!#+-9OtlE{ONP!?^wnu1WfbM8p}|^o`GEbVv@T zy2Z9#O_}rIKnkOk_nkE337%-t6vuGfGPT%#;i0TJ_I7JJg^xAJ*l$?0E(JlUT)jy0vC)4m#J*0>F z`B-_j8mM3^RHm$J2`V}s9m+C<+$Pzfo>L(r%Dj@{Pow_Ft2>$uvz>|IokxY_!LMTWL8p7 zTj$|GaKuA04HhP@mqdb;t5dN)&%1bH9$pX}ixUfcrH%7Bv4jB4w98;Khl;byg)4Hh zOnazo>oo(tTEcQP7EbaWTg<||W+OjiQQIRDM)+|IMgV)$E zA1*w&HQ4X5s>E0TIJ>oKT<7&m@)GKW(B|L(@RsyR7mS{*7(T|Vl9jUBtxE{^{R68l zRBV^3^=+LHDNq4{t3_ij9WEKW3L^^lNWxJkq3I7beu6W1NGR`K+s*Zo%#()Fe&-h2 z@H*MqA>#;fe{I}$#1IPY7R<^iw6rdgJ3($R=fYoYiL%6s_h?>3xpXc$)GV?OJch)= zsI6~Xv4mObFN5~3{MFXemn8hKS6(!L8;r^sOSDCaMwAV$=t5r?Y?xqg3o{B`=f+al zC)uUi?$Wp_Ib^*CBbN)juQy6W-k%-(oL{%5iDdwrmF)!t%~oq2t&;{f{_HCzNva1U zBJ%HVOBX&9yEPo26q zAU7-f^gw3T;xbrC^E+V4e6S+35qm0x_CY!B{dx0$e)B)=_CI~+|D*9^ zT1BP!UL>q)tt$;r!r<^Pu`K@$R!6@sXn);?{IT$m0xB0-h#mo(Fw(533vZ$B-1b0; z^R3sP^eirW2-X#c5iAN3JW0~5q)vIgv%${|%+g((BP{9VTCCK(Y^rF>c3xe?b 
z)Yb9^kHl+7)hie1EcI*V7LfJNgj9XIE{HbEiR)U*olX|4_yi@zD45T=Y4hL)H00AH zKfLR|ki|>TrNwR|f-me&gUQi)9%#D?qwOmT#Yu~Iu+Z{*&Q|7u+Q4Aw&d*oe$U-=pCc);m3TFc{j{$MT>OaU#FvgY@+RfTF|YCkNKLO0n5Tw*j-JYD zZEao@W@)GC!OI*E?bf0|y1M3oSZuwklAD_Mo@VeDSg&eQ2}Y;fMzWDacn9m+Vkh7v z8I;!;;wp%aOq0v9vc^I*KDD#d!9mT`s6;kvGjs!Ck{k{Uzb+y-s? z44NeWNe;qfp+hgnTZ98Eb@hjKv724_(J>U*1n9!hBuZOiL+8D0*4LLRK=2pKv z;SPCZ0|^_@s=Ri>JxlS(%80MUL=HR5nsGUrV-5Z`-A6AFZH0)}GSL~tJUqc(cgtS_ zRri?MDB;v-h zx>^8x9R=Yz<+xW5p%Dv6Muj^frML6wpX@_WmALbjJr`;?2O0pAT;J&R@fy1U$w3eE zW|v5@H;cKB@g|??8q;fh{hv&J)5f<}FhK3~+mW@f+>ru`*hAy-VTOQae<`5gyF{36v*;aQCpF5yIZ5n z(OU{Ev;y!zKnxGW$Q2604P6;arlUta)FTZ`qt-rEd& zYiuKSNi4Ue@T00Lv%`155e4*RMZ5x2w1wBJH%+)ExA~W{UKm|=r*g9EOqut=>dEE8 zc3IO^}gxr=m(WDblqz0=j2??9QLZgc*}i3_DQ9ZRHm$SZvy zpioDjWAMwggvnL_b&|0ckJumIt#EZ=gA6T*Qcd#-K!nAwyvK2-mKR8h+C zRld9`{gAg8rl$jlwQ_Hcy|sz%WHF>|m(c@@uj=Xa>t9rK&&$l`d{qU4kj-vu+~Y2ap_z}c{X~w(l!atg-)5VvcJ0aa&DC%tt|1o0#m{w&jk%{;!bv3A zu;Q_U4OUifAG$hUJ{pyOdAnn;#XRD$--xX3tiPe_{1s&P8tPSMUE8~WKPhl@*Q>N2 z7tSl3m)qnHrkAirD=X{NMgY4RxLM^kL&erk_4|mrvv*`_exbBoz@Ld2u*gEa5FTo% z#kM2VQt!n@K`ZtXhF9o}Ad*2-g}EY#vutY{NsJiRD?l<6NDyZL_t#rk&u zsQ)bYW=w`JQDrA_ukB_QQap(Og7J{n7)PAn-p&5mHESOZO%*@ZhxK+*4OH5itk1o4S0t=46ZDswOs) z(ouzk`fKqz(2>XcH02JG`6`rOn=Rhy4Tkzn!f|1S*j#U`vc*&9+$x$mohSte1kJ)^ zfm%D?^TO|mji+@)j^$wTV@lFzu_?Wf3lgt~=7tX!?eQ@-?{46;r3}X@xvzI*;HzDY zt6{!3#9Lm_jMboFWsxqw!|&$p3MX2(EoZ(3xQ!+M-x(`6&Bg}VSaj->tioy zTwlx=@FUkC`j~-J6ULw{zF+vcYR*-5O-+IdVbx-o;ZRw6_XGa`O+llluXR!uSj|q= z6^|f?IBCqYQR0R;lfqqK_rthqZ#%nYO;PLVbQlE1i`UKEa77}i>BxNcTe!?x>>iw# zTBM!Dt3{qY>vAV~B-*C(!`e`|(qiZ26Q($0Vo^4Sln=8Jg9ow9qAQh&S2-Y9FQMF= zzS2kJ3dphWcZG}^$~wqmn#FssKett?>~-{P(t-bRFXc`gE1Pi8VpmG2&~AOI=!E@C z^NFn<49%rdjh#8mO2IV{H4{&{Xy7J{D%rK(B74w+1Qww?zW`XA2{{ z3n$-SeoBPT6fb`%JZ$mUqG~@?^_aiFi>7OeVK(>XNR$hN{ z1l4zY`%W^?KKt{(1GX~9JIwLSAGN@$z4-I6&QD`hjh{}4nZb{#6KLcf*hD?D2RbKl z-+}GkKz%I#Be@ZfSB5@2;nRO`LT3K_>ni(?^6s(_RMGFh9v)!Sui9+tQ3ZwpmvmsLhm^Ldo{dTH|Y>`Hu;vRQdu7oWYP(^MKSu<5O zWS{9w%6y~V77q6Xbre7kH$WyagIL1p5-w~+%tta!Q|55jOt=4Y1YR0Z!A<3E1y2Kz zi*zbE{~ho~LhT1$4JQVwGS(J1+*XR&3XQMMIg z;I6sJTTcp&*y>yoo_I6kh@w4RlCF1Ni+LDtv~8!lJHKRUfSl9@>GP#RqYX6jeyc_R6Qv{RMDweIAIG z(^ecUo1}GPe`1oxMCXFJx2iH`L|Z}?jedlePv$NUQOTJXFh5iMqV{pcDNLfwSQ%!TR`cSso5mQ#G4*~ z>%Cyu9CLT|Pre5pM~K8L7l=DKP5>!fou1ewyXjPvc|R z!!ArLe3_zlD@x{mhP&=`N%in#?avVt%znT?d0(dTTXWmQy>8&RZm^n8s^aneD4lwb z=mWYJds5b?{*gyupV<@&hy;1iX|tz?GRGQ#Qud^3S!Gz;`l7Sn198;SYHC$~$xOZ3 zb4E=Pl@87WBLx_)^@_MzhEsdO7Cq~->8aOP8vsE>{~mfd0?T>b13v8ia^j*_kuJdY1Dc%x@dRxkQNZ3`^wl_LC;ErI0(TOAp5zsh>l{0;8VvbjF%N|#L z)PYN1{o9LpQYG*ma8N8~D67#+x3=KGfFtqhH$$P((+QeWFx$;)CSHAfukkaC#?L8v zD)JNmh1PqJTH&fgh1lF7-Mm5a6Sx%lsie|TYvmFoBy3Va@T1(PfT|1Pu5L-oR{eZz zurlnm2uwS-b!z~`a*2Oi$PX5*26UR#$G6wNP?&`&PrPkni!x702 zd?^|XISeUb9w)ef$H;r$(V6}61-022 zEo~?2$=lHs)@FS{|t2$e1 zw)D)8rDW)8Po4^FJ;Rr(k(o82R`Fn5xjf)(RgEtO%3O?0jJ@^Dyn}=EXl`+LH@i{y z;v-DrdeBIk+f=>k9B474z$XYmfWx@mvs0Pf#2Vjyr-LJZ71V=a`wsXY(oGdJB2J}d ze{2H{XpsK(5TW|6F3m}E`%6>`i|F2LfpB>)XtJfGLrZsve7F48Kov&}RkR%)a=Lbx z9qd;v2Cw>NxNIulzgz?z{!|wou!l*le#o7&PSAX~+hQ|gW8{zd!v{|YjXZW?7HksI|s}jSgOiI1Zyy=<8pPDtGdwIIPd%tWZ>vjr7;XR*kidH54=;@d9DYlL(@&);+h^bt=|(TB zXMu4HRW7WipusyyS{XT6NVZtsYtYN>tA_jyK2T$o+hJTK16Qj-?@qQpTdb~S?kKtRJJL#s&j!EU9 z=ah3eqW1SSA*@zEZLXwmM9w+7XQYkM=Madm19R?5G9iH8pN(B>J!onA5wFuf80ir4 zH`Ab4Lm2UIHkt-~ak5D?7q@N37n4!~*Zj_fXQg#h8rO&FY@&1Wt)pzs9)K>yPv0#$ z0O4!rU60)IxAbB*9j&f+4b>a%#Z}(r;U;&DBDHQV=DWEv z^(DvqJ0JlWB)vJ5KXy5m0r_^6d9%pyFaTXTdGY>KUM%e5m|~xq68)|LFd``NRKvPE zj)}R)5VqunAI23 zZ$s_Hd{*IEH$J!04}#&h+u#1=wdpwi=C#$)`>Xj|Ov$ZHhHYtSVc7Q!tFJKGOeg_! 
zk#qLgL%)z2qL57mOZPHJTx7okCNiH-91go;6R4vW**2`lGcn2j68^gC=PRmjs+`^_ z?#1IvD|NYqTO(~2@j5g&-KhMDK={ISOYUt4L%ph*WR=qm<~-F1DY_PVH{DNqr!*Po zJd2)=$MmZsywXHBzycOhce=)@T~Fp$BNxry7iK`iuzCqifp_J@EYa8#sS)^`rA6fM zVI5Z6fW?=lo?`}aUyO76{o+MW#P!{lhPlR335V%lGtO0X7LDgQT!y?~WC(l!mXz!|OlB{(B27OL9&(W#i(aERydl#Q565K6m z*wvpN;)}<6KR@RJSKK&i>YY8bgg$p-oV*~Z%t`q zoOVe(7DmA*{M6SPd-}jxd@=J-{~29FjyX`%YTkx`rn~=< z9i=m2fjm&*7vzpuiC zEl^&kAyHz{7dZ^T6#Qw}8QDV;5CshTtR zD0Bol`3`IV&u|n*UP{GJ!c?N4@Tx>TktjN%bl9;E(l8SEK?+v%(>3N*U09QrgC9Q%1c^cU7YDs zf}g53fB=-fU+9xOv<%iZlod zBsfSx4QL;GGU!ML!z#(OASwETBrw!P%yAlIb>9nhK@4d|8MPAFi6TbYt5_hS4T)SdZ_^p_)&;-KL(&E*3I_P~N*pJ)K#Bn&3Rfum(b7 zPe4^1M^O(r>D%Oq!(Cbl_=CH=GaCB*-cD%j18&b?9}0%u3BUq(Ry*F0V0D=2XowAl zBa-@+qxQfB%^pU9B^jHc4|mKAb5Ojt?2YYzz*P241|)<44DsL+N}BBV~Y*Y7`kwZ8pnb^j9hLcuW zU?SatK{ci2!FVhnhhOT=l-vflqpanwlxA)~j%WYkaH>4wl_wSR3ib-D9COi|FpIJY z9(zi(93&+87TMmlEfh6h;X$72EjH6BFzZ4+$Dz2{095?j-7NnHHq_k+`H$lk-z32w zJPuTU&xA56TRa#u7!c~yJ0yZzo9%1e14WFuXa=J8fGIs-wMB4O31@N?Y`k^xP%|Jt zLauF0q&7nCp))H*8e(y7G=>N??#(sT2%)7ZwEz?`{EbEjxnsc}4go8FfGOhJQnNX# zn++i1D2`*0a~4cdFi(rzV``p0zIBn(E&v`BrmqbLLt+)B$eAA452VzxvJ>pPZo)C5 zrPO+=k5e`%uV3ge2cML07JNC=(a}2m0co1ogi@8osNa<^bP+sO(;(JmUMsoLSl(^3 zJf|^8jzPVvyX@>$TXan5PWToblf)H}WleE0H=- zxvAQ1DhQi8-WFZuQ1X1MmW32|9awBo1);SR+>NoQM}blVZcCs9eU1NZYJXeX&!*%k zA(g<3O}4}(8WR!sr6b}i~|_J!YzVq>iwW7knaK%I7POF zKs%hSaxxy*0fyM(cD++ZossKqY+WFc{qEI2j^Eb0zhW%DN21C%XovyQW*zhpRH*_s zZJP+Hoj<*A;&4b9w}w*%&dQ2GO*|+P<1u_v$|dg1)hRt7bxOVzIU)2TVmOl6 zomN?vI$_VIR1hHA524o3P0Dbn8K{7%VB-wqus{z7UTanB z4v(wG9&POb>Gqr4(^3aC5jQW>@zn~~0Iw&3yCJCuT;e!-ndp)3TN~B77-{g%Cj}U=UhW0kA8olGrd{l~WYtUQpr`kJ>s&)I~Z1 zk{EN5lmw#WR2Iq1|og@0|{| zyF33E9^roji1u#?*im8JJWln7BS`t(C+;_tOWJ}2>T7w3Q0U8jE_q|hxKP{4%2)#a zS=p~G;kJ`SkVuS?Zv2u?eP$bO`*W!a;_g_A`3A7{Jte_tzr_o$-35HLoDL!5-|8`B zt#qIZtR?j36Udv|km$E(z?Qb{y8~BRiBOwgT+JJkYR@HcOd$Ud$jlo5A;-d>|2kiI z3cfkM)0~j&Vrm%kGw?z)^~s_%m3VqxcmAa|3>M!M-)R9a zUxAGmK2~gA*HoFq*5uliws}drKaXv{IUz5LV=}otGEF&V8EM|03;w16pBy+^S6U{E z&{z6ga*pwTX0OVDZ{8qjjq`n{g-2w2kRvQ>YnZ&0O83}R#sZpeI22lt?OXlY?i}w2 zNo-A0R}qLPosrND{J8a;*&6D_mJ~d*=x#Y zT`^av-{)MwHUEldtEBR_%A+~wMO8NkZ}4Muyw3Ut06H4gn~;e2`C0LA z&qy@l9V*|Rp~rxOt8$dbH-D>E)gUt&&Q)!5V45R&fPa7Ch4GQ+^&L7gvu0eOwHn+)ook(SwX}bsM!GPL<^+mmVa7Z;BlZV{PnI<-)FXj>S+k$E z5b7~bgiJ+e$q6Zaq5x#`Q*2T}kleT-v&bIZaC_1Eb#(Ot<0=e5;`@B?H{+Yo{>qev zmdsYywfYv;Yf;`t4luC7<8$$o@1%mrJ%z!gL}kC6JQL!<~H4=bLqzP;#uMM+pN zo%{62ng$VuOQ_ftW>qmi17Rvk2NE+{(3aLW8a9A42_qd$*pDBq9({JB4IGm?GM?xu zbriJiIY_J6+S4V!^$St&WMk()Qp!dz*1G40(}c_G%#<6ZBK0l-pS16`t8;|q*Qs%d zfpyo6;qTU)%?DUxE)#!rf3wKU{5gRF>KIIzCwCM&DFpz$fEG)Hu->g~sLN(8$tKZf zLBf5vONFx@_eozrbM9S!HxwcyWK25~bUXa~@Zw{xA{a){*P zjx64L;yeqv7tMKi#Jv#$F^2|Ww>jRGB3d6`7Z%PuJv)&&5^YEBB~IYHIP5Mso|vV6 z)5_*WE@a`;iUv^>mxz!dt1#~q+>op}$b;HwM`bHTCMzrWx!2w7V!}eRJokqpb_RLi zK=9=4Fc32TT2=RDuQjgn6S?laV}1c)U-jVK$-aC1;aT|Iq1hdx?|6iK55UhqwWW3j z1o8^S_^dPOei@MWrCY1LEY{@o-AS?Hqja}T%Bim?4jX8UoN5L@UTh7B(P*9{ODIn~ z$t~-8JC}XqQ-NjoUS<`^mK?rxcQrr)grN*76?+4Y2TzM|w~TPcC8f#))B0Y@4~?OW z_el|3V|tIjtv-VOk|~|^+W8C%o@4{SB#=cJcNiNIjGau|(+y`*=E-2;<3oHrE7?Ew z=<(yz$M52L%O(OREg1&!(t-GS4Pq?vN{q#05MS|x^>Hz+ZJLq1M2Xat--1w=o>(5P z0feiS%*s3NHMo1WdMfaP(Tj-3JlPRL^gbo(^Hat~7bYXexnd*n()Sk`8(N8*PM_}j zG%AyPnhzU4YRL#fL5Dv$d=Yh8?N&5$t+~<}$Gt`>&hQL`iVk`y7u_c)Adz@w52ER^ zr`6RInAddj8Z}11I5%hH&->Q&oZ`BI#69@B4sfBv+P^>DY3-X1Sq5(}3M0};N*Ip709+5c z9&j(5bOF)2#uE0TRm6DU<{qjsDH>+|!unZ|4vjT^0F~-+dMVRx{c->rV#w6k!Z@jg zUxA&xewOl9vUW=XfjfsqgjxF_)pC63v{8RHf4{LrIp4k)5vzp8#dbYs*3rrf@Yn7_ zb)hClED&zXbXa8Zd&6_gzCNLWU>&1Tp-#BDFB=f>Qy9eVx$-@^6J@mhW!2;Gy43Ra 
zoKgR$;d{Tlrz2m(WR|OZgdAZbc>^L}wLg9I4P04)2X6qw>5y{nzQFQ!gyPxYc(ps) z$eb5Xj&P+_PsiJ#KEy>>@rci}&g^BTp868?=m2T%snD{j)nG7fsdqy6aROzcT>XhM zM)zqQUWXU{l5JP#MD0B~U==Ds>HfYB)y;r?6oggr{sLoHQuhYXU622?m{D}{=vC%4 z#wM&NsIPtjBNX7==qSv#eMsw`#9C%~Ia0se;wlT%4f!E#8ur{2idB7k(hH*+|77)H)VQ=`k;HlEf zWd-VleIXMOcDZ{lQDx5RFBK@K4!Q+MKX^0yWkIN;n>*)H9+S-48~jrZq8v0%2K@=G zWDHkVOft->91x&{j1#d|;uGOuxk1!hbba!L84Qr(mTD+S`8h#eGx&)+nlk*bkHAao zwlk6j)T=aaqH$U{ii=;+GpdUhHj6j^o_77s&#2czzlcZxMoFy5iT*6OF)~2wOPn~k^O%L50W$#pfOpua5 zPAyba*Bf@+2k{gin@JXnuxwedWVvYQ>H2=v%`zcmfK=^@zy6xL#RQGq=7S}G2tID+ zC}Eg2qw6xfy$mOYKi7_ax&btGYsqf8^I=Vs;PSfxkhG%5bviVqfQhXQyd)5eHnqEY ztxti2yRQX%&3lAa&*wY{%~EPjbQj68QZwXRvU~yO1(xd`$WV`v}#wWoR{s7(CrYnu%__ z6kRo4#kzdPt)pka8aCi)6A$rEi+gfCaJF{v4HaZ|ke{o6w9HV91?%ut_D1xcv9cO? zfG&x&^}WglAXfpFGL--`Xf%x_go2H%uJ2tCg!uB5?aQ0deE94O4CWm+-$y%D z?G=*_e>Y)hVPkWk$u=|oB9W>T_2!;*4~&H3!!tlvEJHB5oU;=Op&OixAGaiVz5&?T zWP*JkPyhYk@D8{ar4(>sQ5{6cYeQOgqwcvVkn=Ef%9wj60JJ?Pw!%EWR{;G-`yV=1 z=EXafDpJln->{XmcW(a4WK;(mWm^vK_N{tEtBeg8hY4ElROIexMF z{>noQd1#xy!vCO6;7JB+0=^TyG71+>aRT)*em8W=HF?u(4as>dF0DlB(|TY&tm%Bx z8c0w)_F#Q-dC4tr8I6P&PNOSUTZtI|+s=R5?%#!vYZqof6J36%-{U)D^b8koi5u(+ z{9m08U6brytIPoqJ2!xopq1Nq4cvJ{ zS%YtvD*JKfyQk6rf2S+AzHNd2ci;Q}Zd@z2hduJ&8yPpx&EaO;?mt1`Qr2b#88E&J zDz)=Xw;f$S9{wK(=Hljf%5TpLAZh1Y4ro;6`aEOo`8A?Vd62B$oEGlvU_pqjX2zbp z4M4np3`9m`4Q}7y4fpRz%+`svC9>b1aNF(MT>ySLC3bqY^#fASU%k?;c2!F_$`}yLAx>D?xZnYd;hW#v$^|v7!l{*NHt2@BJ9|(=_g#P$0)$9M6+aA(% z9$nuQ57z8txiHy1VbnjieoJu?AKosD&$$iaioVlW?X$#nETm~&n286|+zz9sl2$R3 zmJuMD&zKR+Yx)+B{!J(0H_+b?C*fZU^7|!_@Na-g`lZ$STND1}fAWOi%EsQ!RoeBB zf0K~kbFcqs|5L{3yMS5#n-=-VokjjI5=l-9vg8hIy{%q;_on7*~RLuRC=e(h>JrtE*zXzi46hAx9*1f9p zQqIVYNLJALUHuvl);(|kDOtPa7w1@ph%0-1%7oT z>Qcii2OV{xiCd#-O6tb5ecHbK@u&*Y+K{mrO;xr>X_pZ!?8yy_zm@-K8A2?( z&yL@m-UsyO?)|5WUlNnwW-i~a?iB^*G~hUBWX?t$(}tVhe<>p(EWz zoa9Q+MYiA^(3-}o$G?imuXm<@kkeg5CAphRX{PgO2bwPici5e@_p@{9P@1#XB4YlWbd;IiQsxL(_(F0u zOp=JTE#9Uk=lJ{qcUw6Dgh%U|hJ21gmB_t(rRuwxxQiBZ*gv^fcBysuI#1U!8 zvA=~SCZB4^r70?7h^B7VS3wNNVs3D19$~I!Z9@$E7bl(@W$IOsRgx9#YNNb_QI}S) zm6PRt=+%%BD^`(E#>!qR?vPSz7F2dO3$~n5-O!M3t3d66X@Budal9eDKplw;xTJq$ zQnfC^3zK~O%=`2dN|3ubDv>pV%dg}n@q-we047SQjB*-P1NTYx^j99^K^4S0I=vh+>+ZFDk}STJ#I4yL^jSDvnU7j1Eajro&}((Iu-9Py zYVb*7UmXo}QPF;oT#J-0W7s>P7$O!G3GHZs6_5q51U#cq3%ThvkQEikLWfDCO}KGo z9Wnfn*5_#2Xud+TKU1*eBDouf_Qlc6A;pRq8l;)P*`Zrc5&Ks;0OpKlegj>;Q0xMf zf@|DIER&+?I+v(nDmq-4F-Mm9X&UdBeP#o;I#7q2C}aV1h+QAH>-}=2M`|#l`bF82 zP{o^e8#%ML+sjAB*X6^W?)Wc~s^E>uyChp6(;?mBPOjqUZhF-(axHfofCMER`1chy4j4?I{!+P zx{sVu33x`Cns&c#(6+s(t^I7-Ugn-MP8Wq)`nHg?9`_RBWMzA?zQgLi7r{Mp zJpJm(e&P4yT$~j&T772hRn3ZE<`sL-h@ory&<1cXgHMi2hAa4hDsxn{73%=1)yR)9 zu*Ppw+8ktfWMofaVlaMP=}yO((mXG^XT?Ao(xY3kS0yqUIhC3i$`79CyJxqaF^$ZW z2A)64)(<~g6*RJgaDa3I!b$U#2CAXO=Th`BB=}1>f zJ@i3NqE3TtPyBw|*pP)nir=jbz$1sDnk|X{8i-7lUxZ=1Pmn#pG&~J%5qgBnfG_JU z=Guq#CqAw$X2Vb7i_j=_Tqn+)@CK#^kBwi;T5a3DD1oA92kE2Zzul5-rDTRk(F+b( zfDr9?PDiu1!v{ZBpzpNGlKe%X=e>I_cA^Um(JeyjnYB>Jg^v+~^UY{Z$(L4O5UhJH z3#vi1dpdUU>4Dw|nSFL51jU>pCW?`lV*-jGuaa$!_fns9%Tl916(40EcnL3*doMT_ z3s9jP0lRMY{HF03zAX~Q*@y5BYIKd&+?vm#1XQ(xD7fup>0)X^Yh|!=C?-o8jEX7EeSZmm zna)h)#(jGQl%vd0+N#f4!yK8T-dotK&(-oqa9tMTKj?gpsGDp$}^rWRX@R`^4N zN3h(i&QJxl>tb4bMg?&ta`&BRyZp4OQfgUr8fI(HSgB%Ebdu>_T#-)^#60j)VR~}b z?jU>MCp>YCtwKEdvyjb28|lNHIv@P?FVmToGv*^VW2r{C_|=45uvkQ%P;1^J*phhu=`nh253?~wvluXQaI@4)r{=Qf& z>T#um)`>m2ve#MdOAU;&RgWR2JycoKo{&W3Fx-1ES$p`^y1WowS8e_WcOs+2LM_*5 z{WL4*M2ugBlkG$Xj*={zq#jony7f!&w_kl314(o zS1iG(>bA6DK78ES!*vPveXmgKkKg?VNW@=7r0~0<)V3_&stW_R$jz{1aVu3qw&gX7 zH=%-B0&=%_m^35gtv))BVNc7LN)g${P9SYlI^5(`kzDbx^Cp 
zY3>K2Tth^)Nha`amfCk896}f*M2uX0B4qGISY%lp`^F|0{q*)Hd(u_o#6k2gv76`L6-yb$y^ZDJ)Y+jIprj68nbYT zx@-4w7RLjdB(KG+>h!#{S2ui1BPOrfh_<)_7ah)F&i3N8P#`~b6N>dWrPkDeu`Iff z7Ii%zPiA3fS%5LdGW5S4&UwlGQn3KZ35{hD(-UGAO?X9zK&ToMs$atD*c8jh*lV%e zXX_Vvq{b^#H{3Pu2JO0XwyazuH}?#$l9S%!9IPXhK_r#MmyOdN!`rIMC@avcx+c&p z*Ugtve<6iUIOnYr_X1mDqGD30xA>}dQHQ!VYfdTXwS4gdO8>`$J<)#=t-$Zfv7IyH zS%V|n9(dg(x{Hp(1v&Y#@(Js0QGPVnvkx?lh|}Cg@zoxkFcusp4H@;r8by%~mmg$2 zm5jaI9;zY%k44I6kejx^4uFD!J=|O6G%=yuqx7-Z=@CMh!PI-miM2?w&i4?L+bBKi z4MdKFAR<3pcS%U$g{aXpLU+k!O637C^_G9?8&3fOy&Ed zENw~Co5@|ZFJsz{b5NKF)QU1L#(SjS&){AO0!bE7DW3~v76nf38n?Vx@#XOJ+{7}~ zbGw{|S;purMr5I8W1I;rJ?be^R`zY`{`ZYWE?Tdz+v;$SkgcT+1%%sS?4F(jc_~Nx zHURsD)S~&klL@zBt|^+~PglS?yF?T|k=GoG)LbtY_JO(+=@ccI){j1Xi^I)x-E~gFWTFI?o zYM18}LL@mCb;>UlvDwRNmsvx5ODCcoXnkA^Z5E|xQ_I6k>J!oo(V5W5y1X|w?^tch zZ-?u4%oOVfdp=^#>T&DJ3|(%GtZ7I_EYr%nl;+DN0sGRM4v(>=T*yccAE`h65#w}# zh{5E#tp5GwI_kw#qf`0~GtMbb5c%ZGqfzZQ&bibhQ0$2%0zb5gO?Kfw_7DCSMY?|} zkef>O@TQW@HYw>@XtSenZ+-w56mO>)e|n3R>gx1~n}nFg7;D>9#foRNcR9t|D#~)- z$RxisZ%Dn>@3EeKJQC!6@ZP5?r|u#!p(>y7+Ck{YS38|80AMyGxraAoZDW**Oy-H?i#V9vS91rFXPQI`!dQb&$nm1IUI; z>#-EeeQ<`SoTcNJKi((Yb2G=NL&f{>!@%vuAhpaAzSxG7brgDi?dN-^Gz%A-1Xa#C zrcyOnX;zrFs6|CYYL_>h6$HQP_vzewuU~g$K7&0`upO&rH7?X`7rqE>Fy-UvXc6Lu z8D%cR79MLKtYQ)KsLmKq)KoR_v_fH`d3+!lZM{REf<`){IAKEA+#AOs9$KxP>a6-L zGl9gpnra$|gQ2pTWR(J^-7*zS_y|Z}qfqSH?M@`87uZF!jHV?h6hElNFx$%2t;wz1 zPU%jupIg3GrqsoGlDxUxG#>Rt7FJ}EluxNT19nF9Yjof{xt#tLv*h10#zB_XH3JiPYZAQ(1KjCzDH~(?` zOU8)mFG?Z(JrT$afiGGvdn3E~sX8-Q-*S+IutSG(?y-D&>WeN!v@;YyC1 zlUIX8U{}d>-JZYS+&zD%B*FiK&jR_2BCx-jwKl{&$t2yYT6K>c5`b48)5e7Iv@zvg zi>WL7Hvn6Z6s-o|R)L?xU%x<*r_ z{KN}-Il6N!dS1Uh(*%?6$T3yStd4ywMJ4eCM^XxTxB!w<OydZG=NF0<&=?3YVfPs?sg!JAr%F{l!?8Tb$7t!EITCbZzR;ZX^T4Ib6vO-uQ$Anc z{85GV`35KiC4p$veZy!0ZW|^={CrV0*o8Ml6{-Yxu8a{rVpNKjdpK6u%O*xSSmVw< zb>zl;jW5~V0DD4ZJtgH51Qd5 zT?qs!q)$T{8Zzhmd%^HO*wYOloPTZzX(*VKKICS3L^XfS%!X&KH;}M+vP~FjT143LbWq#(Jz;fc!^{j^=9e4to|;y>J51}2JWNhvq|5Lq&G+&d z-OH(M^W?Lb+aCBtb&z5XxF*zDu5|1wq{VviyqnT%;y0fn=%vPBUYSSXMOD6^_5m@SFO;Lx+e8H=?r>x(KqdF z@mleRw=9RM7QtR?5GPJ9fp0n2HApH4>JThlCr#L=bEe~3?2VO6Zrxt1x+e->rcN?m zF@4bYet!IR5d?b>dL;nJUhcdWG$i?HS}Yovis3*o1_z+L zLoD9C&2HchxUN{Wj(Uf99L|zYhFuibgFiy&MvucECQwGCD`yzg+=zU+aNbf(w<4^e zX_Wyc&Da^eMSrbR> zL@X8Ya}F)%#WaYYJ~<_IuIZh!Lo>f?uch^g+ioOmbJ-Hk!6#3+xT7Crb-7$VyQf>a zDR@P!3<@#fR(riI*Or*pYzi$k533&MBc{A8ln+*Qo-y5sYqA-6)C;EmqW%wBJYRtZ zf78B0z7y1BbED%D&~y=Zw_5AbHs%|V4;+xoivSi@wS{pv0tJ6Lbd2MOlElYiM znqI!u2|B)56Mv#f{`yqlXF*77oBlz|HjjHrVAb*Bs&uP&SR3oZqq=eAYydfv z>;!{L;vfaZ92#_H)M^d2)6jaCS~$T(Cc2;Gu#}HL&~|=JZqCqYKHdSjvNE%Mecm9y z8)c0ktnZemH-nG=iBE~O^=;aL{ zA&2(Um-$Fshd~e3x=ZES2GFWOU^zHD6Aw~ycI1Q5USV|Kg%3JHzD{x7%cdtNZKn;o zTxz7PA2-}7@FNgC(BsZCuw#NaOUF}-WKhas{Ur!0=9J>@Z zCT~yPfaG@u7emGgsUz@wkOLz=bcGSm87_F$Ai^x#7D07vY2BhF*U-C>`Ovs?DrlYB zl$M^+;!|k(yaYchMbW(hVCC`43ps|oUlx3EZ3ZsC;-_OL=mfJqzHV#UqQ*Hz5$1C; zerh+5(ZuV6+=#9?y%mgL;FnqTI(NRAM^-$u)}}^v^=WBOzUZlwXNX&#yvq5U>7l)K zq(%|H1N>3$jhdT+QxS!w@Z<2{#Z?->EV~RFBw$5;Bwh`*{IeIOsJxdzivHH!>^BpsjdEy#8}Ix~8L^#W{+AoXvjk0=tJv~}xa8F^ zqwMJRm0-=v#i&Xs2K) z8mC&>@tS1D57yL+*K#Pk#^Z`0z@aqOEm7y@-Y@;Ec0OiDHRky}x>@*8YI9W0Hbt zu+t0IrFAN>NM_}mc@W|EavkJqgeeH>%^#MF-+qB8xo&RH51RKECRVY!+-N8Jr3~ui z^Rf3n5~E{*Ykh{Yjz_}T8$P;vO>!MtW11n1HmIc3TbDKLGDdx?|WOs423XS4<@hvbQ$^dGPAAwR?H7dUSD@M)QwK|7iGYv>|;%mW!2 z@r3JgFtV)aA_2+KT0mD+;stkk`!)MGJ9q>-T*k(cTf)7KkaF-2!?Cg|>Y zQpR^Wc{TybL^(y5=dp_kl=rU~#e0sY!v7RI2mpM>&wTpPa%-bxt%t0Z=e-qQ?KP{N^ zXFe~7+STIzj5DHOuBns&;vkpB9B=hh$>B zPyk87aI7p8Li?ONrU5gkLBT0cXL=0$g=u; SvkL@2qDz00J40m37#T{TD5T6o zAsHgW2?>c(o^`)BojRZM{r+CR=lS#b{8jAtUiVtpy4JPUy5FuD>T7OT&%K_CifV(l 
z)&XNGDjfWmh6=Y9{`qRZla-2!pGy0Hs;SqSiK^>`Z1rT?mXCGol~k_Oa+~UK?83)P z39#S0%KscD?> z;Q!w`P+m+<`sm2D#;Y8STvTr72+9#1(rc%Wv!fpe9uj?$QtW94noZG6uU=qD=S_;LAzFL6%}iJ_mKkcmF}ufTFT z<)EA7q}h^CstZ}zSe9U$F)C)MK~-r=u>^+M!SFH4Il^({Ww*ibzB|Yg)&KrutW3N6 zQTXm9L(J4pmyFSeVEIQfcF>U?z2zb z{NGQOJ*?0`PaZc~IV-&@a?7Om0#|Q8($2+5m^Vh3c*MKpj<=F zKB3-X)lA*7cP*9MEuxR#9sA)bfHfUp6))MUysrZJzEElV!MmVG6M}8gYi245RqE1f zE-40nq?e`!FPETDjwzHTX=Y!fp&ICGm|G&>hzzFq=5^J3PDw9^W$J*7CeyJV!0d8A zoEU|jxR2<=n_)j}11H|Gr{Q(wRW|5!3gj_=)B5xaa&DScu{0?Yv+CCXW?nwO1y&)e zUIk#)Du?B?8CbnU1c)x*YA*b&WH1SN_B@d>YGbM4=a@12zBu&BEwX0w`(p0FdB9_u z$~TOPYRN7{yamLzA7X1p)O%tH z{;FaA(h-yeB=EVXc$I}u?WuOTGdVWcjZ%F0dEw_H`Y8JQE*Zhgozm5}!L@rUuI;o# zt_cF}?=jcj5PcTFCk8-f(K5`92MGBf0cCos|5r00JvQ_!&S-!$v8BivWys0PER^Ze z#<^(+ON|E(zdR${g#g=G_2w@K-oRY00++Rr%d)N+!I0ITE^h#rkF+Aq`7+SuvomELoR61n ze`;GC>K6PIUyk=&rCcL~6$xR<{z8jG_faNXAQQTGRx<&-=#2rA<*tw{wg5e| z;kRcP;h6}^GqTA*M9MSBjzi9>9VK|?+Aq&=AgOwi2ldT>W&J8--5H(<_~n^cL{mu{ z-DcPMP^x3{s{}D`oO9a-kCy-PCm;PF_cEmTL9v9gL5Twm@5k9kuNMOzFU|Is*#LQLo%p^D-)Cx zpc;5&BA^Fz52|-UHTVKW2fRz?l%6KBEWAM9HO0Qm1K%x$@2b0xTtVlO;M{iU>O2Z9 zr*ETk&EVW=Q#jYA87Dc3d_IQ0J9~TYW8XyGZ!h;PC*Hl__eJ5-bnK-cim~p_%AU99 zBWD}Qbxsmg{T_1{@-!}60d6M&XYJXsvfJkP@;?!7ZvZ!?z8gsnU4&}gg{7%;Nu!d> zOP)K?8D-+w8SzjsRwWS$C!3TGo?5fOq}j~Wfpd!|`pB8Q#K{tQl@Gmk3XC-bO=yHd zy{y^OHp}R%@gTqXqxERpkD2C6qut2MZ*wD^-+GIBH1!D%NvS1NOSe)-4LWgdr@)ZK zOWb`sUS-L&PTZkGU`QDR=yF{b#n5eJNDp(x>sEh7Q=;$eUl*q=Ch?rd=i`{b$fxOb^CK4AhG&|5V?<36(Z z(?B$t*V^0_+@@iJX zg|93jb+pil#*2lZTwZ*GCI1AkvMlJwYhwVi0$Sf?zMeMsZIF>W0A%nfZ-m~FNqnq~ z!_GS;RI>J`mG^bbM>5F!@TO~A{s7f01I2aEd#<dUL z#k1AQXD5_S^E`^@C_>{qpmBDjfy;v5! zQb+Y+2)KZ%xCp9v;l+(4-8Y140;2wr*YSrw7KQX^?zC%4NGo}AoxEru;rZUyZ#Y(| zp%-Qp2-yvT`w*f1=|Ly1K^iCo);tZlV-VvPSBUWk2nhXr=(H)&Y~$jKp1{Z3o?G!;7p)}gX}@DRx10KMv{l)_5btR-vet5-reV%)78D@? 
zk{x4?PsoLC!VWXrH0zr-cXnTm1p)#>auAYj0qrn$mqUJX$&)<|N~SM^Dh}>_&)_;6 zWU!bL;az-eWCZ(Y-#S)2sNyv$;>WO?JIuhAdB#c6)S+^=)|_S|Hv zatUPf8tKbQ+dvJYvmIewoZWb>Jng8B|=c z!zqw}kS|8aOI!KmpKiwW&j5|5)lfQ1I`+;exsTkmWJs_dIb*qV!o}~i=<-LG%1M*T z_qM!~u^0M*gISaK2+z4AI$HK{i$kZKfiC-jE+&XBprHeq(D{NUxXM`T zBD0t74?TZs_P!9+oplr9_>CUTB%7uq$4*^&8Sh z`$6oO=jfOOIL4`|Jo?qz*xj>i68(E-H%MH1=i>K8^s!I;$FI?@i*cC?g2%{K%_b!m zhpWPquIEI??s4BB;pRe6?v1&@lH|av`~}A4%q$SY%)-~WCvBy3<;BW)*>SnEs%GAQ z{Fa`~n*_-_CDS5xuVAmJeoaie^ZpUh?@PpmK@-1W7v4dW${8txPr8-B<58%+r(2XA z?nH9z{-4fsfQjeH?b5m4tIGKM!*XY@W-oO?&wct5>EA2kZ_lF|$Q-XN7Yc1ZECQhU z_Pm-T^O*QOvmPkYoJ+$zd*46m8)4b|UYu{1?eVZK@H+|k{iM~M#O&y#J-zYU@UU+YtWOJJD>`52C#qiWo z(>*iB#HP-()TK$mU7Ck$fKAXUGhs$~3v47^#d*&T+XX^VmeIu%vk~-d+ zNssWpT6XdINhn=9&E{^WVfBWDocHN2$_51yZle&kEQI~^@35(t^JX-CHK?YlZX{WK z9^_oCH+_#9OnQa)=e@Ar4NHwQY;clT3Wf;|s+rf8SM!Hn&I^W{Wob8Dz2bwvM)y}V zvR?ceckDaq*u{p~hROZbiC7i!^WNfp**pevX(Q1;EyMI# z^s#Te@Ygx1$KM1O<3`DVI|yWb@7Mlig|n61WtC+2lJYxwqvUzm^`Mo&2!IHu48t8F z+`zIauVXXri7qs&H+#3>_eEX$K6&oJ8q`iup%uux7g;ha4|VzI8-9CE4TTm^l6|DR zLDaHx(1rIe6TdGq26N-KJDq^#8Kn_rpC2!u8@{mT>_w<;?`^PbPT+e}paZ~?-MlLz z$rm=?cGzm4@R=n!!?Elx<7F-=+qI8>%q#R>MMP5vjz38#v(F1!hK+Xg&c)&8|Ml3) ze;k<(N5T~PA4gugbEGAS<+3{v%o`o~1UoWx<;W$ny!#MD3VW44kd3Kv=s6xDZHGvI zv^U)S5lOeWn&|5{0pyP#FKv}S7Ivf`)WrJvD|d@;xG%OD!QWb?t7l=vN`i@PH;h=* zB`>rgxksMgNQ(agsO-w>lwAur0Z`qAFx87h=4v}!>jC> z--#xiKBr+>bo9X(lS#NTO_EUlGVoOD-a{<28&GB+I`J*ukayR#d}%NR@q&bU0>n|p z*+;4xu31*@nq9`?EpKcGUhBp~xZ@CRSqP$2drA49d85wXaI(%B!HN(r0m5y9aN(ZC zDr_mEG?lJ0^DIpIR0j9bNA*EOZ_M9)T(MDh2u(nTKof2fJTps{1j6%P@12RU{U8rp zNIz7wL0k6Otprf;>W1ZSnxG~#G6r9OS}urMD92(efp02JB0!EKAjhgeD*_A_usmJA zscF#WBmiyl3g4Y0b&zS;ecXkC(;vQ~g0ey*%+eMoKKBvck{jOElZ%Qk*o-WH$1}~K4xPC$CpC(+Y~<$2gI*7(mD$h zls5E`rK}g769D~X3A&}Oy4Al0Q2sImX}FE0K@g?EU}!&2nL(btRNbrDizN|DcR^xc z_O1kpNy;{RuRzpqoe3gKF5xe|!wT)wxNVcveJ1%vsO_0!6IJ{4k665(p6ox{g6HSQ zrv%Y#^bxtym77L4U^8L)bb;1Q_`{ZMOz8nWI%)^i4pif|lz%(*=6FDsf40K%hujZ; z1#y0NSIA`CX8+KT$3`JRk5jn@7v^2BcPoU&@}1!9ff|7rJ=`1Suco$v%+>Lm$m4fH z&bXc489|SCpC2R?8D&Ww%-%vGNxPDIyo`-AIE;Bdc!};#-ahwqJaEQ;w%Xq_*UO`6 zuet8S!K~YaLmV1ko^gh>%cpyawg>g#+6V2b$3!(88+3_ZO+#BYNs@)dgtF|^x=1ZW z-1tCd87;evRI%EsvN9#1(E9G+<_7Kb+TB@awRB!M3zd9IOVc}+CiCi(v7?ImSa3W2 zd(S3GhQ7jgt@7UvON?L53~4%Yl(T}_mRX-Knm1jK!wIh!7DbQG;7r+IE#01ZBJhS+ z{e{dEm(xh}Ho|JnPNB?dUV@S2@NCZD_aifLyM!G0_7Vs&-0f*Ewuy=sUFoPL2-<($ zUQ!qyt40u0V@l8$dF<|IF&)kx(bV1Tr0t+&<}vbD^;i&RfZNyPIS~w0CjeEv2In$O zlE;Z@I7Y-W-r?j&F~+n!MlmJ{NJkm74_)p(!po{LGV#=5B+c0No~b;ipuNnW7^uf6 zpr!(-CvVLtjs~rBwfHoakeGMFqt-;-A%AD6^&e8WOeUQO;|zupmW6Y7V^bcX_84}Fl?AI0}GtUb#rq|-OH5}ef*kdNOvAXfSl+A^vy%&i3 zVta&iV*-qfiPTwP!hoybDz5ty=0yS5`^QrOSMQhWn-Az z@gDn)>jPU9Y#Km!s^xL*4QUnS~(|d z*9mM)>>a2Sp4Bw2f9kJWYtwMt6)40-5|8@-sjD^q<~ zp1f;!4esd7FBOZgHtm24qSvt#x-C{4jVefEf+B=NDo4Nbxw6_O=)5T3UJ?_{^#@k8 zzD6tx@0aGL>*!tNzIxQXNeDySj(3Obzm$(K*Y$yB!voFc@iVA3iF8d-!XG`jp&`5K z7D#+ECGpg#c=hSqjUn+F_(-z&}~ZW3I+ToetEv?^-KUrWL3$k-3DWwyr)fMO0%fiBeH? 
zsG^RtX5>Icg<5SV`23S@bd!HAcO3|o5*Hm(^Puf-2S+HXnh5eo~Glb+X?1?!N-5P;dL;yUR0EjkZpMfXHd?j zB5uRsXSSfPJ{CUv^eLZk@#t9lR##|A$HV?-4=!qx*L4272O1O)@Sc9g>5A7HM9ga9 zD&t?;R{xQBaiO8X)M-m`&hn7#2|Xii@}Oy_n)*Ujcf>LFN)r}^cT3a2yF3MqclUQ3 zw*3bK-T(u({edyym@!f1wW5on# z6F6DTR+8{zXm@(~Nc!{v^{#jd0Dt`kKy3&ImIAy1d)~v96f}uKZ~h&CU^SRTqEHI- z;)+oUwt6O+q~otOEST2F;ZWVPC_>#YW_8FJW=A);K8w3UlBUkeUYESxdc8sMruk%# z=adRK2NfVI_wjH+Er^YUYmMaS(ml&I2FmfY+kpXlk;K~o{;WS4@b~csaSze)48Q>W z?F1nRzXtUcEPOw)JsYDTd>*NcoU{h!Y^OCC1@Z!qiYB`A{*X&?(Y!U~6)+pV?o=-H zvd`vAXPYE*1S_UZTeEW*Z23>X@Ocf?p3B@NP-;c(8+1o5L&r8lMX!pZOGf3l|9?GL zkLSnI=%w6=NI2F8}N{f7&c>mFNuYLx$wx9~pU7)5u?4sq0~JcEYBnrgRGn|$&6V)L?B z^W#h38+DC&;^h=g6h@Vn&ZxZ!-}|=kT2ajZay(zU?r+C~0IgX)o_fc8A=K>?K3#yVs=D1?|s&8r`>StGWE+ zvuv}Li}Sk)z43%uD9Ej-5r{J#)4vQIx%^bsM>GTYi2KQpwmNAaQ!*?5>#u6gKRCa; zeNFY)VNVO)SP#1Kni|~OS(3aQ4CVo73t$YSHn9!7CeC(#8_tH>^GB+V=O%J1>^vf9 z2r+R(JCAAB85PsT6gz8H+uN{*3^rf-QD^<*`v=#g$eyle-Q+g;QBdiJXP`hFcqg0#WR^1h)oaL=a&>JIBK-vNzsUnazRS#xKRnxGq|F$V` zn)FYQuE_yNWdITz0_oH>scnMxDo&V!*NO|;4}g1`)CewvG?~7E2@MKdiShuKC&1-3 z^8eDk?dEbdkP#qHOl2*jsJ8+BC6R)d#S(>D9s3R-AAB=jKqW z^x71|_rW9!-B5Z_QEFqJkD{W>KySwZ#I0)`wLHMBw`Wm-6QIBsrUqZqOK!bbVH@># zoo~z$REEJEmAM=$^8gskxpZ6tBhN^GG-{|1+zFjFD7K->NUGYp+>U)nQCpC;`VKBQ zyB61OBr>7SJn}42BC9R&08tM{*?uHBPN29M?4SKh+Wd{OJL2}5`I$&RW*Op7OiF5N zK!pd*Ult1Q;rpeZdHsm;+6KgzvG`m6V8UH(fOrj8Xn@duPy^%>E+U_}97TT=b}|M@ zazN^dz>1$cSs}|tvoMy7RKu=B{w)fg^T~*CQmcy#JsQG<{#y4@hd~3@iLa*Kd3H&x z|LPlRN-m0!)Tm%d;qtR{pTkQw=N3|Cq6QMLcedl3&LF8RXcp6gS&T7Vrg}k++{i{v zA)K{W{T3kHM3e4_B0DvbD7?Uy0&aY$K?<L7)yGw4=UB)cjX*B+tVzJlCNWLg)b z7azBSZB9R?1P!Vi+s4O58<)Pl?pK1vC!mOsZq>xZbSa_bN~(e9g)h_Z0}sK(Soq3+ z4;q-m*qe8ZM`nhU9zDwWlqFSG|4_7FdV`@8K9m&Its?d0XHK^nOI3%L+f|xKsi30J zuU?ZQRUOB+-hgKCk(!OCzne(!$;egiVQW|h8Y0gdOLm!A?vjq-j!J(aBsd$zNjE+> z7hQ`A7PeiNrl#mVr3>;{Pq_@xIlBRLqiCFrb%$~GAJD0fAAP}i`#0zs4yP2XQ*{iW zutMTjM;3hi4Ts&b zxIhDPgTZ$;Y{tJmTKdoE@nbH5pUz0fi`IJT3wdZr5wi@tBKda@3A($Ph8EygFI_n0 z2QNb*I|fzL`cnl}pQo{T*E{~lZ-{QJhK%Q$Nji#1aKgG-Z)_h$TGS4uHZi6}U1Ik< zG)pLKtFz~m=RQLDN`T6sgl|AALa+ZZ^YZtwdMXtSd-5$3wdqz=IhwQLOr=rZ=!6QF z{)@TX6?BIbg;^C+ISD2ryZLb&fS%AaCZ6k#Tnj_~kFliAVd}LyC{AyzL;520H);KmgCU<1qv#WjLghu- zxN}~p)pW&TT^A8I=}sK#FiU~tJ5p*N*9Xbqs-w%=Ma>jqo`-6fg^HqzYIxfHS@~ev zI$PtcLdDJo@hOdurr!JO|2X5N^Ww>^<)w%f+S>Xyl}4u^+7?1hfr@ucfQs*vSg@~A zgw3yppzF8zoI$q-R(uK@I3-3ezQCpFxk6oqMs@2+CCQOnxN72})KWH>Sx|}9f?942 z3Sl`vyn5#s#owSA#`4unWH%l``x=|nxmULpCWqHwCL}Ntyv-uYxXJpYy-ZB z$Hp$9ff(}XS$$xWNUby5`pK*nFyU6BdJ0nkr0fDXcWBp@Z0j3WzZZW$uj&vyppl6u&T0N5lQ#A5z37tLRoOvLCkIfVwN4kxn6mNHGPey~)03C^i31 z(484dOHq>i(Nqtq6?-*fC+t8$YSjL_&P;je#N-P~+rqwo4~-S>XU|`3=56z@gn7o$ zcmxV-YZkF@__T*X3|UcGIh}lI`Ky(aEmPCC=rvIPDL-`-Bj;0pS99kN37X>8mNSi- zuMW3~QShWiM@cK+LF6P7YP{arsX4IO@~#ish^wwFx63f7!9AKKE$U;pysXdCtl&%C$T7cBeovYrCT@@S5Y;QN2KL=j*C4@y+q1^hSB8g~= z6`cHJ+Ij^Wb3$*-ZnWQGOw=ujly2^Eha#0x3)~gW zJygK9-ikGF|FHf(({akCAvA{eY+Y2n+a$)Wchu4f+E?auP?`01Ct*nCn9`qu$>(0| zn>SYTBe|KORZnACA9~Jx-k7ebYxzFwHZ`{1fSzo9A(z%5<76u=I(q~g!2S>Z$n2?w z5f=QRey`7DIZtXlq%G2kcxp<+cg2}haOS4FUuNhtiZkQk))FTpGDA+o z^*%L=Z84;D=uc^@?6Ar72BdJ;qP(L{C=J=E(XrXmt`FKtkC9yu-fW=Ok)e(oGPTt^ zkW#QmRnBl~ZhwrQfOM)6yRAP~jOU;j^V|HJpcpgL7)_lfwDuG+c%~|62bSm?7<9Kz zPD{oaMGA>*G0w)^DbyUx9dPN^s#ikT zI}+Q!y{hc(zUoBuoYK%;$=h|v8xdASdSA3{=%Q`oJ=|Jcw3->i_wP9Qg!1xY+aFT; zAZ;n>4f;;DzM`}7J-F0CyV7J%NqKUMSWVm&wG>_d0!c}-sEL%CEn47DvQ+nWa7u`> zl?vhLUL@|#YKx#K)eEFlwJS=sCM)d5gV~!-#A8z$w%LIPQT7l-vk0QM0FiAX2$d3Y zjq0BeMa}NDcN8s(`aONR`a;E;QX?sbV}F+$*3>NncggP2;8`DZ6jy!aY^Fi>96pjO zDw{LvEZok=6&8K)osZgFe9$RXaH>4xW_RqP)=;s*4IoJq@8;)kczZ#23`ZM%H{`4m z4e|fs5Yhlc(W2mQ_qXw8PY#7ibOY_s%D3~x9ZR=Ab3eva?*xx>QxQiAp 
[... GIT binary patch data (base85-encoded JPEG) omitted ...]

literal 0
HcmV?d00001

diff --git a/contrib/PanopticDeepLab/docs/visualization_instance_added.jpg b/contrib/PanopticDeepLab/docs/visualization_instance_added.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a9ce9f7f7a5bac7578d338eabb2b768cf55754e7
GIT binary patch
literal 68681

[... GIT binary patch data (base85-encoded JPEG) omitted ...]
zp^$Z^TfM}<$L>~Fo{qnxBl=@V&Jmqin9oEq(i8o-0n2Lk7{1-wB(#KJdG2>uN)jx0 zTxVdp6|j6lusk{d%Nt33?K^Nn3p!yYPG~^G>;HVhD=PgbT>C1kuQ^>%e3xZr`-70a z6`_T%d)=~eFoB+rOLr{c5fFLcTK~vQv%P*im1`L&R)9aF9omksq@hm|1IM*^wYleV zQy$B#{gNsmMyHw&9Vs4|WarChTak}?O3Q^o|ML+`=!mK{g@;R@vxnPD9tT!khlA1~2gUJ>98^9W)B$XLrd^342Nj_PTs~ES z_1!@pUXna~ItREnpZf9)knKbI0NGY7*?jr)K|ap2E!Ufa&Dat->tx>XzaWvSI=Zt; z`E62j8tr>J<~D9vQe)N}+w3CBoK5HOM|r`A#Z%=bxU{Fi4C}r=`j}I1GTOjmj9Qze z{%8XgXan}Q(FV?=4XAw?e%j}Kua|Mjq$%a5$rUwwja38$K;?6ABzKh=K9Hx1_%on;+LJ~6F#m2QU`-v4r{J?^OC z{X+kL99N-XY2}ZN(_>B19k%^fclZn4!Q&0r;zI(h?<*9G-T{f{pooW;V%ns?f5(9BA2Kr{jK#go}QfA zr=+2X(+XvV9g;>PNwl3Nt>QkSGKQJ+er+b~(+;KKE)T=q2LCI^OOp8)X}EVuxv$*$ zru>W(DUuZJ)}TmiXLGJ>q@n!B_N~g#*3!_bNJDE54Xy6X&}xcZ$m}H;5Nc@`l8ils ziN)%%!n#);IPaTI!u!|R$(|8{m=>nnW>(DRqH%8}t#>X}q|N(Koq#nVH5XZe`2!qU z6wPIszd5a;G1)1ri(IM@v~+!Oa&$*^(~nsyWoN2yFzEjOlzIAG z!Qg08R z5hZ1_s`zA8pH||f-;k7YtxYCN+!I~R^S=TgSm`&kgq3anibX*5EYEZ&FE8GV52A(`l`Dm( zPpJJ|Rn{9+{VzG$fIJAIQxBraOb*s@#ervsE_{a!%;4&>^BDcdbjIezE*3AcIer*& zFNE^#BIUb;)!_ZWz?k(=K5uMx?j72cD|ztajCTXf_>WY$vQxqAIlW00l1UY+pbE41 zK?;3Sf5FG;P8xQ^5@Ej7J0ja-AFe?vIMC7Aso=cVUB6)7#qxyJy1en54Wgwb`8$2F z7wwjrr`+3ns3~mr8sdoDmq*JSm5L6p34Oc&hw1tcr`peo^nM=745<~A3TLEZG*V$E z`=@i6^5u~S0b)l2TnhoBjn>o`1hJ+LKqtS1PQG1X>Y3O3j(<)MSTkgI)x=?QX+XG- zI~^TIa$=nr70y=gx;dC>2Hr7f1_$9?m@U!=b9k2n^A!);@B6xY9{yD8MN?SaS>fV* zUS;M9lkM49cCEm&ORJDN{!Q5H*}438TxKrZ>d_=Xi|yp1A%p3%WUXZ1fm4Z>o%MG0YkF$>@J2;4akr5Fu@ zEPDzMQ2ZDkVD{Mi+6l^4TTWSLWB=gFTHcePi~jd?RM6HQ|0l=nbc_WKM7+LT@;mZ+ z-*9D3YKoH76!FhQN2`S5Q#8hlBMdiGuYnte#0?m@h=&9S4*+7tg9rN-&W3XMq!LLa zK@xSvUrFLeOQd{jkffyyP#2MNgg!eLd`5K|aAwE`BT-zClQbpE}Y(w@b_hnnp(=Ov#vi zRcgzz!nmUs$+xP0+dBa#y5`%(jcdL#3|6$hm_PMQPSXPw$M>4zgOk3FI_3{Y``4?{ z=0TGu2TZw=2J2T&)~}Y@P%TQunvJ$unuNFkd)af|D6<{%d4Cfz>N z-0sYKvoXi!sO^v8+v+JNIg}F)GR&^bk)!ed8woO z9YkM2bJw*GRlXwFA2jXxvHE4#@z$|LqD|XPS%daRSSeL0=*UpW={)XAz34^wu*%qE zY7ZIzQ#m!ZUAN9|?I>Bf_RXU?oELBdbY;cDbLK7f297$D*&2IR9G|S-zP!-id9Sl@ z6irtTQWyvG!#}>C9)Axwde-)BTyio#h*1ZwVEYaq_>RDfn8vaC3NtX5H7xBtUu{40 zt|_gP6Yt`lYZ_goet_JrSc2VyAg{W}E?b{t$p&T~744QEeTi zteg#_yY_(5`N^E#aGI<6u!;3e!;M%IQsyUDuB2X3{C@hf7cDkVXd&x(uS=}4C%tpT zJH6;BquCzpDm+-jC0H+`aQg2mRO-#Jq;#2?Obz+ZbV(y!o_mn4VMx-4EHvnUrt9h; z(2hp>JS@&+x3e7S>Q&D(me;=f^0qRt91D!?V!!xu<~|Cx26Gj+WMiL0?|I<`l&~2~ zHjBBGIhJq!npv~xQru`<+RnzMEwk07jkVcAxK^G4O_~j)l_pHO?bA*^E)hO%cy=X! 
zZUISB0$Y+E7MABOE&X=2wJX8rkQw!BD`>Noi(PkU;*;LlC8x&-i(J~#nG}s7jAUu0 zyWyg~Wls}UESZE=FEc!^#qj)07bUb*-N$zz1U+l!0+}p6-(2A`vSs|Dd|2La6D zE^wv43Vzm?054@&eddvyQO$T#qrgEib-*xSLO4pW!r1YG1pleU;XseYqkjyiM!s ze^^fM%DwgVrbY>@d`WLzhoY5V&${R-Q!_`xl88c|z1Klc|-g4cyNkdz%gU_-&Irq=1=G9|CT*%J<10eD9T16}5e< z(qS%dw6Zf{mP+CyY#w&WOv2!|KZHrmGNW4MA6D^nzcU~9=3MIH!`kj_+}tGGu=WyR zkG%a_xax&-ze$vwuK2an+w+b(o}2S5MEx#qJ|atT_qLj~PkKUL$BKG=L7SZBdT*hp z^FFQYR=o+wr^4QOx^?3dz2#t5pMlDE6jvT^zWB%^2+ufV&Y;E3Dum~eN$5%1RJ<1J zm~7fvXRvnGZl1Qi=fjSHTT!z0lU>A1@|{32ZBQ)t3n+GfzoEb4)Uot1Lgmz%IlMWJ zv)0&lmpHWCZauMu(e51ZsS12*m1pNAHw}F9+%rvj2!yD>pL|~ygy;hak#W3>XJS;w z@)g(hd*{yXzh`)l)#ziXi!Mg4K+XC)zoI4p)d>Y^d@FI4XBrr|To@z225yLFK0wcp zPCm4WXa4io61sxBXXRa)+dTFT8#K%fehaqvB2c7$(2NIY&gT$l64{{PB+%3nXfy$u zNv-{J@s#KB==gs<0ccvv0Gizw?aU@viDiuOfAaj!R=Kze1l6gpkgR-XEn{`_9@-SbY&zS0C7b;q9^;*%4gxcM6on1~aa)4dVx9MxzkQjh@T81@G%ZOu% z5Yckc<%763{BgsR#W|Nn3!<@4VAN%V_|%-@S6%U+aY;P#xQf}2oZQURMN6LSK4@S( z6sg(qO|z^xpGgh>*U6$xUi?XM6W_BGcY7@5!#cHNteh+h8oAQtN?A@*4!tLV;e*S$ zVhNWs7z-oDasHJr6Bxdf5MN5cm(YuL)_;{~ReLF}U z;LEP=EWw;t5RSHZ5{^$(n zC(fL$M8o~0hr89s*jCF^!&OJa)kecDFM&D?4Ga`t9`ao)-|uQd=qa(f^7W8V+8ULw zg}o~ouy=#tSLWNnugn4n#0Uhl)-Wyi0M3@_TYRR`R1 z2K})iTfT3J%RP1FZ4d2Es`MyyEe<|VQ+cM?7kl3^g&OSU3bsfp z>hgV0wR`W6e_ede?(}z*)F$zSmXAe=W1Lp#t)_UN1hm#G^-_h4oUkKI!E{p-a0^Bk z=MJTFN7A_)Q%D4ggqq$vFz40|sY(;dG*cC<&d86ZBTw+_U9Dz|V zFbG3W#CB#ke0l9IZBYUI2QF3{Vd8ne0FzE3&BH;MhbL~Gal)h_9rLg>&BM8vhp()! zH|%64zYEO|lti+GnsByqh#3ADY}4T~nB6mTJaTMHTlp;~r@&ktL0vBx-aoC|)cv0C zfKzj!NGWX(qe%f9a)p|VH5mnrDg0Wb(SG|9I|*bsja^v|!dR2d^wF%)#6MAHBYsi?1{y+qA`LWzc#Aqhygq~YtAu!ZK#|w@rqAWAxh7ot zc2d2)cI^5$%tZb9+4YyB`d=ET|B50Z^~YrsjJfOMT4TFwW9QtS zGP7daM+zo(DqjKR(nTr;U-`mb^ey->JU{tg-ks08MxLxOC@~^Hze0l z+Nl^zN={RlHI%&n!$-a7kaMv^g@>GYV6)Z@eT|;MUfRR%Ycgo^Wmo{|uAR_YdnqCG z&FcKteKb^!i>s1IR!Y5BAcQfDsaq|f70H6k6C z0+GH0lD3W{Eeevhm9-x|eU+}}CVpm1+R(#4#IMvEbPN$Pcx@h~11Xzb0V#`b)hyEk zZAG%M?SDhC?%WSpKRvdvBEugi_DQ7^2cfjlRO887?b<$$_8pu;>?Xa+(j;_<{YyWrvl!|s6LnXEx-adZUsqzFU-L-6^4a=z@elo4ABsy4&zG1D7t<2! 
z!%o-~`*W}%k!;Gb5SI@z(_mAs6nyO+_8mMmO4<3qn)f~Yz52a|M@P<9vN;+l zK|Zw(M_WvY-E!e`nA5XEKf%&k3zl<#WyJnaf`&ZXaKustOnuc@AtV~JTGMtZw5C*6 zYf}G+o4V*ir@M0ft;tm&*vmH{5$%`vQ)Jf=6-Vm1h17HR!d4ntv*G!_ z#3`A{S_kymbA_1-S&cWugFOz+qcu<_Jmp1rO0%KZGO9$-ma$~CxnKddjI~ABEhf_w zJznO5xnE|VKF4~|mSk3#2Ci=kQ?=pTTu6fgpg}35LEf-c2S|g?H$j62D9KyE^w^`& zT(f(3^E7PND4bq**6RxwXE~`)@&Qtx(+ewHkFNgTOHC6tBFq5AXsi5$R^Hfz(ABHS zIZjPmgxm632~6EWxN6`TRf6f#hpf}bd!MJdz&fpk(i}lEdEWBJDMt;Q(uGd>>fcUz z2B%y(aLVJKAUqpLcmzrjlgn7XFBh3SbxHsZpZz`>!H=`Eeyk!UerDF(9-g9`k!pUxq zJ5%-?R)6Z6YePGx6xNQ(H<%W1cx0(B-`I(Fq}JDdt!pq=zSC+iLtU#YS?a0`HsYA` zAV1RLf09y~O-g1K^nO~?O!GJGcb!&Z59ftFTr=%=kJ5g(nf7p3u!k$H+vjr3sqnbe zSw~FBks+6F!GydaqdetMp1pX26saawr>kkH*caw;K<)+03AK!a6DlAO-z~-4k7#No zuWZYY(!bUSJWH))(ce~hy)V0!Fl$DQ>gN3f9=w*Am_lwi3vPHnJ%xTuZg@Z3FthT% z*786}S+PEyf5n5ldG3=yPQTOva_SLyKNJGISJ;@9zz4Y7_YUZgN8x*X_p%)xhPt@4 z!S$OSqn0`u_`m>wRvmj~;esyWo#rebj{~J8dPSKEQfCuV8eFLvm^c(~v}S3{$yWRPHP)$6fT#%59*^C8Khy zsB$q>xh)m2v2tYU{&I(`ia=He!<*|Q!jiv(Z;PJH>VcEc18?km+&OXfSkY7wmk}2Y zeS(2Y`*G-qX~1O*tulGB%2Z~};ZK&M6~>dbF~d~C!&sXv;PD#ak?x~@!z2Cs2*%n} zkhQsw2Y0mJAcMY*40KF)fGO7cKZ`ZQz9*^gAr7K{OEzozf{Va zIHr$lHHKqUsUt9LSL$Kf4%~-nn`Tyjs$FL=!z`8oE4A>@fA#K2>zuWa3e`eLg{U?m z?L)&i9~WCk-}ao`VS;rveIY7zAY20%soTvU7V(2cKYwS@Yp|&FHx|+4^a%^YHK8Q`oLgEnoM-JrlxB-qrx?|e*VK35 zYwXvAqce9HrwlEOUEz$5mb~~~w`Vut7!iOy>8p~1-Kp!&Lg#;82wXmb^$={P3+_Ij zq+aU2G{*YcBenL&}o|)&Txxrm)%SU){%1Sr!HJ55` zoTivHmxse{anzBYs|3X4&h6biBzs!7|B?G$m;Soqy-Z+YOGeXSz0Va^^2Wxt^QMiF zyCm&%GhJO<^ftfY!&f`!6sJxPnclNB?89QI8q1x_bLRWkP5h}g=f!Q_1r2I#*>$3) zc>GO%%;%Xt_tUIBbti>XFrv!`YqN3~a((q8%>_l8Jq>&s0R-hJ??d0%I0 z*t}V%mY?9>*wV~~pN=3)$c9o4d=Y`SZ-7Jjth|&!0*e z74mwkVZ;KLmXSmJ>mPcbxH)CYge?~~HB7jZwENV7LP6V8~Jlpt5SIE;iL!m7b~jvWnN!yzr7&UlSepeqlx!@ zoFLh-ul-D0UDZQz`-V*uhD&yo^>r+l>`?!}Z}2VX>k`S1#x$GlMV61gHW*0ySn8w) z@d&rJG-&GIQ*5<-hl3k5SABUCIiYCV_~g}6p($pAM3cr{j=#Mr!tO!ioWwfqrOJ<6 zE(XZN-H9@F(rg{o6Mp{FX17W9qD~feJ{p{wx1A@Q95>N{_uf0IZUPEJg8b;uU`>T*rdq>ot%0o)q3 za_KYV+W)dQ-*@Qv%kC%QoSQ>i=W*P1w7+3wwJpL@|LW<9SCqvgg{>t^IE`mJY7dq1 zj66MrzZ)?9`5rJ8E$qIrL?ApnN|i&hbN{G>wPU`Tn#ivE%3~_^eOr`vr1*rk^K~xE z`tBK;eN5W_{p6Y(otwr*j@~gddqv*NmWy5AZTeQ&^cFa)h#y{{8-CoS=M2E0+c~^` zY3rdJa~H*q(M_HCKcFLSS2G@;sb+tfoVxsoh<^MeAG8}3bO3R1G$UCbXbdZ0|w zD3jQ2lxYr?=^d*~%~Yo0D3jvzRSq|njg}wRV>D1Ep9v__=YcZaoAyFNY;3B@wEwA6 zHdLva!#=vz+Vq z^WeYq7&#iuia!2Y)}<~#FG_0hK5#8DuEA?hh^17gFsn#fC=xfjNPNE)$&^*3 z_TNR)N0ByBk(}DI8imL4M_B-%R?tM@t57#Ig91) zj5oFaF#bSc_s=Ak=n;O8l=h^h_U0&+RK$vnYRfPjdstP74z>x{%;vIGNqkSdsc+8@ zOa6>cD3g{D-=T4m5RE>Oy}{cQoS!^a+dbWVw`r_Ol4IGX?FC^6(_d`?Ktlqa?}iaGH%0hzMdInEmr0hLJ z+!%56M@QSMVv?#+g!U?(Z}SW@n4`9Fi#(t?=H9ti%CC&Ww% z6!|0gTcAkPOm^^7E+BYWV#mGD2)=DvfADT2yW5gD`TJA-aeb{^-}8h3`x$$MWbB2U zC*HsF;E~GC-r5P-Zy~=7uNM%npRICmUkzSoCnT?!?)Ku9;%K+;>Qjd6cf8Mzv2P@) z(qFN_>*7kJ+=rd=U!qQe1RwVLCF-m}&7a0K>9xj+swI`K@*d}_S)2ThNqJyf&r#g~ z&Dx`T#4;U#{FITFDuLPBE?$F9_qBO*0r2wZk0G9K>Rg=KeNVFiKb8QW#s<6`054$< zz>m@^oQ`m7 zP>|CJ0q3hfCUKl(1+LiD-~KsHZaTBQ=z_~c5vOppLr=8B;i58p)>%V0lIH?PHe? z9Dlq0lDz9Bj*yeaafH+1<-qBA;574W^aXWE2!gud0u)){_afV%$Qs0{vEbA{i~Mu> z&CIxXD)M`+ld=@xu96=kGKaGRETaIGeO@QFe4Z&hkriM{`M(AjYIR9|X!L&sc#8rI zBNQ?L1_OnD3<@bg4x*0`Oz7c`KT-H|H3GcM3eb)MH2y8XN(8uPN{)+Dc96QE+nmsp z+9`D#@)CQbwq$rIFahChi0cr z-Z7clLlCtG8Gol(nt-J9?w>4Q+_7quuer=`llXU%Cn zE31Tg0#Nt+Z^Z;DQ4!|r4Z5)Xqg`MW?E?ZmZHY6yw@86>VI z!szKPA1tE)BsR_%n;N;0-24`}d9llI^Ja{j-~1^hdl|X;qp(d%-MjZW*g0_wg}Y7M zMef#JcaBVE;FFmx_A@Vz6mf!8nC5Qzj~M(BgO)Wb23LyV${#VTXU8z&e;b1-#b8M> zXsksHi`X&z+*Qo4%$=FN?fBczw_=}NQ0jDRzN=7gF0rerA@D`XLzO-to@1TsnoON? 
zgXnST7e0q_q{pM9n_O92AUc=wu;o8u2&X*wQyyR+Wwh90VXSaxysGyJ15xcwFzx9A zUXM7_jnEmK64;&L6&drpaD1!K84g`K|4CpaI)kb?Ili6o&XYLvJmp87G}c_;l?Yk& zyhVVv2IJa_q7MlzTDmpdg@o$~3D>< z{2nnkfpB%hVZ}=S9>`Lf5H2GS4Fry^%Dh7pbA?x|>uuqmWyF+D4qpzBfw;`bNWDqQz?xMpFkjINV*12Af&j@o#lHeu}A2$7J> ztOtbzL?Ar3FhIJEl=vxnvXG2Y0oy2^XRUS=g)-KNh$8eRYY6ojf8nMwLrK?Mzo~*t$IK|Y>cpmCT3f<2sQJ9!abAs|+aEgoE)(di5 z3E-3opqvf2DNZOf(jj`$-A&}oAs;NYB$JDFtGV0}(%c_?~9!7yzuU=h$ zmQeY9qW(AWFzH*g_wd8s!w!27Y2S_4Ob5aGbGvT5Hgw0ck*`Sfo|5SOB+=Us(No+n zkm~w`@DseUORj7&1c$)QeIEF^$Z%Z~T+f&QuB(FU*>J&Lh47A+KozSl&o^#4yQ_?WQA9hV`w0eD8xW=L+aD4Y3?&#NR+sq zXkHSi1+u3gE6>wGcEw<%#~SGw%8*MOx!U zSBWyihs;bsW`@4UXta>6f^H5_!Sow&kz3)&Ic+V9`O|i=c}~j>Z(7Hhnu6z}i09GZ zIc)-l#+?6Tz-$8=6TtOG;`;lQ3%u;V7 z;IyHKaioWCbuN#W(W+zhK(A5;ucU*(t9tY*6`JjhoY>W3rkJ9Zi~NnRD~gZ89FDE% zcZtJYVZwSYqiRR7u8?G$;&*T?bHzz^Yz^$#C=}uBuY(M2dOhr@V`QLp7~4(mK1{&w zj()o{a2bGo7r^Fr>r(fBJK!Z=Mg!Q-3D}bG5}7&B>cA58@xyHAYHD?$(Ca+pl3`5&=Z%pDj7{9g`_UBi@J!wMCxq5>`L037xNpxB6MZ2 zJDVmt+YNLP%Ua5kHaklzkfnUeQdIhHtn(nGZ5xg1Ho*sYEnxrKaLId;<3n*D{16FmZ*)GC?iYLEzw5yqm2xv zHnI$JoH@090TI@y**_cIRrIb(yWx8#rlI_&Qs0vLM{m0))SuSoZ#V&s%5ZFKT9+BP z5<{FHywI*^-D3qfA=xS9gy6lUwd6o%tN02xCQH}=TRj6zGr&}c2GiCq61NCkaP-~7H}!ePVw!T0vr zCi#SMEnxf%T>T$mWxFFR?z0GMBf>iQUaw8P;UzF0+H@=Sn=<-Zp+wPvqZrp}%z*Vv z0F(Dc5b-FxztM+V#J|k#Sm<(kkv51x8xotqKMPD3>P6==`eb`uI*uT?$3bBSmp>-6 zBu$#NJLQn4fH(w83{w8i^dCq+*$gXivX`9n95B!=8A%(k0v+Ktk2>ACfG}@A$ks*( zx~vLhi$k4dnK)DctaJugSpyPEavsVCR!{ik-Y|n^zpoAKj%N*6WnC}{>+|-Bcj=rN zH4At@2)rLW`w!)kd5b<%oB+Hhk#cP=h?E*)4I}pq-6gO0V_2`p*q?R5HutPeW7A(@ zYupbtW2h^uL6aU3Y7Y+#4H0`&7b6 zQ}1rE2{V~@yU4}D+q|zuliLpi+eVlKeL@^B8*$8LV;Be!IRbr6BL%p(*qsvpfecLma_ATxTFx)GKWM(3n1|%LjZ1GSe@!;X7U}<5{ zAM88?J3MP^ohkS$+*Yu#YEKA-dRw9XLq5=X6e|rz;Qa5*kCo{9*x%wKyk@o4>nuNj154ik=aZtYK7jX{~mtR4uSL2`nxny0CNF z5lG$q&qLPL|Yr;LSH-oRM+tQ8DX#Oe1{Uf^QJ(!Gv^i{jkR3TD9 zM3KW}P75I(lZR*yOT{jZw8iRwFc`x96ri+2TjkgZ7Mr7EZ7zVwe1o>Q>$30~Px<7y*UPL0@!8Uv5JHt>z-I{*MQzKf}S;OhdiL?z6#LuMf;>V zBatS-le%W?u_wb1V^1KPzlwpdW65Zo@(WpV@>CjN6O@_tyeoLK8WfLKQYdo~L8d@x20=`}WU#DY9kd9Gf%}%B@ zsI#_vgJoz@aEACgoA`PYy`WMaepMCu(t7vubqx2$&#htOToB+u>*kf+JaeReC+R=YzIcjFGtz4@}Uf{)mJiG@*mM#1iC% z!f~P@Rh%7;%r;7p>Vg+9CtYX11z;yD>epmhP_j&>^@oNl$$`*ZXMbtI(BVKp&jiqQ zZ?n98Mid;ZHXJOiI1HALmS1d1a4M!L+8heM~ zPs?BT&DXj!cX%{8n%7Ig6GprBHE4h4qT*Apa%{`LFyD!KN zo`WAfLEt1BRFpXYeuDtM8~_gmz<&bi$tt7e`#oY*9yO^sq_2OWNs;>z_=>2Sm8qMP zFxZeVWU__9-2)1nKJ4<3g}2W&JRxWOIJw_hX9h7+Wvnwt#*HU(Gz;JaC+^Gl@(k$u zj6`Y~6;hoF370561BGm+LMl;HDh>DJV)WoX>A}D@D#TeJRaKmvPXOG|d$MGas_gKJ z5Z-oL5ZRG#Wm5x4pG@Ih_+)+YD9vbO&SV+6SG@#lEuyybzhWbhSv9?OwQRG7jeJM5 zK1`bN3f9Un(glJw2SN7xSSH8;(dq&A>ybE_S^ulZjL9~-4n!t~`v647g~<3=B>XLF z6j^dLA`>t|WJeI07Ov&~caiatfQ|+5+i1jYM?aT2g??UQgMO|NnBCSZwsBp^DOqlk z2Oh`+gZT8x4C3#tk@TsT$NHKNWCY^p1;7klEQfl|A{p&yeIv?gO|t zVnC{zsxY)o{|M6w>yRbxqDUzfLt1qfq2WlAtHr8QEes#Z(0L?jJ(Af#E2IJIxcM3S za^(d`yEqD%NHfuF(D2+_l6K>HM8nk%a)r_~M=C#b8b(8Z|3<`(#}fVcChX3wsInmf zD{TWhr={>2>0tlxIB7(Y^eOB~LMfIK#PS#;Idy3vkh0NP$4o}1Ve-M$R8>X1AaV(-`480M_#x(C8gRy>T zF~*80jt~Q;|NB@#u=)6D?diLN@Ad_D#~%cI7|5XY$IrLnGhR)|&;c^O5i$&#zM681 zNT@U4e*NFReHrW9kAN}P!I*}HtSgh^vB<$hFlHjt>#YXHz&&8hPGZaiFh)&?zEqF> zr8*?OYRs43Sjl{8*nfTLb{v_!#co_c-~~xxIA(!JVV0uJVPu!dJF^dahk4*7>Bej5 z#!0#DZza;7670eusu6nuyU>cN9E=}fnB&|8#35T_M_H;EM6mEL0Q?^TH;dyNyl4|p zg$oB8e3dfDr=|oJZo-#uV)}OUz>@MY9K;Ps6sT7)5jm#QzQ$=~#;;13UgYG zbIdn8{PE3JuB-y*92Cqfl zD1m(c_4^Iz_wy1Zr}Y1R6EX+s)qln>hxn(VGCr+{f4UjsS4G%^;4>o^lFuAa?4bks z*-ZI4O!>J_`8oZ`dNKw(PRft5GV(v*8bPqc)v?0`IJ34|)IMB%FOQS(eRktD*=5ub zPGJtYFv)&z5X~alM+RmQgKK(f-V+Lf)x8rx_N=}UB`>rb6rxKUmCb4_!!?oRZPvgq 
zXRmu>u2C55`oQ}vb^>dFCg$drZb)z99Q+KdxCho2Mu1i3{$~&MG6e$PR%NbuQQmZzsPoccV8dSRZ`6ILUqp zim{1|kq}Vpitn>yRAKBuRT$JpQdKNa6&VdE&~W;`8%+8%P7^((Phn5Mh3?&mv~eJ9 z^3foumTiFGVw*8v=IO&0R=}*iZpZ*ke!tGn4 z@@V;8v>E(6n*zj{^^{F-`W72}U0KlEs^}36xMkLm707&yc~c*E!i?-_+Xj3*yN%08 z8sOtiVS#Tuw*Aj1E@-H|*#DuG$n1gA3H57ZCb*aq$20wOGz}3M^#gO_>B z%7QaxfeQX-76@mWHcOii6^+2BF2qss4G}4JWd2aRnc{Tl%U|NVMxx>MIkRD6|%oZQ{NvAvtTi~2w91H@F+cqp0@qkX+qH{7eh;d z(|o2APd!-O75c~tJ^2=W8tX(sBp;|*Ty_AT7|>F3Xb~K@bYP!0f%a*aX`dFpKH+5C z5cG(XYlxTPs27tZRyb|TC^_7$aFSW!FqWL0A4$;~R%1#;W6~UpnrWv}_3Ej5IS)~* zLaJ3GeWI#O_M=cF$TBd{k)=8*6Le-#ot&snA;cSPBxI?DlZ)S&4VE>WSYJ#4h)?Q5 zXmKlQfcJ_GI;;_jM3{P)d`#I8Za0?bUz!uv@hE2yx|*(9a4lwFOmvblC0oi1TbhlR5}eOTJ{n#Uogf>}-Ju`y>lyusj3t%S@Y=?O zW&a*MJZq&*=N$HqhmxX>_Jdk6?WaEy0gA+fA_=AMkDy4hu9y92NGlL35~S;lp;(ah zN777@Y{Vn(y=rWO{OOt@|;oKMTy6Js9>QfK6Y@${{BNHi$CzJ&! zX0%)L1gdf-4L$LLDOF0G7<2;vt)s$;vD>PmMl7^Zur`<`l|83gsbY;)eK2z&{!@qNWR-II71^KIL68_e?hpvz6Hr8y|+_F(T1};NfOlPs1D?>6xJuP zenQm`?w9yB~0a%j)sLDxx0+AF0g+dh}C69a&ZdHMl^Hxh$vTL|_0e$3<16 zvv6ks{o`(;&i1(H>EE0&kdv4R$a$Xw^p=9V#XMc7@IkkRnkmOs-G*#Gjrc-4|mkRG-N2|KOPiw$6nxM1W`=j1}Jua11RSD zv8SLvc~L4Pfcx?Lxca+;J8SROwyaW85-p4Q-259cqd*`Y}pRt9;-%$co4k(Vy-}RT4A~{GzO_+56FVX%Y1n&>0$mG&Hv( z4f6I>%|XnTT!Lm?2i$n88PYIfMD8oF2$CIgg)u%N_alk^fW;u*eS)s&@fvI2m^-oT z2G&f>d-ZH4Dm98~14ZReQR$JIu$UWZNm2FS$BrOt3dou?vR1?b85qW_HJCc{QJoLr zHP^VI(ZH>-3E-L-)6b0tS|pteC8vRCYVfDKZhth?wWhmC%P_>ANyI)0VmE|APy~2M zQ08F5sx|Gm2%N5zb80cj%~7;r?x=ftZCb3H<0~0i(j-|L2U#i>r^@jV6!_qOo=oib zqnVExruu^V-0(QU|9P06vvimU90ppVa&jplNO}TtAb=bwu}XtckGbW( zKfkFvuc~M0%K)6#otVwUJ)3eXfRaeN}3cwS4AKlFCNAa|!jb4u^_Og*V6yX~!tN=bw{dJX$(k1RSfk>= zj|DeQYA+FPxz?NVJys&6uji{v`oTnV{y~FY>OLcVGHECm7OvqThF@0};Zt^eb>;0i*ytE>oYtSH}@Q;m;?dT`Q;3s64 z>>HaCl;B#KH%Rk_fEn+r6-&N8a^LXHs$SLqNorE>dGnUkz8>vB*SnvG40>E=^;7=a zF!~8s{6v0fWrl6tN24F}9*>{HAvj#Fr0_wk@$5qtoo6?E`+C#m!v;j$H^JiO+y{dO z#Wi1eG^V?le&QW|Le>GnACP~Q!X20CsjPAIRATFwotsyMIW9GO(e2!5s25Jhjn}A% z&zms+P{@HDd6I($eM`6;JM_#wBmOJ9LB!3aIXnBXl0b5k|M; z2y_5Hhcf@Ib;z26-WJEfgN|74KoTnd~rc`;>BYxi#L)~y36(5yoW;;NZyTgP3F&Bz`|*-J{%e1S-7Gbb*fff>h7wzHFIll*?BBVWL`B~H)*!V&>hg;j3d<=ugDj%) zl|Lyu<}1&z({$v+9(-lk#DTAzkFU&?c$j=R*i3(QOJ#6l(t$UL7u=K>fINOP|In0; zFY(nDP5A0oTNXpO<0eyz5(dcOp8;|N9bZ^x)9Pxq;DB9^Tpxa2`d5ZsMn?hFIUi7; z1V=D4nV3m{U`rE9iS9BtNV=K zBkMqQ9!YiPCvw-7gWM7)h}`pRZdKJ~ArZk0O$KRRMj~zuBqEi5Pa)-gCMCjLceD^m3!N3Dt8RwOMw_ZlNi3OuHgIY{M^I~ zX081PpNRDNKcVz}Li)Jukv=^L4EMJRO5c)!&>#Mv_O3H3s8dR8Q~yK~gGptqmp~#nQVEnyMiRY$ zKoWgyrAbU7!C;dZ9`B}6JcA6D!>M_RtWz5kT3@RNNZ{likUCZ438(H5G1+`{8AzS7 zju4#e{}Z9@$A2Nv?TZAPB(NE)$=FI!-OoDT4CjX~OWC<1 ze$4fg(7ijE&QO)XSHOqV>>_j;$n}IPthv&a6*bMp4}v#BMOQ%f3b zyVXH^i-yC3OBw))AkEM$Qs*wy|AmDpSSU5GsPRdsJS=+F{C(r$p~mk`7bedNXImrF zm4MK&y{R4naY-YivTWtuLghuBm-;5YGUj^5{ImJ@9o$GR&~|}=?ZU+OPYAaE2KZjV zimeL@UR|cAd8Z~aERJMLJ@qU$)RSbOaWU7JVElGnjNrUTLN>wRR zrB9hU|FL&w|7jk?HMIP~dO|3cBJ|6Obl9O#@GsYdG1or_(nd5!#-#%sDz~KVLa^P* z%AhCW6(8OjddG5>x|U$x3Y4dyI?SQ{iY5D*(5iHP)o@x>upOuh*8a^_3UECkWDN6`o~UP!zO@f z0hJh!RAeXDh|>Sr{R8WKjuP(QIPX$t!#kmfqUlW0^xCCae^2BE$PPT18iK4zvw?jq zC~LLL^;UBKX)37ROaHo`=% z6=JMu#hchoCR7LoYof;sCJvJc5syNSObDu_Cd^=>)SOJ{^CtYrM2ysg*AAGNOePlc zCVD_dMsQPVVkMba2@};4M;yq+L8&9G312di%sX=AAxwmp{@sKbnK0*2)asCl#ZnZR zog^77rcoUNe z=AabCZlHK^nmm=`VcKKpU`or#Q#R{rl(Gj5FT)>6JsDi)%s?IZ1!_|M(ZJlQr4_{@?xe~Aw1kX<4 z3<#3YOiC>a)X0}q#5u@z;cc3SgK2O`Z1>2L!~21wDqv89q|lnWmY^C2@jl~=?iVOW z8~y!#8qk@<76dATBqJ#afui-jN`P+lHK2#@C$cyLI_7z%@FW4s1|XLSRJ2tn7Xri% z?VJX48{FeJE?tgXg%*PlF4UF7C=*jr4pY+t{Gt>rgYTushMJ*1Bjb|9a^G>~TD45|-VSZ49|;fz&y5ryUQ zGvO_OLa=!0@XClNAm38{2PJ{>I1KicR!H#nt^w2~dCn7{&Jk}D0iEFsEC#@f&i)E@BLVV?h7hb>IfQtw#W 
zZW-o~v}^6%PzF&3I}0#{3P^C%F{(cH_P~==;vI$zqfF;B3fv@xdjp1vq5u}q=*qG` z>KTCTM=XYL#KfHEy7_TB|B6<3k#h91B1*d8eH7bIxYGJM={&!!1I{|BZ>2@gZu=F? zIA_u|1;g0WWy1*SxvXrIZzF-!@QbwI!x^?8{!MD7iYFrL)PXp%;F*9uYJ(uv$e+hy z(u}3(XkAXE;tmxx*4y|1`hX~@ZwSD3B1#+y>>&JZ+sF=A3@t{Fg1>A#*D1pWo{$-V zEjuwY!e(rcbO{^upC)FyftlgBX2`h5XEG%Y9%xgB7E;NYZiNTu;DH=21;|71O$br- zeGnq*5`}0VLPUKyDV-0|BcwCco)9__#m6Bt3rrEB9zH~&ra-7ck`_R<(|-~MT#{jc z8}pdLu}DbjQ8Ms!s6Torf(o)%i2{Lv)LOzM3zVC%0~Le6GbJt%BVDJgdu{U?3v|b9 zY(tX2MW!}#M@%MFn{^ugMzwhM=mz{A;5W6p5|LR5rdQkWkKFGJ2ZS_S^@!6;^+)@bFjz9$`5hv2Wh*UID#?x zw8PROoRLSrPbt53XbF1f4Hyvd>Fb5H&qd}qm9%UikJ8~$KEUT1abXN>#0zy9GN4Na z?rFn71PmNzkK7=TE%N{E92`nKGmr7`k>Z3Y$ z)MFUB@+#~ck2B}~TCm8l6I3Z(#(R|wuevfEbtY^1MT(!`aY+X~#$}GrPsn^Mdq86f zWdXGJ>NjKV=p%Rilu75F?A;12xGb?Z_8*uRKTm)qj1*POAlH?1&qrl>R z=pm}f7n2rMl^vIQM$-bElBZBTL)u-_hYh2ICxO5!hNjEdic~xxpV+9qSZ0>#u%H?C6Mw-M0ps+i;8`ayW?V* zqMNN%YI@b0jrFQ7=P zn@Zn;J9_L}3X-?JKujCavBrjFN^H$@>&pQ#3o!_@6DebFK`6iF#fed-&N?iE@0PgU zAi(xXJmAzt1Xj|^8DTgX_ueZp9zr0yuJfT{N@$KKb^^fioML1ofKE3s%ZI&hdI1bx)npzSC50%fXgkep9m+M%sPpr>Du#PBn| zIj4Rm0W;}5z=$yvrp8L&x?k zG3Zx`DUuy;(vH#SaRM}Qma>7J(zgsGZz)g@E7vg z?KH&k-wNOgppm8ue<*VQnsqxdEsai*_*^)HwxcnUrdmusxBifD+1r-93Wglw7{E@I zj*>^cm&iML)EkaU?8JIe5)1Er^a!0Y$oUc1!*QW0aVhjiiAyh8{1^|sH}N^eagGmc zvxdZ2*afq zD$|7ko?6n&c=l!Lgf}Uw?~j_`8);UcDO3M+w-V}^x2#0pg@@%Z>x(2s#f@?{$($` z0G+~)x(*u_VP3t1-E&vYZWZZ66sI0ZnPk_4vwIeH|3F_NAh9_BqQ>D*AO+#S`s*sA zom{yOoFX7T9HR>ed1w2S$l*-j>9vT1rUjd%CYcYG^5V$6ODCxpKoN?m$0Jz%O@nRU z6=ZOj4G@DmA&WJz6M^ggjr)K$eV`lF5P%yo7mjT#gsIoL53*y;0zxV{}GtRj-QfFD}-zngAGMl?Xg{KJDHZELVbh`Kgm`!mc zALNRuDV#87eS&cq?l9<*)1EsmbQ!^GpZbP}`nXt{LY+U2?^(R4>og*cQ`0_iVhQK{ z%NvN}j8Cv_jAN_3%l8wD?AXQYMpP%_$a~AGW)kWad@Ni@PJI1|6J=Zj>Ai%U>o{^L zBwg)(3~acZiL~Zgz0Mo5Si3~KxDhPC?}#HyeEW4cI-Nx&596@jk?bH|2XNd@dnscf ze8&8Ny&uO0MQ(k-_5iOFcRHp-J{Kc?Asku0pcZ3@u<1Yy`H7^-SrT8J3dAX&%VjXp zDimi{76YerO-0g`cmAq@2{FjJax}7zuS#fYqIf7%4NyFM(*Gj8GBM9u00MoDmE5rs zcWZnOx}Au;bzB-*w)b%i%Ge4|R>Fbj#k7A0$AsCGXcbO%T$+jA6hzNYYHtli$DJ#H z(whVr6bN}q`;FmpJ`7bK62r~)-ZN?Yo7g_871kONIaF5>1uTUdkW-Cl`(Lp=8>kQ# zN7qE!Rh18H?*pbYh6Kml@gyEBnmvF_LdK3

Nz)xTP^uJp~iB|Op1py zT&(H3d}wt#h{FVkpoz4Wuq^b&srg}NxOmPmB&0g{nm)Ef?XqRA}5@V56% zN|Ni%(%uk;k*n7KX_+|+jqmsD>4dSG$S&dEmRwCUsZO*HosnqH$5DibE+#RiBNL4p zdXjOlkR4MS`2$O%6uUKQ3DZLq4lb+&022Q2na*T<(~MtMY8a&G>nk)SdyD~-*&l^@ zG$pY=7Vf5bhy5|IXDy~8_DAM*3?XRS*5KOf$-Sm1Qw{DHX% z2D=RtLOQPJM~^wKa>8dE=K{mr4ZWKSS?7K^89Fr#m%K=yxnK)7&fuN8vEf>rs~G=0 z4wgnpRxQ2uQ;V_W#k6(biDpS&{Ck^!2kqZX`*+>^pNLKFs3b>LcUDd3RjO%|Q0^ov zUxBwIl9c;4$!Z>V&VtJqr_F~UjC>`eKae64|}cYx=aA_qHgzFA^u)yn+U zkGwt8Wc44M$g171_=^)cml9!Nw5c@Wu~pHcVJ*f%X@^>ulE*?1$*v~YwXa0pVB!YV zo$g=iKWMUTUp{uxg6mPLs*!dNi_;Ias<#*$3+6}uLlmnim>=Up+J1W}FYy~L2~=LI ziQ)QO?Yr1@KP^ zfT;km_AUTeasUEt+bhS0oc}BvK3t*CxBb3j=ZagR+-@~@_vhCte@|am^W;{pby4Mu zj2oT$eJw5H{uWQE;Sn(Ws2GL~VR&4~`AQg`qqr=7#W6v^+ZV~nYN7sT3odHH(}Wj^ zdB-Z>Im8XtJgSqMQY^Yx@Jjtiedy)WPjg13g+)|O6YP9#(DA{nz+7#}xif7y<*)78 zbu9-KvFWGb2p1%VYW(JV;d0t@$2iUDBMmMHR1EvNUYl0UR9rTvGQnA2!G9qB^6}xB z**!bf7&~`dh%_49nd^eT9ZSLc!~WjbzaY9V=4eNdnnQd+V}ox@ z?98Eq46`(L1=p>+1o0LnZ6Ru>Djkw-1lpj%9IGP5aIHLn>duhmV=Ie`OxCX2x7IVw zOl|r|?Zd&5qsNDwALR0Dg1GxisgG%bL zObk7}Jb(4tGd7v$fD+`JB#Jf3bYJW++wC2PV}ksW%#r%5zf|4XhFAq{ORHE953#qixP7Ai>4nz&aF6Z&fFHovN!n62VvU6*WVQ)=v9+w#FxLs8FHY!Erv5*%&! zWKVpk`nEO31s}^CD!a?dl-xHo?C>yq{h{i`+jpLs=N9j>h#&Y^Fp$`{=R>4Im7!lt z4#Rw-#A#dLE!5~gM6)KJ4z^phukQ*XJHugid;t=y+r2-e&nm(yVo&P4IS#Wy{i59X zvd8~%*5=TWi$C8kGKt@>_-ZLUn8A8r_T}`EZAC>Uo8SmM=#h752tnZOFRn~%OV4=v zKz-5nbsI$KxVzRoM5+!kZ>w|r|-!a35U#dyoq#IOVRa!1Bjf$QKy9ys4~ z2BD7^sBBysA-g!U^oLrDlI?y!+NnC^+j@+jnQWXCf5!3Bm9Mv^RZIim=-|jcS^d=k zsyhvo6qn_JyT+^bJ^tyKpsECd0v8O{gD1^y&#x&aw^vuCqdM=Fc9y#@y4sOvZWfj` zyLDHAIQVRSw)m&KFs&H%On-;Zck2w;UV8gxiJtq`p77Vhm}%oU9dG){s;Dx^CxhhK z4RQ&+%|Q|jT8zIp0-+7ToH&I4;S<}t@y>53cSA4l;7?-W@nYN=k&Nxy&-Y=88wIMCXj_#G*9zT z+xFM3e?saqvuayc0Xqsc2-qp;(-HK_gcoB92F+C(1N==@;!N`%bhUeEEgA+zG^wVN zeEzt>F!NsrgG2m3L=^aXiVo)Rk^ZsUyD$4d$J1T@mDAc5*32~4{U}gD$hEC^wuSel z$}iMD9=Y2^&1E?A%?6J3>JtU?M@9IUsb#ub&q%dxeDDjps!6g<=qZrK99_*hJAZm{ zZanTrRkUW6CxZU*#>qC-8#A+ySG*c%Emo*3fg}2=JGB!!^$I>T*F?K71^st$4r*lA z#-Eut)c+xIs8_FQ`-|uy`%Qyy>jKtBkb}nLV5CX(?hDiF>Nfu}9JQS{TJ$kP``X4m zYE^CqLzZ@>5o^P23R1rI28C{kK%ur;IHutUe;%dDFLX~gE3XXB3;UW8Ds!S(coz&( zY%!KeuqZKDpPhdpcGAh0r~d%sf6bcj+PC#Sz9 z$v`lF%4wg9Y0%Zt8Mck9wa0ab0Uuq_d?*POKSTKiGR?D)a_aRBZ<~( zjqU4pPm$J;&8?$#T zW7OaCMemi$4ndz|pSGExlYC;>+08~#mH{ZMDaU*&dePLhSLd$|`CT(DHZ)u2hI>Z# zaWe(NpN=2Y@Pii86v2|u1!;N=;F$ovEzL|D)Voz=(u&@3GB`5F7cS^i0jW~~>7an@ zPl^Yuj}&I+opB#I4n5fa*)jLN*Z-kczp=l|b7*7CLg>|<_o$)~yISfk*6Y;!XI=g5 zDT_ap$6qFtGlMRlfV5^}yfKIqsIX6onzKDy_en?J!byd5V>K~GuTBh|`9gMP41TYoLO+)H!DF+D z4C=;S1<4V`zQuf1MY3Fi=6AMbYI_1PDqO(#k@(u#b2a3b=uaQ%j$FhYAKvrQF>XBc zjjtLd`6WGLL%#aKrs^%_!vp#F8N-IE8~UIs>VD!8zG%!N;F15EfMjCTYj3d1GLF1> z*75$9*Qi_YV>q*67B_U5oU<^JS+Hed8cK3J1lj#@z4`U*#REZ9BW|pEx#glAQm)$4 zHPbXv=YAfl&nw-~Q7)rI8pam=yhaKATIFv;(kYWw`*P6!4B2Gd$v_SKCanPvN$4Xm z3$e9*v;(#6rS179LH+h-J=>Zzjx_!}X@*-Do!aMPrj!`wQ}Gte8RbaQRAxM-=}lvl zC3O^izUxs5uSF?S^)`3|+7?ZOO`aXxPAzBqo!J=-&pSMpJQ)>v>gUadDeRmkX$tPv zl4|@$8n+GYh})DZHg3=6@^Sl@h0(9-TinwQ#TVzcUIA-I><|TPnZ5r;z#jW}(BQ%` zM3iP*Qb;=}qjrMhrP*$q9F1q7{q}@zO*4x)K0M}`W85vUcnsz^@t|lARU2h@$DbsH zx|G!el-2RGR1@4Xd-pyHhnzS2K+aWM@7u{eXdz@R_hV#-1wnKprIw@P>C#_ECQ1u~;JIoOw8Jh^JiuvOwtvE7{3)SanJ2+|> z1Nfs>GqoLIL-_-iUVU*Dor!Lk3C304-PGDcgV%A}CwsD=>pgr+g`?hX*Lylp6@vA8 z=`K~fub9Jm+&S_aB^XjcUc50QR^HBy$2^-$nuNV0p|+Lg7)Q=$9@Nms8v~fAQ0mQ? 
z{=s~jVC~p*4jnrs08TyvfXuT1(6n<%wfZ2nxWV_T{RV~8z#U69cK7ph_|xSMoPReo zEcz#!z}?(dmGyK&UX(>gh;(R)3I=%t8yxX)I@Ac)a;~H!Uk3kR?*5ebx*@-VyRV45 zPazwp|9I0_v^yDv)k*F?%O2QjXZOZ7XBxA#WJpFY$K(9$Id0B$|EIsgDUDAw&%thjE%CnpDo2spQa6JIy$y2|33QgE=9U zqNH?^q*9C-yIt5)DMY&>QR-b|@9+2b{k{L+dtLwcU30C&wdV9ZYpv)0-1q(YJing* z8V0n99^M`R1OfnF;0yTm3J4|~VMYUhpC5n)06-OhLY4tA@CX9FfRju>Y4I2U%)mVW z!1$2=dY2FV*So3!ANH?f#TUR|o&+37G5ZhfPmI|gXJotH3UF}q_EY-%&A*S=E`Cs3 zW{~yA8Q`+g7+bC6!#B6&Uv~fI6Ap+C3tvxc@wELdSFoSHbz8bRP^o`CM)s4 z!9$0S98F41OV2o&d5W7~P*_xawxpC-T_X^R#FE;&%U7D3uU>1p-g>Y7e#e8(hh2}P zeKL9f@6Z2u@oMDto42Fy-j9uc`ut^j=IiXYxp_su!0Y*Ux4`Y+J^SDKr33a0s-y%{ zQc?5^0zItgxQ>#tu{C^|D_JEhez}PaM-}0gSJik|ZH+DEFJyQ^ulfp8+^e;p6kS{F z+5f+erTjnj?B5;xuYSD*+yULi#X?ut!o|YE!iDMrE>tE}u`rXFi%ar9OUdHWs8~Ax zz5D`y1hU%L*mi?$(1r~`J=q7cd;YJNUxVOd$^P{M(11a}hY6+wYytRZ%YU@b1!hs| zx%FSZlZ($wWrRGFQXqm8xEqbg;VMVI>D^kD%y$XO>d!tOiJo;lKNAYazS+2bsnd69 z?FxBrZ}h~PGdr;kfL7DN4p-;%Pt_bdHgD8OK$DvGLJkB~O}sd52Pd)A?lKenS;r!- zx;}NR^60%}BL73J*sv)3#)(I7W=5Kt)i!AfBM*1`p~vDZB}=5|V5&`qZ=yxms#mMuoC;#8eNTv(~q zK{sg@rgV)g0i->XHTh{C3#5_Rk& zvng!rOhfSUENQ5tHn#tGzn5JDw<_Myu?mt+u+UhBvEYYMJmTaAKH87Dz_H!9l;XYh zeTY;qk8{@s>OO0g$TLx@-GQcG@W{xCK+Ac(i{^}qKijOuzQwp;I+U@CSXK; z5Kr(L76N*0QNJTS&cAH&?BZ+Ib>7_dXYp5Lm44RLi5v6_zmIl9u^#aZ`^f_C6McHw zA>$61orQhiz%L-PJ_vFNMPHiKvu2xv>$K$Ej$WOqEhS=)!$!DKqoqkC`=H#P!&<^< zn^#`~gFoY))zfogbWr-7sV(O`GM|U#c>Szyz~2x+o1*Bby$ARzu0Mra23J24N@^1R zcn#2+9z>4gKL%7u_8uwQ{NO6rBB9-GrOrEhe$1gz1&7*xO z8q$wx)ehKgK3x^`ID2K!YFU3u9sflQK=-Lotx9jr^-55t!4`}mQ|)u|wzLbZ0Ub2t z;b?9jX6V7rgPpS{8g}b^LC#({)w9uMTu__|pBZOzN`i!5HKV&4h@{jM%t{Eyr54&d zP?7j>UR}7sp;mCbo0s{;mvBg5c1ddp7N9dVK{+3G6aiQd(+{mSf4zVWk9pZbfqvLB z9{x4q#5qDjB6flK>5>lQJ!|ErG8|;(j~lnIZ+R+cdewniWj`3O>TD0@Y6l=<_( zuD$5<)euL!FcGT5IH%|eEgZG~f!MOOq(&2evTOG4yj(YVuKwBYcCdR-4lkWHT9Xxt zUOn^+Q2%qM_HhfWY9l)-XOG|Wb=$s*;6eNE)%*gA_e}q6HfJ$cqYYE?$cHW!{~7=6 zyZIb#og?hW*Lv+*lIuru>s!vW$mIT(iL*au#jMp6Ya}BMdEPViD`O(_wjBlZVv3{D zbF8fs_z(?B4jSrxB6S@5<^=9-z=18&S}V(I2ATE!uWeWS0(K*o&Y$(Z)9p&}eoi|m zID|%5bNTA%YWBZZA_EOkrl_?ZUA0g(uii$k8!i}Us%1UEI>)gBCww~qYtP5rRebRw z(QTrNEa&Ay;&GKPc8B$O!hJuRZ>7%hZhNUeY^mJxL2mJ(6zKta zs~2+MI99f1j(0v4+R#4h`TEEED3h%A*#wQBHG?FcGuLw^dfzg>5pm#8%~dJgq+uJRqxvzw>%b(Ph_ zT@4P0tL9)6K6n$03*oLGeNgxDRN<;3PvMe#Bb>%94@z^I*P5XrroR9R!FaWqOYIYz zG?FJm#nqEl^lS=?O{^RoDLGktAG^7**W&3pH;QW))}tItp(zFp`ontFF#m1H2pP>Yay9C(_e8r@l%zD?TmQCV22X-*l}jd z!pQLx4V!b1&&>CbegUt|YT}D)1E{_(F{Xjzn9-7?J_$NZ6fyb(>+YQsqrJg>hCeM{ znvtKY0edI7_Wj+2e`ojB5bOCqUDPjNdu4=|r{>Jh!dJ6bCW&y@Ux14wG%fA7%50aD z8@`cI_s%ppoN4niym#?WVoH3ea`3L3SVwrUUN{mC)CQ2r+<2TZQatue_VqW|3{s=r zM11X)pY_@5k>|&;g59J0zl;UC-bdB3;#+fv-on3kPIwcOc$=uu`|fXRUw;?XxTe>x ze^<{QE@U4Ly$M7lE1a>i2h!{Q?+QD-YGbp2cFrW?5+)+h8BL=+#NN4?D>- zVy)KcrPXJzkd(G`exo(ao?YnuQD?O|V!C$wM=kL*->qvl;Cs{KfR^N@+4m;~MK9wG zl$V}pO>Q!Gbw7Zs$DjGtQ<^zlcUf=mWusiqn$?nDzy#H{Vop$TR=c-ffO)`VCk6(n z;gQ(jr+USvL$c8w7r6j$>zu7Ou!dKuON@*RQ%$AX| z6ekMm2bCyQaT$BX;lslVOld8mJUoy%Bv`76_rPE>n|TN68U!y>gE7dFjSrQ#d5&!x zm!p$_;=m`R26YJo>KyhTt>t0|{u0%fSNy-$jXzJokj}k4lLYrnsI&4+5)ihxIPMqH zajMZ60qsuYNcpEpV7^D0;)0F(UdKVeRWDwPYf+_IN#TQHMLao%{N(wEsD|3tpvuKpXlbaSEV{`f!&Kf%hHnxF|d zm+MpIutb_^?$kSO!qXt3d%<(UsWqm#*wRyX-_R?aJU6lHUCUxa63JnC+arxtnu+?e zqN$0T=^{bIKx7u@FXj{4@$pQJa@xiXSw@o-dD=ZPqV(?QmEcfWTKOpN{`D@24$1*H zInd`N5ZSt!58RrG2@NDVQuBh8()&g2*Q4F+^&YygWMwHcb%i}?I7+XtQZ<(W;2mpI zMrwe94&wDOyk7AeR0ejFaL%39!{3VVy-{PrjA%T-+ zSl3DFB=D{caNm34@EKyU8*~{bYa(Cjhp`y=%lUbDAxjbl6zv5z(7e{IcQ35z*&+yuEAU z?$&GSZb=86Y)-PVA?gh}n_B8ubx?w!YHa5XjOxU4z24kq*BMVk6AQfHMPi9dt#uy7 z<3l+YZ(8~k4Ksz;u08(krE3OZ6_1U2oSH7)cW0zhn_}}Ix3UtN`{3%Z{fyA#=ZdOI 
zc=g^HzI&I^8`9^MKoUtDC(?O_xZ`WsZ1h64d#g91)U-$GjjrjPk3J?xJ@DuBBSfj5YSJn2h4~g%Np}Mh;jIhzy zl;o1k&v)kKf!`mTCuGEJuXb5;NbEB^mfb_L219=5EHk@@`g+U@UU8E`sQ{U6 zA(>#M-RnazTE<25pQTeq(NNF)MSvs#utU-dhw#;kC< zYV!=#)Fqyv(g5e2%E2gG!a2$Y@A)KHw|zMi4FM?$fMH@{fa+o{u+QkV**QNKO~9tS zX-HPz&t3~dfO-d-nAp4y7eKttV_5A|IqCV z?+CF_Jr2%RMvTYpVyGfiqhJ@9-w^#MzVd`O0RfPG2!O#DCWf!1_%~OAC3p%vEw$w) zGtqolGfw>=0*H7EEi}YzR7V3fZ3q&SatJrh}qC|_ftP_oj26S#i&^3qw(AK zwqSSu{&1apeD(mH8uWb447BX)ky8_X^c`*;OQ&B+FWMC!4IHUikSUyFfj)2K@4B^!N%)oUr=Pg3oz2Ee zJ?E}}46upk&`;Ic*pr~Q*Vt+LpODL3B#=*8yo&uT%{y!>uuo325j{!}S6$q|Z`H#6 zMXW8akT%a%$C{d!`bEQf5*gN;hp_ArecJPvUTUM2m+ZD)H_JO^R@!Z?hM8kE2Pex{ zC^HQ*Y{b`ebJa7sziq8g!9Lh_6L6vJY_@A(v6sh5lIlb;W%AmfkLtihs|Q&1=J?r0 zx&(r8HOc3O5>bXt)*o0-AtUVNhDD~HOEHziV&k)jjF^b>9Qms2e^xS%hT344CEVkv zYnfbJK6K?7Ifyfdh8F(du`QMo7-tJ8Sl9|b_#7z|Cj@WH0MXYZQ? zO`6G7^^8}obhQ!H=la}z!Rm5cEbSAC2NA&2TCCs9)FHzJgD7}C1rH7bt7XOcAmK29 zMfRt~2 zGAtC*lS47*j@k)UlB-L;{{phT-T=u>y|ZJx%DLC~np_v?npIHsrFE|hq?bT9EzvLc zK!-zZVtP&3&-F5h3RFPQxT6<$`Y+?vJ+d=^6MS$1S^k5ZblxxrXd zn?KVc^1Su?V{}ZWGgQInyJ!o%c&ySj18XgrgPp_AvgHNa6$NXHIK4bC0K5!MO zn0Y#7^>l5-HMbjAh+S`&!d?05#~M4Vt_C2Z3I5F}*DNX21a+|1N|)gC_J#(>?Sjns zq>BXFa>tq0h}0|~RW!}0#Hm#ARUTGVzI})5$l8CmcE=^R2KA2MsZsWZYOIFk=IWr% zAv@zS9^W^Iw!VxI-LziqwJ_!*?#5>;LXd?4m;CZZ0%5zhk>i;E`eX6FPm_A8SLk=7cpELG;i zhk0>~BZyDB&zwm%&eW6wsxp*|#~B$gWne_b_0@_>miWfX<|7V?jKM&JM>G{S{kO~b zuS-hd@Q6no_^MAL%cZZ8W=ug3jEP}t0*F2Wjt3;n2;=j-+L)v4HLKdN46^rPv|jy7 z+O=cum2%AePW>#Jse^-N-n3l;N+yZuk`&cQxH$>38lh?B#HtCmgLN&ctYh)tpUkVL z-% zUl^kulh5_-H>qWm6Ptwz$s`QS*0kHRMSjZj5w;m>y4(td8y=QN5UlJhe@_Gr0B&$r zpzrdMG8aOOa3B^xHY0$Uk#5ppQ?f7G@7_8}PGpF-Ly%~`G%(aB_o#4pw}+;WUhHV@ zQk@L(V{Q6KgL{hg0OcX%PMDX*sg*)E*9MD{Pbp4x8;h6^N&xjFT%7G1%SuF{y$rBy z@mh~I)?K%SO|@tmZM?p9kitg}Mycp(Xl(!LJIMeTc z%t+1*iLc?>XKPidFD}47hu2BWL{zfklq9K>f9=z;72UAXtIr8YLTK85DEd-=_)k)-9a`URfFF}ZW#|gYn$oltR z#dE?mRx|5DL5-f%D&pDvzg;VN<6ZvXXHcSD)0`bRu_L@Z72^NpU`zSWbJCc@Cc3 zB;89$h{~0n-s|<4_J>2qS_=DvS5-s^Eur)FsA1t-pn~OwpQr4phE{#AG0&rzq}U|U z(mec`f_FJ7`KQML|05qT=<2JwIxn<{S#Qc^qxl@osDacM{MRVEnAeXT>vs5Cs9A3H z<$A8ElDz4R;1#WX`S}_#JNNpdrA-k1;|WQ${Oq<(vhAu4wJRJ^)7`*2>yvx0v99I9 zSJb#R#}AzgLd}FCD7_2~;wlZRB&n3b8ult9$2rdM)55aCc^GCw)ftM!3S(-VI~-j` zSWgaSdvs*Dng}Cl&eYGxS-Sce?|3KSxYNcm^@xX?C`J?;&Gk<{Q6(S9zrpTEQVnO~ zN6gvS+h=8lP0Jog)_EhcZ?pxcSa!Y;MESi8zMh2rkCof+S-vZ`ZGr3%0bbcoPgVYv9htv3wEvQ0P{U| z>zLmMm84t^4GMHke|}MGA4uNwb9tc{(P+tbXZ3_CXC81ILH1IT3<*SbRwlKRxBIyn zVo;SZ&^k36=C5p%FK%Vl=^lg3>2`dEVWBxgi%9z}13o*7m1o3cd^>`=gyV%A;;4tH z#|(AaMQ=(TJwJ!X2yC7En3MgbzODoW3F71JgI@w3fMEPwl)r+#)^&pT2Ex5Ksv|AS z_X7e5MnX`ip!v$GA;a|atFL+cZqGlu?G+VZZh$$TZ|{n8Sv?6QNqwx zqYJMlgN-xa@7ej}%7x%j5xMo!(iZyD8Alh*K)9$V?m<;G_R&54?(3(Qj+PM+cbZv( z#x3dF^Ws=V^*Tm7A6yvF*Lzoae{-74*lMAjzhA(WxUTarWl_WgLzX*Ayzcl|h;q2S zJc8YfWGx=LCV!P0ldX1RU+O^idaOS~LQD?VfVGh%3O~I;f z4XaMf94t=iPrrVTWF5PCGCHvQ9ojFNs$JjDUD<%paL4Ob7m{Ex;A7di|K}z8AE*9I zr8PWUmL-3gvoqI5*Plcu^4#LNDIAUJRDyN7E_eymkk1V@LsZ_>E$P4bR}lBqUNc)q zQD3cv6{Nqx;|`tNFLek45QU!*%my%|Qc z`bg3PF0Lg|iJAn^m^o`$SO_$MLK89bh8yLTy3QK^I4y!3_FcZo_+^XC{3Au%usP;H zowy-{T+|KVU-K%~&S`EPWF5Kn7G|)Tc-$H@ZKRjPrFzMl6u)Do*X}IMDQu#6shY1w z*A3^K>GdY2sYt7WD1UVJS50|q-Yi?K(#FzaJm$G%Ur*Azu^)Z# z?u1tqS9qvR?}BXQl9Ff5RyK*LL9dHwXWX!y3GV_gH9_c8ud%>}>Ss@n(Q&#C4QV^- zl&#jpeZRZ&c!hCVfmgKsA%rfwc_dzKI{GV5^BM9K+NA;MW*u$R`TfF&?D`N9O z#GiA!13Klo5PrU0rh4ip)D^m(TF_84uyKcw$hogVsHLAHp0FxacvI9J+FF&Cr ztUd9i1Srl7ys&#VH!bd*d#o`TmA*KjU0Sqx#(0c)E=K!n?gk`k-RLQmZa0$h^U3eL zt83_p=L1N9IxhtVpHo+v>Rr5^_Hxtmpg3UUTt?JJMwHhA%SmbRqA3`1(}TU#u`GU% zjZG(Ca2Gd>kpXv_h8=}sb>JX)!&5+DRO5r@lZBh)WmelxevIPIamht?B=y$07xA=` 
zJp9r}lF=HQ6(cp~T-Sb%R`*&E#A~x8Tlhl1;@_kFK3Vp_FnW>*OUZvRzK)1XQsw$7RJFxLkCi)o;cmYz`wWmLF zdkY|U?ar^8PJf8VfZr>H>a?hDsL<4B5e3GrmyynR5f(|?ga=jVHMIf~iz zy1#7OqTXqGJCaKQGwWX{=%;>oe-{Vk{Q?H%X!e8>bmHOusJ za5nbJz~+HySB9K|I;3~a4V1ta`vzhA$6Ok*K&6 zPCyxR@|iT{yrWT{(*7UL3z&P}D>L3rfFnJTz0@h{%(i$K=Pl7m*l0L|s|$XOlZR+Tv$4b*+qspFAw9itN3(+1USGHE zzku_0(L0+NFBihbfI&0i@JE|f_T0a;DVh~66qbc0xMtw_Wx|6nTbG) z9OR_Q`)8uBOt>kwk_h!Mai1n&H7)dSIw`~+P z8c6iZkCnn5b@rX3c875pmWa|C9AH>PjBVaq5l`Pefk$ehC%lp<7a5^WV58moR!M3x zAUghojQ>3wpop=mc-&x0r8i!433s2jp93E*eT|Fvhyh+;;xN|*Dm(y$Ldh-OD$b%!X1%O;-L~}z0;wt^gr$VL>8(~ z6MaU@Bakc50&Blzm4lFv-o9t6f7-==%hh%orW0?QW36ccj4{)ETkd632&+!pNmh%B zGRHw+<;{%KQ}n>$5ph;-8ydR%dr{@kg)7izpEfIGQcn`-d}=Y^_W6ZiT0p`7WyAe7 zm#kAKwwYX*dIa%O6^xN&(gPAqrn@&W#)NYU{0uaw`M4IU4%V=PTCP{R*9dgNj!`X) z6wCso10Ene;<`@y((cAxmzcfBbjFzs+&k~S`N3m)h%}HN>lv4?uU?u# z-|+W=_CmL>qazWOU~huZ$ZYY+;mbg5rJ z%M_fGZ>XI*M*BEb*Lz6^CX>hxlI{x%+=wAdOSmh9SxzfVL}+BkHQ|&T*x=U3VTU0e@-aUP_HrI5)KL_WyMfrU2;T|_SFEsY?vxy5 zFbyO1GxLLUmCXEfab!H)Pan+pC_5Yw^`!&mNeut#MORJ(18OGi>f-!jPP$@zx%~o? zM3VB~WF&4&pf*NZ}U*`4aFLaiVT^TCta$H zPi}%BPOfJbHq4&s&;PHQ7zKj$Ku;izKKcx?9dT^3w3v(Gz!t{$Io_JF&sFn{Eu4=y zz*n7ADCa917!9D}k(WC4K-(7q%KzjP^@zqF(|mP#?3S5`Pjy8s+wYgq+Ve~?)z3Iv z0B!+1h!J4y#jCQGQ}xpNAk7E2$bTaJFPP8YzQ;CM)Qu$yVXRr(0R9O#P4*RJDFo;S zzn|y(kMF7k9n^U^qCG&Q2ZVu)j;Tc~(q0%>va8)h`K_ho6NZ-n>e;hJ@%-S&uhR zC+QNrr9$NQk7L+{WP%YGzyA@<7&IhC&}|k2iRre*rAr`lqS4XAp7Y4MJmZNG&{^j6Kc zzbg#KS_eEJ0JxsE7?a$Ce9ag~td>L^a-zYGF@)FOEu&xc`)rpe*9|feXRROT%f5wO zha71{n}#OQwo-DLH+zh9`3vZ-nuDa4qMaJ`HJP&J@Aa-tkIJflwK|d=p72-zu!by}&D}mj}c-OZ!}P#qoGe z6Skue7tP74KF4{Hr3bhM{yrsVG~GVL)-@lLV2?V{5UTFPB#NFb{(dz`+8)`&2mV2X z&|iUkdUJE~hr|w}NZn@);-;QGI*ZSGVX40aOS`uJCFl!=fmUAW*)yH)z1E z{j!EbrNs7Fml){sKatD-A~@?Ok{h3*A$sS( zFM+Kf`e?zk21~%Mg-VMG@8q@q0uJ;aI|!y&Y{3u>rc=3S1TcPW&+MtMbtw%mQylWPr@QKH z-bK0VP3=AO(GB_}g0=g)m`sQ*%3W;#KX?QoY<*cch4?E04UzqaOvLpu{cPj-?h;Wf zV(JgRa&-YNnr+~Zcywe8uQpU%&}R9!qEys>Ru|_|dKPuC@ex}}fp_hJ!(Y$&P@%&_;pMpRUnIKV7)ne>(m;p;!HQiVdBp%>D!?OLHj0|k> zoYj(YG!(!T=Kr~CRW0`Stv}Gv;z#+`jqOPkFjuU~^LD=g)V*Z+QbyP&Fa7@DRU*RK zMt@K;?oA36RD(O3U9=wE0eI?th#M+OoPF66$L7wU!bP3T8fwC1AyY0y3%<8sOi?o7$SacH1JiB-P)ynK>*3OxPYw zwt?)8KJ^q3*#otHVS}xDtS@_J_KHJFO~peHLm9$B-X0BgxNqm1x@NPF1xTGLRYur2 z<`0pf!n^SR@klh9)dTJ2g|V18;D zw@DYOGo(L$B2bHXA?f$M5mAv|g~QJ&7*W1N8TS@+z0my7Bt>nxA|R}1x+)?sN=3LH zrHH{$SjCLf!i%&<7uLNqg%rpcX}$d97=4UgsK>CzKjGO2;KVLDTzR#&l5&!wh{Lj`PNuq%TdogSBF`k2VEf3!-tkUu} zX44Fq``bN6qMw2*2l3rws=08N;M9&7f)%wpnp<@0p#Ywlt3Q%8D#?<|bP=kQWJdN$ zcZvhFCZdM~wX%ypA?Hd4u`f>4^VL=g9S%tP8obabaFAL3TcCgp%B1Ag=4QP7M(RUD zxo^MCfP0KkAXua$=`rp<&j%i9Fh!BMk}2F~F1i8l-k;Y>;dy`(rel^xj3EUXE&wwTL)@@Iuz|(L378~1adlBJ2%bb+5jm~C1 zVCG%utz_i8q0$W`4aSBwV4Io%fjJDI;J|jfQC8G)RRF-3iun6v_yLGqUBynX1t|Iv}L#TX?YR* z5kMbU<^)m}Afwt2BgwW}6vyW=Ww|*%$w-?+ ze%ULQlz$>WtOy-Yd4gfO-tA8C@Y_`5!KKkp6`Y&snE@H+#Oc;nTkw`kz!>n~vdj=? 
zb0yxj-+97S5q4O65*H{E(uWgR(2nc4p~AmzV^~1cT4SSwB3%T@hDaF*l~b)J$!8<* zD^7FBy2`Ev1gO~;WyodLItmv!d(ez|vzH5m^d1r>B?PNifMzHABe;$;{USgez{G&I$=`R-7n+~jywOw&@kyd2F!5p%6!a0g zb@|FQxB)3iAI55MbWk#F5WqdLP|IyBzNLeN%zUEW%yCH<(-L9qs`T= zcKSgUy6WQr;TDo?xlAWy<{H6-SR0?q>I!v|JH81VX@t|wN_k5(z*2(IoSS`20?dHp z8;i`G#(nZV(fX&mU+fx(>AH6f!F7}UH%=*7jTIk6DL7ZtY0ItKWN%I=qdck%UihNo z3AR^oVL#LOxoF=T1FINqpRu5?diHwB^7O1zOoXx>J+DzSUoF@gtL5J0{ipr!?isx)P zu&p4E&U*7hgYKkc1A<{Vx2TnyNo$aiU>z}@Qi=Mlnk8QFjf_)u9-}p^aD-g2#Ry50 zr+u%^U4e+_9k&C$B8C0En8sdYTh&ELTtFO`oH*GLXlE*3=SK)uC4w`HKpqArS=Q)= zF@WLlTKv)v@<)=wlc_#_i1A)kcNulN5HpSjD?lEed6e92>8?F2%ZU^8_NcEv>}XaJ z|01=Yw?{`)e9@VzNla}6m~L?`xCZl01oG}_aeMK8p#>x~X}(WV-3nJkjQ?>hRG`*c zO8}Rn>DnZ_U*4B+JVs|l9~C$hUSQsz69cK(PSWO{*xZzeUlL$|b;>=!SNiDV_t9U0 z#fx6Yf7ty+%ilEn(VxGOMr?gNOK_?=BXkCXw6}L9ah3Ks=ghMXzZsGCMiA4EjDcA} zno%`aD+M}Qf6KbY9~R%}N#~MB7Gj_)6rkmQ0%TS%{>L0DXI^$J-3hPeSm$MHrfIz5 znigZv0e)RBUcWEN61MEmg)l`)70AZ-c;qUZ|8)&iiwphpB>2%qQkA_epheRjRwCcF zOXNfqn}*T?Qfd(FJ;Ra?Kdk|Y)&SCZO9ESaMkG~KR4zi9pViWt*exx|MLU+q*O$S>PZb)^#oK$lM4RQ6Z@Pf_VC&GmjBUVV`}}N-DOd>a#kAK75;W?$z_u;@ z4O%~&V@}M}yZ-_j82DfjFGQ-RVh|^6|C6IZxuC^@w6T$N zNti8sm8O)EKI>J-s9vH79^BYe8eYk@#_Npa3tyn`7)YSb%YB+;g;m~scYE$3ELbo2 z3H1@L9bS&$T|%fcLfzKPh|`d3!8xW7FD4TWbl@&9WhCXGGy2@qO7iw@BN7^_48obZ z3h^KU)FTBrpov1}fdMYP(Ql9k9Az;bMWbLMsoPmS1|=pzynS3jyG*M12+`IX|Lj-q zqc60t;s|H16$al)bkm1>;-+Lzcspc(SO_gR%mVc>hqCUncrb!2B}tEQ99mnRF5xj- za%Wg4ze!L}u}6bK*A?VyH8^f-rN*bPCjQ0>{^Tc+mpoX)sVJwV9B>)Gai3D6lkG0l zzJ9oS7kN%bd31xAntvQyg{saM6tsjeIIXg#(7_vWR2UTQ_Dw7vEf7_xt&k1=1hlhJr2+3ROf$idq6 zW$4sCySQ(;6%US0*Uf~105@R@3g|^6MIAfzaZv7bY4cmaL_0A+^4fq!e6&3iU0p0t zT1<9>pb?wFHqz(CIlyzZH4U3vG!Z%-XoO**MoxuTNiWLbAb&~1)mL;@`@)L`e40Wh z`~z%)1T>UBZs9G%_Oo*KxOTmOObe9EYLZg~xX$;WJtsZyIZFs~!QMXsmMA2=Qo+4F z$hv5M=o4xES7(wfk(;qBE|IFJEJH7@0(12MzECo{DPo>iy4Q>x+tf~EHs>lUCQu-n ziq=J}R|dA7QeS#Z*3?9=?m&#Y`UIXrCI2PFWzVKNt2<%TfxUtP?F}kWLtm>tRrj+86d)(NWnVN+89#|bb zMDHvJsQRfr(g@$Ag0OB}1~BZjh$Nn2Lb**k^D*gcn=D6()u zhheD5gosI(QI4jREF=4BXm0)+@(T5_l}TLDgNR)XfK9+HX5rlCIfb(N-`ef%ms{jU zF!e-ZAggBVToXs*gDPvgDek9i{Z!AnY9tZ|G1Cvmydn>Q29#z@KUhD}$Dpw>;;C!W zbH4jqJ72KTyb2p4=XrEJk7sjaHDfP);E*I>0Et_uTk*Y`t(W}1cI11r8aT!D^pq3= z>Prkr8C8>LaN%#S!4W$ng7v%IK9qYlu^UjYKxiBl!0;}0k(v1$X;pw7C>Ch+_gKs( z*&mXqkG|^DO@*(#w(i$XEO5IIP6!35aXMt*P$5G3+H5VFulnQC_QOefR`;zZDP+9P zWs595)_q9=mPG`z_?9hS`FzV8E#T+i)Ed9ZXRiIJ)V#N%p@ngC3uA#tr&*DQpS{SH z6Oxg25J3wt>*GeIUW_Np#t&=Ad|Pmu)|W3#4-T?~Au1|%8V^;TwrTDso`@4(C5upS?=9mrtnL_a{5Ct-Kc8za-70lP-lQ718DbCxQJ^J z6p~VP)(A(MJHuCgrYq?BNK`@SbORoqNm>n8rq6l8Pdv z)hfK0@*1ZRR$a)AbBlqw{ta94D3S_mUpyk%0CS@Q{k>UL7st+hxQVvgyukm33v*Z_ zoNX+AqEfhOC@(|PDPRU(w}EDIdb&=&$q?IiXc|RFeHv+%ckF8&<ngh6h&D z;9Zp0K(%nC9ZLB}&$B^7$N-XN8aBKlPtr_wJXMY{Y$gqWwAF&V`0V~S{U&|xfV=*T z_+R`9hDuSg~I$hc12U3DzJ7H%0P(+Z9JU@%f=FZ{e<&>HyTs&QLyG zyZWQvKEUihZ8V16Ifb|XIo?ZIVo;{BiN<|-g`2bM1|G>e(&$u z8_kl3Z|^U#obV-_@8zXvJbdLna$y&91^_EJ$W#DD#n0s#(<=9&df&eIgvLtQ1M<^-_Nmp2G)??x2t_q@KXH(G)Sf zEbtGd(>V*-l+ZiFGZ$I{N_CsV61`h+C_~SI9)aar1khBds15PuhQ*;#^VsF)LaQ5kz$GtHDNy$Zu2i5j8bCbt z#-;YU_deJl$YrsZZ5h&uq#-i>U-B_A-!7g5h;72!CX}7DJEY%8xwI z=MUP4`#?N236pYNrRVVK%IlRk;y!?x-nACgB@p~lp(Klo5$z%Zzxt}nP7dvrwZrSD zgoBhc2G+X6lY8dqj5t;VYxJU#ThFzSl%iBv_~wmp(C}k>KEvI+DX+G|WUviTbIQJ+ zNWF#L02Lg>6P_=43P*$VR44QuPSAhKE9}l2nb*5K|92VfgXTaB3|y-4WJo2 z1k}TNJ4QJiYG6d<%M9f9qvE3#GSwbagc22t68R!yvCkfdTnUT|BvS?d*$xijBRQTfIP01p7g;Kl!1#P4C%D+C`AlDo=g1; z#|iH-d*DWj3L(W5r6t}bKT3s&xleB|O3M#JixA}P8VdD@@l+JLtn^wKq}EG3i(Tt> zHyCTaYhF$E24{C!0rZq)dfi-YWjaFj5Np5_W#{eBnqQBI&V9Vj)t#of?a;m9!*wFg z5*fW@6Foc*dn5qLw%j;qrxY=2hpRkPx-tpw2v@7c+HJ9}MdR&SNy(F#G0?t-g2l9b 
z&{GdpCP>Qzqmi_Jt$2m+u*Cl9foFHJUbWl80$&m1zS1 zi>)sYhkAYgf6v&3!i*5IGo&nIOKPOC48v^5q_QP5l@mhgRD-A{yHN}=F+0;{lomxo zsVT;AN;riSrIq8fo%4J2`F{WS{I2gGOkG#k6*JHKe(w9V-C^wx5i`VfxJvNjn@MMB zZ-0%cEQkcgUoY?x+54gjOZN9tJ=aMhACx%d!0w6hjW=c_LuDyotd!YmOZtya2e&{Tpod`J$~RWuW^N)@>Gm z00)9HJZ?0;`OGSPny^nai|XE--wii;4IYhO4Ca4GQSdgZAV@A9GRR^>4t{la$SCO; z(CPvzIH`X1D020W7I*7o`w7awEs9u+-+QVtD!?@;Dv~pdmP*~JQxi?kf9WsiP>|sz zZ*Em$Qu&|fd8wc4CFYJ(L!jbCHn|)biFHkBXh_NB)d+<(9IjHh9|>TiR*Tq>)j+Hi zPQ{S{p{fUJeyK{#)i?BW;2SN+Gn)uzdj!;4@A<8ZYPx4*+$n7+Dut!FiU-fp`K3}> z>Wo$3Oclt2lMbZ3jkY-kvB%Hr;J1^%z`q$SR?dEb%5%g z(t#3TA~1xL_PY@{quoWyMG@~&16ka-%U2vQjY5u^Tu+S}I`=kW?3yujMcvoIbH{?_ zWpbpSmOb1!)z5qpQRtkkrpnID!n&tfq5DF{= ze;+Yu&QTj6R4=E;WujrRr6^v9_?aye6S`}jZ)7-#D+BT;$eNAI93e=dK8F|x>Z zOkqu=(WF}bq3Fp_K8(~o+#pJLi#au8-%xt@93IH#!07|>I`GTegAUNtk1Ml?!)Sz} zdBY9sWJ;!3w6=XjPGt?H6AZKfv8=GPnYBLa(PnwZgW-wyL|cQMINb zAX({OMek~ACXHd7iGt=7H$^NKZ0)!03)%?v%ePs+k+QYP@ZoQs5!)3vg-U_m#Cbtr z;TJ+cS}qp>(}f08LsieBq4zXDOXmJ3bck&o;9br=Xqi$=Xz5WkB)Q%7vyzyjLROG- zB4ptf@3o_3&YCE<09qjx<%V1T1*31R^JCs@;Sj<8ZXu;fUeBCljWRE*qEgB6>`_gI zRX3yy+6E~!TFkFO`5UQy6b$d`(zo?>#L?e{#wc}iKfAhW`?V=fyE$d(ttowL#fI%JI)R4OFhe7Tx1?0+p6(xnw}*n^p{HxSj*~ylKpuS~AVC--UbEqn6k=)E zfivyCbNW#&W{|k}MI<<6l7L@WRing$XPOKj=#xY=t$BW?iC_Uf0#&D=-d9Ld-`j6E zY8KYR)jf#Ak)#X>!iT|yd(RQgDlHa;PjJc>Gh}eumE?jG@B39w8Sd-ed{H|1ER8#q zq0ELZ>+cfFB98w)L_hALZ7~z#H-lMHjPq5D!XMGwLqLT=#+vgGBUDJ+9ua>Iv%EyI z)6^k1qiJEkAvmSf&9Ju^L&GM(LRBy`fcTo{vTf=ax(Ky%jyf@kkSgE}=G}l|0wFc6-?JaRE7e9K5-|v>GKt(W^j?uVU zv1=2$mY`(xXipC5UT_TiqWOb~+QL4U&}9@#a`z1dt7EAUqn->M)7-aYT{(mtI z?l+7xz-F`|?>#`C*2S-(rLsM~MG=)y4tQxB0%o9XkF)_VF1SW^ zYELo(YR?{!pJ4}qKM45-^V~Jba9o0M3f==0{BwB$gP8j=5AM_Cr@pit(~#9veAC-7 z(&_QiQ_1`fJE?J{nVdvvL(|uSid2AWAoBAgA1rWxB*Qz)Xaz*Vr(lfUSANAKI>s&J z3S(89JgNRl=>-&{&tB$>v1AxwgQeu4ZU~6x!KKYZv;p9bd^sq2NfrnFJ0u$!oADSb zl8hHg)g@PpT4Q;(eVC}{!B?C2HSU9=x@{dgi9svsSg@Ul_~xqnv-Y}qnB^yaZWW!C zxRdFcI;q`I6e5dXIPfPLwwA8&8ll(WLrij#;i!YQ98l`U0OKiTk@{U+f|qi$Nd{HN9GvvGb9dw4!J2OiS6e%R_z*qbBG9%VBTGl;wVXL~j z6uA5PuO4@IR|j(&XpLr`@Q@|~^&@W%E7@P4ZV^He86g$v< zgpTp%b$e#ZKTx3T{xg%I z-^K^hwR5ASEO5~OiLRF{6|Qq+vjs~qH0J;FiBINwSu4zv?cfX>P`gszrKRJ-fG(hP zsebK9jfnjhijL%R;eP#tF`w_ATkanU=nPgzZgYU3K)evYF}DC_?#-V>q7Z|CnaYzh zq2oZ*Q35z28~{dCL@!P0?Y`DewVlzPFuz!DJ0x4~?fuxHWo5zx8I=4WjX7#Egi88i zcnPWFOAGMP>W8(l_oG!3=E9!MCc|zDo!+3HD)CeqmOdp^k8gu-II44o6DR-q>16ry z8+7={Bwnz(JwW3HK`Gons**=md4ur?aSH*D#Aba3V*yA$rJJVzU#c%<U*XYkGz+Lsr9a3~k`N}8gmNJPj6``0nf^No$x+Yg4l-s()?oCfOBJybbvntqtF%bx z=fbVrSNdZf&?I5}R=6Pzg||`qb=3l^@zRV7FneHN7|#3sU#PpJuOc&h_+XOhOy@}H z>1Vu-xd>%`oi!TfqFPf+dM~r1LI)kP<#W(0d>~K?O(Yg$&?S0xPsOuAK&qScjs`Q@ zE<`nTieAO`sq?#5R@cYUavikWyH%ntH{nwbQ?&JUfZX%}_28}zx}hL2(Av4+75!$w zLj|R?I@|bIZMc>j2GPdtXiBuo?B3E}+{3WwHbnCb$w%Wb-P7eoJ!dl@ylH1q=A^sb z$dkom)TXjPq>gU()3sqKiDR{&W+F+1Nvn&z-N56kzr6Mzq}%bmD&;23m7)e z*HUP=L%tDg;z+y)3)eWTZ^Yn>T3Gy5zE>l$RPFc=lDo-HhIM{?U0o?#yo(*rHrdAS5@@M zR8qgIb*PC#Ixu{0GfYEC;^fy-akJtBPIZ0!m5kD@?4}qG2r)M&7YBuXw*TvFzmaZ3 z`I*w(94wq+P?_C-5b<}xHYOxpKg2wZbfOo4l1uniazpwF(%_L&$|YOzHSG~eHmg2T4Atuw|%UteL^P!wqr>G87TplFlQ55K|LPL*oTk;cFdQC z+9q+ngy^8A&@}9Lo?78N$Om1`8&RX~1h6B}QJj1*OULuiNf$X=4kJh(QQxsHB|If# zY6ZHW0`>1*)e8qmdIt zQ;a}><+q3VeKk5}oS5KqYF<$MFNC9;sD2|T;hhI(3UYM4#TV#VvdCDxk`+(M4slc4 z7qf#}b=vXn1e4LH<`?SbG8426eY|*DyfS^fMyBMn(r>(K*N(d!E=QwK(2BlkwqWYy zj3}o`rT}J%Lkie^P364((gPJ@IzZ{&OZru9l1bz6ySj-l~ zhKr{?{;^^jk0H`V2iyA0<+|)Exi+geru$c&yeok_GPO;$RjYc8abye!_$I+1Nye`) z?N>@(>H+`gg~$aygV;^`%vWmCK%eG}g(KW_7R=8MpKo9|X&?r3p5O#wAkGH5mKU-h ze(0vt>UGwN4b}gxveC#xf8O2f1tGpvYic=8^>mYEtJjN#=6|6o^1TzoIr^@=fC%li 
z!Td@t{LsdSbeJr*MGvaDvd1xlQIv>~kC`~CdZk1o+4drx6$p19YN>p>?uz8LhMs7*IEIUGYb zx>r8!Td6MeGo8JI%$!NT_DNjhD6Y8`@nBT|DwBk?Y*tcbV(@G60L6MeQgVi&SKC+? zpyE)Rd>AC8;v98#5N-UtkP3_rwZ=;b<2eN>r&kbyhk58;lcC=IThQcB!B?e*Bh>BE zeQ1b9$#tdQeGccH;%FXdjosoi@Ja$G+Ro>hJeBL5~>@DbT^tI8K0cz!!wsCKQ;Q9wJb&(Bc3Z{eIhCKBpbHz_<3w< ztjq0B!l5wQ>kr0O+n+tSpj)&zN%&wWlvQbG@71^Y7e z+CB1u-(vE%BK>LZeW6gIZ+a?iJ&{Pt3ws$)MKzqms~3mm){NBsM4b}>39veTx4vv! z<;N;Ad`>}#1j|q-Hc>M#hwD^<^QL1kG}w#(ZT54LiTU$y4F=P zBg3~CebvY}v?2dA%4rX;2k7J58qSY)r1+w8a=cYf7q(!x>@*eKK4^*svazP0oJ^$G zR1(9W>Dst^JSR>Dxh~yJ%7~;wB1W&6^+{d^)H_B--xV+Mo3d+B5YVnun^H#@AU{r8 zb3&4M*&AKGQL29H-foGfr6?W$prH$$r@6grxW$#`=iSgjo9Brv^LOFu~}7Fu^B$6EEbI9V>CH9KQvTAq;yF_SCU7ok$K;7S4>E3soLr04cPX)BK~-l`REooKXL)eTPT>Q0t4(o1jR0b{{8SOn zpCX44T;`7tx*jGQoqUTq;YK+yD(`pZqCu6b=JDAC&_w{GmjkcNmXnI5&_~y&lH@<= zInkF2Zy#|CN$a=hsR?y6pQYR0D9qw4Gvr!|++WNQTAzb3n#OMDE5!@!-Ra|1TkH5a z?0BqG4u0L`V6pQt2{LJC+ff$G==wZzRoHZsW(I_+e3G3MF0lc(2MUnhzdFb1LOH1) zP7DF+7>$r$-?2Ujkixc9gO{ijFz+q#yEYqiP*ogd-jd2%x#1i$?K~QKM83+tA~Yxy z#i*CApx*3xHs@+kvk8|Q|+9BK0k8a@b7R+GYS!^H{y9H%yrfzbA z0@7%xNuH+(!J+{0F`!QXjhUu@2pG+!;F37@Z+|b9MF}A>F^0TuCxm*OA)nIAX8z)G z3|Sw;aVO=XX$E@b&#eNTc?Ga9qyD@L-XLAuQawESwfh=69|>>StZSY&c$YoR zT_4{RJJj$vEQ)7Q`Y&WWeS_0nwx)@>=RlL)!{6wq28+F$bR#w{gfsK~v}Id42_8&* zE&y>I%kpq{vMX=&-^o64Ne?J+!N}|abd*YXoAreRcnTl>`0VH+DLuzNI)EdcH1VmUQ)azL)Vnd%n^aJemW1 zc9DPNBMA|m9ZPLAiq;e+-jxqnJw_SGToXi`cag{&=SZ7yC+E5MqFB@C1>O(Ij~U-`d9NBT;New<%?{QqeO^Ftbz2lE1{XzGi)9fhJ`rM2dvmH~3OVT-RmB;^ zb|}Epr6VOVp)u*~#yj#0FR!FV4^J^x{g`hWPtS7iCGNo5#Z8v9cZeT4JLTO(ly7%j zJ6ciiN4fQf1T{EF^w?uQZj`f8F%+rgsvS`<8QW>Ccf_HWeJ|1YF_D%NIr)=P|GGVn z{jAM;%d-z#huWQv_YBT7aN(^mdqI2+W;wzW=Z6D%*Oe)ce)P^z9yCS%9$Z4l(BjXz zbWPU;d%w1f57@gh}AS21#3QS_qb!ar>fB58ldf}ntg(iO>C z8XX)JA>YWKBsT$eHK(#f(yCGy65fPnhDfkupKQ7pvL%(BUl;9_u=VrpJ&e&;kA27| zWt(Cti+ueY>dE8cJm%3(c~!G(umC_HH?f9BPb9W4-z1NMfJQX53iR|ZD0_pal-Q*v zh5gEXM`K8v#?VjbmhRHb(-nVpo~7yW)K!;L2rDwYxucDoM#xwLq&RMl zZdY+7KXwTEJ7y@;8x%Uy0RagR=S&y;6bKMlPa4!%@@yFIUh?O#LxE|74b_JBIZFqv zPl((N@W6=iqi%l0jY*U_1GthI3NmEaID7cPO~cHVb)z5*H}5BX==#ygBlb$J=kPpG zQJMBY#0sp4{eVu8LZpH42wTPq&F!t3BPwZGnN3Z<>D5k@8otyJoQ#=Aik~PS2@-$E zZai-EO%AWP`{sHa!MMe#tIvDME-{`wl0A58F0lGJ0{rA;nFXR_AGk2}){XH8|9C%0 zgIKRfW2n|$Y6#~Q0X_-_aF1Akh#WeuaN)cZX<~0?ACjh^ZRR)jOMRe$N%{v z**2Oyx`(az>WvTLuOM$|sEU`guVlD(D~Xn11E+vJJt^i)`0q z_>(G!-s;t78HbvGuw&TDF{3_2wxA+x6ez6t`gL^exq#0HIck?dFor4Woipk<0oE=A zQFo}9>6o7P=<~(d!g3i3|kg ztfYd8bMi(zG-GSe)(CN~jrCLnm;3`@w9CQ-%7~k&!d*^lyPnGT#bHmRz76AFI6NpW zTeD|gX!0^x^ymAb;9`hc?d)i5-_Wu`hcKzQrtuRzX#Ri#Ya%l(9J)Gi$1&!SS7Ein87T&{xf;G`IHNy-jlc5lLt9& zDC%-S%UUfSyg_aJ4F~V?7|6qAWs~&ASx{9YVpqHU*Pr&V?o@pDy2rHrKf57MWc<_o z3hTdwxdX;$N>edKAQ2+mtg1;lF}~qkz`#7;ZS9We$nq_kFht2e(&eF)79}a*{FX}} z|M+V|#LS(ciimd;Q@?pVXj;_WYm%nDnVr%ZfA#Yytb@Di#736=yr49Y9I659n-;fx znISbow}yUK3RM|nCwChnY6XGK2grl2i)+P3O3|QqcNY??~B&m z-EM9LvD0;>%yj-riGK?{aS`3IE4Vc?7atfl31_UdU$0sASns?EJG%Z7M}2Y?yZKNE zF+fttEEWA@h0*dd>fW4}&iws2EC+wW`#u8Z=;*fJ{_}H@DAnR^x61PLAoU-2$N>tZ z-5DSG9C3tUo(085#3fHnY)HWms|`!>{6>{d#z_5PyjOaHQfW;`6BwZ>{1Zzu%zCtRxG z`IYIgP_>@?6UexIiVTq|B9`=?d!UECt*naNy1Bn>kbNZ?q21{!?i~}yXv^l1(q2)diaTom%AI808M9Na)T04Bp?{Kha9cQy1C zGZ(f{+p^UO5t$1{87#CVd%*AQlJayZ{_U6hmI?uQ-m6~;17t1jB70cx`@wCz;2%hR z(BG2}APteuVe?Q|IjWyw%6@JNE$pUOpwA^!pGuGSBEfzopWIay@0DvEov<%lVgoHy-?eH$^pG z&{o-cHmz8CU9};-HGKxhy(C*{+f~maR$du8P~PZX+iT~pmz8)KjA21K$du81TMyXRr9gtP^fExZ^_8pfJy1JG#El9Tc-9wVgI7vrwo0Lro=)QyxN)Pqlz?*OE z+v!J2XHsLgBoMsdrIxz2l-d{59P6BJTjiRSaxPq~mgzMU+0kw7Qe@of9WVqxhqq(L zn?~4J9cqf8Vr^Qmou!GCH^(yRo7O0V&TZ&>eq|WT`d7=T;cIl}Vf)sTXVJc5oNB>_qC*(iuJ_01&W@7N-Oo-3Yg{sUyW$iv$< 
zvhJCmQlsy5a+B9%l?-*&;zBO_s;p;5!kkfX;apfll}64?z|cIHQ1#)3ngBw*NvWMU zeA$7RN5mL1T9-Y_WX_n zJvF%4#rjJW_qVf5F5oB`YaPlP*<@p|yCfQ6&qsH$F`FlA6_>!=Fh%a*YvAVjs%Jjo zX(~z8Tfqv2gmDI1i<8Cw+P8d1r9ayrV_=4%G4cp(EMy#G4AExrBK5E7p}@4n;(Wn%Ax5C`(m^3m?F+#~c;aX5 zx1s5`6szxV7nXRzn0o&Vl1MZ0_Oqbz}xt=dNB0c z-BjzxYbw~Wo7iMI*NT~NfUArEImhsb@6wU%pCh}>HKmQOro}@i|A`0LlbWdgt@|tp z-qYNtd9CL5us5ieG4s@0&Zu+@;)e)tpX;v}YL+K4%nc^5^FHhGLH31`&)@gw`J3SU zxbIzrV43STq6v4Zu$$*o}4VNDrvx{!^BWuhqBlJ010gmrrNt`zi33sVgv(f)TT z?D9>w9B$nAQG6=h8auoXjoy)Qe^SUB!6<(tpX!jIx1YC2G|_Q&G%nkhtZ@p1_?)oG zwa!7c+H;?&?;gYMYw;ICp_IcU7J~V(SUT4=e1mlLx$_2{TCnp5 z?IizBHn&EyrJeR6G?CI!&LSvJbHgfE)4_*yYOjTYh)se8fU>QOSTb6ay6a^MXUswi zBkpnUH54n_BpQ0m3=f7CU~qu4$N!gg?F^v4cmUO;9-0Xi`4HeBQH%+Z7eQ>xA`K`C zH6qfQAGg=onNVO8O!d zqhrU!n#eQU*LpA^`5czQ%-o<9Kr^_h zCBn?(JD1X0Tpggb95*yBD8sFZKt0>Oocn=*e#F(v5S+O$+x{rnLGAmHB|Bl^ypj>q zB?PNn)NKgjUez5AfW#(|Jm=(6&A+|i1R+-)=^{~5R0lCeW3%%h@#|2aB)Wu=40ksg zTW~*k0EyN3dUBx&ctM^2+iUng*$|@9BQNKhDoK+cmh)_tk(P>sqR7(TEl26U-zvH- zy`Q56(7%QAHyuxkrb?)3fv*cXEWEX@jW8#qH>+ z?E;y1d%~hdBh9Rneox3LlNjrd8n@n?EVOuiwND|bu7 zy8#BDRS~p3f7!BYcFZbk+F~&+NAE1X$|}K@)dzjWV&pfoxNtP*fG{}eZI}{T?n8z_ zO)tAPi+xfl-pKfM*Z=NUnkjKaLF8GZjf*csRgFv*d4^WjUxFDbs-Wb?EqKldc##p?e2@{fpAw~}J;r&>9!ABKyixC<({=$}=C-U>>GFz8cW&lEP zEsZY;xZ~t!-bI4t@=YCdhXtngvA&4Qnm$7>ldeBB29Zdudq5=6>Esji#E+a3^N1Tv zkgir`b^WsbB3Fk>(~RGySm%mpJDr?OA0AARB7DJquu zfPX5Y7}h+UnNUul05Abfg881M)$5?}6lowY9OFhW-0(J|I61)NqTsg-9V4)zmNXeIjfsCTey3qJu~}V}rhT2AVE#G|ci8hr+rW{^4ZlTqIBZf*W8GVqokhp(pCTEV2BCPV;Ru*Z z8cM0c50dZWsD0R2i6|*!9&=v>By<ht*MDNI2UpLH&%+MkO}kVpWNkB02V=p*`G=$ zTz;6;9Duv%o@ageu7qHf9?zwvxdg9_>4$H+Ur)_n=@hwLtIATqM23aiN{hQKcNqdT zB9W^8zlb{YphFb4@|CGWbI~>BY(?iaXuLIl&l^gxVctj;rYBHhN?Y~)c4ei<*Mj?r zR(#rNbCV3g4OMTnIxEdlSXPsd>JWJ_w>O-I8DN#aQEmcvmVwg77ekoWt+dfCn(s}U z(4Pp8-|Rg;o=)wbbMW{UN!5O&CfO^_hjbK9_r*U4_WPom@1T?<8=3N6qhPI?zu46U z+9B6!6|R>eI{~iRI4=E-svL|3GZ_H96e2>3NVntrD8WUxD3k@fzuCqew}(^XHW>d_ zC9Ns2hvx8L%Uo;B)#13e!P+Iqr|Koi>mbd(lO8|2TIr7P^+W1QSmY7``Cng=ReD{> zBoO~w0l&~YFUU_Fk&yb8b(>TlAU*nf*;KdUaJq57IxBWcz93+v-L@JFU}TAa0%TKQkPo?_28qmjW6^E&^=1b<4aeR zyX1tIyw{A|%v}Tnd~TH{T?5<=WOlguWhcyvItzTP_kuM4U45vedk!)U9jWZoyxMgKGpDtQg|;%I_m7Bp`Z>+musmGVbPjhwe~ z%|Qg?jI&jI>yD`(^IcX3W8+niwoVPgShF~4EVbmlLdVlv)c9T6wRnoF?A42&>&Xia z)j;-@K2$^$+$t?RAdY8Zwimz0ceqSQZOnVO0*cMYOoOwf<$|6#z;!4sy=wNPmp3P& z_6t#75ge+7P8 z_lIfexQY>%%Q+7k>k*LK0Vq{Sngm)yFe!Kd`No!kjU9BNw-R!Dfj6RK^o9KL6OzY} zUf-*;wj9x__lkh(s@3gaKp6uYp`bc;Hfb;%2NeL6rAn{lCSI`oCY}o)YO`FEmK}k+ zVsx32tb4wLJAS*>zd|we@F45LWKaSdyQ5XNt++l7nrOj0ul>mj_aorGi>=>=rBu42 zdDl&yAQHQR1C*%;%#+INwmITcc9Y}VgIIlO!za9*)}ma()tH;D>=2`a>}p_K08Rto zt5T+DYQw8^&`T8Y%OD!o0z{8u5wL%U?sCDkYGGD+DYpVmrZKKJl-IT=!(4(RnN4f4 z7%b#^1{lRMOh}qBNu?T0oHvgTY2!+YE47_0*q^+TJLkbJ(A|z#K58K2yK?* zBHX@EK3y_G!oJT#e;HDXx#=B9)5OIUL^(5oK(We-OT*j#$lw?DV|`4EYY!V#wh7)p z=#JWv%cWMFYVt8o-biI`F`D3k4a;g3c078|mM#-QdvZcuG7)paa|~TpSWFAILIh~$ zYAbEI6>-Q*wpBc6rl`A{XeGhLfbuR?@m(&%Kp#~C-dTB> zu!kKv52FQ2(FKQ)Eg^sP&Le*>jB>Yces$aXE2AkmbyeZ()l~1T@58Zy)4e}m2G*=j zLPY17bhN055S4~KSW+;TRy^Sn83I-b2r`5fHj!zEVPQhp6U+KGIi>t*mYJfVaSh&f zy7Aei>rP0W*9(Y4g<&fZUe31s7V*zXrUd=C-ieT8CrNtsbR2#7EcZqXk{JE&5SMGq zeS;g(i8Ps zo~w^GzqV{BKWnZfPr$vukDB!sYS^+|Ocd#>4zG57igYX)Q#ho-1RG9Y`z^agRnlk$ z&%c|IiWkM())>Is|m_IWWVB|}6J(OD3Suzc z|7syI@9E$tnW4LlmNg@cZsF$A0l;}(!pZ&55Cdz;O)zO8GO}Oud#)hX6txb)_UQ+Rk5tnheqfW6 zT|A9>P*8s}NPVO{Z}?1=I!YU45yKp|-e5E9jlPO<=ok*1T0yr&^Uh(>A*e&E6{euUH?ie;AL1gpo?h%kMCyji>2@92ZpPy=Ldla zCY^~{($9&(7oEKqcyP%_f9IMH9iI8qIpq|8|s|P7;lJu%QcBbEE}H+ja!w%=8w# zm;hS~A+_OL^+u*E78q`T8rVes8HMVQ!y{Oe%7fQDB~gNP)U#$?Yv43RKkDyh#>oZV 
[base85-encoded binary image data omitted]

literal 0
HcmV?d00001

diff --git a/contrib/PanopticDeepLab/docs/visualization_semantic_added.jpg b/contrib/PanopticDeepLab/docs/visualization_semantic_added.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..082b00eb95f767ffb252b6e04ec29920e0204664
GIT binary patch
literal 75334

[base85-encoded binary JPEG data omitted]
z#%xy_H1@Pc^tSGh6E}j@A_T3aBAt&8xFxmx<7Sj|v7h|EEf5t}*UM$AW{|(FK3KVQ znXIV0_wsh{^{qIye!FI^@i^x+w*|-*)Y$GTZ;BPafZqC@o-8Sy7Z8Omjps$=Bs)hr ztvP?IT*bVQ9C1B|`KE`y*X&eB?5Dt0`@{1h>UKD-JGWzLT?4cIGv$Qm{NwM%Y=s1Q z-LBn(T6|Wyjb6p+P$zA~vd)@XWRDXzI@ZKFgx2(4rLfu5FL8SPIFWKSbj+klauCy6 zdn!79rYt4^H6H!ju{`+1kyTD-SmM2~rcigi=6w-PX8ePU$H&6PBi{Udspb9Ou_Qus ziV8$qITc?kuZY?2^D=42laW4+%=f;jsQPk>_hIpjI9nXSH8Z0`mi#PM<|=qGTbyN) z9`Y`DqHm{dN1_Px>?8uZ$|_zXF2ANTSJG}Ng|^%1OtQp#43y|am(?owMLyQnO?yH0 z>$G5TKk^BFFL6t~SU}pk4HRU7iU!dg`9xp-w_wY0;cLoJ29kTWR9LPf&Qi{4^JpgA zBwF(h!q-Nm%00l;zSGEfccQ;%e}NZ%izB9ypCg~#Pk}r55`d-|C86S-{oKZ*oZjFFgGmq5W1`)9xR+cGUCXXCkcwN8>fNx>vG_F8rAwj7 zv-c`f1 zK2OnNs!xe;A1)f~Uz$xEsK2YwDhf&2ZPe!SD{yCj-Jzu=-&H@g{w=3TR;$~;KL8K# zpb>}1N1-^@zKFk)=E_bKAt`?+j20h5J-0>Qz74XJ_*T+z92 z7eWS&U@@}HZMP@9*~Z)^_9Bpqnjsq~qClCA523(U>XiJ|k;W?^lv?h3bYpeARbEO_ zfw^aI871(2*!VO*agj%+oZsEmwbIgd;{m;WAijA};5Yn6lJc>1gan7Sob)N%6U$H@ z+TbL5ra6Jr6{lDG*H@Yda&wDA@RxOrnlFk9 z$nz`IAA?xy0K%PJb9i>)1TOJ!HuLnSK&R$yiPI#d4=<3I-6^c9R5V+8bK1LctSjb7 zvyCd0>im-a`~GsBX}1|c`1DZak)9t0mnNm%p-PfNky&05o7$d7d)JHwR$f~oEXqzZ z#udePDy+?Q#@`wxVsjScdy?4SVT=R{jd-Z4PIV13P7KJ>N z6>N&Yp=!Spv$n2t<|)&5^CSB+<0sXmx*29xXU9@hJ-wdRo&a|FDIxj&y) zU!Mo70`;gY@(jVR9i))KeDE9BPxuX53Ir;=&8WIF>|MDHkU1fijS%les?={ zw3MlJLv=!Hq>#C%WW z%UDZnKm)6P-Kt|!<5h_G{u~;IdP&@6SBpHKPZS%Y080Z!xtp#ZCO@eg2q-LF^-y+A zxABk&vh7xP%aOwr_!3k4H3zzGFkjizU%?Xj*`{<-`vg18=7u&7Nso0jQ1aRr+s(sm z7pE7OmU(%*5*_$Z?H-ReIT1S_m51?^{Ucn@*|xPd;J4y5%=o({fNEcCz5Z>{0K`oK zd;?G@|Fg+k#H%GFh2r_1Fu|42^epIBVpp2)#ej7I)7Lw$fM>&wJ1T_D(IIbx<9JnR zqwNcie^P&6dRYI9CmZw^PCaM}U-3X4dya7rBDPAf%e`cGdZu>R9SbtM$@9mvq={#= z;1_C=gRbgIqOZOC)g2rm6~udgwkKLwo9$kOR4r++VrzCVF^x$w3SUV9gbacEJyj;t z0}V78U=^<#A!^H}Zg0$&I-x?PID`4g_b%V|Q9Q!n0w-&qYjaRB1{OsS7ie(kYTb!i zU0q7Q^>%LR!Pf5Xt+;f{3CRV?&1;*t9$KFrP^erqAk}HcdN?V_a!m1i10@E+*llO* z<(E;jzb9b3(>Bq}!`@?rtO;+gLC298Nj(9Kh{4VondF@|Xb?q3r;(+S#F?M+~ z^Ub=Wf3V(9Z%q1s4Khz2O48f4F11u!psbGUY87pY1q9Q&_XFfCfuh9f=Q*7<)uX%k z8GWAfG8s7-dhg#PR^aVuNnMT_*LI7h$BWkERL5=Td*iYAPY5QOF=Lab9mv$q5&Z_zctulaKAQxGy_@XJhm>**{PP zR%CK^^IsL|F}|)-%;ir+;_h+}gfi=0G#|IAt0h%60LvbT1svX|%9W*&m~EW|)e@F9 z?IZotHwPYCk3`geRE0%0V}1#zO+Y`tkDczHWyto^GTmTmam#n?rvKkpv^}N!Hdr9F zcu_iIIsR-#tMXKfVSY(b^?PFZY#FZ}*cf=$(dYi35=~iAz2^sr2feMNi#5zQlzN+e zT^1yTf`bC*C77DrI#(j$uFkNTOdhoEC23{U#zqim zUHg*mCf|gp)&NtG8v38F<@9Z>&zLK8@$0ObA#>egWUXLHkO<%!mV_e;rtX|Plmygz zATD6>H-d3FA5o~?h~07qv;vw_DZuMoEPXqQXV4eUqG-KEq61$ z-rXhi_=aap>%WXLlZ5O$-nl`XtBhC9(c1V1SVRa6$YarbvN(Hysdn16cr(&L2BH_y z$hxCZT`!N{kqpvVoi-O5YFFsm4Twk;thQ}bv$tuDb19U2KH77i zK0U@Op!fP1+)kEr4h=tunYGlxYE0Y8_v}N$!ZGyFM`CIk9~W#m`Ta97`r8VB!^www z`_GiSpHtn-sJ@^IX1~?FW(R(Chhb3F4Q0#U>45`b5SN4orDtzbgAeNS!%<^YxA*Z5NffBJr7}TB82q&lFmWEq?9(chP zmL$9I9*KU|l4sY#()94R92Kah5Y2s@snPMci^=dz86wmfG+3LvK?W}qR6v%M!arS2 zA+$$@j2sGNnkO5|&W_i*HFS-tl}4HoC;(2C1dpaz1JPGvJQmNt^rwV+;f%=MeRi|( z&x#qr7Nsjv%u0*YaV&-q;3$M8I8!cNJB~Fjdv>Czar-t;+QW4YS31hfxBWpt8^a5K zPb7IvKFo5ld`jFMk6X6JNf)VNB2^s7DtHZWyZP_^8Bti}F-ar_`XeqdJwv$YXn^;- z|#enXFfSA^qZo_3OL3iW$R2I^e6re0i5B%b z1^ph3ICaF!A7z}7dJ+A#l_!r!<5uE>10QM86%+Eu5Q@&(xoQ5-nNYdb5qP=$Vw8tq z$+WUgSh>9W&iJsbu&IR!i=`xf`%b2eCy}5*uzrZdm4N>@onr^5z)E&a%ZlThKI2w- z{wP(@54(NWw)9lDyh}O91rZl|bA?^7HR5 z1&M619BSD$=AxxWEsZq;DC@MoY`b5yxH9hv@``xM6I$L#PqcsPXtI6Pfz262q&O3y z7yzG+W*pf$Xq(%>?%Ypg#UudA&T8<`XBt29&hXCZ5`} zYT$O{oY4M&o*%WB#Oc0U=!n^N@qD_ZPa@hfRo$fs$SJoRa0tT(U9EZfdmzu}p;XcN zuh@08>3Wib`Jb=CMFTRI3PXwWeVDN7Vkr^|3ktkpM{N%SK z>Twj7@Mgs1{UycgguCOL)JiRG=jm_vtn(UQ@OwC3dm}1kzo2fd@qvFJ?NGgmq6S&; zOMjuZk#^;htpvQWP~n`qER}l2Bxgxbg+|B~wt%?3a){zu3ajEB9$&6N6c1EB$HUd% 
zsdqMe|F{^N+>?7G>dDXWb7i|r6oiVlZ68?sH*^FY8q8?ar~hRbs`4gh`AZ*FkO2da zvZ`&Rty+4|fyYlwY!V=P(}ZPBEx6{bF67pg%YZD+tw^n6`a6&uUZ5W9H8p+T`3o8t zeeR5i)*ZTDMR9Y&>N2_z~Q@6yO8YW%85cJ7s2p0ZC`{Wqn6 z(eTf@z?;=7+DwZ$jpP2}`F1$Wq?$b}IYE;?j~uc)bwQGqHHL$Lj)N9E3A~P6~lSJr;qe zJ}WS>$@f9yDT648OZg;Sz4h?XP4j^XuQy8^h00J;L4q*YEHdFv|5oO`H2N;G$CLfn zrO>|iLuUK4b;??#ov~(*$KKHyp$_~V5~)jpyx@>dGvv)9oIsuhTOT0I6CNJT4cJxh zCxrRaY-G8C!-h6$$`>ziG9gFgB-%@R#gS6B#S{?ts2UyQ(M^TW-{CdES+oKscnFSF~gCg^U}Ym zDuZGbRVg33yUfxNYn@Qc_2 zlS71JxyVPZ>I5Im00O%B*}zHsGr5a*9nJ+Cz8&{uf}f?kK9dc1J<7ZTCs-9#g07GN z0f~i(#rxfl#c5{5)2}f2UN4X>D$SADyu>sqBrs%i8pDrz6y0TwfGtAeiK2KaXhJ+pFC#S?L zW@Md6IxbkE|GP%Z?69QrQMal<@w?vDlO(7VzOP2X^bM;i*A+GIBf4@Gp1RU>V4>{W zXpqQh)zW6`JYx*EEpxOJ9}r2juQCIykWWH<3SIfa(Dg^WOCavnVA)e%eQ(*LBwy2- zkS*@5jK(mjbAcA?D56~5-kCwUP*uDi5opjVb<3&y98jL}DBN(m=a}PW_TzQ19@1w?AhMvWd-xCDf>TzAmElqi04e<Dp_jj1~$p(flJY>dXWgk>Bw zKFy(=f5C<}yu6G2c%9%hBUlo6BJF7TGAoDYkJU#0V0Ff86x=c^xW}$pg42wCy0W6) z4vI$~*83^F`BS~bd?)?lFTH@=`RY@DP2d!Kt-n0;nGqi>)fj%seEf^jwbuJil3w)N z2|x1%6gMo1e08RH{RB<}1=7wkPh8*~)dwddJfoM}uMEjZK^xT-nXk8M6nK52-?O~p zM=yful-!^lG$eq=)ac&LpJVAXI_YV%*@d}AjuzElqfy1BOEKGNxkBGaU~=OcGB)th znxJQn?Gub+q@STsj3$;}y7DI}xO8lljzYYTD1I;{)9Rz7!N*f=+7-=BHF#Q3LmU_{ zKnD*fHkCb57Hpn=!C|(56IH%W;aW)n$Z%N}IhUvC;3ki>tZ^(=LFRSY*~eK8uuce_ z1d8o5BC+*!T-E{4f*IH{#qPZ1vvH`;x=VY@f1V6q%gMlL$?Jo{vfb{}Xn$?!Rpg>%|QI1b1F z7#9de*w34D@dqp34V84dR6JSfSbovZZo|o5s(xPAxzJ8?AXFX|N{$LKYOE>0&jGHi z{E2uYk!b@wb4IAR_}gXymY~vA(^M2hZ)@sD=QCR-myw=W?P8=V8r=4K>-!+0^<)d$!D051WF`3z6ozsRy7$UY~} zmf}jbT&R!yje5h9x@Jrh+gWTwhu>rQV?5W4rRg9y?5!*a z!tYHyy|yF$rAZyN*(tBKsf0Ef!?a;6w-W@86*6rd_;*yrPakzD?{*x@h!UO_7m1Z> zqLdwop@i&@=sPFMJ#NExaiF@;Mz||DmXRIgu-d*r1N||`enfqM*v!V_Tg1XkZ>f~K~*;~F{4Xrz1e9Tj9PM!17RKGj>Y&w-JPbc?xi zc50WRx6~Xpg;!XFC0FwPl|dK~Fi;KS)D`L^h@J9G=zB-Ep`i54X#VB_z9O1e)kQ<2 zA4y_;_8d${{dlKRTGcN;3k_Pqqn>^y1(h(LYy@e5b$dqsWT1fOtpqUBBY)iG&UwXWeaRI;hnkof5_ZDc;w;v~k_ z8J)KJuEGhO@Y`8W+?2H^paGmg(Ku7GXA{*m+JE!5V~2}|m?szBw%oQj_Dp172@f(aApxNRlnwR-`>j{gg}Y4eVyF0_uM_jZQxA_cmMBP`F~y&bbbhpY?~(h zO|d(@4^NOmp`Ap0)#=(c@V59y?5 z>C|CDbc3}5Nr}28r;4dY{pWnA{L=5d$C+X+(_NY0xL_kn75S1{jyG@h*m7{L6mJ?g zN%rv|``GE0vUQz2X9Q~Z>h+W_93;QM7OM>WW-2(gxP^a+W<+iGDp{gyQIdM*#6%k7 z5?nRhp!8J9_dGe-4%7&Y9}!V6#{Z(=AhJ-8H+il776t@FkI7QVdUZdzK>j zal>(I)_|lWH|m)2cmsBDVDts!kXJ{#gYWZ`TEAW~%2#?@>pU3=7hyTHXUN1>Lb~D( ziAl`>LWsb@#%^L*FSNDql_sTc;zJiiZWj9dpd&4E#wl{im9$Q1G3mOW`U*MF1tVHy z@>v2^0Br5Frme9yTzl(0nOc3z_GijVPxwH3QPK+f+||u}!&suwqdOx=`Sa~k*XK;! 
z7WUr1;tTxIdUw~r5eK#$c&1EeCstYVWmzqcuP|O@e5QD=FIryAr!EJ3zvR87M1Jy( zRRwV+@cQ^X`Hz7;Jtxel=d^WlB!i#*)Z>PGb7b)?`^}z>H#nSJGgF~l?61N()Q?=# zVC}$Xb#8*OeLcpC6}3oYJooAu%%?{J;0|B!uYuM>amNmP#Zg#AVsf*QZnbP!+X9v3 zBv2l^ic!MU`v{Z^GM~>nXkNwY=y$`csGUydQJFBTvFH=tNEd{LL3|DKT~g*l#S63DsVeGswh1}GkXZe!+Cimqu=i6224s!3BMQ{8Wp)nmy7R;b;W zM6)68I#X>Zo~mnRDsoYQpv?L`e?D2~QUAV1I2M(I^|~<_si>??C|&l5$3R`&L?v#j z!F1VaG9ols<5JrdeCR4)laO;IyErlvwlh6bMui`l&+OnVbnG=k+`IfOsRw;~)pM{A zn?x1&UR@fLJ_uZ9ncg~|eAK#Jk=;G|GYpxIfs(FZuck%#YvlVw#*^%6Fk|CAlAqBM z&11qOoyHio4glnx<tb0Y4f>i4U+a{cik| z)3dMk%F*W0xcvpoVveFmxeBp;l$>U)!G>AM^e)CHp6|BhEgxGsyLUbknjKb7f>0L^ zVP1_!u@)v)i=e%}a=C>hbR--mhIwmf$m>0*}C&ay{yyz|5S^d*VY>!qlw-PI5P$YvTR|sitA1cNx7pjhzpLIHqox8jt* z2LY1tZ6LVNl(*NnKYmF+;7Vane%BasPRv)ol#}fDg!H753dckPO2(HviL2E4(sUw0 zdY8X>ed4yOerjhmdYp3h@n57)hp{bwE#JT+Ft`W~Z?B6!$?oHEp+}oogBXxpl&Uv) z&W3#ceF`mU=tg21|3k{-m4dwY-o28ttCm^FzTrAcc5>~3?%U1QYDsebQln>wXDDUL zEW0{8uhw1n-saou{PkD1!-wM)Wp&Y}?@ZO`w)P;(76;B|%82OCH(4D^YKw06Q7z`z zBRgu7X3MsAZrOGCnsyXpfH_!Hd@)1ELMZL}zoUK1;VIlwJjT8N20ZGX#KuTeOm~ZR=VZ(Qekyw9oE~wkZ0OKu%E8=udEJGur%p{JTfUU^1JhYf zN3qgve(7yXGdUqnql~s9C_)~yd0Z7w4ED|GvvsxJ&&jjJc^$WHeOKa=Zl1{H3RCx-iM`o|jjv zqF2tJ7nkJ0#a#R2oh;8eBJGq|d+jFL&K@%i^E?{p3iy)O@~p!cXPD zwA*5OEs{d|H^SyrdhB zm@%DXZ_yz_B*QK^mGHu1Kus+omAeZrW5Z2gcW7;D|_!EWeZ&@tgG|uLoLRHzatFNXheEUu>`1xXK^Wo)8 zCw#jL-MgNy!+3FG$Io!`7}wHar8`5tic|wZDd1&wx>)DdRIBl?YfMlPkIaKECe02v-+x}}Cpxg!Owc8Je%Iivu zSBJwzZ9?N%Uq10s#<3d6IW>>6r)S%!T~eY4`EtfBOBP+_8SApMT_ZO2n?qieI3n#~ z<#)`;{76rPAquM>llC~)vpp>yN4Soz3!W`~7n7VuqE*FB-p3gyL=NrARw2YF1Mp zsr6lPL!A;SE;Vk|j%8wNU~Z|) zG+=$BtDVoJ0Aug%KHAecyl21IKxv=rStN;B!Ci!02o4@a@tWHD4QvnUeP$3=h}f>J zGKid(zk6td($N;2FUafw2V7~bsm^q47b+mtq88n;VAcboD#2f8T&=-V!$*;-d(AiL zQ<<|W2 zk^*A}vXK;!dI2aL!IGhnyVx*~R@t>BFjmOdpA_KMhC+Kvp3~@Y!qR5kQq;Sldn>HE zEB?wI|3ym(YGovlLcV&2U)sAPB$S#ZKUKlz`kMYMm1T!&pjO3_In+s^<~Q}8O+N9y zZDoGX8TRM8r&H&ne^NGIn<6lzk6*mbvXcmEXC$?knZ~lO*cyDL&-uel%yA zeSd#;dAir$CWEpk%hsN;8|kg=6K?aU!AU4&#>K@*w8b4Kw8YV1GW^`nAj$W#mWc=u;v>3kzfo{oFSaLQW=tj=t#ZY_9Ql= z`QZIcEd%e4%?h_4zB0|5C?-m`+2j<^@|!IlC-qj>KHt=%g7X#~FuCJc=-;a%tGM{L zZb=>_Z_Fd?eUu`p@$a3as+>1qC#ftios%Y<_spK4w4|l{O#YGByaA6Wmdm@hoOoHf!-~9uzTe;P_s6VUBDUxA{+JS#RJ6~wM z?%5PEx$hwst(^W2Lx~2^Jkn9sd9AQ@Q~V9L`itS;Ua>P~kXYvq2&UA*Zr?kUbnk-* z6VE;zEI+HQ0sj7GdE^(itR`pOcvs7e$lJW^)Dg~(l~rdJiRK2Gf25|t_gg2q)@i2I z(oqaozEmOA&k76ld4#Ott=v$tLh6&inDdnGEfk^V(?H`x&7pmt`-=_mKoEt_K0$kZ zu#y>p=a)P)DSobEz?a}~pq@(tK(kwy^UC?X}VO6QBJ!v;% zP;h54$FGRe+J0km$n0aQCfTDElwQc7(I;Y@2yE?|P!$Kz<8Of>B~aP30rfUSW8fK| zaQYcvqCj81Wm)t}B4A!KX@#KG$EFE~8$}5h2ZYVvS{tlh1*)N&y6Jj{)(CBTj8|g@ z(c1l|CuN^ih_%>d!$UkqpXTK0yS_j)@(DvT@lhD|EY4={!`?)CU)-D9j32noH_U`K zoT)~05~_3XO4&?EK@IU(SL1c`1Dt(YU?yJTjDh5iAkffg7x{j#3*HIArF%JmZ8Wui z$L6TRwkjLfmydhpN)mFg;3a^uW!ILCS_e)5TXi zqGgUiv{C8D)2GKWDXIs3&2E|2+K)Fp{j}5C{0gLm-2uBj&Xfk@ z8O*`BZ}5PW#P2c%9ljKx7qX!z zKiC~=ywnu})4Y0-PCJ70^AQ6)oS|1}F8WZwcF)MOS%XajKlj;LF!COW{P2UJ5J9s> zvS~xMpM5?xP!PZl(l$Up`*R`CG0<;!$;A9XThiWa{Kug_V%QB7X?TB9s8!4R1OD46 zry~15qK^DC-So&d4D+lYFW;pubL5Q+8o^bX`oXVWGv)dr)ZCP5rsA!dEjoLoAJy`Q zEq#nw^NssW&vZ|ReX1&|b0O5K%_8}z5LP0ohnsz+Wgm!FSQ46VIs{SbSw99VGCQ!v z;-7M{UgpLNX`!lZg%1Qz`rNgx8WM+qK={eD9TjL$GtD89%A7V^W9V z>n^Qc$=g_K`v95x`yKO|b^E}AqyTf-zGP?HBgZI{yK$4E3noEL>8|BmOS9fvx!;r~ zFOd`;nQ<2lOF0*Mwh2+4Fxk6`~;s(#I z6rl>P>k95f@0YY!4ZR-6XJok1M&}|DSdRt7Ce694m5XeQMoZBkx4c+z*Ur3v%!(M6 zc(WhCrfHXFSOoHZZ{yLSm9TB)HfeQdM8|d(FiAI<9nP}3izn%c2H z!AAwjG4!9y^0jAXU@hU6y3A&Wv`ZARi%);kb=(PLoi2)Hv4PTNystGZgq=OLukyri znCnXUg<&UYZ5yAeB2$j(g$s`I=Ykcu zji2!`wQ*h(gv%4W$kMV>nx|LBKfCSPoW65;n>Uen$K-V>wW 
zCVpzIQ~a>;ExohLcd}-`kG9)r3d(K)|D<5!HhvwwLgEE_i3i6^jjX!Bl+vd$9YcRm zy;FVIhd@W5KbmO7TD)E_D{CbVEyG=LrLMyQbM_Vyal)*6um1-vh+Vb|U@9D?aT_KWtmp+RvB$C86rIT}lsq$qbByC@c&IAF12R z%wMxgHJNolGc$g0TSue0V(w~8UT{2wlTcDQopZi7JF3w>(Eo*)5C;eqn$PZ3!?^WW<>=lunuayFaZqru6cNh@z?>TgA_YQY57xRKJnNk`I z;Di7|8V`i+U?~D3uCE5G>}Kz>?&pwR7YKzT(m_o-+BuoCIUWCf5a*7zU8G7C5 zLjZDLB-V}OU-u0z!vgPhQ!q&6E8qDjrO6+VdXcR?y9qT2P}ffjDc9Lvz4jwxn6|_R zcP6tP67LMhmr*WUOhk-q-R4$YAftL2OKvI&sMCM4< zB73h`u$Kw0V{oLrCeWrwlo#y#^`+OZi%5-)lzT+!nD%UOP~$5mXq{NOv>3MS&>Fwc zVST8f17cPrCa_Xy_P|)eThaii$jY7(H6v4o@FE5vrDtYYMx4x&wL~JEptga+Mr;#k zMEn>qp&6HU39i-7)xJp0LKcZv=0i+qpCpxoG`Z%D5-R)qz=`e9l$QOSc|ayagovk+ z5S|B4(J%>#K9#+8qsyjwg54{p-P#I&6K;)OhjUdi>JuvVf5%pCn1zie|GvywBQZR* zk)u8gGeqcvQKWc1OTh$ajWh^F<-ZO-*`+(-vP->)H7r&qu;w{KEqH}Y_Pq&FFfJ9Y{6EhNDQXCd=tMI0HbZ34RG@p{%IVQ6@3y9Fg~J#Bf_ z)-ZVZ)bmp-dBX<7c4?tX@VW`l)$a6hJC~32{G)?HvTu)*$Bn7p^vfTMPHpc*Rljng ztFG2J6Ao@mBY$(9Y;RZXQ7nm68rF0psJn8_4$L*0XX6AjT#MHy5c=z_w~_B}5ba4; zXn7vl2C1dFToYTK4IVH4A~Q^$eQ0Fp^+MrR%_PS!sjiG6hF z_3JH^`-xqVZ_q=p9bBEXGQ*jHJNPssA^+1sN;=i3EbwVs7AhOo`dz(-g8l3}r~pbE)FtVe3ew%WTz?ULPP%OuT$P4(xbJiePP*%B=^KZ`MD0OIdoNnQ`J;d(X{ERBGloHMScl63*@+y zSxZg$wmeGIq-fyUk1vEB1MXp&yTvMV;;uU}UlD|YoF#rd0W^^n3-sP1HH&r3l8loC z$FGL^nHlSPHWATx$wzoYPe;aX5^pQyDKGl!YWqP#BkN4l8_#+#^pPz-45PPZj;MV0 zo)0DxYEPij({9qxAC;E~tD2Tw8V|iDD9~N?4xdyB3JS^&_bHkhYa;>Rb)!LFXrRdv zjvD#I8~Pta+X0OJJ=Nl7J@yf}J+=Y85i*be(cRP*tBoX`MQ2srAR|Lr-NwmWx7P46IJZ1!J+<)M=djGPW9Rgk>0UYP z25TqbvDBjtHDB209wPdSb#CcNRhw@8ackg!#1=gTkhjJCogdW&<1f$Skj*P=NQq^w z#h%my>GRLY`(CE}E<3@=B>UKJwhlVtYKp5OMVH<%nMI3kx~SIE$ZHX<@B_nHM+S_^ z=qej_$PJw2{mo8eo#*uz{uJWYIxA)f=Rp~Os9_E<8H$JxA$CxbF-_hyu6V>%F4I9_5VtrAkfzNxH?x1dhob#Ig zd!>aFO8?C{CT78}54ANSBz}Nha6%f&dZRX8{R8$u)ljanv)c?zYqs~+hnAZe^HDl0 zLP(z+Bdyly1cx* zh`M<-z{9}uE#;$;|7-HC+%(rnZsN$YhZQ&1*xha_-AEv))&0X@+U}@(z)T8sleFIE z?&%0HNbK-`hFt1@h*VGkXm~rT4H?y;{}?8j!T<=l>5gb8uwyZbczVXAC@El~DvtI^h`Apr8mJ)f>P(P$kljRG|r zR;Ej1B^37!1J?y+?5~w~1)j@?K%~F>h~iukOLSep$EoCANZPrm?O4Ek2)q-MX87S@ zjH&iJ>AV5h1EqV?O!CS5?W; zrevNv0ym93_X&D^n;ojMd_CAOWK(WHq885MROO5MUcO)J8eWqG{NDo;Y z&)q537>i~*0$FW7b2Zpdr>w>R*AvwM;86{M+s)&ox6ulS=Gwg<%e(3IX~Q&(*I5-s zMCa{32Oev-2!(#+|G7bLgut0HfOv9rt%+d~@Q& z`_8j_VfkA(nJzc;%QE(ZcRwu+mD#c7;}_YY_%4M`~Jl{LtOw(7VxZ8SIWkck7rNBnvC*U&yh)(E2k2&t|w8 z`0DQp#BI;oQ4wf$ef6T%&pv!0HC`xe;BEh~@gIB##{WJ;(oTHrZ0`$ROc5l-BAqj0 zcp=A2v4)US{+1Ck?D7hxo{|zaj7g!Jl|s9lQ0+C)K6m-)#lfJz$tS2@P`OKADs zOcfMR;6Q*2e01TgLAYTH93R?2y;RZEsyrsNHg@D~a<0Ss_w9!-NuicDb4JByZvVj!buuUiI5n!TSlCLa-*xApDWTAU2~Frl2u z%(cS0b~dKzG^LEWX|Kv?y1$YaWxfWudDZDus9q9x-uu?n{UsvC{J#0`pc!d06+NQj z^x4vkRovlYtgmAIyr!uo2~ zILXz6yJ~BqMRC~pIK6HD?`2;~2Q@xLez|TzE^OaFyCgDuNRgFB__aXKKHJ76=4Lq@ zBPrey@SI2NWFP;kI%Kjt6(xE>kH^0+oA^SuCBh!@Ts$|2nq9KK;V9E2iUv6Wl`F@j zW2p>cQ38?P6fV8BgQ?H&f`Ds`7r`Tx}P~Ge&clT z!QcYP`3leW%LI-PVL=OJunTHr@r(Yn($K3qczbP)_2mR7i?A$j?k2d6!Il-;sx)Td zF6Ftbv|)8_OUrij_yH)Q$CIj9HH7{$La2cp;r4t8c_rwc!*rTIZ;Ae3u+$WMhdFXG z=Pj~$2YsunDUsFRpZ3AmslI#0d48$s^&-JBg$^xi2@fs%M9>Pn^9D9rzBjM1Or*xC zdxSpCb1bIurG=J|;0+L|zrwJq+P$AFHH#{FVLZ0_qs6TzHK5w)43_HjJ?6AqnF1;21FE41ojyCT`xXy=5$FnR4 zV3G;mFj&D$zW4gz5a$rrlNLZ}$i8{AY>xd5`BPXZ$zCJJ`YRrM$(MZNa+OX~TIrn& z1iheBYsD>U_XeI{ZQr#u@tX19Bj#@Jf6NI<%`Kmt@Kqg_pTMFr-mSL+xT(lzILL*X zFCjwjK2_51x-s6vXgf|>$ScEV$a4$6hE$G*zBCM@wGq@#Y!%h)kT@Y&oQM4C4MLKl za(IO1ku~cqM%Mm5@bAcM>M>~+Um@)BRfsOKXE6lLQgBRl49{Wu&=3Y@mneHsD1?p` z?e6Q?dSC$ID6cFzGc|#NuZUrYTWNI}F#lrivr#f(JA}#c?;8pZ@ysPvog*5Y9fPxB zPOf*px;RCkr{8oyf~7hBG^J)uXVq#iUDd{X!I1_wq`lme9@#TxN3RHM%s1yH%=6tV z&c&rJ9C{~Nv;`b7u>KZ#k!9+2D z%|NX<_JYUE1*Ym&rgIj}SIJAKnC4T~Bc#<`t3@a}a1D>_aiFK$Z0vz8>aD;ot5m+i 
zHz>gt>N;>lYkQJL_CRJ3_r-a-O6=MNY=bM8uXz0#dd#44!wu^BIv~X^tp!?f{Yu!t zG}NFZ4Bck^dV&sksaE~MA>{BIA=^HWy#{(bM zuk1|9>htnS*1}KGzO*O9$5YnkYJ?9N)~PlQnOltQj4I0sjIqc*4cCujC;M%!EXh=)sy6-E&1w9DFBj&$;3wY>3FvpMolO}yhZ=x+pf<2Uw?<0ksHOeFjVuO%{<_y{ zikBnXwQ;)|Gl79LYmA<)KS$)V)b)KtDx?`P_KHYxZCV0|{dd44=c&oJsF9;ihruL)jJ2ZDlnI;ge z^{Yi8=HY|9Aln+S-PL+ z@4!q8|4b>7ajP84#zn~WI7$&5ug^X@{aWe|=Y8{0b%#o!YtlaXxcHXg+co!hnAg20 zc7FdCQobW#h+XkpH`eqo$oIJ_jY$Lvp!;1ekGM+}{_=wT3#lv;7XlSMzmzs{km9pC z6LN45wL#ydKHL6_AmMi7j;<5pl8Ykt@%X>EBWOLdM#ahoX6atGH};cU?39oVe<3mz z?mFnSHib4dD>A~otTDV^N=ln2^?r&Y2lfiS)fs4^hgRQH*;Zft8B`@`-WM}HDQI8m zALbjDwPG%fd?&(PUBJ4y-d2%t)gg{#L$~(=G35+R3(5T?AJZrnf2GMDE^j&^;t<@W z^8iz@kt01bQx!79GCsUv1Er*&?()xah)mZzkd{{F%~wvB@9n+;ei0I!@-P^_p`tq2Yc+bD z$EEAsC-YvQ%;m>{FUD<5ze2_72BFU$sJy#>)g>RWybnPjPwv<#7@)8u3h*Eo_#e9O zbCo8@9-3>tu*rH~_fJ*=GdIWur^tds&A|Q-4m@{l>>y_cps78ydUJ>0RW z`h%CwhlZq}x$jVqDS(6c5|kb0aEuT%Y^n*3g6>10doo7BkoX20YibN>Oj2!lh$iyp(Im*jiqRj~W9!BcOWa^K^!x$QQa)QauH4)rR980Z7MgJ2Dqn>!gw|ApjA z;|w_Q3#45~PHII(#Vg@IoRbv4-=D;4by*1Wq%VBvQ7Ryrr6#(z1%m7Hlz-iC>o@fDZnoKmO7FsN1*O?_iL1)axsoj%j{`aGKAy|ZWI5pe_KZ!I(I}#XY1ic%;>A^VV`&dLFI_j z^=n7Ci$*|nir;)}TP8DlAX6KOJVUtNftE-JYgmLlw_+X1DL=OyFD(xVp@#>y68(ah zG#F&^2p*h7Pzc*{TeL{~+|HNRQc<@rrW|{0Lv=-uKJnqkt*zt=lCfcWh=Z&}V&>uk zMq4X(n)YF5=Hbm#WAw8%WkKD7S;M9qrE_KIo}T4KqL|J;rac^b+Ki*L3BGR~mD|57 z_c(W^Iqd=me@LA{rsE#r4!^IM{!Q*( zx=i`_)qV%`PZv+r??JIB>84?}D7&*HZ1oAuI)mvYfg$8{di4J1E#I0%j7#P2hcDMf zwdf_j)-)c!^1Ok?5v&Nwd`u(be-lNuy#ZRG4Q;N%k4_uU4GK1 zu{##4?%zAtVbK+P(T$w;aFxBepfAy)Y-IMv+u(hA=o+>x5f^ZevGbZV7xnBIY4Pw| zRGt1)ToOeB+jaG_o2+)%E4bR^c}D1W447*E6|S|zBj3;Fd0VoE-z%YLpbLLmNzkJ4 zbZ+R)Bx|N@hP1Ve@hmn^XDSsehOh9QAf7e$jrtR-uQ;hML?{J ztp9;k$h$2bwT>Y+Y{ClNjr#J^-9EIoIKMnoY9U6Rm(&8w^J2IVoO9V=T?g#gWTh+s zogM6lA*2oZk+BPJ(l+kb)Wyn#%4&X8axL~8{1AT!xchhn|GxvDuKFX<0^fezZ|o{35}>{K_L*Uf;dr#whPSRO_|pj1V@4c zF2KM1t|7etSZn`>CI(1LBT6|s1Z7uzopYiv%S5N|~2x;DXm& zfu4W=2QQ5xS_V5~fcKT_DNv|HL(A@;T&msM4KX!n9_>RIwXylIOqtL;U}Z1d#!BUf z-lN|z1+s)ib8Q7d;W#$+Ue0$@;Qn}m_uI|CB`0{^-~SK=v|Wn1z#?{Ad+b5{vtj(= zx6z+3#?QxX>)C5nAIEO89l2Dgvm_)R0py$$e!)D;ksUoR8DUCh?8rESSCj0hSTLHa zSCWmmNl#UH5KpZ$F6rCd+w18=KdR=WqG;}_#xyz-C8-rZ*{ek52%4{&4AgP|#@AQ4 zz-dW?=AQFLTT67UYp2L9VOLF2HqC~`?##16{RjuvW$V&?s;4~&`afjOnE0%DvL)V( zl9fIj#ma&FJz`dbqym`yk1Q_xay)WmnRk@Y)6`%g*;g=e zntLe9gnt8hfbsax{131s*!vCitsV4O1TY%EknQ~v^tu!+pJ5Ybs@QRckQ4*K#A2A% z*Y7g_Z02fMY_VaK6ry+mLZAZtx*rVEI9VXavJM-d0lg_|lnpMIC;T$ME#T}y{s`}b zsHs-rkBcRqh}Mq^ZR54-=1gXJsMbCg>)nPv$=~8x;eKh=tJtZB_(hpSrX4DN3C+Lw zF|ZNtVExB41A&^Ac;=LUR6m1#@A1TZ^1b&P^GWVSjQNnK-DhHC<1EGv4*4~+adUF} zr#JA&p-?x`LpRA>U=dXo(9`lY$C%vGcf37kXZby`8ymkvxwhQE$%7%js{UBWt5wau z)Yeqt!;wY9!swXPp8ae4{7g~Ctu{-holSl7o6IU0_?@Ow@{ef~XrK6x?u*K-GZvXAKJwAQ?_AlgAy-!?k(;)A+gIC`t6w#f;A2T~|gVT=lkXqz<#m+Z53!!8K;eb2qjIZ>}o|?#XBTiYw+G@t-XO+p?<#5gQ1zEhv%4*HSd5d zG0v2__yUewJv9vwACz0#IA}qA-awph(b(N1gA-Cz zTu_YiWrjrOfK=FY?9t1;Jks$u%+^aO1b?S*CcvGqfrTL>%-@JCeYbcobM&qWe27be z_F7~>CUpVR-&%9I{4RRwwi@~l=Hk3L=DVpGn>JTk^}vzV2$93bvtF@%?ETPX_=oqf zo$ofGS&LaJR!8{STyg?oVeMyAk(`Fm42>)OBDJ#(yDDTLsAnyl5L(Ryg)k z7?e6eg@D84kaarT#14v~AQTXC4ik^ts4Z5E7&&HKO5eAk%i<>;+M)kUN0Ix!_AFe$ zGgqzl=7aX65PDM_C)2%U1K*83magk zMQ8@;&&pgWayW$VjR06AH@wqEoe{vF9KcKcY!uhBX)4hsIGCInz*N9-p_$`B@J;b6#+N;F z+0d3Sc+6S5pISbvosz!1HU@xh;s-YEE$xXB-O9sIA(?E>X2UHz1vd#L_tqm#RIa)k zX1`c#ziaHERf$W1@y}-yz48+BH2iwShd|xGhvo*-4@G*0wxiGG zcjQ;h8MkaV!aKC5aJlBXoA8JEV^QMdL(!6LJb)Sf@XSFPVwjn6dPI#RLwSs~@?;5&NZEv^=4>{QSKMKW=(o$k~6 zhMBDK$)DEq>bcndreT}=daa;u2_I>_vF_n-HeggToZsgzo3Texj2el!K=xn+kYKKr zp_h|xEpvSD8WV1v&)i=_JH9yYrOEabr0u+^psBK_yr6t{c==U2#l}8qly!XU(3HZkt3CP=v3M1sqf=kacJvyE<$f&$Z@z0WPAOxi(Fdj2BQ42t 
z3eUWNeP?+$anl~_ZGlngW*vjdqy&8zr%;V*bir$;pDVqLmfVqg0Vczf4SmgapS#B0 z#KaW&7I`-&ZQj)S^R}02`I$*AB2Ay2ykZVVxtYOsmr(E33kRQG*IDp0`Yj`@8@u~l za_@#t68D(Tb;Cp4517o+62GB~+miB&SUDn1ni?g+2`vr^z|eSV%s5>9 z)(dDBLE$wX>gZ=`NeJZwyElmaJNxn4rM`s_74#$n_|yCn+w0bf?;V*BV&fxnQYvVDbCi=hJ^&A%Z;^Vq(P7Na3=Vq{Vlh<^uF}}>7{pT(Ep{{hb9)zT{E9G*G8hy@_;=}-i$he83N_ZKx4yjNhQ6%^j}%=dC9r(e^P z|2~88{r`{9AY!Z3S6qOdj*;6ALU=G1*p~T#?Q_A;WKTrTaE+ybTs5G>Rhzj0OQGxUrDHlZPUoP|o5 zJ>e>-v30q3ce7VADzI{_EMadqdh&*LeHXjE9bM7vgETVZ7_-}7x4rX2-{tH_M-}-K zS2h+BcZpSI<3{Rt2u;*yceLq;-D>|pmh&FyJF+Hn||#T zMt@|jUyiAX@CEg+j8{xir`|9IYN!_Tbo2)E2oV(;23P1giIREkPj;cx=o+vI2;Gll zUPico;8n3p6Q|q-?Ubad-T5{xsCA}Ai+_PS$epNeeGeSosP(O`xjf)6;AYPX_3cYH`84=Dv~-=p?7LuNHzjABXp1#iL@Gd!g+s zFWnDl3BRgyv~xu9JAwKu1_KCm^r$?6085gfBn2vUKq!_c@bX>e(f|3liVfyb5E~O3 z@~uM}0l71Z5s_i38NpMY%L$6wAE>Pr{Ft3z+gO7yb!xPBUoC_)Yf)|`>ll3r>4s;X z;7-uSG>;?TR*8IfV?A=xD)yBW)hXGNCaJ0iu;bop%Umh=<_gS=G5#7{?x|hOOxd0B zx@7UP2XV*{pE=Q`_Pn{nUy0elzOhShZ+Z|rMCusK9_cw@>5RrH3FJQ|fFc`kR2pDa zykUrbeHl8T@)Q$vDt*_=i&YxJ126X*#W(`!J7d@xoseb2t%8Y$iONOZ!W}u?G=sy= z1=1-Eb7t}@0$eNn>P$=9%|iS9kK$_K&4&!HRjv2~u)K$~Rlpv40ul4oFeWaHJa}6m zY`chM{;7g&Yg2>G>AS#$kM%E~jA#=LG55&=6Of z!qBY=8#+(fx7`D3+5x41^#u&KGWWJ3&>bunH#}%LRHE`1<9XF=eA;+REHmni;f;1{ z{lWQR`x~TQltb1s-1bu3miQZRpnh1?hLqqP2m)1sjB5z{tjQnh+{s@JRP4oohQdPIgO=#Oe z*;r=ni%*)I(GG=3J5sa9%h~fz+0kp~YEIP~v)76%Sk!R;&(- zNu<9@Kv_^3KI6PqHT$5|1-2@ZKY~R|b*$zFD1H`c;edNEINL1a}XNC!k)Zu71ko;c`972lHJ62GE^Pt6v6Ms$zw5&*h| z*xeSeYeO@ohLxJ~Agu%rvk{nIGF2eI{X6zr$OsC(-0P(&d5>N)mUVLcPUIKn zx9EgJ?n)GHY8j!hT$}<&{Tu$h5c4G*Omv<1*-jqagWNiukY#kWuwE`LfS;gsfFi$xT^ z^3S>AHLLcSeU_^JE1S` zkgA{~Mfd!*?6$%!_C0hPb4{u|J~4A;*MrEUO;@NErPGDflF<4(ce>W@$gEu$f6#i}{QQz=wC)A zRRxH`1yV4Tdjk0MfT5w?fSyDs$nfDID)rUB&K0}@l@AERhxGY#a*um|aEi-Se(?Bx zZ|Py^^@eqQmgq?&wmwNPmE(Q-ce-W?eseBKXoxx|_+&j^cn+_T7BCJ6`j(7)9Ag_BJGmUjo@cExYKG(kWHru--UgMQqKbwgK#i^eoG3t&VTZg4`_?#$=@}cmRq~FOZ}owU5T@*K zW9*B)tZE-PrLaI2LUUxq8z%~$!AHvx@c4QFf5gjf9Jt_DOvu}Q&$Dpj*>_O}+BFw^ z9MVGj&_Wa16QPBDa83|AYD58$jBOx}=8|fBELd`o`X*S0Dg2!E#ryu#ilc4MyAc2V z^gKzSIoTnXWI6BaP}go{f)qDPm}MN$QJ?|j`YBpHm!o7c%2e@yLTJ-OO?tu{2R&LI zK`p`K*-1MG-Q++Euo@vWZlfV4AoH*clQJKKal;`G^i~w(r7j?XfkaIn&-fQZ=&D=g z8q-zs7jG22l3+*J8(kg?o1$skCq3El@AzxsOm!IL!;aOV9dU5TG+6U5Q{WQaud&<0 zR?mP0#w5CzD$`7)O z@Q_@yFlQSOPRjm7etEoR{ZcM*lG?YkJcVwIx6$m=Z=5N>tu3xw6>C!YshltBr7}4+7Q-AE*^j!NAH1qf zENpf+YD|w%sQz4!o|3J5Q-lof{Wh$m>zk8Bw{!F!a&_8#WB{$;)~h~GdIDXah_OD? z`|<{pITtv)!Nx-IDQd8D^I>RfQqQ~PDoy@6t?|np)1I)C=t#BAiRb^oH7RY`!+Oz? z;vJ6fS-9>3;&i9!KF1LIdsEZwYwiB3yObqAuX)TA!@1elQneD?%AKwqj0215Q#bA} zYC_k*<2I`ggi|=(3BJg)E0ERW*L44?+V=JpbFzB8uczhXPn6s7g7~EVs_;E4{!Z6@ z@C2cK)SMz--V0Ij-Sk}e1-Jcq^Vx2dPnz}2OwAhd>7B8ND%Se`=0UE#S`NAVi9bxL zf6z6qjB?DXyr9m&>Qft@r73+9h(@$L5zMN=-W#SaHAWEkoY`>Aqcu(F@sF^4I`s%Kecub8#F$&^{E(G zLLd0eHzLiS`bOL5EZM{xR_RoCqfmCt>$PJmwU27TeVrZj^j^hhShEq7BF-*a)y)$Nk@`?^omi(c@R|=YN|(x&>@-WUNLHp94_6@?v&a z>UNtG^B#X>R)8G#(*E^#t+p<6C&oNs-RyN&&+gI$61ZJ`R%1(o^qcF{1}rSA z^b4@1?=ooviXe|RB&OC5ioinOMSqW3lu>#0f>(M;Ml^;W#F@rb zQoS_1aTwx)%alNO_uyz4a^|@j9Cl-4ea?{3tv`!K)3`lT2Dr7^w+9>7SyeNWOcjsT zS5y;A$}u%JOu@{h28>8p5a8Z|cdz@*0W~kqj@Qr)d#Kv-9=x53ph`hz5wIXxjHAME z;Kc_1*lPi6H_O=C{|PELjsfpbe3^zHemn31=-OY;-^YS3silE1Cku6%hR{iSKsi|1 zL1rrUT|wj5!6_H4vf2_EeJW>~o>q?+ls(U~TGVJbMg;EN&tv8Tpvo_2*t&bxB z6Rd-D>|AMqAp{B9bV~nuhd`j$0+pfscvQ*@o>CnLV5punF);Ch{w|(w#?93jW*e?) 
zx;niqup$?>g>Pd3HvUdb{ga<)3;&8l3cG^S;nvy_qWAO@>P@1hn>U8jK z;y^Hqk1{C_{a=8xxm z!c>f~vgP^Qng29iSek4I4tam24<@|Ngj^5yNAyt>VBz3$&_wg-OYR>1?AAOg9YU109Bglsn} z5IIz2ture_$AV39c8COMyX1TFFp!3U%mc7Y@S1euE+HqAIU3w0rrYvbr_w?--AF0D7o7Z_Iizx=(7y@J1mOOeEDd5_ zIPxjzHeVS{yh*!BRRjpBlUiFrehktRd5ZFXJ^rtN11{(PHl+Uh48*G-x0e50@<{{f zdv7T)+`?Bps5&r&^(M&FMEgVf&*PouL%~KpzpIGDuVuYAOvEXc-qu1DKyg+MKTc^MTyGwA%g`Yb%c1HQA0s1G1Z^mGpunocNtp zz4nq{1nT`Zzln4Am(Y^)%fh$9^if+D=Ffw^*k)q%`i^Z6P6xDG=Y%z~lo?p%*sC=M z5<_j>9L&eFy;W<};HsyLM~jf}T<3TwF2jr2V@NqliJ4(L3bkVp4d{fbN2p}C*7|}` zZ$)`#R9f=<+FH5#VQ&IK+T|I1SGVB@z+>xk5 zd}RrIpxZZy{YzYseJ)7OX#-O%2ulnMKMP8P`AAYHI3SZuY|>Sw_>8o(gQ znO$N*KgAEv8{Qv2X(^?gq4v3497BGuRO~7oM81rax*7hVn;%-A?{K9;rav56Hv7s+ zo2Cvz!6{wG!2TL-Rb}z|W@)*DCp`WOHr5`f!P{3Qxp=<2p4r;8H*bD{ zWc0$jeijzK+o}$PQh7K955oVsN?D5X#s*}>F${`KRY7+^+)W9=1iHgCPU~r^58dHa zhkp>=9`|NqNcLW(Qi7Kd4V)QB`%n-UDjVb}Dacvnpbzl*HrIFsPpB0_Ta`f}u_L=KII+OP(Ggq^QG|4}bhOM*O4ttxw7u`#b z^jAPvOy7qQQfaA)J}RIv0G2C5|I-0ENXbYevTcGKlC_YO)9C5`$^qbRi_w>Sg!P

    NETzC=!8#NH^CyE6}Ect11S0fLV}5&YV}9`(03 z`vr&B8ZGQ3QM7bQK(!d*Ax*4_yXt2&LHN;DBnAo?+R#SL%~)qA=L%0+K__=+d^#Y2 zF5lGVDMgTVSJciiAx|g(bBD}jp zbChk5(K%1o?RrGsQtj>N=dt4$d$0BLD zUh^91u0oDHw9BtEQ1&ULKKEMf)NJs`tW}|py6e&P9Q>$E zK>$7AQ2fHKn;>WkR*UMQmZ&L!GZ!sTfDVF>l8r|!W2+3WB?ql)}Y|fqBaz* zSO`cJ(IP4eTH89bSM9Ce+WVY@;LzLqJl_vEhj4OoviDl=de^(x-W$enn{VGcugpC; zXcQe6gy3u(#{{%@PhVdkN~m`Pv8tSw0DT0?K2@jtX>;Kc*f}q1Jdhlpf(*4e6POW(-K zt(K=}M_Q?s1vzK6p^sXpOx`oe^Q3c2M@DApF2T>YWLpJ4_*Ra7+8F4%h0-NIl<2*M zR##N|RO73;Z2-l;Y+1{{X=PM+_7I5}JtRCeNP0c|JTA}8YKIkN#)OW#I4NZ{bJyI% zX|1iN=Fb_wyz)To=H>|Yf^V=*CkJ-#MT{L$qj`hx&#R+R9R-q`A@@UB2H51nLLn-N z{Ln)B(Put2F1A@m5+h>*f18{hOkDvhs)#FUOV)NUlm(fC*uk=ZGV5kM?vKd7(dj|X zBX5pQpa1}pHdu}&XE`sfC{3%-67$wuyQ7-CQ!-+7^Xw;3_=S!!S;Z! zO`FStA_VACr0)zj&ZOa@L7qmMV7L{GGDtUq1wq9e1(h!lUqOzV1%CyINhDZ=P$mdA zKUJwTC2S^(>nP~nr`%tv=0BZits0VeIDvVl=kd79a~5<28~)Ig*|2J>-pwtMh1|AX z)0xp^nZjwP44BRUmhfKr6n9Z_J0%G==!R`73uJ!qnI<4M0x~`h_$b`%H`m0phKilj zC0|hLWS97BPmGH`8M&TKJ)FjG7L7_+o9sDqrk96%5C1!{E^_>Z1L3EwV4oh_^i*f- z=}he*=2ccpt8k(8IaDxkMwG{FC|}ZKcsS*JtZ4R}*3CIChSnip{PLqnG~o$k+5Zl_YS?#ePM zM7gs30o7Dy)@eP56UFVlXnVUwp4q1Iv>Q!viJNhWbKU;dI~PBzsG|*SE&80>SkLR- zJfQXRz_^JD#W9uQ^fzZ=U!XaJJwn6N{o3*ppWBKUhZiJYo?fZ(y(Y_rby;(}_S`!Z zy`!|n^E#{QxTvw^!E(>(vL~I1A9zXDctvQwjk5QC@X~Pm-u^M7=Z2rM)*d?R9S0*F zGf&Wxw?>belu&4W;L}~d(e@<%Mx$D{L>xKoxhl9x@aeY(>G((2422g~ao3hU*|YL_ zlf|b=5qF$sqGQ%H_&VM$FZvp&Hz z)1Lz?QH>n?^^KBKhJ-?z)y8zXBqicl=9T3PZ4&T?FW^1{ydteBAh3QA&RJEoI4)MW zCcHhjcHs=&$*NyBPYy~bxjpgphYzkEpTvBqxfObxS_h}NMan$-(FujDWQUO;l@1@J8QJJa*MxI!Khfm>=DIaM zVvOf)e$t&2Etg}1Y}c<8w;VfAJ*<^qlOb54V~9ZEvQl;D9Alo@R%UfZrT6|hLuAoK z32v!x=BdhT@_KkI`6el>^cwlDl~3(-X1+J5lu_rlk1&)xRjUqMtIf+>x1GbL4McI= z6cm9v1EfL1ZuD0oAQl)SwTd*)rEbZ(=L`$?;oa(bhBAM{O~hodBnTh{Fs+72hGwp! zd!woyF=pLpx6eN50xcCrEEtvt*+noNW)9ORIusk^KxWzl5o{oSpaV>y^2W1zxs(uJ z1UpEug)e5g-WwT5m78mhCUIxV15(CDXwbs6;hi z87;Y)9jbW0e*f-~RpaWPzhC#X>cweE-E$#tq-xJM|F%)n+jp`$yNmSKc-Q8=69M?K zpxj}iA&&;@d7$0rfWHvB2H~%W68YzCY@Crtqg>#wZ|CVKrF?yGGUHmvkVU1!9*gGq zf23}r{n({_LOat|6DmS`_`}#AQT5*La&(KYP`i8jsW?}oIw$?MT~~BgoMH*Ct7lumEPNGYC65wdWLMp zEly4IlK5QNcIex8M|m`;Iy{58aB`j3=r50UZ*7ln$>*@B(@(7{9UHYWtXwwJE2ZOU zrDs%o%AL>%Zq<7mc0B!bX{@8g(cSFfiLud4tEEjN!b4rgObUT5jJY3eJ@EC^* zhw39n9MU^;MZPyq*zfa=eDuQ$CBT5w9qsSzF`^9H&u9lWoSP-}VO)z+Yb>F501Ecl zUaVvGR_^C3Q|AP#nB&{BZ^%0YUFExTdVS7ASx*Xwd)xm8nDvUBP3Ox$)r{0Ul3d?e zykqhRG-;nvHu|eBNNp`_Ve_Z~$nFV0F+V~!iH(u8e6r;Uc8h2pVK*cLBAtYA5W;rB zev#yZJ%*^ZhGq3|7R`joptyz{1!d?kqya)D(BLpeqc`v$HmpNVNywwYC>|vtw-C0> zB0mm25lJx%a7`uIY4~Y|9Ln7OT$mv|rG-ryV`6SZ2v6^whgCuJLCgknNAJdpq`PH49)a&QF)H!aEV{AT(Lwg(Tyc_=h z_g35fNPKXOcXaN>_S9vkK5Q?YcIwf*JF&kHi|BT~R_yB1Q!x6%%(df+{9jUr>De{E zv%WBNPuz2%At(QP8&+|}(TMZq#oOC`UsO)!0hvtN)_BA>B241R!SRSWNYBa@Q#qPP zVzXV!5%)9)LbG#B95EL1BcNi_goQ-J;Ilojv|$8GD&aVIs89!=j(b zP!SLhsIO5BgKdss5DX?Df~ZAiGhCE`qX3FuG5uwXPsmP~$mh9a!0L}1X!%x1rWr`@ zB_INP62;H7h?*9$Hbv9+O-bd|ii#>B$oCxffYx={0X|c>;mthX7hAQ-8h%XlA2QJn z*a4|XrFUBzQ3IMpmh)QcLK@Yt@-B)spKQUD+NEHL#)-fav(N#t>d)?>E2lAFSeb%iq7T(J2mQ?`1~cRUG#0 z(H~rZV$&5eHbDY_b2|t}fAfel6ry@rw?#%-IV>w?g}k)1Wo2&a$y&)+D3iFiKVgFU z%Esz7!&23bQ_=L9-71ik zD8Uy*A9yiKvgtfZM72%g2RSh?8-cns)v(IJibkDU#GH0PCZk{pYO;*7=2qA(o>DrB z4l`WL%AjD)PuO2JQ&DZ}di8-PqxHzQcjq5{@6!8&WxZ2cGP;gl51*n|Y!3f@*^X+T zeBILA+P&<+&369B+%Q9fH9!e-jbI1H9H}cCU`f{{@V0+1x9P)i`$lOV|&}t?OF^(_LqiEI=A^(Cygo#Pk2-b;oACln_0T4z5 zkXxDj22Jq9uM|UMI>m=m^Q_NlcwAf~#N` zC6iw{PAF}eYn_v54`I+NZ?$@Wv#g+!S5P%WB^OSXtj%cYW>;-ptxaZKlnG-~TjnK% zwANZHl|>^K!)W|@7%ih-(R2zP!f2Xb65W%_z~V9t2>ZbX1sfr=MT#3ZS01f5vI%fU z;nxGnr1xi#+(Ki(4)mzLTH-{O1^WkwSI5QripEu#xM#$ 
zGwQ(TlT)}2ppyE_W;>0uzMWpREcUr9{P#yeZMzD?BFnE-vI8m?mDBR~6#>5KB2Q8i92BBU(4gp@7Kei zJLi-k8I^#%$foxudD~Hf(;0z3g;3H_g&s{;tO6$;mp3jqSL(WJE=^vnr+B%q;;sU0 zT*UK$o(7Q<%S#m8<>tomrgM}Mn9Lf|K?@f}6wJ-&vt)1$WHN;DnKh$MZZcJL0+-?2 z5RI$DZAy@2(4lnj$^D2ovhzN&tV16W%ECBEm?klD*3VW=Y7Krg!ivF~`q+D45BD>C z>_kWuLCP-N!~?^ulNtusC_mK~t_J*9eJ2zb$ab<$X3q~yRE`akoZRFkkQAu)oKdV! zOrq6?I|{$Fn;oG5WpafC7I&=lVXn1}l));+!U~X+qV)83U_~iVr^hhnUrc7J9OYYN zghSEmzx{05Lf2GWNk6dvBNm&db(S1T-TCr|Zdlo?F+InN2Ho!C8YZcQ-k%9W6hK{3 znQ1rB)jygQu=s`LSSapHgOm{d$%adcAq=@Hm@sgN-i6NnU=D~Sy>QB8{jLJ-r_7)E zmt=RA|FikTfug0~4(QIAb-K}|!NqAIU9&0U_|18b=6{tKvM>;S)fotofkYASltLhSXll8QzMNy|Vgl|?$k zE}9p(pE<(scmoK?#_*!Uo!H+yO=OR7K0uY$$VBeeHitb8Vu@yq{1)#(*yk`V2Gp8y z=X|8%*UXL(fLW$HWL7{DS|A@%8f}>p=DApz zt=zw>@WEAk<~1Id;>t{v2g@secOS^JN)ZZ%+PPFDD~^4^im9U-c5R{6e(cfmW$j29 z|8cNRNRWv{V1ochY$VI_Wvkzkw@0U&oru{?(s7Y1$ zalrjyYz<7P(Mbsd&dp|6`6iIUoWY2^q$u1UcG+GGC`Ivvy*_LyJ7k3xS+*f;c0?xZ z_`&7~7xObC2Z}2XGkq8qo}TfxaB%kg2sQ?x$nd4;ATAe|4XmhKBlDNRpCw{dJHsN# zlLrkX9j^wVE{WowTN;O4TCIov1W)b5WD=j9%yL@1y@n|$X8}f=`WcN__pwrhw&oa2 z1`{$4pe2Cc)F9|Jq=}m_8v9j|nbOt|LgOL%Fg^&VXv?UJ5sgc?E_8+sWSu#&sZpngAP( ztbxaYd_cxe2Rk1Bxb>G+x|*g7T-XD(w*GTikHiNiT{n+BcQR)Ckd@bFitg7YW)vL%-A=bTqb7`ldE&bR*pnT>c z*@gfZB)YzWs74vLzr8~#1_}`UuFgexj>|lNc#&2Plmw42p?Rg`H~p)lyha6UjJ+PJ z%+c@-=g?6j3n!lTFHo$35|EkW3d)t0k=4Rl4_YH|nyPMg<-S(IGJfRA?NJ&-3l$(9 zFkrnTF(|&tvIm0FF6h?bVAFJFjg}uWL{EDTi{B`BLw$=K*0dlA$F`{1wMU1PTWPr< zDX`wNo}pj&|^u z6B3H`0gp}&3S%bm2(snkp%b=0JT}K!6prkWtKF+ACtZJ(?flfQa;_HyUs7za1G9gvT>l7}_;c z=-Z~+ie&PX7;OhKUoXFO0BvzI$G24g!pxR~LB%&pfiGQ!xk~ReD53hq#vlMUr@+=% zP*+GR(!$=;%H1Z+o*e;&0@Rl+_4Xlpu48A+XkK`L!>LuRy_=rrCYPsF+}qC@6LoXr zjiYyVE_rHwsO7!c*#|=%+%IOGdm0sDWtwnxUpBR`X%kUsh-N zY2djGrJ_;B+jer`MPEoe1aYuIxcm@=TIN9D&UER>oNE7E=_r<54u*!Up0qwMgr#q@ zp9laWBaYx{fhd*`qe^BEguI%p+#m+1h>&9}G?RhfMK}%5ANdew0 zzk#gUMBb3aq8S%pc(4aD@iLez;J;u+fpiFeBbv^j2AUBi?pO4RAli6KF4-aIvrVd@ z@t?*c0a>U%rO$21FNeOV*)95=<41Zu3J@a=Xy6?+BWg5)i~_&A6eM^!y+$} zxRV`3La#Tfo+=Hj5&-s!)$Nj?*pa(Y0hIw(JjUGPcC%tR4dRn#g;Y#^B1l#@lnv0- zbnT4@0g1c;mieuCNDjg>s9CaGLp<}&_I~2@JZ=-MVfU2ro-)I-xEJT93|`25p^^=8 z9o3{|i(X7EU!U)^A+%}52G>CEFId82r}e*#D~XgU3!OS+dMu}U)%{28l5pc2$L}q) z;ZC*5R-y&XLH3^|a_0w)pZjd{1o7UF*H7{Apk*75odsn!WH3SL;3PaMsJTv)nz-=D zOy*E)R#1k9i%5pEi3d>5cA7?ngIA zlz2YZBv+H<}b~1@ni5i(p*G-16~LAbUn$ zX1oV_i}ACp&`r(n@uqWJK!TbIH4%KH{!PO$Yd2dAci_E}(D7>j0_oV(r(y2Zaxcp6 zV`FOscc-{IBz{!Qe>y=~oELGjZGW+|=T@j&r|BLFBoC}}H?n=VE8vkwvC4pKzw`2P z18LQL9Yc&#z}GC3hP}h8f%KUL!;&-|Izt;7im7mh@&_D=1({HQ_73+Gkv&CEr#~4V zyXYyyGoa$$fz|_ywMkoi2mH$6v0{!ztZj`qENhlAh5|aGFh77MOyhi?VI2U&<9);o z74=}H3Uku8;VAh!8r-l#TTk@?t``JUZ{}*q4}`xk>QIrDo{lshy(5bv7amjiD3*KlRWpcsZV}(f+Nb8Pe z>!=P6^mhNI0UyVdh_8hVYo1pJ#1{&0h?*rtX893%B4|TkCn-=D<^-r)3AgIQDWeCW zqm>QqLl*FFqx@y03YZC?Bov8bi1=6&U`;?Cwo@Dpu-OArN|1pPF@H;%2x(JE7D4e|2&hY?H-}knHy;}TbYPukAjy&e$+9}-S zRlC1!=3NgkSbaWW=3fL;W9V*IKF1DSOeAaIb4OSCugCL%K6RdyNIFtaKMXjz>@aSnF zo9KCYkN};d2}s4XzeV*Ks4;`UM^F(6$7a@9-Y5ZLZVQVhyk@gtk~CckbR9^cf^Gn@ zR0*0aFt*VJCZLL`9frq|P_&z4-(9HKnWE|tTZlwkps;|VkZo72h733l%_1pnZVVom zC8H!u*7EfeX_S+jyl+N!W!X~J-wf+rh0>;@97RC6h19OKq%wsAAC+95?%fPR1fA_7 z=!72Z75c9S$1@d_KW>`8lBa7Y=yT0#BvS<%H?9q>Q8#RJHky)-o zeoS&|_@gh8@IaET*-!^@nH<_0H5a-Aacew4h1EDhxG?#E+2j44*D#AmoFAXORy6`n z)PLmD*bH#FIheqs@d1u_iK3O!HZOnp&c)ERsXjnEUl1I3E+s+++rT;E;gWu}iTL>t zdyjxCC?P0tf;&!h7{iOm-zqnZSzw#>xJ@=`>ZD>WZxf9lo`amwmeJ^=psbP9J}JKs zwQAEPkXQ6~Koq-h@OuTP$rxzjVQ>2zl}qbtv?&oV)?-~L(cb2t$gyh|UyHiVIRWVg znp0H=&IaWRcnlZAL)UJ-e-;UxHHwDYTeyxv6V$zB(#?-?28E_;<0Zdh4=9B@>-w@ybMyq3Ti#P!H?*S@7o8EM> z;vB1&VfnJ@!Xi5kAGSkR9N+wH?DQ{`c{YLC*pXnQLPw)^2NO}C-j8t~*&0mo4YPuZ 
z3W-{}?}&dvSFVjH`MD%eb)e&<IYGERmE*y`{I zJ}zfRSjCU(c*Ps~K#`D#-t;#b>g+dkgm8i!Wg^0S1S*SrlNWoTarsfK6<{gE%6g<$ z>y5^*U117iJD0M)cd8IHLIAhP1?I286t9Qy8Vq-%p+E2>BOOJwk<#@_2V0=Okwqfg zF!94TxeoK17+6HJ;6XzJt6snu+796+;WhPWh1vay_=uwtwr?&mJqWI_x$_y*1a&0j z0ZZsVI{ZH(9csr{PzzzT+$j8F1^5l3F@iv2s1i7fWUrL#eD?K4TMR4j_;hs`}WYQz#Y{U*aQMlgc+y&7m(Di1yuBR#3=;M5tCGKe>(D3k(d0VoAl^Ct6p z^))2X_fvYzFX(4>2;={UAJS2K}wJr zqIdu3Ntw-1ie?!SYx8(R(z05%)iuV2J$tmy@R4L>=8_xo+J{Vr#;t)R7{1jqgT6(i=Vd)hLgj+4!Q#q~u@_$jNaZl4G^~AItbb z#-oYS6Fm?vL5YV!$$|ZGYN3iH-Su3BCM_zONAZ?S_T)*QmM65HeB$wA*+toE*s`?F zm7yOrqb#rYY?f?RK*eEk@>%(T=`zJCTKgpW!-VTkN3n!rl{@5cMfNIK9*3eU)^sCn zW-qMYM3Pi+LI4Ep_SjW12{!)oua59Wb6DVF+%T##03iMTYknn?u2D`34}fYW;6#lR zJM*V7o@0H(nDybS5Si{m$~F2xpzIK@8i_7NzD+7l&=J_$Bl1cHa=$jEha0}O?IZ&9drb3_%F5CnIgRaDpyoRU26MB?s9*|v;A)<(4#-<<AxRI?*t?^TwpUzjUZ4~H^bC4V5l(GRd}D~bF^52w1dn01`C!H#h~5*9O=dXNnZ z;`K})kMrBvQ(3i@6JSs?(l8Cw%<20n)dmE%b=(bTqFIsXQl z31cQQ;(;47IhxJi!3)n2jyg!0fnu>r4N5B|`8sj>-*>9vDuMOO8vo!cdCQ( zw6xl)30eLhw5v3pne$Yc+ag^PxjRq$Oh6hFCZ$+r!ERx6AM%KjYvg0EQei9{{WW8R z<^p^m6IpaSF8HkRl~?bFMx~6o?_VD=8HA!A3=)L)=?hCfFkp!z>dRI&RLXNYps1N2 zV$)V0yiWN2Y<>){IA#f(7UM)p_=;rl&|1}{3_(Ic<~Es8=LY|TM@DTJq%5X6Xc#5?;v^xCM3>o7F#L(iTIw44Gj|@%3s#zV<)h(mSd9o3Cc;$izJN}8o{1C~ z-N)k^*GP)Z23x-bJ7(AYG*gEK{Nss699(KRsG4~zo2xEZy09RvDs zVV)~fezO!()yrU8`G&OsEON@2d(&ER)0) z!47>Ce9Ay|M!2+}A0}>uPgju6mDNn-e<%d@nOvVC5jKbo66?$@-6uQ1^v1xY|%8~%Op`FktK=lrQOCx%hv9XXUz@V13 zRDO}LH^j;te8UDNCV&m%^#unjB3H;@i+Y1EK?RAAv=%o)Q`f?RQgE5cfic*id_heD zCKD!KynPJC+_m0#j)zuA(0S7ZNTOE4*Fir>dl-&=0C5cR^Z)XA9xAk8zvN3R9x!R0 zk9kVnSI7NR*Vg=&e6}Qy!0I3>?7N<9Pv=GFlGdhCfc>lkNYzikY$YRNy1Ly=obVPSmfDDf`q5IDMd(Iz79~q0Qk{7q_vCC z0pF7(%{qd=ncAfPvl*Z9^&@H=rANM67Iu2hr7{Et1_v;Bqus`gKILiROfMbCv zUORe0w)KV zfVhO@y3jiKIFO`0R_K(`N12q@;=&O>}%mWqR)`AMS@C;TX)~q9)2#Tdv7F*qTC&h zq6C^v$D3v_0IZNGguCR`?lA8fBs=uc2INt%%NFK&5OEG3f*+p*HL`AqENDj!iXt!Y z6=K8>S%c9uP$7-cxXhoUUp@nG$DQv8SPUg&t<;yz4N943Bk5`X4dwst|BWEuKa?0l z3J8uy@(aX6Pk@4f=rx?|0w_Q`>-uoR_}J)(E;sXAC@!~V=plKafDxFrXRQjD3kBF! 
zBNV=Vs)D6*(Aa_p#hZZ?wHku%7$pW$U!}=WC%q~X>c|Nx=65%eRU#t5+fY#{t<(V2 zHk=JbGa+`~K&mVW>qgnDOEU_fZJrDMoUFU)U0p)XWxZ=6q~@}x4kP0c2Ki?X&_DKExk+<2ve{BFMksiI4HBj zqygF8TWks!HgJhYuna!j!b( zRQidmXp7r?MUn-~j1Y;{EC4BgsVmzJ7TrP53oYD8aaT3M zO2?K*BY3plR|_}5)ZXybUjMS>E3mRF_$8hk2&)i=5W<(gEZv`oAW%a9Uk}v>yk4DA z&U&-`vEKm1uR;H>hGIyYUbg%#vjP_T7K`{_?Ck&LS}d5LmC5mb4fbdu+@M?}0^kG# zouIN?^mS1@Ap8@x2L=)3V&PU{o@b#BxTxHL4ka5F!Rb{nZV|=L<7xk8 zEgcIhmrf9_wWT1@t@(u16;^z2a#YM(*d`q{Kp;#NcS~KPEXvF2QjcpthtI#UNgx^M z)+Ke$XjVuDI7c=OD5pJSY+HMI=lC7ELstKkEpco8(p|LmAFMjxFwY+fith8;YwvSA zXZg*YvV5uat_}9Ocw3!HHs5kuOkj7BEzf^r2*9=5v?j5hGR;SIizRhtQ0V!R-92o| zBxkYKN7IEuHjYv<3P1FIsH4{B7K?I$c|`>#;{lLJ zaJaaUrC984o-Z>1R8TgJpvmfga*e)=5oSQw!d?iZxq`ff?o0j*fbK?n=U4e-P!(aiV>L|N^0MmaUt4Bo~ekQ7E9&_Yqg zmRn&7c*z`+zG`cx-Obz89@@ow6mVD;~beD zO^dWn$PbHR)fTyr&HZ&b<2?Fj?h8h?J zbe$ZU26A%HAqbvirP3zk0J|fp9_-Tqs9`O$^XMPb!3h9npew->$a(>$M5m+lGIfwo zz?%mdBYgyi3+4+miD7ITm4)eaM#t+THUzUUWx1GABCsMzlKBZRYP8-x=?DESIut(A zM`;YOuN+bF115t;Wtq^{~;$qfUu(3 z9WJI~?oPGyV8W-o!O^Uc+CsW3TDc!ht3j*_mo!2OrS9`)D2g`T17)PzHJT=it{mCw z``qhdDnY;^LhKzpHDQVFad{6779-<@F5q-4C%2)H7XT9+n4II2~pTrRoFY6j|Z=1@+;yAzLJk z)517i2Y!nEDdKAvD)(l>SdY`pZWm3k;h9SCuBue%u+`A9`B~Ms@?l>xAI?gR^!>^F?gC+g8(=2?1didCL* zmBd{bG5YA3b)Rz&9DTOf`x9lv&E4B%Jh991g@xnU+t$d+Kl$dE4R_n2Qs+aDM_gjm zJg|bbM0d?PmROm8LZJG$+pf~}F=_9}kI;i-)-C*bXv+P#;m>ED`0nt!nC=C?6};>J zr1I{g$*C*%l^$vM_RfHoizD8-x!?D?|0RbI)jG{se=F}b;-1z?>QmXRi`N-Amis!B z$6OA&QP1h{ecrs%In!zW?=%bXq)T@1UvRYY z_7vZ`uD36dWG<4;ntRYY6wN}XE&K2w1&liXY1z;W6W_AjOgG3yhTa6Pcga9I>*U$*q>6f|$2RsCd z^Z*frZ~qstKyR|ZJJZ3l1H;&yD<>0CBUF+{3j?tfp0T;1g;2qT0aa-kbp&HOgC0oG z7#&c)eFBs|;YG`@*|ezVtH(SkQq-!y9OX!zXHX<{UF+HL ztb!)WeC)FKpDlY9$Q+x-CQk~UvDtrZ)3ySi8F3x!^MgLS;d%Jl@zT3b_A`H9Is4=J zxwEeb=03LWuyLK7@+{_%jp)$TpM(hqKO4GZ5&c}n^Ra(czkj?SkaN~*NsNSNYmYHLfxc-JkZf5zGr+k||+o4=;GMCSk&-UOr4_8sr z9_?NAOV!4I(Xy_UtnuBWQy!BYQ7v6w(YqFXHU2E&t(TMq%=`(T-?wsmcY4hF{G{IU zCuOs~y>aj#HylK6^C-(MpH?rhc6{DcA9zh{U2d-p?EI~=$Re=9pE1S$&LpbmpzEdU zV#GhrO!d=;C#-$3u|0or?&PUEo_ruk-nQ&~chO+`e80^Nnoffaeb%*-gKh(24%kt7 z^i0mysUJ5k*er1$Fl5-hL^n;Lb^aF*pDdg2{XyE8?*04fD%FB8O2y^y&dU>cMgM8- z?0C${jI#X7^Qm*BVer;7je?TVmpre$*zo1Usap~zQ(4ow1t)DYE=X=xOs)^v;pQtA#ex7C(NXy$~1H(9?eO$m7^Mj*eQ#t3}*F+ENpG(v-QS496+{`IQ8T zM&Tdou|4C9t2TT#7phcQA5JJhI~NO4jXWuM65 w: + h = long_edge + w = short_edge + else: + w = long_edge + h = short_edge + if op.__class__.__name__ in ['Padding']: + reverse_list.append(('padding', (h, w))) + w, h = op.target_size[0], op.target_size[1] + if op.__class__.__name__ in ['LimitLong']: + long_edge = max(h, w) + short_edge = min(h, w) + if ((op.max_long is not None) and (long_edge > op.max_long)): + reverse_list.append(('resize', (h, w))) + long_edge = op.max_long + short_edge = int(round(short_edge * op.max_long / long_edge)) + elif ((op.min_long is not None) and (long_edge < op.min_long)): + reverse_list.append(('resize', (h, w))) + long_edge = op.min_long + short_edge = int(round(short_edge * op.min_long / long_edge)) + if h > w: + h = long_edge + w = short_edge + else: + w = long_edge + h = short_edge + return reverse_list + + +def reverse_transform(pred, ori_shape, transforms): + """recover pred to origin shape""" + reverse_list = get_reverse_list(ori_shape, transforms) + for item in reverse_list[::-1]: + if item[0] == 'resize': + h, w = item[1][0], item[1][1] + pred = F.interpolate(pred, (h, w), mode='nearest') + elif item[0] == 'padding': + h, w = item[1][0], item[1][1] + pred = pred[:, :, 0:h, 0:w] + else: + raise Exception("Unexpected info '{}' in im_info".format(item[0])) + return pred + + +def 
+
+
+def group_pixels(ctr, offsets):
+    """
+    Gives each pixel in the image an instance id.
+
+    Args:
+        ctr (Tensor): A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x).
+        offsets (Tensor): A Tensor of shape [2, H, W] of raw offset output. For consistency, we only
+            support batch size N=1. The order of the first dim is (offset_y, offset_x).
+
+    Returns:
+        Tensor: A Tensor of shape [1, H, W], whose instance ids are 1, 2, ...
+    """
+    height, width = offsets.shape[-2:]
+    y_coord = paddle.arange(height, dtype=offsets.dtype).reshape([1, -1, 1])
+    y_coord = paddle.concat([y_coord] * width, axis=2)
+    x_coord = paddle.arange(width, dtype=offsets.dtype).reshape([1, 1, -1])
+    x_coord = paddle.concat([x_coord] * height, axis=1)
+    coord = paddle.concat([y_coord, x_coord], axis=0)
+
+    ctr_loc = coord + offsets
+    ctr_loc = ctr_loc.reshape((2, height * width)).transpose((1, 0))
+
+    # ctr: [K, 2] -> [K, 1, 2]
+    # ctr_loc: [H*W, 2] -> [1, H*W, 2]
+    ctr = ctr.unsqueeze(1)
+    ctr_loc = ctr_loc.unsqueeze(0)
+
+    # distance: [K, H*W]
+    distance = paddle.norm((ctr - ctr_loc).astype('float32'), axis=-1)
+
+    # find the center with minimum distance at each location, offset by 1 to reserve id=0 for stuff
+    instance_id = paddle.argmin(
+        distance, axis=0).reshape((1, height, width)) + 1
+
+    return instance_id
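[Editor's note] group_pixels assigns every pixel to the nearest predicted center. A minimal sketch, with centers and offsets invented for illustration:

    import paddle

    # Two centers on a 4 x 4 grid; zero offsets mean each pixel votes for its
    # own coordinate, so the assignment degenerates to nearest-center.
    ctr = paddle.to_tensor([[0, 0], [3, 3]], dtype='float32')
    offsets = paddle.zeros([2, 4, 4])
    ins_id = group_pixels(ctr, offsets)
    print(ins_id.shape)  # [1, 4, 4]; ids are 1 near (0, 0) and 2 near (3, 3)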
+
+
+def get_instance_segmentation(semantic,
+                              ctr_hmp,
+                              offset,
+                              thing_list,
+                              threshold=0.1,
+                              nms_kernel=3,
+                              top_k=None):
+    """
+    Post-processing for instance segmentation, gets class agnostic instance id map.
+
+    Args:
+        semantic (Tensor): A Tensor of shape [1, H, W], predicted semantic label.
+        ctr_hmp (Tensor): A Tensor of shape [1, H, W] of raw center heatmap output. For consistency,
+            we only support batch size N=1.
+        offset (Tensor): A Tensor of shape [2, H, W] of raw offset output. For consistency, we only
+            support batch size N=1. The order of the first dim is (offset_y, offset_x).
+        thing_list (list): A list of thing class ids.
+        threshold (float, optional): Threshold applied to center heatmap score. Default: 0.1.
+        nms_kernel (int, optional): NMS max pooling kernel size. Default: 3.
+        top_k (int, optional): Top k centers to keep. Default: None.
+
+    Returns:
+        Tensor: Instance segmentation results whose shape is [1, H, W].
+        Tensor: A Tensor of shape [1, K, 2] where K is the number of center points. The order of second dim is (y, x).
+    """
+    thing_seg = paddle.zeros_like(semantic)
+    for thing_class in thing_list:
+        thing_seg = thing_seg + (semantic == thing_class).astype('int64')
+    thing_seg = (thing_seg > 0).astype('int64')
+    center = find_instance_center(
+        ctr_hmp, threshold=threshold, nms_kernel=nms_kernel, top_k=top_k)
+    if center is None:
+        return paddle.zeros_like(semantic), center
+    ins_seg = group_pixels(center, offset)
+    return thing_seg * ins_seg, center.unsqueeze(0)
+
+
+def merge_semantic_and_instance(semantic, instance, label_divisor, thing_list,
+                                stuff_area, ignore_index):
+    """
+    Post-processing for panoptic segmentation, by merging semantic segmentation label and class agnostic
+    instance segmentation label.
+
+    Args:
+        semantic (Tensor): A Tensor of shape [1, H, W], predicted semantic label.
+        instance (Tensor): A Tensor of shape [1, H, W], predicted instance label.
+        label_divisor (int): An integer used to convert panoptic id = semantic id * label_divisor + instance_id.
+        thing_list (list): A list of thing class ids.
+        stuff_area (int): An integer. Stuff whose area is less than stuff_area is removed.
+        ignore_index (int): Specifies a value that is ignored.
+
+    Returns:
+        Tensor: A Tensor of shape [1, H, W]. The pixels whose value equals ignore_index are ignored.
+            A stuff class is represented as class_id, while a thing class is represented as
+            class_id * label_divisor + ins_id, where ins_id begins from 1.
+    """
+    # In case thing mask does not align with semantic prediction
+    pan_seg = paddle.zeros_like(semantic) + ignore_index
+    thing_seg = instance > 0
+    semantic_thing_seg = paddle.zeros_like(semantic)
+    for thing_class in thing_list:
+        semantic_thing_seg += semantic == thing_class
+
+    # keep track of the instance id for each class
+    class_id_tracker = {}
+
+    # paste thing by majority voting
+    ins_ids = paddle.unique(instance)
+    for ins_id in ins_ids:
+        if ins_id == 0:
+            continue
+        # Make sure only to do majority voting within semantic_thing_seg
+        thing_mask = paddle.logical_and(instance == ins_id,
+                                        semantic_thing_seg == 1)
+        if paddle.all(paddle.logical_not(thing_mask)):
+            continue
+        # get the class id for the instance of ins_id
+        sem_ins_id = paddle.gather(
+            semantic.reshape((-1, )), paddle.nonzero(
+                thing_mask.reshape((-1, ))))  # equal to semantic[thing_mask]
+        v, c = paddle.unique(sem_ins_id, return_counts=True)
+        class_id = paddle.gather(v, c.argmax())
+        class_id = class_id.numpy()[0]
+        if class_id in class_id_tracker:
+            new_ins_id = class_id_tracker[class_id]
+        else:
+            class_id_tracker[class_id] = 1
+            new_ins_id = 1
+        class_id_tracker[class_id] += 1
+
+        # pan_seg[thing_mask] = class_id * label_divisor + new_ins_id
+        pan_seg = pan_seg * (paddle.logical_not(thing_mask)) + (
+            class_id * label_divisor + new_ins_id) * thing_mask.astype('int64')
+
+    # paste stuff to the unoccupied area
+    class_ids = paddle.unique(semantic)
+    for class_id in class_ids:
+        if class_id.numpy() in thing_list:
+            # thing class
+            continue
+        # calculate stuff area
+        stuff_mask = paddle.logical_and(semantic == class_id,
+                                        paddle.logical_not(thing_seg))
+        area = paddle.sum(stuff_mask.astype('int64'))
+        if area >= stuff_area:
+            # pan_seg[stuff_mask] = class_id
+            pan_seg = pan_seg * (paddle.logical_not(stuff_mask)
+                                 ) + stuff_mask.astype('int64') * class_id
+
+    return pan_seg
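[Editor's note] The merge step encodes panoptic ids purely arithmetically. With a label_divisor of 1000 (an illustrative value), packing and unpacking work as follows:

    label_divisor = 1000
    class_id, ins_id = 11, 3                      # e.g. the 3rd instance of class 11
    pan_id = class_id * label_divisor + ins_id    # 11003
    assert pan_id // label_divisor == class_id    # recover the semantic class
    assert pan_id % label_divisor == ins_id       # recover the instance index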
+
+
+def inference(
+        model,
+        im,
+        transforms,
+        thing_list,
+        label_divisor,
+        stuff_area,
+        ignore_index,
+        threshold=0.1,
+        nms_kernel=3,
+        top_k=None,
+        ori_shape=None,
+):
+    """
+    Inference for one image.
+
+    Args:
+        model (paddle.nn.Layer): Model to get the logits of the image.
+        im (Tensor): The input image.
+        transforms (list): Transforms for the image.
+        thing_list (list): A list of thing class ids.
+        label_divisor (int): An integer used to convert panoptic id = semantic id * label_divisor + instance_id.
+        stuff_area (int): An integer. Stuff whose area is less than stuff_area is removed.
+        ignore_index (int): Specifies a value that is ignored.
+        threshold (float, optional): Threshold applied to center heatmap score. Default: 0.1.
+        nms_kernel (int, optional): NMS max pooling kernel size. Default: 3.
+        top_k (int, optional): Top k centers to keep. Default: None.
+        ori_shape (list, optional): Origin shape of the image. Default: None.
+
+    Returns:
+        list: A list of [semantic, semantic_softmax, instance, panoptic, ctr_hmp].
+            semantic: Semantic segmentation results with shape [1, 1, H, W], whose values are 0, 1, 2, ...
+            semantic_softmax: A Tensor of probabilities for each class, whose shape is [1, num_classes, H, W].
+            instance: Class agnostic instance segmentation results, whose values are 0, 1, 2, ..., where 0 is stuff.
+            panoptic: Panoptic segmentation results, whose values are ignore_index, stuff_id, or
+                thing_id * label_divisor + ins_id, where ins_id >= 1.
+            ctr_hmp: The raw center heatmap output.
+    """
+    logits = model(im)
+    # semantic: [1, c, h, w], center: [1, 1, h, w], offset: [1, 2, h, w]
+    semantic, ctr_hmp, offset = logits
+    semantic = paddle.argmax(semantic, axis=1, keepdim=True)
+    semantic = semantic.squeeze(0)  # shape: [1, h, w]
+    semantic_softmax = F.softmax(logits[0], axis=1).squeeze()
+    ctr_hmp = ctr_hmp.squeeze(0)  # shape: [1, h, w]
+    offset = offset.squeeze(0)  # shape: [2, h, w]
+
+    instance, center = get_instance_segmentation(
+        semantic=semantic,
+        ctr_hmp=ctr_hmp,
+        offset=offset,
+        thing_list=thing_list,
+        threshold=threshold,
+        nms_kernel=nms_kernel,
+        top_k=top_k)
+    panoptic = merge_semantic_and_instance(semantic, instance, label_divisor,
+                                           thing_list, stuff_area, ignore_index)
+
+    # Recover to origin shape
+    # semantic: 0, 1, 2, 3, ...
+    # instance: 0, 1, 2, 3, 4, 5, ..., where 0 is stuff.
+    # panoptic: ignore_index, stuff_id, thing_id * label_divisor + ins_id, ins_id >= 1.
+    results = [semantic, semantic_softmax, instance, panoptic, ctr_hmp]
+    if ori_shape is not None:
+        results = [i.unsqueeze(0) for i in results]
+        results = [
+            reverse_transform(i, ori_shape=ori_shape, transforms=transforms)
+            for i in results
+        ]
+
+    return results
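[Editor's note] A hedged end-to-end sketch of calling inference(). Here `model` is assumed to be a panoptic model returning (semantic logits, center heatmap, offset); the Cityscapes-style ids and thresholds are placeholders, not values fixed by this patch:

    import paddle

    im = paddle.rand([1, 3, 512, 1024])  # dummy input batch of size 1
    semantic, semantic_softmax, instance, panoptic, ctr_hmp = inference(
        model=model,
        im=im,
        transforms=[],          # no geometric ops to reverse in this sketch
        thing_list=[11, 12, 13, 14, 15, 16, 17, 18],
        label_divisor=1000,
        stuff_area=2048,
        ignore_index=255)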
diff --git a/contrib/PanopticDeepLab/paddleseg/core/predict.py b/contrib/PanopticDeepLab/paddleseg/core/predict.py
new file mode 100644
index 0000000000..eb0249033d
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/core/predict.py
@@ -0,0 +1,188 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import math
+
+import cv2
+import numpy as np
+import paddle
+
+from paddleseg import utils
+from paddleseg.core import infer
+from paddleseg.utils import logger, progbar
+
+
+def mkdir(path):
+    sub_dir = os.path.dirname(path)
+    if not os.path.exists(sub_dir):
+        os.makedirs(sub_dir)
+
+
+def partition_list(arr, m):
+    """split the list 'arr' into m pieces"""
+    n = int(math.ceil(len(arr) / float(m)))
+    return [arr[i:i + n] for i in range(0, len(arr), n)]
+
+
+def get_save_name(im_path, im_dir):
+    """get the saved name"""
+    if im_dir is not None:
+        im_file = im_path.replace(im_dir, '')
+    else:
+        im_file = os.path.basename(im_path)
+    if im_file[0] == '/':
+        im_file = im_file[1:]
+    return im_file
+
+
+def add_info_to_save_path(save_path, info):
+    """Add more information to the save path"""
+    fname, fextension = os.path.splitext(save_path)
+    fname = '_'.join([fname, info])
+    save_path = ''.join([fname, fextension])
+    return save_path
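[Editor's note] The helpers above only massage lists and paths. For instance, with illustrative values:

    print(partition_list([1, 2, 3, 4, 5], 2))                     # [[1, 2, 3], [4, 5]]
    print(get_save_name('/data/imgs/city/a.png', '/data/imgs/'))  # 'city/a.png'
    print(add_info_to_save_path('output/a.png', 'added'))         # 'output/a_added.png'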
+
+
+def predict(model,
+            model_path,
+            image_list,
+            transforms,
+            thing_list,
+            label_divisor,
+            stuff_area,
+            ignore_index,
+            image_dir=None,
+            save_dir='output',
+            threshold=0.1,
+            nms_kernel=7,
+            top_k=200):
+    """
+    Predict and visualize the image_list.
+
+    Args:
+        model (nn.Layer): Used to predict for the input image.
+        model_path (str): The path of the pretrained model.
+        image_list (list): A list of image paths to be predicted.
+        transforms (transform.Compose): Preprocessing for the input image.
+        thing_list (list): A list of thing class ids.
+        label_divisor (int): An integer used to convert panoptic id = semantic id * label_divisor + instance_id.
+        stuff_area (int): An integer. Stuff whose area is less than stuff_area is removed.
+        ignore_index (int): Specifies a value that is ignored.
+        image_dir (str, optional): The root directory of the images to predict. Default: None.
+        save_dir (str, optional): The directory to save the visualized results. Default: 'output'.
+        threshold (float, optional): Threshold applied to center heatmap score. Default: 0.1.
+        nms_kernel (int, optional): NMS max pooling kernel size. Default: 7.
+        top_k (int, optional): Top k centers to keep. Default: 200.
+    """
+    utils.utils.load_entire_model(model, model_path)
+    model.eval()
+    nranks = paddle.distributed.get_world_size()
+    local_rank = paddle.distributed.get_rank()
+    if nranks > 1:
+        img_lists = partition_list(image_list, nranks)
+    else:
+        img_lists = [image_list]
+
+    semantic_save_dir = os.path.join(save_dir, 'semantic')
+    instance_save_dir = os.path.join(save_dir, 'instance')
+    panoptic_save_dir = os.path.join(save_dir, 'panoptic')
+
+    colormap = utils.cityscape_colormap()
+
+    logger.info("Start to predict...")
+    progbar_pred = progbar.Progbar(target=len(img_lists[0]), verbose=1)
+    with paddle.no_grad():
+        for i, im_path in enumerate(img_lists[local_rank]):
+            ori_im = cv2.imread(im_path)
+            ori_shape = ori_im.shape[:2]
+            im, _ = transforms(ori_im)
+            im = im[np.newaxis, ...]
+            im = paddle.to_tensor(im)
+
+            semantic, semantic_softmax, instance, panoptic, ctr_hmp = infer.inference(
+                model=model,
+                im=im,
+                transforms=transforms.transforms,
+                thing_list=thing_list,
+                label_divisor=label_divisor,
+                stuff_area=stuff_area,
+                ignore_index=ignore_index,
+                threshold=threshold,
+                nms_kernel=nms_kernel,
+                top_k=top_k,
+                ori_shape=ori_shape)
+            semantic = semantic.squeeze().numpy()
+            instance = instance.squeeze().numpy()
+            panoptic = panoptic.squeeze().numpy()
+
+            im_file = get_save_name(im_path, image_dir)
+
+            # visualize semantic segmentation results
+            save_path = os.path.join(semantic_save_dir, im_file)
+            mkdir(save_path)
+            utils.visualize_semantic(
+                semantic, save_path=save_path, colormap=colormap)
+            # save the blended image for semantic segmentation results
+            save_path_ = add_info_to_save_path(save_path, 'add')
+            utils.visualize_semantic(
+                semantic, save_path=save_path_, colormap=colormap, image=ori_im)
+            # panoptic to semantic
+            ins_mask = panoptic > label_divisor
+            pan_to_sem = panoptic.copy()
+            pan_to_sem[ins_mask] = pan_to_sem[ins_mask] // label_divisor
+            save_path_ = add_info_to_save_path(save_path,
+                                               'panoptic_to_semantic')
+            utils.visualize_semantic(
+                pan_to_sem, save_path=save_path_, colormap=colormap)
+            save_path_ = add_info_to_save_path(save_path,
+                                               'panoptic_to_semantic_added')
+            utils.visualize_semantic(
+                pan_to_sem,
+                save_path=save_path_,
+                colormap=colormap,
+                image=ori_im)
+
+            # visualize instance segmentation results
+            pan_to_ins = panoptic.copy()
+            ins_mask = pan_to_ins > label_divisor
+            pan_to_ins[~ins_mask] = 0
+            save_path = os.path.join(instance_save_dir, im_file)
+            mkdir(save_path)
+            utils.visualize_instance(pan_to_ins, save_path=save_path)
+            # save the blended image for instance segmentation results
+            save_path_ = add_info_to_save_path(save_path, 'added')
+            utils.visualize_instance(
+                pan_to_ins, save_path=save_path_, image=ori_im)
+
+            # visualize panoptic segmentation results
+            save_path = os.path.join(panoptic_save_dir, im_file)
+            mkdir(save_path)
+            utils.visualize_panoptic(
+                panoptic,
+                save_path=save_path,
+                label_divisor=label_divisor,
+                colormap=colormap,
+                ignore_index=ignore_index)
+            # save the blended image for panoptic segmentation results
+            save_path_ = add_info_to_save_path(save_path, 'added')
+            utils.visualize_panoptic(
+                panoptic,
+                save_path=save_path_,
+                label_divisor=label_divisor,
+                colormap=colormap,
+                image=ori_im,
+                ignore_index=ignore_index)
+
+            progbar_pred.update(i + 1)
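[Editor's note] A possible driver for predict(); the model, checkpoint path, and dataset settings are assumptions for illustration, not part of this patch:

    import glob
    from paddleseg.transforms import Compose, Normalize

    predict(
        model=model,  # assumed: a trained Panoptic-DeepLab style model
        model_path='output/best_model/model.pdparams',  # hypothetical checkpoint
        image_list=sorted(glob.glob('data/leftImg8bit/val/*/*.png')),
        transforms=Compose([Normalize()]),
        thing_list=[11, 12, 13, 14, 15, 16, 17, 18],
        label_divisor=1000,
        stuff_area=2048,
        ignore_index=255,
        save_dir='output/predict')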
+
+import os
+import time
+from collections import deque
+import shutil
+
+import paddle
+import paddle.nn.functional as F
+
+from paddleseg.utils import TimeAverager, calculate_eta, resume, logger
+from paddleseg.core.val import evaluate
+
+
+def check_logits_losses(logits_list, losses):
+    len_logits = len(logits_list)
+    len_losses = len(losses['types'])
+    if len_logits != len_losses:
+        raise RuntimeError(
+            'The length of logits_list should be equal to the number of loss types in the loss config: {} != {}.'
+            .format(len_logits, len_losses))
+
+
+def loss_computation(logits_list, semantic, semantic_weights, center,
+                     center_weights, offset, offset_weights, losses):
+    # Semantic loss
+    semantic_loss = losses['types'][0](logits_list[0], semantic,
+                                       semantic_weights)
+    semantic_loss = semantic_loss * losses['coef'][0]
+
+    # Center loss, averaged over the pixels with a positive weight
+    center_loss = losses['types'][1](logits_list[1], center)
+    center_weights = (center_weights.unsqueeze(1)).expand_as(center_loss)
+    center_loss = center_loss * center_weights
+    if center_weights.sum() > 0:
+        center_loss = center_loss.sum() / center_weights.sum()
+    else:
+        center_loss = center_loss.sum() * 0
+    center_loss = center_loss * losses['coef'][1]
+
+    # Offset loss, averaged over the pixels with a positive weight
+    offset_loss = losses['types'][2](logits_list[2], offset)
+    offset_weights = (offset_weights.unsqueeze(1)).expand_as(offset_loss)
+    offset_loss = offset_loss * offset_weights
+    if offset_weights.sum() > 0:
+        offset_loss = offset_loss.sum() / offset_weights.sum()
+    else:
+        offset_loss = offset_loss.sum() * 0
+    offset_loss = offset_loss * losses['coef'][2]
+
+    loss_list = [semantic_loss, center_loss, offset_loss]
+
+    return loss_list
+
+
+def train(model,
+          train_dataset,
+          val_dataset=None,
+          optimizer=None,
+          save_dir='output',
+          iters=10000,
+          batch_size=2,
+          resume_model=None,
+          save_interval=1000,
+          log_iters=10,
+          num_workers=0,
+          use_vdl=False,
+          losses=None,
+          keep_checkpoint_max=5,
+          threshold=0.1,
+          nms_kernel=7,
+          top_k=200):
+    """
+    Launch training.
+
+    Args:
+        model (nn.Layer): A semantic segmentation model.
+        train_dataset (paddle.io.Dataset): Used to read and process training datasets.
+        val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets.
+        optimizer (paddle.optimizer.Optimizer): The optimizer.
+        save_dir (str, optional): The directory for saving the model snapshot. Default: 'output'.
+        iters (int, optional): How many iters to train the model. Default: 10000.
+        batch_size (int, optional): Mini batch size of one gpu or cpu. Default: 2.
+        resume_model (str, optional): The path of the model to resume from.
+        save_interval (int, optional): How many iters between model snapshots during training. Default: 1000.
+        log_iters (int, optional): Display logging information every log_iters. Default: 10.
+        num_workers (int, optional): Num workers for data loader. Default: 0.
+        use_vdl (bool, optional): Whether to record the data to VisualDL during training. Default: False.
+        losses (dict): A dict including 'types' and 'coef'. The 'types' item is a list of loss objects from
+            paddleseg.models.losses, and the 'coef' item is a list of the corresponding coefficients.
+            The length of 'coef' should equal 1 or len(losses['types']).
+        keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5.
+        threshold (float, optional): Threshold applied to the center heatmap score. Default: 0.1.
+        nms_kernel (int, optional): NMS max pooling kernel size. Default: 7.
+        top_k (int, optional): Top k centers to keep. Default: 200.
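+
+    Example:
+
+        # A minimal launch sketch (illustrative only): the model and dataset
+        # construction is elided, and the loss coefficients below are
+        # placeholder values, not defaults of this function.
+        import paddle
+        from paddleseg.core import train
+
+        model = ...          # e.g. a PanopticDeepLab instance
+        train_dataset = ...  # e.g. CityscapesPanoptic(transforms, dataset_root)
+        optimizer = paddle.optimizer.Momentum(
+            learning_rate=0.001, parameters=model.parameters())
+        losses = {'types': [...], 'coef': [1, 1, 1]}  # one loss per model output
+        train(model, train_dataset, optimizer=optimizer, losses=losses,
+              iters=90000, batch_size=8)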
+ """ + model.train() + nranks = paddle.distributed.ParallelEnv().nranks + local_rank = paddle.distributed.ParallelEnv().local_rank + + start_iter = 0 + if resume_model is not None: + start_iter = resume(model, optimizer, resume_model) + + if not os.path.isdir(save_dir): + if os.path.exists(save_dir): + os.remove(save_dir) + os.makedirs(save_dir) + + if nranks > 1: + # Initialize parallel environment if not done. + if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized( + ): + paddle.distributed.init_parallel_env() + ddp_model = paddle.DataParallel(model) + else: + ddp_model = paddle.DataParallel(model) + + batch_sampler = paddle.io.DistributedBatchSampler( + train_dataset, batch_size=batch_size, shuffle=True, drop_last=True) + + loader = paddle.io.DataLoader( + train_dataset, + batch_sampler=batch_sampler, + num_workers=num_workers, + return_list=True, + ) + + if use_vdl: + from visualdl import LogWriter + log_writer = LogWriter(save_dir) + + avg_loss = 0.0 + avg_loss_list = [] + iters_per_epoch = len(batch_sampler) + best_pq = -1.0 + best_model_iter = -1 + reader_cost_averager = TimeAverager() + batch_cost_averager = TimeAverager() + save_models = deque() + batch_start = time.time() + + iter = start_iter + while iter < iters: + for data in loader: + iter += 1 + if iter > iters: + break + reader_cost_averager.record(time.time() - batch_start) + images = data[0] + semantic = data[1] + semantic_weights = data[2] + center = data[3] + center_weights = data[4] + offset = data[5] + offset_weights = data[6] + foreground = data[7] + + if nranks > 1: + logits_list = ddp_model(images) + else: + logits_list = model(images) + + loss_list = loss_computation( + logits_list=logits_list, + losses=losses, + semantic=semantic, + semantic_weights=semantic_weights, + center=center, + center_weights=center_weights, + offset=offset, + offset_weights=offset_weights) + loss = sum(loss_list) + loss.backward() + + optimizer.step() + lr = optimizer.get_lr() + if isinstance(optimizer._learning_rate, + paddle.optimizer.lr.LRScheduler): + optimizer._learning_rate.step() + model.clear_gradients() + avg_loss += loss.numpy()[0] + if not avg_loss_list: + avg_loss_list = [l.numpy() for l in loss_list] + else: + for i in range(len(loss_list)): + avg_loss_list[i] += loss_list[i].numpy() + batch_cost_averager.record( + time.time() - batch_start, num_samples=batch_size) + + if (iter) % log_iters == 0 and local_rank == 0: + avg_loss /= log_iters + avg_loss_list = [l[0] / log_iters for l in avg_loss_list] + remain_iters = iters - iter + avg_train_batch_cost = batch_cost_averager.get_average() + avg_train_reader_cost = reader_cost_averager.get_average() + eta = calculate_eta(remain_iters, avg_train_batch_cost) + logger.info( + "[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}" + .format((iter - 1) // iters_per_epoch + 1, iter, iters, + avg_loss, lr, avg_train_batch_cost, + avg_train_reader_cost, + batch_cost_averager.get_ips_average(), eta)) + logger.info( + "[LOSS] loss={:.4f}, semantic_loss={:.4f}, center_loss={:.4f}, offset_loss={:.4f}" + .format(avg_loss, avg_loss_list[0], avg_loss_list[1], + avg_loss_list[2])) + if use_vdl: + log_writer.add_scalar('Train/loss', avg_loss, iter) + # Record all losses if there are more than 2 losses. 
+ if len(avg_loss_list) > 1: + avg_loss_dict = {} + for i, value in enumerate(avg_loss_list): + avg_loss_dict['loss_' + str(i)] = value + for key, value in avg_loss_dict.items(): + log_tag = 'Train/' + key + log_writer.add_scalar(log_tag, value, iter) + + log_writer.add_scalar('Train/lr', lr, iter) + log_writer.add_scalar('Train/batch_cost', + avg_train_batch_cost, iter) + log_writer.add_scalar('Train/reader_cost', + avg_train_reader_cost, iter) + + avg_loss = 0.0 + avg_loss_list = [] + reader_cost_averager.reset() + batch_cost_averager.reset() + + # save model + if (iter % save_interval == 0 or iter == iters) and local_rank == 0: + current_save_dir = os.path.join(save_dir, + "iter_{}".format(iter)) + if not os.path.isdir(current_save_dir): + os.makedirs(current_save_dir) + paddle.save(model.state_dict(), + os.path.join(current_save_dir, 'model.pdparams')) + paddle.save(optimizer.state_dict(), + os.path.join(current_save_dir, 'model.pdopt')) + save_models.append(current_save_dir) + if len(save_models) > keep_checkpoint_max > 0: + model_to_remove = save_models.popleft() + shutil.rmtree(model_to_remove) + + # eval model + if (iter % save_interval == 0 or iter == iters) and ( + val_dataset is + not None) and local_rank == 0 and iter > iters // 2: + num_workers = 1 if num_workers > 0 else 0 + panoptic_results, semantic_results, instance_results = evaluate( + model, + val_dataset, + threshold=threshold, + nms_kernel=nms_kernel, + top_k=top_k, + num_workers=num_workers, + print_detail=False) + pq = panoptic_results['pan_seg']['All']['pq'] + miou = semantic_results['sem_seg']['mIoU'] + map = instance_results['ins_seg']['mAP'] + map50 = instance_results['ins_seg']['mAP50'] + logger.info( + "[EVAL] PQ: {:.4f}, mIoU: {:.4f}, mAP: {:.4f}, mAP50: {:.4f}" + .format(pq, miou, map, map50)) + model.train() + + # save best model and add evaluate results to vdl + if (iter % save_interval == 0 or iter == iters) and local_rank == 0: + if val_dataset is not None and iter > iters // 2: + if pq > best_pq: + best_pq = pq + best_model_iter = iter + best_model_dir = os.path.join(save_dir, "best_model") + paddle.save( + model.state_dict(), + os.path.join(best_model_dir, 'model.pdparams')) + logger.info( + '[EVAL] The model with the best validation pq ({:.4f}) was saved at iter {}.' + .format(best_pq, best_model_iter)) + + if use_vdl: + log_writer.add_scalar('Evaluate/PQ', pq, iter) + log_writer.add_scalar('Evaluate/mIoU', miou, iter) + log_writer.add_scalar('Evaluate/mAP', map, iter) + log_writer.add_scalar('Evaluate/mAP50', map50, iter) + batch_start = time.time() + + # Calculate flops. + if local_rank == 0: + + def count_syncbn(m, x, y): + x = x[0] + nelements = x.numel() + m.total_ops += int(2 * nelements) + + _, c, h, w = images.shape + flops = paddle.flops( + model, [1, c, h, w], + custom_ops={paddle.nn.SyncBatchNorm: count_syncbn}) + + # Sleep for half a second to let dataloader release resources. + time.sleep(0.5) + if use_vdl: + log_writer.close() diff --git a/contrib/PanopticDeepLab/paddleseg/core/val.py b/contrib/PanopticDeepLab/paddleseg/core/val.py new file mode 100644 index 0000000000..9ee82ad219 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/core/val.py @@ -0,0 +1,181 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from collections import OrderedDict
+
+import numpy as np
+import time
+import paddle
+import paddle.nn.functional as F
+
+from paddleseg.utils import metrics, TimeAverager, calculate_eta, logger, progbar
+from paddleseg.core import infer
+from paddleseg.utils.evaluation import SemanticEvaluator, InstanceEvaluator, PanopticEvaluator
+
+np.set_printoptions(suppress=True)
+
+
+def evaluate(model,
+             eval_dataset,
+             threshold=0.1,
+             nms_kernel=7,
+             top_k=200,
+             num_workers=0,
+             print_detail=True):
+    """
+    Launch evaluation.
+
+    Args:
+        model (nn.Layer): A semantic segmentation model.
+        eval_dataset (paddle.io.Dataset): Used to read and process validation datasets.
+        threshold (float, optional): Threshold applied to the center heatmap score. Default: 0.1.
+        nms_kernel (int, optional): NMS max pooling kernel size. Default: 7.
+        top_k (int, optional): Top k centers to keep. Default: 200.
+        num_workers (int, optional): Num workers for data loader. Default: 0.
+        print_detail (bool, optional): Whether to print detailed information about the evaluation process. Default: True.
+
+    Returns:
+        dict: Panoptic evaluation results, including PQ, RQ and SQ for All, Things and Stuff, and for each class.
+        dict: Semantic evaluation results, including mIoU, fwIoU, mACC and pACC.
+        dict: Instance evaluation results, including mAP and mAP50, and also AP and AP50 for each class.
+
+    """
+    model.eval()
+    nranks = paddle.distributed.ParallelEnv().nranks
+    local_rank = paddle.distributed.ParallelEnv().local_rank
+    if nranks > 1:
+        # Initialize parallel environment if not done.
+        if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
+        ):
+            paddle.distributed.init_parallel_env()
+    batch_sampler = paddle.io.DistributedBatchSampler(
+        eval_dataset, batch_size=1, shuffle=False, drop_last=False)
+    loader = paddle.io.DataLoader(
+        eval_dataset,
+        batch_sampler=batch_sampler,
+        num_workers=num_workers,
+        return_list=True,
+    )
+
+    total_iters = len(loader)
+    semantic_metric = SemanticEvaluator(
+        eval_dataset.num_classes, ignore_index=eval_dataset.ignore_index)
+    instance_metric_AP50 = InstanceEvaluator(
+        eval_dataset.num_classes,
+        overlaps=0.5,
+        thing_list=eval_dataset.thing_list)
+    instance_metric_AP = InstanceEvaluator(
+        eval_dataset.num_classes,
+        overlaps=list(np.arange(0.5, 1.0, 0.05)),
+        thing_list=eval_dataset.thing_list)
+    panoptic_metric = PanopticEvaluator(
+        num_classes=eval_dataset.num_classes,
+        thing_list=eval_dataset.thing_list,
+        ignore_index=eval_dataset.ignore_index,
+        label_divisor=eval_dataset.label_divisor)
+
+    if print_detail:
+        logger.info(
+            "Start evaluating (total_samples={}, total_iters={})...".format(
+                len(eval_dataset), total_iters))
+    progbar_val = progbar.Progbar(target=total_iters, verbose=1)
+    reader_cost_averager = TimeAverager()
+    batch_cost_averager = TimeAverager()
+    batch_start = time.time()
+    with paddle.no_grad():
+        for iter, data in enumerate(loader):
+            reader_cost_averager.record(time.time() - batch_start)
+            # Val-mode batches are (image, raw semantic, raw instance, raw panoptic) labels.
+            im = data[0]
+            raw_semantic_label = data[1]
+            raw_instance_label = data[2]
+            raw_panoptic_label = data[3]
+            ori_shape = raw_semantic_label.shape[-2:]
+
+            semantic, semantic_softmax, instance, panoptic, ctr_hmp = infer.inference(
+                model=model,
+                im=im,
+                transforms=eval_dataset.transforms.transforms,
+                thing_list=eval_dataset.thing_list,
+                label_divisor=eval_dataset.label_divisor,
+                stuff_area=eval_dataset.stuff_area,
+                ignore_index=eval_dataset.ignore_index,
+                threshold=threshold,
+                nms_kernel=nms_kernel,
+                top_k=top_k,
+                ori_shape=ori_shape)
+            semantic = semantic.squeeze().numpy()
+            semantic_softmax = semantic_softmax.squeeze().numpy()
+            instance = instance.squeeze().numpy()
+            panoptic = panoptic.squeeze().numpy()
+            ctr_hmp = ctr_hmp.squeeze().numpy()
+            raw_semantic_label = raw_semantic_label.squeeze().numpy()
+            raw_instance_label = raw_instance_label.squeeze().numpy()
+            raw_panoptic_label = raw_panoptic_label.squeeze().numpy()
+
+            # Update the semantic, instance and panoptic metrics
+            semantic_metric.update(semantic, raw_semantic_label)
+
+            gts = instance_metric_AP.convert_gt_map(raw_semantic_label,
+                                                    raw_instance_label)
+            preds = instance_metric_AP.convert_pred_map(semantic_softmax,
+                                                        panoptic)
+            ignore_mask = raw_semantic_label == eval_dataset.ignore_index
+            instance_metric_AP.update(preds, gts, ignore_mask=ignore_mask)
+            instance_metric_AP50.update(preds, gts, ignore_mask=ignore_mask)
+
+            panoptic_metric.update(panoptic, raw_panoptic_label)
+
+            batch_cost_averager.record(
+                time.time() - batch_start, num_samples=len(im))
+            batch_cost = batch_cost_averager.get_average()
+            reader_cost = reader_cost_averager.get_average()
+
+            if local_rank == 0:
+                progbar_val.update(iter + 1, [('batch_cost', batch_cost),
+                                              ('reader cost', reader_cost)])
+            reader_cost_averager.reset()
+            batch_cost_averager.reset()
+            batch_start = time.time()
+
+    semantic_results = semantic_metric.evaluate()
+    panoptic_results = panoptic_metric.evaluate()
+    instance_results = OrderedDict()
+    ins_ap = instance_metric_AP.evaluate()
+    ins_ap50 = instance_metric_AP50.evaluate()
+    instance_results['ins_seg'] = OrderedDict()
+    instance_results['ins_seg']['mAP'] = ins_ap['ins_seg']['mAP']
+    instance_results['ins_seg']['AP'] = ins_ap['ins_seg']['AP']
+    instance_results['ins_seg']['mAP50'] = ins_ap50['ins_seg']['mAP']
+    instance_results['ins_seg']['AP50'] = ins_ap50['ins_seg']['AP']
+
+    if print_detail:
+        logger.info(panoptic_results)
+        print()
+        logger.info(semantic_results)
+        print()
+        logger.info(instance_results)
+        print()
+
+        pq = panoptic_results['pan_seg']['All']['pq']
+        miou = semantic_results['sem_seg']['mIoU']
+        map = instance_results['ins_seg']['mAP']
+        map50 = instance_results['ins_seg']['mAP50']
+        logger.info(
+            "PQ: {:.4f}, mIoU: {:.4f}, mAP: {:.4f}, mAP50: {:.4f}".format(
+                pq, miou, map, map50))
+
+    return panoptic_results, semantic_results, instance_results
diff --git a/contrib/PanopticDeepLab/paddleseg/cvlibs/__init__.py b/contrib/PanopticDeepLab/paddleseg/cvlibs/__init__.py
new file mode 100644
index 0000000000..5fcb1d6c10
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/cvlibs/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import manager +from . import param_init +from .config import Config diff --git a/contrib/PanopticDeepLab/paddleseg/cvlibs/callbacks.py b/contrib/PanopticDeepLab/paddleseg/cvlibs/callbacks.py new file mode 100644 index 0000000000..1188b2cdac --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/cvlibs/callbacks.py @@ -0,0 +1,279 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import time + +import numpy as np +import paddle +from paddle.distributed.parallel import ParallelEnv +from visualdl import LogWriter +from paddleseg.utils.progbar import Progbar +import paddleseg.utils.logger as logger + + +class CallbackList(object): + """ + Container abstracting a list of callbacks. + + Args: + callbacks (list[Callback]): List of `Callback` instances. + """ + + def __init__(self, callbacks=None): + callbacks = callbacks or [] + self.callbacks = [c for c in callbacks] + + def append(self, callback): + self.callbacks.append(callback) + + def set_params(self, params): + for callback in self.callbacks: + callback.set_params(params) + + def set_model(self, model): + for callback in self.callbacks: + callback.set_model(model) + + def set_optimizer(self, optimizer): + for callback in self.callbacks: + callback.set_optimizer(optimizer) + + def on_iter_begin(self, iter, logs=None): + """Called right before processing a batch. + """ + logs = logs or {} + for callback in self.callbacks: + callback.on_iter_begin(iter, logs) + self._t_enter_iter = time.time() + + def on_iter_end(self, iter, logs=None): + """Called at the end of a batch. + """ + logs = logs or {} + for callback in self.callbacks: + callback.on_iter_end(iter, logs) + self._t_exit_iter = time.time() + + def on_train_begin(self, logs=None): + """Called at the beginning of training. + """ + logs = logs or {} + for callback in self.callbacks: + callback.on_train_begin(logs) + + def on_train_end(self, logs=None): + """Called at the end of training. + """ + logs = logs or {} + for callback in self.callbacks: + callback.on_train_end(logs) + + def __iter__(self): + return iter(self.callbacks) + + +class Callback(object): + """Abstract base class used to build new callbacks. 
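+
+    New callbacks are implemented by overriding any of the hook methods
+    defined below: set_params, set_model, set_optimizer, on_iter_begin,
+    on_iter_end, on_train_begin and on_train_end.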
+ """ + + def __init__(self): + self.validation_data = None + + def set_params(self, params): + self.params = params + + def set_model(self, model): + self.model = model + + def set_optimizer(self, optimizer): + self.optimizer = optimizer + + def on_iter_begin(self, iter, logs=None): + pass + + def on_iter_end(self, iter, logs=None): + pass + + def on_train_begin(self, logs=None): + pass + + def on_train_end(self, logs=None): + pass + + +class BaseLogger(Callback): + def __init__(self, period=10): + super(BaseLogger, self).__init__() + self.period = period + + def _reset(self): + self.totals = {} + + def on_train_begin(self, logs=None): + self.totals = {} + + def on_iter_end(self, iter, logs=None): + logs = logs or {} + #(iter - 1) // iters_per_epoch + 1 + for k, v in logs.items(): + if k in self.totals.keys(): + self.totals[k] += v + else: + self.totals[k] = v + + if iter % self.period == 0 and ParallelEnv().local_rank == 0: + + for k in self.totals: + logs[k] = self.totals[k] / self.period + self._reset() + + +class TrainLogger(Callback): + def __init__(self, log_freq=10): + self.log_freq = log_freq + + def _calculate_eta(self, remaining_iters, speed): + if remaining_iters < 0: + remaining_iters = 0 + remaining_time = int(remaining_iters * speed) + result = "{:0>2}:{:0>2}:{:0>2}" + arr = [] + for i in range(2, -1, -1): + arr.append(int(remaining_time / 60**i)) + remaining_time %= 60**i + return result.format(*arr) + + def on_iter_end(self, iter, logs=None): + + if iter % self.log_freq == 0 and ParallelEnv().local_rank == 0: + total_iters = self.params["total_iters"] + iters_per_epoch = self.params["iters_per_epoch"] + remaining_iters = total_iters - iter + eta = self._calculate_eta(remaining_iters, logs["batch_cost"]) + current_epoch = (iter - 1) // self.params["iters_per_epoch"] + 1 + loss = logs["loss"] + lr = self.optimizer.get_lr() + batch_cost = logs["batch_cost"] + reader_cost = logs["reader_cost"] + + logger.info( + "[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.4f} | ETA {}" + .format(current_epoch, iter, total_iters, loss, lr, batch_cost, + reader_cost, eta)) + + +class ProgbarLogger(Callback): + def __init__(self): + super(ProgbarLogger, self).__init__() + + def on_train_begin(self, logs=None): + self.verbose = self.params["verbose"] + self.total_iters = self.params["total_iters"] + self.target = self.params["total_iters"] + self.progbar = Progbar(target=self.target, verbose=self.verbose) + self.seen = 0 + self.log_values = [] + + def on_iter_begin(self, iter, logs=None): + #self.seen = 0 + if self.seen < self.target: + self.log_values = [] + + def on_iter_end(self, iter, logs=None): + logs = logs or {} + self.seen += 1 + for k in self.params['metrics']: + if k in logs: + self.log_values.append((k, logs[k])) + + #if self.verbose and self.seen < self.target and ParallelEnv.local_rank == 0: + #print(self.log_values) + if self.seen < self.target: + self.progbar.update(self.seen, self.log_values) + + +class ModelCheckpoint(Callback): + def __init__(self, + save_dir, + monitor="miou", + save_best_only=False, + save_params_only=True, + mode="max", + period=1): + + super(ModelCheckpoint, self).__init__() + self.monitor = monitor + self.save_dir = save_dir + self.save_best_only = save_best_only + self.save_params_only = save_params_only + self.period = period + self.iters_since_last_save = 0 + + if mode == "min": + self.monitor_op = np.less + self.best = np.Inf + elif mode == "max": + self.monitor_op = np.greater + self.best = -np.Inf + else: + 
raise RuntimeError("`mode` is neither \"min\" nor \"max\"!")
+
+    def on_train_begin(self, logs=None):
+        self.verbose = self.params["verbose"]
+        save_dir = self.save_dir
+        if not os.path.isdir(save_dir):
+            if os.path.exists(save_dir):
+                os.remove(save_dir)
+            os.makedirs(save_dir)
+
+    def on_iter_end(self, iter, logs=None):
+        logs = logs or {}
+        self.iters_since_last_save += 1
+        current_save_dir = os.path.join(self.save_dir, "iter_{}".format(iter))
+        current_save_dir = os.path.abspath(current_save_dir)
+        if iter % self.period == 0 and ParallelEnv().local_rank == 0:
+            if self.verbose > 0:
+                print("iter {iter_num}: saving model to {path}".format(
+                    iter_num=iter, path=current_save_dir))
+
+            paddle.save(self.model.state_dict(),
+                        os.path.join(current_save_dir, 'model.pdparams'))
+
+            if not self.save_params_only:
+                paddle.save(self.optimizer.state_dict(),
+                            os.path.join(current_save_dir, 'model.pdopt'))
+
+
+class VisualDL(Callback):
+    def __init__(self, log_dir="./log", freq=1):
+        super(VisualDL, self).__init__()
+        self.log_dir = log_dir
+        self.freq = freq
+
+    def on_train_begin(self, logs=None):
+        self.writer = LogWriter(self.log_dir)
+
+    def on_iter_end(self, iter, logs=None):
+        logs = logs or {}
+        if iter % self.freq == 0 and ParallelEnv().local_rank == 0:
+            for k, v in logs.items():
+                self.writer.add_scalar("Train/{}".format(k), v, iter)
+
+            self.writer.flush()
+
+    def on_train_end(self, logs=None):
+        self.writer.close()
diff --git a/contrib/PanopticDeepLab/paddleseg/cvlibs/config.py b/contrib/PanopticDeepLab/paddleseg/cvlibs/config.py
new file mode 100644
index 0000000000..5ab1d29872
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/cvlibs/config.py
@@ -0,0 +1,297 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import codecs
+import os
+from typing import Any
+
+import paddle
+import yaml
+
+from paddleseg.cvlibs import manager
+
+
+class Config(object):
+    '''
+    Training configuration parsing. Only yaml/yml files are supported.
+
+    The following hyper-parameters are available in the config file:
+        batch_size: The number of samples per gpu.
+        iters: The total training steps.
+        train_dataset: A training data config including type/data_root/transforms/mode.
+            For data type, please refer to paddleseg.datasets.
+            For specific transforms, please refer to paddleseg.transforms.transforms.
+        val_dataset: A validation data config including type/data_root/transforms/mode.
+        optimizer: An optimizer config. Currently PaddleSeg only supports sgd with momentum in the config file.
+            In addition, weight_decay could be set as a regularization.
+        learning_rate: A learning rate config. If decay is configured, the learning_rate value is the starting learning rate,
+            where only poly decay is supported using the config file. In addition, decay power and end_lr are tuned experimentally.
+        loss: A loss config. 
Multi-loss configuration is supported. The loss type order must be consistent with the seg model outputs,
+            where the coef term indicates the weight of the corresponding loss. Note that the number of coef must be the same as the number of
+            model outputs. There may be a single loss type if the same loss type is shared among the outputs; otherwise the number of
+            loss types must be consistent with coef.
+        model: A model config including type/backbone and model-dependent arguments.
+            For model type, please refer to paddleseg.models.
+            For backbone, please refer to paddleseg.models.backbones.
+
+    Args:
+        path (str) : The path of the config file, supports yaml format only.
+
+    Examples:
+
+        from paddleseg.cvlibs.config import Config
+
+        # Create a cfg object with yaml file path.
+        cfg = Config(yaml_cfg_path)
+
+        # Parsing the argument when its property is used.
+        train_dataset = cfg.train_dataset
+
+        # the argument of model should be parsed after dataset,
+        # since the model builder uses some properties in dataset.
+        model = cfg.model
+        ...
+    '''
+
+    def __init__(self,
+                 path: str,
+                 learning_rate: float = None,
+                 batch_size: int = None,
+                 iters: int = None):
+        if not path:
+            raise ValueError('Please specify the configuration file path.')
+
+        if not os.path.exists(path):
+            raise FileNotFoundError('File {} does not exist'.format(path))
+
+        self._model = None
+        self._losses = None
+        if path.endswith('yml') or path.endswith('yaml'):
+            self.dic = self._parse_from_yaml(path)
+        else:
+            raise RuntimeError('Config file should be in yaml format!')
+
+        self.update(
+            learning_rate=learning_rate, batch_size=batch_size, iters=iters)
+
+    def _update_dic(self, dic, base_dic):
+        """
+        Update dic recursively, using base_dic as the base config.
+        """
+        base_dic = base_dic.copy()
+        for key, val in dic.items():
+            if isinstance(val, dict) and key in base_dic:
+                base_dic[key] = self._update_dic(val, base_dic[key])
+            else:
+                base_dic[key] = val
+        dic = base_dic
+        return dic
+
+    def _parse_from_yaml(self, path: str):
+        '''Parse a yaml file and build the config.'''
+        with codecs.open(path, 'r', 'utf-8') as file:
+            dic = yaml.load(file, Loader=yaml.FullLoader)
+
+        if '_base_' in dic:
+            cfg_dir = os.path.dirname(path)
+            base_path = dic.pop('_base_')
+            base_path = os.path.join(cfg_dir, base_path)
+            base_dic = self._parse_from_yaml(base_path)
+            dic = self._update_dic(dic, base_dic)
+        return dic
+
+    def update(self,
+               learning_rate: float = None,
+               batch_size: int = None,
+               iters: int = None):
+        '''Update the config.'''
+        if learning_rate:
+            self.dic['learning_rate']['value'] = learning_rate
+
+        if batch_size:
+            self.dic['batch_size'] = batch_size
+
+        if iters:
+            self.dic['iters'] = iters
+
+    @property
+    def batch_size(self) -> int:
+        return self.dic.get('batch_size', 1)
+
+    @property
+    def iters(self) -> int:
+        iters = self.dic.get('iters')
+        if not iters:
+            raise RuntimeError('No iters specified in the configuration file.')
+        return iters
+
+    @property
+    def learning_rate(self) -> paddle.optimizer.lr.LRScheduler:
+        _learning_rate = self.dic.get('learning_rate', {}).get('value')
+        if not _learning_rate:
+            raise RuntimeError(
+                'No learning rate specified in the configuration file.')
+
+        args = self.decay_args
+        decay_type = args.pop('type')
+
+        if decay_type == 'poly':
+            lr = _learning_rate
+            return paddle.optimizer.lr.PolynomialDecay(lr, **args)
+        else:
+            raise RuntimeError('Only poly decay is supported.')
+
+    @property
+    def optimizer(self) -> paddle.optimizer.Optimizer:
+        lr = self.learning_rate
+        args = self.optimizer_args
+        optimizer_type = args.pop('type')
+
+        if 
optimizer_type == 'sgd':
+            return paddle.optimizer.Momentum(
+                lr, parameters=self.model.parameters(), **args)
+        elif optimizer_type == 'adam':
+            return paddle.optimizer.Adam(
+                lr, parameters=self.model.parameters(), **args)
+        else:
+            raise RuntimeError('Only sgd and adam optimizers are supported.')
+
+    @property
+    def optimizer_args(self) -> dict:
+        args = self.dic.get('optimizer', {}).copy()
+        if args['type'] == 'sgd':
+            args.setdefault('momentum', 0.9)
+
+        return args
+
+    @property
+    def decay_args(self) -> dict:
+        args = self.dic.get('learning_rate', {}).get('decay', {
+            'type': 'poly',
+            'power': 0.9
+        }).copy()
+
+        if args['type'] == 'poly':
+            args.setdefault('decay_steps', self.iters)
+            args.setdefault('end_lr', 0)
+
+        return args
+
+    @property
+    def loss(self) -> dict:
+        args = self.dic.get('loss', {}).copy()
+        if 'types' in args and 'coef' in args:
+            len_types = len(args['types'])
+            len_coef = len(args['coef'])
+            if len_types != len_coef:
+                if len_types == 1:
+                    args['types'] = args['types'] * len_coef
+                else:
+                    raise ValueError(
+                        'The length of types should equal the length of coef, or be 1, in the loss config, but they are {} and {}.'
+                        .format(len_types, len_coef))
+        else:
+            raise ValueError(
+                'Loss config should contain keys of "types" and "coef"')
+
+        if not self._losses:
+            self._losses = dict()
+            for key, val in args.items():
+                if key == 'types':
+                    self._losses['types'] = []
+                    for item in args['types']:
+                        item['ignore_index'] = self.train_dataset.ignore_index
+                        self._losses['types'].append(self._load_object(item))
+                else:
+                    self._losses[key] = val
+            if len(self._losses['coef']) != len(self._losses['types']):
+                raise RuntimeError(
+                    'The length of coef should equal the length of types in the loss config: {} != {}.'
+                    .format(
+                        len(self._losses['coef']), len(self._losses['types'])))
+        return self._losses
+
+    @property
+    def model(self) -> paddle.nn.Layer:
+        model_cfg = self.dic.get('model', {}).copy()
+        if not model_cfg:
+            raise RuntimeError('No model specified in the configuration file.')
+        if 'num_classes' not in model_cfg:
+            if self.train_dataset and hasattr(self.train_dataset,
+                                              'num_classes'):
+                model_cfg['num_classes'] = self.train_dataset.num_classes
+            elif self.val_dataset and hasattr(self.val_dataset, 'num_classes'):
+                model_cfg['num_classes'] = self.val_dataset.num_classes
+            else:
+                raise ValueError(
+                    '`num_classes` is not found. 
Please set it in model, train_dataset or val_dataset.'
+                )
+
+        if not self._model:
+            self._model = self._load_object(model_cfg)
+        return self._model
+
+    @property
+    def train_dataset(self) -> paddle.io.Dataset:
+        _train_dataset = self.dic.get('train_dataset', {}).copy()
+        if not _train_dataset:
+            return None
+        return self._load_object(_train_dataset)
+
+    @property
+    def val_dataset(self) -> paddle.io.Dataset:
+        _val_dataset = self.dic.get('val_dataset', {}).copy()
+        if not _val_dataset:
+            return None
+        return self._load_object(_val_dataset)
+
+    def _load_component(self, com_name: str) -> Any:
+        com_list = [
+            manager.MODELS, manager.BACKBONES, manager.DATASETS,
+            manager.TRANSFORMS, manager.LOSSES
+        ]
+
+        for com in com_list:
+            if com_name in com.components_dict:
+                return com[com_name]
+        else:
+            raise RuntimeError(
+                'The specified component {} was not found.'.format(com_name))
+
+    def _load_object(self, cfg: dict) -> Any:
+        cfg = cfg.copy()
+        if 'type' not in cfg:
+            raise RuntimeError('No object information in {}.'.format(cfg))
+
+        component = self._load_component(cfg.pop('type'))
+
+        params = {}
+        for key, val in cfg.items():
+            if self._is_meta_type(val):
+                params[key] = self._load_object(val)
+            elif isinstance(val, list):
+                params[key] = [
+                    self._load_object(item)
+                    if self._is_meta_type(item) else item for item in val
+                ]
+            else:
+                params[key] = val
+
+        return component(**params)
+
+    def _is_meta_type(self, item: Any) -> bool:
+        return isinstance(item, dict) and 'type' in item
+
+    def __str__(self) -> str:
+        return yaml.dump(self.dic)
diff --git a/contrib/PanopticDeepLab/paddleseg/cvlibs/manager.py b/contrib/PanopticDeepLab/paddleseg/cvlibs/manager.py
new file mode 100644
index 0000000000..cd1d105a5e
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/cvlibs/manager.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from collections.abc import Sequence
+
+
+class ComponentManager:
+    """
+    Implement a manager class to add new components properly.
+    A component can be added as either a class or a function.
+
+    Args:
+        name (str): The name of the component manager.
+
+    Returns:
+        A callable object of ComponentManager.
+
+    Examples 1:
+
+        from paddleseg.cvlibs.manager import ComponentManager
+
+        model_manager = ComponentManager()
+
+        class AlexNet: ...
+        class ResNet: ...
+
+        model_manager.add_component(AlexNet)
+        model_manager.add_component(ResNet)
+
+        # Or pass a sequence of components at once:
+        model_manager.add_component([AlexNet, ResNet])
+        print(model_manager.components_dict)
+        # {'AlexNet': <class '__main__.AlexNet'>, 'ResNet': <class '__main__.ResNet'>}
+
+    Examples 2:
+
+        # Or, more easily, use it as a Python decorator by adding it above the class declaration.
+        from paddleseg.cvlibs.manager import ComponentManager
+
+        model_manager = ComponentManager()
+
+        @model_manager.add_component
+        class AlexNet: ...
+
+        @model_manager.add_component
+        class ResNet: ...
+
+        print(model_manager.components_dict)
+        # {'AlexNet': <class '__main__.AlexNet'>, 'ResNet': <class '__main__.ResNet'>}
+    """
+
+    def __init__(self, name=None):
+        self._components_dict = dict()
+        self._name = name
+
+    def __len__(self):
+        return len(self._components_dict)
+
+    def __repr__(self):
+        name_str = self._name if self._name else self.__class__.__name__
+        return "{}:{}".format(name_str, list(self._components_dict.keys()))
+
+    def __getitem__(self, item):
+        if item not in self._components_dict.keys():
+            raise KeyError("{} does not exist in available {}".format(
+                item, self))
+        return self._components_dict[item]
+
+    @property
+    def components_dict(self):
+        return self._components_dict
+
+    @property
+    def name(self):
+        return self._name
+
+    def _add_single_component(self, component):
+        """
+        Add a single component into the corresponding manager.
+
+        Args:
+            component (function|class): A new component.
+
+        Raises:
+            TypeError: When `component` is neither class nor function.
+            KeyError: When `component` was added already.
+        """
+
+        # Currently only class or function types are supported
+        if not (inspect.isclass(component) or inspect.isfunction(component)):
+            raise TypeError(
+                "Expect class/function type, but received {}".format(
+                    type(component)))
+
+        # Obtain the internal name of the component
+        component_name = component.__name__
+
+        # Check whether the component was added already
+        if component_name in self._components_dict.keys():
+            raise KeyError("{} exists already!".format(component_name))
+        else:
+            # Take the internal name of the component as its key
+            self._components_dict[component_name] = component
+
+    def add_component(self, components):
+        """
+        Add component(s) into the corresponding manager.
+
+        Args:
+            components (function|class|list|tuple): Support four types of components.
+
+        Returns:
+            components (function|class|list|tuple): Same as the input components.
+        """
+
+        # Check whether the type is a sequence
+        if isinstance(components, Sequence):
+            for component in components:
+                self._add_single_component(component)
+        else:
+            component = components
+            self._add_single_component(component)
+
+        return components
+
+
+MODELS = ComponentManager("models")
+BACKBONES = ComponentManager("backbones")
+DATASETS = ComponentManager("datasets")
+TRANSFORMS = ComponentManager("transforms")
+LOSSES = ComponentManager("losses")
diff --git a/contrib/PanopticDeepLab/paddleseg/cvlibs/param_init.py b/contrib/PanopticDeepLab/paddleseg/cvlibs/param_init.py
new file mode 100644
index 0000000000..335281242e
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/cvlibs/param_init.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.nn as nn
+
+
+def constant_init(param, **kwargs):
+    """
+    Initialize the `param` with constants.
+
+    Args:
+        param (Tensor): Tensor that needs to be initialized.
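+        **kwargs: Keyword arguments for paddle.nn.initializer.Constant, e.g. ``value`` for the constant to fill.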
+
+    Examples:
+
+        from paddleseg.cvlibs import param_init
+        import paddle.nn as nn
+
+        linear = nn.Linear(2, 4)
+        param_init.constant_init(linear.weight, value=2.0)
+        print(linear.weight.numpy())
+        # result is [[2. 2. 2. 2.], [2. 2. 2. 2.]]
+
+    """
+    initializer = nn.initializer.Constant(**kwargs)
+    initializer(param, param.block)
+
+
+def normal_init(param, **kwargs):
+    """
+    Initialize the `param` with a Normal distribution.
+
+    Args:
+        param (Tensor): Tensor that needs to be initialized.
+
+    Examples:
+
+        from paddleseg.cvlibs import param_init
+        import paddle.nn as nn
+
+        linear = nn.Linear(2, 4)
+        param_init.normal_init(linear.weight, loc=0.0, scale=1.0)
+
+    """
+    initializer = nn.initializer.Normal(**kwargs)
+    initializer(param, param.block)
+
+
+def kaiming_normal_init(param, **kwargs):
+    """
+    Initialize the input tensor with Kaiming Normal initialization.
+
+    This function implements the `param` initialization from the paper
+    `Delving Deep into Rectifiers: Surpassing Human-Level Performance on
+    ImageNet Classification <https://arxiv.org/abs/1502.01852>`_
+    by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a
+    robust initialization method that particularly considers the rectifier
+    nonlinearities. In case of Uniform distribution, the range is [-x, x], where
+
+    .. math::
+
+        x = \sqrt{\\frac{6.0}{fan\_in}}
+
+    In case of Normal distribution, the mean is 0 and the standard deviation is
+
+    .. math::
+
+        \sqrt{\\frac{2.0}{fan\_in}}
+
+    Args:
+        param (Tensor): Tensor that needs to be initialized.
+
+    Examples:
+
+        from paddleseg.cvlibs import param_init
+        import paddle.nn as nn
+
+        linear = nn.Linear(2, 4)
+        # uniform is used to decide whether to use uniform or normal distribution
+        param_init.kaiming_normal_init(linear.weight)
+
+    """
+    initializer = nn.initializer.KaimingNormal(**kwargs)
+    initializer(param, param.block)
diff --git a/contrib/PanopticDeepLab/paddleseg/datasets/__init__.py b/contrib/PanopticDeepLab/paddleseg/datasets/__init__.py
new file mode 100644
index 0000000000..fefa6a07ea
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/datasets/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .cityscapes_panoptic import CityscapesPanoptic
diff --git a/contrib/PanopticDeepLab/paddleseg/datasets/cityscapes_panoptic.py b/contrib/PanopticDeepLab/paddleseg/datasets/cityscapes_panoptic.py
new file mode 100644
index 0000000000..a3789e4c2d
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/datasets/cityscapes_panoptic.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+import glob
+
+import numpy as np
+import paddle
+from paddleseg.cvlibs import manager
+from paddleseg.transforms import Compose, PanopticTargetGenerator, SemanticTargetGenerator, InstanceTargetGenerator, RawPanopticTargetGenerator
+import PIL.Image as Image
+
+
+@manager.DATASETS.add_component
+class CityscapesPanoptic(paddle.io.Dataset):
+    """
+    Cityscapes dataset `https://www.cityscapes-dataset.com/`.
+    The folder structure is as follows:
+
+        cityscapes/
+        |--gtFine/
+        |  |--train/
+        |  |  |--aachen/
+        |  |  |  |--*_color.png, *_instanceIds.png, *_labelIds.png, *_polygons.json,
+        |  |  |  |--*_labelTrainIds.png
+        |  |  |  |--...
+        |  |--val/
+        |  |--test/
+        |  |--cityscapes_panoptic_train_trainId.json
+        |  |--cityscapes_panoptic_train_trainId/
+        |  |  |--*_panoptic.png
+        |  |--cityscapes_panoptic_val_trainId.json
+        |  |--cityscapes_panoptic_val_trainId/
+        |  |  |--*_panoptic.png
+        |--leftImg8bit/
+        |  |--train/
+        |  |--val/
+        |  |--test/
+
+    Args:
+        transforms (list): Transforms for the image.
+        dataset_root (str): Cityscapes dataset directory.
+        mode (str, optional): Which part of the dataset to use. It is one of ('train', 'val'). Default: 'train'.
+        ignore_stuff_in_offset (bool, optional): Whether to ignore stuff regions when training the offset branch. Default: False.
+        small_instance_area (int, optional): Instances whose area is less than the given value are considered small. Default: 0.
+        small_instance_weight (int, optional): The loss weight for small instances. Default: 1.
+        stuff_area (int, optional): An integer; stuff segments whose area is less than stuff_area are removed. Default: 2048.
+    """
+
+    def __init__(self,
+                 transforms,
+                 dataset_root,
+                 mode='train',
+                 ignore_stuff_in_offset=False,
+                 small_instance_area=0,
+                 small_instance_weight=1,
+                 stuff_area=2048):
+        self.dataset_root = dataset_root
+        self.transforms = Compose(transforms)
+        self.file_list = list()
+        self.ins_list = []
+        mode = mode.lower()
+        self.mode = mode
+        self.num_classes = 19
+        self.ignore_index = 255
+        self.thing_list = [11, 12, 13, 14, 15, 16, 17, 18]
+        self.label_divisor = 1000
+        self.stuff_area = stuff_area
+
+        if mode not in ['train', 'val']:
+            raise ValueError(
+                "mode should be 'train' or 'val', but got {}.".format(mode))
+
+        if self.transforms is None:
+            raise ValueError("`transforms` is necessary, but it is None.")
+
+        img_dir = os.path.join(self.dataset_root, 'leftImg8bit')
+        label_dir = os.path.join(self.dataset_root, 'gtFine')
+        if self.dataset_root is None or not os.path.isdir(
+                self.dataset_root) or not os.path.isdir(
+                    img_dir) or not os.path.isdir(label_dir):
+            raise ValueError(
+                "The dataset is not found, or the folder structure does not conform to the expected layout."
+            )
+        json_filename = os.path.join(
+            self.dataset_root, 'gtFine',
+            'cityscapes_panoptic_{}_trainId.json'.format(mode))
+        dataset = json.load(open(json_filename))
+        img_files = []
+        label_files = []
+        for img in dataset['images']:
+            img_file_name = img['file_name']
+            img_files.append(
+                os.path.join(self.dataset_root, 'leftImg8bit', mode,
+                             img_file_name.split('_')[0],
+                             img_file_name.replace('_gtFine', '')))
+        for ann in dataset['annotations']:
+            ann_file_name = ann['file_name']
+            label_files.append(
+                os.path.join(self.dataset_root, 'gtFine',
+                             'cityscapes_panoptic_{}_trainId'.format(mode),
+                             ann_file_name))
+            self.ins_list.append(ann['segments_info'])
+
+        self.file_list = [[
+            img_path, label_path
+        ] for img_path, label_path in zip(img_files, label_files)]
+
+        self.target_transform = PanopticTargetGenerator(
+            self.ignore_index,
+            self.rgb2id,
+            self.thing_list,
+            sigma=8,
+            ignore_stuff_in_offset=ignore_stuff_in_offset,
+            small_instance_area=small_instance_area,
+            small_instance_weight=small_instance_weight)
+
+        self.raw_semantic_generator = SemanticTargetGenerator(
+            ignore_index=self.ignore_index, rgb2id=self.rgb2id)
+        self.raw_instance_generator = InstanceTargetGenerator(self.rgb2id)
+        self.raw_panoptic_generator = RawPanopticTargetGenerator(
+            ignore_index=self.ignore_index,
+            rgb2id=self.rgb2id,
+            label_divisor=self.label_divisor)
+
+    @staticmethod
+    def rgb2id(color):
+        """Convert an encoded color to a panoptic label.
+        The color is created by `color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]`.
+
+        Args:
+            color: Ndarray or a tuple, color encoded image.
+
+        Returns:
+            Panoptic label.
+        """
+        if isinstance(color, np.ndarray) and len(color.shape) == 3:
+            if color.dtype == np.uint8:
+                color = color.astype(np.int32)
+            return color[:, :,
+                         0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
+        return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
+
+    def __getitem__(self, idx):
+        image_path, label_path = self.file_list[idx]
+        dataset_dict = {}
+        im, label = self.transforms(im=image_path, label=label_path)
+        label_dict = self.target_transform(label, self.ins_list[idx])
+        for key in label_dict.keys():
+            dataset_dict[key] = label_dict[key]
+        dataset_dict['image'] = im
+        if self.mode == 'val':
+            raw_label = np.asarray(Image.open(label_path))
+            dataset_dict['raw_semantic_label'] = self.raw_semantic_generator(
+                raw_label, self.ins_list[idx])['semantic']
+            dataset_dict['raw_instance_label'] = self.raw_instance_generator(
+                raw_label)['instance']
+            dataset_dict['raw_panoptic_label'] = self.raw_panoptic_generator(
+                raw_label, self.ins_list[idx])['panoptic']
+
+        image = np.array(dataset_dict['image'])
+        semantic = np.array(dataset_dict['semantic'])
+        semantic_weights = np.array(dataset_dict['semantic_weights'])
+        center = np.array(dataset_dict['center'])
+        center_weights = np.array(dataset_dict['center_weights'])
+        offset = np.array(dataset_dict['offset'])
+        offset_weights = np.array(dataset_dict['offset_weights'])
+        foreground = np.array(dataset_dict['foreground'])
+        if self.mode == 'train':
+            return image, semantic, semantic_weights, center, center_weights, offset, offset_weights, foreground
+        elif self.mode == 'val':
+            raw_semantic_label = np.array(dataset_dict['raw_semantic_label'])
+            raw_instance_label = np.array(dataset_dict['raw_instance_label'])
+            raw_panoptic_label = np.array(dataset_dict['raw_panoptic_label'])
+            return image, raw_semantic_label, raw_instance_label, raw_panoptic_label
+        else:
+            raise ValueError(
+                '{} is not supported, please set it to one of ("train", 
"val")'. + format(self.mode)) + + def __len__(self): + return len(self.file_list) diff --git a/contrib/PanopticDeepLab/paddleseg/models/__init__.py b/contrib/PanopticDeepLab/paddleseg/models/__init__.py new file mode 100644 index 0000000000..37e457c547 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .backbones import * +from .losses import * + +from .panoptic_deeplab import PanopticDeepLab diff --git a/contrib/PanopticDeepLab/paddleseg/models/backbones/__init__.py b/contrib/PanopticDeepLab/paddleseg/models/backbones/__init__.py new file mode 100644 index 0000000000..8bc32c14b4 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/backbones/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .hrnet import * +from .resnet_vd import * +from .xception_deeplab import * +from .mobilenetv3 import * diff --git a/contrib/PanopticDeepLab/paddleseg/models/backbones/hrnet.py b/contrib/PanopticDeepLab/paddleseg/models/backbones/hrnet.py new file mode 100644 index 0000000000..40ed660d9d --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/backbones/hrnet.py @@ -0,0 +1,820 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddleseg.cvlibs import manager, param_init +from paddleseg.models import layers +from paddleseg.utils import utils + +__all__ = [ + "HRNet_W18_Small_V1", "HRNet_W18_Small_V2", "HRNet_W18", "HRNet_W30", + "HRNet_W32", "HRNet_W40", "HRNet_W44", "HRNet_W48", "HRNet_W60", "HRNet_W64" +] + + +class HRNet(nn.Layer): + """ + The HRNet implementation based on PaddlePaddle. + + The original article refers to + Jingdong Wang, et, al. 
"HRNet:Deep High-Resolution Representation Learning for Visual Recognition" + (https://arxiv.org/pdf/1908.07919.pdf). + + Args: + pretrained (str, optional): The path of pretrained model. + stage1_num_modules (int, optional): Number of modules for stage1. Default 1. + stage1_num_blocks (list, optional): Number of blocks per module for stage1. Default (4). + stage1_num_channels (list, optional): Number of channels per branch for stage1. Default (64). + stage2_num_modules (int, optional): Number of modules for stage2. Default 1. + stage2_num_blocks (list, optional): Number of blocks per module for stage2. Default (4, 4). + stage2_num_channels (list, optional): Number of channels per branch for stage2. Default (18, 36). + stage3_num_modules (int, optional): Number of modules for stage3. Default 4. + stage3_num_blocks (list, optional): Number of blocks per module for stage3. Default (4, 4, 4). + stage3_num_channels (list, optional): Number of channels per branch for stage3. Default [18, 36, 72). + stage4_num_modules (int, optional): Number of modules for stage4. Default 3. + stage4_num_blocks (list, optional): Number of blocks per module for stage4. Default (4, 4, 4, 4). + stage4_num_channels (list, optional): Number of channels per branch for stage4. Default (18, 36, 72. 144). + has_se (bool, optional): Whether to use Squeeze-and-Excitation module. Default False. + align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even, + e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. + """ + + def __init__(self, + pretrained=None, + stage1_num_modules=1, + stage1_num_blocks=(4, ), + stage1_num_channels=(64, ), + stage2_num_modules=1, + stage2_num_blocks=(4, 4), + stage2_num_channels=(18, 36), + stage3_num_modules=4, + stage3_num_blocks=(4, 4, 4), + stage3_num_channels=(18, 36, 72), + stage4_num_modules=3, + stage4_num_blocks=(4, 4, 4, 4), + stage4_num_channels=(18, 36, 72, 144), + has_se=False, + align_corners=False): + super(HRNet, self).__init__() + self.pretrained = pretrained + self.stage1_num_modules = stage1_num_modules + self.stage1_num_blocks = stage1_num_blocks + self.stage1_num_channels = stage1_num_channels + self.stage2_num_modules = stage2_num_modules + self.stage2_num_blocks = stage2_num_blocks + self.stage2_num_channels = stage2_num_channels + self.stage3_num_modules = stage3_num_modules + self.stage3_num_blocks = stage3_num_blocks + self.stage3_num_channels = stage3_num_channels + self.stage4_num_modules = stage4_num_modules + self.stage4_num_blocks = stage4_num_blocks + self.stage4_num_channels = stage4_num_channels + self.has_se = has_se + self.align_corners = align_corners + self.feat_channels = [sum(stage4_num_channels)] + + self.conv_layer1_1 = layers.ConvBNReLU( + in_channels=3, + out_channels=64, + kernel_size=3, + stride=2, + padding='same', + bias_attr=False) + + self.conv_layer1_2 = layers.ConvBNReLU( + in_channels=64, + out_channels=64, + kernel_size=3, + stride=2, + padding='same', + bias_attr=False) + + self.la1 = Layer1( + num_channels=64, + num_blocks=self.stage1_num_blocks[0], + num_filters=self.stage1_num_channels[0], + has_se=has_se, + name="layer2") + + self.tr1 = TransitionLayer( + in_channels=[self.stage1_num_channels[0] * 4], + out_channels=self.stage2_num_channels, + name="tr1") + + self.st2 = Stage( + num_channels=self.stage2_num_channels, + num_modules=self.stage2_num_modules, + num_blocks=self.stage2_num_blocks, + num_filters=self.stage2_num_channels, + has_se=self.has_se, + 
name="st2", + align_corners=align_corners) + + self.tr2 = TransitionLayer( + in_channels=self.stage2_num_channels, + out_channels=self.stage3_num_channels, + name="tr2") + self.st3 = Stage( + num_channels=self.stage3_num_channels, + num_modules=self.stage3_num_modules, + num_blocks=self.stage3_num_blocks, + num_filters=self.stage3_num_channels, + has_se=self.has_se, + name="st3", + align_corners=align_corners) + + self.tr3 = TransitionLayer( + in_channels=self.stage3_num_channels, + out_channels=self.stage4_num_channels, + name="tr3") + self.st4 = Stage( + num_channels=self.stage4_num_channels, + num_modules=self.stage4_num_modules, + num_blocks=self.stage4_num_blocks, + num_filters=self.stage4_num_channels, + has_se=self.has_se, + name="st4", + align_corners=align_corners) + self.init_weight() + + def forward(self, x): + conv1 = self.conv_layer1_1(x) + conv2 = self.conv_layer1_2(conv1) + + la1 = self.la1(conv2) + + tr1 = self.tr1([la1]) + st2 = self.st2(tr1) + + tr2 = self.tr2(st2) + st3 = self.st3(tr2) + + tr3 = self.tr3(st3) + st4 = self.st4(tr3) + + x0_h, x0_w = st4[0].shape[2:] + x1 = F.interpolate( + st4[1], (x0_h, x0_w), + mode='bilinear', + align_corners=self.align_corners) + x2 = F.interpolate( + st4[2], (x0_h, x0_w), + mode='bilinear', + align_corners=self.align_corners) + x3 = F.interpolate( + st4[3], (x0_h, x0_w), + mode='bilinear', + align_corners=self.align_corners) + x = paddle.concat([st4[0], x1, x2, x3], axis=1) + + return [x] + + def init_weight(self): + for layer in self.sublayers(): + if isinstance(layer, nn.Conv2D): + param_init.normal_init(layer.weight, std=0.001) + elif isinstance(layer, (nn.BatchNorm, nn.SyncBatchNorm)): + param_init.constant_init(layer.weight, value=1.0) + param_init.constant_init(layer.bias, value=0.0) + if self.pretrained is not None: + utils.load_pretrained_model(self, self.pretrained) + + +class Layer1(nn.Layer): + def __init__(self, + num_channels, + num_filters, + num_blocks, + has_se=False, + name=None): + super(Layer1, self).__init__() + + self.bottleneck_block_list = [] + + for i in range(num_blocks): + bottleneck_block = self.add_sublayer( + "bb_{}_{}".format(name, i + 1), + BottleneckBlock( + num_channels=num_channels if i == 0 else num_filters * 4, + num_filters=num_filters, + has_se=has_se, + stride=1, + downsample=True if i == 0 else False, + name=name + '_' + str(i + 1))) + self.bottleneck_block_list.append(bottleneck_block) + + def forward(self, x): + conv = x + for block_func in self.bottleneck_block_list: + conv = block_func(conv) + return conv + + +class TransitionLayer(nn.Layer): + def __init__(self, in_channels, out_channels, name=None): + super(TransitionLayer, self).__init__() + + num_in = len(in_channels) + num_out = len(out_channels) + self.conv_bn_func_list = [] + for i in range(num_out): + residual = None + if i < num_in: + if in_channels[i] != out_channels[i]: + residual = self.add_sublayer( + "transition_{}_layer_{}".format(name, i + 1), + layers.ConvBNReLU( + in_channels=in_channels[i], + out_channels=out_channels[i], + kernel_size=3, + padding='same', + bias_attr=False)) + else: + residual = self.add_sublayer( + "transition_{}_layer_{}".format(name, i + 1), + layers.ConvBNReLU( + in_channels=in_channels[-1], + out_channels=out_channels[i], + kernel_size=3, + stride=2, + padding='same', + bias_attr=False)) + self.conv_bn_func_list.append(residual) + + def forward(self, x): + outs = [] + for idx, conv_bn_func in enumerate(self.conv_bn_func_list): + if conv_bn_func is None: + outs.append(x[idx]) + else: + if idx < 
len(x): + outs.append(conv_bn_func(x[idx])) + else: + outs.append(conv_bn_func(x[-1])) + return outs + + +class Branches(nn.Layer): + def __init__(self, + num_blocks, + in_channels, + out_channels, + has_se=False, + name=None): + super(Branches, self).__init__() + + self.basic_block_list = [] + + for i in range(len(out_channels)): + self.basic_block_list.append([]) + for j in range(num_blocks[i]): + in_ch = in_channels[i] if j == 0 else out_channels[i] + basic_block_func = self.add_sublayer( + "bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1), + BasicBlock( + num_channels=in_ch, + num_filters=out_channels[i], + has_se=has_se, + name=name + '_branch_layer_' + str(i + 1) + '_' + + str(j + 1))) + self.basic_block_list[i].append(basic_block_func) + + def forward(self, x): + outs = [] + for idx, input in enumerate(x): + conv = input + for basic_block_func in self.basic_block_list[idx]: + conv = basic_block_func(conv) + outs.append(conv) + return outs + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + has_se, + stride=1, + downsample=False, + name=None): + super(BottleneckBlock, self).__init__() + + self.has_se = has_se + self.downsample = downsample + + self.conv1 = layers.ConvBNReLU( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=1, + padding='same', + bias_attr=False) + + self.conv2 = layers.ConvBNReLU( + in_channels=num_filters, + out_channels=num_filters, + kernel_size=3, + stride=stride, + padding='same', + bias_attr=False) + + self.conv3 = layers.ConvBN( + in_channels=num_filters, + out_channels=num_filters * 4, + kernel_size=1, + padding='same', + bias_attr=False) + + if self.downsample: + self.conv_down = layers.ConvBN( + in_channels=num_channels, + out_channels=num_filters * 4, + kernel_size=1, + padding='same', + bias_attr=False) + + if self.has_se: + self.se = SELayer( + num_channels=num_filters * 4, + num_filters=num_filters * 4, + reduction_ratio=16, + name=name + '_fc') + + def forward(self, x): + residual = x + conv1 = self.conv1(x) + conv2 = self.conv2(conv1) + conv3 = self.conv3(conv2) + + if self.downsample: + residual = self.conv_down(x) + + if self.has_se: + conv3 = self.se(conv3) + + y = conv3 + residual + y = F.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride=1, + has_se=False, + downsample=False, + name=None): + super(BasicBlock, self).__init__() + + self.has_se = has_se + self.downsample = downsample + + self.conv1 = layers.ConvBNReLU( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=3, + stride=stride, + padding='same', + bias_attr=False) + self.conv2 = layers.ConvBN( + in_channels=num_filters, + out_channels=num_filters, + kernel_size=3, + padding='same', + bias_attr=False) + + if self.downsample: + self.conv_down = layers.ConvBNReLU( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=1, + padding='same', + bias_attr=False) + + if self.has_se: + self.se = SELayer( + num_channels=num_filters, + num_filters=num_filters, + reduction_ratio=16, + name=name + '_fc') + + def forward(self, x): + residual = x + conv1 = self.conv1(x) + conv2 = self.conv2(conv1) + + if self.downsample: + residual = self.conv_down(x) + + if self.has_se: + conv2 = self.se(conv2) + + y = conv2 + residual + y = F.relu(y) + return y + + +class SELayer(nn.Layer): + def __init__(self, num_channels, num_filters, reduction_ratio, name=None): + super(SELayer, self).__init__() + + self.pool2d_gap = nn.AdaptiveAvgPool2D(1) + + 
self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.squeeze = nn.Linear( + num_channels, + med_ch, + weight_attr=paddle.ParamAttr( + initializer=nn.initializer.Uniform(-stdv, stdv))) + + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = nn.Linear( + med_ch, + num_filters, + weight_attr=paddle.ParamAttr( + initializer=nn.initializer.Uniform(-stdv, stdv))) + + def forward(self, x): + pool = self.pool2d_gap(x) + pool = paddle.reshape(pool, shape=[-1, self._num_channels]) + squeeze = self.squeeze(pool) + squeeze = F.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = F.sigmoid(excitation) + excitation = paddle.reshape( + excitation, shape=[-1, self._num_channels, 1, 1]) + out = x * excitation + return out + + +class Stage(nn.Layer): + def __init__(self, + num_channels, + num_modules, + num_blocks, + num_filters, + has_se=False, + multi_scale_output=True, + name=None, + align_corners=False): + super(Stage, self).__init__() + + self._num_modules = num_modules + + self.stage_func_list = [] + for i in range(num_modules): + if i == num_modules - 1 and not multi_scale_output: + stage_func = self.add_sublayer( + "stage_{}_{}".format(name, i + 1), + HighResolutionModule( + num_channels=num_channels, + num_blocks=num_blocks, + num_filters=num_filters, + has_se=has_se, + multi_scale_output=False, + name=name + '_' + str(i + 1), + align_corners=align_corners)) + else: + stage_func = self.add_sublayer( + "stage_{}_{}".format(name, i + 1), + HighResolutionModule( + num_channels=num_channels, + num_blocks=num_blocks, + num_filters=num_filters, + has_se=has_se, + name=name + '_' + str(i + 1), + align_corners=align_corners)) + + self.stage_func_list.append(stage_func) + + def forward(self, x): + out = x + for idx in range(self._num_modules): + out = self.stage_func_list[idx](out) + return out + + +class HighResolutionModule(nn.Layer): + def __init__(self, + num_channels, + num_blocks, + num_filters, + has_se=False, + multi_scale_output=True, + name=None, + align_corners=False): + super(HighResolutionModule, self).__init__() + + self.branches_func = Branches( + num_blocks=num_blocks, + in_channels=num_channels, + out_channels=num_filters, + has_se=has_se, + name=name) + + self.fuse_func = FuseLayers( + in_channels=num_filters, + out_channels=num_filters, + multi_scale_output=multi_scale_output, + name=name, + align_corners=align_corners) + + def forward(self, x): + out = self.branches_func(x) + out = self.fuse_func(out) + return out + + +class FuseLayers(nn.Layer): + def __init__(self, + in_channels, + out_channels, + multi_scale_output=True, + name=None, + align_corners=False): + super(FuseLayers, self).__init__() + + self._actual_ch = len(in_channels) if multi_scale_output else 1 + self._in_channels = in_channels + self.align_corners = align_corners + + self.residual_func_list = [] + for i in range(self._actual_ch): + for j in range(len(in_channels)): + if j > i: + residual_func = self.add_sublayer( + "residual_{}_layer_{}_{}".format(name, i + 1, j + 1), + layers.ConvBN( + in_channels=in_channels[j], + out_channels=out_channels[i], + kernel_size=1, + padding='same', + bias_attr=False)) + self.residual_func_list.append(residual_func) + elif j < i: + pre_num_filters = in_channels[j] + for k in range(i - j): + if k == i - j - 1: + residual_func = self.add_sublayer( + "residual_{}_layer_{}_{}_{}".format( + name, i + 1, j + 1, k + 1), + layers.ConvBN( + in_channels=pre_num_filters, + 
out_channels=out_channels[i], + kernel_size=3, + stride=2, + padding='same', + bias_attr=False)) + pre_num_filters = out_channels[i] + else: + residual_func = self.add_sublayer( + "residual_{}_layer_{}_{}_{}".format( + name, i + 1, j + 1, k + 1), + layers.ConvBNReLU( + in_channels=pre_num_filters, + out_channels=out_channels[j], + kernel_size=3, + stride=2, + padding='same', + bias_attr=False)) + pre_num_filters = out_channels[j] + self.residual_func_list.append(residual_func) + + def forward(self, x): + outs = [] + residual_func_idx = 0 + for i in range(self._actual_ch): + residual = x[i] + residual_shape = residual.shape[-2:] + for j in range(len(self._in_channels)): + if j > i: + y = self.residual_func_list[residual_func_idx](x[j]) + residual_func_idx += 1 + + y = F.interpolate( + y, + residual_shape, + mode='bilinear', + align_corners=self.align_corners) + residual = residual + y + elif j < i: + y = x[j] + for k in range(i - j): + y = self.residual_func_list[residual_func_idx](y) + residual_func_idx += 1 + + residual = residual + y + + residual = F.relu(residual) + outs.append(residual) + + return outs + + +@manager.BACKBONES.add_component +def HRNet_W18_Small_V1(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[1], + stage1_num_channels=[32], + stage2_num_modules=1, + stage2_num_blocks=[2, 2], + stage2_num_channels=[16, 32], + stage3_num_modules=1, + stage3_num_blocks=[2, 2, 2], + stage3_num_channels=[16, 32, 64], + stage4_num_modules=1, + stage4_num_blocks=[2, 2, 2, 2], + stage4_num_channels=[16, 32, 64, 128], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W18_Small_V2(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[2], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[2, 2], + stage2_num_channels=[18, 36], + stage3_num_modules=3, + stage3_num_blocks=[2, 2, 2], + stage3_num_channels=[18, 36, 72], + stage4_num_modules=2, + stage4_num_blocks=[2, 2, 2, 2], + stage4_num_channels=[18, 36, 72, 144], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W18(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[18, 36], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[18, 36, 72], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[18, 36, 72, 144], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W30(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[30, 60], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[30, 60, 120], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[30, 60, 120, 240], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W32(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[32, 64], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[32, 64, 128], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[32, 64, 128, 256], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W40(**kwargs): + model = HRNet( + stage1_num_modules=1, + 
stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[40, 80], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[40, 80, 160], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[40, 80, 160, 320], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W44(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[44, 88], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[44, 88, 176], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[44, 88, 176, 352], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W48(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[48, 96], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[48, 96, 192], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[48, 96, 192, 384], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W60(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[60, 120], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[60, 120, 240], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[60, 120, 240, 480], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W64(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[64, 128], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[64, 128, 256], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[64, 128, 256, 512], + **kwargs) + return model diff --git a/contrib/PanopticDeepLab/paddleseg/models/backbones/mobilenetv3.py b/contrib/PanopticDeepLab/paddleseg/models/backbones/mobilenetv3.py new file mode 100644 index 0000000000..7a83f04c00 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/backbones/mobilenetv3.py @@ -0,0 +1,364 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
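
With hrnet.py complete, here is a minimal smoke-test sketch for the HRNet backbone defined above. It is not part of the patch: it assumes PaddlePaddle is installed and that this contrib package is importable as paddleseg; the printed shapes are illustrative.

    # Hypothetical smoke test for the HRNet backbone (assumes paddle is
    # installed and paddleseg is on PYTHONPATH).
    import paddle
    from paddleseg.models.backbones import HRNet_W18

    net = HRNet_W18()
    x = paddle.randn([1, 3, 512, 1024])
    feat, = net(x)  # the backbone returns a single fused multi-resolution map
    print(net.feat_channels)  # [270] == sum of (18, 36, 72, 144)
    print(feat.shape)         # [1, 270, 128, 256], i.e. 1/4 of the input size
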
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+from paddleseg.utils import utils
+from paddleseg.models import layers
+
+__all__ = [
+    "MobileNetV3_small_x0_35", "MobileNetV3_small_x0_5",
+    "MobileNetV3_small_x0_75", "MobileNetV3_small_x1_0",
+    "MobileNetV3_small_x1_25", "MobileNetV3_large_x0_35",
+    "MobileNetV3_large_x0_5", "MobileNetV3_large_x0_75",
+    "MobileNetV3_large_x1_0", "MobileNetV3_large_x1_25"
+]
+
+
+def make_divisible(v, divisor=8, min_value=None):
+    if min_value is None:
+        min_value = divisor
+    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+    if new_v < 0.9 * v:
+        new_v += divisor
+    return new_v
+
+
+class MobileNetV3(nn.Layer):
+    """
+    The MobileNetV3 implementation based on PaddlePaddle.
+
+    The original article refers to
+    Andrew Howard, et al. "Searching for MobileNetV3"
+    (https://arxiv.org/pdf/1905.02244.pdf).
+
+    Args:
+        pretrained (str, optional): The path of pretrained model.
+        scale (float, optional): The scale of channels. Default: 1.0.
+        model_name (str, optional): Model name. It determines the type of MobileNetV3. The value is 'small' or 'large'. Default: 'small'.
+        output_stride (int, optional): The stride of output features compared to input images. The value should be one of (2, 4, 8, 16, 32). Default: None.
+
+    """
+
+    def __init__(self,
+                 pretrained=None,
+                 scale=1.0,
+                 model_name="small",
+                 output_stride=None):
+        super(MobileNetV3, self).__init__()
+
+        inplanes = 16
+        if model_name == "large":
+            self.cfg = [
+                # k, exp, c, se, nl, s,
+                [3, 16, 16, False, "relu", 1],
+                [3, 64, 24, False, "relu", 2],
+                [3, 72, 24, False, "relu", 1],  # output 1 -> out_index=2
+                [5, 72, 40, True, "relu", 2],
+                [5, 120, 40, True, "relu", 1],
+                [5, 120, 40, True, "relu", 1],  # output 2 -> out_index=5
+                [3, 240, 80, False, "hard_swish", 2],
+                [3, 200, 80, False, "hard_swish", 1],
+                [3, 184, 80, False, "hard_swish", 1],
+                [3, 184, 80, False, "hard_swish", 1],
+                [3, 480, 112, True, "hard_swish", 1],
+                [3, 672, 112, True, "hard_swish",
+                 1],  # output 3 -> out_index=11
+                [5, 672, 160, True, "hard_swish", 2],
+                [5, 960, 160, True, "hard_swish", 1],
+                [5, 960, 160, True, "hard_swish",
+                 1],  # output 4 -> out_index=14
+            ]
+            self.out_indices = [2, 5, 11, 14]
+            self.feat_channels = [
+                make_divisible(i * scale) for i in [24, 40, 112, 160]
+            ]
+
+            self.cls_ch_squeeze = 960
+            self.cls_ch_expand = 1280
+        elif model_name == "small":
+            self.cfg = [
+                # k, exp, c, se, nl, s,
+                [3, 16, 16, True, "relu", 2],  # output 1 -> out_index=0
+                [3, 72, 24, False, "relu", 2],
+                [3, 88, 24, False, "relu", 1],  # output 2 -> out_index=3
+                [5, 96, 40, True, "hard_swish", 2],
+                [5, 240, 40, True, "hard_swish", 1],
+                [5, 240, 40, True, "hard_swish", 1],
+                [5, 120, 48, True, "hard_swish", 1],
+                [5, 144, 48, True, "hard_swish", 1],  # output 3 -> out_index=7
+                [5, 288, 96, True, "hard_swish", 2],
+                [5, 576, 96, True, "hard_swish", 1],
+                [5, 576, 96, True, "hard_swish", 1],  # output 4 -> out_index=10
+            ]
+            self.out_indices = [0, 3, 7, 10]
+            self.feat_channels = [
+                make_divisible(i * scale) for i in [16, 24, 48, 96]
+            ]
+
+            self.cls_ch_squeeze = 576
+            self.cls_ch_expand = 1280
+        else:
+            raise NotImplementedError(
+                "mode[{}_model] is not implemented!".format(model_name))
+
+        ###################################################
+        # modify stride and dilation based on output_stride
+        self.dilation_cfg = [1] * len(self.cfg)
+        self.modify_bottle_params(output_stride=output_stride)
+
################################################### + + self.conv1 = ConvBNLayer( + in_c=3, + out_c=make_divisible(inplanes * scale), + filter_size=3, + stride=2, + padding=1, + num_groups=1, + if_act=True, + act="hard_swish") + + self.block_list = [] + + inplanes = make_divisible(inplanes * scale) + for i, (k, exp, c, se, nl, s) in enumerate(self.cfg): + ###################################### + # add dilation rate + dilation_rate = self.dilation_cfg[i] + ###################################### + self.block_list.append( + ResidualUnit( + in_c=inplanes, + mid_c=make_divisible(scale * exp), + out_c=make_divisible(scale * c), + filter_size=k, + stride=s, + dilation=dilation_rate, + use_se=se, + act=nl, + name="conv" + str(i + 2))) + self.add_sublayer( + sublayer=self.block_list[-1], name="conv" + str(i + 2)) + inplanes = make_divisible(scale * c) + + self.pretrained = pretrained + self.init_weight() + + def modify_bottle_params(self, output_stride=None): + + if output_stride is not None and output_stride % 2 != 0: + raise ValueError("output stride must to be even number") + if output_stride is not None: + stride = 2 + rate = 1 + for i, _cfg in enumerate(self.cfg): + stride = stride * _cfg[-1] + if stride > output_stride: + rate = rate * _cfg[-1] + self.cfg[i][-1] = 1 + + self.dilation_cfg[i] = rate + + def forward(self, inputs, label=None): + x = self.conv1(inputs) + # A feature list saves each downsampling feature. + feat_list = [] + for i, block in enumerate(self.block_list): + x = block(x) + if i in self.out_indices: + feat_list.append(x) + + return feat_list + + def init_weight(self): + if self.pretrained is not None: + utils.load_pretrained_model(self, self.pretrained) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + in_c, + out_c, + filter_size, + stride, + padding, + dilation=1, + num_groups=1, + if_act=True, + act=None): + super(ConvBNLayer, self).__init__() + self.if_act = if_act + self.act = act + + self.conv = nn.Conv2D( + in_channels=in_c, + out_channels=out_c, + kernel_size=filter_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=num_groups, + bias_attr=False) + self.bn = layers.SyncBatchNorm( + num_features=out_c, + weight_attr=paddle.ParamAttr( + regularizer=paddle.regularizer.L2Decay(0.0)), + bias_attr=paddle.ParamAttr( + regularizer=paddle.regularizer.L2Decay(0.0))) + self._act_op = layers.Activation(act='hardswish') + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + if self.if_act: + x = self._act_op(x) + return x + + +class ResidualUnit(nn.Layer): + def __init__(self, + in_c, + mid_c, + out_c, + filter_size, + stride, + use_se, + dilation=1, + act=None, + name=''): + super(ResidualUnit, self).__init__() + self.if_shortcut = stride == 1 and in_c == out_c + self.if_se = use_se + + self.expand_conv = ConvBNLayer( + in_c=in_c, + out_c=mid_c, + filter_size=1, + stride=1, + padding=0, + if_act=True, + act=act) + + self.bottleneck_conv = ConvBNLayer( + in_c=mid_c, + out_c=mid_c, + filter_size=filter_size, + stride=stride, + padding='same', + dilation=dilation, + num_groups=mid_c, + if_act=True, + act=act) + if self.if_se: + self.mid_se = SEModule(mid_c, name=name + "_se") + self.linear_conv = ConvBNLayer( + in_c=mid_c, + out_c=out_c, + filter_size=1, + stride=1, + padding=0, + if_act=False, + act=None) + self.dilation = dilation + + def forward(self, inputs): + x = self.expand_conv(inputs) + x = self.bottleneck_conv(x) + if self.if_se: + x = self.mid_se(x) + x = self.linear_conv(x) + if self.if_shortcut: + x = inputs + x + return x + + 
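
The output_stride handling above is the subtle part of this backbone, so here is a standalone, purely illustrative re-statement of what modify_bottle_params does: once the cumulative stride would exceed output_stride, later stride-2 blocks are forced to stride 1 and the lost downsampling is folded into a growing dilation rate. The helper name is hypothetical; only the strides are modeled.

    # Hypothetical sketch of the stride-to-dilation rewrite performed by
    # modify_bottle_params (strides only; the real code edits self.cfg).
    def rewrite_strides(block_strides, output_stride):
        stride, rate, out = 2, 1, []  # stride starts at 2 because of the stem conv
        for s in block_strides:
            stride *= s
            if stride > output_stride:
                rate *= s
                out.append((1, rate))  # (new stride, dilation)
            else:
                out.append((s, 1))
        return out

    print(rewrite_strides([1, 2, 2, 2], 8))
    # [(1, 1), (2, 1), (2, 1), (1, 2)]: the last downsampling becomes dilation 2
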
+class SEModule(nn.Layer): + def __init__(self, channel, reduction=4, name=""): + super(SEModule, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2D(1) + self.conv1 = nn.Conv2D( + in_channels=channel, + out_channels=channel // reduction, + kernel_size=1, + stride=1, + padding=0) + self.conv2 = nn.Conv2D( + in_channels=channel // reduction, + out_channels=channel, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, inputs): + outputs = self.avg_pool(inputs) + outputs = self.conv1(outputs) + outputs = F.relu(outputs) + outputs = self.conv2(outputs) + outputs = F.hard_sigmoid(outputs) + return paddle.multiply(x=inputs, y=outputs, axis=0) + + +def MobileNetV3_small_x0_35(**kwargs): + model = MobileNetV3(model_name="small", scale=0.35, **kwargs) + return model + + +def MobileNetV3_small_x0_5(**kwargs): + model = MobileNetV3(model_name="small", scale=0.5, **kwargs) + return model + + +def MobileNetV3_small_x0_75(**kwargs): + model = MobileNetV3(model_name="small", scale=0.75, **kwargs) + return model + + +@manager.BACKBONES.add_component +def MobileNetV3_small_x1_0(**kwargs): + model = MobileNetV3(model_name="small", scale=1.0, **kwargs) + return model + + +def MobileNetV3_small_x1_25(**kwargs): + model = MobileNetV3(model_name="small", scale=1.25, **kwargs) + return model + + +def MobileNetV3_large_x0_35(**kwargs): + model = MobileNetV3(model_name="large", scale=0.35, **kwargs) + return model + + +def MobileNetV3_large_x0_5(**kwargs): + model = MobileNetV3(model_name="large", scale=0.5, **kwargs) + return model + + +def MobileNetV3_large_x0_75(**kwargs): + model = MobileNetV3(model_name="large", scale=0.75, **kwargs) + return model + + +@manager.BACKBONES.add_component +def MobileNetV3_large_x1_0(**kwargs): + model = MobileNetV3(model_name="large", scale=1.0, **kwargs) + return model + + +def MobileNetV3_large_x1_25(**kwargs): + model = MobileNetV3(model_name="large", scale=1.25, **kwargs) + return model diff --git a/contrib/PanopticDeepLab/paddleseg/models/backbones/resnet_vd.py b/contrib/PanopticDeepLab/paddleseg/models/backbones/resnet_vd.py new file mode 100644 index 0000000000..068a7e2b00 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/backbones/resnet_vd.py @@ -0,0 +1,361 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
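
With mobilenetv3.py complete, a hedged usage sketch for the backbone above (assumes a working PaddlePaddle install; shapes are for a 512x512 input and scale=1.0):

    # Hypothetical usage sketch, not part of the patch.
    import paddle
    from paddleseg.models.backbones import MobileNetV3_large_x1_0

    net = MobileNetV3_large_x1_0(output_stride=16)
    feats = net(paddle.randn([1, 3, 512, 512]))
    print(net.feat_channels)         # [24, 40, 112, 160] at scale=1.0
    print([f.shape for f in feats])  # four maps at strides 4, 8, 16 and 16
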
+ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddleseg.cvlibs import manager +from paddleseg.models import layers +from paddleseg.utils import utils + +__all__ = [ + "ResNet18_vd", "ResNet34_vd", "ResNet50_vd", "ResNet101_vd", "ResNet152_vd" +] + + +class ConvBNLayer(nn.Layer): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + dilation=1, + groups=1, + is_vd_mode=False, + act=None, + ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = nn.AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2 if dilation == 1 else 0, + dilation=dilation, + groups=groups, + bias_attr=False) + + self._batch_norm = layers.SyncBatchNorm(out_channels) + self._act_op = layers.Activation(act=act) + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + y = self._act_op(y) + + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + dilation=1): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + act='relu') + + self.dilation = dilation + + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + dilation=dilation) + self.conv2 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels * 4, + kernel_size=1, + act=None) + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels * 4, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first or stride == 1 else True) + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + + #################################################################### + # If given dilation rate > 1, using corresponding padding. + # The performance drops down without the follow padding. + if self.dilation > 1: + padding = self.dilation + y = F.pad(y, [padding, padding, padding, padding]) + ##################################################################### + + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu') + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + act=None) + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first else True) + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=conv1) + y = F.relu(y) + + return y + + +class ResNet_vd(nn.Layer): + """ + The ResNet_vd implementation based on PaddlePaddle. 
+
+    The original article refers to
+    Tong He, et al. "Bag of Tricks for Image Classification with Convolutional Neural Networks"
+    (https://arxiv.org/pdf/1812.01187.pdf).
+
+    Args:
+        layers (int, optional): The layers of ResNet_vd. The supported layers are (18, 34, 50, 101, 152, 200). Default: 50.
+        output_stride (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 8.
+        multi_grid (tuple|list, optional): The grid of stage4. Default: (1, 1, 1).
+        pretrained (str, optional): The path of pretrained model.
+
+    """
+
+    def __init__(self,
+                 layers=50,
+                 output_stride=8,
+                 multi_grid=(1, 1, 1),
+                 pretrained=None):
+        super(ResNet_vd, self).__init__()
+
+        self.conv1_logit = None  # for gscnn shape stream
+        self.layers = layers
+        supported_layers = [18, 34, 50, 101, 152, 200]
+        assert layers in supported_layers, \
+            "supported layers are {} but input layer is {}".format(
+                supported_layers, layers)
+
+        if layers == 18:
+            depth = [2, 2, 2, 2]
+        elif layers == 34 or layers == 50:
+            depth = [3, 4, 6, 3]
+        elif layers == 101:
+            depth = [3, 4, 23, 3]
+        elif layers == 152:
+            depth = [3, 8, 36, 3]
+        elif layers == 200:
+            depth = [3, 12, 48, 3]
+        num_channels = [64, 256, 512, 1024
+                        ] if layers >= 50 else [64, 64, 128, 256]
+        num_filters = [64, 128, 256, 512]
+
+        # for channels of four returned stages
+        self.feat_channels = [c * 4 for c in num_filters
+                              ] if layers >= 50 else num_filters
+
+        dilation_dict = None
+        if output_stride == 8:
+            dilation_dict = {2: 2, 3: 4}
+        elif output_stride == 16:
+            dilation_dict = {3: 2}
+
+        self.conv1_1 = ConvBNLayer(
+            in_channels=3, out_channels=32, kernel_size=3, stride=2, act='relu')
+        self.conv1_2 = ConvBNLayer(
+            in_channels=32,
+            out_channels=32,
+            kernel_size=3,
+            stride=1,
+            act='relu')
+        self.conv1_3 = ConvBNLayer(
+            in_channels=32,
+            out_channels=64,
+            kernel_size=3,
+            stride=1,
+            act='relu')
+        self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
+
+        # self.block_list = []
+        self.stage_list = []
+        if layers >= 50:
+            for block in range(len(depth)):
+                shortcut = False
+                block_list = []
+                for i in range(depth[block]):
+                    if layers in [101, 152] and block == 2:
+                        if i == 0:
+                            conv_name = "res" + str(block + 2) + "a"
+                        else:
+                            conv_name = "res" + str(block + 2) + "b" + str(i)
+                    else:
+                        conv_name = "res" + str(block + 2) + chr(97 + i)
+
+                    ###############################################################################
+                    # Add dilation rate for some segmentation tasks, if dilation_dict is not None.
+
+ dilation_rate = dilation_dict[ + block] if dilation_dict and block in dilation_dict else 1 + + # Actually block here is 'stage', and i is 'block' in 'stage' + # At the stage 4, expand the the dilation_rate if given multi_grid + if block == 3: + dilation_rate = dilation_rate * multi_grid[i] + ############################################################################### + + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 + and dilation_rate == 1 else 1, + shortcut=shortcut, + if_first=block == i == 0, + dilation=dilation_rate)) + + block_list.append(bottleneck_block) + shortcut = True + self.stage_list.append(block_list) + else: + for block in range(len(depth)): + shortcut = False + block_list = [] + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BasicBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block], + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0)) + block_list.append(basic_block) + shortcut = True + self.stage_list.append(block_list) + + self.pretrained = pretrained + self.init_weight() + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + self.conv1_logit = y.clone() + y = self.pool2d_max(y) + + # A feature list saves the output feature map of each stage. + feat_list = [] + for stage in self.stage_list: + for block in stage: + y = block(y) + feat_list.append(y) + + return feat_list + + def init_weight(self): + utils.load_pretrained_model(self, self.pretrained) + + +@manager.BACKBONES.add_component +def ResNet18_vd(**args): + model = ResNet_vd(layers=18, **args) + return model + + +def ResNet34_vd(**args): + model = ResNet_vd(layers=34, **args) + return model + + +@manager.BACKBONES.add_component +def ResNet50_vd(**args): + model = ResNet_vd(layers=50, **args) + return model + + +@manager.BACKBONES.add_component +def ResNet101_vd(**args): + model = ResNet_vd(layers=101, **args) + return model + + +def ResNet152_vd(**args): + model = ResNet_vd(layers=152, **args) + return model + + +def ResNet200_vd(**args): + model = ResNet_vd(layers=200, **args) + return model diff --git a/contrib/PanopticDeepLab/paddleseg/models/backbones/xception_deeplab.py b/contrib/PanopticDeepLab/paddleseg/models/backbones/xception_deeplab.py new file mode 100644 index 0000000000..b83caec51d --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/backbones/xception_deeplab.py @@ -0,0 +1,415 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
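
With resnet_vd.py complete, a hedged sketch for the ResNet_vd backbone above (assumes PaddlePaddle is installed). With output_stride=8, stages 3 and 4 replace striding with dilation, so the last three returned feature maps all stay at 1/8 resolution:

    # Hypothetical smoke test, not part of the patch.
    import paddle
    from paddleseg.models.backbones import ResNet50_vd

    net = ResNet50_vd(output_stride=8)  # pretrained=None: trains from scratch
    feats = net(paddle.randn([1, 3, 512, 512]))
    print(net.feat_channels)                # [256, 512, 1024, 2048]
    print([tuple(f.shape) for f in feats])  # strides 4, 8, 8, 8
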
+ +import paddle.nn as nn +import paddle.nn.functional as F + +from paddleseg.cvlibs import manager +from paddleseg.utils import utils +from paddleseg.models import layers + +__all__ = ["Xception41_deeplab", "Xception65_deeplab", "Xception71_deeplab"] + + +def check_data(data, number): + if type(data) == int: + return [data] * number + assert len(data) == number + return data + + +def check_stride(s, os): + if s <= os: + return True + else: + return False + + +def check_points(count, points): + if points is None: + return False + else: + if isinstance(points, list): + return (True if count in points else False) + else: + return (True if count == points else False) + + +def gen_bottleneck_params(backbone='xception_65'): + if backbone == 'xception_65': + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + elif backbone == 'xception_41': + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (8, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + elif backbone == 'xception_71': + bottleneck_params = { + "entry_flow": (5, [2, 1, 2, 1, 2], [128, 256, 256, 728, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + else: + raise ValueError( + "Xception backbont only support xception_41/xception_65/xception_71" + ) + return bottleneck_params + + +class ConvBNLayer(nn.Layer): + def __init__(self, + input_channels, + output_channels, + filter_size, + stride=1, + padding=0, + act=None, + name=None): + super(ConvBNLayer, self).__init__() + + self._conv = nn.Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=filter_size, + stride=stride, + padding=padding, + bias_attr=False) + self._bn = layers.SyncBatchNorm( + num_features=output_channels, epsilon=1e-3, momentum=0.99) + + self._act_op = layers.Activation(act=act) + + def forward(self, inputs): + return self._act_op(self._bn(self._conv(inputs))) + + +class Seperate_Conv(nn.Layer): + def __init__(self, + input_channels, + output_channels, + stride, + filter, + dilation=1, + act=None, + name=None): + super(Seperate_Conv, self).__init__() + + self._conv1 = nn.Conv2D( + in_channels=input_channels, + out_channels=input_channels, + kernel_size=filter, + stride=stride, + groups=input_channels, + padding=(filter) // 2 * dilation, + dilation=dilation, + bias_attr=False) + self._bn1 = layers.SyncBatchNorm( + input_channels, epsilon=1e-3, momentum=0.99) + + self._act_op1 = layers.Activation(act=act) + + self._conv2 = nn.Conv2D( + input_channels, + output_channels, + 1, + stride=1, + groups=1, + padding=0, + bias_attr=False) + self._bn2 = layers.SyncBatchNorm( + output_channels, epsilon=1e-3, momentum=0.99) + + self._act_op2 = layers.Activation(act=act) + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._bn1(x) + x = self._act_op1(x) + x = self._conv2(x) + x = self._bn2(x) + x = self._act_op2(x) + return x + + +class Xception_Block(nn.Layer): + def __init__(self, + input_channels, + output_channels, + strides=1, + filter_size=3, + dilation=1, + skip_conv=True, + has_skip=True, + activation_fn_in_separable_conv=False, + name=None): + super(Xception_Block, self).__init__() + + repeat_number = 3 + output_channels = check_data(output_channels, repeat_number) + filter_size = check_data(filter_size, repeat_number) + strides = check_data(strides, repeat_number) + + self.has_skip 
= has_skip + self.skip_conv = skip_conv + self.activation_fn_in_separable_conv = activation_fn_in_separable_conv + if not activation_fn_in_separable_conv: + self._conv1 = Seperate_Conv( + input_channels, + output_channels[0], + stride=strides[0], + filter=filter_size[0], + dilation=dilation, + name=name + "/separable_conv1") + self._conv2 = Seperate_Conv( + output_channels[0], + output_channels[1], + stride=strides[1], + filter=filter_size[1], + dilation=dilation, + name=name + "/separable_conv2") + self._conv3 = Seperate_Conv( + output_channels[1], + output_channels[2], + stride=strides[2], + filter=filter_size[2], + dilation=dilation, + name=name + "/separable_conv3") + else: + self._conv1 = Seperate_Conv( + input_channels, + output_channels[0], + stride=strides[0], + filter=filter_size[0], + act="relu", + dilation=dilation, + name=name + "/separable_conv1") + self._conv2 = Seperate_Conv( + output_channels[0], + output_channels[1], + stride=strides[1], + filter=filter_size[1], + act="relu", + dilation=dilation, + name=name + "/separable_conv2") + self._conv3 = Seperate_Conv( + output_channels[1], + output_channels[2], + stride=strides[2], + filter=filter_size[2], + act="relu", + dilation=dilation, + name=name + "/separable_conv3") + + if has_skip and skip_conv: + self._short = ConvBNLayer( + input_channels, + output_channels[-1], + 1, + stride=strides[-1], + padding=0, + name=name + "/shortcut") + + def forward(self, inputs): + if not self.activation_fn_in_separable_conv: + x = F.relu(inputs) + x = self._conv1(x) + x = F.relu(x) + x = self._conv2(x) + x = F.relu(x) + x = self._conv3(x) + else: + x = self._conv1(inputs) + x = self._conv2(x) + x = self._conv3(x) + if self.has_skip is False: + return x + if self.skip_conv: + skip = self._short(inputs) + else: + skip = inputs + return x + skip + + +class XceptionDeeplab(nn.Layer): + """ + The Xception backobne of DeepLabv3+ implementation based on PaddlePaddle. + + The original article refers to + Liang-Chieh Chen, et, al. "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" + (https://arxiv.org/abs/1802.02611) + + Args: + backbone (str): Which type of Xception_DeepLab to select. It should be one of ('xception_41', 'xception_65', 'xception_71'). + pretrained (str, optional): The path of pretrained model. + output_stride (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 16. 
+ + """ + + def __init__(self, backbone, pretrained=None, output_stride=16): + + super(XceptionDeeplab, self).__init__() + + bottleneck_params = gen_bottleneck_params(backbone) + self.backbone = backbone + self.feat_channels = [128, 2048] + + self._conv1 = ConvBNLayer( + 3, + 32, + 3, + stride=2, + padding=1, + act="relu", + name=self.backbone + "/entry_flow/conv1") + self._conv2 = ConvBNLayer( + 32, + 64, + 3, + stride=1, + padding=1, + act="relu", + name=self.backbone + "/entry_flow/conv2") + """ + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) + } + + if output_stride == 16: + entry_block3_stride = 2 + middle_block_dilation = 1 + exit_block_dilations = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + middle_block_dilation = 2 + exit_block_dilations = (2, 4) + + """ + self.block_num = bottleneck_params["entry_flow"][0] + self.strides = bottleneck_params["entry_flow"][1] + self.chns = bottleneck_params["entry_flow"][2] + self.strides = check_data(self.strides, self.block_num) + self.chns = check_data(self.chns, self.block_num) + + self.entry_flow = [] + self.middle_flow = [] + + self.stride = 2 + self.output_stride = output_stride + s = self.stride + + for i in range(self.block_num): + stride = self.strides[i] if check_stride(s * self.strides[i], + self.output_stride) else 1 + xception_block = self.add_sublayer( + self.backbone + "/entry_flow/block" + str(i + 1), + Xception_Block( + input_channels=64 if i == 0 else self.chns[i - 1], + output_channels=self.chns[i], + strides=[1, 1, self.stride], + name=self.backbone + "/entry_flow/block" + str(i + 1))) + self.entry_flow.append(xception_block) + s = s * stride + self.stride = s + + self.block_num = bottleneck_params["middle_flow"][0] + self.strides = bottleneck_params["middle_flow"][1] + self.chns = bottleneck_params["middle_flow"][2] + self.strides = check_data(self.strides, self.block_num) + self.chns = check_data(self.chns, self.block_num) + s = self.stride + + for i in range(self.block_num): + stride = self.strides[i] if check_stride(s * self.strides[i], + self.output_stride) else 1 + xception_block = self.add_sublayer( + self.backbone + "/middle_flow/block" + str(i + 1), + Xception_Block( + input_channels=728, + output_channels=728, + strides=[1, 1, self.strides[i]], + skip_conv=False, + name=self.backbone + "/middle_flow/block" + str(i + 1))) + self.middle_flow.append(xception_block) + s = s * stride + self.stride = s + + self.block_num = bottleneck_params["exit_flow"][0] + self.strides = bottleneck_params["exit_flow"][1] + self.chns = bottleneck_params["exit_flow"][2] + self.strides = check_data(self.strides, self.block_num) + self.chns = check_data(self.chns, self.block_num) + s = self.stride + stride = self.strides[0] if check_stride(s * self.strides[0], + self.output_stride) else 1 + self._exit_flow_1 = Xception_Block( + 728, + self.chns[0], [1, 1, stride], + name=self.backbone + "/exit_flow/block1") + s = s * stride + stride = self.strides[1] if check_stride(s * self.strides[1], + self.output_stride) else 1 + self._exit_flow_2 = Xception_Block( + self.chns[0][-1], + self.chns[1], [1, 1, stride], + dilation=2, + has_skip=False, + activation_fn_in_separable_conv=True, + name=self.backbone + "/exit_flow/block2") + + self.pretrained = pretrained + self.init_weight() + + def forward(self, inputs): + x = self._conv1(inputs) + x = self._conv2(x) + feat_list = [] + for i, ef in 
enumerate(self.entry_flow): + x = ef(x) + if i == 0: + feat_list.append(x) + for mf in self.middle_flow: + x = mf(x) + x = self._exit_flow_1(x) + x = self._exit_flow_2(x) + feat_list.append(x) + return feat_list + + def init_weight(self): + if self.pretrained is not None: + utils.load_pretrained_model(self, self.pretrained) + + +@manager.BACKBONES.add_component +def Xception41_deeplab(**args): + model = XceptionDeeplab('xception_41', **args) + return model + + +@manager.BACKBONES.add_component +def Xception65_deeplab(**args): + model = XceptionDeeplab("xception_65", **args) + return model + + +@manager.BACKBONES.add_component +def Xception71_deeplab(**args): + model = XceptionDeeplab("xception_71", **args) + return model diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/__init__.py b/contrib/PanopticDeepLab/paddleseg/models/layers/__init__.py new file mode 100644 index 0000000000..86ec36c08d --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/layers/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .layer_libs import ConvBNReLU, ConvBN, SeparableConvBNReLU, DepthwiseConvBN, AuxLayer, SyncBatchNorm +from .activation import Activation +from .pyramid_pool import ASPPModule, PPModule +from .attention import AttentionBlock +from .nonlocal2d import NonLocal2D diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/activation.py b/contrib/PanopticDeepLab/paddleseg/models/layers/activation.py new file mode 100644 index 0000000000..89b6cf0e81 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/layers/activation.py @@ -0,0 +1,73 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.nn as nn + + +class Activation(nn.Layer): + """ + The wrapper of activations. + + Args: + act (str, optional): The activation name in lowercase. It must be one of ['elu', 'gelu', + 'hardshrink', 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', + 'softmax', 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', + 'hsigmoid']. Default: None, means identical transformation. + + Returns: + A callable object of Activation. + + Raises: + KeyError: When parameter `act` is not in the optional range. 
+
+    Examples:
+
+        from paddleseg.models.common.activation import Activation
+
+        relu = Activation("relu")
+        print(relu)
+        # <class 'paddle.nn.layer.activation.ReLU'>
+
+        sigmoid = Activation("sigmoid")
+        print(sigmoid)
+        # <class 'paddle.nn.layer.activation.Sigmoid'>
+
+        not_exit_one = Activation("not_exit_one")
+        # KeyError: "not_exit_one does not exist in the current dict_keys(['elu', 'gelu', 'hardshrink',
+        # 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', 'softmax',
+        # 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', 'hsigmoid'])"
+    """
+
+    def __init__(self, act=None):
+        super(Activation, self).__init__()
+
+        self._act = act
+        upper_act_names = nn.layer.activation.__all__
+        lower_act_names = [act.lower() for act in upper_act_names]
+        act_dict = dict(zip(lower_act_names, upper_act_names))
+
+        if act is not None:
+            if act in act_dict.keys():
+                act_name = act_dict[act]
+                self.act_func = eval(
+                    "nn.layer.activation.{}()".format(act_name))
+            else:
+                raise KeyError("{} does not exist in the current {}".format(
+                    act, act_dict.keys()))
+
+    def forward(self, x):
+        if self._act is not None:
+            return self.act_func(x)
+        else:
+            return x
diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/attention.py b/contrib/PanopticDeepLab/paddleseg/models/layers/attention.py
new file mode 100644
index 0000000000..f4be94f608
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/models/layers/attention.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from paddleseg.models import layers
+
+
+class AttentionBlock(nn.Layer):
+    """General self-attention block/non-local block.
+
+    The original article refers to https://arxiv.org/abs/1706.03762.
+    Args:
+        key_in_channels (int): Input channels of key feature.
+        query_in_channels (int): Input channels of query feature.
+        channels (int): Output channels of key/query transform.
+        out_channels (int): Output channels.
+        share_key_query (bool): Whether to share projection weight between key
+            and query projection.
+        query_downsample (nn.Layer): Query downsample module.
+        key_downsample (nn.Layer): Key downsample module.
+        key_query_num_convs (int): Number of convs for key/query projection.
+        value_out_num_convs (int): Number of convs for value projection.
+        key_query_norm (bool): Whether to use BN for key/query projection.
+        value_out_norm (bool): Whether to use BN for value projection.
+        matmul_norm (bool): Whether to normalize the attention map by the
+            square root of channels.
+        with_out (bool): Whether to use out projection.
+ """ + + def __init__(self, key_in_channels, query_in_channels, channels, + out_channels, share_key_query, query_downsample, + key_downsample, key_query_num_convs, value_out_num_convs, + key_query_norm, value_out_norm, matmul_norm, with_out): + super(AttentionBlock, self).__init__() + if share_key_query: + assert key_in_channels == query_in_channels + self.key_in_channels = key_in_channels + self.query_in_channels = query_in_channels + self.out_channels = out_channels + self.channels = channels + self.share_key_query = share_key_query + self.key_project = self.build_project( + key_in_channels, + channels, + num_convs=key_query_num_convs, + use_conv_module=key_query_norm) + if share_key_query: + self.query_project = self.key_project + else: + self.query_project = self.build_project( + query_in_channels, + channels, + num_convs=key_query_num_convs, + use_conv_module=key_query_norm) + + self.value_project = self.build_project( + key_in_channels, + channels if with_out else out_channels, + num_convs=value_out_num_convs, + use_conv_module=value_out_norm) + + if with_out: + self.out_project = self.build_project( + channels, + out_channels, + num_convs=value_out_num_convs, + use_conv_module=value_out_norm) + else: + self.out_project = None + + self.query_downsample = query_downsample + self.key_downsample = key_downsample + self.matmul_norm = matmul_norm + + def build_project(self, in_channels, channels, num_convs, use_conv_module): + if use_conv_module: + convs = [ + layers.ConvBNReLU( + in_channels=in_channels, + out_channels=channels, + kernel_size=1, + bias_attr=False) + ] + for _ in range(num_convs - 1): + convs.append( + layers.ConvBNReLU( + in_channels=channels, + out_channels=channels, + kernel_size=1, + bias_attr=False)) + else: + convs = [nn.Conv2D(in_channels, channels, 1)] + for _ in range(num_convs - 1): + convs.append(nn.Conv2D(channels, channels, 1)) + + if len(convs) > 1: + convs = nn.Sequential(*convs) + else: + convs = convs[0] + return convs + + def forward(self, query_feats, key_feats): + b, c, h, w = query_feats.shape + query = self.query_project(query_feats) + if self.query_downsample is not None: + query = self.query_downsample(query) + query = query.reshape([*query.shape[:2], -1]).transpose([0, 2, 1]) + + key = self.key_project(key_feats) + value = self.value_project(key_feats) + + if self.key_downsample is not None: + key = self.key_downsample(key) + value = self.key_downsample(value) + + key = key.reshape([*key.shape[:2], -1]) + value = value.reshape([*value.shape[:2], -1]).transpose([0, 2, 1]) + sim_map = paddle.matmul(query, key) + if self.matmul_norm: + sim_map = (self.channels**-0.5) * sim_map + sim_map = F.softmax(sim_map, axis=-1) + + context = paddle.matmul(sim_map, value) + context = paddle.transpose(context, [0, 2, 1]) + context = paddle.reshape(context, [b, -1, *query_feats.shape[2:]]) + + if self.out_project is not None: + context = self.out_project(context) + return context diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/layer_libs.py b/contrib/PanopticDeepLab/paddleseg/models/layers/layer_libs.py new file mode 100644 index 0000000000..2b4845d10d --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/layers/layer_libs.py @@ -0,0 +1,165 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+
+def SyncBatchNorm(*args, **kwargs):
+    """In a CPU environment nn.SyncBatchNorm has no kernel, so nn.BatchNorm2D is used instead."""
+    if paddle.get_device() == 'cpu':
+        return nn.BatchNorm2D(*args, **kwargs)
+    else:
+        return nn.SyncBatchNorm(*args, **kwargs)
+
+
+class ConvBNReLU(nn.Layer):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 padding='same',
+                 **kwargs):
+        super().__init__()
+
+        self._conv = nn.Conv2D(
+            in_channels, out_channels, kernel_size, padding=padding, **kwargs)
+
+        self._batch_norm = SyncBatchNorm(out_channels)
+
+    def forward(self, x):
+        x = self._conv(x)
+        x = self._batch_norm(x)
+        x = F.relu(x)
+        return x
+
+
+class ConvBN(nn.Layer):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 padding='same',
+                 **kwargs):
+        super().__init__()
+        self._conv = nn.Conv2D(
+            in_channels, out_channels, kernel_size, padding=padding, **kwargs)
+        self._batch_norm = SyncBatchNorm(out_channels)
+
+    def forward(self, x):
+        x = self._conv(x)
+        x = self._batch_norm(x)
+        return x
+
+
+class ConvReLUPool(nn.Layer):
+    def __init__(self, in_channels, out_channels):
+        super().__init__()
+        self.conv = nn.Conv2D(
+            in_channels,
+            out_channels,
+            kernel_size=3,
+            stride=1,
+            padding=1,
+            dilation=1)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = F.relu(x)
+        # 2x2 max pooling with the paddle 2.x functional API
+        x = F.max_pool2d(x, kernel_size=2, stride=2)
+        return x
+
+
+class SeparableConvBNReLU(nn.Layer):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 padding='same',
+                 **kwargs):
+        super().__init__()
+        self.depthwise_conv = ConvBNReLU(
+            in_channels,
+            out_channels=in_channels,
+            kernel_size=kernel_size,
+            padding=padding,
+            groups=in_channels,
+            **kwargs)
+        self.pointwise_conv = ConvBNReLU(
+            in_channels, out_channels, kernel_size=1, groups=1, bias_attr=False)
+
+    def forward(self, x):
+        x = self.depthwise_conv(x)
+        x = self.pointwise_conv(x)
+        return x
+
+
+class DepthwiseConvBN(nn.Layer):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 padding='same',
+                 **kwargs):
+        super().__init__()
+        self.depthwise_conv = ConvBN(
+            in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            padding=padding,
+            groups=in_channels,
+            **kwargs)
+
+    def forward(self, x):
+        x = self.depthwise_conv(x)
+        return x
+
+
+class AuxLayer(nn.Layer):
+    """
+    The auxiliary layer implementation for auxiliary loss.
+
+    Args:
+        in_channels (int): The number of input channels.
+        inter_channels (int): The intermediate channels.
+        out_channels (int): The number of output channels, and usually it is num_classes.
+        dropout_prob (float, optional): The drop rate. Default: 0.1.
+ """ + + def __init__(self, + in_channels, + inter_channels, + out_channels, + dropout_prob=0.1): + super().__init__() + + self.conv_bn_relu = ConvBNReLU( + in_channels=in_channels, + out_channels=inter_channels, + kernel_size=3, + padding=1) + + self.dropout = nn.Dropout(p=dropout_prob) + + self.conv = nn.Conv2D( + in_channels=inter_channels, + out_channels=out_channels, + kernel_size=1) + + def forward(self, x): + x = self.conv_bn_relu(x) + x = self.dropout(x) + x = self.conv(x) + return x diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/nonlocal2d.py b/contrib/PanopticDeepLab/paddleseg/models/layers/nonlocal2d.py new file mode 100644 index 0000000000..bd577c1a16 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/layers/nonlocal2d.py @@ -0,0 +1,154 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddleseg.models import layers + + +class NonLocal2D(nn.Layer): + """Basic Non-local module. + This model is the implementation of "Non-local Neural Networks" + (https://arxiv.org/abs/1711.07971) + + Args: + in_channels (int): Channels of the input feature map. + reduction (int): Channel reduction ratio. Default: 2. + use_scale (bool): Whether to scale pairwise_weight by `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`. Default: True. + sub_sample (bool): Whether to utilize max pooling after pairwise function. Default: False. + mode (str): Options are `gaussian`, `concatenation`, `embedded_gaussian` and `dot_product`. Default: embedded_gaussian. + """ + + def __init__(self, + in_channels, + reduction=2, + use_scale=True, + sub_sample=False, + mode='embedded_gaussian'): + super(NonLocal2D, self).__init__() + self.in_channels = in_channels + self.reduction = reduction + self.use_scale = use_scale + self.sub_sample = sub_sample + self.mode = mode + if mode not in [ + 'gaussian', 'embedded_gaussian', 'dot_product', 'concatenation' + ]: + raise ValueError( + "Mode should be in 'gaussian', 'concatenation','embedded_gaussian' or 'dot_product'." 
+            )
+
+        self.inter_channels = max(in_channels // reduction, 1)
+
+        self.g = nn.Conv2D(
+            in_channels=self.in_channels,
+            out_channels=self.inter_channels,
+            kernel_size=1)
+        self.conv_out = layers.ConvBNReLU(
+            in_channels=self.inter_channels,
+            out_channels=self.in_channels,
+            kernel_size=1,
+            bias_attr=False)
+
+        if self.mode != "gaussian":
+            self.theta = nn.Conv2D(
+                in_channels=self.in_channels,
+                out_channels=self.inter_channels,
+                kernel_size=1)
+            self.phi = nn.Conv2D(
+                in_channels=self.in_channels,
+                out_channels=self.inter_channels,
+                kernel_size=1)
+
+        if self.mode == "concatenation":
+            self.concat_project = layers.ConvBNReLU(
+                in_channels=self.inter_channels * 2,
+                out_channels=1,
+                kernel_size=1,
+                bias_attr=False)
+
+        if self.sub_sample:
+            max_pool_layer = nn.MaxPool2D(kernel_size=(2, 2))
+            self.g = nn.Sequential(self.g, max_pool_layer)
+            if self.mode != 'gaussian':
+                self.phi = nn.Sequential(self.phi, max_pool_layer)
+            else:
+                self.phi = max_pool_layer
+
+    def gaussian(self, theta_x, phi_x):
+        pairwise_weight = paddle.matmul(theta_x, phi_x)
+        pairwise_weight = F.softmax(pairwise_weight, axis=-1)
+        return pairwise_weight
+
+    def embedded_gaussian(self, theta_x, phi_x):
+        pairwise_weight = paddle.matmul(theta_x, phi_x)
+        if self.use_scale:
+            pairwise_weight /= theta_x.shape[-1]**0.5
+        pairwise_weight = F.softmax(pairwise_weight, axis=-1)
+        return pairwise_weight
+
+    def dot_product(self, theta_x, phi_x):
+        pairwise_weight = paddle.matmul(theta_x, phi_x)
+        pairwise_weight /= pairwise_weight.shape[-1]
+        return pairwise_weight
+
+    def concatenation(self, theta_x, phi_x):
+        h = theta_x.shape[2]
+        w = phi_x.shape[3]
+        theta_x = paddle.tile(theta_x, [1, 1, 1, w])
+        phi_x = paddle.tile(phi_x, [1, 1, h, 1])
+
+        concat_feature = paddle.concat([theta_x, phi_x], axis=1)
+        pairwise_weight = self.concat_project(concat_feature)
+        n, _, h, w = pairwise_weight.shape
+        pairwise_weight = paddle.reshape(pairwise_weight, [n, h, w])
+        pairwise_weight /= pairwise_weight.shape[-1]
+        return pairwise_weight
+
+    def forward(self, x):
+        n, c, h, w = x.shape
+        g_x = paddle.reshape(self.g(x), [n, self.inter_channels, -1])
+        g_x = paddle.transpose(g_x, [0, 2, 1])
+
+        if self.mode == 'gaussian':
+            # In gaussian mode there is no theta/phi projection, so the raw
+            # input (with `in_channels` channels) is flattened directly.
+            theta_x = paddle.reshape(x, [n, self.in_channels, -1])
+            theta_x = paddle.transpose(theta_x, [0, 2, 1])
+            if self.sub_sample:
+                phi_x = paddle.reshape(self.phi(x), [n, self.in_channels, -1])
+            else:
+                phi_x = paddle.reshape(x, [n, self.in_channels, -1])
+
+        elif self.mode == 'concatenation':
+            theta_x = paddle.reshape(
+                self.theta(x), [n, self.inter_channels, -1, 1])
+            phi_x = paddle.reshape(
+                self.phi(x), [n, self.inter_channels, 1, -1])
+
+        else:
+            theta_x = paddle.reshape(
+                self.theta(x), [n, self.inter_channels, -1])
+            theta_x = paddle.transpose(theta_x, [0, 2, 1])
+            phi_x = paddle.reshape(self.phi(x), [n, self.inter_channels, -1])
+
+        pairwise_func = getattr(self, self.mode)
+        pairwise_weight = pairwise_func(theta_x, phi_x)
+        y = paddle.matmul(pairwise_weight, g_x)
+        y = paddle.transpose(y, [0, 2, 1])
+        y = paddle.reshape(y, [n, self.inter_channels, h, w])
+
+        output = x + self.conv_out(y)
+
+        return output
diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/pyramid_pool.py b/contrib/PanopticDeepLab/paddleseg/models/layers/pyramid_pool.py
new file mode 100644
index 0000000000..87e19f36bd
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/models/layers/pyramid_pool.py
@@ -0,0 +1,185 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn.functional as F
+from paddle import nn
+
+from paddleseg.models import layers
+
+
+class ASPPModule(nn.Layer):
+    """
+    Atrous Spatial Pyramid Pooling.
+
+    Args:
+        aspp_ratios (tuple): The dilation rates used in the ASPP module.
+        in_channels (int): The number of input channels.
+        out_channels (int): The number of output channels.
+        align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature
+            is even, e.g. 1024x512, otherwise it is True, e.g. 769x769.
+        use_sep_conv (bool, optional): Whether to use separable conv in the ASPP module. Default: False.
+        image_pooling (bool, optional): Whether to augment with image-level features. Default: False.
+        drop_rate (float, optional): The drop rate of the ASPP output. Default: 0.1.
+    """
+
+    def __init__(self,
+                 aspp_ratios,
+                 in_channels,
+                 out_channels,
+                 align_corners,
+                 use_sep_conv=False,
+                 image_pooling=False,
+                 drop_rate=0.1):
+        super().__init__()
+
+        self.align_corners = align_corners
+        self.aspp_blocks = nn.LayerList()
+
+        for ratio in aspp_ratios:
+            if use_sep_conv and ratio > 1:
+                conv_func = layers.SeparableConvBNReLU
+            else:
+                conv_func = layers.ConvBNReLU
+
+            block = conv_func(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=1 if ratio == 1 else 3,
+                dilation=ratio,
+                padding=0 if ratio == 1 else ratio,
+                bias_attr=False)
+            self.aspp_blocks.append(block)
+
+        out_size = len(self.aspp_blocks)
+
+        if image_pooling:
+            self.global_avg_pool = nn.Sequential(
+                nn.AdaptiveAvgPool2D(output_size=(1, 1)),
+                layers.ConvBNReLU(
+                    in_channels, out_channels, kernel_size=1, bias_attr=False))
+            out_size += 1
+        self.image_pooling = image_pooling
+
+        self.conv_bn_relu = layers.ConvBNReLU(
+            in_channels=out_channels * out_size,
+            out_channels=out_channels,
+            kernel_size=1,
+            bias_attr=False)
+
+        self.dropout = nn.Dropout(p=drop_rate)
+
+    def forward(self, x):
+        outputs = []
+        # All branches are interpolated back to the input resolution before concat.
+        interpolate_shape = x.shape[2:]
+        for block in self.aspp_blocks:
+            y = block(x)
+            y = F.interpolate(
+                y,
+                interpolate_shape,
+                mode='bilinear',
+                align_corners=self.align_corners)
+            outputs.append(y)
+
+        if self.image_pooling:
+            img_avg = self.global_avg_pool(x)
+            img_avg = F.interpolate(
+                img_avg,
+                interpolate_shape,
+                mode='bilinear',
+                align_corners=self.align_corners)
+            outputs.append(img_avg)
+
+        x = paddle.concat(outputs, axis=1)
+        x = self.conv_bn_relu(x)
+        x = self.dropout(x)
+
+        return x
+
+
+class PPModule(nn.Layer):
+    """
+    Pyramid pooling module originally used in PSPNet.
+
+    Args:
+        in_channels (int): The number of input channels to the pyramid pooling module.
+        out_channels (int): The number of output channels after the pyramid pooling module.
+        bin_sizes (tuple, optional): The output sizes of the pooled feature maps. Default: (1, 2, 3, 6).
+        dim_reduction (bool, optional): Whether to reduce the dimension after pooling. Default: True.
+        align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature
+            is even, e.g. 1024x512, otherwise it is True, e.g. 769x769.
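+
+    Examples:
+        A minimal usage sketch; the channel and bin sizes are illustrative, and
+        the import assumes ``PPModule`` is exported from ``paddleseg.models.layers``:
+
+        .. code-block:: python
+
+            import paddle
+            from paddleseg.models import layers
+
+            ppm = layers.PPModule(
+                in_channels=2048,
+                out_channels=512,
+                bin_sizes=(1, 2, 3, 6),
+                dim_reduction=True,
+                align_corners=False)
+            feat = paddle.rand([2, 2048, 32, 32])
+            out = ppm(feat)  # shape: [2, 512, 32, 32]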
+ """ + + def __init__(self, in_channels, out_channels, bin_sizes, dim_reduction, + align_corners): + super().__init__() + + self.bin_sizes = bin_sizes + + inter_channels = in_channels + if dim_reduction: + inter_channels = in_channels // len(bin_sizes) + + # we use dimension reduction after pooling mentioned in original implementation. + self.stages = nn.LayerList([ + self._make_stage(in_channels, inter_channels, size) + for size in bin_sizes + ]) + + self.conv_bn_relu2 = layers.ConvBNReLU( + in_channels=in_channels + inter_channels * len(bin_sizes), + out_channels=out_channels, + kernel_size=3, + padding=1) + + self.align_corners = align_corners + + def _make_stage(self, in_channels, out_channels, size): + """ + Create one pooling layer. + + In our implementation, we adopt the same dimension reduction as the original paper that might be + slightly different with other implementations. + + After pooling, the channels are reduced to 1/len(bin_sizes) immediately, while some other implementations + keep the channels to be same. + + Args: + in_channels (int): The number of intput channels to pyramid pooling module. + size (int): The out size of the pooled layer. + + Returns: + conv (Tensor): A tensor after Pyramid Pooling Module. + """ + + prior = nn.AdaptiveAvgPool2D(output_size=(size, size)) + conv = layers.ConvBNReLU( + in_channels=in_channels, out_channels=out_channels, kernel_size=1) + + return nn.Sequential(prior, conv) + + def forward(self, input): + cat_layers = [] + for stage in self.stages: + x = stage(input) + x = F.interpolate( + x, + input.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + cat_layers.append(x) + cat_layers = [input] + cat_layers[::-1] + cat = paddle.concat(cat_layers, axis=1) + out = self.conv_bn_relu2(cat) + + return out diff --git a/contrib/PanopticDeepLab/paddleseg/models/losses/__init__.py b/contrib/PanopticDeepLab/paddleseg/models/losses/__init__.py new file mode 100644 index 0000000000..e4d5cc9e76 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/losses/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .cross_entropy_loss import CrossEntropyLoss +from .mean_square_error_loss import MSELoss +from .l1_loss import L1Loss diff --git a/contrib/PanopticDeepLab/paddleseg/models/losses/cross_entropy_loss.py b/contrib/PanopticDeepLab/paddleseg/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000000..87320c1598 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/models/losses/cross_entropy_loss.py @@ -0,0 +1,74 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+from paddle import nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+
+
+@manager.LOSSES.add_component
+class CrossEntropyLoss(nn.Layer):
+    """
+    Implements the cross entropy loss function.
+
+    Args:
+        ignore_index (int64): Specifies a target value that is ignored
+            and does not contribute to the input gradient. Default ``255``.
+        top_k_percent_pixels (float, optional): The fraction of hardest pixels kept
+            for the loss. If it is 1.0, the loss is averaged over all valid pixels;
+            otherwise only the top k percent largest per-pixel losses are averaged
+            (online hard example mining). Default ``1.0``.
+    """
+
+    def __init__(self, ignore_index=255, top_k_percent_pixels=1.0):
+        super(CrossEntropyLoss, self).__init__()
+        self.ignore_index = ignore_index
+        self.top_k_percent_pixels = top_k_percent_pixels
+        self.EPS = 1e-5
+
+    def forward(self, logit, label, semantic_weights=None):
+        """
+        Forward computation.
+
+        Args:
+            logit (Tensor): Logit tensor, the data type is float32, float64. Shape is
+                (N, C), where C is number of classes, and if shape is more than 2D, this
+                is (N, C, D1, D2,..., Dk), k >= 1.
+            label (Tensor): Label tensor, the data type is int64. Shape is (N), where each
+                value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is
+                (N, D1, D2,..., Dk), k >= 1.
+            semantic_weights (Tensor, optional): Per-pixel loss weights with the same
+                spatial shape as ``label``. Default: None.
+        """
+        if len(label.shape) != len(logit.shape):
+            label = paddle.unsqueeze(label, 1)
+
+        logit = paddle.transpose(logit, [0, 2, 3, 1])
+        label = paddle.transpose(label, [0, 2, 3, 1])
+        loss = F.softmax_with_cross_entropy(
+            logit, label, ignore_index=self.ignore_index, axis=-1)
+
+        mask = label != self.ignore_index
+        mask = paddle.cast(mask, 'float32')
+        loss = loss * mask
+        if semantic_weights is not None:
+            loss = loss.squeeze(-1)
+            loss = loss * semantic_weights
+
+        label.stop_gradient = True
+        mask.stop_gradient = True
+        if self.top_k_percent_pixels == 1.0:
+            avg_loss = paddle.mean(loss) / (paddle.mean(mask) + self.EPS)
+            return avg_loss
+
+        loss = loss.reshape((-1, ))
+        top_k_pixels = int(self.top_k_percent_pixels * loss.numel())
+        loss, _ = paddle.topk(loss, top_k_pixels)
+        return loss.mean()
diff --git a/contrib/PanopticDeepLab/paddleseg/models/losses/l1_loss.py b/contrib/PanopticDeepLab/paddleseg/models/losses/l1_loss.py
new file mode 100644
index 0000000000..5fbbae2880
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/models/losses/l1_loss.py
@@ -0,0 +1,72 @@
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+from paddle import nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+
+
+@manager.LOSSES.add_component
+class L1Loss(nn.L1Loss):
+    r"""
+    This interface is used to construct a callable object of the ``L1Loss`` class.
+    The L1Loss layer calculates the L1 Loss of ``input`` and ``label`` as follows.
+
+    If `reduction` set to ``'none'``, the loss is:
+
+    .. math::
+        Out = \lvert input - label \rvert
+
+    If `reduction` set to ``'mean'``, the loss is:
+
+    .. math::
+        Out = MEAN(\lvert input - label \rvert)
+
+    If `reduction` set to ``'sum'``, the loss is:
+
+    .. math::
+        Out = SUM(\lvert input - label \rvert)
+
+    Parameters:
+        reduction (str, optional): Indicate the reduction to apply to the loss,
+            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
+            If `reduction` is ``'none'``, the unreduced loss is returned;
+            If `reduction` is ``'mean'``, the reduced mean loss is returned.
+            If `reduction` is ``'sum'``, the reduced sum loss is returned.
+            Default is ``'mean'``.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+    Shape:
+        input (Tensor): The input tensor. The shape is [N, *], where N is batch size and `*` means any number of additional dimensions. Its data type should be float32, float64, int32, int64.
+        label (Tensor): label. The shape is [N, *], the same shape as ``input``. Its data type should be float32, float64, int32, int64.
+        output (Tensor): The L1 Loss of ``input`` and ``label``.
+            If `reduction` is ``'none'``, the shape of output loss is [N, *], the same as ``input``.
+            If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
+            label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
+            input = paddle.to_tensor(input_data)
+            label = paddle.to_tensor(label_data)
+
+            l1_loss = paddle.nn.L1Loss()
+            output = l1_loss(input, label)
+            print(output.numpy())
+            # [0.35]
+
+            l1_loss = paddle.nn.L1Loss(reduction='sum')
+            output = l1_loss(input, label)
+            print(output.numpy())
+            # [1.4]
+
+            l1_loss = paddle.nn.L1Loss(reduction='none')
+            output = l1_loss(input, label)
+            print(output)
+            # [[0.20000005 0.19999999]
+            #  [0.2        0.79999995]]
+    """
+
+    def __init__(self, reduction='mean', ignore_index=255):
+        # `ignore_index` is accepted only to keep a uniform loss interface;
+        # the element-wise L1 loss does not use it.
+        super().__init__(reduction=reduction)
diff --git a/contrib/PanopticDeepLab/paddleseg/models/losses/mean_square_error_loss.py b/contrib/PanopticDeepLab/paddleseg/models/losses/mean_square_error_loss.py
new file mode 100644
index 0000000000..fa66c9c5f3
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/models/losses/mean_square_error_loss.py
@@ -0,0 +1,60 @@
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+from paddle import nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+
+
+@manager.LOSSES.add_component
+class MSELoss(nn.MSELoss):
+    r"""
+    **Mean Square Error Loss**
+
+    Computes the mean square error (squared L2 norm) of given input and label.
+
+    If :attr:`reduction` is set to ``'none'``, loss is calculated as:
+
+    .. math::
+        Out = (input - label)^2
+
+    If :attr:`reduction` is set to ``'mean'``, loss is calculated as:
+
+    .. math::
+        Out = \operatorname{mean}((input - label)^2)
+
+    If :attr:`reduction` is set to ``'sum'``, loss is calculated as:
+
+    .. math::
+        Out = \operatorname{sum}((input - label)^2)
+
+    where `input` and `label` are `float32` tensors of the same shape.
+    Parameters:
+        reduction (string, optional): The reduction method for the output,
+            could be 'none' | 'mean' | 'sum'.
+            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned.
+            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
+            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
+            Default is ``'mean'``.
+    Shape:
+        input (Tensor): Input tensor, the data type is float32 or float64.
+        label (Tensor): Label tensor, the data type is float32 or float64.
+        output (Tensor): output tensor storing the MSE loss of input and label, the data type is same as input.
+    Examples:
+        .. code-block:: python
+
+            import numpy as np
+            import paddle
+
+            input_data = np.array([1.5]).astype("float32")
+            label_data = np.array([1.7]).astype("float32")
+            mse_loss = paddle.nn.loss.MSELoss()
+            input = paddle.to_tensor(input_data)
+            label = paddle.to_tensor(label_data)
+            output = mse_loss(input, label)
+            print(output)
+            # [0.04000002]
+    """
+
+    def __init__(self, reduction='mean', ignore_index=255):
+        # `ignore_index` is accepted only to keep a uniform loss interface;
+        # the element-wise MSE loss does not use it.
+        super().__init__(reduction=reduction)
diff --git a/contrib/PanopticDeepLab/paddleseg/models/panoptic_deeplab.py b/contrib/PanopticDeepLab/paddleseg/models/panoptic_deeplab.py
new file mode 100644
index 0000000000..923340bf32
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/models/panoptic_deeplab.py
@@ -0,0 +1,339 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import OrderedDict
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+from paddleseg.models import layers
+from paddleseg.utils import utils
+
+__all__ = ['PanopticDeepLab']
+
+
+@manager.MODELS.add_component
+class PanopticDeepLab(nn.Layer):
+    """
+    The Panoptic-DeepLab implementation based on PaddlePaddle.
+
+    The original article refers to
+    Bowen Cheng, et, al. "Panoptic-DeepLab: A Simple, Strong, and Fast Baseline for Bottom-Up Panoptic Segmentation"
+    (https://arxiv.org/abs/1911.10194)
+
+    Args:
+        num_classes (int): The unique number of target classes.
+        backbone (paddle.nn.Layer): Backbone network, currently support Resnet50_vd/Resnet101_vd/Xception65.
+        backbone_indices (tuple, optional): Four values in the tuple indicate the indices of output of backbone.
+            The first three are used as low-level features in the decoders (ordered from low resolution
+            to high), and the last one is taken as the input of ASPP. Default: (2, 1, 0, 3).
+        aspp_ratios (tuple, optional): The dilation rates used in the ASPP module.
+            If output_stride=16, aspp_ratios should be set as (1, 6, 12, 18).
+            If output_stride=8, aspp_ratios is (1, 12, 24, 36).
+            Default: (1, 6, 12, 18).
+        aspp_out_channels (int, optional): The output channels of ASPP module. Default: 256.
+        decoder_channels (int, optional): The channels of the semantic decoder. Default: 256.
+        low_level_channels_projects (list, optional): The channels of each low-level feature after
+            projection in the semantic decoder. Default: None.
+        align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+            e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+        pretrained (str, optional): The path or url of pretrained model. Default: None.
+        **kwargs: Keyword arguments prefixed with ``instance_`` (e.g. ``instance_num_classes``,
+            ``instance_class_key``) configure the instance branch; see ``PanopticDeepLabHead``.
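+
+    Examples:
+        A construction sketch mirroring the demo at the bottom of this file; the
+        class count and channel settings are illustrative:
+
+        .. code-block:: python
+
+            import paddle
+            from paddleseg.models.backbones import ResNet50_vd
+
+            model = PanopticDeepLab(
+                num_classes=19,
+                backbone=ResNet50_vd(output_stride=32),
+                backbone_indices=(2, 1, 0, 3),
+                low_level_channels_projects=[128, 64, 32],
+                instance_aspp_out_channels=256,
+                instance_decoder_channels=128,
+                instance_low_level_channels_projects=[64, 32, 16],
+                instance_num_classes=[1, 2],
+                instance_head_channels=32,
+                instance_class_key=["center", "offset"])
+            x = paddle.rand([1, 3, 512, 1024])
+            semantic, center, offset = model(x)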
+ """ + + def __init__(self, + num_classes, + backbone, + backbone_indices=(2, 1, 0, 3), + aspp_ratios=(1, 6, 12, 18), + aspp_out_channels=256, + decoder_channels=256, + low_level_channels_projects=None, + align_corners=False, + pretrained=None, + **kwargs): + super().__init__() + + self.backbone = backbone + backbone_channels = [ + backbone.feat_channels[i] for i in backbone_indices + ] + + self.head = PanopticDeepLabHead( + num_classes, backbone_indices, backbone_channels, aspp_ratios, + aspp_out_channels, decoder_channels, align_corners, + low_level_channels_projects, **kwargs) + + self.align_corners = align_corners + self.pretrained = pretrained + self.init_weight() + + def _upsample_predictions(self, pred, input_shape): + """Upsamples final prediction, with special handling to offset. + Args: + pred (dict): stores all output of the segmentation model. + input_shape (tuple): spatial resolution of the desired shape. + Returns: + result (OrderedDict): upsampled dictionary. + """ + # Override upsample method to correctly handle `offset` + result = OrderedDict() + for key in pred.keys(): + out = F.interpolate( + pred[key], + size=input_shape, + mode='bilinear', + align_corners=self.align_corners) + if 'offset' in key: + if input_shape[0] % 2 == 0: + scale = input_shape[0] // pred[key].shape[2] + else: + scale = (input_shape[0] - 1) // (pred[key].shape[2] - 1) + out *= scale + result[key] = out + return result + + def forward(self, x): + feat_list = self.backbone(x) + logit_dict = self.head(feat_list) + results = self._upsample_predictions(logit_dict, x.shape[-2:]) + + # return results + logit_list = [results['semantic'], results['center'], results['offset']] + return logit_list + # return [results['semantic']] + + def init_weight(self): + if self.pretrained is not None: + utils.load_entire_model(self, self.pretrained) + + +class PanopticDeepLabHead(nn.Layer): + """ + The DeepLabV3PHead implementation based on PaddlePaddle. + + Args: + num_classes (int): The unique number of target classes. + backbone_indices (tuple): Two values in the tuple indicate the indices of output of backbone. + the first index will be taken as a low-level feature in Decoder component; + the second one will be taken as input of ASPP component. + Usually backbone consists of four downsampling stage, and return an output of + each stage. If we set it as (0, 3), it means taking feature map of the first + stage in backbone as low-level feature used in Decoder, and feature map of the fourth + stage as input of ASPP. + backbone_channels (tuple): The same length with "backbone_indices". It indicates the channels of corresponding index. + aspp_ratios (tuple): The dilation rates using in ASSP module. + aspp_out_channels (int): The output channels of ASPP module. + align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature + is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. 
+ """ + + def __init__(self, num_classes, backbone_indices, backbone_channels, + aspp_ratios, aspp_out_channels, decoder_channels, + align_corners, low_level_channels_projects, **kwargs): + super().__init__() + self.semantic_decoder = SinglePanopticDeepLabDecoder( + backbone_indices=backbone_indices, + backbone_channels=backbone_channels, + aspp_ratios=aspp_ratios, + aspp_out_channels=aspp_out_channels, + decoder_channels=decoder_channels, + align_corners=align_corners, + low_level_channels_projects=low_level_channels_projects) + self.semantic_head = SinglePanopticDeepLabHead( + num_classes=[num_classes], + decoder_channels=decoder_channels, + head_channels=decoder_channels, + class_key=['semantic']) + self.instance_decoder = SinglePanopticDeepLabDecoder( + backbone_indices=backbone_indices, + backbone_channels=backbone_channels, + aspp_ratios=aspp_ratios, + aspp_out_channels=kwargs['instance_aspp_out_channels'], + decoder_channels=kwargs['instance_decoder_channels'], + align_corners=align_corners, + low_level_channels_projects=kwargs[ + 'instance_low_level_channels_projects']) + self.instance_head = SinglePanopticDeepLabHead( + num_classes=kwargs['instance_num_classes'], + decoder_channels=kwargs['instance_decoder_channels'], + head_channels=kwargs['instance_head_channels'], + class_key=kwargs['instance_class_key']) + + def forward(self, features): + # pred = OrdereDict() + pred = {} + + # Semantic branch + semantic = self.semantic_decoder(features) + semantic = self.semantic_head(semantic) + for key in semantic.keys(): + pred[key] = semantic[key] + + # Instance branch + instance = self.instance_decoder(features) + instance = self.instance_head(instance) + for key in instance.keys(): + pred[key] = instance[key] + + return pred + + +class SinglePanopticDeepLabDecoder(nn.Layer): + """ + The DeepLabV3PHead implementation based on PaddlePaddle. + + Args: + num_classes (int): The unique number of target classes. + backbone_indices (tuple): Two values in the tuple indicate the indices of output of backbone. + the first index will be taken as a low-level feature in Decoder component; + the second one will be taken as input of ASPP component. + Usually backbone consists of four downsampling stage, and return an output of + each stage. If we set it as (0, 3), it means taking feature map of the first + stage in backbone as low-level feature used in Decoder, and feature map of the fourth + stage as input of ASPP. + backbone_channels (tuple): The same length with "backbone_indices". It indicates the channels of corresponding index. + aspp_ratios (tuple): The dilation rates using in ASSP module. + aspp_out_channels (int): The output channels of ASPP module. + align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature + is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. 
+ """ + + def __init__(self, backbone_indices, backbone_channels, aspp_ratios, + aspp_out_channels, decoder_channels, align_corners, + low_level_channels_projects): + super().__init__() + self.aspp = layers.ASPPModule( + aspp_ratios, + backbone_channels[-1], + aspp_out_channels, + align_corners, + use_sep_conv=False, + image_pooling=True, + drop_rate=0.5) + self.backbone_indices = backbone_indices + self.decoder_stage = len(low_level_channels_projects) + if self.decoder_stage != len(self.backbone_indices) - 1: + raise ValueError( + "len(low_level_channels_projects) != len(backbone_indices) - 1, they are {} and {}" + .format(low_level_channels_projects, backbone_indices)) + self.align_corners = align_corners + + # Transform low-level feature + project = [] + # Fuse + fuse = [] + # Top-down direction, i.e. starting from largest stride + for i in range(self.decoder_stage): + project.append( + layers.ConvBNReLU( + backbone_channels[i], + low_level_channels_projects[i], + 1, + bias_attr=False)) + if i == 0: + fuse_in_channels = aspp_out_channels + low_level_channels_projects[ + i] + else: + fuse_in_channels = decoder_channels + low_level_channels_projects[ + i] + fuse.append( + layers.SeparableConvBNReLU( + fuse_in_channels, + decoder_channels, + 5, + padding=2, + bias_attr=False)) + self.project = nn.LayerList(project) + self.fuse = nn.LayerList(fuse) + + def forward(self, feat_list): + x = feat_list[self.backbone_indices[-1]] + x = self.aspp(x) + + for i in range(self.decoder_stage): + l = feat_list[self.backbone_indices[i]] + l = self.project[i](l) + x = F.interpolate( + x, + size=l.shape[-2:], + mode='bilinear', + align_corners=self.align_corners) + x = paddle.concat([x, l], axis=1) + x = self.fuse[i](x) + + return x + + +class SinglePanopticDeepLabHead(nn.Layer): + """ + Decoder module of DeepLabV3P model + + Args: + num_classes (int): The number of classes. + in_channels (int): The number of input channels in decoder module. 
+ """ + + def __init__(self, num_classes, decoder_channels, head_channels, class_key): + super(SinglePanopticDeepLabHead, self).__init__() + self.num_head = len(num_classes) + if self.num_head != len(class_key): + raise ValueError( + "len(num_classes) != len(class_key), they are {} and {}".format( + num_classes, class_key)) + + classifier = [] + for i in range(self.num_head): + classifier.append( + nn.Sequential( + layers.SeparableConvBNReLU( + decoder_channels, + head_channels, + 5, + padding=2, + bias_attr=False), + nn.Conv2D(head_channels, num_classes[i], 1))) + + self.classifier = nn.LayerList(classifier) + self.class_key = class_key + + def forward(self, x): + pred = OrderedDict() + # build classifier + for i, key in enumerate(self.class_key): + pred[key] = self.classifier[i](x) + + return pred + + +if __name__ == '__main__': + paddle.set_device('cpu') + from paddleseg.models.backbones import ResNet50_vd + backbone = ResNet50_vd(output_stride=32) + model = PanopticDeepLab( + num_classes=2, + backbone=backbone, + backbone_indices=(2, 1, 0, 3), + aspp_ratios=(1, 3, 6, 9), + aspp_out_channels=256, + decoder_channels=256, + low_level_channels_projects=[128, 64, 32], + align_corners=True, + instance_aspp_out_channels=256, + instance_decoder_channels=128, + instance_low_level_channels_projects=[64, 32, 16], + instance_num_classes=[1, 2], + instance_head_channels=32, + instance_class_key=["center", "offset"]) + flop = paddle.flops(model, (1, 3, 512, 1024), print_detail=True) + x = paddle.rand((1, 3, 512, 1024)) + result = model(x) + print(result) diff --git a/contrib/PanopticDeepLab/paddleseg/transforms/__init__.py b/contrib/PanopticDeepLab/paddleseg/transforms/__init__.py new file mode 100644 index 0000000000..72332d8eef --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/transforms/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .transforms import * +from . import functional +from .target_transforms import PanopticTargetGenerator, SemanticTargetGenerator, InstanceTargetGenerator, RawPanopticTargetGenerator diff --git a/contrib/PanopticDeepLab/paddleseg/transforms/functional.py b/contrib/PanopticDeepLab/paddleseg/transforms/functional.py new file mode 100644 index 0000000000..d53fa8b84f --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/transforms/functional.py @@ -0,0 +1,160 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import cv2 +import numpy as np +from PIL import Image, ImageEnhance +from scipy.ndimage.morphology import distance_transform_edt + + +def normalize(im, mean, std): + im = im.astype(np.float32, copy=False) / 255.0 + im -= mean + im /= std + return im + + +def resize(im, target_size=608, interp=cv2.INTER_LINEAR): + if isinstance(target_size, list) or isinstance(target_size, tuple): + w = target_size[0] + h = target_size[1] + else: + w = target_size + h = target_size + im = cv2.resize(im, (w, h), interpolation=interp) + return im + + +def resize_long(im, long_size=224, interpolation=cv2.INTER_LINEAR): + value = max(im.shape[0], im.shape[1]) + scale = float(long_size) / float(value) + resized_width = int(round(im.shape[1] * scale)) + resized_height = int(round(im.shape[0] * scale)) + + im = cv2.resize( + im, (resized_width, resized_height), interpolation=interpolation) + return im + + +def horizontal_flip(im): + if len(im.shape) == 3: + im = im[:, ::-1, :] + elif len(im.shape) == 2: + im = im[:, ::-1] + return im + + +def vertical_flip(im): + if len(im.shape) == 3: + im = im[::-1, :, :] + elif len(im.shape) == 2: + im = im[::-1, :] + return im + + +def brightness(im, brightness_lower, brightness_upper): + brightness_delta = np.random.uniform(brightness_lower, brightness_upper) + im = ImageEnhance.Brightness(im).enhance(brightness_delta) + return im + + +def contrast(im, contrast_lower, contrast_upper): + contrast_delta = np.random.uniform(contrast_lower, contrast_upper) + im = ImageEnhance.Contrast(im).enhance(contrast_delta) + return im + + +def saturation(im, saturation_lower, saturation_upper): + saturation_delta = np.random.uniform(saturation_lower, saturation_upper) + im = ImageEnhance.Color(im).enhance(saturation_delta) + return im + + +def hue(im, hue_lower, hue_upper): + hue_delta = np.random.uniform(hue_lower, hue_upper) + im = np.array(im.convert('HSV')) + im[:, :, 0] = im[:, :, 0] + hue_delta + im = Image.fromarray(im, mode='HSV').convert('RGB') + return im + + +def rotate(im, rotate_lower, rotate_upper): + rotate_delta = np.random.uniform(rotate_lower, rotate_upper) + im = im.rotate(int(rotate_delta)) + return im + + +def mask_to_onehot(mask, num_classes): + """ + Convert a mask (H, W) to onehot (K, H, W). + + Args: + mask (np.ndarray): Label mask with shape (H, W) + num_classes (int): Number of classes. + + Returns: + np.ndarray: Onehot mask with shape(K, H, W). + """ + _mask = [mask == i for i in range(num_classes)] + _mask = np.array(_mask).astype(np.uint8) + return _mask + + +def onehot_to_binary_edge(mask, radius): + """ + Convert a onehot mask (K, H, W) to a edge mask. + + Args: + mask (np.ndarray): Onehot mask with shape (K, H, W) + radius (int|float): Radius of edge. + + Returns: + np.ndarray: Edge mask with shape(H, W). + """ + if radius < 1: + raise ValueError('`radius` should be greater than or equal to 1') + num_classes = mask.shape[0] + + edge = np.zeros(mask.shape[1:]) + # pad borders + mask = np.pad( + mask, ((0, 0), (1, 1), (1, 1)), mode='constant', constant_values=0) + for i in range(num_classes): + dist = distance_transform_edt( + mask[i, :]) + distance_transform_edt(1.0 - mask[i, :]) + dist = dist[1:-1, 1:-1] + dist[dist > radius] = 0 + edge += dist + + edge = np.expand_dims(edge, axis=0) + edge = (edge > 0).astype(np.uint8) + return edge + + +def mask_to_binary_edge(mask, radius, num_classes): + """ + Convert a segmentic segmentation mask (H, W) to a binary edge mask(H, W). 
+ + Args: + mask (np.ndarray): Label mask with shape (H, W) + radius (int|float): Radius of edge. + num_classes (int): Number of classes. + + Returns: + np.ndarray: Edge mask with shape(H, W). + """ + mask = mask.squeeze() + onehot = mask_to_onehot(mask, num_classes) + edge = onehot_to_binary_edge(onehot, radius) + return edge diff --git a/contrib/PanopticDeepLab/paddleseg/transforms/target_transforms.py b/contrib/PanopticDeepLab/paddleseg/transforms/target_transforms.py new file mode 100644 index 0000000000..ce646f5ea1 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/transforms/target_transforms.py @@ -0,0 +1,281 @@ +import numpy as np + + +class PanopticTargetGenerator(object): + """ + Generates panoptic training target for Panoptic-DeepLab. + Annotation is assumed to have Cityscapes format. + Arguments: + ignore_index: Integer, the ignore label for semantic segmentation. + rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the + corresponding panoptic label. + thing_list: List, a list of thing classes + sigma: the sigma for Gaussian kernel. + ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch. + small_instance_area: Integer, indicates largest area for small instances. + small_instance_weight: Integer, indicates semantic loss weights for small instances. + ignore_crowd_in_semantic: Boolean, whether to ignore crowd region in semantic segmentation branch, + crowd region is ignored in the original TensorFlow implementation. + """ + + def __init__(self, + ignore_index, + rgb2id, + thing_list, + sigma=8, + ignore_stuff_in_offset=False, + small_instance_area=0, + small_instance_weight=1, + ignore_crowd_in_semantic=False): + self.ignore_index = ignore_index + self.rgb2id = rgb2id + self.thing_list = thing_list + self.ignore_stuff_in_offset = ignore_stuff_in_offset + self.small_instance_area = small_instance_area + self.small_instance_weight = small_instance_weight + self.ignore_crowd_in_semantic = ignore_crowd_in_semantic + + self.sigma = sigma + size = 6 * sigma + 3 + x = np.arange(0, size, 1, float) + y = x[:, np.newaxis] + x0, y0 = 3 * sigma + 1, 3 * sigma + 1 + self.g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2)) + + def __call__(self, panoptic, segments): + """Generates the training target. + reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py + reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18 + Args: + panoptic: numpy.array, colored image encoding panoptic label. + segments: List, a list of dictionary containing information of every segment, it has fields: + - id: panoptic id, after decoding `panoptic`. + - category_id: semantic class id. + - area: segment area. + - bbox: segment bounding box. + - iscrowd: crowd region. + Returns: + A dictionary with fields: + - semantic: Tensor, semantic label, shape=(H, W). + - foreground: Tensor, foreground mask label, shape=(H, W). + - center: Tensor, center heatmap, shape=(1, H, W). + - center_points: List, center coordinates, with tuple (y-coord, x-coord). + - offset: Tensor, offset, shape=(2, H, W), first dim is (offset_y, offset_x). + - semantic_weights: Tensor, loss weight for semantic prediction, shape=(H, W). + - center_weights: Tensor, ignore region of center prediction, shape=(H, W), used as weights for center + regression 0 is ignore, 1 is has instance. Multiply this mask to loss. 
+ - offset_weights: Tensor, ignore region of offset prediction, shape=(H, W), used as weights for offset + regression 0 is ignore, 1 is has instance. Multiply this mask to loss. + """ + panoptic = self.rgb2id(panoptic) + height, width = panoptic.shape[0], panoptic.shape[1] + semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_index + foreground = np.zeros_like(panoptic, dtype=np.uint8) + center = np.zeros((1, height, width), dtype=np.float32) + center_pts = [] + offset = np.zeros((2, height, width), dtype=np.float32) + y_coord = np.ones_like(panoptic, dtype=np.float32) + x_coord = np.ones_like(panoptic, dtype=np.float32) + y_coord = np.cumsum(y_coord, axis=0) - 1 + x_coord = np.cumsum(x_coord, axis=1) - 1 + # Generate pixel-wise loss weights + semantic_weights = np.ones_like(panoptic, dtype=np.uint8) + # 0: ignore, 1: has instance + # three conditions for a region to be ignored for instance branches: + # (1) It is labeled as `ignore_index` + # (2) It is crowd region (iscrowd=1) + # (3) (Optional) It is stuff region (for offset branch) + center_weights = np.zeros_like(panoptic, dtype=np.uint8) + offset_weights = np.zeros_like(panoptic, dtype=np.uint8) + for seg in segments: + cat_id = seg["category_id"] + if self.ignore_crowd_in_semantic: + if not seg['iscrowd']: + semantic[panoptic == seg["id"]] = cat_id + else: + semantic[panoptic == seg["id"]] = cat_id + if cat_id in self.thing_list: + foreground[panoptic == seg["id"]] = 1 + if not seg['iscrowd']: + # Ignored regions are not in `segments`. + # Handle crowd region. + center_weights[panoptic == seg["id"]] = 1 + if self.ignore_stuff_in_offset: + # Handle stuff region. + if cat_id in self.thing_list: + offset_weights[panoptic == seg["id"]] = 1 + else: + offset_weights[panoptic == seg["id"]] = 1 + if cat_id in self.thing_list: + # find instance center + mask_index = np.where(panoptic == seg["id"]) + if len(mask_index[0]) == 0: + # the instance is completely cropped + continue + + # Find instance area + ins_area = len(mask_index[0]) + if ins_area < self.small_instance_area: + semantic_weights[panoptic == + seg["id"]] = self.small_instance_weight + + center_y, center_x = np.mean(mask_index[0]), np.mean( + mask_index[1]) + center_pts.append([center_y, center_x]) + + # generate center heatmap + y, x = int(center_y), int(center_x) + # outside image boundary + if x < 0 or y < 0 or \ + x >= width or y >= height: + continue + sigma = self.sigma + # upper left + ul = int(np.round(x - 3 * sigma - 1)), int( + np.round(y - 3 * sigma - 1)) + # bottom right + br = int(np.round(x + 3 * sigma + 2)), int( + np.round(y + 3 * sigma + 2)) + + c, d = max(0, -ul[0]), min(br[0], width) - ul[0] + a, b = max(0, -ul[1]), min(br[1], height) - ul[1] + + cc, dd = max(0, ul[0]), min(br[0], width) + aa, bb = max(0, ul[1]), min(br[1], height) + center[0, aa:bb, cc:dd] = np.maximum(center[0, aa:bb, cc:dd], + self.g[a:b, c:d]) + + # generate offset (2, h, w) -> (y-dir, x-dir) + offset_y_index = (np.zeros_like(mask_index[0]), mask_index[0], + mask_index[1]) + offset_x_index = (np.ones_like(mask_index[0]), mask_index[0], + mask_index[1]) + offset[offset_y_index] = center_y - y_coord[mask_index] + offset[offset_x_index] = center_x - x_coord[mask_index] + + return dict( + semantic=semantic.astype('long'), + foreground=foreground.astype('long'), + center=center.astype(np.float32), + center_points=center_pts, + offset=offset.astype(np.float32), + semantic_weights=semantic_weights.astype(np.float32), + center_weights=center_weights.astype(np.float32), + 
offset_weights=offset_weights.astype(np.float32))
+
+
+class SemanticTargetGenerator(object):
+    """
+    Generates semantic training target only for Panoptic-DeepLab (no instance).
+    Annotation is assumed to have Cityscapes format.
+    Arguments:
+        ignore_index: Integer, the ignore label for semantic segmentation.
+        rgb2id: Function, panoptic label is encoded in a colored image, this function converts color to the
+            corresponding panoptic label.
+    """
+
+    def __init__(self, ignore_index, rgb2id):
+        self.ignore_index = ignore_index
+        self.rgb2id = rgb2id
+
+    def __call__(self, panoptic, segments):
+        """Generates the training target.
+        reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py
+        reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18
+        Args:
+            panoptic: numpy.array, colored image encoding panoptic label.
+            segments: List, a list of dictionary containing information of every segment, it has fields:
+                - id: panoptic id, after decoding `panoptic`.
+                - category_id: semantic class id.
+                - area: segment area.
+                - bbox: segment bounding box.
+                - iscrowd: crowd region.
+        Returns:
+            A dictionary with fields:
+                - semantic: Tensor, semantic label, shape=(H, W).
+        """
+        panoptic = self.rgb2id(panoptic)
+        semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_index
+        for seg in segments:
+            cat_id = seg["category_id"]
+            semantic[panoptic == seg["id"]] = cat_id
+
+        return dict(semantic=semantic.astype('long'))
+
+
+class InstanceTargetGenerator(object):
+    """
+    Generates instance target only for Panoptic-DeepLab.
+    Annotation is assumed to have Cityscapes format.
+    Arguments:
+        rgb2id: Function, panoptic label is encoded in a colored image, this function converts color to the
+            corresponding panoptic label.
+    """
+
+    def __init__(self, rgb2id):
+        self.rgb2id = rgb2id
+
+    def __call__(self, panoptic):
+        """Generates the instance target.
+        Args:
+            panoptic: numpy.array, colored image encoding panoptic label.
+        Returns:
+            A dictionary with fields:
+                - instance: Tensor, shape=(H, W). 0 is background; 1, 2, 3, ... are instances, so it is class-agnostic.
+        """
+        panoptic = self.rgb2id(panoptic)
+        instance = np.zeros_like(panoptic, dtype=np.int64)
+        ids = np.unique(panoptic)
+        ins_id = 1
+        for label_id in ids:
+            if label_id > 1000:
+                instance[panoptic == label_id] = ins_id
+                ins_id += 1
+
+        return dict(instance=instance)
+
+
+class RawPanopticTargetGenerator(object):
+    """
+    Generates the raw panoptic ground truth for evaluation, where values are 0, 1, 2, 3, ...,
+    11000, 11001, ..., 18000, 18001, and ignore_index (generally 255).
+    Arguments:
+        ignore_index: Integer, the ignore label for semantic segmentation.
+        rgb2id: Function, panoptic label is encoded in a colored image, this function converts color to the
+            corresponding panoptic label.
+    """
+
+    def __init__(self, ignore_index, rgb2id, label_divisor=1000):
+        self.ignore_index = ignore_index
+        self.rgb2id = rgb2id
+        self.label_divisor = label_divisor
+
+    def __call__(self, panoptic, segments):
+        """
+        Generates the raw panoptic target.
+
+        Args:
+            panoptic (numpy.array): colored image encoding panoptic label.
+            segments (list): A list of dictionary containing information of every segment, it has fields:
+                - id: panoptic id, after decoding `panoptic`.
+                - category_id: semantic class id.
+                - area: segment area.
+                - bbox: segment bounding box.
+                - iscrowd: crowd region.
+        Returns:
+            A dictionary with fields:
+                - panoptic: Tensor, panoptic label, shape=(H, W).
+        """
+        panoptic = self.rgb2id(panoptic)
+        raw_panoptic = np.zeros_like(panoptic) + self.ignore_index
+        for seg in segments:
+            cat_id = seg['category_id']
+            if seg['id'] < 1000:
+                raw_panoptic[panoptic == seg['id']] = cat_id
+            else:
+                ins_id = seg['id'] % self.label_divisor
+                raw_panoptic[panoptic ==
+                             seg['id']] = cat_id * self.label_divisor + ins_id
+        return dict(panoptic=raw_panoptic.astype('long'))
diff --git a/contrib/PanopticDeepLab/paddleseg/transforms/transforms.py b/contrib/PanopticDeepLab/paddleseg/transforms/transforms.py
new file mode 100644
index 0000000000..cb8f1b69bf
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/transforms/transforms.py
@@ -0,0 +1,888 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+
+import cv2
+import numpy as np
+from PIL import Image
+
+from paddleseg.cvlibs import manager
+from paddleseg.transforms import functional
+
+
+@manager.TRANSFORMS.add_component
+class Compose:
+    """
+    Do transformation on input data with corresponding pre-processing and augmentation operations.
+    The shape of input data to all operations is [height, width, channels].
+
+    Args:
+        transforms (list): A list of data pre-processing or augmentation operations. An empty list means only reading images, with no transformation.
+        to_rgb (bool, optional): Whether to convert the image to RGB color space. Default: True.
+
+    Raises:
+        TypeError: When 'transforms' is not a list.
+        ValueError: When the length of 'transforms' is less than 1.
+    """
+
+    def __init__(self, transforms, to_rgb=True):
+        if not isinstance(transforms, list):
+            raise TypeError('The transforms must be a list!')
+        self.transforms = transforms
+        self.to_rgb = to_rgb
+
+    def __call__(self, im, label=None):
+        """
+        Args:
+            im (str|np.ndarray): It is either image path or image object.
+            label (str|np.ndarray): It is either label path or label ndarray.
+
+        Returns:
+            (tuple). A tuple including image and label after transformation.
+        """
+        if isinstance(im, str):
+            im_path = im
+            im = cv2.imread(im_path)
+            # cv2.imread returns None instead of raising when the file is
+            # missing or unreadable, so check before any further processing.
+            if im is None:
+                raise ValueError(
+                    "Can't read the image file {}!".format(im_path))
+            im = im.astype('float32')
+        if isinstance(label, str):
+            label = np.asarray(Image.open(label))
+        if self.to_rgb:
+            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+
+        for op in self.transforms:
+            outputs = op(im, label)
+            im = outputs[0]
+            if len(outputs) == 2:
+                label = outputs[1]
+        im = np.transpose(im, (2, 0, 1))
+        return (im, label)
+
+
+@manager.TRANSFORMS.add_component
+class RandomHorizontalFlip:
+    """
+    Flip an image horizontally with a certain probability.
+
+    Args:
+        prob (float, optional): A probability of horizontally flipping. Default: 0.5.
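+
+    Examples:
+        A minimal sketch; random data stands in for a real image, and the import
+        assumes the class is re-exported from ``paddleseg.transforms``:
+
+        .. code-block:: python
+
+            import numpy as np
+            from paddleseg.transforms import RandomHorizontalFlip
+
+            flip = RandomHorizontalFlip(prob=1.0)  # prob=1.0 flips every time
+            im = np.random.rand(16, 16, 3).astype('float32')
+            flipped, = flip(im)
+            assert np.allclose(flipped, im[:, ::-1, :])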
+ """ + + def __init__(self, prob=0.5): + self.prob = prob + + def __call__(self, im, label=None): + if random.random() < self.prob: + im = functional.horizontal_flip(im) + if label is not None: + label = functional.horizontal_flip(label) + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class RandomVerticalFlip: + """ + Flip an image vertically with a certain probability. + + Args: + prob (float, optional): A probability of vertical flipping. Default: 0.1. + """ + + def __init__(self, prob=0.1): + self.prob = prob + + def __call__(self, im, label=None): + if random.random() < self.prob: + im = functional.vertical_flip(im) + if label is not None: + label = functional.vertical_flip(label) + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class Resize: + """ + Resize an image. + + Args: + target_size (list|tuple, optional): The target size of image. Default: (512, 512). + interp (str, optional): The interpolation mode of resize is consistent with opencv. + ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM']. Note that when it is + 'RANDOM', a random interpolation mode would be specified. Default: "LINEAR". + + Raises: + TypeError: When 'target_size' type is neither list nor tuple. + ValueError: When "interp" is out of pre-defined methods ('NEAREST', 'LINEAR', 'CUBIC', + 'AREA', 'LANCZOS4', 'RANDOM'). + """ + + # The interpolation mode + interp_dict = { + 'NEAREST': cv2.INTER_NEAREST, + 'LINEAR': cv2.INTER_LINEAR, + 'CUBIC': cv2.INTER_CUBIC, + 'AREA': cv2.INTER_AREA, + 'LANCZOS4': cv2.INTER_LANCZOS4 + } + + def __init__(self, target_size=(512, 512), interp='LINEAR'): + self.interp = interp + if not (interp == "RANDOM" or interp in self.interp_dict): + raise ValueError("`interp` should be one of {}".format( + self.interp_dict.keys())) + if isinstance(target_size, list) or isinstance(target_size, tuple): + if len(target_size) != 2: + raise ValueError( + '`target_size` should include 2 elements, but it is {}'. + format(target_size)) + else: + raise TypeError( + "Type of `target_size` is invalid. It should be list or tuple, but it is {}" + .format(type(target_size))) + + self.target_size = target_size + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label), + + Raises: + TypeError: When the 'img' type is not numpy. + ValueError: When the length of "im" shape is not 3. + """ + + if not isinstance(im, np.ndarray): + raise TypeError("Resize: image type is not numpy.") + if len(im.shape) != 3: + raise ValueError('Resize: image is not 3-dimensional.') + if self.interp == "RANDOM": + interp = random.choice(list(self.interp_dict.keys())) + else: + interp = self.interp + im = functional.resize(im, self.target_size, self.interp_dict[interp]) + if label is not None: + label = functional.resize(label, self.target_size, + cv2.INTER_NEAREST) + + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class ResizeByLong: + """ + Resize the long side of an image to given size, and then scale the other side proportionally. + + Args: + long_size (int): The target size of long side. + """ + + def __init__(self, long_size): + self.long_size = long_size + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. 
+ label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). + """ + + im = functional.resize_long(im, self.long_size) + if label is not None: + label = functional.resize_long(label, self.long_size, + cv2.INTER_NEAREST) + + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class LimitLong: + """ + Limit the long edge of image. + + If the long edge is larger than max_long, resize the long edge + to max_long, while scale the short edge proportionally. + + If the long edge is smaller than min_long, resize the long edge + to min_long, while scale the short edge proportionally. + + Args: + max_long (int, optional): If the long edge of image is larger than max_long, + it will be resize to max_long. Default: None. + min_long (int, optional): If the long edge of image is smaller than min_long, + it will be resize to min_long. Default: None. + """ + + def __init__(self, max_long=None, min_long=None): + if max_long is not None: + if not isinstance(max_long, int): + raise TypeError( + "Type of `max_long` is invalid. It should be int, but it is {}" + .format(type(max_long))) + if min_long is not None: + if not isinstance(min_long, int): + raise TypeError( + "Type of `min_long` is invalid. It should be int, but it is {}" + .format(type(min_long))) + if (max_long is not None) and (min_long is not None): + if min_long > max_long: + raise ValueError( + '`max_long should not smaller than min_long, but they are {} and {}' + .format(max_long, min_long)) + self.max_long = max_long + self.min_long = min_long + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). + """ + h, w = im.shape[0], im.shape[1] + long_edge = max(h, w) + target = long_edge + if (self.max_long is not None) and (long_edge > self.max_long): + target = self.max_long + elif (self.min_long is not None) and (long_edge < self.min_long): + target = self.min_long + + if target != long_edge: + im = functional.resize_long(im, target) + if label is not None: + label = functional.resize_long(label, target, cv2.INTER_NEAREST) + + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class ResizeRangeScaling: + """ + Resize the long side of an image into a range, and then scale the other side proportionally. + + Args: + min_value (int, optional): The minimum value of long side after resize. Default: 400. + max_value (int, optional): The maximum value of long side after resize. Default: 600. + """ + + def __init__(self, min_value=400, max_value=600): + if min_value > max_value: + raise ValueError('min_value must be less than max_value, ' + 'but they are {} and {}.'.format( + min_value, max_value)) + self.min_value = min_value + self.max_value = max_value + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). 
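+
+        Examples:
+            A minimal sketch; random data stands in for a real image, and the
+            import assumes the class is re-exported from ``paddleseg.transforms``:
+
+            .. code-block:: python
+
+                import numpy as np
+                from paddleseg.transforms import ResizeRangeScaling
+
+                resize = ResizeRangeScaling(min_value=400, max_value=600)
+                im = np.random.rand(300, 200, 3).astype('float32')
+                out, = resize(im)
+                assert 400 <= max(out.shape[:2]) <= 600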
+ """ + + if self.min_value == self.max_value: + random_size = self.max_value + else: + random_size = int( + np.random.uniform(self.min_value, self.max_value) + 0.5) + im = functional.resize_long(im, random_size, cv2.INTER_LINEAR) + if label is not None: + label = functional.resize_long(label, random_size, + cv2.INTER_NEAREST) + + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class ResizeStepScaling: + """ + Scale an image proportionally within a range. + + Args: + min_scale_factor (float, optional): The minimum scale. Default: 0.75. + max_scale_factor (float, optional): The maximum scale. Default: 1.25. + scale_step_size (float, optional): The scale interval. Default: 0.25. + + Raises: + ValueError: When min_scale_factor is smaller than max_scale_factor. + """ + + def __init__(self, + min_scale_factor=0.75, + max_scale_factor=1.25, + scale_step_size=0.25): + if min_scale_factor > max_scale_factor: + raise ValueError( + 'min_scale_factor must be less than max_scale_factor, ' + 'but they are {} and {}.'.format(min_scale_factor, + max_scale_factor)) + self.min_scale_factor = min_scale_factor + self.max_scale_factor = max_scale_factor + self.scale_step_size = scale_step_size + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). + """ + + if self.min_scale_factor == self.max_scale_factor: + scale_factor = self.min_scale_factor + + elif self.scale_step_size == 0: + scale_factor = np.random.uniform(self.min_scale_factor, + self.max_scale_factor) + + else: + num_steps = int((self.max_scale_factor - self.min_scale_factor) / + self.scale_step_size + 1) + scale_factors = np.linspace(self.min_scale_factor, + self.max_scale_factor, + num_steps).tolist() + np.random.shuffle(scale_factors) + scale_factor = scale_factors[0] + w = int(round(scale_factor * im.shape[1])) + h = int(round(scale_factor * im.shape[0])) + + im = functional.resize(im, (w, h), cv2.INTER_LINEAR) + if label is not None: + label = functional.resize(label, (w, h), cv2.INTER_NEAREST) + + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class Normalize: + """ + Normalize an image. + + Args: + mean (list, optional): The mean value of a data set. Default: [0.5, 0.5, 0.5]. + std (list, optional): The standard deviation of a data set. Default: [0.5, 0.5, 0.5]. + + Raises: + ValueError: When mean/std is not list or any value in std is 0. + """ + + def __init__(self, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)): + self.mean = mean + self.std = std + if not (isinstance(self.mean, (list, tuple)) + and isinstance(self.std, (list, tuple))): + raise ValueError( + "{}: input type is invalid. It should be list or tuple".format( + self)) + from functools import reduce + if reduce(lambda x, y: x * y, self.std) == 0: + raise ValueError('{}: std is invalid!'.format(self)) + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). 
+ """ + + mean = np.array(self.mean)[np.newaxis, np.newaxis, :] + std = np.array(self.std)[np.newaxis, np.newaxis, :] + im = functional.normalize(im, mean, std) + + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class Padding: + """ + Add bottom-right padding to a raw image or annotation image. + + Args: + target_size (list|tuple): The target size after padding. + im_padding_value (list, optional): The padding value of raw image. + Default: [127.5, 127.5, 127.5]. + label_padding_value (int, optional): The padding value of annotation image. Default: 255. + + Raises: + TypeError: When target_size is neither list nor tuple. + ValueError: When the length of target_size is not 2. + """ + + def __init__(self, + target_size, + im_padding_value=(127.5, 127.5, 127.5), + label_padding_value=255): + if isinstance(target_size, list) or isinstance(target_size, tuple): + if len(target_size) != 2: + raise ValueError( + '`target_size` should include 2 elements, but it is {}'. + format(target_size)) + else: + raise TypeError( + "Type of target_size is invalid. It should be list or tuple, now is {}" + .format(type(target_size))) + self.target_size = target_size + self.im_padding_value = im_padding_value + self.label_padding_value = label_padding_value + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). + """ + + im_height, im_width = im.shape[0], im.shape[1] + if isinstance(self.target_size, int): + target_height = self.target_size + target_width = self.target_size + else: + target_height = self.target_size[1] + target_width = self.target_size[0] + pad_height = target_height - im_height + pad_width = target_width - im_width + if pad_height < 0 or pad_width < 0: + raise ValueError( + 'The size of image should be less than `target_size`, but the size of image ({}, {}) is larger than `target_size` ({}, {})' + .format(im_width, im_height, target_width, target_height)) + else: + im = cv2.copyMakeBorder( + im, + 0, + pad_height, + 0, + pad_width, + cv2.BORDER_CONSTANT, + value=self.im_padding_value) + if label is not None: + label = cv2.copyMakeBorder( + label, + 0, + pad_height, + 0, + pad_width, + cv2.BORDER_CONSTANT, + value=self.label_padding_value) + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class RandomPaddingCrop: + """ + Crop a sub-image from a raw image and annotation image randomly. If the target cropping size + is larger than original image, then the bottom-right padding will be added. + + Args: + crop_size (tuple, optional): The target cropping size. Default: (512, 512). + im_padding_value (list, optional): The padding value of raw image. + Default: [127.5, 127.5, 127.5]. + label_padding_value (int, optional): The padding value of annotation image. Default: 255. + + Raises: + TypeError: When crop_size is neither list nor tuple. + ValueError: When the length of crop_size is not 2. + """ + + def __init__(self, + crop_size=(512, 512), + im_padding_value=(127.5, 127.5, 127.5), + label_padding_value=255): + if isinstance(crop_size, list) or isinstance(crop_size, tuple): + if len(crop_size) != 2: + raise ValueError( + 'Type of `crop_size` is list or tuple. It should include 2 elements, but it is {}' + .format(crop_size)) + else: + raise TypeError( + "The type of `crop_size` is invalid. 
It should be list or tuple, but it is {}" + .format(type(crop_size))) + self.crop_size = crop_size + self.im_padding_value = im_padding_value + self.label_padding_value = label_padding_value + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). + """ + + if isinstance(self.crop_size, int): + crop_width = self.crop_size + crop_height = self.crop_size + else: + crop_width = self.crop_size[0] + crop_height = self.crop_size[1] + + img_height = im.shape[0] + img_width = im.shape[1] + + if img_height == crop_height and img_width == crop_width: + if label is None: + return (im, ) + else: + return (im, label) + else: + pad_height = max(crop_height - img_height, 0) + pad_width = max(crop_width - img_width, 0) + if (pad_height > 0 or pad_width > 0): + im = cv2.copyMakeBorder( + im, + 0, + pad_height, + 0, + pad_width, + cv2.BORDER_CONSTANT, + value=self.im_padding_value) + if label is not None: + label = cv2.copyMakeBorder( + label, + 0, + pad_height, + 0, + pad_width, + cv2.BORDER_CONSTANT, + value=self.label_padding_value) + img_height = im.shape[0] + img_width = im.shape[1] + + if crop_height > 0 and crop_width > 0: + h_off = np.random.randint(img_height - crop_height + 1) + w_off = np.random.randint(img_width - crop_width + 1) + + im = im[h_off:(crop_height + h_off), w_off:( + w_off + crop_width), :] + if label is not None: + label = label[h_off:(crop_height + h_off), w_off:( + w_off + crop_width)] + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class RandomBlur: + """ + Blurring an image by a Gaussian function with a certain probability. + + Args: + prob (float, optional): A probability of blurring an image. Default: 0.1. + """ + + def __init__(self, prob=0.1): + self.prob = prob + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). + """ + + if self.prob <= 0: + n = 0 + elif self.prob >= 1: + n = 1 + else: + n = int(1.0 / self.prob) + if n > 0: + if np.random.randint(0, n) == 0: + radius = np.random.randint(3, 10) + if radius % 2 != 1: + radius = radius + 1 + if radius > 9: + radius = 9 + im = cv2.GaussianBlur(im, (radius, radius), 0, 0) + + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class RandomRotation: + """ + Rotate an image randomly with padding. + + Args: + max_rotation (float, optional): The maximum rotation degree. Default: 15. + im_padding_value (list, optional): The padding value of raw image. + Default: [127.5, 127.5, 127.5]. + label_padding_value (int, optional): The padding value of annotation image. Default: 255. + """ + + def __init__(self, + max_rotation=15, + im_padding_value=(127.5, 127.5, 127.5), + label_padding_value=255): + self.max_rotation = max_rotation + self.im_padding_value = im_padding_value + self.label_padding_value = label_padding_value + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). 
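+
+        Example (illustrative; the canvas is enlarged so the rotated image is not clipped):
+            transform = RandomRotation(max_rotation=15)
+            im, label = transform(im, label)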
+ """ + + if self.max_rotation > 0: + (h, w) = im.shape[:2] + do_rotation = np.random.uniform(-self.max_rotation, + self.max_rotation) + pc = (w // 2, h // 2) + r = cv2.getRotationMatrix2D(pc, do_rotation, 1.0) + cos = np.abs(r[0, 0]) + sin = np.abs(r[0, 1]) + + nw = int((h * sin) + (w * cos)) + nh = int((h * cos) + (w * sin)) + + (cx, cy) = pc + r[0, 2] += (nw / 2) - cx + r[1, 2] += (nh / 2) - cy + dsize = (nw, nh) + im = cv2.warpAffine( + im, + r, + dsize=dsize, + flags=cv2.INTER_LINEAR, + borderMode=cv2.BORDER_CONSTANT, + borderValue=self.im_padding_value) + label = cv2.warpAffine( + label, + r, + dsize=dsize, + flags=cv2.INTER_NEAREST, + borderMode=cv2.BORDER_CONSTANT, + borderValue=self.label_padding_value) + + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class RandomScaleAspect: + """ + Crop a sub-image from an original image with a range of area ratio and aspect and + then scale the sub-image back to the size of the original image. + + Args: + min_scale (float, optional): The minimum area ratio of cropped image to the original image. Default: 0.5. + aspect_ratio (float, optional): The minimum aspect ratio. Default: 0.33. + """ + + def __init__(self, min_scale=0.5, aspect_ratio=0.33): + self.min_scale = min_scale + self.aspect_ratio = aspect_ratio + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). + """ + + if self.min_scale != 0 and self.aspect_ratio != 0: + img_height = im.shape[0] + img_width = im.shape[1] + for i in range(0, 10): + area = img_height * img_width + target_area = area * np.random.uniform(self.min_scale, 1.0) + aspectRatio = np.random.uniform(self.aspect_ratio, + 1.0 / self.aspect_ratio) + + dw = int(np.sqrt(target_area * 1.0 * aspectRatio)) + dh = int(np.sqrt(target_area * 1.0 / aspectRatio)) + if (np.random.randint(10) < 5): + tmp = dw + dw = dh + dh = tmp + + if (dh < img_height and dw < img_width): + h1 = np.random.randint(0, img_height - dh) + w1 = np.random.randint(0, img_width - dw) + + im = im[h1:(h1 + dh), w1:(w1 + dw), :] + label = label[h1:(h1 + dh), w1:(w1 + dw)] + im = cv2.resize( + im, (img_width, img_height), + interpolation=cv2.INTER_LINEAR) + label = cv2.resize( + label, (img_width, img_height), + interpolation=cv2.INTER_NEAREST) + break + if label is None: + return (im, ) + else: + return (im, label) + + +@manager.TRANSFORMS.add_component +class RandomDistort: + """ + Distort an image with random configurations. + + Args: + brightness_range (float, optional): A range of brightness. Default: 0.5. + brightness_prob (float, optional): A probability of adjusting brightness. Default: 0.5. + contrast_range (float, optional): A range of contrast. Default: 0.5. + contrast_prob (float, optional): A probability of adjusting contrast. Default: 0.5. + saturation_range (float, optional): A range of saturation. Default: 0.5. + saturation_prob (float, optional): A probability of adjusting saturation. Default: 0.5. + hue_range (int, optional): A range of hue. Default: 18. + hue_prob (float, optional): A probability of adjusting hue. Default: 0.5. 
+ """ + + def __init__(self, + brightness_range=0.5, + brightness_prob=0.5, + contrast_range=0.5, + contrast_prob=0.5, + saturation_range=0.5, + saturation_prob=0.5, + hue_range=18, + hue_prob=0.5): + self.brightness_range = brightness_range + self.brightness_prob = brightness_prob + self.contrast_range = contrast_range + self.contrast_prob = contrast_prob + self.saturation_range = saturation_range + self.saturation_prob = saturation_prob + self.hue_range = hue_range + self.hue_prob = hue_prob + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). + """ + + brightness_lower = 1 - self.brightness_range + brightness_upper = 1 + self.brightness_range + contrast_lower = 1 - self.contrast_range + contrast_upper = 1 + self.contrast_range + saturation_lower = 1 - self.saturation_range + saturation_upper = 1 + self.saturation_range + hue_lower = -self.hue_range + hue_upper = self.hue_range + ops = [ + functional.brightness, functional.contrast, functional.saturation, + functional.hue + ] + random.shuffle(ops) + params_dict = { + 'brightness': { + 'brightness_lower': brightness_lower, + 'brightness_upper': brightness_upper + }, + 'contrast': { + 'contrast_lower': contrast_lower, + 'contrast_upper': contrast_upper + }, + 'saturation': { + 'saturation_lower': saturation_lower, + 'saturation_upper': saturation_upper + }, + 'hue': { + 'hue_lower': hue_lower, + 'hue_upper': hue_upper + } + } + prob_dict = { + 'brightness': self.brightness_prob, + 'contrast': self.contrast_prob, + 'saturation': self.saturation_prob, + 'hue': self.hue_prob + } + im = im.astype('uint8') + im = Image.fromarray(im) + for id in range(len(ops)): + params = params_dict[ops[id].__name__] + prob = prob_dict[ops[id].__name__] + params['im'] = im + if np.random.uniform(0, 1) < prob: + im = ops[id](**params) + im = np.asarray(im).astype('float32') + if label is None: + return (im, ) + else: + return (im, label) diff --git a/contrib/PanopticDeepLab/paddleseg/utils/__init__.py b/contrib/PanopticDeepLab/paddleseg/utils/__init__.py new file mode 100644 index 0000000000..1d01505947 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/utils/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import logger +from . import download +from . 
import metrics
+from .env import seg_env, get_sys_env
+from .utils import *
+from .timer import TimeAverager, calculate_eta
+from .visualize import cityscape_colormap
+from .visualize import visualize_semantic, visualize_instance, visualize_panoptic
+from .config_check import config_check
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/config_check.py b/contrib/PanopticDeepLab/paddleseg/utils/config_check.py
new file mode 100644
index 0000000000..47a7049823
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/utils/config_check.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+
+def config_check(cfg, train_dataset=None, val_dataset=None):
+    """
+    Check the config.
+
+    Args:
+        cfg (paddleseg.cvlibs.Config): An object of paddleseg.cvlibs.Config.
+        train_dataset (paddle.io.Dataset, optional): Used to read and process training datasets.
+        val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets.
+    """
+
+    num_classes_check(cfg, train_dataset, val_dataset)
+
+
+def num_classes_check(cfg, train_dataset, val_dataset):
+    """
+    Check that the num_classes in model, train_dataset and val_dataset is consistent.
+    """
+    num_classes_set = set()
+    if train_dataset and hasattr(train_dataset, 'num_classes'):
+        num_classes_set.add(train_dataset.num_classes)
+    if val_dataset and hasattr(val_dataset, 'num_classes'):
+        num_classes_set.add(val_dataset.num_classes)
+    if cfg.dic.get('model', None) and cfg.dic['model'].get('num_classes', None):
+        num_classes_set.add(cfg.dic['model'].get('num_classes'))
+    if (not cfg.train_dataset) and (not cfg.val_dataset):
+        raise ValueError(
+            'One of `train_dataset` or `val_dataset` should be given, but there are none.'
+        )
+    if len(num_classes_set) == 0:
+        raise ValueError(
+            '`num_classes` is not found. Please set it in model, train_dataset or val_dataset.'
+        )
+    elif len(num_classes_set) > 1:
+        raise ValueError(
+            '`num_classes` is not consistent: {}. Please set it consistently in model, train_dataset and val_dataset.'
+            .format(num_classes_set))
+    else:
+        num_classes = num_classes_set.pop()
+        if train_dataset:
+            train_dataset.num_classes = num_classes
+        if val_dataset:
+            val_dataset.num_classes = num_classes
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/download.py b/contrib/PanopticDeepLab/paddleseg/utils/download.py
new file mode 100644
index 0000000000..7b4a1c3a36
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/utils/download.py
@@ -0,0 +1,163 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +import os +import shutil +import sys +import tarfile +import time +import zipfile + +import requests + +lasttime = time.time() +FLUSH_INTERVAL = 0.1 + + +def progress(str, end=False): + global lasttime + if end: + str += "\n" + lasttime = 0 + if time.time() - lasttime >= FLUSH_INTERVAL: + sys.stdout.write("\r%s" % str) + lasttime = time.time() + sys.stdout.flush() + + +def _download_file(url, savepath, print_progress): + if print_progress: + print("Connecting to {}".format(url)) + r = requests.get(url, stream=True, timeout=15) + total_length = r.headers.get('content-length') + + if total_length is None: + with open(savepath, 'wb') as f: + shutil.copyfileobj(r.raw, f) + else: + with open(savepath, 'wb') as f: + dl = 0 + total_length = int(total_length) + starttime = time.time() + if print_progress: + print("Downloading %s" % os.path.basename(savepath)) + for data in r.iter_content(chunk_size=4096): + dl += len(data) + f.write(data) + if print_progress: + done = int(50 * dl / total_length) + progress("[%-50s] %.2f%%" % + ('=' * done, float(100 * dl) / total_length)) + if print_progress: + progress("[%-50s] %.2f%%" % ('=' * 50, 100), end=True) + + +def _uncompress_file_zip(filepath, extrapath): + files = zipfile.ZipFile(filepath, 'r') + filelist = files.namelist() + rootpath = filelist[0] + total_num = len(filelist) + for index, file in enumerate(filelist): + files.extract(file, extrapath) + yield total_num, index, rootpath + files.close() + yield total_num, index, rootpath + + +def _uncompress_file_tar(filepath, extrapath, mode="r:gz"): + files = tarfile.open(filepath, mode) + filelist = files.getnames() + total_num = len(filelist) + rootpath = filelist[0] + for index, file in enumerate(filelist): + files.extract(file, extrapath) + yield total_num, index, rootpath + files.close() + yield total_num, index, rootpath + + +def _uncompress_file(filepath, extrapath, delete_file, print_progress): + if print_progress: + print("Uncompress %s" % os.path.basename(filepath)) + + if filepath.endswith("zip"): + handler = _uncompress_file_zip + elif filepath.endswith("tgz"): + handler = functools.partial(_uncompress_file_tar, mode="r:*") + else: + handler = functools.partial(_uncompress_file_tar, mode="r") + + for total_num, index, rootpath in handler(filepath, extrapath): + if print_progress: + done = int(50 * float(index) / total_num) + progress( + "[%-50s] %.2f%%" % ('=' * done, float(100 * index) / total_num)) + if print_progress: + progress("[%-50s] %.2f%%" % ('=' * 50, 100), end=True) + + if delete_file: + os.remove(filepath) + + return rootpath + + +def download_file_and_uncompress(url, + savepath=None, + extrapath=None, + extraname=None, + print_progress=True, + cover=False, + delete_file=True): + if savepath is None: + savepath = "." + + if extrapath is None: + extrapath = "." 
+ + savename = url.split("/")[-1] + if not os.path.exists(savepath): + os.makedirs(savepath) + + savepath = os.path.join(savepath, savename) + savename = ".".join(savename.split(".")[:-1]) + savename = os.path.join(extrapath, savename) + extraname = savename if extraname is None else os.path.join( + extrapath, extraname) + + if cover: + if os.path.exists(savepath): + shutil.rmtree(savepath) + if os.path.exists(savename): + shutil.rmtree(savename) + if os.path.exists(extraname): + shutil.rmtree(extraname) + + if not os.path.exists(extraname): + if not os.path.exists(savename): + if not os.path.exists(savepath): + _download_file(url, savepath, print_progress) + + if (not tarfile.is_tarfile(savepath)) and ( + not zipfile.is_zipfile(savepath)): + if not os.path.exists(extraname): + os.makedirs(extraname) + shutil.move(savepath, extraname) + return extraname + + savename = _uncompress_file(savepath, extrapath, delete_file, + print_progress) + savename = os.path.join(extrapath, savename) + shutil.move(savename, extraname) + return extraname diff --git a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/__init__.py b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/__init__.py new file mode 100644 index 0000000000..7c86ed1641 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .semantic import SemanticEvaluator +from .instance import InstanceEvaluator +from .panoptic import PanopticEvaluator diff --git a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/instance.py b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/instance.py new file mode 100644 index 0000000000..1230c4d98e --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/instance.py @@ -0,0 +1,345 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import defaultdict, OrderedDict + +import numpy as np + + +class InstanceEvaluator(object): + """ + Refer to 'https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py' + Calculate the matching results of each image, each class, each IoU, and then get the final + matching results of each class and each IoU of dataset. Base on the matching results, the AP + and mAP can be calculated. 
+    we need two vectors for each class and for each overlap:
+    The first vector (y_true) is binary and is 1 where the ground truth says true,
+    and is 0 otherwise.
+    The second vector (y_score) is float [0...1] and represents the confidence of
+    the prediction.
+    We represent the following cases as:
+                                              | y_true | y_score
+        gt instance with matched prediction   |   1    | confidence
+        gt instance w/o matched prediction    |   1    | 0.0
+        false positive prediction             |   0    | confidence
+    The current implementation only makes sense for an overlap threshold >= 0.5,
+    since only then a single prediction can either be ignored or matched, but
+    never both. Further, it can never match two gt instances.
+    For matching, we vary the overlap and do the following steps:
+        1.) remove all predictions that satisfy the overlap criterion with an ignore region (either void or *group)
+        2.) remove matches that do not satisfy the overlap
+        3.) mark non-matched predictions as false positive
+    During processing, 0 represents the first 'thing' class, so labels here are
+    smaller by 1 than the dataset labels.
+
+    Args:
+        num_classes (int): The unique number of target classes. Excludes the background class,
+            which is usually labeled 0.
+        overlaps (float|list): The IoU threshold(s).
+        thing_list (list|None): Thing classes; AP is calculated only for these classes.
+    """
+
+    def __init__(self, num_classes, overlaps=0.5, thing_list=None):
+        super().__init__()
+        self.num_classes = num_classes
+        if isinstance(overlaps, float):
+            overlaps = [overlaps]
+        self.overlaps = overlaps
+        self.y_true = [[np.empty(0) for _i in range(len(overlaps))]
+                       for _j in range(num_classes)]
+        self.y_score = [[np.empty(0) for _i in range(len(overlaps))]
+                        for _j in range(num_classes)]
+        self.hard_fns = [[0] * len(overlaps) for _ in range(num_classes)]
+
+        if thing_list is None:
+            self.thing_list = list(range(num_classes))
+        else:
+            self.thing_list = thing_list
+
+    def update(self, preds, gts, ignore_mask=None):
+        """
+        Compute y_true and y_score for one image.
+
+        Args:
+            preds (list): Tuple list [(label, confidence, mask), ...].
+            gts (list): Tuple list [(label, mask), ...].
+            ignore_mask (np.ndarray, optional): Mask to ignore.
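+
+        Example (illustrative; masks are binary np.ndarrays of the image shape):
+            preds = [(0, 0.9, pred_mask)]  # one class-0 prediction with confidence 0.9
+            gts = [(0, gt_mask)]           # one matching class-0 ground-truth instance
+            evaluator.update(preds, gts)   # `evaluator` is an InstanceEvaluator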
+ """ + + pred_instances, gt_instances = self.get_instances( + preds, gts, ignore_mask=ignore_mask) + + for i in range(self.num_classes): + if i not in self.thing_list: + continue + for oi, oth in enumerate(self.overlaps): + cur_true = np.ones((len(gt_instances[i]))) + cur_score = np.ones(len(gt_instances[i])) * (-float("inf")) + cur_match = np.zeros(len(gt_instances[i]), dtype=np.bool) + for gti, gt_instance in enumerate(gt_instances[i]): + found_match = False + for pred_instance in gt_instance['matched_pred']: + overlap = float(pred_instance['intersection']) / ( + gt_instance['pixel_count'] + + pred_instance['pixel_count'] - + pred_instance['intersection']) + if overlap > oth: + confidence = pred_instance['confidence'] + + # if we already has a prediction for this groundtruth + # the prediction with the lower score is automatically a false positive + if cur_match[gti]: + max_score = max(cur_score[gti], confidence) + min_score = min(cur_score[gti], confidence) + cur_score = max_score + # append false positive + cur_true = np.append(cur_true, 0) + cur_score = np.append(cur_score, min_score) + cur_match = np.append(cur_match, True) + # otherwise set score + else: + found_match = True + cur_match[gti] = True + cur_score[gti] = confidence + + if not found_match: + self.hard_fns[i][oi] += 1 + # remove not-matched ground truth instances + cur_true = cur_true[cur_match == True] + cur_score = cur_score[cur_match == True] + + # collect not-matched predictions as false positive + for pred_instance in pred_instances[i]: + found_gt = False + for gt_instance in pred_instance['matched_gt']: + overlap = float(gt_instance['intersection']) / ( + gt_instance['pixel_count'] + + pred_instance['pixel_count'] - + gt_instance['intersection']) + if overlap > oth: + found_gt = True + break + if not found_gt: + proportion_ignore = 0 + if ignore_mask is not None: + nb_ignore_pixels = pred_instance[ + 'void_intersection'] + proportion_ignore = float( + nb_ignore_pixels) / pred_instance['pixel_count'] + if proportion_ignore <= oth: + cur_true = np.append(cur_true, 0) + cur_score = np.append(cur_score, + pred_instance['confidence']) + self.y_true[i][oi] = np.append(self.y_true[i][oi], cur_true) + self.y_score[i][oi] = np.append(self.y_score[i][oi], cur_score) + + def evaluate(self): + ap = self.cal_ap() + map = self.cal_map() + + res = {} + res["AP"] = [{i: ap[i] * 100} for i in self.thing_list] + res["mAP"] = 100 * map + + results = OrderedDict({"ins_seg": res}) + return results + + def cal_ap(self): + """ + calculate ap for every classes + """ + self.ap = [0] * self.num_classes + self.ap_overlap = [[0] * len(self.overlaps) + for _ in range(self.num_classes)] + for i in range(self.num_classes): + if i not in self.thing_list: + continue + for j in range(len(self.overlaps)): + y_true = self.y_true[i][j] + y_score = self.y_score[i][j] + if len(y_true) == 0: + self.ap_overlap[i][j] = 0 + continue + score_argsort = np.argsort(y_score) + y_score_sorted = y_score[score_argsort] + y_true_sorted = y_true[score_argsort] + y_true_sorted_cumsum = np.cumsum(y_true_sorted) + + # unique thresholds + thresholds, unique_indices = np.unique( + y_score_sorted, return_index=True) + + # since we need to add an artificial point to the precision-recall curve + # increase its length by 1 + nb_pr = len(unique_indices) + 1 + + # calculate precision and recall + nb_examples = len(y_score_sorted) + nb_true_exampels = y_true_sorted_cumsum[-1] + precision = np.zeros(nb_pr) + recall = np.zeros(nb_pr) + + # deal with the first point + # only thing 
we need to do, is to append a zero to the cumsum at the end. + # an index of -1 uses that zero then + y_true_sorted_cumsum = np.append(y_true_sorted_cumsum, 0) + + # deal with remaining + for idx_res, idx_scores in enumerate(unique_indices): + cumsum = y_true_sorted_cumsum[idx_scores - 1] + tp = nb_true_exampels - cumsum + fp = nb_examples - idx_scores - tp + fn = cumsum + self.hard_fns[i][j] + p = float(tp) / (tp + fp) + r = float(tp) / (tp + fn) + precision[idx_res] = p + recall[idx_res] = r + + # add first point in curve + precision[-1] = 1. + # In some calculation,make precision the max after this point in curve. + #precision = [np.max(precision[:i+1]) for i in range(len(precision))] + recall[-1] = 0. + + # compute average of precision-recall curve + # integration is performed via zero order, or equivalently step-wise integration + # first compute the widths of each step: + # use a convolution with appropriate kernel, manually deal with the boundaries first + recall_for_conv = np.copy(recall) + recall_for_conv = np.append(recall_for_conv[0], recall_for_conv) + recall_for_conv = np.append(recall_for_conv, 0.) + + step_widths = np.convolve(recall_for_conv, [-0.5, 0, 0.5], + 'valid') + + # integrate is now simply a dot product + ap_current = np.dot(precision, step_widths) + self.ap_overlap[i][j] = ap_current + + ap = [np.average(i) for i in self.ap_overlap] + self.ap = ap + + return ap + + def cal_map(self): + """ + calculate map for all classes + """ + self.cal_ap() + valid_ap = [self.ap[i] for i in self.thing_list] + map = np.mean(valid_ap) + self.map = map + + return map + + def get_instances(self, preds, gts, ignore_mask=None): + """ + In this method, we create two dicts of list + - pred_instances: contains all predictions and their associated gt + - gtInstances: contains all gt instances and their associated predictions + Args: + preds (list): Prediction of image. + gts (list): Ground truth of image. + Return: + dict: pred_instances, the type is dict(list(dict))), e.g. {0: [{'pred_id':0, 'label':0', + 'pixel_count':100, 'confidence': 0.9, 'void_intersection': 0, + 'matched_gt': [gt_instance0, gt_instance1, ...]}, ], 1: } + dict: gt_instances, the type is dict(list(dict))), e.g. 
+                {0: [{'inst_id': 0, 'label': 0, 'pixel_count': 100, 'mask': np.ndarray,
+                'matched_pred': [pred_instance0, pred_instance1, ...]}, ], 1: }
+        """
+
+        pred_instances = defaultdict(list)
+        gt_instances = defaultdict(list)
+
+        gt_inst_count = 0
+        for gt in gts:
+            label, mask = gt
+            gt_instance = defaultdict(list)
+            gt_instance['inst_id'] = gt_inst_count
+            gt_instance['label'] = label
+            gt_instance['pixel_count'] = np.count_nonzero(mask)
+            gt_instance['mask'] = mask
+            gt_instances[label].append(gt_instance)
+            gt_inst_count += 1
+
+        pred_inst_count = 0
+        for pred in preds:
+            label, conf, mask = pred
+            pred_instance = defaultdict(list)
+            pred_instance['label'] = label
+            pred_instance['pred_id'] = pred_inst_count
+            pred_instance['pixel_count'] = np.count_nonzero(mask)
+            pred_instance['confidence'] = conf
+            if ignore_mask is not None:
+                pred_instance['void_intersection'] = np.count_nonzero(
+                    np.logical_and(mask, ignore_mask))
+
+            # Loop through all ground truth instances with matching label
+            matched_gt = []
+            for gt_num, gt_instance in enumerate(gt_instances[label]):
+                intersection = np.count_nonzero(
+                    np.logical_and(mask, gt_instances[label][gt_num]['mask']))
+                if intersection > 0:
+                    gt_copy = gt_instance.copy()
+                    pred_copy = pred_instance.copy()
+
+                    gt_copy['intersection'] = intersection
+                    pred_copy['intersection'] = intersection
+
+                    matched_gt.append(gt_copy)
+                    gt_instances[label][gt_num]['matched_pred'].append(
+                        pred_copy)
+
+            pred_instance['matched_gt'] = matched_gt
+            pred_inst_count += 1
+            pred_instances[label].append(pred_instance)
+
+        return pred_instances, gt_instances
+
+    @staticmethod
+    def convert_gt_map(seg_map, ins_map):
+        """
+        Convert the ground truth with format (H * W) to the format that satisfies the AP calculation.
+        Args:
+            seg_map (np.ndarray): The semantic segmentation map with shape H * W. Value is 0, 1, 2, ...
+            ins_map (np.ndarray): The instance segmentation map with shape H * W. Value is 0, 1, 2, ...
+        Returns:
+            list: Tuple list like [(label, mask), ...].
+        """
+        gts = []
+        instance_cnt = np.unique(ins_map)
+        for i in instance_cnt:
+            if i == 0:
+                continue
+            mask = ins_map == i
+            label = seg_map[mask][0]
+            gts.append((label, mask.astype('int32')))
+        return gts
+
+    @staticmethod
+    def convert_pred_map(seg_pred, pan_pred):
+        """
+        Convert the predictions with format (H * W) to the format that satisfies the AP calculation.
+        Args:
+            seg_pred (np.ndarray): The semantic segmentation map with shape C * H * W. Value is probability.
+            pan_pred (np.ndarray): Panoptic predictions: void_label, stuff_id * label_divisor, or
+                thing_id * label_divisor + ins_id, where ins_id >= 1.
+        Returns:
+            list: Tuple list like [(label, score, mask), ...].
+        """
+        preds = []
+        instance_cnt = np.unique(pan_pred)
+        # a label_divisor of 1000 is assumed here to split category id and instance id
+        for i in instance_cnt:
+            if (i < 1000) or (i % 1000 == 0):
+                continue
+            mask = pan_pred == i
+            label = i // 1000
+            score = np.mean(seg_pred[label][mask])
+            preds.append((label, score, mask.astype('int32')))
+        return preds
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/panoptic.py b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/panoptic.py
new file mode 100644
index 0000000000..9c930bcc69
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/panoptic.py
@@ -0,0 +1,220 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/mcordts/cityscapesScripts/blob/aeb7b82531f86185ce287705be28f452ba3ddbb8/cityscapesscripts/evaluation/evalPanopticSemanticLabeling.py
+# Modified by Guowei Chen
+# ------------------------------------------------------------------------------
+
+from collections import defaultdict, OrderedDict
+
+import numpy as np
+
+OFFSET = 256 * 256 * 256
+
+
+class PQStatCat():
+    def __init__(self):
+        self.iou = 0.0
+        self.tp = 0
+        self.fp = 0
+        self.fn = 0
+
+    def __iadd__(self, pq_stat_cat):
+        self.iou += pq_stat_cat.iou
+        self.tp += pq_stat_cat.tp
+        self.fp += pq_stat_cat.fp
+        self.fn += pq_stat_cat.fn
+        return self
+
+    def __repr__(self):
+        s = 'iou: ' + str(self.iou) + ' tp: ' + str(self.tp) + ' fp: ' + str(
+            self.fp) + ' fn: ' + str(self.fn)
+        return s
+
+
+class PQStat():
+    def __init__(self, num_classes):
+        self.pq_per_cat = defaultdict(PQStatCat)
+        self.num_classes = num_classes
+
+    def __getitem__(self, i):
+        return self.pq_per_cat[i]
+
+    def __iadd__(self, pq_stat):
+        for label, pq_stat_cat in pq_stat.pq_per_cat.items():
+            self.pq_per_cat[label] += pq_stat_cat
+        return self
+
+    def pq_average(self, isthing=None, thing_list=None):
+        """
+        Calculate the average PQ over classes and the per-class PQ.
+
+        Args:
+            isthing (bool|None, optional): Calculate the average PQ for thing classes if isthing is True,
+                for stuff classes if isthing is False, and for all classes if isthing is None. Default: None.
+            thing_list (list|None, optional): A list of thing classes. It should be provided when isthing
+                is True or False. Default: None.
+        """
+        pq, sq, rq, n = 0, 0, 0, 0
+        per_class_results = {}
+        for label in range(self.num_classes):
+            if isthing is not None:
+                if isthing:
+                    if label not in thing_list:
+                        continue
+                else:
+                    if label in thing_list:
+                        continue
+            iou = self.pq_per_cat[label].iou
+            tp = self.pq_per_cat[label].tp
+            fp = self.pq_per_cat[label].fp
+            fn = self.pq_per_cat[label].fn
+            if tp + fp + fn == 0:
+                per_class_results[label] = {'pq': 0.0, 'sq': 0.0, 'rq': 0.0}
+                continue
+            n += 1
+            pq_class = iou / (tp + 0.5 * fp + 0.5 * fn)
+            sq_class = iou / tp if tp != 0 else 0
+            rq_class = tp / (tp + 0.5 * fp + 0.5 * fn)
+
+            per_class_results[label] = {
+                'pq': pq_class,
+                'sq': sq_class,
+                'rq': rq_class
+            }
+            pq += pq_class
+            sq += sq_class
+            rq += rq_class
+
+        return {
+            'pq': pq / n,
+            'sq': sq / n,
+            'rq': rq / n,
+            'n': n
+        }, per_class_results
+
+
+class PanopticEvaluator:
+    """
+    Evaluate panoptic segmentation.
+    """
+
+    def __init__(self,
+                 num_classes,
+                 thing_list,
+                 ignore_index=255,
+                 label_divisor=1000):
+        self.pq_stat = PQStat(num_classes)
+        self.num_classes = num_classes
+        self.thing_list = thing_list
+        self.ignore_index = ignore_index
+        self.label_divisor = label_divisor
+
+    def update(self, pred, gt):
+        # get the labels and counts for the pred and gt
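+        # Each (gt segment, pred segment) overlap is then found in a single
+        # np.unique pass by packing the id pair into one integer as
+        # gt_id * OFFSET + pred_id; e.g. with OFFSET = 256 ** 3, gt_id 2 and
+        # pred_id 7 pack to 2 * OFFSET + 7 and unpack via // OFFSET and % OFFSET.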
+        gt_labels, gt_labels_counts = np.unique(gt, return_counts=True)
+        pred_labels, pred_labels_counts = np.unique(pred, return_counts=True)
+        gt_segms = defaultdict(dict)
+        pred_segms = defaultdict(dict)
+        for label, label_count in zip(gt_labels, gt_labels_counts):
+            category_id = label // self.label_divisor if label > self.label_divisor else label
+            gt_segms[label]['area'] = label_count
+            gt_segms[label]['category_id'] = category_id
+            # a bare thing category id (no instance id) is treated as crowd
+            gt_segms[label]['iscrowd'] = 1 if label in self.thing_list else 0
+        for label, label_count in zip(pred_labels, pred_labels_counts):
+            category_id = label // self.label_divisor if label > self.label_divisor else label
+            pred_segms[label]['area'] = label_count
+            pred_segms[label]['category_id'] = category_id
+
+        # confusion matrix calculation
+        pan_gt_pred = gt.astype(np.uint64) * OFFSET + pred.astype(np.uint64)
+        gt_pred_map = {}
+        labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True)
+        for label, intersection in zip(labels, labels_cnt):
+            gt_id = label // OFFSET
+            pred_id = label % OFFSET
+            gt_pred_map[(gt_id, pred_id)] = intersection
+
+        # count all matched pairs
+        gt_matched = set()
+        pred_matched = set()
+        for label_tuple, intersection in gt_pred_map.items():
+            gt_label, pred_label = label_tuple
+            if gt_label == self.ignore_index or pred_label == self.ignore_index:
+                continue
+            if gt_segms[gt_label]['iscrowd'] == 1:
+                continue
+            if gt_segms[gt_label]['category_id'] != pred_segms[pred_label][
+                    'category_id']:
+                continue
+            union = pred_segms[pred_label]['area'] + gt_segms[gt_label][
+                'area'] - intersection - gt_pred_map.get(
+                    (self.ignore_index, pred_label), 0)
+            iou = intersection / union
+            if iou > 0.5:
+                self.pq_stat[gt_segms[gt_label]['category_id']].tp += 1
+                self.pq_stat[gt_segms[gt_label]['category_id']].iou += iou
+                gt_matched.add(gt_label)
+                pred_matched.add(pred_label)
+
+        # count false negatives
+        crowd_labels_dict = {}
+        for gt_label, gt_info in gt_segms.items():
+            if gt_label in gt_matched:
+                continue
+            if gt_label == self.ignore_index:
+                continue
+            # ignore crowd
+            if gt_info['iscrowd'] == 1:
+                crowd_labels_dict[gt_info['category_id']] = gt_label
+                continue
+            self.pq_stat[gt_info['category_id']].fn += 1
+
+        # count false positives
+        for pred_label, pred_info in pred_segms.items():
+            if pred_label in pred_matched:
+                continue
+            if pred_label == self.ignore_index:
+                continue
+            # intersection of the segment with self.ignore_index
+            intersection = gt_pred_map.get((self.ignore_index, pred_label), 0)
+            if pred_info['category_id'] in crowd_labels_dict:
+                intersection += gt_pred_map.get(
+                    (crowd_labels_dict[pred_info['category_id']], pred_label),
+                    0)
+            # the predicted segment is ignored if more than half of it corresponds to self.ignore_index regions
+            if intersection / pred_info['area'] > 0.5:
+                continue
+            self.pq_stat[pred_info['category_id']].fp += 1
+
+    def evaluate(self):
+        metrics = [("All", None), ("Things", True), ("Stuff", False)]
+        results = {}
+        for name, isthing in metrics:
+            results[name], per_class_results = self.pq_stat.pq_average(
+                isthing=isthing, thing_list=self.thing_list)
+            if name == 'All':
+                results['per_class'] = per_class_results
+        return OrderedDict(pan_seg=results)
+
+
+if __name__ == '__main__':
+    panoptic_metric = PanopticEvaluator(2, [1])
+    pred = np.zeros((100, 100))
+    gt = np.zeros((100, 100))
+    pred[0:50, 0:50] = 1
+    gt[0:60, 0:60] = 1
+    panoptic_metric.update(pred, gt)
+    print(panoptic_metric.evaluate())
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/semantic.py b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/semantic.py
new file mode 100644
index 0000000000..ca59a6a503
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/semantic.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/master/segmentation/evaluation/semantic.py
+# Modified by Guowei Chen
+# ------------------------------------------------------------------------------
+
+from collections import OrderedDict
+
+import numpy as np
+
+
+class SemanticEvaluator:
+    """
+    Evaluate semantic segmentation.
+    """
+
+    def __init__(self, num_classes, ignore_index=255):
+        """
+        Args:
+            num_classes (int): Number of classes.
+            ignore_index (int): Value in semantic segmentation ground truth. Predictions for the
+                corresponding pixels should be ignored.
+        """
+        self._num_classes = num_classes
+        self._ignore_index = ignore_index
+        self._N = num_classes + 1  # store ignore label in the last class
+
+        self._conf_matrix = np.zeros((self._N, self._N), dtype=np.int64)
+
+    def update(self, pred, gt):
+        pred = pred.astype(np.int64)
+        gt = gt.astype(np.int64)
+        gt[gt == self._ignore_index] = self._num_classes
+
+        # row: pred, column: gt
+        self._conf_matrix += np.bincount(
+            self._N * pred.reshape(-1) + gt.reshape(-1),
+            minlength=self._N**2).reshape(self._N, self._N)
+
+    def evaluate(self):
+        """
+        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
+        * Mean intersection-over-union averaged across classes (mIoU)
+        * Frequency Weighted IoU (fwIoU)
+        * Mean pixel accuracy averaged across classes (mACC)
+        * Pixel Accuracy (pACC)
+        """
+        acc = np.zeros(self._num_classes, dtype=np.float64)
+        iou = np.zeros(self._num_classes, dtype=np.float64)
+        tp = self._conf_matrix.diagonal()[:-1].astype(np.float64)
+        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float64)
+        class_weights = pos_gt / np.sum(pos_gt)
+        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float64)
+
+        acc_valid = pos_pred > 0
+        acc[acc_valid] = tp[acc_valid] / pos_pred[acc_valid]
+        iou_valid = (pos_gt + pos_pred) > 0
+        union = pos_gt + pos_pred - tp
+        iou[acc_valid] = tp[acc_valid] / union[acc_valid]
+        macc = np.sum(acc) / np.sum(acc_valid)
+        miou = np.sum(iou) / np.sum(iou_valid)
+        fiou = np.sum(iou * class_weights)
+        pacc = np.sum(tp) / np.sum(pos_gt)
+
+        res = {}
+        res["mIoU"] = 100 * miou
+        res["fwIoU"] = 100 * fiou
+        res["mACC"] = 100 * macc
+        res["pACC"] = 100 * pacc
+
+        results = OrderedDict({"sem_seg": res})
+        return results
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/logger.py b/contrib/PanopticDeepLab/paddleseg/utils/logger.py
new file mode 100644
index 0000000000..e7ef757635
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/utils/logger.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import time + +import paddle + +levels = {0: 'ERROR', 1: 'WARNING', 2: 'INFO', 3: 'DEBUG'} +log_level = 2 + + +def log(level=2, message=""): + if paddle.distributed.ParallelEnv().local_rank == 0: + current_time = time.time() + time_array = time.localtime(current_time) + current_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array) + if log_level >= level: + print( + "{} [{}]\t{}".format(current_time, levels[level], + message).encode("utf-8").decode("latin1")) + sys.stdout.flush() + + +def debug(message=""): + log(level=3, message=message) + + +def info(message=""): + log(level=2, message=message) + + +def warning(message=""): + log(level=1, message=message) + + +def error(message=""): + log(level=0, message=message) diff --git a/contrib/PanopticDeepLab/paddleseg/utils/metrics.py b/contrib/PanopticDeepLab/paddleseg/utils/metrics.py new file mode 100644 index 0000000000..ad5b3c9758 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/utils/metrics.py @@ -0,0 +1,146 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import paddle +import paddle.nn.functional as F + + +def calculate_area(pred, label, num_classes, ignore_index=255): + """ + Calculate intersect, prediction and label area + + Args: + pred (Tensor): The prediction by model. + label (Tensor): The ground truth of image. + num_classes (int): The unique number of target classes. + ignore_index (int): Specifies a target value that is ignored. Default: 255. + + Returns: + Tensor: The intersection area of prediction and the ground on all class. + Tensor: The prediction area on all class. 
+ Tensor: The ground truth area on all class + """ + if len(pred.shape) == 4: + pred = paddle.squeeze(pred, axis=1) + if len(label.shape) == 4: + label = paddle.squeeze(label, axis=1) + if not pred.shape == label.shape: + raise ValueError('Shape of `pred` and `label should be equal, ' + 'but there are {} and {}.'.format( + pred.shape, label.shape)) + + # Delete ignore_index + mask = label != ignore_index + pred = pred + 1 + label = label + 1 + pred = pred * mask + label = label * mask + pred = F.one_hot(pred, num_classes + 1) + label = F.one_hot(label, num_classes + 1) + pred = pred[:, :, :, 1:] + label = label[:, :, :, 1:] + + pred_area = [] + label_area = [] + intersect_area = [] + + for i in range(num_classes): + pred_i = pred[:, :, :, i] + label_i = label[:, :, :, i] + pred_area_i = paddle.sum(pred_i) + label_area_i = paddle.sum(label_i) + intersect_area_i = paddle.sum(pred_i * label_i) + pred_area.append(pred_area_i) + label_area.append(label_area_i) + intersect_area.append(intersect_area_i) + pred_area = paddle.concat(pred_area) + label_area = paddle.concat(label_area) + intersect_area = paddle.concat(intersect_area) + return intersect_area, pred_area, label_area + + +def mean_iou(intersect_area, pred_area, label_area): + """ + Calculate iou. + + Args: + intersect_area (Tensor): The intersection area of prediction and ground truth on all classes. + pred_area (Tensor): The prediction area on all classes. + label_area (Tensor): The ground truth area on all classes. + + Returns: + np.ndarray: iou on all classes. + float: mean iou of all classes. + """ + intersect_area = intersect_area.numpy() + pred_area = pred_area.numpy() + label_area = label_area.numpy() + union = pred_area + label_area - intersect_area + class_iou = [] + for i in range(len(intersect_area)): + if union[i] == 0: + iou = 0 + else: + iou = intersect_area[i] / union[i] + class_iou.append(iou) + miou = np.mean(class_iou) + return np.array(class_iou), miou + + +def accuracy(intersect_area, pred_area): + """ + Calculate accuracy + + Args: + intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.. + pred_area (Tensor): The prediction area on all classes. + + Returns: + np.ndarray: accuracy on all classes. + float: mean accuracy. + """ + intersect_area = intersect_area.numpy() + pred_area = pred_area.numpy() + class_acc = [] + for i in range(len(intersect_area)): + if pred_area[i] == 0: + acc = 0 + else: + acc = intersect_area[i] / pred_area[i] + class_acc.append(acc) + macc = np.sum(intersect_area) / np.sum(pred_area) + return np.array(class_acc), macc + + +def kappa(intersect_area, pred_area, label_area): + """ + Calculate kappa coefficient + + Args: + intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.. + pred_area (Tensor): The prediction area on all classes. + label_area (Tensor): The ground truth area on all classes. + + Returns: + float: kappa coefficient. 
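+
+    With observed agreement po = sum(intersect_area) / total_area and expected
+    agreement pe = sum(pred_area * label_area) / total_area ** 2, the coefficient
+    computed below is kappa = (po - pe) / (1 - pe).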
+ """ + intersect_area = intersect_area.numpy() + pred_area = pred_area.numpy() + label_area = label_area.numpy() + total_area = np.sum(label_area) + po = np.sum(intersect_area) / total_area + pe = np.sum(pred_area * label_area) / (total_area * total_area) + kappa = (po - pe) / (1 - pe) + return kappa diff --git a/contrib/PanopticDeepLab/paddleseg/utils/paddle.py b/contrib/PanopticDeepLab/paddleseg/utils/paddle.py new file mode 100644 index 0000000000..c4f514b3a7 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/utils/paddle.py @@ -0,0 +1,125 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +import paddle + +dtype_map = { + paddle.fluid.core.VarDesc.VarType.FP32: "float32", + paddle.fluid.core.VarDesc.VarType.FP64: "float64", + paddle.fluid.core.VarDesc.VarType.FP16: "float16", + paddle.fluid.core.VarDesc.VarType.INT32: "int32", + paddle.fluid.core.VarDesc.VarType.INT16: "int16", + paddle.fluid.core.VarDesc.VarType.INT64: "int64", + paddle.fluid.core.VarDesc.VarType.BOOL: "bool", + paddle.fluid.core.VarDesc.VarType.INT16: "int16", + paddle.fluid.core.VarDesc.VarType.UINT8: "uint8", + paddle.fluid.core.VarDesc.VarType.INT8: "int8", +} + + +def convert_dtype_to_string(dtype: str) -> paddle.fluid.core.VarDesc.VarType: + if dtype in dtype_map: + return dtype_map[dtype] + raise TypeError("dtype shoule in %s" % list(dtype_map.keys())) + + +def get_variable_info(var: paddle.static.Variable) -> dict: + if not isinstance(var, paddle.static.Variable): + raise TypeError("var shoule be an instance of paddle.static.Variable") + + var_info = { + 'name': var.name, + 'stop_gradient': var.stop_gradient, + 'is_data': var.is_data, + 'error_clip': var.error_clip, + 'type': var.type + } + + try: + var_info['dtype'] = convert_dtype_to_string(var.dtype) + var_info['lod_level'] = var.lod_level + var_info['shape'] = var.shape + except: + pass + + var_info['persistable'] = var.persistable + + return var_info + + +def convert_syncbn_to_bn(model_filename): + """ + Since SyncBatchNorm does not have a cpu kernel, when exporting the model, the SyncBatchNorm + in the model needs to be converted to BatchNorm. 
+ """ + + def _copy_vars_and_ops_in_blocks(from_block: paddle.device.framework.Block, + to_block: paddle.device.framework.Block): + for var in from_block.vars: + var = from_block.var(var) + var_info = copy.deepcopy(get_variable_info(var)) + if isinstance(var, paddle.device.framework.Parameter): + to_block.create_parameter(**var_info) + else: + to_block.create_var(**var_info) + + for op in from_block.ops: + all_attrs = op.all_attrs() + if 'sub_block' in all_attrs: + _sub_block = to_block.program._create_block() + _copy_vars_and_ops_in_blocks(all_attrs['sub_block'], _sub_block) + to_block.program._rollback() + new_attrs = {'sub_block': _sub_block} + for key, value in all_attrs.items(): + if key == 'sub_block': + continue + new_attrs[key] = copy.deepcopy(value) + else: + new_attrs = copy.deepcopy(all_attrs) + + op_type = 'batch_norm' if op.type == 'sync_batch_norm' else op.type + op_info = { + 'type': op_type, + 'inputs': { + input: [ + to_block._find_var_recursive(var) + for var in op.input(input) + ] + for input in op.input_names + }, + 'outputs': { + output: [ + to_block._find_var_recursive(var) + for var in op.output(output) + ] + for output in op.output_names + }, + 'attrs': new_attrs + } + to_block.append_op(**op_info) + + paddle.enable_static() + with open(model_filename, 'rb') as file: + desc = file.read() + + origin_program = paddle.static.Program.parse_from_string(desc) + dest_program = paddle.static.Program() + _copy_vars_and_ops_in_blocks(origin_program.global_block(), + dest_program.global_block()) + dest_program = dest_program.clone(for_test=True) + + with open(model_filename, 'wb') as file: + file.write(dest_program.desc.serialize_to_string()) diff --git a/contrib/PanopticDeepLab/paddleseg/utils/progbar.py b/contrib/PanopticDeepLab/paddleseg/utils/progbar.py new file mode 100644 index 0000000000..563cc5ebae --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/utils/progbar.py @@ -0,0 +1,209 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import time + +import numpy as np + + +class Progbar(object): + """ + Displays a progress bar. + It refers to https://github.com/keras-team/keras/blob/keras-2/keras/utils/generic_utils.py + + Args: + target (int): Total number of steps expected, None if unknown. + width (int): Progress bar width on screen. + verbose (int): Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose) + stateful_metrics (list|tuple): Iterable of string names of metrics that should *not* be + averaged over time. Metrics in this list will be displayed as-is. All + others will be averaged by the progbar before display. + interval (float): Minimum visual progress update interval (in seconds). + unit_name (str): Display name for step counts (usually "step" or "sample"). 
+ """ + + def __init__(self, + target, + width=30, + verbose=1, + interval=0.05, + stateful_metrics=None, + unit_name='step'): + self.target = target + self.width = width + self.verbose = verbose + self.interval = interval + self.unit_name = unit_name + if stateful_metrics: + self.stateful_metrics = set(stateful_metrics) + else: + self.stateful_metrics = set() + + self._dynamic_display = ((hasattr(sys.stderr, 'isatty') + and sys.stderr.isatty()) + or 'ipykernel' in sys.modules + or 'posix' in sys.modules + or 'PYCHARM_HOSTED' in os.environ) + self._total_width = 0 + self._seen_so_far = 0 + # We use a dict + list to avoid garbage collection + # issues found in OrderedDict + self._values = {} + self._values_order = [] + self._start = time.time() + self._last_update = 0 + + def update(self, current, values=None, finalize=None): + """ + Updates the progress bar. + + Args: + current (int): Index of current step. + values (list): List of tuples: `(name, value_for_last_step)`. If `name` is in + `stateful_metrics`, `value_for_last_step` will be displayed as-is. + Else, an average of the metric over time will be displayed. + finalize (bool): Whether this is the last update for the progress bar. If + `None`, defaults to `current >= self.target`. + """ + + if finalize is None: + if self.target is None: + finalize = False + else: + finalize = current >= self.target + + values = values or [] + for k, v in values: + if k not in self._values_order: + self._values_order.append(k) + if k not in self.stateful_metrics: + # In the case that progress bar doesn't have a target value in the first + # epoch, both on_batch_end and on_epoch_end will be called, which will + # cause 'current' and 'self._seen_so_far' to have the same value. Force + # the minimal value to 1 here, otherwise stateful_metric will be 0s. + value_base = max(current - self._seen_so_far, 1) + if k not in self._values: + self._values[k] = [v * value_base, value_base] + else: + self._values[k][0] += v * value_base + self._values[k][1] += value_base + else: + # Stateful metrics output a numeric value. This representation + # means "take an average from a single value" but keeps the + # numeric formatting. + self._values[k] = [v, 1] + self._seen_so_far = current + + now = time.time() + info = ' - %.0fs' % (now - self._start) + if self.verbose == 1: + if now - self._last_update < self.interval and not finalize: + return + + prev_total_width = self._total_width + if self._dynamic_display: + sys.stderr.write('\b' * prev_total_width) + sys.stderr.write('\r') + else: + sys.stderr.write('\n') + + if self.target is not None: + numdigits = int(np.log10(self.target)) + 1 + bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target) + prog = float(current) / self.target + prog_width = int(self.width * prog) + if prog_width > 0: + bar += ('=' * (prog_width - 1)) + if current < self.target: + bar += '>' + else: + bar += '=' + bar += ('.' 
* (self.width - prog_width)) + bar += ']' + else: + bar = '%7d/Unknown' % current + + self._total_width = len(bar) + sys.stderr.write(bar) + + if current: + time_per_unit = (now - self._start) / current + else: + time_per_unit = 0 + + if self.target is None or finalize: + if time_per_unit >= 1 or time_per_unit == 0: + info += ' %.0fs/%s' % (time_per_unit, self.unit_name) + elif time_per_unit >= 1e-3: + info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name) + else: + info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name) + else: + eta = time_per_unit * (self.target - current) + if eta > 3600: + eta_format = '%d:%02d:%02d' % (eta // 3600, + (eta % 3600) // 60, eta % 60) + elif eta > 60: + eta_format = '%d:%02d' % (eta // 60, eta % 60) + else: + eta_format = '%ds' % eta + + info = ' - ETA: %s' % eta_format + + for k in self._values_order: + info += ' - %s:' % k + if isinstance(self._values[k], list): + avg = np.mean( + self._values[k][0] / max(1, self._values[k][1])) + if abs(avg) > 1e-3: + info += ' %.4f' % avg + else: + info += ' %.4e' % avg + else: + info += ' %s' % self._values[k] + + self._total_width += len(info) + if prev_total_width > self._total_width: + info += (' ' * (prev_total_width - self._total_width)) + + if finalize: + info += '\n' + + sys.stderr.write(info) + sys.stderr.flush() + + elif self.verbose == 2: + if finalize: + numdigits = int(np.log10(self.target)) + 1 + count = ('%' + str(numdigits) + 'd/%d') % (current, self.target) + info = count + info + for k in self._values_order: + info += ' - %s:' % k + avg = np.mean( + self._values[k][0] / max(1, self._values[k][1])) + if avg > 1e-3: + info += ' %.4f' % avg + else: + info += ' %.4e' % avg + info += '\n' + + sys.stderr.write(info) + sys.stderr.flush() + + self._last_update = now + + def add(self, n, values=None): + self.update(self._seen_so_far + n, values) diff --git a/contrib/PanopticDeepLab/paddleseg/utils/timer.py b/contrib/PanopticDeepLab/paddleseg/utils/timer.py new file mode 100644 index 0000000000..d7d74670d1 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/utils/timer.py @@ -0,0 +1,53 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
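+
+# A minimal usage sketch (the timings are made-up numbers; `TimeAverager`
+# and `calculate_eta` are defined below):
+#
+#     averager = TimeAverager()
+#     averager.record(0.05, num_samples=8)  # one 0.05s step over 8 samples
+#     eta = calculate_eta(remaining_step=1000, speed=averager.get_average())
+#     # eta == "00:00:50"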
+ +import time + + +class TimeAverager(object): + def __init__(self): + self.reset() + + def reset(self): + self._cnt = 0 + self._total_time = 0 + self._total_samples = 0 + + def record(self, usetime, num_samples=None): + self._cnt += 1 + self._total_time += usetime + if num_samples: + self._total_samples += num_samples + + def get_average(self): + if self._cnt == 0: + return 0 + return self._total_time / float(self._cnt) + + def get_ips_average(self): + if not self._total_samples or self._cnt == 0: + return 0 + return float(self._total_samples) / self._total_time + + +def calculate_eta(remaining_step, speed): + if remaining_step < 0: + remaining_step = 0 + remaining_time = int(remaining_step * speed) + result = "{:0>2}:{:0>2}:{:0>2}" + arr = [] + for i in range(2, -1, -1): + arr.append(int(remaining_time / 60**i)) + remaining_time %= 60**i + return result.format(*arr) diff --git a/contrib/PanopticDeepLab/paddleseg/utils/utils.py b/contrib/PanopticDeepLab/paddleseg/utils/utils.py new file mode 100644 index 0000000000..73a298d196 --- /dev/null +++ b/contrib/PanopticDeepLab/paddleseg/utils/utils.py @@ -0,0 +1,120 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import filelock +import math +import os +import tempfile +from urllib.parse import urlparse, unquote + +import paddle + +from paddleseg.utils import logger, seg_env +from paddleseg.utils.download import download_file_and_uncompress + + +@contextlib.contextmanager +def generate_tempdir(directory: str = None, **kwargs): + '''Generate a temporary directory''' + directory = seg_env.TMP_HOME if not directory else directory + with tempfile.TemporaryDirectory(dir=directory, **kwargs) as _dir: + yield _dir + + +def load_entire_model(model, pretrained): + if pretrained is not None: + load_pretrained_model(model, pretrained) + else: + logger.warning('Not all pretrained params of {} are loaded, ' \ + 'training from scratch or a pretrained backbone.'.format(model.__class__.__name__)) + + +def load_pretrained_model(model, pretrained_model): + if pretrained_model is not None: + logger.info('Loading pretrained model from {}'.format(pretrained_model)) + # download pretrained model from url + if urlparse(pretrained_model).netloc: + pretrained_model = unquote(pretrained_model) + savename = pretrained_model.split('/')[-1] + if not savename.endswith(('tgz', 'tar.gz', 'tar', 'zip')): + savename = pretrained_model.split('/')[-2] + else: + savename = savename.split('.')[0] + with generate_tempdir() as _dir: + with filelock.FileLock( + os.path.join(seg_env.TMP_HOME, savename)): + pretrained_model = download_file_and_uncompress( + pretrained_model, + savepath=_dir, + extrapath=seg_env.PRETRAINED_MODEL_HOME, + extraname=savename) + + pretrained_model = os.path.join(pretrained_model, + 'model.pdparams') + + if os.path.exists(pretrained_model): + para_state_dict = paddle.load(pretrained_model) + + model_state_dict = model.state_dict() + keys = model_state_dict.keys() + 
           num_params_loaded = 0
+            for k in keys:
+                if k not in para_state_dict:
+                    logger.warning("{} is not in pretrained model".format(k))
+                elif list(para_state_dict[k].shape) != list(
+                        model_state_dict[k].shape):
+                    logger.warning(
+                        "[SKIP] Shape of pretrained params {} doesn't match. (Pretrained: {}, Actual: {})"
+                        .format(k, para_state_dict[k].shape,
+                                model_state_dict[k].shape))
+                else:
+                    model_state_dict[k] = para_state_dict[k]
+                    num_params_loaded += 1
+            model.set_dict(model_state_dict)
+            logger.info("There are {}/{} variables loaded into {}.".format(
+                num_params_loaded, len(model_state_dict),
+                model.__class__.__name__))
+
+        else:
+            raise ValueError(
+                'The pretrained model directory is not found: {}'.format(
+                    pretrained_model))
+    else:
+        logger.info(
+            'No pretrained model to load, {} will be trained from scratch.'.
+            format(model.__class__.__name__))
+
+
+def resume(model, optimizer, resume_model):
+    if resume_model is not None:
+        logger.info('Resume model from {}'.format(resume_model))
+        if os.path.exists(resume_model):
+            resume_model = os.path.normpath(resume_model)
+            ckpt_path = os.path.join(resume_model, 'model.pdparams')
+            para_state_dict = paddle.load(ckpt_path)
+            ckpt_path = os.path.join(resume_model, 'model.pdopt')
+            opti_state_dict = paddle.load(ckpt_path)
+            model.set_state_dict(para_state_dict)
+            optimizer.set_state_dict(opti_state_dict)
+
+            # The checkpoint directory is named like `iter_90000`; recover
+            # the iteration count from its suffix.
+            iter = resume_model.split('_')[-1]
+            iter = int(iter)
+            return iter
+        else:
+            raise ValueError(
+                'Directory of the model needed to resume is not found: {}'.
+                format(resume_model))
+    else:
+        logger.info('No model needed to resume.')
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/visualize.py b/contrib/PanopticDeepLab/paddleseg/utils/visualize.py
new file mode 100644
index 0000000000..27c950ec0b
--- /dev/null
+++ b/contrib/PanopticDeepLab/paddleseg/utils/visualize.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
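+
+# A minimal usage sketch (file names and `label_divisor` are examples; the
+# helpers are defined below):
+#
+#     colormap = cityscape_colormap()
+#     visualize_semantic(sem_pred, 'semantic.png', colormap)
+#     visualize_panoptic(pan_pred, 'panoptic.png', label_divisor=1000,
+#                        colormap=colormap)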
+
+# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/master/segmentation/utils/save_annotation.py
+
+import os
+
+import cv2
+import numpy as np
+from PIL import Image as PILImage
+
+# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L14
+_COLORS = np.array([
+    0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184,
+    0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300,
+    0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000,
+    0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000,
+    1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667,
+    0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000,
+    1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667,
+    0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333,
+    0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500,
+    0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,
+    0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000,
+    0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000,
+    0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333,
+    1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000,
+    0.333, 1.000, 1.000, 0.667, 1.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000,
+    0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167,
+    0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,
+    0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333,
+    0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000,
+    1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.857, 0.857, 0.857, 1.000,
+    1.000, 1.000
+]).astype(np.float32).reshape(-1, 3)
+
+
+def random_color(rgb=False, maximum=255):
+    """
+    Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L111
+
+    Args:
+        rgb (bool): Whether to return RGB colors or BGR colors.
+        maximum (int): Either 255 or 1.
+    Returns:
+        ndarray: a vector of 3 numbers
+    """
+    idx = np.random.randint(0, len(_COLORS))
+    ret = _COLORS[idx] * maximum
+    if not rgb:
+        ret = ret[::-1]
+    return ret
+
+
+def cityscape_colormap():
+    """Get the Cityscapes colormap."""
+    colormap = np.zeros((256, 3), dtype=np.uint8)
+    colormap[0] = [128, 64, 128]
+    colormap[1] = [244, 35, 232]
+    colormap[2] = [70, 70, 70]
+    colormap[3] = [102, 102, 156]
+    colormap[4] = [190, 153, 153]
+    colormap[5] = [153, 153, 153]
+    colormap[6] = [250, 170, 30]
+    colormap[7] = [220, 220, 0]
+    colormap[8] = [107, 142, 35]
+    colormap[9] = [152, 251, 152]
+    colormap[10] = [70, 130, 180]
+    colormap[11] = [220, 20, 60]
+    colormap[12] = [255, 0, 0]
+    colormap[13] = [0, 0, 142]
+    colormap[14] = [0, 0, 70]
+    colormap[15] = [0, 60, 100]
+    colormap[16] = [0, 80, 100]
+    colormap[17] = [0, 0, 230]
+    colormap[18] = [119, 11, 32]
+    # Convert RGB to BGR for OpenCV.
+    colormap = colormap[:, ::-1]
+    return colormap
+
+
+def visualize_semantic(semantic, save_path, colormap, image=None, weight=0.5):
+    """
+    Save semantic segmentation results.
+
+    Args:
+        semantic(np.ndarray): The semantic segmentation result, with shape (h, w).
+        save_path(str): The save path.
+        colormap(np.ndarray): A color map for visualization.
+        image(np.ndarray, optional): The original image. If provided, the colored
+            result is blended onto it. Default: None.
+        weight(float, optional): The blending weight of the original image. Default: 0.5.
+    """
+    semantic = semantic.astype('uint8')
+    colored_semantic = colormap[semantic]
+    if image is not None:
+        colored_semantic = cv2.addWeighted(image, weight, colored_semantic,
+                                           1 - weight, 0)
+    cv2.imwrite(save_path, colored_semantic)
+
+
+def visualize_instance(instance, save_path, stuff_id=0, image=None, weight=0.5):
+    """
+    Save instance segmentation results.
+
+    Args:
+        instance(np.ndarray): The instance segmentation result, with shape (h, w).
+        save_path(str): The save path.
+        stuff_id(int, optional): Id of the background region that should not be plotted. Default: 0.
+        image(np.ndarray, optional): The original image. If provided, the colored
+            result is blended onto it. Default: None.
+        weight(float, optional): The blending weight of the original image. Default: 0.5.
+    """
+    # Add color map for instance segmentation result.
+    ids = np.unique(instance)
+    num_colors = len(ids)
+    colormap = np.zeros((num_colors, 3), dtype=np.uint8)
+    # Maps label to continuous value
+    for i in range(num_colors):
+        instance[instance == ids[i]] = i
+        colormap[i, :] = random_color(maximum=255)
+        if ids[i] == stuff_id:
+            colormap[i, :] = np.array([0, 0, 0])
+    colored_instance = colormap[instance]
+
+    if image is not None:
+        colored_instance = cv2.addWeighted(image, weight, colored_instance,
+                                           1 - weight, 0)
+    cv2.imwrite(save_path, colored_instance)
+
+
+def visualize_panoptic(panoptic,
+                       save_path,
+                       label_divisor,
+                       colormap,
+                       image=None,
+                       weight=0.5,
+                       ignore_index=255):
+    """
+    Save panoptic segmentation results.
+
+    Args:
+        panoptic(np.ndarray): The panoptic segmentation result, with shape (h, w).
+        save_path(str): The save path.
+        label_divisor(int): Used to convert panoptic id = semantic id * label_divisor + instance_id.
+        colormap(np.ndarray): A color map for visualization.
+        image(np.ndarray, optional): The original image. If provided, the colored
+            result is blended onto it. Default: None.
+        weight(float, optional): The blending weight of the original image. Default: 0.5.
+        ignore_index(int, optional): The pixel value that is ignored. Default: 255.
+    """
+    colored_panoptic = np.zeros((panoptic.shape[0], panoptic.shape[1], 3),
+                                dtype=np.uint8)
+    # Colors already used, so every segment gets a distinct color.
+    taken_colors = {(0, 0, 0)}
+
+    def _random_color(base, max_dist=30):
+        color = base + np.random.randint(
+            low=-max_dist, high=max_dist + 1, size=3)
+        return tuple(np.maximum(0, np.minimum(255, color)))
+
+    for lab in np.unique(panoptic):
+        mask = panoptic == lab
+        if lab > label_divisor:
+            base_color = colormap[lab // label_divisor]
+        elif lab != ignore_index:
+            base_color = colormap[lab]
+        else:
+            continue
+        if tuple(base_color) not in taken_colors:
+            taken_colors.add(tuple(base_color))
+            color = base_color
+        else:
+            while True:
+                color = _random_color(base_color)
+                if color not in taken_colors:
+                    taken_colors.add(color)
+                    break
+        colored_panoptic[mask] = color
+
+    if image is not None:
+        colored_panoptic = cv2.addWeighted(image, weight, colored_panoptic,
+                                           1 - weight, 0)
+    cv2.imwrite(save_path, colored_panoptic)
diff --git a/contrib/PanopticDeepLab/predict.py b/contrib/PanopticDeepLab/predict.py
new file mode 100644
index 0000000000..95644295d3
--- /dev/null
+++ b/contrib/PanopticDeepLab/predict.py
@@ -0,0 +1,147 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+
+import paddle
+
+from paddleseg.cvlibs import manager, Config
+from paddleseg.utils import get_sys_env, logger, config_check
+from paddleseg.core import predict
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Model prediction')
+
+    # params of prediction
+    parser.add_argument(
+        "--config", dest="cfg", help="The config file.", default=None, type=str)
+    parser.add_argument(
+        '--model_path',
+        dest='model_path',
+        help='The path of the model used for prediction',
+        type=str,
+        default=None)
+    parser.add_argument(
+        '--image_path',
+        dest='image_path',
+        help=
+        'The path of an image, or a directory containing images',
+        type=str,
+        default=None)
+    parser.add_argument(
+        '--save_dir',
+        dest='save_dir',
+        help='The directory for saving the predicted results',
+        type=str,
+        default='./output/result')
+    parser.add_argument(
+        '--threshold',
+        dest='threshold',
+        help='Threshold applied to the center heatmap score',
+        type=float,
+        default=0.1)
+    parser.add_argument(
+        '--nms_kernel',
+        dest='nms_kernel',
+        help='NMS max pooling kernel size',
+        type=int,
+        default=7)
+    parser.add_argument(
+        '--top_k',
+        dest='top_k',
+        help='Top k centers to keep',
+        type=int,
+        default=200)
+
+    return parser.parse_args()
+
+
+def get_image_list(image_path):
+    """Get image list"""
+    valid_suffix = [
+        '.JPEG', '.jpeg', '.JPG', '.jpg', '.BMP', '.bmp', '.PNG', '.png'
+    ]
+    image_list = []
+    image_dir = None
+    if os.path.isfile(image_path):
+        if os.path.splitext(image_path)[-1] in valid_suffix:
+            image_list.append(image_path)
+    elif os.path.isdir(image_path):
+        image_dir = image_path
+        for root, dirs, files in os.walk(image_path):
+            for f in files:
+                if '.ipynb_checkpoints' in root:
+                    continue
+                if os.path.splitext(f)[-1] in valid_suffix:
+                    image_list.append(os.path.join(root, f))
+    else:
+        raise FileNotFoundError(
+            '`--image_path` is not found. It should be an image file or a directory containing images.'
+        )
+
+    if len(image_list) == 0:
+        raise RuntimeError('There is no image file in `--image_path`.')
+
+    return image_list, image_dir
+
+
+def main(args):
+    env_info = get_sys_env()
+    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
+        'GPUs used'] else 'cpu'
+
+    paddle.set_device(place)
+    if not args.cfg:
+        raise RuntimeError('No configuration file specified.')
+
+    cfg = Config(args.cfg)
+    val_dataset = cfg.val_dataset
+    if not val_dataset:
+        raise RuntimeError(
+            'The validation dataset is not specified in the configuration file.'
+ ) + + msg = '\n---------------Config Information---------------\n' + msg += str(cfg) + msg += '------------------------------------------------' + logger.info(msg) + + model = cfg.model + transforms = val_dataset.transforms + image_list, image_dir = get_image_list(args.image_path) + logger.info('Number of predict images = {}'.format(len(image_list))) + + config_check(cfg, val_dataset=val_dataset) + + predict( + model, + model_path=args.model_path, + transforms=transforms, + thing_list=val_dataset.thing_list, + label_divisor=val_dataset.label_divisor, + stuff_area=val_dataset.stuff_area, + ignore_index=val_dataset.ignore_index, + image_list=image_list, + image_dir=image_dir, + save_dir=args.save_dir, + threshold=args.threshold, + nms_kernel=args.nms_kernel, + top_k=args.top_k) + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/contrib/PanopticDeepLab/train.py b/contrib/PanopticDeepLab/train.py new file mode 100644 index 0000000000..91b44245c9 --- /dev/null +++ b/contrib/PanopticDeepLab/train.py @@ -0,0 +1,176 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +import paddle + +from paddleseg.cvlibs import manager, Config +from paddleseg.utils import get_sys_env, logger, config_check +from paddleseg.core import train + + +def parse_args(): + parser = argparse.ArgumentParser(description='Model training') + # params of training + parser.add_argument( + "--config", dest="cfg", help="The config file.", default=None, type=str) + parser.add_argument( + '--iters', + dest='iters', + help='iters for training', + type=int, + default=None) + parser.add_argument( + '--batch_size', + dest='batch_size', + help='Mini batch size of one gpu or cpu', + type=int, + default=None) + parser.add_argument( + '--learning_rate', + dest='learning_rate', + help='Learning rate', + type=float, + default=None) + parser.add_argument( + '--save_interval', + dest='save_interval', + help='How many iters to save a model snapshot once during training.', + type=int, + default=1000) + parser.add_argument( + '--resume_model', + dest='resume_model', + help='The path of resume model', + type=str, + default=None) + parser.add_argument( + '--save_dir', + dest='save_dir', + help='The directory for saving the model snapshot', + type=str, + default='./output') + parser.add_argument( + '--keep_checkpoint_max', + dest='keep_checkpoint_max', + help='Maximum number of checkpoints to save', + type=int, + default=5) + parser.add_argument( + '--num_workers', + dest='num_workers', + help='Num workers for data loader', + type=int, + default=0) + parser.add_argument( + '--do_eval', + dest='do_eval', + help='Eval while training', + action='store_true') + parser.add_argument( + '--log_iters', + dest='log_iters', + help='Display logging information at every log_iters', + default=10, + type=int) + parser.add_argument( + '--use_vdl', + dest='use_vdl', + help='Whether to record the data to VisualDL during training', + action='store_true') + 
parser.add_argument( + '--threshold', + dest='threshold', + help='Threshold applied to center heatmap score', + type=float, + default=0.1) + parser.add_argument( + '--nms_kernel', + dest='nms_kernel', + help='NMS max pooling kernel size', + type=int, + default=7) + parser.add_argument( + '--top_k', + dest='top_k', + help='Top k centers to keep', + type=int, + default=200) + + return parser.parse_args() + + +def main(args): + env_info = get_sys_env() + info = ['{}: {}'.format(k, v) for k, v in env_info.items()] + info = '\n'.join(['', format('Environment Information', '-^48s')] + info + + ['-' * 48]) + logger.info(info) + + place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[ + 'GPUs used'] else 'cpu' + + paddle.set_device(place) + if not args.cfg: + raise RuntimeError('No configuration file specified.') + + cfg = Config( + args.cfg, + learning_rate=args.learning_rate, + iters=args.iters, + batch_size=args.batch_size) + + train_dataset = cfg.train_dataset + if train_dataset is None: + raise RuntimeError( + 'The training dataset is not specified in the configuration file.') + elif len(train_dataset) == 0: + raise ValueError( + 'The length of train_dataset is 0. Please check if your dataset is valid' + ) + val_dataset = cfg.val_dataset if args.do_eval else None + losses = cfg.loss + + msg = '\n---------------Config Information---------------\n' + msg += str(cfg) + msg += '------------------------------------------------' + logger.info(msg) + + config_check(cfg, train_dataset=train_dataset, val_dataset=val_dataset) + + train( + cfg.model, + train_dataset, + val_dataset=val_dataset, + optimizer=cfg.optimizer, + save_dir=args.save_dir, + iters=cfg.iters, + batch_size=cfg.batch_size, + resume_model=args.resume_model, + save_interval=args.save_interval, + log_iters=args.log_iters, + num_workers=args.num_workers, + use_vdl=args.use_vdl, + losses=losses, + keep_checkpoint_max=args.keep_checkpoint_max, + threshold=args.threshold, + nms_kernel=args.nms_kernel, + top_k=args.top_k, + ) + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/contrib/PanopticDeepLab/val.py b/contrib/PanopticDeepLab/val.py new file mode 100644 index 0000000000..b662ad3404 --- /dev/null +++ b/contrib/PanopticDeepLab/val.py @@ -0,0 +1,109 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
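+
+# Typical invocation (the config and weights paths mirror the README example):
+#
+#     python val.py \
+#         --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
+#         --model_path output/iter_90000/model.pdparams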
+
+import argparse
+import os
+
+import paddle
+
+from paddleseg.cvlibs import manager, Config
+from paddleseg.core import evaluate
+from paddleseg.utils import get_sys_env, logger, config_check, utils
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Model evaluation')
+
+    # params of evaluate
+    parser.add_argument(
+        "--config", dest="cfg", help="The config file.", default=None, type=str)
+    parser.add_argument(
+        '--model_path',
+        dest='model_path',
+        help='The path of the model used for evaluation',
+        type=str,
+        default=None)
+    parser.add_argument(
+        '--num_workers',
+        dest='num_workers',
+        help='Num workers for data loader',
+        type=int,
+        default=0)
+    parser.add_argument(
+        '--threshold',
+        dest='threshold',
+        help='Threshold applied to the center heatmap score',
+        type=float,
+        default=0.1)
+    parser.add_argument(
+        '--nms_kernel',
+        dest='nms_kernel',
+        help='NMS max pooling kernel size',
+        type=int,
+        default=7)
+    parser.add_argument(
+        '--top_k',
+        dest='top_k',
+        help='Top k centers to keep',
+        type=int,
+        default=200)
+
+    return parser.parse_args()
+
+
+def main(args):
+    env_info = get_sys_env()
+    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
+        'GPUs used'] else 'cpu'
+
+    paddle.set_device(place)
+    if not args.cfg:
+        raise RuntimeError('No configuration file specified.')
+
+    cfg = Config(args.cfg)
+    val_dataset = cfg.val_dataset
+    if val_dataset is None:
+        raise RuntimeError(
+            'The validation dataset is not specified in the configuration file.'
+        )
+    elif len(val_dataset) == 0:
+        raise ValueError(
+            'The length of val_dataset is 0. Please check if your dataset is valid.'
+        )
+
+    msg = '\n---------------Config Information---------------\n'
+    msg += str(cfg)
+    msg += '------------------------------------------------'
+    logger.info(msg)
+
+    model = cfg.model
+    if args.model_path:
+        utils.load_entire_model(model, args.model_path)
+        logger.info('Loaded trained params of the model successfully.')
+
+    config_check(cfg, val_dataset=val_dataset)
+
+    evaluate(
+        model,
+        val_dataset,
+        threshold=args.threshold,
+        nms_kernel=args.nms_kernel,
+        top_k=args.top_k,
+        num_workers=args.num_workers,
+    )
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    main(args)
From da06a8adcbedda156d18e938376686433e0ef3eb Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Tue, 27 Apr 2021 19:55:02 +0800
Subject: [PATCH 093/210] update README.md

---
 contrib/PanopticDeepLab/README.md | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/contrib/PanopticDeepLab/README.md b/contrib/PanopticDeepLab/README.md
index f829a277f6..0a1f460244 100644
--- a/contrib/PanopticDeepLab/README.md
+++ b/contrib/PanopticDeepLab/README.md
@@ -5,15 +5,15 @@
 Panoptic DeepLab was the first work to show that a bottom-up algorithm can reach state-of-the-art panoptic segmentation quality. Panoptic DeepLab predicts three outputs: Semantic Segmentation, Center Prediction and Center Regression. Pixels of instance classes are grouped to their nearest predicted instance center to form the instance segmentation result. The semantic and instance segmentation results are then fused by the majority-vote rule to obtain the final panoptic segmentation. In short, segmentation is achieved by assigning every pixel to either a class or an instance.
 
-![](./docs/panoptic_deeplab.png)
+![](./docs/panoptic_deeplab.jpg)
 
 ## Model Baselines
 
 ### Cityscapes
 
 | Backbone | Batch Size |Resolution | Training Iters | PQ | SQ | RQ | AP | mIoU | Links |
 |:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
-|ResNet50_OS32| 8 | 2049x1025|90000|100%|100%|100%|100%|100%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/model.pdparams) \| 
[log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/train.log)|
-|ResNet50_OS32| 64 | 1025x513|90000|100%|100%|100%|100%|100%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/train.log)|
+|ResNet50_OS32| 8 | 2049x1025|90000|58.35%|80.03%|71.52%|25.80%|79.18%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/train.log)|
+|ResNet50_OS32| 64 | 1025x513|90000|60.32%|80.56%|73.56%|26.77%|79.67%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/train.log)|
 
 ## Environment Setup
 
@@ -43,7 +43,8 @@
 
 ### Cityscapes
 
-Dataset directory structure:
+Go to the [Cityscapes official website](https://www.cityscapes-dataset.com/) to download the dataset, then organize it into the following structure.
+
 ```
 cityscapes/
 |--gtFine/
@@ -74,7 +75,10 @@
 pip install git+https://github.com/mcordts/cityscapesScripts.git
 ```
 
 Command to generate the *_panoptic.png files:
 ```shell
-python /path/to/cityscapesscripts/preparation/createPanopticImgs.py --dataset-folder data/cityscapes/gtFine/ --output-folder data/cityscapes/gtFine/ --use-train-id
+python /path/to/cityscapesscripts/preparation/createPanopticImgs.py \
+    --dataset-folder data/cityscapes/gtFine/ \
+    --output-folder data/cityscapes/gtFine/ \
+    --use-train-id
 ```
 
 ## Training
@@ -98,7 +102,7 @@ python train.py --help
 ## Evaluation
 ```shell
 python val.py \
-    --config panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
+    --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
     --model_path output/iter_90000/model.pdparams
 ```
 Run the following command to see more parameter options:
@@ -121,15 +125,15 @@
 ```
 python predict.py --help
 ```
 Panoptic segmentation result:
 [image: panoptic segmentation visualization]
 
 Semantic segmentation result:
 [image: semantic segmentation visualization]
 
 Instance segmentation result:
 [image: instance segmentation visualization]
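The grouping rule described in the README's introduction (each instance-class pixel is assigned to its nearest predicted center) can be pictured in a few lines of NumPy. The sketch below is purely illustrative: the `group_pixels` helper, its array names, and the shapes are assumptions made for exposition, not code from this patch series, whose post-processing runs on Paddle tensors.

```python
import numpy as np


def group_pixels(centers, offsets, thing_mask):
    """Nearest-center grouping: a sketch of the rule described above."""
    # centers: (K, 2) array of predicted instance centers as (y, x).
    # offsets: (2, H, W) predicted offset from each pixel to its center.
    # thing_mask: (H, W) bool mask of pixels belonging to instance classes.
    h, w = thing_mask.shape
    ys, xs = np.mgrid[0:h, 0:w]
    # Regressed center location of every pixel: (H, W, 2).
    loc = np.stack([ys + offsets[0], xs + offsets[1]], axis=-1)
    # Distance from each regressed location to every center: (H, W, K).
    dist = np.linalg.norm(loc[:, :, None, :] - centers[None, None], axis=-1)
    ins_id = dist.argmin(axis=-1) + 1  # instance ids start from 1
    return np.where(thing_mask, ins_id, 0)  # 0 marks stuff / background
```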
 
From da06a8adcbedda156d18e938376686433e0ef3eb Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Tue, 27 Apr 2021 20:32:38 +0800
Subject: [PATCH 094/210] update README.md

---
 contrib/PanopticDeepLab/README.md             |  12 ++++++------
 .../docs/visualization_instance.png           | Bin 33748 -> 264029 bytes
 .../docs/visualization_instance_added.jpg     | Bin 68681 -> 0 bytes
 .../docs/visualization_panoptic.png           | Bin 79483 -> 286634 bytes
 .../docs/visualization_panoptic_added.jpg     | Bin 67547 -> 0 bytes
 .../docs/visualization_semantic.png           | Bin 74300 -> 302773 bytes
 .../docs/visualization_semantic_added.jpg     | Bin 75334 -> 0 bytes
 7 files changed, 6 insertions(+), 6 deletions(-)
 delete mode 100644 contrib/PanopticDeepLab/docs/visualization_instance_added.jpg
 delete mode 100644 contrib/PanopticDeepLab/docs/visualization_panoptic_added.jpg
 delete mode 100644 contrib/PanopticDeepLab/docs/visualization_semantic_added.jpg

diff --git a/contrib/PanopticDeepLab/README.md b/contrib/PanopticDeepLab/README.md
index 0a1f460244..f234947f8b 100644
--- a/contrib/PanopticDeepLab/README.md
+++ b/contrib/PanopticDeepLab/README.md
@@ -124,16 +124,16 @@ python -m paddle.distributed.launch predict.py \
 python predict.py --help
 ```
 Panoptic segmentation result:
-[image: ./docs/visualization_panoptic_added.jpg]
+[image: ./docs/visualization_panoptic.png]
 
 Semantic segmentation result:
-[image: ./docs/visualization_semantic_added.jpg]
+[image: ./docs/visualization_semantic.png]
 
 Instance segmentation result:
-[image: ./docs/visualization_instance_added.jpg]
+[image: ./docs/visualization_instance.png]
    实例分割结果: -
    - +
    +
    diff --git a/contrib/PanopticDeepLab/docs/visualization_instance.png b/contrib/PanopticDeepLab/docs/visualization_instance.png index a9b3db346242f61e3130c1b8833da231dc2c8b21..ad9204f42a026fdf2b8c4baa9823db8044a3a3e4 100644 GIT binary patch literal 264029 zcmZs@Wl$YV6eai`?hxEHxVu}h;0_PhhX;3kc!EQ4cY?b+1b24{?(VwZ?9}Ye)b5Y2 zuD(_Muj}4(&pEe$sVK>yA`v110030k?~-Z&089q}0Hj8M`!8}4^NRc526R!A`39(( zBtHJ{fav&L#{~eu!~7o$Xqhwn-xV={tfaVxC-AKEHI`(=?U3N{y`t^j;(Fn0JN-zV zdG9GfuVB3)3N8pmi1TycL-|(w%rszS3xz3)J1ULcBAS~k!KWxsZ^M+AY`}Y|^40(B z(B;NLGhVSykF-1miep%h*TKU4?=LJ`%3a^vOst~BqS)t`+Xbg4AvEINSXjph8 zG;CrzzAJw;p?^0<9in5R?+XEs-Rmzc`ekok?cDOGoze4E8;aKcqFMJu*0rg{A3CCb z*8_OgT%;xLt;sgg(sD_OO-sD$1sig#hC=cg8EcuXWW~a*@wW(b^Mq+M`ZnI)?I*8cMspl=rWezG3vtE&ACdHbrTx8`8QZ7c}>Z zmcjE@+-lWp@*DPyS(!QTowF$i)i}?aWZ=t(XSYuDuBIqUSP;%R*)oSPB! zXRRW%j2(J>C#m1()#9#|>1vILB$b5c$HlQ^0isE^=XZN6k#{SANEs@kLRjen#j@b(BP~C99V@vdd*hj z?!N_0D<9I*#&!I3XBu9!;+c&M#w!hQl$oQY zoG>b?7Ww3wupqc>t918XUsHx42f4jW6g#EnS?9L(FaCg0o#ABUh4Y;>n4bc{TA5m6stxcO8dcWH{ zo{DSMUESPN{#-J82YWv(L~7W=m9(I@ie6IfZrp8u6x=y{90p)DJjs;#dtGt84}3m! zm;C#O=<5^rac1=XczhWbU1B7xcvy014aj-$xGW6+yrDgIbjp5cvF=vLXnDkOcD>ml zpBRd~+j@@_RTTZa$2*?O2|z;srlDSBphUI2yZrLekm2w8=ybBRrO^GdQ_lrKpE4vD z^pma*oq>&ZrxNz`6|J1v{C)dd;i9D@=IkT(b1sMeBU4bm`)P&M%80j-@ciy{Dx2J4 zOZjuBL{#y;zu?Zu|JwRxWy>$U?KJ8%Ya$h$bTgpm((vWEM6_h! zR6s6cE#r9WqdkD=O+ZjIlK^(cO;=hr;{LwPV_Vo04Pg z?kDie+UFgaL;@qqBB9G)IM+=hvuv}N)V}6$pI7SuKf#UXC-Gq{sW*Epv+hw}^}tmu zi^XpLo7VBZ&i`WE2nTV6a9W>c&0yDw{F!qdM49*ue3}Z%eTZeg_vH!7TR4V@QL{b4;sl z(ctawSK)vMf=7=zf~AgT6E`CL)(&GtIRIn2ThwF z58FmbKIx@`QgLbUL-VIDHSMk0m@cjZ-h&0MVZ@jBx$s^pM@cUGCIE@ST@%9C;&Fm? z8NIy`;%xL*5$8#Q)n(ZJk>|E#()D*f;pey81}k;Hv{rb-9Q2ao$xkwJLR3GnQLw;4!AK7T|#{+XEQCa>kALs#b{^Pgi881Vp=XV}C zA54v@4(Qok-@EfKt1uPMSGQh;=B&qP&g;lx(3E!x)~sf?hJ#!-=PakJ)>17g-e(My zg)fASN$L9QDLQU+R4pz_RSs9|hHop=-PJfJtmo=i=``~S{3cuet#pmJ-2FV%O8L0q zG*-&ro6pwH{e4k_3NI6W48Qd$e$uoyJVY0b*MGNj#U|``&ybzM{IywjD=Kv@;A-2) zdQ`}pgHMu@ew-Wl=59}h5?1S8dhJ_iH7abnSy~sfB5vn(*5c0rtgdMUp-R!-wRYn4)iBuO1DeT>6_IxZPMt?R{8FdnEA;D z&DcBxWC8^&i5jM9H*#-EkKLly_}5J-z8MpuNwsjo^dZk;Zy@5lVTRj$ubUOhcZKr| z(sS-gOit7`O_Pn^o4QF(mbbrENS2eIa#7%SO82RB8}V}i1RlFWdG1w%8JKsN$cHHt zn>6y7E)EwRk}8V7QFB9LG1CJUBJyfG3jLVe-)q#+_M6o7zQHC=j2py_36JJqf*=ORb-NWKyIee3DJv2pam57e&x;I>-7oOF;U)xeefB%)a zYIIZ4QPXU95D}jFo9Hbn_E9ql=M|Jb!jx7+Mkr*a7r(I2bi^VcCxalLC9R?;xwu)(~{&K55P+9jIZ}IGeN)>)T8F+K84qt$@1c`HiNP;kse$ z!||+di12Rd1R<*a>E#u`6#N`bW)c$SQ-V-S=vscks6Jn0Z4Oa}f|5`NPgoZ3%rZSCji1r&a$b}`$V zdW|XX{tbi5K3Omlg)a8n0(zw+pord_(7Wt6wHa`87;Dug!wT>zbigX~rw`YMQ7mKfO7ya+Rg9cbp8*|!^C zi;v;&hC>~0UXwni#A&Prf|F`^+J7<>4xBOXGa`^p{jfM3m ztcne*__gSMzP3Oq&f0CTSL?Cd|K%U7KIg3lO9psm%@B0AY6O<3^6=_3-DxIimS1VX zbF_k+c4eN?{Ux-=D>r3GC^BN4PMhxVU1sSf%3nJNYyILISL6!A%bY(PSrahaq$V0B z*lmkB{Jvh+$uaeNcOLj!Cfn(+Ome&@1By&Lt^~;*6HKE&l5szz4g6Cia^D`>Tp2sG zhU(21wq^;zvax=){7$Ul+4c4Z*4BT%1%H|<=oY^<3f+3;obP;oGknpvoa{Uq>c}py z8=3M`C41x#02vOuCGNUuf0ttTKeGZh@l)q^&(z0dy^8M=&*|~ShL^V^dTuD}+rJr? 
zBg@`!DBc0eT(mz6_8z;=VuCBJDJ2AxwD1j-Ms8}91@;^CN*VIdt; zjY9G}0Cp8Ul|chL37lkU40D?U1p{Lp4m#hW;Kt>-z>e17FR~O%G7s1I!5G) z&BWoov7b3m<&Zes3*bUHREsKgJ@?5YmZ;ESmw**8&Ke?ssMs=}ak5VvHK2og8qDyItMB`%bpXC4kWRP=d-EH4+j z#(9e)@tZAgd;nq`!sVVNV$q*aDHG@qra6R2M5g0y>wp*qF(X)H2!l{t$R=k6G~@c3 z{U2FV3W(ZQMJNU8iQn7xi~13iNBEk(+DWqha3oHFa}0c6_pQYJo*k0%EDUTV#@XmR z!cfpyS?2}@q|y-vCr>~TQM3HX;N1~~wBB!_?BtL$(2@J<@eL-wv_(>RH3e$$(nLda14t1ZQT0Mq@=oPfw$T)mj8$XJ2xJ_G8OmA2FAU{DZz-Uwpz_v znRY{*R|9>-`ZE5J>u{umq#x#sT5H}jFn3SR-$)+=*(!4kVU{ipI&*A`ZkU#P@a>6#jhlhS^Z%F=<8$BFgjlK9C7ez78Q1mMyO4( z;lH`u?cKiJ;(jbN*c4RLOuG37e=VUt-0Sp9xHp1(vA#Z)hbejI06+Qor ziWsQm2PIMYXVqle3I{TvG$0qL4kLPDAm)=ZnbTT8uN2rJMoRhD?zLlT@-)hA0U3!N zbqsx&$nCORYb-0ed9dqp2@pdq%;k5T1&N$;A(Q&uYt-#SfC%2OM5P|A>do`YVSKY> z0xKkhu#BNwE()Tf`TVb4-^DOr>BK!IX9f3m#N$Ig-1kwhMn&L|*r!_7aVZ50=fJj? zAg8-4wUwqv$d_+F6=!+=#WQTcV*Dqs`Mh_50^q9=E?+VQ-vQHUv?a8cT}i`BNBK2V z$O-w-M?)pSwz(dOqN?eGhg5jz%ant)(KPO*+xp6+=f~*s*+I*aXy%9r(gMS?LgkG6 z;TVHUGWmW&OcNQj&^YJK52s0!g_jeZ3UWT)r^YgQQjnW;Hgik-@b4UYKsAvsJsR@= z-Aw;K;Dw9>-rb>D#RY_*RdVe7#z4~X!reXTXifaLkCllIw4SUX*9d2#dJNUkTsukm zCA1xVxhKCqm`DOq>fhB<>MN+PzVG4u%@>M1w+MeE^yXfTTj@Wp0C`&TfbY)GzZ_x0 zRWbD+<*(*Rgi!62;xJ@G<`~dfn-$s^oYim-V{#=EL{q*xA#zEDP^0jq!IG*c-B(pq zhCU-d4w+@y9@xPW`RvxyIb)SdBT?~1B>|5P1qv+%@`EAKMCzZOLqbHh?jK`0i~!B& zd+}s-T@|K1D!|&qgVO5yDvDHNh$-cF%Be$rBddu7&V#m8l`4a`vGCD2sct0vCHcFo z2+Szp5vImJvIxt&c%%{}<-1rUkXCN8P3_g;Cchs))OcpknWl#%>x0m3xWQM*Tc| zM;elAymHg1UJ_*>rV{kq1k_K&BD5Mu6hg}C%gt7K5q(QwWN`twrFhsDr@QwJn{>NL z9sa0y!G)~3H}W?gfaLel5&<|{(J_f%SmAvNSm;Q^5DOqF_FIpv(b^O4keeUu)C1K0 zcJ*|O3j?Td;lmP&NrW1b&fpF~BGTUpvc6R68|-#mC_qK*>uyB>rB}3}*AZadSO6U- zmX|SCZ%V23y>H}zdy)?cO6w$mc%}g6$`d_+ZSqi8}iH1 z{^smq6pvRsv9B)Ki0+)1=(WbdnNEIVbpX9iG&(;Y*va);rH3j>R& z$@Srk)r{J)-tD^ey2bQhKdhg5Wr~xdC=9wz08}`R9nNlWbm96;GLT?e>#&S0!0^Qd z&mo7Sv@QANeOJx;ZZn6zR9KO`H<8IZDEJWX7_+&^B~aSRl~|p=E;=(MuAc#H>O`|? z{GQb)aG?vSL*%VTe3#5gr5RqL?r9L3T~&H{Up`E0w5GuedH`d;$tjj6B#n;c;?Es? z@ndx42}GwJEQ@gFIhDfct{06Np}zaGcS_id=-Dp0%BCb#*=>EOB*GL zV7mNB6XWB*?5gw_Flrw%uW9ULbGQ@_=}I7K4B$6ACpZ)Jd^Hnd%5PzXUL+5be(lg< z$ar({uY^ipeRO#yxaoI^*)oqrLudP*tJM=KP?$l(hc&R=9QqOa(2K>qX3^F?#%L~R z6}kE}arSkC`>6TC6AiMdc#P#m|6{VGD|fb%*apn&3IwY*-^s@AC&P|(L<}R3P63t= zTet+ehdqx`bn|Gl1FsavOfOSpn%FD4{;Yk}oDDmO!U@kSc7hKKriMU8=n{@kS$dhh z@pMKa3G6p7c8tBsy{uE6%wD@GSk=u2acwn|O!{ziZz~ywcrh&VK!KX7(k#un#S4~Et?*@uaBH|Hb#U#` zJ-b-&XnVv@Mvbk#1G#x_^&6>!`!t1_pMrnrjS?-8elB(7?+6VhSa9hV0}u6|%EWA$ zRSN8JnDI_(M@vwW0?(@NYliQmB8_pUu8WspnUO*T`k-t%aDut9o9~;DZLoiscJ%!D z`RT0C5ic*6O^^$sgQi1+GS|fi#bSre?(TpH%?p3^33E)HB1!?MeX>%;bT=ygorKE; zI(L27Thlu}IucT3d#sDy%Zt3skgXYS;svVHR0Bc*&FODiYjsrw>;^1A)&>BFQeuPx zos{pCaB)EKl{mRV^CX4ex!;YLd}Z2bFs;_YszqgGIs|>zOef`V7|oXrEg4sGF^@Vh zH&6}?8~Xjq4v?cX89oy*!Skaq<{0*zQY+^1^6bKxLh3E)9}uvM%Mtz!WykH1g&h*( zKFgkalK4>GKmz>~g$B@xiBYYq1xQ$*R9;6HMUtl@jbEH^sJPnR^Veo)f5VGT1)pmL zMs($MsGw>4HkRt{k60#|GjA9Q1!OZ=MmMzAM{;9dTh{Cm7@_pN*oL@`@H+*vJT?5} zclCA7R8pQ&nT?y60f2ooPjtVU!hWqO*1MRY*siHTxcg&S>Uw^@1vUMm`nM|`6?g}j zX#$j(cMKxvRW{y;6*lihYUQS7zDTP`2Lgt$gs_HaKV{sVoRmoV0R|eGBwmB0Oi2wi zdc|n9y$3EE7L3*&4$!u3aJRF7704sNO*e!oZl9vhpf_K&{A*bOGr$f?5J~7WX^g|o zy~(@un~_`!i~B53h$Ysc3LuXI*PME%c1CkcF{ml@2IlF4Z>?{qsUMUEO(W+6!aFd*}MD~l|wk@|=r+4*fr%m*wjHI9R z(xDLwl`7Lnl)0-AYX)y;5Cd)7P~oonyiTu3jcQU&Kg3A;M&0fO#V`lCJq9|11g^h+ z%^^3}*$(fDx0gq}iuQgY6H3X6rnwi3lttcP=&0D1rkhF7v_yNb2&8M}2Cv`Z=aXJY zNZSg*yQ)VWvfqAUu<2wgdL^4LdZ}2pTh;rWE!P=*xT7#IjzYxZr%VIcj_=!q;^1y! 
z0O(r{G+2_kzJ9pSg)LKbsU|;oFi^Q|M2#J!b(XE=#+iYc&i*3 zm~DA^HqWD8(^_xz|DQ&5o4;o5^^oaoKCos_%%sKKqzz;$kffvv(ci8|*iRV^6`!On z5A`4$s{)DEwTM7Lj;bo9#4gcB$FQ%)mq{6RpaB5p$ZxGqezIOG9KvRbOg0A7&f7u4 z4D}+6(UB|k)!Mb@KSSxc{@}M@Xj;!rEq=a4CW0`1gyPY7$ZA-OCquWFjSQLmsq35RD|HR!k%B|O5xJ8ffc^AdxGRpzb;=4fbc7oUEz5b!Nh zPd!Nxay3zYzRuv$5q)fM6&xr8qu^`*Q+&*4N7iodH-ikIGoWq>})!||I>p&!b)h~MnDm(L9>P5z<%<~dkskG$WTlw zg|!EL?^jT4NBiZ~H9rMnySC2kOGMv-$;>vILKD(-LhmqHYISV^nM@2}Y;b>J%o$J1 zta1u!5Q$6wB-YG21&0a*#_|4?Sb}@fgZ9OMu_GA9XG9cd*!}xw+_(HIzI4v0UT@#w zRCY;tEGc#HK0458>7QG-dpZ!|fSUf4`NhP;j4=z?N1w4fgK{csUI%>@5(LILl_k@b z0r3NwK#t0={MLE74#+f4-GI}9E8-qaJk5%%ma2SBi(R;GMv$)QW3n_PH>~FiWF3a7 zv$H)X5G^X2@O=vfmS9YDuS}8cd;=AV-wgo@m@Za?7 z_Y-d~SN3xPlb)hMi*dhkxi&30!}-Mu#0M?y_#8d&I7px`jyXy-Q^d4db>Pf5QO2^q zywChqt3s+&7t8%Oz(yhxkmimLKNd?`jCJB>UqA%~qm_2yTnYQ>da4P@V(lv5W6J%s z^3sud5Geh{zm^*m$@*1NQu<|pKthB=0Rb*138lLeJw_@3QToqchg#dg)GkpXV9f7U z88L70k4gMgAj5cEy;LXccB8b>^kUI132?=_`&~;aEcy$+eH-L}^Q9PZi=YxhdWYqQ z{(#*$Mqx~M(81D!vdcVF&ZO2QJc> z*q|4xE%zBtT0!a%T>uoc91_DL1xr*olTtoF4#KOi?VqSBZr{r?RHS7gL~RZNaf023 z6c9vVU2}1|EFgp-usG()-TIMDj`e2W@l8P98Lwn;n?>F*dYF_9ceMFT4oX<-fV&gFPHi1gojO*>TpblMM%W$Z9B zaPdG6wXxq{m9IJP;G59EI%-dZp<}j~XUxCOUH56QCY=t`w;?7=69DWMXrUU`zN=9pcD31@GNFBqoRJ#X=y>xi z5eqh;fVzge981uhbNbhHJF*+Z2bM=xDApEZd)1lQeWn4b4zj(}T`=NZ+S7*#{SO zTln4xA&S(Dci0se40M5QNiFJ>(L1~<(GGtNtO|gMs&D;uA)!7X?q#GH&Ql=bNH5ja z8FYgiu+{+QZM4p_VF6 z2knjNh)>pVnLgIzY6&v<|6sCmMV~Dz9jk4>=!98Uhr20MAjW#NKF=qsT{SPXchF_~ zqhfWg6=rWB7MEEw81VsZXPxiq=9EK}4~nTlisXIx@g&{Nqc@Eh0F&g})Y${p>`{=i z)Hf|@jCNl&fv;uFQD?aL-Gt6Wm1yUo*P$_DVc8(yH)m3!(woc=$d(hr#}R_W>XPJg zbh3_6CmB9d0QPB6xY#!OC~c52_F?>+|3&(ZQr!H+lo5GVvTY+Z2BE8n$Kh=l!)2XwFJ|moKQ59Q!~14ozW=&rtj;9*hI<^z{bHP@+>MU! zb&L-pmnQ-Yx4@gBmaSb7vTsL`E?n3bZ9KDe`hI|4?(xn}MxVG@VgVse=}dZ8J$5w)_m?FjIIAkyY_Uh`2oJ3C zVw(2Iri@e2JXlQ6!-X00<521_DKfa<^>VZGZY1md4$5CNfZ}LCKhPJME1TAeZ&UU&PezhVz;3irM*H;(HdDXqI66no1(nA9I3OtL$laLw2 z%t!EZGygHt@q^9#kfB9pyieAfTS(~?a87i#__Q*Y0*W)~MVjXS>+LeqQpUrR=?5qU zbf%s%3@0y&1RYG4Fq@rIEMXmLoET*Q&41ebG`=<(R7eYk^=mLqxG+wBrx__+&?cTr zB`U-!s0Vx_?u1IFV(xe{WP`UoUnt(Jp~3ZI*mWKQO)D;CQNRjk?e=#P>gYxC(iZFNd`Qc=1iOIs0a^z|@&06e*X>oe2wNHL2UqWl5m@Qkf76a%u$kzOoOsVK0 zCNqA6f7fHfW6i8jd!*^Gqn{C9#pRGF{35jU&2yBmCF&+G)>5%WW={Hf3Gg%d@ta&O zH%FLd(UhLYa;W9xJ&xxFq--|aaZZl1+;Wo4f`yGQ47=52q zlxdutIq$XVQ*zfiRqy?4gCO3F*{5hgpZd*EuMJ_0HfQu_xzVJ1!n?V`b!nd zo)idem1UG;&;m8L0?CB?NVN*CW$<;jgmQV9xUi^t%KCX*=#q1uzxn(BvgQ0gOky`Y zF`uAdOlam>C6T8d9#3U4XfV7yFyh$sSH}7o!T{91_(0A=fm7?Mp&7*yRKq}!4kKd{ zA&y2fp*FwUKjLb|eb`8)lv15hY{(hP5R(`cbii&zbEUI)7`L_lH1$-^JWG$6j8vDK zf$krKc0J07N~Gq|*hQW>0`|ctTu(=eY9Gxk-V?aASU#!-OEf;0zyYo4h%WLT9@Sr_ zd4iNpvr?!kVcz1TrazvA(t zp;;?3oxP%D$+q`W*A*)dVr)qaTDwM0un})Bti3=lMYn=)J6rQakgb=Sp%#BF%IS#O zLl!g^9!P&7qk&MRSk4E%nX~q+( zDg%-j9c(cnM?LG)RppN5Q|e=kHXejuZGk z+(pj2#l8={mubJhOO=4fP}8H62={yaPlBXt4aV={m_rk|PX8wVRW2e@iSaQL;_fwe zy83H;C9kNMSK;BU>B2lpXja6oT71Ecz{e$k`1h;guIoW0Fb<#g*gBt1bRaysWoPvx zz2|Gq&{64To%?+C0h%`gEgX}umPh?xeE(opPtgc+!46g^BX?;}`3IN(GqF=6H)}{J z-_>nLjw@&kcUpx(Im5%bP-hZkwKi%=)Jbs$PVaXPcBcb>+AtS`Q+y5Kg>W3_oWV$- z(lP7$8L$FJ%S@)kucB<4b9R;#Kq;=bZc^Da1LKB0a(1>#_?vXzkU&N0K+e!8^6qLt zJ*N1Q>H!HOEeMMTU-{;f7=p{3a&}j3mMp+TzmurQVr5CS5pV^$-M>5D`Xu&OoD+a- zZEhO=2yL<`W30=yZZnmR$DGxU^RaGm%R+qb2Ff~EXjP7t9qpp6& zq{oRLk45E8aT6jaF*if4_T!O=IlMux$!bs`{(4A^dM7#?BsUf1Ie_%{y)z5p7$>0R z?jYi#q(M2<7q7oD99@B%%?qC&E<&+?miVl`Tski((bq9lrkgx#)@1>Ye-fq`*Wx((KL3Kgf2_ga>As&wTQ%%copO@<$&dbe>kzQOBsn5 zOc_G&%b>6h`k%R@pyyxi$!61y<&;zIl7F7d6PRVD35wkL5nw1bJLLGe?2W%Bk{+_S z;6St|mv`F#PndQU=(5e*{fvrjL-N%z)H|w=(1{H}q`b6SF3_RW_V4mcof)&f_Rp36 
z{OKn)l66&>R@YZ5I|T>^bntNX?zaKTHXtasZ`X9@vkS-LpPjSfY5Zs?3+K`Z@8~kD zO2E1#3tlH&(f#>*NT{GClsP6{>CMbdVXu-oTyf(Ys1O;F_kE2;{EJxzf{+HgpGxT> zk~o-hCoYq~15TdBOUy@KthwfZu(~IoVLuFqm^^knUQV*T8IEr{w(0m|ryX3IW5v1! zrK}F1+HJ53C0D^@@k~5#1bqE7^#-SLfF41urAy~j+Hf%CiCnc1m<(>VhsI5e1s-|| z#!~9s+`llC0{$w6Fg;MUIA)dro^b8mQh7t~lS*X|NdM^=Dc=VASw)epy<6O5dL_ZX%7Gaxxc7)YIiys_N(B6v z5W-H0e1+inPP)94c*p`+g?;bn+^L8uB&zvX$r^%u?oS}@W#1{`*Dcp*$+j0sL2Hi=WeVUnl&rr< z$%L)o`R{1<#?cZlMZ4l>80gP**-zF%6tT>D=2Zl`rEvczSmbTw+#dm?xZ1ike4Cna z@GYlhO}5t76N7Y|n;;yIo~(2GRq&(gvnl!cGjM?dzLIq2^vu80NmM11u%>O-p<&U(=S&zENQ=-_g$;&ymD^4z`=r2 zhTz#}z9;!tm^c{2f{mqtPhK){xrdH9XCypJudpYW&qV5>RG~Jx^&a}F-+R%YxfA|Z z`XE=YXg!#+H&Wn(QH|F?p=gKq10OC4R#1w_CDhf<+E7F@!Q=t8R=Z^=z{?r!s@0p*OSoe7 zh<9viY%lQF0FxPE+{pxXM;wOQUN|oA*j1w15npXJ6`bCfFXHn~3!r1q zH2!}Yj{kRL!*&sGJFmiT@ZE*D%8eyRwF^~*F?Bo0!R$A_I4GgR*A0J66)zt((JTPL z-wLZ>=V&telrF0XwSwF8wTwF{a@HYBvbk{ha^c(NfQ3D~MFj?P6C8w(VKT_jq*cm* zVTt7fnl0Ers$v!c)+aFEqP-!XkSK1`M-_^&jTr-4l9x+dXsu$WzO@wxvN3Wk3;70x zDG_8-{=;DR zl$d9HDn-sF$sgql%z*OvoDYKQPQ{4}S zK?dv~c3m`5Dpou|F$1=Yj7*EF<}9JTsxXr~41o{Cm_d6Ta}*v&s_wJs6LWhHhXp*l zN911_p;56hp&D6$SS6Qt=SXIdG6@gDI}SoLHtq39!VVN>-l*vDzDeHODrdMo7-;@6 z9WfPavqt!~vbj=VFw9@(f~Va|$`Z4igjq^t1=~75z{?{9K(U|cN&yvT?I@Kbu+(@V zphE(KvlY@7O`{-3FvQ(G?|07>0>&%I(oSvTU-oWL=feQ6x?u-sOsg>U4r}hA(Ndgc862cV!AzCgJ+=Zv^i~#aVU9}rO ziZQw|-dU1Q8kwUur-+_q{urBByBJq(UZ#)6VD$A~F~U^KkO4j{1ok=1Zxx1)I@O|c zP-f1=jSs$pF-4`|I6W$S$E-?NG})bSC#;{%s`gA16Kc(*>s+qERvI*pFa9bA@?l$n_+`m)P!7| zM<=w4EnTmmxXtUlPhV1emnDbkxa~~YJrjT!1+galW&_nFwY$MPmUyK&>W9~9)tcV~ zGQ+m>gW&HxD}(i&(9=w(50Zesli2ICF|-``%?XPk3csxYiRMs=n<7W@*z~*88$&B+x3?Q z#lpr&)|E7P#}6ftZTWnr1x&r;&1Dl@llkRMZH7ZDswZ+LCWEf|!phUz8xyJZ+*y_w zyN=R*sBYpO^`f`zC?xBJP+!6+$IyxyhE5o__HJ)N_fADz?#b*Xo*p^OE5}&R zY6IVj6-3vpaT+l_IkFOe?z_b+DYHQPsuDz4G37m3pUbeFa3uR(1ygPpU0CQidTtF5 z5&1{7{RJx+k3HE337-YXRIrHQVb8f#ICNO_oa&Kn070#hm`8-# zwGq2N{2=~xTi)SK43$3>sflQopH+|&NEgur^{9(E=lC8vPDCkME-KO5|Q z?h}?Z_}bDm?t%!eBlUJftcS6#$aAG*K}r{+R;x4?n$X7J7{rk4ibXxZ*T1>mn%-$T z-=|9$hNz&ba||*8=@`pIqZ-ZCcu&htNA7)9sn_a|=$c(zg*npH_Gl(mN3fMifh5|OaE zx7d3NP?uE@$r<nlqlOVCgdu5z8mY898cnBdy!=I#@*<{3lSt+0;bQ~vpR8WA$3q11feCh)cNCB0 z|HhXe&$H+;+ZkKqNY(N-!+v3N*L_hS2r@t{?(%)cl+U*CemdH12*+7X0ez4VN6$sp zBfrzBPM2yUqsz@fN!2JX?GTV#gBYQtiH3cABD2J-AOG{B&|-PR_;2M0h z#=VMqy}=*vd`LW9G5jaJer(lvRxe=z2jqwelbKJ^!?H(Q zqS#G4%j-Nl$j+vIq2db|u|T(cpq|3_DBLhknbPxVo-Gi347xl7oa){RU_bc!z%&#u?H~f}lHd3h*vRz;EenfdVxSV+>ULV{$cU^0bqwht$*zXnB z0FLdi7)Bp;w!eELPw-x#J$#@3&+ItG{lvNVY?9duD7J10d_5hvZo3REBD{si;hg#c zKelWDFbF!pS%TrxLY^$+`i?T?-0kb8d9yO^a-WgF0WC8UDfw)8P_~g-Ai7sF#Wtpm zdn?q`Z~LI0d`e_PnInYch#q-DGu4aK)AT4P)yb^mAmrTPlq95olOP$lnL@QWg_3OK zf38PBulB#sQvDBT({l-g&klITiIl@lrDLm9B1p-*2XI(OR7ipDo|6N9n)1OOsff>m zhf{IagMz}L`iSo|y^_-TSL5VjA|P_nIUfY0y+yq?w*Oc{N5wEs&K*Ay6S{>4)xvKN%)pBB|B#W)5MkxFhnHo-0jPdK z=%Rw2N!k9-Tv@kksb5*9hcd(M?n0wnoO{i^bd!J7l#b9D0^Xkmoo(@Mj zH4dhvHr@}Xb&iW~&9>Oisq{2La^e_^xICCxQ@q`-2TH-fD}G=K_S;pCY*>sBNc2gvOt&Om_Kk4IgmB1~~)f%2AQY0n-d(cvR&sH`*gZk=hFlSmu7yD$n-z0!Mjk ztA=y$1La)NzBB8LAg>|}{88|+Tgl_P@Iby~J%skEt{*b;4Ei+p|tu2JQ-i@`@PG|pmkIdxxy&4Y6Lpvr%PpXr2s z?vVyQm=fO{@e1uWU^ERYh>}=W$=~DY`m&JR=urk?)K#?_=;~Qc*l=~QkO_8qWJ~mh z%QIKq#d82VeATDZQGhG;wUYeU0e}JgCt}k)(rC#&3 zVz+fTtsq^}m8IF0@S*S)88s(R9w^xr@jwd9xC8Z+AJAa)djcu|YKfs4N+?EqsB+&P zGw1ZTOb$TF?B8%I-p@1lCi0&xo`3F79x3Tej_EdH@WKjyXt&zA;cG_?iGb@!EL3u5 ztyFB`>w7;M)9*l#2|o9j0kmW%R51=5EB=O;ScO)z3N1_f^&T{9yQf^eU3TLw@;c zm|A_r?9--wcoD*=k8sf!69B`5J(0Ew^pPM-W;a7(#*g%x6UgP!7<=l3j&3rAyhFLR z`1oP?g4^2Hjm=N*(H?R_l6TXLb#n1?CFEF&vDQawX4>E?g{LoBnk4Hqm0qJ3_Wt}& zRmz7c-5=+jzKMc|n7az7weI)e&>ISj{mk?QzgzdyW-x-YwEb;Hj%Ezwxb 
zNRdSNr#L4~IdP7fJ9V7~(T0Nbql$>H5-6YPgs@3S5)!ZX-CnxhtIzB|IN&?$zp}Q# zsYu@_I3b{u^NZe3>W*xmcMV^q75miE^H`gZE*Px2%Y1XMYh7ubPpIb!SMX*Q(-%`b z)Tq!rci2BGZdv1ePW{iM9#uK8_5V(N|4*a>3lAdNh05KJW-oL-lt|mJH@4)%aY^Z8 z2xWLK0kCy>8SYf!Ad6vRKx>fI{niF=Dqw9kmC;QUTmGwS0JR{Hg*cy?s=+Qx71-1I zzrqPZ57P|9y)A|C-U!kP}eiT zub_jdZHk-5SsEtiptY6fBQDPZJE8YC-qKk4{M?}2xzNS$!+BbYaN3jN=woz*rHB9P zFMt{DS%06KJ+hyzuK02>$wFni7loGkIV+jV=F|5U39(4OxI*+LiJXgbHK6@<4+Pkw zFSh#g(a9N<8O8^TR@aw@ddKcM+^E}o@3^t;G`1VN zX{^S!ZSUB&ZQEvJ+h}b2^nUS-aXd2_%bUysFmc(=yQM&n3o zw&TFp*k1_V<-aMYFd9$sv@5xz1>Ma1Zw$?U^8PN4N;8WT^z)CHs^NIo<%=3yo zcS!+;!XKq(V0$=ZX89EvEO|{Zh8nS=mdrxrexXN<*=W36Jv=?cIvY?jv0Wimr*+>>4J55-lmA=P?8%adZN{G@P4B@M%JV4-6eO33}a)B zsg2_LBePuKLz zmPS}XH7J7fq1$z-P(Rx60g*dXY>XZ9Y_+X`7@IIJr<~Hdc>-HKT8PF|7tY#bd!cmd zc`^X=W~J9%csk+3VmXMV`iHuA$T(4isWK*3`MY@9;|0$cj)pk^n(ov&W^DT)LsNr( z7P1lU-7Ahr5)aayKN0z^J2?{9WFi560Yz{oTyRH_d|j-`hKxq$Mup4FVMv6f)RujaR*VxrHiCu|GtOG|ycLuQ6P_zDq z9`t~BNRUNPRDcHN;F#ku5B&8Js;2qo`pEjpy!4y#XxHXf4gk5mVDBH`p*Fc4B9)9E zE7{P{%p-_g9Zjz~;30HL^-@biWofmjwjT;nUq}D!m{>?g2w>csDzQFJ;O0+G>80{$ z>WI+~%i!}?TkS`Vvk#$OdSU_}T1a;m%z-E`4OdU-g>i4P7q_`#eXH4q(utzg+2t$1 z_jOvaFsNaP1d@)BG0bkV)AVus5>hkC+U3~&y^tM||RS zdnlx`pAi-LL(Kg!!XBZCzh_;Q3iO0b1s-Rfo?T&M+&8>zEQn-dkNo)*E8i#F#4hOL zJPTEtD5j?eKX)A~xh`9RH9==U{S8l|G6Ba=A)g*Zv@xs17&9#aL ze+d0vYwZjw3;i0gt@_Q+Ke6kyQ)`DA=U4b8~Uv3`>*^}nRke4jiD!Yc_M`2SB8 z|KKu6ap0Q_OXO6K7l=Z=VO8jwJq723(QcjW4>IjGc0M7-@s{3uty;OGgi#D&y&6-s z_av#$kreM@1` z-+!`10rjzx)#&cdsZO%2~_X~%{W99SF{X$Tyc2F5QwE_~OVyc><4ewH)Db{H` zwD+>Ng+5$VQ>*6hjhx23DuOp!gCBYjasH8_b7ZJvEl^fXX}JS4ayAGb>c=DWj3HK!6(PXU1_;wA09+aRp$B~(j{bO9PfZ!BgRJS7 zqGMJi>KFn2rS^QRA*S~T+l*n-dd=dywZlGq?$rg%UnL!fbcg?u}4^{S(C|`auCDtLOB{| zn9d&9*pJ&Cn~Z;u61e3iM*b1@f6a1rAt(xszHh&UBP0%KkuZrUpAB_?zm)!q@K+kH zs)pEVqy#m=Vfy-;pLJn6ix4sR2D4R@K4su}&NL=Z9fe#&=phrD zZlozxZBl^4!fsooR5uiL#mHPtrUcpQ8xjn_1Sq=6kAj%zm)h9ZNWaUzx;bBOxI`BlAgnS6kLilaZ0Gu1&xe?)|2d zPx_Ka6LyBbj)s>^Y0Z$E0vzYHJ6ubzHCQWNrcO#91+h~@2iyq=)Eb=(*|)j_q$D=z zucM^~b8zXG4W?zP1b3rV<00)%W|(Tn7x1`dT0=0x-S->Qk-FX5L|taq$gozOn_By+ zcD=dY6=JL>U|^>lilLGQ7Rl~uUeJ{$tAkH^H}7O=iSQa1{*%Z0@)8=5dv;0PAjJU52-u-o9_{_CXCr`M+=VL01dAhNEL|?&9$MWdHsinO z4c!HLm~?l655tn=+?YKxU&9DsK%y^rUlnMPlgzHvTD~?aAx!;{k#GCV5 zwNg)prCN%2`wo4GO8l5#avVku*L#G!gL3Qixw_bylFpCW5~Ynq$eBoEsyJJxj}D1; zd-78dDEqmJV@$7F%&7F63PEfGOE$uiEB z{9rI5WlrQZL#hxE0^Eo}+pRj)-_>dFFx$fG!!4!}9UWcdoZ_sBoyJjm(*ZUopBO^= z&b|wL5!yK$FJ*r5J&PI=I20mK-4LLI2o@*|WQKXg!2`6)Um@d}BjpOa(j^IQ zBE3y|+mQinzgFX3M!8seyK?4k-@rit_ zWGOfRg&~jJr3}K2WjJ2wg^2$(mH5^Z2jGX1$sn*0ypGd%-NG^7P$LUKaC95LZE*RU z8QX7}ZS)|Ovt*X$#XiT6Db+5cgP1LvN$-~ct_90XDCzomrO=Y>xkRE?sk#3XH}YHg zv_K#F$n@%qIaMlJbjiJXiOh~SNd2uSLfGT#^8?1GntdVgw#mk7-$ZRV{_-WFWYi^y z;P|rG6r#cR@ti9cb16QO(v@#xeO+XD4d{SSyT(nP`*m}opyqs1Z;VV^ zli{oEb|PxZNxU1CbkU}0@$3jbmwa;ZxQ_b~kOC+W$C@w)T>>hNjl{eEg+jhH(?_O2 zJpp9FJ8_??C!E)}ggzf@5fe-$qcBCekY^xh}h$#=eec&$QHS zt9+D-dz>W*S#<$LTE^3!{~~TLUIosIY7*4E?(tQH*t)l@t?60lfwLNFOlw(ARX^?e zSfp3I5@I%gU^sQEaH`ubG>Ov3s`#fa6@kV6H@aRvfZj}R!_=o1LMz4zb70M} z&$!=9=6p|ZbT|2xt+%9u3MV9=O2ghH-rVg}K5N<`Bk%MfcwxMun_-Fb$x~R?5{N${0z#u1bZG;^2l);j~ph`?&z+M`zxp#~`@9{*Apd6VskZ>tGEJo}8;qYImzKcA9Gx{nV z%__Rk?j=maS6i5#%|ElYNoVUVwhfN^;hMTzioNu@;bH*}-%58x&HZ|6DT)i6a$w=Pwpi|d1- zJcFclkg<(ocnvZHw?2uSH2&?e9JSTx6V8ztuPIowHk=rCnbwa^N;V~Zea4{mr98YN zX1`ssYOD+GE9*dGonha@Cz(&B@Ac*%E;55}`~1KbfK;oVW~V?)J!wvmCrSN3cnIiM$C>A!ea$L0~Wdf*#WWQe8rKg zWeP3}J+W_uW&IiZc~GBq$HEN`_Cc>&W0bKyRbpFgg*b8RX&dQ%UA)uKWy9qH#_K8q zLqQ%ck6X_*cqR)ZL4wzPli}RQtv^Y%>l_!jJF@S{w{pYD^;=HM$RtZdJN&h*f5k4F zIpsFRd`XOZ`wOjlxUNWpQzh>=T-H)aWY6l@dCSu{cs7)p>KwyMokd2k#Q?L7r|L9x 
[GIT binary patch: base85-encoded literal data omitted — binary file content, not representable as readable text]
zH=lF=j9>G8+uYIqbDCyp?bYq{^Z&!&N=JYEFunT^{vM={Q zzRTyVCu|1`1DDSVa{aYywe+{|G}AUp!a9=!t-KsEcFE(rN5s70x%YX#&H3xCGL~4g zbTt1uUX*b%0_-Zil-}I9n||}k-Spb}Wk)1OYqs8XQDQLTa+4SS&E^^|DjVrG+hZn> zzy1EV8Ky6O(7ErMZ>IEj|87dJzUpL^dQxZif9tnW`rh|0Dr4Z zpx1gze~dxo|MJb0ewf3ZeAeXrOqPdT0!oZuR{7G)*GgAQe3id1>N}qW6E?6f)4lOK z>7DctJJ`=ls)D+1UTvpWfAMB|^Ed7?x?DWI1ieghZ+z)e`rH52ucrTvFYbT-2k$dk z{*e1WFO~ch>2RDCSa7B&$mLhsu$o|u_e;3*&oiDnA@)9IlMQ0EY!R^7z~9HjU=b`( z2?<3FmMW{EE&))5OI=tCpfE6|s{omgZ{5feh0kjL@_gYbg?=jzm9h9p9%2iQj$q6;@9xVf$NUH#z`jG{|ERYADp_ouyElgL5XnpTT z8Fw|Th#>@13aN0io{8i^vJ49WwEb+YqJ@_=Qxg7SmbGWIo=Ru14!x~zULHoHDj%)^f;d|Z{rO`plP2&iPHsX zgqxioA}`IZrzX})r{#OmRx1EuK%yv6YbbZP&|!hff_l)Ql8Zu&5^9V$El-9h_hBu@ z`A~jTTqsX>x;j>7Dp@`gIu&J}$uaY)97H|4y4rT}yp^=_Y@3+Qw@@-?lQm=;;Gy3j z9vy%RZ6|;_;+YXbd0NK(5uj4bvnl|A%Ar|kt)fN4xXa3fd((9F=IwNSosS#TUrQd- zth5DxltY1K;sklld;#FP^d+qvv-=+WVsVFU+JCy&P5bu`Qhygq9P+j!*94w<)`bqV zsI1~TTn03&!k;Sr{*=}69w-eYLvHIWa z?v*rN-NGWwJTn*u0-k2Ls8sxYo@@U@xzd0FT2G<0qjT>bU@+#wc6L;Bmt(~8Nre~A!e)=besiajN0MG_9 z;Kz_DbyAk)?ErVf!$W|;whA{gDiI{DTC2>>T^Y}RjxPi1tIQ}<6WXYU9IvsMfvbA9 z8%^f!SYocPr47bQTRp}A%1|D(LX!tBIAC%t^Eh0F*(;L6=_4M-BzLtjnKYMQ)B-xeS_uZ_l%FNpL>U}{2XbgbC z0JCsL7z~FP&TzDF5haExTcS++gGGmE2R~T+pihz+_7^)G;jrbfrJziP9bttW!B8+9 zLCG4Dwmicbk~5qIvj77O&}j7D)m?j4?v?)i&dXPoRbACxy#NhhaJye+=6m3mepp^oB8z39c zvOck0Z|N1=;wd}Ojk=Mr$U3e?;>qHkDfkbywIn1Syl$e+a~$OxT6g- zt+%Y*4_JhV7WqR1njZeg`+)>87txR6)JyA1%$$b41NpedRDG0~~%ljXpjDljuut{fqB$jByTlJjAknko7xC0hJLsMAoSj z#+Wlhm=?bWxjzIPw%Ro%dPfV^&CDIa!)Rl@c%+0e)c4Nz@w?Me4ti!%`=H7g;Gi_E<_^ z_(Do(f8z(@;JJSg^X@26Vh3-6)2!T0*Q{ZlFZ%5yxR<2-+Y zH9lSAzx%2Sj9rmG_<7G1P}Dt^X3(m79I)$n`0!#R-*bBR^P&q$7x#~*qv@y9N7L`8 zuOezb{0$?W&_D6%6X}It{&4#6i;tw4M{uxw8yZ;;{dj%k#WR>J)zgiCxSVdj_2bP{ z@VWz^&4fIMZ&AbuvjhjT<}`>V(REn<@tIu$SPI~32vkNZfKr%x0a78u!}DF-XF@>C zEAhugZ?+g!qriFB+b+~&=az+PF1*I#S92ETfKESxBU&Uwvwi6z8Y(x|*~vw_!xJF2 zd@0~aOTREW6mP2aLB1UXAs4tVR4u2bHTngz?B*GZVan*Ia2F|sz~t#y3eFKHi^x;{ zAq+?#>*j*D09a5&I0=mq(mQBy5gm0jZ_?Q-N(Ey!~xg&Xy&=dLNN@Z^(a1cg>U5aUvdON-}*PazE(B*C=n# z`<6vvWE0^d1Il!;k7I1XDJOdRL7jF}gDN!aB4gemMeE{m9b<(tw2FpO74to$AN)@N z*3i!5?di1AzKYot+E37@!2M7@03hh1CH~oMdL*x2G z(pU~!T1?&(I8fs)ep5z;Yuh3dvLaFdRf4KT%p=j=005p%&FV;MuCYn6ib)_ot5&I_ z>!?ixwKnC>01f~GPLNl#%=CjU(63&DCW|;EM?<7iTcJ;?t;YBQ9MeBjU!VSF0h)>e zeUQMm1|u0GU7KJNT6j9%AOOf%^?Evg{%TrB!>2B22FZj|1RNAFTdwWs!yV|TpsgPs z$Dz_tpQl$|!jb3C+_q1yGZ&jY--jexf#bE^X|Su3w9cw^5wk zuijis)ul_R<>`L_6D>5OJekphBBWaj2jS6Icqzs+V3+;9%KAg_q=9Lve)AdtN-Hbt zp}jPUA2_we`be*6J)pktmJo;!9U4tjFc^LFX^OvBkSCCP5^(LHom3j+eaC*7If^(? 
z7TW``^aB33+V#t^!7P|%TUIb(9iJQxP0!pPmD*+YFXOCYws`6Cg;bma2!ubD@a@*; znipEzuNqA)rJ}Zw9CK{`YXPu)Od3Y#Q$JGe1|URj-_%78o&=H z@&DDrWObGI8=OF@32%vh*^A?XPC?~RPtkq`B5kM^bEk4&WTpWyUI zG|6>zWj<;-1)!v=G$G#UP2`sE7QQ#!p-@C^Lu+|p6YacB%vsT7NQ0-xQu!my&lgrw z`-)n9YU*YA%d5943mwDH8FO9S?#((U2t~+2ws_C>3I=8}RPe&D*I#e7}eD~51kxM zkN(;xQ|&so%zyG0PVHG)$>BZ-KNb{lDjWvX65yiYd+Ln&AAWL9;j!km^k?iBZkrVK zM0#7lx>kE+v_Jjw>7n#DPvh{A$w;%sy+R4jehPmJXNbQwR}e9VG}RL53KjlcFPq$6 z_jg5$bhLyoun@?AuD#2#`@LI#qd5mxdu(_Hv&ci~%kyvD62sW4AK`8w*6u6si;pq5)MwI%(=Vl;qCP?d&R`EOm^-d<(`Vt2*D?J5D!N`Q(;r%a z`cXAXPzU1x06+jqL_t(M`^jVJSO1&8!p`-fnA>j$cI5??j{PI}CHtu}>D9mfMEcS{ z{R?)tvxR6L=f~@l0gS+_Nq~!U*NN`T>gR?PC8X#w92S)dbzz#JX%36rWlp;q!klJc z1k)CUB)}ygDFhk8gG~5Q)KC*ipEfLWgKw17kYF+F=}RtPb&gDCM zQ@aKLDkl2J(h^R*tE%S)$y}no&tMATEZoyw|+~4V_3Xy{t5)q zxV`yW7A}FJ(1?p%eTUKReJ9|a;3hiB(_Ls!1uW=h9FxqsQ?5A>fLy#wiRfQCgziIY zZ0K7pirUe^xVAAR(v!8(rWJ80_+(zD2`pKd0wsr)~P@1WD0 zXQ=N|nn#052j@Txm}ym0tumAv(5YA)0ly_OKu|yG2W}shpjG&TqkO~o%aS%V1YfNZ zrK19Sm|sFyO|aB#LYB|u1SM*M+9+z$3>N1o`!vq#OPuV8G5RW|mGqk$U`i99BYZGP zAJSSYaGKr4wE%2`kdaU%7k^xnF28j(okPH*wW1FTc$G1wtqcrCUsO{@fvk+sr50pA zCY60?__XmaCU7;3mecxLJ1wq21Nt1@I_8e(3e>^AqV|~i$LX5%=O*_A!PE}2ULKM+ zzqFiYJCl@yzJwF}vB`3J^zoydJh+l-mllIwZ4?LU(hcYTahfY>kV2~KKz)2BHCE9G zs@+T-G?Cg^6Eqkx~dC$YHzUlPckBR$p$`I|N<( z7qJA<)K03R9jBI*{6XPcP-*}dA7Uh-=|z7i+HvS(9pW`8@Ab>)QN~+}`nKAbR{{oJ zpTCCnDCU2ZSEt+hE$f5Xt#X3pI(@~4YBkZu14L?6(Vpeh#F4SSG=l?l-&w(=b{Su2 z;+OsKyV=HEI$b`ShSrWS8*{>F{Re4vb1C|HSs=3y6IS?KnR<;JK1N3$OMTZbB$Omm zX%g9D4z0YUdFZgo$)ihfdFV~t^>xz1G*k^SngH5UFX&aJ4p-M!(i= zPs^BVnwe(wFY*jN%(}UAiuC?6vdmff2z|;!EIgQ@hQ?nN^V#4B(B#&B*_#g|Rpz!8 zs8Ed8ZJKO7;!6h7P=B;%3svViw3OkC{q#Fe15FyEi@5O@tB z$u^))8Q(U4M?n^xMnkiTzG5TkV~)u;9tv`)Z7YD~*zdII-)L734^8q5fV{gIV>!bN zmcHA-HxEDKGLa&asn4XN>i}IFWcAAWr1f`Op`hlX=D~C$PmK<9YXEys$~C19RH{R8OO2ecl zK6)s9U_ov#18b7=zvv_AjN103&aQoXMW=YZZFV3r7Ur_*yV^k%w{zKb}= z5w)@4-yQeu`F)7proa2xaQfSiXeW=T;V9lG_=K@bS@(~#&#MQzE$eY-v>(CyL4;#W zxIcUNZ2HrimsoH(@ofj@FBUKv=%w7$8zcc*-|1(6HoNZ_1*lx{N3tgcbnXO#UGg( zOwWGtv2+@+OY_gW3D#qHu980Wi%+F*|C=ABE3e&S$Gw|O-=%Rii4zPeurn-JkvS0; zF7bpbchKFXWvFZR#O{NRsMWA=#n;5Cv^)9XoEj6y4HlH7 zrdePOrum19Tum6|DY2;U&UY@N)gaSU!3D5!CJO86agJTTz=HkK0^k7)U!tVJ68WI* z6T8XIhm=z-5``Kyvs?gLK&8L9<^a-`c_R5xDlRHUynfA;5MuR>rBLf4S97=;84Lk7 zh;ghq>2MO6@zu=m{Vb45x7_)i?ayr7Dn#UQase*l66jQGy!)wJVA3Uea{F?He<)0UO}UH65vSRv2=VC%^QG_ zjlP>{5H}2|t7w0cd-=QxCoLw^RgNY0~ah%&}H53)Eat z;Wz3@6*~&M00{!U(p!K~=l3P70#@f+>B^-yI7yKHfiEt^|Il=j?vg+dfK2E|_IvAP z|0EfciX6B_#O$Bnxgr?1E1dCy2L;%YK6d<+0QmZ-Ly`VpLExSm8&4&G$j88%O;R6$soC1drr)z-F+76_{ zvuN4@e$3A^zIork%MHr7LFJBM?m5gUfps*PHZhs=bixi=L>v%^2G|V#w?1i=05JdEy9`7x?fJ=q-}pGWo3n5|!acp0+uFVBZh;HxTq8rA%Ec zE6q-|KO=zo+NN4tSRr+k-Wgb+FBJS_DCOq!Y9v6rsIzt zN>4p~lrr|EH_mUQ*ZvFz4Sd9nj+WBn6VIm8)hE-1A1wkj>ZMjQVu|JuG^Q zK3;Rv9NP8`VAq1_DR%)h8G}P(0K00-0vu6x$K4>>W}X16cB4Re;4GvQz8GoG4*c)( z>*rGw4X*?ceP-xTnyZhd^>fv>8J1&9d$O^o@XyVeebZZI#)7a_je z`cg6b+kFxr@`BJ_C7!%|>ph+tJItU1M9O|=@6yxhQ@&@2{?&ak=fmC?kSo%kv&|7J z%1xO)6*pdQT;uN*e8mlb!fYW;JTsFfI01G5Ft&jozXl+XAXlA3yEL2hR9#0-Z%Ow` z^Va7%vG$AYh6>;^z;aNLy7$ZHuoNmyqR8?5;nZI1PdESYYU*Fi>a{fw^|Xtr_ttsq zQ8Y_7NBPy@JZ?GVY0*#Ou1_(uhgWFM>l^xWlQ)=dPUo$3;~oD<@11$)kFs)RJkZlw zTY})GY>*;~XASg6Al+yhj2x zhk=YeGnbwL^jduBYP$5VUP*lrEyh za6O%4!pJmXZjFEEJB~{?Y`t9S{2pUOmZR|87>hdlaWALzGe!Z!~zZ32c(qCrxgn1)<`ODeq zjlFT+ci-J$IK?H<_Zcpm;745lBUcW1@A=VdxQ|f*LQ;960zt5)$86M~I z;h!-VQ+fhdZaL23`rsICFo0-eyjnnHGrHjKVUH#0);1yxo~t~ zE9A?Z8HjBbl0H{JV|t1OtXdC9V1Qz5i;N5-&|wPJtTNZRo6oaI0?i8qp76^-8X@o_ zVG2C-V;-dQa~ZxG69(JUIa+{0fJ4AdA3A;5IO_O&=rCqW`1aDo@#;+$_-HZcz*=(} 
zoq)Tb^$HNL``vOILU4*a;%CG&Z+$K4Ps+LqdRTUG2|*1tQq-#11T0ajfZM5UefXE9 z+bwvOqexuSS18e6OCGkgS-e4B_a0~`N*2}Gm5k+410yusnAkUIgBs=00X(=U zPU``f8iF=xi1cMn?89U*1S!iUjRY3$ANnLxfR--M0ANN_R{M$0x+{ef=@{wM3YR%? z5T9R7v{?Y2Km_A7IUPBNHd^G*Jv0gnfH4((?AX=gEgV84)smD4`EmWs(F}A ztZY39CfbgS4}B*MY>uY+%WHsMXd=a187L)?QlfwB(Asfg``XtvyAd!oUp3qmgsrLM zQ!^n#5R`8`u*axXXc0oAMevD#9h_gUL&Mk%0o34t%Cs=RpIngBsb zM>AkU-rTlNG@SM4erQ@U`#L_`Xv;KMpe~)i1fW(CXm4RDxlQ{z(#1z zb<3KMG1FRyM%DTn+CPll865AoF&$i9$Ei75dQ`;ruGLoK&;*YFjFJSSK^%!!aC#%38azK%etWs3@Db+`Uh zH_VRhfWLqN%t&%&{gRc+NoMT= zO>G*-mNe8PS52sa{%osLKDWgKv)69<>+|Tlly?j4Fwt%>Z_T6)(HFY+v)}f48qN6j z6M|z}z+@A82X7`_Kl68c?^)&uW(SKZof$}zm_7_+s-l*%r&`-jsvHzi1t@F)d|Ag} zDA}72?60K{(3bJ8rd7z6G@%@lb2QT?ZgGY`F`m{?>}h?qYewA7-8wlYd&su4TqJJ& zrF2j!rxmdr8`*&$9wm_nUx3%iv$z#=F7KFz^Q)gRC-!70FN{38_bih^8v8xJ{2g;7 zb3Fivszs=r?WpmYbzci0LZNFPqgJsz(c-#Et7m8Z%p^yy} z5zy&R9Zw^VOmoC*IlcbBd>wmN?AP;&=RH)T4~lwc3NTC%B`5iJn8?|n+WnS2gDDfN z;UoCXIgwsXUq!I{4q~V1c^0ycpa0tXlQFnurg!vcN;5MV=w)mf(wlFh5%VfeeN#jwn@-JNjv ziMsb~NsbnJ`fFS-aOL&P`LNFcpTas(j9%=@AWDR3Xsj^At6XVZ=cV=(Up~ZTLiyA* zON)%9@k$*N#ZzhA#igri?v~jBJ%&j_87+c36Q}`qN$KaGNa?@$G#YnephoS6BW(En z9n4Pu{8CEapU>~!{?n>hs*iDfhD-f3OJ)^vh}Gf?f~nMYf$OeA2v3aj{W{fMC-~01 zz@9Zi%Ov#3+v=Uk_aM2*8`zhIKhAN^?UQT@%)jI1R45xokUI3JG}@bw-?=WBTC-Dr zsK0pFxX2F{fiWI06lr#gu z8bmYE;ZX!U1SU6YWKj+~1)WTG0<_@WU||4M@Han|)0ZE|0yzX27q;ZfJ+rcW1^9WD zN!fV0V5YD;h$fN_nOz8LYNWJm`i7`S{PA`VGmNJ@|HPuY`;Pf`UyDdSGk7n`8H;!3 z4JNl(fD^wrs3o8olv)#lU2TNSEDcP_sDu7f^fTsLnkvdwP1k%SZk}5H8M#{quU&bW zZ&o_LWo6v{t#%4&BXFnzqWOrtVR!J3G!?9ga!@|~C)H}r=yL) z8fKq@x>^5ot`=I;VuhYbJ@skC+7WFbTslQlX3d&`r5!ZndV}P(-#Aun8+`yNA8NXk z36+fVrS;T*9btV^|9MNPeRBabJe-ni4C#swt%QlW_#NaK(0BRx5BmD2|c&|q@X^`-zlbs>GIV<&`;auEtButA&X@5gnFU}OmWw3l#~ zG|>5V2)1ZIsYy6Eq*fx%+i}7#Fr(9L`_ZsI_0ZNrnJUeUPSUpcK}A!mUR$7CYWaBh z3^D~Y9>FZNR2~4Z#%vLKTaF{gjz`~`!s&a(W3P+nI0U4b4jq|fJXh0AT9io-S(Da(r$^Fgi>7_saO4Q%JA;&X{O=^dX99$aXLYVF0r-AE4_7|*_&11P=mxA?4Oln&hyrTJ|9_MT}7)cBQnrjx&CTKRA9#XMl+ zT;V8v`G~&P0E=nc8vf^6^yLlAQ1y+bhO>ubz?vU;$dZ(9^f&TyJPrc1m+|eq$?y6g z^A=$6*yAH<<(rv#vEZITKH@l}i{3U!lipzOkfUU6lRe2&;*TzC=ic|zR2RCSE|cj`QY}jdHH&}TwP7`*yq2+BGCHCL$(4;eVh*R*e5@kmabh(Z~Vny1k>Moey1<4 zXOD1wf$MoL*T5!kLGo3B2tIr$rT^?x^ixr)-t`D&LUu^c zQs%$)bV~o_|EGYE<#M~fRzZ;KKj!j#o@8D{Zbj8yHrfU*i{Y(e; zv!^EziqIZ;h9ZnZBi`%qqs_fSALfq;<(|-1KmtM+ zBqDVXwwst!jO!0+VjN8kc63#e6rH>w;NDJeYbZNeZ*%g}+XuVjb%yF%NoU;xiNVtXyteMjQ-3Ig5=f^pvs9)Tn9Eu4?=}=d(xs|{V213yh>==tXJ0p7yxhR_7}5Nu)x<)=Xg5YIg|Pu0A30)z6Tf~ zJ=C7s#7B0;hF(nh*BfYe2`2M>GXN(SK>kOdCJoy{?LxH}Oiz3dCGA)hGZ6AZ)g3Vij`=%E?Bifc6p|@a9 z83A$_GhS&tJ`9~P86GIEQ)V}#s55u3sHY$?fB>dWPt0FD4G z0VGvX@2_gt-~m2Ev}==oqJw!LP0ET+UbK;B^v=&?q%{Tzbc8%l&K-#p9~Z7(OY79R z0ASZo`THoxFeZf`JNqOWR}<;=%NNpXZ=R!$#dH||S{_2NjL$U3{UmeRB0y9hHn@cb zn-NXQK(&WCvGUl&WV$YWt{&$!L_kM;Yi(lc*?|`i;!EpL-=k@xaUCtyYxq?hN@qU) ze0t=QKSkfxZ;}q$Ar_^Ui;m3Aq{lw~@$|!2U*@m`j4z=vFkAqH`u$S#&;V(nu|Og3 zur4`WKAa90rjjnNs?e&&=7{5eK=0w0pw=kg0=~gkbxZ*WDVShCwoBx}dgkpb5LAIy zgN#r8X*tJuPtB~HpXQ(W`x!Wr@x$0$VSL8vd?tE_ke4UQM4>urdj=Tn0A~C-XlU0w zSk@Rr)FblEpYd)B-&LVW3jo|dgZ3PUtTZ_7zRi5t#w529CzP_e0?#f`C*RM~&r{#) z)BK_3jDsom*YP=_MfouWzt+-k+~qD_Dl%leQ7gyWVu*`iXC*nn|kWUnBd4O!bp4tqs| zPuNECJHSJRE{zv!r|44@rp36<&HOh3=c75}jlKWa5Ayak?cV<3v)*8-vNnWA^gej+ zeFxb`!|N1k3S&>rrc3|Z%W2{FE~d4491pU6iK1Xs$!_#PcyCew&j9HpV3$6+?8f^K zsl0v+RAP?yO{BkcGND){ijLo#ztMJ6PZ?{ZyR&H2y^%XXa%AE1G=%rIgH< z?l1={=)SSkX_$gCBQ)l_cfQGl@CvPm$hvP`-mV8FcD!d#PH-}sORMdEd)N7~SQ-F4 z+r6;4>vZnp1$SCC?Q2v+M7@uc?(tP@RW^~<3oH~1@QV_f#wf8OtYvf=0H_KxG?}rlJx$v|Z_HPX z5;7C>RZH_QwF5<1@bRW*PHUl<7QTy#*YIXKbo%jB_RS{wE^>PNdQG5;KE~#ihq?$*jUAeS*qn_0jjyK)Co&E-rqN2mR7q$Da4ZA5 
z0OA0zmeIn?(kE@2qLg`023W17%U7&^Z2NckS~o zEy~n&81P3^;?eQR)Brdvv@aqc>)Q@tnR0bz$J3&g2$)osp??Q0D09ok6!e85Kuz*w zyyH@#hN<5w+IM4UWDej!yUh9teM+vZ-`eL|;a`n1%3tuQ7QT3(ML}c*`gIV{Jsq_P zzu3U}e~12U@xhcB&#cSP>{x^DZGg-s+D9{opGxx^H_|5Su@k2r<67zqF~#82)bQ)oFxwUjww)d4E6kJvqaPq;sXT^tB1bo&A7A5ujJI0sme?%c^rW zo$O;A^ev~(bbmU2<{9V(U|-uv185vse#gH6)jG7=uy2h{rTT^I0DQuq_EUg8){JWU zdWeJv8!%Gn|40PQel)o%^jpk9>033{@|x;}b9(5s3jJyWEr49K8VBrq(k<9fw3mD! z2VNb=&V4=~Mo=1|oy=QR%fZSTrkgi2b6?&u_-@WedO;C4AM7h`8VRiAA&(RMu($8$ z5%{z$nfW5^Y44Bu%vmGTip_x#wF(WU8N8Wzf}NWvK!h(qOm!=CsKy4?VT|QA?H=Zf z^vx1;E2M!}13_7itU8h2-hYT7uzTA8Z|(BRJQ`ddNBID6^cA&y<@)tOfKa~E6sg7` zFx3IHVO7PWUzfP*@Y7BFk43$sCGynn{OP#@&ilcTsaH`x6Z4syp=!KRd7RU?VRRkl za!uMjXePFFc9lJ7O8ZLhJnXJten0$y$!$2DppX8RXq101EzeKmXO4Q2`;)9SsZVK z5X6F)Ip+9r%-KGZ0fWq22MY2^wbaj?>0&wAM%01cz1v771fu6Aj;3EZ{Vc~jQ(zVW z?rzVrn>brJo}QUJoSvPSOaI_I|2AC%SaAV+;_0W;i@*A-_((XKhQ9EHw7Rg6{`cSf zO-{sFx^woD`+2UP=Aw=}x~W}5+)6;~5|?UEPVmOVVSE5!emA&^FgVjz=wyX2a&KqH zbtzVB;=gF^Mmj!z!N~E7f1ECvF1SCJ{^paPP2ag%N-v&%gGmH*cCVpBe$FSOti3q^ z=stmGlyo8N+N)WKEq zu1WV`8NB_mgnu}VStT+^lGz|78YTQ$Xvtgj3M}P9WbxGju7J!Wt?PH zUgMNQv@T?Mp7i7j*7CcU7Ch1qko7W8%O_yqj&d8JU*RVNAd5x2b*ta4)!rWgkaZuq zh?ai5zFjBsbb*~~!0k>Y7UVrQecyaFk!b+*ZOZFR7n%qz}Gtrhwj8=epbLb^NM-Zc4c9m1bi_q!TBf_qJUF{ynb=;O1< zAMgdice?LT8t-ERgXA~Wra+jpRhnp(3dW3`l-p4}3aNIQKGwpN3(D2$e+9G>Ex$hW!k--{t5rAkfma5w z<77rp19YyUNXi*@Mp=b@-+v^?9KH^17`F(ot$rN61O65P4m2}ulb)w}b}%JuavJHr zL7E5MiNROXRwJVdkhr0lC17!p^+S_2P-_}M4Rg6RTAUq(SxgD%x8ue2fi_;m2y^Fxq{mIDrw3p;Z8@jY7Kg2ENDuC&uxKF@6k?XZZpE z(RrF@1dxl_GU4A zox{)A7>6)yQjQHyv21Y4VbcQ{Y<$eB>>kB2G_@w#Sp?mmDi5c#bH~#X0GDG?v)7FO zpp^)?_Uvx3b@TFj3b!Dp5MT%b`=0np&#u#)eD#Tje=1c7(S6s zjJ}Z8hc+<7oWitl9ql&cRyt0mH7FfvH~Nn4HO@f?>#Up91T~JP-Hwq8^wV^66QHa^ z+smIyXv~Ku6+Ff^SMV2D7px6#$@^i7?O67>MAFwCNDj8z*Ly>NtmNf^1R0Mc73wm~ zZ}SuMauY(@80;$~8vp%ma>mOx9vE{LYgdRqxjW--oZ!QIy|gP0>9RHF^*-ZWH99rF zRnv6^kXut(fjmv>l%d>E6R6F7iS@p|_1$t)nJf=}bjxJf4cX5u(mqw89pEpF#o#gY z>n7k<-zszYTkzQ%nZ{`g-@SHFd0-GSa=N=e+vLv5EZ-mPTA^0h1!_ZbvN-U3y= zyZ3LGj}XH#ChiyU53tSz@osQb@2HkF74c7j3(ngW|E`|t4Q!gl!4b?0=5gYA4)O zERtlFODGsUtvd&gVV3+8nq~4~&528kR__3*d`_6&0b@k}~he&44GxbNdasx9Y zoC3eoVR;#+^(@dUfcOngI#ZBz7dGTr1Q~_R5XkWh6AM@Po`8xAdrgz<4QhsIz9 z#b=2Qp-ZHt?~(w8s1!0YnhuWoEV!`LSBt`zf+LRie7WijHfdFboNavA&`nqZX2(*&hPXpHgcLO-Ys{Um14 znOZ1Ns!3yTjT_F)`5w=E^4uz0#LS+DutvJ!ZwYW>2;j#P-3mA{4^uRRD%Z!i3+5y3 zY*R`TT2RtlVWf&VtTa-aL}9{O4ghXU;uxDV`j`g^>ueED3e9=!n&gm90R0YwPf)o~&%cxgW7r>3FW+xBlDhcq^O1R;DDe_Ml; zPZLr=7v~eq(^_bB%>XLY$oJ;@RmyClrcL{G(^uiF6aqi@DUL#ynJyYcbdt>S&;j z;lXzh)4e54{~*`UL|QL$!t5Hok`q2}03gqDC{S7_)MkwO z5Yt0B=FqNOTwDwjZt3V)DG@(3aETMzsP-*R>{M&Xy!HzF-Iv$Je8Q)6-Y?|u+Xr^? zM|nT<1LbPK!`gJHezf*%<7j($hG4$$``+#H2$ONzCW3P!EyuIdAJN^6*iQuB+w1E7TLR?VIANfgm(i4Q1nD-mVsyJ@ZFTj#aVT+ z#Nz2xx_Mz7N68C!^ZNdA8QaZy)aSoEl-AC{bb^`fAN`$rBFhoK)_)h?9QftR>VAU@ zpJI9%jiJAP_7my5<1eL0zkoT)<5{|Q;B&WRF5^T4I?PHhgIzp`Fof@l#{qX9!6a^! 
zovg1fUQADp&ZeI`dKQMc|5(=K@gw7h0`QtdE&iF$e?FLUHuVDnq3QKWJng^qr7v+J z4ieXXL0JR0!#&Baj*;o18G^0OXx413B$iu5Nmgi8b4Sg1t(p#8T}StOC$g0Au^b*>PW= zUBfyV+g%890h~-Tk7WM5))~W?13Zwfy`up4{{yi+-Czz`W?o@Gngnl0q5IG8Zd0&L z1mo21dWF9aBUJD$Gjqz#i+iVyeVa9W8W}@?#}8BkKSB>9OdTCfpZFUeO5gs2*V3hz zZ@lA0&}qHP;}EJ?5bDDR1_sLmjIuNcz&eI5qj92Eht91d_>hIUt60aqJX|5ShH0bU z6_^D$!mN?snt8A!hE=f$?Jh(M3I^oTh7ZL{S<2a8)8m=*Z!k6i5xyx5o1u3b>2WUbRWi?h45Rd<4+FIq%=Z*Q*)YA8=HVEf=12Oa`YWvQ6r#io8#}Rj#iFf z*!=aIXt<$K>Ml>z*PFp7Aj~xjil8%4sQT} zxVZ}L=5eGBa92X8T?6Q`?bIHkB=Ilv4;DqE-(`IcoHF%c&etMc`GDH+!+;lqo|H-X zGtIuB3^Wm3&&C)kT=ZotX;+3eq-QnysQUm~RVDm&_Oa2{D$?d0GBJUM_c^BbWi-Q% zaT=lNsGZrw6tzy@>x`n&g&AlC8VW>JJ$Qh1QU@EY`g$|10K9c*_p$MzG&+Rewc22s znw`NRK0rPuro$fQ1Jb^@cq7cQ3ma&4wV{bVLn)ykpunxZ#iZv38*755B}`;DuPvs5 zqcdrRe!qcPer>&x*4HpIqm1%OK?;@B3#e+N$Ar81@;mDcwm6zR)Ermk^ei)umb_yOpO9(`7X;+UCjxmH1V9}d0&y1C5PU29ddpwq=Fjub z9Mtf41~A=H&|A{6FXo{`nH!jN7XVC|H6WFJlyNGLV=UDG!itO&LAu~gy)Wm9-13=~ z!TA2mf|Zw#`pFE zcArmH47`)|9ko}3uhL!vv}r}{R5S!PF|Ex;EkHg0ApSn79sIWKrTm{$Hs{T0HkV=ue^4*#_n{VZ~Z3#Xu$a6>Q0xSzsw z(&5rb`g>1*Bz^zctLglsS6PJKI}dbi#~R^*g+~Fp{@Z82$iV@}(zkA2ViD9#fBWRK z>6iV&c=~2|q<{o*(sUU)sE3p%P{D~*h6u{S&*Iw&B+2cb^tqWY&xK43p zG6r9If{o+lGuP1qU#w-Li3GlEuHLW-Dp*J5!5z|21?zn%#iZO<%C zn<<8gJg?QxH1c20A^53fS2n+M`%kND(Qg;sCG-pzHAr9Ox@~a5ZCj0E^XUp~ub}_9 zn-??CYp>_;f_wHQkF>st4%HzLP{E~lPskQ+8Zv$*eU~!2pm}G5zB(;W+*LPeK>$uIN(Pua&Ub2;JvS^9r)!E&bf7->|#``B!CEo4tT2$!gW}z zlkp0`m|9y^1oMas^Jrqyq>n9m(k4QYLcN+ZYIC?--;Y<>miJj0EB+}s#d00+LE3v) zAy$Dl7eMlY$HLaU2s)Ff6Gw-<(dP0%f&yX(VTt`*gf|xOoLV-%F_50ZWN~_BApPKI zf4V%11`YhX-2_6e;OD1OOyjeoX?S)bjeZ18-US4+R}czc9!#xkXwVMgqN1mSahb{c2!`0$~fSMUwgB+W42BR)-J6G#3gZCw>c)n?KUQP7G`rdiY&^JSZU zQRlZlF*8jm-y-myKn>y(-~`WzUu@$$2%>jX%d>(6%b4{WzvaQX3OaLZ03c%!p{Re1 zH)*R4Hk|4R&%;fH>?{Wo1yG}Gl(RgBk15eL@>p5AlrCLn-8eU$uHn~dZLygKVRwz2 zq~Bf(|3Ym+iWocFw6qk_FQB>erfrmIqyQT5J@aeF{1o7+H35tPG<5FV6eaP$ZDSR? z)$;w^gvX92ou!d=@M{C}I?jX)2NXs67~(7fC_*7%?c^k<_j&Mw8Ylo1(N-Dxcyl|L zP8}_{BJItHppNVGbNo$V0z3)$bMYvS)wy5A*Ptesl_8ETL8H13SXsxnlg{h~r=n;w z68fKM1xO74u;(?%zh8|+#_;zLuuRMG(iQ_b_dV{P-GK; z!G6>Nm>L{ElBOT~iB!XP-_;9$id(HAtWl>@`}$fM2gGbKR;y|)A^3+jD1P3`^(D+S zDKh|A3kUZr_%~h00sIh}V4G+eZJbuQWuIcaebU`$8w zwat23LIbT#JFIgmq2qg_Q^gVpl?T$U!2{7xOIWFL9Q$h8xN;@6(2%RqzH89&=KMUM z8UQ?X4d8`wJpo+@(IjiOT(i+nY3u_1(SELZ>ZapplJVb%31eusVbY3^v=V6ypvl&x zf44ZiVFXjn)&9lQfwqJFY*ui3sT%vytw>9g#+KS#)OpQa1Ny7=&^Mu-Zv)c_%jBt? 
zj-A@TM%vh9{Hy?mq6w!-xSBPgIjUqu+Wlx!y0JJ>ISg+gZ79(TWZG^ye<2gJDml%R z_UZ$4r0J=1rtR3lWY)NCPGe}=XVLDod-bCoXupyco-7;y7U{U!QwxgXM(}yY7qZ{^ zU-Qm3VKRM3>leUYH-hg7QtcO_PttGgDri4q>RN0`M}b}W zpqnM|n|N=()bsmPf;M*?MaB4t?=)O!C&dqL?lFda#SrzU!=q2CMQtjqGH%~Q+w3|A zFi@V*jwPEgzmx&qWuUh*wwWs#V~_!^fR-KKoBU7{Z~!ZwO*V|eQh~Nps}D+bE0Xc7 zyUa48TQA#OJ|^I5pXg(gyUgaAoQJLvb!ziKuoP~_mCaxB%ix>c`sV%6^l~6H=+@06 zzh}mD^O)D{<9+h#rPcQ-;AL%9koXU{9^?9-xc(g%yMlSJYYL*ku8Hc|TC?dGjyC7h zo5-%OBeu+k78rYlbQ9VB2jQKjfci>9p+0l3IkOYAQcf3tFqb~|SAWD{%DK+HrN_Y; zsrpJeE&ZDzj(~?F#cK54(t40@*_UM#DD^|HV zf5E7Brhc!#^fNHYLgsP65*J2)a^o$uyvpgX9c9qNT=x%5dMsYl2+J1sFjbiCZi+d{ z4th-IodS3aC_+l7_lf9S?0VmL&@Y|gdWFk-y_r%6!NkPLJ*j?b*F%181sun+u)47^ z6rj@9?Vd08hz6$+RT~^dT?V4+&8H`x?Sb!3PEYH(d7H0N)oR=+$bO&Y`UY25w`A{E z+!`I7V-*FRouT#$b~@Co-|c~er9;CjTb{mKN-HneMEh(d{XgkD>1%g!l)!_mfdJ9u z(UCL?P^Cb5zrmj3Lg!(Hn-^EoAOG`jp*_6xP>RsN{6?W%{-y6KK^4un)Z_~PR?tT6 zjDa}Ki5Y_53JDIe(CT1Cty!CV!G&!*F1{1N1-kLvkyYEp`6SE_SO>9m&4hR&WaL+n ziDb6u;C!?d`085`_>lLP(drNgh_qwZw~>}MT50xLAss^KXduK63Shjx;T z*}|Wnu?fMAJfd{rM}SWJ5h!qxFCS4?B&>rJ?TZ8kw#u51qTb^Z|{ofScmYF#=RC=Q}&ar<@2{}UtB6| zP61kIN(IP5`>M&5Nti^>gLK&)$j>!-8-c!pLvLwfzqI`oq&4-M1B|c}sg1J;u&Jhv zWo-a{EM7OL(1?D8>?9y0H^Fr)GX^K-8}GRX0TmBI5NQwf(~N}=YbLNfV4fSY;kEJ zO>rvV1c1%T3TT}^w_G^%q#cPxf|P*1q4}9$hGR`Lvkk!G))XfK(xz3M`L_US)H<8s z#6W{i-95$B@h(_SWheqX1sa*2A`0NC3|KrYP=NN;Kx2^co4JDVAQHi?CK_~> zPd=oUsO72a=S^)o^6Td$z%tK6gNlv3CfY=+jg@rcW-T2(&MAeIWe9+-2;j0-1$?00 z8}zX-;L;qHO)tTf4w_~i${76}Uul9Z?Xvlzse?vN8^332$_eO?;zw%&--Nbz9k8x* z^BPX?iAx`C!h`DgN?QR0^H6~$PFrlE`8dEyu(n}?cF<&34brt63u&}|JdKgo(mYOB zX(!`&sV!OJ0EZ!*pOYmATVhUIhbHi00S2OEfKq2^2`0e1j#2tei9-(B%&7r}^4sxlc|2uMlUhyQj0eO( zcKQ{=q|y!47~}MB%GIPZHqZ+kI$Kw+UGa@plIS5BNGMOO86AV3GN* z@CGZ&SF33d0K26Mq6v3-j)myW!ruo8QXvMmk)?tE=_i$8PQ=A8ob$Yz(SrN~@TM}_ zoH2&8N$4P1>=`He4<%OHSXTk2c(p%q~W0A*1T*?+5_mYp0tc4OTv!Yd5E3v>}z9Cuo9|B3NB@Uc}yX#l$ zYw24{SJO|-o`M~2;+TFIfr?Dzp0^1@F!@tMV}N7jhi1??_$Y$rLOZSgh|{L7qs7vPhRHnI4)d5E0T`&gqn}5e z;5Oi$IyyIGw2QA>f1+OWtpJ`7$~=)VzLRez1ZB^*x*v3pws0X1je;=UMR}fRI2o2H z5;D1u`NA@gw!YN_88Y*-ERKIeGtBs=Wjxx?Huiqr=K4s3s-($jUret0=`wC3zzhM) zOiRkNr9ll-EkAQ1GIFybQ2}2C5Fi~70f^ml=uGlaZ=-m!o2Gv{$!;SU*%s0!3%n~t z$LV*QXiA_r0bh4QQNYCyP&d}?KFm)4ZZ~lvE+`dhq&7pvs zK^;xBz5tIZj62pv`h07`12;Iedx8G3#z}pw>RHzh0KPTTbwI~;z~MeNRMjABtyk0Z zp}Dj~9WPVI;I*`)C!S7E<2Q_xhYNjZv#DiEQ>zUZbvA$6*%M9sO8|L&tY0{d5@rT) z4?h9`(C^hIz=E1`ak4I2T>8igR~7UZ%jL+jjct`8CZ>}B*kjX^slG6e(jjeVdr@Cc z2HoUP1!n+j9<>)g@F711+XWo#h=PwQG?jVI!GIVVxP~Pi`#g2r9UOP|75i%}*CH8# zA?ZPEvm{8H0mi9)+jnwa<71<_xoo?%d9G#_T_oc*rq8>+(;K$l?>_fgp6>3rz3<)W zdz1$j?6{2+D+_G=KoDsRH@?&Bw|y?1{AVyd|J^AVM^kC7pqp>f*59tBu>~kaL*cuA zi!RpL%YNm2B)FVulHMtYozL_3(6`krvd;|sRsJLo9`*E1`lWmpRnVO;-or1~2l8He zPP`p{AiI4GF3NCVgM|K_X-sD{vs2}5Pc`AQcb9Sg?$%qRXTN)Y2m9P{uK$#awLtnG zxGr(gjyprvCFybyrGl__n(HJ&>spvR-bm-s`8tp2prd=3JyUCUpJ!(>AN+b?6cAus z26jB0&fZg+4(MW~ohIgRh3hE3pTO&_o9veT(24DS5_-Gd6&!>CcA4x0%caW!HH{PW zx}V@Y=6d`r&G`lzPLHDobOHcK-<1L_UpmJ@1FDhVMObD5|MKdM^!)6Z@HOVX@Iiuf zav@=3mRQ7;VeoMR7)((*HaWTF{7WyPA&4{6{Q_AH9K&?)^X2q69>q;B@18?&w5PEQ zOK-5N_Qv;`>FWOp-BeI|j7yFy=#?vub%mgK$<;a_R_Z6wwul%jfjt9U3ZZR-g{HgA zw+opz!EItt*kiQG-+eBnuU!P2lQ-UHu>L{;-{Mk>QHx<)|`wgn# z_lubSn1%+czkcSlk}E;@P6kFXCKwkwQAP$qn1F-e^R9ys96~;ELonYOI`VTD&RKkS zdez!#VyA@%m*9=2x5b6X$LB6y^?4Km*uLY?OyH&QPEUK2*9H#qW3g`TJTCx56%eFX zs80i7y@;SZfS;Vl5N?Oi(7HT@CdR1cATD(aN4t(PSVt9bL^C8YgP~*PG;w$qO_ZTj z#_|2~dA2D>5U?*(A5P}0pn1`cudvP{$U#KqXAnvKIn8 z^lKhWBT^%-Ap|!iIq!3Y6Lb2k;7|#3EBzDshLjcrPzaTOWV1(>s%`4~<{JVI?dKw1 z;8H+E?ISOX8V;tpF9=kwl+ze?c2k zRTTjB;iu7<@VpfH~i*}vsh62K*^ttt&o?wpa8H_UBxU7&94#6t{Q{GX|<@a 
zIW!YoV52flAteuKA0g(@ZNWUd>EAs{OFay>i6^9LE?fgR(SdnB9@JdiDTtGf=4YnY z&CBnOA%*RzGj-UcPY6`%gG-I0lP6E6@yYR2zoxcO7WODnckJ5_NDCTNSx~Ld0~)f~ zwF=->#Sc{pjiyOX=v3$=78*&I_~GA5Ug14D8U&}sX zcajaj$#iW6Ps*Imy3YIdbHSi~d{#{Y{Eeb%KQzd=MI&5p;u?Gfv&7C2B6MLmjg>KP z+`O4?K(|f$>tK0^K0|+$p488|2OH-!NS*phNN^*~^j48Rj7HSz45qFCS~wI-6?|(g zEv={PI^73=s-TipgdbJ`2}dYvF-$nKF|CiP6%K%yn>)mqL!-XIst>KkVrL}!f5%7AaGAG#Q@di?@@kY!D|Pg1umh`h6ALey%%tnzA7Nj zKi4t4gMEVJL<MFN=*8$6Wl2M;X7OFASqGR6cY#twJBisr@#4AK-&@qJP<6d0<;?JCH<_9XbaA?v$X>Xv=|{ zZ0x+;Orr}!m=fbN&UIT)aq_ym1~Ps7w_{>Qq&wx6KbMB2A-E+-9P0=QYhO}(IB&Z= z`n`UJkFKC8ZO6Zu+eG#l2hlgFU_Y`zQBAu5{kk)u{VVG3pBMyD1joTn9=o$-_RZ~( z^^+Kj+c)3m-*hVTA8`S$r+>!v9WDm@-XT~-JgVU7%Tog9aL}^>^tA{8vz#u6$)j%X zm(T=b`rkX55AN49S zm32^h7F)MdM^kzR^`DP2|5vaI`jd+({TVpM-2z@KoWS(8`M1(fPM=86O&m@$oW8>< zVvm~l(#75ZgN4DDId-~)nbqRJrF7-`5Apdhnq~(Nv&*=IxzdH04DwyBbEc(*xg3EyQ10S~VTJ94&gqgnkf9Y zK!Mz}Q)FQV41q$yRk37;1`(aqyEcOhN=?Kv@LVH0cHQm7+>5@<^Gx{7_^OL+L9)>7 z=|;#*AG_u8qnkI0`E@(EP$lum+h;N#?)3qTG?-T#)MdcmOB-ourJ5d_M~Hj0lFo6h z_8Sj@((<7F(?SakwG+(e;C~2kyq4<4D`}n6U5Dln7N$$-$fKvy%4JS#yfU8#*U6Rd z>)(d|FKAW)Xqr2i?z$&%F)r|A(ej{x;qQEb?eD!pyrban!h_I)PS#bRsa(c)kLKGJ z63vl1CfYm=0b0N#cXCekiNk;13r!$?>Q~LY)u_@(kHC)RKxKB1)LK(}qvcWZs7SYQ z@IF|aNQXIyrR*uvn1@}vaWh@NbS1QU1gjj2I-?_+?_?>;4yWjJ${aqT_R}{k3kaTMy)4*fyrww!M zp8QQ3jj!}8$|$WQ8|045IMPuE()>Z-#z)--zNIMNBxa3^*KUwz)R2)KIR-s^FUluD z7_06jl-UT2yepWTP0WNwWR3o&Pbem)nNus^OLsP-M2 zMkA+{R*qrzH;3lQI-2y~9ZHuMHgI!=rWg^`Nb{6HPq=e2q$6};PWh4!;9K3#`t{4{ z%8y=3<42FPhjloO&z(p^)1&F~wQpyq{()Y%(8_eI45RrqJ2!!AiBZM~{<+HIfHOA$ zu2)m@{2Sq4Y!h?I{_zP+vfu+~!vVoXd1?TE9$r?#-(j=H7-nrX$NF%6a4F5ttfni| zoHPpU)-h{qaDrh0`?~3y_)MaY)BrLE*-$KzwqV%Wk1pbAf&;YRJ0&!%WLq1IEzM7x zO`OhmFx98L`d3nD7r@WOgb?=OFT)d<<<4xTl?4Dy0LWlDw3+2>1D@%#ui=eyNJxJ{R%IW(os%V6Hqv&Ex0oo&zJP{jiNjnfoYLq?rFHAO-)5z020ac? zsGca%Zv0lW+Kv8o_<0LHp((JZF8e%6N80v+-6icw>OfyJ;2B;3ALt19I?qs7egB2_ zVdRnJqR%bIhU*LiMewhGqTOwK``MlQ%y!8g4{E3Pid&K3`}&VU=HKB$c1pj+bz9Jj z=LEZ&s>X&vB-UXh*5m1MmV^z!udC^Y=__b6T;!JqOb(e3!ks7}=z;bmN9$&%_I(Rg z5VfCo2kE(t_nC|7WjuKu-~D*s-`NlaUzl4@&;Q)_)7j@Q?i=@@4>kq7IGqvylreD} z^u2d39usYW(_3P?c>o+cFE~6SCP$c*CM6jQ${*g_Cu<^hF4>`D^A8R7GzoU`f4z*~ z_j{1ZtLzf|(e*ddJf@Amc=W0CpFjQ+IdAM1uFEtX$1+MjSx&?WEEC6$qha=Bsy(-n ze&?0nNb{?2akAlbdTi>GX?posT3upcMQ0tRz*T@N%^Z6}k$ODx^g#LfVqQDjGL=7+RG5|F?WxhsmUAT_yNU2ac~2m91=xKvUgd$>UXzK+RS6Ja6~`RKX^rP0N+YeH8Y0#+UTUv&`fgX1>kYOkO^zUkF~~;!>@4G+>b8 zd?$odHWFND=4sfa2Xt7Epki9FD{IC*JK?5jI~%&2xs=Z`?##V+eApMvqYocP{iKhm zL>eq+EgPB@?WIOKe7TZ_5a3@s#==|;9r&ZZZ8~U36g|;aO$q*?a}y>>IG70`p^C%o z&M08h6KH%qGn|IeIH_JKq??8FsIvk%QLKK5Qz8wGEjF3s6y8j$*W^)vDFU?2PJLv1 zsk<1@9&G=$E=A^y&~{-_BTz&xXf^^Pb{Fb31XSIW;7&WVI@HqBv37^POtrv9XbXu? 
z2yl^4f0=iHZ9LP=YyeXx{V*vo6aY&)fL#>?&1oEJ&y0<5!W}|+jY$PxL65e-8hCxsITLcBt#$Lt17D*&CA9y>@x?ZZ)9fL%aWE&7%n_6L z8U*b_!j0qaS|hG6#-4{?m(x+hU?p&|n7oaJ1Xp=}2ji61|B1JU( zic@UBl{pD(@=RNP2vQ7QombAC)2{hQo1pJjj}_>J~a87oKgqvh6g6n)Zj!q20(OV z@nX7s`68Q@Lur$>+z958%@MSzIxNgE>uUi-_Ok&HqyzXG#Zl=jb*c}prmMr%bP-d= z5sr!!*egNzMiH5Acs`Awnb-$UDDYW?r)~i7)~g)(GJ(nE7(5W4VUUNiq3sC3*2dxe zW&zNte~h>rXsB(FuiV*SpJ2^dhPMp2)5!FCT9xiC7lHzGWlo`FUc|gqFs*+I8sR6c zg)gise$t$}o!{(NrlZzP13FeA2Ks3D-vZ2oSC=Y`1svkn0pTIE`JNHRpRSMa9yni=qwLghen>9X?pTE`GS2eGjHU@JA}L}JGFT3@X{WS)#R!T z(|(NG0w$(y%2GhaaU5n$XOEW-dW$52FY%d&7+Zjuj#`mqs^1g52l&Jt;NvvH+FrkN zD`-^~0aKw)Pu%<~?LnEPYc@xjAioa~)Doseqm4G*IisCC)w$r|63i6_`)_EE@l1Ac zKOw*=D-8dNborDY_Y2<2YzbM(i9>%X&?iFkRDK8l%A#~*kj}vXFA{~OPE@*UNPSg$ z^N(9l3Fh-$((bpozQ?swj^)PQ!48sxU!J5e0|-+|e-(p~m(WD=ghzIh=&pI3y;FI2 z-E|P6n9bsg0Ov94+fOMtU#6%N#WwT|tYEB}>=+`L%DdvjVbmpFt<}O?-ECRQrytnfpxe0l#kFlv+94cyP(XRGB2p)$ugE zI-M3LuN;u2?q33#m+P4I#gtxsBc(^-NngLrnknP6`@gu)6HjLRbIjc8Od_9~dnA2o z_5`GmiMqSB$%40$uGiPojV8Oo+&f#DNQa7}0M(q}#OYPbo0rmsg_l@3G}3bQTDq|K zavFGr1<#+em~+BLgylJDj!CS(-+TaO z!)tr+9>!jAo{Jr^w#+J<4EVAu&@S9Dscah9`MAJ#+hG3^Yn|nt4{r0TE34in#=#z0 zo}*l!<@!A?mc#o80YMh&yJr8r<4Z_<)-a&ix~Idu3qv*z-yH?BZ|F&f^S=!XWd20?#+Gx4NZjvFwDb6@ABdXbs0^QpB_mIeYI4( zjL)5A1Q1TN8(IT&XU*X0f-Umh&5z6{cA_nJmv-H(GkWqv`e*dY+G3MMdw)!JXh=iX zs4H~V{7tRlCUc843twI)CVje>v``@MXXMRyH(yH8PRb|H10Vu~L?jH__ObC%NmFdj z3{8xs8p8e}rddmvpTab6QQNm|rWTxguUARg}IiWFxfLlQ0 zxCoFqG>dj-KlACic}(2Y+KTA$$8l-DQ=_E{I3nsw6PxqCrSNWp0U1bcP zlt);7PT~|BpsG)83+F%SZCUvh@9^8ctv@fpB9w>%S^AQpnqT8L(iU#3Nz|{G=7F;0;0!=!SnU-(k&Q|29t@;oELof$zlV z(l|@Ah$fEynaZ@k>j&!~&5Rl5sq|Y7l+--a*OjMX)@g?t2m+WXr^wlr8C5KxLZi$a zx@{&gyeDnStW!tPrmYk?;SeFaNV9{h>i`-&{ARobAWZx^V=>HLNn6lxm=gyb+nE5(`!!A} zTzC~9r$_j{k``)g-ZeXEYGx`ua)v_>UjI?LvhdAxX`z)?e>k6x9Q`on$|nJWG0#LB z3vX^|dhU2Cudb(Q)^m$?AYYgQnc3^fMhKM@HQKh1$a&ybH)l|tdI3j1D+=!;%2Vq z&Zxmee+)iK#-YQ8HV$3bQe;|E!B;quQ4p5CvmNY#S>IE0#3agzHOA-=_bLEg zM*tpg&_8eRf^8psb338zcUEzFigB`dQMmY>b$;*LC#6Z4g7Yl;D7;Xbc)t;S-q`_~ zo91moHa@Zld71Vq-))lE_fa1v8GSMlNHzc{?>L525;yi#R#G^5{`3B1HQzpktee{KiH(>6W#1y}Aa7 zYyW`Pzz5_RVeXqJa1WoB!9M9GgLPBW%8KqTTb6WBXXsXBH_dwcLvL~RPA`jLyEw~edu`p~ne2^grLF72W(E9aa1`#tBrRky0EIyc#) zne*1I``-6G`8+2)|MNW0;d+YeFc)WeP=eFQQ@xI4#%*OYsI1Z2RQine403P*A2TBN#9*(IRjqI<&T`pLj;CJyK_up zEc#>raL0fhBOsX<@n$gHzB0Q_k(ellm)TXj@WHe+HWLg1bLBldX+Xg161dJAZ>99v zPp0&@PNeh-G{M|{&u$sQdS&5CI>=66i(M~OHnhd?r}O92Hx@3Z%T3fUXuw~adM5o_ z`<_m}|MJ&R;rn4KJbNYC5_fgR2-{e*9C_^c5C4YT|?ll-s<8vk{{qVPaZGXs7;{g{Q4=ZPE=f84mN= z*?nvpkHsFSEyE1=FMhVd-|O9-HoG!g&+}ogm2vSF&J&lg=YfF?Jl&@8i^L_MN7k81 z=!wg%12+x|%9NJs* zqA{;oij<39R01R_vnj9v1Gok#w2Bm7fMqNYT?A{3rwZty+KiEP7-yAW3fI>c^CCK{ zs2G+9ALmp2co8p*p-9;JLP% zRLRSWcj6>c1r-IRL!^S=Xy657B9rKjx9wGDmvDm5h#yXB;~-wt?@`}_IXW~kl_oLf zt+dbzOmkC-BaYw|sBFPxPz0g|^kQys&DGVwHs4A%XfRl2$~K5{Dj9-QN*8O^qa&k8 ztPG|$adlCRYoglLRkF(+D#8*|)JIU7V1VgZR^s&Aa)mj7$N*3LE*y<*jY1LZdm!y{ zh!>;UBtp8&JoTyMM_rko`Q;<1X~lIQcB3@!BCTDdRn`}m(h}gz!gY+_BVkmcGEGT8 zN;ZZqJ2Dn0J6g|x3(ZhXG-KvE7tOvE_RL66ni9~lyn+w@4u+mQWLrQHZ1bNrDahw3 zlTkMjFVE<2$nwy)I7JfQ>i{HzCZ3=vVCANB1<9!nVao$p6qXrZr7pB}cMt??rHx=0yT(mQ86r)=gufJW}EYg-}?d9co;59%%K zI$4*G?vs5WtprSpfAUFVL;@XpApVplzk47jb3BtOjWg zGREu0k_mPeV6Nqm-lGpvnUbA!9Y6+kTsZwi9*D#KAN2uE7iw!kPyU9s$SQS3J^V5Y=H*Vc#8b)v*t^}P_kcKM(dfD||D=C#G$C*dL`-ps(pgI4RA&*!6Uh+HZ zDQ|2jx!$C`x{Tr0W7)Bg`WCS4h8n&EeQRepD>=qRyapB7hTPjm(t)bdAsuX+(gtp1 zDc!5{cg$maKNv@!Auh`m;H|XMYtudB=e-9CDDg;+!kc*)kl*PF`q=qA3QllU=JSYm zlaKengF@0v^~Qg~2eUAkAGm9mJ4Tto_aL7`0ADWvdc75Dc(Yit!KCV4$N8`WI2g)^ zJ{cLr^(TL_cS@f6 z)4(nc8qIe+2-&4;LmYGd1Y-B>+TO2Ix@+|;BJigvh@8YD=NzQ5P13zYU7blIrKf3O 
zR&lx*P`GDjDb2H*T%iH5$CbveB4&4o1kda?fIj*0b01Fg`>v$($KG_h&GGmCv*yc5 z6-jP;d-D$`)f+4%PF}_aIjfLgdM>4Z_5)PlSZCaetGWOsPhY)yI(_5%*)+p3<@-7A zc^YG)-(R_r2XAt1t^D=U)$~*BOwO}#@~B!6>TG3Ql!l3qnJ*(Xs`%YPKx%`!z0Tx4 z$D}a=p%)7rLd6(6zykDB&(+ee{>(@^I8jbN_546Og@nsH8YeYgS6{kKlA^P0&?#D9 zWJW?HjL~B+*I*)ie0)BQ4tflCudO#dJF#F?wl+t(X{XKjH$B|*jje9B&6!hdoZsw|u{@Aw1yZ;~sn1i%GT5FB?BUJ5|TOkgPC)*Y{PJ^tTJEN>7(IHZ!`4v8=i;ZZsrS_ zF(nCN`Y}!40dBh<2qW!;R4uaKE~3j>)0I0NqA#hL(?k_X`fZvh>W$C6!AP+@n0|Py znof@c05Hy3K97-Cf*}C3UjEwFg-n9n4ANmr<#emS31t917}wfSsP+iJ%JZneokErF z6z@)A?`d%uUYGR{%y$5GFh-V(9P_aNy#%;5P9^QC06;SLH5kSKy?8c2o;xt4CGt>b z0i-Q10k9SPY0TMj5!@L0N4d>k-1CICDDM5%kLsl3ETEo${`vT>!R%K!9d2!TDV;xg zCQXcvr2(W>&Rs#x3vi;%{IlW$i~I`OcoNp&FhB$w455Zceeu$msVCB=4wX1T&ok90 zP?h4KfZ7yBJg^6LXkrAx+HvwwOK*JpwX_6NRH0mE+c*kl74|Ba>M~3$w^3D*|Dw){ z?_SDLeu#qBW7Ke)cp=TVF^ax|T_&04D)ndmQg5cAgn+qj^IQ5VQSYkzN!PZK08?vz zDYa&i6a!r7&^M)h!3mkG@Mqf8Z&t-Lw#@8>&7G)pp7@wN1zqVQ7C3xMhy3O8!ngqv z2r#McYoG{bP@HM0JSz6m5nx7u=0Aph;st1tzb0t&*^gFV`$%mBv!c>{AgH#?TQ7eUI zIBC@WrM)nXK^>}%N8Rsn^`NVi|d?a0Zdo>+rBepy; z5E7LF*V33klYQB16kAmu0#NkjEEx!`kv}pM?bQWDU!Mc0=P&`)#;Oo^jNjCW-LOJF z22ee;-`EEwHzTwi9AmPRei_ebF0v7EM-7vZW4rX4feh<9f9%d}0L>QbMQt^Cc!>b9 zQcZR0Xo$RMb8MNzy{=O~3ur-<`Hj~|AWK2=o4WCGRxl4HV6P26z=9HAUoLxn4R569 zm~6MP?kG;YD-89wAws1c_j2~3TnDCGq>z2@{JmEko6iYuRmQN#L&KL>C_n0qU4XZB z7?FW-G&{fsij?vwEOk5>x1$K#A}!L4%8ItNrbTJK%d!j(d8!cXNGP50)u*o^-cH;!+S-yjC) z0Kb;U_+nh#tblm2BPL%)=y-0J-P~sV^u08sNh&@0-UYm@5C@G9@%hjB$h`@A!Gzzv zbJ)=#bpdenbovS4uP4)KPR6>(8Ck1L;!fs|iW>t!`f0A`xR$vb&UD(Knc_Og^&lK` zKstIff3(;2fe$23o?+ue?ExrHn*I9M!C+SQPR&7S*MRmXn*OMF&)41#hB_?* zRy3b>M%vq7@kYYRtFhwABlFGKV6@r|M%81G)M`4(hR2C87-l`*SY%Mka^S(GO1j$N{hYkB_G> z{CgiyfAPP)lFoe3zWSqeGv`hP6K;3V9#2Rykp-Du7k$C_@E!Mh>baQf^I|2QIP~Pf z+PRXUh%wypC;$-VPNfe*q9M9nU%*~`JHLvBXtD6@@6knQnSJN=$V-0&qyc*yWmu8n z0(j}dn8FIfq!EV2_XLLL56o23C$;+n`2E`0TAIa%VHqZL1O`@nM{PkfoW=Iz?y$!< z#3m5TjU115kh-WWH37A%EO#1HNZ}qIW?}!Ru)4bpNCsF|hnZ74qFF;V4h|BQ zGvaq?QDqVwU`t#3bliC_uOb!2yc?>*fB;R}Zk=}*5uM&Z)oz4^R(l!s zAwaJ|q=^PtbTwf9VXFgxwCx3xWPs{XA27uSfaj+gsd4;Js_w6*1CJd@W52B%IHQ8^P6Co=}VwWc3F2m8^EqxMX5XrU>;QKY@wRAHb;FgA-T6i`m}u) z6+orEX|4dCWMZRB5WlCfiA70^zSqf^31%qG6CfH*A;V;tz!fYlO&8KH+^UW#pkAbgW++tNf$9Td$4gZJwGr7z*IC}IW?pBIe37@drvVCB06GbF2v7)!S?FQvtq#a3fL%h;vNgS$E-uZd@uT~(xrKT& zSs6rKah1aiR8^Crt>2RU0dQ4&P3G0wRgzS7sQ^G&v#ilSTI_mmu<6*qe;a^xb&!Jz zs@I4QMvOeQXoHP1Ky+t1HJ3Rcj1&JByKCtJlAc`vvj%gC5+L*Bd7l1vnUfON$@?hz z-C9o4{`8LuO#2!egzaVOY63>SK9pK4T-LFlrDSQH##QQSi4z=$0EkL}Z`YZ#_HjD< zRBe*w*eL5h*57UoH z({fN?d|J0r`v$uYXC2a}dpVPU9y~y!-BEFsjEYJd#>__^%#%YNW0(G^6URfxp8THw zF&6HYUjh={&|o(`d9aoyv6(0sT015>Lgo(D6cSpUr0)e>EkkgD1w@YHjd%{?6u4$O} zOC@Rcz3w0Y*6Jz!CLj6B|D8{d59aQ!T`_Tvph)sU`Z?@;Jr3LRCZ^ZV1@J|8y6ZR} zbbtftK>824KF@WE>nzttxZIsP&-GQV`*p0(;xxWr`!#TcF)* z4US+w0)YEu8hq_DsW$ga?)av)3Yv5EX^u+#K98FTM0MhDImZeLj_*c!Fce_95*(B~E@%l8+rrE@Di@h#R?>CgGVPXyMaU&jJ= z`sf;ax-NoAr@quo|K!_q>8;PT(m(i>v2+-tqdh(*2K5H3laD>ki9bl14U@gkmIu;5 zhf$e(he`+M2?0A&C*u=YxJ?+d z{rh~Bvw~L)T=3I;{}dm?`~K^E1io(TIBWpKfLR#j$73Jr^KMLq+nKBWcZHuH|2Io$ zNGP8`$bLAR9n)-etbm<{yvmP1gAX_dN(q0K>mPIdB$vN+N_V|deL4Lf`+qn6LHiFl z{Zxn)EO)h)PJQQc`sVMynm+#o z-2Gp~vAeJB$-3CR+2vW5%^Pos3!CRe1~4TmB*R)~;{_>+1EqcGL#WxU03NNtge}Xc zz+|Z+(-z2J4p5>%n@L@mG3!R~gr~gAhOAwy#UKsi(?$|}=y)|v9l?GO1K7%0BthOn z_3$d{n+wDN0ETi=tV43H0lBCSgv?R{k$}1?WZ32byGU!K!<0#0VCvy?t4KqwBUROC zaP&KjeHn0~r7oiNN!Ge>p&IipkYS>TcS^#kVp0VRX<$3(I3n@x!Yj442#_$sDUGX0 z-K+p|c`};}{2G(0hSZI?L}p6VhkTp1Bn4AEc~R9J)p3JqqFUt%RB5bp^6A8@)@ zm9W{%xyJCM^9?qFv?O0gwX8izdeoh)QP6LXu544)X;slHvti{)omr(fxA6wHq$&U* zHB^E&0DU$9EM3>OsRv!=*PNqKT@A@2nOonH_g)?gMlRHDc-oW+q@3C5mwZ8wVRMUL 
zP2!f`oR9k%LjBA1yXGyNxr|C8J#oMm%$?G5E70Hy;J`3;rlw9DP7^=*L|Q~;u&ymC z>E$5j#UGhU%~zdWb1%s;xkjTrNb6|7$#r*ou$-(CFbVyY zk*d;gy5R)9*72jtu3+dYs)E*u(qb7bwA>h7r_39aw}PQ~)#+4c9UQ<`YU>7Sgs4>` z1z!P~aOe>$DPc=%g%c4gq=n9Enx*Y)8`X3IK&K`XvAmwfk#=lAKT@Ub6{@RN5>E1I zF%GOOBgJ|JvmXGl<1qQl08gG~+Cg=Z4x2WrYi!5^^bTX!i#V1>klRMeZn(WKwUC}W z^Y-hh!=iP50do z?84A@q1_twT8D0|`wnAJ3(!&ZN?;+dj_o72V0)c;r^*_#LXn${ZMlcgWT!4s`iY^yaaf?i@s@kyC}=Ba1v-&7oM-y`i23e! z0fOX?8;^JPF&`GsioV+)-g_y+C}J2PJr4m@y00-z{-J?x3+W%slav64%dGoj#v@S7 z5j!L6|81dkCZOB-J3sCF-}6}bE8Z00dl7|Ha+o!-uREUaaOS^Pr?yjnbk>DsEvl@D zT51{vaqVm7Zf!&WfP;wdeOLOvE>*&!#`V{8oB)cq;unqX1|uj*6PF z9oH~8`mJw$1kme4>FKA>q+j`!ze=C_)Y}YH?(*Cmwa;vO);X~l{~-brrvRNe9r}Sd zrMRxKYxmNPOKEk4)shJnPP$_ucb?tOxe(Lk1@syEcYn(FaXv$QbOgPflbN_Q-A;e> z_2u-@r|aq8{#$?>#@BPMrPnA|jRmddP@kgjJ;Q?hMMW6E8l!8=)S<1A2AJ3??{M=k zc>e$NUVfNw+jX~!%d$(W90Bd&$i)B-#FbuAk-Vcd9r3ROhGD6twoPux%SCq*VVUXCoSOtJs zqVSWA{pkY?SXW^TH-_tJqt2!ncBMK1KG}X0i)G%0ttUdqD$9EEq8v)K=q!%l&pPT( z4OAtM{w!+9&0}ft^_6t;Yv0kT3WszKN>R3c|)vBSA; zEtsCeEh|j2JNno}I`DyG=^BR4SJoF$$pjRVsEA8Hs>!a+AyIdOlmGZ#NOVO;;-58~ z^QHYCGO_jQ7IzlKWGr|{z$qQWnoVGil;frCl~rwE$W_k_2@%Q=YO;CZAc)n%URMJ^ zAgd?(o}MD;q*^Pn37F{wt0K3gnWan2Kz0>4F7wm>Ts1E9skEAPU(~-Ll>XD@_i@*| z&x~3DoQ0>JKWTs_2vDO$c~(hqQR)DAxpeMAdW;ij2PWA(fgv>(C^^8UTu((zgeU(p zs;VOo1E!811cW#|l&%hMq;uURHrz2(56ChA@KuM7Lb8yiDIv*In>?SoBb2o=!3l_m zklJ(OpqFRvluB9x_|zs<0|}2|fQx`Qj1y(-^$i^y2GCni z3)KbsiesvvF#Q|`c?p`?pia9S)UeLUq}S(X>0cPP2dvW`+8`oQQq7suSCQIB!VSLe z;DN)?7VGSiYSYjf{W$tq^d-tq_4->nFNJ+o#tcDOfV!nQ*2~M3Z46pNBBcgs*N4nU5yRQnYmXSj*BbF2}3Q0m4*4~A*ex@tUtt@fD=T;*D~?-*C60c(jUdSUH> z##5D_;!nWrR<6IV75@7B;BM~=fBoN9BgKaHpg%GJUYM#A^uqyET6xf2q`aK^x(029 zbXR}i-R_A`iuSUey*7W7MnV5K1tpw(c)u&c`z)6yKgoV<|6GUpH)SA?VUMl%X8YU1 zSMS?FgHMhA^aQ7j9%G}i&0!{s9L&&Ud|6vwNOM=Or>UdI)Be#Z=6itd&M1MIi=cx# z;TrXQ0chsKYIekb7eB2Z$AQSN`NBDGDy3eGx6+X=-a<$Dc|6+um!(uQJ#Id{#f@>S zAwj$~f8WCMd*YS75O5{|(x8dBlfHOxFU}=}l1VibTb^ccIR+EtzW-S!w;Q}+cO#-@ z66Iprx+xK#v4us(t zabfOrx8QgD411L6BOloUyLxneQ+RXFue{B~%*ix6#;99_dNf{HYpurhwD8!M(#WM>O!Y+xyyqm5-b7kF+WyUg znMK4~%X8^p0(SjR?|dhHYT`)x>@ytw%w?Opz|%EZ+;0FdHreHQ>#bwhWcw`k*``rv zon|1*e)|6la=t_#J98#Kb@O#z(B}a;i#CTk#R7&<$68})|J44?nC$~gh@O9U;Rb$$ zX?o7v(|peHIn3vEK4&VIC`2L=%0#Aofuh@5%$?hUQin@_J z1;5t0>70kol!&npB7qm62_Vo8r`U57Ne0Wh)7dU9c8BKHjmQ} zhO`2UfV(|7z&uzKKSHXofJs>&HAU~p!8{=|P1%XT6`|uGssm~I0+I536^melBeV5%=cTv>a zdJQJP?a}*ihL*_*bupMD{(@0y!VqmBeZU|ec7KYo|2~qsqo75Z1Td8N&=9;fXIiMM zbvV6o3~*x<^hg2l)>J1=BbJ*mdyG zp#Zl8ed$W?@Mn3N zs2JBja3IZdI`G26byOdf%BjZ*b~4r4{F~k< z&M9xD@-S+5^k?!TeK>_~5YfQsdYT#@XM+^UtSj;#&fp|#`%-XKK}P|725I*?i?jiZ z#jlTGx+Ur!@Bn8(n7?QQh7_y%4Zy1r7yg7Hd*R~5K6T1bwgf4 zWq6+cMZ4FKLg^qGRE7S=0kj4N0DF-R_8@`=?>7K@HULY?(8-ndS{i4;(Scs4F;#)m zaY$KKkk%q+y_`gAv8n(l0$i>mSw?T;6jJ!osJ5SVm0V&yv@lFN zURg|6Fqa`sD&=M$SXzR6qF&y4^E~}!C=CN9T)Q%t4q+ECcozIw-^S(Xw(IosC_Cdq z#Xdz_mjXbeJ&+(pm@hRM*Q)z6sATy?;gr6V=*0bPZ|TSWSfP#_8{Bl17WblQ`-_0> zfbjt&cabD(^NlfSK9zmUi-uIOieo1fyd(`%7d?&CY(Qbr0v z(}tFyM&YZ2NZf(zG?7%TBQ0F!pokjn0Q5!8D9;;|dvip6Pw0)*3lZBky>)5)){kRY zv{+vNZVeNEl&9Yn;Z;#DQ}_M$Ew}ljzq<*fPMY!g!*A1=P)sL0akdL>}4jFeE=9tjEQmypv^J- zE%JUhfJnpizsU6uxr$R6UqqzsA8~z)>p?ho1D<>mWvtJ^JPZEcjNxr2!zNvv*0}j# zj~mO*GK<`^0Elj~hsVCh_R}?NaI7xE@DJVlE_`Q|-Q%-N&Hxo9PJ?-OIjIWD8^BJ& z=*jJX*E)-nnPY5|U)rTe89Y6M%HMzc#@EtcPM--sH(03u$+g$g#I#{dTM$VN`bP{voL8i7;dTqs}!8OzP2P#bq{E zw&t1#Qv<3m9_N|}y4)j9$(8Z5SgaJjS^xq|H)KujuN@7)vb3JAo&pg45xW(bzmw`> z|JYrwL;uG@`d;%xmy=?YZg5d51MXs1u%nKxBvr0d7c0Pkdrjt^!#S|f-p)+^wN;~1 zeQm9phLG&2lfO_YBkvWY)9OeY18Jp#w1o_-QYHCGW)@tXA!h=U0U)rBq}Zhn zhTkt?gYF_~ii~UfV30q}unEd84+pcR(gm0+Js08Z51abcKnT+x}2E;u@v*rU4nmrwX 
zBO+Q!Zkt#_?Na7gfRoyYWN23!ILaYyS_i#kElkAjd>`sn*zsEDlx0s))kab{B8mCm zGUG3%Xt~8*o%I&U@itQ5=1V$L4JYb$^NsNuCcjha)U{8xt+eT>cSAWS4iHxFH|3;! zRT$k08&zEx@6ruaS*hpth)gN8L46GFA4{!KXp6FS@o&A8TGV$J39f2mAPpVf$Ds@} zX>4dXEn@6zO;n8yuNGis3m{AzTEWZ%)K*Fp>DbYs^!ULO=|${gtXEI@2bdl9Sp(6Lj`_OBGbL zGRh`tP!K6MPC-OktitAD4WO$_+j$@l#}}}UMvL8k+H zRdo!BtSb4cP=C=qp{<$#tO$6o9UU+59-r)qd*116=de*myBHTwXy;0r$F|$_^!3g3 z#>dCg#02X$oa368t3-lRc4~{dvOblPuF(boUYLa0h(4WxH#YqvK1$CR$cp(+o$n^vwOt@r`QVQ7Ol^3`1cQfG`!XU8)S z)=uxK3#j!*_{L;Z{$lS0Z@?J9n;7n>=r!<)dU<9Uji{w1wTnB@Fjh>VV`j^3pDs zIGzAonesUQFR`-&!`SB>I6utw54Z%obgD4^A=iTfyNr|dS$g4xE$J?w?dBwO zXfS|)-QM><6C6Z?zyHxK?;Ska?4rzE9Y#oGB#j@s_g(l720dHX-5tGUl8^MuZk5w% z8avRN4I1jk;{d$h%cJa$5;#;}#IXC9uDqS5Q4iFe#VGuL^VdI2lcziBgC9)k3ts?+ zVOQ~MU;8LK&cYlH&fPSRVb1S<_k-!vpME>+WN>i0>M&WyK+)F*MI7;+=bG@tJabRZ zJh)d-OeT9JwP)nm?npCWFJ$mD#c;qh^T8~T+g0*%*R8T=3cheY!L&6% zr%>naZk7FZZ9@88IGc9-&`5gvCqb2MYRL0`a^~WwiWKF0**VU*IQ7)8-4=&;R5!$7 zUApv^3DN$I(jBaGW4GDeWXDc-#(x}6`fX#l#$9@CHp%ceS(J~nTfCpK@T~@F5rB1r zEZ`g1-uUzVt%-Y8caX^R+U?ZK2;hBEJNb8oqM zY}z|r8Pz5IhtnT<*Pby7lf1z2HQ*$VG?Hq!{6z(};9eE7A*FOy0A2vaN@g%@e6bnqG)Ou{|4^c+ z1R%H586ST*;-n27ul90EbM-sJ7Or^L|EzfDcQAKFSHgJz)18QcGanpe__tP4TYaOsenUrfIYycpor`n&pi)Uv5vM!%VjWYn6 z2LKhuFuacKt%1?k1Nf<-guhCkSg%}6t)bD>UBa+4pi)JxgnSF|?%ZwJHqx5FkYzP_G0qRRhdMgOYXv&;#CD26S{6=cLP9D#*7@WkW+WraX z2(@mcVm!d2joRe~=|Wbd&)ioXp}pAXEX@LBmC|x^GIi(II51)#Wm9cc=`jFr@(hxI ziXzWc;X;kFtp#}`iaeQdtT7Du05FGbtYON#F+iJB7DlwRY}!iB44R!Lf9!HWL%7TSp_M35~UnBTB)-wM-kdQHO6* z=jAhuRL{XtAE~633Uv(X>ERho%4i8pGrqC=mFd}! zb2K8((+@pK*uGJv4Jt?vSC-0Y=yW5^VDqtw4Za#uivo-tOq3MuVw^D_0)}Z~fXa^e zcK+*fu$7wiZots?V(IRR4Fq@jR}|!qlvC(=nChl5Uo1}H0IkmHL4FSSDTHzl=IAH*wN z-oh1r3m)YbUf)BnY?0k*=b==>oh6ltu0I0;TnHG(qyD!HX)sn(upy&20bb)sf&C2E zC&SWCj?L4wv@C{DBSTLy`1GfA_xIAB;P4)Oa9f0oi?=+XvEOqZX8NDp`rf)-U1+4s zZyrcbf9lGuJ>TlB>tIeadG|U7M^B>~*dbEHE>l`Kh{ha}HTy3zurs)9I)~>Hy8(5K zzmIJKUe@W#8e;Mnvh6h`_&)c!0qn3LnX}n(oV=>(!i9rKc|D!}&fj@4k88cSS)Zf! 
zm~oBG6eJj{UEm-bnxHZ_TEUKT}V~4zRFcH|?DZt@P%( z+_!$~Ut$mvW@x=C=PjHvpEVdV_&ZL6(&kN{=f3%Y^<28n z`i=vb?#V$(+vI%mG=CRSIg5@cy;(R)H^k21hrfX27c~0AujKda@}-4q%zbyPELlYO zP`}-sJ^&yo;KlMG(z1GOB~NR-2I!=+V`kMJW>#{l?BgTTwZQqn=hF|;NfhYsWBl7A zyGnT7?lfOjpJ%xG=avfeKY47FW*ttKPToLEm3D^-x)Kt+80&wO4mHr~U!#Pk-DyFS>( z1ql}n4r#&Ogan4;!8J~yTdBYVBaJnPlxn#-o#tmbZH|4o8=RtC1LUZ20^%UX#%0jW zug+5y_oK?F!EeDFRnoMbBjBa9m?t|5ZV3X(WNrX(=v13hwXs-)DREPFvBhqr(qI8- zGG+zyi2o2ch$6!Dz+}__EC%pfN42cI=6v2v3-fbnV+pWqfk^@o(aldq!Sq&_(>WuLJ`FHh$H`uZ<%w~t9iX(5qxi+{q#I?x6|owFL$eVhRJrnOvn0R zjYi)K?1DIz4r{SSS%c}V!1(D@0W71AG<@_xT6*(FK6Q}fJ)}aoNHB5@yMi;P(eD_Z zO>0PA*#sUoAamJ-Zpy2i^ohahIi;9jAj@#Wr%#8xK@Dfm@DM3UL5bm!;WWXzpo^4>O{!|4 zo0p4Ai|`RHUZ~G#66hHES4w0EJA{J(xLrZpjg{PA2&XZp>dFw3bgYdrV&7)GYqGZO z0*+T$Di33#V{L}bQS2PGM}`3YXd~8hs_$t(X^Ao^p%o{jszpFJ4Yw~Wu22udv@`Kf(AU^ixRDG`-T!|WPa!Q8eI@1|I;vn~p^XvG^lk@^?;?W& zrb-7&1=$sVM$lL9*|t7NP$9fBpk$~E0(8|M16=mdgm?!@a4B6`o}r%|9_H{77RAyE z0KR3EMsvBjr<~MErdunoaKvTPN4rzs<{RIU{gxX@O05EN#YP>VSPh`7%Ha$3c~WgE_r6(A=Dk0P;A`kwB7YN zE_db&cLY_N-{4}pi=B9uU@&LbkPX`|9ai|ilW+PY1JEEw+q*Y z(yM>@MEdDpe~k&$#nW9ng0wKo*=E9Jl)THpkC1mJ%=Vy?LOa|tI*HewMye6OYlmZ6 zNRKtf)6edIJbiEOW`y-2}Xz;IkFB z$-;h)-S({~yvF5eD&PB`sU&G;=-U@k`t=teW25C`URS7wYph-0VuAPn(w47WX18`{ zgj-SVdPjFpNBR6LAD8Afv$z^CDynpM=oD3F!MA zd^~t?7ghmq?`hiHR^QvnmI!+FJ0(S~yfu@4??3xWdiFEN(%<{XpQl+>|Gc-FaW3)r z_xGA}aUS9py@K1VAB1yUaB$Ebl19&kMK&}P-2-7BL>l)KM_&BGn6Oi?l+(td3^hXn zQYYH*s`KrkdQCtk0Tau!X<)s#NLS6OSVV8yN!d|>zTaY}$T9CLfeiI>zk^(*Um0Kl zBbk}#?EH)I7$eiGFxYKQWw`;faASTtt&OpvG>8HDmGLxk2FaRr8FXh|Gr|gw`v#!X zI3U>?OydyJ0HdgrjWUmY01)BQB@SfaTc%F~;Gu%z;){F-^T+e1_q-ES5QGpg^Iew6 zAZM1R2LzhWP*LyVKShll0t#k7cb!>5N zv})Ol7$fMk^Xfk7;sQH5la4~Rb;<*8b3G9!(yanfUJ5ota^oPPF3 z8rV|D@!r@nBVVW<8l`RE=V80tR8gW{QruGv)owi*uZitS0o_N1m?O7#Is06DZA1 zp#%==EmUB&{^j;Jef2!7dLrGe;2JXRG>DVhQD*up-l5@Y7eK4V=DkpIChzy?xX zWdN&v?J2;twKO~msDotPMj44iKmeB}szEC~WSeZDcIl~@)pkh*D-_clhu?eYTItF&7a^rcCB)7Q_TzB&dF z$AJT!Hd@98pem_K{EgKyX&zgCB#>#y>fPb&gVMbqxWZh5M}q)rhw5W# zLgV^KskH%B6``;#1l^@vWll8lBH+@dZ~1=OFa|4~uADV%E?rJkfso&8Gsw4WG> zZBa>EtdDyIewfjrNwhdWu7B1vfhlZj$hK2lFT`-3<`yGvo%6m4&P*^*SEsk z=0CHbihh-EC0U26>uD5Y^@4l$5hcSifJa^0A1IGCJ$>zZI(-e$t)o^MGz;z8MqM&H z&={eP&4>Ms%x@KaGu_+WNLO)ss^8WJmAJi~C|Zw}is!|*aToW_WB1}>JiV8I?}vGG zY5*E$VpP7Xge<{1_HJ%g;nUX2vtgBgE8io?gZ&bhK-f7hjn@AQuK$xO7Svl_x82F` zc&W~!(R)F(LP~6Ccr8692Yd@Boip!*{VvAEhvMu?EhMw9o!*z0=LRtmboWvrj;&op z?7K>M-9zVoCB^ywJG`(JU77%aOMZ@u++`p$QruYU*+v;lJ#N0g`Zuii=Nt7lXC#<`T5XMW{_Fe~?#UrEHB_xGBk zYsUioL62m+bh1^66$N>Xmi6dmbmDbm5xD;aj?pRDx96F zJ{raL9VV3VTSyV)i1K2)KLgtnjiD+yik=@T#%IB_#G~#0&4Q<(DHXRZZt~F>XQjo& zw}S1+rDZ?>^0|sS(34}&hAqd^ML;@^@@;Zzu#&$*tyOCzYL|a9dp^j|chnXBb~^ii z3CW_yI!3gavsY-tE>a1F-JtMq8<=R%MOxH{cDpRM^L-e97xA>QZ1>2m`H{iO0hJDs zVdW8y5gA6fi?O!+o1nMPZjR`_{>xzZM_{FyPbrr19Q81zl&Y-3R25UgL;$*i`rY7R z)aC%ZwD+U2_Acu)RaC`J8rYdrWor?Jw2YLL%L}NBVlx$aHLI$XfspP zfO>s^#W=RJ)({5|iB43i78lBC7KxztEMOR5kL~0k59FS<_fQB_UfZ1N2~EXw@*=}3 zNWRX&K_0F{);FEtBGK`DHKekNc2WAK1ZcO4B-T2haJ9iqik-yPjH*q8sRdv(-MOA- z7v=&KBn!l0+qu>R1(3vVB+mq5TB@22P=AQz&tABe8c$*T9s~1r zR0vB94AYmdrStE+ljZ@tuU@{01_5W+%Ro8SpbsQ-}@2(52w+ZIu0|02F z0E!zLZs+%C1zWGEbgpBkR{+wUQ#4z2rTCTW&T++$t{Ch5F2E~+E)M~4?d_UdddTgZ zWAt9CgFfZ$%0ZTm?0?+atQW_wuXGUvf^7V#E&ikE%=0`PLc0{<;@FhFMv zPzlxFl5NR&=7xpl7+cq%>l+xdA7}nRQnEq+lbk}rn5>(p*wnC2a#13c$W3grkazpB z@#=E1NfZjDE)%g@s~u7UDFz7cvKHxFRSiK-Uu3Y=op`J-Ox{EoQXl; z&p9HJw{FZ+Ut0D#pf0K6d#MBLJ zAg(FRh-9xO07{H8v6+KiJK$>SS|*H=O2mOsaykPan;<61GKXGuZF?ZRdH zh`FSA#CJc<^xyY++4J^&z-!N{d3U^!oahe74|oRTAj}RZ#11DoCEzs~c7V3ObfEQ} z*9%;SxXyAZ?PWM8wtkfqL9TsVQ(S(l5-DSI7-_Lf_0wtX$agqV=-MU`Z%=LO7i+b2 
z=8OyLt!I1P(Le6mITUo&sb9bl>kP+n-vi)f&un84!pY>_>l|dUA+YlJ%|&oPR;BUA zv6POzPGj^B2LN77v_G-$SbA=BGW~G=`c2^lyk2}!iLKpRXk}$EjM=vo-OfWZyC&F# zf9$5H9@+x|E=8>0ea?mWOO!NWq1CM-^4$`S$DZsaBfY1bvC8{Re~jzjs`D^TWUz}0Qwe)ILmcihdU6>Y{~8PcyXQ!tAaVfnoEeh_ zlchkFB62Gi8!{xmi* z&R7A!whBY1!~#rj`#3;6#+yq>?!aHC4pJ@agkJ;1Xu|xonLHXWCaVC{>(@~^v&NAC zvIav+%qo)U;v*wx@9Y3pwc#FZ6-tyV@mBvSCnbR4M8X9r=nYPPS+8QJOaw4mtrwX1 zb(@sHych6`&-rvvODln>Xite(!K6wbx>gw2)*B2h*_TN+SZ11}mFzD(-@`xaJZHl^ z`q|rAUE9=X<2N+Qiw<>NMgnR8RXQ0>4`48l+I?ybVHfQt8_rTVFaBV+JgPgckt1apEs7v z)+7(+Pexeqs53{avC-_dx*SehTXHJ^R<0;WH_{+@8OA-UsT$W-DsJj^08W|7*` z66FthcpFFusJnsW)Zh>&31ZE?j^x-nYJ#F6Z4IgeiaH_)T-3~u@j;v55Il#T`BSp4 zN?Yl(Mi{6nUAcIJT7Z6CWGTgkWZW`RL!K<#7#NSG&OH6IhPqh|dvl9Od|hpyL!zym z#v2Dyms2*cT)mX0PhU6~>ORbth00ZJNCe3=kfA&=r6~ zKd9xsAJ^V-poaBNz3bOc5v~Bp;z949_3VfdU=Cp%O9)Nzq_2KQ{aQDQg+~A0{dXQcWa|_qc;oidAd+3$)b?Z8NWGX`vaIifJ zYxN81Cz+hC0C(NY+zU*7hHE##tG_S;T-x~hU$_>yhQbszk}-V}W7=*}71Eu4rMk2O z=1!r0uMeC|-#PwYr|JE_9TP-<+{Jy+JQ-IK_vFdq>8-yZ=eTa|>*lvk$P4qs*zuZ5 zM?Y|Lz1$pP`xA|GHm@xW`Av)6X4v za#z4hpzG8rrcVr%?l$@fbS*4!j5TLF4-UGR$YU65^7q{x)qpf7jHH}%Z{p~M>%hb< zu3t^3XD_E?>_j!N4KsuNjjx<}IsMtGm(r^TkES_JXsj@C{NWoZ{oDyAFrwPW1jsrU z>9G3;yUdD!)_Fe4PhR5l=X_4^QD7`D4Y+YY70{YIO2Vfo%GMj--ZbDwSL2@x8pHa< zgdQg~4gsev-J>{~Nj+faX2S%MvWEe@`j=#?Ic7ukww!0cW%_@>$wzc93$^%h2IKUr zA*b=+QgMUh<6QqGmt+3jIP>)k*MH1)gX^nY&MSpelHmMn*O7d|qREg9{q0;{bh0i3 z_iUym^l@MM$~)dgMbBL-zml$9tO)oI2Xt=P%OSZ1v1Tf^&J z{EKXnqg(e)8MK=twGM4JVM3+0uni3Ws#H=zngB6_ggpdJ?8mOwg{1%}WguHfOf3W8 z)aSB9-}of!t^thA%UBy9by0?atzhiD)|m^iuSMLkX+iaxopo$Fm1x&aZHDKML$j>x zIbPIi6&jHtk9n9rr~hJn@HPMvZ_F$oRiErvySaGBi(pRi16CC4YsVZxvubw;u#7b& zIDjB^piDtUN?qB1$p_(R`f!$CTVj!PZ;Z%{uTmzbky2@e@gYcxxbcf&?-7i(SL@8^d2M-cmYSK9}A^Orb`f9JfmoL4Usw~!8bBiHm z#<*0XgZxMdnjllogHpsb3cpjQT$ z8E0t^@BJ?P_2;+jc+sO9>)vspL^-8kRs3vsN0|(Sh`QLOj@=N8?Sa3&oTurA`Xk^~ z_W*G)#+0<7#VU4#x@eCHTvDG5aV<6kN-UI}9$FBCfRC=*1I>#xB4^~!A{PL5jEm;m z?C2mCd_WV7O90C?jKjAo9H21E$*O2ytk)b<4R7A1Ds=<4yCUw8)+ORx@7a%`2ACVb zZW%wZjfVlgPaJKe$5ER%Tr>w`bro$8B|bX{eb%$63BRQ>#S_q?*%D3Q{t!bc z14Dsuaez-|*-qvz`_FNyf+8?=J121E4*s3GekN#e=^%!HKAz5hs+6984&tU0MQ7U) znY(LB2Wi0kP8?keM($=c&D)1%|6+D9o&CX~^vPd*o57mSb2kp}%nR>&r$N&ah6{If zO$PGWqtH7BMT#enGPjY~^V z)lgozF)v2jZ}a;8k6YA$JzXN~P3%$60!FW-i%Y+qe&_q|q$ekiq(c~eKaH)m*RP*J zf@>pv_ws2Zy<#cALNBGizL3&y|8+{Af10Ky_;()=MqsXpI)JWIe80?xWCGsVgm{?P z(kPOI2Tp*x;Tu(l?ane5&Ow7%R~a1iw-4HMvU7E-A^TxIj}YJ`u%+Y{vKq8Ze>vQR z&Eu`hcmXfh6c5Xp#(cnz4>pCI_qEvc=pXCvG!+@D4{`k?uIpUi<|>?OT-VRerJ0NK z>FBcupzxhzNa54(JejPZmU9&wV5fg@Ie@O0zWf%a+rd@52c7e~E*P)g{dNkxvr5U~ z@67K*@kP`2_l^GhTczjXxDFsQ#>Uh*Qc=t7L|0&7+=W)PtpnIoM}d83A(;Od}frP14>7 zbt~ja04zd<59YH0bEN`4j4q*I5UZWfYf{8#868}_NAvVUVnWl4TFZnzK|XO zT(z0lrL9~u2VGOpn3pK8bZh<9Lc$PwMNg}0-{75qk=0;b1jBCCQPoOGqU*#uZ!OZ% zPEoXL&o5t_&OXrlBQ5TCaZ%MfDm+xl!mpr<;rL{a@l@|c+&iNTZ#hI?nmKL%7h_w* z+LtHuw*ez!ozzgjb91*EWn~uPoq%$jOlpEGNAxdPlBGs96_M%Nyh*!)c2*gL7j@Y7 zB-GL>M*p!1g?+CZ3rp!+uf4$mMo3xJ5E!U&S|*3@TyI}XbM>nsS@gyl>ccNn-?aBA zVA9*K{U9AJKZC8mM!J6SN?HLpGjCn`yRN^Ei+4GmZ`bOdCnA>ZB5-jMpM+ z1#T0ai!|v+EjHuo^sOQ6oM|LpP*j!1f}SEy6Gx~*iP0GOA~S+!j=PRajKe_{DMCTb|5Fc$GQF2+qrYuygZe@_2qqOcxWaahgY>z zgdYgLmZIEnMBj5dZB0+fP>K>gnWPKZapRj?5k2WWjG2G~RbM7(`|m zxZb5s8i*Z?wF7BjaSsraK{+k%2ep9cpWR|ciy~{EV3I*=7`3tmjTH1bH=p;R3%`I- ze^kG3a_ZTyts!~D0{_qm)>H< zV|n@=MBy_*tntHna$AErdk0!%S2KO$-#|JH$s%V=fv|h$B>EG5VD|z#sxLYg7n8dE zj%5;Zw-Y?YCIjQw3~G(V`v;czj)+`_JZR^ECa1Xmlr<;~f2{Mmzh(+`0x*$T#mU;LBT)0h9p7t^^PTutlj!oF8ssDrrJrUJT*(X0=} zGh%UhH+8ZV{=w7Eo#w-ojBD{H7WoDR39($nQ<)AGLA)OnhdM(3CmyBzG&UZD+9E7>=d z4j(>}R#D-yomFkL?`W6HQztbT?5TG$C|wMRdm5#-R|1?MM`$rbE%jR5V9aWm8dRhaxYL}O($qeB2 
zr&AcOFHsidcdTobgHt=(vd`09-4=9m|M3ZSP3)UQ4HYS_ z{!x)kEA7(c^vv#42UF+tbSj@`?j%XU7V9L8Y5QA(#DCN~_bukmD)p@WGW(e2uaOSW zTF@3hZC0YP97;|iEs6g&5>MK#LfkK;{{+$a3(s5qf#23AxgdMDcGAucALccfelw-x zyI`VopCt^>0_@o>e&SSp(tZ(T%TYua$41*ZBxJ0Uo{RX2GnjeGsU)R-8x1rTkZ2JY z2;hiuq>x4`#)uXYCmX0gT7LUgnYz_hYG%;M>wGi4{q_}Xc0Go{d{+pA>GJv6^vd_x zjBK*`%*NDpRF_?PD?wBPq%Cvc#(EXWJJp#1S2j8u>Ds8= zu$n?!j&ciXTb2T3GLHdhP*(2?Ak490po$tJmz!_bS~nu^nZAgVI;sMlj2EGd*c!5j%nt6Ue(Go$7Pb_S?Ma-{0o9-}%l@(f)qxER*>U z;I}8JAT0;TW(D9+HE$0tr~v9)DMeT*G2cgyb8tdMVLled`JvhZQ7k z%Sd*$R@T#SmraBiTZ;Gyg4A%~jJ=pc%&%{x3HxAxI+j8E$-OeQFy|5NM*EWCh^F@{ z?(gJZmaJnVg}R_&@|b6R(5KfK3z2nBYtZ72#np6x`EU@4O$ONjxXhyq%KLb zsFxzFvDo_NCqS~uuj_yq-3o8_^_^S$zM}XW7I>WUe3Z|hKicJz1l=uEvVi?CGFwl- z%=HUgI^PR;LC!Au;@QLgH-t5t=T|?ThQ9l`^nbyS|H?0P(^F67fu_GtU`H~YKOcSP z4zpxgT#(+}v2p}YX^q8 zt0}$lS1CR935GFf5s^TB$uj@|KmbWZK~#Q>J29a{U?-6od;A4DA}J^xwlcRfh9B$i zfdIBp9p&mb6fe2^&hb~k>l%>PUM%&4D!|Pn;Dd?zJW8wD;Q9iW8z^_{IG8-0<`OvD z=~m{~(y3RjfNMS*AjHo1{UZ?Ap!Wd+J$lh|Y79Gh=2*HtS&L zfV~@W6oc|!1N>%Wy_+J=UE<0NMqdE@xIPX>#K->bd%g44cVS)<@uq%prFzTHI>E%F+^2Z2${R zR4(1L>t~LFG6-&&EfKlKk4ziC*Z5F3nTob2yY)*Z+)Vh9)^=ua%V)lWYW6HPeb1uc zZt4U1@#g;=M-kChxWa>_(GFC#K9CL^nL_2XPFrg=h=kQVViEW^m&taYm^(^b&s>?N!|NF$tJo&`< zVjbTL8_)V0!#oOT2`tOBQ`Csj>WFxJCJM(`P?6ck<(S0?jGKN0c&xNng-zW$_KxbF z(6~_pa9(C@i_v@S*b$G?k5N~3ZI=NA0SrhwCDcH)sVC@WeixAr`qtM^rGkdMB^AcZ7XSZj(!Ba*2y0= z6E@oz=5Ww4ZRtswR6#g_r9zS?KjFUw%AdoSa&bBB)x*h`PWN^H9dy*DXS`rg>;tym8ZEX<)!o9{OBh@6$dWd7Hwp}PQc5!ig(82 zr|@U0)`uX7>eBl41jmqI1$@asKd_=eFrZJN##h9V`z8Hc;0G1m)&$>!j?2cX%Aa&V zy;7-N%G+J#dzJKQj-d0R&_R5W&8W}b-L@~i5Gcy4sTU=EEr<5u%twVeD5U5%$hX83 z6&s`>_!hEySiAy)y;mr;ptKErOMd zB+Tqw&=k(e?MLlA!RzN&elh*SZ~a+XoLx%4{_6SJpJI`4m;iX|_ z_oQ<7+zRh|kB`L+EN^=W^-|~&2QcXPZ&U2 zoS3rkVdl5S-uZPvSwhO|?1!nkM%y|GW z$_MioM;=LYZ-0a12j3)a2uLZhbrz0~kaN5pjd5rDK;ih@|72&+c+f>axe2wyE#~M! 
z#_ypeuW@~a>)++NTX#8KPXCHaJ6=2861GZS``YRBxBr7rqwjYpkF8dVvbAsQ1<-f2;}lCw=Y`q8HG2T*V~5 z`83}YOlkNkA|XPV9agTfP!9TMF4@h++~DG$upyX(XBXoAE}~@t?9;p?#xEAuH()5r zFzusA8DBiC8V4#faJ_0kyV6Iykp(*oT^P(l#aQ^K%)4M-i#$aF+tV!4FBZ{$7Yugs zuK1SW4(DbO43sei18$jEhHb2~c40l#oZLvmPoTm$f)oi%^*X@PDymHzYZ#+I0wAQg zU=Zd|fmD@kXejRv$+T1f%vyjOEtsb!e%gQ#wx0~C_PPXx1jkgtk#Xrz|Hh|AoOQe* zxB&xSR*HdaD*e^rq^^C9VJvw}q*LF%kVar~CO&kW)7u!1QOR2FuAtt?rs1Wf)Wwi^ z@F+#0clq|DKAGta)avIem(yd-<1qFhG$k^Dvzo2P7E&&p`AAzGjOSfW$LqEmnAHKm zgJy==B-_9S(kl52!}u0dNA#IBr~Rn&Hh`LKT~Sm9DL)NV3h;!~7k>A00jLmXZqr+M zB!ZlDFZ~@6TCc{TpG+G&JoJ9b{K?E&16kro|H2E|88)N=OKYhVh6G%6(1%yesH1h0ZT0hH}`5LDhG7ojAd&kc%b+A!GRkgC1h5&<=`k;dNLlTLN zBg3}LBftol z_~-|=0ePmpRhjE;5{z?-qsBoih6<$QAlYV>Yeyxe z6?4Ah%dFv>>W}X-`LtXi8OC#qYC8zm7|Krp7IC}HDZNAE;mAkyDaHsdq=dvfv>T_y z4v%OP^TelMAAKfm`E3~dDLv~)7vntN(zigEfLrn0cezdC5j4Ym5dDDoX?Nch5@`Zo zf@hXNKYrq#gazuP42w|4pU(}alSU{7chDd8=@?~BvkIa4rC#_{I;dzeXH-Twuz>lK z7$`G95I}pY%3M`Py&OQQ!MIpPP+$#-xKd>;jo7eOZV_kHLDZfPx4C#qTG>H+bgZnV zOJ5*fsvaPJBh}{F4rM%D?zo8%87>0d^4mHQXbuoJH&=}L&N%jHt+{#-!M3 z9JP+E3+;Cy(MDOs1*9S4Xmewi>i`B*Gd6$N@4C*xAu9ml&b1Xl^A-m%=%WAN%j(Ef z7mv67%k8VM1DrVrU&MG##OA}sDCi;g1;I_*4*H`_B5Y(%@RC3dOyO+(Tf2g83SY)0 zuxw7FNuc3^qzgaqe=|2=lm&Y+e3l;V>Ttrk0psf_VEbUZ9tG#|^*@`a1t_TG=}=mq4M<%>5fnn zd1&17vp6@9UisRAG+jCuU|aE~|C_leO-?c>GFS-8c&`q#YK`ws=Lo}2$ofujJ8T%xsPag_UtisNg?QdPb z@9wq+tX~%rc`iQ0#`Tc>FJ+gxAb+O4K!+NaOXr?cO?nx>A7F?Q!gY=1f~nHJ`j z)0vmA^gn+8KGt%=R?MrtmA;E%fe)n@VpVD_xzLQi-olSZt^;!zBVhqc7}uBCZQk@2 zr^H79rLS~31mA|?ia!FIrYbiv$n)6U-6jUa3+bGQ-@rRvMA4%TR^PGc-RXzl#q*p` z0wMJvqdCm?h^jXjmG0DKu%sbotsWoYQdyOqrV<|1MV$O6Q$@`=9~ob)vGoU7B7f8G z^rI^nto(~WeQr~dB_3Oslm|}I^|~vU4(S46r8CzqrytbcN>?u~f$yUxII|4sg*u|; z#$PbzFeWRg<<&WeL?BD1aRZ=g0}8=>;uSUvS7^(=GVgwC@|}L2FW)^eemV zZRNAv#;zxA^PAtti@tva?e#qh+VOV|zL{8fDQG79&rE&LB;I87&1KGH_ThhX(Z+cA z;6XMiCmVo$`pNa_dw~ecC^*AYui{*e2_) zF6|A{mDZO4K`)`+DE)9?(Hi!g+I7HS^917=Hr{H`2NABV;+J)xHl74#Y{JNr+27&J z8oEop86tXC)K80wh*R)-{{MgW-ZWOSBsE(w3*?>X#kM+lZ4cjnm$g%{BvSFBnOxvQo$OK7IBt>dS4^7PsXYZNm zy{o#q_NrHJf4TYlPTa`6nJ?c~RozuXzPK;ny*HK_7fy5cK&IS8-Hm z5Zgs-6>NLkv5jVM6oJ@%JRp1dq^Xt_MbF_m3T?cfutr$?`rZLoP6xlrGg_`l1!lCV z;mDDq|F7UmoEVG!9)x4WiN-|x1dc-B7`_8XBGh0yJu`D@RRUs+!>`^qd@kdp;b9~m zzj1x6#4Umf;}qeUxnv=+*+OB$LWocMwoNG*3;1j^QDbVz`A+<3XWXri_;#s1%yvVC z>n?vdudq0ATBT~CXn!>rZxOkXoNax66eO$?p~{u4f(*s8e>YzRfVskZ2$tz&0rGB41ykCA;@Iq@otRpm zI%QA^W*HeO&x&YQM!XK)B@hUeCt&!|l-wL)gKI>!=L# z``)98qFC98?3gdJ-Q@QX_WVVJJ?}kpbvSxFmL9`Q{*R_Fx4)de``u3Z#a~S6hkht) zdNDssUqMmjXm*^KtTIX3MuUO1%;+=)6Nl;*HVnOk2Eu6!uyFu*35vUt@3rgG>F@oW zv+3(U&#tPwL&5zQ@p|eh%mr~*1L>dpv6TMhPqXTBoemqTGX1FE7Y`0{O;$9f);^eK zRz7wJoJp{;%(k)Tv%u>RDQtng-dn`{J$_1wKg(iFE1GV+c7pb?V#-1HX2^ zve2tq2I19a9B#cArfD zI@ix|9o1p3JGq@vEw8=YI$Jj0{G)f%Xa0p3_lm-VjBfzU{-fe1In`_V9IIkXq;8@8 z@-Uq?H)Ld-WKJLiQAjcsOd73&Kec|eZ=<1FWkF6n!-f)pK8Jvem7We;pqw}hN9Kor zu{BU8NVrqjN^!Ef6I&FFi6|5`0Abtaoy||P9m`35Ou&iTageXgRRLuVL*#NQ8tHi` z_ps$P6l?d3a5O5s2!e}fpDZC zSFIFz@v`Mg+UWWWv)Ba_Vf-AH!(F}SDL9L&G!;DGCdp6jn@xmiS6Zv-3s?RqoqG4p zG=FI!&10*o$*FHLcN0y{s$z29@{|!6Xl>-kFev(~`Gg<`sNxZMP6;V3!H4gXt49>4hhsNlom5 zy?Ohcw1qMBt#4mUXI^|Z&7Pf4w^ugO^*64fIb>miSUl|Sq^Ecn;YK}n8)oS$OO&m~ z^|UzoLYm(>MSWyVwh~WUW!G=Mn{G~AOjD{wL|x>%q?TY{U?V+|(=ABQYXC%KM<6^{cyxT=yWZdHB;iH*nwcHa|TFX>2oD2L zQD0Raeb4@beb|?(ULdONK)MRw-tV8|lYKR`(2QO=W7%&)sKs*&d~%)AH|gLr2v@Xg zC!WQYlp2(fIOr6((Z}1Wm0DAg9l0|;>7xK#Jocg=fqS|^`rQPLFuX_0Q-I}hu)Njp zrqt1H!PS1030VAd=wAN|Z49@cmwbxmRu=5$)W#j>q+=Fr`OTq*Dw+@CLoEm!G?T#A z58o68W5$^=zFG>!@ox3n(cJ!+ABABl6>2I3GFXoRPCiW{_@75BW5*8!@mdJ39J}4# zsXe<`APR^h|0;3`&hq3Rd5fEuIDv~Ax@u^yqjky0fKxjWtRj#?uY&wWV2->NdK4O$ 
[binary image data omitted]

diff --git a/contrib/PanopticDeepLab/docs/visualization_instance_added.jpg b/contrib/PanopticDeepLab/docs/visualization_instance_added.jpg
deleted file mode 100644
index a9ce9f7f7a5bac7578d338eabb2b768cf55754e7..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 68681
[binary image data omitted]
z3h&?TvKM}S)M7~ve!_SYds%A(cA^CBwYBke2WiKu7A-r@>9mac zdE;5Hs9cTN$mMTN-yR?#Ug2_@d4&N#6?lJjwaWqe^rg3}k9d&C=3^xFfaSZtL$QIr5+fc2>U>x53i#k`88*P4NP!ys_9_uO(zL z=%nXmVg+=LM(n?ln;szvO@cf9oX(D@A{6Op<9KNCXlX{{Z0dI+{CdEk(&`lfbxOmt z;bp{jocK>x0O3l?rI-alPSyK$6FaWr71?J6iI?kA?}%JxFWUJUZ^$aiwFknY)eOO$u&ul&Fu?f z(_Im5bYr`#7&CMdK zd8OaX-syk;Sq=%71|bbtJKs^uiEg%z0xU{?j!hLrIdu=HK`yoY&zmTd39_O3@SU9z;f*G}(qRfOM>c^L8`E9n|m zCHB3*O%1>~b|Y^a&eqcyi~Us5cZfzksjk*)k2XzWwXJ*8{uN|S`5+eZ8-3^I1+ty# zF4iL@$`(3>roRbR>=wjc&MKD3VWmNS@POtp$MYtS(wDC`O6K=jboUWN^#8UYyFf( zF#(HYt-O8+xZ^BF+gfa;DPt!wJ1{R=Uu{*px7++z9TSPKXMUTkHC)-T`C;AsM!W~6 z@kbl&BCl=b^ZQfOKR@s$S=|vzJqTIN>|L^Q3O@<<)&Yxo6-~N55{k!sW zgT0}|8LReXX49lciwEGtCIS5WbBv5#np*BP|+!jCHS}4w?wf7Ga6yUx4*MaS>U@} zp;AOWpou9=Et@4<@^L%z`{QZNd+tD`A(Le#x;Q7hF(SBH?Oxt(By?30#&i(g=|8oJ zwxXUzOTVEylw#LV)2@;iCW40Ueg3C#9Vb+aZ$CRgN2-4UArsCPd-?DtkF8DfAUQrcT1M|ww{%dn0Qo>>N7U&z&<&CLYSN3~JZ0{So zxiq7CC5dNL$E)sYnwT-4^;@bc47|+J8Zr6!T#Y-3`xia;J{u{V7)4o}7zpa2-9aYW z!rY2G-+mudbx8S<(|l!|`3M3x+ai}luc~@=AfsjC?w0NqC%DF^@cr0je>JKbzz$W` zCXC90-@CJxeBA$Cysf#FP_;aqXQ`7k|fz7m(X!#2@>?HGMX;gOl9vXJIH|ADqqXWjkB zpIY7}HRix{hxJ`+-eDto50&coi{AzR^DpElt)0ed{VH`q`27nBpv5jzPAq5-=f|>E z6?RXUfc(w1NV|rU{jm+0kLz|RbRd}F8m9JcS%ljfe7IFLed730mv*tev!mX6Jht7- zj}iBTLUN=2C_h%>5BUTlD6Ma2HE9m`y&2VIq%IRDq}>$rVt$C~EFbE4{LKpOrVzY= z!|?D+$z!2kSR``k4<(y2QaTvEICCrtfx1XbaJ5l5hJB&~C4g}iQ2r1vAOx!`qi9bIak;=b-E;k_ zvl-Zev(!ZQRXNm!rlPa`M`|~-8D`9__=}Wh9RHE)VR9LWO_a9C|L45<k9f=wFk(((CLi2KC$D9Sg(_B<2C!*@$$?c&7r;bnY?Fu>v+1@i zM}@E*|2`HNW(bliqf^Pc2Oc|Ls1|~icvro>k(no1p96Q+6|3a&@H!TD>zxKR&_T`x zA$0&^H?89WlK04;LI8*bqG?cUzPy&KMJkztkmTIg649XQog9;>c;mP&?%lKe7USsMDoqX(Q8a;@h>^N zmvj73$y>z0Ke4sX(JnX@>kr|{+`a>QM5@$9+;G)LIcfA{o5fOKT1w%=P05LQ1Q6W_ zavxNWhx?(9hhrcrGL)|}who(b2kI6`Ctn;}ARvR66ClqiT13pWxO*hhP9B6yYdL*` zGDEB2-L`~b5KuyaE~?Gu=%kw&Ol5)KM0xZaa|hd-kKv^>u=RtCFqg`%)Z8C$EUn{H z^mTv?QLxGQUx@xcXckM`UK|;ZEW1f7ZRC*CLqe#;R`6LZ=huqwXBiJJ4a{^Hxyb5U z?euV`@x|!`(q6YCDbNzsi)i!Xn*;64rp^|r)4RI^Z#`N6lwYX6we9Gm7O%80SKQI` zolfVi5f4uOsMYFC>PzKa-0iwLW=>Rk>Tut-^Syk*esIbBQ|`H6owT)Al3t!;=;Pl% z*6Bbu6d$jjY^A@^ziP^RSaxZQ!kK0wT!2&H($U-?d;k`6(-=Zb#T$YcgD%AOAQ%XiZRe}WI%sx_%u=g;~AG;0j61kg{RP0!7%)~Yq;>?8?UCeSjXiB z)QE;l3JqyiE%h>gTcusTH)!HOTAWDr;(RlJsvr}G*w->|?U_wBiDOH&X5?W*;JpQ2WaJ!)xH`UHBSpu*G>>t-_X1iNw_F^pShl1Wrri%n4DmVuT{U~ z?N1(VD_T%qh>I+0_t~=k=maq-0kWP>^}_}KJ*p=RZ4~ybXemrV=x6}}m1Q*td8JRk+VEH2?J-N~&8Lg@N`o!H_ z^e$$n`xc2=H>*Rwd_&iT7L&w?BQW(oXWB&9YL!lz>#9jyqI*F5iJQy^Y3add*F7@) z10Nu-E(j|RFYqsKJbm{{ptgR=kD$*@?w$YiTtQF1dU3Jl9Y)J3o##l4KXJ&V|Ig9l zE_rd+1?ln2?GN%+Z4X_ADL>b%_1!pm=Wy8cJLRcAOpW6?hRf-Rtl($4I-%o&+{c(T zjY|w}XT1H~oN%q_>%R~i?197HT+_#HEoXWhdJ=g(u&N@JksQ;ne7#yj-NqKG8Jxw! z!m1a3Rh|MaD&T@xp?W|W>WshXx2A)sT{v8mlG!wz{HI9>khBzIl}#{KgfEqr_!4#b zbhTEhp!#4xev7_e;kLX%Vh=B1b2SJC28RgGvX;ZODp8i|RTc+(V%W!CM@-#my0hS3zK=LA(4`!QMO1T~{7F zL^GTT&XSk3jZ{{aD9@fH5I`2d&OQA2-scE!sG3_kS&_o$ZP1tI<}$&B z*#yfee{k)KVHdWclmbi{)sdMtQMZNMby~5U=<9R6be^6X?3a$1g$H%V?!X4s0Ih|J^?OH zg#O(_*O(K%$0V|C11-Kp1SCo)Yfa=88~f1s2iJJ6#5L^s8E_4U)tSZu;i{Dzh|DmJ zE^W>Uj=hUT`9t?f*Q=w=*}SM{HmTFAgD@|(}7c=`z0)8DgJaOVM8Qm2*cnVSPCxtIt31iBF^Fs;VVZ=l)79|sRl}! 
z3w}z#+Mp_I1cOGiyl%8i4Y!_&VHZ+>#zbTm((&7rT!vmicqs_1bPj8OMWt>+ExBxlvdva%90NCkJp?Y4^?MW% zwTEUkcIl>;JZSAf-^8Q>ptf#!SSD&IL>b0(*3DUqpc>uHmqQx!O<>C~nkroJ1!3ru>aO5d^=!k>|}21`zBNBR6Isoc2dY#19arVI1bwd~~7o zZF`e8>v{9ufI+#v1m`ZUba!4{?}3M@GwmZQl_m>AIjzlW*E&$ECDu$#x%Rpx9QDh1 zpd~DH)3BBP7gODNoBn)Q9Rva{?R^+v@FF*l+Zf33V~8wW!$7$7|3liF2ST}jf8b** zMM}%K*(1@6(oF^lMHE>kM#)T3h{|mttrN0_u{1F=mguHN(RQ_rq?El9-BOWC z-HN((TYl&L%#5h+{eFJG-ydVf%ro;m&-;DeuXE1pocB2lP10;fNbnp{^sXpy{DikF z*^4Wzr&Q>jQ(3E;XR?EoXx&w?zVpzC9B-0wzXiiHph{yMGse36phfsB6JHCS`n@Lw zVb|0RabS}TjtYb_^9&!T;t!`xQhHi|zm>93qMZ;z&^r5ozv ziZreKNp`p|K74w?=WcUx@ad}TXw4gz734~#Lkp+hwHWPn%st_)%8BZ<+FmY)cnu1i zEbp+i`y-dMZdABX**2OvQh!Cu(+zIXDr>ldAU?R8t` zR*Z7XOE);EvZY9M`+#ljA0cD9c1E|dG}hpSsV0B;702iJ7*Jhbg!s*sbG(0e&bSGO z1APiM#A~$nUI@+{^Pt&*&h|R+nfeu%*Z(EP3 zxOqWldy;n2`<{=!!N*cIKT6ls{=@O<+HC!}xEhniVJo?7$457}Zesec=t^D{88G2Z zvd^>4o^iTH8-81{iA066a3H3^a!F%Q|t57sZ^boz%6gsyF*tk7OO+DPvGc{2J zvj>V`@RU6})JIddKcNf;z?6~WqV-xQzo)^wRRs^k8O}Ss_WkT-U3wzeWvGm5!$X1~ z&}C2(29<(B^w4Z3D_Cyv&iFiB>GS=zHP!zV2;4UGL|tq?@6B${iOwY-X^n>}Mh3jd zvV{G;Wleb`uXHuBBd=n2Lg>ObP%@!)E^th5C$_~Hys6h) zTzsyD|JFBVGQ6f2+de6{>4!TAIlz^#7+Wqk@d8u1j@oW<6c-)2rWmcUT$L{V+ik_R zx7fI#cFX2Zx7g_ALBa~=`+u0q^*E>-OlbFSz<0%~EN$s%klzD6V&R&Ja?#$6e*}6x z)>71Gf1LGd1eY2Mv}{#z@WZ2&t3Jm?@|@6M{tEZf^M4DbD>^`1E`@)@(}mWL?mS9v zXkFLHwXME>Xhg#PnuTqde@0cj^*T4MvNx`8iPP^9`4(rh$Ua&6!Ld`5pP5uDa9hdm zENOOo$D}bH=`f(;Ig|m0iG=aP@mJQnC|c}|#zl`;Bt>u*Xc9ih5#+|_b#vSUIdpq9 zJCiv{{X#E;^SvIy(9*5er8_;kbsv4oI)lGEu34^=hgHp(jiIltR(snz?Zd*oJi%P_dv05u?7|pEUv0rfy$>PEt z%6w?VmQuDrN#kX}+_-sdr8V(apD27bd2k{rZ{ehUf-p;tp1<*|i-}*(9rS$r+}qRd zKKG8pD3`xyYiPWGwP?!Q`GiFvnRQ0{9+p36wr;LkFIaS}WVy1)QckRt!f7~;vi8D{ z7*6Jo<8E9Yb@-f0O+j0Yh2AO6CtTZ8FIfMGXFOc2nfxK-xyPGpI?ZoL)eQ=BT?Y!T z&8?UV84r8%*S(^9T+16lF4HebylXvBvOmn>{7JF)J#ss>z~RQ-XQXqR_+&49?)hR7 z-5s1@!ZUkksxkp`?qCBX>TdS5*o-zgM%zP$2Ae383EiddN*Q(jYm-U_Rdm8cv29EY^t*_}g7~$vS$6jr8 zY}a3xPNdw}py-=E-|Q-NxyZ7kP*bOMan1$vcA-y5x$!P@-qOM{|K(g`>jxii&na9h z?-yRjAZrX1YIfo~PVfl&k#_7|~(pIzmnV%a3nXWV(*fj%0YIDr2U>1 zlWP^Wyc2HYMbj^IN4VOJ_^{GR?v}tc(aZ-}CHAJg3u%k86TRCb=#|7OU7F3UWibpJ zwexK!v(&Ypl;g|inHm@hh_e;X17`86S9L1-Ec4&S?k`bYm1ov;BynEqjtD=>UG4d< zMqw|vi2a?7oGVM;O&K`)TCFq2&g3RHd-UEfb{Vg~M0)MGdV5|Ery_JmWA(?cxH+Y~ z>~m^w#Y#?(rw}GlmBrRd6Y&M16*tZ96^*WU_E;uT-SbLs(LjArk9yR=$ElANe_s2? 
zm@@fAWyANGe+WFG*M1m-!d8 zFl7}A**B9(I=^^1v;8!87LjMI615z4b9Jo`iWqZb`(L-ptjJHEjm9nG3+#IHW94{W zEJb-88gsiwYk`l(lLUP^#+u3bf(|o#6*K5C*uauBhJq$6ay>6OiFw(;Y4MWn13eV{SUXz?L%xs;9hyKu1;Fk#7z`mJ|*th5@Bx2%)p4OnF}- zKIwbx!e1YK=Jf8VeX;k_Zt@t>9@yxzcolQGU^YDTQ7tE-!ueuiA4Q)KS=gOP`kO3A zcB>Mvou|Aws;NdPN@Ws^wJs4R)HL&13iJ|1bp zhc3|!&RQ>dSk#jJ6^cTxpCk0W=RrfR=9k^6@?V>-T?78Q$4a#kY0kk1k$}U|bWOZ| zQ&H*RDmYSKlj*(TjibPeJ|NAKhi%V>y| z9Z^XBoS&J?_Sd>3>{hqZ7BRXqloEs|^}Px-1v)Bf@=$eDA&Vc>mBn4Fg>#zpYuiyP zzpOvweWFFdyJ5FoP*FfEYB%Kt(oSdsNx*d@cTcscI|XW(aUChYq9|GiT?{3U@RWI+ z`F5~@PknK?)f(N`evMORk9V5WX0g0{{kg37liE@jc!w0sFX@bZvNJkP>{s&M#BZ{5 zAa~blwf&lDbjsrP**e}O$G+mCj_9)A7>uggtdu(H!1y^i3eIOK zr|*Ww6a(dfaVs_!g^me)6z{W()?HF{-|{tQ)XtD3`!4IaytpPoW$rTH)xGo&j@I=` zw(pBiowhp7Fy7W0;-P3id0Dq$N=kDMi6=tqOnGMk{1r8Av5 zjr3gX+{6i(Uaom*+x=GMTjU}S=wUYJ?P6C@FhMiJ*&X$D$0ByCYp6`zK&T0+=N63_ zMRyzGkc(9j={89l$x2pwUFObt3@9q>lZcaQmn{9J8^OCd8L=*YK1|0 z#9&Xxwkmq!Uhdu_M{x-UI0x6;wyE~8Z@4QxJi{1w%0jQ@lU%>hvTb=Pk2hyr+K04{ z+;W@DFAE;X)h}2w68D^LHf8*8ZH;%HGS1O0jy{~PxA#MHK-Z@=w?BH1OBDs|tnc2h z=HK&1SQBz>V{v%-!85Z1U+%cX4}0sY=lVE@dxx3ko|D4+r@vI7t-56rk$fQjt;f~t z^T)7p_2<9h{O6`GYCU@~%em?3ia_;V#^2-4MQNDSSK%qQpucopmgd~__4M-zAy>&2 zTDf)FEkVLQ|Gn~>E>5($sX7hvQ4en0gnfbExj*mb8OxiSOSS*}`@{OnAH1&bXF&7X zMd`NgCDzZ_Do}5tqC1)~?d5C>jaT!xy{652G&P(_?}=hNdi}1z37>K7(m3ermab$t zlU1$d=Z7oTUh^bW^eKRM@Rdp$)PJcb*sQy%S-)VlU5rqn!so*4kFDt;mn{~3el);S z)P)YXiBL!fSKO4;B+p^GqckO4eoBs90gH$G+~{7c%`18eeeR&XKCF)Mhf??nn`)2V zHjcGqo4oKUsF<()d76Clh!3rt;@pfS*+;(OR2PyXtFDh;GK0Z@{%&4D0JRXT7a$;) zFk5JoHXivlgwP9rfApbNYz+UqU)}f2k@CB0yXByDWAL&H%AwsBAHASKpuRrZF7>5_ z-X1f{9$2e=1(UY?VIo@m8}U<7^;N> zh~@q;&EpMTj|k_QBK)$_SM3u}7EaOhVCQ;)mo=yganT&dwwy3Ad4P|fKohV-W=&IPv4>czIxz z5@UBG2kvK&*^$B~D0i&0LM7_iF&po6)kosc{VDciT z5roo-7K~N&>Y^E|o%hG~KYH$UQDf_#*<#<^bmxqYw|_HthGZy4mC}10P^PYim{UsA6S(rnmk zPdG5)e!=}qg;crrb?{m7>3dyaK3MJH2JT~zON zKbcrr?{(BF$uXhNM0$AeHGXZuiI7Qii8K zJ=t9`-?6+Nm-wkAZ-uVnstC1%_0#2b;>5-(DWnVQdBw_|ZxaX1|6aV?r)4WcBYX2R zBG)(bB|9h=)@aVb8#X5$GH`v**i$MdyPle7;8pzGrD%!3F_kf0;H1d# zB~2Uu!HF9{&WoIy%?;Fab%h2%Q6`?#TR;>v6>(OfB=7iUSEU$xYrff&y3eC6dT^Tx z$BtfT$n_wlbY)(f5~QK|z9{WXP8_qTYu6dP>onYyH^G5ob)CKVOtF((81>2q_5~;b zQ>q1UQ3b*mk@+(>F@g#ymzpUTm+oWBy+7Di zmR|A(S2$Pncqwg96P-(q8=Ie@l&aH=zp`T^H*zw50u*f5bsXsP$%;xbX?*TtKYMBg z&RNgLS})?ZLQxY*D4=)Skl*d{iHbB;Am_t8E$P|&-uB;nS_>TO3& ze#>P1eIPlhu~;wjQ*rCE#zbn=bKmM~-9q*arf(oA%=4a;NvM4XqYFAgX+yynvI@>$ zWh7v}{73<47}^w*Lv~d{d|%NrZ^yW6?@HfSCV$LPbD)P>oDg(8P0)Mv*e3HZlOC`$ zg2E2fW!ZP9r0KC@Eetp2Cqv~h6QHNm>_|{z;bVjPJ)T97hKtQXzr9nEnx;Qsjx2o8 zDDpMFVqCglW_;^7x=Zzmv2&W||2AP2W4xJn)af#-U*AV8(KuAA#uaWR|Fue&l#uh5 zhC>ByWe`1zX2%NFZ<5=S#7JV1V)c7nS_%XfMRI9$C=iSav`eZUQ9&6(wei~gltWeZ zl6togCWhoRRQUGC20`FcVUZOglxM=CJIYiN)Z}rQ!^uz|c@+A9Wa(69zNqOh9zkv* z$%$R-<`Dwop*u?lBjB(rWCxp32qU4U12ohM;6S%5Iky}j+(&W!LS_WxTioa9_5a0D zHZGYK@VnE6O_O66m2OZ|&z+tvnA_p|S~T@MbDKr@@~PZsP-N^y{1%`1HS0M0RC84B ze!1z`nlRU9cE0w>LjxsMl#LELD?OJDnDkwp%)0Lwpz)=;J))1$ut!vrZtbCPe zziV~E1z%0VpC|8)ncwr8ysJB&t7NeAPwRUhn|J;4XXj^{F^w2#uXa!6ud&Sb1uOS> zZLj#{VIJGwyFp*i*GvTEY$&n*INTJy=xFaXa+SiHdxMi_sYHlJt5BcjR$5GsaW>|0 z987l4+wq|#CVjR=*3DMYisxXVRQjjJm{eaZxt(yLb6<_;q+DEb%$4%#(WuvO!!u-? 
z==`yH8@rWxw4%|uSYQStey0Vj4jvx5gC(Zvc-Jo-XK@7W<=1=UM2pZEA3_eX(4%Q5 z;9NybV_De=27z(zBR$Vsa!l&yV_6;=689JVH1;qD!>FnNbiAaHDkd;XU^|}Qj?ORn zCBEsLtwP5?V_jdkzx#D3MWH)|bE8MUlJ{SGWGq?sg-o=(4CSGWgeX0)bg{Sw0-a#pfXWv10*kyh)`!v18et)YShEK z@l8Qee?HEYD?NbF0U0w!zFm8|^9w@Pl2KtZI{j@i7G&|!x0 zi#2~b1wTo|?3#4p;OB>*o{gtz@3S7Ez|d@6p)Ne4-&J&R8zFAIZu|)A-oL$@{AW9gDNdJ1z>X=GKce!BQw5OLa42%BJZHWq=d?P=pSaUqQ+(wss;e_EfPIYoY9CCP;`K#n`a?&l2oJF z1BJ(RdvW*}t>`8ur#qDe#e@VGq2yot?Sv@(d zlHoap#kxemn4v6IIU7*RJorwL$F z3>&Hg>+8n)mQ}ZJe&xenba-@JL(8>b?`49{{IoE4C~s0q+dILPyPh0@b#52ouBj(3 zb$MS>=h9tTd&lvE54BBO<5u|68t#_vEH2&G1sbuS2!@wAz?MJb&{n> zmG9zM1tIPfa|JGt|4T)m`C$1%6rhD4=KhYS)K?H<`#T|f1Dx|zLO%a0;f4y1mLIQ@ zL^R}vDsAQ|+@r1J946d`hIA;Xs7gf9dlc>VLyH?uk4u}uru1^y6B<-3nn|SaDaa;jY*E0)wb=QPn^BSA{f(8IL-Y1>XDug zZR_4>uA0FzDjv5xYc%X-p%)=$t%9}v=|>;G@<|=)6}s08D?bwR&y3zP;(A|UD}#FB zx9-xoB(<_N%FB9_HT|I*gn9w#K-HX}gzL|}-#8OOEv`o7R8`q%zz2xs&`P# zAA`JUQ3`LmeC+H?_YrLC3U7b#?OYIFG8c#)Q`e+)Lhl8`zFSY2a$KTT)TWuebsJc3 zuRf{u-q`1^z9xLrLY?}0t4NEtl}#(!JR|a!oS?OaYnuPk*b-U#Ny}d+KuAARp7ZEN zX|R^r{Ufn=iXHC>JVuHmTo>PYlD6)JK~Z)bqj{3T1K+f~{Q1S7?`<3HeUx>vq|~x} zIkn>{|D!UnOIinMfxRJIfp@nZdr(Cg`q$UJ=s39|Z;q3h{Z#7eS9CXrBwP zEyAUtgc7C6xo7QIX=!_G0`mIz%=o|;s8VoKGn_6mlP)BxxE3`{Qr@*)T+9oJETqdsxjeIx)57dJwKNfc7LxwlX7XVd?BbmW2 zdm{}{oC;zMRHuUw0GoY0!eXhLfocPgL-k4-9O$26uh5-A=wP12YBLGyaZ}0QCymmx z6@TP3mHu6`YI#waO3kqzi;#wN#cJc3&QOz|`4v|(Z|~cp#HTkWjVBd;ghOwuCyr!j zywh(x(SNjrwPYi84#Vp_vDAG5U=>fH5YJMa3w628D6>^?j@6~lPd!}tx=gA2#4*=d zZ|{WpG5xqr%U&~Jm+u)59x`G{6zD!IXGCD`EZ3BWy$Bl=a>j`6MpJYGxAkX%`qypv))&2r0tA&U(8;<+3% zmspt87f|L}tbtWM&*iwI2!Wi37`H3`(4iP@C#Z;ymt&Nqif0KEVxHTNep7W(usKF^ zndtr2u-o^u|47_Y#tLOoo;BhWizpqzzuqF0T-^3Ng6}+gwo<0zh+{OD%vbKphx94- zb>43mKFYIQF`%w{nIFe-QH+^>V5bK=aHOu^$*!a;|CHQ4Ga~-sLSAh_5dvSxEVUZ*-Xy`Fs4Fe1)(LD2|X?(&MMRHcQRm#mq5@-CHo+bY<`ht#?1 z!(M{ENz5Ykfv%Hs(QO<@?U#`MCcfxw;A3ygS*QBu@`y0~cSXf7#Wx?h^YAIq=^p4P zV10`pO)_=xWxiBt%edli1;LOByN%B*IVBXZ=6)z{PE+^GDZSoQRbWHyHcQXFP7Wl? zSFU2VETjgBNrb)R8(Jr=?oXm}s-_kMjqf;YO>+Hd!j#iw z;KE?N@q&_$t7`_Z{Zl5aIv34x=j_R_u}gmW|qNXN3OZ^?1%{gDFyd~@3Y?SH9Hbil@SKHa;-KxTQqf2 zpgw1IRea|iB(D4AsB@ZG5%%iG0%py4_94mOc6__#$Lqg_1{>ug~r2^}Xuo-Ix77)2_y({k{4vW(4>5E9B-y(Dm8c z&Wti|U;Q|FFPHGgO_gEwHHol_3$)~XLvWY(D{e*^U@<{sQ(%KXzqmy_l8PUWldab9VvrI3JsY`jhx&s zIOZ77;**~_+N<80T}&h~ zE8!xQi0|IC0tP>)<(^-@v$5gEEBWz^0Dg~(KRwo2FAc{CbGs@q!s&n=gLQa8Vqo^% zE%v!9q#*w7w}DO%O0&Svs5#P7{M=_1V=v*Tfi7uvP|_Op4N-vyM8|xla>M4P)lDnv z)AK*==Vec*OES`*u%XZG41aYz&OxXX#apndJ59$cj3zYe(JYSj_lXYY+S9}aSJ|Ny z*d*&PmB8>)h-AF_+q12VTefk#&+=u#_yoc;Dm0}+x)+r>;8l%CZAq#4iArlb1CQr;-?!d3avu)HlT?q^kX2ah>z(Vw^^U zzva!{4S&59$}R5_{%MPSc+{j!a7kV6=E%|#r;i0%j1a4knb1Gv4wx~$ruuE@GjG4~M@XJ;8EWep%a=qdppX*L? 
z9KFxeg!%71N)B{dKzzE{zL3)m<=>WMUUv#>2{##V`XDCv6*Zr%y4A>VIXh)_Zyf#h z;`ZDta8-d5xL{%P6zgG=wz6S(Deft4S!rV*RMjc%PSCHt<)=teN>kZ+#Tps|KY3p7 z;yq4`pVn1;0;rj-oIo4|8|YDl^g8eK;46FEc^iq_j#8%F=T5*d&?}7HKzl z?&?8K!`eKcOwmy^)Ouk&f(wK!XiF#VG_K6vOuS|Zr%@?&+}DO{0u@gvwJthq-*wz_ z+Kcs6ny_pYb2}CPBt#TYx-~5;F{TP?f_i55S{dOe95cp5ztGn%N&y*!P{P?Fxyt;s z3gc%uf4b9K*UAR`TMq@!K@3>72Hww?5_QlTR2G@R;!zZw-58TQ0TiY#JJQzsnnu9H zv*GTI+>oj2ma~sR5LEb>&YeCj5h{RH0Yp_o-GWe>PQyf9=pAFlC5}44HFtF)j;$hO z5dyLy#~GRrn&Red`!zG#d*g+E+#XJtO^M_CnTnAGRqGr*+H0~KTJN;X8sM#7Z(2lT zuX%LdnTy*O&)7XBD2cmaHdLae&4#+G4fud~Mi&wur~~Z8p0@A0b7kbR9V!{;r<|M; zIO2B!ZZ!(np%4Yy)g(!L09}g}Oe9p2A6yXO8q7UMxm62O?MEe%l!Z+b>{V#w!&Hy+ ztYGLMjTBxg;h7mC83BEaziB{7;=pn*0ywD2wje(i9RazEb2fj)E&h9lUsK;=U)4<~ zq73I{e8nBBAQf1L8kwr=Tp781UMKmAz}*hCxy?;7~r`@%srINHK8fOdR#$(&Rt-Zp<4m11C9}L zm7pXTumjQnA_Zr82wPXM#zhg(xksBa2W?hGV}Y7?Bj}Q;b4;(x7C6Wppc1S#SfbKZ zNm9k)YJ~Bmd*{K^lKWLDBZR{B3YNPE9X4K)s|a#>qt*4hcZxh**aSD=Y4IgEg;kVyB~64 zwh&549#CO#ICPqUG=Tb$lj^EIVFrumNQ82=R2((Z2v$ZD-TtadMrDzMIs|k`vy6ZL zWiQ={SY%rb)P0PKA9b@Pc)C#bZd^Uiicl#K=%X^mW*!#{V9%U;v4_bVtghcm4}3}n zS>G{3^IBns-5I8$X=Lu@Nyk_|`yOa9%<27PITCPLuzf6-hQqCD$x*?HO>z2Uk>t`y z;F#`eN~OKGBg&QD8~cYKD8==fwT11Q4}0048$Y5BmuSlfGIb%b4VN3$pZv=4qHte|QR%(jkyi|lJZ?)5xPPIy z_RCYTs$rF1Md@ST1J}P8_4O?#tcpvnA^p;NGyb^z^bK~jfQ~w&v=eU%V!h6#%=!Nkmn`)lA>2hWOMAVFi*=v zM|(nhYGO-3j3K{xB>$c}h|CJG2T;M=s5NJniXPdtXXHNNQ*_r{&r)KTeZ1dzuTB z-<0S2#m!3S>bCu;)8-RecQ)zwBJs*3hl|HJo^HD2WrxP2M5y^_5RhR^jkb2|qvfYM zRt4TF45oRD4MakBM(`MyBpbEco=2b)P{@laAQ>iB24DHkaX4)eT z)nu0oYh?Y*@uZ8jyb&c2n8!)kVr{X$4qIgld-Y6-U`n$=u)V18n-Z7F#K+cqBkz#_ z$`>@%<`=%X-Ih@N+`s#ZMuB?Z`+$T-&I$*P8FRfS)mHq1h}VUMTDZW&LEn~~U=PvRq3$By1@3o~)ZHt)%dBcB12Z0^odVCXkr5$??5%1agv1v64s^pvy z?MiG(1L-1onW4}=a9JQcmtY{|A$bA{M-#?qFoJTcw8Pih!En zPH)94ydLXMq(9j^9F|U9xhc}Xc2%pqZK3TWi*eKc+?+bV8|`tok2=k4O7NH7PbN(r z2LF7;kvPA<>4w2Zt%;-iAFo*PTdem@ zKr3SoaTFR}qX%l>^w?G$~gzTy~v z?tNR5_%y6j)0<9-%-z-Ycz=exg~(ccXQ@xznEX?ZR@7;1>ONj_CTHMKaY{f25JFbI z#)gZ+7NzVcU0+-#tPgPgQhq^rN>Ejyd#8?=e|nA#)@08L0sx|+;Dko)n_5ImU-AZ) zb0isDx*ImiP>l8(Q0h5%h)|MG5~W4~4WOJyLZpxvfRWHfKM@oRyy1O}zcCpgr1ejl zc_h^|%X9`QxISphPB6Gv(YZ?CC>lSrLNgwUY~zn6w$O;-*u2RJmkRP@DnGYm37a@L zP{**R!+xJ|MRIayW$DL5EZeKKPiL#EZ=7%Gyma{g4FJ# z9JyS*Rk>D^Zf3dJj|}&4y@u693?-R)f~Q3QGg&^2>knC<1YAdSbCGmZsPkFYJcl!b zWC5LmR{^pIs>_0$Vq0sZ6oyQ22iF6 zt?YUwZy<6DBz(|PH8ql$AV9{W5tTS|>i}$T=-$o??`%ZD-fYpAYwp z3cD3G?KkgP@mKElTbNasoSqdDpV-4Otfa8x>mYjY+(xl%l&jp*T=5!a<6Z6WmYX2s zVcou#qoM?zoSjzVXFR0R!rQMJE;#$#PCcx4oxs- z|BYTUp&aroLko$^V1$Ol&U>t@(ECocSl$^WYJl{23|%}#)Wiv+`d|+yV9bN2J~EUF z{+!V3_5{*tD6}V9%ctFs(FaYox9rzLA~{9%z<|aIs3WLxbhe*+8E&CiJ#cdBWH>^g zueA?MP><#3pcuf4HB&28q5AbC%IuK!XB<8<*P7Q$7AZ38E>cF|W3;I(MF~_wZ8hI8 zt%>BC^sAprX4RB_LY9&VDIzQ|UP*$Ze;z`Z1nmE>oN zydZyW7&!qS5LDqP1@sd}2$A89YVWKPPWwo9oQZ5f-|{pyg*=j`ZSn!GgDQ{eR;%zAi&~oxc)U2Xfz_D+vKGN3 z0>End*8O67zrvuT+~Wwos>a08*8UN+$(YeIz3XMaQOSg{s{>3f`#VjXcCBPVimM%M z>FkQ5WWg^LD=7runu`+g7D;H6!5Ngz_`y1ctK{BL1vkcF?>7x49e+Sd>%q&)H37cpFY#{bJ7JW8*$7UrvqKJ>YDy zXt}pve%i;xwDi~;NcONuf@D;9Yknu-ctORU`kOBGt4~i|^JL$H=+>nbB7=~{jUKsCXPtR75S2#HRwJ>*h)sL8AEtU zPA|4pzO4}1N`WPT78Vg|FG>U)TTHsH6isa1$h;olXkZ~BZ3mn^*agzC_~5T#^rOsj zhfnbs_+hJ2hJ@0kDf`Oz&tN3-&7jeXWt_d*XpdS8b%LC!G3>Ft##0mjb+M8KLMr<(vP3|*ju zpr|uOA#Z2Qw3~m<)Js=I;%Zct^1Lz>Z_1SpZ>8q*2}I=t?F6{eSJXMk~eDv@S{KtB6YdIdU1h-ol;1~jg+gcN~CPLF8$NopOz9W0QRzMA-E zR+#;$Gr2kM_&AV8jvq9`jhQ$(9;F=U$N(LH9D-s} zQ{j>XU?q9|!}8eS1Ki`L2t>UhlL^^C5)ntGj~ED@AvIbe=hC3}x3QI+8zmng4?!06 zgDxo8dB1u3Nbnn_5C%|%&4P3~K!r;uOG1jknIv`(Vrd6ILEgi+wFck+_BGi-KRgdH znv@VdA#soq5MyP`! 
zW)4)f3n3Ev;My_;Ftidtf{#h&uE0K9Vro4Yuyq9C96j0oN^O1y2r_^}I9mKv;#iPe zBohG5$kve<(IE855JpV!pg<$=L^kvAhm_6-Qw4;Al_CyhB>1;{t6+~BJpqm2QnunnwW}#qdNZ$^DxTg$w`BFDa_H)u_u#F`8XSg3H z`(*$}uz9`{7uo;yntZW_7%G7gc@9Y02bT}5b`3sw%{U-4Ah(T5ya=}fy*oMfv$diq z&TjVO>Fx#Ok=uzvXn~5EZjii#d|gf?xWYsMYM_Uy4)CDih0F(#T4jif$F5rPf(ojr z_@k<~d{Hs9jUzWxS+LQ+0ZI#}EwrxuVZljP>-X=AZrgYtuFIu+M60ltrcPbugYr(6 zWQ`VsQwi2sJm$}#oD=ylH$+JgM6fFrLmg9$sgP-oixKvt3l$4-il#VYSjE`e5}VO4 z^qG4DkRqtGbOhvGi6Mip-bf-maw9!(~ha zsR5)rg)6)iHhO0gDhYDx>YxG@aTJPctP^^2lRde{+K+2wtf0eFlXoRS`VGlb0Y1hs zpI==Nj5e-YqQ=|A!jOUxn46`dfLLW$Hx9_^VASJ#IhXz~^E}}~0kOd`#B3Ub3242b z!N>I5cW^0LOlkk}n`lje;|t%E@zOG-f}UYfONoc?o>7jpY_^d&df@}W%Fuvc2|yMJ zU%sf`@(`ewe>2$Mt{|Myo{C}&fhc|<7~!~fKYz$V%Fo40%!Pe(aleOpIp#nS(2ZVZ z!I0X_AMv&E+C@P-+dS;4iDlNUt=p#QIk9dY(55K_l)T#UN-NK}qtdP@>^R9Ej5_{t z*@F7{vmegOq~KJyahc6J_xB}{{E1BK3$f0?OrU*iQYQ+YD+|c00FRt#()$I~0xFL; z!Csk4XpRvB_Yb9RAvqogY8nkOf+PoFH{5zCSaWz-#MmR13j{t08E(+V8*){05=4as z3B!x5I%qpdNe{0ot+pp&VDRMEsjV^D{U&T1#jFXBiO(@lPz) zfao$K*dT|%2&vy69*8mmKpH}ZU_gz%0iQ^0FX`7{X~ISq*?mcDl0l~#1J&;Y4ZR0g zG&qnMgfvO$VE8Ki59yX&3;?AcE`;!ms3BGF0Wcda?SA$)zJ!!bf*ZRbnwm%#xJcjs293FeOnB-ICj~IA->@paCEtyaJ(rTLM^- zdzLCpBamD+L!Kz;Lu_EB+g}(> zu+tNClf=*PgG^h%Pw62t-46UAsKsILu7v5=2f;?T}aK{n0tcz+;X-A!XvaRI-Juw zI~{KM9r5rr2{&1-oj61MjHDi?%JI9>BAuRZNQO}DFFTew=l3c1F8K7)rloZ5pOW@# z-FfzDKk78^(p8<8IK&djnHzMe0LvmMa5HqOTww@beok`?$QVEzxFh5&FnlJGCG)on z2&xI-kf=)i0OsJT zVY^Wn2ebBvTUH>#V638@&EI=JNLD1~&TttXD$g<&9lVHmByUJQU{o+8LIML)vi6&_ z$o!ihkP>c5a-XslAsGg#0{^*<`%R9Zq7((AM!s{#jHDPlM7qH+ka=Y&+ZRAj4l=lo zxb}iX*#t&I5+HaR?S3mvHGu{h5tKo$^TTZ;)XDKi(4B&7q0QlC&NgUb(b6qN2#g4k ztGiJ+xvaG4^0B!U)I)-rM~w0#6ffV^Ps14d??h@AZCG8L->Sa+)&Sx4tVwMD(yg_J zIQDg&V_2<*<2iFHCft~~`*Ab%LFaz9W%|O{YbEuDR!s4%GRLYh+xDJ38A(n{^WBB5 zZA6=llqkx0bo&oTcf>d{o>dJh(H?S2iE?-O$jHM)#tqaN5p)&Ue}Jwyx~rsOJchEE z!u=+K?l9l{pH0e;_mHf8lQYpbQvlVW|0c_5;-nIb1khdrw{U7?_9LkpBs*mmz@Vso zb3TSJs1oKBWNgUclGidM%LgnDmRbPFxlk-Xha$%XqhbNUA*D=0r%D2vfbHKwv-JP( z_#(Lc;V#er@IS`h!WE%~DOq!a*Nv`s*lI#+4~}9v7z)71NR5bYRwDynk;w-z8J#c9 z$wp8I6vMOG0l9kq)U9^MetD{8t%s zXgWg?`kK$fL!bo@TpG;JDq2ix0`t59s&6oNSpu=C$gPzbxAaL#TX)hH3_S3(#vq{YZa6EVjQ zIsj6v5Mfe^0BIO#NVPCvFcR>f@9+|nBQQlV*aa5on;apzLzDS!VkA}qcBE7u24ACZ zB6c4?r+J?P!r>zmCXI>{|%_5m6?IQw+X9i2}%%(dHm$Ggu%t zfOc$c-9#)R#4LBh_PfB2UCVf85zjHq4Kb!>6zIkVsp-fzkOWsjt24S|#>X?u-8lAE zgojS(-Uf|XWKN;8&u3ND<7RW$`=OIc@B`~%Azh4sA)=p((Kko`f!r(i0)lE-7p!g+ zlaV2f6!4r0;E_shT(REN5`2Bpx#kFo~;L0RsOV!e={=o!4;Sa<<%7Gjh{gxwY~D0gq9Vs z(q<96U{f(Z%oS@cV2oy<$K?8%=5DZAl4RKg)&x2pF>nQBNE^Vxc2GLvNPwBqAe@s} z*T$qU7`XssQ6XCeE)ael%^&s$CV>f6+@)5bi~utbNXRys_xN+Q1=1(M1S#rZh=VX4 zahEs@&8UMTKm1S$rGy!aLl98{V}q30@UINTwr{VIA^P16qTMGFRFbG{3ApADInQ^m z|NUV8N+1(}jO&4O0BPmsg4wp;mLRG;fYLSq!AvzkqsXvv@QS!J+?-T$M=HSHT>>gF zAO=)M^cM(H0U-thQb8?@m_!H2$jt8|zVNBkfca+8zyPDr7pid$B*n0K2!LcD%vby+ z9`q_mY8vQF%s;a)j&g#p;p&lF6wnQUkl?XF@j$C!7J<<0F4(Lj;K&huZxg9q zECSe~!#^{10j1!Rz80X(%`k#|(BZ{&*U(@wW}QefV+IH0=aM8z9Ebu=(UOx9gK>?FAy{TJIa`Vq2#O?lF~nm3DWDDc`VXIjy&;807`x%< z2EGYq|D*H$AHFnje@GQJUcT#k;xKn5d;VBxq5|B#)ESQi>b`~*=D~vv=MpRi4`?VT zD?6h;8)Nv!*<3UGQ7Oph{?7b^NG6&>r1b_hoDN+KAcg!UbX(w-g9r?A0tf{}vLQxr zikbD&v6ErJ55w`(p#mO3X5L9w7Xa)q+jNL&^Iz{{HV7;u^kop8e+M8i^!QVoVsz{Y zHxS`>O}LY0qxr!5jlS~k7ZHrkT~7kWF51K^f8|Q?x~$*m%0WAG{esDWrLYu+iqF5j zmd?u%1Hy;^MjRNbh-De%frrZjG9Zvl$WAfn8$$k2BPJv;5kTPmyPAMm7^2wz^);GN zND8f<(#*2ba_7$1rI>R!C^X<3+YUpiKCr(0{K3+NnpQ?Q8)$)xj7eC^=)&O2je`L* zK=ej&0l|m*Pgo-sI447*A;GR1w7N*xgaAbX(<`<4(5F%#M6${;jtkx$j=NZ;OBY4V zclxiXN1q{l1HKiz4#W-VgDRn}_*W79zeW<7<^&ONv{6{8G$0!>s~W*9oM^ZjSV_ip zMS!6%`2c!=c9o6Gvt#F{Xau&EwJ|&Dgiq!WAKB9VxVwRwz}?()*oC5R3vAL4*!CZW 
zIvPCUPmFcERE7TdEL{Raw^U*mF8Eic^S@UbrfD!mduW(af!-}pf~_aGhE>gI8XF_m zmJ7gve~tEh&f$a*aL-60*g&734O>a^U8#g1Fjc~!`jm$V2buGYu>5DB_GGklb4BT=GQ z-1Fb$X802P!2}u-2SKq&Dt4X0;B2C_6@&>t2(=`h^8bC*0Iw%~ZK(!sFlqPqzYc|U zjKLN$4u&8PL>;99j6e$=3A6PLmju{_A%2lM^rj>k*O)maxpIUV`riyS_({xH!p2fY zlfXYeqbYxSkFr4@ath=IuK)r4-vImHL0wDYPh&$5rsUxL!{v=_cJ|w5euOsyCBKg1 zB+K@Q_=!G-j7XsE3#vP0%jqK;w>+8C|qSEl0V=%;pQA9T+ znNbXRB#PmClmNj(vV?&G#&G>a0*2l^@{1)_{NM`^NG#SN{RoAe&`=|7K6uRF3v4F7 zzli_%e?u>=P_+xF`POQHl)-(*M4%_Z0yPJ%i`GA8jbZ!Ye>N`|5WokLZ25mz9x|jr zkVCh>B7jr?Fh#Y%?1EX8F9+BIvcL(lM+lJQBuN`(G@|+`@#x>-@OFt~{zKpfS;~Xe z(G{WD5L_E~L7^rd>#gK(Pf5vDbdIA*2X^-3z#j238ZR z($xqCgOvQGz-10TBm?#Rt^c3@BR%@o=xaDsrHj{ zB;c*Uc!^|rdhn0TGeXNjaxBVqmwgk(l)-X^xiLcPrYqYG;@y3dQ%ORlXvyh1?T^EPp`R4Fy9~;0FnA%7pX?5!e`RtCz3@SO<;&q z2v>g4LBqZddx7B~nMg~xfN!9Sc9aJHu$B`6?iU-?>zvX)Rftb}&>16Ne^m=#P+Cz_?1^l&(}1 z#mB;N)u1-`GSRH2f({3V4)LY3sUU?pk$|ADL?!niy9kyAHHrv6r<?$a=}n|L5Cu?y6U$H8C>&3l10?||ruuL}nBNJz?WK0j zzfA_FA;C}-AXt*g!vFA$2u#=jj0p`A7%7V>Nsqx;W%MpW5;!vkGXG*9{vaFwCO7D0 z(B20Tjr0FsT&c{8kfeWrBE-5-NHrqz;*qE!oi72K7>M43iGi(i*{8#WQ4EkE-N`lF zj*%#H5ymXdR6)Eh7^+Da91PAPN(d!^Ehaya5P)GVkvAV%u9r9<9;%I8EV&ca%tWeo zETm{~lrV$sHB?hG!)zUtv_Gud#E&f05Mdf_U?6})D1{=AjBx-!8UkcTAwov70G!}f zu?j1gxWMirMC3?*L&|?6R}!2{7=a&`BlaFv6auN>|NpF_WckU~<`2(kV18=ZcnF(| zpf$(;S4sKaKpkqtpadcqz%)@>3;-Rpx<%_$k|Ts#DT3^vqyeOr{(Chc1K_)`;Q#&( zItN(4sA^v^BpCthro<+KW>MC~a`dv_&U+o5sU6}oYBk`mQ!rQ3PlZz#3SIe*M7i|?DY=JWo3&+qv? z&-=Vp#${y9d`UEoy0gTkm-*Hs%vjTJF{vXxpSREr`kGjWe@<8QR(k4-lzg@!p)9&2 z)mAxhpb@r`V%B0fUz*_g)XxZJ{Zn0Atbsm{&=k#6FgJNgf!`v*p&)-%%+XN$u`62? zmem-CcUY>;Q8llj^u91Kr~!wtE(NcVs~(^?D(wOdBljo=ji*G(7B2Wv0)g01_3VI} zg>=|cG`^=9LIC;{EvT-T??*SmYe{Y@Q%>av#CYLVQau<9`MGjFUS>k@{r_wJf8k=H zvjf*p9iKT4mfkmd_se%1_l8usfaQlG9ef#-5}}0}4eMDsNq?zwE6|m2A0)LH%Hv7e z4*sx1LR9eg(4?1|2MopzWAPS-oYlQOO8H|?Tf`OEPb8^Fte8aPm|wVj6Qq=p)4ZOv5A$ zWoDatu${dV-h?p}rtfKRb+GvPiNf@uHm&CPG}9H>^Ep!I*x&4_&1sP4ogzG{-k`%` zxixS$#!Be=thcfB2PQSQ9%s>4j>gqB7+I9EuZ2HXD>n@FXxEPmC=q*W_lILj1HaTL zMGxQ4b%nZiK|7ECBco4(!NCOUe<3d5V9W-mDi67=Pr@~Z61?1Zxp4!@yi9)a*V^sN zg_>4Yob*|J7C)s{b2L~b$8M&_6W8bJJq;<})G)8gTqG^6vU8XgT#mRJ-cYTen2XE+ zU<#rF?I{P41OoEQ&~PP`=prR*X9ZKf(5`bU^wb4ttQ7KOoK*o5o8ym%4u-K=9x!XAvO_D zR+wOkPbKT9Pw)z<;yUqRmxTH#^AD;+V9?!EZEv>*;_H7kVAgI@vM?`u{qb0VCuYk0 zL22GG7oB+K|9%T~%E9-gAr9*j@BQ+pMP>kB80}zAdf&M?$MB5J zoicCZW|qob*=q`J=Me5OL;@= z?lymY>0u^p}qh`U_2?g)2ua+(#Fmf1%n$X>|WR~1oGcwP2&{asPS zn*ykCN<>h@@i|Z9^`rO!U0c$(#*dy!zLgai zaX9be?A9k?H_F|XU5v>{;6>$U7yj*Zxk;9_I^|AVs^|LFhphhg+f+(-hRopuFT8DN zT}Q5Ki?f`4_dy1y?uADzHp`~Ejc1oFr}j>#CenR8OAQ^Z2h9=#rc%n^ZZ5izV&l(d z9?+P(i)-v>Xwg8{tq?EWuWl4?!z5nn+1%8=%6Q)k@>AL)n2d*VUkc$Wq!mVp4aS!o zrPwoGH7^epY?0VU?y3Uy58^@>{V;*W>1E@|BIrDn@gmon0Zc-Ola@K_(v>hQQ_T!6 z)`eUO+2lJLFh@~%L0Bv?%~4!N;+D$Ljzs?+2-osdG0!%r8ICC^Wm zck-Hk<(%{In`7@~eO&CA`&@73eQlBU$#+hp+oW@P7zzE`>iV3I#&8Uy^2R#PF_TEd zk-*WUd81OI-gN0RU<$#Z(9qwhM=ewKtk4){t(J0jRrFE_vBT`;3|3#I`mna|MSjIVm1#I1T>vlsiWG}TYy|c_ucRUQFQ4e1D?%yZxz~l}S<=ky-oHt0e$Ii-9`g-;m z=I-jsPdP!nzTvF?-Vv{o1;0~_w-08*%)p2!`GT=#Ec3${Tc<{x{Hko(KFJ4)fz=O| z+cjPe+T716T$Ir)@4nz4$mNSzg>44)wnb4+EC2b%Z7{08>0sSmtYPWtT1oZaxj)g1 zq#hTV>T`r!5@`L6)5o$*)_JtsDwp_lcNNqecNb>7d={*co;^a|F(9M$*T1r#M_c}c zR+N-A#`a(9U2y7VTl>PPq*u-hnipP-6>eIipSY^^o>6&cjBUoQytzAFZo0b26>E>B zHlJKnqp{0mxt~M2<`Qlp&8l{)ugG=Ksbxd8&&qoDg7>F~j_9<#lJBXRrom?h&L@l> zt}mj+xN-M>UmCQA8MMuUG+(PKQe0tvO>MWy(Z{0KuEkeNHpV{~37vJxtHnEVh8s~Q zP4v3L>2Ui3wNQp|%gLYRCHYhMS#Mc}Oi&@rqDYv3^yLN$YQ8}&I zIq8Hrtk=MHX^U4PA*#;IL3rX(#~)Wn|EM!%acm5o&#k507JJQHR&V)X`P+{yGve8} z<6zpz7Dk?bdq}-oa;Y|?yEeJC+U?`YlY>;3EtZKhHPx#32;bRa{vGr5&!@f5&>tQ(oz5bKmbWZK~#7F?7hpA+*fwrdGr0OcNKst00hxw 
zgH3izBM&WkA{;U52s^ZJOlW57#caLtcK!+f1GDgAA||}h-qOY++0z}4aLBSbA^$gAcaS&wqX^wOT1{Zno0D_!qa+))rq#fAS}{_}S2-Tya@R|NH;(M~zF(N^ODp zvAM9x#jTWD&D2giX_Jp>VViM!JMDb9!1T)4-D;4V$1p)()ifygcH zIr}H-LLIZ3#G8es1?qR}9oEpie|Y;t=-f{Hr>@)a_$@ke%g&l*Yu%&%l+`{{*Xz6L1UhoaPwbSC7?WGRT<@Sfj1o*DWy^isgb)xOA1h!Y@eilVIrfsi%7b)r>5wfq+W_u>vGJ12J-+m5{&{^pZ`EvWu;PF|zHk*t!ewP-G z!4IXKWo$wS+fQCQLFbGoKA`XD(ct~t(3L4AcUAe zCarCA6W$^3NHe-yS;xQa+=e&JEt5ag-^4h&jcoCnT^xN7Wm2Mr(ykqTxb=>Gc1C_t zuKC#}Z=1i*9*KOm;A?9W^ffDJW;t;`e<4*QMNh~v%je(Y{ARx^hpmTc_6F~6QSa

    !>n#ytt$g+rWv%|PhR=>~7u`NA6U`(3Y zjOh~_p^t;s)i6x*hZjY?47&g}Q1nNFwEe^oA*|K+mQl@hNq*D74&fz;r=OumWx ziz%Nk@}5uB;n^yDZ@y0_&>=T~2BlQWpNlO%zpc4x8kTY?S8b+p|9e2ADLhh`redz1 zip?pXCotjD%^%kaDVLwX8(x3r$ER<-_;x{o?oJ%T&-49zu*6E()^-U7Zmu-Qowc7|M{COGNp}$#eY>O5C3JZT1tg-g}Jnr z29ta$A!qWHNh%j=sq9*$5`27fPww z$fa_zoN~2Bsx&&OLOuo7X}J+=w*upzHAsy$+aQ-uMe;20K5dLbv^6J%)a%w$u0BZh z;V`b{d8R;q#U}M(jW=o*SszZ2BX#mFrQvv#>Xlj=j=UcM`5YOU&r?6v0#lX>HTs%zXy%8~D6OruL9o-d|)p7JUU%8nYQ=6IY{bIN)c8kY0Cas_1Ngtp{U zle+X9Q{F4kx0AZ8_j0VApO>oCwU4YV^?GUC9H#KRBEYT# zr#$@G7|{1rX-YffJy~T0z3BWPZ4I8Ifzi%EZ9v{N`i*CsdGe&LPq_%axwkZAEH#s`5Tgr=2Qet(x-adeR{BiZN;Xksq|LRU5A%)ETDXzhl=C^vxuB!nM_OLK|rI6y2@_uS}sq8J;LY z*BX7~_#Hy?Ix?mYZ3@u1G=d*kkL7u$QfK6n0mA~`;d6!lL;zHg*|qWr`ZnQx`iwHl z{ISg?=71);d6j-tcIWc&M6n(DRplLJ70t~dKMKuCqWIvGGW?OJta_q8@P3iH$gd^t z$uoX)^+_twrh2cQ2IXEF!3QU#IJP(jTHuV~^c+yCd z!IZoiKTJwF__CaD@#2;VJVqZ?hu!#IroD-=R4t5=dyXM!Dv@bguEX^l{LQ$l!RsT& z_H;0Sf4K*lRcHPwDEq368OJ_ttRV8nRmKkeP-l?H-||zQIyR77dHAJDTgKE&xem)y zt}>Li*O3R~NjjZ$sArS5A*;xT*)Q7JU{K}q%o#%l7h^8xxF!D_{hg;ijnOy_a~1jm z-ek!Rx80LN%x`a$8di@wz)ad*)0ian~>7_sZ<6oy6H%`+>A1UM=sJXPb*hlD%`JU3fd(HI8Crc2X z0Y>Me3#Dt<+6Yy8WtJ2ufAcpR=`a4`TDo%Oa{9?n{zsl)PbVig`2MSO_iiQa@7E&l z97^K%HRX%6&6UJgNI9Mh+Avt2wdTllR;ASyxZfvB(_AgVh+D>biH&bW0ivkDG zwXl?m!(3WkXYtFrtyEY|U6kcYc>#rvMZ8Or37eWI*EJSzD@)C^yi!fw{nPYtXE)uw z15nX;n}zcRi%k?~6!uYXIh`JlQ>WW!{fbiY0x=*6FhE`?GfqxUQ0!Q|1J>kN@E;u? zryf94zmLjBJ#s9XRR&&|A-`rTC=@=F#43wW6>S&){*Wi%3d%v1g?|l&aA9dVRrU^2 zp?ymIOKAWo;_h;{(`8|evZJDiTtk}0qTNOFWUQ)QNMjVzF$?rBN-p-T%i7vvy237Y z6JX@DeTc%E0fEnZKIAL(>Gyk~0E__A1SICkj{*oq6o~?L&jC!FO|c>2;velOpF0V< zXn|3yqb#G;9CnUqO9swKR?AyjUQTPPt0)(=opuYj2v*oX%K9$-u`pZ+1)2vQcdep+ znro-tpr77)`<=A7T1bma3#`phbXl|6$L%)!1^9{1&ds(RMULO=D1RNwh&-s*kUr=S z2dQ(~L9rS|y*izf)M<6V&rmo~%xU5dzbUP6AjNcz|v~0jNWf zVs7G|F|Xh$0W@mV46scA5&#dy@)WS?l=*E{UUvLL&~==`?PODnE~=K zKFR=^ld*u8Kn`Fb_YB7Xp0v~PBh7r-KSQ6T4|9OMx!jQ71$*hYK_`BfOXXF|H-D35 z**5uDy5(qWiSgw|nb&i?^FPM>xC`KL+NK=x4&KOzg6_S74)f>DTX3u*Q$agHpd9^S z8qhsGiXI9$C_B8D=ZoB%k4}%)l+ypRWk0Rane#PNT&P1Z(PSG3<~>r$AjD@pQ8N0SZ&TZOl`a?RNZFt|`hVZ?cVX;%|{ZG;(~# zx_(~4r?bz#(W(Ay-h$M&-;JO^8qd4`+-ux#`SzU~ZGx}lr5v(;^SYk<=vTZLwWipN z!_LV>aHR|I@Hc;RDIFeG(tr6cmr;D^I2iEve(zCQUw4N;mwxe!^>lPpiF^P4@7Jj8 z6!XzbOeER8V-^p{Rm{wqX?M2{bJx@5%QD_1?d|{_}s%V}cg5_DTP%^7N z77(v>n=dN=+eZO-xpRQ>!D31Q|GFh7LKTD!lxZjNP%K_baJ_CVzNqg71zrydEG}GP zf!a^U$9;gQBP@XS!X;q9j&`?w#ELYZ9^7R^sf44!GMkXs3+d7ZySe4J(zu6`Sy)Pg zE(&LFg>@Jv|M%FPW=FQbj{g)(ts$0T-IFGIH%f7-ovIuClw;k|X=&-XnI7JK$bBwQ zpIWAWj(933V>VA!u2ph!y$L`^E*)2oQS!8yLAgP;m{Gt^EPUPb_5Xa48kr(@T;nT) zvgF3Az>LaKrc}F7R%WSqFXA3jV=-MtDJ!$9ZaFG0%CXT1P>V(SDd2s8OfL4xSLH&* zLu)sn78D_r1{4iJC#}-vB|p_j>gf)*Ze6-;EG;gwZm|VgmjTFChG{_l0uARa7`rL@@GNUQ58y6ob#PWH96Kxtb_ z%gb0fhR6!(r&8=S`OxpyO<Lb z)}&)(P)f`A&EEwue2kfJ&U zP&EX|X;lDtL`5AZ$^q`lt8kJsS&@yA04T70nS(~QwGSxNhaO@LUFtr852w&tFnlz| za>EL|q7<|a+GQZ2vTR|EO@6Fc`jjvK%C&ku2{etZ+vB?MiEGxcfQ*p#YgpTK;aB^3 zfcz?9<)mD&E9_rE7cHCe$ioR%l@m;n`p`T0gEFEY$zKbkvIHS><&U6~G_xLB?N~39 zd>i+2U79l}c_agSEEHp7y3-nGUIH3e3F!Lx1X(hmAIH>{p@r3>@&{g|-yGLkvN?{* zlwZ`+2w5z>e9j!_^Mls}v&_^no`E~igZc!>qb|wsa~@Y7A!x}#*#Nj_-F+T@Q~nDO z$}=Iq&0Cs7J1wM?J(lNl@eDMveO?RD%(A$j%hc8l3IIXIoq0qZup+ZwS)H@@n3J$( z6BM@GEI;T&8IBk4573?OwmM{iG8y6ux?2~M`TP95-QFwRsZoq8Wq34`+lSKFHc~I^ z?K9Hca;Dam&rx57pwHN6g#i3AS_pX_p{~tF5Tn@`Nww}8~qw! 
z8ufbt;01B8!uj(*zd6gFUkFu=aMS4C38kak% zz00Bk;PJIiENIqm9i)Y8EQ}DS@Z?h|_}LM2kuE6kx>5jHHe&m+e+=No1m#rux+N#) zm}Hq;FVY1CE-3Kbp}?CrZm|%@bu1@nQA`hZPFNS<27sb=dmmsaw~R8hk}Au%HlWzw z`4vj*aEVoFN+17HtBz3s6oI#3Rh&+!vE~@9s&J;jnG{J(TjjJC@z3x$Elk@#b3JRS0yJm19?}Fa^mY1&3R5l40m(vZvh$_42Cr2l#-2vp% z#fCb^0-eVCj7mn1_9&%YoZB|*qh(1UtQ-KQ`1M&SE`(f_kCf z>!ziG(`vy7D35)>kSg%+^ikm{mmwM!sK|Bb(KTQ$kqz{k^?xh&jgti zc`E7!<&s}`&fjJq`Apw=AMeY!o|bTb1HMjeT=9B>PtZt!A(OongpAM?dc?bZuC<)j zMFKq)!7%dIHMaoGq+I|R%ooaD!3bUdly`QoF9Be$JK{2+gX^{j$*WZ&xnvs>Jj0NP z`~$q;enAaonqcJsSta0A;Jf3hM7?#BWe)(}Rqkgp$U6DF@?F_Dr-^kj3F-;t=w`0V z8zc~ViuTzy`Denrvfku1CAuy0idJUIjj$F()6Y`h-wQl{s{gT}y zP;61rF6b4&rE{TuZo9~TLj7!821Ie42D#A?bmNQT%-?eAC(w;%M1Ecm@YsHk9<*Ir z2&_7m0tD#drz+rLNvjJD!{PodWEU zcsAb0`00Q6;VPD0Yt|!}L8~rJ1^fLX7F_POIk9V2tKcsYOs>h!Mvcxip9!GXK@eWS z`|#l=&$PM!La7P@zu=FtuI!W7ISGt5R{N>3etIt06>XD${J_1i`$f8-z-veWCL9)H zy6&EJQ+eGQk}Vn$+_U7QbCE76a6y4@mjZ$dQ^2m#w3Eht+`mvrN99K>umCAoBsg!f zq{eCniyGE&C;RLQcb3w^3f3j3L*fSQ;#Nj1pT$ckm76GhSa|W=fVd=&9(2=ZpB=HO zsXCxVxvpU@=vgiT=8Q*6D3z=W^8#+!`tV$8ke{(sOtDbqy9y|(2Ho}4Gw1}=moF%T~=BL}K)DuKt(@ntFMcQ6K zd#lk8Rbo`ApbY>N`h5TpxB4X%>pE*K*w}8JqHF`O;!fM^^wP@4a=Nra%pvNyv555$ z^ekfSGyp((_%O4qQr*pzwK**_O3wdgpb2F}e`S34=+Q%$k?F?GH&Lum=;0NCr~!bh z7H!UN0&qhV$hAwC(h|VW0GbHI2!aW$Y2lzEt_z{cIYgnK03ih%$RBH&ZMJBYUCaM4@=INH2?IrwNw95ZZjLQ&PKWN~RF z6_;{=f0v2T!`S1u8t!Yx$-1(2JuR;;r=8CP>9B;uYO2eaINf^$z=%68ylgVonpRc< zo$6d#o}A)BYFs!KU@guBujG-#!$bN^S7pX2V`FR_9f4yQLLl@M5VeLi-`3TwP;?zj z2BGeD`t&zI3+hN5FdKw2zku@eZST=oCZot%MCUo0HQ{K zm5MrJb17~Mj1~FRp(9&Mvn+uWm;g`|1k1!?a!mJz0FZE98CN-DT1kf-`Htb?DLhbM z?1xJuJSi_Jdu*q1>y&Mad>FpN8<6R;?4|t{^8VlOcj6 z*5CR9kTBL9{utH9cdKt(29%-9ozl)uk2wrp83Ul$#tPPIQ_!vwn;+#e>uBaA>*%vW zAPr-5!4UGO>rwek`B!7!Fm9Dqj&@UDouPcrN`;Fj-;|%y*!r5U@?2T!%L;g(*9FMJ zdd)e`OC)9hpN@!(7IV6f23Ul#P##RgWSw=X?8EoQ zMzdqHxrh2g7p=b<4PEZx33{prINj|qHw~~vg@=_-&Y@8;>L)-dn5`vaSoT>@>Q!V; zEkaA1YCA)I&f5~-9o<~u^90_H_VG2^O=F_AK^X4ii{%8E1-*4wpCDs`T;xBWp&aKv z$1mh*JU%wFpa&o6A6qXCwJ^ha5gL__!kZN0>JA`Sy^QPyG#OFGUBtzMTuyrNz{2r?1+y)UdA1AGc3Gh z2xXxH)&-CizH((f-M@c_dkNqtMK02FDNragun9&9#469ccFDl~j;#u@MDh&&^Ah}; zI=lF>SJyQ6Wq zT^Xt*2A5|(!t;41j2HR%Lpdl7|o(r*QT(qCTS9u#&I@g_lVQC(7fX)UcU zt)_?T>*>Q^|0*4^@k;dlSP)MGoB%{)0WXk26;ZZT9Wi%fU7JUXQn|~sD7XHua$V|H zQR)cDHX}EX{a6{l;v_5RASd^=*>Q;%8$AVZFy#b5< z2|;~T7Rm(7pRmil`)D^5D|h|{qM}fqv~RV_YMxY)#~cO#fFVFAaB@I?S_Nrsro3() zx9HbwGg;-Ztwqz`K42DZh=2qxu2gOW7F9Y0|C$6g#=r~OT1QOOurMktcG1UaC%kpM zhgA_ES)O(jupnG2t*86|p2H1xvvDIG9zQ@iWA4BWPbGUujHH##rL^0Am^veZncLrN z29>d3$kRvCa}0n~oD#z*tj_?CP-=(7d+Qxy2?!WE8c27PLht1jZY?Slv!v1^gV|=S zMFd*s7E5&$$~V`qpnz6W3tE-n6G6cMb)c0%kN|E8+ApJQ_W%!01^_etPC8*Dei;oc zb6d)~z`mdg)B{>m{-xzLcn^N1oJ)W+!v&Pp1>mjmK`4Yp;^pO3yigLy=tK`60tB`O zsR$5RCxG)CSFUm(1lAnTW#AzI1jk;+4fGJ#M3*7jIp z*bn?uVJ~oyL9wc};xP4y+ottP1)7&AzmKs)d`CVE2gHd6ATUVLG~XfK+*0b`#(Q{# z`!wUmv7$9zltJ6;9C}rQE(^GK?>=OV;i6kQns)gqMzs&0LChOSztW% zkcoQnk_9d-oS#65IDor zl-3r~a0SavtdF?24}GQvk0+5w zAE3j4`+52?+`YLk^ikgCNja9_)!%6+$UG!wUKg1pKpz-@BdUnp zpqwHWg;i)`-#cTB0M7*gJNAB4`qzjjn*?JtS6R8LQ%jMe+b=8Dg>bh%o5oT>()Rwvk@EK>!dvxQr6_yi#HnhXs2$MAC z0+M{-ec>+1^#&UrQ|Ec=eTus@kXph^pYciC1&5JI9*!f;oh^O7s-926@Rxf4y^(Qa zXw23Yb0dHrMAPoL%0Y96Mw}EldD>$wf%aQUepBpQD?YqoN<21MGAAg!xxK z6*L?m%X+R0T%)kzFrblwXJI`C&-*SPBD^*CGs~}+MAMd+dkDu77G0V~7qRYYV9~`1 zFf(M+28g_C<|~o(VkeKU&!MjawZ5Zul-Djg;7fOohfEBJx937V zltOs>bxq3Yd5Q^W{^jERf&v#5_*N*;>pwuj!=k>+#v)+Pa0IXsOJU=%aMBUGy;`K4 zx=7|1K?MWEYw9nE-Nq_ozPqnmif%7s6faNLIzGbsp~K=?Wsmim>OdDodNRcdguFUf zd1$E>>yIa*;rVnxeuS$b;DY7H4m#H&h6fA(d2n|XS{JRuK1v#jcbDMN?sVIhvY?9# zVi&Cd0=hgztR0>YR}?B^%qat*YqjDU)2ijLlq#%Zg+rTl^;5YwewCIax>S^@hcZnW 
z=kBnrjb*>dlq^W&y0*-Tnaw2@;lx4kDjGOwq^WXx%rfCqw9TJH#U$`2|mw$+VgT{!1SMQazU zoF+iQ>YB>r`NTY@Xg{-zb)UAXP}|?z3pdrZOPoN4!Y6C(;9BW* z0b5Vz+uGU!klC7nbJ>ux96?Mi@eEuqaHmq}Gx(cj^q_1N&f}Bgw7qkWI`1=X1y(T} z;I`$(2650hRDd@07)P}#bS*Cjuqm%*6K&kE4ve0&go8vx~SVb(r99#60bi91& za(o^c)W3ZUh<1s*$e(|sDS!p}=pJ`E1GLahkr9kBXd4!r@S*u@(P*6(aIY+KC;=ov zU(5_3D)2&QNIjtA3Gyi{N*RZakDcuu_(Yunn^VfM+~93~Yjenea_PPIe$2S*rh9kq zhGJi*pVtBS97noSu5YYI{51hDb>(KgoUUzL#?`Zt?(gidUV~PQb*jMl(Tz@EQAqPu zrEK)sAIfB{55H1^CZ-}O5x~nDo;QO;Kzl$gp(+1o^C0(c7F}UOlzFu;|iSv zAcHlSf$8txXNTx47x2x=+Q#cYzlZR*10~G&K7H*b0w~_VtQz{ZwijTE*MnkKv$`}$ zYwzrVSkD;nY~3Hz#odm~tWps60PDc^Bg|JD!?gC+VJa^QEuVex^=SWVT}$MFe(7U& z45ORAhJ1J-9mv;(n&1#?tk*X|Bm*3eIR^Z6mnAkk*;)v^j!BuU;;5W<(R-0DC~!f6 z?*;{qPCrS9k8rIzU5Ul-ICqSkaiK5BK};SLN|hW>)vI$fxeHE}1C>qzBo#p8|@S=ditm11q?&AvdxgZv(TV7kioijUW)&;Tkk20yOUI`O`o{?Y+ON|^> zS7j|xiU3HgZ7~T!`9^h_twEHJ0s_)hB|6*@u_ki?9>5&R&gyy%cRH*kIJo5K9{Fln z;Y2qhoAt^RG+HpDfo>u$hg}yJm_KZrzgeEw1q=mn#v|NiXh9E9M5Vr<6&_YD0vr!U z4Ak)4z)K56!Kx}92`?bIG(aaOpLRvI2k zRZUm7Zl<^2`Z0BaA5H1GPv?jEPHF{}tYTJ9y&J{%*M5Eshtl zwrYioY4ibi3$AK0qWk7DmPl1Vo3Jt?pRg80|5;pICw3HZ@8DGdt2R9H>8GEfAGJd1 zGp^JY2OVgIm36EfX)RTt=?i#Scc9bGX}bH_UBEhG!Zii0u&Cf)?-a|AM-Q;b{20LN zFu*DU=5GK_86@BF?YKLf9y5lz=y${yBX5I**NNpMDCqny4;yDo86oJbRhQR1jKRSC z#+b7%@|ok_woJyjSYnBj!wPDORfgknUN2ol9k2F{WAF+th3n9D2T(}^2Y=T+v4I7j zu9V6E`zL@sVo6C44+q%a*EMu0#L1eULZ1$Ry*zXZ$g0c}^jyXYbP3nLrTRLX>o;IXT!{~|czHRjR?@_NaWVR8 z7NjxW9rI^i6n1Ws?_hppvw#3Zm_IYSTb}n&fcM^gH+Z+l*b*$(1x@$2u%2QKKevd= zalg-KiRC~4D}TRKqHOD6?4_s|51E&BdYt$eaS{b4;T>g`zbb!}RlbyA<|9Zi|2&&Q zmRk-yq}Djr%9i%?P_)b13TV$=*(O*bSs&eZaiv9OSO#;iv^LE_7kNTne^K&T zGRX3q-Dr21l~--q0E@SIlJ6bA6XscIs*9tI_nFM~KC6TE^LwZV)M1EKo%M6985>JW zC!ak&QRm0?n>kI{tqhcA4dn{=`m)WsP6o~RZXL8j(}ETwEabPA+QzQr3w>Z^tatQ2 zV3T#3r>vezQ&~>HH>+=4nE#pKz}{39eV6g*0SJ>B9c+7SpwV8f*h*THLJQ|Y``tO+x<#xxDi~b$kN@%g z^wCFa0al$Y=w&~fc?DOrDzos{%w%Y zzX4KS6mda;Z;S%9MnByHSm+$FgUli?$4QC>6x0F>T&+W#WVMX&B&}-%ZSTjaXj-wL zf1?=b<`g@kSZcYb?V$)xaGhAum9R*#`;t~dy7O^j7?v0UCy$7SV>~Ap>AHBiAa~Im zg*_waysC0#U7WZ5?7D9rFG}TB1xX*!4VceY{HJaES&i51rUGmpf?I+>_CZf8Gw#!3>vX@L?(ZL_-uO`}w(OfA*Ze)}^34uH1+7`qYwX{4(Z z;7XJe{cCFhq|jp@ot&obukWNQmvMKB0NljG0;C)O#KA!lCGL&Cv(ItinLpA_-ZcJK z9#Ct2>sne{#X3#ph&AXkE`6TXsO5{+C0&&*@;Crs5%Ai8hdfR42)EJu_qT(O_V*Nlug~atStboegg9i_BU*s?V;%2GnzJ23H zy7i;CQn$r=jW}xrsKeR;H(vCG;qW#f@LuqmeXjekfSr6AR-e>E>k+@pj|LUj>QKIJ zY}Et!Qf3_D_SYY}35F$>pfK)9#=a_G!DhPzFl>t;fnrFo<^KKq0mikVr`@)`u@-W~ zIN+DCMAFhvfU8X3bO1r()IQoLj|$i=F@^-~1FWUqJ>pzhKNaJUF=Y-uz}-?%OWItz zd?`4>v8d~>hd%5;E9H>jwt?*hJk}QLX^~2nr^K>DU+uKuGul)tB3FosC~(PWu#vp` z`lged46nX`m%nt0Fr$34iY&0IqhqXOXH|A=3n&e7O?^6fLpG-7*I%YCU*=xrjXV{n zvohYae3j4aH!V^Z*hp2DE3ZQi&MR`}J<4&gJx2VNbKZ;bB`5fLFVX?#I-0jh%dtKr4}%!yR~!-Krmt#)JbVRc3kw9vF=*9f_bx+VPA`IFG=U$^4q2L z$=$_t^yy0U+f;vlVyb}C!?ielbcvHYH{Qh(uqrzLvblYW_rv3! 
zp1S+@8)22G3$AXB->0P9c7pYF-bDX>YU@Q?E-3I_rU2*hvBQf?5lV9|M*#7(OzbP= zjbjrbp2Te^Sn4jcl`W?3x%S;U(3pfvW1F;WC-`X?yytPRG*N*WPR zI|6g#f~0~yVE5N`M6=N(W)vU}&lq%DSI=XtWmJOa0fIw8vOE^aTC!|#+TsA~9cI5& z9AMRoqL|0hZ9Kush-PY05EewpLL4x$2*c8hGIF?4Oo`Rh#RAGWL3g(Arw3Th_4`Mu z*P0S+8!HyuP(}%1ry5m-FWLHck>JlbBBo>Fwse^fhX9NtHbp1AjH~896`CTkNsQZ4 zh$v;*n8fb!s-ma3CaqRSDeN;FU3( z-W7Of@4A#2{eCjunPdW69Mdh^g}_g zMcO1^p~zC7-Mt6t!M#sG_OLLb>x=N}Ds-&XmY^CIEY#aEHPyulK4~nir@B^I0Dg0; zJlj6THC7N%@XhxkeZB^Ow6?sM-o5!hFm|q{kM>T8PuGV334{&{kBG%L<IXGY5XQuVxRp2ixdwf!@ZO1mv3AS!1B?+9AJewZ#V(yxH2OF#Yx z>GUVlbnpHSr(ynzlO)+h#hNS+Z zM`*~Dyy)a_I0@{yewx7eV^G69<5_R02rCrvl0ILcYcF5&6SbpWR_;tf?nd&uau|2u z6fUEHh=QhF_!S!aoM4G#&%B<=<#C3Vg?QmU|B&BP<@vnw-fa96H+u8q=g)p{F4Cf_ zjqLSgPGy^}Uei?#Edyy94_Vma&vjxt@K%f4Y4Gwh-t?g6YKm+8HMiM7@@$>Bnz^lY$=0m=|N-}oWh>6SR?PCjb@Sf$RXY>AT0=gd4 z^efW$@$*uaNqeC6oYFuheH{|*9s_2L07JV6H9!<5c>pXYv?;*@MPg&XpJ|T^947*; ziC)-Fq*;)a`(G1keD`~dei(Ji=>V{607z-yFu%q7H(Gh}l^>4p;Kl%d*xzle{Iuv> z*y0o|7R_g6$TwQ{HWhrM!8&$g?9z#IL#x{*rz_R~Ah34o5T*H)#l3)88Ku_Kwx;g*3fQp-RzVxF zRtTVpIU_(1mIa*?6oWP~j@b29f$~JT3c-ne#<)v8tlD(-av_}uZ0nw44FiY}N{ZE{ zg4wD8HyD5eEzXR4rBzpzpxjFYBOd?;qA8{>=uS?}OC^y7cSt|xCr^uWZal$CrH=bu z538_G@7zf{DBEo;n~Kn5Ow848%SAd*6|u${;gdNNpG9`{R{$&eSkqP5mF{U(E$9(| zmx`ZsL&-sLt6@n4Zpfq3)WyDifa1v->ICbkatXIE6o!Uwe5@0-5-0*N=E{Ijf`HIb z1yMfGtyQZEV>#(6W3E~+1x;MU)7BDsl(5zsa?)c5g?fRtScxF^xgplr1gTe%%8UAt zr}F}tr*ReVuR`22yup@N0pYe`En~%?oi*s&w}1F%^wphDx6|Ew53ouDkPEs(OP6}k zzKwOtE~ja}`PQYd04bJ^XiLUVRP=>O9#Xlh8K3FeCcsl2tCvdvNfM6UhOcS>S;jm& zDDDAp*%9(5fFnSSmPS?LT@`XQEWIwLAH4Ne`o-V=Ex_V&C}L$5eNY}zZU<@@09=^Q1%6-z@#T9){zpHpUr234B zHvOVoqWvc5rD7=9W4{UZXb}=8Hgb28t1<60sZ{^d|P6b+bwjW{jhx;Yy z?~pb7fd2Qi)ec~rZL~Lv)S+AgbX&sms={fGDCA@QImUI5)P2hvE1XVS#pU=%@4S~T zZvq}dOVx!6eTxVIK*1w{LmdVK+%`E4R6zUswVPNFz5y_a%V%n)BPpjOe?eNkV)Q5xu+SLZi3dX#tm6jKH7_N=BO)`JO({3(O{2QKO&lrx4ekWfo$c*ci z0UHt*+J4ah{|fc_UO(bu5721XbMk)8TI$%N{w4vg5p9U?XRc>I%H-^}uJ+RE5BJ$U zVu5(p1t4z9{s^>oaqTSO?pRu2kpWL-0_e<57aw1m0ugYXIB^1!nb?_S@zj@>&i3gC z?E6Sz0$9?0?#u&f3>AvQvrt&v!6rXtj|<>A@b22(&$@m* zI(n4$ak+ZqjcZ|D(!v5JTxC$^wJ>n0FX+^WUF0m*mN*bP&ch!tj`f{kRz1HBTmTM!Sz!&zOlRPEY-Av5!A39%qH=kO(N!% z9WpS$l0{dukgb$IU*4$@S@Ag0kn&2@%S$RQvDS&t+=6l>$Tz0VDo-^4tua8s1ouRh zd0j{aKm#}dfLW?6NB?9%N*4bMJDZ4oR>QqbD;rZ8kWJ7?AWD#|08p7JSk#dUj99~K z0ho$I?gId=0C-`Y4JyXd_7|3I06^m*7Ky~4T3`JEiW0J@^=+GG9bT1Y~X#mjhII7y#wv&1b*@!c}0-NxLZ39 zJQ*$nS){X|mGqGKDxq3J84FJ8+b@ zrTrrg55OOEdEpIuc_rE)_@HHr2}oAY8MD?9UEY>%Sn9((zhNFkpBL?7z0|h*(R-MY6R$?02YsIaF|g- zcYw|7msZk3qA9p1u8~ zg|9|teZX3I>M|ByVU6^-EtWPbiL#w*qAhNo*e2`ay|a3Gr{BX$cb12Mz95|7i800m zP{M+f?~=KzFn&eJF)PIu*}%|(G+QS}EuUMWeC|OHUBFrAM}eFw;I*BZ%r|IDkuYPF4XsT`6wJWRw83!W0(XTESAHc8Le1b{Ac zF3f&;Qpd;(`WO?@-#qa%JK6ckJ@ZciczyJdfdl2EXS15$_W5_g2I$NrH%$NZpMIKt z|Mz#fiQOqcu5Onju(3iC;4)bu0kw}lS^^ZS$8Y;6wWsWS^@wXF05bQtEMeCDGWCc8 zeoOvHmjPhnh}W0zsqZFnp4Z2MEi2bg2qMYOY%SZBWZm$&&tdok_(9Sdem=r8~7 zqf}+5`?0|M+!GfUz7z%Eu^}g9p4?-{{FxYWVUhP-!TP0ge|(eue~NY20C(zO=DED) zJ(P?uxd?ci;j8!{EjVeh3wWQ$VzpFf@rg@{B=Fz}tHW@HCf(TPR$LbL-c3OaV z8rg7sk`~rjVAf6nhqkfuI!+B@d+GmHn$#WB8VsWw~3bBgvsJogD_p<{m zO2qn_7H~^6m^+H?G~(R=5LN29yRC#m<05|QiE+q_*u~{Y^MXK}Ydy3~tzS0Q*V5H% zR{=|wQF0BWzMZB3KPn2StgH!B=hvQdGfQbF;+kiAMRjnQVHeS7)~mPp3XQ$ ziC!isc>rH15?VRA&?g^Qs;kg=~+ z%yh@v$I8pNLvtmbDFo#mkingJV>)3qQXsYxI7EO5-nYHIO@MY39zMI!*AgSz$Ft$W zM0v)X5=azqQW4@t{yzZ^Lm8OeFCY`{t0>dD;pNCL;<@lyMNHs9B~bv(a>5D%zs5r-A_?B62N5( z0ML5s|5wLqZal!E`?N_P=hBVq@1gML(!G1Xgof}Kd=_=0o^ANhQ2r>&X;|x?ZQM#Pp;WIdU4@^sr^3>M_AQ`b8GB8BFdi00Xn=qo;-Fg)_ex9t z4e2QJHITYs&=inmu4oz;Nq|OPdh_O+QI_MZMhqw+2Z5a~&)mItFUFwb()f1`z%J|M 
z0SuW%L&ne6Lkq*0UwO7n{=38#^F%v=H9;r&H7uPjUjaxerXRlj7SEtG4z8r(aRaQ& z@n-*F6(Z0FznY(Q(aKJKwf_a^bYs*t(C>m-jT~-^fP@jO9$6s(yG`hsR>*)cK=O60 z1$9wG8RzuGNmvz$7zwyaU+N6~P}y-u%fLk4i&qIu-@t8malzA3T}uIeaO!Hm*N(Vp zf}7S+w@K-4J5}gCNI@W5&{{r~rf z;*{Bi(ju;NZ}EHvCU^JT2yjEdaiJ?N6~`Ja33}0M*})&@C>%EL8%h}vlF5*;x}|R% zb6=EvPI(rxo%`hlpK@5tlb~(r;oK)!lKsZt*-dBfe^H6%9Ycoqs4w*YIw^C;%GXUbaF4h=3bIWD{dfU-xW+1im zjJzE49lyPx;fyi67rNa#n?~^5XbKmP}1>C4%=S&-?*JYZ2sEE;zu6B zA>Fj-*1|orYV(OFDFCk`7Bq9FdGgcqKRH<~E%liUM+oi?)?Mt-P>GC}*g+$zZqE^0<~>hLHI-lc1c3 zChY!lE!=SnC3bcJzPv6NC~wA;PQr%I~}YZd0T+6pJpjndi`K%jgl-QPVRwiFf~ zLr<;aerVY_)CCah7<9nZ)nck_31oSyBbL_0M==1ri||&DAoj-vPOht_hj&NBsp9lF zV*eO->8!k%Pav_trHm35LpkP{JaNJduq&v-;fH_k%)R^|7soSDm?^Sa%+ zL$?ff^!orVy1@yc=+Zgnnt|N4=uz2sk(|{{u*{DD34l5()1GXpyI@OqEOwmBY{Jfy z;D?LtA~adVqK9v+b#e!-H&YsyyJb98iwbqa1^ z^xcb+H1d{AQHlmU_jVN{>p70cf;MSk=F&5!JL=w{z1`tK>Jn3K|K#Jezjv3;1Ssv{f(M9Ykp5c1xGWSv zqX&KXDRJ5(ale|2Sd5hc#)v^?j3=}llo_7E#gU#5AP`Chzo8ym{Vzb`5duYq zb(OqOD(aRO9Ubom`q!%92s+sR(pE)Ei>DRFNRGva?xcclTBHPzoGD#iSs~sTUZ1$_ zX5&pa8Wn9pr?3>H2D|&a;SMPu3&1WhK5o4629}N2=m)^HSrUly8Bb(1c>ZUf-6i%E z9u8Q=T)%c5ODW>)=_*M_l^L9tgrw@SrDQ#u^mjdr*Zu>}irG$RH0K$OAMU(La+a){20B^6dto+872M91_2c z_;R*Oz@Sh9IK_f-g2m>T0~QK-04`j#b^U#8-Q{4#qV82E<%8q?8&BlvNqp_o7CdX) zzVQ;ivl+(x#h7qh2zm%cj~JI)kiF`)YHT*Zn{Fn=X|0SaWp4oRFX#iGQ(o!nE9fKe z<=89nxrE&Hx*(2my29lZ4tA`D7(1sYED9+y`(@5w^Y?Qe_?9mTNX2FmvQH~yExDYt zW86nOK3{joPR!}C`7@Vwvxe}SG(Wpuc(#cFcs+aZi@s~c_2VD!;mUZJ?%>Mv?7(tT zem2=p^Q6K!2gUOFR$Tr^|L7r|nbKeW8K8P58~SDdNbO0QRoq7l<(Z;z=9Vx0k^UNSa@7zE&u3+a{b~8f z1k7K18^ufa2hCIQ#f=nZDk4XA1Sq z0^#*;%0=xjDDc`+VC9V-GWaMhuhmm+=^F7)GN5J%prD*AVNo7qs zleDy|09_+MG|{Zb01pPS1;~oi=Qw3=iuIQ6h&`s?b|+I7+%bK4Gz1_3FeBJ=h3^#> z^mr1b?s6+#YTiko{vFCB?vDZnBg|#AE^!@L!2Qu+l9L!g1ihPw4fd?1XF0fsoJCF5= z_f;UGtM|wnH zbr!|~b#cfKu4KBCITuAoXHEay-+BF2B_gy4_0!F*SZJU?HR8a6A@$Jha*E4PfRtpZ z5@yaKIa9zY0gfSV`)v*^7!s#)48UT1B`w657E`TP!8Hs(sDGLoxVGg1A3AXwoWZGi z2GB_0313Gxw^qp)R~%wI0XcBU3#W(@oP6`p+17D6#x~1UinUiO8)9;8Kl)7%B2dSIn{8bS;i9M;X@7<01&MLyb4fu0fmO+ z7K$cE67+DDJ!+*Y)=We4@}y7|XiviX$?yD5x^n3kp++2BCKyOUp|uY#6d;X`Moc z(T%jUg}ZI%@1XNt$N-qhI=cWbH0vBngq7O?K=&!*WJv5VEg_{{8{RdLePeNoQNxwA zvbagSKV8EZ$MAE+{GlhST1Wvv>AJX9yOAc%T)Mh`IRLQXwOi@%5SLDX;Q)J;vSQ@B&;BD~#2Bc=2>XogvpA0A=?SOMAgZ zo>5^wYTYF+7rnZIncI)098vF^0qCgqWBiX?Zu+N5jd zT!zWn0Wu@oe1DO${zbKNUGHHAy0DfCO%9&GjTySZCaH?s@sPEvt>bIFTbBK7KMF`I zYjoRvDv53r_c8Q=`45aU<0NHrFQb38i(O7nQEEJ8W=op_MH~Z8FNy+bQ5+I*Xup%r7FeAIZ ziKILi01jyqV*=Lhko&qc`TwxKY z`zZbO(urq!xHIqGUISbc)c$-P*5w!=-9B7I#%N8|dcbZva%tfT!Fh@6HsHu^T?rc- zoO*;+pS<%p!5ee=^7z_Ieh}yqpzaO-yX^rgKIuHy~a(ssQLv3 zzBd$T6F1QKF9iUEBY>xBv5j>Ii{S~1r-}t9bWMp>lg79sv-B^q2~=V8U|jeto%HUb zY~yk$5TP=I>xckAg&pOBf$wqK(ptjh`bbMDKoMQjy1gSdA~~838CXEEPRPd`%KRb< z>mtheF~QAsHHvs71P{{;kH_}hLh zU?LFr*+OQYUH@vOCyE3(k}aJ;^1kz*Yb;inMW%BOp1JG!L=tdHCRjlRpU{jOE|@#%g-| z<~x89#9JbOzY6mJU}%cf()H_DFderzNa5Fn{&LpO>J)Q$6nCb~8C}kMTcrPf3=pM- zl&8QCXom{o&dyFKkV^nLW-S;fTdwlGPQUBc=&6Dw6ez8kTAbpkTdUSqDppP0==K4G zqODxNd6U5B1Wy+v>p)LW|7!wv^$85W^N_VsWeK;YmGs%2&(b|ksvP4gnqv%U{bB4a zmDf~gg%#N$F}NO3zx{mz%Xj(!5A}3?of8KyUr$X=aIBS@jJpxMSx8&gi4)p;AjOG) z#z77BYiY511*=Tfavm%Z-6?omN_|d1&0O~c3A89ARLC+1TNv{Tkhei#_adN(haMR7 zCdarCpp`bp!s(&3?;YYXfz0^f+wbDu{03H>9Be_L_`Dr+rb}Y&1t`@cj#shVNLN;W zM7`GWywGj~IybKpORERSHUe_Bz>UnzOcnB{e14~oJJiuVJu#q}P$Q0IWXV-)oN1d%S#XXg> zJD$4qioTpx#J|ZR95Uq6he+g>7aLr7S)Jn#?nVbOq)j1M_pfY&r#; z*Fx1zj_*~1HnN)5pl6e^&$&+d)!_gcK{Mynj8C4>AONo+3zwoN&M>j77_S2u6!Ez? 
z-j=&V=cN$><;s;Fi-T=C*`2}j?s~av)Rhim|B+MLEPpf-)%ylAez|JA+Plvu=c_IA zHOgJEqxFbmx&f}1-(bPSB=mlyMJpdk75a*IGKrz6uBTiLZs~rGHzNFbS zeLDoCeDrrNhTOe4znmzXx*gtGPRHAr&l7LKHYf^OeD~OUuPHBTOM|q0^Kj+@i!K4b zR2JE3_MiiJ;&Z%nZp&sDDbt-QYt@5p2Oq5jz!z3sGg`gq@$chrbE^s$5f`A>_pygB z+Vnb7;74!$&j47sKM*9mQt#kmhr-B7P6+^NG)B2(QC#LEvVovf9`_#6$xwgT)t0y5d(17 z(;ik}1AqgMQy0W>gJQlbuT|K>60#fJe80C3STx2ON6<--gbbOqvPCS?m;cHd!6(a8 z_W16#=gA0V>2xgl)QPM#Tl+()IZ_?=?1~1H^gGAN6eoN;Kqpx6LC|PmI1q1 zL>sHB(Wqeo1&ws6imGXCrQ09sw;&DWsC4vE-aQ4=BtYoy{OZ~Y!I&=-l$?2sf1Xt5 zX_KQ4eZ>y_2w);C;Mj?8Ky&A6W29-BBPeJ&f=x@rlS(ZW*glOpb(24HvH z-fa;di8;Q6AqNA^$?`6 zAIY3Qs&xW2T5kD6^u6y~ldnv15+d~wnDU<8-Q5V3E@iYr+vmG3FoG=pt}9|xjOUHx zCAia~>=ENaz(#)A2QV{e^zq?wfI#xU@waTdCl?CPS)Krq&pBq6aBuV!)BD@^(#GnW zwdT$%JW4Uy@5jOOCCLJ?{JvJuhQ-#`T~MoUb~sru3lp7HQ`aL)UZ;To>tN! zitn`8;>61_R!mpZA#S!u0JhEM3SjL9mUlFV7&pR%!4g3fV|Z!Ngqszh9K#U6kMUY; z(#Kfj0q7O6D7kraJ>qqJdgoKdc;<>LNcQl-!=P0ei6g_~bklcYz)`Eu<(8`twWNBYd`totCRr$m{`1zn-_v-Wv6A_T67ofb3&Pgq&S z+K{j$@b1AeR*YC~e%&b~_J;JU>&gB;qQ(nepJ6pU(X#4+}C1EV{Yob6PC-0UKTa)bpHPD+5^QECt~8 z)1Tf+zb0;vf#m5UYq8>bVa3u69}~E18SY%+NmYh0Bbp87AQts^)xP>rprHh zgp!0n#cedtV#Dk0-Nkh9$#UvrtvRRd^Xg!3gD!DAw0e4(MzZ7#oOv->^WdN_$o5!hnxy_^sC(yILQh{rHrN|AjOUBj$&mIZUKCag?tJ3 zJS=hfy@iXDJEtzvO}gB<$k(Mn(8fUJT`Wtq2Fery3N7wS%Jv>7aLy6Pp2Cn5R|#rqC$3 z#`;>PS>3Foec&}gFW*%V`GaDs(nlZjtg)et4<~l%IV z2vDqxB_LS>C}syG=|XDY{5JjKX@e$#y_+|0#&|FuR}Y1nXz}UoTQ_k3ZKXQ^SVu=E z#4#h58-eLmQUxwcSWp%LIM>%V_-#Mk`RucF)ZHVH`%-%A&3^=+0H93(aIi?3wpHE% zMxp5#06SmnQ%BsB_&n|74q1OAEwg^>_X*>H)ny{^y6v6~$*8BiV?42ZoNmV@ehfYa zSktv{%sQt4n6-2NL0Z4G9s&8o!jSf>Xv>R&2gasbYQ9Z~ku4Bitm6RF`9n9LHpb#R zisO$!J9QXwIvP%e5;8(Ti1fi!LYH|CwqO)yJf`=ceL7kS_s=12q$j(aELtz8jU|9G z02o~wCxEHd3f7bKE$|Kf1rXLpo#)_#>&rik!zp@=4wj>vTw4SD0!RU*E<7CUnvyP@Gxh`+nzOc;tf;#)Nn@7V@ zcG9bTVKQc;aFlb|oIj#W#8CiV+uKcugo_YXAWmY<=9qw`#OcwJ!Y8O?waUpqy5#Zl zo&!!;^SN%fKrF8#et)tn=o4Qh*?t|~2B!ajV8$lM18|)MO|lLiOu=s+X`W;N#-Sqg9LoVcy zGwPQ~K}UhOaytHODYYN4%lIW9jqIZno+;?eHQPA9F90$c*Tq%x0p%@RX{6;hi30)e z#C_)hU(yd*bMVnxxJ{0XE9R~U`99A3nc{z&zP@PD^xH&-i?&`+;JZYDwY32*O$#h= zu}Z47u+GBDwV|9t`Og_wl|?g_yG5+K3>8#B;qDIr^Y_Q8w#eqtDr>niRx89%svt8* z#9x}AM40g577%vj+9EBz} z&|R`4sQDj21fW~%1Xn_roJ;@$4k`@=6c=Z!qS_i0ixvio_qp6r379KOnKH&bJnt(M zgfpGubAnvDLwWL>7EvyOwImZriLjtPr=rKp`OZxuR^R|0o%G*6_;2a&|NGyk4M2d^ z6=L920H9Eu!b|`n>ISF|Ps^hXS~_s1B!F5L*j-o17Wu3}gC)SLRtq;o{&|wz{zE_( zCYAvkIQbc%=*+c>WsvpN+U(luHR`Wh9o8-=ZCQI*(+GaWHw*si$$l`YlvQy9GvAEb zG{^pw{`?#1jv7k{Ehr4CjKv(*P?4}Akhe0`K_%9>U~Y=5VE+H?z1fdrNs^!I++$xe zBiG7WyQgPbjk%4v&& zZ@j5+uCCoaCv$et+L;G{+HPTDixyCY`mqkfN11g`d_Si{u6y#vS6B8;me3-*mwxug zKS#3)Q0cqhVOE3;({Q~5g{0Z=t053SNY7t9MivUtiEaanioenuvBmZ>3#AS7;5wV>(h#AbD5dm3&##w2_Z& z4#1N9K}bfZb)#=EM4r^ALLOPW-v^*3z^GOtztKvoaFXQq?q=Hf%^L0fZu;5JehQ$y zhRHJ8eE9WRS$GT(wt%Jxnr2P=fxsugm35-;E7G*B_`m;$=hC9?hCr7e+f;sB!e3tD zK_{q{Za_U-TnS5_Ljc270QD+uVGeD}20(!tliePc1b2}Gw${@(XwP+d@8RO->EZIv z(#9I6;JpSkpIuCCz}WD$O8c~j0oja9rTV5vvUN*I-O+=v$0_b5XFK=%u7X zECGMPTKk$m&4coISWLxs1AmnN?PD^kO!^HptM%8Z=I%C!rU<+pu&LCw!b!`|>3Q-Z zdfw)?J2AE~mf!+hr;ebzFSq>>>q1u086K~ zg(LsdS}E)J^m@JoXu%xFcvjJI zG2m4T=h{64bDeM3I5lqxA2<{E$XrA1KkeXL9MGzcFQO7Aj%9?PSa|cTRZ$CoYjzgr z+EV}(J1p=41(d`bJ)LiAXp2uISp&pZLP@qO}a`-k+hil)c}C-FU6PD@YNd1j#=Ae#$R1gy|{;4)5Z z!~wZ9#=HU;q`$K+ri*(z*Cw4*0z6}JKYg<6QJx2DP2kW|_q-R3KvlK;@5q z`m->n{QB#!F%Q*8k>XV`X&n{x$`=JMLwjuCZSN~wQw4xg2uKJII`g+|3MQVsIf-!F z!kK%iLOqv}0T8-e17xs8Z3oS36(U$SZ3}Ca(KggvXCh5uzSc#9smuL}W|0WFa5vS< zlZhj6&{Du`ZhALu;=?Jlz>I+@sA~uszyPKBQhL2_ja7(Khv%O?`Xv1WG|Hx?*|=Il zyKD6+8b`D-gk9Us%U9p0`wy10e>P0=;&eWEKaJ+Z^5QaPn{PtXPjg($ZJmU95qwc_ 
zT1K#c^4X{O#sS=Vvk^Y9RvzEyl+xGf#Y@aG(VV)o_*rOq?BZ97V;XSSZ=0~*5X4%Z z5V`}1T1uaO{y05Z_<8E=v1S8ED21A!4zNR4in7th1&p{+Zq5NFQ$JoSNek6S z_|xSiNciCUHOB@_7DJ$?p+>pLfGh~qyLq{GB3(%ge{%w(IYLxr|1#e2O;)7D7qr32 z`XucwaoS|U>Hh&*r)arN0m^k|01p^lC2raB{d{>a~?+qGS3j>@%pdhnEOAe+Ro z#-p7Vnv+?ZHL5Dcst$)4>|k2AfiFAhTm;ybjR35747o`tqH5OsOB>|86Bf`bjq{ z;G5TX#*u^vl#e$EF*PzCNdu~c-=I`l>(%lfq0nCJ^W}m>V{8C$3TUo!_>0g9?I+Hg6?wXvZggA-tZk)RK-~QWuPGMY6pMACo0LCB$ z(Dmw7;m_-sW11cQ^rvg6)Ll;4#bh5pHO(2cc*Z7;$2=*nTwJKFBH+{E$mfqf(A4a- zInb2>G4|0cG`Iwe5y)!?bHK6Jzq*SJ|8!bHt7l>fza@jlihJ9oqyA<4HDMIdhXHO_)L(J-3>9hj z(GUPU3joZ~ndA-u%eP-l9XWeH1e81kJT&hg1ME5~PrvQ-04)RfdN!S=?y~#N$+9}R zm+lVry#919%xA+)@*4a&%I_n*aB-knL!m1C5f*V=q%j9RNDlnvU;b|d8+{NhqICj!r11e0Sda>LIOi{2wa;6&t;KNPY|KQ@3!SJOy+2DK3O4gflOn9Njk zKy)f-R1eiF;Bl8PdPu$H>Pye-CF8WQ=6C?%P+*LpCRW%c8*ell10-OTu zn$R2-6TM8qAQd?$n}b?nu`Y|p$`lH$YC-_b03LeUqk?g%RAWw|9s$#KHa!6m7^AWb zU~~^ZP5@tyxzhvV^8f!hCR_FM9m_k@qBQM&eS_y_IxEGe-yujI*XuLG>mDUAd+4JPdNlsv#{{#RNhxH1c_1Wl; zyPMUtw}~IX`ZB^M+FZRE+CKjFl#m6m5JYqE_fh8kdpG+Mwp9Dl{-DO)d8Ja6*WMH$whnwu=^6u zw6B!F2Ob&T4>Yj&zW*-pz}ZlEIVggs&)4SD+dH~0ep&=Fv3(;vF} zl%uCO?z~T7=hbw2^Xt3N2yr8}t6Kq`7|~bJ3f(3DGTNBIBe#=ipE}3)G}0mju!a!z z&H{W1|0ghL2j5(pJ67<4F}294V;k&RTz1nyKP{LeQV@zW#4zr$9W3S-M9;Renv^4}MG@t9V)E49P zMdYN$L@mt#Kpc1i8f!!s?G4YhCp8z^f>us7Y6Ijv5I5afcF}r@&Jbo@d@^~)!6<+k zYuh+F-UnDCzNbp+B)bBzlH&wCcP1T^jU9i!rN>2ZQu073TFyx?)F=Dcf6y#Bwp|?3vHU#xj`j>XWel@9`O$M z*%Iux0p7x_PTonU0$0Q}a>U5#3iK(=G|+OxOFcK$SVFr8Ge|YRnp@h>Styr059Z!(gFm)4EOiH z{d+XAUW8ygd6%_aXs1CAX-xv=sdZ=@)TH#y8xHl6ezc6d@2Q^xl4@?a`LndN7=rpC z8YVUTUxokE{T;NHFe%Muyr(K^;OAhQ zFK&CUXsbp0EuIzM0Rlto^Bc$+D)m+2HPbuQ6^m$>e%qSVNRv-$diFdu7T)XU z&P2Ol7CTKgc2Q&16tso!vThp<5%RZR_VA&W*R;oN?$S(u#0b`vK@i-27iPo*e2nM( zwjajW7Cv{uX#i{ly5~52V}EZme9=u$XK~#zKyyCF9Rb-*e6BUwz@5b>oPAKhT(e}` zihaBY)}H;D+tIwI@iFM3Aw_>Oo^8sy&F#!8u&**+a8H0bj~9TcjNLk8x890(eBbgE z?bhE%!SMr{ZBvFg!4Q5}|AwM14}CXckT!%H&$055Yrh4O1284wLgUQwUjD7EZqeTk zY1d0>c6k9{9T30%lJYUvT{FlVX|RuJ(&=~~JQ+w&f02$8#?7T0csS%zzB;xU&Wmqa z=41V3&}}zB$2|a1fgLv}dgMFMLnAHC?y009x&C;!tRqj5tkS1#n*yh{lh8zk%0t?` zdoQ6?2w8`RXfI-3?mT0Auzo$!I2tYQg~k^AKg})Mml}2k51ctZbSAf5`(Exr5hec- z+=6cr)a)t%2Kap@*q8q|4x|74&!5xS`nynPVvTPcaR7d)Nh05_9azB5J=#utoS@ml ztZy7na=^(?`(=Ga%|6%x@Dhj}jo~HC7WO#VX%8QAc*=}=YNN3fdZQzzagv(1@QeCy zhPya52ZkM`x0}Bomq6UG&2+{|U!mDzR{=+;$R2pjvS_o^_ zk6J7WUbe5yrwqEyPIEv3+C@z?l79GZ6>ST(BXb$79|6;!eDy_o@+l`b?y>2%#k!L6 zPM{T%JHj>FI&YsBY*$E^L2tAP){JWL2qyP6Xn^e~Xq4ygU$E`kciU*T z7U0%pRNC*p4erQNM#E*{#fq5=a!FhFgY0yr3Wbmr1l=GY}DP(sVslWaZEV^mPa z1O;0Jc+5A-;!PUZ(Y3!v3A`g<*8m7Usg*R!%TzZV)x%B%qa8O5ntrdXZLnd0W+G{; zG3dtoi|1&7-g!uW!;*tjiEk3@D;{)FMoZL94f}`nd($9*=HVrw(K&25AYZjD<-4a* z>PPK>Ii$r{6q`*qCEzW-@A#u>;trc^YAb5O?YaZfV|bym$X#$-Adz(i4_bmm>70ze5|p&QeX&yshV)h=Hk_ zrXBju*k7AShYv-l-3@!k0h-FR8~tJ(pq(VW()q5!`wDDb$d96JdZ*(K>4L@%+7ev&*SDiKFTcAX*4hm{82GAUsU6z+Y#f4RC~0m~a+c zMl*{YdW^c#bagtd;e)D+08(aAt3cxL8va-?o1C4fq&p8E;G5=NdS3bg=ejRLQ0U-D ze+S^F+uDu=s_8m!51Y{}gMvY^fQ>Q@EI7?iI7+x-b$cU~F#)O|P|e{7=m7JlIGIZ+ zAAy18FrVX)wFZtC#|qr#lG-L{y7BI`hdBX!=x?YEaHbENSn%pZ7@D^>6}s4o?gJYRXC?JlFCfw^dUjY%1Pc*^$y=HS_Sk;7g!2P|;MA#KpZdMBM{?P?myk3ak$wKTK0|Q(+3F z@L>E9La5Ked9e2tz|=r9$hFw1hYo-Ed$cK(Sb|&uMo|{Da;$;?V?)T%v{4~~Hj9Q_ zqCKl!)H}o#t$L@06+jqL_t(1OZez|lfM7%DdxP3>GMxNMS!26ts+F#W-u38fKQku!ha7% zGU%keis_|hkY(Z}POJy=if5L|SUp^@w7bpp^*6sxFJ6C3{p_S#3sX=XO{0DE@BoKE zc)!6yTA9OQ;B_@^tm0>9Mqwk0F`h|5|+zJGDZW0UX5 zPk}AvPRo#k_n`$!!|8fF+vi&6>j<@eU!1|EPvxl~X`6mvi___N^b3Af0c*>&z3T?+ zsaOl=5rr--{7XY4p zQ>`Smyad9EdX|@wFG&_@88^yH{(_f|pMn_qzz{)J!9a4rpH~?-EnNX}$BSXX+t8!< zj7}z?G=2{$`_}4S`k}E-`qO~--a@M*>+a3A5su0tv51oUt`{h_FJ4{K(Rc~qeFWshiP-3 
zt2`(~TsZ2Rb`oGPaj6R4r>#a#*ZkwaqzR;Ejh}tSE+MC^+*bIg%^GjYBqrn<4re`q zPp~nMa5qi#BYL0yQ=QP)g54)~rprnvc%xq`0WQbhf~Pum*TLygDly@jEFJRb=4Gkh z{CNqli7otZZGO)}4oChS{9?rjck`sjDcm9l@*kC;hQNY~#pSuuIX6VE>!Yuynr22!bURy^IAVHmEQn@?mvGKLffM{n zeL6G;$i!?2E_YqBda@e}SNze`8=>5b_ZRm?kNpAFlJ~^qXdVIz4~q_llDERtOYfisLWhkZA}!T)Pz0 zxv@M76BQzN;7=Cj(KrL&m;{_TY@=xa@Z-B`ZoGQ^ibGXCPc!;fQb+~=F?UaH?7*KW z7iC6l1H{O_Tv`YKJwUG#wB!iVLw!@vtbAgjFOU(Uy8?m#C}aKz(rv{d0*%n#JJx(~ zzS=`0Zl(cPhj2YH!CDgmISUL@b)8VWXG#1N^sr8&JW6be4n5XuEwvMoSjuHK^sr|O z@QybMfz5A0f-2^8jR{;=%%EWfxTI)lyLa){Ryz?huwDm;-?V`#+UC6#9L{eareFQv zzfa#i{T{Q#c}_{Zn>7d!gy!ZJ!-2P^P$7JimK^sayP8@Bt^ujftTh=m679B)Cc-u+ z2)=rQCZ1Y-0A!O@POt-n`0UA_knbnl=j7~m9Uo4Il*mpcf?h_rm{=|v1KuMy;@Yc@4P>Hy?D5R^7;z;=hc+y~(7 zMb``RAlI2~CsUjI;BksaYeMUiBB{001O!(LtHGGC2e9$t)$8b&(`B^VVmy-`fg!=9 zEXGHQX3`H)m;Ik|NdQ|j#u{xp*YgnGL4$XL))?%H)Vw#4_Q3UVjQ$nR-0V0CM`<3v z@3Z45_ATR6c;+LO_XgfRO6K?i8rA&aMvS$TW)H{pfA_b)Va}RMD|a8zwguV!&mU2) zqZD(>nSAFGQ_c6hIxiGrwrw+F3)tB}2=XP`Y=4f8YUy+My!49u@n*m5F>}G9v(#{h zF;cw6I@CQtP(l8|(ag3X?-jX_CVBWTSaGr%jw9XjmHDY|+#rspJZy84U>E1Sc@%MsVOMb6+j#vB-P;3gcXO+NvQVyBy3>>fhU%QY6W4c1`@Ljp`)7J7rhIZy#tAbuJ_m?rrd=wcE!Y38Nw~JFi3(Zj+8#S#NH55YEFk#xcJ|j|oXCGk) zn`lNb_8zbFGhL(RC-?!^cpbqw(!cn9=_{%E;5Vy!X;QPm9p}mZZneCTA06sQAg~4! zHaUeq7_~Rxi~ea$M`!loSYBc&kLGj&7iwn+M(Mw(!>N)=5aH8?@9;GSiPpcuMhT+J z;a}5e)*Wja_`Z!9YxopGu&})-HiXs#S^2z0872Uf?kwNoxtbmL{*e!sOLC;?n!)!Y z9Z!TLJ!z6|p*`D#!mL^%hkVosQFvC-59-KOp>LS_m&K(IQ%3)6-}&V&dzH+FJL&PNFrB zQoCqy$tB%_gWvt|H2v=B?>N0~gE~+%4^Uycmmb}@pYGrL8P)=TYBoRmR3Q&*mK0!W zaPoRgJmwobf)?x6Z-ae2S}orJyh=MQPOTc-U4Y*M+4^37HNQXb=*PLFeyv;USife& zs3^)Sz>%M4{;M@2I2CK+D^hD;vkqUqdKLN4&d;SPpjQKbW{VtTU_aiWUYoQfYpme! z6|*^tZW@tL|BfcIYTd|u!N{lw8i_urq^mxG4wKqb)*I#4dA-1aS}VR2e8?3S@8Zu+jH5Z|9P>OOwyyJ1tW)|#=k}$)uB?e@+4N&4S>Z2^bOwX*`%H*(+fUS7 z^@RY~h#daWWEyc}qhv6Ft6pa(_f=G*nQ&qU?dBR@GO2|M`B=m2siJ}WqK z95rZW+O^%C8WHqWwfc8UY3qAVuTjftyUwm_CC%L5Nq7JF4JQ@4BRFa<*NDgH*Q9^F z`TOO_`}ow#Yos-P<*jqT9ed42=2moK9Amz_elp?GX;)*lcd4X-u<1xN%)LFeim5<4!8$QDf4R0QENocvZ&nEvjBK zb$lUVq7lLsi{B}P{N=~9>F598NxJuN0zXo}XYRv%0s+ItvA_!u2p|0(VZOn9c<5mz zfL|`&i}6kp1b+PO!n2N~Bgj&K7oPa2z>?RoCl|9t&AF&mxKd~-(k+0S;`dQp;<&4; zu&86-5XyXL2EmU3rW7xY@ErmtfX|_dSx4y&NsDqSJnH)?7UM`L`s7l>rh`wO4qE4p z*#?IU)X=cP$dVKEJS9!gO94PZ*WKt{Hc5yLa07^_VA&6fG@4u002(!@E9<8Qb3mQr z!$``UHDCPB;*Ui~2vEr2pPMcQ7yjn6hX5TKO{N9ieu7uTk%t@Gf?}Q6#F5uWaUzxL z|B*M+rSCJ_fDZo^T9tQ0aFxk?%uh$mayo#4!7O<;1t3?iBfw*(*X#QS6NH`YK28s2 zIbpGJ7a(doyFLvNX;ZcI$>Xom-0a;rb*C+0X`gc}6hyWi?xKBp$olNX z^XF+Ehw&?m%P5p_N-Obh63k1WDw})C>b&`2fd#r7L*PaWYt7EXIOR3L^#6f#+7Z{jAI9vS1A70N)a{@Y+w5 zjo%jso=XdLdl-mU&bxR=en?YpfW_Tm-%vqZ=NUD)(%NewnDu4@z*vt=B&Q;U;nB#wt{*!)$3K5{H&9u_Y zJ&$Uo!)k&w~OKX?L<#LiZs4>UpU}oq(;|Y{ncN^V)*|3`$rCMop|*H;t|RoBRwd{yf5^R z=@@fh%z+Ps1Jm;@gljZ7CRqqW@*YCH$CkUuEHhulvM;WA$ah|d_YmxA2=hAj<%GcU zCSVAH5`fI>HC$4B_YBjSEgS&1()^vNG>rvTsexal+A1dr)&TKmFo{CzfK2ug@Y~yH zk|0I9;C5%V4^5C^G3hd|a=nkk=FQEGG=W3myZ|)@i`>vcE8=FppZrzO%{q5Qw z&9#EK0*G2&O-`6A;b^OZ1;oqOFVgCZN?KgH7d&sfIRenQa(^YwVN$BRV$j+77Kah& zP(Hv56ECvnLbDI!>Hxn4ZGvSAeP!}PMT!S~f1|`v!%Ndnk4srI-Vp_Pf7{KEu z!o9q$AUx`;i2ngvN{8B3I(pYh^8CUAU}*+Ct?wvs(3DFam#2gc(-eFpZ+#6#ByHwOWRp0*SgC(cn7ZQl-0uzq2l;Gz+QP=6t^c1bPk( zXyG_{vay)%E4VTB^K?Lg zRN%NECBOJ%cqXa8?-Ai~@{VrDQjgB25MFrm@8hv8yl zA@_}$0X%cg{bT6MCvGaHS-K;?S(Z*V2K6VR4*e13%YTMGzw{=2kcJcC^zVU($NU_) z=_L=^0IG-dQ7|FeVd0Z5bFk=Bj=`4KEd2O1V&sa9IXH8K2l66jpxiIKQ=}6wd*dyH zqKuiOlgu#xjBDj`S#IYo!QI&Of*#{Ar1FSOD@#jM$5v^s3z%ujd5Y=wNBfK$YBlC+ z5Wy(@aq;^3^>5r5@Un2f{`FG&C;#Mogt@$fbe!4Mu4GsZ8~D3-QK@Ib>TgJ0Es+l) z_Q|$zWs02$>QgRw*$u$tZX8Z>VD8>ds?WOPMi)BS-cLpx_+@9zT8aZV2QE(ljMSf~ 
[GIT binary patch data (base85-encoded) omitted — not human-readable]
z7)|-lw)`Eg|Fy-%^ta!BJFNrexVe4NWEkgdRID{k7~KTDXnwEyITvO(9qd_S)0J<* zPnIha_TF4;%HI_}W%6FP$J8cwb7*q>HAD2>-z3j(5s~kTeo7dhAEL>#!A5K}W%n^o zYu(huw}@U!SlZKD>uB;_CDM+*9ZpGZFi-Q}Wt~Tp8}dk8_hZohiF-!x|FisA0QfQ+ zmp}K9|200e5J22GQ(G#=NT~Avz$D-elF+s(pefvAGphN5;7bYNT@8EWAA~UIg~O!qXM(?*3@8IM0c0C+ zUp9C#@p)FBWz;MLa!%@xKHE-LI5lrHD*zloPHtZDhofJt3gwg$KG`n^Ul&wSfD7#- z0Fja}v*q`UK86}>&~E|1M>0jV87hJytS_3bx#e%6O@Y=k?LVu%3mn%`h`jRV)syw0jz+;L*d5w(e!xq*k1f?IQdOK2qAf7BE$wA-k|{CYu(mL|F zjW9d9zT}zY>yYgUf#djxcurbSz^&s8&`5WO`!!5_cF<_rLa^SX?r5=szUa!~sD$<& zbOt;T5k5;?AH&Q)00|h6rY>U~?xo#snm}N+U#ephtd;uyf(99?LxjFV1ic0zi2jX@ z<_0c9mpKOXG1@U|LnuJ2{iW0UF!ckFW8|p4AcTZt``I%VxA}~g24=Pc#3W8SIZ zW4$7BE1SYP=5hz{okPm&GDZXD<-sb>>Y=wj+E!uSiH2L1eyO5qEUyaSOCNuXPnCYH z3gE=p*~O0Gi6s4^jkd$McE)h>jdqlNr1an=Kj3Hj$g#n98&Q3C;ByM_J;uu@l^)d1 z&&@@hD$G%ZU(Ghvg3_;GAOCvycYm1fOlr10mM+d*VNPK!vUndba4&7*&rgGoCZ?e^ zz^WsGN9uC0kIS7&0;+6+!2!N)0$>rB0+TpvA~hn^Ggf8CO<-zk4^X9$z5=k)>FlOY z;1#QNz%NW0C#k=?vmIKsr#~lv%{t&~i^zl}9GX`MR6yW6_#2BKnxu*xoHt=4cKlvtS#WD!-n($~>6 zug)|6YRcj7?8tWkbKZT>LxLzSAI?qN@&_CaMnlH9@nCrB$fGIF* zPKQ3dQN9GQTS^~wchcP=W|#1}aMU0DMqk)Bu7wdnmwi3jsM#tp+9Uo>Kf*imG03YB z;E!WQ4-ViX1|V^1n?Nm9-(CZFsTzM z0v$~j+pBCQPDXw7HhbVd1Mo5ldTqEczQAOafjxNeAo9tXS$sLCe{8p#f$y!cC@gi- zr|eB8IP%U|{BfmShz+eC(Yjb^JZ<)bM;}Gr93~WOIKFkSk7;T`)q0Tr)s?aI#~08d zgKG;~eHC9ce`&RaDJh#i3KjtSS95iu^{~)c5t8Ma?u;^@rFl*Z2W%$JLvjQh z-oN|+XS&eVzgU=v=!iLeyv{nRY53-LH+^#%({6+SLE+~~I#B+V(q_~4ej1tN9IZCsMyD@3bx_i&R= zoue4awU5&QzV#i+--F^6va~rU$9JQ_OmLC^LbtpDq!p*EoF- zF#^z4O+EV*$Sv;l?-<`*i;7H8U$)g1tEcZF?kw};#2SFgG~+unm3HAv9rD5Q5zvcq zLfEZfb|-jvMAW$spe2Y4bax2mqfZ?)0{E`L+XbM}Q}O|VRjYHGQr2kW92v3jI(D=H z^$PPiq(#&Dyfju%9vk+jUygR-?=O3!FHp0 z2H+*7gw_l1;l~jM>s)isqKQyOzW~R~%PK%`l_-7-Wwf~gJLaxkOi0~nV|6Xc3vc#BeaA=8{LIBha%iLt-?9Xn8(rX>j3h&ZpgFS%tigO9RYqlEuYampZ9YdKp;b@H7ej^ zaF+oHX|vW$HzqCsb{5hG{^USAg1VnnkE|OX?aOD?F^Lvn4Ios_*h3DF zSkUMrG|!-SwJ9BsSQj0V;mbPZC3`78^Ff*@^GuC(VjSMTC%rOr^PRE$QdZ}<=CtI~ zu8+>`&ofykWvVX);AO*QZ5>V=?%cYbR+kpi{DpNKH9mkqL^jWNiq6J=SNI@1-&K8W zwd-=a0$+wH`6B8f`jQp=eFzL@llRf8JW05bsVlsF_m8V-VSOz96Cwcpdu~>Uck;b( zB2!PStxuj$Y;q`Yw0>Pqsn%Dn0mOp1@l1a23zz$n54ylz>uqB?;D-9;YNM9d*ER`@ zjF#6bQCvpHKnC4r0Jm6lbWU$L_>;*if?h2)eE;W-8)@5(#%SlAeBcS+zYIWEUUixL zrH>z^zxcs=n%rcwPJ7?~rEXf1Zureto)nN4*uc%!C(;yv;`OzO^hc-(yP2BhWcI&W zuczPq=~lwWFC(F;Xg@t*v-Qao8)x$U%3cN>2xRzO^ELNXoZ&u9&VvayvTDcZ{4y_J zVCu?FFMWh;&}Coa9C}uAVN*yRV*}Plko~pQo%H=Xo1qD4umd;aRp?-;Sxx`%^||yZ z8fPy{gU_nddHG#!%3sHy$~QL%D~`aYD0E(u^H#p_yGxi0amxPV86xOWzt>$uPZ4k6 zvrEmc7fJH^Nuv7v5`O4Do#qrA4E`*1`yyqX_h&XY@o5VGHC(yxnkt5du&4V7_0IHc za}00hyFy3!NI|f3)2g=Y^EA@)c!#DXFkgW7RsUZjX$dpsA!XQV%$vDx#$5dhZSL#hoEfP z56gb@Ln7Y+zz^8N+0XW+fNc?=mEj{vQ))M6*Yw5ae8{5k zDNH-Mwp}nS+ZTrD=Tt&q3z{G!^VQ+JSsw6x7ZA3K*4b`A(C8{6I8R}P zG>&#m7r{Dw@L*!=9EeVJ@pmWhz)-9ZiGxjFwNsTN0o62*VfMLx^Oby{FYE)%W=;BuX z2&QD8?GHg$-7Ku(msYdXpjG>b-vVeJdAGl$b^WQzA2a@!(f%$#pL1b~`|-n_8ptAb zG_ohrYMb0gBW-3Pl_%jl&{l^z-&Ugyps5FNwa4coQ8P>MKH}=7v8^2<*D==Y<0{b2 zBtY%_Cuq+Ztr2Rha9G^EL|}(2jdTrP)Nf?dBMPbb?IHWS#uiaa;Ug_H`8{Y3?N;6&I&hbEXuGC*cDYgbh&PB6 zB^;3xVRD*7kt;m_*CobLN9@q<1VJLYfL%ufsq(?RFM}q6@G|PNzhli|T>NbRO51Ak zmCzFNnseN-I{EUq$5VD+e%-ut-joNL@VAJ_%e#4+8 zbknAWn!Hw?Az$x8hnYV;e2u~vbY{;eM!lZ&C#SKT=JNX<(ur*!CigzEr^qjS8Qvvt zyP=Ir_2TVoO!8{F`IXPGxrQJ|rSWsv7|r9d4e2?o+-t6qWA&ZyETup9=N`bYiC(hY zNPqOvG@F1^I0JPvYQv>fPMC7%^e5`;#PZ3vKdGeukxk8w3+?pxFVxZ=n@8}fhv{tn zpKUbsy|A@9&4l)(NUCE|Y#yZMbQamhYXV|xcdn!@PIvljR?n9I!|J@U4{pgvn*GA% z$Zi^JRBr-!{b+7HbA&qN-dz@sn`|6>l}8xM$--ng|IO{&oDhzs{|nG-7a`_FG`&vd zdzSBNl;n{jUFODI1fTMtT5I;3^?TZVPs&0Q#$RRw{hL3!9i{61Z3JmtDDt zPP55-;{k#tI_KZ{U@ZLwRDb{7cRAUfE2m%GVMEAqF}e|v%Q*WkEN!EXvj`INoWgL} 
z7vCMLIj$)*NUJ8xXmGOfdmpybJAedgT;+7(`>)+QN}mD{e>5jp6^}ggkDJa(X#H35 z@Alh^8)?#okY{HQQnFM4kl2q9>1*``G`~Cn&r5xJ_TJ^os+_SpPtWi*|2m6mc!iC^ zi&4+3t*o>?0e$>OjLltqGOgDXoL+62qq$38`s%ujxrthaFIu!uF0?r9M{9D=kU>AE z$&G0y#AT=XI{!s-XRUHW3gEygZ7Fn(St=0H1<{BA?u}-0!0^RZvJx8c)bIX{Z1Y6a^%J7>-(KUEzGFj%I$b zco3QwC4e*mJNaNH>^w>p0&OMu%^2%TAAd@2fGsmOc{&8N^MZX5_2mu)?+HNpW^+9J zO(|fj1ye@D#EtM-g$3!%d-EDSY%6)>*C;N8J{>!Iub_ayV=iF1!S*@Zrx{M~XqU}B zn!E_E5t)zvQiCn(XvZrH@J~VesfkQo&z?6qT!L&0s?iVBOOr1;=3!R(^<0=x2#S8- zp4!Agnwmo^rHM8cng{n-19}MHA!H8oP-8gi1}*l~Mj`T<{z)x2%BG|)do)k?>kk;; zjlER5)J+$QI03(knIX{{^)*#t7^G3;35q zv%_b@=Ny9Yg$oOzk!@S=-MbHMZNxZB=(Z~>F=MG+4{dqY*6)eiu}08`}F-Jc;mWk9c=T)CqGMjSNmz} z%0k*)!Q3v^=DpO|W8VR&wRNzT`a|t5GmA0j>0R;x9!e7%JIvj7s@@o+jhS)kWn%>J z*u9_uIe1x&kL~BHAep2V8a`^y@d|-gI^~}{MUA=u{Q*7IpjYEm zEkm`^yw{|5=mc8zkW!k2x6MOelh9KKesOeQ#7s0>0K!Tf78>Dla-VvzX4cTongNuU zV&5~qOSHxh@2B~hxm3K2W*F=0IKWh4eKpKJCb_=>AzGFx0js}3J^ zP*@xq)^JBrqm7$1s%vGS?<%0|)#(fA+H@sdZZKxh%MtrP!L*5Oz>lMC`VJmSyX*%! zQUFo(zhfhfxbN1`rAlW`7jbcdmRG&Cm!<)dXPiH@b&P|E23kx&b(m$=)9vyBt^yjA z$J6BFOWsMXB4u=vfm&)?`aO7^ckoPyH6{jU_&|35@Zl40*4sk}!F#ner6uz^3CR0c z4q*7%-yiaE=MvYV%{Dq(&NZ*|%K4o0Bu}Gt^66=`1urw_E7B%wr@*vem-GLCcMnZ8 z#XsvFHZRBb(RzY*bg9@#xA6oypYuM=+H2qY zD`{BSIw0y4&+h}g9?C<=Cp5`pzrbzq0)+@8bjs6JIV=r-%Q8CsZ(e@h&-uG|=bz)9 z?5&E``qF54*@!&3bog3Znr8!n!1(TGs9WRck%`=Y=!Gu7`-^|qFI7cpRqDx!;U%h- z5tKvt0QqU;EldY);0$zMcxGvHZzr^2TlkaM}fB$OQ_;-YN#!k}c z5m|Lu%qQCEtKa=e`nB)>DVx57U>2))F5`TXRqh-lw$bTQe=oh&C!%8r-Y-`vDNM7q zeKr}{HX$Qk!K-U*V*aQ1-%nrRBfYld z(WsI4I2qh$QEe$4F@a9{6tiWso>Z~uYJQmmW!C>JNi&AG_WS9t0bVUI)lWie_Q4FB zr2pj>kuC4n(%-qFe~GMbuKww2ntZ^?>~=fd#LjCO2lJm^b^`?I@v^t9UlZ`^;=urL zX^?(1qC(?VU%EdXtbmn`l=ELiyzPK5M66Ccl#bZmrKWo6_$tw$lm%;wDZf z+;8Q{g(6=x@@oQUYLWf-?>|U$pJLC?_pBV+F*2s!7yhgA2hZ?sTlJAKP95ZLd#+5p zOIznX*=+)i7ch+|xq~>T$+`R{!qpWvy_=j6KaH1u-c7Jq>Hsd2(5^!9i}sRqHh0iZ z6Py-qV5a*;CF}pim7WRlnkgzU$ek5LJJ6pev%(?$LjVCP=}yTFznlNuG$i9XI0#2j zoyMd_ZBosfOtH6Md=Ntr%rniaLqHtC9NTOj=9Pg7SueyhmPCgXFlTc8_B2?ZHkvTT zEXIFFA25G_iCO``tBko&2@@avhjkGIHRoW<%ZvQYh&;l)htKj7gRgX9EI=N#5djUL z;qY)T6GqfR;R&Ddq~KYdu)G{9(9l&vR?!r=TQKeo1c9)z|Qm1ai=ZQ$F906~YHCJyEoQlr=eV8O%{ zzePIP2meWh>W4%Qyp13Uy+Ajg+}cW?eO5!Tbw9~|lg<%%)hW2(@WBDVB!U;53Q)nh z1iu7msNk?%Z3O4T7`|@8oX|y_WOq4k_MS_Z!591<02gzoPu=xR6IxY##&J>_gAdmR z&Ga_3dI>ia#r^fT-}#j}*pIB^Pn06v2|$=H?=gPT8hucuo{e$Ld+`M|GlBLGdzfv2 zjvhh2>~9ZlXPJ9-A~SAogZAz3gn4byHu*`fEf{kT4f6>}e*YD zS1WI(_VOZ{ga`QBnoVWQT?Gbv-0S`TgH$%8Xa>UfdhBroz6;Cu7;^x9Owx`3mB#Q% zRnvzp;Gwr01yY{SM^mTG+T3UC^s_cbKa~-%>*PH*Ih($M33RWvkyhp=(;9uCY3(6x zF@)5`ggI#UAZ-Ca>;ZsOkTGXAY46HjYUA9$iVWJ^!8*k;rIrRuFe0XOTLgfg2B_+> zziUE_t&JA#MO&wdbA0M5kDCAg*?Y4XNwe$D@5a7lFb~)7?}nX8%Vf8LzH;shaY!TSDS7fUEjpX@!vuXxFGkM+ zEy*}}z!}N-a{p*PB)sg9k4VhYvh-`d(u^%LSzgL-;#}hUammY6oqQ5HN}jj<3CLyM zX+Gom+7@$E&b34}%~>{i=8Ol31*yOHap7$d4{g7dfV8@zvw%cPCm&>*n1g?wO0q7_ zX#su6lxYD!2F%}&vGZKpi3vDuVku6RpYR@x8A5T#escdIAeIA^6rz+-y61b^v3d(ubT z0O2<`T(Z>3IE@{2i!N(|%LLOhrbrW(dYE;*?ikA_O}@wSl1jyE2XseSmJ#zK52@`( zHpu|CH>L^J_*z)Hx)K(aH}FN~6ac6wvG^MG+H-E90mpOfdbqGYY0~7kLy+{LD}b}* zM=rq>QR*8YQ5{TTw33pEYC7!X7#}g#=y2(7KK$CBRKw!BQ}NMBM(YIojr*POW@k2h zcd`(+>r-KM^#VfB6zlNGgW!Deq{RVd-bu255Bs>eTM2*lCqE6?)e2wy`uD=xN0-CO z``1`-G_O9q@6omVbnfUwV4vx4hARV1zUfVulqXHDk6SF-T6EbJf>i=Z{|K<_O9Ycw zDS1@O{oPX>fA#+T@E*ZK*VuJCiSOhjSSswNA(fh`~%gTNd zefu{y=ttc*$>ZCc1pa?7gs_3^b%NpNcoxs~v%T`CgIp@@$LCt9V+wc7swfuo z)K%A4yi(6Z-^XNi?o0N`Bs$0>Lex0jo;M@&DX>sHZ(y;YLg2g}pXcXo%oE&SVWMS; zv7xe?!|lZg?+yA^#YD(S;a$ssKEkncSx_dign~(^j8S)Yaa%2{7O-~k6f=YL#&}l< z-&upUbB{S?xWCC~*XJZqZ31{D)=cyR!f>4C$R=8J=%ygQuzc5J6Y@IztQ~d$Bl>_6 
z)~}SqTAG+Xnj;i&>CUMjqTm8;@vD|S?ygfO_?cEgJ!FLhY@#ldWjR{77|c4BM_Dmm z6lI%VwFbZjm{#w4G8zgE0={*&N;D_(HU;NcKQ2TbXG7u*1bQYAaK?B?5n$sYKe>4p zMfrH2;1x=x^&90AEiT9_3gf(k2L#UFMMfaD4#3Q?zZcrXTyy;^3jAPosXL?n+GRWq z6dVhp9IN3s-yb*GretuK`zWZoKs7m_ph^6p?r405G)g(ta=o1} z%h8KKJpfHe7|@#Zq684{q^vzahYNQ?xc&|b6P7YvtW`4Hm%-d5WAJ*~YrL#1mR0+> zxUzr4?*!exzY_NU{IB`)^WoCc)vz^-rB(ehbZta0a;J~-jJX75ShrXp_zIaQOBhZF|o1$y)*e{*a5s7VDYyEC^e6kS0qkx&kbMt-OUL>A{)Tp2$uPt(kmZhk{)x+ zjcN&ZLQX?eCtA3CE$r5=pu|rQ9t!unem#^s`(csIzuEOWxJLp0(06fsUKS z1@q~l$jV!SfK=>nibl!B_9+aIf%XWdKcL?h0obN*y%{=x`A%3^x)f%<_BmqmVrf#D z2`k(?T(}D8SmuPoJR91?^Xg)`zOxqBm6hdTc(-a$fBFyqzaqb9=o9H!h1b&y$#4PtcV1=!(tm#M3k&@K4kHXI-iP2{+yRaYB#(#6fyP|h~Py~1>;(57y` zN&Mq!__)?t%t_ii0pQq1k1_atXMYE)=&gAAV3T>&1i*FNip)i6U$iL#9PK5-}azbc@?7=O-rd%g)#$Ce_pG^S}Wmjp@dV5{= zp{!|Zlpzf6U*`8Lu^$ImtZCiX=9wORIEUM)VT(OAbKs^4{cp>CjjBGX)swy1Kb~MK zPceJJ%L-s5(>`~Rauns){zJ5}LPjc#Ejz(nU-Mr#7K35f(;C2Ls^>I;VHnT3P z@6vx&8G0sGw^$)EtJ* z4cyeB0Zv18Obqv;OFyeM?RK3}-q4bq5n@auxbHoY?&N++^1JekDFwa!Y`MPDx@lp( z&pdX_O&OlEr9aY1M7@n`<0tQ43#%&^!`w2*!Ow1E%37y0APxd7dV0(ukvakGoKY`) z%B}a*z(NJz*y;#LebVx%dj#{IQT?B^x<{XP9NHXH-2o(W9N@p0G;?ez09Zdt#|dHE zJccfO`TNA?T2p9Z%+5GuGE+R~L+9h=@YnmVgk1zyo*q>teLq^W^t-<&i(Y!=@STYZ zqy6?D{!G|e<@B4)`B2|tv3!Bjb>eqb-6?tsN#SV+1!jkxtF%Y7?)nvgtJerXdKR!N zt%kk&S)N%&InjbD*-=P;efV9bb>V^_K$vvo9=tdG z=EAS#&2jU~K7SxF_T?2o&NRs8AyYlLO`6zZH-F6lsQiAUiU;?lACmqH;kPju`vRbs zbYix@&i5(E`2V;N!cAlWL9j>VmFD?8-UUF)F19w{1x@zh9vb)xijg|Xv+Pgj`z*9r z*6ipB)05}$gcW!J9^c|ru>uObu0;Y_PMU&o^yr-A^hJe96zq`&%F!xEivSi>kF?iP zL)Ss$l|)xA#z>G^KuOCU`2bvpER>xD1K>T-`{A0J3Ts$O<2nb55dlIK59!8Z<_{7LJK&Yj zhr#b}z5QO;YZCbUvtPkI>;`KhKrHm(G4_s^ZgRWyT?8GWT~DR!6mjojtm|0VlmRXl z=a+~DTZsURbSs#%Fna@qY8OxqaD{^u;$dURapRkm_H(jH4vihqhGk-r2v`>_<2dy$ zTJbU7ZURZy72M)FDxgD79!0L1o+h#rvFkd-c+!eN&_`!nX*7a^d^aB1CQ3D4p&`TP zL5YK3CL6fJ;a0`92+EST?~fRogYNW3r=;@q9`j(6xLDf&YHa|(F2KZe12?*z`vk$( z$_+sK$kKk8Q9_@RYlqV!^K~qm;M=-33+_9Q>a@l9UAiH***vkI{oQ}HNv!hPdtuLb zYRu~zK7JRLE`-fGZjV^5IA5YgBy)PJyMsbH2otzXR#CVoIZb#88EKH83ZK3N; zWPwi6?O5K>fq2TCF1rY#umqSoM|`XSfZQIQ3Qg!qOSKOD6Ns~xots2n12|$VTg=k| z8;w)jMcg*&#?y_ zq$2Xds0%-Fg$^@FhJv{{nlm02qBX1GSCSDry!Slq^Io5iF8pq0MLMu;%t>AYL17uo zL83g`=piN%3>%}HgImt{A(jgtgCt{$8>MG__JzL9DQ}mxb_!vovlOpsQGH zLp%sXB@|17Sr$R<%U&z6csrQU#P8Ef6VrvBxfHLWGZ* z&vSlEuyKXBU2WPjL7@6wIAq>{TyJ^1WfuoN%=}J=>m)RyL?70KC$0i1UyFil~-N~@4owPRFW*q9qP-MFEf`h z*9SaPd5V*YiRDu5LTWibNgaO`z-tO2!0%~Fm%o<*(SC+DtdQR}dA&|vfBW|BSU>Oe z`_HVcg%zy8bb~y}8cm84VEX&FyWtC1N~t6srBin|O1qekqg><&tgCr+OkAJDEioR% zgCL(Kfv<_pR(Smnet=m%d_yUAEIB4+tTOi4%^c_hcq9oRp(BR>0MIKm3~YKbw8n>aF3WaZt}q*pJ|%M z3Q`k!hxkd0e%^E-olC1H&xB96Yd7FeT9CM_p8#F-D|Jdz5*%FKl0~4Gg~(G4xLqbt zaweKvIR+>>2>5j@Tc!z6Z9s5A31cR?f2!c=OCBq&IA%$}Zj=HTopE?9Gw#Upo1c>% zS3xHfpa{UxPt-xzy}0X36$L+JA)W>~H@}F54j=?EQ+!6-64C7((rF$6Ki$DqF3w`% zFo>tWq3{_1exw9?Pi#BSN05P=r7{77ee&(mMg@6pqFk-UtiP>!Fs1dUb|b)!-$!|} zf0h7ms)w|}xIhZ;T6PJ7nD=@bT5SQKKR%{ z7Sa8YKJ$42iUHtNqdpyW*=&K37%w)%0JR%oEgsuPrt-SMoc>DcMxLaBdT^28a0+W5FrJ3ttGENDImJu9Y-Ac}R@vPBDqWXzyVn1{+9(Z`Ubc0#31 zSP%cvH*@pzoFZ8dn^>o(Rw|B%^cLyrfw6V~rHZ7FJi!wG3Ea$fLJg~;T4Re-gBxLb z5BJ2@26X{K(P9W%BRBdEC0g1YLW?03)5K=Vx$=&hYC0PX~&$v}hc`exY2vgy*|RG8Vs zO75Lc!seZup-6!BTnS56^4-HSEx$*3lZyl?oe$YX4$Yxni;e9t^YKR^hf86e`sHuq z;WJP0aojpT#Imb{7fI|H+9l{c3lJxGzm8jC4e)-UrRyz+BXGpeKJv;o?%gdE`~rNf zs1V0}Ijo>&3Gn~Y<>@e4#X{x&ozUMyp1VY(GeE2%E~%a>pAqCI9L3~d^O(vEKIQ*8W} z(Ql(mJI@ViUqhF4hSN=AQ{=bsQo%L*TwqIJFY*fN;Cr-2rJU$i%{&wml$NzJji97u z@=8iK*HPkv2O!NNeizu}&EkS=n@iCCH1!c2_QdKnBf4SA zW4u0|yw}%w@}4vg=b|Z@PszJ|n|}8<%QPLGGv*qBK>9!z+ZGsrR-Dc;t!=x=u5MyP zpq?_dO62mVmeTsg_F`brXAH9RAComSSc~to`F)>wr7Zw3!NzzBC-re`TpLV7z)N}9 
zxsv@$AGlUPl+k)pnZj$X^A5b0$H^vFB1fdeU|qE?_ol=`@AR@A3$4H7K{OUineKfK zX{T}5C8EuUn!kHc22PkLgf#q8?^vHUL}*&{$*@Mef@UG6TfS3-#r4Ch@@d6%M*2Wk=zD* zjMQV2{CsgWyiwc?clIlYH&KLtjGP{RqzEIgV6~?U#*32dy9orG&kQyRRH@kV=2;}n|GZ@wAIsQZo-Wg7(Nl;&E=eDAb8}HpO5AV~8jz9s zN(Cz|65K&Gzl$vR9TraGb^Rh1U~>o#$LF5Z+t^;ZF>Z1H*H9i8Xw!K56)e8KgMg(A z=1GaQhrSquO9=jPNB#s-)it6>?4;@UKSIt%Fv{u_f*agQD-?KE;fb#YX#kf-9()Mw z@`fmwW7$w-o4wC{&Xc3bdbyjCp>CN|TzJ#x9?l{CtPG2WVDS@#Xq6Zbqz$^XsRu=-l8Gxw;eoIV4X} zl!x^21;}Hy=kC67hvbj2!zj>KP)dg=Is1{f9beBVKx%!1i(4LaAukxJunkQMyr@`c zofqTo@SZ3U$y$08>`1ynp_8$J=LxDrlI=SN-&&HATrIJ;xc)DLD?Ba4($1SO||1nk>li{V;uVIzZ3Gcl14x8VEG-whO z9R;e&K?Zfelr}-XQN2*i4{I)6n77KEbgOb^*Qu2WG)ecdFTKcjKWFJb4}+jK&?Jho ziW`I$EoWECx;y!2mKLE30dx@A`*>GWVlTZv; zSpkyn6CPsw-mOqvo(tD&H^MF4!*=M0qNfisK7u}%7B7Z!lUSuIoZ{KUr4ehgA#SGm zEEZ|dT|7JjP_BS4p0bmcS%dvz*jodnD=frzA@YX`n~{yx%~06H!-4VFcZ9|Uj2}Z5 zV6F~bHV8VN;hGzza3*eW%mEB=rIbblgrp%wH~ToF8zq~bWn2*R1d;Ev0kDmgmDc_R zW5tn!^l86V=^MS@$~|M}LYMsWzN~R}p-F*(#EQnDP1afg62XZGf+prsg%!z<9EL z0w12R+fV(?#&39rpKVXtUrDj>PWnc`*|D>Ie02n4e|~yZbKOjGcl|OZn(q8f++Ei& zo!i27R(2ibdiq)!#M>Y2*|XLz0fn>x?^4D9`cnm&o|y?XEP!+H&P1(%e$q;vHR~hy zjhYm%YE|e7t$pG#x=98}Qam$DV)Dn5Q`R^u4f%UJ3jFdUz+A>|ei}`BJgR)CWEc7l z)?GbZ0wZN4RQb?-FZzXa4t?;!bohYN8J9l49%gP}nhcO6b1_NyRmQQ53z*G3??hmE zh`#Zk&u@i4%PzApDx^Po(sX;0l`ro~^#UbhK76fvC%oEQXHwITPpNK*msm^f@Vd|9 zzC*qVnD#jNJF7}A-02{nQb&bC-3HtpbO*?!d!vGgi?KT;&)reka`ZsRBZkOVbVzT+K~*Ty}_dExlT z+q@oEkEj$gc35ofq9nFCWMzh!P1Br`r$DR=N!N`%7;0+0)uH^p%4nE}R}kM20WVj) zOWs()pcHWUUYMO&2>m8*OSlQO2CP+_N|uYe_b9#y@g9p#^+-t@{4}0yr;4RCW?!ki zCyLJDUFq86j~jf$iNP>%zNg!!n_>g9A?1P0R>P6xGR@KOaNS|f}K zHhYyepH0V#g5t04&`-j%tHk)}F;{l?*j&W2LLlHESuxAE?R-wH>ipgMrj`?)QrB)* z!p-+ttZ>82prrE{l}>WNu(5Q}zU0@Sy!0#r!W8(9*>MQ`l1abi$8|1mV@=2Im#O)1WZ=0hS zw_==8>Pq~u0ibRNkfZ=z<&Mhu(8+*xi~T)nVr)F<7~sq1^#LJTxROchNw-oGi+@|t zNWcsFj{vV@!2RCmc-Z&eA72cved)=1_C|C!&hLfGoE~|d=lg7ewVB5mtbrVZNjBQ5 zth2g030{s0=E)<80tJ@@FkAGK^xEH<4A*Wp!hGg_JP2m5*$LaY3IIYty}uT46|Bx- zQ^vUMZxMgYxnN~R!;@vsDc0&ky%<;O4Xp5{=^R8gf5z#xCW{@foc0T@wIkU zA?c?9^RdH01E50yTJ#OPAd?}G71md!`V20v^eJwzZQPkJb?V{TCIBDquW!y^sf3&2 zps`AyHpz|WndfcjMnyQ&uZDFjGfEs@P~Ih08GOG*?5>TQE1|MR=~xoB0g><*35Ac} z34>XkBL(KD=NwlVw|HtJd~mQ!tSkCfO1X_&zb=<0`cAghA^un&Pn<3rlOSZcvEp9X ztI?+$NWHi@;)=Kq2-lg$f{nw1vJgrM;mMP^ovXPC&J?W`Vz7GKsr8%VpYUGi6)(D4 zkT6YZK288d>0*#phow9}qbig;| zHO{p7CNDT83FOL?rrP?() z_MU5MtYysjl&Zo@;B7;AbQg=$9yBaXu*^r#83FA<9al>D5gH3IjhU0uta;mUqa=MK zW9PNQ>*?>lyH@RE=~{tjS9JLX!tTLCm`!ZZv6e$SE+A?<$}(y7weE6l)ap+9OV=c* zDSl4p!gyKZpHte^h1~qJ^s({EI>Zo@RDUef0P}!0{I)nrn-NL{i^fxO6tN6~cr)wE z95XzJ8x=u0XV!65BmP&6{l&s|bYUFNmVL&@j=zoiJlK-sZ~IZWnuq&H=Qb!| z0Asagel6h6Sb9tgFLRPfUC(nIv%w&lZcmyNTxtXj^tgBPevy)k=5KV~4;Q1$$J4Eu zauMuNcJss;>k9O!Tx4jwW4oIIU3UPrHUYA30&so(>tClX>^`zMyur0A?8ba|f!$|z zCw}Lnk1(}nVShwT1>GD&fw39_rIupjNo%yr+ZtIBu?w_XIitQFkKn%0cQN+MGlN8 zLCep$4vw=Yg6h(U82QA#BTbw_(w}T93r=oQupk+pK16~m{ zLzzO6OY&4WPphR+%9y}Q9ADD`jq%+0cU`-(DudBi4W*j08}NGkZQ@CD6DXp_oayKS z2=6rpnY_@K8y`I^fsFs@W=JW&h>|tKXMh(NNH$X2kRKefOW}rOGPrC07(C+0I3xFyGq3bv7DAKUFHo%6tf=?a}wZ2k| z)+2*xsenazq!pakIDHMx2O3RVb)*I~Aoel8(?uwJs(H!12dzv7fj)T9g=h#!l!6** zt`P+D`B8C>ziMe2AWJ2zj1|~bK!!OM;Q=Qq3MX`l7p0tY>C&Yz!Dib&ARqf<5A|1C zhE#DdX7q_bbzOHo{QoQXowZPI8^{x2c8m!pJkN8wCV6J!@L3~)=Oi~HBt$TPJgl^T z1xEWV;%}iu)v=JvgO23NSV$3H&sc%R5v;K3Czxy;wYeFrys+MEVMVpMwSl|zy)ZLf z4?l7BI(gAPLBidQP^n`ERK#kF6W|7nV|VR-s3KSN2>9QFZwY4TLfM5D_EFCFGX#(a z%o#>d52e{L=N@j2^T$&vdjxOaBtTmm%dQL-*HcqiqAkotFj7~r7FK{jG_l@!_>8AP z_Havd6SaZGe*?EhV{%!C7P8qcysF4V-J-5ptgmvDZ1h#&Ps}Yt;dqc!k9D;V$kSzG zuLTHOz{NaAXB!_6TdJtY%Uk;Zf_cK)Xz{9y<+$m>RU*IXnOOkH-7qoDiH%E3fVQjD 
z$B;WvUqX<{0FC|*xLAard&*_T{A3)AR+oVGS~N~^N~ardf(C+^IM2lKIT}CH@fG+f zLKjtBIIq)Z1KQkVY+VD=Bz;+zgwF81fu~L~F1iKw10MHEPXPM?3`cWK;ywP^Zd)ZMnj*wuQ4>=Hw4y@qiPJdJLR z;(Vlccot6z{wUs;n?7+UEPJev9@W&e zZt_k=4ds{meuU>A_2`f8@5W4Mqlo79sh^~)kF(056J6N)z|<%XlC#di!y}FUP2ejd%7rxjwwmqX!XZnassn;tLOLKak2q@Qgy?16R-fr3WMa zT460*nY1~9H8-FR$U9`E9l=j`c;f=F?( zb#joUjL}_`ahC0T+uq&{-~8sc!w-M>eT3S?<#3C*7J^?lZrlh{xE#KZ8SYOatn813 ze=W8~*`C-$7rF}wISUAW#__t%>vP2I^4T=)ASA!dE^Esj*`tJ+-ZXkIBTN-x;&~Tu z3n#fSsR6bTVm4o1#!MY)h>KlZaXMOFjPEsYZ=W5>t;>^P{^K32Ee`7uGdt!_juql? zF+S!7FDdbi>T|*!@$$@m;*!~7Aj&m&(Al9S-pO%^j;D= zfmPDLB!4Po8Q0Te>guH_xgq30@fB)tU7kt$lwPX81ql-VH>)DF1;{CP;TTa;tQ3r z;AsGGWpHV$Otbrq-%l^I6RpJzpzO0gmnu)zC%u+b?!mv_Yq@a;J=THorZ3`k@}emb zkc(DWV~?g*SFZ55x`fA%HJBpyD%ni9b< z%iuZ#5*5bS-zp=3moX3W(H%uA3s3qowwU}#rNWmd7E)XU9Rwa65%ch6?P7y0gA|`3 z@6j*uTCA&9KL8Yt32N==+4%FppFHSE74Sm^t;0q$4hf+~MjD`;B}KO@K#}e39pZ!m z(x7-5xEyMsP_`5Ue-9} zF_~>zT=c^R?u-%eVm!BzZR``{#HG(4lXapj3S^S+9+o^4@F&3wutKc6u$shj%ouL- z$V}_&#Qy^n(o#x-i~pz>Y$DEK^S9L+1sIIKk0mDl#hk7N=!})d0F7MG%wX!%xHmR* zr`ba}MiwgV8nGmd$mekuxr(Z9rJ0I(%i6|szpoGN?m#`Q4A#M;@|jltyiA-LQn ztk1FV8sI{03!ICom8Mox6Daf@-?5&Wf!~1NhJAw1uM^L;jy1nveO7nMsS=x4{jk!l zhpY5q51Ow4zAluJWdITPu=a9t^Z|%^#OxaAR?ZxA&buKnz_L{UNP*TM^rk-i)>w9V zVpa{E)6De(yw6iryk_9(yIB8ptHwa0 z47?1yRX`!xpdHaJ^po-_#~Ka-&;?D&Z}{%qoWek)3Qt`HMANd#6s=foX!12)GamsE znt&V&SW3YHH0IQg?J|G)9`)hXg9&Nzt{h@Mu}1u8e?@CYfn)Mad0{N%$e)zIc~6?s zYYG2KuO9zC{@g?NOaGK_<$RY%L_Wdy^tp$gIlh>4UQO3eN{;<&d9qe`EMp@`NtkQo zXK0k1jm0~j9LLz7qxE_Gx3hk>HzU}^7|qVkhh2UCI2P%c94-5#@6w<;8LTQ=W4cxU z8yLK5TsUGV!*OePtJ_zNYMIBlH0> zM^sF@>^L@O2`qe50b23b-#`5%8~f^=cDTo`;CGh*;${$VS#2{X0A6RQk$K)j5ZrUe z(Dt43kQQDXV0&V6!TJ(J#2A5_|&-290p1}{#X4KBs5P3oaf&C9JPKJ*r zI2rXh_M0?t8XcUiMTd9Aetw!69%g-}PwA3=JS=>sES~5l10g4;8Y(0w!lA;T@=5xA zKgRV@c(kZQ$UH1Gopi~_6j0cG&X{0{!0+``_(wL1yYXq>=7JK%L?0JQ6`#_6iRF-m zE-d4523yn#ot%2gC8;k8o0~{7%?#^;r=?}Gtx%nwghxo_e7=bCGypxMSc`*VJXBcSlVy0&QBu*(d3Vist1jb(e{C_K!TD`o$OR z76_1U8^=f`RDi>D;7X&WLNc$T$uY^Yw%HF_^hS4d@>KXIGtMxH(2=W!y&aWI^D)Qx ztN@|;b4}$mxenMuUyw=s?r&|tAC(A`7DU!fkc5>jG*RZQw^q|lTvT-P(#4hRqLL8< z!6VzKa9EZ>?me~B*gt+(NwJJB0F6p;v?icak{C|ZGnShC8R*uHWkR$_;28{<&b4W2 z*=}y#x_K+y@5A(2Kx~%>)leBWp{HX#rIYj^1S&%Hq)ps=0ZF&E%V8dHYze^2{91r5 zo9ip|8Fi!2`T%#1)Ofw)*g22J+cNg~=tt*6p3@M^D2T?-(xottiDPw2qMGz2)=6ww zRoeA1L5$f6=wcHKB7rd7H@9%dG=A3{0F@hjNjsA^9?d$fuj3sLV_+_A?h(_h-v}43 zVQIyL+{I#}0Km1lxESY=d}D%B1oyDQ(uGUfbpC<4Mp-2G1?|uRNgz7gByQf#d(?9; z%;qMUO8}5V4h5*ph2Bg8Zd3rH7BBY0)ZE>0ZFoQIwC~W@i0)0?m;j7+a4)X!Y(_W9 zOO&eXl!gckjL$rlnKR2AAcUpI4pvHg0CCZ^46>_XJyw}6p@U&bMqK8MC;w5e2m)d? zU${6Q3KPUHs|~_BuB>-yH%Aqqz|V5RC*YGv5S?T!zpeltUkk(x%WpQqW&?{4>aj=Q z{5_CC-IxmtoH_XF5*v^?CT&w1J6*GA= zLH4w*@zijh7L1R1P`-8ac|ilM0r8UL^h$t3tZw7FLq5`u7+(#9;bq*_2~>l{7%p<$ zn^>apFv(oyDUsIV$9To74<*YTPd?+OjIZ?h@!yYr_W4-%vGNY?v8&)4efc46L^7w% zFfFQMKU%aKJ;|DQ@YEB3NSfrhn!lS#j&JgLmAx#%uC)oQ^We+zTn>KY|0p>Q@&kvK z&*|lJyxXJk5ER!9K1(--k8z!)2jT`vnB>G&J4%!d!tH?Ie(o5g8*RxNoyZ`^^TF}5#y{&mtPTY}isAM5*a>0r@tO0aB8>LioyD-yuN}$#Jh>$I8lc{w zj;9mO`SX1mPss3Q@KFZ(iFEr2>OOaouwt66g7i47^{_aXlsHT|NVfqmtb^~Up%D|@az`5 zb|16r`wD{LWxjhd<5_Fz0$hR-FQQZ_TX6OlmRCvdeS%9x1Z*fSS(cJ=)4u{SFB4DV zZ+)^CezDen*nlk$2fdB(@7ZQzE1WtT31b#^9K zUY-s!AFhY;I+Z!K5txY+IkbpGxvx*+={KiUKK9*WJUQ=`jc#~F?5vLGAx8f$P5j@b%wc4&eiYfm6FD zefSx@TNho67FeWp4}DRRq;Z@0D4$eI(PZ4Qc^=7f-I?Fw^sK9Zy&5||NmT@^=e0^? 
zjWB@jzmG}4BoUfv{b*fO0QO5dZxca1=^cg5gB$rY-v22`mYlk%kgouqJm+8Yi^1z< z3Zs-N5PtQuDOuC~5Wi6(D+fQtpGO(`lc%?#V#FIUyot9f1a}Bf?oMw8#jA@+~F_p_s5v&|{B@Vx23uv421h)iw zE?>Gx{#eA|#<#If9HQNAWL9D-a%x};_dkmkpmA=FT7EE&5%{8v=yrn3lp8}1ZU>-f z2dir>RWe1)udv=q#y05+txDQ}6Wi0&5BH@S-7F7^QZfZf4=44X6k+=ZSjcPxVD14@ z8atEHV!mkb#m2u>Z1E-ag9lVFN7uZFZ$AX~)kWx2(8DK=W zElsO1TX+BXvMl~{F<$nkQ&I_f%44CEVVoQ1Ufwz0bV%DsiZ$!#05302ekD(bQt-ummfd)LU@V&lof^wrso+0Kd7niOb?3 zOb{Gsw;?-FCK9<4G3$su1XKFi_4rKK>HDKm#T^%45$VI{T6eK$XNFUC8X zBi>e#11{Q^mjQpUDq|1@y6VunK&kV7mw8Ypc39Qlx9g#aBHsc`>!4q6GZ$NIqzwSm zGFXeE`?Fr=!xSenmH`X*nNxQ*>!AV&)!;rlIrj6ALluTvlwpe4SMdf6;>+yyqJi`25qQE*Hg_hf^>nNKl{V;-4xf*4vQ zkf$wxLd^L%NwPvXo(bCSym5x}JoJ!QLjuG}KXHy&Iy6FN&h<9+A)bHuu-pmj$bNDy zU2dhNqy@=e%Z_|Q6oKDU>zOp4q`v+&+(v*zW=xYl2upo?b27SH=b?!S^>==% z6)xV-hdG4U^Q1gu^j3E|{J-t%VT}Nfko|ccf9PJpZJFWuYVlAupUQ;+6}vp#4YQb^ zTfZl3>2Xzbt|Uz9xL?n6EeCV^DZnYyCd|A=td%#BFm7=yyMpn6Rdi{o5kCL8m09-tUF*PyPwP%YP6bjSJ@8n>WJ-4ElX`1%8W|7yl8a=~`?Z*Zy?ee77aW z?OGy8=m7Kdzv!34|Hm=tw(<%m{=Cw|ynQfM)|wChUCu!j36Z>!xV0UHT!$`4tTkvzd;GbnpKfLLXQBCd-beeH<#5ybYVh=PE0 zo|a=i$0ggBqo49_p&IS|xa3WC=y9EK_t{-7hrWTycu&pxZ)=+@I2?c(RvUzXdU4*N(O$*TK^0UvL~F`vzE4zStSBMtb*^J>>IReNTaO6*pe zqHsz-DAf%)c5O)foK%@mU{ta^wub;_fspu(XF(-WaLjiD(kG0=o%H0hd>{&>C|l|0 zSb?!WQJLYpzsJOzE<1itk1qE#yL4k9*)>NNAkQ2wJiO5!>ScT2o4P$|i8410kcBeX zK!J@`Rc6lm-p9>J6-;@#4-YS4qLPPKt8@-w5L)vzdIY%7OvBsAXMP!lb|o~re?c(n z2B48Z5Rhk44e2pi;3N1@~*A)+G zJ^?gPWOX6ZVy(sXLLLBuYbM9Z-R^<@XC|iyub4N=h;D8@Tm^ery7y4*`!Q~hC*awa ztRXyR9}KBm4U3a2iyQ=mdl(%b?%&;~y#SbkjJnP#00Z=>5a$tU4%QmVOXIp}siOXw z!B;86IHV(0M0Fc zJDR+vYp>%=#G4rOS#U7AKpwsM$oDv3Lq1e+#cK+Ztl0Js&j@0_wwb0@5C z+#>e|AfPd!pe6cs*wq5!a4ss85($d>P!RoG%vK1Yk_$77)6fQPtOQ*zl5a6T69F~j zRT=-z^{U8tsR>4FKORpxv^6CkO_#XROa z)u}>QtaQR8w4+Pug~Dd|xu2R0_ug%V{cYqOXx77Q2#6KlX*9zsWA_utI&f zfp4WL1*(jvmj|E{Tp0q6WdW)R&|3_s&7m7wXkuH>LMcz<9>-c5qz*;{i=W}ekx#(m zd-O%21%QVYP|=|N%#{N2nsi8~K`InPlDFjPUxD)?{clXNeQ2$#n=eemE#^Kn)P#U` z`K_sr98GZH^Uz0@{);jy`N?B+xg0W&I;=GX_>G%S-2(iQB!D59Drn(Q!z2J~uqNbl z4j@riST_P1Y zJ#$(f;R%962kIFlS%KxHiLYUiUA$HLxeEM$~B~M5|@SJjwykFCVb< z)O(O6#&m?S%v>}(coJN-pWk$|nZ2?G7Jt7%u{iSbZuvb(j#$xtQ01qJS;1`qr- z(BbAhjCcxv_3Z}%g$Hgt^2Ii^VA}t*{uANPImTEKIL@Xc_nqgfr;Z%qHRpN!Q@{7q z95+NT#z!^9QjQy`^EFQ%WZJlL6^2?4-@{vB)fgsRbN(zY!fb1Y!X`2Y{^bNWzZ$~3 z@4|#N10xySl|P}0seXo-9WOz@zjW^&f|dJ;Npj?^wa0b0*v%=jJ6#-^a{v0;BBuWf z;m^D8gjM3*Fv8&;3*MXgx$yUf|3&z(?)^NV(w%S@;q5z9_rrF+f#Oc!UQD<@)436r zH~%dBtM(@dDk_dgM*wAH#GDmM%N`F-YzKGC2LfLFOWhi+X&)iiED$y0%P1+QrKB`U zrJ=4B4?pm zYHOsJA6Ez0@p<)2+i=_sN3JQPUxXIK_}rDW?JrV#!KxGcw1!}y%WlS_n4V*jB#)Bm z<5tJhS+4^&FAyAi=M?&S-28tOo~yWPCx79eE_3#7Hj|v(q=i6q$8%j*SvaOa{?31v zd^wQ}6%LO8ypJNahxLTkFD8Xxli+umfXoJ6ZV^~pS0|OZaYZCrG9c(G?<9&|d^G+^ zgWV_hQ)})?-9)00%UzbQMVG%>zC!sf)=&a56H}Z%#Bc6GKdj&+?m#L`T2qyAa~DJt zxET!bbHrk8cN;Jt9&A^2_b&tXG~u(XF~s>2DR9Hm?s}4B+Z50va76h9V(b_USofwj zN?sd)q}9VK-O!oiSPHh3P-=_FO`eFSHA^2QrGSE8CP=;D z!qz@6WPm(d#KO|5iAqsk3B_ULvMMuuMDh-;{=Vb;I;ub)n|MCQhEI8BAvAleL54G!Tm%$wEkuoje~lNRGB2p8+ogbpYP*(G z%4cpk<((c#(0I{<0lO{f=qSkxDxj(u*fSW-K>8 zeB`DRzUHOR7*7q$zX2y_R+`8ZZR!oZU?HSZna2buTHfqnMaKz=#X92;&}nK@u1mUe zO~8PP#OwaCpLL1M)0ZA-(ZQm~n0|tbS~(e%+BsFHPqg%7NFGY|k9C|doCB(G51@0v zhV=BM`3QRQ6`2CsQxS$B2gpL6x(0~=rg9P*mS{p}dz8^1AlNZ>Z3Q+g`lSWyZ4cK0 zlJC#H5~fl1R|lLp+vJqV`dWDXGv#pgrPsn={q;?P$oIqhfKt8Nx5D-6Vz@TFMC__X z`k)pDQ;VVXxzB~S3U|Wl=5Cm}axuJe@hjnL6R(9nrzP&?Hp6ZgOG=QJ9w)bEho1}c zRe-v!cfvcINchp<pDj4{2CHw7%5A91W0uP(nyqw>WR*ohMWGBw6h zk4@QTllY8Sz|KzB!ZzbmX3TX!zxTiiLc@YZJMa&CVav zsEy+qceJsmedl@Us7oIRAP;eIPLo+ngSxgGqcHXd^?P0^_JM`dw{AjP6}slZ{(Hui zbB=UDc$%yqEEu7KWqc8k^J%3>r_h$02)4IS!DW~OO^VP~m+=yO)%`b~$PAd96WC^> 
z<8hK*DUBJXr9wQvg)X`zov!?De@8vV`NqdXDtVlePJc$=S6A+Yi=KY(kKM2CI0_0AQ>31SzdaIgk16%3T$0FJRjpYvp)s8em-uY#fa8*v8$`6O6WMw||LN9vmcbG_ z*#c4iq&JDBjk!E|hbc@Ykj!y4eZJRp*;vWc!Y&HWrBkXRf3YVXr#JBGru|xwC4%fJ z#hmGkQF+%-?){QSmVc$;Iv_byDacpg7( z6*dfi;ezNw^r%+b!;_E&tl-5=57f&>bK!w&&$in3?;Of?6#&w{d!9*<%9s=ey48*K zNxCJr3$W`KZ}!3_z`@3IIk4C0zwYpsPyIJrx>f^?3FM#@9N= zdj!Q=Ba1A{7x1!7!IK!+8M&iB0$hWp$f?_d_5gQdHT-U9Wv2KQdNJ-&Z z^U!Wzv{_|A%Nqq;dqWEe?yw42p0Fti@SE>pwSaKnL3=OKHUsD9K-~u@lJRg9n5yLq zI_TaOl~3DC-j;8V9wj%ej#Mapd5@M;(LzE14`Q*GebDx6!KNY`U6^=Ir8owzAB|b; zSw2V0Hx|AK)Ufsqa;}S%!>+#vrIY~+@&xN|!$UIw67#rz)h4Gy0T8I(z(Ng0C66*w zz+z2!%D%B4f{J#ImQd+(DqX0EMtZW&jFlTUDnUyZE`$YQV%1oHd+gHhb=ShSZlTd1 zRY24pF^#|^J|$o%E%86)M4-b?^)CBzlp^@VcLA?1r`2`Cb=*-2a{$0EyK#Kwo4Xv5;J9k3&;vl^GrC$p(bFUI_s(>}pUKlost9;>7=m1y^7cbyu zJBQ`|Y{>7IIQ=jaCi4@a1>f*Ble-k=!)#b1KHyueZ!#}%0i-QCKso23Jfpbl z(d5{7wuz6m+rUDO*@KlyRj`Tq+hu%;^u7HicocaoV`3i*blKo~5^L7YpYy7r;1EECdd%SYvJL%>2m8@Et$bzX~wLL!mwxb+~#`ti;8 z^z*@w=lmdDn^HO-e|$(ED3iDmT@YZ$(l4W>-)L+d!(?Mdknri$XT1D}KHFijOy@l= zw$w{-GEMU6q`zG^l9%&8#^ZuFJSxR!=+iiF$;)BQU~T1^po{hZ`fmZ0cNssc(nmS( zs-PcM;Za2#{GQ5WVB4`xMLb+C5OQFE%PKymWLK+1NhF(SFzP3{7 zlc$5ZN`YFNXx*h^Uji_$8KdHJ25gM;;i1B;=VTL;(_M&Pt=LMRA?eQeF(%mCEM^Iu z`FLZS7RB6SL2+SAue@0AE@5}Ctsiz~Uz`HGDrz>hKgvs)F z_I9v8MfYQNk66MIaP|jJxO2b`VRMF4BzAh?i#xcVvUB`roQe~$qI&)HJM1{1sEmfq z{zq0r8FP2&Fhc_#Ub}HET)c55TpX4Gp?W+#Iyb z=n@O8uOe|uk_>y$V@Dg^_~gSkV>!%fbq<21e>~x-9F`FLrlN5EaH}Yd!SH3y$Ng?i zatJz`1e9&CJ7f)ytA*EeOT5b_usAkvk4!e_V|X>EuE#!mKrr(L=LXXFSDDv4frC{VTQfWTBoK`L4SB=&yRHw|HNZa^73LzkM)H~W z#7&F29*KhAP~^1CP{=g)Q5%5X+&uZt=R15?G4T{LF2`%}(PKD3R18M6ZoWyfj)`KV z!l&Y;&>rh+J;&xo@{u|V2Jlo=b{JR7)fLMe1>d~h#O0!oVwv8{Pc+VULyKhO?zIbc zUlb%2Lim`aoiRW$O6`6FK&Lc|thP6T_#Ll^WX9bxQ>m2%{h0;{O)oQ@^ z5}Qm~yS3p-(naih@-|sw43bS<+zBrOO62i?sA4TQg<>t(2{aH7254^H=Ny1$wD`&m z3H}r7Y%bK2@$KT8sj@B&nJg#v8t+Gw*Xd~L+6jA%(|Ue7tfC?10PN6<;uVj zYRFB!9^PJk4}fbMx*diUcE^WZTrmgxVS~^U-&t8BUfM@^KoN zO8ep8-Tq$qUhx+0zw|c1lKe4?>tzoY)`k8?xZIrt?0bg;CCMLN*ysoP0}da=aIhR7+ShC zZw4~Jhevbd?e_5}GVnkjOFH^uMr9hPJJ1(7c{kVG{buE^E%uUYe4ja(wNELKn z!RvwZm8KO-tn@`)4bc}%uX?NaQSv2w@T;brTT$m7TN;n9l3yQFi*#!U?eR##3( zQksb%D`O6WqRoKf^hrk7f7CuNpRJU`Qtj6$BT*!0ON1PAbDy%;VD8c#94hv`<)}!n+QWokE#$QJ5p&o%tOYOGOc5VGQBmvJ1TeBFPz7(SS9^jbgIHH3M;427o(5g!zL+!2^`3 zodLoQXDu>YXfywyr!>7a@jkJnZiMUQc2?P`gOniFv@e7`uUmWN)5W2J9-CTwprvmPWxh68vGAqYOuHi~vebAHna$^r%5N z@?gB3K3^*QT;wa#Q!?oeZ&YyUVnW~H^vNE_hZg_}Bald+^%QQn)g8)hz^?!6l?&m0 z1G;i;Y$y*i8-3yc-Giz9s4DxriVk?~3ze*nN0e{BbOpiSBLqu!tgmnc`>p!`Uh@A( zwd{pz&Y3G?tF+u&KyjPFN&>ktrZslYw~doSyHhJD>Excr%A*v{V$JxZ$+C^%b&(h{ zZ!SWYC=k!Gn}7axCwx4e3mfIPa7Q;UCYUYM+QMs^w ze>2?v=ng;yRy2%f18W>N3*8*5axJ5q8+EZiQkt;90YX`se(V8O6}1WhU>I`HN|W*L z0hBlz1?t?$aA-a6ZN`J!l7-(4(qv!risP;v=`LrK4$>PQd{2NVyOm(~Y`|`YJ7JSJ zRXM=DI%~DckGuJ4U86M>Sro8B5{$_j4ukv1fwEW^t@93n_31Ay3VQ%Y_NcLl1aPGV zzYhclu#&MKe9oz0iof#t5L|=Dc`x;_U8CEmFQ0i-pn}I^a-JNJ23Ksk*$?P@CTfy5 z*Ss%B+1up%HXFLddNsTT@Kh}jpN#uknTfDM-EX0Iw}?4K;JJ|FK%eVa-{_(q%Nf06?1H9|v_7=Oe9;;hk++xJ2Z3+t(ii=o(_#7(XjExfjowID=5-9jj``Nviw)=gMT zQNFQ+oaWA(0(o``5}z~v6D5KVL-(_xP+LOw+6c`yfY}`m2Oy?e5wNL>>e?rddT|B? 
zw+aAP3EQjOx3SHJ`b6l@5+jX1TdZE>bnfNwQsZh^8q9^+>~xqc0)7E>){D28QF~#R zv0rO#gk54oWuT!_opEed!#W$}`&fmkKeTY^+uPrVi+<4f@1D;K|0o9rgl|AM>x=5yit7RY8M%tK%AIKjn^?llm`y&8ji`Z^u zV)+%B|9$9fSep+m3<64RHU$TFaBswV5N*+DhQ>H!pSh6*w9p^l2h5%{?nVvuBeOQlC&(&!_CGwARnZ9&j_Bjkd zI@7wg4DWQlos^ujmTijj*MU7pl6Uwz?pb5z78=M8)O)W+j6cws7*{ItinI`}jp}EY zK1n*PgL#_MF&V0BK)>RM463J^dzmtrPG?PA;GgfS&)d`b**6+(P(|+utKNV4q;{n6Ts0;1Hxe@)26i z1sKok1V?-{D^;q6-pHTjk-rF|F1TaFw+&#l&f++akU7I5W;v(zmcQR-LEmP#?-s)I zON{ZgU720JYqkoQ>NPktw?K_ZM1_%e^l{NK<1R`|!6e;8iB_bP5~lhHI^ z^KFc_2`0K4_V@lLgt)hmjAhScFqYQJoL)4sj39)V!@`|gd^Pl&Tll=(fhM^h!psm5 zG`^2Lmwq&e{fCtg!%OX748M8dXT$ZzKM#Mr_1}iI&Ijo&qj!4h?OSN(n*^=sgSsD~N3uWVYsPJr z(+1(_v!fzVuPE(cNjrL1G@X9?>LM0}SZ5N`qKb9sD?9t)a~lnO^bz0?g1*04PixB6pB8RileZU_cDtL6^{5^3}PEYr`U|^XO%rf+csWpowpZ&8A zT`2%Q#u&zoUeG_bNHFUgoKpIYTlFx{*0B4$0w)4q|N8Po_|~O~u!W_J$$QsvU;pk> z9>9x0#cYC@$CIX6+z<30c8H$<06+jqL_t)3HpIm^pA|4;N+0D5vM}o$$_Yx0?l1x-X^LRxK{@HwqwiyxjebhY zOW%{r+{XpM0^cK$@KD|f57KSMcE*@DITS6F(JbKV+%z#q*c@5k!s-b3I=xW5&p_;{ zauj!p;qOV-=0gJo(%8MS>!Je5%9tBgz$2=xF}Mb?hIP^IdnmYhK-(@sceT1DSNdlf z&Pbg{b7bVGA|}Jda#PxCLnkNUMw!B z1&OhibfJSa3o*+ii4GQ_(T%&s@ z?}EH;=|$P>K#BpRVS_18|K;t`A(PutQhtyxEUd{^V_JB2iaMk8*+WX;s(G?V;6d06It+~M9jTP zr4|;Kml#Rnq_U}~^<5vD+(xX}0~pI|y@aBfZzFH%+KbzsdD`ba>S1J40aBYFO}CgI z>v#&}>DK`+P(?QEqWc{>Nh`yF4DgON5#zzh!bZ04wj&nV^Bg!c8|E^z;j=TJ2e`Wy z3Qgh#VTHBBCTx>9dS%+W*Sw2W*nRY-ooIp8rQ9x7XcJl&<`=06bWiNP94=UbQ#2Gh z)CCJLikNk|)?BD6LjrC(no_y)t*;DawFvw_jmzVu@)tv8ekyzdz_{DFkA4AY#`txa z8zn%hEb+(aLUzSpnm<0?(M`*Qf zQx=C(FeTLmyYMCr0AxU$zZ0TMEkGmFmcDUjA?zR*M7L#VT!2waHDj>pitC{Pg0a$B znwZh>WG(j~1I9)xXCw1zd)b#K$SY%ls^g@$4va0Etb3Q1LkFv|7GPzo(P0hl5w-$Q z3mu>haJI$9K?C>5454819Hu1LCdeb7>5|8gm;clg4Q{_fzm%E(Rd`L4G4Jubmc{PD zYT@5U{?rPrL_KwhV_$)10rv!%q3wt)BC~fSKckgj7rTpT^&!>JJ&->`} zT81h=SjPr25Q|JPt&Zg7{Ehi0Y;t^NpIY3hKc;OMuhYX1jMiO|q63cdbjsJ8Ynl1+ z8oJN4Jb-y$WbL?wr_CgxWEud%9<-$;zNd`3;gg@m3VLrL)G;?5>UIrXyAg9*I%pra zd8fT8zXoDMFTnkoyvvw)#Y1B(i@6}!CC_x;47iqVDx6l%qkc;*|8pz=MckwR2Jo`S zWajL&Xp+l;By{+ry73Xe^Sl7W{(dcd{Lw1}M?{(b(tF|R>o-GXlEs73mmNo576~F* zKGW|H(!*R=wg5L!4&E+;EOd z7MEv%|4~S=5B_Dt|6KY)_&7{Pl2Qcmbh_ zg*w{^H!F9;d*u(qR$(`6<+nn$elPU55gMSG2kU_VhY#~UyBl!_jCy7&v!Qt9E1~ld zi}3yi(lp`{CoD$k1jd`THEj+eW!NZeg?g^R$s>#5tCPRWPSvbo87)|p zoaHp1Tm?X`u}09-W?1Rh!zjI}Tu7&>8SBM+HE~_WbOBV1xP#!-CVq^YPw&jt!ZZTZXRs#N z#vK1WVyU(0x6{BbOR#5+KlA@Agn$egz5>RmVr{)+4>EA_0KW^lj>y+fFI1AgvIu)+ zz^xoSwM!o~mKQ^r-R&ROB$)Z-)n@n)BiN-VoCq}z(jvRJzx)19xKi(j|Mk_WunEw` z^p5FY-Y7=E>uY!6&+v=U<;U(Omd7W~J{#d~k>&oSeVRjyEy}XXHy+Mxm z`3#GOFl7Ye5Z1LEiWVB20@*{s%^^_s+g19m%00-KY`~<$ z6?363_Jb~hq|~`?lPv(-lT3$#PZ@4>wW0E zgHPN#KuEXA_4nIh`O0Fb;SM~+l|S39gwN-{81gs17}oCP!}2abQ)MY+b~%9)V7Cpx zwMqDweJrH1y5~lKcr9$ujh#H>58alafkBsf#L0~@ecX$rkk@OVnPr>+DU931~vEsRcC&8tVoqI3bTF3Pj*U>$!CUx!VfSgZj z)xseeo7-Dh+;6i1NqBazlfbg|Ysm-zB_cHS48hmmZpzQ`U3)qZ}qV#MM z$d-ThkSC>Ifl0?Nv0{n0L^^iuly`W@g1~;8{_$Xgyx@@fA3~7aOG=1`1;G1yfbvb| zM?w#ln3kI4d-xZtM`GA{a%O7}c?6KPjpf=Nz-JcAsySpn`^)Du5!kghkUHRStTSck zCM$hQ6Iy+#SF8cfPS*3`!d@X%0mNH?W?i1~1ae)~l&s!0@G#4qbegN7 zA2D7fTo5yWo7LKlF#Ldxpil0i0~*&anPBbV2&=hUV6|%w4{ws z+nV(52eLc(V!mnK>BZydA*$|VC3LU>ulXCmEA1`Tk{&y9y1jey%~MG-2{PJQn+bpU zM_&yee)pAd?TwGa;x!qju3IkHEI=$c2DgX3bNAsuTZ>|u6HwSa$)X7kSQxf%P7-jR z*i@q`#)WoN*+;oJO7KO9e43!1T4Gq1X#5_FZU-T1eX2s-9jE)_Cl}x%rm|D(>=3w% zf7qM{GgZJ6@Y25Wlyu_rr##c+p&P`4n®N*G1mvBl({FKKG#S)bj+lLi$*zk=!g$;TeN>y+-zF}_bz)ilBUbNgFuLEv0{$kA zVl|5dm=K|=pFuqL5T?G#X&;^ncY}pqRbxb^V;#Xl3eZ#A|7v*U!_S2d@f-F4$vnZs zRR&Q!RQ?hqe(vK?TI__0Z+{~c-}zEl!i3|eH6!m|2rIK&;cotJ_)hVMVXe4MY_c78 z3ilAUokIdG1Vo+r3<3u+E!Ka4)s1dtNz#RPt-8Qr2(#R;;7;ONraBAo#pyeZ_2}Z5 
ztpW706LMS&ETgMWE{#(dWf=(5xl0JCn87P!Cq-G4(AvfAroD~A7oCN^u{cPBu|0r}`dLv}sx&-*nt9TWG4*)L032ClL=E~QnB{u33J|?Gs zL{iKxZPYnNz7blNXQNVh(jzXgEp|ZBCK6DS_F5H#lu8nUTmtA(@Hi=xvFid_4G3kA zmB^|>1@V28t~bB?TtFTPH`pGUq03>FvAnS?p8H4n>V$k1q0hO{N0D*|OUsoPAw8M` z&R0uYRfG<{vO@4;thJ8vS7)Ij*c%)B;nF2sTZjYUGtz8_etByl7v5O!0eqdW+>`Ep z+U+~3od2J_H;b_}OAhnS-Iv^#+N<}T9yW)ZMKdE(q-;@?NJ*3gi=to)ey|J(HVhc{ zgCF#4_}R~TFyIFphF}{IEC{jz$qy1OQJf)%GaAj_J=0rtb#1xc%)I;J?~DKbnK!dC zv$DFYx~qqMs&3x@{{Q*U5+_cah&T~(^84cKTevf_IS%um-~IG|=M1Frn`#KhS{NA2 zR>c64WyX-ke0MZIVxow^oJSOuGq%CZ`VPtm6*+BUS=C|06~H2{pqmtoh516L2R{wc z?P-2mdPO-jS>;Er_%JBDbGL+*NSVHNI0)gLpKV|zLyV#4uR(uTQ@_m#Zn#1fiyH`= zt?0DD`7O`D>_zy4B7(n`Gocs3Qpx+ss|3V{&+08PS1v2{#XAR`U6vaN8A+yJBBuZ# zsmS79aX~-a;igfntEF(W>0_0FZYB*8%6bh}cr0j_P^zzDxzPn4yTrUIQ1=AIiHaUS z(MIa9zqvB0LjS!sP`A&sHn5`052D!YV5K@R#ur0g##*LMtS94md8%3;fmye$A@65v zQgq|+k5fT~*O-mdkwQ7a$~A7)>chG3^V5>|gD+tWBoyI#2)r2Spc`6QaXZ@WxADoM zEczwT0ZTlO)A8t69YvrxqVm9WCm9_@u^*X@L+FyPMl-zPy#{Sc>J=abdNF$qf;- za6LyU1DxiWCrh~Fx_;&JLpML&WQ%pIm+I?jvGTd}T>E*}_dOKOS}9G^4RHoJroPoq z^S5qd9mIY1=qh)>`Ta(E8H=+Q`dEe$w0CPrV18VYC*Vd8C2^~Ydm*M&x>92A##~0} zSAHpVZ@-gz&@HU5 zwQj5;AM|LCmRlz2g{NuierwD&SMbbDQ5Au8W{4;MH^4CD0C{vD>%e*7TB0oj4j~xe zCcA?RWf!_}K)Lg}3gmt$is-92Rfl)$ATxL}A(YPpbPv~zbBg#xA4$JFgus3VS%`jT1R`iUtpiNi zvuR7+{8<54RtSPSN7lBC4D_>soIb)mxHb6zoi>)2*NAZp{n6rE3V9y_zFRm*mzNsM z87ybPIg_JjjpwC2rkyy?WFWr^RAQyEnRaN5FjI*Aacs)Oum zJb9ge@_V3aDDK=8*ruAQEn3UP(N#kLp2 z>5Ap3GQQb`-+4l_x^Td;p?$tF$6X`nWQz&UwwcN@@nAS)agSpV+)?lpA16Wn{?WJ0 zpuszDZtbUKj!c{)s4u-;kw3a9e?TH0OsAq&IZw1$9seN<1&A*hN!@W@cr zyY6HGY2);C0oFS#?$?MF(u6RbOohTi8n6G41eyP<>BhgUq~9^al+lg6;tNPKd?Tgr z*MFLJVJBPFo%C+~9b5p|?OS12v9ggm?5N9JNEY0Z3L6vRRZz;jD^3Qmg|2NH-o=Pu}gGwJJk+|4?#SPN%6(hXz!=R_G`==q$QS` zF$8_2n*+iWsQgf}@29md21fn?Mr%T3_$jaQ(EA_uM!ult@4G*c4O@XAv@kk+6rcu- ztM|`K(uX7;^W9W!(ZNrk(eg7J2vGabcCqqFH9;l83RYa}C&GV=beSqu00oIHwY>a%Aq*?u4tjFS%x{p%OW4=Zn*-DbyY8-W>`Q7#y@*;rJOFUAV?9@p@gaX2w||t=1b4mIarf< z0;bjzW#Fr8iLPPc9tToM1Enhkl<3`Uf^4I>=o6Q-hEikkWlpABs;B1K3JN!@UkFHU z-Wuzm-_c0FN%k=m9eiuqb;!!tksNvXZlmaGZ&43|vGI@QP*PV=sx`3KSyVnr9zP;%zd;LP(UeVO6c>8182%N#m@G7}S+8g6m12L7C&`bnMtwwf zhED)1lz~PkFLMg@Yrp#CG>6iu^_@51+3&zD&WtTQR2?_C)oZWfw)b@uvR`A3o=at1 zpLEF^8Q2TO)fhi8V|WhV!}1*~`4;1_joa~F`#u(QJ1F0JX%B_e7*7CD9Sb}igBc_G zt3_m3J~Ea=EYeC#OR2oWeA^@p4D-4|T)oQtVrpEv!i!oOVYw$>IPO?xmTPlqk(hq- zS}&qy9xyk>xs3mvAPkqy@pC!`m~#6zQwN(oN~@V7wd4d1tXTa&pc zqD=8HhzfniRa4r;Rn)KLRvc~-IDLGK^Is)hvH4NrUCWHQ{4h%7*$=;^b<)5J3h| zvh`KY2OVsT=~HXylPWmImcrZ<1SX(dlrlvr8Mkf7n2C~$sl zJ6$X;T=gMLh0=>G?FD4RLIi=_h!F9S=SOG~9|D!ZhFXC&J^gu3o7K!BWO z7fddOtGFRNkJV4Te>v@kBIN9P&MP()UQb$Ma9)K^_?bA?Vy_R;^sr<>eh;{|F;}gh zGDtEDL*gTR>)Lba&2^cZ@9{3CzRRn)uKjMhw)a8$;@$70S9ab>KW3rUrBN#!7hnb2 zw^4Y#3UmD&aTB~Z3;C_N>*+3*eWU76QyWHaqq0Q|u6f=^9fU=m$26P7?)U2Nr(NPe zjGQy{LzMBO1($J5j(X%97f38HI!X4yt6Hn@ma)5i%--N+V9zxA1_}gu0LA* zcj;RH5@x@(^nC5h>E-&brq=L2$6)I|Dm;qmKUn$u=~oy3)AYUeKTqF2_`lL?&ELjl zV*$pS-8%YOCzeGG7v(NW?Gn3>WlTowBa=Gy5ja}7t1Ny1Q-y*d>%9+oOePtQb5jnm zM`SyxHJCC}Q8Tu-6)C(w4U105(fwQl$wFEB< zEWrhS8{FOzYj7PX50Xye`R8Oc07Ng5@&A~eZ2VG-!*vW4Z!%h!k&z;mPRE7KPH|Eh(1EOR!#o?q?2uM+;xAbq2Q{z|&s-)0WCLU~*#uyXSf$OnLw9S@~9vh@ab~Wr6VW}x6RH)dli-}D7^Xv0@os?x@3dc6t0rI2A^zQd5nMytQ2`ekb(B)Y`JgFg8@ure?x>X1H8&dv zcp;wE8OxzDxM<&yHMK>sv72jaX>nnd$)k-BCo3AT5$I?GR|iL%b6AssA}qgC_f+`H z*xY5qTijuwgi{}P;#(X>q5Jd8g>@8C*U~bI!8Qu--uNyGvAe`5;uOmAmvK=f*#GeD z)WE%PX|jqUj5z~yvWT_X7V)Pz)(E)ZBGttzjd$u=$zb(AfI;cA8B&ENiEj>Xvx)K; zg~8CBW|9_)MKmqqq_MIDKm{d)vr!Ayl2)S#P-Dhf3tJWI9==f}tcW_80&bU84#uhA zrdea-v@+zVD&UwgMp4D#AKMV^uoA57Jn6H9CsPB(r7oib!ix0JE)yvSB^7<3-U@AC 
z7zHL}+GZZk$O&4iT_pt26^t6>bSv=E#z^X_$Yi~@4ykmTxx9^xC!ZisryHDjV|#Li zm$b(u4r&=|e^my$-k95<_7!MmjSaFAAI)`a5>q>HEmwZ25HrnMeZ`IGAG@P+3oycy zFeXS3FZZCY-00Kqc{Y_-aRli;*GxNm#9qZsw*<|M)q?s$M;frq>Zc}3WDHp!g-`}3 zTg+Vs$^M1aTD$`u0MN`D4-A&~2ewscm2nmoo?>~S?<#EHSw;bkb;l43IX7L(9%oKA zaoKXR!wFaS4LgBeAgzlaAh1a z$)sgdJwsm2BfoRRlTbMc4-=R>?cU+7Q!lJgiR=O?%DY(B!_r(vue0bjn8=yJ>+sQI zT{z!9%}LxK(iDVSZZ%7GX%E7+1@T#e(#lt!Oa^VNLFmd5#5_=|BfE(!V7u`OjLGxq za{C{pEv&8v04WEP-v?Mry~S?ZCElO4@^XRy-lePQ53l}!-Gw(HyxE}o*Y{2iu}UrP z&suKr^TH`RSxiz&$=0Q-85Um|Cw;Ouv(1v-^Q*DPD&o2@IV=GvvF6k7uKy3|OY^^z z-syga-FRGTOPAA^7yhHvB*w#^S^d5A!i`@;`LUiE22i|Z3{hSW4Xn?6a1om7uCvNn zuqwnD(4zB0C0~nI{gaQ-$&y?qJ>|{s^ZW0dH_k*G!RctuPZ#T%i4s+wRo#i|C2h|k ztj&5h`~KhFNa+hOApbM^$9Gt(+{NBuwc6mLQ1`4PX=f9r;}>_kd}k1p5it!>1ot@A zQWuP3FmM+H%17v2xRe6GEf~*l;G+KR)doxk0ElCs z25aw%RhB0hYRRNPw1?763yOVQJQc*e7j4dJqRG(y=_E8Lww5xD`8n48Oqgo( z-WFD52AZyDA%Y;1>CojRk8=u*4bWbcV>;sOUTAN-&8xg`ysAky*%T+^PWv#nP36m zCtqM0xP`7DR8HBcqT&+&UyyriNj>c(vl z{-$yjMbnM-G@M&TDbhe$!yyqwG%)^;XfGeo8)U9U3&e5tbkL&83EeSx-bb<5#u{_T z=IU7C-bAMyvpFo%R9Gv>`?~*Tw~6&5F|@+E9ih5}oC4yq0ft*+bDl9Ko-8R8 zxy+BfaMx4iCcaV*^_F#+0(TnBsjv>3DzC=~+dYE+dun27w3x0he-YQoB@VwJ{%iL( z_pz)S!gS!qyfjCQs>`_bRyp}GCyi)f8(7~JSjSX))KL(yw;mR!d+i5lcd(DEUk{}r z&%s6e78Z-RB1arP)Rx++n3GU z>?*r^y!4ofK^~}=Y$}8dP_MQA7$pYAUfCMYbmdmi1bcT@$`m&!cTn6_fR$qxu5-~Z z+mW}$QXVG8;?EIJ`fx3I>+_Oq@1h&bjDelj(oM^?9(1;cPowemn&3>F7AQiUNV?xD z7wKMUbDXbv(k(QzR0Vfci1EJb=K%UPVvW+h&oR|vHDnx=p&eb3(a5#Hhdtth4w&b@ zn=832+x|(vJ$+WiU^e18T=LqFzSXa`(Mfd}1EGeFpE`-Gx61XJ18YrF&esM)>;(a@l*r zE4w@UeRi1_d6JD!wt`$F|7UrgB?y2AF8L=@3GIBnM(iXI6`VRu62t`r=W19WDA;Ci zuW%~P0Y3Z(xJ?b2Xr^2Varp8X7f!@QjsSMC$$fgy$olYY+vagY_%Y3Rp*@`Mquim- zzgfSN{z>P>^ne|+50fHLuQT&goE+)h!crU~o)O5bGg@T54}rag_5hF3W7uTADunQ* zt@qM<%h%Wu!DKsf=%VBTwv&Fc+(^GjF!oB{V4&O#B9)b`@BeL>&pmvN8c5SM1urG<*N4CE8P9VJ{e`Ly8V#)mwWV#_bAieR#qAFzx8V>D;Z}PtW(RgU+~O z5X0*in}0d|()?eI;*3S&KCZiVWo*2!^`3)?kJTl6&UFa^!hoI)+#1*KEVBs*a~CMb zJY}I8u{e2B)k#>%q-5skk@?K#`{qLP>nvD5gURBw$it-Qhcb;L&ll={mJ`ojqc1Q| zhtE&Ha4|AQiAhRg`r4gVx`Hx2+Q`^vr#13GuIQWseI!1q_nz&a_x=5O6kcD3x&A7l zbN(7GjX#0UeUndE6PoE6NH#;ut_1|!jFrtAW&20$NN>OVLR$Or+o{%q*3EYEDYV^r zxCuS~K32Pbcxf)ZGv_huEb=gJPpWGBt9$v&JDebf+lu9$HWc3PAcbi?oF}iu+kf?9 zEq(XuDE*bU`swn4^Wnp%z{JxU@ZpR6Q~PEJR`gzlg(q&|Yj-FNKh9>rQvo1t(oQFo z72vbVxmK$M4dbL~;>dMT4loTv9WuZwNyLm(p2&On&w0Rn$k8Q&k>@{Xw9kUl+JD!K z>9g|$Df?>h`=G}NYpTdh+#*3^5nA@Ol4_w~U~}W%ZY$m2AXZ8fs}bTg6sx=GmDd+I z?JX0I^L11=8|_&pZc?&Yd2{VbTAV`(06#+X*F?;4;%fC!n$T|M*pRUrVpUb+I+R*8 zfNn(0t%}M56ejZ?EQ64>kH*6Og)mGWEs|`t?}bo>P^1FMp!LT8(G{|o~#NicDZdrZEmkKkQL;c^-G&m~R(?y`XtwZwX9Edo?47#0=!7F2_8+iANT zJNtBiYv@1)i};%j0(c-|WP+!+giEZcUi+SH<{y`&_~XS>PSg`pUsjFnupcTMVZBgh zfIILF_S$wihC?LQ`&d^jqs&=AST$y^)+t)T8KY{{LRs|o`}FNn8ZE5EcwW77HO=G6 zktq>?LHo^=-o77}&Vd_YzEbjq(h|GF(7`wKkXfJb%FH?j2lm zbqUn6Pl3MyK3u|rYXO(VMp%n+A6LWSFxwdG5)UgZDOohcEqMnI#i*l{UBp$<{)SRO z9*5`KBkDjI)umrctBYwJC0&V=CP(00a}8^E`{e?}km@ctM4{GU1I_>!agTSkOi_?m zc_PlMP*QQ!1~=MFoGmPzcHq_K3RrRBuDn!w38nLDx?Fu7Yb9nK%B~)Py^C0ZmFB=B z;S0aoXT38_13Tzx^q|Lv7M5=FePT54qzBCFR^e{itM77X4bN={mSoPUP{LEL{9qYn zZp81Ryo0^W6GxiUM}eS%ex%a3yiTm7>uIlZ7aE7v88cyyxO9D#Y{uDYl$LRuMkz-u zzZT;#1SjMzPMdP#m2+Puw)C;U*dh_&s!BP`i#qWloM#Q`20Y9Jg^wpnI!_GX9v6Xw z&?0`Tm=a&IdG2#6Ch4A9g$^&Wc+3O$0T#E0;?QzvALEOT`0d={+j5i{{bY(RZ)fGG z@Xgut{GKuA(Y(Mqe{H2H`-tI`Sjlv4$)V zC%NRDF<&UULlA29$ut+0%ulYPpE%8g7<>JKd&Q{{j02rFdy`cEv|=(j0H7zAg7Oj`@;5oy7taxVt7@E zpHWL+_`}y==CVbg$b#H@;d0tq+)fPyyBfRqN3F(;9Hy_RWO^cLt)*xfc6;>O0IK!+ zt1x^VzkR6iVu35K!U%skEzJFDT55bfjbK)5l|SUYJ2UkhFWiMh2GZCwTYD(aXyYXW zvR63a$ROs5Au6z5-#SP?ULn{s%yXV>`^nb&Tg>p;QQ!D6{Vg8-t^1i9qxrtGn8ox0 
zbk9suoqH8Jy3Ee8?aG@5InJyxe&NM*<-1o>W&a(X+l!M>crsnl+Uc8X^XdP-wwUfB z;J8ETE@p5VAF&GQJ3}4GxLm)u)k}Zw&Ow@kVa)q`vI?tSgyHoh#ykn1y(%u&#DsVg zrh2_SNx$(fj5osWQLa4q1;2Um=|J>?_5U1Y%Na+zq%}~)e1aDU2J%A>(31gBhzTsl zIm4?mE8=K*kAGgvY&!$9e(;8iT#!?g8q5ze#dlo$+Xenu;vEDK|A~+xOq|SOexA*y z5H`4BAYK&{hj6qCl-^CORm$*JDv!DyEIs=?5Pga8Ab$poW}M~n6Vn>ulh zDkvL_9b}sgnym{>1c9fmTE(bH+}_$oux=wHqpagV8QmHU{$Hk!GHz@AP-wCKP*0}v zK7xkARqzWZV04t@^s5o=E8`})2G2G}yt4?r0UUy_j90QT3E-1ATP69$IEtSp6*vmL zxyw)7b)Fb{WF8|O>_npEcir;mF)*|>)z;bW^uTWadEQg#D9`HbE@OoGdP1wAw z;?PylIx@p`%T_tmM`^lE{i}QoNy3O@4YtL`#oL^;i*+|5aVZ#L-lP3q%JJ+^+7W}U zuuG4qumBGA+bv?%VZ}t-B4G7cUVzA5@PLAOa)9fj){I=oe4{L}+Cv@MC|U{#|@vado6oKl#P-JVMd)uSl+P+wh1kNvy_X*>{iQt{a z7Mqmo%t?MzR>9bJSfY7IQMl{TK4IWkDK81P0}h2iMURCl@dt5HRmoMsYe%adT^TFH z1e=W67#_Pp39rYz5u7F#-FBMO{s^Ilv1?C^fxS$=#q`qkAZ; zv^1p+&jGv3jD@(>MG3Qp72Srh+;}GriLlXrW>W43Eu9nY&NsahC=R8G8<(`5&0FP7 zmCPedjCN7}c_>j2nn4uAC@r6}xw6aOrcBv^vIktYOzD=-d}?`KAJSG|m?rP#d1s&J z<>vXM=iyaKfpvK21%|r9Jd!W5K5K${q~SaYLipF>9(0fXlu-KkENi7X0YBpu-dEH2 zeCXVbm~hMim0zWhH3Z}IO(u;0v=}xliykQ3xDNja+};3R4|EHqyb2bC`fp3ePAYCf zQDi-CmS|?nwJC#7Tt+*!j5TaZk?~$2&k7-wUIBL&py6Y!1!-GdMK}Z`PF!TpE#ii| zx4V}%adX|o>qMoTha=dpCMKI7VCB1jCFnJPxQu^$k9G&}vTA3>hPq?%%-R<13mAx_ z0t%YVN2S@#r2dv$g$<{-XWf%TX~EzNYfFG?xwPTMl}Qb0gKfKNX5(SHo2obn;@0==x=iD@FoH(@f#T5 z91qN=<;V}O{4&A1*?~f-buyXv0~b@?%N1Vcxw?NB_u39-wwYP_$@iZ0@BZ?u9PjrE z#pZ~1qN5+7LKR%w%>#g)%arNkAVB?-Q~1;k6@?1Li1F7g$0A7L;%&n z^)IK^DuU2=-^L-;r7%loSJ@Ruw;%Q^d$v&g8o2%d=JUI7X7RS3R zq6Fe6zRMM8!vZ_0_t}|M2z6)6*jtv46?t^mD*QbUqjVKgpjT(Ckv^CvpU?5fnR^Q! z;9YjVwC-{wpOxfTT;=HY`Yg?ZCN{TriHi$8U0z80*Dj}JcHT<}tWTry!lMI6{q2=T z+9GC`$}egDlSmeGW51tn?hVuPFn}+%`soGYs?4ztI6h}31>qY9gY@P5owQk>PhIfF zHrdwr=K6hvvJM;w@x2dm;;iz{y6^j1kv&T|aaLPDE%$>@V0^Ul(yitMZ~64P!62SB zlHLy{mUiVd7Fi?^O1;0qs352sqiYGj-a>sJx`%+o ze44=YJ2$lWMZdt7kQNZcOQEze%CG}IW!6+<#DtPZjkfO~$W*l4fR?(kH^O~-LVFD2 zk7)#J2XR=W341ME&1$J3pNFzxgjo524Ki4#@8JwNC*0M%-ygGGr#yZGRSYI&Z# zxI$^&WhX-{6$~>e_Aj~T*r<#_wz^Q#;@cDB8q~MMdh`Jbi2=V6&m2Df`^S~Q&rziT zYTA+uJY)J^L7>c(NttrR29PCiL2OW8$B@NX#Q0IJf;&Jx9WN~4QHg@`R?Q0CXT25z z>dZs?VeQcZ`j3qa0yByz#<0QZgQ3Wy6J4-km*)e<-hMh73f{=dSOT&k1#a{>A#iJm zVio0Ai7^~=*uj{^wTH576;3eCt)UoWCrP2&ny@Y!JBhFu+GWi$+(n%-%P7u<8@Ptf zc|ZgW1ZGYrc?c@yoA)W>fYS**Ji;+h;e{B$8e~WnW>d^J@{64D9{mK@J@qrJTzHgQ z*p0C$<8|T(t)&%$*NbNZHtZ%St_+ah+1aC9EMf+&m?suDYi;fZr2lQ1vFKzdc6leP zk?4EG3`SXYK>fE-bj%^-FW|i}q2E0$lC#VHVzP< zfSlLl;a`n8exvyuu54K9Kqtz?hnpY11JAOBW!F6QV|B+kjd5S!XPmX%!!|7xHsNwd z>^LV(rp$rP(VyZ1?uYw$CU|Nl(6Ak4U?^QG7;jd3=7s}+g9MSsOMW|O=ca({u^icJ zPyjAelpHC+k7Yajah9^f48dbkMH6%^(`KQi!Jw%Oz$fr=eNO*fB#vRBN@yU^rpl&; z7NMp*_FqPi#OW-}u1)dGu?spRE@mGsvO<>=byH!&%sHeOhuY{VaWoA)vDB zM)^Vddi@9No)f#Jz-c2hNyS7BL~8I61x`g$9>`j#nSX9`FZFggo)}{3?-7h2Us)nw zg@vcW#L|RSK?BqZlSt|8u!kuq@dG4&G3jU8aqLn;_!5&^DL60tScc<2oY;?>Y*!No z?o|TU&oNOua>S3BN-#lMU7kL59<#7dFnh(tFqWL-jey2R@cNe_ij|<8N6&>{4QBhL z?OW;n<*R8QGuGL`DuEvlnpk-88M%YV?^#F*vp)FYDULpocA1RRF}oX;-F=#WbL7dt zpKM(+L;rs6N9iVRjW6|X&VuNX6$M_;z&%rTO+OZ<6kf{}D=tchcJ8-z3=SI(L*jvPGkWvawuRV_v&pGxKNKz>Ra0 zJ6F$juBR`xegOud&f--|?-u_lO!Hf5dG2>96J~DocKY+}ui);`PVZFTPdn_|zerq% z132L=x^a=}?3wrpDT%a1Q!ha>ZnH8Uu^Hz!^{GD;qpA?c%LoPrdOb`$sB%DvK~GI1 zcL-U%?aI@@LKR%M*`&CSYq~YgVv6;D*d*)?=K7!fWH)_rlZ^w{0}2ZU_X33)`{8fA z-AcPyiu}QR8 zM@-FWwYhd7{N%_wlf_#RH;S1Y)-Lc@@;XI$j3RZZun-sRix;pUkmwetY%ioWf{NFO zVcEe7ez@@#yv40FK$zMcbMOgg^6_fWV#`D1#YMiKx+s~Ji08A=zygK5S{syDQ?+Pm zbq^5!QGvk4*O_C|%N7Fm5Wz)qU%`@6O9n(0G~&=BPTnlw+JAWmMH&`d?eacWDXS>U zS2*o-8|xLUx}fI^^%d&oAb=j$T?VYTkIc6Y0eY%6c*B^*T1Gt#AU7@`^=zCX&;e^> zGt#X1%z*Ks%SFr#_7R3E2+=nY#(oIFAHjSn#B0?+^#T<*pI%z-9%b<6a=6=(9Rv}U 
z%03E2%6NGiq1<@VDHpQDyZrjBI-+(OWN%Df*}MTtm=e}V1RCKRizND>B~;7}l{mz2 z+AHm*jpiOMN+`RW1k8yp<7_;l!u_{Wbq#BwOKg#qE^%@&fZJ&$>Z5MWS1GP;$39MOTZe0yqZi7~_p78gUX;U}ZBjZ#lv?xMmRWU?-+M z%5qV$NO{=H>@wyuml6Mn>TQBqus6xBRj@E<@v5>0gU=J((D*~T2rF->1`FsTuUM!D z0nkaV1_`WB`=vcj8YZ|r^3oW0%`qD0F8RmA3{#;jzWWbtFrQ4(G6Eh;MZ6ZO&vC*$ z_powlOgK~wTA|HhiNOB*r5$21p^&2wb*$!lSaWUeb%1{se@{!gNqHVUbvNy>?$wId zXxA^Jd!&pSChS;pcA*EuK_l%DRK16?Z!8hfgo<%OszEu@U``*vOYdL}w8uT>JW4Os zpQtIB?cfR4qhqc3=>8f(?XT=!A$}&o*s&6GF6=?8JQ*~NuoT0UwaBLRDwb(|lxba_ z3v&;&(lvOGa#W}~4?QVzJVK$$2I)F(p^M#0+T!%gHnmzawuM;F(G6%rANn-}rp6sq zX(Zn0ZXWcT@=s3^Xb{3lJ53XCU`U&EX|xP269Gp4`0(2~7vtvGohaYjZt6=uv{YL` zS*iOb)wq z9S@)xt5^}^ln+d>ybKzGJdMkv`hoM3ib>C9ceQfhpXCA8>o-lZbSqd_Vna z^#|-8aA0nADQHFBaG#5GtN7kmK=|qMpFckJy9OiB;1k5294EmLAObjkYc(c1o5>)M z*DGt7A>&e$ZZpN4T&EzOuc%JerBTJ#1yWa^_~SI-0&kh6kUob6&H@v4V8w^f+R z^iNZ#;!eSb8qB$wj#eA46_0|gpKm&v8cG2mYaVQL1~5Dh4h7k>*;T!2fb6dm;T@Rf0Ayr{~-Or zQZLQ1LdI*Y z%PCU0lIY5LtVt~9(qCabM)*EWcz#l?6c#%KsqeZ0VU@=k^3iIcIB4-3CXl#5V;vKYTVs7}UDRcv>dimu!#scXq&kf_du>?ncG(@Ew?acqCk^dZVjY zg49!Y)MDeDc&G@8x})?65od26^J5qveTcTGHfWFOpMXA!C5gkJopH^nrzs)KOQidOE0Hx?FIPEYFg*{3Vj=L`b!(j&z@P)U~2<*9t zFvg`nWK#yEmR7(na9S5nLLRfFtS5~1>RxuDBgmfKLpgVVd+r1|V9Ypcg|!G>P9+q| zS_q$$oDV7q2W+zJ<95G^b#b3qaup7q;N*$$S@G1z3iUgCXdd-I07ygd$iq@x$B#|m zbLzXuBj>|sX$pa$x2zFGm;s>t%)_OMBIlPXSXQtr&<43{AabUPvh^S^6L$?|*BUzv zWtp8o=|^;nu4gDG3?2^(S~2|U+;63CjsCy%Zto``tF{2lpxMOgJ6QtAD=5{(zDIyr z8nm%GNlexg~*JPg_`` z$fzDqcCW*q_t+9&h0--7I5Szqml_1J!AEU=evEl!feLwky!;=h zrTJfH$MZIe@ik((WUis9Kb=Ti?Ht{{H6QW7qGO(re(@*Sjqg^Y_6Cg)i_6 z=IzSxdz6KX1xc~zd(BDu7i)#|H#n*B)y@(9(Cg$`OU?^bbyHp`YaVp4wuvb?mXDov zPPgL3yUB@NyC>a`L+0`c7HpY&Fg-tBtECtAabr9uX4hHON^=e9KhN59G?E!g^e{!{qbtzEirfob1`?DJI+5Kl00=Y$u$|A%H_y*`4 z!jb>ZOGR-CfkbyLdACTsqCg^JK0tBeru;Fyb1mXy3IzpuWwPV-=^ACNGT$l+^0r9> zZJYm9xL8Ln$V6fMG5%M?Z6R;2l!?ILSm^8#pt{K^S7m|)uVbN8Eo~u8_ENRKoQhjW ztmAD2Pu5vzhwUokX4YatFSa^y8ePA)7D~3=w4L_axLIPh)4)PtWo0Gk zxXw`>6m-(}bH`;0TZ5phFe?L-FtRy+C|M5}N34n(@F`1#Unt_<)h775>q@+8z0;2{ z$}An|<1`s}D8>*N8AHl;ew-;z=6ymmKKiE$MM2QHV81~~zvbsTgihGNVrc~>LlbK- zd45-U6*9W=)(CiA$MsJBS}Tb@xFWtdNhaXS{@yZfKEX48X?0JmEBm@djq}s<=HbbkiP| z^&Q^9!ve;&f(hOats7R3?I2T#yg>@vxP%k3$JND)=`P^~w-f78^mtRoX#aRQP z1QeHTr}47-(o*o;xTE5z2XvG%|BA!Qz#|EfWm|9FR_CRwhrh*D*RE(Y`D{-f)5{Hn z388=DFby^*vjaag3ZZfvK;bz)o!0jJo;WBhviU4E0H>XE)TQFcjY&6@caiCK!Ql!D zI_>_ov@;JA*@Z9K7G=tZ({7t#ImQk2kd)C5R;IYEPFO=!c#g3AWw8Z*!*sw#{2ZlL zSihtxCzEnOnH^wVnmY@NsmlQsTfoDU!@&StrB$3UXQ)%_x*9a!jZC2sV+uTS8WYNC z`b^8CjVJ4R+~1!myiP!D!YGKSfrBTWJxf1TB`p-%xG}+mFv%Jq@+E7fmf~fANzr6T&)4$sN$LX!X52*Kq(#!ernDM(LQW?fSUi*Em zhE{rk9nOsHlMs6MVhK76)GnU?Vx@k3xnz&#Fnzu`c^@*v&crD=Ag=rrY4Z|9@Ff;! 
zA(SWc8?;_G#FHtkTRK~q$L=0qm*NYZDu^twU@;XRS})Be*~2X|@@(m!4M z=jluPUr1lv{{?iq%fLMQ_t^bo&d#-9j<8ZV_@%#2z@zV{(&mp@kUFW<{1S@~N)%WO zOE*1?rL%X;g;{o)(Vz`Yd7v zRw34yzr$Rc`OS7F@41K`U;M09Gw4ycS^oz3g#wGfww8a0E8tCM@*}L1{&&{lAHYB@ za?0Qer!ltDx0VL!N}s^8FjqTN@g7R=EC4E23YhmF$JV31d|X%dc7=u8Qz`9F4sqvA znccyXR<$s82Uph8fPm3e+#8-|I)v#HuH+A}%=*R6Zu5-gQ+VqJT@rkq#$XU{$g1Wt~_(?Z0VHd=)^ zpr?<5z5$$N+Q!fmC!>m{4sn9qyi`_Em^e!inkn|7OBrs{te_)P{-`uk=|VC{@`yM} z8ylPH2DJROS6@wEVTZd}y3RAgN1)K(+c0qTQUtcQ9)+5>+V8VVZwV;15W=+Bn4sK4 z`a(EGVFkZ8K#?{r6SR+1)Rwh~(s7_SyDskFY54R-r$83@k zgOJ!R>sMb&3$4ZUlf56OyWLxuDWH5Jc2J$zEdfv3)GM67qi<_XHk;)uzT?Jef5e&S zU-nQd12;$&s2T{;r7mtYxcLlF?&R5o`=Nly&HTP4gjE%=bHhYlZ}wSM!>8kKK9%zC z)j=E1$1S(h9U$t zX`A+8WvY^`Pn%W9lvIAR#!pasx!EB6Ra%O&D)Utkh0@DvX-~e9kSiFaiN;jR=0Bj| z#Tv>el~leP_bIbp$G3sHGCat}UY^wFrUDym^1X}5`15=;XmC>_Pjpq#Fiuzv>mmpM z3uq9?NziS|sc0%U^qT?m(%ZLKAQM*N#rcTO9W*$I*NKC4P1z072L0hcoa^Y;T7 zv-xM?aQxlbC!f}9`@cTfVre6QHly$`wq%&0I_oDabnC2m20F}>@9fp)<0C;ldXnYX z=M=<-?iJ3LGm=5;Gt!l1xO`yHdX9)LyIb}1m9+Hgu;cXyOMjHUKlhXL^}R2qFSI|O z7KWJcl4Pnbnp8k7ZY`ug_s_nV>K)u+7JmaBR|jT`cpkV7d*rnX!{a2DEM7T#uneSs zm>BF#p>=qNMX8u}))3N~{nW(D>nJ^{9hM7oO_>3K6NpqOd=9sy{{{x`ZJ3Zt>F;f? zroYp|v>#@r!C@SQVk_;@-9L*p;iaATi5anzeo*@{2L|lGRH?{y*@hKoet+CbFs+w# z*RxBe?eEXn991z)}izkK8A1&9@dj#k; z<_|pZVY2L>t(4PUVgoI5r1@Fk^2qFOU#+HZT}Nrg5-^LVhlTwqy^u)^VGv}S$j}3a zpwu(K%s|dx{%69$%)<-9Ems!B=w}6HZ-RGW-NLsi{_;F_y`7ICBuT0i*k)hMUiYu< zSAlL!LY3;a^CXrUs#%dmEho#(Oo~Ky(9Y;u1Mx^7K&R z4a|ceGoFT?PyrSv;aL-tifIO8Z%-8-hp7$WZIicx@LhJERolQEA(sO#TE%vHzjrHr zzWKTIg(Y39h}VME*Z$5XCw1+n#sZ2G+zZRN5s%!pw|kFBgn}Y)Hz$7Ku3TtC@gwMx z?IAJFW2Ip>*%Y1#Xam2E`wc>W84FS^PzD?$M>HYmJ{BmBrvnoIr)7jNBz@&W+%Lyc z!KqF^%kUfGBh{Rr$P-z_gY$Dfxu?|WScy9!l#4%yrPCVL*LkdIFmUFSK$KL#KvyDh zJZmL+SoZ-@Y9TP+C(hF{g5(@gsf^!MrvLLCoG?P6vWGNpB(Ns5({)srQPig35aQ}Q zb8Q=(Y!mLl{u57%iJGcd3XK_yJ?7v5D|{u~GBLlD62sLNFNp{SPu^KNe%J+Z{=|b^ znU&|UAI@Rte5ROS%-JwgK^1KgB1b@>J;G?_2A2i{pG}sN(Lhn<#K4;$IA{1npMCXk z>=DFqtlGe`as$i8_q+E{^0d;mCe~QMaS=Mhgk}J?#R9Z$mj8%prs_4;?ECS_0I4vVdA7QTf_SBn z_EDZSDJWwt{F=|afSn{Ac~im%i?C$u~ezVI^Y7~8?na8G(tDx zl*Hhpc^?|J$8N|0W7DO4Pzjz31+TP%fA>*v>Q;ZFO-xGGrh>R-9-eo=g%WsKW0U(< zt&;ArVbLQtmzIMzbsRVjtQ=k%?TpAcc>9sVnD(<~WE<$_ASt(y<-C-y#rd~+yNq** z&C~^^gfi^3wDXGjQ}&F3bFMw>$;qdtbs%DZDgOSXo6Y$h>4U8;7P)1N8+_4K>T z|0Pxp_4M!C|C*&+D=VoHEo}m18+?IewmyM=a%St{*Vw_R{0<8v)@9?Lk{bqXrf`iY zUqvupVv(M(BSH&+)5DP3X9ol7xKPQ-LF_Q82O7bY8Njn^3GRh@!E)gi)TR z%Uk~Q@Y@A)7vJn5F;dD1{o&%~YJ4JX5YNEaLJqmfh4+)k)M(06`QuD1!-VW{Si-+t z{yR_Hdy&QQ%WZ1l;bxye(m^a?xa4x(Ht1AYlC#u^>(+QE3n6?7gL8dJ)UNdbsSK%ms4< z{rDNNy4s&{D-QujhF=AcIAE-}VtV)0JXHMj2JbVD@eko6 z1PUkT8A*my_q!o+sGJ<3Tsn`bB9Cp5CT0h5JY4XhAZxvJK?QdKYnGZq1A@OYJ*!LxBl0iZv5ArXPb+n`~!%ye3aJT{GI63*&-n7B;q1_d>r^=G+{yzZ7E; zWay*rHd>HK(@M<6a=k_@u1Z>4UrY0I3%p0%AgsXVc+d8GO5EnwHi|T?!DzDq=vAf_ zutcePnk$=5W9HvZe=BWn+~H)hmpGy8IzrCpP{a{e3?c0Ryq^g?%K zmD8##_&Q7qrIQItuIeSmi`X#4Ei)d`2<29l`9DT5QjxHR<;P83wNSXog9sWw;&PdI zFao+qKYAz%b=h-7JTkx!_9S9Q9@wC%?(nm2UOziuF;GGSQI4q!Wxhf9Pw%yV-_%q@b(!K+%=31QO-Ji5l_dts= z6fjtAb+J569(?3vm=cb8G*VhGxdv2?kA~$G2S7&~b1N#eHe?XjY+IH_Sho7p&5vxJ ztCR^nUMQN}NOOcl9Zz9YY2)UlEKbji)8vN8L&;&`pS)$@0u^~JP8b7DMXfL_c!CGX zxl`125G7Ix3%CI=*+=Pb+_R6I#0LtoC>4wLRV9?lL)#NPJMbUyTMJRG2+?U}_;2&f z@B6$Wy>?^8(-a5r3c8t_%7+$LgqDC8*r%@}H`o{lamL~DuoxBIN{}V!QWqLN))kWZ z5C^>g13eyW6GcRWr(e{{6=m>$CQT85@}{FS^X&A?n3K~-;>#TTfO^k5jXYpXFS>@E zUi#1BU6~Xy!NCV!&unrz-x<%RH;2#R8{Q#CgKW_!-(c8}^j`X2`h4Nn*?qc_{?_7u zk*<|qNZ;=Kt8~9dpw=6h#lOVP6%+picApAsdo*B|qRqCKJ0=CR(MR2cK`yScGw@qX z^gm&T_g%`&gx$O_PZ#;kdN2Lj+*i|j<#WOK&Q7WvVs8!h(-sSR3nKr`4z6kKTTEC9 zZ(h=;?HjXS*p}xfAMg#l&uPA0ba&t$6#p74B;Y8 
zh~W{y)c9{fS-05jSkkwi7wf|gjICpIUW=lJPxYT%aNEQh=|N>HeS6`1;o|rU2d|_r zM%|7p7Y9K)=%AqOSYd3GlD^kU)epGj)24zy)F&clb_eR zFQ?zw`|}8C#3m`V(*`Hj{;>W=`o{dXI304A-RgZ5WBUj~F!;Rd(yk?*5H#k+^Ni~0VY zIW}g_Xq&rk?=RpaQ64}WPH(w&`u-8!KRw^adDk{gS%r_Y+K1{2g_i+X0FEar#|}7F zL77EX#>e=QmQKd=aXr&Y!*7|x?D%gOQ+I#)o!w`afJj4_a_=iNDp;5|;zyy(vW$`p zG0(lH996Hgp~OAY0DrOA1<1dIcIO~1*H_c(>SDUQ+(=hZoLw1TPM15)bdS@ScRTy(-sn!+ ztlvw+c>?H{*f>*=x5z2+zDBJ!jKq@)|cQR!QcxHc99-LA{;5GBl7N`hjFtYxf$ZH=bSilrGXKgh&Cy~uK+8#<}lpB5MTny{9WwzPAN4Ld` zDN;xW0#>wj7Ey9qGkZ@Q$-wq7@bV|q>dyVLN@0@?*~4W`oAgi_!Y>p!Q9y>fc_!zM zZ4Nrbcu4{^7KnS~*KJV@2cXc@BQERmXg*!WjZ>aNrPmOpSfRYZNq~FAc;Zw*V7WDX z09}A45@#$=BNRhp5L|cEA0bYK`hULm4>?)ukAn$}+4G3jIs}}~n_BZMDwuD} z0_0PP8OP*Xhp=HCQu+(*NPLS$=qKbjT2N%UpM%gW)D|F^bBBcLWVJWhNmT3X-22ZL zUQG*|<@A4!e$1keDd#y8@?<&B%5RpEbbYdsE@7;#LGbxxM?oQ_TWGpoWS6yuQt_fB zx3~VwEM8ws)s26j8V~*$g`{ylj^5VQYF}o9Z+E8F+Z*Zf!Gp9h&(YZ2Kg#*6K<>%5 zV^*=URo=$k@sGpZ(HLJ}Joo~NKowsy457|>vXwuL(wLA(^Y3X?aIrFFY)v`bj3Ky8 z+=0wuWN{Z)$sH^|2kfNF&Xw6w z_21(rl^&<1*lmTOG07Zfh<--q(+arYJzdu~F&%%v$HO5$YLXe%Lf;KenVm(H8Gq-^ zR{Gqw2UpC}-bdFYO}URN)V^+AHtiI|vHkbwIL#Fk`jze}8S-j)|D30Ju8-wg7=7tJ zC!gxxNu&Pd$5QL*mB`oywHCg7s#yHgvHS_wBQQI7b{O#?bXnz;l)e;7^vn&;e8xjj z5fYYC^hf1Y0fBmgpqvk!0={urM*1Q!PeipECoZYra!1>DD~xlQ9e9%rf4Ad_PztjO z85SCk9{Q6D77nnCw}NXPtootMGdH9}S}iGfTC`SZ;R-R6Clqi9@5V7vSrQG;pEG#& zCe|kE45nY3G}N=i~+&aRyaLYoT7g4VWx#P>|X zslW+e<1(kt;R@F4Zv<|Zu=E&fi9{ESJGRcrw-c1$W$-E#hmK!XguS<%&nr4oijE`e~P7*&AsE=BDPG%lub7`hUjyL0+3tF>p2M%dnLleSH zhu6fN!wyBQ8I3HU@G9X}hv?0R0Y8LXmR(M+3JxfQ9uU*=0cMRiS!e1XhrEW){l+S? zYy)KX31HtN9+YF}ye(omy|(-+0jayGyLTtuz4tDb9|s`}4q4lUf1TJnldu-Wng)ef z9fj8%2k2~8+Td|kPu_D14MF-e;nQd5rIYVOF#7X{>D1yvp#wZ-)5&oN`V%B5$Hb%D z=FOCZIIR%n^8hQA-efOz2&J&!*-j4%^PGgel&(xJrDlbgoXk&Mjx!x5TsAnS z1*OVHuNmwNQ!{iutWZAKXXhnh6Zy`|birrR*O)`+S9z>7Vd5v+!S`%?^Jnn_?X=ju z==kQIO2^_%{5|uQwDbYCp>Sf}j-e0D4hr|eGIB~GJ)E=)U&=74FuQ1?38DP=%~13L_d1`exXoUI9&pAYaD$f69yd^=8!Gi& z7ioGtQ{fh_!`u)g9on=-+xDS{T8yD97l@(sqUcCjzbTgA!sQ49vgiI4Gn@xSt@0wz z(X-jBe65|mmtW^F$$!s&=dho>m&4Bb{K%hozzeZq6-D)D3a?pcO|M4;5-p)sjyR~N zJb#vM4H4A7*MB7~)Z1yDBV=9jGBW$gv=VYgz`Tw02kHO72mVr8o7{*AT}S=3zQS(Z zb4*Q`JQ{iN4zwdyggg%?0e7ZH7q*;O}B}KalSia-hTV1 z$>)7`!PZ&yweq@9nle(ug%{Fj_a_j?5%ZgG+5(|jP>-T_Ai5LE`Xj6GsGIAl|HE4Pq(YL(|4OcNN?1Cn(kp8*3(iJ zgw4hZ28S|y&y;`1kNAzP0`9!AlbGK;@vRAWcbSuF_%!cr!8E9Jn4J+PUic6{Em0-a zM@ji`e(6Cc-NXgzt)*((!fNa{-`h{GeF$ZjZLybaPUg`%D=+_K8_i?9h#g{PT-`_b zC!=|16KF?}S(q_~HUi(zKKul(D=>ktZw%9~+!h2*kuJx8yU_-2LD+Ik(xrOK(0-ee%^Qh3$ z>MMkHYv!GB)8IO@0%PUWN{MFp%;V?7vT_DKw(wEOb{$%M$Y`I!EPGGO9)lFmFD}5C z;~vsMITG_9xKM^ECkO)dM|q_oE4lhg`bb4Ifo2; zNCP6f`E0j&Ii(neyMz*Q3`|-mv_|D7^`V5~^o0t-)jY=|tB`565U#OS({-`QMxpJ} zvTN^vQxg%!LWW|j6#^8vnvJ=%yo}-qB_}nac+9LK6l`+A`^L>1>GGA;w7^NRx|OtA ztr*J!!e(oi0N1o<|6nh@ckc~OZhJ1hw0biwar$qSKGw15*<&-+Zg~P_fc7|;W+0Yb z=8t_R=DjcvI8C$idx#=+SlGrLwGQl-ut>s*uIPtxU>rTiu0*>!D6obFEViJ*jNWui zg{}Anr^|OAIB+l?f$j_GB~GEc)!j`Sj=h=AO_phEfh+lgw~UTvFNt4)LV1S{%gV|P z1rqtib0xvBJY!r=O=jU=R-ZMJrtQmHnf1{ej3FB{*)nDfr=yg=U&kV=0!;Q<%X%nE zdjmt8pb%wFYvt>TB^@n8S1VX#q3}|KO5!>aoJ<2l9mo9Jz z;1RZ5c&sUSWBrNisqx>6v36xJvq#TLin&g+a`@P;%#!Y?g_%M#1_I>>d{#8yKUqI4 zqll2vWCK#Ad7qC8FP!HXTa?2doP#IL2e>?TyPR^UofitPGVrf+B4f4Kpd{eUxO%c> zAEm+odK!vW<7&Bx0o$y$qjsHkXZGoZt}Pm+@0dAjd7=1-D=@)+aZmhuB6VG;LD35E zWp+awx`h&JOpIH-Nz^M!kD0wFwTo#Hz4#LS7$GP2E2}6;bhmY_nWR^t1D+^p7qrl7 z6%NuH3-1tnZ6|eE15{M$mRi#Njxyr_3+hnudu;81X4D{}RcKju`e`;_sWy^s!I^a$ zN_3P{o&byrj75ZeClsnD!I0An71Cw$htkmd?F*GS6CERHE#r0^3- zEVf;TH@MDQnB~Z_#j4Mow!g@0FoDi_2xwYm&aZ;YRpzm4cNS-k`k_@S4W;=_HcKLw zB}y%0(}fGE1p{wmxpir?a>x{I1E+*-G=(}Yl%9pNd-m5k^+x2;NJK(atF<$0G#OrCXx{pN_ 
zcj-ObC=IMt|I9u=-m`;)!0G|uf}82vJAZBucK*`JIe^u3C#n}PGB;3#B4+SxNAWppwH=;OxQgEFk8%(6o=1eqncl+txXNta*qss*JLT@I{3u4n7^(cn?5; ztiQPJJ88O><#wbPlkkLPrYnZROB25@uaw!*3V9md_O0=!w;;wyIQrGT1;1`uZhe{D z6|CEH4roQCCT~ukaK_1m7HI~2s>C^#*(T{}oM&bi$>b)N>pQ8A6Y5$9gstFTT#f3G zUI06RWw)#B!W#eqK16E*zU~KQ`$zl0c8gnf4hf~)8*a*SZQv-`aZ#{qh<%UnFpjjW zcl|@g5-K(JDf*j$!zE{L52c_8Fx3T%NgV<-)Z^+@ea{0o$+uOI|KE%Vi=3CF{SdL! zifi9ZHRJ@!*)aqF#I)&3%NNyLd(}{Ruzz50lYDlCz7W9EwG{AD_W*f6xp&9DBsYla z`=9E%R&n~_m9l_(cX3a6r5c_#Hnl)%fmgr+nmshHU!P9(Sr4oAdfiGSYws&mj!?E< zA0-i!`PwRuqiWp|6mp)S({QOK@(^5ANDJlB5)=p_nKBt4r`4v7z#nH`%i7%&l<1z7 z^9`I%KFatNyL0Qd`4Bxir&3njG>6KVG|SC*$<$BVwP9X#Z1Sys^UtvgS{+d<7P49) zZPCh!6R?GHjs?RcpHu)g3p~Q}a3E%QkDLi|0z5Ps+fb2rFgzhju$UhjsO%726IcZ0 z)I|bzHckqVVmLzi(&ej)c;AI6^U3Kn971UIP)I2$JkG})>Zb7t=ZBn{c2I{9YUuML z-CK>63vzs3Vy%SK(*i)Ub*)-M-Or_GhBoShUr<41AhtY|L{ggPXnzJJ`*7bzOECECBjpefQ&{Kgte)JupS?d`H+XI$d!w8ufu7peRu z_I0^|xiFCg9S8sm+~b$>>b+A9{NI!jcgb7eI|8iXb2H3A9ziNq@BIlkLna84ZjN_9w;H*nBt@SgJw7deW@~?OWdHDY zJ*(9-_M?AVx6Z$l1U{}YdDbm6L}gS}*qX?UmoMC^6F|o#=H#xrhOO7F(AeOn%#Dqt z^GkT%dazZqa%9WVnvI%B-8+ZYd=0e|66!2qKvS#|N|p%as}iOt#TL^)OJ)OpXr9iq zC=$fh$#bvGL1NWRg`+u-R>zU|ZhAbm?tgWy z^oCq_X2UO(ivkZDl>}|1_Zxr@O}ae@yHoNloIqsl2{7SNgiLWBx4m5^EOLBt&NIB1 z1yDmVlsfHZdwP855!N@8XApoF`sCdb+PzYQpVpWR^eXT?BK$E&NMn`I#zw^IN%LtU zxzxx$2K=38d-j$GNk^z^PoUG)2RJ8J*Q`X+S_rfxR5Ur4LJ5{pTY;y@m$6RnjoTcd zHVe=%CS^^gNlmhR0(cg)>*DoF!*6)2y`Ht_v$ls<#-h8%8|uBfDZ)_4RTAMc_6n_Y z+ai})p6hJ_7XEYlmhEZfCxMEq2c_@-pe&%?)jdM@i?pN`NG*_B;M=x<9!f8Xm9N)c zEJD|yKI)uxtdFBFMvCdHAvM&5hiAgXqA zE|=^`IbqBk`D62tVHKRLWu;2cgFb{%0n9Stn4Z*G;Ey4R-p@~t*aitlJGLPKMFs^p z6UQQest`n|kfsG3h}8s1L^=@!iCdLX&PS$#Da3$mUsN75Z<(;oF)FscNf;BMn2!NK z1=RK-wvP#Ayu?0<#pGO-^^|ATBZ4AQd$D`6Ut)jFJ;I%`cS3ORnXoYTStN@7(Wg#1 z28q-M`#mA-H3znu)yr0sx{V41(G=#D#nC7w7aBWvKDA)cgMwx;cvDVlh0BUGz^Ggo zVXBI(P}m-2CK76nL1i7CKT+ms+)&@5LVd>vjR|^_7{)%*%0%KgCxeu?{uVm~c#PojyNSV*J Tcn(C(00000NkvXXu0mjfR-4N4 literal 79483 zcmcG%c_5bS7B>Di*ir3lG-#J0y(JBXR7gmKSE5W6DY7+C(L^a_mmSJbhK*8^D5Nx? z5{XKKl8Q!+q5)BoB))4s&-?OHd!KW@-}n3DoOb28pZi|xx~_GtbwB%@z**+TzxMvU zH;2Rdm1kmT$>E6NAA52{yW^K;7iB39N1nqo)VE&Wt@>hl;7F~z$4azUNSr)0?Dhs} zHTi8l<0MuoOJ0%8*z{24)Zpl(lyj$At-OzA^i#iExUA;hmV2(v;brFEd1*-tGd$mE zW$amS`%$u+>qMOwwaxcK<+i+a$=Ks-vFpY?hv5A^&&x+CaX58pMi%+oR(U!6CV@?S zn_OfjcPPW^LP}|aV%omuaJ!ioTx;&Fah;fRAj3)}wX`9$%$%38r?sH`vz*@Mmc*!1 z*Y3Ld%2r+zy(Q~T&QIcS^1ck#5fi^szE0l|hpT?Cs_x@gX09_k^{s`S_GkNyJ^Po| z-0Ooq9cC9iUUtiFW}AnXPE}#(g@#zWnR!)LUEUotvgp3x?;H1wrFGuQoi*#m@pZ#K z4OV-7x20v5*Ql!J*t;EGFe~+K7Q5``(nrbIjkh`br+RkUZWDR6#|E`D)kp2ON)>Aw zde*GG^bIf1{q^|Ix(SNk$I$UTZoc8IH>o}LY=YZQy*`C6=kiu!K zZ1rUl8_uUI8X|7==Wxy}Z{SYhdfksR^h1Qpkne0Z!JAXrZEB_M^EOXQ8pL1rRjv0! 
z%J&@PyLk7FdpRBX#vYkC@&s|D^n6dTz3Yw@v-k%``cEQhhi&in!681sDqPq+CvD{O zyA|WtxhE)gQ@QE_3~%zsA=f`~7}UNgFQhh(f5=pJMFQdT+U9UMz1A>mWWfwsc#14Y zF-iTP>Mbc7mi>Nfz;7JRgGWw_n@8dx`e{jScG@FKUC#k@76?yF_WP{OElo2lu?O|n zyH>{_{m)#HJXPW36(CN>T_-vqP3DH8O`Fb| zr8zcmr(S2G3NY2>J#p9~DS(c6!p}BTSH~{CqU++r0pJVl;`6ZhAMD~&&NJln8Uk__ zm48;07{=kGXA~4Qm;l+Kb~B^SW^W#&&~sDL{3KgS$^r()*}?n$j9=HD8>!Hf!)aU> z9V5$gJABA)=5Yv0TEK60F{@+jE+Qi9*=z!}vXR;E_b}PK@3h#Po3;;u&)J^+-UXoy zM<@g6bgN^UXMurC8ay82dF$F^ZVJ;l9LI(JF>iOD_AGPt9^rNWhQhQw#TzcJ63FqN zU~N3;70MhVE@;1BuS%hPW!`?Il*}Nm^P@5eicW54i%P=xLsSfc_cz2c&))zEdaUts zkP?Vq_R^Z8r49CJ`vBMC^3Sn2+gF^e@D1;QowC)PH$1@Yp79W!w<63*2X%eFX9=MI zQsg!(|#F>%)equsANI)K9MiYJGm!5qo^i7P%4_fTD4iFF7u%*=muvS`}w+u-MF_EXIa1^#%C9@SG}^~&_bZW(Z~X4 zZ3E5L=XY5`EH4L8JAgPw%}>(3^@b?W{&ZJK#h(SYdah&P7h;US!$z*$i6Nh$C)(V`wqZTgVX9=bLlM( zfq35C>;6QBxXG`oEKn*uHX^u-ApnPfF{CDnf@@@Cac*o>ayh`Rc-G9x3n{vPhA)tm z*>_tYg=ezBb9EFMprL#E&XVv2!=sYZg7>eTsciM&KJ?k;r-3QO!8Zj`5*7hDS6y5e z;#fv_1Odu(`{%dV&|Cgu-!c;$GsVV?u`%e#IeAuFT3V73{go`U;I zInbjANMzoa4v;QR8bC_XU3RXr)eM-9rk<24Z2ma7=4oW%ojN~>j^?Y;5$__iF|77> z3*|;7C(?0F10v5raEjf1KqPW?%p<(X9n>q+K!p0WN5Vg5nMm5cb4j*R=?U|b{vMCN zj0Z6PfRv2}z7{TRI5g+Fi>o%a3l}iA4BIvCAV-l|*8NAx7O*07VVIZ=16w}1EE@CX z4Ay!38Lz(ahWBCDjs8q>Ry}dJ&lJ~ORQw#&=hS$*rbkdte#Z1o=0X%fX2B9R!p(zkem_Z{tu@fnEOQ^hEyO@a7x zc(6WTscyKCte-nkW?oLU*bOWTL4dzNeB6*$GJu=#2Y5Q!0b-;JuFSwzpPAp68!qJ@A|hRofDVbBIr5bjGz05`n*O+61I!RR~p!+V~8swj-qt$IHxD#A7Ge zXPPtkRgP>YUud!QMyNgJ7*VfB~}$m1VS6X-n&lqy9jJ$DM)&jpfJvg-|G(S^1AwUACjp~tuqZrqt|?>q? zg`yELwoDaUkE0<-&!=jR5#t?bf&}SgsUaWcntD55=WgUs2Rg+5_V%bhM^|Ys}F{F6GD9|PH1P6{A$?S8jJKCpfaD($;-c~kn9gT&xggRlvv?P_1N69rP6i*SP=~O77 zL<^i74&4CkR^r!U%nPBj!ZNjh7|(FXP4NA? z_wX0B!b`+KH?c(PA>x>QM(3n{1ALkG5<*9%g$4*de#l=rEXYo3|0sWY=(aG;_-;#{ zR|CIE5@LcUA`MEB@(|e0048wIlGGl8t&(+|Ck|25siUX*PMg6Dlv>%1{X)j`_WTA_ScP>>&<-P$S&~4r8F8jk%9CfTp;l46D6-)KisT-xpdAudj$T zYT#|b%y+}lf@mXgkWXKK*s!sJ77ximG^$XLa`{J@3@*qbkYk`-S}5sHu?-0O=aKNb zSD+r@Sfbafe_g_omM~Mc(f~Vv!+a=Cq){l{h5GX0-czl+s}V|DzvcQC5B z50b-qE+Y&^m`zu0D4Yf=H0lfEmWLqyFvs~C2o=Zo3rayd?!KU?2oz!>W=&JV<|l!E zObH$anBXuz2$;PAtPuQ3c_D~Oo8Y%GHtJb>4ATYvz~L#1P85ocJ&MjzV7U+z zQVdGoreXOdc%Gvhk^O#-#4wt`byyNg?+m0DKSic0kD51on5=$a$Qr05NA6PjXIqJ3 z%R3_3%m&1b`Zc9D74iZ8)?}QO+S@M+e|SQQi(^ z!fb$w7z~cLLX=VqcEYJc-ykmjerS2)@%6S_L_9^kS2i3-N41Q^%?M`o!VGZxZ>+ z!pP{2{p}RmUmB)^J>URQG3r8*>F0eg|5H)$a}W*%+D6ymd^pWRuz`E!1f zEF_gv=ff0<^gcrAJ{?eIUUQJO#c7zvI$A+13f}NE5Rd{`El%yl^3UjTJcXrnAS$r~ zzY<3xVat)Yjliue{DvcCdBvIR1vrwC@R1bwMwuSa+wJu zurFx;+99wl`zdytpEwi;b_T>U34XUjDh11whsn&jn3C|7U}PcY37jXP%phy|B_X9sbugl19*Uz??VkSArPSt!Pqo^!Oy}VAZ0LS&w2<0I4zD|jdNy<*L^98 zVbzzryvUwj1U9)5@TSAAqjuiJi=^pA+5hmOzW||ch<6g=?SL1}z>83=Xz0Q@qxR#A z(lD52PSoW^-?2sph3@GS2Ma^L^g+T1+R8`JN&tK*5Yz_<0(-w8?76hL$7_C!f&ky> zk!hagI}V$3W;eH&m}Cq3DRf~v!Fv$k?Zu9xy4eE+R{-z0`AM0SqOrJbFfXeP&txx&*3Q5L$^}7~>(2}i8$oks7M$rB9b=1zkwG^*G3$90zHbOL z>=@<{S3#rne(ix{nr6DRX%8Hl0LswZ`K%{!!1*wC;5aJP9$PrwFI`4p9WkO_yAhZ( zfzqNqx1-v19hifhuY{fYOyEvsMxg{Bc3MLBBUCbo;}ircU}L~>_!cs~12NU%9KG^m z5NJNb=qtt-RKGD4ws9aFW5M3%|=||zJA*%y`1k@6aqkio%MX+_J zz+B^!gQTt|kw14pGw(PLNO=5)HwK+G?KN*tJ9^l;had>#CTPq+K{&|FE@)SJ3+f;OSY1yX|hn{f*4kd1SLrGuyN9S#MKDnHyA)AG5&D>fQg z*uk@sN8+H>BE;3O!#?r6^5N4sb-VZxr=|$KF>{JOcn}e{!*v!7yEwL$T^<|(>-)TZ z=;9F2Usfw4OKZc_}9l$p8oNAWa{7_9o!z{zbi2zIeZypL5 z$sIq;#_Y{8r;YIrMDFJmLenlT+hamy_2{8^NMF>yYI4qhVj-wYF;?L4u z9_1gF?x;A>eiAYPF77dR-QAe$Q9`j*h8>1c)JnnRlG#c}Ke^JKyRVEeYx6u2D^ohX>EB;v1m6N!v14bZz4hUfzXo!N6A<}9 zl%AZu!bJA1K^OR$5EH`2KO;e?ygh~>!eOS#GukG`nbe8TbcDThgc?3Csh$0R3Pq`- z;XENmge4h=8hn)C<%P`k%Yzwiho6FroaI%j>LiOy7YkXW(0e!|0B8sxi|f$yb)!C9 
z2r()c4)|mIpB}}Rd)!w&r^Ax54(>QC+D0%7byAV3c{E1Zh#h~U9S3*WG0}rc1SS?V zNLY#Ra~|wt+zFWmlq6=y7uD_mAI|tm(58Seud5gT0B85K3W*x zK?dgx+&vI_r#EYkIM1EVWbc<^3=S%kZ!q$+jkY}QCtIfRK7Y$h54xUPd#sAJ5-0|a zgTSp>1Q8!04I+J~#wDAt)qtlLM*o~C(q8x1v=^WBon_`eM<~j<{3z4Jgo-Lf85NlB z)d?5cB-gS*okBIu?Xa#sv~TVce*HQERFjjT&K(v1Tn-CI7|5O+eEvaKsC>6@6kLkK zz@Kxpnhi74qN5Nqz<|TxOD;2Wqd|gDmMF%IMlSmrtv49WkIeV3!!o#f&o7kYaGRJqCAQ+G($w z!w3zg-bw%6I<{D67Oiu;(>fUub~Cm4LPMsIFLf(%!N*xYMc789Cn1w(DU*BJGno_# z$(xw5#}mbq$XW{%!zr3T-U5{~qNqke>=iF_PEBgR0oTwN#DjA9GK6PQ~X zb}I-_Tn`k(VyG}P$Goi&tC^%SmL0{PVl7hYO7mxi2bk;e1(&>gx4c)vPR+LzJg`Sa z@?FR00Zc#|W-@({pnd$@syicB+9RI&<2anafm`BOoDFRD^IQ5(s=$&|Xq1lGT^+%fRj2_SlO`{5Ni;R2l9DL=)kux_FEczoaj6*mpKd>vv z)zCLacjyZqh_T>6#%$m4oPQuH{p|l^86;<1V9$j!`S8?npaKFzRrm`_4oF&BGa02(w#n{|jxcdO6kft=0%+ z3v^1wanWuLg!%R8nk{@gLS!Doww=^4WMeGJ@uDwD@HxU(d=+#@F_obst_1I5YRini zC|u}r$V;)|!bc??0Y3x#Ts{~4Ig^U-R~+H`=^HQInQ2L%JI?0XfOJKP>C()cupM4IjAJ)H5=Br<2>*u!B!tPEb_ zv#LiZuzy>oE&~qe(x1V>iIVWY(Y~{)i|=}%x&&;1>Ir6{Wf%!?R#vL2>!1)Ioh?wF z|6v6i=BNK@1q_tgJ)jl)r#)CQRK#!u1@^~1w6B0u*dn|uh@PQ0JDmb6l#%|geD$AJ zXx~GZ6)fBi+uCAlBwLs_^e8mQQ#(g>p3owl_}i!Ga^H*0i(5<>a~}x;Q4KG8$Cit&$%_V z&&JZ8xr5hoCu}ue`BXmQxc+(hH%d>GG?)I?AaDE9L^Srgq|Q=Y2&w^>;clFHk;WxIQRi?P%0ml)x9H*mwtSw?+SABRUP zHm*4Bn-I8Rkp+IQo;@k7=}+#1z+IPyb1yEpaIMkYZ7Eq)qa z7$}W+v3AjZ&4`ljMUC9bK%XJwo-bXr|3Nr@Uvw;(yIEe$Q-NDA%2RzDY^QC0IHITe zsn9hulwK?{StP?tkHgMpnesIEMJt9lpXQv}hqo6R_0c?l&HQniqkhV5c%Yog)_tP9 zDbpm2JQK=7?kbJASXBM$iKXco>9D!RlGRJFvptrQw??7f%0jHhAQa2j{BRB_p_FXyijj#=^kZ_E78yDm)*bNrB!@rpLFJ6f@y3-Te@M0-B?n-Jkm zF_$dTwM&z^Kg)D$%){*~l}60(%R4$!tnrwgHU+bN6T#R&HsLG?rn)Z^iGkdDO`htv zSs9-5jg-U|FCKC(F~UQKdm@DUAR7TbfQ%|s1{w@TMm5-shA8nfTEt<}!9T{UdyiJB zF~@k_evBJfA&Fgq!33?4Rnq-mR^ZVJIZxSy0^5HhIA)35*y%-5g=%)b4_98Aa>6}9 zWT;h8PhUlQkDytS1pt0=ZuF1{?}20@}alCmMrJ33q}^JrO!^XL&T#`1CL zDUTR}qE``+KM9xBW5UjWMEy-(t_pL^JZq<2ol4MNQ29@>tWLN*WmQi^VqTGywkk!! zXS(DUP_#P(iAi(Z{C%tl5^h9MS%#txILEAxb2P?dH(Nnb<;uX&Nz4)kU6){lV$hkN zVS+MRZNT5f{30p26-+d@b8C0^)D{U_AIptiky_U8n?6OexFecgNa<9el=|=3@KO{B zITNj@`)Fym#fuw%SOV~TkWchDr6fHffF~0R6U2fg zdp`zStm-{BOp|FmLp*CuZBI#>m0O`iM3{$yn#IsUWKnK3TTM|?@D-Uzbj(99lb5pX zk~7k^CcW(tZKJKCovW#(k4|g^xbRv=wFsZ2IjAYR`Fvy$-N&+Y4+P!yn0l($5zuX} zH4m~nw6ppZl#kLndSrIdTIHPueLF(tx}kWIz?nc=J|9Hg`V zF(H?cko%*kJUg(C4JO>)mQ4tb&T<|JAqRcv=LtChqD*A)eZM^v1NjM=Effl(eDco| zl8l5TQYilJbdJK=rulRZ23Y@oj!2wi&S(PG!}d*(TN;XdfagqIq&%oNa$~7ey4N@)1wj*Kucsk?gwl`X&K40 zupzOT+zFT^{9;5(@fetDkxwAoU0zujRF!Hlg;^8@+7Trs9HkP=$mC85e(K|(Vbs}$8y%&>`mM6_8zKXi8%qWWxFN@wwW?`UIjI?OwqKar68xd1zOb#1@fjhn zSdY+~ZSj2Bn##0zzakdSsP7I%QjdQ}zg)C=Dr3fxT5we^r7R?E)7Ot0Pn%zbw!KKR zl=QXJ&Sk8i8ec*5*&ZCcA^XGdhQGnXXjIth*1kbBMh{!>J*w|c8{F91^lA%-1Wh}E zH4kGV4Ox7eb1Drsq|=FOhXrm#8}%2>MNV0vZ%b8K*m`9=?JK$6)X=_qK_+Qm6Cm#^ zAQFVedqCq8DxUm(DKKc<=a43o7p1brIR=$$AI(fKC_rCHGn2x|FnBCJ#Y+fOkzFa? 
zo$*J83^%fU?Y{kFCEFtz&KZ`HxoABv<8aXX_QTB)PA3j0!qhRshRCi<*<6&MLmeHW zd78|ah_JeCcG|7sSkB5#WyFi|Xwde6q?s^D3*fh01%4~rpF5Bl#}GCKO_lr*1_K)2 zq2h3*bJHSv_V~&3Olh~DD%pp7(cF&*fvi)Rt;iK&v+{QUe9HkYD-TD}uPEXTLw zpqktv#=fQ>=Z9kw8u?s=b3wb)8N~NnBw!9bcw;Flz~5(6UV3Q9Xebouo&$7$FO+L5 z&`9Jy(BQ`hZmd2I$+Ibv`UY*?AUI~VgTM-+M0oznJz*wUq>NF(eT)Zm`E`PSOakdE z?J&n@7#b}le~h`{=7)!iyU~I?fle+T{6pLx+MwBpw3HMcK#68F5_2$<$mK7$aZZ{{ zaoyWBc_I9BGq&B{h+x1coSS^03wCLf;ki69qUYl$?ODJqq3z>qcl6|qIAE^{XtO*n2{9(iXujyh@$ftM5>Yxba}B%C~NLltapAy$9m{8_E-I1 zt`~2o{j?eDZ9+qpih0>mNQ+2@=agauk18F4*SB$555czp{DR}aAO{6UhNqehvjwhQWV@Zia(*EgOu8A(>}X9Q^|lYv%65sgoIK8j-hChMb>@P zj^zc!zELAy;5p3$P}cTC-eK*f@FDp<2Y3PD_T=G^Fw2=e+ULAvmKMM(e}c;Sg0l9I zhfkZDho;Yf?qjoT8MhTkCEWlBt%Yf8ructeOOIV^?+~&C|FjmYYvz!DT`OE@U(8z7 z`=KvrV{MrB3i%Y;o(_&mODg`?OEO2TMvGsD;d>a<$gg1=c@&54VAvsq3Vqw-;C6pO z#{W3#IK*<*RMhctpaxIZr{9CblJ2+QFn^`)FMEz#VKIbtAs~QJv~nQE41TAMWJHB| zY+!CP_|%QEb}Q@ys%5WgpW_dLexpgF-lts>3LptYThddiL(8AJ)~q*F5_=D*_W_M$ z2{z{OW&I9B_+i3Mg9xGhCq&>aB>TPi#uk?FXV`oJAtR?m!gw3iGZX)sP0O4_?k2ST zXb~`7$9!NYOtpJtSo+bj1*ZAQyXkVL`gfB>f1>_arpbtdhuRsQo2*RXnf6;sR&V3G zahN>xT>1E<*UgWa>;A zorAnA{Dd+qNCqx%)U^Zbv^|pr$r$Te1FPKxySEU>`?c#~fcA9!9ffR?S0^xw0L(C8 z445%BY1wKK*Mn|!)X@_ffItuR?>#y(Baw?62bAh1v)u|Y302tV$5 z^8+yUm@XVIGr37o9rlO=`5Gj(#ZP|mG|Bn%3UQh^E zl+U+Mn5dkHq;$`6t!bK=$bAALs$l7VKgdb;AkWB>b_VYtJd1XNvF4j+rpfdn@Zw&} zT(L4Rji&bgmdp#B!0|R4CAx_EVv;`(=^FMi6I163hn zdy#Uw9Hv<#lk#{77hqJU4m>FS$JED1_rI7StldZEVd@Y=ZK zqQTsD6t`0bct0isPW$Y%W#x6WC)M z1^2h$R)iH71&9mC2Px~UUi+bMSqz!d#aI7~DNNV~@du+Ay|coFd{IvHfPq(#d@kcz z*-5KVG5N%{Vo(BM>BTU2eF~%(7iQFtR6hg=aDhg1Dkkr}P$0G-owymQN57A9#dc|Q zX%0ofZyt_-hD>1~sM0w$eD{T+3WKZt7!nPa9{1c(5Syt)c}q!*_^8$J;88O{7YLon zL8Iozs&-zJ-z0zN%lC&%FfO4naeFi{%%)l+0lS9z%nSu;0qC-+lUJF7nNsGA_5$%3 z55310R`A857?2U%1~7V;WT$NlibR2e=1O97rc2&Huh~^l*N35JiE76m+5+mXUuk1- zpm!cId<|bf{k;x)4r5jJ(t;szG)Np{DY+l!&I<}^!vq2sr=0_#h(LJ0MIbOQ$A$Xd z;eqW_4t_UToy=yBrX@_tDMa=_`UUJ|$C=S#>!Y|2>UYz)-E%Ot3yl1UC&)RT?QK_u}}L*2~*kTfS=`7p$a zVyJ~g&_KF8V|iie$lHSKW{M2P#7uxWO0A`MZ%Hk4M=gll!$u2D3!7nWmx?eCfV}ihANrii7eh@Zv z4nqF;Fb??tmVPWY(Tn;ZijfJf#`Q7t(C~Nn>u~{o?@6;w4(`*95#TXt%J`ixj&-RWw z;+QY^ePGswWy2^G$sb&vM6tHK*0<*X;FtVscV>2YI$y9Q=6iW_C`fHU>_x0KqNim!&dP*%Tk6W)!svtI1 zGIvQhX*-ZOT z0UWXWQn5+?CsblIj`+L=a29}RA-`Y|NZIS_Nt8_7GWSZ%KAOQxA0!`0 zH(e}N8Hru-{p&Y(e8hJPSVhH*+wG?aZ-mQtj}^p5!O)ZtMP(7Uyqo;sK8va!GI*a( z*n`7C+XB(?L?%fUGwo`Fq{VpCL9Hu7RG-3n3f@gnJg2;2?JbPHe6d* z{Kd$_7@FMFbtieq{LHdg8@hufHycTAhE*-b1DEOGQMk~cZl>`Dzd|DKH<`;=73zmx z;5(xKd2toY8hE?m*hmcpy7|oovT+e66QN=!FNn4XHN_#l;A#gDIP8c0=lCx~`{IW9 z8*xXx=!O4$#J*{|vl$;pk#|r6uxL!Td<=KQMaGEbV}c1A3k^U5I!VSq07->EPQWKU zrjkz;l$anf1)72sz>LuTZ)Mo{x{=TK@Les@I zUYqz5V@bW?V#TxE+S(p&bH1%v+w!8#T_V3nH?g=ov1<0ghfNH-Z)oiO=ZoJ=HmN_K zaM7)t)e@(E)1Hg>+3Ij8(lbLnHKx4y{dBeb+iFIJeg-L~8WOW@M(&sU!+PfL8>Q7J z=v_S$Fro2?uHIEm@nzLcL4yZKbh93yKG@Lj)sfLUhO_hM{S_}IG1W$G_%MaO(`?ih zhXsi092vdg@so1rir+u~entAOlk4`8S}|VQ*8Tw=zuSxupB*tPNOf(Hgv5(oDT_j0 z^%`swens|5-<#s&Q{;nnu56Y2Bi4UHqw1BQ9RZw_9Wi|s@>f|$${rl$B$`&{V{;jQ ztSV}AIhv=S&~PB7V#OM-(A{qL+@zh&{F4Xo33_t=@r(0;qK9YOeEe;?*kw_J!u1jo zkE{o*8H%Iq!Mcl=Zr(LpWLdS^z!v#eQx2W*S6ggt*mIUm(I3A_ge0U4Roo%@@we^$ z)Fl=2<*f%ej|>dBy)7jy>t?x*QR?jDPC;trNsE_Sj{mgM|K-2FMiZ}*U|-{0apysS zwE8rK?ng#1?#=|LFz}#Xi(mZCgMJ21Z&Wu(N&FI@BCXCd^pmxYJins0R?RNh`&Z|n zx05CsgswJu><$i~0gHStNkTx9Cq;l0VGma>RoEBb?s6K_*r>z0nK~@EB^72*gAH z{AOD^0;a)-m?0Z5b+9Gy^yW56qCa4-CC1R-=5p*6;S=fOM18uP=rw-4K+M3nm&6Or z6jKUyu_W#am#Eaip+0RNufjOsH{}B}k;R=Om z;wjhT@!A;nwcBP1UfX+lZSAUqMma{$CV734jx@^Iu6AXp8l5x?CoN=8Y5;=AI3k!6 zLI0xPTI3TH?mTcF+oXTxYT^lhrL11pq?$9UD^(K}2_R2`B{-S| 
z>X__TJ>a6@uLOK;OIc((9NH?iy&uEnM+Zm6N95mjYL;&=9Ez5ufR6dyBlq7FBhQKc)Q0)3iTp|X}tg`BZL|o zmRD#yC67)#4h=S1f??Ek%zQ|_XI8qREQ^dT&>ETVM z>&Awfhf<=)Lut-u*nN4cZ<8=8{t0kqK3oWAHvLdz8B$S&veu^;AHs`woTe9(k%;aF zc_#~@*qi_2t@L7bXs;>HIdliA;O-gxWDCDI3@@JlFE4%s62CD($TjK3ru5?I&M!U% z#qT?Ahhzwt;yfAUR|E`JTk^8k;K;BmvIDAm4IUnKMK{0ii6Ek>n8w(plGx z+PeiWU#>G%=_^xd*sjVR3N|A2UVEUML~Ou)Fw2=Rww1vcS+vEPz}O4J+1*dueHk?9 zcn{QpN>KsQ%n*1>AnhfPb{cVDkfudQ`zRqX`3N6r@Mqq0=btx!OX)UgFFHs!&MJ*; zN7rYZ@R@qiHY!Q&wdzCzDfQDEvI=gK%}DCi^;OqdP&?0u$qdZDJzI$~yaX9GH8Om2 zfXy%yXs1>6lWkxM?(Pjxm1m7%>{6$iew9Swpk#tmW_wm+i?U+_dYTHp1(4Y;uiy*G zTZQwr=l3044&}GR)H!q&xcePtVfqx4{k!5w$_6u1cAdLJa&@U|`CYDaB5APcz;cM zpLy+y-2&}_Vr$LrKOzhsVLkdH!Lj(1UgXwNWt zuTm&x0+f@J2cxG2rrb7IEVgW zZQZsYS9Aqd-U|&vo)fs9`fs~-mWSGKQE1RIVu(@OmSV8%0M)hDmtYq+%w>x>Ae7{4*c0Vjc6-Q5(7@@()^rVm(|NClo@B?>%VkYZ%WfgVs>W-4dsH_0Z^ zo;p&^kbv9ZtjKTe0~>~%eTkeYjsinv>PV)&1=bbAk)#yqHCIve#!y4;2R>eU)I4@Y z+?LbD9+1U7tfO)^LlY*BSR4?px<89W!%SK-8Z$1YGgZY7(YUB;ErT@Oc`T47Ns=b5 zIiFTybkKDJ{T)~M{C#&*Yl-d>$YN;)rO;cR`;9^>!)$pN(x9N(gIn{|jB5Cg}EyyovSU}A!_aN0_`P-0OSGVLF{9xmCY}ZJvgGLtmO+j zezQCW8$gr$?;D`1Oj6mtfz=tXqne2a?nQ!36+1jV`XJjQO9nB)KO_K&CV_IWR@ zuK%Pmjwu#lF!`~m{=EEE$0pM#l?1?&;R7@qgIP>S4x~VWZC-0K>8hB?`hJv-kW+9e zR!Gfur4s_B(sCzL_&Obvixt>4HFVdhuhNHXZZ0cZqLo^#_vo-5L_!}TVTbu?$mSNO z-MZ($Mh$fN2-Tc7E?di})+x+mPwFiiS+9h8_NP%YAWD;)Ei^8K79U8VsaYqY3wh)j5hdg?=}sJ~9yggScVG&CZTG-4Y{Y3$Lo;_-g88O*(B&tx|<+efc8{p!3N zZ02ry1B3M)#|sFE!`GK99*5FJQQ$jTALZ`0l?&*Dlr zc>q7*hK8Dp(Ca0cQ0p-stw+!kSklTVRD&X(o_~DNY&5FRNvb}-ZQivzJmd4c?R8&` zzcmbl_r3-X4pla90OBiZZeBPo{U}s#tmRCLFCS*R+*lcsKgCRPyGNeXUY1%sxR1fOP1bV-zl!%!(U*ZWW3K{@VQi)<&KIsEO*k&}p;8lq-! z=H1W%>^yH`AejcZV1a=!Wt372)k@!HunM-;5Ra|(m3U#sADgeBsI5oT);xt)Xu6Z1 z-AzNT(UoegY4R+M4@}zIsR2>=5 z-0P(9NGHv?H1lFL6P&~rSCQYjE`2s~%3*iF43{BG4${R3G+frFwXK9Araq0fWljF7P);#;T5#m(V15wtuqOAMfdGJaTd<;-} zaQFL0>nA7o4tEC^r_5g$a-E&e?O~v8C}c7iyF>7>fQj4ktFfX zYrMqQ9HR!`kAjm*!8t-mx4urn>5Jf;5f08rLV7H^OeQ$^HKYk}GOOQT`z+xl+~gjTVfe)(;=r1tgx%{o7YRS;5yZ0B2&;y+M9(k+0OM?%m zD;OzNo`(r2G9eeZh_E!9riaOU2upo|rB#qJvn_lqRaB6ajqBLgR!IScW2qXB^{>uQS zJ2a;VxtPgR@ZT*KAGHfNJj*GP*Zul+uuoG%#++*fX%$-AmY(Z_s;wyn zeIagtryeu(Q=e@UG}sHX#Ar)gTt;^%ut^ZuUNQ>t3?v8|E+Bz8 z6e1OJ-x!|l>R&nd+h|Ilnl%36% z;ReUa^eANHBl(VXM`4V6<7QV%U70Fq5HR?ee1{Kw$NaRkV#{dq9pDF3p+*JgFhPAn zLEX&{>R)FmsJJ1g9WN8!ad)y|Y`MJCsCVr{LzT~$@rS>Hy$v;Bugg+m?=i4<46!%z z?xx1$hhwkZ922EhNf*!TK3CH?E3sprp4ev+t{D=Vi8Oby;ioWbN_Rk-!zYYg;G9Jh zPmpnp-n|iwQ9D^(dG%TWH(qtlzJlo45)zK}ua0ZYeBU?`Rh{x~Wg`*)_ej!&4CoA zWkn7N&8$_v?ezM}@X0MT$BWSphpew^*6902u*<$LDWIZM_-*bw$m=QO&r2R%__cbW z#WgmdQz-E%2ax#HClSz63h0grl=vhHC`Q~&K+VpNGR0*a#%2{O(3;KO&=#yWdm>0b zy7^(R=rteLw#hpG@$lwuf7o!7`Rn>Q@93|yS^rr_)+vbGeS?}`1~u;pNRR%TfzKm z*T8(6Q^fo`)J4+`bTqZg?@;R0FQRA9Qe8`4`4}KD`0STwO;V*V&_f=K^YPVEgm+R~ zfZAe!+7fi#rb;zQy5!ZMz6aOg|2go;Xxz%5OM9}u=kjug=eM!xq4il8bcN%Z;q~|{K~GR{k`$;AE`{Z@zD-j|6KX7+G3jl7L8IOvB_QV*1!Ij}H6PrkXg1ik~wW|tJ}=Ea7?Oi8Q2=e>Z>GkSx1V-@tK zzRxcu=X`?Uqwc~H+pe6_9U$;n3J`>xTIcoW@TX^6f9>H74R9x=@8$!g?^_F{k0qs- zSBQ;Fx$^>1P2@09-Hq>H3asgNx-Z?f^tOZM_g_Z0El;4QX22uE{1_AVh7Dki16Xfe zqmAw`348vbtx2)2SiH^rS!-+Pmhtl6t9ulE-2jHwgJIJ71ze|!W0U>WHtdwW;5aqy z@qPLdBRJsp3T*cg_RSfiAzM;VRxZKB0=NDxe^OL;+p zFp~r!pS+P-5D5ZqvZVXKgN&qG>z6m&bylB5^SfaqG0l`GxeK57zKUzm*Zam%8xTkp_jZbp0uU$PD3jJf3$5+Hlwxf#)};AP>$yPoxW6B%Wop_~2L`hZ;Q{=j46 zodr9O?Sv{?9wqj&09Ox1YP&ybw6J-)MbFq=8N+LQU67`_3#ymE#Ka& zqX4fYm}DX1Yd4a<5lqUd!WB@3drKbSGCV?G+^nW%>9%a$T7G`@U}vM1aHCce$*S8D z3CoCtszgGm#UP<+u@DJ!eZHm{%TmP%C%N!33G=<){7tX*T42K|an-7dx2-x?Zk^og z>;Zec8g?NmD~%iPKIYc$xDR1|{oJ*Um8OR-RP`&XacfA2i5S#o^-7b!_kP&BT7Y2h 
zvDbi>=P(buQ^`C?m7DIjicZST^MHBS@V-E0)C1sK$^ZD~mvcYlgbb#e{^%3}%_$n{ zpCp|BMaDJ8gmAtH#?^;FW7B^`Llr=?9Rs5o*XB2WcBoq9jYFKM`?94aT$VXRmEaIl zafmx~2>&#$lLrprk3+1)A+Eupsm9_ELzkc&{<*U)xugK~ukUXXlaD@Ge@ULIP0had z&s6;ux?b3wMF)^2;8?Q2i8)Vbe+#t7W*y&rP>v2TgbvV~%uv?@IMM-3e>ea}J~NaY zm;+4a^70;hr7TD=W#%)L8C_tEuC)j5Ewqm_ydd9)kd8%EPb*0sj#m-T!#o;Ar@{VtH36v$0+)HCnC zOHd^(&o#WyHrr0TU3;74Jd5O98*-kjVRNFAPYTO6^j$R zA)#lG!p<8&mqV##H#U+Nt&!OPnBss)_b6-S&nfoyz#h-^?Dh`siyt#wG# zg)eR5byNgysboAGRYX$y7-nYUvJWGAm#rNn9TK2=h7?HwiWKwdQ>jJl!;QzCN8_7N zVP$&Qu1rZakvq3|T-xitDRi(FiR+lu(ZPam0R%*UK}08^4J4k7EQ(vQr7$9o{Ng^C z?`QCfF6&CQjAqb)0~0EE*&QcvDqyVnlS9~TVC+>aohtR`r`ns@wy~{^0oo@58hM@W z@DkODIZ>k)fa;7YshQN;;VS)EN(3DYZX4N>~D6HDHutf9_~nXhA5+TU>uZ&conH4QI7 z7I$~;!llsNS-$^pkon(1a|LohKiht9GU~Sq=IHI-N!u@>?OV3vODHD=;K=i%48U$< zYBCldo5nc85BaNH_$-}|sn~1JxA(nbq_^zRbr?rO*0kZo~tdD(PV~Z@L5j zus6lh7iB|w%&k>>AXvFMHA53n^YptBK`sGQ4NKnFc3**dv1(*-+c)>spDFxb?mV5* z|2TNjlg|qgvY1sbQsb`PAS^8*EGePNsrx~oQkV}LNT8(e5|(zwkwU4ry9?YA#a0UN z^OnI+9)q7Ngr5W#jlyNX&udTM$Ao5zbgePX=31#Ode8w?nk68%FNiI%L-xV}4*5Ci zne#5U-DAl82+ruG+5WaK&nn(UJ#z2=p(|uMVt$-rewAWAnPRRU7SG3%NIXHhrr85WRXmb#xikm2Ya$DvQk7=j_LPDUMsQ;ntD! zSBwywZnrArY3>ZkUm7yk1erE8iCkVJ?L_4#C+L?Yd zCWt4;LB^UWU`rJE*vKdlC=g^Qq&y6ajA~QkM&%O){vZm75CvkXMz2JTrYhIN5&uBf zjA}VKtLw#$UQ(GuUwCCMK7w@i4memL$i#Fe6Ze@+Tmuozw!3J2QRN*ufLjwuW?V6W zMgZ64GsFBck%!DkZh!a7THZ~{JUGsM3G2~8_D?_uDKk*7b&mn;UxGN=U0*8OPf#fK zE+WH`fBy6r$>Aj^3tRbRfsAU*?r-lDchsR;_r}+i1@eV3pYdq?xPia+pAnUGSt zu)Pvn2~4SucF(QX38#M@0X<}p)^52!7{BrE3`8_2fe}&a9HXV%cD5N$#Kp?)-{@8h zHv;(Y9MB;rO|vi_-j8b=n?T<0l=ovTBL(z4-D$eOv<@#oi`b=nIZ``Q)%IQEa#LdC zkiRydv>6@jDo4Y=lv?zEk#k>dz)70V;j(f2;791vOS90WGyS?h2yz(3z)X(H_3(zu zA0HL6>%{8CsRi zbrMA}FhNygiUC85$@4)ADe8Er+x+DVsGTHD)K(Qxy8uImiZt;H2OQB0FFp-wE1U-} zrV%d^=?g>y8qGEnwYAV@K1UnhfPq8M+hSA%$J3-vBSUdTvJh7!YZSooTPOylt56IE zYcs`QBozZ*>!7w!*K_ZN;@?0)Ek;Fd)7w}l_sDwxsFt?!A=XK{HT|6K`FU)A&mEwY zIYa5xlMT%+yDzUE8+7!^-mlI(_IXad^0#qw+L#l^P7HiuTlMIdGP~-Dx~fHMEz9&) z4xC){m-m~xn{wC3MufbXbG_j7;2VBnNBbR2_I#4&bm5uerZv2*nKwh%nBU^*9v}4n zuW7?hijMC6aNX7M%k-z~tH_)zTD(%J_jFC}jW>&0vTY{R-Mkz+_rvqED!fyAQ4cIV z?#(ncUKrh?<1^TJ$PCNwH|HK4xK?e{1)jtdS#gnNlh+Nd`StXf+cz#vv3qmWSasit zv!k~D`FYX|QP;ZSqmG-Omi^u{vT2E}NquG+$0nn%g6P*HnTjQIoD0TA9y&H|>(_B+ z+7f=_N;9U93UR-*V}s_RnutMr*Zq3YWMJI<#QbWGT) zn(sy?Ca9S0-=71-?d*oPY36m&-e5F zUDxj~&-K34v+upvT6^t%-_E<8L*Jmm++^xp`%7^Byug=T@!s1|bU~E}p2pdpDvn2&9XY2n zE7O8i3w<5kXPZt)Bm`~oRIwfNWZr#&zTFe=9I|Vk{&LCwFPigSg=w1cPj>JtDdrb@ z@b1ckD5-oWZNu#165n&nWc{xUZyNp7X6>*uIjYJR)@;?_A39euaiPo^4f)1#T9($5 zo<=KD3dSE>)h#>c+;CLlm=ivsG;6Qz5LDwCwPBa`TG3OZ6V6?|f0wf^NxDgm=kEG4 zz6j1oG+&-&$pVKsUO_nu!B?`p2qct}q}iUp?m>bkkg(|88qT)`v%8l`97 zPYpvBHL@17s8QO^riNZouYsu7yDP!xPCt?^AHKYD$|2*I&Ohg!_VnDKBo#uxl0*_D*=}Pp_0#}K zRuH`Ibh6>Ki@_D|P2j2&xROal=;p?RE3-`~#UhkKtoGy`)4g(yla4h@EKOc@ygY5* zyY~e^PMZ^64SpIWNn~@vBRF zzqJ^tJhV4x;?rYEcQ}-V7?s)qm6AD2TpfR9 zr`I5$TCS#f|Ev|yucD;kS;w=*>WbG`DjrMrE>Ium9g(D`^32`E;&}YKT{o4VX>6G5 z%6ZJ`TGLAJnA^sDe)tVKe0-NfI8_{>TAnpRw@p+|Z(d?yk#I{yprqRG`ue~r5~dbk z^X65w?pKJOqVTF|udafgfBCd4v#g(q%s?T^ZkCL}8s>)2ws$^16WHyk%Zob64j_T`hDPW+xczB`UDYCc!OSurGg< zdD?4z*YH!<^f|^2+pyuapTGt*zwC`*ganW|&h0ar2yekcldScVZ#(AaTWnR8zLq7v z>8kXS&Dr^H1I!caoA<9+vOg|(et?%rZhXEI3t2O-%sQt6y-fqMbbzd;Q!YuXDjKwZ ztZa0??xR4$yRH5oMBzMr|0qPuij?;)OL?b2dE@cjz-p81&ge(iU@ZwKEmSA*{s#CP4fEWV5VcqEbV_JdOF?YBXSuaD%p zY@6PcAG)hHALMcTX0*^sl0OS7@V#Hd1mRUKp>EcDSoLy7y@o_7Rf@YDvRM%Sb|Jq~ zwF4Ed0PV$^=MMz7x0z3Fn!9r3d{YnoXJz5tVnPtsBgd`8gPZp&@J4?uHBX%XtXf5C zLTkf~gsEw{*>#bemtMY>5ZV4jDM3Z=+ufJbwnts_S01K3^hNs&_p(>gUTJ?zvlq_$ z7fJJ2NV6;>%{$oAoczN)D3Q*#(6ZfnI@y*)*AWlOAAg86ikI*c0T?f<`n!6nctGfc 
z-mK*vwReSo@vDw~*Q33A?Zc|uQeJQ#94Mf7PjV&)OX&t!)&zFl-wbwTfL*aHQ0hS= z1lcWA6D_oDp3IqEv{3GyXrVegsD=6uv`{&wcP8rY(l!~7-YK984-*dzsPPa}!^c;h zoanbFvqDCe4B2BcWWJw`g64gH7m#Y9$(*aEX4K+6$+M0~N{TOJ902*KPR25GLv=K z<@0ZqI;RG$7<>v^vFaviMd*N5EMv67A6lVx>9(`KnZ2=avhs=$8kPjIWNp(`Wa!n|REf;%h!%mR*yX|M!{%pe7t- znBfIxT)H(kUb?B);m;#oB4acEzmJr{K9bbHkzQ$`rQPY&xFHZc{pIN=<*=F>cVIOq zT8zj$cV|Ru_k!TX36?_1g2J0}{qoeendynSM$U>>sP01>QdWA2anx8pIZT;y``qPqCZmu}_}|*tW&b zn5h#yb4LU(dhE)6r*L4(AN0|_3cn*zFra@;1Nt|b@jDHX(7#0JU$OcCeM;J^w)Ir? zMO`=tiJ0^a67ko7dt*z)?44r|ckYy1ghMVCVh(v+ABQAWFO5hgFK0AV$O@!P~7`Y)MmK)O2bN-fRm_YiTTi$U` zrPn~xb4K30#^Do>my{1bVHR}m@{+1`XI-3ct3Bni>^8g@vfYSHuWJMJ3Sp_Ja0eS7xjpVh%qX0?SonVvyMZ~*TNRSE5*ac@J66h&6h)`a-Pt7P6zr|OkU}4{oDHU)`Xn^l9zS-^fLGh! zd>-7=`0}hi+ZFycvqxE_zV19z`}JXycI`XVthVlq&&ZPMqNURqx_tuO7%w=NhHiA? zp^$Z^TfM}<$L>~Fo{qnxBl=@V&Jmqin9oEq(i8o-0n2Lk7{1-wB(#KJdG2>uN)jx0 zTxVdp6|j6lusk{d%Nt33?K^Nn3p!yYPG~^G>;HVhD=PgbT>C1kuQ^>%e3xZr`-70a z6`_T%d)=~eFoB+rOLr{c5fFLcTK~vQv%P*im1`L&R)9aF9omksq@hm|1IM*^wYleV zQy$B#{gNsmMyHw&9Vs4|WarChTak}?O3Q^o|ML+`=!mK{g@;R@vxnPD9tT!khlA1~2gUJ>98^9W)B$XLrd^342Nj_PTs~ES z_1!@pUXna~ItREnpZf9)knKbI0NGY7*?jr)K|ap2E!Ufa&Dat->tx>XzaWvSI=Zt; z`E62j8tr>J<~D9vQe)N}+w3CBoK5HOM|r`A#Z%=bxU{Fi4C}r=`j}I1GTOjmj9Qze z{%8XgXan}Q(FV?=4XAw?e%j}Kua|Mjq$%a5$rUwwja38$K;?6ABzKh=K9Hx1_%on;+LJ~6F#m2QU`-v4r{J?^OC z{X+kL99N-XY2}ZN(_>B19k%^fclZn4!Q&0r;zI(h?<*9G-T{f{pooW;V%ns?f5(9BA2Kr{jK#go}QfA zr=+2X(+XvV9g;>PNwl3Nt>QkSGKQJ+er+b~(+;KKE)T=q2LCI^OOp8)X}EVuxv$*$ zru>W(DUuZJ)}TmiXLGJ>q@n!B_N~g#*3!_bNJDE54Xy6X&}xcZ$m}H;5Nc@`l8ils ziN)%%!n#);IPaTI!u!|R$(|8{m=>nnW>(DRqH%8}t#>X}q|N(Koq#nVH5XZe`2!qU z6wPIszd5a;G1)1ri(IM@v~+!Oa&$*^(~nsyWoN2yFzEjOlzIAG z!Qg08R z5hZ1_s`zA8pH||f-;k7YtxYCN+!I~R^S=TgSm`&kgq3anibX*5EYEZ&FE8GV52A(`l`Dm( zPpJJ|Rn{9+{VzG$fIJAIQxBraOb*s@#ervsE_{a!%;4&>^BDcdbjIezE*3AcIer*& zFNE^#BIUb;)!_ZWz?k(=K5uMx?j72cD|ztajCTXf_>WY$vQxqAIlW00l1UY+pbE41 zK?;3Sf5FG;P8xQ^5@Ej7J0ja-AFe?vIMC7Aso=cVUB6)7#qxyJy1en54Wgwb`8$2F z7wwjrr`+3ns3~mr8sdoDmq*JSm5L6p34Oc&hw1tcr`peo^nM=745<~A3TLEZG*V$E z`=@i6^5u~S0b)l2TnhoBjn>o`1hJ+LKqtS1PQG1X>Y3O3j(<)MSTkgI)x=?QX+XG- zI~^TIa$=nr70y=gx;dC>2Hr7f1_$9?m@U!=b9k2n^A!);@B6xY9{yD8MN?SaS>fV* zUS;M9lkM49cCEm&ORJDN{!Q5H*}438TxKrZ>d_=Xi|yp1A%p3%WUXZ1fm4Z>o%MG0YkF$>@J2;4akr5Fu@ zEPDzMQ2ZDkVD{Mi+6l^4TTWSLWB=gFTHcePi~jd?RM6HQ|0l=nbc_WKM7+LT@;mZ+ z-*9D3YKoH76!FhQN2`S5Q#8hlBMdiGuYnte#0?m@h=&9S4*+7tg9rN-&W3XMq!LLa zK@xSvUrFLeOQd{jkffyyP#2MNgg!eLd`5K|aAwE`BT-zClQbpE}Y(w@b_hnnp(=Ov#vi zRcgzz!nmUs$+xP0+dBa#y5`%(jcdL#3|6$hm_PMQPSXPw$M>4zgOk3FI_3{Y``4?{ z=0TGu2TZw=2J2T&)~}Y@P%TQunvJ$unuNFkd)af|D6<{%d4Cfz>N z-0sYKvoXi!sO^v8+v+JNIg}F)GR&^bk)!ed8woO z9YkM2bJw*GRlXwFA2jXxvHE4#@z$|LqD|XPS%daRSSeL0=*UpW={)XAz34^wu*%qE zY7ZIzQ#m!ZUAN9|?I>Bf_RXU?oELBdbY;cDbLK7f297$D*&2IR9G|S-zP!-id9Sl@ z6irtTQWyvG!#}>C9)Axwde-)BTyio#h*1ZwVEYaq_>RDfn8vaC3NtX5H7xBtUu{40 zt|_gP6Yt`lYZ_goet_JrSc2VyAg{W}E?b{t$p&T~744QEeTi zteg#_yY_(5`N^E#aGI<6u!;3e!;M%IQsyUDuB2X3{C@hf7cDkVXd&x(uS=}4C%tpT zJH6;BquCzpDm+-jC0H+`aQg2mRO-#Jq;#2?Obz+ZbV(y!o_mn4VMx-4EHvnUrt9h; z(2hp>JS@&+x3e7S>Q&D(me;=f^0qRt91D!?V!!xu<~|Cx26Gj+WMiL0?|I<`l&~2~ zHjBBGIhJq!npv~xQru`<+RnzMEwk07jkVcAxK^G4O_~j)l_pHO?bA*^E)hO%cy=X! 
zZUISB0$Y+E7MABOE&X=2wJX8rkQw!BD`>Noi(PkU;*;LlC8x&-i(J~#nG}s7jAUu0 zyWyg~Wls}UESZE=FEc!^#qj)07bUb*-N$zz1U+l!0+}p6-(2A`vSs|Dd|2La6D zE^wv43Vzm?054@&eddvyQO$T#qrgEib-*xSLO4pW!r1YG1pleU;XseYqkjyiM!s ze^^fM%DwgVrbY>@d`WLzhoY5V&${R-Q!_`xl88c|z1Klc|-g4cyNkdz%gU_-&Irq=1=G9|CT*%J<10eD9T16}5e< z(qS%dw6Zf{mP+CyY#w&WOv2!|KZHrmGNW4MA6D^nzcU~9=3MIH!`kj_+}tGGu=WyR zkG%a_xax&-ze$vwuK2an+w+b(o}2S5MEx#qJ|atT_qLj~PkKUL$BKG=L7SZBdT*hp z^FFQYR=o+wr^4QOx^?3dz2#t5pMlDE6jvT^zWB%^2+ufV&Y;E3Dum~eN$5%1RJ<1J zm~7fvXRvnGZl1Qi=fjSHTT!z0lU>A1@|{32ZBQ)t3n+GfzoEb4)Uot1Lgmz%IlMWJ zv)0&lmpHWCZauMu(e51ZsS12*m1pNAHw}F9+%rvj2!yD>pL|~ygy;hak#W3>XJS;w z@)g(hd*{yXzh`)l)#ziXi!Mg4K+XC)zoI4p)d>Y^d@FI4XBrr|To@z225yLFK0wcp zPCm4WXa4io61sxBXXRa)+dTFT8#K%fehaqvB2c7$(2NIY&gT$l64{{PB+%3nXfy$u zNv-{J@s#KB==gs<0ccvv0Gizw?aU@viDiuOfAaj!R=Kze1l6gpkgR-XEn{`_9@-SbY&zS0C7b;q9^;*%4gxcM6on1~aa)4dVx9MxzkQjh@T81@G%ZOu% z5Yckc<%763{BgsR#W|Nn3!<@4VAN%V_|%-@S6%U+aY;P#xQf}2oZQURMN6LSK4@S( z6sg(qO|z^xpGgh>*U6$xUi?XM6W_BGcY7@5!#cHNteh+h8oAQtN?A@*4!tLV;e*S$ zVhNWs7z-oDasHJr6Bxdf5MN5cm(YuL)_;{~ReLF}U z;LEP=EWw;t5RSHZ5{^$(n zC(fL$M8o~0hr89s*jCF^!&OJa)kecDFM&D?4Ga`t9`ao)-|uQd=qa(f^7W8V+8ULw zg}o~ouy=#tSLWNnugn4n#0Uhl)-Wyi0M3@_TYRR`R1 z2K})iTfT3J%RP1FZ4d2Es`MyyEe<|VQ+cM?7kl3^g&OSU3bsfp z>hgV0wR`W6e_ede?(}z*)F$zSmXAe=W1Lp#t)_UN1hm#G^-_h4oUkKI!E{p-a0^Bk z=MJTFN7A_)Q%D4ggqq$vFz40|sY(;dG*cC<&d86ZBTw+_U9Dz|V zFbG3W#CB#ke0l9IZBYUI2QF3{Vd8ne0FzE3&BH;MhbL~Gal)h_9rLg>&BM8vhp()! zH|%64zYEO|lti+GnsByqh#3ADY}4T~nB6mTJaTMHTlp;~r@&ktL0vBx-aoC|)cv0C zfKzj!NGWX(qe%f9a)p|VH5mnrDg0Wb(SG|9I|*bsja^v|!dR2d^wF%)#6MAHBYsi?1{y+qA`LWzc#Aqhygq~YtAu!ZK#|w@rqAWAxh7ot zc2d2)cI^5$%tZb9+4YyB`d=ET|B50Z^~YrsjJfOMT4TFwW9QtS zGP7daM+zo(DqjKR(nTr;U-`mb^ey->JU{tg-ks08MxLxOC@~^Hze0l z+Nl^zN={RlHI%&n!$-a7kaMv^g@>GYV6)Z@eT|;MUfRR%Ycgo^Wmo{|uAR_YdnqCG z&FcKteKb^!i>s1IR!Y5BAcQfDsaq|f70H6k6C z0+GH0lD3W{Eeevhm9-x|eU+}}CVpm1+R(#4#IMvEbPN$Pcx@h~11Xzb0V#`b)hyEk zZAG%M?SDhC?%WSpKRvdvBEugi_DQ7^2cfjlRO887?b<$$_8pu;>?Xa+(j;_<{YyWrvl!|s6LnXEx-adZUsqzFU-L-6^4a=z@elo4ABsy4&zG1D7t<2! 
z!%o-~`*W}%k!;Gb5SI@z(_mAs6nyO+_8mMmO4<3qn)f~Yz52a|M@P<9vN;+l zK|Zw(M_WvY-E!e`nA5XEKf%&k3zl<#WyJnaf`&ZXaKustOnuc@AtV~JTGMtZw5C*6 zYf}G+o4V*ir@M0ft;tm&*vmH{5$%`vQ)Jf=6-Vm1h17HR!d4ntv*G!_ z#3`A{S_kymbA_1-S&cWugFOz+qcu<_Jmp1rO0%KZGO9$-ma$~CxnKddjI~ABEhf_w zJznO5xnE|VKF4~|mSk3#2Ci=kQ?=pTTu6fgpg}35LEf-c2S|g?H$j62D9KyE^w^`& zT(f(3^E7PND4bq**6RxwXE~`)@&Qtx(+ewHkFNgTOHC6tBFq5AXsi5$R^Hfz(ABHS zIZjPmgxm632~6EWxN6`TRf6f#hpf}bd!MJdz&fpk(i}lEdEWBJDMt;Q(uGd>>fcUz z2B%y(aLVJKAUqpLcmzrjlgn7XFBh3SbxHsZpZz`>!H=`Eeyk!UerDF(9-g9`k!pUxq zJ5%-?R)6Z6YePGx6xNQ(H<%W1cx0(B-`I(Fq}JDdt!pq=zSC+iLtU#YS?a0`HsYA` zAV1RLf09y~O-g1K^nO~?O!GJGcb!&Z59ftFTr=%=kJ5g(nf7p3u!k$H+vjr3sqnbe zSw~FBks+6F!GydaqdetMp1pX26saawr>kkH*caw;K<)+03AK!a6DlAO-z~-4k7#No zuWZYY(!bUSJWH))(ce~hy)V0!Fl$DQ>gN3f9=w*Am_lwi3vPHnJ%xTuZg@Z3FthT% z*786}S+PEyf5n5ldG3=yPQTOva_SLyKNJGISJ;@9zz4Y7_YUZgN8x*X_p%)xhPt@4 z!S$OSqn0`u_`m>wRvmj~;esyWo#rebj{~J8dPSKEQfCuV8eFLvm^c(~v}S3{$yWRPHP)$6fT#%59*^C8Khy zsB$q>xh)m2v2tYU{&I(`ia=He!<*|Q!jiv(Z;PJH>VcEc18?km+&OXfSkY7wmk}2Y zeS(2Y`*G-qX~1O*tulGB%2Z~};ZK&M6~>dbF~d~C!&sXv;PD#ak?x~@!z2Cs2*%n} zkhQsw2Y0mJAcMY*40KF)fGO7cKZ`ZQz9*^gAr7K{OEzozf{Va zIHr$lHHKqUsUt9LSL$Kf4%~-nn`Tyjs$FL=!z`8oE4A>@fA#K2>zuWa3e`eLg{U?m z?L)&i9~WCk-}ao`VS;rveIY7zAY20%soTvU7V(2cKYwS@Yp|&FHx|+4^a%^YHK8Q`oLgEnoM-JrlxB-qrx?|e*VK35 zYwXvAqce9HrwlEOUEz$5mb~~~w`Vut7!iOy>8p~1-Kp!&Lg#;82wXmb^$={P3+_Ij zq+aU2G{*YcBenL&}o|)&Txxrm)%SU){%1Sr!HJ55` zoTivHmxse{anzBYs|3X4&h6biBzs!7|B?G$m;Soqy-Z+YOGeXSz0Va^^2Wxt^QMiF zyCm&%GhJO<^ftfY!&f`!6sJxPnclNB?89QI8q1x_bLRWkP5h}g=f!Q_1r2I#*>$3) zc>GO%%;%Xt_tUIBbti>XFrv!`YqN3~a((q8%>_l8Jq>&s0R-hJ??d0%I0 z*t}V%mY?9>*wV~~pN=3)$c9o4d=Y`SZ-7Jjth|&!0*e z74mwkVZ;KLmXSmJ>mPcbxH)CYge?~~HB7jZwENV7LP6V8~Jlpt5SIE;iL!m7b~jvWnN!yzr7&UlSepeqlx!@ zoFLh-ul-D0UDZQz`-V*uhD&yo^>r+l>`?!}Z}2VX>k`S1#x$GlMV61gHW*0ySn8w) z@d&rJG-&GIQ*5<-hl3k5SABUCIiYCV_~g}6p($pAM3cr{j=#Mr!tO!ioWwfqrOJ<6 zE(XZN-H9@F(rg{o6Mp{FX17W9qD~feJ{p{wx1A@Q95>N{_uf0IZUPEJg8b;uU`>T*rdq>ot%0o)q3 za_KYV+W)dQ-*@Qv%kC%QoSQ>i=W*P1w7+3wwJpL@|LW<9SCqvgg{>t^IE`mJY7dq1 zj66MrzZ)?9`5rJ8E$qIrL?ApnN|i&hbN{G>wPU`Tn#ivE%3~_^eOr`vr1*rk^K~xE z`tBK;eN5W_{p6Y(otwr*j@~gddqv*NmWy5AZTeQ&^cFa)h#y{{8-CoS=M2E0+c~^` zY3rdJa~H*q(M_HCKcFLSS2G@;sb+tfoVxsoh<^MeAG8}3bO3R1G$UCbXbdZ0|w zD3jQ2lxYr?=^d*~%~Yo0D3jvzRSq|njg}wRV>D1Ep9v__=YcZaoAyFNY;3B@wEwA6 zHdLva!#=vz+Vq z^WeYq7&#iuia!2Y)}<~#FG_0hK5#8DuEA?hh^17gFsn#fC=xfjNPNE)$&^*3 z_TNR)N0ByBk(}DI8imL4M_B-%R?tM@t57#Ig91) zj5oFaF#bSc_s=Ak=n;O8l=h^h_U0&+RK$vnYRfPjdstP74z>x{%;vIGNqkSdsc+8@ zOa6>cD3g{D-=T4m5RE>Oy}{cQoS!^a+dbWVw`r_Ol4IGX?FC^6(_d`?Ktlqa?}iaGH%0hzMdInEmr0hLJ z+!%56M@QSMVv?#+g!U?(Z}SW@n4`9Fi#(t?=H9ti%CC&Ww% z6!|0gTcAkPOm^^7E+BYWV#mGD2)=DvfADT2yW5gD`TJA-aeb{^-}8h3`x$$MWbB2U zC*HsF;E~GC-r5P-Zy~=7uNM%npRICmUkzSoCnT?!?)Ku9;%K+;>Qjd6cf8Mzv2P@) z(qFN_>*7kJ+=rd=U!qQe1RwVLCF-m}&7a0K>9xj+swI`K@*d}_S)2ThNqJyf&r#g~ z&Dx`T#4;U#{FITFDuLPBE?$F9_qBO*0r2wZk0G9K>Rg=KeNVFiKb8QW#s<6`054$< zz>m@^oQ`m7 zP>|CJ0q3hfCUKl(1+LiD-~KsHZaTBQ=z_~c5vOppLr=8B;i58p)>%V0lIH?PHe? z9Dlq0lDz9Bj*yeaafH+1<-qBA;574W^aXWE2!gud0u)){_afV%$Qs0{vEbA{i~Mu> z&CIxXD)M`+ld=@xu96=kGKaGRETaIGeO@QFe4Z&hkriM{`M(AjYIR9|X!L&sc#8rI zBNQ?L1_OnD3<@bg4x*0`Oz7c`KT-H|H3GcM3eb)MH2y8XN(8uPN{)+Dc96QE+nmsp z+9`D#@)CQbwq$rIFahChi0cr z-Z7clLlCtG8Gol(nt-J9?w>4Q+_7quuer=`llXU%Cn zE31Tg0#Nt+Z^Z;DQ4!|r4Z5)Xqg`MW?E?ZmZHY6yw@86>VI z!szKPA1tE)BsR_%n;N;0-24`}d9llI^Ja{j-~1^hdl|X;qp(d%-MjZW*g0_wg}Y7M zMef#JcaBVE;FFmx_A@Vz6mf!8nC5Qzj~M(BgO)Wb23LyV${#VTXU8z&e;b1-#b8M> zXsksHi`X&z+*Qo4%$=FN?fBczw_=}NQ0jDRzN=7gF0rerA@D`XLzO-to@1TsnoON? 
zgXnST7e0q_q{pM9n_O92AUc=wu;o8u2&X*wQyyR+Wwh90VXSaxysGyJ15xcwFzx9A zUXM7_jnEmK64;&L6&drpaD1!K84g`K|4CpaI)kb?Ili6o&XYLvJmp87G}c_;l?Yk& zyhVVv2IJa_q7MlzTDmpdg@o$~3D>< z{2nnkfpB%hVZ}=S9>`Lf5H2GS4Fry^%Dh7pbA?x|>uuqmWyF+D4qpzBfw;`bNWDqQz?xMpFkjINV*12Af&j@o#lHeu}A2$7J> ztOtbzL?Ar3FhIJEl=vxnvXG2Y0oy2^XRUS=g)-KNh$8eRYY6ojf8nMwLrK?Mzo~*t$IK|Y>cpmCT3f<2sQJ9!abAs|+aEgoE)(di5 z3E-3opqvf2DNZOf(jj`$-A&}oAs;NYB$JDFtGV0}(%c_?~9!7yzuU=h$ zmQeY9qW(AWFzH*g_wd8s!w!27Y2S_4Ob5aGbGvT5Hgw0ck*`Sfo|5SOB+=Us(No+n zkm~w`@DseUORj7&1c$)QeIEF^$Z%Z~T+f&QuB(FU*>J&Lh47A+KozSl&o^#4yQ_?WQA9hV`w0eD8xW=L+aD4Y3?&#NR+sq zXkHSi1+u3gE6>wGcEw<%#~SGw%8*MOx!U zSBWyihs;bsW`@4UXta>6f^H5_!Sow&kz3)&Ic+V9`O|i=c}~j>Z(7Hhnu6z}i09GZ zIc)-l#+?6Tz-$8=6TtOG;`;lQ3%u;V7 z;IyHKaioWCbuN#W(W+zhK(A5;ucU*(t9tY*6`JjhoY>W3rkJ9Zi~NnRD~gZ89FDE% zcZtJYVZwSYqiRR7u8?G$;&*T?bHzz^Yz^$#C=}uBuY(M2dOhr@V`QLp7~4(mK1{&w zj()o{a2bGo7r^Fr>r(fBJK!Z=Mg!Q-3D}bG5}7&B>cA58@xyHAYHD?$(Ca+pl3`5&=Z%pDj7{9g`_UBi@J!wMCxq5>`L037xNpxB6MZ2 zJDVmt+YNLP%Ua5kHaklzkfnUeQdIhHtn(nGZ5xg1Ho*sYEnxrKaLId;<3n*D{16FmZ*)GC?iYLEzw5yqm2xv zHnI$JoH@090TI@y**_cIRrIb(yWx8#rlI_&Qs0vLM{m0))SuSoZ#V&s%5ZFKT9+BP z5<{FHywI*^-D3qfA=xS9gy6lUwd6o%tN02xCQH}=TRj6zGr&}c2GiCq61NCkaP-~7H}!ePVw!T0vr zCi#SMEnxf%T>T$mWxFFR?z0GMBf>iQUaw8P;UzF0+H@=Sn=<-Zp+wPvqZrp}%z*Vv z0F(Dc5b-FxztM+V#J|k#Sm<(kkv51x8xotqKMPD3>P6==`eb`uI*uT?$3bBSmp>-6 zBu$#NJLQn4fH(w83{w8i^dCq+*$gXivX`9n95B!=8A%(k0v+Ktk2>ACfG}@A$ks*( zx~vLhi$k4dnK)DctaJugSpyPEavsVCR!{ik-Y|n^zpoAKj%N*6WnC}{>+|-Bcj=rN zH4At@2)rLW`w!)kd5b<%oB+Hhk#cP=h?E*)4I}pq-6gO0V_2`p*q?R5HutPeW7A(@ zYupbtW2h^uL6aU3Y7Y+#4H0`&7b6 zQ}1rE2{V~@yU4}D+q|zuliLpi+eVlKeL@^B8*$8LV;Be!IRbr6BL%p(*qsvpfecLma_ATxTFx)GKWM(3n1|%LjZ1GSe@!;X7U}<5{ zAM88?J3MP^ohkS$+*Yu#YEKA-dRw9XLq5=X6e|rz;Qa5*kCo{9*x%wKyk@o4>nuNj154ik=aZtYK7jX{~mtR4uSL2`nxny0CNF z5lG$q&qLPL|Yr;LSH-oRM+tQ8DX#Oe1{Uf^QJ(!Gv^i{jkR3TD9 zM3KW}P75I(lZR*yOT{jZw8iRwFc`x96ri+2TjkgZ7Mr7EZ7zVwe1o>Q>$30~Px<7y*UPL0@!8Uv5JHt>z-I{*MQzKf}S;OhdiL?z6#LuMf;>V zBatS-le%W?u_wb1V^1KPzlwpdW65Zo@(WpV@>CjN6O@_tyeoLK8WfLKQYdo~L8d@x20=`}WU#DY9kd9Gf%}%B@ zsI#_vgJoz@aEACgoA`PYy`WMaepMCu(t7vubqx2$&#htOToB+u>*kf+JaeReC+R=YzIcjFGtz4@}Uf{)mJiG@*mM#1iC% z!f~P@Rh%7;%r;7p>Vg+9CtYX11z;yD>epmhP_j&>^@oNl$$`*ZXMbtI(BVKp&jiqQ zZ?n98Mid;ZHXJOiI1HALmS1d1a4M!L+8heM~ zPs?BT&DXj!cX%{8n%7Ig6GprBHE4h4qT*Apa%{`LFyD!KN zo`WAfLEt1BRFpXYeuDtM8~_gmz<&bi$tt7e`#oY*9yO^sq_2OWNs;>z_=>2Sm8qMP zFxZeVWU__9-2)1nKJ4<3g}2W&JRxWOIJw_hX9h7+Wvnwt#*HU(Gz;JaC+^Gl@(k$u zj6`Y~6;hoF370561BGm+LMl;HDh>DJV)WoX>A}D@D#TeJRaKmvPXOG|d$MGas_gKJ z5Z-oL5ZRG#Wm5x4pG@Ih_+)+YD9vbO&SV+6SG@#lEuyybzhWbhSv9?OwQRG7jeJM5 zK1`bN3f9Un(glJw2SN7xSSH8;(dq&A>ybE_S^ulZjL9~-4n!t~`v647g~<3=B>XLF z6j^dLA`>t|WJeI07Ov&~caiatfQ|+5+i1jYM?aT2g??UQgMO|NnBCSZwsBp^DOqlk z2Oh`+gZT8x4C3#tk@TsT$NHKNWCY^p1;7klEQfl|A{p&yeIv?gO|t zVnC{zsxY)o{|M6w>yRbxqDUzfLt1qfq2WlAtHr8QEes#Z(0L?jJ(Af#E2IJIxcM3S za^(d`yEqD%NHfuF(D2+_l6K>HM8nk%a)r_~M=C#b8b(8Z|3<`(#}fVcChX3wsInmf zD{TWhr={>2>0tlxIB7(Y^eOB~LMfIK#PS#;Idy3vkh0NP$4o}1Ve-M$R8>X1AaV(-`480M_#x(C8gRy>T zF~*80jt~Q;|NB@#u=)6D?diLN@Ad_D#~%cI7|5XY$IrLnGhR)|&;c^O5i$&#zM681 zNT@U4e*NFReHrW9kAN}P!I*}HtSgh^vB<$hFlHjt>#YXHz&&8hPGZaiFh)&?zEqF> zr8*?OYRs43Sjl{8*nfTLb{v_!#co_c-~~xxIA(!JVV0uJVPu!dJF^dahk4*7>Bej5 z#!0#DZza;7670eusu6nuyU>cN9E=}fnB&|8#35T_M_H;EM6mEL0Q?^TH;dyNyl4|p zg$oB8e3dfDr=|oJZo-#uV)}OUz>@MY9K;Ps6sT7)5jm#QzQ$=~#;;13UgYG zbIdn8{PE3JuB-y*92Cqfl zD1m(c_4^Iz_wy1Zr}Y1R6EX+s)qln>hxn(VGCr+{f4UjsS4G%^;4>o^lFuAa?4bks z*-ZI4O!>J_`8oZ`dNKw(PRft5GV(v*8bPqc)v?0`IJ34|)IMB%FOQS(eRktD*=5ub zPGJtYFv)&z5X~alM+RmQgKK(f-V+Lf)x8rx_N=}UB`>rb6rxKUmCb4_!!?oRZPvgq 
zXRmu>u2C55`oQ}vb^>dFCg$drZb)z99Q+KdxCho2Mu1i3{$~&MG6e$PR%NbuQQmZzsPoccV8dSRZ`6ILUqp zim{1|kq}Vpitn>yRAKBuRT$JpQdKNa6&VdE&~W;`8%+8%P7^((Phn5Mh3?&mv~eJ9 z^3foumTiFGVw*8v=IO&0R=}*iZpZ*ke!tGn4 z@@V;8v>E(6n*zj{^^{F-`W72}U0KlEs^}36xMkLm707&yc~c*E!i?-_+Xj3*yN%08 z8sOtiVS#Tuw*Aj1E@-H|*#DuG$n1gA3H57ZCb*aq$20wOGz}3M^#gO_>B z%7QaxfeQX-76@mWHcOii6^+2BF2qss4G}4JWd2aRnc{Tl%U|NVMxx>MIkRD6|%oZQ{NvAvtTi~2w91H@F+cqp0@qkX+qH{7eh;d z(|o2APd!-O75c~tJ^2=W8tX(sBp;|*Ty_AT7|>F3Xb~K@bYP!0f%a*aX`dFpKH+5C z5cG(XYlxTPs27tZRyb|TC^_7$aFSW!FqWL0A4$;~R%1#;W6~UpnrWv}_3Ej5IS)~* zLaJ3GeWI#O_M=cF$TBd{k)=8*6Le-#ot&snA;cSPBxI?DlZ)S&4VE>WSYJ#4h)?Q5 zXmKlQfcJ_GI;;_jM3{P)d`#I8Za0?bUz!uv@hE2yx|*(9a4lwFOmvblC0oi1TbhlR5}eOTJ{n#Uogf>}-Ju`y>lyusj3t%S@Y=?O zW&a*MJZq&*=N$HqhmxX>_Jdk6?WaEy0gA+fA_=AMkDy4hu9y92NGlL35~S;lp;(ah zN777@Y{Vn(y=rWO{OOt@|;oKMTy6Js9>QfK6Y@${{BNHi$CzJ&! zX0%)L1gdf-4L$LLDOF0G7<2;vt)s$;vD>PmMl7^Zur`<`l|83gsbY;)eK2z&{!@qNWR-II71^KIL68_e?hpvz6Hr8y|+_F(T1};NfOlPs1D?>6xJuP zenQm`?w9yB~0a%j)sLDxx0+AF0g+dh}C69a&ZdHMl^Hxh$vTL|_0e$3<16 zvv6ks{o`(;&i1(H>EE0&kdv4R$a$Xw^p=9V#XMc7@IkkRnkmOs-G*#Gjrc-4|mkRG-N2|KOPiw$6nxM1W`=j1}Jua11RSD zv8SLvc~L4Pfcx?Lxca+;J8SROwyaW85-p4Q-259cqd*`Y}pRt9;-%$co4k(Vy-}RT4A~{GzO_+56FVX%Y1n&>0$mG&Hv( z4f6I>%|XnTT!Lm?2i$n88PYIfMD8oF2$CIgg)u%N_alk^fW;u*eS)s&@fvI2m^-oT z2G&f>d-ZH4Dm98~14ZReQR$JIu$UWZNm2FS$BrOt3dou?vR1?b85qW_HJCc{QJoLr zHP^VI(ZH>-3E-L-)6b0tS|pteC8vRCYVfDKZhth?wWhmC%P_>ANyI)0VmE|APy~2M zQ08F5sx|Gm2%N5zb80cj%~7;r?x=ftZCb3H<0~0i(j-|L2U#i>r^@jV6!_qOo=oib zqnVExruu^V-0(QU|9P06vvimU90ppVa&jplNO}TtAb=bwu}XtckGbW( zKfkFvuc~M0%K)6#otVwUJ)3eXfRaeN}3cwS4AKlFCNAa|!jb4u^_Og*V6yX~!tN=bw{dJX$(k1RSfk>= zj|DeQYA+FPxz?NVJys&6uji{v`oTnV{y~FY>OLcVGHECm7OvqThF@0};Zt^eb>;0i*ytE>oYtSH}@Q;m;?dT`Q;3s64 z>>HaCl;B#KH%Rk_fEn+r6-&N8a^LXHs$SLqNorE>dGnUkz8>vB*SnvG40>E=^;7=a zF!~8s{6v0fWrl6tN24F}9*>{HAvj#Fr0_wk@$5qtoo6?E`+C#m!v;j$H^JiO+y{dO z#Wi1eG^V?le&QW|Le>GnACP~Q!X20CsjPAIRATFwotsyMIW9GO(e2!5s25Jhjn}A% z&zms+P{@HDd6I($eM`6;JM_#wBmOJ9LB!3aIXnBXl0b5k|M; z2y_5Hhcf@Ib;z26-WJEfgN|74KoTnd~rc`;>BYxi#L)~y36(5yoW;;NZyTgP3F&Bz`|*-J{%e1S-7Gbb*fff>h7wzHFIll*?BBVWL`B~H)*!V&>hg;j3d<=ugDj%) zl|Lyu<}1&z({$v+9(-lk#DTAzkFU&?c$j=R*i3(QOJ#6l(t$UL7u=K>fINOP|In0; zFY(nDP5A0oTNXpO<0eyz5(dcOp8;|N9bZ^x)9Pxq;DB9^Tpxa2`d5ZsMn?hFIUi7; z1V=D4nV3m{U`rE9iS9BtNV=K zBkMqQ9!YiPCvw-7gWM7)h}`pRZdKJ~ArZk0O$KRRMj~zuBqEi5Pa)-gCMCjLceD^m3!N3Dt8RwOMw_ZlNi3OuHgIY{M^I~ zX081PpNRDNKcVz}Li)Jukv=^L4EMJRO5c)!&>#Mv_O3H3s8dR8Q~yK~gGptqmp~#nQVEnyMiRY$ zKoWgyrAbU7!C;dZ9`B}6JcA6D!>M_RtWz5kT3@RNNZ{likUCZ438(H5G1+`{8AzS7 zju4#e{}Z9@$A2Nv?TZAPB(NE)$=FI!-OoDT4CjX~OWC<1 ze$4fg(7ijE&QO)XSHOqV>>_j;$n}IPthv&a6*bMp4}v#BMOQ%f3b zyVXH^i-yC3OBw))AkEM$Qs*wy|AmDpSSU5GsPRdsJS=+F{C(r$p~mk`7bedNXImrF zm4MK&y{R4naY-YivTWtuLghuBm-;5YGUj^5{ImJ@9o$GR&~|}=?ZU+OPYAaE2KZjV zimeL@UR|cAd8Z~aERJMLJ@qU$)RSbOaWU7JVElGnjNrUTLN>wRR zrB9hU|FL&w|7jk?HMIP~dO|3cBJ|6Obl9O#@GsYdG1or_(nd5!#-#%sDz~KVLa^P* z%AhCW6(8OjddG5>x|U$x3Y4dyI?SQ{iY5D*(5iHP)o@x>upOuh*8a^_3UECkWDN6`o~UP!zO@f z0hJh!RAeXDh|>Sr{R8WKjuP(QIPX$t!#kmfqUlW0^xCCae^2BE$PPT18iK4zvw?jq zC~LLL^;UBKX)37ROaHo`=% z6=JMu#hchoCR7LoYof;sCJvJc5syNSObDu_Cd^=>)SOJ{^CtYrM2ysg*AAGNOePlc zCVD_dMsQPVVkMba2@};4M;yq+L8&9G312di%sX=AAxwmp{@sKbnK0*2)asCl#ZnZR zog^77rcoUNe z=AabCZlHK^nmm=`VcKKpU`or#Q#R{rl(Gj5FT)>6JsDi)%s?IZ1!_|M(ZJlQr4_{@?xe~Aw1kX<4 z3<#3YOiC>a)X0}q#5u@z;cc3SgK2O`Z1>2L!~21wDqv89q|lnWmY^C2@jl~=?iVOW z8~y!#8qk@<76dATBqJ#afui-jN`P+lHK2#@C$cyLI_7z%@FW4s1|XLSRJ2tn7Xri% z?VJX48{FeJE?tgXg%*PlF4UF7C=*jr4pY+t{Gt>rgYTushMJ*1Bjb|9a^G>~TD45|-VSZ49|;fz&y5ryUQ zGvO_OLa=!0@XClNAm38{2PJ{>I1KicR!H#nt^w2~dCn7{&Jk}D0iEFsEC#@f&i)E@BLVV?h7hb>IfQtw#W 
zZW-o~v}^6%PzF&3I}0#{3P^C%F{(cH_P~==;vI$zqfF;B3fv@xdjp1vq5u}q=*qG` z>KTCTM=XYL#KfHEy7_TB|B6<3k#h91B1*d8eH7bIxYGJM={&!!1I{|BZ>2@gZu=F? zIA_u|1;g0WWy1*SxvXrIZzF-!@QbwI!x^?8{!MD7iYFrL)PXp%;F*9uYJ(uv$e+hy z(u}3(XkAXE;tmxx*4y|1`hX~@ZwSD3B1#+y>>&JZ+sF=A3@t{Fg1>A#*D1pWo{$-V zEjuwY!e(rcbO{^upC)FyftlgBX2`h5XEG%Y9%xgB7E;NYZiNTu;DH=21;|71O$br- zeGnq*5`}0VLPUKyDV-0|BcwCco)9__#m6Bt3rrEB9zH~&ra-7ck`_R<(|-~MT#{jc z8}pdLu}DbjQ8Ms!s6Torf(o)%i2{Lv)LOzM3zVC%0~Le6GbJt%BVDJgdu{U?3v|b9 zY(tX2MW!}#M@%MFn{^ugMzwhM=mz{A;5W6p5|LR5rdQkWkKFGJ2ZS_S^@!6;^+)@bFjz9$`5hv2Wh*UID#?x zw8PROoRLSrPbt53XbF1f4Hyvd>Fb5H&qd}qm9%UikJ8~$KEUT1abXN>#0zy9GN4Na z?rFn71PmNzkK7=TE%N{E92`nKGmr7`k>Z3Y$ z)MFUB@+#~ck2B}~TCm8l6I3Z(#(R|wuevfEbtY^1MT(!`aY+X~#$}GrPsn^Mdq86f zWdXGJ>NjKV=p%Rilu75F?A;12xGb?Z_8*uRKTm)qj1*POAlH?1&qrl>R z=pm}f7n2rMl^vIQM$-bElBZBTL)u-_hYh2ICxO5!hNjEdic~xxpV+9qSZ0>#u%H?C6Mw-M0ps+i;8`ayW?V* zqMNN%YI@b0jrFQ7=P zn@Zn;J9_L}3X-?JKujCavBrjFN^H$@>&pQ#3o!_@6DebFK`6iF#fed-&N?iE@0PgU zAi(xXJmAzt1Xj|^8DTgX_ueZp9zr0yuJfT{N@$KKb^^fioML1ofKE3s%ZI&hdI1bx)npzSC50%fXgkep9m+M%sPpr>Du#PBn| zIj4Rm0W;}5z=$yvrp8L&x?k zG3Zx`DUuy;(vH#SaRM}Qma>7J(zgsGZz)g@E7vg z?KH&k-wNOgppm8ue<*VQnsqxdEsai*_*^)HwxcnUrdmusxBifD+1r-93Wglw7{E@I zj*>^cm&iML)EkaU?8JIe5)1Er^a!0Y$oUc1!*QW0aVhjiiAyh8{1^|sH}N^eagGmc zvxdZ2*afq zD$|7ko?6n&c=l!Lgf}Uw?~j_`8);UcDO3M+w-V}^x2#0pg@@%Z>x(2s#f@?{$($` z0G+~)x(*u_VP3t1-E&vYZWZZ66sI0ZnPk_4vwIeH|3F_NAh9_BqQ>D*AO+#S`s*sA zom{yOoFX7T9HR>ed1w2S$l*-j>9vT1rUjd%CYcYG^5V$6ODCxpKoN?m$0Jz%O@nRU z6=ZOj4G@DmA&WJz6M^ggjr)K$eV`lF5P%yo7mjT#gsIoL53*y;0zxV{}GtRj-QfFD}-zngAGMl?Xg{KJDHZELVbh`Kgm`!mc zALNRuDV#87eS&cq?l9<*)1EsmbQ!^GpZbP}`nXt{LY+U2?^(R4>og*cQ`0_iVhQK{ z%NvN}j8Cv_jAN_3%l8wD?AXQYMpP%_$a~AGW)kWad@Ni@PJI1|6J=Zj>Ai%U>o{^L zBwg)(3~acZiL~Zgz0Mo5Si3~KxDhPC?}#HyeEW4cI-Nx&596@jk?bH|2XNd@dnscf ze8&8Ny&uO0MQ(k-_5iOFcRHp-J{Kc?Asku0pcZ3@u<1Yy`H7^-SrT8J3dAX&%VjXp zDimi{76YerO-0g`cmAq@2{FjJax}7zuS#fYqIf7%4NyFM(*Gj8GBM9u00MoDmE5rs zcWZnOx}Au;bzB-*w)b%i%Ge4|R>Fbj#k7A0$AsCGXcbO%T$+jA6hzNYYHtli$DJ#H z(whVr6bN}q`;FmpJ`7bK62r~)-ZN?Yo7g_871kONIaF5>1uTUdkW-Cl`(Lp=8>kQ# zN7qE!Rh18H?*pbYh6Kml@gyEBnmvF_LdK3

    Nz)xTP^uJp~iB|Op1py zT&(H3d}wt#h{FVkpoz4Wuq^b&srg}NxOmPmB&0g{nm)Ef?XqRA}5@V56% zN|Ni%(%uk;k*n7KX_+|+jqmsD>4dSG$S&dEmRwCUsZO*HosnqH$5DibE+#RiBNL4p zdXjOlkR4MS`2$O%6uUKQ3DZLq4lb+&022Q2na*T<(~MtMY8a&G>nk)SdyD~-*&l^@ zG$pY=7Vf5bhy5|IXDy~8_DAM*3?XRS*5KOf$-Sm1Qw{DHX% z2D=RtLOQPJM~^wKa>8dE=K{mr4ZWKSS?7K^89Fr#m%K=yxnK)7&fuN8vEf>rs~G=0 z4wgnpRxQ2uQ;V_W#k6(biDpS&{Ck^!2kqZX`*+>^pNLKFs3b>LcUDd3RjO%|Q0^ov zUxBwIl9c;4$!Z>V&VtJqr_F~UjC>`eKae64|}cYx=aA_qHgzFA^u)yn+U zkGwt8Wc44M$g171_=^)cml9!Nw5c@Wu~pHcVJ*f%X@^>ulE*?1$*v~YwXa0pVB!YV zo$g=iKWMUTUp{uxg6mPLs*!dNi_;Ias<#*$3+6}uLlmnim>=Up+J1W}FYy~L2~=LI ziQ)QO?Yr1@KP^ zfT;km_AUTeasUEt+bhS0oc}BvK3t*CxBb3j=ZagR+-@~@_vhCte@|am^W;{pby4Mu zj2oT$eJw5H{uWQE;Sn(Ws2GL~VR&4~`AQg`qqr=7#W6v^+ZV~nYN7sT3odHH(}Wj^ zdB-Z>Im8XtJgSqMQY^Yx@Jjtiedy)WPjg13g+)|O6YP9#(DA{nz+7#}xif7y<*)78 zbu9-KvFWGb2p1%VYW(JV;d0t@$2iUDBMmMHR1EvNUYl0UR9rTvGQnA2!G9qB^6}xB z**!bf7&~`dh%_49nd^eT9ZSLc!~WjbzaY9V=4eNdnnQd+V}ox@ z?98Eq46`(L1=p>+1o0LnZ6Ru>Djkw-1lpj%9IGP5aIHLn>duhmV=Ie`OxCX2x7IVw zOl|r|?Zd&5qsNDwALR0Dg1GxisgG%bL zObk7}Jb(4tGd7v$fD+`JB#Jf3bYJW++wC2PV}ksW%#r%5zf|4XhFAq{ORHE953#qixP7Ai>4nz&aF6Z&fFHovN!n62VvU6*WVQ)=v9+w#FxLs8FHY!Erv5*%&! zWKVpk`nEO31s}^CD!a?dl-xHo?C>yq{h{i`+jpLs=N9j>h#&Y^Fp$`{=R>4Im7!lt z4#Rw-#A#dLE!5~gM6)KJ4z^phukQ*XJHugid;t=y+r2-e&nm(yVo&P4IS#Wy{i59X zvd8~%*5=TWi$C8kGKt@>_-ZLUn8A8r_T}`EZAC>Uo8SmM=#h752tnZOFRn~%OV4=v zKz-5nbsI$KxVzRoM5+!kZ>w|r|-!a35U#dyoq#IOVRa!1Bjf$QKy9ys4~ z2BD7^sBBysA-g!U^oLrDlI?y!+NnC^+j@+jnQWXCf5!3Bm9Mv^RZIim=-|jcS^d=k zsyhvo6qn_JyT+^bJ^tyKpsECd0v8O{gD1^y&#x&aw^vuCqdM=Fc9y#@y4sOvZWfj` zyLDHAIQVRSw)m&KFs&H%On-;Zck2w;UV8gxiJtq`p77Vhm}%oU9dG){s;Dx^CxhhK z4RQ&+%|Q|jT8zIp0-+7ToH&I4;S<}t@y>53cSA4l;7?-W@nYN=k&Nxy&-Y=88wIMCXj_#G*9zT z+xFM3e?saqvuayc0Xqsc2-qp;(-HK_gcoB92F+C(1N==@;!N`%bhUeEEgA+zG^wVN zeEzt>F!NsrgG2m3L=^aXiVo)Rk^ZsUyD$4d$J1T@mDAc5*32~4{U}gD$hEC^wuSel z$}iMD9=Y2^&1E?A%?6J3>JtU?M@9IUsb#ub&q%dxeDDjps!6g<=qZrK99_*hJAZm{ zZanTrRkUW6CxZU*#>qC-8#A+ySG*c%Emo*3fg}2=JGB!!^$I>T*F?K71^st$4r*lA z#-Eut)c+xIs8_FQ`-|uy`%Qyy>jKtBkb}nLV5CX(?hDiF>Nfu}9JQS{TJ$kP``X4m zYE^CqLzZ@>5o^P23R1rI28C{kK%ur;IHutUe;%dDFLX~gE3XXB3;UW8Ds!S(coz&( zY%!KeuqZKDpPhdpcGAh0r~d%sf6bcj+PC#Sz9 z$v`lF%4wg9Y0%Zt8Mck9wa0ab0Uuq_d?*POKSTKiGR?D)a_aRBZ<~( zjqU4pPm$J;&8?$#T zW7OaCMemi$4ndz|pSGExlYC;>+08~#mH{ZMDaU*&dePLhSLd$|`CT(DHZ)u2hI>Z# zaWe(NpN=2Y@Pii86v2|u1!;N=;F$ovEzL|D)Voz=(u&@3GB`5F7cS^i0jW~~>7an@ zPl^Yuj}&I+opB#I4n5fa*)jLN*Z-kczp=l|b7*7CLg>|<_o$)~yISfk*6Y;!XI=g5 zDT_ap$6qFtGlMRlfV5^}yfKIqsIX6onzKDy_en?J!byd5V>K~GuTBh|`9gMP41TYoLO+)H!DF+D z4C=;S1<4V`zQuf1MY3Fi=6AMbYI_1PDqO(#k@(u#b2a3b=uaQ%j$FhYAKvrQF>XBc zjjtLd`6WGLL%#aKrs^%_!vp#F8N-IE8~UIs>VD!8zG%!N;F15EfMjCTYj3d1GLF1> z*75$9*Qi_YV>q*67B_U5oU<^JS+Hed8cK3J1lj#@z4`U*#REZ9BW|pEx#glAQm)$4 zHPbXv=YAfl&nw-~Q7)rI8pam=yhaKATIFv;(kYWw`*P6!4B2Gd$v_SKCanPvN$4Xm z3$e9*v;(#6rS179LH+h-J=>Zzjx_!}X@*-Do!aMPrj!`wQ}Gte8RbaQRAxM-=}lvl zC3O^izUxs5uSF?S^)`3|+7?ZOO`aXxPAzBqo!J=-&pSMpJQ)>v>gUadDeRmkX$tPv zl4|@$8n+GYh})DZHg3=6@^Sl@h0(9-TinwQ#TVzcUIA-I><|TPnZ5r;z#jW}(BQ%` zM3iP*Qb;=}qjrMhrP*$q9F1q7{q}@zO*4x)K0M}`W85vUcnsz^@t|lARU2h@$DbsH zx|G!el-2RGR1@4Xd-pyHhnzS2K+aWM@7u{eXdz@R_hV#-1wnKprIw@P>C#_ECQ1u~;JIoOw8Jh^JiuvOwtvE7{3)SanJ2+|> z1Nfs>GqoLIL-_-iUVU*Dor!Lk3C304-PGDcgV%A}CwsD=>pgr+g`?hX*Lylp6@vA8 z=`K~fub9Jm+&S_aB^XjcUc50QR^HBy$2^-$nuNV0p|+Lg7)Q=$9@Nms8v~fAQ0mQ? 
z{=s~jVC~p*4jnrs08TyvfXuT1(6n<%wfZ2nxWV_T{RV~8z#U69cK7ph_|xSMoPReo zEcz#!z}?(dmGyK&UX(>gh;(R)3I=%t8yxX)I@Ac)a;~H!Uk3kR?*5ebx*@-VyRV45 zPazwp|9I0_v^yDv)k*F?%O2QjXZOZ7XBxA#WJpFY$K(9$Id0B$|EIsgDUDAw&%thjE%CnpDo2spQa6JIy$y2|33QgE=9U zqNH?^q*9C-yIt5)DMY&>QR-b|@9+2b{k{L+dtLwcU30C&wdV9ZYpv)0-1q(YJing* z8V0n99^M`R1OfnF;0yTm3J4|~VMYUhpC5n)06-OhLY4tA@CX9FfRju>Y4I2U%)mVW z!1$2=dY2FV*So3!ANH?f#TUR|o&+37G5ZhfPmI|gXJotH3UF}q_EY-%&A*S=E`Cs3 zW{~yA8Q`+g7+bC6!#B6&Uv~fI6Ap+C3tvxc@wELdSFoSHbz8bRP^o`CM)s4 z!9$0S98F41OV2o&d5W7~P*_xawxpC-T_X^R#FE;&%U7D3uU>1p-g>Y7e#e8(hh2}P zeKL9f@6Z2u@oMDto42Fy-j9uc`ut^j=IiXYxp_su!0Y*Ux4`Y+J^SDKr33a0s-y%{ zQc?5^0zItgxQ>#tu{C^|D_JEhez}PaM-}0gSJik|ZH+DEFJyQ^ulfp8+^e;p6kS{F z+5f+erTjnj?B5;xuYSD*+yULi#X?ut!o|YE!iDMrE>tE}u`rXFi%ar9OUdHWs8~Ax zz5D`y1hU%L*mi?$(1r~`J=q7cd;YJNUxVOd$^P{M(11a}hY6+wYytRZ%YU@b1!hs| zx%FSZlZ($wWrRGFQXqm8xEqbg;VMVI>D^kD%y$XO>d!tOiJo;lKNAYazS+2bsnd69 z?FxBrZ}h~PGdr;kfL7DN4p-;%Pt_bdHgD8OK$DvGLJkB~O}sd52Pd)A?lKenS;r!- zx;}NR^60%}BL73J*sv)3#)(I7W=5Kt)i!AfBM*1`p~vDZB}=5|V5&`qZ=yxms#mMuoC;#8eNTv(~q zK{sg@rgV)g0i->XHTh{C3#5_Rk& zvng!rOhfSUENQ5tHn#tGzn5JDw<_Myu?mt+u+UhBvEYYMJmTaAKH87Dz_H!9l;XYh zeTY;qk8{@s>OO0g$TLx@-GQcG@W{xCK+Ac(i{^}qKijOuzQwp;I+U@CSXK; z5Kr(L76N*0QNJTS&cAH&?BZ+Ib>7_dXYp5Lm44RLi5v6_zmIl9u^#aZ`^f_C6McHw zA>$61orQhiz%L-PJ_vFNMPHiKvu2xv>$K$Ej$WOqEhS=)!$!DKqoqkC`=H#P!&<^< zn^#`~gFoY))zfogbWr-7sV(O`GM|U#c>Szyz~2x+o1*Bby$ARzu0Mra23J24N@^1R zcn#2+9z>4gKL%7u_8uwQ{NO6rBB9-GrOrEhe$1gz1&7*xO z8q$wx)ehKgK3x^`ID2K!YFU3u9sflQK=-Lotx9jr^-55t!4`}mQ|)u|wzLbZ0Ub2t z;b?9jX6V7rgPpS{8g}b^LC#({)w9uMTu__|pBZOzN`i!5HKV&4h@{jM%t{Eyr54&d zP?7j>UR}7sp;mCbo0s{;mvBg5c1ddp7N9dVK{+3G6aiQd(+{mSf4zVWk9pZbfqvLB z9{x4q#5qDjB6flK>5>lQJ!|ErG8|;(j~lnIZ+R+cdewniWj`3O>TD0@Y6l=<_( zuD$5<)euL!FcGT5IH%|eEgZG~f!MOOq(&2evTOG4yj(YVuKwBYcCdR-4lkWHT9Xxt zUOn^+Q2%qM_HhfWY9l)-XOG|Wb=$s*;6eNE)%*gA_e}q6HfJ$cqYYE?$cHW!{~7=6 zyZIb#og?hW*Lv+*lIuru>s!vW$mIT(iL*au#jMp6Ya}BMdEPViD`O(_wjBlZVv3{D zbF8fs_z(?B4jSrxB6S@5<^=9-z=18&S}V(I2ATE!uWeWS0(K*o&Y$(Z)9p&}eoi|m zID|%5bNTA%YWBZZA_EOkrl_?ZUA0g(uii$k8!i}Us%1UEI>)gBCww~qYtP5rRebRw z(QTrNEa&Ay;&GKPc8B$O!hJuRZ>7%hZhNUeY^mJxL2mJ(6zKta zs~2+MI99f1j(0v4+R#4h`TEEED3h%A*#wQBHG?FcGuLw^dfzg>5pm#8%~dJgq+uJRqxvzw>%b(Ph_ zT@4P0tL9)6K6n$03*oLGeNgxDRN<;3PvMe#Bb>%94@z^I*P5XrroR9R!FaWqOYIYz zG?FJm#nqEl^lS=?O{^RoDLGktAG^7**W&3pH;QW))}tItp(zFp`ontFF#m1H2pP>Yay9C(_e8r@l%zD?TmQCV22X-*l}jd z!pQLx4V!b1&&>CbegUt|YT}D)1E{_(F{Xjzn9-7?J_$NZ6fyb(>+YQsqrJg>hCeM{ znvtKY0edI7_Wj+2e`ojB5bOCqUDPjNdu4=|r{>Jh!dJ6bCW&y@Ux14wG%fA7%50aD z8@`cI_s%ppoN4niym#?WVoH3ea`3L3SVwrUUN{mC)CQ2r+<2TZQatue_VqW|3{s=r zM11X)pY_@5k>|&;g59J0zl;UC-bdB3;#+fv-on3kPIwcOc$=uu`|fXRUw;?XxTe>x ze^<{QE@U4Ly$M7lE1a>i2h!{Q?+QD-YGbp2cFrW?5+)+h8BL=+#NN4?D>- zVy)KcrPXJzkd(G`exo(ao?YnuQD?O|V!C$wM=kL*->qvl;Cs{KfR^N@+4m;~MK9wG zl$V}pO>Q!Gbw7Zs$DjGtQ<^zlcUf=mWusiqn$?nDzy#H{Vop$TR=c-ffO)`VCk6(n z;gQ(jr+USvL$c8w7r6j$>zu7Ou!dKuON@*RQ%$AX| z6ekMm2bCyQaT$BX;lslVOld8mJUoy%Bv`76_rPE>n|TN68U!y>gE7dFjSrQ#d5&!x zm!p$_;=m`R26YJo>KyhTt>t0|{u0%fSNy-$jXzJokj}k4lLYrnsI&4+5)ihxIPMqH zajMZ60qsuYNcpEpV7^D0;)0F(UdKVeRWDwPYf+_IN#TQHMLao%{N(wEsD|3tpvuKpXlbaSEV{`f!&Kf%hHnxF|d zm+MpIutb_^?$kSO!qXt3d%<(UsWqm#*wRyX-_R?aJU6lHUCUxa63JnC+arxtnu+?e zqN$0T=^{bIKx7u@FXj{4@$pQJa@xiXSw@o-dD=ZPqV(?QmEcfWTKOpN{`D@24$1*H zInd`N5ZSt!58RrG2@NDVQuBh8()&g2*Q4F+^&YygWMwHcb%i}?I7+XtQZ<(W;2mpI zMrwe94&wDOyk7AeR0ejFaL%39!{3VVy-{PrjA%T-+ zSl3DFB=D{caNm34@EKyU8*~{bYa(Cjhp`y=%lUbDAxjbl6zv5z(7e{IcQ35z*&+yuEAU z?$&GSZb=86Y)-PVA?gh}n_B8ubx?w!YHa5XjOxU4z24kq*BMVk6AQfHMPi9dt#uy7 z<3l+YZ(8~k4Ksz;u08(krE3OZ6_1U2oSH7)cW0zhn_}}Ix3UtN`{3%Z{fyA#=ZdOI 
Binary files differ
z!}Mcjj-1*KPNGu5eETde!ZT))9!Wn!zzd_%7R0$I(%Dz6HJp@`QX9805FYlc#X%7eyaMI%nV9ydNft=B!qk=8DpA!djKI zA!t?Fse?vd8!+HdAC#C^h86;5krT6_AdR{qv`2@VZNyCWobR)mc$!=di;mt1O*~H5 z41YLq8WA$mowcyrAKRczOML|l)o|n}Hk!&tCWH`=c*8#1ZWW*zAWEP5C4nDbtT?e3 zxO)qrd)hrgBm2AJ@n?^Mk7OI5;0hXFI*5in1k#$Xm$aqrySuwxJp1lj=;eR`z&G8{ zb(+h?{q?7zUDm}IVFcG3KqCdHWS!FOK{#;ri>f^HIL% zb3v~RlubXkZhm6jCY|e(&!>atjSYC=-Y2x3gGnjIS&O9KrkqZFCxDk(`X*(n+-dr5 ze0_XeXF%BL- zSpP&Sv>g z(p2os$(TxsL`&1x=yu=>hym z+9W!kw@&#plfPv0k&YbF$x~>dh!`moQIa5N6nJRgKM34tF6MVXg3|)f$ba8vW3|L1x#hUCK)80<@>!rb7|rkuw6|U7UmgU>87`@rBqO zll%i^v~SO}eaiAie=|QOi74JV!4SebbYxyl;@tXzu}fDFke&FUn##+5CioW(a^3bI z-n7}C1*q{OF9m(V-#7yAjMTGfuIp19#>-tY*SC~)ITW=byf-euO z}thk&cn< z>&tZ@051C<*D)r`Z}~ADVojU6VYWn#S5{NEZ+GbljSnB{)X$0Hc6sDf64ve3cq7O% zmilyl^htcHqI$aTNj<3It@!@nJt_&WK2(SPDKgthud&=OI{5FpX|S(V>Bp+pkJW>` zX0vDpd9uAkdmd6Y?dhA6_YGvxKWr9n|7o*0dCL*P3$W{bIr>qgcY@AuJU$A7Zb5{L zQhr-=cbmXl5VAF6d+dDTE*(7o5~&zz5zv!F*Q3q#7qM42ynv< zYivCba)v_y5&jJ^sT`m!rbdtePnf?DF9a;+JwYB#FWXBiI9}dEEBYwFEq!}=vJ%p3 z{1!l~kyk8vtb=)u$=}y$a)p3nG^e(=cj8U*`82VymNsZ|18s;m%5mi%YNu*f%-3kJ zVIo-v46NuFyR?r&o}Br zny#Ru@k#6U-F)N+-66CA*8qRQgb?tLSUjB$=HSPWztgYF{~C|Fp?R{l&fy6N4SPrP zwP;fR$@NMCZlXnpt;!XM_2qcQub5t?qThs7qQje=H( zM?1xz|MCx{Sc{Vs+l-wK=CJN3$0Plrp)pARKRv~_@KUq*_WS3>o7XQGZveOecXdn$ zH#az$dF!_@ZXI50jynsqs@avPGT)$cipKZ3%ze>3qEDCFZkczfG~uHZqL0{5WLUn( zcYdDl-Re8?B;RXfmP!YZv40D8ITlSv9{SA_R2gFGHG==up_;3XC!JUOAsY3J(^%;T zl^PS;2MErmPO8(qdY@usj{s>MIm2;12{MDS%Hn4F?wJ%x$peq zeB*cyFmbMY?ySXJG&g@JiiVQ1bQ$GETf~E)Ssn>dUe>Qu>UjlRmf|dKo@1R4TbP-4 zST9b?7a7r)gTO?~S*mkaC1Z`{aWVygTH_{Z7c|8Ni#*g)mMJ4trq!8b1@nAg;Owqw`4IVxnTa{i8;4^tywy zQUT2O43s?t@ZEjJoA>XRA?t?3yl3%?D21AuV!E=l$Q00_O zZj*f2l)6c;05GTl(C^|@8%-HC&(zL=^5k;|FixlA602H7pY zCYt2r+!KPE8+@gUay}AF6VHeM69g3EO*jCOwDB_u3uTo2nYj6{50-mYaK<< zPw+Ff3U3Y($_0qRQ*$c8`q<_edeOCQZv}P)9<&%LfCa`~#Wb;5TS4P$55b7OgqB0_ zfurDnOrjn(zUGGdh0=Udt-S)Butu>_JX-&<*xh-7f4Y~r95^bTKDbvjHdZ;Yv{|fR zA{bf(_I8T50I2Jm8^sZtgEV5%=A={m>)O(K@!8g|8C&<*i>pCt9a4)}fBE7| zRIiesAFnS4d6I*D@Ge5?tCVkM=klCxr%742DVT9G=|E-_d)9M>O$taC@s#Em3!s-# zqjzcY8QNUn}qI?Bat;fGUEXltHpxM&{kg*jO6Fx>Q?!7&?}x|k1{)IO~Cj6lWt(jraC zd;6*LCLL5lNyrb0m=}m>U50$?FN$6rzssy9DMHhayj*SkcsK%6eav0_pnk|{jQc)o z1IE|1VglWQ7U_``>Zk^lbD`t=v&WC3A9wLBDTx&|EXiAFNS}FR8u48ZZA&Tv516fy z344^UcCjJGW$-Uk?Y9*_3qHQoZ?b*9Ue)LkFw5s*hG!_&DQAZ%v-H!GAx|PJNt9<*4(kqeM4UN z(yv65{4wNoxxY!h=7@2h3ivJ1HK0B9Ysd|1=nF7-Oc>wp!2jS=YX0=3x($r~&xtuQKliq%8;Q7fqdSl;26SQBWMMIQTZ`p;>+| z&W(S{mk!R;A3Q)EIGBCoEG{Hyw}%`F+bec<+R>>mMEDeLr2cg4n%E(A&R%UYiS$_D zon3zBp^20FrF))Yax>=Iht<5(foV92jFs;C%=8q;0}fc|a4KXA2lkT{^ZHp#Vlpoj zCwp!3TZv7Q6Zj&08XU+Ad0s?*Dz(aspY%$*3*=9&4nHKX<;^tDe#o*HW&J!j&>QY0 zO(>l7)@yB6Jb(i?pKR{>Y%~?b1((KL$L5z9u5CU*sumrfPj)r2i+h8 zKp~ju3+rGHhxllBDP8rTL(Q-zd7KV0n?$?GO{u0>u6lw|iV*t!!b^KmCA~nJ<4Y48 z1)4#a(NIQicIarB*fGdW-C~61h?T!2>^i4%Jeof;|mR zMI9_o=i8DY)M4G*oW$6!uW~{qZ9*Ru6yq<~=9BZXKarNcmKbG^BAb4v*z9~(eD>gJ z@uzRz7QN#^v4Yv>uRs3{>5*wL&DA%tLfA3dT{}D52yi|8b76*wz^RX?4&b4V;U8{3 z0DOCdmOtY#_G_sO%aNb{lL$GNerMW@F&*T)cM9hpR+kE1=zYS%!(Kq>FzYVieVj6_z=85w@4Ttw|y-vP8UUZ<8M3|-1U%@{T z2*6hXV+5x>T*dW5Eu|qE$h!w`(MZ}SGGz*Ao4+3`NzhlMbC`FV;d`syOcgu~oC z7h6FK2#wKGEN%Ao8PE7}Jm?)T$MuVS{Ouhx{`WY1L;e|Z{@CSnNB*Q2I;r%CK^%D& z(dYbu;7Lfi#yptsk=EHWDwgkEm>Z;nwgjl_!3*nJ!!Nf!s+M@U(L)ordw|bK*F)x< z{k?-?pEYtwJ{}(9G{@{l?!0Ki2O>pA+c-BRz$56qfLHvEeSqLr5a4|6xOBa@2Wx_^-$YNy<%L&Ujo5Txi>8VV)-6Fh$F{yF)i(CN zdAHSW7UVJjkh$Kw1Z03bJxG703rsV(k@g?%Ez~e$?W8aCF#~mwl(|hOz1ejP6bNYo zyxc`_;I6IpiZ8$1FaG(T*U9pHbKFHKKW-jlS}ZiYZb?Ss2!Gwo-7=3)E*X7LbH?Tp z6PE+;hQT)J;X6*=-ZZr!Rrk129KYcR>-DRh+FnJLeP8Xa!{^Ivo>Ytf^!ol)_5akL 
zxsg?f`zZw%E*(x!HV8i@;(r8MLls@27U6ag7999c90=c6%q0yr!fM#7DWqJWPo@+6 zWhuPG+w2e)vRK2?$XXR|UfH0Aa2}gkedVYv!TN&`=^NX<{ew7gLleVxC!Koe{H5uE zJG}w%Oi$5DG(~%bmz3*$zze~LG{-q-xTZxgZ-i6d^!Ctj3hlF#O7Z5+TQ<1+3P*MXZjQHj;olQ>4J?mISu=B0^SfCH{(+Ysr&>cJR)cR5@%_OlV zz^~9zuyG8Nd=p*r&9+pCBbRV^udf&1c!wsP0tA9XPHJ5g&fA)T5J&S)9dI`gghTk@ zdH7F(&kCdRTVW$>hDApH;aLRYo2OyYp7F~+Qm(YheuWAdE!WGZ(|n!24rjywn3}Yn zoZ=6Tbn{lm$P0$#9xN5MdkBY7CVu146i6axl;|IVDHF{KQ$cqKoaH@uu7Ldb(f#7( z?i!By5hRJX$%&aE_}fPCMBhe-j0MeOmjS_QXh1G6Ez=h@8)7_QO1IK^fOZ3*Aev?4 zI3>~%GA$4%dZZ|*he@{kY*46F=!Orr#k1Mk&FZ(6G&-gMME4Sz5Zl)*FOzz(J z;;g*$^_$IV`l)Rj;5M3u=yXI|+e!-1Zh=!=d3MGN@2N2)FkfHN42&?BuRge5NIipW zrlQ9I4JB`;`|nhgYhPN&nA=DZb+Mq55f|EHnh46zVBc9`XAv@AG+Au6&^pA@=?B0ZW5$v>ZkW}yQXpILq{RnV`}#>PA&`5w2pc3G8(FHcLCZ@(NKa9 z&QB-IOMm{$p8+Gf&|MySN7+$4MX2GEc;?t}y~6)>ICAGjgojE1fMr6LmY9clakF=@ z&wQft4F`eHpRC(=&z=Q&aZ)f~%8Xw}^3;^uIcyMS$I~@A$ArnF+z-pf3yS5zbo~5m zlIW6)F?9BW)=jiWd%ae)bpSnpc+1ddJ^O%1r>DFuB5u3QTuwb)$E&y^=ysE4SXWSo z;LD528pZFpo^IcWRsNZ}pD7M7l{^xtn!v7DBZ%v_;IHdP#W@|xj4FJ$fA`7X_7>=}e`NDTxf)8O(UCKiQ#)z8gF{ZoxPFqem?mO_+rzXm zq=7sip2c|QmF?+~*GGV6$8VOM6Bc2?fgha%E@rWD_|Yl(_z6%xeDr9HKZfPvAN~P> zXzn}GkG~!Z3O{}hcp{(LR%-pIu@##_cNA=(bZQ%ZJ`}Ln^!d!-rqnl`#|Zbm{%UM! z-8k2I13SQU$PKh(eQ$3U^TK2HUTmNdG}wK{x@bP0cIO+^x@EH2R>*g$8dVO9jjemd z{tFJx(`l{%8vis4bPFoL)l=9J3`Q$J5XZH$i&nJ)h~?^}H+(UfBRf+!#Q{F>)6H~t zYM$ZfIpVk%3A#t}O6{y0HO-2{vPoi<;u7+M0z$L z0V%?(01$(prBD&^ z=sUGuoMOKJ`puiDn2?1tHz-f?L-pOxGEeZDKwmY9zj{D3p>X@lj)({1m$ z4xz9>+{u2Ghw{ZaPoUpCEH?vQ6;yzi)^|9ub-j4|22HR%+rajbvL7lqH>Pj*2UiffyOyPbGllZ|7u?7y{t4b;h_m~nDCSv z+?T`hC;&|O=(qE$ppol`!TC37bg^!bCT2mup&?$L7>98jr|m+Yr%5+gmSU}msi7rhI8{);M|9bX_Fx_XfP= z*#K%hS#g?|5_8+&iH~1=v0wc1m;1#Z{%{W_+%~6=Fr71m6ZJg3yc?2RL#?rfqxGMC zpbyqLF{*`T;PjxnE??}!0+0bh#>#o!lvDB8DKwYaR5@R!Q8@MR-Iom6q6cuf_w8CS z=sJ1aB9nI)vxu?az_mG$7o6CPT|3p!Goo|I*49b!U;pc$(NOkH?&IR`{~n((%u9=~ z;J{sTz)fM=@Vb|9bBiF(+NE4SWFs$_(ZKHq*b?^y71*f8#u8ED1bt|t5M{`w|S2A&sD<=qtXEYQrPMi>sv*!e1N7i zT052(TWAv!1iu7X!pzd#BE$5|&956?=NUJyuHg!L{fmw=4GSBK9M&lE_! zFTalQi()-{0E=C&Z65`i(^DMS%XgVa@LM54pD7AVL3p%b{E0ZTe@YQz@_2GFi6;HF zedJBQjX*SGCSYR{_411#FXioJ2z&^Hbb^$TDN_|DdE}o_H=>1~8_hEg1ox$tR71>Gn99ozO603>jzi7a#%D97P z(TCn6pPW8_&&w+6a^XR4i{Qw)r-^rh1OXV8Lo3}q7xmJg

    O3&x>UFxg9R%tY)bn z>y=}j&&8K72BL(Pu3(01mrWbfqjqZ1rhd-`$*OWF#aTa?&j(^L1`|*aF-VwcavlrK zR&$XEdky_gv;L>QJ}w?U0hIX*ewhD2ya7NnW{}eMp+jOhCnBTWwhAby22`IH0Jf(u zvG|I)+0%3n0OSSo^hXtd4s*A4k&jDgoF1PXaR3LtyTbg}dD~&ZI>CAmS!c*A+EeedhY$Tqj@{lL+Nyo@#dKOQ8VU$k>+(@sf+oW4)qbM`-Y#9+3~v z^+9RewB`l{sa)Ddf1tLUz-@qNwo$ZG8HJxdcWZDVkW~WVnCEyIF$5Hp_5it{QLS)+ zuGH@FYUV-~H@v~AgQqyz-o0CYTb}y(U;fM2H0Y@KyT7~7_Y{+ENa05n-9`iK8l1SH zNCWiN^Z|-!!Z)uU+&a&nU_!y6#&WM{uHD>!Rx}6gp^0gBBzt2P&^4Q#LDAVdQzO1n z9uP4Ehqj>RR<sgoLD7aQbwlgj#`)>Ui@*%gl2G*=KZ>N5BWA+5V69WdewsZoE}COf=0=SKP6qM zUz9&Cd?{pvfJC_&TnTWI`$P6@r(?`!`)?6Y(C$N~(z$f_+9D@uL3a;6CJw3vq5rca z3|7(10;pa2oLB&^ZkVD0jcdvT1}f-OX%IESY8A|*=+KnFbIh0Mx2L^B%o(4<4}6gJlbL`O&;fR|V#2+X!;%zb}A@h@Df|DXrUO<&vws_d`1;9@q|-#ZQ?N6TfY= z1Zk?yB^)YH$28Rpyw~8K0XcJ`RQZMY7fCW$V& zu&+jcz2y0IZp+`Chu7~6*N8I713txArfmYmw3%Yg^yPsZPQiKC@#8G$s8YjL6G4Gd z=ag_JPalv@=l0pa*p0?=?w@@Uq(=R?&$D@?JMSCP+~vy)iKKm?DSYykIyaZG#{0U7 zuxx!@MgL$t3!WI#`X^z`HDO;Lx1&4(8G}Gfm2&MGgBmBhHkiK$%#HQK;$fTdK6+I= zT;C{89^NZne(Ri#rWnBUkTLDS36XcoD)O8E2YMWMlR&%kJFQ^pX1EZrLr;Q$QPS?R z{q*f7xTm>h17Cs8%{!Pnp>vzL!9_s#?2t5F99Hw*wDfH9{^nBLBEQ{)W_Bv zcF@np9Mp0=JP3bpnu4oUN9*x42jUo?Pqbp>ou8e3M{5(ooQ*;%=%hUX0M$_RgiB9- zl+Gp*TAKN@#yJn9KA!kKZ+1Q_<(gM5oTsnjtQ%h5S?J-2-3`@epB)C^b!Qp>=ovZL zB$LJ=AWKaRfax)C1NwX!(^vr=MI&zPj&Qmk-5{TzdQ|7W<_afG=|}FSymau;&-wt6 zLpI@A1@uNdv3vnH4qCWXpbK>OTL4}w?o`q$f2)iZi547KaNvjLfNu+%%>irGad}$t zxcIBTdRzSG|J*G8+kg8V8x|Mdn@&tWbS)O;{ggOxg1}c}gRH}PaIM$~+QX`DQ2Ew} zMH~MaEkF;ow0gd&Y-L2gbUr|CKP!6ObeS054d)Yi{n@ zu=ah^Tb5;cmK{fzQ?RnmKTq`fm$UWpZ(OQEJ_iD{H!byM#)DWNmQu#z}GXa-;OX*pNUsNRuB)NXRZ1+>l zUmzwly#z4yr;;_oysvK*-{LDk`7G$>T9$;vU-%mWr97n7JpgP#lfP9`uxS?Tl8#w= zymMKYZdZ9bL?f^}Mr#3K$bJ`XWp=V*5-hVUBbbzcQ3b0*}{jwN!1r#a7vf%A>1)@&e>EWZOKk#H@gv>BEtFlgP05WC3E2WcyIE}8Y zgKsk|0adO9rPNw@S1>2bO|mC|NJyb)seEQzXx8N?(`bws?Xy+jCO4EU{hk-1&OFQU zB90Ambuc%RT1#~q95WSARqs0>gog!MVyxyq5NQA+;Ae$#S!X^Nu&$=jYQnn<(|oy$ zT+{e|S59fZeOIjfJhVt*VrRKiw3+7_yfL4pewbsGdBRi2wg7VQSl$8R?ayGaJtHWj2NYws#zl9*WR|c5#n>SzqQy*6I;!vV$R& z0PS4(E<%#jP!+r!04~8M)^;4-^ARi)unKUCsIGIam(C+zJNIXxD+64i-37n0mX+&- zJ;L=q15!~R_^SJZC==SwgQU3^yM8+T^l+$u1n}PC#ZH6vn+5^NKJt!l|I7)~Q;%&I zz$^B8X_odI1 zCx`L2O_`?E8WjKL-@L*^tX2H?|NbjB2fk%pjO`z#aX!I9JNWp~%&uNXK*w3Hszv&F z36aGODj&g&wN=&?(4xKly>i3M9*xcQDcUS<2HU(%)+ZIOj6*<3n`g9sutQKF@-1+M z4xKlV0aTH1fL?4o(q=hrVNda4`IkSg7AL&CL3)Mjq!px!4LTd`o`Sf@RobjLuY~50 zn^VgP3R7cEU$e)JEs{D4vFSHM?kNcQEjVC(+c@`SkB-(50x`d{9}w5rtZ9(D!t|UL z8W0E+(&ENGe{&w@4Z($lW?c6aehvlnI^`tn-F)KPz5f;Nf)0iwV#+f?Bp zw0p|>+rC5EUExdfn)7wJiq`Ybq-`cJ59;&n`1CCr37o18(KY)peI>$l#kdje0R>EQ{0l`rd&z+r}DC4uwA7Ji# zpY~b~6VFQJggra{&${^5X>Xxr_Bm8x0>%B=)|f3fX-?3$Xfa710@g$ynvE9q6pV`c zLVxZr)Avl0>zk8L>YZFBO|J2jYa+KB1?Sci4 z_c8rUKuFM2lW0w&@7>>`ADpGhSB`zl@RNgOq;m3|OTKMB=Qq9+0G09yjZNv`Bgm_N zLi_o2pY)mWG_l|3;D}{_JzvHg9N_1UQw(S4U&l>a6D%v`<(!$yuwToS8ZRvBXxn<) z?sBIW8iKAPb)&z3loE`pjFz}GIzIoFrUcF+m`BcmqGdF1bQ1hSeCHdKnzIk_w53gFA zw>8~-B=U_i_#`6hze^1-CxqLC5reZ2=ybc(jBEO*8eVB~y+JO_0)kxIf8HpLUN2F; z)6OD%Y8-Hr+2NF|=}p*ArH<0BptJ~AbHIaEo;<;P#8tTrYnWL7+kgAA`0l%v;(!0| zM{Hd3lg0Of1MkNHeYUXR#F~`^8S)c^LAdbLDK~;1`CSKG8S?hGfyvNm1(1t1MvW9r zC_2mBCm&CGTtn9<~c%> zra1zcf@(?o0x@kiJCi_wRi_oAzuAAI55x=n6yG><#@teQ)A=Nx`8QzK(E)&N0=3mla|CY}^ z+9B-oc-SDFw9gAshp6ir?bC7X&*%3Msjl;Ko4uFw&fh-GX~P%KZ1g+-b)N2+Hz-Ao z62bT05Gx5Z_dGpQixp`bW4Z<6&d0`Q_VON%u=|dC?y>7HP?(H%m*8BVJXZ7Xn^eX$ z?t60D^K{=NuW6cYnl8n+-u8vme`BsFGlI+j&mj~pIG+)Ve&rz~@j{}E=T~IGA2oOd z?gay!+YZsZdHwngUqd54J8q5z!KJ1>Xa0H8^0aq1@PDc`nP7l6y($MC_r8@Ja>puG(} zS;HtZ7wNb$p1;l4U|+Ev^EebhrGes^FM%{)l$Wkou{Kcl@zF8#CeTZlOrbq%3j37* 
z%1T9hv#)4(mg4Xf>!nkEwd`n!DC63HpfR+yoJXmH^{}4Sqrp1hAqu&o`tQmqiEj)Psl0=r7FrI{8*h2?jUVItjGSAS9`#`3R{hif z!JBZzzQ;+Go>n>O^*2Z#+FhIk+AB}Zyg|}Q)JB^(=-blMa<)g6UxZOsOI0mq) z;}F`l(Y>bkI}IJ2SNOPZ<87`cVwGmGjyBj3O{ZR%xVU*Ge%dUBK&oC=1;Iq&L{KMf ztlj1kJlzT*l|RrRc$MCYpMMEuc7{BcD3G{WtqM9=phOyn&2+4Zq1nK-ds=8WxCv() z9`ObCq|tiCNsexC;YlmBvH;&s$xpMCso)!GPgxZ73Mcxc2b+T#C2{0QDwN)a#V#BC zot?eVinDaJH1oeiXPGfcvv^@0bDTK6Dk6GM+WoL+jkL`HT|;;nZ^dn6 zX`(F!Y2>$M-9<1LfePH{hv_9*!}}6kh=)_9P-ZSQ%(wgnjhgZT(*_?41XbITPGHV_ ztf}o6y)}NP92NG6qw_YvlHvHcI6B!0TC8e-lspIXiZP&2r=P3PkPvXGk%n$`ypNX2 ziKi&)H;PU}`AH~FrA+_L>u0NlPo7qy&;Ae^g^t%tLLM8JJf5`0Rq@g=V((#_!WKDG zj?_qRdq)4^VYIE^mlb%){gkH}n=*ecmjiPj%5)o3e_i(1I4KbDyuQL%2Jo^;^+QAh zK%a9O?Mh!!cGX8;%{J7m3c%4h#2HR{a>!-n-`DxJ29o1wabBEM>pF?=Bo2i`ghMUW zRf_yj?+g++Udns%48GXb@-*6)YYzYj5J29^m6FF?MUntv;q@Va_yO8o$K8|Yli|et z!Z?pt0g;fIgxs73?JKsPZNqm-bI>L2Q4o&}URwp3N#258p0H^9_(>XDn7c72ZR4*> zz6;bJG4E=UugTi_`npLYo%6i;tI>3DuuFcG@K5L(&^_%2uq;rfcB>#u(j>Di=P*fu z{_#oCSwcyt4TywU|A@6`=y+r(SJ6COUwc(N!oKm@^F4+=ve_{C^s+iP4N=ca5=oEi zqjfyQ;e4lqdjTv1bEnu@+k-?2{+%CGd}++S%o7$+R(mtdFI?gR93)*w&&gKLz1 z+S6JmnNmp1GtMn>lbk5~y#cSF>W=@|2{dWkM>XF1yl=2$`Ln#Sq zllnD-o2-+aPO<+I?U@^-yJq%_Xdf8|ZnFXVSgP<-ru{?n(D~;Vzt}JS{l9<4h6T?& z?gOWShpznk*Sp2P`*+{q&+GTRz4L9^&wI|EV$b68mK+$Kuqjaxx~e+Wm&MQldd^pn+)wde?8Lg6Qbi?l0` zO>vdZt#kU=C=)N7tqT$%eCl)cuCp2F5X5{F7g>0-z5MEN`R&@LP`iTCDXWk|)*G6+#(IGd?8h)?e+z~E(${zi%TO8~oY#ux& z#%A>KQ2P>@xFqDZYjWD+h;nnCAd_%;x=XT)+c{5vME^P$Im)0UYwnnGL9(A`GIJXH zfWSchtq?isMmbM1vpnwy$@xCtrV|V_Bt9F1-@)@p7|-&b^K?g9K_7n*q_M>++LY9U z_kH^8fZ^609YeQ$+LwiltT1OqLx-{H5*pe_0F&0ytftGDzRonf>)RT{P^4*`{|0;< z@o7}OEA`zxuQD$5dx+jF{>hkCsIybN)1qU*dZqpo97*#w{0OAhHlY%>4NxG$JtzKg zqXIBdvHvq4*kqct?_(CIxwDxCFCF`$y=8D-Gx&D|4Fks$G!0pE0(=$(mNU=_E)#Sm zVVx9|ZwNXlFxC?8Nv3I1I=>m$Jhx7oT6+?q+RFacSC<-JYA^+;K|1|hIc5z4XVyg^ zE7Dj`Zma+rHR;!aLaqz;_#JPo^7;;J-kGkuOFfh=Ek}tncAe*NoVzK6mI1~@oHp|I5_^cFVxO08N1Vv)96V$la=mD}uRuF?2~A=(y3{UC z#8Effh&GQGSJG@M3XUM!K$ zcD4gjg^w@D2LY)bKCUJexFwl_Twi^)jRWC-W)Q(1e2wj)S#xmEHj z2##r5X-E9ipVo?RzFCh(y%6yhHwHMYA9U$pE6FzZ7FnDnncr{Do`1qu`tNG4YChqN z=45gG;3Od6)dhG;CrRC$kq)TF66TC*Zd@~Y^l1j9rj((&?TWJqpDYJl)D=*EidtTu zY@^&k#ayTE-#;n-woc&W>XWjJ=UIsLw$g40c5ad(vRJW zDm=KE42QHJOlkpTN4Cyke%z2HQ%R*9zyUHJBc!<5)U3in)26)y8?LpqA@oQjW{s*^ zwHkxs^WU5lpZ~&pI86_s0Kg8A@01e>-Kcx=UWN6ZeK6l_gxQ=|0pG?5cTH&d?Qd}A zzqL}l+QDC$dCv*4QD@VfwQk}!HF7Hvf7n}`0{*>4Q?3CpsAfrBAjrHVIM>Zfw%fj!fO|Y%VJaR+A=MZ>@w?oC`-#K zHfZxDK-(sQ-T;%H0MBqRT~Qmz{s=F;7j#A%c*j0o?E{4-1w-=EZR4@OrQ9grPW5vQ-X7&Ty<{qyAN=?53sbi$EmIR zhX5D!WgeNGF(OPZ&VJewF-w={e%Qp`XS!5N-s=8eZv+D5OG9x1+BbSJbj68af_BOx|<01Yf zA3uH?%hSzGWp1{svyJbYZ@vjHJKhP&JVmGUbLH9xmvWL|VkC|Sjo8D?=m)fRzJITe zZ@vf35f+{^+ISD;OnKE}XG7d+8wyWQY4~ zMed{MTBBcK+{Ic?C1tF`Nzo>>LM{w`GG>1CMAKlrBkJAdap24jW(i*vqnXW0DFAN?AKq|42J`)?cvbQ5kU@Ddc*MM!Yl zih_jU778yFE)g)=3GgzL+9;|V==eG0sGK@rg1WbZ5H;Rp+$_EYGoN)l#e&93vguR^ zB#r3}T(ufA50CtICYebs6JWP1m`2MW4~}tWlT_`vtOeG^frg_ni*-YXW8;@sd(EdG zZ#G-&``NZ+eoJeVO(6yNL?U03L>a~e2)xS5uR;)8HNopAKc;-k2mm-FuR)3b%+J2|@>G?12fY_>PzGL37Mj-@~0KbIo6#l}6S-lriH{zu@)g z;db-gw@<=%tApDpkf=uyxgsfwl}bg9^sgXPn9uMa#EgQ2Y*cJTVN8Bfz;fy%$ZO@b z3ddqe@w8*;+hgl!glhOejIUVa3rlEzU_xH}wIPHrRRY5cSn)(R!ymW$x>fr~?TwDZ z^$TeGccB15={zjLp1*vBH=zmmE1Ql- z$M79WDYmiQy$#RNb-I;JMR_b{qUb1PE8EDdof53$^kX4neMw8A&8s|k`V`A9w#ZqV z9!QbHbWnEK^(BOc&@%m0`|gx*so$DDG!yn*BY)#X_C*)Mfw2TdQP*X_ZRX7a<8=HQp~88hq;;W;hF zE(+o0i$0!tZ+(k9MGYk5~6uB;3NfUeRWt36Yvg3+rV)Dm0X5TFRqRirL z+HxU^!%j)Vvq~tpvuoLD{P^x9EsaFipMhK3PTBcZdSmelOkr^!ih_H8KpmpsIzGPF ztPQdF!m>-t@#>$A=%<3PO)Ct?n}mAhn{S+3$fu4^S#;sM%6QX2q;`v?bSv?Nk$cAv 
zlydT9<_CFS7`otIl(KG9e`$r6xcIZ7Ld)EXNDbhYci@r=_SyRv1sk_&XG5na>DO1|oIU_;v#Tp9^i9VZ916T5V> zn}}~A1u}u8^RaVwg~IFG8pOT&^m<#%@T&0s;Ddwa@BjU8p@+T{UIg{H`S|1g=AZnN zU&Wu7rw`xw^Li1@ZoXec0fjkNT~$aJrU;51j#00t?d7`@GA+H@38Qm_F2adh6b{&m z;Av2wefkNLKqiVzHiy#@+b14l)ubcbQGQM=v_8sYmpuDQ8^Ck%u*;aK0x3AqrP-|+ zFaOFzEN(J0d2W|yCT@{o;mG+6T8$O<@$}e+qlK9`c(UO#E76|*Xpx;hA2EJU+1I2+ zgxVvw?;WGS>UD?wo`BnKEE=SbIL$+gOK??~Za*aspj3iLTa}4kRqiQtIA3r}VOIgp zN$dj1Ic#qh%3AYHpD*6bCH?GQz(rW^Q{D+m88S@Td;@r$>_>R#`zj|zj%GS9zG9#Z zDkimjP(Z4}ug`8((juq|HBQat1yAQRUy)TI^{Rr9bhR=<>7q3jBAXfT_H-U0t$+Lx zrQpGq8R~x%N6rJ7z^%K{XGR7+Cw-DxcIph;%DSGYcr@EVkaR1K&t#CyxTU`J2NvHs zY^QRE$Aj;-Il*oZe>7{&27Y~_MA0COyx-JuT)r96es$O^FR556|CGI$FI!IjK@kda z5+p++Z6_;wD(+S6A#7=(87bEP1jGtmhWk-DY@zb=L=+u8QK` z_N(8js;HJoL6Jli)e3d7xk_jIE3Nm2yTd3|TzoCN4O%`h5``>q>a{|9_G)w$T_1h) zG0x%d(MQ8!bAky87$~3YN4?idG>bEg74|EJQYR%xt%E#FLxsl}UeIUMCjKPd=J=HD ze8Sf9>(;HiEFAC0$CvN4xE*8hTP&RlhgXjFyY0X&`lX>mhviXo=l%|k>%V0CqT6LT zz+-@)FY=$d5TbPw{`sb`*~)|iUY5UXHt)}xyR%Q4FaHMKkxb7}+jr$tK9|Q!{5AGT z&55si1+7b$=&MxhA2WVuT&KOdU?MH!!hn=h7GR1Rnl?>Z8V>0Sf6FV%87j9_I67aC zQjB*LllITt!OoC);gvd@ytKm0T%Y0t z=|BJHk5XY9oR-(}bdXRBJDq+)od1)-;(#q6{~yf!breuWFs{FXXp(U6Dun44YodI+ z>fiCWY++o4FO0L#7s0LZ6i!}R(Y26?TiK%USnQn^zF+p%g6eQ*&N@jcpa2| z{bi)hE7H+Ma>N{4$MI7v?XI=Oh=!!^u$~rnX=sPx57mO;oDlStKjYUw8l2(-}}7>oXm(Hh!-lm3iJ*W0{waY@gIND{K=pE_}x5# z_$q|FiF<7deE#FlBb+MK>7U||m1$RMt5ByO6UP=O!3vV`=Y`;}e-5|DJbb#>{6ByC zRr6@)xVg>BxfT?j{Pd756^|d{uzTlIwr~ND#iWhfI|wb0Q8Kv7ig1Z?X}Yw_kf+}) z6fjJZX2-WtJejAtGU0SmmqGT$17j?YP*!PZWm}qH;V{IyVSU52Ok7#ia-zSEg=~it z$kc!^^-n%-r5xhtMMuY4`g9?JlU=uijAw^As$J_bw?H`|7q_F<5<8Xpm~2sWlNiJRrVj>rJUmYdW^Jx(BN@QF3zgf#L$)-u*@X(GB1Qbx#Yj?z;3 zOv}7=EHYNHTADH+pNUuMVv!<6h_ei_E+JtbVR;#!G?qst**@Er#>XRIT3$*;Ca_HW zxGDUuY;rREO265MPDA)e>oGisH5K;q08kwOf^0(#`+zZao1x#JD+P}xm>DD73$S}{kqoAe1p<7kVL}3v6C0E+Xq}hj;RSMa+4{>ak8=tB7};CvIO#Rx-bSQCaZEoitT?u}bP&sX55wFdFrATh&rPeM=h%4Bb7J4GoA0XF7w`WDsy_|H^NF*^k*t`QSdF%s9gYowdgOf zmfPBCw${FFCQopi$00YdU9ymQL{09)cp2+`h($e^Ub$X z!GlAvwlf9&SIWVk(wfwSKQGvimHyKdK{kvk(?Ms;MtSX)k7xsqi%}*4UnO9Tr170} zJeJ{s<7J7qpi)deDPtB2FYl?7dNQW8Nw5yQ0q$IE=3RP!tIxq|Ay`Ham(ZH99BM*b z+N^EO1ut8Oix&nj-c<-*OI+}Uxn}YZaEgM=yr?hQnXUtz%lO`Y+x8Y1@L7V{q^FRK_ z2iZb6{|HhYj`jWY)1R@r{dalIMC;-Ih359pN%NO5?FfgzyD&M3p)|0KN!+JT`^^_$ z+-A~3qr`&dx(P(#%D|CHR^Q+Zw#L@5xORoOxNi#7^rZ&0u*_0ij(}QipsX5xF!lW$=)IgI8eXM ze!S|W@9YHoEvkZ7f~v}KPi6eWKm3OAiA^AxJzNg7A@M|k5mr!CI6K|jGAAS6v9v$G@1fg_uvsfqQbZD2NnS&ng zZZuCGAsUfh;YXp&DXU|hKsz)=ycT^w)0}Ur*B8XK!LLq1A*wa;z5>Ua2F5w_{JYRCz!U zLNqogu<(Alx>E`I|!!-Rz`Ry&gKz19 z&Gbi~efAmj8F9aoG%CT4;_Hm#K(N*_lpOJ&+uv{Q+(ywidRPv1!HQ0wm;R3E(^TkK z=7!T$dt0l;k{0W+4hu=E{qLF|e1rmgd$W1Mt{ggHA{GkG`{*Ptv=hwv!7)dk_SXh0 zO1i*vE?mX3HVR-DX&AF<8-BMO{?hk*gK|(`g0bSF5A)>RK7EyaWc*u#k9?moNPcc< zZ&Dslf-h=-=+z%yjh|1Xb^GfCT6i+50OX0ZEAY<<-NMO*_)(IZIBBy{Jg|?cAID!Q z;KFef)bNbT&YEZVGa1_!O1%izIYseF$JlJUq#V8QikC%Vf=mFCa?+`iotXle1XzOK;NI*a9^{-* zqD*$};d06oH*P(QPa*NSFgTDFfx$k;6<>Nw-ud1}mMJ##IE1HtgePQCw`PsUJ&uvT z)vTff8FE6STR?1-`U9GxL}GpodQRk}MkoR#VTqS25D?_yRFCXKq9TL5pJe&M-ATwqy?qo9SG2V@Qdm(i@DwUjS)MXJP-=E2%pq~3pmBiE zIKa8Gb(-?bBb?;5c&G59;Ang&)8Z(<#E%$tviNV20JZoE726N-ceQsAMcJ)e_nP&! 
zyTN-gpBLYYgi1r|5+7}2tUWw^4ZQ|6V;vB{ttGlo3F-qZVyp-FzP_^2eEiX82(R!Z zr%(bcze@Wg@{f`YDOFmSr+(?QazL*b_8+0_%631Vtpn*EnzlsH!bHXB+WH29q$g=k zB2ad5)Ql`zP@QM?=HEzJu$Cmb_~h+kIQx9^t^Fd;rHS!mrNJpNa6y^ z`tq~FYv#`!j&O_*rHOQ=t);H8&d{1kze}Y%GlhabmL8nisJ&r>ucNL97Z_oUzK^;# z#$vc=aZX&$^3Ca#1>d?YQkaYQFXr*G_gceY0<<-R?*&}#)9XUN3pDrc-3M3Os*$j2QprPxKG&_6 zD{Nyf{=SM3FW+>#Ta0`5El`WNma7EEAhPC$M-fc zc5uWZyQUWY`vUDw)Xwtzq?+{sq( zkYsOIu=ZXe97CtkK9eRb#he|yPl4ig*-p5Evb5Cm)e3(E?~S%sVr7+9dbQ*wHf^}e z1h~t#gwnHI0tjqqy=j%F;_GF3{1Q3dM0rCfVCVlfhPM^(S50fHuh*M3Kl|AePF;ME zvF$o}2uJ^4|LmXr604167JxQ^`*!lib;^H}=H)4H59LE9=Zq&#GHs)79dIJ9Of-9og~Q@CWZh$WO;A(ruN;x@uqPjEI}7w7g0IHS=X zCx~HnfielL46r#F64pJ6Q(w0rITq@3MV}kYP56fFip9)fQAS;&=(Q!d!X4mT3_dE< z_?V;a0q*upr@F*b;8$T>K|vv+)Th|EBAuF{JmP8z zojYsSfpUNtv0iE?`4xx7>%6R#!FpA>;8v!66ips&ALW2$SMf!b1%Dxz{QavCwzs>> z7T3qk)7@=O5Zh@!`d}8{Jqn+f4-gDXy<>!ImZ0HPH|GV3P8S)9YSBqZ#lK<8nI1`$M~PQ4@|AO z_ICGe5>bSyTDv*)50qJY``_nSPe7E*lqt#si>fdyAIo%dzIc=;bq2f^AoPGAa`1$_q;$-k{6P}l@!o2)dgN!kL6Sw8m z=HA^q_MW8)=AuJ)6iKU4l+Mjgn z(68Gr(pToDw!Z?$a<-rYUq$2)MUnl7ywj!4X>sO>hbp4(-KSmRb4z9VhDu_Tbmmk2 zFM7CuJR`o;7JVN$(2v*ceP&Yo!opg$PjZJ>En#`*v-sH;y2Vi|RqI07RpFJkPyZxI zI#?!ctqq$`@9c33ceJF=i5OtyX zz$;^GhZ7R{lWZ$5rrsn3;YFww8kV+kUV4>LDzfCA8Fc7m<;V)8Ud_X2g1J?Nm+jt# zoP-vEC}zu>_H|KiS)TC7jD@ZAN2Y}@csTM$-sgo{#>@?fARLi!OtZe_3xlxAm`dFV z54wukD^qwSmrMMyAew1vA{Uc_ zH%y}91gp9~6`%e3E%^;N!>KgL}Am4twDfad?A*rheQRyQh& z*4EuEP~xq)QcoY1Eo!_BJm$*7gJH8XeG2|}qIh!xtbnJVuQ? zDL1(&(DkrT%C<))_WFl$vxazH*ry93P@o5jF0I|1cDwCUA=IrGaZ-#zEdm>JimZHt zLRQS7MFi6TA+9C7LSH;UQKa%wK}s6vFRMfP_&yS^c^{_8U1~3MmjuvSMW5GY9FDE8 zdn%W!`u++%MXS`};#JY71_3F&C{+7*)a>j%X8v=+pT#LY@fLzSV7|h3r)7`rPvQ_h zhZj;I(h@$4(xj%PY{B1@?VQZmJmjZkz<7KZ9#haAW3i%gG=eB-svr>2B_`DeSoA4K z-NVP-DuTU!mP$R>%--y4ZU2=Xsem&mpXOUBRq7#sL%94&szpo1Gho_OXWy1*^U!L} zHhQ8bMWF;jS|H6huwsNiwlQ6re%UmDu$>-dUcG{EHRtHWrms?AsV}|=uOP{7Ged#e zJS==EBF0@_#tAxBQ`VnBTG$TU4rhNMJ6sgf|M1axD!s)2wFJQ0)^|BTWZ;%A|AFsR ze~gq z`B0#=btnwemGXM0Ol{k?-yGnB^6nOR9X)1855BvwZ0@oEpfYKu;w$l?7s{@uPqAh_ z*lo7hl6mjWoo2u|W8csm;1`N(xA$H1!AAr7uR-&42mi}UTASIo*>~2z+$;pk9R(zV zthx`!X7Tm(NgeciDy2N?;*bvDwJ2R);lxAmbE~x$bg_N{P86_~6T+0mgA=+)!?ViW zpi!9M6h>ky-i7XtH_Q845lp*YcL<^3@nXuwKos(4(i)6O71~&m3X^E^*zU zb(jCh!&F*#9qH#RuwL_@n8gKAnX^T|Eo{yD%~Wtz>~Nik@0C@y&ncLkG{lqdSGu!P zqBH9bN@skfvk~QzI`VM)0%2Zs3kyoS$vzXP7sY+fchg>n7o5Cj z8|rgl-+X$vC{QLLGq!#2y)^}^6O*#K{nnuU+7MMq_1R}T%};*vw2b91nbUy&|K~q{ z-2CNVK5G8!fBl$2ybk>@3HT=YJ5PZbLc1%xy{;B6PVL%ZP{j@tYbV#0oAi<7#Lw+H zwrMAh3Gkar9He~snZ+}NA}9DuT~2sI=yEb?TrI{X6PzUsxF?I%dND zZBBP|S~-VXptpaWOp}M82*!#lET5GXm_4h%@EvYln3y79(#}fI8j2KbU(+hepi|yk zn@nE0x718!Q!cmr<%FvvET9l-Jgf-U`v?mt?v8MT9VJtn86f+Vcg7_6> zUZqwkZx=sjai-lxNr=@+4Vkbr`#(bRj^f60wPjV%@m>FzBkwUNSid-9PJ~}YlWpy?_360T6~UK{U&?xhlW92>S-f!60)5m)Hr$PodSgdsucIo0u^f; zY=K!vpxRk_8fzxwjS#?&5EPfT1{><2BCglxU zoeViSZ47^`;NPu}Mat4K+W;wt0w@0o1!J+FaQfNsP^ZzrKv5biF9oKBAWh{NooyeZ zaP6>Fbrr#HgDp}sj+WP-khP-2d+Bqe#SdDsN*zkeJ@i%Zf;LO~eoB36wE(Uh!zNxH z9$cEYo;$*Ngv#Tc90#A% z4nP||6L;GW3NRH<;2c3;9PP{eIrXQWjyZq;#!8u#FKrXWd9^+TOP}Y-n{LNu?<4Vx zU@mW*RT3-;;uf*u?|bhJ=282Jyx`BAKJ~>5uEMZC*1wf~UV#l)(${tSW^+{~ zlYOAOSj-#zwOUQPQg*SAgF&g|DAK8{DTgfV?H^!`H6*^GKIx#HSF$Yhr>(^5bOA@@ zZw)7sj}hCln%tijmh*D^&B*pk-0OGEe%X8=M8=CLecQ?ahwyS$Xb1f%=Y&2@l&cb7 zn1-5N9@;KoZ-pa5^K-_zT|jA@spL47rXbdre=fP!=fG`n%0XAFeb{aAJQnU7?q8 zd_jRupJ<;`LtxVXO!#Rf4L7InQhydD5_Kl6yZGT++q@MYXwI`$dSy&kVFSHN5JBpX zf)#3=T>Tyzl3|ZNbupw^yr*hQI_|Z`%9cP7ImGa2A6}6+1}fRWWxxVb-@fvQIe<9Q z235j3{HkWA^_4!s0t%R@&Rgw|9T(vU`c`NmfAHk?*ST)TiZM6JUHF+N-qnSe^asFQ zY+FKcS}X-i`=zdF^Y(8p;>d43ItLd-#*9-_`t*_wv;epk(awL$OS&3ScwNCl+2QH~ 
z!N#qr-bq1&*6Ygp6kO}(Wr7v9u`ywVYs?C#=9(`9t)#Vs@2IS~X0S|AoH$E-4}<=@ z2Kd5Zwg@4vMg!SFr}5(`FJFQ=sKYM4Os*P^sI$qT>svSBeWQS#c^%~6^s?JmpIznj zosLEoru3&%`|~%wEH8_nmZ19H`pHjrSj~Jn>#jvy^au7wfAn?p%{Lp(*I%zk&|Um~ zbALmD^C-~W`+&CR33T`xTEp`2gY{Zt|g@=|}3Tin4ivGZ@CGkBn&(J9}!J)BcUca^1 zeBxW3#Ag)s+3&zC=^WeJ1nsSs3WuEVr!|4oY~C%@z5|96RiVxX=;fpssXt00LtJ-90UnnDGU!$ zG9i$;VxMx7jn->IAUlL~gajs;eS{E!y27q9EW*9OrBAK}m*ijHl`rX+?Yp%k(5j$w zj4*`)OTjdj12}bdyW)_ore;#Gx4h+N|L)eLE@e)iuj6;p&JL8=pjs42q=J8T5hlNV z@Kv*hfPVMxeH4;EXi)_DXiR`fxjHDX4oBPY&VH4ZEP&b1gLx?+tN56vOob*}o41<} z?tjKcn-zwD=gF4xsoeBl+RGma zxrm!Wy$T6w8Q^nV-^?4^#4Oe-JJgH!^LF2&AKFkZxG;5VIpoDn6q@&M-HkFd?IQ8z z4FYxXGNo6EI*TtSl-2SMbQZU{P8N_>8V%7>^ely5} zb-P~_z{*zqR@di*a+cWHmK*$})acZ-~O zyOn&ClS0$Zct2Na7qc3Wur7*%SN>EPphiMmM<@_oRE=d4F{O~S?co4E`Vfj<^ExRF zFGI>X_n&<7eVfMqRi&drtJGNWqO5e$#QsTq6P<5bvuYh`;wcBL*uU9lyWlx{IsgDb z07*naR8`djM^57)Od6^gT`G_ZQ7x~E53j;2H7X3=qfGD9=KD({_-LmY4_TzNx1e8f zYoLz&xA%90lVhd+ptf$^YHr`X%MM%0LIyn)K%T_dth+OVfrhP}#~gTZ8ztHYcgLKv z{g^!2hROnk+x+Tjvr*Xa9OV*yxnqUns|qA2gW=2?hrf7oEd4P`d>!Z4dA4=+tc+0T zk}5hwAIqNdmkLbTAx4QseN367>laIR2W}BoA5*Q%9kaYhx%CP3BxV2CA!vX`lu{Lg@1$W}CjDZZC2;5?1PkM`^=RYM=h_r!yDyXX zA|5yQHxzhX6v(O*=GFGy2K#asE*!Alr{MO2-}`AM^bsr-$l*DECOI|^zx)~6N|KV*tkwio{RN1rzz{_vCL5sIn*{lyo}*I#{w zVqug?xA8LRcJL!qpGUFep{)4&T;b! z3#LsT!V^vlOJU7ZlZ5Ns&yQ}AF_!r#x|I|p;9IsFBKR)jN5$wR4HIsKDJHTrR`w5A zm3BLzN{E<-(G|Jn%&i~$`@GlmgS~pnTP{c`Xp|La^0;D%XbD`)XnD29Na4yXJa{Si zm2#W_w@l*OL0~#!%hnjnB;!n&d#F5*Ft4(avg z`pSXRmdp1*DFH<-J-U9q#oHl!si3TNQl*H;ku$Wu&!g`fCb>xd*)ssmJXSnnegWH{sjcktt% z`OP%7Ac2CW^`eBgX!VjUbl_2Z>Y(gIi1)-heTG>puOn1Cc(qLat`iirH8R-S*~@k* z;bp6`aIGurmHQFO2ZMDce-GfXY!9V)$(!u8`tk%+BJofu{9>%6koh`Kh@zHy(07!| z!}5+8{9ooo)hHB&Ek6jFKk{D(fmT5pq`@Zu{mN$^e9g=7mvCDPP(P^pSVlp+#WuSs z2TW+~bO=34;H$+@k)ejO4Yj@0tFP-EXKp3e#(kN(5rlN%KbYE2QMNX(Qs34)eV=@+6Vx+OR4Y&YIUcw$UZ`=NZXK>lOs-2RI#$dm@&q3s2n0u z%IVfLgYfw4pL4^h;v0<@#h28)BS{pyKceif^nn87%dGWm)za zCzr8!ov=WpBFF_3PnvWQC+$xxK+3)sO@4DkpEaY;U7{ad={;_Kc%KrFmhnN3l^%X9 zRkElPO8~dU+KMka#~{Xx6_m;!ee_WljWcf0=Bx`t+Nt$Xc_$0NvB=fjP-U_ z<`s@SY~M`E=ayq$M)5vHtzn$(jZzcY>*uzza)kEN)v&a->jP?2s_b|f zg@d znOo(5|M%G%MxXHS{@st6jCu0T`_-qPN0*y-7gJ#H844Ym?D|(A^)9|ly;p7slpKNoa_z}7@uM}=3LHS zRdP+4OjiY3U{qLK6^@F3r2^9lehmt(U4&HeBU2PXZ0%vPs<5}Z%=Wvj4f-9AvNwh4 zoEDch;Z+MLPiE|}S5UYLCR#74D4FiCQIJXZF~XIlQ>ck>U^>BFF=WzA)=AsaNBj*c z4@66Pg=Pz|#gpkc!b9mli+}?Cu?+KD0>qCJ27xIPddihB%#Jb#nBmdHnRtC`^$`0_QBvOyxWI_sD}hm}B*O zD+r=@__B%P|J6b{@>Sijq#rsIit|7cbJvHE3WW$rywk$P57NZ(OrCMUJ%=8snBdAwK7G;5_e-^jD!Eouz*;Y;l#JtgRAG=OXUbiZZnAV=?$Xfx4_^3BLh1 z&b^}~Mp%cg7VZS{$=xEndM}OOlH_M9@jaV&jeFIru;aUFjPis$FjWbN%(3Pq06*mm z(Imco+Sy=K>&tKUZ-*lkG4>4+`c*nONl7CCjP#)lvDgtPX$1h?l1X`$?0okO9}w3l zvBsIpn#3cnzP)TC3E*n*eGUdz*pH0PT$!=NM**Ni8fx_XA%|+%I%rI z6(?a!o1-K8&{ipDuGX=ZQ@{K3-)63M5C4+eDDoaQE8Pdp1jTP}vPCC=BYG^V=*?0_ zErs^>=sUn$3#YUVU{zl7eNfT@YmIS~G4^{VD5-s1c7 z?oZ;j40tw@u%r(4UE&JgUuh4;l1sx84=`x0L_-@aqWR~-iVpWxnpqE)>w8mp)w0Z) zslIrvR|$Etss^d9otvxfZ_3KMrBb_UdyhHbwM+H(CA93zII7>mmre((y9^9hD25G2 z%dRob{^tYc71A$8b)Wc{(-zNax)mh^dvjhLDKwl^LfB1*rFi3Zr|M(w&6s6Zq z|L`g(`)VrvC z9mo9%)9-FQnPfYgCjm0)#Y)MQZ~L+oy~WMBXar53FA``?;}*^_zPmgzjy)~G(}gQ1 z)dqzFg*YuNiie3ZAs2R08mahWQjS2>$D*YNJ*05bw}M4VL0-WFI&>+U^&xD)h#=|Q zVveANYu zx;R~QxxbF}68?iwurBvjsRNW|%)??u#x>jM7Elfl{>YW9SP`zNY{OrV^mv^?T2`f3 zwv=M2Q^R6i)pyUkCcZj8+c+dbwaj)d_*8zP4!vK-r<~N2cRMOrD^1S<9Ba@p;+*pH zSjnsMZu{1DGjCI%;M`{wOFjFKD9HKFfAS}uY2fb~tP1%o-w;;qyF6&2v{k;A|FlCb ziArB0EQ9UbljT$nt8`GX_nj6{beQCs#$*P>KFKq%c@<%rwlYPDdN?|W#i{L0r~8ZH zs`X)76|myzmRh%W+LW@crFo>^t()+0p-tiQd@A38VBxb(EQ&JlMA7x+Db`#RZ>5hi 
zhdsQbA^jdPm+`HoZ>csh=}gNn3k&Ui$-2bxF9}n3T=U6i>LxrBero$r;pAUQq~&XT zB9=;>!xIm!3r8qq%89Drm&FbEfVSnhkTT6v)EjI6bVwarW$nVqSiS%caXVyjqfec! zt=_?B*ZrouvD<9YXFvRwMIg5#Qisb}SM_m3?;_3y-Gvs*j+NH$Ds4@kkuNN-S3@kL zY#(dHnW4}>!UwNLCNpHvmF?!YKVQbes|T;F;p~2+EH2T1QRMKy4&9egm>=#R=G*4& z+YHDk!Yl?Qv@4`1;)y&`K<}in`Bw@!x|RF+cm)6GdvJ(?%kjyriyj2%f>!F!)ZC{( zr(7;t8Dtw5ShR3G#cA?^I@vZl! zE90&GLRtJNu>30RwJw|rlVAFd3ZMLs9P5GUdP)>v#HDz1oSd6D24<|<$EwXmF4{bL zKjMiGxRWm|{O$&McEa|t2M<=+FU=m^Er{|=MO?1@mG+m5{i0Y@JpQ}fmM0V3gey=0 za`o4z&B`^lGn$JELLL2&51729v%Nyzi&33u0ZU(CUIWYa`+}ekk1?lw+=Taw0uB@o z2G?G>eZR={#*|i}Zgq9k{O<2!9*-YK2ieyZwzh`NKm3Q^L!{XoGLB9D#~{Am|%o1+fg zytvX`Z8leKbIRJ+;A}loU$&DIpmRvZU&Sh9rFV<1qqZpLZKk=V5E01d|FP`Whug!d zs~6u@rO{bhoezb#ArGaoKKVD3SL?f0`DNOoSZT`vO;w1M`t<9wAv{exs7isw7{Lvm zsoaonJ|sxlAdoGMOEMw28Ji0NIAv6loyRY!j_tGfSN;+a_z~Jkq+;2i5;@z0mEwJt zZ@I0?ZQmttUfVaw6Xsb$seAA3zYY#@^sZ6{>r(4r1*Q4{CnV1BKNT3n3w*AZt_y+B z1EoRv?umTIwEy|;9@;?GA)!f9>pL|o=dQdYhsZQ?8m|%*$v?~X0t(&~( zPjcW-6zH_&PRGRxEbi==eI_rV#v-i5PExBf<4Fa?HFy!%1(I5OsS7&20nX^xZn-5K zDKAL80zulPIGd);jiWQgRv&z1_^B@A0fhxIT6qG(M&;!rW;vEhWuVJ6< z8oyO0Q`c8`jc|VWGgFENBOfq`#9s5{nkymp}-;q?yla= zb`t#(MJQu(-CsfFu#D9P4C7?mxgMfko#~=*K^Wk@6HS#bUegcZ+ z5{tA7gijygU>Ie@z0Nv{9ju3%Ap*w%eu?lI2AUBBEE|*N8b%1c6K3vF;w_a^*v2UF z4u?g6IVC*SL@&5z;tfu@f_G0Rxm@cMkS{=J%bO8h(XBksK~o7;z>!|q_#$|4N}}%%xi}h4g$(sz0_kR5%mesIYZVw#uo;U!tB?m}q{6S(ZU~p=WZfR(%j;m3EsEUx z3kJgBXMN{g`9yu8h(xj4K~U6Bn%hK@prFHTnTwSZ#eAutQQ&YfGDAdUv>wq)zMJ~rVA>aOyX z3I~}2s;2(tWfqJ@lVtoepJ(!DaVyj;)nBk(z@d%fn^1f4naC6B{&2WM`&RJ;zgj;s z0rXe+R>4_=u;qc|)Miv3i9;V1KT|?M@ewEJ#FS zfwmc1=J=U9VB76BJON&Z_wKX(tejA~6wPrPc_Gc>n`daY@UhU;uG58vQE;`b+7~a< z%4!Icw&;a~71cZ#p-oen6_^vOl+gdvURXFl*det{KmR)205Q=h)~bDJbrX{92> zaqW})A2y%;@B`W_rNl}7bogDf{RHbStbE+M*zGgstZ30_yFv+1JsjYy-h~6LDTU+q ze7E@x>;oyZeUO%YTBPZpaJkR;hlN-NUT$=FKVysI^sDCX27b(bu-bh2>m}k^50;dG zoWF!Z)Bae$YL2-EX<>xa1vHa){HaWeMHxIi;x{pG+D5istKhPo)N$5I5e`3k`M?c* zd`}Rb?L-BhR#VB-Y}b~VK*V0U_+l26?kA!zz z;bp}vVcpel{^oDCn&0`IZJ4T5Rw@4F|LRxe(IbvCJ>8x!4`3l?DzVn&!uw0#l2!%$ z1ZB&6X9YkfqQn2FrtM&_7>-#T9${fsn&1@^pNTGv*|48_=nj|&&YS0%i1SAjWSGOw zusVEU)Onhlo41()4m?g`b^dP0Y-j!Kpzt=A@ZHAULHf>}1N=fTxmg^QUsJK2jD7gw zUh~iY`7iM=vE2Nx|HY*H#-G<~qW0ShmT5VY0Jd(8P!ciW?RE~M-0AJ@XA*C};~3-O zkaHFVLbt<}tq87&mhDV3b>K3Kmi!xmwV&*6TiTCQPo#` zS=4gdM{reG0!IOy?Fu41xGFF3c8=&f2*x`m-r&4~ljP$}pX3cbDNVp> z5rUw|HpFA-mhGx+b4-TwbS2;UBR93KW9dV=glndXhPDG0k229dl$kuLBExs-cP&HOPuq`@;E1wh z>n7jq45svx2Cblg95JaSpTsy7d@rv=`PkOQ*`kG4fF=Jg(U&bM59YvvIJjZC)zx~F zb`yGAFz_unLwEWl^{%Bt=c%uCBkpD^o$U9vHn0r7dz(=a1<~Q7W^epUEWCD8*Y=5S zS=7Ioy^Gu54&lcv>d>w{gk_bJ$5jwzehw{Nc~(m*pO^03Wqb8TvvgMl_zuc0 z>QW1<@i)z_^{wXMllA7yUy{f1CWfxmF9=#@^N+|l(zo}ew#xh3R>}#EM4Y>Nyi_NM=f%5ucSC_Ug#vYOTExczsZ7ik-@beIw(XUbY2%o&xv8bZ z_ftj{T%Uiw-Tc#k`b!pBR$`rev$gU2q5a#E%i*3rycEcbwNHn^rHenWE)#8qw5pg< z;pKKgC;M6HCXK$BbY|{ip!eC1#!BcRimXgDVSL-YZQN&Fzn94{<5w#L_E8$FAtcfTq!-t92agyo}7}<6}+Kjt(62&#cCiysYnr8$sr0=~QSKRlTAAG*Y zQSn-7A@sAo%%tLdj3R=0Z3(jHVD=!o}6%tLQkUomQsxVZ9o)R}0&Xb*YBMu4#DymK?9a{Xf zBG4*EoXyQRKD$k8$`;1npe(v<;3G|e%X;+GGzDoAMR4>!LI*;EzHPiWUO6dCeo+~w z(5>$rv8*LNpC)PVQY@>UpHr>%b7R$yXR`D)*`zMSX^djZKB%^NI+!^)>rCF0S)w2jN&74- zk!bqk>d^mKzoyqpM?u}~Y`tZ9xuz2We1vt6ebbUh+7eRNxhsE!VV~*t*;wI_zgCoB z7g#C>UM8fxSOw{eEM)NyAjP>rJ_A3IN*wd`>m270asCo`lsEirJb9fl!M;A$bC;X2 z%xOry)KmG)w}yfaIaR#shf`NSS`Wb%d{OAwPpI6)>R6@EglBsAQ>m`|AQ{Lc6|C}F ztE?#&o0IV#7OcO*GW5HqhcXJaXcXM5oIa?2cS4`2B1`_-x^*k{{)EF=R6cFp@n8(M zeNUSqmd<;eVC%`Y2Di&Sc}hI=c`I0ot=t+kqbK-c=Afaa5oK+@Y}W1qbNEC2hM`um ze{cEFHr$1OOJB-(O5f@@8B3+f5SbLbfv;jptJWxY?3Yuo@FxAWhn3_sN#bOTgkN(a zCf7U@$9k044AJ=v`QvN4@^3&#D{>%T}{Oo6sL-fn!tD>vV9$x)_ 
zefsGh{=hhm;nlR+%VhujI48XI{CMxs*A6E%_E(uOmZ9T{C8NP%n$C9LwuM|V`9;xB z+S5fb(pk8YKHg^w2sqpbuosf*=EECAfjTg|DqaUmg)ZMaz`o1D^4-2&{CC-29jL#r z(8Isu@BGf==I{NzZ<>Gq?|(ak_d8DR?~AnWL_XWwIL}3haJ!k?v=nm6YBEEc6YO>Z z>@eBaQbmCd#g_s{7i)-2l9|Vp$+443C(A`a;5%2uonClc`6}?15dZ^&Nvc7C;p?xz zZXP{&fNv&zEs>`0wh?go^wSR_%*Y;bn!SD4&h zWqR0cz9rp+N$^j9chIb@+$=bPb2`Y*hkF`1p2Enn#Q`mghN=ik5C6614Y>coBZlgL;z@#?> zNL)iXQKu|XSZHO(KUbX(nBY(1W2$6dX=2_NwQpf4$Y>3+ySEoXtXO;$3yv&(w~58q zEm;bi(p&|Vh3%juSi{fTI@=OQqhTyV?88+?DkEsTaRd#`L;o|XIVVepIF29U?A@(3 zl;v;7nW9;e%*E9eV=W)Jp4kBBOIZBDB1u8l-HeEE&SZWI4uxwLf@_a=XoC zx{T6wiV_d24|JlWLWxG6a+`Iq7;Hxd{o!qG-XpJd;<(s-QrC;|U&npL$#!i0pNzFC zNmQ6toGAg;bfwi4Ol<%2@mgA)7kDjCCA3k}XHc)%{&-%R3*H3|wAMmKWmOFYNAY9l zGks31M)?>eE7wvc>(yR~df=V>XWhaSMR=wDVMr{Mv{<1oX**Vr<<(l%e#f6UXo;!g z^a2;>=qSubSd{fq_TIj;MLK0SdyHp8-h*gIp_%r&OuQpZcuH{!Kb%c#`D#5>TAqzl zbMn0|P>e4_#Yq~fT(REWt{jV8`ujLCx8UM*P7sV;c*;Q(JrWLRWVA&7 z@r?$oyOZ&zV z=od;l6tLJq{Y>Gh8U2Rih4eJYoBDo};-_n#C(@NjT36Es;kz&aaji~kJ}ns7)^qVY zNx67ioW@&T#uNFAa;u2b|CS306YB|UIo>3$`6T~}IM4Mq?i&2=`~i+T73@1i)%cXf zP_KF5&udk9mBzi4l>;lQb_zA~@9Rp|%?BSGG=KYVKjB#UJ&17moE1D2c63;*!t1~P zcNqCq1S^c+wJm+9*xs5Gz*m9HTLQzzsk48#G+rZE?-NdF)Q{G+>3$|(2QeM!UM86T zduNq*8N%Gec_S%ce_dCj>XuCVbOkBDm5JpWiTW!-%2n?fiydx5eMKOz0A_uCg2LgIVW?zC^g!7fP#_jG72px#Y~{H{imHsDU zVsL`rSNWx~2L=~}=@kaF1PasB&O1D-@UjokS|_J95?8^&U$Q1I{S$6(t~Q_l=(kDF z77rW-KY6^}{PK%mHxC{?WWtZ)g9)yo!`vqthcqnTwFSs~oYM}THuvw3n~y%(L>R+i zJfFKLtQ7WYzatD!+!BNW;0%QgGDow7Yq2HDwYrJIYrTiN72EM(~0Vj>=4{XXo38H)=7O8Y9Z%z;rDa)vGbO@|n zuDX4IX;pXyNgmC%{grVAOTGch5%5$Hi!e`76~mTLc6nFE9&QuNcX9qz;z@;U1rN=Q z!yEGL!29cLIg&rO5AgAWfVUVF+YAxC-qQgfD&LafWmW94(KTj3GqO25ey)@4z#0Gqb^%{O1gSJQ8O z_8CeK`?F`aCyLvtK>5o*s7U+KGU+sjoQi#^hk5-awslonj-)oU7w(`)+n?owe|!af z+`8A6JXhPT!Tvn5jO-~~fa)iWPM)tQxd3iVX`?cvhA28%09jpKho6eIP3co?{VxtB z_X1m7vUOUV_<6B6)iP2($jO+tA3qRk+UMp%T&>NrD3b01IMy*`*yv%UN;}TNE2qxx z?QS=FDCx(Xlz6v)(0q9NTTW0!nJ;hP1F*x`bwo$0@+I?G#-VkLa2zA;*Hjh_xEC*# zMBCd>iFb&FBx4WCG8aUw1ILN|gZ*Z=dD^VrTIbZq4aOXlOZzM!&?b-fo-~77tIcO0 ztuzn59Dx_R%Z^a;VBJ@(yHa*(Vmshkc|$(#q12L>jjKhO-}4^)q@xy?($F#5tI8ky z%(UA|Oy1cRRN6(k(Vx$ za_Ut&Af3?}PS~bs_fdwi_n{_k%J{xepiEdU*5Gk^S`3{pKJ4<6kyk zez|e8wekC}f;Tot6&bKcCc9PmSwwa|k>;hsVVx9vjlT~EL?$yT!<_VHY~|{+0)UE! 
z+xWGy<*TR~V%4dDVLac>;d$?*h_oyCYPyTF=k@iCGTCNAtI*TIsjx~kEsB`+l0q2H zt7^X@)Dj${`-7wA^B)hJzxB5`B@c(dZU=E{Tqd+!N#~RLhd$pF`V4|okW<;Sw>xaU z{_?x#n=i3wnykhOWE0`#{@uGNx5AFfJGY5^_uygk_`x>E)Z^2N3A>6e@zbirQv(B= z=j189xSOM%uh0GoMa#?{{6LKSobb;$-OF|F3ip|mmVtksc1Ds2FA6XUcy4R4ek#?T zNz02^$gp}Z4YFFVpaHT)A;8uV1UfB66gGq-yc`7|DVGfS?^ZKuaDZ@Uoe!~eu&!{o z-KpmycS;0nb%V`>MivWaBuJ0k04Ekj1S(h#D z;%YKs?4wBS5l3NEHu!)t5$_tlh>I zD>sxQQ~P2REK!E@-fj6qEL10ZD&Y5$zwyV&z)M=&cD9l6c8YEq>#q)qR!`n!i6FeB zFFEsGM&V^2=vG3vGm5Exy~g}z!!f714w_Z0r&m3FmORCAjP>IRCk^*WpMDCOEf1E7 zk7ALMFlW8G-K2msm1 z#Ey!Nk2TD!YoR;Dm{E&3}>Hf zyu~=)``)+}Uj6R#YgKsFjBG?Q?0Fqcjq^IL(yNbB=qEpU%JvXW{*lA$>^sl0k z-}|C)ygJ2iQoQXHxM)y$+spML{2eC-2;N%t-nw=9MUof6z3TTaTev^|_@MdhGq=-w zXv!-Xf3AweP5d{G0wd;A7@so@KsnOKdWXrkGgBwDP7LbgFw;}!U#9}!Gxcb%wsGfX zUaPW8A2yp=B2pI}aOWC%j-w~buvf``4!$~*_ ziZTZ?^_c)`tDIRwu=wzPuleW$R;=Zg`t*}?t(cmon%iVa3H~CJ|wgm`w3my zAMYGDU;Nc~&7+6+NaObF250#A;@jAbrJ7qr^gpH_mjPS-ZnJgm2?DZCeqG5Qu?5JJ z<(#+1Dh`;4TV%i-a*leac%~3M^17~6t5E5*b4CT9A(jJ?p`T3!o|ewU!f5NLji*vZ zIx0wnfFa`f`9m>S!MSdMXZB^`nL1=kq{5cMf9j4pi2{ZAt==F?!(O%-QqMWr(^Y#d zuT~Ow35z1>S?ckG*9zk26|4v!l_!I>-I%|?@w>_{gh{N93NP{vnipwHm_#-{36`lZ zLj*Cl87ka~OxdanA1f9TSCOj=??k!Cmc*zo#T~F)w{UPxyBSUPrB>jTERqQ@K#Yia zl8zR*Je9b=slUMk)F?-sxQpkRU=(@UGDIk)%^48W=P~8AC_i{(yp*^7TI*>Z`6@Ss ze2@-^6<6>nJ_Bh(Fk_S*z;x?x8@=Sf!%_AF`$!`HW5U`4R+M;9#y$R0)H(W z72rk5N;>#g*jk?KJbv1Y5A}Vtjxv6&SjvWy2{V?a^f@XY4Z)js zoB0gN{39Eu|?X)VK9wcVN5hQYM4%df;jv zEYBn#eL#+=--+}jntA%&5MJY@N@=THFvckg*TQ!lwi7!yupEB0jrPLN8bfg!C9vy) zgY?jf+D0cLnF!!^kztvs=5v`vG0F9~;3 z=o<>W90eSR97No@3Roi>g z(IAXhgBBbE6e7R+_1FBqLQoK7x7o%q`p)i7gbOF%Du9j=R!+1=ixV00ZWVVHt_rXU zyjpJ6>6>MVl7h*wlY5gG6c-{laiX6`n2%A2&2T7;1FKA&wK5tX4iPpY-o(0L1!u>@-@jkulj*g)yvIubs zV6FH`T}eGNj`okuUUTL zP|IW4cnpp$ZVE11ZzznZ__FX?!?`szLNsghrcwx9!7q>b`tJzs;|u{=tF^BCSyKew z0e6P7YcPO0l;ndf3YRHX6w8#4Jmj4t=uu&bia5>71gva%1g|$Hv=wfBoh-he)Nv^T zj>7xU6Pn7aZuO-CL$6wnT1KD!BCq7$_Rmd9%xgbc7w3fvBVVnLbK?mO93r$*D=Lhw zi?jjqN;m} z8-LQY?KWOPx8%2oyZ7>-TRxZA206ud({(~^6VJ%M$yj;;HFWoea?(ILv?(xitkE#ukySVpaRv~u+zPZu&`9cP~8HOJuv z1-EQN6h~_-x0{v0dirnSTR8DnVWD;E5d(sKj#fY_j8u^@Y-Nk$NJWutSj?zD{Vdwo zMafV9n|?}5FUo_iN=lKs{400@E9GG;JY`uy`Lu+;zaExd9S*1~+vrg&l4gzm;3#c~ zMNIgDkNEo2Cl-05e6f%2qKHw^WuB9<)2- z%MF2o6{}^dK^*6SxX1jW^a@g>pj@^Q<&^URHSlf+=`)$gpw}}81~oPg^5&6AZ{4qAwDb9*iYXvsacq(&os#j3EA@V{}5*FZkfx$VHZ~zFLnTuN$RTq0!A> ztq*ZB41+{D5D8AIXgyUXdbZE`&rb`_5x{uP1l#w1XHsjNsgqbH*skO%jLMcyATt^D zTb+b6p>!f2A&hrggRQca@w+c}nn&M#hjVboX~$e9&tsG&PMQs6h1(pN1DW@!aU8+U zNjT|>C7A8bJVeb2u$v@UxI~fV7Rwm|$})nzr?aV4a$cpt)ICON)gLum>(l1xH#8!I z87%?$Tm(eQreHkPCks|zaTKice_$y_xqV6t2z^Pd@Y9;D(P25WbusYtM`3avJ-Bi& z#v)0ma)ISiNymJgpde%B;z#*-S*YQ+Gr{=4k~YR)43<&rxf)6zOsPxOe;k`*-d&2OK`If>oTjp)xA* zOhSBKk%x8Vb|1*3vg?>bPEb-zy43#+-(Lt?p^?a#&OF7~d+9{Eigg#|0j5f<4sZs8 zJLGmJiU;dn3zhf;at;SiF@xr`G8A&u%@V{H?y-)dxktpOTnJB@w-5PZJ!%=Tj}rC~ zTbLNfl(-h@$D=~}5qVGID{CKT|3%>-y1&^Vp+juY&`CM<5&vR5)-rHaK`U2A7^c3b z^1loxaoW(f`trm#6+st>Rf+6adBVhh$1(gQQH?G=ktKQ2+HOFaa6 z%wmNEV?O3_kFx4aI{o!jWVv={+Dr zE3?DSX0tyUu+T&P%~r-p63oX+EwTVeJ}BZqr217$`{CKPrh-drwwZJxE*mBbO}UTq zuWdi^>qZVsDgWuBvh)`?N(bEM=9-AA^70#9qlDDos7FpN`a!JFjaI7?xJV`*{F z4>~a%Bp)q)%`beE(XgZw(2@B$Z{WN0gc*D_!LsrQy380DmRI2YOmHPID0s*XZV(#jYqI~ijzxqxgQ~@JO zE+^VdQ1YFLxvR@g&Yh&o)>(b#U0}8E1n)LMl}9;Z9AyF?R1l=WiSgQW`H`;6mPPYX zU^D+33T_G_t%89B1;vsY?31U82%X~gnM6ksMx-()QvqU`abC?Zzq#qw6v|~rf=IB; z-gq?w=LQHE2tqT@oF_!M1W&h}NCy>rT6+%G4^f1XfNvm4T7Id8B+DP)2-XnXxf%T5FnW~4{r*Pt@rNHY2PiYg2uxZSB#Sv*Qv$WkR(w@8S?5}pD9jHh2NBd# ze_&iO^f}gO^R(74DX0(md9}NNl8r4=2uvl@g?V4{HzE0*nF=bt`&oVtt$j*;8CUBs ztd!Zh8lG1xP>XLGaf+3KcxYWRCXE6rAK!@J;9==R+Uqlgha!cJqH7GxWb89B{hV*P 
zNvaaxM2XwdMHtTnE6N|?-0y7yt3XREPp6B59-dRNp%9J8Z=)cuQZE72@`5<0c_tsL zQNE#Me5ZE`^fqdn6LI*5mW#tYi6>7-ISlkRqI8MrsqxMYCnX8Rh$c51&bGyKlV9-C zw2M?KkEMKb|Dd#&5=#3J=U~I1lD@Qm`|c=dk=&(`Qv#J%r9>zcXgl_grN&iSV%3Bd zt9`Tmw^ncVXFeaJXh5Sf&!=?ArS2;>7ZfS4OZ9903NqB=q1Lms75jiEPo6YifAdZA z;M;F=vh2?GcFNJCZD~1YpFCyZG$RTmwyfEnN1L;b`{0+w5edQ1@`V-Towl?EaXF)64Ha#u0P|hPLvdNPODt0{S zl7U1eDe1dQSOa&r;AQ-Xb@ropn+$u+@W4KF6KmqN<_I11)6r&gFuRQ+X0_l=4hF|j z6+ZTF2J1D-E$Y;=`z#&AIrm(x2j8ins+M9dRILK%n5%qV71Gl4K*f>$vBo zh=p=r(?09W3*{K#XKg~goBvqrGGzO>0WceX3&A+SAAF=s{Tan^D_!G4{Ij=U`u&Df zWQcK@CiQA?b3jZTT@#$gyMFlLL38)+2KwpZ?P)&5wWll>UE+vX9}P?t^hRSMLpu z3H9DB@Uxlzw#LMlc4hp5lqe55HEzEswH`iP%^7^?sPKmbWZK~&B=X_c8XIS1B&Mxf6vg9j6>a;lYS>?6{5 zAUr6%9H~?w_{|Ba;)(r-^WHLL^=cq*CbBA*wB#wmpnZ_|k&tm)3n^jiY*~w=zGJE> zdFR~`C;7SgafwyowT+^z9mD3JX=qk|=EqaV~zyaGD#-G4Mp04${=ynK>XJEvw~Mh%}_H<<@PQwGQ&( zoIgM=f`{8J&xFuE0xopcA^==^meJ0PtK}rKf*0=;#&kxkUzPPuzF`gLb1UZry0~d& z0>na%NE}iP-#7^I^5g#APL!dx(y z?gmn+Aii3+p_<2Ht8A0A6V;MU0*4;JDGL=+kGDx9Jpr#3C4FX(y3udflzvuGDC!~c zDH`gei%-Y^DBs0&9tz?-mOO3O4<0^5$;Ii0D8%>i6ZquuW30Lki;pZ~Y9;Feh!$ru zSr#fRH+AKqCeqhBvwstO;gv#rw+&h*X)sOyF4B%uq%~bzUv7uqXYTdom&BjpgR*rS z^Y*K#Hqjhka0?jvM^qWJ{o(MZI~yK!FY&bdT+& z*9mI6C;K*}R~Q&1x&v@p1EtCfgKfnK&JZf`S5B8}T69aF|*2WKY=ncn2SmK!oNsgu@al2YH_ zT;FdBtgddr7m9)^V9JOhj~ufKo$U}#uq6@K2tZ6+3U@g#WiKi24CW;DE zP8yd{rmeF@QQ=5mD(V}Ypr`FKxmJ-NY)^(Vm|kC9V-zx;bd`RA2=XrKs&eDRDL)dN znuaZohGl-`9Jy?pR&i$e6m$V*R4(F}t!)twX&XHIKHDr1Y;eXxT>b93YR;CnMQd>5 z5xwNfLk%L^maAgq*JQ~}wunTCs9$Ts7hFwC6s?|A3v6(deg8g=l<})H!p~6cORJ*5 zf=Fity434pcGRz18Y9%7*Z+zzA+&1m%NyUhVAyLm);AIA2F?DsEMnBUG@JaaBXgD( z3T^5|~PQWviWW#QPaY&qd>V z_*ewZi!)L&JFXyNPn^Ph%PX72FdVq{dgfYs|=4b^zJ^;S{g;b)ejISj>NqNzoQRi8fyD0uG z7SuuDFNz6qwo=S@hyGOIzZ1V(I6cP~Vflx;feuU>6bwrHIC&C_P^f$-53TE@gIkdY z_)_w@qz8fxqD8J2Nw~*plzyn+gF=O-^$DrMMEV8~bMap0$ki8M??;|HD)4z&-dt(c*0-AR z7agugo*E>Mz<;lfZpISN*nD-kUwym0b6^dFG*JRAFp3 zyV)FN(F$FQp>WWSuzeNx&2N74zvm}E`9%)Pe3OR=TarmiqGXcI?k2m@jRp!sjc1Yz|4Oa%}_C2;EOvv=+sR<0b@FV|X0-1zXGdC7Xu8W!v2^~t(rsyu6|FAzT# zR4k+%Gy=`FkzZ190XP_l_^zgV_nHVRI2@&&>^r?)Id<09*L(6<+_xBWo{KnM8~5(r zR{Gh`u3%om!07<-q5;jWI$A*&CXLTPNZO);X{i8H9_^m~T?fPod@w|F;yk%c{!qKC zhNtGMM2z&Y2nAkiNiWLaTZSNpW^j5 zK8pUkc6M4UKpW}v&sSJHb240G5{~RwR+zXkaT*NDEQ|!aW}FB-NPqZ;-(kM_Bj&fA z^x0?A^zp}cnPU{*QJP0%%QT%=9G4tJicpLT#kcntpBM4owQp$!CHTmL?^^9oukW?@ z)?#t#8cG3ar0)?Io%r>MMYW4!7LM*9$D&UnG7dQw*@s;?yI|JDWpy181Yb8Up52Tz z%}N7cMe`s89~E9UR#$m1gBU(H=#03@qE%;8E_z(n zn%@dg2rq!c!*M;HbOuV*99imA>1PHHCvZh`yqHjA3-k#7Eh}hdNL&b zhXSyqEp%OCenB^Bh4Aaa0OhivqIISmXh_h=A<#NL$A@txv>#}@se)@PdReQ`UM&O* zHM=Z*@Bw*SUwI?6to%(NC4|!!=1^!WQ2&FyF1#^7m_gftvT8D#19O7D%u4uY3Sh#z zA_PM7>GTi>&R^qGXFKYu&~3UwBF8@d`A(SJM2Q5uq%kcDeHs;b92=nu_Y|u1kwe8S z+^ONH{*jhd;yull0tCf`3*JB+Pxm{w(m@XsD?t_VqL)OHYP(oAg*$!nEY~;Fa_c>w z1+~c2?^heF(_+X#g^v6YtOY)xOP-q-V|ks&$^eJ$!06X#J}UhrVG+Cx0hXJZzv&yv zEah_8E4oAx_i^9F_yXzXK|sP@MteiD62h+9Ql9ea*kK(EnFIK1nG}498)Y$n%9{Q7 zDX`NFX5T#+r9$C(GAe_occ_u_qec8mg7G@ zAaC`N7)!^Y9ZDCYUs!el9$h+U5^TMtui<1n#m%EXltHaCd`bnag074!l-D+_;Z0u< zIy5EWd!(FnKwsP2-DRWgN?M{HcDkJypPPWdK~w8Sh=LMH(_>0}6+BjblZ zM84fT@#H}P4%`2z-;xA86#jrwwN~9h7MMRW0{Xp2Xv9g|Sy#HQo zCJe9P2dwiU51BL2MyjB3RKi`^fH}n}vdkaefOX!u z@0i3V>O{Nu%a{t{jJ`7kDA#lo-(X$%k#bbjHe`IMkb1az^AbzwD zl@U<0uhDq^hefF~|Hkqz<x`rt$?Q2}CF1>664+xP+_8IRR@|cig zc^!P0ADwuR`s{ED9WqA2VMq!p&(O|hT2LF&<^I$9gf@ixs!MuU(D+N50^~; zT~*qMb)LQInSuk{u*U*Zu*n{BPNBppo2`vrUR1>b`1FKrZynwCcblO5PVDDbhSPJr zxcj~2hG(y|!Nnot(wy)D;{(~e1+BWiAFe>P+%@{Zw7vMfaS z?sQn#HNKbD*9Es)Y?7>UZ*M+%`&JexOk)w)^2xg#oLgD-$OPLyF8E?GegE!O`o-T3)6xcJMF=Sq91(X8G1o*h 
z>Ah=p4iUnvy0(vSh>tQ>1&I*0HEk&?d02PSQcxH83X{}1UxcPlbizoPmXPAIc|dj|pqaVyGrJa#q0_Ija029gnNNZ z;$&A_w&vr5wD1#&@rB6BN5}M-NK!O(aZ`WHp$eVb0X`gYZ#1E`&1EVhJl4_9SYN)H z+KnyJP)L?B)W1<8d6!QF6N*cTs%dJ@BZC?ddtQzlef}2wnb#U>4_VHP zX4V?wc4p801mZx1n$*-X{rs%c&4^X z1Fa}EVKm!oV>;&n8)^wTwugB#ecO!=9qg+~DVXKRz@O;1WD(c4hx4ZvOEH#7%%GhV z3ko?@^5I>^2rlVAl$o1j`g1rvr@uy@pkGRlFtOvAW_FJ8j%!VPzSf;%<{>mmNyGZZ zX^_OrxtXiU^NWwgvAt2o%Y4X}C3P+nVG2N$7X-UJXu(4kRyWqu2jBRBgH^D?S`%Dd zkAn+1r!ndrX)^{mU#U^=Z|h+lM+eN4Bi6TQ1>4RQK=%IZFgB1hWuCw<&M`9%-k5m! z#z-?w!2Os9cuih`PR%xT2JjjIb_KZ@6K!d0r&)DqA(Ll-`_Rg74_^rlEr6fl03Z>1 zYF6(?MzkT<{`Qnj+fg2=FrEiDLnvt)^9=OxUW}!YXYt22^>ccV4o?$I!G2~t#DQ^| zkkS`*BA-M47A7q*fH+TzZ`fvT@`g5-AgqUk2oOda@CCS~nSL42t7<=^x#V#HY{AAb z^Npm%|7tE;Y%j)QfyZ#C9D8N-3n4)AngKAy)Q@% zk4DUsbbjxA=8huox8>V0-_52U{_q|qqwEwjuPufuzLVD1`|QGw)6JVJ>A(N?Z-lnp z+cM`Z*7aGg!B6l%k{9j@F7l=3FRoz}pj==Jwp0PPhOD<{thZb|kGOV`txzI>BA2>L z??Sy+TMf;HCIVHD-R54WhgLE^O^BqnnEdS8&DAFB{184AP!+tUn6bFvU0rRb8#ivG zt*tFI=y%yH++{5#XoDb@I}Ga;7Rymyr7-4v3h%;IUQoEQr}X+IHW|s}D;)pgA4Lr> zO3fk{hvU1LcK|+Ld4DNPj&9$qrY}BSPSYWRP7O(k1-Tku`pMGoXGl(w{#?6&9FQ%n zM*Laq)A^rnm4#4@k0DgWedwO~YI$-<-c_Ox`;!Shgr1&()$sY=qwsuK3p*9$==qKNl2jfqA(iAzM9DP=K(C z)=lPv_-l@Qj`Ke)?^kipdOx*Uot-Mzg0G_N^E&75m-}52&$d{oWkKU&{ZjmXTs?~% zj2Hc!)PlxzaQ#M=(auy*ejvpufWSY(6 z(Vqf*CQVKI1gst~PKH(|QS>)xpI*ivVvBLO!5A*OW1p)srkS;l)5~a<_t3_bTRoLi za4ED;DUarL_8%pa2$`fj&m3Ib3xL;W^XEZo=oTgAPiSoN%)>GSsV2-5nxs($9%klo z{1>FCFh(?>hjUNE4He2D02e@&(Mi0|+@X0}%q3LBamfmqU(jz(OEo#i0#Jtoe=?s6X{~vt_6mI^R)LqS&@>JT=g~^*#M^#i_p%)A^R)&cE5$7FuOotC8)v zsEPeL7UT9`lIlZ!qPqQ&&dx?JRhK%EV`1s;&`={%b zr6uHKfH3P@d~&AoH(wUzuuR^G#&|k1ymG+Hfz!r!(Nzo_$5nZGSK0Y5&gJG|PBV7k zTj^K7dOv;m;U8d@vXTDh|NNRo8Vg2ObVq>(%A9y~=eaLPrvmz^N#lz#DjgcY?rjQd z09rr(@s|ic59k2Ly;%QiU!!IBD@;ne0Fa9<^HlXGDV@cK2|(V;eX7^aj6^#&R|cHi z^kl%xe8-qQjnMum`0mWKFCM;i6mYU|0Dseh!c`V>-?Wnde#+~@B^EDaZ+lC>i*ObN zO87(@q<{G@zfAw+pX{ zfWi$6ZUvqi+E;=n0dy&NXhyVXKB? 
z*%2o`DV#1fao~%fI^EZtqenc19(TX_r~pOd#8Rfuvk^PsYJmy#tgfyBk}LrN5BYFJ zJ)oEljXeR-lO8EpluQ3Th;p&uH$Oi!fGdwApW#18KU^v#%x25{9^+F5&<4|z*8W=B z$4$Ujw+;X@(1buxKqsFx0h3JA<5cfb9a^ zTY1@^qZC4bzoc(yYyi;cLq#FE3GH0-9+_%!?dfP`&Fr{Frz4GVuVMOq${h7q@Mp){ zBioy`tR39qq_rh9KBS}C!dsWOqJ4U38}z!p@Jl5CHb!_;VAc`10)Nr=RWv}<3~6A} zbkO01LNo(oKCwn*txdV33IKQd6!T=yZU@KOBS74M0u_yOA>;!;wu5G(0jOkYiu%!Bk7n|Fxl`1; z_>ML_t0V~3(RQK~teqEw=4pzInY@k<5Fi>xT4cJIkJq80ML& z81Kd@oYF4t zldw#@tTtudV50v6!tg7`D@{7f4K-FDrh(-?EjWf+chfJf3z{s-twlRnuO|h|HBz&O za;shG*xcp40h%vE{648%G1d1HRS`H6xYEgcpJ!4?b7aSEfj>b90qLMMRSMrnYTS}X zRkO1q1bye7E!xfT-nOP6#`vHnSWYHPLybQVaY>qL;bg!DEmg~|lkw0v%lsgy<@j7^ zG6m0&N6bT{C8#&xleL+MOZ))TDNmdb%eT}EkkfAnVEJ^E?gV4`3J}Wc&=3nb4#QK% zN5@4qrZkzetkD*}N*NsUqaM6y%SSqqK0kcQL-V!l;T)g#Hok2v&kM+SsKbu3n#TkkcLL39pflZKBqfkYcIA zdX0*%qB++YUP_;R%!UKwe+6(-ZT)tg&6A}jU@nI=v1aJ;?$*|2=B64x2fJze)?L2Q zrnCJC@>7`UeC``_P5fA(iz&=yB~6LeN6 z1kE=0n(3>*z0B?|i`s>@c^dk8(#8NEJNB3JqPT6?SRb%s<)m|Vffx^1)S`_ybRldh zF2cJ)0mnQCXFqQm_~BUX0>=}re&1vFo5C*z&0M5NJAEQO>!kdq)b+f|S+7si=4PJ- zy`anKsX7m|T4MkNwSVW6!s$`piMw4uAN_|6(Ogl8>m%HiajaZnRjr^T04C_v;zU)i z3!zOxst7S3@<(w)-x&`cc%V>^UDjos`EIay7kntEO%|iqZ(L1x_wEGXb$Hlib8sJj zCIVavqncxwnkFOieFnEOh@sD@W?EZcBi(lDt}F-W<%vw1>go4IA!SG!iea|1i`yAM z#|-ByuF=$p*B?_At%fRfzOq?MH@<~GuhBAK&l36J=c~ph870v76URVNCwIw7%me{D z3|<{C07#c-agW#@HGdq)Kc4Uu5XXACh<7<3)yyx=qZ5O9$Y#AJvuXMq17Q);4hAV4?QI3T_LVhEi29=g=%X((->L&XBEWM5{cwO83w4;psarghZhj(=4(Ugm!DHc< ziDzne)dc6-_)DaYq1B?$7w-tkz>{3__B^TLS40T<#?!e8A1%U9DFa0~G4imp1DP@7 zrOhnw$zZ+oGvp^sYJx8odE?|8=lQ!J4z;s_hR3WIgLjUJnd6@LTO^UwvRyM3BN`6{ zt?g@?F3W>Oe~_1>BqlIV#QrVMJ{0`Dz&FKH!G{6n%maj31$NgxW59wC=*d$FU(j2# zT{H521CR#F1A^roLdZ3H`$kdDfUhi)-~{dIeaaB?Yt(~&Nc&|DhEwPZ;W3PHph32p z4hx6{Kg0psK0-XXq=_4UO#&Hxks%}hIjd1y5S$P8br(WW1>JRTp#07$by z^HC?*D~ufzen+{SpD5kV9$I?Pe}ErG zfjzbI)Qk(DnEK@!)pRu`#; zbIBT~l?zzSm>**fXWV9oic;m;IiA-r3$Fuk*_WYW+PZQrefZ(!)MR{joOOdn4e)z> zx@?4XsIzNkyQn8hL*(xxcmlgRQ)0k0<&=2bp-F|@imK%?kZ z`e*;_D+c9VI`fy9Pk3apJ3u9NlUtZVUT1NKy5zZO2EoisqH!#>z05_~J0Iu}(AV}@ z#Y8aHVGA^Yr(t}CvFBNLK;p5dBjSwk+$c?(xd5&qfLXSMv1y^y`LcLE#uuDzex5$W z9?9(D#%o6b2V*DpqsjfXE8*D%ashL4apxp+5l*Loi~HgfV%z*p136Ex)yuoU zE*?7woh>v8nrQ7D`|e`?a$E``Nz44vpO-v4Kuf3&7}a3+Tw!d=!d+iBYik>r(ygUS zm$LI_PZH|^uJ~SWdv*?c!l@UWe3akxZ( zh&D-epJf~iZ=LH>MeFgQ1Sj>7jn@Ag>v&&%s zT~0il#du5^)QW+i{0wuNx^2v-j#k*}Mmw~?G?ARlHqzGS8ekzi>}XBdwt@itex|jR z)Sr)i&weJzqcEx|UQEwb?5HCcHSi#uP zI-JoPcOistEMEuQ(8;)hdtHq>#t|J(OL+TCWD&$#c-cz}10TeCfa@4jnuSjycQ&b(m8#_%i=;z9^pOr_w1ZJijeX<*!`tIX&jj7QYG}EaDaKMVm-} zLh79TbazjOfa<_qLEo*?ie$I z9~`vtlkP?qJWG3y2D7wtA0L*2-KIQOZZ5u;CE^2nDJC8x#ukE8+C&7d6`1WeT;1$dq z-#<4R$x3Q>Rn_ovQn?666nGq;2Kzq5?MJUX@xtAZagP&00i4cFyNYHDKWu0l#NhXY zQmA1zJUm|wvnOQvPCjOyJ!+2_v!TLCpgPBKAh(Y*kj^I?uZwrin|Ln^_$d0z%PO}M zpNqcYL?Y?pB)G(mTAmcon^JFfdd0$`0J*YBQA`Rub;FSNcC zHWiL^CR;(XqR?irE#%=mULo9|Ri#j}bAN|82s4cmW5E(*LKPn~%P}@In*cFrF@|tC zZN~yx&`SVBZ3fLc0yG2I5;QAudY@WP#t*+R2pXPF)oL+MyU1k`940U}M5n^BZ7i7W|{P&Xa^7Ls$U zDPsNQV(%o!Ah>W(!7H@41OTi*_Y~qD4*C7+5n`54E^9XP!hJ;DJk>A+YcnF@aLS7? 
zEUg_+^SEKE3u(_UUFa*Ahag2g6re*sp2zlG>70Gv=N*Lvs&Vs|3o_TXwHSi zvED|vg?x|yeAEnaevuzMwM$y*Fun?NH*l7Z8U^pc$(;H*!!H;VC2LGwh@<9EPLy?Y zaC>Bs^D-A`rpAyWkS3&_64>t_&=&ZbgBNwc&qD+`=)pe^R|r8n%2iJ*?aOG+33zcM zi&1>PoM3)Ap@$%x)v|OCh1^I~JqjNrcV7Iv~2&gTVLM$D14r%Q#lRAC4i8 zL4uv3#YTSn2LKn8sg72pr>v?mwS*?&>e?!A7PTzCx%HgECUU$2rvpp`gaL%LU-SXH zy4^#{2#C@hr4|P|w9%&3R7^g%ci2xg_4E)BgJXIPEv6Cm(i~EtAP&z^^NskC7uT96 zddl4Z+SlP9$Aw%cdLea{7H%2{a@u2pSi})Z2yHv&2*C$8N`hXZzSePzmmdf-U3;Zn*W?7@WQRax?pW;8)Ipp{A zefxC-&DM>LP5KK~Ke3>PZ`nWyV4ifVd<>@Tz2H;xlsAYUCW-)Bg3Hc{JpjeqclOiv z9l%=R_z6=^0GZ*?4Vby6Fljk%6~S^XhVXsHjfd~fV?-UD8*;h4Yh8|lFP*}I1&xZe z__-MLx$y0*z^Yi)}3wAo@MR+<2 zNU1LV%6fp0PvGZ=6QmtyK)^$MXHC)a$}{-GQJ&|Q(?qq2T4$S{`-B3yFdTAv)L@Ur z#Pei!@y=6IK*!c<2pT+9Z*Py&x-jP``mdmtajst7=hF>m*b3V^!~Q@2=PDE0If9}K zrFPPB!J}Y!UM6nxRmCIMqpmNHvz*wSI1NthUb{Gm1xnn#__IiXf-ZhbJ85-tL9`hE zUB9lNDXXcXj_ZqlDotEZ%68tnd$$?R>c9GGHTv}h;3a*W3yXdaP{dk%tdL_R1oEP*1E}N*qX)Nys4(aG|UnO zyahpAr0T?1A0>i1E~;G+dvc^8`O3-)_r~G7Yk)s6eb%(`MP(jpe}!)o1Tle*-dw{g zeD$y(Yw}sb1W7PQAx{C?g>j<=K!SXO_{S`a$y62KD;UN8rqgGH7c`m1Q~I5AzUn?D zO|sdY4hfXH19lXGfYWy@l3goMAL%7f<6ob+7|biRxsbxU3VAt@rotw1@+Zq(0({nw z6e&AG6XiE4fypd1__m+|Hworhj2r%r#XsK)mJx$Ln%F5`X#N%V$U_OoqP zp}U}$zP_Aq6x!lB^w$Jv{{R!vlfjNgdQms(C$Og=7T^Kxs`g2opd|nWeT0)TjrgVD z6VEfh$zxNrk9y=AIw4tAjyEXp9Kea%lb`Ki{X;VlO^e)40GgaqS5||Gd(&|jjXeZ* z>NvsivH8klrWP7!Xv{Ec?hl2kA9Tl(5}gU=#A$_VLuoMdok=O*ArIV}S!udJ?8K4=pr z!*luoW-J3r`omB=6l!SDK>)uG%KZ5#wku>?eH#RiMgxASexxZk1~fi z(63u)bha2PIAG)JugdAGukM8daI&Ndc`OXN@~$98fGMzR_#L918;!##tNybDXxuD# z@4as@{(Uz<6X^j*TEOT_j#t((K%Jtk;_2CctT73}zqB)Qp8a{k0Db%3Z%L272WUGU z_i#ajXpAegahEr|%la4Hz_a;>p7kRs7^_C=fVr)XCi4gHe-M1Pedlhv z_4O_ISkRF2@vd5`6Me~9c0%+Gt{2Db;Jf+rh_vu$Fvo0oO2dLlC?E<11hp`Od}fmq!5?C)@b$I>fmv z8*)z_jB4x6MhyOTs%Loj%uGCpuqppN4x0{{@_9S+0U*}eojC^tE16yS2a)6 zFPg{2`!7xb+ulXQML3-TF7o|&pp4%J$4tTMFTPk#zxmB21d;=Q=>dK=Z>4X3b02N% zZvbSodGL;prH&=Y^Z}xH5q?zQwV{B7V-2*N2|PF$Lewxb;a;JQQ4&NVz8VjEdkiY;P0!{kpq-A5VtRN}SCP*eQgiMlFQ1H;FhXS9%h~*C9o47isj$L20be5J5D4+Ef zpil^8kxpgh8zxv5-#EtQc=)AdfDGs|1MsTM6dYN)lcuK>dIbECd=<`lDaz_%+p>m6 z7Xp`Ni=nkO5!`1Z6yezDF3uN2o@>-r%i7)nrlD<-Pv)X-fD~$(D~(aS5bUNpcGqcr z1fG#*2uxfksJbdwzN=ADwNO@;RxsOKPkT81*QbskQ3j;4;1{$;Kn;+Q_H)cpaL@;q z0=vSWAd7;&K}|X5%p!P6c1Ixu1S&dVD_&t|hE`35_EvDz{7PC`C0R{A%!wgzlJ5$D zOnt}(AA-suz*+_oa@@sC%HbDGp~f`G2Mvl4bV*lWL@=UC+xk9+R`7$`Jrt26>=A3&`Ztssy7xfB?!M&|bXOu7q38ZY8d4mFK}PvkGk7<}-U zs%N?l(+(#LZUcJlVcu+r`HnU#v3Y2m40wWhfY_9_)uKOhn*HiV1>xJi(BKe+-Sqh#d;}h#MRL#$lj<;qq|e%S<@>1r zJSPE6L00>veP3tddCZ~B;fp3?#oqoU=9o3;VE;e}pZ|0G$$$rO<-O+)nu+hG^~+r} zw^jh_OlS!}4B3EFaA99+3*ZBN)QA&YngPm;7{3d^#<2<#!XF+ak~KXopX0CxQUrlb zR-OV}FhozG9d!(@KdyDg5&in<>&lG;wVb-}lWi@J*g*27`^AO;SQewGWfPn!~rTi7f_4m5!W~UoFG2$7)L-R zz+ZWlPoMnMDidrI7*xAVaI4F>DX3?eENA9PNX2!Dv2cw#h4~y5&cHhxH;%z`nmEq{ zH7Vy}D9Y(QudTabam`nZLEiJ)eJdI9vR9if_wfe)i{}FKwPf(Hc>b z3_LpT@zB2FVIixl%>Qec8&hWI=m2dQ@Z<0h()I8N*K0zm&W+V78&kXV-EHJ26hSsN zOX=DNXl<|7)8{v@;LcH6dmuFPNgO9BF2vKvY*ne&}RF_r+y(6t39E_A>sIh2t$EGqlB`@iDMH0 zf&p|!D4L=f;v8DWKadE@3_vV2MM8s%a!ZRTj+;qCU}ngN=8Jf0bV%$F!T=?!3Rz*Q zDfpA;-l^q)R%Lv=jYgc{1yLI&Kq!S|q%ahvjC6n{;djap-vD(`0BC9+l)Y+Mc-Mau zGz$)Td+7kM#I~*C)2)IAfhh^HhO=*Yu}+zmYVEYSejU@7fzBg!1hUNk4B_!3%MZKU_EtVUn6-!w7BYxb~A;j}^f`53zG?+hB(u~6w z$ZlpIAnr`FMA?hi)UtDp%4CLpaW8?y@$oDvq ziHHgDo#W*1ytGrJ`B~0QG=#~Nv5aYNjeW382|+I0Xus=G-fHY08PO&;bV>Z z35cn^7N*V6y#nt~(V#&`iZVe7WE007Vo-D_N!r9>96;j=@Vdg-eZUELj^Wm$2H(Rx z0T>-HK1j1w{4_QMJq3s0)$YL_W56)L5Pe0CD5K>F(1RE3%QeQUg2%elrQ5-Dll&Zy zZ64DN(@if30#shc;SgK}2*sFOVjjq`jrC~A+OuQ8F8Tt%+wD8~AQiQ>x`2m*TRq13 zZm$y{nkRxfMullDG+}0hFBxG&bAr4m+sL+aERtA`A@&Z+TO%x4}$(>^e7aigvyWLaY1rq%7nwR3gVpYm 
zyePxRE-u$nfrAaBICju-QzLQ6??W0^3v*ALzrzpe)pHsm{5+GwJq=a3mc_vcne}z& zimjGgk+q~kyUNTTX^V5%J}UIG`@+{|`gvLjY<-@-Xb>=X><<2zUP?Dln(k?b5gNNL z6f;S@RtTkUt!WGmvdA+LUg^99d~rMT@ilakL*OMuL%E$vlCWA+4J-u1P)lxe^P3u zS8=jApRF_}ne?%>+TKhr?R}hnvGyi7a`GrHdI(8w;{3f={_F5VVp$pclIJU+;*AiM z#gEuNanmyA`q{}|p6l6tLpV8EGy38w;?H#AQ&b*H1(R#Hyq|v7dRkE{+FwmqcZdoF zL)O(yApL;4=3x-N3mthcU5D0ui>S08H1DSEA|}s_`x-0cCd#7*Hkj^>dkrJ`&DL(0 zUBFH72xtdFd!RKv7ItacuSKFAEgHE7<9i~=3_Wj0}SKr zIGSvqzP)ZDoC|G{xcm5jWTI$Fx=nvSP?+}kuRhQD5C{Z=4s0`VhTA;2gbBy_G#Sq` zO%Ta~BhKzh9YWKBx)oT3whoWN@wr(*9yfX7&6q{Wt%yE)+_cDqtJzfN)70FH9(2ef zpCK<}A3TL0aYD_llf>;aDe#m!BI2OJ0dAW*-*L;R1rojvX->jD3vrz zL#wugiD3!hmKsV%BRobcTRHm18)k#QK|s9||xdSwAVP_AgD{03Fo@P!b^y8IOM$_ED8U8DxXrN!O3c?+t45ZyA=F8Q4;4ngh z_BicfUX4=;1ZL0+iW)I@#>~58B(lZor8HN4jjm=w-Z1&9uHz1*N>p3~Nt)83QK?{m zj4!bvnqR!lVIroM%>4p$d&Hh=%sAOvpZN))0@`c?aAjCnOfR-CB?4ul(X~X~yI6L# zp)F{Srd7-v@gLYZOlu8T)fd0S){P z!cl03>vHrh%wFLQ!7Y9L8NG49Pbd8@!E;N z8`v2%A_k4Vj|8l_%{=+euj5AC6nA}yfeAECIxgCUz);?jDD+)$&fpC@_@-;npC&Y- z%(%Iaa9zoQoSc*x^;u8;@riRgG&!>lL}m|hA^gHK1{707Hq;(eNE zU%8%i=cHejow+hV>x(?D&tu@L$!ZZzz9HIQ3kacK!zS_iAQPtAWreK`^MF%oQf3;k zS^r(b$}yMW?c416?jNP$5(g|a)C1}&0xvbpG^y?3E@Oa?NJQlMC{p_QDMVY~V3m2R zd43n#(<72+X^yraFzv%k&HR2Jb(Q& zg#hZ0L|pl4|LfSD@1`52n;<<1g0P{DbL1YIE0Y4S_~uW86S^g$CI&Y#Sr_8rYqvN6f|P{4z8rOfU~z{>5{DZNBB$l>1Zzf z0pXTsQw!nLi~Ap@w=TUo`v~R)6*1(Uo92qJ7+-Zl?~L7kB2;m5nv5kx}Jmj*oHjS}DEGrt8Zv3I7qy>wbEZ zdarJ;r_F)R##Y7uqOzwj!YKtg5 zd|JtIGGf;Y^szA+3kT1!dt{;=3dp{W2C=3B{})Pz_YiQoX_D#F3`;GqL&j4x*lGA( zRJQNRAKrms`UnBvUtWUt-~d_tc%Ggf1oGh(t~t~F^h!TllDIHEpWX1wqzBFO+jX1& z<~vQ<51)w>I^eaPZh+l1svR+g&XqJNip`O4+AB6+Ag|^y4@=esdWLq4+U}XaODzuC z%Byac;yHN&RxssiZy0?}!I->9m`PHOW|#YHBo>$0C=>mvFxW#2hfODIVuy%?V@!YW zw3m+2${7+Z*+2m$=%*W#@bTj&$%7jTb;DOdYs@;;xF`(JzJU8Mrm|2;-Lv$doT$%?%IM=8jH+q8@E((#yPGb<2Q4{s~1H?wQQ%-h*d z`sk*(%t5T?Vi7ga0XnIY_MBWt1M@$V;%xPkCpTgt%Nw-#ecD>MIW^jm<^ zZdw-Kr+qoOI=P=iH#V-5fynQLL;gt_mKKkrNE@KwiSOyLqxg7t=H8i~0Z(3=)MbmZ zchqn};Eb^M*5E_d4faN8zt7{}tUhR>FV)4Ql6l}npxTqf%S@ZpA|m7%%`j7#ThTCP zCG!$K7(TFD?J`zCjP<_YLX3lb1<94;CE)%t+E(~PqhS3H;)#rGxb_%dp&bSM^x=@7 zax3N4n0NK^m(a3d{vvQ16L@QgKr{w1-s2L@n0mX488DinjF)tyj}UZ#d7=1FRhtSP zvIN6bZ4)sOf3ig^UHYMIK-6%WH)y;94JoBd%zd@xt~38Sz@|fV&tv$weS~V%PT!dG zAfSGpJfJLL!V1NJE`sX_{?J>+?-S4Ab<}ug;KDDME)ALpQZ*r#P7e_5l{A-~N{*59 zW->zYgjtY%Pv%98qvLV1P)KJLgk&ExPmguHJo_grlJ;>R0$i9Cf+y&db1MS3G02W{ z2SHx}Sj@A2gvOEMNn6yUyeQ@U&?j4%UfWj63Or$Kgs(U3lH(&!6oGr-ugG>nFTKu_ z?Xujw?34Qe2WcZfA(Ql`Mq6$BBKW;dAFr_Q>cd+FTr3B=WOPuYiH6|L!rh}wbNJ`s zg^DZ7JJ1T&ls3YtW7ex40&2}|1%K6b4uO|G1s&sylWPQ}vOhfLP-h>!XygB@y@HHl ziJ%M!G!=x#@bZiWFV;8LI1i(G9qY^hLo1(JI$t93)QW#z(6fMmx*T_ixblO+x6$e7 zrnM5PzDy{IK^4sci6ZkUCTJPS``Vqow6MeJT}+aPw@4=QK--wM}h zPeyR2viufYvhcwot(WemT$+Qz>`M(L(=elf3IC}hanrFQz^JepXb z^fwN_!h#B0!<>99X8oM53q=>89uAX^aiZD7$6i_U7V0bF)1guQ`)R)R=hO$&GyYu` zx+9=J&%%de+8AYgft6*Tvi4OFg3Z_eTbR0cc!!Nn>6^6eG`Rq`_8aNTKYAI{7*BNZ z>2P^B-F;<)MKfcd%wOhYY<`;vUWfXmQM1g`yVuiFz~?6KPI~1Hkt;1d>55|n@Q!N zUBrq$-R{#Kd_GrFusS5ywoSJyHWvGslyZVKVsY;i`Q}W9r`%i_H_cR@b`qZzDBVU- z^(ubZ{QcO;yZ}Xk*{cV`w19Bx4!80Z!8Y%)uir%TYe}<|N0cpmHI4jt_$d2TixW~n zdI%#bBld__$?-fr2?VA$@lW#JC&}ylW-Au2LJqQdlKp&Wac)oyoi1a|b|R2~*o48@ zD^TbVnySvaWCXbBG~$>a`9UOwKj)=j#Ca6oWQYw6KNM(%zZ`fh_X{V9>)3pGg9o8a z#HLxHzyPz}Z3Is(PT;$2KpLKEmvBJuT1%b*T95?-#FRK(!n;z;u@alyE>6F7zFx+h z#EnW1&3WWo!PhB>3jahr@1tS6-zHufn;uW_ImeG?xi*a0a2t?F|6fJsiYN8xoZ)i` zY71{T2TxSN)TN2G_949F@pup67(Rh;VNhk0x;CW8Ot5HsV{qQw22!U9H6(wUFRh z4&V_AuMqwK>#(sJMLCqxu1XT{~)n-v( z%1_z@F7X~6n^X?GvLXX^KDV9T8nVwAD43$HW#&==L3U@*4vnHJf~<0V9f8CfA;|Wf 
zB7B{Jo(fo36mXs&lOEGXBo2%*w2XyzuKr~-a}Z385SW$WtuR!xRbzxqnr|hNS>a@mJ@ID$9~p1V2d&*11i2qPc@5Hs9e%y7@V=_eeZwmaR0C{|Q z=N!4B546Y9wcv=U zZ&IcL?a<`nnf;3OiUC%LE>3HW<>`%Zin;50#dCv5HRzWn`nn!EW6uLx6<;~0Z|u^Pf$c8k~yof z0Ok?#9@W;2d_OAJdCwsRFiiWy`{|o=|4GcgziIz77$_LIX`z=B#DfY>`~7|lMVQ$5 z4|69_=T&{dJ*SH7!Iw%0X&oo69}itHe!G&S_fl~?eZBM+lijmH3GCSfDNOe{b@?g2 zyl#xJ+0Lmb*x(}fa&6Ri^0>!=M4yTDM^h5n1%p}Q~g04GA`BlSTA)oc@ z*XcJYx!Ah!?UnY^U(Nq0ZIyS@*V}KVbsX70uC~`KJYoT39Wh$a(fYMCxcZM$^Zg$) z@3&wMF;lF3hslFb3l(P0t03m6OUBxeFA&aTOK~yuo73_VCyb?Q~P{t zxMrAxT{IEG`7L#oV5sswQt9WC+;lI}6*2k*nt;KLaqSQhu#5>N4@YY_3x;`rR!L?? z=s)e`5N)D`^@HUS=85<=;l_jP%|mLPG^&mM{hmzHbXK0V5z^_v-XI4LP1L)ER@eV| zrILQM2=0NSpkUz8v*`Cj+w%P3Q-wfmxF<-Za7vtWrIOu!SaRHT!W$+Yo)AN86rc~F z6+P%;TWu*eXdF@pNy|-VByX9?*yCZcn^;51`@RfwzRA6?Z8Td(UEf2ltuos?2lz^5 zKYQ)UwY0kM<+S|5%jw0!d$hBS76sc0%uphto0?QKtBphNduUnMaD@n(JH|$ce!8hs z01-k6wNbdv1b1#6*r2d+FxlP;n`a|9I#ntqd={~hEAm~y2g`^uD+DU4jEPdWhL#Bq znzwd;p4!JdsnBL)%ZYpwO|JqQ6_h~ODNg2SR=ic5O%9jO1Tg=zS|2`!c>(eT3cTqp z2M9!>L;QA(Wx^P(6gSVOn^(p;>Y((v$V)i=&NoR9t65m)!jaQA#f$8%{CoP@lUzUS zeV@7c$~U(D4sAvR5Nf#zgJ$_SE#iMR@=W?^%gSm$FrFO_rE1{hGCkbtYKAMc)3FTRa035$#xi+;NYJ^G3yP10%M?-S{H4xb`?kSbGYrOt)rc_gYbx;SqQbp9QY-I85u2Id6v34 zPN~3E`>ggBcX6V`O~#F~YojHEWQ)9;QNrjqozt>Hy}L4w_7|}>TqeMSeAaHOLw>ac zi5e-*qN0+H%E6f$0v3Y_D40V>6gdk~#i`fmg8+@bD2SqP>ZK9*RIL!4W`Vjec@5eJ z9*|$!Wd9pF%SV;#5G$j~Xf=&-@_vMC4-6o2i6@VSi6fxO=$b~J^>Ed)EZFZHPo$HPJ>JNw zm49hc6z2~aJf;~E>J={(QdO~(D$_RSi#29=#W>TR7_+!(8IA>S@jK7sw+AF@-NrX< zGMEO)fS}012M3g>X8$SuY>IK7+&Z6*OmJDD9o;TD{~Z@JiI0#*DmT5$p-Ppg<}Iuu zEF%Pg)aQC>a}_Q*Cn87yb4-m`euiKSgT~MX?toLrR4@W}svLA+%^Q(EkAc}dhoI#S z^;3-HO()AySU4oI=oXPgH!u&~2jF(uUAcV9|hKYSyt4OU|_W;Y+l)X{t{Bx+e*eIFkay>F-0L5r~8|A6se6NK=p zT)e?|c2Z;hb0$UHKKsWxmkTBzoJE=|7BR1}EH{sJw74pG7q?&b&t$tgEga)2VVR`t zi9b&7<|$h zY~uQa7%!y1M_Bc%qb>ZV?WK<^w_sRVh$m8WSd3_Q-qxuqp3NnlP!7KYQ=z)P3;Jo| z2Ac+xe!LWVo$2&x#;0=SW>E_FEK&!Qcx$1Q-rFsuuRKaHHPZmml+%w9cJ){f;xzNT%x7Qyt@;(cmc_|~xv$#hr+SAYTQ}8$eKds`-RWRe+F^s)M2gA#N zhrcZDNAumxa1qybGB!Wth4CH+Ly!s9*{Q@jmRa)1kKn%u4P`esv2g-#GPs;+Lf~As z)iIwHxvpin=(ihC_HU6qMAQ+r|AeYbV!(=wkvGd0KNQqRCkGt19TB-~i+-$M!L;c5 zi|`{vH^ymnuYEJ!Ccg%6q zvH>pu2jLTD?~{J2jiqJ=%3*P^*kj{brtFHoxS*B#fa!5)b%al!<-*nU#>&^yeKwE3 zy!&G!hHcaCerm|9X|4tB9dTk$L#QQ2BK|tI!P!ZG_^-B!PX}mzjM(g^A`ZDH`E$_t zQ@+AI*GT_37RhG}kZTAiyVSv!C*a%xpPlT|P)9|l5MiPZ89k|@hUVm;tqd&FTdtn z?L<@bf0Ew-43-_5k?}O^?<6OgDb=yD<{V+j7j;bp6`_qro;(SsOj9JgHx2Ks{j4hr zX2!c{Gwmbb*+|#Q>uII7f`)zrEg-ZTIe%Sl4KNXpzb|6j3T{%Sfjm*$l*iSRY5yS8e_W$tU>WQLOOuT*l0Eku9KIkikis7(=^_iQy2+b zF~TVy!CUBqKIJ@U#eEkMuqm5neaTGptpZ&1*EdqQq`*z30}Jwf)|`w6WFh$17%p7p|}7r$9@WqA(QETy91ZCZ~rW**(YPWL%^tyzjx6N_l1a zpygVuRg*HFe|_N~psc=^E|>15mC_FQ1ZK^o43%bXKC_=XygW$1>U}?bqy8u9+l~J` zh}get|IcY}cpD-PaTasgyo~S%5i{j++5)m3p-%7GFMoSBL9RHJsg~blLHY@k=&VC@ zr%ek&e4WknyM@*4H0*2v@e4ASI?iTi?MZqNo+ppKAQuaTgY;_oeaN9?86tNM0agVf4RC z#Vl}`A-y4g?myaFta0KG-S`TO{g-xd)(j(b0AAm$-A;$h>yN58(|ffK@K3XcA2|$j znD1(VDF8FMc>3>YC;Q;y{Q%BSD)O+w&u5W5byE}TStNOnU1;`4m=iv!z{__xU>^T= ziL-i|r(iHo!jAoIx`nyqU6`vI_(2Q)EYC3c_LV($=YNNz;0}!PEb}>&{kQIC&gAJU z&K*t(#yVq?fn4lxygx}?#%7A2@jdm(DH3JzTD(8`ozI9af4?-e)Vz5LJg)RY>VVna90V_5d&nGNs&$CQ_py~T}_i+G&1mv&~_ z8ON!A%wY($?f$*n>Bh@ACxLAb7=yJ^;4lYYC}haH3;)zoO!LqHH_ zRtUI`@zG_JH??kTLX9N z-3}W`1aH2Bl=)z=opyFNF-yOSONv)hd%l-$_TJ$%{}BH}ybE8{VUtusGb~^@)z^&n z(Gc(PQPafB0p_Im?2a}$q9*{0Y>Y)|k@DaxFx?xoIoB7E` zSiDdFMo83ZjyYv^iAXpAzcjK`u+fJ`xV5bCzR&X{p5>dP9OtH=&ECtOecJE9-wDp; zb;SIbAYOjYdR)~N*Ok%8nXll#3tvzu!~;O@XHfPvd0_F{P2~W7dY8v-G>ck}5BD`Y zJj68i5d1j8;dX6=StMnS*!U@2ImUE+AMHMIsEdh(0+kAUp8@Nfefl@j0lTJZnmmSZ z3b>c}IQg6!j&Gz%KfKP@IW=#^kq0bF^1OWK@7sUh^Y=vYg!y`QHO=x;W+b0EmI5X( 
zC3|Aqf_6=Q`lZ>ISP(+=xN-VUl;{Q=0f40`kin1-t*)Ik-(O72<5k2+xQO5|ra3_9 zD}ao~NM^N93oK-rV3qb;42#(qT*VU@G|vq-`SrcZyfdFL5yJ6T{EYneVuGhZr)?j< zrEghwegpBKW9D@XoMXPjg@^bE9Q5Dp?=k@=3RJG*_D1p@Xt|}RJr%<%h-gfKwocWBf-J5>qs#@AvSF3slW7Y)K?nMf+kPk~asj zmK1!x$y0_SK@EIRpQ&!$Wnp7&K8t^NsS0rw$LZ&TtaT05y$n2rBru9E(%^6%lGlEpvB!)e7r zO<~tZIE^@_{Tncz-#^|+Kkn|vq&&^}#kw+IbcW`Kb-&01M?w*(2%lC z<@}w)(h%M9UT8*#zpN#=w97c{f2sc>P&NwNF#@oCgd&F_^tubo->$uz-miTKj+SLPmZ?xI$>c8t5Ch6fmOq@Qgz?7$&?4PH1 zui=C1>x5!|(j@J^hcP9-I2gmpcz7-I=qKNho7dld``r8aGc}BS zme(73sjCb#U&Yb5b1I%Q$2w?76x>yjyf>XJH`?K|sRs(3@)i7yrii&jqDf{?svAT< z#UKXlfrD@k9~;cr5LzgkA!ofRPNcKGN!>tsm$vdZ<0!RSlmj{B#2g*#(~G_cN9yv~0z#{`wbit=w1hB>Xt_8zpP!pU;{s<6 z)E}BmG)D~$eS&q4phsvp*v~#1WW%}wvQ}#0Yh`8SG8zuo(%i~wnkT~U*3qwcXM>1< z_fol~085+@VizYK$DA;qCNLqt2>9jiNZZkc8DB?0!@R}>s(`;BM*)h-XrT#=@aqbi zO3TYv2*Pqr`Afis+JP0^1Oo@BnL3!5>4(gBw);ovz5pIr)9x^*XQ5Ib-rW&okRLVWyy99=@T5yMQs?Cam9KAXZSW>uHnUHmh2BD+aGoJ zrjt+3rhvfdErE7Xn$)XCmF>y}UwQjOW5stVL%N~yn&LaM8 zmn-x5z0w;N{`7=Vz=kr_2;!egYYM8325M*Rv3RO~sjNO}OV-v9f(OBwYJ2o!>UZ?X z-_Dl|zNh$dHlM%LZnHm87dJw?)-rf`8e)@?;Qpix^F|GmN_>ap)9FLaj6TLA9#{`0 z!PAF1-Q;%EWgis=0?Pt|)(Uy`Dys;-My&QK`-Bbx+76mwUFPx;rnhYbVTWk!@Aq~P z);gOjP($PtNLWNC1>n+=5d=egFEB^+jVjSD(uOdt@dLCzNzpFZGul~)?k^$b)GsQfSzrj? zEY&d!!s|WfL{w!#jwv4s<@|+!Q9_3GgXcYP*Z@QJS@{tPVW|VX=-?H_0}e=4fYJ8k zZ8EPk$A3PIrt&ZsaC7qn1+#8sf5pY@${f)+zNP%8(oI z@#^!(zkU!y?G+`DaD4jS@xM>6)&D~X-E#VB;~%A~m9HQO z`yhQV_qXZaf8+l^-5pN>Gp9M{wi!%URr|OFp~U!l-d)d>ngA$`YCb-V(rYkSzsKVK z0mV22v*bjnGx7e2$TSO$7ekFdpGJ8eza@=52$WOn7~=PX?q>QQxBe!*-P45Z*8{e; z^YN3A8Ks3H%nC}glIuq$&l+}MoZiD9*P{epjLreF{J$!r}qBHK`>80aq z>CMiU)76RK%X}<9=YwJjoablwjPKIcXOg1WbIN8RHsw!Z5#@)=mvFA(17{Hvp=+K< z^Ut_Ke^mZ%+Ck&14^y>S-cI-NIk8#UV#9Qt6o~DY5RPFUdW^Y|+IGQevpCK&+>g`G z2S&|RpEeuGsN`uM2gEWGC)~i}6#BS%fPq*b#i))H))|RrlG%*u3G@bf1lc@*en@8eE&ZAwnlYmp#gWJ{iQ5dx z?yhdKyQ^zo03?um-n_fM&*Pk%CvPrT3RQ)wMgV!w7ORJchlhu|hs@$d-_J|;al98c zo*w#@J!gw)`jd~NkVnij1Vg~6j@C?@g=rh#CQ=m5MRcY-L@+Qwz@gB}F;gyxT#Uv$ zt}^&pz(qbR)?|Qv_TG33PkeS!P`ZKFp4#L1I>)o`3`PAN}tZK;9v;c)bJY8wl288y~kp@pi<)M ze6VlN3L%sMevcYVSK@J*)_3#4G3Ge&ul!zKjT<<`FGh5nyoi|?ZY3r)L+s+%v9I2jK*Ak3Dp!W1v3CFuI#{`;H~%Aql& zQ`1JF&?KByV^Rja*Oav0-%X99O&r`WAqZFj+vh4(G`56od@b6ksbZ=9743Dr#F(+s zm3+7l^S$Z>#^>n6O0)oo8bOsH4Jf0W!cN-bH+!TC?@Dq%@IPw}CbYEFJ{jWwuf~ao zyMtcZMcB7l+hMbO8R6Fohd-@^R$Q9_Yq|-DsZeADyaxy~)Us6g1$NU!;~e8d)Dygn zE3e}V52rEpL=AbMjc4I=e2K3CPjgnVHtP!qUu0gqtJGz_ghzh1Z|4#@{w+I9Q^7gM zJu>xxK@%9M!M6-fF5_B5yih3S$&$V1B?MpB;Lq3q0Otm5iUUphVF<4_Ob5XAh_yox zVKCbPm;!>^(YG|tHVPkV<0dBA#&c#QPNXAm z$7+mAu4F*kLYQTLMIS~%fVY?v$<=b2&~r1fE(V2N`mwH2u0k+ROznf`3XSC5AYeJa zT=BTC;*9sw4?~no*$Ousv?pp{8@zMu(Dz+3y~XC?ReWWQpryKsD9r{Vlh8bq3m~Di zK@ge|KcdP33HGU19MED~VIxf?y3tfv>~Z)?56%5Hho8_4KZORib@e`i`B9b{=jR6F z^LK8bKsRn6TNlSWBLl%{ZmqH2X#Np8h>>7 z$LZVM{~Q|APS={RuuF0?jczgsZvERCF?g6tJIF>d?qMiApJIZuL{1ag_-R_&c@YgT zJVi|qx)dyyX`OhQX%}-wwU$_Y#<1p63GOy}bgtnN5dAF_>i?VNm(su5`axE?ti=7S z+_qWlU!vWz$R}BZvs)nDIUx2=(5U%3jxX23#PjU9@s4C%s_D-@Jr?VgL4t z1d=lOux)lZjv)G7c1~6};pJ-YUfMbE_RLDQ`GFo3u;&%|dY_-^pUicur z(7%Z(*YlzAwa95V#nb`g=>#tJK3$>CWV*PAUlw*Ubz1*vhZ&3jm9S<%jXl{W%zrms z8(j++s>3(%+yDST07*naRM|CvkqTG&jERE=#tWE2ZZ_|w+t7~pS|6mJb>B>P8+ZA} z30C|wul!FTl31LB@5Cc#Tg*r8B7=-}xmY)j%xM;U!RS&>2cb(T zC^LUG%VOM6rl&l)yX|-i+zGDwWJ614w_Kq4H5N=HVL#qvEY{_|;c}=~4qUy53wFo8 zf#yYrb`3e4M+9-ckHEb<_bLOFDV+*zr{p#v!T1KPGvR$C%{*sSj;|`ma1x|RpcI`%5S%E6jA1=r{2)F7S zWV5@2Rve4;-Tj^P;r-Xs+g*&fu8?Qz8tE|U8o$JKolP1}ckaIpA4I)^ld!Gl2Q`G; zU>l?DFotha8zG0BPHH(qi>B4!v|ojAX`N>`7QqzoMSoPS5gJ8k<_>GpWBXBKQg9I; zSJF89SmBH(QL5!3-MYm|aT|PpC!CfQWu1X@)+O`I)}*WxFm+LTBJj;Vu7u<4NXvtZ 
zb{(A0LiD?6k@JoByfZ{jQRm`$mbJC5Gtt6ug0MQfjYi)8kjEP$&M0)7aMSl7m>cRaKwUjKkXgfhi`UXa#^dX z7(!6+P?Jg9(Lgh?GvM$kHz(&oRJIE^2#->$#Nml_>5)rGzx1PxqmaXJ2dA8}cL z$Gyq^%-Rzfg4E~N>y%+a!6A#owSQK=`a?3EgCl4Tn@2+)Vpa&^52f?{9 znufTdPxmKB={ADMWz2kEI6Of3)j=z+9czXre(D_GHO7~4*K|t3iFJts1B&X07AW_& zE2_+Wsr7XNU&_i=u4YkWQ+Zq579SMP*avozW{=`Sc#je!=O#X#-Gkrc%x18wCh${y z&STTzqn|3#K`o&HYdGDS#O9Z4I)oupg#LB@umFYM$n?U)1SY^FmIKFUk6*wmyl>{pZ2?3e) zcf2qrf@)LG7BnU1E;Taw$4CqOG-_lLH~bXs6&AM9t3%(bbBcYguuJV{BM%u{x*l-O z(bRNAWQ_3Rn*#jmc;MyPf#~+Fvx)V0PSe~let5YG0bcLnj}Hy6{WxKrnJC&udEljE zbfF8(sfTnm3zf-Gq@)r0TDPYrU#83VuQOFC@X8?u9S|Dh410{zL{CgPGccy|RLSsSbgrQO_^a)!Xh^lw zR+#slH7L)_>4KLJ%A*|Q7i94WpV&J|tDKzj(1GG*D>s?2bu_;QY#$VIi|BEfiCRm6 zO_0A6$!ygg9-KH0!F0k#-EQ4Q@U@Mw<&AU& zLG5#cYw2bmr>VnrPLf%KP&-LFFMoP4vaa*NlWCsjApMCsl1d75mYTp)FB8^#nyvs& zH8Q@~e-$Q-okN`SZgH}vnqNO}zl}ihZo1pt#M!25_iE<~M9nV$6n42oh&F#PM%DObe)HE{+xfRApUp=X1=l&|`7$Oc_t|xm8G4+c z`ubNeTl~ZKa~rqV#ZzPECd`wdd(>>L{~-+6`-=!l&Z&^^MJG>d)aOKb5L`97@LWV{ z@f<$B-dK{pe7eCpU!~7qKnUm&=buK%j@d0a3sdSk7>2&O(98@rwUqk|8Un*jKR6OP z9W{NxNtkN|B!)O83(a0uZ!pBns0fi1US+`(n?(p-G+}AD0A`^*L_1frKLs>0=n7d} zOuAN`GH)pK(y4CEI^mBf4LCVkX1WOT8ZqVj@@mXjqq!B&?B<^>97NmAz|JB+ZcAo9GUnN@Cxc5QWAl%;`Q!jBraRX-jqS2Wv_ltmSp<0^p?;ZkSnmR) zjSB_^ko%Y@YInyY$C9vW%Q+s>k;-QbztMdZ@|fVwoG4H z2M99N!V42XHY7K$ErAQ;bmtBNHUt`Xx9+4Lj((JeJDv2(>Q~WKZwU@`b7Q( zoXme(&ZBlNAsj2S5kr;urgm2$FyV97LSb<$4w{GQfZH*@R4ijTyFx5k_txR0Ca7bElu$ ztE05j9wCrB+1`Mc%?zM%L;Xsj7DOUIX4@lF;siX(XFf(fi@ZZ%#=DqVE1)!wS>8U7 zKJqDOS(M|uO1=?|I+gD-u2M|_Z5H%j7Su6z0BqJg^1f)Nz-I0r0EqjeLpIp=n5Qrq zW7U5NzG88*1Rrn(x(B5~z}Q(>NL|M25L_J+Z$v*2n41-_>vgF%oIx@1Ua;8%A7MsEB^?7 zd24c*?t{Ji`2Ongu0Q33NMS*Hr}W)eU#i_!MtOhZUP^%7U>Q&Uxa1@haR+Z#5a3U zA8TYw8%al%xR8hLTDU?Ry7*8W%GdhLcR{AnreGKn%ze=MT%W9_7Y8q-?;L(Be6RJOr=9?*$>TOcWA}XTVH&x| zg2Zr$=79$iEDX1C4E!eJoQ1tiHF@sCbiBXZOoul(-E>LDt;W&Jwe)Xp*3*ldwe(9n z$Mv`fb~0)*85aiPNEe@jVR@O8;O<`L`N9Nef3z5T8CQvEl?9JSmS2V`3nqsT530s| zuI6$Q1CVWDn z2|)bil@&}4H9158M)_@=EG@4rrps4W@f!p^xwD(K!RXVKwY9Xgh=v=QVLePp!8wD(q57r=uBaAKUcw#_Pd6|#M01`6{@v-j zX?O8`7BSmk*a`xF+EaqW_LiXnhZqv9a^)Lb9_es;TxieF=*b#d-!F5F*IoQ19pdmB zyJnVL#Lt|EC)+7}aJ)AZ#wxr);8n9@i5Y&G$X<%8|0T^qmuc$;+BR*1Go37!hv0_R!*JC>;8 zRv~mFap0$?VS19MyrN@z$l_l|;ck4icn(dyKBsjeP_wOSJlcwKEJp?S@!2ZQ0?*ZO z@~QYT7pIILygB$k8}sFd@@l$D+Q^&dY8l>FpG`Ugo!^T13ZTQ)0B{mh4cUL6{V!~J z8~rA}@k~4+aKx?^ibxz5kPrwXU_cYGgAdUK;JY|#rL_^7lAOv_$84_&&Ndk53!3Z$ zUxSNsee#XeL?jRE>feQdx;n0Hhno(7i2R^|ybC`{WT4xOK^uRi9fahZmgq^I`rYHC z0_Z=&4}>I7Bm^+yCubM9DAwg*-Krtl=R2YtYs))Jf!2PE_z^4MFq=~g9Ctlp6!A~l zNfS-Zwc2$~pvUx?6bff_WKm;25)%|IjbMs)IgPdt4O2*_#YB&d%^@e!uJ5pcFxulC zR|dBJ*!qnBAp*mrWzGH-j^QE#qO!?6d4uoake^D#yhT+lu?)6XJUZlW1iX<86oeTBEu_V(J9(FEmWDxDtHA_Lzni|4ekF~*$^!MvFQx9? z*MY@-qB)Jpm^*OT0h_$bbBw527U$hKyq@D|-tTrUr5`ZqKWu12!Ct*IOy9mf;Am<3 zh~gw_i>P5RPn}!3e9#e$tOEH51<47U@M%XICh|AQA)M&0DiQ<! 
zS}|$4SZ2a;=eXg{m-TcDhi>z|&!6?+q;EJDfGxqSF7wv+{Mu%Av3z2`HehbQ4bi&m zp#e5VjJweI`_$o9eiJbF^&N6GIYi#<2mRfLP*+{u~?=SKRN zKZLLgnwsZ0ei1GxRPppMCV-d&LZHQ>4QL_gU}4J~T7$7JHL}$Bktv6#$6pjX(Pl@` z{KAv@>g@PBcZSIW@5TEty&;~$h%wx-8bkW5F`jul%XqVT5LcgMJx=s;k#0~U?b4OYEIbj^?Cgdai~~$Osbn2-UHnjp4GOr4 z;3Wee5l;LViLt7t5rQ=R3@KO){3GKqQzDA6DgK*j6k0lT<_Q5IS~u*fYf93^L41o- z_!>jpjk73OK>#&gUZXEC4oQS@oh1!t!<^B{`B>< zyL*`Sc6#ah#`7%N&>z5ge;+f&?9+=~?bWrkOxo2I9oe_jB22%V{aV-@vAed^T}?lH z?I&rtjn)c+&{w~BHGS>ItEqN)DZP95o%F$`3!?!769^HDtmuE>HBY=^F$!Fxtq=~i zWIQR(1qR~JpkIEoPdm-!bgBMa+F5;t`l3zPy`S3fGKUTENF2^Wx;Bf>4wVf!>bV|s zy(U^10^hv+jlP8TvDRY|JWelwlgkI&_!MQqPn=W1en%jt0Larj5%Z)aaOTQdCtWEt zQNNfrACTnwvgATfmMV7h@52o&@dz51X zs87aM7u-=B+QoZd5~sS6{{(@VDkKBnvFSTph6kiy?8y@vRz;nv7jhy4;dMG#8moIC zNH`~spGv3q-j`{-j}C~NNBgmT(hp%EUG!=@#8blz&c$;XU!g*sKC;{}p`|upCw|Vd z*_V#TYaEEv;B@!}R@jTo4cmP;EiYlJe2D|Q7O`;Q4?enyB(dSHH&{1jejkAIW73Eh zZe)sA3R}eyV@Xd-IOSSeQ(}RTYC(c7x!>X=LVjq80vrXFij5U`btt2a1WD81Ja&O* z0Mr8bbCQqrmVR7{B|Yg7dy_@uVlXM~QiwfH> z0)PhdUo4ApTEGNUh&H%+Bi$N6vqs=?4?Gy5A;#1JR%Uu4_)OLY=cJvwIhZn^XVYsi zgK~77Z+mh*+UXvGuG@>PbdS9D5!yLL&WGF;1rUNB&c`@@j?F0WdUe=JJFJ7l9RW#W z$x^f_Vv}j+t}6vH1_CZb2)cBf?U{GR3EY_bDyj{E`Bk(mU&fsAW_RtGz-tahpSi~Y z1AiAsk~b@6d{0Xw4kq`~Py7Eqz1sOT1|9?or*U0QsvCQ}ESeQE4b@OCpl6F|Mu#kd zPU~ZuQDj>Q4v}V^6N>KtL0bD?zRZb3_$0WE)4?T<)*k*_RQ1143@AkeNuJBblX(;$ zPhW^8v$fGw&Hll`9mX3gqx97)qx2>Iu68qH6Dgxe+i+1(Fn7`S<8Ng&=t2Z9EJ{!sFxAf^@VXAsyw|-FMEJZkv-b$IWG3@@+OxOh zhCdN@RR8wsm(uOk8|jVJm(mtOG$-h1L5w_!8DgqF3;HYo4iKu`ZF9=w`1Q0lUJK#Z zt^SR;t|JT+t2ECu&65m{!IkZ$^wt0HN_ywZxAFVIZdqrTF5g*8&%b`-;qc)}R_GJT zOE{i@C2&xK_qT**J>S2T ze)s-wq}txw_{RC)Nq3LO8z*=2aIhK`dee+q+?ts6Bihg zSQm4eEh-dhX>!xiM3Ds(@f27!;4Vvzkv0qc8bXB$rcK!dP7^yds1`y{*1~Vi7~xs( z2!R3&{sICN1zzr8>twydV!^V7j^)Q9$9pR4pXHW+B`|y++7rdM^8I;N(;NCD1Ox17 zmkZeyP8(c7z%$0ddp5VsjB~Y*Oc`Q_;OIpxUU1(~o8ycSV}*9;>%>O+>;gvfLr=G3 zT@%54&8iH>n=#>hAPb6dM6%Gu%xrtmPDefbisCL|0v&M?q=}+IVb|`?4!iJZ97W$D zc+qZ=dwHZ5i!y~+i`~VvwY`$NYYgVj5!-Fq8cV&RT(OKrDs zE)C2)wNaBYoj;Q)L5?Ed1`o+Q&8FJ?tRWO@lU_lRS`()NFldTzjFY9-AYH~e;07$- zDypq0c?3MX*uPC2W_zfPIuazIO#XQ-HWrMp;7|e8ML|4?{$S%ss2G4C{?Wxb#)^1B z6Q`K|js}=$>3q7PXp56Fn3nDiqjbB95uuS^#3+O%pDej8(8YBvsnZQj&UXVqT;cU(Zpw@!FnrfA}KVb z%$R=lW7*GqG+w+DABYz1g;+FT%-nQ3f=~2x?iMh*(gUxJKv`YphR}4y#D9p^v#^V- z*+ftXHbMZV5PyOO*r>UXc5oGOo0GKn_@2jxw7)1jQs#%K4?ngd>Q#QLUj0B~O(9nh zM(&~LqV_4|FNCot`B);Kkx{w1Rn7LS`;vYbKQ{O(k*uq5xsHbF3oW!R@pbuN%2mnW z+3nK@fxYQs`oZX%>~8HMoVW|_WW@gTYsv(Gn(6HkyO85eR0?mVwS%kaxBlk8gFuYZ z&u{(>w!f;O=K4Buv@quijS41<786Ccznre@KA)bu$7y!&va7lO*Dy9ZH_by)hOSY+ z1Yvt2b>7o|lmw3nq_q}s7*B3P7$H;ymy1&zrMg>LrjHL7{5YX4KE#sdGPW(hach8P z7e`7X&F-*($Th5Ho|&}jn^X4>*>z>KV9<(8+Cf#z94(6-M4Zra*wG^gYkwC)_$Tf4 z^gNE`Hy17o)d%DQNp0X$1p(O<0vpu1l48sv>Ya^mr4O#Wh<3~~vQq)gsoQdL3LJu& zK~X)_)%S6zey6>ee%gIKT^e0VnmfLL@asmO1v@fpwj(0?$`gmc{Rqsm?oNr}-9ZPMhErkaNO#grul8Pn!CFl_ z>Hnrj?-HYA{d3a%OJc^yO6^v9Vf=i0Yw@-84$SK}_GGN8@lM|dzA#Ql;EY-oZMLD$ z8nt~?`{PtOYVV%CwAZx$$&CDLrIG&it(1Nb()pzWv>@D3K3&|oZ;X4uT5}&Ju-vIT zH!>ewmaejJJI`UgX=NVQ(6YIOpy|y|(T~?v%KzTh7<0{9`t!@|isCaZZCb-mHVCsB zoq&0TrHl%QqY8U8foLF&k(my>MYEuVlXFi4)YQ-gmM~Rerip;SMV;o1Z3I|4qsK@L zEg}3PAt*C$13^{;VVO*_nnK2x0cWjDIs|ECBxvy320vqlQR6i3rTgkZF>gZ6>Wnq3szcbl7A zX>ad@oFf*4t7xg*xc_prV~m!=6k%rwM?paA+VJEx(gs{P&gaaqFh6aA<{fdFjh9} zEYkUWM#2yd$Jhf_(wZ~k#H*s*l3w`ExQ@VCzD1l|Tjf^e=r?lCIfPg7n4k4Y8{v$0 z*+14uZ5`>bYnDk!2-T)Tn1E1ejM~v5TgM!aleWRf?zoN9`xS7r&D`F>^D-L<^(D-c zxiXiu(5O>+Ng+xTQ{w>wcC!rMVwx$3phO(Dz$i4XR&UIYnxeIS6L$oRt;03=MKoYr zJ;ouM5{C$@(exTM!2>o?a|@$&@e|MVcQwplSAF?u-7|(u!FH|!V-EgPxMvjx=r3KW 
zMrk$Z5dL(?b6cHtbDX&ZU)$uS23^tj*#R0|`|xayG4vT;ZJARfM>VWuYPU0#BOSHM7&RA^3J+tLT-fGG{X4-N28w;>v;D_TOK{2*I04If^7gZ2f! zGOv58C_0*<93dTdt{S5y4F$yd2F)+%p*$Pa3X^8BLhV^cjL?#4G8f5nI)%YjU;i^ENwDxK2@>J`@x8P$U1qNt4 zefiyAO_vUCq}QMS>$J7>Aq)-Y#|)814>@9c|0aVBKO@~O1YOUirK9z<3iGtoXP2?} zQyj1U1p}l?#2kL?Nb?7kC|Yd{c2AmLCeLrD-uRDr&nRYaOEej-duKjTFaFUfK9!K`}A7{1TW z?!Cngj-y^?Lb(`bG8|Kw)LI+1^@!Cu&nE&5*+3?mICXYso`r{lfEZf_C-@M4d0aM* zf8Xb{tsk?%xIWlOUpjg@ec|wBcAMEXd_pw9@zR2cQt-umm1n%NGS9BZ5QxEka(p;v zw{RZZ>2=cXB~G})-(HPHi1a7uQJ(6L^Qhd}=NqF}(|@)1U!?yNpA!46@AF1M{rN%l z_-hDbmT{o_ox^XWZ}q;8bAER+tCUDn%wc+`^?rK2^9DGy7Y8To*VzSR0VH!}**fQb zZLOZ}!G!rRhZfIqSlLdY_f#MM5v*OS-ZC4F6$1pBFUMSdp{( z6oRFCQsj#q4xsb=pDi`gpIpP?xO*(0LQv~arqupbcu_+eH_Typr_*wU7;0|mK$Zw{ zs?cTxEIdrKhbL$_qcd?qT4R$!IgPWIf)};A6r3sGGPw(49jbdD3u3<`5y+-qC5@sc zrZJZP%@9G23v$z(SZG>-BKYk*rq%b=#cY`-u9Ts3T*Vl}9zLq@g@t8C`0P?kNQPI& zI%pRO%7u0?|7K8`qskUU1M^QV!g^DVY4RGFHZd20Vl2+{Q%fGCqlx(~I}Ml)A&Ew70aK#^HYuD#2n-A4g3#DVw2ck(;Z)>+=3{ zsZJTgqqkV}<3kk9V1*XeA#rHXw1&wzHrO@l(sS2N2h8aT0$J(@O&U$_P&<2-MHNn_ zb;CSm0Xl_Fg-Lz%hH$41#UJvmfUkb5Ho8mcO|<6@<;NHU4HjAov=AGHmVqXWH*hPv z#tE4w7sG)n_x7qIBtR4WUgEN;&k8yASzqSg9rKLw1-z;TTjQ9{r*&(r2Ai<-Zlh+o2?yc(IhDX0(AYp#Y6Nco=oavTR; zDV|I1EyvJ_Kpo5SLIZ?ysboY~sl)(w*#~YOc@(a457pi+DoQ|KgDInSCkM zkUl8f=;7n@9s;lhgkP&{gdy(5>~WRxgXoJsV!HqfQUo7p2sJsFLYg&ZgTd1RVZ#tw zYOaQ06T0BRB4r4f4)q_CugL7Lf}0Nh3A#~1=#MlRAq*GVmFpSxEi;LFS8$_z{}f!U zTF`%QieTNLJT^DlYm?|y8&^h&Yk@i^-ua-}r+uY~?i5X|5pE~g&mcRr-~hbiq~~{Y zXALxlFxi_W-dMi>90&GI6PmMtF!Sd473gA@e)@nkjwR>5>?c+YVJ($3i(RC`L+2^R z-x|F2YV#_Zsh7bCMjHZe>?z4v`VNj!mm}2l?lQos58sdBLU}!h`4n z1K;)OC77gDXm|t7?Ezy!;gX=So?eFtfI^#-drzTMqi{Ozaat?uUG{7b$wxsPva~ow z5_w?oJpoJJIaZ`(UFbuHImvd2UjoRsNoNd3ZGxvA<`p%AJ7j3e&|#)c_Efk7;F_O` zguds`^cMyDqU?|J;+fA9IbBth$SK7iu03;Fe;%wpL+?>*WT4z0-$1gwmad`YrMmK` zA4FLNa(yPyof!f*+3C2k`D(hleG@_h5eNBorYXbNgsCx%5P}&m9NPyog_`&WGuj z_r4wiurW+(o=6_6s1u0-t)F$<379<0!-%ahX_>oT!IV9eR-6lbeErm(PcIK%Nq@NW zhiNicgW36GiaeXX*fnjKj<4^w(`xH?(yt%=XJ`PzOsnBDp9W^eQz;MdEz!gA{w9w2 z-)q00-bDy@*nqLBZ>LR8soZ99xrx`&H}Pxn+qf`%sHzu0k?FaK8RoBUP12uTMN}7D zd1hq%c4jNid^rbY(%RNfE4YJv4UWy7%^tk4hZBrS4)4>d6y?f#yI}LU_h38`Bpe(Z z;^T@%4*e}?W#Z#hDdMRS;3C2WrChM4Up0M~fTS*NkI*QH6aGjzV@hGZv(sc>GOy!* zCVpdZ*wTdsE4+PUIKG{UATK{mw8#fT3-_uyRoRS@6gz=UBWG! 
zKdGrT#B>Cnh{cFbiwE?hxS^2Cg_HbDz(Rt~e;ck~GaO&@!aDnX#G=TBx;y?gG!r_b ztLge4C-&_lFe)T4<|ohAz&z8V-;weGa~6=XSOOk+?5r#4UZ!j`!}^$>I&U@cXVj?S zu^!<}XbBU=*m1}K{#emRi3Qnog2zvu>k7!=A}NU^SqsMarCFhZ5c5));c1kM_@et7 zg0p&enr;s{aO7y1?j0T=c;h58G{i2!v)x$fq*YWPXf1UTLIT4ezm)F_tctg@&-rQY zw@f?t`hkxp(}my#*i)gRPPD=5T*P{jh0Ry9&$e6tA+XS2YPrck4@XVR`b6M)5O71F z#U|uvy3Zf|2aH{qk8!p=hW8dfa#HIntcpN}(vhe@d?`OX`qey7jDPYCycg%qj`rBk z;fu&}l@9ybAD_K|?}5*iI{9qg0bB4R%HUn3Gi@16W1x((9>tsd-rq7#0fGFT{EN&& zm}ngS%)@CR4=YN)+TG19)kON*ygYsg|1RN7)n_Zn2_N^O z=B^pbPqsp`Y{*!U?~i};$LJ&b#&`-Bfm`s+f{tgGD>?5>9|MkM(X;#!Ff!9EAi$Is z4xxd~zK4bN;{?VwE(FGGc21Zd%-fmCIcfs|W*6RWwf_Y)rY<22W&U6W7}T-MK@exn z#j6oQ|9+c|0PshF7y4td;eEjR`H-^qeNVne;Dc-ABm6BcLc@a}AeXYbw%ztRw!*Ai zl|sIXM~mRf8bU9%mgQ^u5D@tsH!mXtGLemo+Jecmx z28A_~Shd&*Y_k(l4KhTACThO<-sj9Hah56sh{+k}^X1U0bK+vBgHS+~Hdv34ts@4p2%@$cjMo&B$+7msd2>|HG6G4-U)9Y0b7 zenAr3Hxsx9uSd&AX`d4>FYU@?oF6hn3PIsm?iSj5FAiVD-_B;*t^Fxurz-HTfYF3&m=F_y>0e>1B>;e6L$en=tIR@eg*})2fIPRW2&l zfOxLbG$C_Q8L;>q}h{4U5H2QCn$Z^^JuJg_EH)HUk#pcmXg1;4&V1C& zGRfxsgY+f~oa?w>=-}U{$Kf)LfDmliUg4Dsj66G-kuI$w#72-eKm*T(uUB{LUC@t; zxSEu*vqxGMLI`@sET~&*Q7NPprq-{3ZEgZ(@lwBJ`Xy3`5%uM&&$l=sjYY%q(DTq( zOto++VJ5=P_MycP=3Kxu=S_Ob3at`^VQGZf78{xVHCVy7IRUbT>CZH@Ps;uyZRE!e zeC-#j3v1PH@XTp<3M`=j#25NFU3Cqtup(^R~m2)btHBL$JA0MUQX{ ztq>4gP#`p+JwPJh6-Z`2W_CshfeH)yx*GRvM2%ctu^!C+HmcXbvt72(2<<6MQe4l?W$BXyEgO z!&e{`oDA9{?mGVDmKrOZj)}=Q;}CR5OA=v+8w)JdqfKs1fae|ijCy*I#t_Xdh1+$; zjAKJBUj<;%rud;~zGpltX`Q(}*}!Z<+lvCv%$j=f{vt4*ne5D$r^`{J)iE?5jHwo| zG~>Krx)7WLQ+W*Q=*9#u19nc4X03U&8glx-ipH3sMG_w>j0s;it&TNE)dclEJQaOX zFbLs8lfIKSHyC#vV0Be9#^EwF@e<{(vU%p1KSD=C-c4M0d}%(|N|&*Ua`RM8{D}vM zAS@ko!fp?_*a7%-mzl|{mpXo~Vk%;Bnq@Lqg?1HGvm7)mK~{;0j=b zhH!@_T}G`9WnA*)J#m>*(*aWnh6>T#m_3RPG;Q=~09Zhr6|4gwD!L_-miMF{J0?tj zLCEy*0g=v0?-*Kg!-lzYfY5QsJorrDbq=sTYp)L&Y_E;Jp03vJKUKcGTBxh9;PkS< z^9ooQF6R)))9>BAdlCOw|B?ZG01=Qmh5c!~ie}ZV4sos!}~~88GJ4Ww)!>I!K#Kf6pmPZ>QgaAlaT4BT8QQ@rL7ESx@>Aq6*jYq;tJE zKPL=KHCng6gRhd?5O)|VPR6J&-$?Z(9N(9e@8VpZeSHK7h@S>0p8?F&Tb*}sc)o>} z*VS}wcr|SducS-EOB|KF#Dt8hd=cB7oS3lU`Pt9M1p(nLJsar*tpoEB{dEpgeWDWd zY;+bX@%TS%)}Kcm{b$6G_MH)kTZ7IuhaaFhK{*$UwOPNDMivimiTD_S0pg$Eo`cbL z@g;MVhX2w#cUPO~?=HD;&5#fr zo-lb8=FJ_sb4_@N?{4FYJ|T#IQM~!;JgH|~HKrSwG`df-2NS4p?wsCuQbj&=KA{B! 
zR6|fjDm7rJW-Oe_Y1ZfCKu^a)0H#>Ro<;>j%e~q*ks1zlhAYPjxyyMMJ$T|vejqOZ zBbMvG!>eVVxA->A{I~gMAI%#T4*mKD9mz2%fq+bBZIQMvNuUgeCb3sGt7DW zrbfGK98lD$zX0EsP3-kuqmZf7!htrtShyr2D;KIRBx8Zkm+tgM+V~uqv4n(otRwi3 zdupi0MVxfTqOpabOP_L@f$>XVrv^4Hn*|M2c>9`WaT$Me$DEANwPxVw2L|o>UHnObq#tu3oWbA?_ z18j)uPAlDIcDFACD@S2d1CUkB{kSX{Sg+_U0wzJv=B9(6+?HXgSlD;ZD zEAv-qQwq8wcL#3~#MjUyDW1zTK87IByes(^(N0_<4RJ~V-ieP7dT{&%yeW^kG2FS< zfL)m{6`(gbAOuaHI*$LxYG3oG`^RnO*1PRPd_e7{)!Nmxh<_<(V@>Gmm@?L&8`1&s zy{QRmT_1gJ@>_`Jy7VHp+)@v1-v0Gsv9& z{3PC-ReF@_PXOzJQp&{Q6@$^oEY$JFezj-K#&4Ns#auzY>g76z_C4?gfma=0yTgSp zv}cF@${8yRG+hYMd~+T;ajwY8=)S&oBsCHs5PCMfzS!&3tP}+KJl;_X{Ub!{5o2n78@{K z_%MmH-L=Pl#k=Sz>^hf=LcEJMcoXn^5Kx7fp=d~c$iEMfRJu^k&()+4doS;?1dY=- z;Q+YNOV0#e4-34fJ03)hj{R_QgVWr;7y_@SrM?mfCCvp(^;p$%xYSXDUugdx1MPUE z)X*q`s_^yrU9|w^xixkZgH%P7v*T6MRPXgoF+iIBX6NnnH!D9#?_oAJz#J%xkz#pp zPRZ3=K8<^bD-&a*dle$8YHT?L&({TbNZHPyM-dakR>yyr+I8pWah#{wOkv&|8(&KG z%de15^2lTj5k9|=na`#6voeGzFD=WK2Psi~?2FVJ=YtH75 zwQi5sVDQ}O{*cpv-s8`;r*T#hyu5G-?pElKx6*AF=LVw1&!@FUwVo?5r`P+MBqUB^ zLggQ_ie=fS5?D8Z7Zx6n;hN{8FdHZsMZY&g#i@)#Z15?L2(P3xmn;hs>Y0v|^Q=t0 zi$)jNib45IzvM>Cb@;-yUhIf-FTFP?#0bV3lbj*`L=HKPPhkwNqtAtB8KMj_DVf{y zT7I9u8b37C{J{LBA1ZNIAqrxpG{ls+v#^NfkN(Njf^s}$e6m>eE7XVzzgsq$`&dku zZR6@1apc@M3*1+p&BrnoQk@7q`rg<6QB@W|n2B+j~9HGKEn z$LG}{ZD9dq`w%SEbqKE{2;dJ6_tO@;`f5UXdZq$S3t)ZAXW|fIfj|# z``ty1k$4}zTUh9DOR(*^Z_P4JU&N_)pT+e+Uy#&ANBtum&J#C$=R~?l%5yHee5It* zHtM?u$rXLT`DYa5axYF=)J%>MRAP`7ihA(l+?C*4G;8#wu*onZF`Dh$3-962W|vb7`D@~P3fx+O_E>wJ|3{fP^@?(56+eArKSU+? zl(l4dECd_X*zt{e0KVg3=Nrt!P(J6UQ@_U2hftH#P!o8d=G74G1RU=9fxLkPdFcGV zgW0AQ1{3^lHK3g}C3m4sLPQ+aqv8m>j>l8Auvt6EuZd73+K6y#0YR5|6tn>tPt6k? z46PQ*vTWO@Hl+}(hEmh2`tDKgg=xdKk@pE0Rx&B$R9*#a1G>OC+EIK|XeIRh6E+PI zdp_8nBA%aS+H3RiE_yD?EqXYYNmMRZ3qAgRz?x`;-eIFikV}d&7~pZ z?GP>QHe*CBAP+6*;P-fGgcc~m=4IToG!&Asv4vy>SRr6xF<)~7zJ>`fC$iQVb20YZ zw7`#-4$B=kVWu2JzJLwDq51r=Hgl}zhl^VKXy3@Ypg6O1j&$kL>2O@9)89V&o6!D> zbs~jJehj>$1>i=bxadk!$>#PN($d{1s#aB zE&XJ(LRiXXV^eNt@&#VtI}v|~^dxE3!X%z!8r(qPDoqx^3iyc$W!Gv8^b^kUp<#Tb znpCia6^zozO-$-544wO&$Aiw6(YV$Tgy|h_JVjePB ze8FRAK9jD454Hw#Xd8D26NGty{aN_DKp@ohX^c#WBBrMy9N~-Mo#7ACR~CMUdcpXe zJEdJ*+ssI3Mktyl@=K)87&Iqxia00UJn+F?Ox2Fx?%>?H`)2wHr{e9j@UsC_W%eIo z+&!?Yk1c`}4%O8etWFgKql6%d( zo`uf@#Adbqtll^o8bYH8Y#pKg++#OT=j3dvGD(imLJj+~3Wra&-a?(3Js@{-DWLkhi`*KLoYi|PX=#f^OnMWB#C_eoAE19QJH0#zrln}|bJ^M2%z(Q%ei0oMk; zd~~k=PN96-2Ip>Ja^$;`#8{F?ZQsT*;@9?pX;E|tonXY&^4ih|&tp{Sk=sxtu&6y$ z;_qT!qSN{>9gNd&-W#RAG1#nKmbWZK~(vF;s%GBBUzI$x1w%6 zP8*!+IK3Dxd^!=z1?z)^T_+O7xT8K%Ar=69gkMibXcfw;*ICe8rc1*z7*~UzZ1lrJ z{vR4sIgVo(Qz+o*Y+nY8$aV8B2k$&}DnfB#EkbHq&NIbFs3~OOy^CPhOO(ap7XMN@ zpWZ?XX%+u0Ef>nfc7YyO($|5dwV2?ir;d7~=Bg74P^%|S+tPnew1t9bn7j504TcDS z)P!oX_zkHS`DSP6WJWhD=Akd&iK8Dpv!}XK1f|O#MKg0!@y3SGJwlek@Soo zV~B}DAp!y{Lw;h8IU@r~B~TB(WwR*zfT~cYLd#fedKEU&1o~C!rh+c}2^yj)_EG&1 zfq{a-JDir*&-rcq8e?{%AN_2d18XdYzQ}@9 z-Z?~u%ANW#*!-Z{{rF!s<`S@4Odp^}% zSt0Z#j@VWZpqzquwn70!fNw5cMDkq-KZ%!9J<{)aX#(!Hp{PT&$(-X|=EqGN@)u~V zCy0WK)PbJhehA)X`w{vE+?K|6baoGIUm15W%T;rd5vGHCgka49T8s9U0#AftL0{?1 zCc+Gd?E>-aLj_)*>}u4&Ny=6OOd1q|)QjN$NRTNbQ_zV#DLYW?49LZtRU&$s{>ffT zJXXMmm@CF3HRKA7L4c!v+z@}b;Rqt;4fKKBpo9KmJBaUk>dk)zT-BhKbjBdZy=_K7 zO@B70n1BzK*fjC<+;Z$odjeaDLtKRT6HY$Y51V5C7>jNgP>!bJIbZHAupk6NFSZNf z^B0_2tooeDg#4$xC*rP~rH zp2ew;%ebQ0*B>o)?vQ4MId%n&@CIuJ&E&`6-k5eAAQbGgvAaUXb=p~HzG_$jHVO~f z9aI3O8zXcSk1Nhy4!J@8YD(EFHYBueC!-+WB|M4*54<=#?y)&^fV+m(-F~{Ztpws( z_*_81fv{TNNv}5E%md^pXqD5>fC2w{|Nl*2ZvR$#we#z;Zy%v%#V9Wa`$x#>tfU^} z?qL70b2t1Uy@iI%Uh^QFJlmFXXZx(QPv1K+!oRZuWX*|i?PhAd_`M+LlLL<3J=lV{ z9H$APl}_j%D|}3y^jLNI#Y&Xfz1(K+O97bn|Az>`w$L)V-R2O6?i&cdmhhjn%&CA^ 
z)AL6+!UR(7uk2uw1;dQv{>6gf!?t$H0_zBYzK)pNt#3kmGeaZ~*IR|Lz$rq58NAcS zttm`KZ_3HEQ^r~5{vNKFV2GC(nj6Hwn0|WR*c7G!eL#Z0t7PyVu7LAmNhi?QtX(HU z!Kj^x>KL~^uT;5K|h3C;E)he~-K3n#P2G817v!wqQ!@GVSr+JQX?> zj607*82N((oMFTC4mcv+?8EektFg;a6>?SY%kT5JTrSXjUz!pb*E*^Brt{tgLZy09 zZuW%)h7^T*F>P4~->W4ibjo+835_E&E<#xk7p62hB9|tlCbs;ki^}-KGNM=2#L1F7+4C^ zF`6OuBaQ_^n5dx4bi&46`Y`i~3XvmM;S@D1uH|WVp)J5T^mI*EK+P7kqp_X#kKRq= z?oHC6;m!LJF44FL{v5;hfljwSYqYU70g@L(3K$9ef*>%1k;b1 znv9OoHiAJ-9Ah0$(h_H5jPhJ>t`J69N)`!^E?Z4FezbiZv0uo;^wS8 z`BZ9SMLy!dIl=FSEO5_}%6IzgGUOQf8hw$Y#c!$Q7O=1_ly!b-m4x0B-*l07?$d)F z=hI|U-m+xyv8*(?GvtDA)1Onnc^3_tM%rVuQoo+`F!N3w&3z7EKmfLgNK7$81KOiT z^#s~+qzYLF!39Ey7J?69)ufMYYiRNX%|>Rd<__aYfk>D@fboy8H1nr2eu-XZCU}gO;%Cq>E@K4xqscD36!2)SBWyyK2%*Rb0aLHF7p`kY z`1`A)O*bAwYdxJ*b8ZDu3@SD6A^;cRDDRPJK-rIcr5t*Kx$Y8usxM=FdI4`wRGkGH z8)lA}oF|k*CKJ|KFpX|5)a1DfT(Islw3sD6ePb-=kEDheJX;H)xB3nZ{CbWx_b>2P z^YhS%c5Wi$poi84oRW74l`5XgjT{uSaH`)*+ly$vdC&r|*+9c^8TvaJvMULJwcpfE z-RF>z5k70XIM#2n8Bu4Wx6R4UHDEkI5a&S)YKs}$G?scCpwYerKb?$^X7CsjcPz`XkwwpE@_cG!>C9RTa z95NXX*nvBV;7>^fsYWU2y4&7NKSiVKXA5tzpt?_;mG?hB!T)qy2=P!m3&Ix;;x01$pQa8;@Zs)v@o%8!5qT*r3&t5(>O9`*!eSJE9UTE*(uF{ccV^{cTzFPPq~}Fd zfK`64GPb@epF@aKnb#ETyF2bFS-L@S-JsXJ5kdeN;w;po4Bl7E4_H_q{z9~4TsFQ& z;IOzHjJO3?gJ|QQ!ax4GPb5B-j+@J}OcKm^6i6iE$MRZ+9=LSz7TjuLs;Jfhf{P}; zR=V9qV83tX0n6@UCtbO6nT>KxL-4nvfGY$T0$PDDS53k~h(&!{EN&UJ7Mx)vjBJOM z@tzRE0>0ZH);$)t)S}i{pdBkX%U-})HZ3F3TvtW!1A}5?Nr%m}CFqgFMl~4+-%KBG6A&|)BM_+PpTjpzI8P=H*5`7+Z znk%E4=ls3?-=0)B8(!21+U_ZP`fO6YWla4-C1B5xA=-LEV7+k;yoY9oc_L9GwNKFK(3U4&`RnW=@a0sdh;h%@y z!?)XUlT&9Zb4E(aE{BP5$no>+az^aOeq~>qtG{(j1M4V;j4-onx0V1ca|43H5p;OS ziPXK$PTF3KrKL(DmeZ#_MHqyrAzz@&C%@MZYdL-?x?)B0o!jYCd|$Ba=N z(m!^$c`IFQt)yE2J?P0Eq)2{IpNtbsa@mkO)LS5UIzj^#`X!ypkgGOK-obg74KnrshnCbN6ZDtGCx_t5ZKx_gi1tY>LH3!eiBNGf1{ zVVYo5Pq#;WC=uoK*N^Fq1FVs5HolU6bLn5C7hB(^W-j)go}i#ht(SMvULN={ zhSAC8TOm~D;~MH6bSuAn(pSyeDAS#J1c3cRoQtCkphJBHSs}oH zW1b?l(iycO3t^z`F#GXLf4QiUv37S~Vb^RCPd85ZW*U36oEv2FOGaEFjSEWYK?Io3 zsq+SP$U-8Y!AcXvI~mj*^`rcrD-@%Q=u1`~7ULZ5c*BY|>g+ToTs-u^6A5Mb4c>(| zn-55MA}v2Q#f3}=UHnFnzYh9In{104{$*Qpc0!8}ETW<=Jo(0wVov5K^A!j$z|Rh} z0=Mo6e$mij5!%HErY34#cB3cwNtqyoRI@#tC4s8^)RboL7Z6&|)ks1oLga@6bx6xSKVf@}@qu~z1u6@Suq_qtV za`}Ts6^pt)c?G=fA8HXY18c*3|9mG-dc7#je~)L<3D?;;F$43hgn)azE@#@~k5vJO zd6?2a?_x0x3?^$qfL9ox zZzQXOKolXs2tvl0e!RzT6uzRm(@t8(M^d-Zfq!GO2u*#>+?xXo)JPvf83u!H=9h6a91Gd*D2L7T1# z455(B4U92qP~f5slN=GBy4nu4WM`&|@zoy%VrpAqg3fTxa+%!Dm^jRiICW?lM}t7W zgfZh-9G>MtE(PwD_%Z){9F=tN=C*@{9dkyLIR(1z91d-FGo`_%X}9@B+zec%9JGx2 z7n*1>$?bzlD9a$wI$~4S4WIZ5J-x!e>xZwPX$ZE_o(8@o+xirP7n*7cSzG6zg;y|j zL;^9q8*@t7L(-lFXwn9FxIB3dt^UjSYwaRLL+FJDU5PK$ID%K}G(`Be)xb1;w9V<{ zdswBQw85rvoz1kY!AVTi>{lZhe=*<_U2nYJaQ+KQMCzT?)0{ zYQ39&y!1L603U{*qanmc0`C-adYXM;wohLKnCCju{4tB*6Q}_Zs4e2me+`WhR{=cN z)#PGvD}xo(0`=SG;yT=c4hHiA>FI0Wvl@L=2vnz67q0d+q6&7(**(Yi*Zb|;>F*JK ztxOgXd|gj3AH9g_<8$mVqWQxp$uqf^5Vw5P>U@UMLswCE@3sFSz4PV!>HgO@(yb46 z((~Qbki6A?&Y*|c|YxJ?4)b>xZ5A10vV0HdQYx(rV6GAvhNj9Ac6?%9Dk z0KwC6@(vvWLlTEgd^DQt^4t?5+s)rdy}=(-tcz{5Q8-Gw-TW>ZDKGJ?hDo`5ndcz+ z)^_cCX{o)Rdc!}334+;R7jU8Vd%XVw%o%29XoxL0ehFsf&#Cjf=#53i1WPtLaPlg6^k7e58br zIwC39+D7xsV0uJ8>0WCqz1ez;(@x(_AGAJ5_Y{bcS+me`v9Co|&IhE9-eEg8#vWfm^#OwtH>Ud|8`{y7v7Byuq8 zyc9;&)6m=q_G5;9ku1`imX8#mK+Il^SYynZ_xMD1((iKtbDM z$-q^oxd8#wnI4>VrfquK8bGY0p|Ri*q-ZoPui~#yzf+9eJ`2^uUO(*~9;A0axD(o6 zN0^92{n)cojgz>Rm(dNun-nn*(PI}J8C7QUFi zu=FKP)?Q*vl2?(zD7 zzsP#nV?xkf|2-CDThO?Bm~-x7KBHzr2j0>M_JMvA+jfEXu86!Acoz@FPZm;E-#qx< zJgYco3R`(`OO16L0x#_w@lhaDf&#~WW0ARc3CGzz1fle`h(i0ub>4B(^{BNR!Y)l+ z7Z7-O&_N$Kj=`a38)w;S^aq~hjHA3t8E4%-Oq{dQebjd|Se%nlz?Qbm*YBa=U8b9Z 
zbL2^RGsp|8q93Av=ju|HHH**pnMPh2m-MA6W%=q+uk*N&(`Q;PhH=NMA$VbppnV80 z6ppBwr3RP>OpMU_+UFfqi*d+_dk7p>Cu^L%w}e1qMX?#_5RD@63YwsAB>ox*l|B8@ zlT;PDj~IhGSf4H+z(8O!(gfQ!dRT^I6yZY#86y+vnEhj|sxYeF=eF>@B$7lX$%!!g zcH-@md63R15T^3bkV(4J#=7C9#k9IL#G=75rYoBkepasngVprGt@qQ~Rx2&_yr=?J5iA7CIOfQG5U#~M};`B@GQSATQx;f zYN{ZK2f)&T?Mgsr!5 zVZ+3kVCu0#xUcX473j}VdN0;~3ytSFxa<|aA-J2YQ?Fa8KfOa9n0-QD8wjMD@N!-2 z!sgl`_*v(`m2*J}kISL0_D-C1dN73+GDZ~md*Y>VwtqIEp*1#xmUp-E)MQ!D z!e<)-pg*`GLYxe$O_rxJo&56gJ;|W78Fa67zL#EY|EfcUnpH6T<16~YoE3Cw>hgN` zt@QUeqQAppe!GQ_5vdJ9q7;Mi!dyN(A8Rt7o$A8bx`QVOvl{sCs)e~C{Q{A!EnQ9Z z)oW?8^L~7Nvc3P`A6Y%o){S&0>X&tH*Il$Wa&mD`e=fd~|1VEupnq1eyPgqd9-?NBTr+d=JgC{q!F5 z!(S}^b^3nywe(>NCa+*z_W=EmQSJIoJh@0DGBU%}_Ekc)BLB7^!m3l_)1` ze)6e=U_fT}$v!&{&XpN{$L~(uMY`h@$9E@UJ-G+UEBM@iiyJvE$Q0@bhm#A0vx`-b zrCC{M8Hv~ADevMMB_Hb!HjxACiC`hdf|12^byZcQc7Js1`#NsuCf+oln z^KKRt_~?^OftmQDkZ7IL{jM+H!Zd0lg!8pn6g>>zCQku;DAQ{W0gUqD#8uE)=N6hJ z`sWJmETWIluo$ucMW+j4H|_AWFY0jEz-)1}mG&FE=t3OuJuhe$SX_D1T9rOns(n>} z`b4=O%R_+&51Y63IuA0+=NgzLd(vKm1zLyWEfqzM5GJ;Gx466-3tDkb^X0|W%V`IA z^}(|i&gvHy*T@?Uv8dB027!ld`l)yitl=ZVwk*JWFM@L!Z+0!qIb)nS)=Cpf$I~Ys z%DPdo;t2>(+=2K-RgSSq6Mm6jsxZYA>HBwM6rrU6e7yBgLc$ zJ=HKq%d|a#*JM+>*|m@~40 z;VI#hk;1p_(4urRqXy45MX97(d+U??(7YTMafg`vTF31adQ!-aPP?L1Gh z9ZU{zkHH*)$>nV{!?yXi#YScy|7ZhtB*j!WQI42XSP?%9pBD%;ka!koeB2~DIR?n7 z564lS?49UbOP7!ge|_N(Krl>v3W3s-E$88LlO0v4rK#3GEdDJ0yz>sHPT=z)>J_Z# z!)NwciU}j#SiAz$=rFI48D_CsLlZ`bFvKvJ_YO`8Z+$mSRuP1;+f!e?fj|rmFTOtu zzjzR+GAoh}1z;{k?z026!=&Pv@?Tql&jjbGy>(0s{tCwLConho z(-^U%A^yNN+$L700VJ8>xDgL?ut_pKXVT@Ip|K__VwQ#Ene$3Y} zuT7NS=HK~mXQeOWmu)Do<@@qFpI3fn%z<7L{25I#@6g%4-CM$@n&vDR7%$$FMj?#+ zH~$%L{A5)lrc6ddd^;&X%V~YbTuVaz@03Ba78EXcPo7jsjMjIY+D!_)NJJ9Ya?n%Z zoH*`6N}-hX(5ZAE0Z7>n1zSUfRs6MBTqp<&%`WR@8_I&`uQrG3U=0?FAsj2>DCF~G zNmhpNDqQ$Gf(DGx!av}jr!MX^chjJ$*(X|T2!LFiw)oT1NXPZIIs6iNU$nEVmw*ky zg)g|D!EqLWn<9_utJ%Hji-%L%J+nxUcw>1ixDDBF5`B#@LKhU_-$u<0VHq7E9)Qol zPQP;DEL-{qZ`We}t)p4d!S7qEyT$@tJs##|2ZDKnBh_%xn5)L~K5`JJysyCYQD9I_ z7x`E6iRU8kT-)MJ1>PBdfI|__7^m{&UD>Dhvwb`Zb8z-chPR{aWQaNtfA*ey0$xS5 zvWzm7&k?s;Uhy&NShO|FrL70BoCVTE&lG7-e49neH^+T=0!o4NRrAg=nJ5F|FqRN{ zg^B(k?V|lMsN+_Fe~S#YMe2iCgew|g2v7B})P@GN7<|(Kv`PL2e2Z}fjp`6*>dAR* zAV}J#2=$|02o##UbB&=Nxj2v`Dso&`M`!8V&`$vtU?Kc!(A6`*7dbrXA2oIt&?;X) zTu+5zld4zE^KHR}+oW@05kwp{T#s8! z8)@@&ggY1nKT`xVQ@p=VIPFobCnJ`0QnTbbCk@m`dz9q?A@aBFKIsO zT(-fSg9*Ly5Mtr01K#!!KA+7=1YGnec)|uR^N7N}C_)g&dRweNE-&t?jQ|a&Hk8D8SwdlLcXdicIyDQ;m z;d20iK#Cxtm4U)-L6SeA7FssKE=*T`W9eU}n`i{M)A?zI7);dX9@-Z_TK-A;N%!Yz zx5*$t4c>bC?{giX4z=YgXm8=bdY8px!C-m%q7Lh;L9MU4T3(njO01dfs-K_T|AIrH z94pyW$5EZf^-+U;#QH&)e!bm!AKwwEg5yB^^5{}}sdp<(AT@_{$42TPJ<6Ijp9g@l z5MiOXcKEH-*=G{gIZ%bBxL8@*T}VrNXd^lCLv9*qY1M0*8COj8&IE;dRRU>s65p)7 zjN0~JRz6g3DTB#0qDub?b>E>4I!}Ka0oZH6=?*DyS|40Iabem!m{*osV#hf>xw~@CR3lhCUK_8b z7l*gff3p8uEE@Uq=tMjj?nP~gHV;}B4IjFpIj zjE4=em@LXH7kp(~ysCYqFr<9<;Cxwluz(sPH5su8bEkTt15>U^hYJ?+EJL+zzAyj$ zZe85{ACb<}{W>iC;?zqQHZcwrPN2nwFrgIOGnqbJwHr9k*t zaibDpxaKH)V_@R6jb!q~- zcpGA#6!67dse#~rqx(vFars*;2ob8YaGJL9V}~Y39_zW>2mfX`R^zd5jzyk+$vjO| zVOLpy%NK?&oE1XYRvir=(dT<;Jlw_a-X15XO=3X>e1Lxq(-v1P>g1213DQ^>X13q- zh7y;&FR4w9{3&?$?6*^)NPp_RFG!nnKlAG&y{zU_rJ^AM0bt@RmWp9QrowdKqkZfq z3IE-{#JG+5hSQWey>khER{kw~c_|R2pM`C}i4tqIrm*rr?Q-CJm1xDVqZtYCl24h^iK1yI$Fz?cLaMsfK1gj zT|}VVhQ_rSM+(3$p{c(*SY@2jHfRo56hfUL+A{i>3#}+{&6E?Q_@ElNr;kBs7dSVB zo=7*OG2!pgiUo|0YZ`4XHW11#HrmG-EYrSfBK+By+)C4ZG?(_-xW%ukLN`DXp*}+3 zJY=IEE=zxDni;zu(xfhc2i&D1Qv~$k=v@LA4T_&47hg$#q0RWW8j?}3cc|D1!6IJO z6ePOQL8u|*cFH`WAL_VU(2o~y`K#u43_H{0@A>Zdm;9!z>EcvKdyP#&HQ=d3VuOxO z0_8BFcdjXTsYr_uP>tl{AU?A};fAY%GB)^X3ek!~f$&PtH`{QPd=R+f*QNP;b? 
zTY_5ZJ5OgG_qe7SmcK8Utjfx&TEiaBS$m)2-m5Z`3+`-UDe-}2+hXtT@8X!2L5R~p!jb(h+pbs*%pan8Z3A9`eG@huSi^zlyW?|Kg86aD z+aDjXn)n%x!hb#ZApM8&PjT>lGu;~BOjpNy_)NIMY7ACCD8LK~VlR8pN~!Cy($jri zZ87HMFcU1Q2tU0o6kht`nk4+{#izSjd&g_1#eSL#rVOJ+S-0~VLc}Q+Vz!5Vyv9F6 zu3**_+Xq(*<;{}M^&ArFB3t}rT) zemy~26t=Wr+~edX9wXR7lLQ^#loK!Kv(;xlzk`qkZS^UXq{bht^KDZJ;OjAh}0JWcdvY^6wWaJ0@Olr*QHjZ(eh>CtBd}u{AqMpvbRIC)si<~_xv1S%i zSTm?!(4xy#`5p=zhb;R;yw~K9sfmmuI0-jxjDNj+!kPZ8=*Lt(TX%sc7$%e%m`JfW zFlGXA&>7>uDi0iP(eqshr{lnD73HaFVbv7JX4=O3+R=iIg)7oUJL3TlaZeGpcYdHE z75}v$JLXSk^*S>iu+`=2@Qw7wjo(9dyB!nQaZF6eBko*1DP9VelNCVzm`~AicD0 zT-Vg4z|_krZPKj2F#534($X{)&C#LjnYk}~is(kU1+>sSE2|`MT}4wZId0d&RD8+r zEd3kBlU!Hk@1ew|=U4(`2mIpOF`=(a+4hJU1b(a9=`a?tfH^=Bgn~7;8uNY*k64i6 z;Hw@MHd=O%@Siuv57t0yMEaL-S}-XA_C(C*TK|Sx_t+m;d!WEfix=6F+l#UEvRy2`0!y!z29S;gtTw@fDICkKjyEMykjP0aS`m;@Qq9m*S*P#*tFL1=P9X^C%i5+QhBmDz}u+6*T;a8%O8K zz7hHM(EbT|jU1$nP=4K|+RQVHRd~>UYcOD|^yI8&M9#W`nYgyF6T_$7b&m-sQ#cfhw0uHokY}S0vpftaP@iDAk z(>9%%{`8E2L4+4)2Zwlny!~IMA8!9623Yp+^3r|XK}DBVCm-}aOg|d_GW~4(7g%*Y zff5`o$BXuLQ}hj&NBg#~z0C)jV|EWUo8=jcuY~FQg$A8(xEh<{FE0n;gIAH~SmKAr z?ZS_+{Cd)Vf-j_xWA$ag2XJq?mu`)2q&IMUe~;52uTFOJ>J9l`PQPCYIEaB*wR)$} zw>BV2a9$&V2#)P1KUx8%$i(}JVV?LSDkcmT z=u}GOgmQtWJ(7e1iBSP1RQQYmuU07HjY$#r1}D5_LRTiwk+Afm!i>5pu!Fs0`O@S! zo?9muhb;1Rp|vR2Z7pdIpQdE9ocaXS`|#gY(FEmIgvpy*Bt>s#-f$;#(hfe}NYPTB zJ%SsvD2US#s_>+%ZfO!}nVhMl z()!VMV6ud9LMV%*Ijo<+Cw-J2w6^6bBsn2)`{D$JnRS`qbVh5iEtFl?2CtyEG5l-}zY#9hIDrJ{Y7Y%{Yw&c(ELgnRBsOVgZH=^QGJ(u^s>yhT{y9Ds$ z8W2&>$9-6)6}ajeIO)ITeJxu-XJF(j<;K_{PC|)Ws0(TFT-9YpM`R#J4Obv3KgN?M zELa=(3rJimywVZ0EF|WKI*lum*X9e@ioS*ZQz5Y~p6F>{ickIZ#g)hJh>SPZcV^BT z50A0VzFlsYeZWCQkJ~7`95=z`{BR3Pon5R6aZ*A1pyo$WKRV)_=Y7^Tst;_p$6dUX z^iz}@aq1%qqwqDzIOMx-m@<~dc<&enyF*FNwPKr}4CpEEdSp4*Pc`bTgtZ^!9zy?e zI0e0=`+ZDeXLX+CC3mTEn2^c$EtSWQyZUa(?jz4{mrZ@$j*6{o}!Q|Y8}UB zlujt4JXNyx*3EH7n~}F3XpB(0#Y&B7FpVvZDoj1pAhzv`&Avm3w(|!+f^%0Kr4lx- zHMSKD(3Ugg$8-D!rg)zOE@Kn1TBdP{g%DHxo)}?S@f!FAm&U-rw=#coiP(gfoddpj zLro%SY&G{f!}HV*2Ld~~B2yo&^$x4(*L$y}o1J&l7i=4OfAsHwcVN_X^P)lQ`9*qX z@JH!SxBlz&F8)|{Jf=Q6+C}j_J$zAg{k;F{^yBTHg~Q>;IPr7)qP724D!X832xOykeAEpFu7)W4t{rX!xE5hax1@q2UT2 z|MC&9r@T`4QT5B;_TMDWi{(Ugybw}(^LX_f^W&cnA++X6kCPY`O7Z7?0J}nOIuMnh4^27o{3lx6Ze0ldr=pLMB_bJ6?QxDj7(U7S?{rfC)<|Y zF4;i|78YP}>Z~I&Wrfuj-xn0%eh(ZYfT6C%C`fD80e>nIUi>}8E*NRm6-%=I6brBA*RC?VP;akU#O@+S> z4*2$!5O;pC_@@Hwn3EqL;{5*O{^#i@_}1HH&;J#yy>?kmyv~ut*GE^=?a@uPKjQe4 zFWbv${Z(o|zq+-j#n>w+E*=;o8P~fwu0Njq2eNNI-xufMtC3^tDwdII78uj@tI}aH zi+M8rgKu}G1PGrie92=nqoC+}6yHVEEeP4hJ8^ckd5ras!f040aiarbXCNxDRAfZ( z`vHtj;Y-K@thk6V*(-mG-)tjDBsPD7GM<9tv}>{Qs!#BO?kLL@TnnV$?$eotOiwtT zml9`EwIC%DBA+sEglAl0%Tk^g2rqA(SOb@dHkVaI#$jY5Z`@aN?@V7{26g`4dUZNf z*-YiKU?p~1z%W@viLu96aF3HqcdoyS{B@F!P`JrE-)!v%2Aw_HmOoBdYvU*97q{2&IAt1BpM2cC)pFF$6FA$_NquL^5%R8XvyHOf-Ah+@Z>76i@1|>P zW!&b}*DjL;t+U)7r!r4#u;3U!bBIC0UxrWk4ji&2YSd?J1fGMuLawvhX^G}PgD5he<1v0R$LG(h z?g9py4F%q7KFyFD$|%7*t!GT1$ClL7uT9#T(BccclvgaxW-!uFqa#ME|Fc>!sDaHOj%%%U#f@G4%@1CKPUpd;F{>fld~CuBRpO zb&fOo@tQ)b{EE3~D7@@9D#ldm$Ed~^0R-@_i}KHvjG};nLQ+VpLbRxOv7&9A?4k7H zDDNSQDSWe?vj(pj>l|wj4h}e>Rt(P)PS3rf8=O(tYJ77%)jM?tmXiFm`OGg@iZ2U`(fc^+G^CyiLFe|#mvBlBu zL!1Y{(fdLAgYEw`z1hLpX_vjczR0to`*dDfe%Wf^{@VMP(%$bq;Nz~5EcOjJ@6+ec zkO5Rjf5qu_?*4gj(E1NG0k%IvWIVx>Qn zU!1L?s=dp0ngRY5Zs2V5?W5Py+lN>pjF^~J&Mvdl`?`4X&DQB6rE9s>?y#lNlYuT6 z6veunkkjk_2@0T*RttvDx2$DsD!jDfw)89rD!B8tM;SH6y2`i{ zg!pg=qLL!gD)kWeW{lN!;9^@zQA}iPA^0mmhOP*B<;gM`i^exz~EBG)0sKZ}RpNpXS7tJLfaG$~i{Lgp5CwGFnbK88R$P;-Mj? 
zPE6p0?J|a$(_`v=jvUenla#%jAgQqMDPyy3vXnGHiS_!<57OJ|tyqMd=$yR^uO&>y zIFqHYwm_+-1$zfeE$P0tbuYcvdn0{u_yPW|ewFq);p_M)`#cNm&gycJK~tYCErUMW zYI^t5)vdSE-tbHW#$^#0bz=@5sGXDywXJaVjzw- zFg8+{xZAHv^De?Zg_4;%~~zjb2a9|)77c-Sn&t!JSyK+G}&iN zFG@QTm*7j=2wUzmmig~e`^=N-cT2cyi8Ed_`E^d*rJWYaXwYk)cH+PCU8%|mI)DiU zK`p=W^>;$+DCMpx{cy^dG~qhJx84(sE4sY9?hA^=1}s#S*{(1~UFUSZZ7dVp_-gfK z_C#72WmGSzY+!6t4s?4inti@sF<{g}BWry|t(wZM#ptRg4E zF}e!eM1No&o8LyvK^%h|pTTXK<9+lMU_-^s*2ON{jN4rnhEPyt7_g!ojjf4M#H?j! z1)X@ZA&U$h=F6kehw1Q`9asTI9i@x9dg`HFK?-6ley%Tft52r5{Nj*&F@AbZyZgt;EzuW)&Y5#;xN8?Y@V@_L`2UjW)-%y|p zI6CzjN7VNb)^?eE?R9R_o%FMyKvzbidCqd@C#Swu=@2J=5Aa{{kW+X*>V29%7<|N} zSHCkEM;8Ha8|x0OKDM#?xG`ZXCRQIlJ30Q@*00iM{V&qzoiEY>6J|wA%kbYOY&fvt zz~$#ac~`wU9{CRY?+Br2pA%&~ivGjl$LX)I+^^q-*T7eBwh&x7<@7zmc z0fwN}ZQW&z)`!iit&NVekC{Y%g^-Bfy@h6U%;qem-h22b;t2ZbeNqvgmV!7q-Hh}u z79xu{mVh@APr(5;zcKm$C4w@MyuJ zGI6AHb~Bdz@X+y1h5Zha_d8p6({JHJ>k9s?RQ03@7FckXBQT;~W5$Q$-ccx?hNC|9 z0i-|0S?>;(TKGd8+(`GP-=&-*oN0UNB1$i;FQ%McWsoN=CRIHYS=)UMsd4NC{q%Tinw}t99^=@5%F?L5 zTpWem8PLOF@KsKewOwMnDKxNAlWuvGzTjQ?a)>i_cM_n9)#s1?zVa-V>)7cC79o_) zf7q587n5=uVX+~Ac=jOOCzX{;-Rh=}St^z+vg!cUeWrFrLOq>2tWrcoeMzg+5OG!i z&M3?)UrbpJ3yN8jYy8TS{Ie)r##v&5W@#@G!4>_<^7zXN6qO&DCd;U|R~h$8_U6_4*V}$pFBdje1@Idzuiy@P ziSykH{se?SU`HuqycVy%@s3||PCLl#Fvfd4@!^l}u-D?iiAQY1-sU7@jG9nnZ!y3O z$Kt>*CwX<;9#&sid^rY3t#O2}ze%^J#Q`DkC<5i4Fop%sxppfaxwtc<-#ETaj*n7V zvStMQMtn|-F+Sh&Ta`h%F?P@LGiJqmXS(&rq+hLhA zqN6@>kcm+-&z`3d^Nj>~9Ag5dhpU1$RCsX7jL+WH@-?&AElWRKNHY)osq6j z7MVQ@jcQmoW0}~-I(nn`l@@M$?Xxx$7o;25_C81u_e+m!qG#4%)FN_#7Ed8oo^%PTw^l%F(lU22iDxNbe~Bm zCoD4VITfu3Km6SmCm44BFdZEKUlq(Egd?&CU9ZDYhu+*j1qW!R$>j6&=;(i@4c~tX z1kavja?H2S_y>gwQyi}Sc_0aqqojyj$?I~E5rSaHf9 z6QPQcUWOEOZ#YPAU%ipGnDn%O?T)dcVB$5!>6Z(5zWH%^ZM;zMc2`A-rP+ ztA8&RAU*YK(npczjL(@VR$|f66c&;Za+8*`(pgv%ywt&1Kw^cURTc`T4iiuLF+m=C zG<=W_nJA2y+^M=d(!o4dStFER$860RAwPB?EVh1D^-~FZ*m^?os%0q62I9oQ=^=6+ zvLYtybU?rYMhO+V{0P6w^2M`+yoxC0S2P(b z>`M3=OqDysorb>vKIL^K9R-*4te|O@?9F*-tMOo6d#~k5DXd&;^n{|S#B+_lPkPz@ zWH^6Fi*-LtN3atkK{Qf)gY_aCdhI!QCB#1$PDp2=4Cgu7kU~ySqzZa0Y)l z_uTvbL+@U-tE<00s@fHnBVA`mhVPv4)lc0ykfjp^M88LnIf#$VH2^XHT6>n?I{0Zv z-2_$iFM+@2M#vfdjMTf$32IQsXr+I?_xqs1RsZ5SGSBnoRB)v;SerttAbQ~yK!4i3 z;yr%t|0CPI2Z`R1^zEwtZV_yPBogwpBm9&q*{R^U|IwP!_1N8&;TfZ~NIzVDyZ6NW zNKdUE0h=zs${0hb9IN=F29>Tn8Z%w!!aL4qO5z?CXuUaXA`YK2Oz$O8T9Ne$MDr`j z7n9&Ry%(imahTP-0i6ybK*2cuYRI-P81^fA$1723ghR9}iIC$0K@l~N=X=KsWe(d= zsgnPrvOn6qe4=6YvxkKX{ie~&e)78W$g$h$IJQ$~L&u?b2l)Wv#6S!sqKal@4eE_; zqFKP@w*0UuI#HSr^ae1WW(@L4VhQI7ekIUi?~gA1(EfI<{O&N~vfO2No{~nQev!R` zUN*NhGvUW=u_myZi9c=b^Au9id}v!y^dntwVg}!3fp`xsI9eq{*Mt1WYUnSFi=uj*(Fkh8w^LkMmRZO>aXWtl1%*l4aNW*+PAjx`@ykG zv00~)xEd&UzUuQYI;#OS1v$W9EY5B3wqPhAFj!=9HRRn;d1OIPhZwG819t4 zuW!J)2isQdO1!8MZ|LEGTg4KDD+FsMqV0C@!c7inG;tOWiElHyu%R6uZrkwXczrFw3?Dy-R)?d;$=}? 
zMpLg-@yiKzrc-0u^$-I=fpbjq~gZizE!ePUcXn%_&*$T;vN3%@Rx zRJ)#DYz7;{qmOi3k>cD%0@S)?V*puM(j@6Th*nj(X{);qB|A#bl8 zk+l7Z9>$H&N+a}7GHXX|yFJ0MHB1O&FPANG%|hhgSds^pq_B-12|9#kpWe7|jUcb> zVf8OyqK>pJL10M2xA7(scQE^yM>fj)E!k)Z!BJP`HxD1< zDc% zQ<>H1Y{u^5`&eMOe>SCVvhglNab4;2uD#_Ue&dpP1Ctg?(R4`& z4z)E-4p_Y`*_}DAE#IZ=XohJn(mq0k?+)!X08er^RI(`D=9AjQ(!}IwzLi_VM}|cX zXI{Pix!`(XP$5BWn3?IV)5uo}g0b64SNNZdLhf4DAbhn__-ub?)~5yPTlh}( zEhwf7c!TbPxq77%RaUMqy_C~uY7ysU*PJss&svMphjFHHG8Yu*xZ#f2+7O)uw@q;% zX#vLEt~W;7#o7U1wYA$SOqdhOIIV7C0I8LDWv68!>pCeXPy~A<%ezC8_ z$i}e{-+Gf%ft;5643)WH`sb(FA<1?WQ^cJ!Dz*U(OatZgD@TXR@>{P0K9Xv`#U*T% zMx_0Nu=dh&NhxQE@xeeo4dJUdc=dU~CiX6*f6kp#3;~7q3cm5EJwJuG?A4>~Kr{p{BFKYNuF$8%*&a~DJ$Gdf&vkIvx+cxaJrbl~c6$C?>T&Mtt3IiI z#6>LM05_0*odh#p+lnvyHKPTWtZXZt_*D|M%{L)#DuGB7MWnA@DE(7schtWWu_y4Z zdr{fu(vSwh;ZpF4R%qefUq#I=$p9jTq94{FHQD#T1lKKP!i z(tGK!LJ<|uXvaJ1ZK|RHDOHlZnMa$p>qf6NCQN+vSMTCLEZKJxY8b~7vy%bU_KAHs zhJ0f0P}SrqqXCOZh3cD?12q{aZs>HtL|~LOw|-Gb-8q-8$<}&%+T|TDK)LygoLeG@ zN>D+peLT4rT$gn9fYpSmFn1|bF|SqLM&=k=Ib8N&1O2;bm^-`hR6P+8a}BwV{TY6% zCn<%p*~>G#F+_(tj1?L2(;ylk*ORy%m?W-1PCepcCpzc(K!%c#b8SCSX0^|FrS$QR zzl^s8GapKFj}mgHCViNTf@} zec{D!V?CCcM@_Ong#h!*O`lBFvZFp3dpa^yLsl|#B2N!f@4^`fHyBt_g!3iYAtms+ z@v4^OSkNVrEnS+I_jcVAk8;6EriIEE*{((sNn;E&E!mwAvUhxDm`I~Pl!tw5izYxr za+Vj(tQ(=`z?nOuDTpVuxR3mAL55>MGs_z3$o8QF@Y|1X+cg;@^S)L@TyPhDL$_|f zP^g^#tD|S0r1&v@#EVm1)r1L7-!%_+A?< z(4|PqsxXP(eHQD9N6s2q-3>7DGqn+5y+d-5KI6ms6LkFQxHA6p2gJ993x3#UJ> ze4cL}Neh{MuGF91KQ3bCFYO)BKUP`vpBC*rZ-#vMT3i!JoCW*%{vH_`DssQSsVADs z(Ywc}evFF}0#_9k9irQ|ef*_=3s<%XkpQ!W&V2Q>T?Hj97e@0wWblBzfX+P6em^t> zCT0XNEk6M%f+WN2yXR2HQzWW;QdA*nU(WaQ2JXY!DAx7RX!|0sg7woQ&^pPc&WOjz zfkNK#;HR>F5Pium3DKMLWy-Kb%&47fHCfA>UG=yX9Bb^bMDi>rT!T^<2Ybf}ALqE& zVh&CdX<@-^_fEO`=Hcg@a43kp^Y)fT7=g7kXc+~|89FcYs`bG7VI@|-d{L(t52goD z7jMe0;6wji+dW7O!A!QpmK76oDd|y6bYIZL{Fl#AATAcYeZMrjQJ4KhO)7*Z&txL^ zk95DB^!d1+(35hHbz?!@@3zT@EFq2ix2(HO+os-FaO|76T^SLo=ZJQuPiWnFFL6`n zj#LGEpC;`Xb(IzoUbK5HRQs}w;SR=mFIYe(VY%e=S@E*C;=@fgOXm!Hr>vp;kOW;o ziV{icWC^e9rO?C&qIB%233%4yrq*7m2?8%r>+4C@{shdJzQiT)E9wMp|I4e0WtaO+ zkj^iPP$bK(!#PK>CMcYLSM1vrW{DoBYciq2-&c1K68>U~XeP$td(+6v-LRB39 z=o)z2Rp2@(;b4L$>~D8QCWqyo6@UUcYdP@4;Z_j1Z{QPO8eBG3Xq}$yP9N{`OV)i? z5EUn7ppbx<6jH}CepA0TPR`*tzsRJiJZcys2QhqJ$l6M&N}aq&mK^2mxAM8FLJLwv zOHJd{Ok!o>Fgp17azr(;=5LL($GV-~A#1i!Cseck?DlK4mbqLnhVW)$1lJYqi@5l7 zKHUB~^!^N{!mgqNV1DTL?+n&My7t&6maP?&KmGf7;Xv$HEse4n`+XK6Y%Ss;!6D(p zuCg)eq4{bQ9Lytj#f%a(vUwQ-jnxb>jBS?_XO0NB&}#utjp78fw5lnKGT%NfTLz6` z2sKeX6q^~5kcsRbV$K|F<@GSc#3sXi$g8zV)$1jHKNAHua+0Xa4S4g_0ZqBD_B*S0 z&rexEUTB4HN;#bk6AICYsbq9I$P?P+{j-ebQ+Aiw6NJ}iY*V8fi5UlBBLwK|mHB?~ zgZZsMe~G=Fw8S?{$fPkEN3;oSb03VzEM@i87o+)oaI)b8mnzXFU8rlQ%Y0k(JsKls zzQz~g)Ak{}!$D$*iNI6eeT^sAKw|P5^NojRk0a_Ov(mBjbI_-EQGQ~ht#>5)KRFHE zKVSGOS?c&8p{h8f`;i8|d@r^Qcdx`099Bv-wFNT`NMF(a$4OougdQdqM-)MIYw3`R z<&!boeF68sQ8s#8JZ6(8KwJFG$=CdHIZ|KLuB46x}*6?D2nMbwl z+fm3pgL>*k4DW>v1gnxz4+U*BI8i>d5C^T1D|a?hkV+t^1_QfrzeiUEPMd=ODTFJEohF|bPk)I?5GFad;vF#*JGL^l z1)7(AJ4cP3tiGGe;1|cGo^`t3@>w+-CzqrR#P|eTSsp#VkEzQ{l3lo^BpYN$cwY;h zYxCB@@+iCisGt@3i`1zx^=V5LZZq~Auc!Ys<3U}&&{8!7&Z}jx5VZvf zi|fgTXZ9@0R)k^>qn*d_Q}XIJkGon)Vgk0moA{baip~c+RkE-XTluzPyLe+vmAgtF zVck}F3F}2hBg^5D8zd~dz>B)jeeG;>^}4^vv&-`RttU0J&2aYp*jtXAiERwM)uD(a ze<*HZyQ}e0^wd}JdRx`xI&V3!=+iR}nF2(0+2fCu)YN(QBz2(KBg#)|qpB1;rm2!& zOJ=oWM=Cz)!JsWFeC9o$XfSjhTB_2MRcB{M}_uw@cKY*Ow&aJleU{ zkM>0+5lF;iLEd=%pN}gT){q?mcPRHakkHsMu33;|sA+f`RY-36s>C@VfyAD4Th5-O zGno9ZCr`h-fa=rzhJQRxMMD2&4y(BdE&m%W)_o0r$TjN@lnkJ1iAh1p7vn=;^WT-! 
zzjKWi{GvY@&^3$N(XtJWn|lL358`(sO$Wdt_apWYVYA4SVAm>pj4rYnGa|#bw~ig% z=<(FVmu}8JB6V((Qo`>w7|mg5@K7A_z8H>npq1PfT2LZ`3&I(%>vM4?@9>7eePE{`{%qQd0Ni9Il1|aFG}On z?^D+Y?yD_K_nVz_iuQUI`AwVhyJoJyoUQ&}2*NCwxfNcKs{4vf^AI7f=k5a*lz4?; zoq1sq%$Uf*hk-DKgct9o=M6*x>%c|vV{xkB`H+rtEe}u%2;O%{G1`%@n-sWa0lI+} za^CQqzbucP8|!h53QI=MTeQdAHhCykJ$YDJbQml5peQ--O_%&bW&GnNP+Hdk-ylip zSps0-nsr^VWL~jjAfczU#?btaj_crY&9JAPF$xc zwL;Piu`1TyRwA4_)AhJm$$XkwnhSHmh;mi_l@k!IlZn1gDy-0d{Y|iU_AOR2z?QLM zVV^VfuXvNBrePpmj~WnDY(Si;AqF!2%2(hndNZKf z{qz;|&C}x#0mA-?OZX!05T&3)wvpq~+2(Fc7R*fNMvm;;z^EWgHZD)Pzz)hM<|}3^ z!wb3U-GqD2CV|U2jA&`l>ut~u>pgPYIKmlF+mMcCrGWz-+u~_y(do*JTMi*0ahQvb0m`DF>sSTf~ zV36Ai9xcqBk@4b>gblYwMGK^y_$%Je2aDwNT_*l7)K5jo0;D%=zD@U(xSvx4$py&# zZ&cVs7fd3m!W70(`|O{!(p(E^7l(kwxg(+;ERt#{VEi=+n^AyQh3#25SR zcbTfPr{Mb=Y2)fGMQR@WB>+wy;_`LvkyQ4bveIUkCue7rq)&}=Wc&+_U@zwKeyW+7 zkfhs{JE-S3xPH)#G{Q4}h~=+``!iDYE5v0Bv*dLwGTl+D0>P$~%n%DV7I(G3D z6h*BQ49cRW!Y0FDQXpHp&)}=Z59>{mI`AA@cKMJ`e(NaLI@@_6Q>a$#pSHBRSrWI( z`J$;Z-Z&sE51_2M%O)t@Z(|wngqenU+fkzJ3B@!{u8D3L8c#ZPU3u=jTO4CvVA}m$ zjqE6utt}^Au}fQN>W5i=VG`hBQp6^ok6%BQHXlR1m4J!)w_MY65ki4-tJOJ`CCiby`W!P zg*R5xi-iH+iPaP=WgU0-jerc3fCOF~wR$KU9{Ne}D?^)4frk}Iv!n?YE;yq__Ckg? zIjMa?;L&M9z}C6>KgQ)(bUiNgg<*!_E|aIm?crT-u+Q`1mA8UtNBSkvwsi9_ckO#y zTy-R7>6Q9Ps%3Tw=sny?S;lovo`I?MyX-c+cG@q0eCW_JX6_QyrJ%IvbDi6Vcie;h)OYRN9yS=GJcSo@r446|n7)ME<^Agn^#YRQQSpX=l4yKUnbYp*gtjyo_@&X>en zUG~D6TB6hFTzMTmW3bS-yZrW$J-0tPa)f^6IgwhAaD`W-Ud486ElX_D14X&QDNfUg zSV6mh;8o7@f?f}blap+jY}(Be zzSj}HhPpvthSHMWPg1~{iZ9qotZLyCvD)pHI3Qq4 zA@Z7j_~wykwuz*H#S|SJra{yGms1B>I=KOm@S)SAeEYZmN66%?TVa`_fD|HqV5G+< z;=ze*rsc^>umMw@h6)9#?6$X^K+(sKkK#%55@HQ-v&6)!%B11M3GFOkWM5a;^$Qqx zj(iVB*q9Sm@IKc+pTwpnXa-27j0#__^~i8gMzTR}OT#0pAK?Fq7__;@^L~ftRt;qv z9lM%;qQ{&3N8?^`o&F5rUCc|+H19(I`P5Sg9B~-$fyZko3tO#iH@ZAP-A!KEgV)i3 zz6Z-t9odP_3(&jMj9~ELJ=w>nhzY^>aZw^dQ*cxM)5w$db=&Z-+nM}>QG*Z~aScH~ z{w)<`htK$0JV3JyKX2xD#e&J#LSK@$Sz|rxd)hQd7rxElU}M4uRySj!92N}~YgCzB ziZ0BZsv*U0LCjoVSCQVu&NR1nUsUZqgfU6P$`KvD&KUbD$SpUAFUH_O^U}GK(#6tT zT?L&d!QNs_+~$Lz_D)JCn(&&m)IGT1s_oyA+s98i!#$bci2JkKzY~OZ#+&z-jgFqj zgfe6kgxkI=(mCMa8CUZy3wZoml}Dz-_a)cnn#^cM3>?l>SMV0h2$7{v3PJ|q%8}cD zToEoJk0KtWA=zQU5@5EL+Xf((#EqoL5JNcHw9dVl-^OcU_wC}2P^R`PO@9++;GMfA zleZ!StVh-(#t6@|9aAK|&wCxqZz*-UI6FZxR`UZ4`I$*N-FouA1Vi={baChL%FHW$ zw*6_D8J1q)J_cRszHMX4PFDN|g#cPR^X2mg4`oEQSdIhApNjP(Rgu2~2$Whjyx()u%(2du))Gh2&B#yqcaf8%Aq(Nn$9JSd3NqxVLL9dEx57V$P4R_Hamt-=hU%h>=bL>F-Tt%OY^6)^ z$*0+^o*RRdBPLtz?7;(XZv`Kn^s8g!B5@d-G_%*ToTgx=D7c*AIGi61%=bJkX4Cmff6t(K%Q=76BFC>-x=(*~!08U` zkDQ;@#8Go0$j5NvHxZG%z`4>1CN^4qhAMiE1=%5xh7vQFy56%ZDX@`lLwz2igZa$} z^kMy%EHKTSnNQ6Ax&GsNaWH-2oeH4yQr;kJ?5_Ebm)S*oSiXJMVwTx2*QWIu@JCv$ zy%;fh3BVHIRU+V*0cE#sFns9|I-;WaLVUHzGr8$hO0HSVDBhjVdQ%POVO7$*X6K)O z{_Ag^Hv{>o_&Nr_qTq_IL&rHOi`$**6x_j@PMc}4ol#7>K~UuTYzECFXXW46S~pWIgE}|-H(vd_=Mc1DT}sRh`<`dlUm#~Kt*~SKoH9+jgAX7~ zqIiWc6u99^58nr%sa4V`JD?G&Z+red7SDN#d=vVF%=vm4M$e5HM)-+9yVzBdwrEdE zmO6<+RikmZ(p(YKQ>?ehW%|Qr^Bbx+DfQ8-#u(N-%FvA)66N1;k%lGNDJz9NOJ*cN z!Hc;d`~Z4<|jtteZfo_e#Gi$Kj9xp{q{S=5ir=umS+3pA9_@xm5~r5yI0 z$}zT0O2%X7x&;~CH>Nj$Hh;` zwnv(UQlQf%NkSc8P7c6^?7N;eH3OrvJ?U7u0Nr%0e|=UE;Xa3N<>9~6wXv`Z0@y-9 z{(V!p#CF*}+F%*oADo!!-#K$!C_KM0&d&^YD`HiSdl@HFw#IanpelYr8iy=sU1O@NYN7Fr+Pg4T3bhpeg}f6*jQ zl6*AWj$tH0c+vkdaZ6C2+Z;9-2gx6jFc{HHs?R^g+aWTm>t^9G#IjfHO zHo!U^tLVLuzY32s#)>Yhjf^6jAGWR5+;%%VR!&#-_VYWPE`kPJL}cu$g0C|8Q-cZ)e)YqV80>&)j!GBw!o2D^L-b2-mL2i#B?yNBqBhL{rt3h5J#q3#r zFO|PGjXT_(GvEJ4?Ym&?S{3}|i!jZ&`%f!haoAbqQ5yIRq6rUt5>P#0os!4k<$9r? 
[GIT binary patch payload omitted: base85-encoded data (tail of one literal, followed by a second literal headed "literal 74300"); the binary content is not human-readable and is preserved only in the original patch.]
zT^MzB9zjSz%M=pKIWPwqGz}$|OO>ZIx}f3Cu8GL@0OWuF)w4sL%!5g5^t9l-x+ejR zel>GGSp&qF+8d-zq{7jOOmad+DfRK z8DQceG_ih5=k|k6LG3R?4_|^Rv;ty@4NwsXI1%S#evtZ_7+c`F?M%9aNP@uul#QE~ zlRjW-V+dm~z0q03Ttdb$&-(-lP7^cBJ-LZE>L5`W?xoaI*6g1Wx-|8Mz6-e7qYD+) zgU;ZanTon#1ERSInW1?L74;)=a|dybE2dm@ACLj`plAjepQ`zM@5gTYuzl=vt-4{)ev3tCPN#@-PZZG-yJ zbiH~U4dq##JAO1WI>ia2_Y%Y-jfQVyG2s1=#L=MBq=(e8GKphICnM^VF?5ndj4%N6 zG@*!F5IdW!C}UnAWA?w}GG-0-iiZ33IJN=BMQ|$@G%ov4NYj3Yk|$HYJlJhcH3c&6 zO#q^SYoky){b28H-n=sD*C|D5)hcud1+`P_+qmvWnl>f`CBL?e^F^k-8FkKhWomFOwC#IGo~U*rQHOk410hnIvil^ zNdhyU+(T`#B&190vr^>ASmX&04dO1;`(NNm+{hkugd1c&IA$pEEbM>TJ+7^s#|e`6 za&Q1$g9yXeW7$7kZ%(&Yw_vyRq*t6V4&H4(pNdF66_KY@MC_O%GKPu>b6J3jNL`1P z^QLKFEE>=;j5Vo)a+~%{&7BN9%pS>(6NR5?6gv{|e?WQ?^WL`>MMDN)BC!_*jhS3P zSEX5uH2;cdV_{e_1G3%?f(8;U=TDl9OC)27okYV>}9Nf84R4f+~x&#*8l z227sJhs-D6TcLR1x-4?`(=gf$T4--=XsH;W+5t`-Rs_iOB)G*Tp6lGU`I~w7#NEKo zF5HCaL2<4p?T(8*blDP@riCJCj)qHcbRybDd~@_4d_zYCX*er5{1QJbkOrBGk#t%| z(&;iIom_~PmV#EC`O@jaK@QRnAbIB*HBkYGq5^f9=4LmkJUw8npQ&41jnA zr$3PgQYx>)&vbKp9ol24y_5668_gIL;P%Bb95#JwMe^Z(xsD%j9Q6Z^v-vnC!O9>( z3_rwxEa}QS9eNc_!rtZo$Gt~G_FV|U-suJcla3cFw?OTMXV7#Sr%%o_orgCor*fm& z513I8d&xQcuLbxhaRgJ$wjX+;{J@|7iUy7$#=3D`@dF%F{0(NrUydQYs*nLoWiav> ziqBJ6=`Bs|0(BL;reVK}@B{rF4KEnNfErrEo-ZxZNm}SK3`t|mBDxyvN7BL=47osw z8&)SefFT#U=7Jb6HGvp0WvWkw&j}T@*(*yCjdnz1;75Gw!RO009LwPHg${Z>Y_uPk z9Ehjc&0H7MEL_E!1pC4v_4*@JZjeq3NE&g%yASC%ARkWV&=s-0w^4G>GxzA3+4>Ed zt#9lrxo92g2}ewM(i2DXGt!P4Qyc($avSX?rMVlp#O~WVeGj__2sAYeZ z9o%n|jC5dxTYP5Yw@=;9mtPG@?-n*aC?8oBw=1?vbA0T*ub+7V%0}`|x|wCq$4^$9 zth~6;FZ@j9rv;zPM(lLjomae6Z;AGmW#T4fo6_FQDY3ZgTIe`R-SW>#PRWl-1#T~l z&);=iQX%!_j_?i9i4Qd<95tVy;FPh-sL^qMyxjgYc#|;m&9@m8} z?(FJpA3yA_OY0bc!uV1Vd?JExSh2s(FIwiMA}u}Ys+f8~y;PxR&Vz5w`RV@Rqiwg3 z*0i2{@0>?wQJP1`$-7^gN(8n4)=n!OH+bW!;LCNFi)A&nJT^O449VJNS^#T|)h8>9 zhAeehvEkBZ->>eUUq9Ltlv^s`cVvA@slYJp?>mo(N_=QA?)%PR*f5DkNk=Iy172bL zYA>k?a>wdc>xy(*Jy<*E-rl@lk8bcs1G$Q-ov(nh(g;Ah)br<8mQ3tUyzaubJ5UaCki!pEa7S z*>L6etmz#yoH87Y8b_;J{wB2NP$j%vtu{iqIrG4VPZGkW%321eOLA_@hcsrL?XcJ6 zyuKA)x9~XaZ;iM#Or9$)8W4d!8Pt9Edc{AzF7cJT?ig#)=pS?Qs(#V-<(mTorL>fj zKOA~}`|>MT(*SD@7hF_{NjTz`v-d`tOKVv2NER^nO9kwXtq&Fgek=f{QCH#iX-Cbs z0x%Cm+C{n^4jFdC)i>*Iw}O?aM$Y_|kA@7Z{ru^J{)9cBYBkLY;qd)%xOidw>0mfK z5DqUVhuchkmnHJ7E2p(8{@e(}zuf}q zqvjc<0u4vj2UoDpQ&2CA$9{F-!mG%-)-Qne2{QV6o!W?s`$LBP?Nhh9Ab-d($IP8_ zvig*d33AxSWCkD^3_!yFW51=$ey#DnLFdfP6XO15ztgebM@9Vo0^6=bv!>sMdj&Z6 z*32EAS_vSuwqza{1=p?uV+a?<`fYN18?9zEZ^9q$FC)mF+otMzz4ckOAKtk(V_ zR_Jhz3BINeOv=?>F1E2EO3MMIEzW~?{oq|u)77A~9_*&F9h7#(x{;Usw|bLOs@~Ot z2}jolH^3_n@Sidl=YH<|I6EQE4zCesK#?gTfM~)l29mQsuNN}C;XP{mhna3Vyf&&TXqzF9hO zUwPESlm^L;fCq3&-)zK6)8R@X=6ItHe)Z)obPNoelHiKYD1+l{Ex| z>+eG7)*^I8&2J(nICn4W4kEH{Ie%jLuz-SM9TS^;iXJv0BMgqvUJ2VrwDQYC1u`)y z5M|+X-RjGK4H>2g?<6e$?v0D^M)$Uj|=o|MJ4vu`zYskl*usz;H!aM==#7 zJCllnBoze{B%nq;ih`{EEu%lmCICn~K~lQqC{zIvsH<5&rPcMCLjzsks*Py)lzHH| zD-g2cJ3_8n^*12xVnGOb{+%=IZ?cf9L8whrj=Tzkfv~tZku6_yLvha!#oLYFa}V zQ^wC*G{#B8&;l4LJD4w?7FY1|+Vo<1)KU9HuwXKh$mR&y-iEwbPb$a;Drn;|6qOMu zCf%PTjwNONsJQYn2mQ~HhvsHYq?FKT7DZZ5F6kg)ia zkHsXyViF&V3BQr|wK?zSPJt|w7RGwty*~n^HVyFa0=L-VlLGMVwKa_Qb&;PdBdz*t z)XsFvE(WPB5Ft`KIa0m)>LjPdg-nq}t{nOUv~QyW+MnF@$@ql0$0QH^a%{@=$>QiE z@FbwOpAS8??QzbB3m*RnJ+lHp?-ZctSGU?GbI7m+reZO)`Wq$UE$>my=K($AuYjIf zGoV*w+2|#Hv2^@+2*He1>)P*4G%=jxIoD=0_VS$eGUC7QMRyLR0Z~7b2Fbgy7maM} zrJd5ig;Wylh0*{>_?NvX9K>Gyy0Dk7ANQh}EwcK?#Bi;#jtdZz2#QG-V&X>pP!6`w z%K$1)5OLKQIILLs_sWlV3fE4#t{AundNX$~sN1Tq23`k$P6Zk)Tzwa+bSsP#C-PcJ zvR;3cypzAgrqcq(go-jq;#?m{;sgb2p-RFfRO~yAu5N)cH@L!tr0{0Aq5)xPy^MXh zl#X}kOU#X%FHKeWer#*uizm34V_LvsH{8*;52mQyp4SA_My6GJeqzZC?| 
zuJ0V@oSOulb&zz`KvE4!Er8}5KoeXP0%91{2TeJ1K(pn7Re!nanQr|_0BDc^5LRMD zeBe-Pp=-q_3Z?u%Kvu23g=Dzm1d53N6sPaT$EEH)!sQY^F3-M2p|}ns|6x9;um#kZ z0Dc-p=A#=!PviC72|AgBR?qEuzSFT5^dOgAD$oIOI}`dWBFz1pm;G(G_yr=+po*cO z$3~w+=HDe<3ksO@`|}(RC}f%XuHbu^SN$509Ii$z1_jpg78Q@ZG@ZnZ0mBPnXp%&{+HN0kqb_Yg5^~tsH9o zu0^6|)aZa68o>_IerniFFcur!r>-a{p-@#=e#cq1gU&?k+?~~A5c~y<`E`3 zHq((Y*Um^yxcur(MX&*h`6STxwTW5=rL+DYwsC8clj&)`PlCuNGzFoSf$>QnbL3`6 z!zYgXDiY`op9G;p{qB>BhwzEN)C7Bp!uZ@UE;L6Fnj|JP2Z=l>G;0u=mUsUM&6?q< zm(l58ZLet+vdM?+bFAgCA(sXI)a3KG%_R_+>>hsl*ffH`26bA^AKfP|%pfjCgHazr z+3NH8dj!&l6&M$gK3zzkF*`?VzPSp4;lGL)#H#mmcyDdFXK#4#d&BfznRN@}^+z9< zjN42lHpu|Z)kY2-dCQLI|#=L9C($R@qFfa+O#muD9o-7XF>5j^2LV9lc%Yo$qL4 zsCUTkxgt!x8;$(#=69b=e)n}(Wpj|?}x#iA?6B?K}QXoNGXGW{K~N5Q2{tY3e%PD_qu#pMG}sw5Xn#Fdb|xTU}G~vZ5wXaM)vl{qb(%j^QdHOK*

      FUmm`XFZ1IiN>U#ASdNQSuR*@1`qxQn2iF=u*B)}6w^q+LKleYfrbAFL_}VA2Dad1iqS6?ryxX)F>Y0PUlQGDI z#Y6Wc2G}j8V?+Qo|{X@>Ps!i9ohU?mlSCxXq7>w&?}|Ny*MAseAeUq1JONAk?X$$&cZw!SPr8{*wlZg-YqoO)jD^?%0ZBVr_g*)C?11=Thj!l3|OlXgcwyY+w975`9Gnzlqeegsc(%%`g z-{nXpN!AOBV`v&`hGM0?wH{^bTG5o9;gg*5(AQUD zAsRWnFT{*g^LZu?(fm3wpW;x5I#G~cD$t5Jj2gwoVfjqNVXER7 zCsQd9&01i9wxh4-)b96tfE)^hw)O~OWK$XxqRe7+eTPywYp0ybpJ;Qik-mj7^lCoM zEPRU=Hb8+|O4^~i(6j5{7WiG>T1XDs($MO{WB5(za?~dm4IV4{rTK>xzRyH9c7;PylL{(rSs=x?T0n-%x z8<%2D!?LEgDmrDXr}-hety{4G@QmHC|l4?7BD1tT+&|<6>+Q* zrKAX0^zTQHzlrKI0vkU_Go80+z+_O90j7G%hQm{B3Q7f3>0pFX@Btzaz&4uK4kPrK zLdj~P+(;opjJ$23`s|7U03&ZrySwDmx0@jzH}KE(rYl^6Si!V+1Vrw$8BTNOpa|}> zCops=01T??Ul+-}fLa)jA843CK$!6U2LwXs(4@Eq@`(;-4BBrOYQ@Kql0k3YlEV@d z837oxJ6u$zex^56q zBJd8QUK}qGTRv3~{CnB(R5{R-Fa&l3=t&jyG`IXrM3sLfboI5f;DVJ_AO&xR6r@l; zq@_D(IA<7PVvA$AJo1D(JTVngWdvUTY8(D-P8!wZ9cO0}*N`Yoc*b+#`OghdUfLaI zH(*E`aH9=qjQY#kPyc>ff{dVX`09XxQ8L&6*}zd#6EI;fCL(fCf7hZ zLT!|_N(e7LA~m5B$k@stL4%aX93Vk?IFpuh^)T5)_qlo)vci%nRMXM!GFib8Q~_m0 z;3}}PsOXPe7#J@&2CvY!Y4qBS;ZSZdv_vd41zMSLd(dG`20wfSUof3S;6y6hKHj6^ zwgPwHW8KBWQ#H_azpsVJ(TD;c=S6^zN0*mrtzQUQ5H2m#ny<~Y+R|pRs0Ilf?)0-o zTnIV=@q&Lp!DR3R3V<&JGij`L6oTa#^mjlLRxoFhG++%B2OYgY?yp7e$4qhxK{~&0 zz_OK*v@E6z?6SMDY%A8Ek8?hB_|g^Eoh$3+omv^94!F~@23Yo3yr4=hmR*PCsJm|e zuhuG&wTC!souC5fJPP!CYxli9`>sX)xZ=$UTJdi^6dmm(6G*uIY_-!5Ixg9K8n}|S z7P9w8SfRk_Ozkp}5Xyk;V4(UC>X8x6m(X(qjC)iUcZw2ca zf;Xf{meJ|ckQlbGn}VFJ5tbw97EA%&9tEpM@aO}SSUQg^!r;J}N-ycJKrNU?qrJE= zJh2%E9+S^~v>5x|9@WGf)oXP$jN8SOvCohaq5%##r)xcr;7Ov4jkgw}qjEZqroqh< zm=s>OS`MSU|1>Fi1xz{xA@N@Bs5!=DqFFcM1#LFhrHbY1@EO9^V*k+d|o_?{59Ro zLH-T|Ci{!<_ZKEvWOEo7)c(T-(YULyoO3~cB*rLK!W|^9Pe2`K6D0?Uk{~@;X>L*> z0Vf&2d2da4!7T}Lfihgsr$!J)I=~KkP%0n}fuM32F33d;p^t55;0P`Ruw=v_9gq4y zw>vG6u;j6`yuXhM2_7nKhp$~A7hF5?UEb+u<7odFi7cep(&4H0zvICBAj(`Yre}mj zMSg{49D5ZVu#Gxl&kP_4j0o+y#1~MaraKAadGY^$FBhhlS5OTK0sY=1foR2egvJ@7 z0o^puIL`?~6eC-Z;V29;v%%$qArNmuCzZMS(pl(QWYGxps6)_5Y+_W(dj9OA;KtLJL&2Minzi3o!1!1W9+uS1_N7vWoLqX8g+0;q|*p| zf)|xW?WE<1T{1@oW8SE( zWSnM56Y9%w+H7?=?N^u)O*QFHq}eY_@|JBSaD_ln(*=$`q5OkcdyvDkShHSJI>`TE z76yI>tW2bHB9e(Vn#2x{4qO8?0nJEv(WGKLMJ0jNH2&j4Oe5u}6QQQUS3)U*~V zn9eA3FCW;9*~hi?z2A5>BV6Ot9f=#E*St(2@zkGe8S1by^LKG>Sl!gRD zjDf}A&4q5?+PflAujpt)v$;+wrFfslVF025fEbcb>@D4L9%#>Xf)?C@ zg_fcW7NZQ}nw5S#U9;jqZ-fF2%>c!Nn@q-7k2=c)Zh1WG3Xl%R@)MUkLXdc4gqFvs z08df@LqKkS=YZU#hj6{#;XFilXBpLB=Bk%y*-E3vmggAbPra^aOsaeS4@n`$;+E1I z%Z@ccVv|&2B^l*wpBTeK@`F(j_I5GAbg6|H%H+L6SZNRGQ6M(5k z22%lLr1X=Zi88QyS4`0qrDGs3esLfSqk*Qd+n-|_h~=1a?8NM9KdlQXjy9zL(}8OT3LAeQycZT}PgU4ZHOP&1Ek>jtBax#idcL=NdAx;#V&DXwUAHv)~OW~hkaOcFC* zSPCyxQexRS(S#V3_vtjeaGdqRNn#2WL>V*lnj~QTI7p+(nCrp{O9gHs$Pvgo3IfeE z1Q7%oAQko4c4MO#NHYH}L`)b~&clG_4pWC2_+5qCmhE8dFM<7o34Wk<#Y>F+vL<9d z>}i7gldr@5G^Za3%X&|U)O@zB`^?9UQd+y7zJhoj6Zx>H=E=F%?j_) zm z-mc!B=H72o8*}fXIMk4+kd8*)$yuV^H-*5uf+_*Vky{Ajwi%2!t)ig&BIrb^bT`BR zaHN8I7e1bhWiH^?*>qZqJD519;=1|m5F$9X;w57|tgFXAMqzSJZWYj`LA6g-93Jln zkIy&;M=98*SnGGbEyMo^!I}AzQ%0_ z-8n>Wfz-!PsW|=A1)UY^>&JxbmMKSH;4+uvBCMc;5=A$3CV<6`1789xzH)W&`#)wl zc~#I&0Yvlhzjk8F9X=;WnQ zi^)J4geG(9%n^J;-NR?bYND;rMv_Z&});}pcy?o=MQgvKty*eep= zi=n*{SYa2i=30PR1MUFi;#%K!rm1IAJfDi`LP~Md0Jvo>vuDxdODJgx(%*^Q6kwKv z$7gUJuf^|CbYQ|_KQtIRG-;|sAO9K$;>nDPWe79^hxZA;QQ^E+j2v=eoPJp23cVc~ zrsY(;pdyAk)JtK|MfK8u8cH+H7rf*a_Q52f1bh60+2bwjk!63;kTsM9$S!54x$SJT zxtf~IgV;|#T#RF!vHDD($86=eGHQ4V&G90dIp3dVIrtzk z0cQb9Jxzzmg2Oed86V;pYn~!0e^bVXLcnGYOQeDEq#)Lk4>Oj-XSQIBI2M+Nb$w|v ztoxWI!x#q44SEgH9iTjOj8ihx_q547DfHAnM{j{ii!p>g$lW5 zo&a{BYsqM^=Az+a+P*7LJ9b-63k0T7i144RIQhjLP#y%wjAwE8)22+Ysf1mq5r^@g 
zoIar;0^S#7=OnE6zx!|TVt8MUcwHL*OZSh08{m$l4a1KMGmkhB#o>`Fnsw-}tN&SZJxig;5gIc^P~g_mYjI zN-4^ahcxZrM>)*B>}%L=x(yeBC|v%)MWM`%+$jWi_Gf2Pd6=qc%jU4Nt9y-m3sck( zIc*HE_M^5a94wF03*ww2noRdl6zAdC85j4XSytma#qmx4%Z_Ww@JIu?Z)!6C><6os zJ8~GywV{P+u;l=Zp$6I@+4wmsj+paX>>kI<=PG>fY7Dml_+$T z2#Pr4Qz7`&@7h@|(%LqE!WR2PQ2$xbH*%+p;Tqh?u4JUMTJwgFdu$}BtP}dBZ_;jT zfxkrPTZqrV!L*yL*bVbHP7*fLoi-)hkf=(2Lr2|;$M`GBj)%idJ@JT&wU72E!k^-it1Wp~2XR#8ct?K^{-yw8ytWMiwXZnT5uUK8)$P=WuI6vXUV;j-tGec$ zS}Rn2O777Hi`wwsZ;yhydp^b9UA#cEKaV%dvZwf9rc&&xvZNs&wKmAtex731@~S72 z7wcdV+oELRDCxB-VP>#vi08Mjs%9-6Iq5C!gFcVs@v29*8H#Uc0>G@l83>No`3VhFNe1QkEzCSpuyR{1c!^iHak?W&!csyQCba+a$8*TM7w%YjO zuEpCdY+`t|$)oosV~oa^2$ZHqBp2k}Uu1PIJwf{JW)Ey*+0gzJ7hPuIoPiC`JC) zsKSq5$)9@6%bJ7WPbW)@*aQbSE4)tdUthN#uXCq&wAa%+u2P88+h|K^^kd;C`q77e)au0MUH@9< zxA;qRzACdoMj`h5tr!keB?qRR?Tf!R_WP0I@3ajI{6Y(eF!8@Snmo_*j6w7a;N=&c z6#uQ*<6tKKqY=?`V~Sox(bm8=Tk-XLgY zso3_hy)M0zGsyQElrxqUdZ~FUKN|;6{7Ued+1mT4>vd+YXYMnKU}FaJ8W$7nVG+Hi zmR=K!*QmJv{F*&@%~_!STQ2hilXKxAaM*L~x`oV^Z>OBw11!0opUAjVR_bVat13A|M`T4dVlNyB1uxrF1>my$L ziZoO`k1#0Vn-j@n78UrWAG7bN*qMeYQN{+n)PZ>Cw#9g7Csq$@V9-r3Rg0;a&677S zUxLqF_8<=;*IUFg3oWBBMq{CE%k5OVbh^5ycLx-0{RJsk@wcHluQUV>Y6YCDKTQ5G zL2FWY)xnKZK1{%CnoYEJ3s_db8-=UJm+ZS8nRvGQyt{{3Sajl9e9QojXJHSzScJu! z+u@?{$#{I?RQB;-dy4ycX(@v5WRQ9;Ro@mbRd7FPq`XDo)tP1dp37XCLpR$1lP2?Bf^lxF9_~n0jqFs*eBjxoYwT2!re3O-!c$P-C_M=_!%sP#Fh^kCYIAWV^krBPUowv` z#p7x8xIXhZFA!m8N(ox|Scv3H4dHmn*Ku{hO< zw;Oa2c$M++?z?7E*iWIK`)tPtA@Z(ar_`>w?13t{tTq;rMivO0cC_EGNJ|v70B&<$-Btu8D@OMOWb6) zy!{XM~L20+^RRR=;MW5gO3kEEF}!4Z7yXa<5dUWWH5qk0bJ{=$GXH) zoL;SqSDP)zs~zA`KOn?!R(kp)Rrp60!R20HPWBP1?HA((7BEC(p4RTJ`%FFHRr8SW zjND1O3_Cd@=FL)rouyQQf@qKZSWojJuVRm1k)LIOu(Lawvr`r+IljeA#!?iE!_P5V{Ay`I`^?K-2J^?)B!1}C5Gm{ zWo-NzAl+R|W)YxK;!dO?Y&kHC_8I2@b6y^YIl_d4Jebo2&&qISlt#deNC>Z%Nb>AV zn9%_U>|&T* z#lgNAUYRuxW>*nkengEXik!HSHG6vof_?5H%w7dSsYfYB)!PhdzVs>g25MVoskORM z=irW3q0BoXStC7t@I>I;9zsXWj@u};Db>SKal&jNWA*DgZofjsIWY}ko9Mu;caGLO zIwkqB{Io|K!Wz53sW1gZ6W$ukv>E8klHuz`SM_eRrlBPfK37%cO;y4JUUCPz#vrIK zy*T5e$$0yiq9BiV37WLv0TLk~0xi6Zh+>fpr@8&*2uoM48>bW)q8O6Z1zTKxoFbo`)Czn za8+1rsKRDL^;MX*XT`Kf=OjU3gBRUMJWGt~a*th}r}fOx+%lM!7pLVG@5J=D9G<77CJjHPU7spw4 z0K;Csbd-QpFT%tWhKP$ilZgv(oW2RX072s)JgsL zdO%uDm)>mm+ppwGmU@EO1ESPJ2@s;l_S zNR2*&hg`3EyA@FBis#ZM!vYL(kDiaG?^MKb-=VNsapf)JJu+{b8FmPZ|v>j`kz&Y^JF&Y^J1wwKHNTp<7w)^U!T1X3yKHx#BGG{+)=#st}DV<7KQZPY;aG)A4ZW9FNOEHtcG4Mv@7fb_NZ~u6)vYXQd5x=3 zE~yB;%7c$u-N79mSjhvq=FTM+Pa+J`AOn8PQ~@{CUd8Tcg>-Po7|t&|5z+^;c+b8x zd_)A0&P)oi314VlH6H(?D%3MWu6po%^$;N{PRvuN48a@O`4T%HY&6H#tE)L&dA)%( z?HAXaQHv8>pjnH;)$%JB#p)V7DhXJLG$O!Vpv;!%90**L0Gr1^zmOBB6M4;);3l-{ z3l2`a?xmndZ#XrhiCc{q1_jz3O@Cy;@gOe@si|R%Jz2_gIl`-e5cP=W_Jmn1!hfBL zJ<>*^6oe{Bf0QAD?MX3v;L0{Hj?|bqvj={#8jZlNjCl^6SzfDvHd+oTf>|a0i;bbB z#`8ZHTt>@y^Ovzg(umwG_^y?RAs_DK`%r|EwpxvE(4p%vsM&^Z(Ee30$mI;*pc!P423N~?)w*O5U)k`s zYmh;Hee0ZMs&>W@{;PAMr#K)Pn){;gG)B)1BT2DNu3+)D*MLuD!DP2NG059Kh5sj< ze&TIE%5d%jaXw~enWJ2SQKxZq#XFZs$r!{pa4Bj4!sa5*-v_@sH}~g%<)s&@6qJJ4 z(qewB9#P@(K?Ta=zxM8UtV4~S)`BK5}2(80qKARM}MXtu}4?aMg4oU*L1 zaPrpCm(UQrisJ8}QwN1g3cm2gKo|oW!1g1338~sw_IaK2@Fn6CN@WOy2UGcbYgWSE zh$E24COTPv&u^|2AE9clbdBa@#5pYM^ouh6XwxsS&hmU z=gimk<9W%Dz%D`RfeELvAFjZMvTCV!e&g{LMIi_5^O!))8aTvRwd-}`>nUhEE)cNR ze}7qx_(ri+0=()0CJ@x?yxki6+NN_y52c=UfAjq@h6kCd!sOESHDj8#wxr!Wb`?h+ z_O#^_jH|p(v~PDl@iCOyr8SWj)f>R!Wei@}UCqAmEcS7iB_%I;dXW;lk0|70-3K;S z-PNy}CTQ(++VKY%Uk^F{GKN_>L-n5zTxTiX{mnlgh+`jE1rNCXRhDIfldUnJAvF!K zeK63W!g~3h%N=w|kEh@Gg_rS(WWqk|fC@iHb)t8UMCn*+#ZjwUp!GG8ni9L=?n`2q7Y1$gFSg;Nv_qf6sG% zobrQBcCz>PzUy6Uz3;c-QMDdC5f%DdE9b%b$b{HR3#FoqgibDeoedbN8{}_56{1o9 z?qH-~Kx1G!4B~76y&tp<|pGnXObg+viC^HSg(jt!i 
[... binary patch data omitted ...]
diff --git a/contrib/PanopticDeepLab/docs/visualization_semantic_added.jpg b/contrib/PanopticDeepLab/docs/visualization_semantic_added.jpg
deleted file mode 100644
index 082b00eb95f767ffb252b6e04ec29920e0204664..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 75334
[... binary JPEG data omitted ...]
znUMeSDpTaYUzGxwqW^upegpW)Gk_f~@<_sw_{bwM+SXev06P~Ctk}OV{(Eik_l=4| z&5T#sz)i8wH0k26zg^C(wE;>}V&F(bA-aHw5=2x9vi1nj0!Jhc`P=^91|A?HqGI9_ zl2X!88So8_ir~;iMMcC!#l_dZ267sF9S~CzSJts~mQe8tmeh@fS*7HbOX<1XcnHV8 z_@-|i5_eV_x?!WLn!16ZkukyqX=A(1&fdXw7us#Ny9b6qB>DRJ2arR ziARqeKarGrE-n51g^TpO{DQ)w%f(k16_r&ib~T5~yLqdj@pe;l%boU)N1a{WJ&&IV z284q{FNa@^ydR&K{4n+L)8{YWfBc+Z__g@^&(gYG;Pd>~Eb#cRW&ev^N}ydLVq&6V zlIwOsM2@W+t|TU|V=1BR>?0W*tD%h>0#Q{{GtxJ5)z^1rMh8SQ|3A02H{i6St&IS(q7ZO0i7Ek3z_YC7vSO`)e@uPV z2D_3-u2V@+%0YNWXkGi3i^M0Q8e2}(95b}2O2BRR-He5O^uEG(RyLW8XgoBov%94? zypm=SVsAhzE|9P%9MqwjjVg$9hAN{$;$L_ zq*(RQ6FmH0hxj3y@%iw|xTgJxujh#9NJx4{68;_C8YTOQ`sDOOTuANpH(D6_RRE6{ z_@e9*Yht?}i14b(Beydk3{T13s_4$}v9Q#=SEH=Hn8mvUJ%b|rgD+IoF`-3z=T$kp zNQp3*Z0=76m67mgH(N&{C-bBiZZKf?^D~VRugN#e&s4HWoT($Q#oABtCmpyvLO(43RgnJ^f_h9S$?B?kct|H- zJ4VjFPAobj40j~7`{_`9{@oV!jfm=_$3GJXOI^rYU<)-ZI&n$;#oa_gGVe$X+SY1v zF0`A4&2bQTy^Hs}dz>vV7dh6&dAs#YlwH5XwVH9~Iqw$jo?54~w@5vzn$!c97dr5g zT!Tj`{l+uIC$p+G)5=YTjQYHP^Z)kP6%0Y?IeP>6E5D!%?ud#ZbE{ z7P3HAjzamz)^Xalj@+XcgB5XrKD_TepME5m9Ex%My^&{4Q zg5Gjy(p^sHsy#f{`f$z~aL?&l;2pUJtJ1=&edI%Kqo;JUgM9+)eq)xe|AhVCn?OCy z{fZ*oWobeR!-IbXIihNh2727#-;G1ri?aOU8R8+@R;Wd-XGEW_<-nlHj58H=ui1Z4 zi+oCH@bX@1tIZ`tt9hFzgWz#t_g z%fL!o#v{G!^y6o}3$>s9e)C={Kl7OLH7^g)lC|32FFHT@1~B>^X0!%qc0x;u?&s^B zxO@^C4NV1(!d<>9cEBw6?k)Ua??ZH116W-OE**ac-(=v_Tce?A^1FM71&HmDIAI87 zi=Wo$YGZ-lN7K1|fw}2_ZuIb!hr1+;Q>Pr0)k9ix2rNwMl+Ub5=qAKN)hd+e*^_vw zPqY3lhw7K0<=gDIlc#$b=YD#sk!(GGX;GwCu&zgsc|GvKKYAYwtO3%!emS$J#|j_{|J->z>-`gMkS))?EE?=h zcF*eSnL$6=d-qWHw&j|CrjhD3mY9i84o7=zt^ysa zHsNK7Zk2M*W_bq|Xe&{+M;6Y1TP0Pk#FI+j6whjY^4zZE#ySgGnfq2(emvtO^wI6} z6v&;dsC~m`_P^N0&-TAfL$3j511oQ*@oRvbMTW_BX6GIg`Ce&CyZdRQK)XzlIqM$V(J8dcZBc56|9l{9?x>>Wii+{i^yioH5l+9IqmZ-IL zVij(cSS3S)RLjfhWNze5(qasZI*C5xw+)z>&c6|f7&p28{V36dg;`kY)vN8I2SLK ztO2(Ima;tCwgwKh9T+?Ot21e@m?<#vU(q{j%{7QRVvF<24{GnOs(>GEH}nzVg)?cK?TZljJ`qj2E4rGhbgA zPHM4OWL*eRL=4D1sq7bU+EY-&FIFhU)A)hdA)4}m1zAU~)XJD6ch*`Bb}sfePSecp z=o0aH@{1l0<#}=sWUpp=_DhidrrD0=+rBY#wlTr zy9#dw3ofyQ6(_!@UFWV26VxxB{w*y<&aY~8hl;x$&XMbvAuJ#||J*jja$bJWmFuUr zG$1_HyvW`Izk8Ze{OraW(7Hl(efgc8eZP7v=W+6fO0DE+T#wa){qiH>AuHh{`}^Oj z8Nt(-NJPfrvP^3&r}ubu&-mYX#hMrQ8^FO2oOku0L%zC6&N(@lAJ4tt@b%QIs`Kea zZ17OpRZPUP)5aBZJJ&&4r)>!Ju|l|?j8Hn{dDXe#dgS?m)_jT zT=&f#`E%W0lUcqkYB?m<^3xd~OiN6p;*AW*Jon-xF{;z9lu{1glBB>7sDJOjOuuf| z1CMz5fv~`g$k;(vVBs(P)3u4?N374kT=HATXhN zn_>ElJ6mS8G5HO$ZD%lk-I_>^0fUQ1daI)DWeJ4WWlwV5#C7hc2CE);^g{K|1BWORcgo^wLKmSX?PWz*9Npv_w!VcOW1jv3)%E$gb*Oab*Md|IYjpz?`uk7c z@`DLN)7t}!yMJ)n{`3i0$@g}a2~y-Q&uT3qiUQrRX1CVLhcWLjRrvZ|O;Ri8;; z9$hnm2H79CF_O5FL8pi%REb@suqYKS4#Jcarc@8|iY~ho$p^G^vKozJs4c2hOc|zt z&eot8hHHKjb+`D@RukJie)C1A*Y#n%NI;p@~-`f zF}&=?VSLx#GgEV36FPU%s(ReoA0`U};j4w7{T^zt_QoK<3-JS8FFNfWu3VK+W4gFo zQ0&Xn6yNm=(l8Rs9tv_VJMO!1SER&OHaY1wzsn(k8)`ndH;-jG##hOr*4{*CQIww_ z4)4ex7TWM*^d9O7ZhG%GtthEWKg&*C3F1)wkH^yc!|n#F|xkOP7;nq zR2JviGsEB^qvI&FA2;|zr>KKem8|_VE-<-P3v)3}gP3=A;Iy&g^YJ;ZEB9 zVpZ?PtB49eeZy|vUOmp*o7ZZJ@c3$4^x@(~ZIVQq@P{0Ec~{-*f*{gh#H-61rG*>r z-V}3<#B@2&^m^(${;3*0(I$!<8L}9_Ukng^f6FU^1Z+!6bMrE3ouq|Wj#T*EFbg=R zaxm)DV9tajT*t`1>gbj@s-mu94CQr8FOrT4qP8tx9^==Ehv*g9!%v4>neTCJIgHiKp4CZiPCcdr6%pS3fJKbQZ|rKb3405%8}QPQ*Da(g#J2j z`R*5&6?7x-v`KLR+FGo7|^6g@L`>&mURYE6O0;O|4fGJoJj9rYsY7P$8so%?KJ z{YBGYR)=N1Oj`EUwejm=kKYMzB6YPbf081^y{tX$`*w_1x#U&-uypsb+h5>*OrxI` zls}Y0J0wp%nyVVT+r~c!ZLO~^<_QVy3{W!3WrWEz2W2&eSL$a|Q9G^9y+?bYY`m0@ zy-R>cJtH^NiBp8zWQK0-wSUN`5DT8f25i}6k1UomO~sp(wqMPl7a1M@154LuepKx= z-gH3@-)B_3F|sPsfHWoqx&m!CTx$}|YBbYvAMGAtc=x3tX`!>CN67wfe;;7U=}59% zUM+W>6CKWpv-Uc%ha;JR*BQ0+see3PES)MORz?PwR+Q^!w_nZkMnWP}&C2~+&2IcL 
ziu!^0#cTUh+yf8!VVmn}j#ny%-DqE|`P`_cm-EA6(0?Ks(m;i~SktsRTh|`g6X`xf z`YTi2cDgYef`;M#?_Jb?22WA^;9QkItM0%0jw$taj)0h3N&RANp>)e3Is%YysnXjac!84kU=Z9!f_8P-Ad|Pk+FgVZ z-WZ@k09>WAzaAXB@>4y=n=FA+DG^@M#E9=eI3fV!@lv~Lu?1#|=dR-{&3Nw-M6D$X zdSWV%ZVarOxb>6(FTeqj6?8xn1|=H<6?9`rt#vl_48@o!+K7ZE!hq08k%G+#8QBoJ z7%Y`MCMHLU=o=xl03ne6td$7-I4a>_%n{J!l89Kvn@G4r`EFdr`in^1cg6 zUV32-m?rjg`ILm*@Kc+*JyY*TKhMgM)06W));`02+A-j5$jRG#OzNLSu6~d}mb7z0 zKj0t`hc&eJygmGr#FVc+CSo%0bN)%wx76Hu-c9!>ZxNm04!2(@aNXWF*<$xVCW)Ck zvF9gG4&?1R@LsFXyXl9F;=a(BZWi8#-EQxYf?YP#|K`rFsjIkgWvsN-6y5MoNlmPe zj2i7DoLYOpXPE2ZD|fHMX)ER|-^1~orLeG<#E+}|`lt4$J)7pUNNK6Qj&YHQ6xqBG z9CC2EQCgKcd>j4aVR^>0aB0ogF`vlob`S z;{-BY>WB5`_4T!yXbFBuK#Gl^tPOH8{UtW+lxX7zuOZ!QIVi#6Ywo$bhpiA z-?8ZL0F|Y0Q9;LqKoQ1F8AU}c!-nxpeVuL}|2L+X$It)SJYU>PmFnp%s<)nC@6{X#I93B z-HXzY$n`aJZM$>y)h-sx!J3esC&TY8?LU!ziR>DvBX(vJP zoZ>{dn*HQZ!Gti9`C!^fJ$D#*OO9Lv0)7uveboETH>#&+x?5r0A7^NhT`Ws={-oyM|^jF5#d;9*v~95W9-CpJCxyS%A|m-mP`xme3HN3rGQsSzc2 zqIFB^ufXY91wC!Pf6`Dgkf3)Hb~WbqIZ8(F#Ic&Shwk1QDx6T1mv|Q`lKwS_AQSf8 z_k;WD_c>f`klY)pzpk=IB795#CzE8vt2lA!Rb0BpFD9tLd?&OMZ9RB154kYSw$14@ z{X$o#r_ZT27fz(?WnuNAgKC7h+9%z|do7R?nW2H^J1YV#GrLpq`7|skWqU4GE!WmO zY=<&pM*{zdCHL4_3-kd!8MPPWyFYfrkMDJpPayO#uo0tOn!B0Hw6D4!S6Zcc;5i;HM&z2sKU>IY+NJjBH4GhXN|D88`n0S9WI zdM~gWKFcDu)5#HE?e@1YWj`)=Rj0E2lJ8NUIUT*K1bc9%cn#?AzC1Uuw@HxG<@9FV z=?SA3(H=DlrtHU4%g=>~qt-`{p?5xv9MYjav5}f3-4^ zT<$8=k9_`t%ZNs%XFiIq>t24^{W(Ce^5xVo=hrVNT!d`0Rw-$Gq%p!ln9V{SI zB{R2K1umh%*oWlHy|8z#c*V}#^RDu2gb%}Xm9@km{$YX5ZZlEcDIW?{8A>nfa;K>T!p zyP165gqdir#=zcLR7uEkGa{HKQRG=O#oZXDZ&P}kpQxbZp2pyX+&!6ZOkP}2eF_oO z%KkY`HJJ9z2w$C}o)49nqM7Z^`04#zo^r8^(nZs0(JmRJ7Ln6I&R0<^15#5F;Wyh? zu+l3W#N)*@Z5xB-R|_14w_MeaaJA)mU}EGt_m3rUQ{z5DS|{LpmBWRnoR78&?p5px z@R+X>A9flP4$A6D69}%R80jFi{kWKD8Q<}|_CT1}&6I)GT@x`ndGgY{aQRsJ@DHJE z`9Gf`CEq^BhDKbp4oQ1cZ@MRhIr1RWR+sZ}M?^_%sVRo~HIky8SUzZwkt$5!9JB)l z{c^KKSq`*UK3&pqP503Oxd?4mRRs--h-8X!XU2aXI2`K}QtPVHGwdZNy2Rns)FCF$e?ZBx!Em?Q zCx@XPAc0fvtPyl30a@;4+0EamhQVIns>LGv{g`;Dg~8sKwUPy;|4OrGAM*w5Nk+;V zaICe}I>5?v-&1wBYtPOcpk8N`XD|2e#qIvkC7JZ3$yh7ln)u$6?Owy(6R5yD&pB4z zaMEnsdx5ER@_GU>@a>jN#xKwj9BjNy2lccg7~&~_>4`XW?~;x`V*=u z=$|kh+S{E!``CK)1|SQRErrOa8Pe~r0YCX2m5jbGF{X??J{$D#7N~bZ{JS`LYFOF9 zo)v&|MfSY09TEaNaMFm$HQeN|xp70g{79j0VV|b}Xu7cd!yRaAj)V}dEO)8tLDK0c zsfAGR=i|E>$col&;+w-5_s8|K4@EV^mo`{+O{hL()5)c~tk4M6k%m0Q??e-)4eHALhO5U4=@ciz>`I_;{ zLP4Zl{^Z~v9V41G>0WKPx5QaB^|Oz1T{jvr8l&Z>J~Fuhp>G;~dOf2A(q9Fn1jdpj zrS8TvGTqT5sUmre8QU*vSI|veKt~2fGyjKPA$IK**LtK4 zIRPi;y~u6&xGa1EU^178g`4J)5Y6hP0dnHysMUrcmNt}Et)hJTOON(E@4*iN&7^6} zCPM2j$(!7$@P_>ez4a~I=?KeX>s!w2iWP)x)ae)a?lyLO17wKplR^j5?hH`&@@>JT zAct%O>=+TVu|AjkWW4+z|H%aQcz`a2xRpTkMqP;Yjdps;_p7ljn=UF=zqJPNYzoQ$ zo?KOl*r6)UYrc+BX?o$#1xc34=`TU<&gNb+)7~(3S6PUMCk0@q<#}B*R^)4IBADAt ze$~X17k3Qy8IZ6*c%yCU3ZLV{LrHOvw`Uy%LU@0w z64)Xhy+A-k4qos#1S83mKpSqKQ(#<)TD{$9EatOpuZhUWa2U!$u$-CU<{XvlHsXO- z;C?nC(5kY2Z$kwOQYSVO~zHA-u3p!yd_;rnXY z3iUyh*&&vze3GA)+IiQ#w+*gA;u#ko+5`_L3Ji~N#Xd^HaT>&*2;ZGoc=K+yzWOI` ze>B@(GC)C4&jsFc@o#$X$HKC2eI31c_Log%!tf411Mh779rzkRziNb`YBc))Sd?Wv zZrvSH=qNAe9s--c)M}a$pL>&=@2hX8%A4;$dx^G#<@ghAHnFHT?Wb^2bolw1LUL^r zhQF^Wf488t`Ax5_4&!9Gh9C&NW!rc1?FV1dLbY1Sw`+HunrDl=Io;tucq7Ky1s&Qi zMVdy-3>6i3F@RokW2OXN;Dg%0Pky;|s9}rr+f<^tyULc+)Rh;{O7IfS_7axFOmD>Y zSkfmYwe_rK7pw&YUjq+~&2k4M7b9-@@hLF`k&2Vjx%mtX8S_Y7lRD`sP6&*s>LGL* zN}Tgjx(NGh_;|@T-cdUayFsZk#&wrE$5vh(J9QYF&YWZnT@`d_aCY~OR8u4iZ=4h! 
zddKk@$+K*!r?!!@C&v`*YmD!1T&&5R_8jp`8L)}&n|-_hYkQvP@~C{g;&5Sz`ykF@ zj+Bjr_>$9WJ*$YSY!M%?Zk7?|FAE|k$=0)pxo7<$sEs|{pidj;?+Dv6LApAJ2M#wT zB=8RM50IWj&`jN}(Ee%3N2wd9eap2Bbh5H%g@x(~HU>!1O&k_7bEG5^0xAeWmX~)7 zoW~ef<$0q_M>9A)=5^Q3z_Qu+fGIZSosLU9okp+Eh|eac-5J9AyCNtE)SSCB#+U_z znE6~$HRcx(&qa$GweX5AHEjGxoXpqR^yP`^8*P27tVZXYCEtegN~xboL%YmNPDk0+ zJ~kbB`r*Dp(yPsl*|8hC##$A0ufO)xM=AKYhu?YL>Xk{h+5h3#uG748!5<|W#;*}m zhv`opBOk-{j0#hGF!MDuZVG<@fy8bt=gcD?_n&997ltU!8nl)=bZBGl7G zHYOJ**l6^hz^E?RR4}!>z*2yHa}ArtO(E_B6mR73i7jnV0PP!^;`c6F>NoC$`PlphttO4>`>MDMr3NtmRWoPLibh`jQDb&GyEwo4L|Rq8s<0gwjp4nWHlb$T|IKTpUo?mL5J7@Si#3@7 zZv9lTvYts6buz5Ld?sLJ+Qsb8nc*s(f`SH6x-;Y=GEqvK7eZcMEMnS(2qVJTdidl8 zx0{*NzE#eUPkDxk;WVBYJ(QEaxq!RKRlpFJg>AVXWV(|sY{(hU^ZAKq0gy=18rngZo|1+8R zHvq};vP$Qq*;E=HE%7~>XAF6T)>z!B4sg#oh4%IBodC>5Hx1MkG-JnRG?b;*rEeNZe+8!1*%4@Xa_i!xmG*$iA9 zf=yGzmt+;3E$A21S3bdgnc}INZ%Es`|7vq-L5U9oSQAdwM;-Bsu1O!iMYKIL_jwH% zs_8GW=vCgstP5z?ZY0)N{p|Iv0+}kf^lk2+`k*(|FnecJgzIAazB|;nPP#X4=y$$x z_`UE^2zjGaC`#P#MtFdI&E>-^6g;gF@iih4!B9X&WVyqebld|Bo`n>pr5wd*A6KmxCE#Dn^F4%#0i^{{q1 zPBflue%8tnA!<1ZIKj`TESEY`&y}62b6rH#?^VcO38=jz{CfoA7MKfF`x~nHqRJ%> z1C;n)kxk&zw(|GVI2}a}loT8CJaFoC6h2XZ>avg3wTF@{68FPr908J^S0KQF(vmUc z=}-eh-NcV{8x7&q&-tmaWFZAbJ=}5CK=ryX;|>8|(pN)W!eoFe zZZ0KZJpL9HW($ogEB>joDsr~VK1pEcmiwEe9TV!ir3jU{6ALmfx|gQ?TJ>Xu8TT z7`usP)}nX&Z4sKW%jwpp(J+WPZ^~zHqMOu?+cgb3mpt~64qqvha8M7~6mKOqBJZ(c zYWx$^jpSHSfXNFtfJ7T+PSu_)_JzwKv$IKrY4`S}8^ghj0y`~qA&Kik6-7mu_(s$QKw&vYe8P+u#&fsopcU>ck)Ct5{J`pZrc zOZO(>Zqy$dFIE}o#)R)ks6uXQb+|fq4fPk%VLl!xd04~QqLhmue+0~Ttx(D7UKRQ3 z2Aamqgo@&{s`W3tc>nhju~AyB0RcTt?>MGq)Zy@4e(Qb^z6p#eaAF5r1Hf+$G-(0rv65e!TUvYlPvDNp5EiOc>6~9@_O1eu<6B7E zBU4$9O9Q(x0AtM8$|ON-=&e^_YSe1WnZ^e$q84jFcmL8*?x!fb?D_HqzDoaq76HB_ zOw?Q8e3n{g`Va~z9|3rEvN5E%qUbXC160t#Qbyw{rg%jj9cZDf)~}el-HKKKS!yLi zBCR4Hd4(zNXzGyM>gVyelhC}}PBSho%d|WFh4T6Lz~wph5Y0Uvdr&eyO+I*n7Uy3% z3k1*qN7N-|qCh@agqX*PiiBJhRP`d?^@~zs8FrOO;1H}?gh9^PhJ+X(2e=@F6Sx&s zA~ZF6#Lx_3{4xw8m9bcTp^b}(6kQv9rdFNR(5BUDhw})!_7q3_{sVC?mvez|t(NBnA)eO?p2gbm)&iI!}x*2I9)w-pEqG@N+jF)?pMnuajUH{^%6Y$vC zW8Dqks9I*)yu1i*!{ho)y@+ccm78^$4=ajNVJ|G7X(utqG;#yK$YCL65-a&rLm_6@+iUGQEj3yaig|5SdYqyaErW$3&W zbKTeAGwBnAoma5H%tB;fKt)ocH40<>5YA|ga`tk!vGQ{XdP6DHBbWlpAafI!gcab& zX@Iq7$>~wz`!C)%8xO{?e3mKa!y)LkE+YNIAiX5jPyC;U zM@u6`Iu*_DULls2Y9f2`pw&Y&Cr7O7bT2a z_h#kX!>WR%9Mf=ZHW6-Pn5;sdn{t*?-Hn0So2dCqgO8TM5x+QVMYYH*E2_QzthqW( zl%;dFBr9ovQ^AqQWq`A9-S4+yKw(IVy_|ll_|zbK!jxVfb5~)g!mEuGnvZ263ooRD z!x52RNa~O9Zs$V$%v05Nk|iiUSXqUkYy0%nlju?N)mrxu*9BD)sJQdfgm)GuCkLZ~ zFMuUGx4d5Zif%@aJW?0j$Tz|L;%phnnP~M(cU75%KcCmXaWZ=w-Aq{2!(q#MP0jW0 zzZGamv8?!*M5b`aL0PbQq@g(dNz2t|hqgNVZZR@ATCFx6&~0BGD+TE;%waSzAVyF!)<4VSUfIcSHE@~i ztKX`+F2aJ*TTXO}GAm(HWJI&J;6==aEB_>??JVw=`;F^bXK9cA*6yv#&+FVfGC0?I z`X3&j?7a}Rz*G~f-a;w(xcu=8rQv@NuYZBn2&B$)UaLAN4F(E`^#TJazjd+SX$(1% zv&rWHCa>~LL0i_#6-rHf;y)&=>q`T^Q`5ikkZ2I>2AnRg7V2xoU1^KxV*!LYzYwiE z2fDcGca@$6ou=GSU^74=OLfF`4;Bdt02L9C9HXSDcS)&&nnnPb$d_6NOt@?70q=za zX@KjxEK<VZy&of-s1c!!c_O~$<&zj&uVp*lIQ zIiP`G{!Mj*(b<0`1oDwUL&24*USK=&3i~j-h$(H2ed99=LbJ3C;TZ8G=T9~UI6RThZOf2#(n^r2$OYeP>J7gaq3ho(={LP{Uwpn zve43Wp7JeE*`JH1j&~USF{GoVQneF9tzxKS+=uPTCavz$!xUQkiK*J%#-tG{ciMZu z5MoxVda6+Zt26Bu32|1dKE^(4?FIS|ccwhWD^emQqv9GZ2_KD=65n^=j!BWFpWiGU zWp&^#iTCWm(HfknH<=*F>Gy_pfhikf$S12d(o2G>8m~XO_5h#q7E=~Fg0g*3r6qSGE$ zL~M%L-Y*^!TSCOgpxx!|Ie{NxK&CGknsNqXC41xm|7@0rPc`PWRLEa+^i&dDLz{xG z0#ATMR`v1C0h?xgl+{7A(V>E}m?6SKEl)akJqr>*D}aJq6CADwWLWMT+(lZ(IUAN7 z6C)c-_wLLqYEm5#5)QEJ7GN)z8;a#pXV2K!I7^Opw|ZlNG}p}Y54`zd?yODlo2_vt z;{z@mC8V8CdcqzL|5O=J4!J|W%~Ogo)s-pAi00c(`326`o}Czd7ja8w?y4a_(4+Z~ 
z*8USotS<5qznxQGpbOV60N#T*8TpqZ61R3lusLCBy7}s(xk&^rPbyHItXK@XTLQ$y*2xZTj#>lG ze{eCyG2v z%AvfFHLzP;QU{(nQYR0@kgSL5(2L@>x)@3|0jqNniv){(;MoLh^=9xwDv5NoMuLe1 z#ukoR|4^DC-W?o1Kidc9TER!<3O!cv?CxCxbD;KYw?srGG;?w;5~w?rux^cp@XgN4v^F79Yj_)ONKY%cVJ zg%nFS|EchH^;Q39g3_ek2ZL*EJ&C+^)fmA0=G+!>Z26O&)EDHcc#_yf-3t`>pe?}c z)ZYvh7;LzUgZWV!NYrUXh9hsuw|mYkkW!A>@#Rxtzy*ldfXMBoIC;b*%_&IbLIB-ZEkYdl27SHU(<4?wrg1kN>ve*CF@_)_D^kF0;z1bDa4yBu93|Fo zVTRSlQ&CI;M|ADwGqKY7B~UnNmYi_=c9K{wgRkB}bWS&k^gqZ5avYFhTpz2uRZ}j) z#py;CA5nXEknvf2(>z)zGFHeQzL zUa4Dn8P;1xfQ;g2Br5HzQ&G|l&<*)U$eJ-4qR(WHlpe>?RDYpnH;d#L)t^Bm<%&|? zf2&Fwkb?(_O_f&aL$eBwGJ{4n@IGh}C>cYRP(!{uO=);9NRfbiqM(8WSG@x1fNx(? zeOUr2?$Lxzw40(j3T^a;H%RrqOQO&U(l65klW?NOEZw}6yw1hW!+~A0d#9!4Rn^C? zrS28kK7JW@Qq8q-XQih2&ibNpXtcvs&^=+I#*g`c@)_e=hY=UCMKYgB)`35ch0$pn zMOoyxG-4vp@k`_dS!u2sH+tiy*LTh&bVvjpG8<2Fjo*1+t5@XCZ;i^>w;!@z6_4J9A>nL5=AmI0U=lWRw`4Zrf z?}IiE&;cLLzgs}oDP*|QVD(T9iD_}cu@mr1zJG>&)$8x^ug>sSZvYu9=&eBEVLjpn zOFk$Ci_2XgG^(#>)KoxdJPJbN{&2GiN0UsDIFbu!K&J%=hj|P)S1o6h+Cl!lOS90b zq`~3)@7Ryrsn%Y|E@V2A5t7rPThpu4{dA0Vev=(j0z^kIN31PPovK1$(E3GB4i2xr zUji%j;+&e#6g)zMDJE6Y#On2LJew1%AK#-x1BFxjbGLxW#-onV5i z=CP^Xj3`q7(d{l(Bye+2{fwv~Q`ri5 zqn*uI7R)e422-8YKaBf(FkJ0mAhlGd(vt$TJ z#Aup+>EJPYwc1awBlU)A^_0)41BGn#`x7?%t`tuWI1uR%5TPigsl1#V2Vt}oU1AUB zK!!Sdpmb0vH#PcuWFQTyUAIG%u-{R37}I( z!_xoTXTu^G{sb#5VAsUjmJg;Ry*{I)He3^2%>^0UtEygW!#tYPT7QQ!%j>~5Ol89o zsV7SU0gPLskmrr)aT)TFisi!zm9gx8d#23L4@4nk^;_Kylqh{XBINoSp&``SUH91lZKEa0Q=b?*VfThy` zd)UDta3d(xlDt16`Zl9?{5s?U7ouE3Kh8Fksvc4v1*@PCKmpX4>B?cj)B(1aM!&s8 z`tnEEf)=7Kt-c3qupJgk58Z?}8iPH_+Y*QqZ#fajsP>zc05df@vfw<*@EKw!-!oAf zVxOs?>BG4jG0))aN*VWEM_$5Zg#6xJ6D+`#)YI1^PM|ib{SgPqJ}Hd6;%kU`owWZ< z$#tHD@SB4IeRuSp=?&01R7%!IBcagd8D`UZ%+#JoCyVV-dLCuuLh()*f)KwA)l2zEM5S?ZW!QN0;#R8wwJ4HhjU5zx+oT{EIyQC42q{ zj(j}O05VI?cVsa5V(&KdY1D%g>d&) zV|A6^T;cQiB@_%)Y1a4S5+LnGIrx5yb!AhTc?wKpk*=D16e=eyS6uVyUZhNAojsXV zkq0u4ZNW06;nkn?vdxWZqQ}Qs1s8XNu8@4wh3+|?(Z4Qw{EfFs*WY`$^9Bpo0N*k) zB**Ek6Uh0T-r6m&``3Z{f9f8fQ}fMvpNVcx8u$R@GoEKe1gd*TU*5BoZhZ8I&-e{^gpdO|DG)>1o|1xI&)A6507|2g0m> z2K1~8Gy^9Zrp@W}KC)=i0uIRJkyiY7rkEq8VQL!7lwh0_c9kO^871A_xO97o6ge?A zm@K2NB*=jyKsObN-5*-;k4fSF*dMDfNFXTVm7(Ya+gAmpLJ8$(QZswD?D){DH}msy zIG|$TQJ4vB4@i{*2w&oxEDrC3(Rm{aCHPDQfYICUK;!$^nuAdY;u!c-%<8U zP0E{B>gYs`nr4AgD*c*-xcuF;knLYkQp@}v8!z_}+lQZr2k+o3(8Ec;Z1^T6*Kv7Y z4!*!VAr=OoMVL197_SE+;3k#7a|i~OK)^$`k<<)p@Jc8J%5G|AoknJ>I5NdYsnm$2643C%8WpbuE>D&<8-y9XS}4Sm>7Jdh($$-I7ZiqkI@z71HPShIyRBUurENJ z?gCegYJxFVq(Pj)FEa$ID5kx^Xd(`s<;w(0#UoW1kvL0vI@@akjJdH+Xf?Rgif}hqkh79^3N;iyqFfwy#aiwZ_NxcM+Hpf zRm3(-5-RcXkzm>{=%?@XzS;35b)JK;DNEh1n;nHYpe}i%XvKt2j(rHG*H8sG3W5f* zzaXt~zzuvgt}FX=q}3#C3|tkb2_ZL1-xpNJ38Lyfis?wxdaW;`RrS z@5Y_@-lQ$rfRFjPC-#r46>TGmb{zh52miF`caopo1!9skybE^zN-+Ur-n#i`{n>K2%fgolSoJ_hW{=XU(f z4sWs^Z+^opJ3foiUL(F-*;PlbdVEFX5kIxH@iV@qf?;4)a<)u` zpu=vC8bryV>cy7U(Q%90tWrSI6F_~Y?3z7)E#T_H?vfAxiWC2P*%x0?sB}A;8FjZ) z(h46{;pBd-!`Rm*DN554o3Hysz-Rj1;+}UBoz61%0_YbHXroeg+oI~YeV0 zY@q{U0T$gAjBq5PAPaF^-%w$H00ZK5P^V?fzv)*AthCvxk24D7P@^WhN?$RpzQNBF zl0W7_^yF33yo8;z;aJ7N4{(tq1#ujydhY!;4dN1naoZG?(xvES8{!`Ak+e#4fL2`% z)68D_`Lm~&ZsM9AEE2Y&<%TWm5`6JHU*fbd>}NWlO|cDjnnbe+&|YWEn}HbvCU(!2 zb+-BQz4Uz@=d;=d?_yQ!FddbN@Y>{^i64pzPaMrd>`}ZtK%?*8&KWB_(j=sWTkNvi zsCoZkM@cW&(W&fv3-a@>duQ?wzbyT+%^uHTSluWBt?%N_WpS4u`JT8A#r?IJy<**Q zBs9*4PGIt=kL*en-Kt?qIu=sZ>;i2t6e}~e5H9=G!$Vz6@+!)Y2W#bj`cc4h`+5P+ zt%$!J`A80ZlMjqbH(cd=u1x9%7xbsFb9FSZRADmptYDA`Gm69-nVMprXPMh}!%$$q z1!kFMpz?b5a#y)Ta058dP-Y4fZnCM16OlOK--?8aMH?GjC}F8D{*kpPo=JCasc2Pb zpTlE!w}xDK($ly1n-|np_*$?-dr$oE{3C(Xcb`V>>un$HITS4eZ4-gfSg{dk{6va8 
zf2KgeWh^!B<4e93{KKYHy>xr8=nrfwAcgZ%_mC6}u3-8G5|eu3J|$63D*MBo`waC*?nZi3_71E`kjucX}mI&0h`~%Uhdk|0~2T3aIy(gNp zn;wl_8?$kY>%XB)^jUt*n|*QQ8*5%g>LUm@V8w?}ry<+&W6x1Kp+nbEopnE&Lk#c7 zf86IQE2T?Z=#0-si{r$9yRDjo-qW|~=%MQaUB+oCXuodL`RZTe+3$>tt|Bu*!Ag(U zRHT|a>LP3O^U=k%Z84e*@~oJ=J>JS9HBFYY1X=fj0TCZ$LLQ<cXxUjF3t z6qb;xhM}agK))K83!%!2q?JDdxn}+$ar*XMTy zek*#KLF6ri`NrGWm}}!~X!jyFb2P`P6_?%$Dt-#zj5h2?JxJJNk-L>ocm~fz5TbMi zisji==go5~%WB7NzBXQ3J6C93x#hK;EF-O7Saf$3J+z*1#M}mBvT+g>OH3U`o=tER zHfAp4&{t=C|D%0e4N)B9#-{f9Rk$i)n^crD99>AhmKIgZfWpgzI7g(XdBy#=jdwg( zVsG3f3fZU!rhC%=yT1Yz>)(5ux-|4%cf5)S=wbhv@h>(i+QLk0Ez-|n*`)_R%Vt!o ze#C!Zes2{8Hd_IXW`85v7?qPF)*svi$_^$a zK*ClsOYa~yCvYL*4c3NXX>hGGgC&}S+Ai)#xjGyfH5YKZdTi&^ zZXnn+@2D|gi>)_zPuD;J9*Q%?4K5@Y@1~uuJK@%T`}eO;keh8+^N(YFZl}Z&bYE~b zuN%9H4Ol z!YmsiENna8z3Y}Q@gfF|CT~kD6l`gPY*D%~cK$_NbXp?Majz9e@7gODUP5xs`j}j6 z(}V1TJNGQV;ePQjHm@m*cULj!pmSbZd$~rZ#e1S^%k|NX9J%LGJr+B)CA!(O=5``Z zgTC)Q+aof7QDD}GW{2V`(egqhwbbgrbO(jx^O<}NOpi0^u^R;?&yr5cbeWaN$7o)O zFBo1ewFsFhG0Uo_#**y>Dwp!;7f07cv6BNd8BlF>e-0~_D73>n=b&a(7e~WN!lHD* z6I%&Xq-s_SJ+3KMw^ZGW#q9^m`_iSd1a*@ckx+pv38lg$3080uC!xgED;NRAX)npb zV>&;e;{WM>jvM~$b_S7m5KD8l;$)uPVP%&_K3I}?opJP6s5?`fi7cFGI%i~JwTYrw z`Zp$252`R3FUWi(!A9Ls0{_?Ft!?NS$DyKKwP;*yGZgVoMtkxuEDZ;;ku)@7{VlO2 z?Xb(Hx@B&u%P`$KlPb%%~7V7rUGEgvPOtIpPH>Jurv30~9kWxGo2Q7v)a#*s;8;wJXszk0N)x$qKt>Ie^(38i6+l z_Ogm42r^M3U7RPkU-xH!3ap{PA_vi(`~3vKL4f<8iN@ko!nw<{`Cdeu z|5jaO#Qa!!PaQ4WvE8ab^*-SVZy=(2u$}Xj>`6BVoEclGJ^Nc|GdV{|h zJcMu=_<)QT>1^mdfurXk%bdZqc-BStCQnV#bt3qVrSR#|S@Gvx4Cz+f%8fUrYr=l@ za3Y!T=d(h9pvn?)D?TG1glg~bC%Q|atz!sQ8WF>xM&nkQI(hhI9+rS{(sE^2RJ^_` zF}TDekeZI>gla2vbVRMfx|&0{b*mgh@r-o{i*9lzRZit8E#+-CG=9oY0YfZMi0yNl z{}qb={mI2ukJ7CmOGQma=%oDNlj`elgo>d2%G2VS$Y-9epft}ZL3qnIn2wVR!(=8~KP2OtE>>RlV3Vmf<@F*Z01R(;!WzeW)1$Lq6bftRrLz(rH?Ln^N`86tlGD?1 zlcMbxrcYz~E>43A{7X|rjF!{Zcg3MVH(VsSFfF&7=JD69&n3qPtdj|S+sL#7tvZ)oBpVXe0Hd8TfjZZcxk$7??rcJNR$-2{RMH8}Dc|+R=HtBK_4_Z`VyN_wl_iTF` z4blff6J&dz*+QL=hp&HlhMxeEdlj%9aE~Mxq#o2}V5nwU z359knFrh)JF79y{2Z3gfL=eb04S{SAW!0K$C~Nf*L0gShYDA7kQSn>f`&~-hK0#Yi z`5{as2Ih3Ni|TiSZP1e3!dc^iWt_z)uuo8W|`Dg7bIWO4B>(9tDYSssn8k*-40eJKkyQ7x~^cPpftvT!%h| z*dOIs0$)plDT@uj9C`3@sn|c>ubsYgdw<89M~oRZ~4vG8!m3Oa3hpHL}h_4*)mR_)KpPZ&E* zf~PD~wkFdSc~~SwRyTHDdl3GUPNjCvUFj9v2-}=VWifSD01zAjysl|c9#T8mZPU?O z@KPhTHPfi3Xsh>fZS5G)B1Xg8(a(96Y9#YiA_g-CCd}ISdeZ;{#%}-s*nG)F52Z}Pfaq$-Y|9k9Tu8cRi zGZ{ER^v~p`0}V!TeO|XyX~$TRNxb@43QJB>i1tFUn20hc=6IZ@;sR0jAB07&L7 z(Be?lKUJ(}kU4;LI<>$8j?efN4lc3>hXH%=b}D(tbwSzUYe?QV6!d!M zg(<>6P>Wk;HlUoCXNG@U;ppXBhYgbh%k9GBXT?u8oGvPiygXAX)-}oL)A`Kv)FYkK zM(d|!%9bc#O}^OSv|mfjt&f26x#$r<)bD>;mbLw^qnef0eqgYXr3K~7e;w5c-0=bo z^s=&!{7>oiY30F$CIki(QwOR{op3Gc+&Rb{QxelxHrnjG!MutG-D)v+`A+;^HMh!; zap}QVG1%g#drz`IdfQq30^w{gJ~6Bckyf9A!`58VR`g4>sE|h|sdZ1K9!w()95k5t zut93+)0eu>q2`F z_KOTMOxLYk<270~5+?kv?&TkV;d#ESYWl!NnV{N600Xzxa=OyrNm|lg8hmY?h%!Rk zn=>Z~x8x&!1+lIfm~Xvx70xwo5+Up;@5s1iX*{|vW;o%5Xr~p7Gb;$C{19hDUGN_Q zPWMs{b*mnfT^!r&UxhVPDRL z2HTEJ6kV(pc6%pg@Y9qZ{uz&T#9&hF`{X=NUUOYZi6Rx2Nvp?WxK^;mh7d{Ng^(0jsK3D_K8UQ&g#FXg);Bn@ zp+}nJ{PIq`_J)^tq&gE1b{JV76uu5TO8F!}|I?*eJm1vTAq!S|Y>klQIzx|DUT(smm2TC}-r0gd)j9Twl zcE4NE#vDY;_))_0@d~SXO3pk8K+NX&)ykyDwVl2@S-VM9mhqH=%$Lo^@ENFk!hvt4 zQfJ61Bm)UwS?0Wmz-fPZ_Y zOKr%^M1knWQTU=;Zt3EV_5#h}M=5j9`Xst%bY@Ap@=Aj;*YkJb&Wxkoz6n;a>S{9X z>4nwGu4oCTT4nk>{p7zT)^?@vP%Rpr_28WV?pAY?wM}bQT2A|^K+*Z@m3k|uj6?tH z&jK|vQ+4-5V5lSGW=z_YgjloOoA-L_XSSb=Zne#E;4$!rRN)=o2Gj{QY@p zs3|`Q50c$6D+ChR%}CV%X|S}g$^(z2FJe0XML+-hHQ>6{BQxa_{@_U@UjhnlsP@t$ zrDf$#?L+Z)a@xrkqSd*Jak#|;ai|zM%$(4G)}pQ_z|`D1*Zsi5=ief+oW~$QQ4{{+ zn*>I+v6L7U(SN)FGExOPp)BMidW6iYAi@7WAEubF0ksL$5>?ws_{M>W8 
z#%xy_H1@Pc^tSGh6E}j@A_T3aBAt&8xFxmx<7Sj|v7h|EEf5t}*UM$AW{|(FK3KVQ znXIV0_wsh{^{qIye!FI^@i^x+w*|-*)Y$GTZ;BPafZqC@o-8Sy7Z8Omjps$=Bs)hr ztvP?IT*bVQ9C1B|`KE`y*X&eB?5Dt0`@{1h>UKD-JGWzLT?4cIGv$Qm{NwM%Y=s1Q z-LBn(T6|Wyjb6p+P$zA~vd)@XWRDXzI@ZKFgx2(4rLfu5FL8SPIFWKSbj+klauCy6 zdn!79rYt4^H6H!ju{`+1kyTD-SmM2~rcigi=6w-PX8ePU$H&6PBi{Udspb9Ou_Qus ziV8$qITc?kuZY?2^D=42laW4+%=f;jsQPk>_hIpjI9nXSH8Z0`mi#PM<|=qGTbyN) z9`Y`DqHm{dN1_Px>?8uZ$|_zXF2ANTSJG}Ng|^%1OtQp#43y|am(?owMLyQnO?yH0 z>$G5TKk^BFFL6t~SU}pk4HRU7iU!dg`9xp-w_wY0;cLoJ29kTWR9LPf&Qi{4^JpgA zBwF(h!q-Nm%00l;zSGEfccQ;%e}NZ%izB9ypCg~#Pk}r55`d-|C86S-{oKZ*oZjFFgGmq5W1`)9xR+cGUCXXCkcwN8>fNx>vG_F8rAwj7 zv-c`f1 zK2OnNs!xe;A1)f~Uz$xEsK2YwDhf&2ZPe!SD{yCj-Jzu=-&H@g{w=3TR;$~;KL8K# zpb>}1N1-^@zKFk)=E_bKAt`?+j20h5J-0>Qz74XJ_*T+z92 z7eWS&U@@}HZMP@9*~Z)^_9Bpqnjsq~qClCA523(U>XiJ|k;W?^lv?h3bYpeARbEO_ zfw^aI871(2*!VO*agj%+oZsEmwbIgd;{m;WAijA};5Yn6lJc>1gan7Sob)N%6U$H@ z+TbL5ra6Jr6{lDG*H@Yda&wDA@RxOrnlFk9 z$nz`IAA?xy0K%PJb9i>)1TOJ!HuLnSK&R$yiPI#d4=<3I-6^c9R5V+8bK1LctSjb7 zvyCd0>im-a`~GsBX}1|c`1DZak)9t0mnNm%p-PfNky&05o7$d7d)JHwR$f~oEXqzZ z#udePDy+?Q#@`wxVsjScdy?4SVT=R{jd-Z4PIV13P7KJ>N z6>N&Yp=!Spv$n2t<|)&5^CSB+<0sXmx*29xXU9@hJ-wdRo&a|FDIxj&y) zU!Mo70`;gY@(jVR9i))KeDE9BPxuX53Ir;=&8WIF>|MDHkU1fijS%les?={ zw3MlJLv=!Hq>#C%WW z%UDZnKm)6P-Kt|!<5h_G{u~;IdP&@6SBpHKPZS%Y080Z!xtp#ZCO@eg2q-LF^-y+A zxABk&vh7xP%aOwr_!3k4H3zzGFkjizU%?Xj*`{<-`vg18=7u&7Nso0jQ1aRr+s(sm z7pE7OmU(%*5*_$Z?H-ReIT1S_m51?^{Ucn@*|xPd;J4y5%=o({fNEcCz5Z>{0K`oK zd;?G@|Fg+k#H%GFh2r_1Fu|42^epIBVpp2)#ej7I)7Lw$fM>&wJ1T_D(IIbx<9JnR zqwNcie^P&6dRYI9CmZw^PCaM}U-3X4dya7rBDPAf%e`cGdZu>R9SbtM$@9mvq={#= z;1_C=gRbgIqOZOC)g2rm6~udgwkKLwo9$kOR4r++VrzCVF^x$w3SUV9gbacEJyj;t z0}V78U=^<#A!^H}Zg0$&I-x?PID`4g_b%V|Q9Q!n0w-&qYjaRB1{OsS7ie(kYTb!i zU0q7Q^>%LR!Pf5Xt+;f{3CRV?&1;*t9$KFrP^erqAk}HcdN?V_a!m1i10@E+*llO* z<(E;jzb9b3(>Bq}!`@?rtO;+gLC298Nj(9Kh{4VondF@|Xb?q3r;(+S#F?M+~ z^Ub=Wf3V(9Z%q1s4Khz2O48f4F11u!psbGUY87pY1q9Q&_XFfCfuh9f=Q*7<)uX%k z8GWAfG8s7-dhg#PR^aVuNnMT_*LI7h$BWkERL5=Td*iYAPY5QOF=Lab9mv$q5&Z_zctulaKAQxGy_@XJhm>**{PP zR%CK^^IsL|F}|)-%;ir+;_h+}gfi=0G#|IAt0h%60LvbT1svX|%9W*&m~EW|)e@F9 z?IZotHwPYCk3`geRE0%0V}1#zO+Y`tkDczHWyto^GTmTmam#n?rvKkpv^}N!Hdr9F zcu_iIIsR-#tMXKfVSY(b^?PFZY#FZ}*cf=$(dYi35=~iAz2^sr2feMNi#5zQlzN+e zT^1yTf`bC*C77DrI#(j$uFkNTOdhoEC23{U#zqim zUHg*mCf|gp)&NtG8v38F<@9Z>&zLK8@$0ObA#>egWUXLHkO<%!mV_e;rtX|Plmygz zATD6>H-d3FA5o~?h~07qv;vw_DZuMoEPXqQXV4eUqG-KEq61$ z-rXhi_=aap>%WXLlZ5O$-nl`XtBhC9(c1V1SVRa6$YarbvN(Hysdn16cr(&L2BH_y z$hxCZT`!N{kqpvVoi-O5YFFsm4Twk;thQ}bv$tuDb19U2KH77i zK0U@Op!fP1+)kEr4h=tunYGlxYE0Y8_v}N$!ZGyFM`CIk9~W#m`Ta97`r8VB!^www z`_GiSpHtn-sJ@^IX1~?FW(R(Chhb3F4Q0#U>45`b5SN4orDtzbgAeNS!%<^YxA*Z5NffBJr7}TB82q&lFmWEq?9(chP zmL$9I9*KU|l4sY#()94R92Kah5Y2s@snPMci^=dz86wmfG+3LvK?W}qR6v%M!arS2 zA+$$@j2sGNnkO5|&W_i*HFS-tl}4HoC;(2C1dpaz1JPGvJQmNt^rwV+;f%=MeRi|( z&x#qr7Nsjv%u0*YaV&-q;3$M8I8!cNJB~Fjdv>Czar-t;+QW4YS31hfxBWpt8^a5K zPb7IvKFo5ld`jFMk6X6JNf)VNB2^s7DtHZWyZP_^8Bti}F-ar_`XeqdJwv$YXn^;- z|#enXFfSA^qZo_3OL3iW$R2I^e6re0i5B%b z1^ph3ICaF!A7z}7dJ+A#l_!r!<5uE>10QM86%+Eu5Q@&(xoQ5-nNYdb5qP=$Vw8tq z$+WUgSh>9W&iJsbu&IR!i=`xf`%b2eCy}5*uzrZdm4N>@onr^5z)E&a%ZlThKI2w- z{wP(@54(NWw)9lDyh}O91rZl|bA?^7HR5 z1&M619BSD$=AxxWEsZq;DC@MoY`b5yxH9hv@``xM6I$L#PqcsPXtI6Pfz262q&O3y z7yzG+W*pf$Xq(%>?%Ypg#UudA&T8<`XBt29&hXCZ5`} zYT$O{oY4M&o*%WB#Oc0U=!n^N@qD_ZPa@hfRo$fs$SJoRa0tT(U9EZfdmzu}p;XcN zuh@08>3Wib`Jb=CMFTRI3PXwWeVDN7Vkr^|3ktkpM{N%SK z>Twj7@Mgs1{UycgguCOL)JiRG=jm_vtn(UQ@OwC3dm}1kzo2fd@qvFJ?NGgmq6S&; zOMjuZk#^;htpvQWP~n`qER}l2Bxgxbg+|B~wt%?3a){zu3ajEB9$&6N6c1EB$HUd% 
zsdqMe|F{^N+>?7G>dDXWb7i|r6oiVlZ68?sH*^FY8q8?ar~hRbs`4gh`AZ*FkO2da zvZ`&Rty+4|fyYlwY!V=P(}ZPBEx6{bF67pg%YZD+tw^n6`a6&uUZ5W9H8p+T`3o8t zeeR5i)*ZTDMR9Y&>N2_z~Q@6yO8YW%85cJ7s2p0ZC`{Wqn6 z(eTf@z?;=7+DwZ$jpP2}`F1$Wq?$b}IYE;?j~uc)bwQGqHHL$Lj)N9E3A~P6~lSJr;qe zJ}WS>$@f9yDT648OZg;Sz4h?XP4j^XuQy8^h00J;L4q*YEHdFv|5oO`H2N;G$CLfn zrO>|iLuUK4b;??#ov~(*$KKHyp$_~V5~)jpyx@>dGvv)9oIsuhTOT0I6CNJT4cJxh zCxrRaY-G8C!-h6$$`>ziG9gFgB-%@R#gS6B#S{?ts2UyQ(M^TW-{CdES+oKscnFSF~gCg^U}Ym zDuZGbRVg33yUfxNYn@Qc_2 zlS71JxyVPZ>I5Im00O%B*}zHsGr5a*9nJ+Cz8&{uf}f?kK9dc1J<7ZTCs-9#g07GN z0f~i(#rxfl#c5{5)2}f2UN4X>D$SADyu>sqBrs%i8pDrz6y0TwfGtAeiK2KaXhJ+pFC#S?L zW@Md6IxbkE|GP%Z?69QrQMal<@w?vDlO(7VzOP2X^bM;i*A+GIBf4@Gp1RU>V4>{W zXpqQh)zW6`JYx*EEpxOJ9}r2juQCIykWWH<3SIfa(Dg^WOCavnVA)e%eQ(*LBwy2- zkS*@5jK(mjbAcA?D56~5-kCwUP*uDi5opjVb<3&y98jL}DBN(m=a}PW_TzQ19@1w?AhMvWd-xCDf>TzAmElqi04e<Dp_jj1~$p(flJY>dXWgk>Bw zKFy(=f5C<}yu6G2c%9%hBUlo6BJF7TGAoDYkJU#0V0Ff86x=c^xW}$pg42wCy0W6) z4vI$~*83^F`BS~bd?)?lFTH@=`RY@DP2d!Kt-n0;nGqi>)fj%seEf^jwbuJil3w)N z2|x1%6gMo1e08RH{RB<}1=7wkPh8*~)dwddJfoM}uMEjZK^xT-nXk8M6nK52-?O~p zM=yful-!^lG$eq=)ac&LpJVAXI_YV%*@d}AjuzElqfy1BOEKGNxkBGaU~=OcGB)th znxJQn?Gub+q@STsj3$;}y7DI}xO8lljzYYTD1I;{)9Rz7!N*f=+7-=BHF#Q3LmU_{ zKnD*fHkCb57Hpn=!C|(56IH%W;aW)n$Z%N}IhUvC;3ki>tZ^(=LFRSY*~eK8uuce_ z1d8o5BC+*!T-E{4f*IH{#qPZ1vvH`;x=VY@f1V6q%gMlL$?Jo{vfb{}Xn$?!Rpg>%|QI1b1F z7#9de*w34D@dqp34V84dR6JSfSbovZZo|o5s(xPAxzJ8?AXFX|N{$LKYOE>0&jGHi z{E2uYk!b@wb4IAR_}gXymY~vA(^M2hZ)@sD=QCR-myw=W?P8=V8r=4K>-!+0^<)d$!D051WF`3z6ozsRy7$UY~} zmf}jbT&R!yje5h9x@Jrh+gWTwhu>rQV?5W4rRg9y?5!*a z!tYHyy|yF$rAZyN*(tBKsf0Ef!?a;6w-W@86*6rd_;*yrPakzD?{*x@h!UO_7m1Z> zqLdwop@i&@=sPFMJ#NExaiF@;Mz||DmXRIgu-d*r1N||`enfqM*v!V_Tg1XkZ>f~K~*;~F{4Xrz1e9Tj9PM!17RKGj>Y&w-JPbc?xi zc50WRx6~Xpg;!XFC0FwPl|dK~Fi;KS)D`L^h@J9G=zB-Ep`i54X#VB_z9O1e)kQ<2 zA4y_;_8d${{dlKRTGcN;3k_Pqqn>^y1(h(LYy@e5b$dqsWT1fOtpqUBBY)iG&UwXWeaRI;hnkof5_ZDc;w;v~k_ z8J)KJuEGhO@Y`8W+?2H^paGmg(Ku7GXA{*m+JE!5V~2}|m?szBw%oQj_Dp172@f(aApxNRlnwR-`>j{gg}Y4eVyF0_uM_jZQxA_cmMBP`F~y&bbbhpY?~(h zO|d(@4^NOmp`Ap0)#=(c@V59y?5 z>C|CDbc3}5Nr}28r;4dY{pWnA{L=5d$C+X+(_NY0xL_kn75S1{jyG@h*m7{L6mJ?g zN%rv|``GE0vUQz2X9Q~Z>h+W_93;QM7OM>WW-2(gxP^a+W<+iGDp{gyQIdM*#6%k7 z5?nRhp!8J9_dGe-4%7&Y9}!V6#{Z(=AhJ-8H+il776t@FkI7QVdUZdzK>j zal>(I)_|lWH|m)2cmsBDVDts!kXJ{#gYWZ`TEAW~%2#?@>pU3=7hyTHXUN1>Lb~D( ziAl`>LWsb@#%^L*FSNDql_sTc;zJiiZWj9dpd&4E#wl{im9$Q1G3mOW`U*MF1tVHy z@>v2^0Br5Frme9yTzl(0nOc3z_GijVPxwH3QPK+f+||u}!&suwqdOx=`Sa~k*XK;! 
z7WUr1;tTxIdUw~r5eK#$c&1EeCstYVWmzqcuP|O@e5QD=FIryAr!EJ3zvR87M1Jy( zRRwV+@cQ^X`Hz7;Jtxel=d^WlB!i#*)Z>PGb7b)?`^}z>H#nSJGgF~l?61N()Q?=# zVC}$Xb#8*OeLcpC6}3oYJooAu%%?{J;0|B!uYuM>amNmP#Zg#AVsf*QZnbP!+X9v3 zBv2l^ic!MU`v{Z^GM~>nXkNwY=y$`csGUydQJFBTvFH=tNEd{LL3|DKT~g*l#S63DsVeGswh1}GkXZe!+Cimqu=i6224s!3BMQ{8Wp)nmy7R;b;W zM6)68I#X>Zo~mnRDsoYQpv?L`e?D2~QUAV1I2M(I^|~<_si>??C|&l5$3R`&L?v#j z!F1VaG9ols<5JrdeCR4)laO;IyErlvwlh6bMui`l&+OnVbnG=k+`IfOsRw;~)pM{A zn?x1&UR@fLJ_uZ9ncg~|eAK#Jk=;G|GYpxIfs(FZuck%#YvlVw#*^%6Fk|CAlAqBM z&11qOoyHio4glnx<tb0Y4f>i4U+a{cik| z)3dMk%F*W0xcvpoVveFmxeBp;l$>U)!G>AM^e)CHp6|BhEgxGsyLUbknjKb7f>0L^ zVP1_!u@)v)i=e%}a=C>hbR--mhIwmf$m>0*}C&ay{yyz|5S^d*VY>!qlw-PI5P$YvTR|sitA1cNx7pjhzpLIHqox8jt* z2LY1tZ6LVNl(*NnKYmF+;7Vane%BasPRv)ol#}fDg!H753dckPO2(HviL2E4(sUw0 zdY8X>ed4yOerjhmdYp3h@n57)hp{bwE#JT+Ft`W~Z?B6!$?oHEp+}oogBXxpl&Uv) z&W3#ceF`mU=tg21|3k{-m4dwY-o28ttCm^FzTrAcc5>~3?%U1QYDsebQln>wXDDUL zEW0{8uhw1n-saou{PkD1!-wM)Wp&Y}?@ZO`w)P;(76;B|%82OCH(4D^YKw06Q7z`z zBRgu7X3MsAZrOGCnsyXpfH_!Hd@)1ELMZL}zoUK1;VIlwJjT8N20ZGX#KuTeOm~ZR=VZ(Qekyw9oE~wkZ0OKu%E8=udEJGur%p{JTfUU^1JhYf zN3qgve(7yXGdUqnql~s9C_)~yd0Z7w4ED|GvvsxJ&&jjJc^$WHeOKa=Zl1{H3RCx-iM`o|jjv zqF2tJ7nkJ0#a#R2oh;8eBJGq|d+jFL&K@%i^E?{p3iy)O@~p!cXPD zwA*5OEs{d|H^SyrdhB zm@%DXZ_yz_B*QK^mGHu1Kus+omAeZrW5Z2gcW7;D|_!EWeZ&@tgG|uLoLRHzatFNXheEUu>`1xXK^Wo)8 zCw#jL-MgNy!+3FG$Io!`7}wHar8`5tic|wZDd1&wx>)DdRIBl?YfMlPkIaKECe02v-+x}}Cpxg!Owc8Je%Iivu zSBJwzZ9?N%Uq10s#<3d6IW>>6r)S%!T~eY4`EtfBOBP+_8SApMT_ZO2n?qieI3n#~ z<#)`;{76rPAquM>llC~)vpp>yN4Soz3!W`~7n7VuqE*FB-p3gyL=NrARw2YF1Mp zsr6lPL!A;SE;Vk|j%8wNU~Z|) zG+=$BtDVoJ0Aug%KHAecyl21IKxv=rStN;B!Ci!02o4@a@tWHD4QvnUeP$3=h}f>J zGKid(zk6td($N;2FUafw2V7~bsm^q47b+mtq88n;VAcboD#2f8T&=-V!$*;-d(AiL zQ<<|W2 zk^*A}vXK;!dI2aL!IGhnyVx*~R@t>BFjmOdpA_KMhC+Kvp3~@Y!qR5kQq;Sldn>HE zEB?wI|3ym(YGovlLcV&2U)sAPB$S#ZKUKlz`kMYMm1T!&pjO3_In+s^<~Q}8O+N9y zZDoGX8TRM8r&H&ne^NGIn<6lzk6*mbvXcmEXC$?knZ~lO*cyDL&-uel%yA zeSd#;dAir$CWEpk%hsN;8|kg=6K?aU!AU4&#>K@*w8b4Kw8YV1GW^`nAj$W#mWc=u;v>3kzfo{oFSaLQW=tj=t#ZY_9Ql= z`QZIcEd%e4%?h_4zB0|5C?-m`+2j<^@|!IlC-qj>KHt=%g7X#~FuCJc=-;a%tGM{L zZb=>_Z_Fd?eUu`p@$a3as+>1qC#ftios%Y<_spK4w4|l{O#YGByaA6Wmdm@hoOoHf!-~9uzTe;P_s6VUBDUxA{+JS#RJ6~wM z?%5PEx$hwst(^W2Lx~2^Jkn9sd9AQ@Q~V9L`itS;Ua>P~kXYvq2&UA*Zr?kUbnk-* z6VE;zEI+HQ0sj7GdE^(itR`pOcvs7e$lJW^)Dg~(l~rdJiRK2Gf25|t_gg2q)@i2I z(oqaozEmOA&k76ld4#Ott=v$tLh6&inDdnGEfk^V(?H`x&7pmt`-=_mKoEt_K0$kZ zu#y>p=a)P)DSobEz?a}~pq@(tK(kwy^UC?X}VO6QBJ!v;% zP;h54$FGRe+J0km$n0aQCfTDElwQc7(I;Y@2yE?|P!$Kz<8Of>B~aP30rfUSW8fK| zaQYcvqCj81Wm)t}B4A!KX@#KG$EFE~8$}5h2ZYVvS{tlh1*)N&y6Jj{)(CBTj8|g@ z(c1l|CuN^ih_%>d!$UkqpXTK0yS_j)@(DvT@lhD|EY4={!`?)CU)-D9j32noH_U`K zoT)~05~_3XO4&?EK@IU(SL1c`1Dt(YU?yJTjDh5iAkffg7x{j#3*HIArF%JmZ8Wui z$L6TRwkjLfmydhpN)mFg;3a^uW!ILCS_e)5TXi zqGgUiv{C8D)2GKWDXIs3&2E|2+K)Fp{j}5C{0gLm-2uBj&Xfk@ z8O*`BZ}5PW#P2c%9ljKx7qX!z zKiC~=ywnu})4Y0-PCJ70^AQ6)oS|1}F8WZwcF)MOS%XajKlj;LF!COW{P2UJ5J9s> zvS~xMpM5?xP!PZl(l$Up`*R`CG0<;!$;A9XThiWa{Kug_V%QB7X?TB9s8!4R1OD46 zry~15qK^DC-So&d4D+lYFW;pubL5Q+8o^bX`oXVWGv)dr)ZCP5rsA!dEjoLoAJy`Q zEq#nw^NssW&vZ|ReX1&|b0O5K%_8}z5LP0ohnsz+Wgm!FSQ46VIs{SbSw99VGCQ!v z;-7M{UgpLNX`!lZg%1Qz`rNgx8WM+qK={eD9TjL$GtD89%A7V^W9V z>n^Qc$=g_K`v95x`yKO|b^E}AqyTf-zGP?HBgZI{yK$4E3noEL>8|BmOS9fvx!;r~ zFOd`;nQ<2lOF0*Mwh2+4Fxk6`~;s(#I z6rl>P>k95f@0YY!4ZR-6XJok1M&}|DSdRt7Ce694m5XeQMoZBkx4c+z*Ur3v%!(M6 zc(WhCrfHXFSOoHZZ{yLSm9TB)HfeQdM8|d(FiAI<9nP}3izn%c2H z!AAwjG4!9y^0jAXU@hU6y3A&Wv`ZARi%);kb=(PLoi2)Hv4PTNystGZgq=OLukyri znCnXUg<&UYZ5yAeB2$j(g$s`I=Ykcu zji2!`wQ*h(gv%4W$kMV>nx|LBKfCSPoW65;n>Uen$K-V>wW 
zCVpzIQ~a>;ExohLcd}-`kG9)r3d(K)|D<5!HhvwwLgEE_i3i6^jjX!Bl+vd$9YcRm zy;FVIhd@W5KbmO7TD)E_D{CbVEyG=LrLMyQbM_Vyal)*6um1-vh+Vb|U@9D?aT_KWtmp+RvB$C86rIT}lsq$qbByC@c&IAF12R z%wMxgHJNolGc$g0TSue0V(w~8UT{2wlTcDQopZi7JF3w>(Eo*)5C;eqn$PZ3!?^WW<>=lunuayFaZqru6cNh@z?>TgA_YQY57xRKJnNk`I z;Di7|8V`i+U?~D3uCE5G>}Kz>?&pwR7YKzT(m_o-+BuoCIUWCf5a*7zU8G7C5 zLjZDLB-V}OU-u0z!vgPhQ!q&6E8qDjrO6+VdXcR?y9qT2P}ffjDc9Lvz4jwxn6|_R zcP6tP67LMhmr*WUOhk-q-R4$YAftL2OKvI&sMCM4< zB73h`u$Kw0V{oLrCeWrwlo#y#^`+OZi%5-)lzT+!nD%UOP~$5mXq{NOv>3MS&>Fwc zVST8f17cPrCa_Xy_P|)eThaii$jY7(H6v4o@FE5vrDtYYMx4x&wL~JEptga+Mr;#k zMEn>qp&6HU39i-7)xJp0LKcZv=0i+qpCpxoG`Z%D5-R)qz=`e9l$QOSc|ayagovk+ z5S|B4(J%>#K9#+8qsyjwg54{p-P#I&6K;)OhjUdi>JuvVf5%pCn1zie|GvywBQZR* zk)u8gGeqcvQKWc1OTh$ajWh^F<-ZO-*`+(-vP->)H7r&qu;w{KEqH}Y_Pq&FFfJ9Y{6EhNDQXCd=tMI0HbZ34RG@p{%IVQ6@3y9Fg~J#Bf_ z)-ZVZ)bmp-dBX<7c4?tX@VW`l)$a6hJC~32{G)?HvTu)*$Bn7p^vfTMPHpc*Rljng ztFG2J6Ao@mBY$(9Y;RZXQ7nm68rF0psJn8_4$L*0XX6AjT#MHy5c=z_w~_B}5ba4; zXn7vl2C1dFToYTK4IVH4A~Q^$eQ0Fp^+MrR%_PS!sjiG6hF z_3JH^`-xqVZ_q=p9bBEXGQ*jHJNPssA^+1sN;=i3EbwVs7AhOo`dz(-g8l3}r~pbE)FtVe3ew%WTz?ULPP%OuT$P4(xbJiePP*%B=^KZ`MD0OIdoNnQ`J;d(X{ERBGloHMScl63*@+y zSxZg$wmeGIq-fyUk1vEB1MXp&yTvMV;;uU}UlD|YoF#rd0W^^n3-sP1HH&r3l8loC z$FGL^nHlSPHWATx$wzoYPe;aX5^pQyDKGl!YWqP#BkN4l8_#+#^pPz-45PPZj;MV0 zo)0DxYEPij({9qxAC;E~tD2Tw8V|iDD9~N?4xdyB3JS^&_bHkhYa;>Rb)!LFXrRdv zjvD#I8~Pta+X0OJJ=Nl7J@yf}J+=Y85i*be(cRP*tBoX`MQ2srAR|Lr-NwmWx7P46IJZ1!J+<)M=djGPW9Rgk>0UYP z25TqbvDBjtHDB209wPdSb#CcNRhw@8ackg!#1=gTkhjJCogdW&<1f$Skj*P=NQq^w z#h%my>GRLY`(CE}E<3@=B>UKJwhlVtYKp5OMVH<%nMI3kx~SIE$ZHX<@B_nHM+S_^ z=qej_$PJw2{mo8eo#*uz{uJWYIxA)f=Rp~Os9_E<8H$JxA$CxbF-_hyu6V>%F4I9_5VtrAkfzNxH?x1dhob#Ig zd!>aFO8?C{CT78}54ANSBz}Nha6%f&dZRX8{R8$u)ljanv)c?zYqs~+hnAZe^HDl0 zLP(z+Bdyly1cx* zh`M<-z{9}uE#;$;|7-HC+%(rnZsN$YhZQ&1*xha_-AEv))&0X@+U}@(z)T8sleFIE z?&%0HNbK-`hFt1@h*VGkXm~rT4H?y;{}?8j!T<=l>5gb8uwyZbczVXAC@El~DvtI^h`Apr8mJ)f>P(P$kljRG|r zR;Ej1B^37!1J?y+?5~w~1)j@?K%~F>h~iukOLSep$EoCANZPrm?O4Ek2)q-MX87S@ zjH&iJ>AV5h1EqV?O!CS5?W; zrevNv0ym93_X&D^n;ojMd_CAOWK(WHq885MROO5MUcO)J8eWqG{NDo;Y z&)q537>i~*0$FW7b2Zpdr>w>R*AvwM;86{M+s)&ox6ulS=Gwg<%e(3IX~Q&(*I5-s zMCa{32Oev-2!(#+|G7bLgut0HfOv9rt%+d~@Q& z`_8j_VfkA(nJzc;%QE(ZcRwu+mD#c7;}_YY_%4M`~Jl{LtOw(7VxZ8SIWkck7rNBnvC*U&yh)(E2k2&t|w8 z`0DQp#BI;oQ4wf$ef6T%&pv!0HC`xe;BEh~@gIB##{WJ;(oTHrZ0`$ROc5l-BAqj0 zcp=A2v4)US{+1Ck?D7hxo{|zaj7g!Jl|s9lQ0+C)K6m-)#lfJz$tS2@P`OKADs zOcfMR;6Q*2e01TgLAYTH93R?2y;RZEsyrsNHg@D~a<0Ss_w9!-NuicDb4JByZvVj!buuUiI5n!TSlCLa-*xApDWTAU2~Frl2u z%(cS0b~dKzG^LEWX|Kv?y1$YaWxfWudDZDus9q9x-uu?n{UsvC{J#0`pc!d06+NQj z^x4vkRovlYtgmAIyr!uo2~ zILXz6yJ~BqMRC~pIK6HD?`2;~2Q@xLez|TzE^OaFyCgDuNRgFB__aXKKHJ76=4Lq@ zBPrey@SI2NWFP;kI%Kjt6(xE>kH^0+oA^SuCBh!@Ts$|2nq9KK;V9E2iUv6Wl`F@j zW2p>cQ38?P6fV8BgQ?H&f`Ds`7r`Tx}P~Ge&clT z!QcYP`3leW%LI-PVL=OJunTHr@r(Yn($K3qczbP)_2mR7i?A$j?k2d6!Il-;sx)Td zF6Ftbv|)8_OUrij_yH)Q$CIj9HH7{$La2cp;r4t8c_rwc!*rTIZ;Ae3u+$WMhdFXG z=Pj~$2YsunDUsFRpZ3AmslI#0d48$s^&-JBg$^xi2@fs%M9>Pn^9D9rzBjM1Or*xC zdxSpCb1bIurG=J|;0+L|zrwJq+P$AFHH#{FVLZ0_qs6TzHK5w)43_HjJ?6AqnF1;21FE41ojyCT`xXy=5$FnR4 zV3G;mFj&D$zW4gz5a$rrlNLZ}$i8{AY>xd5`BPXZ$zCJJ`YRrM$(MZNa+OX~TIrn& z1iheBYsD>U_XeI{ZQr#u@tX19Bj#@Jf6NI<%`Kmt@Kqg_pTMFr-mSL+xT(lzILL*X zFCjwjK2_51x-s6vXgf|>$ScEV$a4$6hE$G*zBCM@wGq@#Y!%h)kT@Y&oQM4C4MLKl za(IO1ku~cqM%Mm5@bAcM>M>~+Um@)BRfsOKXE6lLQgBRl49{Wu&=3Y@mneHsD1?p` z?e6Q?dSC$ID6cFzGc|#NuZUrYTWNI}F#lrivr#f(JA}#c?;8pZ@ysPvog*5Y9fPxB zPOf*px;RCkr{8oyf~7hBG^J)uXVq#iUDd{X!I1_wq`lme9@#TxN3RHM%s1yH%=6tV z&c&rJ9C{~Nv;`b7u>KZ#k!9+2D z%|NX<_JYUE1*Ym&rgIj}SIJAKnC4T~Bc#<`t3@a}a1D>_aiFK$Z0vz8>aD;ot5m+i 
zHz>gt>N;>lYkQJL_CRJ3_r-a-O6=MNY=bM8uXz0#dd#44!wu^BIv~X^tp!?f{Yu!t zG}NFZ4Bck^dV&sksaE~MA>{BIA=^HWy#{(bM zuk1|9>htnS*1}KGzO*O9$5YnkYJ?9N)~PlQnOltQj4I0sjIqc*4cCujC;M%!EXh=)sy6-E&1w9DFBj&$;3wY>3FvpMolO}yhZ=x+pf<2Uw?<0ksHOeFjVuO%{<_y{ zikBnXwQ;)|Gl79LYmA<)KS$)V)b)KtDx?`P_KHYxZCV0|{dd44=c&oJsF9;ihruL)jJ2ZDlnI;ge z^{Yi8=HY|9Aln+S-PL+ z@4!q8|4b>7ajP84#zn~WI7$&5ug^X@{aWe|=Y8{0b%#o!YtlaXxcHXg+co!hnAg20 zc7FdCQobW#h+XkpH`eqo$oIJ_jY$Lvp!;1ekGM+}{_=wT3#lv;7XlSMzmzs{km9pC z6LN45wL#ydKHL6_AmMi7j;<5pl8Ykt@%X>EBWOLdM#ahoX6atGH};cU?39oVe<3mz z?mFnSHib4dD>A~otTDV^N=ln2^?r&Y2lfiS)fs4^hgRQH*;Zft8B`@`-WM}HDQI8m zALbjDwPG%fd?&(PUBJ4y-d2%t)gg{#L$~(=G35+R3(5T?AJZrnf2GMDE^j&^;t<@W z^8iz@kt01bQx!79GCsUv1Er*&?()xah)mZzkd{{F%~wvB@9n+;ei0I!@-P^_p`tq2Yc+bD z$EEAsC-YvQ%;m>{FUD<5ze2_72BFU$sJy#>)g>RWybnPjPwv<#7@)8u3h*Eo_#e9O zbCo8@9-3>tu*rH~_fJ*=GdIWur^tds&A|Q-4m@{l>>y_cps78ydUJ>0RW z`h%CwhlZq}x$jVqDS(6c5|kb0aEuT%Y^n*3g6>10doo7BkoX20YibN>Oj2!lh$iyp(Im*jiqRj~W9!BcOWa^K^!x$QQa)QauH4)rR980Z7MgJ2Dqn>!gw|ApjA z;|w_Q3#45~PHII(#Vg@IoRbv4-=D;4by*1Wq%VBvQ7Ryrr6#(z1%m7Hlz-iC>o@fDZnoKmO7FsN1*O?_iL1)axsoj%j{`aGKAy|ZWI5pe_KZ!I(I}#XY1ic%;>A^VV`&dLFI_j z^=n7Ci$*|nir;)}TP8DlAX6KOJVUtNftE-JYgmLlw_+X1DL=OyFD(xVp@#>y68(ah zG#F&^2p*h7Pzc*{TeL{~+|HNRQc<@rrW|{0Lv=-uKJnqkt*zt=lCfcWh=Z&}V&>uk zMq4X(n)YF5=Hbm#WAw8%WkKD7S;M9qrE_KIo}T4KqL|J;rac^b+Ki*L3BGR~mD|57 z_c(W^Iqd=me@LA{rsE#r4!^IM{!Q*( zx=i`_)qV%`PZv+r??JIB>84?}D7&*HZ1oAuI)mvYfg$8{di4J1E#I0%j7#P2hcDMf zwdf_j)-)c!^1Ok?5v&Nwd`u(be-lNuy#ZRG4Q;N%k4_uU4GK1 zu{##4?%zAtVbK+P(T$w;aFxBepfAy)Y-IMv+u(hA=o+>x5f^ZevGbZV7xnBIY4Pw| zRGt1)ToOeB+jaG_o2+)%E4bR^c}D1W447*E6|S|zBj3;Fd0VoE-z%YLpbLLmNzkJ4 zbZ+R)Bx|N@hP1Ve@hmn^XDSsehOh9QAf7e$jrtR-uQ;hML?{J ztp9;k$h$2bwT>Y+Y{ClNjr#J^-9EIoIKMnoY9U6Rm(&8w^J2IVoO9V=T?g#gWTh+s zogM6lA*2oZk+BPJ(l+kb)Wyn#%4&X8axL~8{1AT!xchhn|GxvDuKFX<0^fezZ|o{35}>{K_L*Uf;dr#whPSRO_|pj1V@4c zF2KM1t|7etSZn`>CI(1LBT6|s1Z7uzopYiv%S5N|~2x;DXm& zfu4W=2QQ5xS_V5~fcKT_DNv|HL(A@;T&msM4KX!n9_>RIwXylIOqtL;U}Z1d#!BUf z-lN|z1+s)ib8Q7d;W#$+Ue0$@;Qn}m_uI|CB`0{^-~SK=v|Wn1z#?{Ad+b5{vtj(= zx6z+3#?QxX>)C5nAIEO89l2Dgvm_)R0py$$e!)D;ksUoR8DUCh?8rESSCj0hSTLHa zSCWmmNl#UH5KpZ$F6rCd+w18=KdR=WqG;}_#xyz-C8-rZ*{ek52%4{&4AgP|#@AQ4 zz-dW?=AQFLTT67UYp2L9VOLF2HqC~`?##16{RjuvW$V&?s;4~&`afjOnE0%DvL)V( zl9fIj#ma&FJz`dbqym`yk1Q_xay)WmnRk@Y)6`%g*;g=e zntLe9gnt8hfbsax{131s*!vCitsV4O1TY%EknQ~v^tu!+pJ5Ybs@QRckQ4*K#A2A% z*Y7g_Z02fMY_VaK6ry+mLZAZtx*rVEI9VXavJM-d0lg_|lnpMIC;T$ME#T}y{s`}b zsHs-rkBcRqh}Mq^ZR54-=1gXJsMbCg>)nPv$=~8x;eKh=tJtZB_(hpSrX4DN3C+Lw zF|ZNtVExB41A&^Ac;=LUR6m1#@A1TZ^1b&P^GWVSjQNnK-DhHC<1EGv4*4~+adUF} zr#JA&p-?x`LpRA>U=dXo(9`lY$C%vGcf37kXZby`8ymkvxwhQE$%7%js{UBWt5wau z)Yeqt!;wY9!swXPp8ae4{7g~Ctu{-holSl7o6IU0_?@Ow@{ef~XrK6x?u*K-GZvXAKJwAQ?_AlgAy-!?k(;)A+gIC`t6w#f;A2T~|gVT=lkXqz<#m+Z53!!8K;eb2qjIZ>}o|?#XBTiYw+G@t-XO+p?<#5gQ1zEhv%4*HSd5d zG0v2__yUewJv9vwACz0#IA}qA-awph(b(N1gA-Cz zTu_YiWrjrOfK=FY?9t1;Jks$u%+^aO1b?S*CcvGqfrTL>%-@JCeYbcobM&qWe27be z_F7~>CUpVR-&%9I{4RRwwi@~l=Hk3L=DVpGn>JTk^}vzV2$93bvtF@%?ETPX_=oqf zo$ofGS&LaJR!8{STyg?oVeMyAk(`Fm42>)OBDJ#(yDDTLsAnyl5L(Ryg)k z7?e6eg@D84kaarT#14v~AQTXC4ik^ts4Z5E7&&HKO5eAk%i<>;+M)kUN0Ix!_AFe$ zGgqzl=7aX65PDM_C)2%U1K*83magk zMQ8@;&&pgWayW$VjR06AH@wqEoe{vF9KcKcY!uhBX)4hsIGCInz*N9-p_$`B@J;b6#+N;F z+0d3Sc+6S5pISbvosz!1HU@xh;s-YEE$xXB-O9sIA(?E>X2UHz1vd#L_tqm#RIa)k zX1`c#ziaHERf$W1@y}-yz48+BH2iwShd|xGhvo*-4@G*0wxiGG zcjQ;h8MkaV!aKC5aJlBXoA8JEV^QMdL(!6LJb)Sf@XSFPVwjn6dPI#RLwSs~@?;5&NZEv^=4>{QSKMKW=(o$k~6 zhMBDK$)DEq>bcndreT}=daa;u2_I>_vF_n-HeggToZsgzo3Texj2el!K=xn+kYKKr zp_h|xEpvSD8WV1v&)i=_JH9yYrOEabr0u+^psBK_yr6t{c==U2#l}8qly!XU(3HZkt3CP=v3M1sqf=kacJvyE<$f&$Z@z0WPAOxi(Fdj2BQ42t 
z3eUWNeP?+$anl~_ZGlngW*vjdqy&8zr%;V*bir$;pDVqLmfVqg0Vczf4SmgapS#B0 z#KaW&7I`-&ZQj)S^R}02`I$*AB2Ay2ykZVVxtYOsmr(E33kRQG*IDp0`Yj`@8@u~l za_@#t68D(Tb;Cp4517o+62GB~+miB&SUDn1ni?g+2`vr^z|eSV%s5>9 z)(dDBLE$wX>gZ=`NeJZwyElmaJNxn4rM`s_74#$n_|yCn+w0bf?;V*BV&fxnQYvVDbCi=hJ^&A%Z;^Vq(P7Na3=Vq{Vlh<^uF}}>7{pT(Ep{{hb9)zT{E9G*G8hy@_;=}-i$he83N_ZKx4yjNhQ6%^j}%=dC9r(e^P z|2~88{r`{9AY!Z3S6qOdj*;6ALU=G1*p~T#?Q_A;WKTrTaE+ybTs5G>Rhzj0OQGxUrDHlZPUoP|o5 zJ>e>-v30q3ce7VADzI{_EMadqdh&*LeHXjE9bM7vgETVZ7_-}7x4rX2-{tH_M-}-K zS2h+BcZpSI<3{Rt2u;*yceLq;-D>|pmh&FyJF+Hn||#T zMt@|jUyiAX@CEg+j8{xir`|9IYN!_Tbo2)E2oV(;23P1giIREkPj;cx=o+vI2;Gll zUPico;8n3p6Q|q-?Ubad-T5{xsCA}Ai+_PS$epNeeGeSosP(O`xjf)6;AYPX_3cYH`84=Dv~-=p?7LuNHzjABXp1#iL@Gd!g+s zFWnDl3BRgyv~xu9JAwKu1_KCm^r$?6085gfBn2vUKq!_c@bX>e(f|3liVfyb5E~O3 z@~uM}0l71Z5s_i38NpMY%L$6wAE>Pr{Ft3z+gO7yb!xPBUoC_)Yf)|`>ll3r>4s;X z;7-uSG>;?TR*8IfV?A=xD)yBW)hXGNCaJ0iu;bop%Umh=<_gS=G5#7{?x|hOOxd0B zx@7UP2XV*{pE=Q`_Pn{nUy0elzOhShZ+Z|rMCusK9_cw@>5RrH3FJQ|fFc`kR2pDa zykUrbeHl8T@)Q$vDt*_=i&YxJ126X*#W(`!J7d@xoseb2t%8Y$iONOZ!W}u?G=sy= z1=1-Eb7t}@0$eNn>P$=9%|iS9kK$_K&4&!HRjv2~u)K$~Rlpv40ul4oFeWaHJa}6m zY`chM{;7g&Yg2>G>AS#$kM%E~jA#=LG55&=6Of z!qBY=8#+(fx7`D3+5x41^#u&KGWWJ3&>bunH#}%LRHE`1<9XF=eA;+REHmni;f;1{ z{lWQR`x~TQltb1s-1bu3miQZRpnh1?hLqqP2m)1sjB5z{tjQnh+{s@JRP4oohQdPIgO=#Oe z*;r=ni%*)I(GG=3J5sa9%h~fz+0kp~YEIP~v)76%Sk!R;&(- zNu<9@Kv_^3KI6PqHT$5|1-2@ZKY~R|b*$zFD1H`c;edNEINL1a}XNC!k)Zu71ko;c`972lHJ62GE^Pt6v6Ms$zw5&*h| z*xeSeYeO@ohLxJ~Agu%rvk{nIGF2eI{X6zr$OsC(-0P(&d5>N)mUVLcPUIKn zx9EgJ?n)GHY8j!hT$}<&{Tu$h5c4G*Omv<1*-jqagWNiukY#kWuwE`LfS;gsfFi$xT^ z^3S>AHLLcSeU_^JE1S` zkgA{~Mfd!*?6$%!_C0hPb4{u|J~4A;*MrEUO;@NErPGDflF<4(ce>W@$gEu$f6#i}{QQz=wC)A zRRxH`1yV4Tdjk0MfT5w?fSyDs$nfDID)rUB&K0}@l@AERhxGY#a*um|aEi-Se(?Bx zZ|Py^^@eqQmgq?&wmwNPmE(Q-ce-W?eseBKXoxx|_+&j^cn+_T7BCJ6`j(7)9Ag_BJGmUjo@cExYKG(kWHru--UgMQqKbwgK#i^eoG3t&VTZg4`_?#$=@}cmRq~FOZ}owU5T@*K zW9*B)tZE-PrLaI2LUUxq8z%~$!AHvx@c4QFf5gjf9Jt_DOvu}Q&$Dpj*>_O}+BFw^ z9MVGj&_Wa16QPBDa83|AYD58$jBOx}=8|fBELd`o`X*S0Dg2!E#ryu#ilc4MyAc2V z^gKzSIoTnXWI6BaP}go{f)qDPm}MN$QJ?|j`YBpHm!o7c%2e@yLTJ-OO?tu{2R&LI zK`p`K*-1MG-Q++Euo@vWZlfV4AoH*clQJKKal;`G^i~w(r7j?XfkaIn&-fQZ=&D=g z8q-zs7jG22l3+*J8(kg?o1$skCq3El@AzxsOm!IL!;aOV9dU5TG+6U5Q{WQaud&<0 zR?mP0#w5CzD$`7)O z@Q_@yFlQSOPRjm7etEoR{ZcM*lG?YkJcVwIx6$m=Z=5N>tu3xw6>C!YshltBr7}4+7Q-AE*^j!NAH1qf zENpf+YD|w%sQz4!o|3J5Q-lof{Wh$m>zk8Bw{!F!a&_8#WB{$;)~h~GdIDXah_OD? z`|<{pITtv)!Nx-IDQd8D^I>RfQqQ~PDoy@6t?|np)1I)C=t#BAiRb^oH7RY`!+Oz? z;vJ6fS-9>3;&i9!KF1LIdsEZwYwiB3yObqAuX)TA!@1elQneD?%AKwqj0215Q#bA} zYC_k*<2I`ggi|=(3BJg)E0ERW*L44?+V=JpbFzB8uczhXPn6s7g7~EVs_;E4{!Z6@ z@C2cK)SMz--V0Ij-Sk}e1-Jcq^Vx2dPnz}2OwAhd>7B8ND%Se`=0UE#S`NAVi9bxL zf6z6qjB?DXyr9m&>Qft@r73+9h(@$L5zMN=-W#SaHAWEkoY`>Aqcu(F@sF^4I`s%Kecub8#F$&^{E(G zLLd0eHzLiS`bOL5EZM{xR_RoCqfmCt>$PJmwU27TeVrZj^j^hhShEq7BF-*a)y)$Nk@`?^omi(c@R|=YN|(x&>@-WUNLHp94_6@?v&a z>UNtG^B#X>R)8G#(*E^#t+p<6C&oNs-RyN&&+gI$61ZJ`R%1(o^qcF{1}rSA z^b4@1?=ooviXe|RB&OC5ioinOMSqW3lu>#0f>(M;Ml^;W#F@rb zQoS_1aTwx)%alNO_uyz4a^|@j9Cl-4ea?{3tv`!K)3`lT2Dr7^w+9>7SyeNWOcjsT zS5y;A$}u%JOu@{h28>8p5a8Z|cdz@*0W~kqj@Qr)d#Kv-9=x53ph`hz5wIXxjHAME z;Kc_1*lPi6H_O=C{|PELjsfpbe3^zHemn31=-OY;-^YS3silE1Cku6%hR{iSKsi|1 zL1rrUT|wj5!6_H4vf2_EeJW>~o>q?+ls(U~TGVJbMg;EN&tv8Tpvo_2*t&bxB z6Rd-D>|AMqAp{B9bV~nuhd`j$0+pfscvQ*@o>CnLV5punF);Ch{w|(w#?93jW*e?) 
zx;niqup$?>g>Pd3HvUdb{ga<)3;&8l3cG^S;nvy_qWAO@>P@1hn>U8jK z;y^Hqk1{C_{a=8xxm z!c>f~vgP^Qng29iSek4I4tam24<@|Ngj^5yNAyt>VBz3$&_wg-OYR>1?AAOg9YU109Bglsn} z5IIz2ture_$AV39c8COMyX1TFFp!3U%mc7Y@S1euE+HqAIU3w0rrYvbr_w?--AF0D7o7Z_Iizx=(7y@J1mOOeEDd5_ zIPxjzHeVS{yh*!BRRjpBlUiFrehktRd5ZFXJ^rtN11{(PHl+Uh48*G-x0e50@<{{f zdv7T)+`?Bps5&r&^(M&FMEgVf&*PouL%~KpzpIGDuVuYAOvEXc-qu1DKyg+MKTc^MTyGwA%g`Yb%c1HQA0s1G1Z^mGpunocNtp zz4nq{1nT`Zzln4Am(Y^)%fh$9^if+D=Ffw^*k)q%`i^Z6P6xDG=Y%z~lo?p%*sC=M z5<_j>9L&eFy;W<};HsyLM~jf}T<3TwF2jr2V@NqliJ4(L3bkVp4d{fbN2p}C*7|}` zZ$)`#R9f=<+FH5#VQ&IK+T|I1SGVB@z+>xk5 zd}RrIpxZZy{YzYseJ)7OX#-O%2ulnMKMP8P`AAYHI3SZuY|>Sw_>8o(gQ znO$N*KgAEv8{Qv2X(^?gq4v3497BGuRO~7oM81rax*7hVn;%-A?{K9;rav56Hv7s+ zo2Cvz!6{wG!2TL-Rb}z|W@)*DCp`WOHr5`f!P{3Qxp=<2p4r;8H*bD{ zWc0$jeijzK+o}$PQh7K955oVsN?D5X#s*}>F${`KRY7+^+)W9=1iHgCPU~r^58dHa zhkp>=9`|NqNcLW(Qi7Kd4V)QB`%n-UDjVb}Dacvnpbzl*HrIFsPpB0_Ta`f}u_L=KII+OP(Ggq^QG|4}bhOM*O4ttxw7u`#b z^jAPvOy7qQQfaA)J}RIv0G2C5|I-0ENXbYevTcGKlC_YO)9C5`$^qbRi_w>Sg!P

      NETzC=!8#NH^CyE6}Ect11S0fLV}5&YV}9`(03 z`vr&B8ZGQ3QM7bQK(!d*Ax*4_yXt2&LHN;DBnAo?+R#SL%~)qA=L%0+K__=+d^#Y2 zF5lGVDMgTVSJciiAx|g(bBD}jp zbChk5(K%1o?RrGsQtj>N=dt4$d$0BLD zUh^91u0oDHw9BtEQ1&ULKKEMf)NJs`tW}|py6e&P9Q>$E zK>$7AQ2fHKn;>WkR*UMQmZ&L!GZ!sTfDVF>l8r|!W2+3WB?ql)}Y|fqBaz* zSO`cJ(IP4eTH89bSM9Ce+WVY@;LzLqJl_vEhj4OoviDl=de^(x-W$enn{VGcugpC; zXcQe6gy3u(#{{%@PhVdkN~m`Pv8tSw0DT0?K2@jtX>;Kc*f}q1Jdhlpf(*4e6POW(-K zt(K=}M_Q?s1vzK6p^sXpOx`oe^Q3c2M@DApF2T>YWLpJ4_*Ra7+8F4%h0-NIl<2*M zR##N|RO73;Z2-l;Y+1{{X=PM+_7I5}JtRCeNP0c|JTA}8YKIkN#)OW#I4NZ{bJyI% zX|1iN=Fb_wyz)To=H>|Yf^V=*CkJ-#MT{L$qj`hx&#R+R9R-q`A@@UB2H51nLLn-N z{Ln)B(Put2F1A@m5+h>*f18{hOkDvhs)#FUOV)NUlm(fC*uk=ZGV5kM?vKd7(dj|X zBX5pQpa1}pHdu}&XE`sfC{3%-67$wuyQ7-CQ!-+7^Xw;3_=S!!S;Z! zO`FStA_VACr0)zj&ZOa@L7qmMV7L{GGDtUq1wq9e1(h!lUqOzV1%CyINhDZ=P$mdA zKUJwTC2S^(>nP~nr`%tv=0BZits0VeIDvVl=kd79a~5<28~)Ig*|2J>-pwtMh1|AX z)0xp^nZjwP44BRUmhfKr6n9Z_J0%G==!R`73uJ!qnI<4M0x~`h_$b`%H`m0phKilj zC0|hLWS97BPmGH`8M&TKJ)FjG7L7_+o9sDqrk96%5C1!{E^_>Z1L3EwV4oh_^i*f- z=}he*=2ccpt8k(8IaDxkMwG{FC|}ZKcsS*JtZ4R}*3CIChSnip{PLqnG~o$k+5Zl_YS?#ePM zM7gs30o7Dy)@eP56UFVlXnVUwp4q1Iv>Q!viJNhWbKU;dI~PBzsG|*SE&80>SkLR- zJfQXRz_^JD#W9uQ^fzZ=U!XaJJwn6N{o3*ppWBKUhZiJYo?fZ(y(Y_rby;(}_S`!Z zy`!|n^E#{QxTvw^!E(>(vL~I1A9zXDctvQwjk5QC@X~Pm-u^M7=Z2rM)*d?R9S0*F zGf&Wxw?>belu&4W;L}~d(e@<%Mx$D{L>xKoxhl9x@aeY(>G((2422g~ao3hU*|YL_ zlf|b=5qF$sqGQ%H_&VM$FZvp&Hz z)1Lz?QH>n?^^KBKhJ-?z)y8zXBqicl=9T3PZ4&T?FW^1{ydteBAh3QA&RJEoI4)MW zCcHhjcHs=&$*NyBPYy~bxjpgphYzkEpTvBqxfObxS_h}NMan$-(FujDWQUO;l@1@J8QJJa*MxI!Khfm>=DIaM zVvOf)e$t&2Etg}1Y}c<8w;VfAJ*<^qlOb54V~9ZEvQl;D9Alo@R%UfZrT6|hLuAoK z32v!x=BdhT@_KkI`6el>^cwlDl~3(-X1+J5lu_rlk1&)xRjUqMtIf+>x1GbL4McI= z6cm9v1EfL1ZuD0oAQl)SwTd*)rEbZ(=L`$?;oa(bhBAM{O~hodBnTh{Fs+72hGwp! zd!woyF=pLpx6eN50xcCrEEtvt*+noNW)9ORIusk^KxWzl5o{oSpaV>y^2W1zxs(uJ z1UpEug)e5g-WwT5m78mhCUIxV15(CDXwbs6;hi z87;Y)9jbW0e*f-~RpaWPzhC#X>cweE-E$#tq-xJM|F%)n+jp`$yNmSKc-Q8=69M?K zpxj}iA&&;@d7$0rfWHvB2H~%W68YzCY@Crtqg>#wZ|CVKrF?yGGUHmvkVU1!9*gGq zf23}r{n({_LOat|6DmS`_`}#AQT5*La&(KYP`i8jsW?}oIw$?MT~~BgoMH*Ct7lumEPNGYC65wdWLMp zEly4IlK5QNcIex8M|m`;Iy{58aB`j3=r50UZ*7ln$>*@B(@(7{9UHYWtXwwJE2ZOU zrDs%o%AL>%Zq<7mc0B!bX{@8g(cSFfiLud4tEEjN!b4rgObUT5jJY3eJ@EC^* zhw39n9MU^;MZPyq*zfa=eDuQ$CBT5w9qsSzF`^9H&u9lWoSP-}VO)z+Yb>F501Ecl zUaVvGR_^C3Q|AP#nB&{BZ^%0YUFExTdVS7ASx*Xwd)xm8nDvUBP3Ox$)r{0Ul3d?e zykqhRG-;nvHu|eBNNp`_Ve_Z~$nFV0F+V~!iH(u8e6r;Uc8h2pVK*cLBAtYA5W;rB zev#yZJ%*^ZhGq3|7R`joptyz{1!d?kqya)D(BLpeqc`v$HmpNVNywwYC>|vtw-C0> zB0mm25lJx%a7`uIY4~Y|9Ln7OT$mv|rG-ryV`6SZ2v6^whgCuJLCgknNAJdpq`PH49)a&QF)H!aEV{AT(Lwg(Tyc_=h z_g35fNPKXOcXaN>_S9vkK5Q?YcIwf*JF&kHi|BT~R_yB1Q!x6%%(df+{9jUr>De{E zv%WBNPuz2%At(QP8&+|}(TMZq#oOC`UsO)!0hvtN)_BA>B241R!SRSWNYBa@Q#qPP zVzXV!5%)9)LbG#B95EL1BcNi_goQ-J;Ilojv|$8GD&aVIs89!=j(b zP!SLhsIO5BgKdss5DX?Df~ZAiGhCE`qX3FuG5uwXPsmP~$mh9a!0L}1X!%x1rWr`@ zB_INP62;H7h?*9$Hbv9+O-bd|ii#>B$oCxffYx={0X|c>;mthX7hAQ-8h%XlA2QJn z*a4|XrFUBzQ3IMpmh)QcLK@Yt@-B)spKQUD+NEHL#)-fav(N#t>d)?>E2lAFSeb%iq7T(J2mQ?`1~cRUG#0 z(H~rZV$&5eHbDY_b2|t}fAfel6ry@rw?#%-IV>w?g}k)1Wo2&a$y&)+D3iFiKVgFU z%Esz7!&23bQ_=L9-71ik zD8Uy*A9yiKvgtfZM72%g2RSh?8-cns)v(IJibkDU#GH0PCZk{pYO;*7=2qA(o>DrB z4l`WL%AjD)PuO2JQ&DZ}di8-PqxHzQcjq5{@6!8&WxZ2cGP;gl51*n|Y!3f@*^X+T zeBILA+P&<+&369B+%Q9fH9!e-jbI1H9H}cCU`f{{@V0+1x9P)i`$lOV|&}t?OF^(_LqiEI=A^(Cygo#Pk2-b;oACln_0T4z5 zkXxDj22Jq9uM|UMI>m=m^Q_NlcwAf~#N` zC6iw{PAF}eYn_v54`I+NZ?$@Wv#g+!S5P%WB^OSXtj%cYW>;-ptxaZKlnG-~TjnK% zwANZHl|>^K!)W|@7%ih-(R2zP!f2Xb65W%_z~V9t2>ZbX1sfr=MT#3ZS01f5vI%fU z;nxGnr1xi#+(Ki(4)mzLTH-{O1^WkwSI5QripEu#xM#$ 
zGwQ(TlT)}2ppyE_W;>0uzMWpREcUr9{P#yeZMzD?BFnE-vI8m?mDBR~6#>5KB2Q8i92BBU(4gp@7Kei zJLi-k8I^#%$foxudD~Hf(;0z3g;3H_g&s{;tO6$;mp3jqSL(WJE=^vnr+B%q;;sU0 zT*UK$o(7Q<%S#m8<>tomrgM}Mn9Lf|K?@f}6wJ-&vt)1$WHN;DnKh$MZZcJL0+-?2 z5RI$DZAy@2(4lnj$^D2ovhzN&tV16W%ECBEm?klD*3VW=Y7Krg!ivF~`q+D45BD>C z>_kWuLCP-N!~?^ulNtusC_mK~t_J*9eJ2zb$ab<$X3q~yRE`akoZRFkkQAu)oKdV! zOrq6?I|{$Fn;oG5WpafC7I&=lVXn1}l));+!U~X+qV)83U_~iVr^hhnUrc7J9OYYN zghSEmzx{05Lf2GWNk6dvBNm&db(S1T-TCr|Zdlo?F+InN2Ho!C8YZcQ-k%9W6hK{3 znQ1rB)jygQu=s`LSSapHgOm{d$%adcAq=@Hm@sgN-i6NnU=D~Sy>QB8{jLJ-r_7)E zmt=RA|FikTfug0~4(QIAb-K}|!NqAIU9&0U_|18b=6{tKvM>;S)fotofkYASltLhSXll8QzMNy|Vgl|?$k zE}9p(pE<(scmoK?#_*!Uo!H+yO=OR7K0uY$$VBeeHitb8Vu@yq{1)#(*yk`V2Gp8y z=X|8%*UXL(fLW$HWL7{DS|A@%8f}>p=DApz zt=zw>@WEAk<~1Id;>t{v2g@secOS^JN)ZZ%+PPFDD~^4^im9U-c5R{6e(cfmW$j29 z|8cNRNRWv{V1ochY$VI_Wvkzkw@0U&oru{?(s7Y1$ zalrjyYz<7P(Mbsd&dp|6`6iIUoWY2^q$u1UcG+GGC`Ivvy*_LyJ7k3xS+*f;c0?xZ z_`&7~7xObC2Z}2XGkq8qo}TfxaB%kg2sQ?x$nd4;ATAe|4XmhKBlDNRpCw{dJHsN# zlLrkX9j^wVE{WowTN;O4TCIov1W)b5WD=j9%yL@1y@n|$X8}f=`WcN__pwrhw&oa2 z1`{$4pe2Cc)F9|Jq=}m_8v9j|nbOt|LgOL%Fg^&VXv?UJ5sgc?E_8+sWSu#&sZpngAP( ztbxaYd_cxe2Rk1Bxb>G+x|*g7T-XD(w*GTikHiNiT{n+BcQR)Ckd@bFitg7YW)vL%-A=bTqb7`ldE&bR*pnT>c z*@gfZB)YzWs74vLzr8~#1_}`UuFgexj>|lNc#&2Plmw42p?Rg`H~p)lyha6UjJ+PJ z%+c@-=g?6j3n!lTFHo$35|EkW3d)t0k=4Rl4_YH|nyPMg<-S(IGJfRA?NJ&-3l$(9 zFkrnTF(|&tvIm0FF6h?bVAFJFjg}uWL{EDTi{B`BLw$=K*0dlA$F`{1wMU1PTWPr< zDX`wNo}pj&|^u z6B3H`0gp}&3S%bm2(snkp%b=0JT}K!6prkWtKF+ACtZJ(?flfQa;_HyUs7za1G9gvT>l7}_;c z=-Z~+ie&PX7;OhKUoXFO0BvzI$G24g!pxR~LB%&pfiGQ!xk~ReD53hq#vlMUr@+=% zP*+GR(!$=;%H1Z+o*e;&0@Rl+_4Xlpu48A+XkK`L!>LuRy_=rrCYPsF+}qC@6LoXr zjiYyVE_rHwsO7!c*#|=%+%IOGdm0sDWtwnxUpBR`X%kUsh-N zY2djGrJ_;B+jer`MPEoe1aYuIxcm@=TIN9D&UER>oNE7E=_r<54u*!Up0qwMgr#q@ zp9laWBaYx{fhd*`qe^BEguI%p+#m+1h>&9}G?RhfMK}%5ANdew0 zzk#gUMBb3aq8S%pc(4aD@iLez;J;u+fpiFeBbv^j2AUBi?pO4RAli6KF4-aIvrVd@ z@t?*c0a>U%rO$21FNeOV*)95=<41Zu3J@a=Xy6?+BWg5)i~_&A6eM^!y+$} zxRV`3La#Tfo+=Hj5&-s!)$Nj?*pa(Y0hIw(JjUGPcC%tR4dRn#g;Y#^B1l#@lnv0- zbnT4@0g1c;mieuCNDjg>s9CaGLp<}&_I~2@JZ=-MVfU2ro-)I-xEJT93|`25p^^=8 z9o3{|i(X7EU!U)^A+%}52G>CEFId82r}e*#D~XgU3!OS+dMu}U)%{28l5pc2$L}q) z;ZC*5R-y&XLH3^|a_0w)pZjd{1o7UF*H7{Apk*75odsn!WH3SL;3PaMsJTv)nz-=D zOy*E)R#1k9i%5pEi3d>5cA7?ngIA zlz2YZBv+H<}b~1@ni5i(p*G-16~LAbUn$ zX1oV_i}ACp&`r(n@uqWJK!TbIH4%KH{!PO$Yd2dAci_E}(D7>j0_oV(r(y2Zaxcp6 zV`FOscc-{IBz{!Qe>y=~oELGjZGW+|=T@j&r|BLFBoC}}H?n=VE8vkwvC4pKzw`2P z18LQL9Yc&#z}GC3hP}h8f%KUL!;&-|Izt;7im7mh@&_D=1({HQ_73+Gkv&CEr#~4V zyXYyyGoa$$fz|_ywMkoi2mH$6v0{!ztZj`qENhlAh5|aGFh77MOyhi?VI2U&<9);o z74=}H3Uku8;VAh!8r-l#TTk@?t``JUZ{}*q4}`xk>QIrDo{lshy(5bv7amjiD3*KlRWpcsZV}(f+Nb8Pe z>!=P6^mhNI0UyVdh_8hVYo1pJ#1{&0h?*rtX893%B4|TkCn-=D<^-r)3AgIQDWeCW zqm>QqLl*FFqx@y03YZC?Bov8bi1=6&U`;?Cwo@Dpu-OArN|1pPF@H;%2x(JE7D4e|2&hY?H-}knHy;}TbYPukAjy&e$+9}-S zRlC1!=3NgkSbaWW=3fL;W9V*IKF1DSOeAaIb4OSCugCL%K6RdyNIFtaKMXjz>@aSnF zo9KCYkN};d2}s4XzeV*Ks4;`UM^F(6$7a@9-Y5ZLZVQVhyk@gtk~CckbR9^cf^Gn@ zR0*0aFt*VJCZLL`9frq|P_&z4-(9HKnWE|tTZlwkps;|VkZo72h733l%_1pnZVVom zC8H!u*7EfeX_S+jyl+N!W!X~J-wf+rh0>;@97RC6h19OKq%wsAAC+95?%fPR1fA_7 z=!72Z75c9S$1@d_KW>`8lBa7Y=yT0#BvS<%H?9q>Q8#RJHky)-o zeoS&|_@gh8@IaET*-!^@nH<_0H5a-Aacew4h1EDhxG?#E+2j44*D#AmoFAXORy6`n z)PLmD*bH#FIheqs@d1u_iK3O!HZOnp&c)ERsXjnEUl1I3E+s+++rT;E;gWu}iTL>t zdyjxCC?P0tf;&!h7{iOm-zqnZSzw#>xJ@=`>ZD>WZxf9lo`amwmeJ^=psbP9J}JKs zwQAEPkXQ6~Koq-h@OuTP$rxzjVQ>2zl}qbtv?&oV)?-~L(cb2t$gyh|UyHiVIRWVg znp0H=&IaWRcnlZAL)UJ-e-;UxHHwDYTeyxv6V$zB(#?-?28E_;<0Zdh4=9B@>-w@ybMyq3Ti#P!H?*S@7o8EM> z;vB1&VfnJ@!Xi5kAGSkR9N+wH?DQ{`c{YLC*pXnQLPw)^2NO}C-j8t~*&0mo4YPuZ 
z3W-{}?}&dvSFVjH`MD%eb)e&<IYGERmE*y`{I zJ}zfRSjCU(c*Ps~K#`D#-t;#b>g+dkgm8i!Wg^0S1S*SrlNWoTarsfK6<{gE%6g<$ z>y5^*U117iJD0M)cd8IHLIAhP1?I286t9Qy8Vq-%p+E2>BOOJwk<#@_2V0=Okwqfg zF!94TxeoK17+6HJ;6XzJt6snu+796+;WhPWh1vay_=uwtwr?&mJqWI_x$_y*1a&0j z0ZZsVI{ZH(9csr{PzzzT+$j8F1^5l3F@iv2s1i7fWUrL#eD?K4TMR4j_;hs`}WYQz#Y{U*aQMlgc+y&7m(Di1yuBR#3=;M5tCGKe>(D3k(d0VoAl^Ct6p z^))2X_fvYzFX(4>2;={UAJS2K}wJr zqIdu3Ntw-1ie?!SYx8(R(z05%)iuV2J$tmy@R4L>=8_xo+J{Vr#;t)R7{1jqgT6(i=Vd)hLgj+4!Q#q~u@_$jNaZl4G^~AItbb z#-oYS6Fm?vL5YV!$$|ZGYN3iH-Su3BCM_zONAZ?S_T)*QmM65HeB$wA*+toE*s`?F zm7yOrqb#rYY?f?RK*eEk@>%(T=`zJCTKgpW!-VTkN3n!rl{@5cMfNIK9*3eU)^sCn zW-qMYM3Pi+LI4Ep_SjW12{!)oua59Wb6DVF+%T##03iMTYknn?u2D`34}fYW;6#lR zJM*V7o@0H(nDybS5Si{m$~F2xpzIK@8i_7NzD+7l&=J_$Bl1cHa=$jEha0}O?IZ&9drb3_%F5CnIgRaDpyoRU26MB?s9*|v;A)<(4#-<<AxRI?*t?^TwpUzjUZ4~H^bC4V5l(GRd}D~bF^52w1dn01`C!H#h~5*9O=dXNnZ z;`K})kMrBvQ(3i@6JSs?(l8Cw%<20n)dmE%b=(bTqFIsXQl z31cQQ;(;47IhxJi!3)n2jyg!0fnu>r4N5B|`8sj>-*>9vDuMOO8vo!cdCQ( zw6xl)30eLhw5v3pne$Yc+ag^PxjRq$Oh6hFCZ$+r!ERx6AM%KjYvg0EQei9{{WW8R z<^p^m6IpaSF8HkRl~?bFMx~6o?_VD=8HA!A3=)L)=?hCfFkp!z>dRI&RLXNYps1N2 zV$)V0yiWN2Y<>){IA#f(7UM)p_=;rl&|1}{3_(Ic<~Es8=LY|TM@DTJq%5X6Xc#5?;v^xCM3>o7F#L(iTIw44Gj|@%3s#zV<)h(mSd9o3Cc;$izJN}8o{1C~ z-N)k^*GP)Z23x-bJ7(AYG*gEK{Nss699(KRsG4~zo2xEZy09RvDs zVV)~fezO!()yrU8`G&OsEON@2d(&ER)0) z!47>Ce9Ay|M!2+}A0}>uPgju6mDNn-e<%d@nOvVC5jKbo66?$@-6uQ1^v1xY|%8~%Op`FktK=lrQOCx%hv9XXUz@V13 zRDO}LH^j;te8UDNCV&m%^#unjB3H;@i+Y1EK?RAAv=%o)Q`f?RQgE5cfic*id_heD zCKD!KynPJC+_m0#j)zuA(0S7ZNTOE4*Fir>dl-&=0C5cR^Z)XA9xAk8zvN3R9x!R0 zk9kVnSI7NR*Vg=&e6}Qy!0I3>?7N<9Pv=GFlGdhCfc>lkNYzikY$YRNy1Ly=obVPSmfDDf`q5IDMd(Iz79~q0Qk{7q_vCC z0pF7(%{qd=ncAfPvl*Z9^&@H=rANM67Iu2hr7{Et1_v;Bqus`gKILiROfMbCv zUORe0w)KV zfVhO@y3jiKIFO`0R_K(`N12q@;=&O>}%mWqR)`AMS@C;TX)~q9)2#Tdv7F*qTC&h zq6C^v$D3v_0IZNGguCR`?lA8fBs=uc2INt%%NFK&5OEG3f*+p*HL`AqENDj!iXt!Y z6=K8>S%c9uP$7-cxXhoUUp@nG$DQv8SPUg&t<;yz4N943Bk5`X4dwst|BWEuKa?0l z3J8uy@(aX6Pk@4f=rx?|0w_Q`>-uoR_}J)(E;sXAC@!~V=plKafDxFrXRQjD3kBF! 
Date: Thu, 29 Apr 2021 19:25:45 +0800
Subject: [PATCH 095/210] fit to paddle2.1

--- paddleseg/models/layers/activation.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/paddleseg/models/layers/activation.py b/paddleseg/models/layers/activation.py
index f7910bc1a9..6ded5f272c 100644 --- a/paddleseg/models/layers/activation.py
+++ b/paddleseg/models/layers/activation.py @@ -53,14 +53,15 @@ def __init__(self, act=None):
super(Activation, self).__init__() self._act = act - upper_act_names = nn.layer.activation.__all__
+ upper_act_names = nn.layer.activation.__dict__.keys() lower_act_names = [act.lower() for act in
upper_act_names] act_dict = dict(zip(lower_act_names, upper_act_names)) if act is not None:
if act in act_dict.keys(): act_name = act_dict[act] - self.act_func = eval("nn.layer.activation.{}()".format(act_name))
+ self.act_func = eval( + "nn.layer.activation.{}()".format(act_name)) else:
raise KeyError("{} does not exist in the current {}".format( act, act_dict.keys()))
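The patch above stops relying on the module-level __all__ of nn.layer.activation (which changed in paddle 2.1) and enumerates the module's __dict__ instead. A minimal sketch of the same case-insensitive name lookup, written with getattr rather than eval; this is an illustrative reconstruction assuming paddle >= 2.1, not the patched file itself:

import paddle.nn as nn

# __dict__ also contains imports and helpers, so keep only classes; the
# patched file instead relies on act_dict lookups to skip those entries.
upper_act_names = [
    name for name, obj in vars(nn.layer.activation).items()
    if isinstance(obj, type)
]
act_dict = {name.lower(): name for name in upper_act_names}

act = 'relu'
if act in act_dict:
    # getattr is an eval-free way to instantiate a layer class by name.
    act_func = getattr(nn.layer.activation, act_dict[act])()
    print(act_func)  # ReLU()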
From 07c66227a470b6ad271bfd5dabee2e39e8df2064 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Sat, 8 May 2021 17:25:33 +0800
Subject: [PATCH 096/210] fit to paddle2.1

--- legacy/pdseg/models/model_builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/legacy/pdseg/models/model_builder.py b/legacy/pdseg/models/model_builder.py
index 2ccb0b98bf..4f999b1d79 100644 --- a/legacy/pdseg/models/model_builder.py
+++ b/legacy/pdseg/models/model_builder.py @@ -116,7 +116,7 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN):
class_num = cfg.DATASET.NUM_CLASSES with static.program_guard(main_prog, start_prog):
- _new_generator = paddle.utils.unique_name.UniqueNameGenerator()
+ _new_generator = paddle.fluid.unique_name.UniqueNameGenerator()
with paddle.utils.unique_name.guard(_new_generator):
# When exporting the model, add image normalization preprocessing so that less image processing is needed at inference deployment time
# At inference deployment time, only a batch_size dimension then needs to be added to the input image

From 03b53b34095e6a5646f17936e0abe620f7ad266f Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Sat, 8 May 2021 17:34:02 +0800
Subject: [PATCH 097/210] update README.md

--- configs/unet/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/configs/unet/README.md b/configs/unet/README.md index 84c9091e52..6426a86276 100644
--- a/configs/unet/README.md +++ b/configs/unet/README.md @@ -9,4 +9,4 @@
| Model | Backbone | Resolution | Training Iters | Batch Size | mIoU | mIoU (flip) | mIoU (ms+flip) | Links |
|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
-|UNet|-|1024x512|160000|4|65.00%|66.02%|66.89%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/unet_cityscapes_1024x512_80k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/unet_cityscapes_1024x512_80k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=67b3338de34ad09f0cb5e7c6856305cc)|
+|UNet|-|1024x512|160000|4|65.00%|66.02%|66.89%|[model](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/unet_cityscapes_1024x512_160k/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/cityscapes/unet_cityscapes_1024x512_160k/train.log) \| [vdl](https://paddlepaddle.org.cn/paddle/visualdl/service/app?id=67b3338de34ad09f0cb5e7c6856305cc)|

From 318dd24cf3c7788f3fe88c7ccb910a09e6f469e5 Mon Sep 17 00:00:00 2001
From: Roc
Date: Mon, 10 May 2021 11:10:03 +0800
Subject: [PATCH 098/210] sync_batch_norm supports amp

--- paddleseg/core/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index 151a95adeb..a582c8d2bb 100644
--- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -157,7 +157,7 @@ def train(model,
if fp16: with paddle.amp.auto_cast(
- enable=True, custom_white_list={"elementwise_add", "batch_norm"}, custom_black_list={'bilinear_interp_v2'}):
+ enable=True, custom_white_list={"elementwise_add", "batch_norm", "sync_batch_norm"}, custom_black_list={'bilinear_interp_v2'}):
if nranks > 1: logits_list = ddp_model(images) else:
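The patch above only adds "sync_batch_norm" to the auto_cast white list, so models built with paddle.nn.SyncBatchNorm can also run that layer in fp16 under AMP. A minimal, self-contained sketch of the training step it touches; the tiny model, optimizer, and random batch are illustrative stand-ins for PaddleSeg's own objects, and a GPU is assumed since SyncBatchNorm and AMP need one:

import paddle

model = paddle.nn.Sequential(
    paddle.nn.Conv2D(3, 8, 3), paddle.nn.SyncBatchNorm(8))
optimizer = paddle.optimizer.Momentum(
    learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

images = paddle.rand([2, 3, 64, 64])
with paddle.amp.auto_cast(
        enable=True,
        # ops on the white list may run in fp16; sync_batch_norm is now included
        custom_white_list={"elementwise_add", "batch_norm", "sync_batch_norm"},
        # bilinear upsampling stays in fp32 for numerical stability
        custom_black_list={'bilinear_interp_v2'}):
    logits = model(images)
    loss = logits.mean()

scaled = scaler.scale(loss)  # scale the loss to avoid fp16 gradient underflow
scaled.backward()
scaler.minimize(optimizer, scaled)
optimizer.clear_grad()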
From a43d1809f3e03bd1a61ac58fbfcb84f3d8ef89e9 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Mon, 10 May 2021 16:30:31 +0800
Subject: [PATCH 099/210] rm paddleseg

--- contrib/PanopticDeepLab/paddleseg/__init__.py | 17 - .../paddleseg/core/__init__.py | 20 -
.../PanopticDeepLab/paddleseg/core/infer.py | 349 ------- .../PanopticDeepLab/paddleseg/core/predict.py | 188 ----
.../PanopticDeepLab/paddleseg/core/train.py | 315 ------- contrib/PanopticDeepLab/paddleseg/core/val.py | 181 ----
.../paddleseg/cvlibs/__init__.py | 17 - .../paddleseg/cvlibs/callbacks.py | 279 ------
.../paddleseg/cvlibs/config.py | 297 ------ .../paddleseg/cvlibs/manager.py | 143 ---
.../paddleseg/cvlibs/param_init.py | 91 -- .../paddleseg/datasets/__init__.py | 15 -
.../paddleseg/datasets/cityscapes_panoptic.py | 192 ---- .../paddleseg/models/__init__.py | 18 -
.../paddleseg/models/backbones/__init__.py | 18 - .../paddleseg/models/backbones/hrnet.py | 820 ----------------
.../paddleseg/models/backbones/mobilenetv3.py | 364 ------- .../paddleseg/models/backbones/resnet_vd.py | 361 -------
.../models/backbones/xception_deeplab.py | 415 -------- .../paddleseg/models/layers/__init__.py | 19 -
.../paddleseg/models/layers/activation.py | 73 -- .../paddleseg/models/layers/attention.py | 143 ---
.../paddleseg/models/layers/layer_libs.py | 165 ---- .../paddleseg/models/layers/nonlocal2d.py | 154 ---
.../paddleseg/models/layers/pyramid_pool.py | 185 ---- .../paddleseg/models/losses/__init__.py | 17 -
.../models/losses/cross_entropy_loss.py | 74 -- .../paddleseg/models/losses/l1_loss.py | 72 --
.../models/losses/mean_square_error_loss.py | 60 -- .../paddleseg/models/panoptic_deeplab.py | 339 -------
.../paddleseg/transforms/__init__.py | 17 - .../paddleseg/transforms/functional.py | 160 ----
.../paddleseg/transforms/target_transforms.py | 281 ------ .../paddleseg/transforms/transforms.py | 888 ------------------
.../paddleseg/utils/__init__.py | 23 - .../paddleseg/utils/config_check.py | 59 --
.../paddleseg/utils/download.py | 163 ---- .../paddleseg/utils/evaluation/__init__.py | 17 -
.../paddleseg/utils/evaluation/instance.py | 345 ------- .../paddleseg/utils/evaluation/panoptic.py | 220 -----
.../paddleseg/utils/evaluation/semantic.py | 85 -- .../PanopticDeepLab/paddleseg/utils/logger.py | 49 -
.../paddleseg/utils/metrics.py | 146 --- .../PanopticDeepLab/paddleseg/utils/paddle.py | 125 ---
.../paddleseg/utils/progbar.py | 209 ----- .../PanopticDeepLab/paddleseg/utils/timer.py | 53 --
.../PanopticDeepLab/paddleseg/utils/utils.py | 120 --- .../paddleseg/utils/visualize.py | 195 ----
48 files changed, 8556 deletions(-) delete mode 100644 contrib/PanopticDeepLab/paddleseg/__init__.py
delete mode 100644 contrib/PanopticDeepLab/paddleseg/core/__init__.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/core/infer.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/core/predict.py
delete mode 100644 contrib/PanopticDeepLab/paddleseg/core/train.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/core/val.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/cvlibs/__init__.py
delete mode 100644 contrib/PanopticDeepLab/paddleseg/cvlibs/callbacks.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/cvlibs/config.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/cvlibs/manager.py
delete mode 100644 contrib/PanopticDeepLab/paddleseg/cvlibs/param_init.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/datasets/__init__.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/datasets/cityscapes_panoptic.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/__init__.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/backbones/__init__.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/backbones/hrnet.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/backbones/mobilenetv3.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/backbones/resnet_vd.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/backbones/xception_deeplab.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/layers/__init__.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/layers/activation.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/layers/attention.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/layers/layer_libs.py delete mode 100644
contrib/PanopticDeepLab/paddleseg/models/layers/nonlocal2d.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/models/layers/pyramid_pool.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/models/losses/__init__.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/models/losses/cross_entropy_loss.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/models/losses/l1_loss.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/models/losses/mean_square_error_loss.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/models/panoptic_deeplab.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/transforms/__init__.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/transforms/functional.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/transforms/target_transforms.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/transforms/transforms.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/__init__.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/config_check.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/download.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/evaluation/__init__.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/evaluation/instance.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/evaluation/panoptic.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/evaluation/semantic.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/logger.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/metrics.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/paddle.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/progbar.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/timer.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/utils.py delete mode 100644 contrib/PanopticDeepLab/paddleseg/utils/visualize.py diff --git a/contrib/PanopticDeepLab/paddleseg/__init__.py b/contrib/PanopticDeepLab/paddleseg/__init__.py deleted file mode 100644 index cf7c746bb5..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . import models, datasets, transforms - -__version__ = '2.0.0' diff --git a/contrib/PanopticDeepLab/paddleseg/core/__init__.py b/contrib/PanopticDeepLab/paddleseg/core/__init__.py deleted file mode 100644 index 35189064a6..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/core/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .train import train -from .val import evaluate -from .predict import predict -from . import infer - -__all__ = ['train', 'evaluate', 'predict'] diff --git a/contrib/PanopticDeepLab/paddleseg/core/infer.py b/contrib/PanopticDeepLab/paddleseg/core/infer.py deleted file mode 100644 index 069ed0cd92..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/core/infer.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections.abc -from itertools import combinations -from functools import partial - -import numpy as np -import paddle -import paddle.nn.functional as F - -debug = False - - -def get_reverse_list(ori_shape, transforms): - """ - get reverse list of transform. - Args: - ori_shape (list): Origin shape of image. - transforms (list): List of transform. - Returns: - list: List of tuple, there are two format: - ('resize', (h, w)) The image shape before resize, - ('padding', (h, w)) The image shape before padding. 
- """ - reverse_list = [] - h, w = ori_shape[0], ori_shape[1] - for op in transforms: - if op.__class__.__name__ in ['Resize']: - reverse_list.append(('resize', (h, w))) - h, w = op.target_size[0], op.target_size[1] - if op.__class__.__name__ in ['ResizeByLong']: - reverse_list.append(('resize', (h, w))) - long_edge = max(h, w) - short_edge = min(h, w) - short_edge = int(round(short_edge * op.long_size / long_edge)) - long_edge = op.long_size - if h > w: - h = long_edge - w = short_edge - else: - w = long_edge - h = short_edge - if op.__class__.__name__ in ['Padding']: - reverse_list.append(('padding', (h, w))) - w, h = op.target_size[0], op.target_size[1] - if op.__class__.__name__ in ['LimitLong']: - long_edge = max(h, w) - short_edge = min(h, w) - if ((op.max_long is not None) and (long_edge > op.max_long)): - reverse_list.append(('resize', (h, w))) - long_edge = op.max_long - short_edge = int(round(short_edge * op.max_long / long_edge)) - elif ((op.min_long is not None) and (long_edge < op.min_long)): - reverse_list.append(('resize', (h, w))) - long_edge = op.min_long - short_edge = int(round(short_edge * op.min_long / long_edge)) - if h > w: - h = long_edge - w = short_edge - else: - w = long_edge - h = short_edge - return reverse_list - - -def reverse_transform(pred, ori_shape, transforms): - """recover pred to origin shape""" - reverse_list = get_reverse_list(ori_shape, transforms) - for item in reverse_list[::-1]: - if item[0] == 'resize': - h, w = item[1][0], item[1][1] - pred = F.interpolate(pred, (h, w), mode='nearest') - elif item[0] == 'padding': - h, w = item[1][0], item[1][1] - pred = pred[:, :, 0:h, 0:w] - else: - raise Exception("Unexpected info '{}' in im_info".format(item[0])) - return pred - - -def find_instance_center(ctr_hmp, threshold=0.1, nms_kernel=3, top_k=None): - """ - Find the center points from the center heatmap. - Arguments: - ctr_hmp (Tensor): A Tensor of shape [1, H, W] of raw center heatmap output. - threshold (float, optional): Threshold applied to center heatmap score. Default: 0.1. - nms_kernel (int, optional): NMS max pooling kernel size. Default: 3. - top_k (int, optional): An Integer, top k centers to keep. Default: None - Returns: - Tensor: A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x). - """ - # thresholding, setting values below threshold to 0 - ctr_hmp = F.thresholded_relu(ctr_hmp, threshold) - - #NMS - nms_padding = (nms_kernel - 1) // 2 - ctr_hmp = ctr_hmp.unsqueeze(0) - ctr_hmp_max_pooled = F.max_pool2d( - ctr_hmp, kernel_size=nms_kernel, stride=1, padding=nms_padding) - ctr_hmp = ctr_hmp * (ctr_hmp_max_pooled == ctr_hmp) - - ctr_hmp = ctr_hmp.squeeze((0, 1)) - if len(ctr_hmp.shape) != 2: - raise ValueError('Something is wrong with center heatmap dimension.') - - if top_k is None: - top_k_score = 0 - else: - top_k_score, _ = paddle.topk(paddle.flatten(ctr_hmp), top_k) - top_k_score = top_k_score[-1] - # non-zero points are candidate centers - ctr_hmp_k = (ctr_hmp > top_k_score[-1]).astype('int64') - if ctr_hmp_k.sum() == 0: - ctr_all = None - else: - ctr_all = paddle.nonzero(ctr_hmp_k) - return ctr_all - - -def group_pixels(ctr, offsets): - """ - Gives each pixel in the image an instance id. - - Args: - ctr (Tensor): A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x). - offsets (Tensor): A Tensor of shape [2, H, W] of raw offset output, where N is the batch size, - for consistent, we only support N=1. 
The order of second dim is (offset_y, offset_x). - - Returns: - Tensor: A Tensor of shape [1, H, W], ins_id is 1, 2, ... - """ - height, width = offsets.shape[-2:] - y_coord = paddle.arange(height, dtype=offsets.dtype).reshape([1, -1, 1]) - y_coord = paddle.concat([y_coord] * width, axis=2) - x_coord = paddle.arange(width, dtype=offsets.dtype).reshape([1, 1, -1]) - x_coord = paddle.concat([x_coord] * height, axis=1) - coord = paddle.concat([y_coord, x_coord], axis=0) - - ctr_loc = coord + offsets - ctr_loc = ctr_loc.reshape((2, height * width)).transpose((1, 0)) - - # ctr: [K, 2] -> [K, 1, 2] - # ctr_loc = [H*W, 2] -> [1, H*W, 2] - ctr = ctr.unsqueeze(1) - ctr_loc = ctr_loc.unsqueeze(0) - - # distance: [K, H*W] - distance = paddle.norm((ctr - ctr_loc).astype('float32'), axis=-1) - - # finds center with minimum distance at each location, offset by 1, to reserve id=0 for stuff - instance_id = paddle.argmin( - distance, axis=0).reshape((1, height, width)) + 1 - - return instance_id - - -def get_instance_segmentation(semantic, - ctr_hmp, - offset, - thing_list, - threshold=0.1, - nms_kernel=3, - top_k=None): - """ - Post-processing for instance segmentation, gets class agnostic instance id map. - - Args: - semantic (Tensor): A Tensor of shape [1, H, W], predicted semantic label. - ctr_hmp (Tensor): A Tensor of shape [1, H, W] of raw center heatmap output, where N is the batch size, - for consistent, we only support N=1. - offsets (Tensor): A Tensor of shape [2, H, W] of raw offset output, where N is the batch size, - for consistent, we only support N=1. The order of second dim is (offset_y, offset_x). - thing_list (list): A List of thing class id. - threshold (float, optional): A Float, threshold applied to center heatmap score. Default: 0.1. - nms_kernel (int, optional): An Integer, NMS max pooling kernel size. Default: 3. - top_k (int, optional): An Integer, top k centers to keep. Default: None. - - Returns: - Tensor: Instance segmentation results which shape is [1, H, W]. - Tensor: A Tensor of shape [1, K, 2] where K is the number of center points. The order of second dim is (y, x). - """ - thing_seg = paddle.zeros_like(semantic) - for thing_class in thing_list: - thing_seg = thing_seg + (semantic == thing_class).astype('int64') - thing_seg = (thing_seg > 0).astype('int64') - center = find_instance_center( - ctr_hmp, threshold=threshold, nms_kernel=nms_kernel, top_k=top_k) - if center is None: - return paddle.zeros_like(semantic), center - ins_seg = group_pixels(center, offset) - return thing_seg * ins_seg, center.unsqueeze(0) - - -def merge_semantic_and_instance(semantic, instance, label_divisor, thing_list, - stuff_area, ignore_index): - """ - Post-processing for panoptic segmentation, by merging semantic segmentation label and class agnostic - instance segmentation label. - - Args: - semantic (Tensor): A Tensor of shape [1, H, W], predicted semantic label. - instance (Tensor): A Tensor of shape [1, H, W], predicted instance label. - label_divisor (int): An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id. - thing_list (list): A List of thing class id. - stuff_area (int): An Integer, remove stuff whose area is less tan stuff_area. - ignore_index (int): Specifies a value that is ignored. - - Returns: - Tensor: A Tensor of shape [1, H, W] . The pixels whose value equaling ignore_index is ignored. - The stuff class is represented as format like class_id, while - thing class as class_id * label_divisor + ins_id and ins_id begin from 1. 
- """ - # In case thing mask does not align with semantic prediction - pan_seg = paddle.zeros_like(semantic) + ignore_index - thing_seg = instance > 0 - semantic_thing_seg = paddle.zeros_like(semantic) - for thing_class in thing_list: - semantic_thing_seg += semantic == thing_class - - # keep track of instance id for each class - class_id_tracker = {} - - # paste thing by majority voting - ins_ids = paddle.unique(instance) - for ins_id in ins_ids: - if ins_id == 0: - continue - # Make sure only do majority voting within semantic_thing_seg - thing_mask = paddle.logical_and(instance == ins_id, - semantic_thing_seg == 1) - if paddle.all(paddle.logical_not(thing_mask)): - continue - # get class id for instance of ins_id - sem_ins_id = paddle.gather( - semantic.reshape((-1, )), paddle.nonzero( - thing_mask.reshape((-1, )))) # equal to semantic[thing_mask] - v, c = paddle.unique(sem_ins_id, return_counts=True) - class_id = paddle.gather(v, c.argmax()) - class_id = class_id.numpy()[0] - if class_id in class_id_tracker: - new_ins_id = class_id_tracker[class_id] - else: - class_id_tracker[class_id] = 1 - new_ins_id = 1 - class_id_tracker[class_id] += 1 - - # pan_seg[thing_mask] = class_id * label_divisor + new_ins_id - pan_seg = pan_seg * (paddle.logical_not(thing_mask)) + ( - class_id * label_divisor + new_ins_id) * thing_mask.astype('int64') - - # paste stuff to unoccupied area - class_ids = paddle.unique(semantic) - for class_id in class_ids: - if class_id.numpy() in thing_list: - # thing class - continue - # calculate stuff area - stuff_mask = paddle.logical_and(semantic == class_id, - paddle.logical_not(thing_seg)) - area = paddle.sum(stuff_mask.astype('int64')) - if area >= stuff_area: - # pan_seg[stuff_mask] = class_id - pan_seg = pan_seg * (paddle.logical_not(stuff_mask) - ) + stuff_mask.astype('int64') * class_id - - return pan_seg - - -def inference( - model, - im, - transforms, - thing_list, - label_divisor, - stuff_area, - ignore_index, - threshold=0.1, - nms_kernel=3, - top_k=None, - ori_shape=None, -): - """ - Inference for image. - - Args: - model (paddle.nn.Layer): model to get logits of image. - im (Tensor): the input image. - transforms (list): Transforms for image. - thing_list (list): A List of thing class id. - label_divisor (int): An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id. - stuff_area (int): An Integer, remove stuff whose area is less tan stuff_area. - ignore_index (int): Specifies a value that is ignored. - threshold (float, optional): A Float, threshold applied to center heatmap score. Default: 0.1. - nms_kernel (int, optional): An Integer, NMS max pooling kernel size. Default: 3. - top_k (int, optional): An Integer, top k centers to keep. Default: None. - ori_shape (list): Origin shape of image. - - Returns: - list: A list of [semantic, semantic_softmax, instance, panoptic, ctr_hmp]. - semantic: Semantic segmentation results with shape [1, 1, H, W], which value is 0, 1, 2... - semantic_softmax: A Tensor represent probabilities for each class, which shape is [1, num_classes, H, W]. - instance: Instance segmentation results with class agnostic, which value is 0, 1, 2, ..., and 0 is stuff. - panoptic: Panoptic segmentation results which value is ignore_index, stuff_id, thing_id * label_divisor + ins_id , ins_id >= 1. 
- """ - logits = model(im) - # semantic: [1, c, h, w], center: [1, 1, h, w], offset: [1, 2, h, w] - semantic, ctr_hmp, offset = logits - semantic = paddle.argmax(semantic, axis=1, keepdim=True) - semantic = semantic.squeeze(0) # shape: [1, h, w] - semantic_softmax = F.softmax(logits[0], axis=1).squeeze() - ctr_hmp = ctr_hmp.squeeze(0) # shape: [1, h, w] - offset = offset.squeeze(0) # shape: [2, h, w] - - instance, center = get_instance_segmentation( - semantic=semantic, - ctr_hmp=ctr_hmp, - offset=offset, - thing_list=thing_list, - threshold=threshold, - nms_kernel=nms_kernel, - top_k=top_k) - panoptic = merge_semantic_and_instance(semantic, instance, label_divisor, - thing_list, stuff_area, ignore_index) - - # Recover to origin shape - # semantic: 0, 1, 2, 3... - # instance: 0, 1, 2, 3, 4, 5... and the 0 is stuff. - # panoptic: ignore_index, stuff_id, thing_id * label_divisor + ins_id , ins_id >= 1. - results = [semantic, semantic_softmax, instance, panoptic, ctr_hmp] - if ori_shape is not None: - results = [i.unsqueeze(0) for i in results] - results = [ - reverse_transform(i, ori_shape=ori_shape, transforms=transforms) - for i in results - ] - - return results diff --git a/contrib/PanopticDeepLab/paddleseg/core/predict.py b/contrib/PanopticDeepLab/paddleseg/core/predict.py deleted file mode 100644 index eb0249033d..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/core/predict.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import math - -import cv2 -import numpy as np -import paddle - -from paddleseg import utils -from paddleseg.core import infer -from paddleseg.utils import logger, progbar - - -def mkdir(path): - sub_dir = os.path.dirname(path) - if not os.path.exists(sub_dir): - os.makedirs(sub_dir) - - -def partition_list(arr, m): - """split the list 'arr' into m pieces""" - n = int(math.ceil(len(arr) / float(m))) - return [arr[i:i + n] for i in range(0, len(arr), n)] - - -def get_save_name(im_path, im_dir): - """get the saved name""" - if im_dir is not None: - im_file = im_path.replace(im_dir, '') - else: - im_file = os.path.basename(im_path) - if im_file[0] == '/': - im_file = im_file[1:] - return im_file - - -def add_info_to_save_path(save_path, info): - """Add more information to save path""" - fname, fextension = os.path.splitext(save_path) - fname = '_'.join([fname, info]) - save_path = ''.join([fname, fextension]) - return save_path - - -def predict(model, - model_path, - image_list, - transforms, - thing_list, - label_divisor, - stuff_area, - ignore_index, - image_dir=None, - save_dir='output', - threshold=0.1, - nms_kernel=7, - top_k=200): - """ - predict and visualize the image_list. - - Args: - model (nn.Layer): Used to predict for input image. - model_path (str): The path of pretrained model. - image_list (list): A list of image path to be predicted. - transforms (transform.Compose): Preprocess for input image. - thing_list (list): A List of thing class id. 
diff --git a/contrib/PanopticDeepLab/paddleseg/core/predict.py b/contrib/PanopticDeepLab/paddleseg/core/predict.py
deleted file mode 100644 index eb0249033d..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/core/predict.py
+++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -#
-# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -#
-# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and
-# limitations under the License. - -import os -import math - -import cv2 -import numpy as np -import paddle -
-from paddleseg import utils -from paddleseg.core import infer -from paddleseg.utils import logger, progbar - -
-def mkdir(path): - sub_dir = os.path.dirname(path) - if not os.path.exists(sub_dir): - os.makedirs(sub_dir) - -
-def partition_list(arr, m): - """split the list 'arr' into m pieces""" - n = int(math.ceil(len(arr) / float(m)))
- return [arr[i:i + n] for i in range(0, len(arr), n)] - -
-def get_save_name(im_path, im_dir): - """get the saved name""" - if im_dir is not None:
- im_file = im_path.replace(im_dir, '') - else: - im_file = os.path.basename(im_path) - if im_file[0] == '/':
- im_file = im_file[1:] - return im_file - -
-def add_info_to_save_path(save_path, info): - """Add more information to save path"""
- fname, fextension = os.path.splitext(save_path) - fname = '_'.join([fname, info]) - save_path = ''.join([fname, fextension])
- return save_path - -
-def predict(model, - model_path, - image_list, - transforms, - thing_list, - label_divisor, - stuff_area,
- ignore_index, - image_dir=None, - save_dir='output', - threshold=0.1, - nms_kernel=7, - top_k=200): - """
- Predict and visualize the images in image_list. - - Args: - model (nn.Layer): Used to predict for input image.
- model_path (str): The path of pretrained model. - image_list (list): A list of image paths to be predicted.
- transforms (transform.Compose): Preprocess for input image. - thing_list (list): A List of thing class id.
- label_divisor (int): An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
- stuff_area (int): An Integer, remove stuff whose area is less than stuff_area.
- ignore_index (int): Specifies a value that is ignored.
- image_dir (str, optional): The root directory of the images predicted. Default: None.
- save_dir (str, optional): The directory to save the visualized results. Default: 'output'.
- threshold(float, optional): Threshold applied to center heatmap score. Default: 0.1.
- nms_kernel(int, optional): NMS max pooling kernel size. Default: 7.
- top_k(int, optional): Top k centers to keep. Default: 200. - """
- utils.utils.load_entire_model(model, model_path) - model.eval() - nranks = paddle.distributed.get_world_size()
- local_rank = paddle.distributed.get_rank() - if nranks > 1: - img_lists = partition_list(image_list, nranks)
- else: - img_lists = [image_list] - - semantic_save_dir = os.path.join(save_dir, 'semantic')
- instance_save_dir = os.path.join(save_dir, 'instance') - panoptic_save_dir = os.path.join(save_dir, 'panoptic') -
- colormap = utils.cityscape_colormap() - - logger.info("Start to predict...")
- progbar_pred = progbar.Progbar(target=len(img_lists[0]), verbose=1) - with paddle.no_grad():
- for i, im_path in enumerate(img_lists[local_rank]): - ori_im = cv2.imread(im_path) - ori_shape = ori_im.shape[:2]
- im, _ = transforms(ori_im) - im = im[np.newaxis, ...] - im = paddle.to_tensor(im) -
- semantic, semantic_softmax, instance, panoptic, ctr_hmp = infer.inference( - model=model, - im=im,
- transforms=transforms.transforms, - thing_list=thing_list, - label_divisor=label_divisor, - stuff_area=stuff_area,
- ignore_index=ignore_index, - threshold=threshold, - nms_kernel=nms_kernel, - top_k=top_k, - ori_shape=ori_shape)
- semantic = semantic.squeeze().numpy() - instance = instance.squeeze().numpy() - panoptic = panoptic.squeeze().numpy() -
- im_file = get_save_name(im_path, image_dir) -
- # Visualize semantic segmentation results - save_path = os.path.join(semantic_save_dir, im_file) - mkdir(save_path)
- utils.visualize_semantic( - semantic, save_path=save_path, colormap=colormap)
- # Save added image for semantic segmentation results - save_path_ = add_info_to_save_path(save_path, 'add')
- utils.visualize_semantic( - semantic, save_path=save_path_, colormap=colormap, image=ori_im)
- # panoptic to semantic - ins_mask = panoptic > label_divisor - pan_to_sem = panoptic.copy()
- pan_to_sem[ins_mask] = pan_to_sem[ins_mask] // label_divisor - save_path_ = add_info_to_save_path(save_path,
- 'panoptic_to_semantic') - utils.visualize_semantic( - pan_to_sem, save_path=save_path_, colormap=colormap)
- save_path_ = add_info_to_save_path(save_path, - 'panoptic_to_semantic_added') - utils.visualize_semantic(
- pan_to_sem, - save_path=save_path_, - colormap=colormap, - image=ori_im) -
- # Visualize instance segmentation results - pan_to_ins = panoptic.copy() - ins_mask = pan_to_ins > label_divisor
- pan_to_ins[~ins_mask] = 0 - save_path = os.path.join(instance_save_dir, im_file) - mkdir(save_path)
- utils.visualize_instance(pan_to_ins, save_path=save_path)
- # Save added image for instance segmentation results - save_path_ = add_info_to_save_path(save_path, 'added')
- utils.visualize_instance( - pan_to_ins, save_path=save_path_, image=ori_im) -
- # Visualize panoptic segmentation results - save_path = os.path.join(panoptic_save_dir, im_file) - mkdir(save_path)
- utils.visualize_panoptic( - panoptic, - save_path=save_path, - label_divisor=label_divisor, -
colormap=colormap, - ignore_index=ignore_index) - # Save added image for panoptic segmentation results - save_path_ = add_info_to_save_path(save_path, 'added') - utils.visualize_panoptic( - panoptic, - save_path=save_path_, - label_divisor=label_divisor, - colormap=colormap, - image=ori_im, - ignore_index=ignore_index) - - progbar_pred.update(i + 1) diff --git a/contrib/PanopticDeepLab/paddleseg/core/train.py b/contrib/PanopticDeepLab/paddleseg/core/train.py deleted file mode 100644 index 4245489dd9..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/core/train.py +++ /dev/null @@ -1,315 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import time -from collections import deque -import shutil - -import paddle -import paddle.nn.functional as F - -from paddleseg.utils import TimeAverager, calculate_eta, resume, logger -from paddleseg.core.val import evaluate - - -def check_logits_losses(logits_list, losses): - len_logits = len(logits_list) - len_losses = len(losses['types']) - if len_logits != len_losses: - raise RuntimeError( - 'The length of logits_list should equal to the types of loss config: {} != {}.' - .format(len_logits, len_losses)) - - -def loss_computation(logits_list, semantic, semantic_weights, center, - center_weights, offset, offset_weights, losses): - # semantic loss - semantic_loss = losses['types'][0](logits_list[0], semantic, - semantic_weights) - semantic_loss = semantic_loss * losses['coef'][0] - - # center loss - center_loss = losses['types'][1](logits_list[1], center) - center_weights = (center_weights.unsqueeze(1)).expand_as(center_loss) - center_loss = center_loss * center_weights - if center_loss.sum() > 0: - center_loss = center_loss.sum() / center_weights.sum() - else: - center_loss = center_loss.sum() * 0 - center_loss = center_loss * losses['coef'][1] - - # offset loss - offset_loss = losses['types'][2](logits_list[2], offset) - offset_weights = (offset_weights.unsqueeze(1)).expand_as(offset_loss) - offset_loss = offset_loss * offset_weights - if offset_weights.sum() > 0: - offset_loss = offset_loss.sum() / offset_weights.sum() - else: - offset_loss = offset_loss.sum() * 0 - offset_loss = offset_loss * losses['coef'][2] - - loss_list = [semantic_loss, center_loss, offset_loss] - - return loss_list - - -def train(model, - train_dataset, - val_dataset=None, - optimizer=None, - save_dir='output', - iters=10000, - batch_size=2, - resume_model=None, - save_interval=1000, - log_iters=10, - num_workers=0, - use_vdl=False, - losses=None, - keep_checkpoint_max=5, - threshold=0.1, - nms_kernel=7, - top_k=200): - """ - Launch training. - - Args: - model(nn.Layer): A sementic segmentation model. - train_dataset (paddle.io.Dataset): Used to read and process training datasets. - val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets. - optimizer (paddle.optimizer.Optimizer): The optimizer. - save_dir (str, optional): The directory for saving the model snapshot. 
Default: 'output'. - iters (int, optional): How may iters to train the model. Defualt: 10000. - batch_size (int, optional): Mini batch size of one gpu or cpu. Default: 2. - resume_model (str, optional): The path of resume model. - save_interval (int, optional): How many iters to save a model snapshot once during training. Default: 1000. - log_iters (int, optional): Display logging information at every log_iters. Default: 10. - num_workers (int, optional): Num workers for data loader. Default: 0. - use_vdl (bool, optional): Whether to record the data to VisualDL during training. Default: False. - losses (dict): A dict including 'types' and 'coef'. The length of coef should equal to 1 or len(losses['types']). - The 'types' item is a list of object of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficient. - keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5. - threshold (float, optional): A Float, threshold applied to center heatmap score. Default: 0.1. - nms_kernel (int, optional): An Integer, NMS max pooling kernel size. Default: 7. - top_k (int, optional): An Integer, top k centers to keep. Default: 200. - """ - model.train() - nranks = paddle.distributed.ParallelEnv().nranks - local_rank = paddle.distributed.ParallelEnv().local_rank - - start_iter = 0 - if resume_model is not None: - start_iter = resume(model, optimizer, resume_model) - - if not os.path.isdir(save_dir): - if os.path.exists(save_dir): - os.remove(save_dir) - os.makedirs(save_dir) - - if nranks > 1: - # Initialize parallel environment if not done. - if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized( - ): - paddle.distributed.init_parallel_env() - ddp_model = paddle.DataParallel(model) - else: - ddp_model = paddle.DataParallel(model) - - batch_sampler = paddle.io.DistributedBatchSampler( - train_dataset, batch_size=batch_size, shuffle=True, drop_last=True) - - loader = paddle.io.DataLoader( - train_dataset, - batch_sampler=batch_sampler, - num_workers=num_workers, - return_list=True, - ) - - if use_vdl: - from visualdl import LogWriter - log_writer = LogWriter(save_dir) - - avg_loss = 0.0 - avg_loss_list = [] - iters_per_epoch = len(batch_sampler) - best_pq = -1.0 - best_model_iter = -1 - reader_cost_averager = TimeAverager() - batch_cost_averager = TimeAverager() - save_models = deque() - batch_start = time.time() - - iter = start_iter - while iter < iters: - for data in loader: - iter += 1 - if iter > iters: - break - reader_cost_averager.record(time.time() - batch_start) - images = data[0] - semantic = data[1] - semantic_weights = data[2] - center = data[3] - center_weights = data[4] - offset = data[5] - offset_weights = data[6] - foreground = data[7] - - if nranks > 1: - logits_list = ddp_model(images) - else: - logits_list = model(images) - - loss_list = loss_computation( - logits_list=logits_list, - losses=losses, - semantic=semantic, - semantic_weights=semantic_weights, - center=center, - center_weights=center_weights, - offset=offset, - offset_weights=offset_weights) - loss = sum(loss_list) - loss.backward() - - optimizer.step() - lr = optimizer.get_lr() - if isinstance(optimizer._learning_rate, - paddle.optimizer.lr.LRScheduler): - optimizer._learning_rate.step() - model.clear_gradients() - avg_loss += loss.numpy()[0] - if not avg_loss_list: - avg_loss_list = [l.numpy() for l in loss_list] - else: - for i in range(len(loss_list)): - avg_loss_list[i] += loss_list[i].numpy() - batch_cost_averager.record( - time.time() - 
batch_start, num_samples=batch_size) - - if (iter) % log_iters == 0 and local_rank == 0: - avg_loss /= log_iters - avg_loss_list = [l[0] / log_iters for l in avg_loss_list] - remain_iters = iters - iter - avg_train_batch_cost = batch_cost_averager.get_average() - avg_train_reader_cost = reader_cost_averager.get_average() - eta = calculate_eta(remain_iters, avg_train_batch_cost) - logger.info( - "[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}" - .format((iter - 1) // iters_per_epoch + 1, iter, iters, - avg_loss, lr, avg_train_batch_cost, - avg_train_reader_cost, - batch_cost_averager.get_ips_average(), eta)) - logger.info( - "[LOSS] loss={:.4f}, semantic_loss={:.4f}, center_loss={:.4f}, offset_loss={:.4f}" - .format(avg_loss, avg_loss_list[0], avg_loss_list[1], - avg_loss_list[2])) - if use_vdl: - log_writer.add_scalar('Train/loss', avg_loss, iter) - # Record all losses if there are more than 2 losses. - if len(avg_loss_list) > 1: - avg_loss_dict = {} - for i, value in enumerate(avg_loss_list): - avg_loss_dict['loss_' + str(i)] = value - for key, value in avg_loss_dict.items(): - log_tag = 'Train/' + key - log_writer.add_scalar(log_tag, value, iter) - - log_writer.add_scalar('Train/lr', lr, iter) - log_writer.add_scalar('Train/batch_cost', - avg_train_batch_cost, iter) - log_writer.add_scalar('Train/reader_cost', - avg_train_reader_cost, iter) - - avg_loss = 0.0 - avg_loss_list = [] - reader_cost_averager.reset() - batch_cost_averager.reset() - - # save model - if (iter % save_interval == 0 or iter == iters) and local_rank == 0: - current_save_dir = os.path.join(save_dir, - "iter_{}".format(iter)) - if not os.path.isdir(current_save_dir): - os.makedirs(current_save_dir) - paddle.save(model.state_dict(), - os.path.join(current_save_dir, 'model.pdparams')) - paddle.save(optimizer.state_dict(), - os.path.join(current_save_dir, 'model.pdopt')) - save_models.append(current_save_dir) - if len(save_models) > keep_checkpoint_max > 0: - model_to_remove = save_models.popleft() - shutil.rmtree(model_to_remove) - - # eval model - if (iter % save_interval == 0 or iter == iters) and ( - val_dataset is - not None) and local_rank == 0 and iter > iters // 2: - num_workers = 1 if num_workers > 0 else 0 - panoptic_results, semantic_results, instance_results = evaluate( - model, - val_dataset, - threshold=threshold, - nms_kernel=nms_kernel, - top_k=top_k, - num_workers=num_workers, - print_detail=False) - pq = panoptic_results['pan_seg']['All']['pq'] - miou = semantic_results['sem_seg']['mIoU'] - map = instance_results['ins_seg']['mAP'] - map50 = instance_results['ins_seg']['mAP50'] - logger.info( - "[EVAL] PQ: {:.4f}, mIoU: {:.4f}, mAP: {:.4f}, mAP50: {:.4f}" - .format(pq, miou, map, map50)) - model.train() - - # save best model and add evaluate results to vdl - if (iter % save_interval == 0 or iter == iters) and local_rank == 0: - if val_dataset is not None and iter > iters // 2: - if pq > best_pq: - best_pq = pq - best_model_iter = iter - best_model_dir = os.path.join(save_dir, "best_model") - paddle.save( - model.state_dict(), - os.path.join(best_model_dir, 'model.pdparams')) - logger.info( - '[EVAL] The model with the best validation pq ({:.4f}) was saved at iter {}.' 
- .format(best_pq, best_model_iter)) - - if use_vdl: - log_writer.add_scalar('Evaluate/PQ', pq, iter) - log_writer.add_scalar('Evaluate/mIoU', miou, iter) - log_writer.add_scalar('Evaluate/mAP', map, iter) - log_writer.add_scalar('Evaluate/mAP50', map50, iter) - batch_start = time.time() - - # Calculate flops. - if local_rank == 0: - - def count_syncbn(m, x, y): - x = x[0] - nelements = x.numel() - m.total_ops += int(2 * nelements) - - _, c, h, w = images.shape - flops = paddle.flops( - model, [1, c, h, w], - custom_ops={paddle.nn.SyncBatchNorm: count_syncbn}) - - # Sleep for half a second to let dataloader release resources. - time.sleep(0.5) - if use_vdl: - log_writer.close() diff --git a/contrib/PanopticDeepLab/paddleseg/core/val.py b/contrib/PanopticDeepLab/paddleseg/core/val.py deleted file mode 100644 index 9ee82ad219..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/core/val.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -from collections import OrderedDict - -import numpy as np -import time -import paddle -import paddle.nn.functional as F - -from paddleseg.utils import metrics, TimeAverager, calculate_eta, logger, progbar -from paddleseg.core import infer -from paddleseg.utils.evaluation import SemanticEvaluator, InstanceEvaluator, PanopticEvaluator - -np.set_printoptions(suppress=True) - - -def evaluate(model, - eval_dataset, - threshold=0.1, - nms_kernel=7, - top_k=200, - num_workers=0, - print_detail=True): - """ - Launch evaluation. - - Args: - model(nn.Layer): A sementic segmentation model. - eval_dataset (paddle.io.Dataset): Used to read and process validation datasets. - threshold (float, optional): Threshold applied to center heatmap score. Defalut: 0.1. - nms_kernel (int, optional): NMS max pooling kernel size. Default: 7. - top_k (int, optional): Top k centers to keep. Default: 200. - num_workers (int, optional): Num workers for data loader. Default: 0. - print_detail (bool, optional): Whether to print detailed information about the evaluation process. Default: True. - - Returns: - dict: Panoptic evaluation results which includes PQ, RQ, SQ for all, each class, Things and stuff. - dict: Semantic evaluation results which includes mIoU, fwIoU, mACC and pACC. - dict: Instance evaluation results which includes mAP and mAP50, and also AP and AP50 for each class. - - """ - model.eval() - nranks = paddle.distributed.ParallelEnv().nranks - local_rank = paddle.distributed.ParallelEnv().local_rank - if nranks > 1: - # Initialize parallel environment if not done. 
- if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized( - ): - paddle.distributed.init_parallel_env() - batch_sampler = paddle.io.DistributedBatchSampler( - eval_dataset, batch_size=1, shuffle=False, drop_last=False) - loader = paddle.io.DataLoader( - eval_dataset, - batch_sampler=batch_sampler, - num_workers=num_workers, - return_list=True, - ) - - total_iters = len(loader) - semantic_metric = SemanticEvaluator( - eval_dataset.num_classes, ignore_index=eval_dataset.ignore_index) - instance_metric_AP50 = InstanceEvaluator( - eval_dataset.num_classes, - overlaps=0.5, - thing_list=eval_dataset.thing_list) - instance_metric_AP = InstanceEvaluator( - eval_dataset.num_classes, - overlaps=list(np.arange(0.5, 1.0, 0.05)), - thing_list=eval_dataset.thing_list) - panoptic_metric = PanopticEvaluator( - num_classes=eval_dataset.num_classes, - thing_list=eval_dataset.thing_list, - ignore_index=eval_dataset.ignore_index, - label_divisor=eval_dataset.label_divisor) - - if print_detail: - logger.info( - "Start evaluating (total_samples={}, total_iters={})...".format( - len(eval_dataset), total_iters)) - progbar_val = progbar.Progbar(target=total_iters, verbose=1) - reader_cost_averager = TimeAverager() - batch_cost_averager = TimeAverager() - batch_start = time.time() - with paddle.no_grad(): - for iter, data in enumerate(loader): - reader_cost_averager.record(time.time() - batch_start) - im = data[0] - raw_semantic_label = data[1] # raw semantic label. - raw_instance_label = data[2] - raw_panoptic_label = data[3] - ori_shape = raw_semantic_label.shape[-2:] - - semantic, semantic_softmax, instance, panoptic, ctr_hmp = infer.inference( - model=model, - im=im, - transforms=eval_dataset.transforms.transforms, - thing_list=eval_dataset.thing_list, - label_divisor=eval_dataset.label_divisor, - stuff_area=eval_dataset.stuff_area, - ignore_index=eval_dataset.ignore_index, - threshold=threshold, - nms_kernel=nms_kernel, - top_k=top_k, - ori_shape=ori_shape) - semantic = semantic.squeeze().numpy() - semantic_softmax = semantic_softmax.squeeze().numpy() - instance = instance.squeeze().numpy() - panoptic = panoptic.squeeze().numpy() - ctr_hmp = ctr_hmp.squeeze().numpy() - raw_semantic_label = raw_semantic_label.squeeze().numpy() - raw_instance_label = raw_instance_label.squeeze().numpy() - raw_panoptic_label = raw_panoptic_label.squeeze().numpy() - - # update metric for semantic, instance, panoptic - semantic_metric.update(semantic, raw_semantic_label) - - gts = instance_metric_AP.convert_gt_map(raw_semantic_label, - raw_instance_label) - # print([i[0] for i in gts]) - preds = instance_metric_AP.convert_pred_map(semantic_softmax, - panoptic) - # print([(i[0], i[1]) for i in preds ]) - ignore_mask = raw_semantic_label == eval_dataset.ignore_index - instance_metric_AP.update(preds, gts, ignore_mask=ignore_mask) - instance_metric_AP50.update(preds, gts, ignore_mask=ignore_mask) - - panoptic_metric.update(panoptic, raw_panoptic_label) - - batch_cost_averager.record( - time.time() - batch_start, num_samples=len(im)) - batch_cost = batch_cost_averager.get_average() - reader_cost = reader_cost_averager.get_average() - - if local_rank == 0: - progbar_val.update(iter + 1, [('batch_cost', batch_cost), - ('reader cost', reader_cost)]) - reader_cost_averager.reset() - batch_cost_averager.reset() - batch_start = time.time() - - semantic_results = semantic_metric.evaluate() - panoptic_results = panoptic_metric.evaluate() - instance_results = OrderedDict() - ins_ap = instance_metric_AP.evaluate() - 
ins_ap50 = instance_metric_AP50.evaluate() - instance_results['ins_seg'] = OrderedDict() - instance_results['ins_seg']['mAP'] = ins_ap['ins_seg']['mAP'] - instance_results['ins_seg']['AP'] = ins_ap['ins_seg']['AP'] - instance_results['ins_seg']['mAP50'] = ins_ap50['ins_seg']['mAP'] - instance_results['ins_seg']['AP50'] = ins_ap50['ins_seg']['AP'] - - if print_detail: - logger.info(panoptic_results) - print() - logger.info(semantic_results) - print() - logger.info(instance_results) - print() - - pq = panoptic_results['pan_seg']['All']['pq'] - miou = semantic_results['sem_seg']['mIoU'] - map = instance_results['ins_seg']['mAP'] - map50 = instance_results['ins_seg']['mAP50'] - logger.info( - "PQ: {:.4f}, mIoU: {:.4f}, mAP: {:.4f}, mAP50: {:.4f}".format( - pq, miou, map, map50)) - - return panoptic_results, semantic_results, instance_results diff --git a/contrib/PanopticDeepLab/paddleseg/cvlibs/__init__.py b/contrib/PanopticDeepLab/paddleseg/cvlibs/__init__.py deleted file mode 100644 index 5fcb1d6c10..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/cvlibs/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . import manager -from . import param_init -from .config import Config diff --git a/contrib/PanopticDeepLab/paddleseg/cvlibs/callbacks.py b/contrib/PanopticDeepLab/paddleseg/cvlibs/callbacks.py deleted file mode 100644 index 1188b2cdac..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/cvlibs/callbacks.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import time - -import numpy as np -import paddle -from paddle.distributed.parallel import ParallelEnv -from visualdl import LogWriter -from paddleseg.utils.progbar import Progbar -import paddleseg.utils.logger as logger - - -class CallbackList(object): - """ - Container abstracting a list of callbacks. - - Args: - callbacks (list[Callback]): List of `Callback` instances. 
- """ - - def __init__(self, callbacks=None): - callbacks = callbacks or [] - self.callbacks = [c for c in callbacks] - - def append(self, callback): - self.callbacks.append(callback) - - def set_params(self, params): - for callback in self.callbacks: - callback.set_params(params) - - def set_model(self, model): - for callback in self.callbacks: - callback.set_model(model) - - def set_optimizer(self, optimizer): - for callback in self.callbacks: - callback.set_optimizer(optimizer) - - def on_iter_begin(self, iter, logs=None): - """Called right before processing a batch. - """ - logs = logs or {} - for callback in self.callbacks: - callback.on_iter_begin(iter, logs) - self._t_enter_iter = time.time() - - def on_iter_end(self, iter, logs=None): - """Called at the end of a batch. - """ - logs = logs or {} - for callback in self.callbacks: - callback.on_iter_end(iter, logs) - self._t_exit_iter = time.time() - - def on_train_begin(self, logs=None): - """Called at the beginning of training. - """ - logs = logs or {} - for callback in self.callbacks: - callback.on_train_begin(logs) - - def on_train_end(self, logs=None): - """Called at the end of training. - """ - logs = logs or {} - for callback in self.callbacks: - callback.on_train_end(logs) - - def __iter__(self): - return iter(self.callbacks) - - -class Callback(object): - """Abstract base class used to build new callbacks. - """ - - def __init__(self): - self.validation_data = None - - def set_params(self, params): - self.params = params - - def set_model(self, model): - self.model = model - - def set_optimizer(self, optimizer): - self.optimizer = optimizer - - def on_iter_begin(self, iter, logs=None): - pass - - def on_iter_end(self, iter, logs=None): - pass - - def on_train_begin(self, logs=None): - pass - - def on_train_end(self, logs=None): - pass - - -class BaseLogger(Callback): - def __init__(self, period=10): - super(BaseLogger, self).__init__() - self.period = period - - def _reset(self): - self.totals = {} - - def on_train_begin(self, logs=None): - self.totals = {} - - def on_iter_end(self, iter, logs=None): - logs = logs or {} - #(iter - 1) // iters_per_epoch + 1 - for k, v in logs.items(): - if k in self.totals.keys(): - self.totals[k] += v - else: - self.totals[k] = v - - if iter % self.period == 0 and ParallelEnv().local_rank == 0: - - for k in self.totals: - logs[k] = self.totals[k] / self.period - self._reset() - - -class TrainLogger(Callback): - def __init__(self, log_freq=10): - self.log_freq = log_freq - - def _calculate_eta(self, remaining_iters, speed): - if remaining_iters < 0: - remaining_iters = 0 - remaining_time = int(remaining_iters * speed) - result = "{:0>2}:{:0>2}:{:0>2}" - arr = [] - for i in range(2, -1, -1): - arr.append(int(remaining_time / 60**i)) - remaining_time %= 60**i - return result.format(*arr) - - def on_iter_end(self, iter, logs=None): - - if iter % self.log_freq == 0 and ParallelEnv().local_rank == 0: - total_iters = self.params["total_iters"] - iters_per_epoch = self.params["iters_per_epoch"] - remaining_iters = total_iters - iter - eta = self._calculate_eta(remaining_iters, logs["batch_cost"]) - current_epoch = (iter - 1) // self.params["iters_per_epoch"] + 1 - loss = logs["loss"] - lr = self.optimizer.get_lr() - batch_cost = logs["batch_cost"] - reader_cost = logs["reader_cost"] - - logger.info( - "[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.4f} | ETA {}" - .format(current_epoch, iter, total_iters, loss, lr, batch_cost, - reader_cost, eta)) - - -class 
ProgbarLogger(Callback): - def __init__(self): - super(ProgbarLogger, self).__init__() - - def on_train_begin(self, logs=None): - self.verbose = self.params["verbose"] - self.total_iters = self.params["total_iters"] - self.target = self.params["total_iters"] - self.progbar = Progbar(target=self.target, verbose=self.verbose) - self.seen = 0 - self.log_values = [] - - def on_iter_begin(self, iter, logs=None): - #self.seen = 0 - if self.seen < self.target: - self.log_values = [] - - def on_iter_end(self, iter, logs=None): - logs = logs or {} - self.seen += 1 - for k in self.params['metrics']: - if k in logs: - self.log_values.append((k, logs[k])) - - #if self.verbose and self.seen < self.target and ParallelEnv.local_rank == 0: - #print(self.log_values) - if self.seen < self.target: - self.progbar.update(self.seen, self.log_values) - - -class ModelCheckpoint(Callback): - def __init__(self, - save_dir, - monitor="miou", - save_best_only=False, - save_params_only=True, - mode="max", - period=1): - - super(ModelCheckpoint, self).__init__() - self.monitor = monitor - self.save_dir = save_dir - self.save_best_only = save_best_only - self.save_params_only = save_params_only - self.period = period - self.iters_since_last_save = 0 - - if mode == "min": - self.monitor_op = np.less - self.best = np.Inf - elif mode == "max": - self.monitor_op = np.greater - self.best = -np.Inf - else: - raise RuntimeError("`mode` is neither \"min\" nor \"max\"!") - - def on_train_begin(self, logs=None): - self.verbose = self.params["verbose"] - save_dir = self.save_dir - if not os.path.isdir(save_dir): - if os.path.exists(save_dir): - os.remove(save_dir) - os.makedirs(save_dir) - - def on_iter_end(self, iter, logs=None): - logs = logs or {} - self.iters_since_last_save += 1 - current_save_dir = os.path.join(self.save_dir, "iter_{}".format(iter)) - current_save_dir = os.path.abspath(current_save_dir) - #if self.iters_since_last_save % self.period and ParallelEnv().local_rank == 0: - #self.iters_since_last_save = 0 - if iter % self.period == 0 and ParallelEnv().local_rank == 0: - if self.verbose > 0: - print("iter {iter_num}: saving model to {path}".format( - iter_num=iter, path=current_save_dir)) - - paddle.save(self.model.state_dict(), - os.path.join(current_save_dir, 'model.pdparams')) - - if not self.save_params_only: - paddle.save(self.optimizer.state_dict(), - os.path.join(current_save_dir, 'model.pdopt')) - - -class VisualDL(Callback): - def __init__(self, log_dir="./log", freq=1): - super(VisualDL, self).__init__() - self.log_dir = log_dir - self.freq = freq - - def on_train_begin(self, logs=None): - self.writer = LogWriter(self.log_dir) - - def on_iter_end(self, iter, logs=None): - logs = logs or {} - if iter % self.freq == 0 and ParallelEnv().local_rank == 0: - for k, v in logs.items(): - self.writer.add_scalar("Train/{}".format(k), v, iter) - - self.writer.flush() - - def on_train_end(self, logs=None): - self.writer.close() diff --git a/contrib/PanopticDeepLab/paddleseg/cvlibs/config.py b/contrib/PanopticDeepLab/paddleseg/cvlibs/config.py deleted file mode 100644 index 5ab1d29872..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/cvlibs/config.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
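The callback classes above follow a Keras-style protocol driven by a training loop. A minimal sketch of how they might be wired together; the model, optimizer and per-iteration cost numbers are stand-ins, not part of the removed module:

import paddle
from paddleseg.cvlibs.callbacks import CallbackList, TrainLogger, ModelCheckpoint

model = paddle.nn.Linear(2, 2)  # stand-in model
optimizer = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())

callbacks = CallbackList([TrainLogger(log_freq=10),
                          ModelCheckpoint(save_dir='./output', period=1000)])
callbacks.set_model(model)
callbacks.set_optimizer(optimizer)
callbacks.set_params({'total_iters': 10000, 'iters_per_epoch': 100,
                      'verbose': 1, 'metrics': ['loss']})

callbacks.on_train_begin()
for it in range(1, 10001):
    callbacks.on_iter_begin(it)
    # ... forward / backward / optimizer step would go here ...
    callbacks.on_iter_end(it, logs={'loss': 0.5, 'batch_cost': 0.1,
                                    'reader_cost': 0.01})
callbacks.on_train_end()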
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import codecs
-import os
-from typing import Any
-
-import paddle
-import yaml
-
-from paddleseg.cvlibs import manager
-
-
-class Config(object):
-    '''
-    Training configuration parsing. Only yaml/yml files are supported.
-
-    The following hyper-parameters are available in the config file:
-        batch_size: The number of samples per gpu.
-        iters: The total training steps.
-        train_dataset: A training data config including type/data_root/transforms/mode.
-            For data type, please refer to paddleseg.datasets.
-            For specific transforms, please refer to paddleseg.transforms.transforms.
-        val_dataset: A validation data config including type/data_root/transforms/mode.
-        optimizer: An optimizer config; currently PaddleSeg only supports sgd with momentum in the config file.
-            In addition, weight_decay could be set as a regularization.
-        learning_rate: A learning rate config. If decay is configured, the learning_rate value is the starting learning rate,
-            where only poly decay is supported using the config file. In addition, decay power and end_lr are tuned experimentally.
-        loss: A loss config. Multi-loss config is available. The loss type order is consistent with the seg model outputs,
-            where the coef term indicates the weight of the corresponding loss. Note that the number of coef must be the same as
-            the number of model outputs; a single loss type may be given if the same loss is used for all outputs, otherwise the
-            number of loss types must be consistent with coef.
-        model: A model config including type/backbone and model-dependent arguments.
-            For model type, please refer to paddleseg.models.
-            For backbone, please refer to paddleseg.models.backbones.
-
-    Args:
-        path (str) : The path of the config file, supports yaml format only.
-
-    Examples:
-
-        from paddleseg.cvlibs.config import Config
-
-        # Create a cfg object with a yaml file path.
-        cfg = Config(yaml_cfg_path)
-
-        # The arguments are parsed lazily, when the corresponding property is used.
-        train_dataset = cfg.train_dataset
-
-        # the argument of model should be parsed after dataset,
-        # since the model builder uses some properties in dataset.
-        model = cfg.model
-        ...
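For concreteness, a yaml file following the schema described in this docstring could look as follows; it is purely illustrative and is not a config shipped with the repository:

# Illustrative config matching the documented schema; all values are examples.
batch_size: 2
iters: 40000

train_dataset:
  type: Cityscapes
  dataset_root: data/cityscapes
  transforms:
    - type: RandomHorizontalFlip
    - type: Normalize
  mode: train

val_dataset:
  type: Cityscapes
  dataset_root: data/cityscapes
  transforms:
    - type: Normalize
  mode: val

optimizer:
  type: sgd
  momentum: 0.9
  weight_decay: 4.0e-5

learning_rate:
  value: 0.01
  decay:
    type: poly
    power: 0.9
    end_lr: 0.0

loss:
  types:
    - type: CrossEntropyLoss
  coef: [1]

model:
  type: FCN
  backbone:
    type: HRNet_W18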
- ''' - - def __init__(self, - path: str, - learning_rate: float = None, - batch_size: int = None, - iters: int = None): - if not path: - raise ValueError('Please specify the configuration file path.') - - if not os.path.exists(path): - raise FileNotFoundError('File {} does not exist'.format(path)) - - self._model = None - self._losses = None - if path.endswith('yml') or path.endswith('yaml'): - self.dic = self._parse_from_yaml(path) - else: - raise RuntimeError('Config file should in yaml format!') - - self.update( - learning_rate=learning_rate, batch_size=batch_size, iters=iters) - - def _update_dic(self, dic, base_dic): - """ - Update config from dic based base_dic - """ - base_dic = base_dic.copy() - for key, val in dic.items(): - if isinstance(val, dict) and key in base_dic: - base_dic[key] = self._update_dic(val, base_dic[key]) - else: - base_dic[key] = val - dic = base_dic - return dic - - def _parse_from_yaml(self, path: str): - '''Parse a yaml file and build config''' - with codecs.open(path, 'r', 'utf-8') as file: - dic = yaml.load(file, Loader=yaml.FullLoader) - - if '_base_' in dic: - cfg_dir = os.path.dirname(path) - base_path = dic.pop('_base_') - base_path = os.path.join(cfg_dir, base_path) - base_dic = self._parse_from_yaml(base_path) - dic = self._update_dic(dic, base_dic) - return dic - - def update(self, - learning_rate: float = None, - batch_size: int = None, - iters: int = None): - '''Update config''' - if learning_rate: - self.dic['learning_rate']['value'] = learning_rate - - if batch_size: - self.dic['batch_size'] = batch_size - - if iters: - self.dic['iters'] = iters - - @property - def batch_size(self) -> int: - return self.dic.get('batch_size', 1) - - @property - def iters(self) -> int: - iters = self.dic.get('iters') - if not iters: - raise RuntimeError('No iters specified in the configuration file.') - return iters - - @property - def learning_rate(self) -> paddle.optimizer.lr.LRScheduler: - _learning_rate = self.dic.get('learning_rate', {}).get('value') - if not _learning_rate: - raise RuntimeError( - 'No learning rate specified in the configuration file.') - - args = self.decay_args - decay_type = args.pop('type') - - if decay_type == 'poly': - lr = _learning_rate - return paddle.optimizer.lr.PolynomialDecay(lr, **args) - else: - raise RuntimeError('Only poly decay support.') - - @property - def optimizer(self) -> paddle.optimizer.Optimizer: - lr = self.learning_rate - args = self.optimizer_args - optimizer_type = args.pop('type') - - if optimizer_type == 'sgd': - return paddle.optimizer.Momentum( - lr, parameters=self.model.parameters(), **args) - elif optimizer_type == 'adam': - return paddle.optimizer.Adam( - lr, parameters=self.model.parameters(), **args) - else: - raise RuntimeError('Only sgd and adam optimizer support.') - - @property - def optimizer_args(self) -> dict: - args = self.dic.get('optimizer', {}).copy() - if args['type'] == 'sgd': - args.setdefault('momentum', 0.9) - - return args - - @property - def decay_args(self) -> dict: - args = self.dic.get('learning_rate', {}).get('decay', { - 'type': 'poly', - 'power': 0.9 - }).copy() - - if args['type'] == 'poly': - args.setdefault('decay_steps', self.iters) - args.setdefault('end_lr', 0) - - return args - - @property - def loss(self) -> dict: - args = self.dic.get('loss', {}).copy() - if 'types' in args and 'coef' in args: - len_types = len(args['types']) - len_coef = len(args['coef']) - if len_types != len_coef: - if len_types == 1: - args['types'] = args['types'] * len_coef - else: - raise 
ValueError( - 'The length of types should equal to coef or equal to 1 in loss config, but they are {} and {}.' - .format(len_types, len_coef)) - else: - raise ValueError( - 'Loss config should contain keys of "types" and "coef"') - - if not self._losses: - self._losses = dict() - for key, val in args.items(): - if key == 'types': - self._losses['types'] = [] - for item in args['types']: - item['ignore_index'] = self.train_dataset.ignore_index - self._losses['types'].append(self._load_object(item)) - else: - self._losses[key] = val - if len(self._losses['coef']) != len(self._losses['types']): - raise RuntimeError( - 'The length of coef should equal to types in loss config: {} != {}.' - .format( - len(self._losses['coef']), len(self._losses['types']))) - return self._losses - - @property - def model(self) -> paddle.nn.Layer: - model_cfg = self.dic.get('model').copy() - if not model_cfg: - raise RuntimeError('No model specified in the configuration file.') - if not 'num_classes' in model_cfg: - if self.train_dataset and hasattr(self.train_dataset, - 'num_classes'): - model_cfg['num_classes'] = self.train_dataset.num_classes - elif self.val_dataset and hasattr(self.val_dataset, 'num_classes'): - model_cfg['num_classes'] = self.val_dataset.num_classes - else: - raise ValueError( - '`num_classes` is not found. Please set it in model, train_dataset or val_dataset' - ) - - if not self._model: - self._model = self._load_object(model_cfg) - return self._model - - @property - def train_dataset(self) -> paddle.io.Dataset: - _train_dataset = self.dic.get('train_dataset', {}).copy() - if not _train_dataset: - return None - return self._load_object(_train_dataset) - - @property - def val_dataset(self) -> paddle.io.Dataset: - _val_dataset = self.dic.get('val_dataset', {}).copy() - if not _val_dataset: - return None - return self._load_object(_val_dataset) - - def _load_component(self, com_name: str) -> Any: - com_list = [ - manager.MODELS, manager.BACKBONES, manager.DATASETS, - manager.TRANSFORMS, manager.LOSSES - ] - - for com in com_list: - if com_name in com.components_dict: - return com[com_name] - else: - raise RuntimeError( - 'The specified component was not found {}.'.format(com_name)) - - def _load_object(self, cfg: dict) -> Any: - cfg = cfg.copy() - if 'type' not in cfg: - raise RuntimeError('No object information in {}.'.format(cfg)) - - component = self._load_component(cfg.pop('type')) - - params = {} - for key, val in cfg.items(): - if self._is_meta_type(val): - params[key] = self._load_object(val) - elif isinstance(val, list): - params[key] = [ - self._load_object(item) - if self._is_meta_type(item) else item for item in val - ] - else: - params[key] = val - - return component(**params) - - def _is_meta_type(self, item: Any) -> bool: - return isinstance(item, dict) and 'type' in item - - def __str__(self) -> str: - return yaml.dump(self.dic) diff --git a/contrib/PanopticDeepLab/paddleseg/cvlibs/manager.py b/contrib/PanopticDeepLab/paddleseg/cvlibs/manager.py deleted file mode 100644 index cd1d105a5e..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/cvlibs/manager.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
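The `_load_component`/`_load_object` pair above resolves a `type` name against the component managers and recursively instantiates nested configs. A condensed, self-contained sketch of that recursion, registering two toy components whose names are invented for illustration:

from paddleseg.cvlibs import manager

@manager.MODELS.add_component
class TinyModel:
    def __init__(self, num_classes, backbone=None):
        self.num_classes = num_classes
        self.backbone = backbone

@manager.BACKBONES.add_component
class TinyBackbone:
    def __init__(self, width=18):
        self.width = width

def load_object(cfg):
    # Condensed re-implementation of Config._load_object, for illustration.
    cfg = cfg.copy()
    name = cfg.pop('type')
    component = None
    for registry in (manager.MODELS, manager.BACKBONES):
        if name in registry.components_dict:
            component = registry[name]
            break
    params = {k: load_object(v) if isinstance(v, dict) and 'type' in v else v
              for k, v in cfg.items()}
    return component(**params)

m = load_object({'type': 'TinyModel', 'num_classes': 19,
                 'backbone': {'type': 'TinyBackbone', 'width': 32}})
print(type(m.backbone).__name__, m.backbone.width)  # TinyBackbone 32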
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-from collections.abc import Sequence
-
-
-class ComponentManager:
-    """
-    Implement a manager class to add the new component properly.
-    The component can be added as either class or function type.
-
-    Args:
-        name (str): The name of component.
-
-    Returns:
-        A callable object of ComponentManager.
-
-    Examples 1:
-
-        from paddleseg.cvlibs.manager import ComponentManager
-
-        model_manager = ComponentManager()
-
-        class AlexNet: ...
-        class ResNet: ...
-
-        model_manager.add_component(AlexNet)
-        model_manager.add_component(ResNet)
-
-        # Or pass a sequence directly:
-        model_manager.add_component([AlexNet, ResNet])
-        print(model_manager.components_dict)
-        # {'AlexNet': <class 'AlexNet'>, 'ResNet': <class 'ResNet'>}
-
-    Examples 2:
-
-        # Or an easier way, using it as a Python decorator, just adding it above the class declaration.
-        from paddleseg.cvlibs.manager import ComponentManager
-
-        model_manager = ComponentManager()
-
-        @model_manager.add_component
-        class AlexNet: ...
-
-        @model_manager.add_component
-        class ResNet: ...
-
-        print(model_manager.components_dict)
-        # {'AlexNet': <class 'AlexNet'>, 'ResNet': <class 'ResNet'>}
-    """
-
-    def __init__(self, name=None):
-        self._components_dict = dict()
-        self._name = name
-
-    def __len__(self):
-        return len(self._components_dict)
-
-    def __repr__(self):
-        name_str = self._name if self._name else self.__class__.__name__
-        return "{}:{}".format(name_str, list(self._components_dict.keys()))
-
-    def __getitem__(self, item):
-        if item not in self._components_dict.keys():
-            raise KeyError("{} does not exist in available {}".format(
-                item, self))
-        return self._components_dict[item]
-
-    @property
-    def components_dict(self):
-        return self._components_dict
-
-    @property
-    def name(self):
-        return self._name
-
-    def _add_single_component(self, component):
-        """
-        Add a single component into the corresponding manager.
-
-        Args:
-            component (function|class): A new component.
-
-        Raises:
-            TypeError: When `component` is neither class nor function.
-            KeyError: When `component` was added already.
-        """
-
-        # Currently only support class or function type
-        if not (inspect.isclass(component) or inspect.isfunction(component)):
-            raise TypeError(
                "Expect class/function type, but received {}".format(
-                    type(component)))
-
-        # Obtain the internal name of the component
-        component_name = component.__name__
-
-        # Check whether the component was added already
-        if component_name in self._components_dict.keys():
-            raise KeyError("{} exists already!".format(component_name))
-        else:
-            # Take the internal name of the component as its key
-            self._components_dict[component_name] = component
-
-    def add_component(self, components):
-        """
-        Add component(s) into the corresponding manager.
-
-        Args:
-            components (function|class|list|tuple): Support four types of components.
-
-        Returns:
-            components (function|class|list|tuple): Same as the input components.
- """ - - # Check whether the type is a sequence - if isinstance(components, Sequence): - for component in components: - self._add_single_component(component) - else: - component = components - self._add_single_component(component) - - return components - - -MODELS = ComponentManager("models") -BACKBONES = ComponentManager("backbones") -DATASETS = ComponentManager("datasets") -TRANSFORMS = ComponentManager("transforms") -LOSSES = ComponentManager("losses") diff --git a/contrib/PanopticDeepLab/paddleseg/cvlibs/param_init.py b/contrib/PanopticDeepLab/paddleseg/cvlibs/param_init.py deleted file mode 100644 index 335281242e..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/cvlibs/param_init.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle.nn as nn - - -def constant_init(param, **kwargs): - """ - Initialize the `param` with constants. - - Args: - param (Tensor): Tensor that needs to be initialized. - - Examples: - - from paddleseg.cvlibs import param_init - import paddle.nn as nn - - linear = nn.Linear(2, 4) - param_init.constant_init(linear.weight, value=2.0) - print(linear.weight.numpy()) - # result is [[2. 2. 2. 2.], [2. 2. 2. 2.]] - - """ - initializer = nn.initializer.Constant(**kwargs) - initializer(param, param.block) - - -def normal_init(param, **kwargs): - """ - Initialize the `param` with a Normal distribution. - - Args: - param (Tensor): Tensor that needs to be initialized. - - Examples: - - from paddleseg.cvlibs import param_init - import paddle.nn as nn - - linear = nn.Linear(2, 4) - param_init.normal_init(linear.weight, loc=0.0, scale=1.0) - - """ - initializer = nn.initializer.Normal(**kwargs) - initializer(param, param.block) - - -def kaiming_normal_init(param, **kwargs): - """ - Initialize the input tensor with Kaiming Normal initialization. - - This function implements the `param` initialization from the paper - `Delving Deep into Rectifiers: Surpassing Human-Level Performance on - ImageNet Classification ` - by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a - robust initialization method that particularly considers the rectifier - nonlinearities. In case of Uniform distribution, the range is [-x, x], where - .. math:: - x = \sqrt{\\frac{6.0}{fan\_in}} - In case of Normal distribution, the mean is 0 and the standard deviation - is - .. math:: - \sqrt{\\frac{2.0}{fan\_in}} - - Args: - param (Tensor): Tensor that needs to be initialized. 
- - Examples: - - from paddleseg.cvlibs import param_init - import paddle.nn as nn - - linear = nn.Linear(2, 4) - # uniform is used to decide whether to use uniform or normal distribution - param_init.kaiming_normal_init(linear.weight) - - """ - initializer = nn.initializer.KaimingNormal(**kwargs) - initializer(param, param.block) diff --git a/contrib/PanopticDeepLab/paddleseg/datasets/__init__.py b/contrib/PanopticDeepLab/paddleseg/datasets/__init__.py deleted file mode 100644 index fefa6a07ea..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/datasets/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .cityscapes_panoptic import CityscapesPanoptic diff --git a/contrib/PanopticDeepLab/paddleseg/datasets/cityscapes_panoptic.py b/contrib/PanopticDeepLab/paddleseg/datasets/cityscapes_panoptic.py deleted file mode 100644 index a3789e4c2d..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/datasets/cityscapes_panoptic.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json -import os -import glob - -import numpy as np -import paddle -from paddleseg.cvlibs import manager -from paddleseg.transforms import Compose, PanopticTargetGenerator, SemanticTargetGenerator, InstanceTargetGenerator, RawPanopticTargetGenerator -import PIL.Image as Image - - -@manager.DATASETS.add_component -class CityscapesPanoptic(paddle.io.Dataset): - """ - Cityscapes dataset `https://www.cityscapes-dataset.com/`. - The folder structure is as follow: - - cityscapes/ - |--gtFine/ - | |--train/ - | | |--aachen/ - | | | |--*_color.png, *_instanceIds.png, *_labelIds.png, *_polygons.json, - | | | |--*_labelTrainIds.png - | | | |--... - | |--val/ - | |--test/ - | |--cityscapes_panoptic_train_trainId.json - | |--cityscapes_panoptic_train_trainId/ - | | |-- *_panoptic.png - | |--cityscapes_panoptic_val_trainId.json - | |--cityscapes_panoptic_val_trainId/ - | | |-- *_panoptic.png - |--leftImg8bit/ - | |--train/ - | |--val/ - | |--test/ - - Args: - transforms (list): Transforms for image. - dataset_root (str): Cityscapes dataset directory. - mode (str, optional): Which part of dataset to use. it is one of ('train', 'val'). Default: 'train'. - ignore_stuff_in_offset (bool, optional): Whether to ignore stuff region when training the offset branch. Default: False. 
-        small_instance_area (int, optional): An instance whose area is less than this value is considered small. Default: 0.
-        small_instance_weight (int, optional): The loss weight for small instances. Default: 1.
-        stuff_area (int, optional): An integer; stuff whose area is less than stuff_area is removed. Default: 2048.
-    """
-
-    def __init__(self,
-                 transforms,
-                 dataset_root,
-                 mode='train',
-                 ignore_stuff_in_offset=False,
-                 small_instance_area=0,
-                 small_instance_weight=1,
-                 stuff_area=2048):
-        self.dataset_root = dataset_root
-        self.transforms = Compose(transforms)
-        self.file_list = list()
-        self.ins_list = []
-        mode = mode.lower()
-        self.mode = mode
-        self.num_classes = 19
-        self.ignore_index = 255
-        self.thing_list = [11, 12, 13, 14, 15, 16, 17, 18]
-        self.label_divisor = 1000
-        self.stuff_area = stuff_area
-
-        if mode not in ['train', 'val']:
-            raise ValueError(
-                "mode should be 'train' or 'val', but got {}.".format(mode))
-
-        if self.transforms is None:
-            raise ValueError("`transforms` is necessary, but it is None.")
-
-        img_dir = os.path.join(self.dataset_root, 'leftImg8bit')
-        label_dir = os.path.join(self.dataset_root, 'gtFine')
-        if self.dataset_root is None or not os.path.isdir(
-                self.dataset_root) or not os.path.isdir(
-                    img_dir) or not os.path.isdir(label_dir):
-            raise ValueError(
-                "The dataset is not found or the folder structure is non-conformant."
-            )
-        json_filename = os.path.join(
-            self.dataset_root, 'gtFine',
-            'cityscapes_panoptic_{}_trainId.json'.format(mode))
-        dataset = json.load(open(json_filename))
-        img_files = []
-        label_files = []
-        for img in dataset['images']:
-            img_file_name = img['file_name']
-            img_files.append(
-                os.path.join(self.dataset_root, 'leftImg8bit', mode,
-                             img_file_name.split('_')[0],
-                             img_file_name.replace('_gtFine', '')))
-        for ann in dataset['annotations']:
-            ann_file_name = ann['file_name']
-            label_files.append(
-                os.path.join(self.dataset_root, 'gtFine',
-                             'cityscapes_panoptic_{}_trainId'.format(mode),
-                             ann_file_name))
-            self.ins_list.append(ann['segments_info'])
-
-        self.file_list = [[
-            img_path, label_path
-        ] for img_path, label_path in zip(img_files, label_files)]
-
-        self.target_transform = PanopticTargetGenerator(
-            self.ignore_index,
-            self.rgb2id,
-            self.thing_list,
-            sigma=8,
-            ignore_stuff_in_offset=ignore_stuff_in_offset,
-            small_instance_area=small_instance_area,
-            small_instance_weight=small_instance_weight)
-
-        self.raw_semantic_generator = SemanticTargetGenerator(
-            ignore_index=self.ignore_index, rgb2id=self.rgb2id)
-        self.raw_instance_generator = InstanceTargetGenerator(self.rgb2id)
-        self.raw_panoptic_generator = RawPanopticTargetGenerator(
-            ignore_index=self.ignore_index,
-            rgb2id=self.rgb2id,
-            label_divisor=self.label_divisor)
-
-    @staticmethod
-    def rgb2id(color):
-        """Converts the color to the panoptic label.
-        The color is created by `color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]`.
-
-        Args:
-            color: An ndarray or a tuple, color encoded image.
-
-        Returns:
-            Panoptic label.
-
- """ - if isinstance(color, np.ndarray) and len(color.shape) == 3: - if color.dtype == np.uint8: - color = color.astype(np.int32) - return color[:, :, - 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] - return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) - - def __getitem__(self, idx): - image_path, label_path = self.file_list[idx] - dataset_dict = {} - im, label = self.transforms(im=image_path, label=label_path) - label_dict = self.target_transform(label, self.ins_list[idx]) - for key in label_dict.keys(): - dataset_dict[key] = label_dict[key] - dataset_dict['image'] = im - if self.mode == 'val': - raw_label = np.asarray(Image.open(label_path)) - dataset_dict['raw_semantic_label'] = self.raw_semantic_generator( - raw_label, self.ins_list[idx])['semantic'] - dataset_dict['raw_instance_label'] = self.raw_instance_generator( - raw_label)['instance'] - dataset_dict['raw_panoptic_label'] = self.raw_panoptic_generator( - raw_label, self.ins_list[idx])['panoptic'] - - image = np.array(dataset_dict['image']) - semantic = np.array(dataset_dict['semantic']) - semantic_weights = np.array(dataset_dict['semantic_weights']) - center = np.array(dataset_dict['center']) - center_weights = np.array(dataset_dict['center_weights']) - offset = np.array(dataset_dict['offset']) - offset_weights = np.array(dataset_dict['offset_weights']) - foreground = np.array(dataset_dict['foreground']) - if self.mode == 'train': - return image, semantic, semantic_weights, center, center_weights, offset, offset_weights, foreground - elif self.mode == 'val': - raw_semantic_label = np.array(dataset_dict['raw_semantic_label']) - raw_instance_label = np.array(dataset_dict['raw_instance_label']) - raw_panoptic_label = np.array(dataset_dict['raw_panoptic_label']) - return image, raw_semantic_label, raw_instance_label, raw_panoptic_label - else: - raise ValueError( - '{} is not surpported, please set it one of ("train", "val")'. - format(self.mode)) - - def __len__(self): - return len(self.file_list) diff --git a/contrib/PanopticDeepLab/paddleseg/models/__init__.py b/contrib/PanopticDeepLab/paddleseg/models/__init__.py deleted file mode 100644 index 37e457c547..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .backbones import * -from .losses import * - -from .panoptic_deeplab import PanopticDeepLab diff --git a/contrib/PanopticDeepLab/paddleseg/models/backbones/__init__.py b/contrib/PanopticDeepLab/paddleseg/models/backbones/__init__.py deleted file mode 100644 index 8bc32c14b4..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/backbones/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .hrnet import *
-from .resnet_vd import *
-from .xception_deeplab import *
-from .mobilenetv3 import *
diff --git a/contrib/PanopticDeepLab/paddleseg/models/backbones/hrnet.py b/contrib/PanopticDeepLab/paddleseg/models/backbones/hrnet.py
deleted file mode 100644
index 40ed660d9d..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/models/backbones/hrnet.py
+++ /dev/null
@@ -1,820 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import math
-
-import paddle
-import paddle.nn as nn
-import paddle.nn.functional as F
-
-from paddleseg.cvlibs import manager, param_init
-from paddleseg.models import layers
-from paddleseg.utils import utils
-
-__all__ = [
-    "HRNet_W18_Small_V1", "HRNet_W18_Small_V2", "HRNet_W18", "HRNet_W30",
-    "HRNet_W32", "HRNet_W40", "HRNet_W44", "HRNet_W48", "HRNet_W60", "HRNet_W64"
-]
-
-
-class HRNet(nn.Layer):
-    """
-    The HRNet implementation based on PaddlePaddle.
-
-    The original article refers to
-    Jingdong Wang, et al. "HRNet: Deep High-Resolution Representation Learning for Visual Recognition"
-    (https://arxiv.org/pdf/1908.07919.pdf).
-
-    Args:
-        pretrained (str, optional): The path of the pretrained model.
-        stage1_num_modules (int, optional): Number of modules for stage1. Default 1.
-        stage1_num_blocks (list, optional): Number of blocks per module for stage1. Default (4,).
-        stage1_num_channels (list, optional): Number of channels per branch for stage1. Default (64,).
-        stage2_num_modules (int, optional): Number of modules for stage2. Default 1.
-        stage2_num_blocks (list, optional): Number of blocks per module for stage2. Default (4, 4).
-        stage2_num_channels (list, optional): Number of channels per branch for stage2. Default (18, 36).
-        stage3_num_modules (int, optional): Number of modules for stage3. Default 4.
-        stage3_num_blocks (list, optional): Number of blocks per module for stage3. Default (4, 4, 4).
-        stage3_num_channels (list, optional): Number of channels per branch for stage3. Default (18, 36, 72).
-        stage4_num_modules (int, optional): Number of modules for stage4. Default 3.
-        stage4_num_blocks (list, optional): Number of blocks per module for stage4. Default (4, 4, 4, 4).
-        stage4_num_channels (list, optional): Number of channels per branch for stage4. Default (18, 36, 72, 144).
-        has_se (bool, optional): Whether to use the Squeeze-and-Excitation module. Default False.
-        align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
-            e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
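The four `stageN_num_channels` tuples define one branch per resolution; as the forward pass further down shows, the backbone's output is the channel-wise concatenation of all stage-4 branches after bilinear upsampling, hence `feat_channels = [sum(stage4_num_channels)]` (270 for W18). A shape-level sketch of that fusion, with illustrative tensor sizes:

import paddle
import paddle.nn.functional as F

# Illustrative stage-4 feature maps for HRNet-W18: widths 18/36/72/144 at
# strides 4/8/16/32 of a 512x512 input.
feats = [paddle.rand([1, c, 128 // 2**i, 128 // 2**i])
         for i, c in enumerate([18, 36, 72, 144])]

h, w = feats[0].shape[2:]
up = [feats[0]] + [F.interpolate(f, (h, w), mode='bilinear') for f in feats[1:]]
fused = paddle.concat(up, axis=1)
print(fused.shape)  # [1, 270, 128, 128], since 270 == 18 + 36 + 72 + 144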
- """ - - def __init__(self, - pretrained=None, - stage1_num_modules=1, - stage1_num_blocks=(4, ), - stage1_num_channels=(64, ), - stage2_num_modules=1, - stage2_num_blocks=(4, 4), - stage2_num_channels=(18, 36), - stage3_num_modules=4, - stage3_num_blocks=(4, 4, 4), - stage3_num_channels=(18, 36, 72), - stage4_num_modules=3, - stage4_num_blocks=(4, 4, 4, 4), - stage4_num_channels=(18, 36, 72, 144), - has_se=False, - align_corners=False): - super(HRNet, self).__init__() - self.pretrained = pretrained - self.stage1_num_modules = stage1_num_modules - self.stage1_num_blocks = stage1_num_blocks - self.stage1_num_channels = stage1_num_channels - self.stage2_num_modules = stage2_num_modules - self.stage2_num_blocks = stage2_num_blocks - self.stage2_num_channels = stage2_num_channels - self.stage3_num_modules = stage3_num_modules - self.stage3_num_blocks = stage3_num_blocks - self.stage3_num_channels = stage3_num_channels - self.stage4_num_modules = stage4_num_modules - self.stage4_num_blocks = stage4_num_blocks - self.stage4_num_channels = stage4_num_channels - self.has_se = has_se - self.align_corners = align_corners - self.feat_channels = [sum(stage4_num_channels)] - - self.conv_layer1_1 = layers.ConvBNReLU( - in_channels=3, - out_channels=64, - kernel_size=3, - stride=2, - padding='same', - bias_attr=False) - - self.conv_layer1_2 = layers.ConvBNReLU( - in_channels=64, - out_channels=64, - kernel_size=3, - stride=2, - padding='same', - bias_attr=False) - - self.la1 = Layer1( - num_channels=64, - num_blocks=self.stage1_num_blocks[0], - num_filters=self.stage1_num_channels[0], - has_se=has_se, - name="layer2") - - self.tr1 = TransitionLayer( - in_channels=[self.stage1_num_channels[0] * 4], - out_channels=self.stage2_num_channels, - name="tr1") - - self.st2 = Stage( - num_channels=self.stage2_num_channels, - num_modules=self.stage2_num_modules, - num_blocks=self.stage2_num_blocks, - num_filters=self.stage2_num_channels, - has_se=self.has_se, - name="st2", - align_corners=align_corners) - - self.tr2 = TransitionLayer( - in_channels=self.stage2_num_channels, - out_channels=self.stage3_num_channels, - name="tr2") - self.st3 = Stage( - num_channels=self.stage3_num_channels, - num_modules=self.stage3_num_modules, - num_blocks=self.stage3_num_blocks, - num_filters=self.stage3_num_channels, - has_se=self.has_se, - name="st3", - align_corners=align_corners) - - self.tr3 = TransitionLayer( - in_channels=self.stage3_num_channels, - out_channels=self.stage4_num_channels, - name="tr3") - self.st4 = Stage( - num_channels=self.stage4_num_channels, - num_modules=self.stage4_num_modules, - num_blocks=self.stage4_num_blocks, - num_filters=self.stage4_num_channels, - has_se=self.has_se, - name="st4", - align_corners=align_corners) - self.init_weight() - - def forward(self, x): - conv1 = self.conv_layer1_1(x) - conv2 = self.conv_layer1_2(conv1) - - la1 = self.la1(conv2) - - tr1 = self.tr1([la1]) - st2 = self.st2(tr1) - - tr2 = self.tr2(st2) - st3 = self.st3(tr2) - - tr3 = self.tr3(st3) - st4 = self.st4(tr3) - - x0_h, x0_w = st4[0].shape[2:] - x1 = F.interpolate( - st4[1], (x0_h, x0_w), - mode='bilinear', - align_corners=self.align_corners) - x2 = F.interpolate( - st4[2], (x0_h, x0_w), - mode='bilinear', - align_corners=self.align_corners) - x3 = F.interpolate( - st4[3], (x0_h, x0_w), - mode='bilinear', - align_corners=self.align_corners) - x = paddle.concat([st4[0], x1, x2, x3], axis=1) - - return [x] - - def init_weight(self): - for layer in self.sublayers(): - if isinstance(layer, nn.Conv2D): - 
param_init.normal_init(layer.weight, std=0.001) - elif isinstance(layer, (nn.BatchNorm, nn.SyncBatchNorm)): - param_init.constant_init(layer.weight, value=1.0) - param_init.constant_init(layer.bias, value=0.0) - if self.pretrained is not None: - utils.load_pretrained_model(self, self.pretrained) - - -class Layer1(nn.Layer): - def __init__(self, - num_channels, - num_filters, - num_blocks, - has_se=False, - name=None): - super(Layer1, self).__init__() - - self.bottleneck_block_list = [] - - for i in range(num_blocks): - bottleneck_block = self.add_sublayer( - "bb_{}_{}".format(name, i + 1), - BottleneckBlock( - num_channels=num_channels if i == 0 else num_filters * 4, - num_filters=num_filters, - has_se=has_se, - stride=1, - downsample=True if i == 0 else False, - name=name + '_' + str(i + 1))) - self.bottleneck_block_list.append(bottleneck_block) - - def forward(self, x): - conv = x - for block_func in self.bottleneck_block_list: - conv = block_func(conv) - return conv - - -class TransitionLayer(nn.Layer): - def __init__(self, in_channels, out_channels, name=None): - super(TransitionLayer, self).__init__() - - num_in = len(in_channels) - num_out = len(out_channels) - self.conv_bn_func_list = [] - for i in range(num_out): - residual = None - if i < num_in: - if in_channels[i] != out_channels[i]: - residual = self.add_sublayer( - "transition_{}_layer_{}".format(name, i + 1), - layers.ConvBNReLU( - in_channels=in_channels[i], - out_channels=out_channels[i], - kernel_size=3, - padding='same', - bias_attr=False)) - else: - residual = self.add_sublayer( - "transition_{}_layer_{}".format(name, i + 1), - layers.ConvBNReLU( - in_channels=in_channels[-1], - out_channels=out_channels[i], - kernel_size=3, - stride=2, - padding='same', - bias_attr=False)) - self.conv_bn_func_list.append(residual) - - def forward(self, x): - outs = [] - for idx, conv_bn_func in enumerate(self.conv_bn_func_list): - if conv_bn_func is None: - outs.append(x[idx]) - else: - if idx < len(x): - outs.append(conv_bn_func(x[idx])) - else: - outs.append(conv_bn_func(x[-1])) - return outs - - -class Branches(nn.Layer): - def __init__(self, - num_blocks, - in_channels, - out_channels, - has_se=False, - name=None): - super(Branches, self).__init__() - - self.basic_block_list = [] - - for i in range(len(out_channels)): - self.basic_block_list.append([]) - for j in range(num_blocks[i]): - in_ch = in_channels[i] if j == 0 else out_channels[i] - basic_block_func = self.add_sublayer( - "bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1), - BasicBlock( - num_channels=in_ch, - num_filters=out_channels[i], - has_se=has_se, - name=name + '_branch_layer_' + str(i + 1) + '_' + - str(j + 1))) - self.basic_block_list[i].append(basic_block_func) - - def forward(self, x): - outs = [] - for idx, input in enumerate(x): - conv = input - for basic_block_func in self.basic_block_list[idx]: - conv = basic_block_func(conv) - outs.append(conv) - return outs - - -class BottleneckBlock(nn.Layer): - def __init__(self, - num_channels, - num_filters, - has_se, - stride=1, - downsample=False, - name=None): - super(BottleneckBlock, self).__init__() - - self.has_se = has_se - self.downsample = downsample - - self.conv1 = layers.ConvBNReLU( - in_channels=num_channels, - out_channels=num_filters, - kernel_size=1, - padding='same', - bias_attr=False) - - self.conv2 = layers.ConvBNReLU( - in_channels=num_filters, - out_channels=num_filters, - kernel_size=3, - stride=stride, - padding='same', - bias_attr=False) - - self.conv3 = layers.ConvBN( - 
in_channels=num_filters, - out_channels=num_filters * 4, - kernel_size=1, - padding='same', - bias_attr=False) - - if self.downsample: - self.conv_down = layers.ConvBN( - in_channels=num_channels, - out_channels=num_filters * 4, - kernel_size=1, - padding='same', - bias_attr=False) - - if self.has_se: - self.se = SELayer( - num_channels=num_filters * 4, - num_filters=num_filters * 4, - reduction_ratio=16, - name=name + '_fc') - - def forward(self, x): - residual = x - conv1 = self.conv1(x) - conv2 = self.conv2(conv1) - conv3 = self.conv3(conv2) - - if self.downsample: - residual = self.conv_down(x) - - if self.has_se: - conv3 = self.se(conv3) - - y = conv3 + residual - y = F.relu(y) - return y - - -class BasicBlock(nn.Layer): - def __init__(self, - num_channels, - num_filters, - stride=1, - has_se=False, - downsample=False, - name=None): - super(BasicBlock, self).__init__() - - self.has_se = has_se - self.downsample = downsample - - self.conv1 = layers.ConvBNReLU( - in_channels=num_channels, - out_channels=num_filters, - kernel_size=3, - stride=stride, - padding='same', - bias_attr=False) - self.conv2 = layers.ConvBN( - in_channels=num_filters, - out_channels=num_filters, - kernel_size=3, - padding='same', - bias_attr=False) - - if self.downsample: - self.conv_down = layers.ConvBNReLU( - in_channels=num_channels, - out_channels=num_filters, - kernel_size=1, - padding='same', - bias_attr=False) - - if self.has_se: - self.se = SELayer( - num_channels=num_filters, - num_filters=num_filters, - reduction_ratio=16, - name=name + '_fc') - - def forward(self, x): - residual = x - conv1 = self.conv1(x) - conv2 = self.conv2(conv1) - - if self.downsample: - residual = self.conv_down(x) - - if self.has_se: - conv2 = self.se(conv2) - - y = conv2 + residual - y = F.relu(y) - return y - - -class SELayer(nn.Layer): - def __init__(self, num_channels, num_filters, reduction_ratio, name=None): - super(SELayer, self).__init__() - - self.pool2d_gap = nn.AdaptiveAvgPool2D(1) - - self._num_channels = num_channels - - med_ch = int(num_channels / reduction_ratio) - stdv = 1.0 / math.sqrt(num_channels * 1.0) - self.squeeze = nn.Linear( - num_channels, - med_ch, - weight_attr=paddle.ParamAttr( - initializer=nn.initializer.Uniform(-stdv, stdv))) - - stdv = 1.0 / math.sqrt(med_ch * 1.0) - self.excitation = nn.Linear( - med_ch, - num_filters, - weight_attr=paddle.ParamAttr( - initializer=nn.initializer.Uniform(-stdv, stdv))) - - def forward(self, x): - pool = self.pool2d_gap(x) - pool = paddle.reshape(pool, shape=[-1, self._num_channels]) - squeeze = self.squeeze(pool) - squeeze = F.relu(squeeze) - excitation = self.excitation(squeeze) - excitation = F.sigmoid(excitation) - excitation = paddle.reshape( - excitation, shape=[-1, self._num_channels, 1, 1]) - out = x * excitation - return out - - -class Stage(nn.Layer): - def __init__(self, - num_channels, - num_modules, - num_blocks, - num_filters, - has_se=False, - multi_scale_output=True, - name=None, - align_corners=False): - super(Stage, self).__init__() - - self._num_modules = num_modules - - self.stage_func_list = [] - for i in range(num_modules): - if i == num_modules - 1 and not multi_scale_output: - stage_func = self.add_sublayer( - "stage_{}_{}".format(name, i + 1), - HighResolutionModule( - num_channels=num_channels, - num_blocks=num_blocks, - num_filters=num_filters, - has_se=has_se, - multi_scale_output=False, - name=name + '_' + str(i + 1), - align_corners=align_corners)) - else: - stage_func = self.add_sublayer( - "stage_{}_{}".format(name, i + 1), - 
HighResolutionModule( - num_channels=num_channels, - num_blocks=num_blocks, - num_filters=num_filters, - has_se=has_se, - name=name + '_' + str(i + 1), - align_corners=align_corners)) - - self.stage_func_list.append(stage_func) - - def forward(self, x): - out = x - for idx in range(self._num_modules): - out = self.stage_func_list[idx](out) - return out - - -class HighResolutionModule(nn.Layer): - def __init__(self, - num_channels, - num_blocks, - num_filters, - has_se=False, - multi_scale_output=True, - name=None, - align_corners=False): - super(HighResolutionModule, self).__init__() - - self.branches_func = Branches( - num_blocks=num_blocks, - in_channels=num_channels, - out_channels=num_filters, - has_se=has_se, - name=name) - - self.fuse_func = FuseLayers( - in_channels=num_filters, - out_channels=num_filters, - multi_scale_output=multi_scale_output, - name=name, - align_corners=align_corners) - - def forward(self, x): - out = self.branches_func(x) - out = self.fuse_func(out) - return out - - -class FuseLayers(nn.Layer): - def __init__(self, - in_channels, - out_channels, - multi_scale_output=True, - name=None, - align_corners=False): - super(FuseLayers, self).__init__() - - self._actual_ch = len(in_channels) if multi_scale_output else 1 - self._in_channels = in_channels - self.align_corners = align_corners - - self.residual_func_list = [] - for i in range(self._actual_ch): - for j in range(len(in_channels)): - if j > i: - residual_func = self.add_sublayer( - "residual_{}_layer_{}_{}".format(name, i + 1, j + 1), - layers.ConvBN( - in_channels=in_channels[j], - out_channels=out_channels[i], - kernel_size=1, - padding='same', - bias_attr=False)) - self.residual_func_list.append(residual_func) - elif j < i: - pre_num_filters = in_channels[j] - for k in range(i - j): - if k == i - j - 1: - residual_func = self.add_sublayer( - "residual_{}_layer_{}_{}_{}".format( - name, i + 1, j + 1, k + 1), - layers.ConvBN( - in_channels=pre_num_filters, - out_channels=out_channels[i], - kernel_size=3, - stride=2, - padding='same', - bias_attr=False)) - pre_num_filters = out_channels[i] - else: - residual_func = self.add_sublayer( - "residual_{}_layer_{}_{}_{}".format( - name, i + 1, j + 1, k + 1), - layers.ConvBNReLU( - in_channels=pre_num_filters, - out_channels=out_channels[j], - kernel_size=3, - stride=2, - padding='same', - bias_attr=False)) - pre_num_filters = out_channels[j] - self.residual_func_list.append(residual_func) - - def forward(self, x): - outs = [] - residual_func_idx = 0 - for i in range(self._actual_ch): - residual = x[i] - residual_shape = residual.shape[-2:] - for j in range(len(self._in_channels)): - if j > i: - y = self.residual_func_list[residual_func_idx](x[j]) - residual_func_idx += 1 - - y = F.interpolate( - y, - residual_shape, - mode='bilinear', - align_corners=self.align_corners) - residual = residual + y - elif j < i: - y = x[j] - for k in range(i - j): - y = self.residual_func_list[residual_func_idx](y) - residual_func_idx += 1 - - residual = residual + y - - residual = F.relu(residual) - outs.append(residual) - - return outs - - -@manager.BACKBONES.add_component -def HRNet_W18_Small_V1(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[1], - stage1_num_channels=[32], - stage2_num_modules=1, - stage2_num_blocks=[2, 2], - stage2_num_channels=[16, 32], - stage3_num_modules=1, - stage3_num_blocks=[2, 2, 2], - stage3_num_channels=[16, 32, 64], - stage4_num_modules=1, - stage4_num_blocks=[2, 2, 2, 2], - stage4_num_channels=[16, 32, 64, 128], - **kwargs) - 
return model - - -@manager.BACKBONES.add_component -def HRNet_W18_Small_V2(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[2], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[2, 2], - stage2_num_channels=[18, 36], - stage3_num_modules=3, - stage3_num_blocks=[2, 2, 2], - stage3_num_channels=[18, 36, 72], - stage4_num_modules=2, - stage4_num_blocks=[2, 2, 2, 2], - stage4_num_channels=[18, 36, 72, 144], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W18(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[18, 36], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[18, 36, 72], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[18, 36, 72, 144], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W30(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[30, 60], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[30, 60, 120], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[30, 60, 120, 240], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W32(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[32, 64], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[32, 64, 128], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[32, 64, 128, 256], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W40(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[40, 80], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[40, 80, 160], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[40, 80, 160, 320], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W44(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[44, 88], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[44, 88, 176], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[44, 88, 176, 352], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W48(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[48, 96], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[48, 96, 192], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[48, 96, 192, 384], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W60(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[60, 120], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - 
stage3_num_channels=[60, 120, 240],
-        stage4_num_modules=3,
-        stage4_num_blocks=[4, 4, 4, 4],
-        stage4_num_channels=[60, 120, 240, 480],
-        **kwargs)
-    return model
-
-
-@manager.BACKBONES.add_component
-def HRNet_W64(**kwargs):
-    model = HRNet(
-        stage1_num_modules=1,
-        stage1_num_blocks=[4],
-        stage1_num_channels=[64],
-        stage2_num_modules=1,
-        stage2_num_blocks=[4, 4],
-        stage2_num_channels=[64, 128],
-        stage3_num_modules=4,
-        stage3_num_blocks=[4, 4, 4],
-        stage3_num_channels=[64, 128, 256],
-        stage4_num_modules=3,
-        stage4_num_blocks=[4, 4, 4, 4],
-        stage4_num_channels=[64, 128, 256, 512],
-        **kwargs)
-    return model
diff --git a/contrib/PanopticDeepLab/paddleseg/models/backbones/mobilenetv3.py b/contrib/PanopticDeepLab/paddleseg/models/backbones/mobilenetv3.py
deleted file mode 100644
index 7a83f04c00..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/models/backbones/mobilenetv3.py
+++ /dev/null
@@ -1,364 +0,0 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle
-import paddle.nn as nn
-import paddle.nn.functional as F
-
-from paddleseg.cvlibs import manager
-from paddleseg.utils import utils
-from paddleseg.models import layers
-
-__all__ = [
-    "MobileNetV3_small_x0_35", "MobileNetV3_small_x0_5",
-    "MobileNetV3_small_x0_75", "MobileNetV3_small_x1_0",
-    "MobileNetV3_small_x1_25", "MobileNetV3_large_x0_35",
-    "MobileNetV3_large_x0_5", "MobileNetV3_large_x0_75",
-    "MobileNetV3_large_x1_0", "MobileNetV3_large_x1_25"
-]
-
-
-def make_divisible(v, divisor=8, min_value=None):
-    if min_value is None:
-        min_value = divisor
-    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
-    if new_v < 0.9 * v:
-        new_v += divisor
-    return new_v
-
-
-class MobileNetV3(nn.Layer):
-    """
-    The MobileNetV3 implementation based on PaddlePaddle.
-
-    The original article refers to
-    Andrew Howard, et al. "Searching for MobileNetV3"
-    (https://arxiv.org/pdf/1905.02244.pdf).
-
-    Args:
-        pretrained (str, optional): The path of pretrained model.
-        scale (float, optional): The scale of channels. Default: 1.0.
-        model_name (str, optional): Model name. It determines the type of MobileNetV3. The value is 'small' or 'large'. Default: 'small'.
-        output_stride (int, optional): The stride of output features compared to input images. The value should be one of (2, 4, 8, 16, 32). Default: None.
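The make_divisible helper above drives every channel width in this backbone: it rounds scale * channels to the nearest multiple of the divisor (8 by default), clamps at min_value, and bumps up one step whenever rounding would lose more than 10% of the requested width. A minimal sketch of the values it yields for scales that appear in this file (illustrative only, not part of the patch):

    # Assumes make_divisible as defined above.
    assert make_divisible(16 * 0.35) == 8     # 5.6 is clamped up to min_value=8
    assert make_divisible(40 * 0.75) == 32    # 30 rounds to the nearest multiple of 8
    assert make_divisible(112 * 1.25) == 144  # 140 is equidistant; int(v + 4) // 8 * 8 rounds up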
- - """ - - def __init__(self, - pretrained=None, - scale=1.0, - model_name="small", - output_stride=None): - super(MobileNetV3, self).__init__() - - inplanes = 16 - if model_name == "large": - self.cfg = [ - # k, exp, c, se, nl, s, - [3, 16, 16, False, "relu", 1], - [3, 64, 24, False, "relu", 2], - [3, 72, 24, False, "relu", 1], # output 1 -> out_index=2 - [5, 72, 40, True, "relu", 2], - [5, 120, 40, True, "relu", 1], - [5, 120, 40, True, "relu", 1], # output 2 -> out_index=5 - [3, 240, 80, False, "hard_swish", 2], - [3, 200, 80, False, "hard_swish", 1], - [3, 184, 80, False, "hard_swish", 1], - [3, 184, 80, False, "hard_swish", 1], - [3, 480, 112, True, "hard_swish", 1], - [3, 672, 112, True, "hard_swish", - 1], # output 3 -> out_index=11 - [5, 672, 160, True, "hard_swish", 2], - [5, 960, 160, True, "hard_swish", 1], - [5, 960, 160, True, "hard_swish", - 1], # output 3 -> out_index=14 - ] - self.out_indices = [2, 5, 11, 14] - self.feat_channels = [ - make_divisible(i * scale) for i in [24, 40, 112, 160] - ] - - self.cls_ch_squeeze = 960 - self.cls_ch_expand = 1280 - elif model_name == "small": - self.cfg = [ - # k, exp, c, se, nl, s, - [3, 16, 16, True, "relu", 2], # output 1 -> out_index=0 - [3, 72, 24, False, "relu", 2], - [3, 88, 24, False, "relu", 1], # output 2 -> out_index=3 - [5, 96, 40, True, "hard_swish", 2], - [5, 240, 40, True, "hard_swish", 1], - [5, 240, 40, True, "hard_swish", 1], - [5, 120, 48, True, "hard_swish", 1], - [5, 144, 48, True, "hard_swish", 1], # output 3 -> out_index=7 - [5, 288, 96, True, "hard_swish", 2], - [5, 576, 96, True, "hard_swish", 1], - [5, 576, 96, True, "hard_swish", 1], # output 4 -> out_index=10 - ] - self.out_indices = [0, 3, 7, 10] - self.feat_channels = [ - make_divisible(i * scale) for i in [16, 24, 48, 96] - ] - - self.cls_ch_squeeze = 576 - self.cls_ch_expand = 1280 - else: - raise NotImplementedError( - "mode[{}_model] is not implemented!".format(model_name)) - - ################################################### - # modify stride and dilation based on output_stride - self.dilation_cfg = [1] * len(self.cfg) - self.modify_bottle_params(output_stride=output_stride) - ################################################### - - self.conv1 = ConvBNLayer( - in_c=3, - out_c=make_divisible(inplanes * scale), - filter_size=3, - stride=2, - padding=1, - num_groups=1, - if_act=True, - act="hard_swish") - - self.block_list = [] - - inplanes = make_divisible(inplanes * scale) - for i, (k, exp, c, se, nl, s) in enumerate(self.cfg): - ###################################### - # add dilation rate - dilation_rate = self.dilation_cfg[i] - ###################################### - self.block_list.append( - ResidualUnit( - in_c=inplanes, - mid_c=make_divisible(scale * exp), - out_c=make_divisible(scale * c), - filter_size=k, - stride=s, - dilation=dilation_rate, - use_se=se, - act=nl, - name="conv" + str(i + 2))) - self.add_sublayer( - sublayer=self.block_list[-1], name="conv" + str(i + 2)) - inplanes = make_divisible(scale * c) - - self.pretrained = pretrained - self.init_weight() - - def modify_bottle_params(self, output_stride=None): - - if output_stride is not None and output_stride % 2 != 0: - raise ValueError("output stride must to be even number") - if output_stride is not None: - stride = 2 - rate = 1 - for i, _cfg in enumerate(self.cfg): - stride = stride * _cfg[-1] - if stride > output_stride: - rate = rate * _cfg[-1] - self.cfg[i][-1] = 1 - - self.dilation_cfg[i] = rate - - def forward(self, inputs, label=None): - x = self.conv1(inputs) - # A feature 
list saves each downsampling feature. - feat_list = [] - for i, block in enumerate(self.block_list): - x = block(x) - if i in self.out_indices: - feat_list.append(x) - - return feat_list - - def init_weight(self): - if self.pretrained is not None: - utils.load_pretrained_model(self, self.pretrained) - - -class ConvBNLayer(nn.Layer): - def __init__(self, - in_c, - out_c, - filter_size, - stride, - padding, - dilation=1, - num_groups=1, - if_act=True, - act=None): - super(ConvBNLayer, self).__init__() - self.if_act = if_act - self.act = act - - self.conv = nn.Conv2D( - in_channels=in_c, - out_channels=out_c, - kernel_size=filter_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=num_groups, - bias_attr=False) - self.bn = layers.SyncBatchNorm( - num_features=out_c, - weight_attr=paddle.ParamAttr( - regularizer=paddle.regularizer.L2Decay(0.0)), - bias_attr=paddle.ParamAttr( - regularizer=paddle.regularizer.L2Decay(0.0))) - self._act_op = layers.Activation(act='hardswish') - - def forward(self, x): - x = self.conv(x) - x = self.bn(x) - if self.if_act: - x = self._act_op(x) - return x - - -class ResidualUnit(nn.Layer): - def __init__(self, - in_c, - mid_c, - out_c, - filter_size, - stride, - use_se, - dilation=1, - act=None, - name=''): - super(ResidualUnit, self).__init__() - self.if_shortcut = stride == 1 and in_c == out_c - self.if_se = use_se - - self.expand_conv = ConvBNLayer( - in_c=in_c, - out_c=mid_c, - filter_size=1, - stride=1, - padding=0, - if_act=True, - act=act) - - self.bottleneck_conv = ConvBNLayer( - in_c=mid_c, - out_c=mid_c, - filter_size=filter_size, - stride=stride, - padding='same', - dilation=dilation, - num_groups=mid_c, - if_act=True, - act=act) - if self.if_se: - self.mid_se = SEModule(mid_c, name=name + "_se") - self.linear_conv = ConvBNLayer( - in_c=mid_c, - out_c=out_c, - filter_size=1, - stride=1, - padding=0, - if_act=False, - act=None) - self.dilation = dilation - - def forward(self, inputs): - x = self.expand_conv(inputs) - x = self.bottleneck_conv(x) - if self.if_se: - x = self.mid_se(x) - x = self.linear_conv(x) - if self.if_shortcut: - x = inputs + x - return x - - -class SEModule(nn.Layer): - def __init__(self, channel, reduction=4, name=""): - super(SEModule, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2D(1) - self.conv1 = nn.Conv2D( - in_channels=channel, - out_channels=channel // reduction, - kernel_size=1, - stride=1, - padding=0) - self.conv2 = nn.Conv2D( - in_channels=channel // reduction, - out_channels=channel, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, inputs): - outputs = self.avg_pool(inputs) - outputs = self.conv1(outputs) - outputs = F.relu(outputs) - outputs = self.conv2(outputs) - outputs = F.hard_sigmoid(outputs) - return paddle.multiply(x=inputs, y=outputs, axis=0) - - -def MobileNetV3_small_x0_35(**kwargs): - model = MobileNetV3(model_name="small", scale=0.35, **kwargs) - return model - - -def MobileNetV3_small_x0_5(**kwargs): - model = MobileNetV3(model_name="small", scale=0.5, **kwargs) - return model - - -def MobileNetV3_small_x0_75(**kwargs): - model = MobileNetV3(model_name="small", scale=0.75, **kwargs) - return model - - -@manager.BACKBONES.add_component -def MobileNetV3_small_x1_0(**kwargs): - model = MobileNetV3(model_name="small", scale=1.0, **kwargs) - return model - - -def MobileNetV3_small_x1_25(**kwargs): - model = MobileNetV3(model_name="small", scale=1.25, **kwargs) - return model - - -def MobileNetV3_large_x0_35(**kwargs): - model = MobileNetV3(model_name="large", 
scale=0.35, **kwargs) - return model - - -def MobileNetV3_large_x0_5(**kwargs): - model = MobileNetV3(model_name="large", scale=0.5, **kwargs) - return model - - -def MobileNetV3_large_x0_75(**kwargs): - model = MobileNetV3(model_name="large", scale=0.75, **kwargs) - return model - - -@manager.BACKBONES.add_component -def MobileNetV3_large_x1_0(**kwargs): - model = MobileNetV3(model_name="large", scale=1.0, **kwargs) - return model - - -def MobileNetV3_large_x1_25(**kwargs): - model = MobileNetV3(model_name="large", scale=1.25, **kwargs) - return model diff --git a/contrib/PanopticDeepLab/paddleseg/models/backbones/resnet_vd.py b/contrib/PanopticDeepLab/paddleseg/models/backbones/resnet_vd.py deleted file mode 100644 index 068a7e2b00..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/backbones/resnet_vd.py +++ /dev/null @@ -1,361 +0,0 @@ -# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F - -from paddleseg.cvlibs import manager -from paddleseg.models import layers -from paddleseg.utils import utils - -__all__ = [ - "ResNet18_vd", "ResNet34_vd", "ResNet50_vd", "ResNet101_vd", "ResNet152_vd" -] - - -class ConvBNLayer(nn.Layer): - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - dilation=1, - groups=1, - is_vd_mode=False, - act=None, - ): - super(ConvBNLayer, self).__init__() - - self.is_vd_mode = is_vd_mode - self._pool2d_avg = nn.AvgPool2D( - kernel_size=2, stride=2, padding=0, ceil_mode=True) - self._conv = nn.Conv2D( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - padding=(kernel_size - 1) // 2 if dilation == 1 else 0, - dilation=dilation, - groups=groups, - bias_attr=False) - - self._batch_norm = layers.SyncBatchNorm(out_channels) - self._act_op = layers.Activation(act=act) - - def forward(self, inputs): - if self.is_vd_mode: - inputs = self._pool2d_avg(inputs) - y = self._conv(inputs) - y = self._batch_norm(y) - y = self._act_op(y) - - return y - - -class BottleneckBlock(nn.Layer): - def __init__(self, - in_channels, - out_channels, - stride, - shortcut=True, - if_first=False, - dilation=1): - super(BottleneckBlock, self).__init__() - - self.conv0 = ConvBNLayer( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1, - act='relu') - - self.dilation = dilation - - self.conv1 = ConvBNLayer( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=3, - stride=stride, - act='relu', - dilation=dilation) - self.conv2 = ConvBNLayer( - in_channels=out_channels, - out_channels=out_channels * 4, - kernel_size=1, - act=None) - - if not shortcut: - self.short = ConvBNLayer( - in_channels=in_channels, - out_channels=out_channels * 4, - kernel_size=1, - stride=1, - is_vd_mode=False if if_first or stride == 1 else True) - - self.shortcut = shortcut - - def forward(self, inputs): - y = self.conv0(inputs) - - 
####################################################################
-        # If the dilation rate is greater than 1, pad the input accordingly.
-        # Performance drops without the following padding.
-        if self.dilation > 1:
-            padding = self.dilation
-            y = F.pad(y, [padding, padding, padding, padding])
-        #####################################################################
-
-        conv1 = self.conv1(y)
-        conv2 = self.conv2(conv1)
-
-        if self.shortcut:
-            short = inputs
-        else:
-            short = self.short(inputs)
-
-        y = paddle.add(x=short, y=conv2)
-        y = F.relu(y)
-        return y
-
-
-class BasicBlock(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 stride,
-                 shortcut=True,
-                 if_first=False):
-        super(BasicBlock, self).__init__()
-        self.stride = stride
-        self.conv0 = ConvBNLayer(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=3,
-            stride=stride,
-            act='relu')
-        self.conv1 = ConvBNLayer(
-            in_channels=out_channels,
-            out_channels=out_channels,
-            kernel_size=3,
-            act=None)
-
-        if not shortcut:
-            self.short = ConvBNLayer(
-                in_channels=in_channels,
-                out_channels=out_channels,
-                kernel_size=1,
-                stride=1,
-                is_vd_mode=False if if_first else True)
-
-        self.shortcut = shortcut
-
-    def forward(self, inputs):
-        y = self.conv0(inputs)
-        conv1 = self.conv1(y)
-
-        if self.shortcut:
-            short = inputs
-        else:
-            short = self.short(inputs)
-        y = paddle.add(x=short, y=conv1)
-        y = F.relu(y)
-
-        return y
-
-
-class ResNet_vd(nn.Layer):
-    """
-    The ResNet_vd implementation based on PaddlePaddle.
-
-    The original article refers to
-    Tong He, et al. "Bag of Tricks for Image Classification with Convolutional Neural Networks"
-    (https://arxiv.org/pdf/1812.01187.pdf).
-
-    Args:
-        layers (int, optional): The layers of ResNet_vd. The supported layers are (18, 34, 50, 101, 152, 200). Default: 50.
-        output_stride (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 8.
-        multi_grid (tuple|list, optional): The grid of stage4. Default: (1, 1, 1).
-        pretrained (str, optional): The path of pretrained model.
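The padding trick in BottleneckBlock.forward above is worth pinning down: ConvBNLayer builds its 3x3 conv with padding=0 whenever dilation > 1, and at stride 1 a dilated conv shrinks the map by dilation * (kernel_size - 1), so padding by exactly `dilation` on each side restores the input size. A runnable sketch under those assumptions (illustrative only, not part of the patch):

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 8, 33, 33])
    conv = paddle.nn.Conv2D(8, 8, kernel_size=3, dilation=2, padding=0)
    assert conv(x).shape[-1] == 29        # 33 - 2 * (3 - 1) = 29: the map shrinks
    y = F.pad(x, [2, 2, 2, 2])            # pad by `dilation` on each side
    assert conv(y).shape[-1] == 33        # spatial size preserved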
- - """ - - def __init__(self, - layers=50, - output_stride=8, - multi_grid=(1, 1, 1), - pretrained=None): - super(ResNet_vd, self).__init__() - - self.conv1_logit = None # for gscnn shape stream - self.layers = layers - supported_layers = [18, 34, 50, 101, 152, 200] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format( - supported_layers, layers) - - if layers == 18: - depth = [2, 2, 2, 2] - elif layers == 34 or layers == 50: - depth = [3, 4, 6, 3] - elif layers == 101: - depth = [3, 4, 23, 3] - elif layers == 152: - depth = [3, 8, 36, 3] - elif layers == 200: - depth = [3, 12, 48, 3] - num_channels = [64, 256, 512, 1024 - ] if layers >= 50 else [64, 64, 128, 256] - num_filters = [64, 128, 256, 512] - - # for channels of four returned stages - self.feat_channels = [c * 4 for c in num_filters - ] if layers >= 50 else num_filters - - dilation_dict = None - if output_stride == 8: - dilation_dict = {2: 2, 3: 4} - elif output_stride == 16: - dilation_dict = {3: 2} - - self.conv1_1 = ConvBNLayer( - in_channels=3, out_channels=32, kernel_size=3, stride=2, act='relu') - self.conv1_2 = ConvBNLayer( - in_channels=32, - out_channels=32, - kernel_size=3, - stride=1, - act='relu') - self.conv1_3 = ConvBNLayer( - in_channels=32, - out_channels=64, - kernel_size=3, - stride=1, - act='relu') - self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) - - # self.block_list = [] - self.stage_list = [] - if layers >= 50: - for block in range(len(depth)): - shortcut = False - block_list = [] - for i in range(depth[block]): - if layers in [101, 152] and block == 2: - if i == 0: - conv_name = "res" + str(block + 2) + "a" - else: - conv_name = "res" + str(block + 2) + "b" + str(i) - else: - conv_name = "res" + str(block + 2) + chr(97 + i) - - ############################################################################### - # Add dilation rate for some segmentation tasks, if dilation_dict is not None. 
- dilation_rate = dilation_dict[ - block] if dilation_dict and block in dilation_dict else 1 - - # Actually block here is 'stage', and i is 'block' in 'stage' - # At the stage 4, expand the the dilation_rate if given multi_grid - if block == 3: - dilation_rate = dilation_rate * multi_grid[i] - ############################################################################### - - bottleneck_block = self.add_sublayer( - 'bb_%d_%d' % (block, i), - BottleneckBlock( - in_channels=num_channels[block] - if i == 0 else num_filters[block] * 4, - out_channels=num_filters[block], - stride=2 if i == 0 and block != 0 - and dilation_rate == 1 else 1, - shortcut=shortcut, - if_first=block == i == 0, - dilation=dilation_rate)) - - block_list.append(bottleneck_block) - shortcut = True - self.stage_list.append(block_list) - else: - for block in range(len(depth)): - shortcut = False - block_list = [] - for i in range(depth[block]): - conv_name = "res" + str(block + 2) + chr(97 + i) - basic_block = self.add_sublayer( - 'bb_%d_%d' % (block, i), - BasicBlock( - in_channels=num_channels[block] - if i == 0 else num_filters[block], - out_channels=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - shortcut=shortcut, - if_first=block == i == 0)) - block_list.append(basic_block) - shortcut = True - self.stage_list.append(block_list) - - self.pretrained = pretrained - self.init_weight() - - def forward(self, inputs): - y = self.conv1_1(inputs) - y = self.conv1_2(y) - y = self.conv1_3(y) - self.conv1_logit = y.clone() - y = self.pool2d_max(y) - - # A feature list saves the output feature map of each stage. - feat_list = [] - for stage in self.stage_list: - for block in stage: - y = block(y) - feat_list.append(y) - - return feat_list - - def init_weight(self): - utils.load_pretrained_model(self, self.pretrained) - - -@manager.BACKBONES.add_component -def ResNet18_vd(**args): - model = ResNet_vd(layers=18, **args) - return model - - -def ResNet34_vd(**args): - model = ResNet_vd(layers=34, **args) - return model - - -@manager.BACKBONES.add_component -def ResNet50_vd(**args): - model = ResNet_vd(layers=50, **args) - return model - - -@manager.BACKBONES.add_component -def ResNet101_vd(**args): - model = ResNet_vd(layers=101, **args) - return model - - -def ResNet152_vd(**args): - model = ResNet_vd(layers=152, **args) - return model - - -def ResNet200_vd(**args): - model = ResNet_vd(layers=200, **args) - return model diff --git a/contrib/PanopticDeepLab/paddleseg/models/backbones/xception_deeplab.py b/contrib/PanopticDeepLab/paddleseg/models/backbones/xception_deeplab.py deleted file mode 100644 index b83caec51d..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/backbones/xception_deeplab.py +++ /dev/null @@ -1,415 +0,0 @@ -# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
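Tracing the ResNet_vd dilation logic above for a concrete case makes its behaviour easier to check: with output_stride=8, dilation_dict is {2: 2, 3: 4}, so stages 3 and 4 keep stride 1 (the `dilation_rate == 1` guard disables their downsampling) and stage 4 additionally multiplies its rate by multi_grid. A small sketch (illustrative only, not part of the patch):

    # ResNet50_vd, output_stride=8, multi_grid=(1, 2, 4)
    dilation_dict = {2: 2, 3: 4}
    multi_grid = (1, 2, 4)
    # stage 1 (block 0): dilation 1, stride 1 -> stays at 1/4 after the stem
    # stage 2 (block 1): dilation 1, first block stride 2 -> 1/8
    # stage 3 (block 2): dilation 2, stride forced to 1 -> stays at 1/8
    # stage 4 (block 3): stride 1, per-block dilations scaled by multi_grid
    stage4_rates = [dilation_dict[3] * g for g in multi_grid]
    assert stage4_rates == [4, 8, 16]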
- -import paddle.nn as nn -import paddle.nn.functional as F - -from paddleseg.cvlibs import manager -from paddleseg.utils import utils -from paddleseg.models import layers - -__all__ = ["Xception41_deeplab", "Xception65_deeplab", "Xception71_deeplab"] - - -def check_data(data, number): - if type(data) == int: - return [data] * number - assert len(data) == number - return data - - -def check_stride(s, os): - if s <= os: - return True - else: - return False - - -def check_points(count, points): - if points is None: - return False - else: - if isinstance(points, list): - return (True if count in points else False) - else: - return (True if count == points else False) - - -def gen_bottleneck_params(backbone='xception_65'): - if backbone == 'xception_65': - bottleneck_params = { - "entry_flow": (3, [2, 2, 2], [128, 256, 728]), - "middle_flow": (16, 1, 728), - "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) - } - elif backbone == 'xception_41': - bottleneck_params = { - "entry_flow": (3, [2, 2, 2], [128, 256, 728]), - "middle_flow": (8, 1, 728), - "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) - } - elif backbone == 'xception_71': - bottleneck_params = { - "entry_flow": (5, [2, 1, 2, 1, 2], [128, 256, 256, 728, 728]), - "middle_flow": (16, 1, 728), - "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) - } - else: - raise ValueError( - "Xception backbont only support xception_41/xception_65/xception_71" - ) - return bottleneck_params - - -class ConvBNLayer(nn.Layer): - def __init__(self, - input_channels, - output_channels, - filter_size, - stride=1, - padding=0, - act=None, - name=None): - super(ConvBNLayer, self).__init__() - - self._conv = nn.Conv2D( - in_channels=input_channels, - out_channels=output_channels, - kernel_size=filter_size, - stride=stride, - padding=padding, - bias_attr=False) - self._bn = layers.SyncBatchNorm( - num_features=output_channels, epsilon=1e-3, momentum=0.99) - - self._act_op = layers.Activation(act=act) - - def forward(self, inputs): - return self._act_op(self._bn(self._conv(inputs))) - - -class Seperate_Conv(nn.Layer): - def __init__(self, - input_channels, - output_channels, - stride, - filter, - dilation=1, - act=None, - name=None): - super(Seperate_Conv, self).__init__() - - self._conv1 = nn.Conv2D( - in_channels=input_channels, - out_channels=input_channels, - kernel_size=filter, - stride=stride, - groups=input_channels, - padding=(filter) // 2 * dilation, - dilation=dilation, - bias_attr=False) - self._bn1 = layers.SyncBatchNorm( - input_channels, epsilon=1e-3, momentum=0.99) - - self._act_op1 = layers.Activation(act=act) - - self._conv2 = nn.Conv2D( - input_channels, - output_channels, - 1, - stride=1, - groups=1, - padding=0, - bias_attr=False) - self._bn2 = layers.SyncBatchNorm( - output_channels, epsilon=1e-3, momentum=0.99) - - self._act_op2 = layers.Activation(act=act) - - def forward(self, inputs): - x = self._conv1(inputs) - x = self._bn1(x) - x = self._act_op1(x) - x = self._conv2(x) - x = self._bn2(x) - x = self._act_op2(x) - return x - - -class Xception_Block(nn.Layer): - def __init__(self, - input_channels, - output_channels, - strides=1, - filter_size=3, - dilation=1, - skip_conv=True, - has_skip=True, - activation_fn_in_separable_conv=False, - name=None): - super(Xception_Block, self).__init__() - - repeat_number = 3 - output_channels = check_data(output_channels, repeat_number) - filter_size = check_data(filter_size, repeat_number) - strides = check_data(strides, repeat_number) - - self.has_skip 
= has_skip - self.skip_conv = skip_conv - self.activation_fn_in_separable_conv = activation_fn_in_separable_conv - if not activation_fn_in_separable_conv: - self._conv1 = Seperate_Conv( - input_channels, - output_channels[0], - stride=strides[0], - filter=filter_size[0], - dilation=dilation, - name=name + "/separable_conv1") - self._conv2 = Seperate_Conv( - output_channels[0], - output_channels[1], - stride=strides[1], - filter=filter_size[1], - dilation=dilation, - name=name + "/separable_conv2") - self._conv3 = Seperate_Conv( - output_channels[1], - output_channels[2], - stride=strides[2], - filter=filter_size[2], - dilation=dilation, - name=name + "/separable_conv3") - else: - self._conv1 = Seperate_Conv( - input_channels, - output_channels[0], - stride=strides[0], - filter=filter_size[0], - act="relu", - dilation=dilation, - name=name + "/separable_conv1") - self._conv2 = Seperate_Conv( - output_channels[0], - output_channels[1], - stride=strides[1], - filter=filter_size[1], - act="relu", - dilation=dilation, - name=name + "/separable_conv2") - self._conv3 = Seperate_Conv( - output_channels[1], - output_channels[2], - stride=strides[2], - filter=filter_size[2], - act="relu", - dilation=dilation, - name=name + "/separable_conv3") - - if has_skip and skip_conv: - self._short = ConvBNLayer( - input_channels, - output_channels[-1], - 1, - stride=strides[-1], - padding=0, - name=name + "/shortcut") - - def forward(self, inputs): - if not self.activation_fn_in_separable_conv: - x = F.relu(inputs) - x = self._conv1(x) - x = F.relu(x) - x = self._conv2(x) - x = F.relu(x) - x = self._conv3(x) - else: - x = self._conv1(inputs) - x = self._conv2(x) - x = self._conv3(x) - if self.has_skip is False: - return x - if self.skip_conv: - skip = self._short(inputs) - else: - skip = inputs - return x + skip - - -class XceptionDeeplab(nn.Layer): - """ - The Xception backobne of DeepLabv3+ implementation based on PaddlePaddle. - - The original article refers to - Liang-Chieh Chen, et, al. "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation" - (https://arxiv.org/abs/1802.02611) - - Args: - backbone (str): Which type of Xception_DeepLab to select. It should be one of ('xception_41', 'xception_65', 'xception_71'). - pretrained (str, optional): The path of pretrained model. - output_stride (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 16. 
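The stride bookkeeping in XceptionDeeplab.__init__ below leans on check_stride above: a block keeps its nominal stride only while the cumulative stride stays within output_stride, otherwise the stride collapses to 1. A compact trace for xception_65 with output_stride=16 (illustrative only, not part of the patch):

    def check_stride(s, os):          # as defined above
        return s <= os

    output_stride = 16
    s = 2                             # after the stride-2 entry conv1
    for want in [2, 2, 2] + [1] * 16 + [2, 1]:   # entry, middle, exit flows
        stride = want if check_stride(s * want, output_stride) else 1
        s *= stride
    assert s == 16                    # the exit-flow stride 2 was forced to 1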
- - """ - - def __init__(self, backbone, pretrained=None, output_stride=16): - - super(XceptionDeeplab, self).__init__() - - bottleneck_params = gen_bottleneck_params(backbone) - self.backbone = backbone - self.feat_channels = [128, 2048] - - self._conv1 = ConvBNLayer( - 3, - 32, - 3, - stride=2, - padding=1, - act="relu", - name=self.backbone + "/entry_flow/conv1") - self._conv2 = ConvBNLayer( - 32, - 64, - 3, - stride=1, - padding=1, - act="relu", - name=self.backbone + "/entry_flow/conv2") - """ - bottleneck_params = { - "entry_flow": (3, [2, 2, 2], [128, 256, 728]), - "middle_flow": (16, 1, 728), - "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, 2048]]) - } - - if output_stride == 16: - entry_block3_stride = 2 - middle_block_dilation = 1 - exit_block_dilations = (1, 2) - elif output_stride == 8: - entry_block3_stride = 1 - middle_block_dilation = 2 - exit_block_dilations = (2, 4) - - """ - self.block_num = bottleneck_params["entry_flow"][0] - self.strides = bottleneck_params["entry_flow"][1] - self.chns = bottleneck_params["entry_flow"][2] - self.strides = check_data(self.strides, self.block_num) - self.chns = check_data(self.chns, self.block_num) - - self.entry_flow = [] - self.middle_flow = [] - - self.stride = 2 - self.output_stride = output_stride - s = self.stride - - for i in range(self.block_num): - stride = self.strides[i] if check_stride(s * self.strides[i], - self.output_stride) else 1 - xception_block = self.add_sublayer( - self.backbone + "/entry_flow/block" + str(i + 1), - Xception_Block( - input_channels=64 if i == 0 else self.chns[i - 1], - output_channels=self.chns[i], - strides=[1, 1, self.stride], - name=self.backbone + "/entry_flow/block" + str(i + 1))) - self.entry_flow.append(xception_block) - s = s * stride - self.stride = s - - self.block_num = bottleneck_params["middle_flow"][0] - self.strides = bottleneck_params["middle_flow"][1] - self.chns = bottleneck_params["middle_flow"][2] - self.strides = check_data(self.strides, self.block_num) - self.chns = check_data(self.chns, self.block_num) - s = self.stride - - for i in range(self.block_num): - stride = self.strides[i] if check_stride(s * self.strides[i], - self.output_stride) else 1 - xception_block = self.add_sublayer( - self.backbone + "/middle_flow/block" + str(i + 1), - Xception_Block( - input_channels=728, - output_channels=728, - strides=[1, 1, self.strides[i]], - skip_conv=False, - name=self.backbone + "/middle_flow/block" + str(i + 1))) - self.middle_flow.append(xception_block) - s = s * stride - self.stride = s - - self.block_num = bottleneck_params["exit_flow"][0] - self.strides = bottleneck_params["exit_flow"][1] - self.chns = bottleneck_params["exit_flow"][2] - self.strides = check_data(self.strides, self.block_num) - self.chns = check_data(self.chns, self.block_num) - s = self.stride - stride = self.strides[0] if check_stride(s * self.strides[0], - self.output_stride) else 1 - self._exit_flow_1 = Xception_Block( - 728, - self.chns[0], [1, 1, stride], - name=self.backbone + "/exit_flow/block1") - s = s * stride - stride = self.strides[1] if check_stride(s * self.strides[1], - self.output_stride) else 1 - self._exit_flow_2 = Xception_Block( - self.chns[0][-1], - self.chns[1], [1, 1, stride], - dilation=2, - has_skip=False, - activation_fn_in_separable_conv=True, - name=self.backbone + "/exit_flow/block2") - - self.pretrained = pretrained - self.init_weight() - - def forward(self, inputs): - x = self._conv1(inputs) - x = self._conv2(x) - feat_list = [] - for i, ef in 
enumerate(self.entry_flow): - x = ef(x) - if i == 0: - feat_list.append(x) - for mf in self.middle_flow: - x = mf(x) - x = self._exit_flow_1(x) - x = self._exit_flow_2(x) - feat_list.append(x) - return feat_list - - def init_weight(self): - if self.pretrained is not None: - utils.load_pretrained_model(self, self.pretrained) - - -@manager.BACKBONES.add_component -def Xception41_deeplab(**args): - model = XceptionDeeplab('xception_41', **args) - return model - - -@manager.BACKBONES.add_component -def Xception65_deeplab(**args): - model = XceptionDeeplab("xception_65", **args) - return model - - -@manager.BACKBONES.add_component -def Xception71_deeplab(**args): - model = XceptionDeeplab("xception_71", **args) - return model diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/__init__.py b/contrib/PanopticDeepLab/paddleseg/models/layers/__init__.py deleted file mode 100644 index 86ec36c08d..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/layers/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .layer_libs import ConvBNReLU, ConvBN, SeparableConvBNReLU, DepthwiseConvBN, AuxLayer, SyncBatchNorm -from .activation import Activation -from .pyramid_pool import ASPPModule, PPModule -from .attention import AttentionBlock -from .nonlocal2d import NonLocal2D diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/activation.py b/contrib/PanopticDeepLab/paddleseg/models/layers/activation.py deleted file mode 100644 index 89b6cf0e81..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/layers/activation.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle.nn as nn - - -class Activation(nn.Layer): - """ - The wrapper of activations. - - Args: - act (str, optional): The activation name in lowercase. It must be one of ['elu', 'gelu', - 'hardshrink', 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', - 'softmax', 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', - 'hsigmoid']. Default: None, means identical transformation. - - Returns: - A callable object of Activation. - - Raises: - KeyError: When parameter `act` is not in the optional range. 
- - Examples: - - from paddleseg.models.common.activation import Activation - - relu = Activation("relu") - print(relu) - # - - sigmoid = Activation("sigmoid") - print(sigmoid) - # - - not_exit_one = Activation("not_exit_one") - # KeyError: "not_exit_one does not exist in the current dict_keys(['elu', 'gelu', 'hardshrink', - # 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', 'softmax', - # 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', 'hsigmoid'])" - """ - - def __init__(self, act=None): - super(Activation, self).__init__() - - self._act = act - upper_act_names = nn.layer.activation.__all__ - lower_act_names = [act.lower() for act in upper_act_names] - act_dict = dict(zip(lower_act_names, upper_act_names)) - - if act is not None: - if act in act_dict.keys(): - act_name = act_dict[act] - self.act_func = eval( - "nn.layer.activation.{}()".format(act_name)) - else: - raise KeyError("{} does not exist in the current {}".format( - act, act_dict.keys())) - - def forward(self, x): - if self._act is not None: - return self.act_func(x) - else: - return x diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/attention.py b/contrib/PanopticDeepLab/paddleseg/models/layers/attention.py deleted file mode 100644 index f4be94f608..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/layers/attention.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F - -from paddleseg.models import layers - - -class AttentionBlock(nn.Layer): - """General self-attention block/non-local block. - - The original article refers to refer to https://arxiv.org/abs/1706.03762. - Args: - key_in_channels (int): Input channels of key feature. - query_in_channels (int): Input channels of query feature. - channels (int): Output channels of key/query transform. - out_channels (int): Output channels. - share_key_query (bool): Whether share projection weight between key - and query projection. - query_downsample (nn.Module): Query downsample module. - key_downsample (nn.Module): Key downsample module. - key_query_num_convs (int): Number of convs for key/query projection. - value_out_num_convs (int): Number of convs for value projection. - key_query_norm (bool): Whether to use BN for key/query projection. - value_out_norm (bool): Whether to use BN for value projection. - matmul_norm (bool): Whether normalize attention map with sqrt of - channels - with_out (bool): Whether use out projection. 
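Shape-wise, the block below projects queries and keys down to `channels`, flattens space, and forms an (h*w) x (h*w) affinity map that is softmax-normalized before gathering values. A small smoke test of that flow, with assumed hyper-parameters (illustrative only, not part of the patch):

    import paddle

    attn = AttentionBlock(
        key_in_channels=64, query_in_channels=64, channels=32, out_channels=64,
        share_key_query=False, query_downsample=None, key_downsample=None,
        key_query_num_convs=2, value_out_num_convs=1, key_query_norm=True,
        value_out_norm=True, matmul_norm=True, with_out=True)
    feats = paddle.rand([2, 64, 16, 16])
    out = attn(feats, feats)      # query [2,256,32] @ key [2,32,256] -> [2,256,256]
    assert out.shape == [2, 64, 16, 16]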
- """ - - def __init__(self, key_in_channels, query_in_channels, channels, - out_channels, share_key_query, query_downsample, - key_downsample, key_query_num_convs, value_out_num_convs, - key_query_norm, value_out_norm, matmul_norm, with_out): - super(AttentionBlock, self).__init__() - if share_key_query: - assert key_in_channels == query_in_channels - self.key_in_channels = key_in_channels - self.query_in_channels = query_in_channels - self.out_channels = out_channels - self.channels = channels - self.share_key_query = share_key_query - self.key_project = self.build_project( - key_in_channels, - channels, - num_convs=key_query_num_convs, - use_conv_module=key_query_norm) - if share_key_query: - self.query_project = self.key_project - else: - self.query_project = self.build_project( - query_in_channels, - channels, - num_convs=key_query_num_convs, - use_conv_module=key_query_norm) - - self.value_project = self.build_project( - key_in_channels, - channels if with_out else out_channels, - num_convs=value_out_num_convs, - use_conv_module=value_out_norm) - - if with_out: - self.out_project = self.build_project( - channels, - out_channels, - num_convs=value_out_num_convs, - use_conv_module=value_out_norm) - else: - self.out_project = None - - self.query_downsample = query_downsample - self.key_downsample = key_downsample - self.matmul_norm = matmul_norm - - def build_project(self, in_channels, channels, num_convs, use_conv_module): - if use_conv_module: - convs = [ - layers.ConvBNReLU( - in_channels=in_channels, - out_channels=channels, - kernel_size=1, - bias_attr=False) - ] - for _ in range(num_convs - 1): - convs.append( - layers.ConvBNReLU( - in_channels=channels, - out_channels=channels, - kernel_size=1, - bias_attr=False)) - else: - convs = [nn.Conv2D(in_channels, channels, 1)] - for _ in range(num_convs - 1): - convs.append(nn.Conv2D(channels, channels, 1)) - - if len(convs) > 1: - convs = nn.Sequential(*convs) - else: - convs = convs[0] - return convs - - def forward(self, query_feats, key_feats): - b, c, h, w = query_feats.shape - query = self.query_project(query_feats) - if self.query_downsample is not None: - query = self.query_downsample(query) - query = query.reshape([*query.shape[:2], -1]).transpose([0, 2, 1]) - - key = self.key_project(key_feats) - value = self.value_project(key_feats) - - if self.key_downsample is not None: - key = self.key_downsample(key) - value = self.key_downsample(value) - - key = key.reshape([*key.shape[:2], -1]) - value = value.reshape([*value.shape[:2], -1]).transpose([0, 2, 1]) - sim_map = paddle.matmul(query, key) - if self.matmul_norm: - sim_map = (self.channels**-0.5) * sim_map - sim_map = F.softmax(sim_map, axis=-1) - - context = paddle.matmul(sim_map, value) - context = paddle.transpose(context, [0, 2, 1]) - context = paddle.reshape(context, [b, -1, *query_feats.shape[2:]]) - - if self.out_project is not None: - context = self.out_project(context) - return context diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/layer_libs.py b/contrib/PanopticDeepLab/paddleseg/models/layers/layer_libs.py deleted file mode 100644 index 2b4845d10d..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/layers/layer_libs.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle
-import paddle.nn as nn
-import paddle.nn.functional as F
-
-
-def SyncBatchNorm(*args, **kwargs):
-    """In a CPU environment nn.SyncBatchNorm has no kernel, so nn.BatchNorm2D is used instead."""
-    if paddle.get_device() == 'cpu':
-        return nn.BatchNorm2D(*args, **kwargs)
-    else:
-        return nn.SyncBatchNorm(*args, **kwargs)
-
-
-class ConvBNReLU(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 padding='same',
-                 **kwargs):
-        super().__init__()
-
-        self._conv = nn.Conv2D(
-            in_channels, out_channels, kernel_size, padding=padding, **kwargs)
-
-        self._batch_norm = SyncBatchNorm(out_channels)
-
-    def forward(self, x):
-        x = self._conv(x)
-        x = self._batch_norm(x)
-        x = F.relu(x)
-        return x
-
-
-class ConvBN(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 padding='same',
-                 **kwargs):
-        super().__init__()
-        self._conv = nn.Conv2D(
-            in_channels, out_channels, kernel_size, padding=padding, **kwargs)
-        self._batch_norm = SyncBatchNorm(out_channels)
-
-    def forward(self, x):
-        x = self._conv(x)
-        x = self._batch_norm(x)
-        return x
-
-
-class ConvReLUPool(nn.Layer):
-    def __init__(self, in_channels, out_channels):
-        super().__init__()
-        self.conv = nn.Conv2D(
-            in_channels,
-            out_channels,
-            kernel_size=3,
-            stride=1,
-            padding=1,
-            dilation=1)
-
-    def forward(self, x):
-        x = self.conv(x)
-        x = F.relu(x)
-        x = F.max_pool2d(x, kernel_size=2, stride=2)
-        return x
-
-
-class SeparableConvBNReLU(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 padding='same',
-                 **kwargs):
-        super().__init__()
-        self.depthwise_conv = ConvBNReLU(
-            in_channels,
-            out_channels=in_channels,
-            kernel_size=kernel_size,
-            padding=padding,
-            groups=in_channels,
-            **kwargs)
-        self.pointwise_conv = ConvBNReLU(
-            in_channels, out_channels, kernel_size=1, groups=1, bias_attr=False)
-
-    def forward(self, x):
-        x = self.depthwise_conv(x)
-        x = self.pointwise_conv(x)
-        return x
-
-
-class DepthwiseConvBN(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 padding='same',
-                 **kwargs):
-        super().__init__()
-        self.depthwise_conv = ConvBN(
-            in_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            padding=padding,
-            groups=in_channels,
-            **kwargs)
-
-    def forward(self, x):
-        x = self.depthwise_conv(x)
-        return x
-
-
-class AuxLayer(nn.Layer):
-    """
-    The auxiliary layer implementation for auxiliary loss.
-
-    Args:
-        in_channels (int): The number of input channels.
-        inter_channels (int): The intermediate channels.
-        out_channels (int): The number of output channels, and usually it is num_classes.
-        dropout_prob (float, optional): The drop rate. Default: 0.1.
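One sentence of motivation for SeparableConvBNReLU above: factoring a dense k x k conv into a depthwise k x k plus a pointwise 1 x 1 cuts the weight count by roughly a factor of k*k when channel counts are large. A back-of-the-envelope check (illustrative only, not part of the patch):

    cin, cout, k = 256, 256, 3
    dense = cin * cout * k * k             # 589,824 weights
    separable = cin * k * k + cin * cout   # 2,304 + 65,536 = 67,840 weights
    assert dense // separable == 8         # close to the ideal k*k = 9x saving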
- """ - - def __init__(self, - in_channels, - inter_channels, - out_channels, - dropout_prob=0.1): - super().__init__() - - self.conv_bn_relu = ConvBNReLU( - in_channels=in_channels, - out_channels=inter_channels, - kernel_size=3, - padding=1) - - self.dropout = nn.Dropout(p=dropout_prob) - - self.conv = nn.Conv2D( - in_channels=inter_channels, - out_channels=out_channels, - kernel_size=1) - - def forward(self, x): - x = self.conv_bn_relu(x) - x = self.dropout(x) - x = self.conv(x) - return x diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/nonlocal2d.py b/contrib/PanopticDeepLab/paddleseg/models/layers/nonlocal2d.py deleted file mode 100644 index bd577c1a16..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/layers/nonlocal2d.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F - -from paddleseg.models import layers - - -class NonLocal2D(nn.Layer): - """Basic Non-local module. - This model is the implementation of "Non-local Neural Networks" - (https://arxiv.org/abs/1711.07971) - - Args: - in_channels (int): Channels of the input feature map. - reduction (int): Channel reduction ratio. Default: 2. - use_scale (bool): Whether to scale pairwise_weight by `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`. Default: True. - sub_sample (bool): Whether to utilize max pooling after pairwise function. Default: False. - mode (str): Options are `gaussian`, `concatenation`, `embedded_gaussian` and `dot_product`. Default: embedded_gaussian. - """ - - def __init__(self, - in_channels, - reduction=2, - use_scale=True, - sub_sample=False, - mode='embedded_gaussian'): - super(NonLocal2D, self).__init__() - self.in_channels = in_channels - self.reduction = reduction - self.use_scale = use_scale - self.sub_sample = sub_sample - self.mode = mode - if mode not in [ - 'gaussian', 'embedded_gaussian', 'dot_product', 'concatenation' - ]: - raise ValueError( - "Mode should be in 'gaussian', 'concatenation','embedded_gaussian' or 'dot_product'." 
-        )
-
-        self.inter_channels = max(in_channels // reduction, 1)
-
-        self.g = nn.Conv2D(
-            in_channels=self.in_channels,
-            out_channels=self.inter_channels,
-            kernel_size=1)
-        self.conv_out = layers.ConvBNReLU(
-            in_channels=self.inter_channels,
-            out_channels=self.in_channels,
-            kernel_size=1,
-            bias_attr=False)
-
-        if self.mode != "gaussian":
-            self.theta = nn.Conv2D(
-                in_channels=self.in_channels,
-                out_channels=self.inter_channels,
-                kernel_size=1)
-            self.phi = nn.Conv2D(
-                in_channels=self.in_channels,
-                out_channels=self.inter_channels,
-                kernel_size=1)
-
-        if self.mode == "concatenation":
-            self.concat_project = layers.ConvBNReLU(
-                in_channels=self.inter_channels * 2,
-                out_channels=1,
-                kernel_size=1,
-                bias_attr=False)
-
-        if self.sub_sample:
-            max_pool_layer = nn.MaxPool2D(kernel_size=(2, 2))
-            self.g = nn.Sequential(self.g, max_pool_layer)
-            if self.mode != 'gaussian':
-                self.phi = nn.Sequential(self.phi, max_pool_layer)
-            else:
-                self.phi = max_pool_layer
-
-    def gaussian(self, theta_x, phi_x):
-        pairwise_weight = paddle.matmul(theta_x, phi_x)
-        pairwise_weight = F.softmax(pairwise_weight, axis=-1)
-        return pairwise_weight
-
-    def embedded_gaussian(self, theta_x, phi_x):
-        pairwise_weight = paddle.matmul(theta_x, phi_x)
-        if self.use_scale:
-            pairwise_weight /= theta_x.shape[-1]**0.5
-        pairwise_weight = F.softmax(pairwise_weight, -1)
-        return pairwise_weight
-
-    def dot_product(self, theta_x, phi_x):
-        pairwise_weight = paddle.matmul(theta_x, phi_x)
-        pairwise_weight /= pairwise_weight.shape[-1]
-        return pairwise_weight
-
-    def concatenation(self, theta_x, phi_x):
-        h = theta_x.shape[2]
-        w = phi_x.shape[3]
-        theta_x = paddle.tile(theta_x, [1, 1, 1, w])
-        phi_x = paddle.tile(phi_x, [1, 1, h, 1])
-
-        concat_feature = paddle.concat([theta_x, phi_x], axis=1)
-        pairwise_weight = self.concat_project(concat_feature)
-        n, _, h, w = pairwise_weight.shape
-        pairwise_weight = paddle.reshape(pairwise_weight, [n, h, w])
-        pairwise_weight /= pairwise_weight.shape[-1]
-        return pairwise_weight
-
-    def forward(self, x):
-        n, c, h, w = x.shape
-        g_x = paddle.reshape(self.g(x), [n, self.inter_channels, -1])
-        g_x = paddle.transpose(g_x, [0, 2, 1])
-
-        if self.mode == 'gaussian':
-            theta_x = paddle.reshape(x, [n, self.in_channels, -1])
-            theta_x = paddle.transpose(theta_x, [0, 2, 1])
-            if self.sub_sample:
-                phi_x = paddle.reshape(
-                    self.phi(x), [n, self.in_channels, -1])
-            else:
-                phi_x = paddle.reshape(x, [n, self.in_channels, -1])
-
-        elif self.mode == 'concatenation':
-            theta_x = paddle.reshape(
-                self.theta(x), [n, self.inter_channels, -1, 1])
-            phi_x = paddle.reshape(
-                self.phi(x), [n, self.inter_channels, 1, -1])
-
-        else:
-            theta_x = paddle.reshape(
-                self.theta(x), [n, self.inter_channels, -1])
-            theta_x = paddle.transpose(theta_x, [0, 2, 1])
-            phi_x = paddle.reshape(self.phi(x), [n, self.inter_channels, -1])
-
-        pairwise_func = getattr(self, self.mode)
-        pairwise_weight = pairwise_func(theta_x, phi_x)
-        y = paddle.matmul(pairwise_weight, g_x)
-        y = paddle.transpose(y, [0, 2, 1])
-        y = paddle.reshape(y, [n, self.inter_channels, h, w])
-
-        output = x + self.conv_out(y)
-
-        return output
diff --git a/contrib/PanopticDeepLab/paddleseg/models/layers/pyramid_pool.py b/contrib/PanopticDeepLab/paddleseg/models/layers/pyramid_pool.py
deleted file mode 100644
index 87e19f36bd..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/models/layers/pyramid_pool.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -import paddle.nn.functional as F -from paddle import nn - -from paddleseg.models import layers - - -class ASPPModule(nn.Layer): - """ - Atrous Spatial Pyramid Pooling. - - Args: - aspp_ratios (tuple): The dilation rate using in ASSP module. - in_channels (int): The number of input channels. - out_channels (int): The number of output channels. - align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature - is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. - use_sep_conv (bool, optional): If using separable conv in ASPP module. Default: False. - image_pooling (bool, optional): If augmented with image-level features. Default: False - """ - - def __init__(self, - aspp_ratios, - in_channels, - out_channels, - align_corners, - use_sep_conv=False, - image_pooling=False, - drop_rate=0.1): - super().__init__() - - self.align_corners = align_corners - self.aspp_blocks = nn.LayerList() - - for ratio in aspp_ratios: - if use_sep_conv and ratio > 1: - conv_func = layers.SeparableConvBNReLU - else: - conv_func = layers.ConvBNReLU - - block = conv_func( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1 if ratio == 1 else 3, - dilation=ratio, - padding=0 if ratio == 1 else ratio, - bias_attr=False) - self.aspp_blocks.append(block) - - out_size = len(self.aspp_blocks) - - if image_pooling: - self.global_avg_pool = nn.Sequential( - nn.AdaptiveAvgPool2D(output_size=(1, 1)), - layers.ConvBNReLU( - in_channels, out_channels, kernel_size=1, bias_attr=False)) - out_size += 1 - self.image_pooling = image_pooling - - self.conv_bn_relu = layers.ConvBNReLU( - in_channels=out_channels * out_size, - out_channels=out_channels, - kernel_size=1, - bias_attr=False) - - self.dropout = nn.Dropout(p=drop_rate) # drop rate - - def forward(self, x): - outputs = [] - for block in self.aspp_blocks: - y = block(x) - interpolate_shape = x.shape[2:] - y = F.interpolate( - y, - interpolate_shape, - mode='bilinear', - align_corners=self.align_corners) - outputs.append(y) - - if self.image_pooling: - img_avg = self.global_avg_pool(x) - img_avg = F.interpolate( - img_avg, - interpolate_shape, - mode='bilinear', - align_corners=self.align_corners) - outputs.append(img_avg) - - x = paddle.concat(outputs, axis=1) - x = self.conv_bn_relu(x) - x = self.dropout(x) - - return x - - -class PPModule(nn.Layer): - """ - Pyramid pooling module originally in PSPNet. - - Args: - in_channels (int): The number of intput channels to pyramid pooling module. - out_channels (int): The number of output channels after pyramid pooling module. - bin_sizes (tuple, optional): The out size of pooled feature maps. Default: (1, 2, 3, 6). - dim_reduction (bool, optional): A bool value represents if reducing dimension after pooling. Default: True. - align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature - is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. 
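For orientation, the ASPP module above is conventionally paired with the backbone's output stride: rates (1, 6, 12, 18) for output_stride=16 and (1, 12, 24, 36) for output_stride=8, matching the pairing documented in this repository's DeepLab-style model docstrings. A minimal usage sketch under those assumptions (illustrative only, not part of the patch):

    import paddle

    aspp = ASPPModule(
        aspp_ratios=(1, 12, 24, 36),      # output_stride=8
        in_channels=2048, out_channels=256,
        align_corners=False, use_sep_conv=True, image_pooling=True)
    feat = paddle.rand([1, 2048, 64, 64])
    assert aspp(feat).shape == [1, 256, 64, 64]   # 5 branches fused back to 256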
- """ - - def __init__(self, in_channels, out_channels, bin_sizes, dim_reduction, - align_corners): - super().__init__() - - self.bin_sizes = bin_sizes - - inter_channels = in_channels - if dim_reduction: - inter_channels = in_channels // len(bin_sizes) - - # we use dimension reduction after pooling mentioned in original implementation. - self.stages = nn.LayerList([ - self._make_stage(in_channels, inter_channels, size) - for size in bin_sizes - ]) - - self.conv_bn_relu2 = layers.ConvBNReLU( - in_channels=in_channels + inter_channels * len(bin_sizes), - out_channels=out_channels, - kernel_size=3, - padding=1) - - self.align_corners = align_corners - - def _make_stage(self, in_channels, out_channels, size): - """ - Create one pooling layer. - - In our implementation, we adopt the same dimension reduction as the original paper that might be - slightly different with other implementations. - - After pooling, the channels are reduced to 1/len(bin_sizes) immediately, while some other implementations - keep the channels to be same. - - Args: - in_channels (int): The number of intput channels to pyramid pooling module. - size (int): The out size of the pooled layer. - - Returns: - conv (Tensor): A tensor after Pyramid Pooling Module. - """ - - prior = nn.AdaptiveAvgPool2D(output_size=(size, size)) - conv = layers.ConvBNReLU( - in_channels=in_channels, out_channels=out_channels, kernel_size=1) - - return nn.Sequential(prior, conv) - - def forward(self, input): - cat_layers = [] - for stage in self.stages: - x = stage(input) - x = F.interpolate( - x, - input.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - cat_layers.append(x) - cat_layers = [input] + cat_layers[::-1] - cat = paddle.concat(cat_layers, axis=1) - out = self.conv_bn_relu2(cat) - - return out diff --git a/contrib/PanopticDeepLab/paddleseg/models/losses/__init__.py b/contrib/PanopticDeepLab/paddleseg/models/losses/__init__.py deleted file mode 100644 index e4d5cc9e76..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/losses/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .cross_entropy_loss import CrossEntropyLoss -from .mean_square_error_loss import MSELoss -from .l1_loss import L1Loss diff --git a/contrib/PanopticDeepLab/paddleseg/models/losses/cross_entropy_loss.py b/contrib/PanopticDeepLab/paddleseg/models/losses/cross_entropy_loss.py deleted file mode 100644 index 87320c1598..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/losses/cross_entropy_loss.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -from paddle import nn -import paddle.nn.functional as F - -from paddleseg.cvlibs import manager - - -@manager.LOSSES.add_component -class CrossEntropyLoss(nn.Layer): - """ - Implements the cross entropy loss function. - - Args: - ignore_index (int64): Specifies a target value that is ignored - and does not contribute to the input gradient. Default ``255``. - """ - - def __init__(self, ignore_index=255, top_k_percent_pixels=1.0): - super(CrossEntropyLoss, self).__init__() - self.ignore_index = ignore_index - self.top_k_percent_pixels = top_k_percent_pixels - self.EPS = 1e-5 - - def forward(self, logit, label, semantic_weights): - """ - Forward computation. - - Args: - logit (Tensor): Logit tensor, the data type is float32, float64. Shape is - (N, C), where C is number of classes, and if shape is more than 2D, this - is (N, C, D1, D2,..., Dk), k >= 1. - label (Tensor): Label tensor, the data type is int64. Shape is (N), where each - value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is - (N, D1, D2,..., Dk), k >= 1. - """ - if len(label.shape) != len(logit.shape): - label = paddle.unsqueeze(label, 1) - - logit = paddle.transpose(logit, [0, 2, 3, 1]) - label = paddle.transpose(label, [0, 2, 3, 1]) - loss = F.softmax_with_cross_entropy( - logit, label, ignore_index=self.ignore_index, axis=-1) - - mask = label != self.ignore_index - mask = paddle.cast(mask, 'float32') - loss = loss * mask - if semantic_weights is not None: - loss = loss.squeeze(-1) - loss = loss * semantic_weights - - label.stop_gradient = True - mask.stop_gradient = True - if self.top_k_percent_pixels == 1.0: - avg_loss = paddle.mean(loss) / (paddle.mean(mask) + self.EPS) - return avg_loss - - loss = loss.reshape((-1, )) - top_k_pixels = int(self.top_k_percent_pixels * loss.numel()) - loss, _ = paddle.topk(loss, top_k_pixels) - return loss.mean() diff --git a/contrib/PanopticDeepLab/paddleseg/models/losses/l1_loss.py b/contrib/PanopticDeepLab/paddleseg/models/losses/l1_loss.py deleted file mode 100644 index 5fbbae2880..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/models/losses/l1_loss.py +++ /dev/null @@ -1,72 +0,0 @@ -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -from paddle import nn -import paddle.nn.functional as F - -from paddleseg.cvlibs import manager - - -@manager.LOSSES.add_component -class L1Loss(nn.MSELoss): - r""" - This interface is used to construct a callable object of the ``L1Loss`` class. - The L1Loss layer calculates the L1 Loss of ``input`` and ``label`` as follows. - If `reduction` set to ``'none'``, the loss is: - .. math:: - Out = \lvert input - label\rvert - If `reduction` set to ``'mean'``, the loss is: - .. 
math::
-        Out = MEAN(\lvert input - label\rvert)
-    If `reduction` is set to ``'sum'``, the loss is:
-    .. math::
-        Out = SUM(\lvert input - label\rvert)
-    Parameters:
-        reduction (str, optional): Indicate the reduction to apply to the loss,
-            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
-            If `reduction` is ``'none'``, the unreduced loss is returned;
-            If `reduction` is ``'mean'``, the reduced mean loss is returned.
-            If `reduction` is ``'sum'``, the reduced sum loss is returned.
-            Default is ``'mean'``.
-        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
-    Shape:
-        input (Tensor): The input tensor. The shape is [N, *], where N is batch size and `*` means any number of additional dimensions. Its data type should be float32, float64, int32, int64.
-        label (Tensor): label. The shape is [N, *], same shape as ``input``. Its data type should be float32, float64, int32, int64.
-        output (Tensor): The L1 Loss of ``input`` and ``label``.
-            If `reduction` is ``'none'``, the shape of output loss is [N, *], the same as ``input``.
-            If `reduction` is ``'mean'`` or ``'sum'``, the shape of output loss is [1].
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            import numpy as np
-            input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
-            label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
-            input = paddle.to_tensor(input_data)
-            label = paddle.to_tensor(label_data)
-            l1_loss = paddle.nn.L1Loss()
-            output = l1_loss(input, label)
-            print(output.numpy())
-            # [0.35]
-            l1_loss = paddle.nn.L1Loss(reduction='sum')
-            output = l1_loss(input, label)
-            print(output.numpy())
-            # [1.4]
-            l1_loss = paddle.nn.L1Loss(reduction='none')
-            output = l1_loss(input, label)
-            print(output)
-            # [[0.20000005 0.19999999]
-            #  [0.2        0.79999995]]
-    """
-
-    def __init__(self, reduction='mean', ignore_index=255):
-        super().__init__(reduction=reduction)
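# --- Editor's note (not part of the original patch) ---------------------------
# Numeric sketch of the `top_k_percent_pixels` branch of the CrossEntropyLoss
# deleted above: instead of averaging over all pixels, only the hardest
# k percent (largest per-pixel losses) contribute. Plain numpy, for intuition.
import numpy as np

per_pixel_loss = np.array([0.1, 2.3, 0.05, 1.7, 0.2, 0.9])
top_k_percent_pixels = 0.5
top_k = int(top_k_percent_pixels * per_pixel_loss.size)   # 3 hardest pixels
hardest = np.sort(per_pixel_loss)[::-1][:top_k]           # [2.3, 1.7, 0.9]
print(hardest.mean())                                     # ~1.633
# With top_k_percent_pixels=1.0 the deleted code instead averages over all
# pixels, normalized by the valid-pixel (non-ignored) fraction.
# ------------------------------------------------------------------------------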
diff --git a/contrib/PanopticDeepLab/paddleseg/models/losses/mean_square_error_loss.py b/contrib/PanopticDeepLab/paddleseg/models/losses/mean_square_error_loss.py
deleted file mode 100644
index fa66c9c5f3..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/models/losses/mean_square_error_loss.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle
-from paddle import nn
-import paddle.nn.functional as F
-
-from paddleseg.cvlibs import manager
-
-
-@manager.LOSSES.add_component
-class MSELoss(nn.MSELoss):
-    r"""
-    **Mean Square Error Loss**
-    Computes the mean square error (squared L2 norm) of given input and label.
-    If :attr:`reduction` is set to ``'none'``, loss is calculated as:
-    .. math::
-        Out = (input - label)^2
-    If :attr:`reduction` is set to ``'mean'``, loss is calculated as:
-    .. math::
-        Out = \operatorname{mean}((input - label)^2)
-    If :attr:`reduction` is set to ``'sum'``, loss is calculated as:
-    .. math::
-        Out = \operatorname{sum}((input - label)^2)
-    where `input` and `label` are `float32` tensors of same shape.
-    Parameters:
-        reduction (string, optional): The reduction method for the output,
-            could be 'none' | 'mean' | 'sum'.
-            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned.
-            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
-            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
-            Default is ``'mean'``.
-    Shape:
-        input (Tensor): Input tensor, the data type is float32 or float64
-        label (Tensor): Label tensor, the data type is float32 or float64
-        output (Tensor): output tensor storing the MSE loss of input and label, the data type is same as input.
-    Examples:
-        .. code-block:: python
-
-            import numpy as np
-            import paddle
-            input_data = np.array([1.5]).astype("float32")
-            label_data = np.array([1.7]).astype("float32")
-            mse_loss = paddle.nn.loss.MSELoss()
-            input = paddle.to_tensor(input_data)
-            label = paddle.to_tensor(label_data)
-            output = mse_loss(input, label)
-            print(output)
-            # [0.04000002]
-    """
-
-    def __init__(self, reduction='mean', ignore_index=255):
-        super().__init__(reduction=reduction)
diff --git a/contrib/PanopticDeepLab/paddleseg/models/panoptic_deeplab.py b/contrib/PanopticDeepLab/paddleseg/models/panoptic_deeplab.py
deleted file mode 100644
index 923340bf32..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/models/panoptic_deeplab.py
+++ /dev/null
@@ -1,339 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from collections import OrderedDict
-
-import paddle
-import paddle.nn as nn
-import paddle.nn.functional as F
-
-from paddleseg.cvlibs import manager
-from paddleseg.models import layers
-from paddleseg.utils import utils
-
-__all__ = ['PanopticDeepLab']
-
-
-@manager.MODELS.add_component
-class PanopticDeepLab(nn.Layer):
-    """
-    The PanopticDeeplab implementation based on PaddlePaddle.
-
-    The original article refers to
-    Bowen Cheng, et al. "Panoptic-DeepLab: A Simple, Strong, and Fast Baseline for Bottom-Up Panoptic Segmentation"
-    (https://arxiv.org/abs/1911.10194)
-
-    Args:
-        num_classes (int): The unique number of target classes.
-        backbone (paddle.nn.Layer): Backbone network, currently supports Resnet50_vd/Resnet101_vd/Xception65.
-        backbone_indices (tuple, optional): The indices of backbone output used by the decoders.
-            The last index is fed to ASPP and the remaining indices are taken as low-level
-            features, ordered from the largest stride down. Default: (2, 1, 0, 3).
-        aspp_ratios (tuple, optional): The dilation rates used in the ASPP module.
-            If output_stride=16, aspp_ratios should be set as (1, 6, 12, 18).
-            If output_stride=8, aspp_ratios is (1, 12, 24, 36).
-            Default: (1, 6, 12, 18).
-        aspp_out_channels (int, optional): The output channels of ASPP module. Default: 256.
-        align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
-            e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
-        pretrained (str, optional): The path or url of pretrained model. Default: None.
-    """
- """ - - def __init__(self, - num_classes, - backbone, - backbone_indices=(2, 1, 0, 3), - aspp_ratios=(1, 6, 12, 18), - aspp_out_channels=256, - decoder_channels=256, - low_level_channels_projects=None, - align_corners=False, - pretrained=None, - **kwargs): - super().__init__() - - self.backbone = backbone - backbone_channels = [ - backbone.feat_channels[i] for i in backbone_indices - ] - - self.head = PanopticDeepLabHead( - num_classes, backbone_indices, backbone_channels, aspp_ratios, - aspp_out_channels, decoder_channels, align_corners, - low_level_channels_projects, **kwargs) - - self.align_corners = align_corners - self.pretrained = pretrained - self.init_weight() - - def _upsample_predictions(self, pred, input_shape): - """Upsamples final prediction, with special handling to offset. - Args: - pred (dict): stores all output of the segmentation model. - input_shape (tuple): spatial resolution of the desired shape. - Returns: - result (OrderedDict): upsampled dictionary. - """ - # Override upsample method to correctly handle `offset` - result = OrderedDict() - for key in pred.keys(): - out = F.interpolate( - pred[key], - size=input_shape, - mode='bilinear', - align_corners=self.align_corners) - if 'offset' in key: - if input_shape[0] % 2 == 0: - scale = input_shape[0] // pred[key].shape[2] - else: - scale = (input_shape[0] - 1) // (pred[key].shape[2] - 1) - out *= scale - result[key] = out - return result - - def forward(self, x): - feat_list = self.backbone(x) - logit_dict = self.head(feat_list) - results = self._upsample_predictions(logit_dict, x.shape[-2:]) - - # return results - logit_list = [results['semantic'], results['center'], results['offset']] - return logit_list - # return [results['semantic']] - - def init_weight(self): - if self.pretrained is not None: - utils.load_entire_model(self, self.pretrained) - - -class PanopticDeepLabHead(nn.Layer): - """ - The DeepLabV3PHead implementation based on PaddlePaddle. - - Args: - num_classes (int): The unique number of target classes. - backbone_indices (tuple): Two values in the tuple indicate the indices of output of backbone. - the first index will be taken as a low-level feature in Decoder component; - the second one will be taken as input of ASPP component. - Usually backbone consists of four downsampling stage, and return an output of - each stage. If we set it as (0, 3), it means taking feature map of the first - stage in backbone as low-level feature used in Decoder, and feature map of the fourth - stage as input of ASPP. - backbone_channels (tuple): The same length with "backbone_indices". It indicates the channels of corresponding index. - aspp_ratios (tuple): The dilation rates using in ASSP module. - aspp_out_channels (int): The output channels of ASPP module. - align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature - is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. 
- """ - - def __init__(self, num_classes, backbone_indices, backbone_channels, - aspp_ratios, aspp_out_channels, decoder_channels, - align_corners, low_level_channels_projects, **kwargs): - super().__init__() - self.semantic_decoder = SinglePanopticDeepLabDecoder( - backbone_indices=backbone_indices, - backbone_channels=backbone_channels, - aspp_ratios=aspp_ratios, - aspp_out_channels=aspp_out_channels, - decoder_channels=decoder_channels, - align_corners=align_corners, - low_level_channels_projects=low_level_channels_projects) - self.semantic_head = SinglePanopticDeepLabHead( - num_classes=[num_classes], - decoder_channels=decoder_channels, - head_channels=decoder_channels, - class_key=['semantic']) - self.instance_decoder = SinglePanopticDeepLabDecoder( - backbone_indices=backbone_indices, - backbone_channels=backbone_channels, - aspp_ratios=aspp_ratios, - aspp_out_channels=kwargs['instance_aspp_out_channels'], - decoder_channels=kwargs['instance_decoder_channels'], - align_corners=align_corners, - low_level_channels_projects=kwargs[ - 'instance_low_level_channels_projects']) - self.instance_head = SinglePanopticDeepLabHead( - num_classes=kwargs['instance_num_classes'], - decoder_channels=kwargs['instance_decoder_channels'], - head_channels=kwargs['instance_head_channels'], - class_key=kwargs['instance_class_key']) - - def forward(self, features): - # pred = OrdereDict() - pred = {} - - # Semantic branch - semantic = self.semantic_decoder(features) - semantic = self.semantic_head(semantic) - for key in semantic.keys(): - pred[key] = semantic[key] - - # Instance branch - instance = self.instance_decoder(features) - instance = self.instance_head(instance) - for key in instance.keys(): - pred[key] = instance[key] - - return pred - - -class SinglePanopticDeepLabDecoder(nn.Layer): - """ - The DeepLabV3PHead implementation based on PaddlePaddle. - - Args: - num_classes (int): The unique number of target classes. - backbone_indices (tuple): Two values in the tuple indicate the indices of output of backbone. - the first index will be taken as a low-level feature in Decoder component; - the second one will be taken as input of ASPP component. - Usually backbone consists of four downsampling stage, and return an output of - each stage. If we set it as (0, 3), it means taking feature map of the first - stage in backbone as low-level feature used in Decoder, and feature map of the fourth - stage as input of ASPP. - backbone_channels (tuple): The same length with "backbone_indices". It indicates the channels of corresponding index. - aspp_ratios (tuple): The dilation rates using in ASSP module. - aspp_out_channels (int): The output channels of ASPP module. - align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature - is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. 
- """ - - def __init__(self, backbone_indices, backbone_channels, aspp_ratios, - aspp_out_channels, decoder_channels, align_corners, - low_level_channels_projects): - super().__init__() - self.aspp = layers.ASPPModule( - aspp_ratios, - backbone_channels[-1], - aspp_out_channels, - align_corners, - use_sep_conv=False, - image_pooling=True, - drop_rate=0.5) - self.backbone_indices = backbone_indices - self.decoder_stage = len(low_level_channels_projects) - if self.decoder_stage != len(self.backbone_indices) - 1: - raise ValueError( - "len(low_level_channels_projects) != len(backbone_indices) - 1, they are {} and {}" - .format(low_level_channels_projects, backbone_indices)) - self.align_corners = align_corners - - # Transform low-level feature - project = [] - # Fuse - fuse = [] - # Top-down direction, i.e. starting from largest stride - for i in range(self.decoder_stage): - project.append( - layers.ConvBNReLU( - backbone_channels[i], - low_level_channels_projects[i], - 1, - bias_attr=False)) - if i == 0: - fuse_in_channels = aspp_out_channels + low_level_channels_projects[ - i] - else: - fuse_in_channels = decoder_channels + low_level_channels_projects[ - i] - fuse.append( - layers.SeparableConvBNReLU( - fuse_in_channels, - decoder_channels, - 5, - padding=2, - bias_attr=False)) - self.project = nn.LayerList(project) - self.fuse = nn.LayerList(fuse) - - def forward(self, feat_list): - x = feat_list[self.backbone_indices[-1]] - x = self.aspp(x) - - for i in range(self.decoder_stage): - l = feat_list[self.backbone_indices[i]] - l = self.project[i](l) - x = F.interpolate( - x, - size=l.shape[-2:], - mode='bilinear', - align_corners=self.align_corners) - x = paddle.concat([x, l], axis=1) - x = self.fuse[i](x) - - return x - - -class SinglePanopticDeepLabHead(nn.Layer): - """ - Decoder module of DeepLabV3P model - - Args: - num_classes (int): The number of classes. - in_channels (int): The number of input channels in decoder module. 
- """ - - def __init__(self, num_classes, decoder_channels, head_channels, class_key): - super(SinglePanopticDeepLabHead, self).__init__() - self.num_head = len(num_classes) - if self.num_head != len(class_key): - raise ValueError( - "len(num_classes) != len(class_key), they are {} and {}".format( - num_classes, class_key)) - - classifier = [] - for i in range(self.num_head): - classifier.append( - nn.Sequential( - layers.SeparableConvBNReLU( - decoder_channels, - head_channels, - 5, - padding=2, - bias_attr=False), - nn.Conv2D(head_channels, num_classes[i], 1))) - - self.classifier = nn.LayerList(classifier) - self.class_key = class_key - - def forward(self, x): - pred = OrderedDict() - # build classifier - for i, key in enumerate(self.class_key): - pred[key] = self.classifier[i](x) - - return pred - - -if __name__ == '__main__': - paddle.set_device('cpu') - from paddleseg.models.backbones import ResNet50_vd - backbone = ResNet50_vd(output_stride=32) - model = PanopticDeepLab( - num_classes=2, - backbone=backbone, - backbone_indices=(2, 1, 0, 3), - aspp_ratios=(1, 3, 6, 9), - aspp_out_channels=256, - decoder_channels=256, - low_level_channels_projects=[128, 64, 32], - align_corners=True, - instance_aspp_out_channels=256, - instance_decoder_channels=128, - instance_low_level_channels_projects=[64, 32, 16], - instance_num_classes=[1, 2], - instance_head_channels=32, - instance_class_key=["center", "offset"]) - flop = paddle.flops(model, (1, 3, 512, 1024), print_detail=True) - x = paddle.rand((1, 3, 512, 1024)) - result = model(x) - print(result) diff --git a/contrib/PanopticDeepLab/paddleseg/transforms/__init__.py b/contrib/PanopticDeepLab/paddleseg/transforms/__init__.py deleted file mode 100644 index 72332d8eef..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/transforms/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .transforms import * -from . import functional -from .target_transforms import PanopticTargetGenerator, SemanticTargetGenerator, InstanceTargetGenerator, RawPanopticTargetGenerator diff --git a/contrib/PanopticDeepLab/paddleseg/transforms/functional.py b/contrib/PanopticDeepLab/paddleseg/transforms/functional.py deleted file mode 100644 index d53fa8b84f..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/transforms/functional.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import cv2 -import numpy as np -from PIL import Image, ImageEnhance -from scipy.ndimage.morphology import distance_transform_edt - - -def normalize(im, mean, std): - im = im.astype(np.float32, copy=False) / 255.0 - im -= mean - im /= std - return im - - -def resize(im, target_size=608, interp=cv2.INTER_LINEAR): - if isinstance(target_size, list) or isinstance(target_size, tuple): - w = target_size[0] - h = target_size[1] - else: - w = target_size - h = target_size - im = cv2.resize(im, (w, h), interpolation=interp) - return im - - -def resize_long(im, long_size=224, interpolation=cv2.INTER_LINEAR): - value = max(im.shape[0], im.shape[1]) - scale = float(long_size) / float(value) - resized_width = int(round(im.shape[1] * scale)) - resized_height = int(round(im.shape[0] * scale)) - - im = cv2.resize( - im, (resized_width, resized_height), interpolation=interpolation) - return im - - -def horizontal_flip(im): - if len(im.shape) == 3: - im = im[:, ::-1, :] - elif len(im.shape) == 2: - im = im[:, ::-1] - return im - - -def vertical_flip(im): - if len(im.shape) == 3: - im = im[::-1, :, :] - elif len(im.shape) == 2: - im = im[::-1, :] - return im - - -def brightness(im, brightness_lower, brightness_upper): - brightness_delta = np.random.uniform(brightness_lower, brightness_upper) - im = ImageEnhance.Brightness(im).enhance(brightness_delta) - return im - - -def contrast(im, contrast_lower, contrast_upper): - contrast_delta = np.random.uniform(contrast_lower, contrast_upper) - im = ImageEnhance.Contrast(im).enhance(contrast_delta) - return im - - -def saturation(im, saturation_lower, saturation_upper): - saturation_delta = np.random.uniform(saturation_lower, saturation_upper) - im = ImageEnhance.Color(im).enhance(saturation_delta) - return im - - -def hue(im, hue_lower, hue_upper): - hue_delta = np.random.uniform(hue_lower, hue_upper) - im = np.array(im.convert('HSV')) - im[:, :, 0] = im[:, :, 0] + hue_delta - im = Image.fromarray(im, mode='HSV').convert('RGB') - return im - - -def rotate(im, rotate_lower, rotate_upper): - rotate_delta = np.random.uniform(rotate_lower, rotate_upper) - im = im.rotate(int(rotate_delta)) - return im - - -def mask_to_onehot(mask, num_classes): - """ - Convert a mask (H, W) to onehot (K, H, W). - - Args: - mask (np.ndarray): Label mask with shape (H, W) - num_classes (int): Number of classes. - - Returns: - np.ndarray: Onehot mask with shape(K, H, W). - """ - _mask = [mask == i for i in range(num_classes)] - _mask = np.array(_mask).astype(np.uint8) - return _mask - - -def onehot_to_binary_edge(mask, radius): - """ - Convert a onehot mask (K, H, W) to a edge mask. - - Args: - mask (np.ndarray): Onehot mask with shape (K, H, W) - radius (int|float): Radius of edge. - - Returns: - np.ndarray: Edge mask with shape(H, W). 
- """ - if radius < 1: - raise ValueError('`radius` should be greater than or equal to 1') - num_classes = mask.shape[0] - - edge = np.zeros(mask.shape[1:]) - # pad borders - mask = np.pad( - mask, ((0, 0), (1, 1), (1, 1)), mode='constant', constant_values=0) - for i in range(num_classes): - dist = distance_transform_edt( - mask[i, :]) + distance_transform_edt(1.0 - mask[i, :]) - dist = dist[1:-1, 1:-1] - dist[dist > radius] = 0 - edge += dist - - edge = np.expand_dims(edge, axis=0) - edge = (edge > 0).astype(np.uint8) - return edge - - -def mask_to_binary_edge(mask, radius, num_classes): - """ - Convert a segmentic segmentation mask (H, W) to a binary edge mask(H, W). - - Args: - mask (np.ndarray): Label mask with shape (H, W) - radius (int|float): Radius of edge. - num_classes (int): Number of classes. - - Returns: - np.ndarray: Edge mask with shape(H, W). - """ - mask = mask.squeeze() - onehot = mask_to_onehot(mask, num_classes) - edge = onehot_to_binary_edge(onehot, radius) - return edge diff --git a/contrib/PanopticDeepLab/paddleseg/transforms/target_transforms.py b/contrib/PanopticDeepLab/paddleseg/transforms/target_transforms.py deleted file mode 100644 index ce646f5ea1..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/transforms/target_transforms.py +++ /dev/null @@ -1,281 +0,0 @@ -import numpy as np - - -class PanopticTargetGenerator(object): - """ - Generates panoptic training target for Panoptic-DeepLab. - Annotation is assumed to have Cityscapes format. - Arguments: - ignore_index: Integer, the ignore label for semantic segmentation. - rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the - corresponding panoptic label. - thing_list: List, a list of thing classes - sigma: the sigma for Gaussian kernel. - ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch. - small_instance_area: Integer, indicates largest area for small instances. - small_instance_weight: Integer, indicates semantic loss weights for small instances. - ignore_crowd_in_semantic: Boolean, whether to ignore crowd region in semantic segmentation branch, - crowd region is ignored in the original TensorFlow implementation. - """ - - def __init__(self, - ignore_index, - rgb2id, - thing_list, - sigma=8, - ignore_stuff_in_offset=False, - small_instance_area=0, - small_instance_weight=1, - ignore_crowd_in_semantic=False): - self.ignore_index = ignore_index - self.rgb2id = rgb2id - self.thing_list = thing_list - self.ignore_stuff_in_offset = ignore_stuff_in_offset - self.small_instance_area = small_instance_area - self.small_instance_weight = small_instance_weight - self.ignore_crowd_in_semantic = ignore_crowd_in_semantic - - self.sigma = sigma - size = 6 * sigma + 3 - x = np.arange(0, size, 1, float) - y = x[:, np.newaxis] - x0, y0 = 3 * sigma + 1, 3 * sigma + 1 - self.g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2)) - - def __call__(self, panoptic, segments): - """Generates the training target. - reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py - reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18 - Args: - panoptic: numpy.array, colored image encoding panoptic label. - segments: List, a list of dictionary containing information of every segment, it has fields: - - id: panoptic id, after decoding `panoptic`. - - category_id: semantic class id. - - area: segment area. - - bbox: segment bounding box. 
-                - iscrowd: crowd region.
-        Returns:
-            A dictionary with fields:
-                - semantic: Tensor, semantic label, shape=(H, W).
-                - foreground: Tensor, foreground mask label, shape=(H, W).
-                - center: Tensor, center heatmap, shape=(1, H, W).
-                - center_points: List, center coordinates, each element a tuple (y-coord, x-coord).
-                - offset: Tensor, offset, shape=(2, H, W), first dim is (offset_y, offset_x).
-                - semantic_weights: Tensor, loss weight for semantic prediction, shape=(H, W).
-                - center_weights: Tensor, ignore region of center prediction, shape=(H, W), used as weights for center
-                    regression: 0 means ignore, 1 means the pixel belongs to an instance. Multiply this mask with the loss.
-                - offset_weights: Tensor, ignore region of offset prediction, shape=(H, W), used as weights for offset
-                    regression: 0 means ignore, 1 means the pixel belongs to an instance. Multiply this mask with the loss.
-        """
-        panoptic = self.rgb2id(panoptic)
-        height, width = panoptic.shape[0], panoptic.shape[1]
-        semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_index
-        foreground = np.zeros_like(panoptic, dtype=np.uint8)
-        center = np.zeros((1, height, width), dtype=np.float32)
-        center_pts = []
-        offset = np.zeros((2, height, width), dtype=np.float32)
-        y_coord = np.ones_like(panoptic, dtype=np.float32)
-        x_coord = np.ones_like(panoptic, dtype=np.float32)
-        y_coord = np.cumsum(y_coord, axis=0) - 1
-        x_coord = np.cumsum(x_coord, axis=1) - 1
-        # Generate pixel-wise loss weights
-        semantic_weights = np.ones_like(panoptic, dtype=np.uint8)
-        # 0: ignore, 1: has instance
-        # Three conditions for a region to be ignored by the instance branches:
-        # (1) It is labeled as `ignore_index`
-        # (2) It is a crowd region (iscrowd=1)
-        # (3) (Optional) It is a stuff region (for the offset branch)
-        center_weights = np.zeros_like(panoptic, dtype=np.uint8)
-        offset_weights = np.zeros_like(panoptic, dtype=np.uint8)
-        for seg in segments:
-            cat_id = seg["category_id"]
-            if self.ignore_crowd_in_semantic:
-                if not seg['iscrowd']:
-                    semantic[panoptic == seg["id"]] = cat_id
-            else:
-                semantic[panoptic == seg["id"]] = cat_id
-            if cat_id in self.thing_list:
-                foreground[panoptic == seg["id"]] = 1
-            if not seg['iscrowd']:
-                # Ignored regions are not in `segments`.
-                # Handle crowd region.
-                center_weights[panoptic == seg["id"]] = 1
-                if self.ignore_stuff_in_offset:
-                    # Handle stuff region.
- if cat_id in self.thing_list: - offset_weights[panoptic == seg["id"]] = 1 - else: - offset_weights[panoptic == seg["id"]] = 1 - if cat_id in self.thing_list: - # find instance center - mask_index = np.where(panoptic == seg["id"]) - if len(mask_index[0]) == 0: - # the instance is completely cropped - continue - - # Find instance area - ins_area = len(mask_index[0]) - if ins_area < self.small_instance_area: - semantic_weights[panoptic == - seg["id"]] = self.small_instance_weight - - center_y, center_x = np.mean(mask_index[0]), np.mean( - mask_index[1]) - center_pts.append([center_y, center_x]) - - # generate center heatmap - y, x = int(center_y), int(center_x) - # outside image boundary - if x < 0 or y < 0 or \ - x >= width or y >= height: - continue - sigma = self.sigma - # upper left - ul = int(np.round(x - 3 * sigma - 1)), int( - np.round(y - 3 * sigma - 1)) - # bottom right - br = int(np.round(x + 3 * sigma + 2)), int( - np.round(y + 3 * sigma + 2)) - - c, d = max(0, -ul[0]), min(br[0], width) - ul[0] - a, b = max(0, -ul[1]), min(br[1], height) - ul[1] - - cc, dd = max(0, ul[0]), min(br[0], width) - aa, bb = max(0, ul[1]), min(br[1], height) - center[0, aa:bb, cc:dd] = np.maximum(center[0, aa:bb, cc:dd], - self.g[a:b, c:d]) - - # generate offset (2, h, w) -> (y-dir, x-dir) - offset_y_index = (np.zeros_like(mask_index[0]), mask_index[0], - mask_index[1]) - offset_x_index = (np.ones_like(mask_index[0]), mask_index[0], - mask_index[1]) - offset[offset_y_index] = center_y - y_coord[mask_index] - offset[offset_x_index] = center_x - x_coord[mask_index] - - return dict( - semantic=semantic.astype('long'), - foreground=foreground.astype('long'), - center=center.astype(np.float32), - center_points=center_pts, - offset=offset.astype(np.float32), - semantic_weights=semantic_weights.astype(np.float32), - center_weights=center_weights.astype(np.float32), - offset_weights=offset_weights.astype(np.float32)) - - -class SemanticTargetGenerator(object): - """ - Generates semantic training target only for Panoptic-DeepLab (no instance). - Annotation is assumed to have Cityscapes format. - Arguments: - ignore_index: Integer, the ignore label for semantic segmentation. - rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the - corresponding panoptic label. - """ - - def __init__(self, ignore_index, rgb2id): - self.ignore_index = ignore_index - self.rgb2id = rgb2id - - def __call__(self, panoptic, segments): - """Generates the training target. - reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py - reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18 - Args: - panoptic: numpy.array, colored image encoding panoptic label. - segments: List, a list of dictionary containing information of every segment, it has fields: - - id: panoptic id, after decoding `panoptic`. - - category_id: semantic class id. - - area: segment area. - - bbox: segment bounding box. - - iscrowd: crowd region. - Returns: - A dictionary with fields: - - semantic: Tensor, semantic label, shape=(H, W). - """ - panoptic = self.rgb2id(panoptic) - semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_index - for seg in segments: - cat_id = seg["category_id"] - semantic[panoptic == seg["id"]] = cat_id - - return dict(semantic=semantic.astype('long')) - - -class InstanceTargetGenerator(object): - """ - Generates instance target only for Panoptic-DeepLab. 
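# --- Editor's note (not part of the original patch) ---------------------------
# The ul/br arithmetic in PanopticTargetGenerator.__call__ above pastes a
# fixed (6*sigma+3)-sized Gaussian patch onto the center heatmap, clipping it
# at the image border. A numpy sketch of that paste, with sigma=2 for brevity:
import numpy as np

sigma = 2
size = 6 * sigma + 3                      # 15x15 kernel
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = 3 * sigma + 1
g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))

height = width = 20
center = np.zeros((height, width), dtype=np.float32)
cx, cy = 2, 2                             # a center near the border
ul = (int(np.round(cx - 3 * sigma - 1)), int(np.round(cy - 3 * sigma - 1)))
br = (int(np.round(cx + 3 * sigma + 2)), int(np.round(cy + 3 * sigma + 2)))
# Source window inside the kernel and destination window inside the image:
c, d = max(0, -ul[0]), min(br[0], width) - ul[0]
a, b = max(0, -ul[1]), min(br[1], height) - ul[1]
cc, dd = max(0, ul[0]), min(br[0], width)
aa, bb = max(0, ul[1]), min(br[1], height)
center[aa:bb, cc:dd] = np.maximum(center[aa:bb, cc:dd], g[a:b, c:d])
print(center.max())                       # 1.0, located at the instance center
# ------------------------------------------------------------------------------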
-    Annotation is assumed to have Cityscapes format.
-    Arguments:
-        rgb2id: Function, panoptic label is encoded in a colored image; this function converts a color to the
-            corresponding panoptic label.
-    """
-
-    def __init__(self, rgb2id):
-        self.rgb2id = rgb2id
-
-    def __call__(self, panoptic):
-        """Generates the instance target.
-        Args:
-            panoptic: numpy.array, colored image encoding panoptic label.
-        Returns:
-            A dictionary with fields:
-                - instance: Tensor, shape=(H, W). 0 is background. 1, 2, 3, ... are instances, so it is class agnostic.
-        """
-        panoptic = self.rgb2id(panoptic)
-        instance = np.zeros_like(panoptic, dtype=np.int64)
-        ids = np.unique(panoptic)
-        ins_id = 1
-        for label_id in ids:
-            if label_id > 1000:
-                instance[panoptic == label_id] = ins_id
-                ins_id += 1
-
-        return dict(instance=instance)
-
-
-class RawPanopticTargetGenerator(object):
-    """
-    Generates the raw panoptic ground truth for evaluation, where values are 0, 1, 2, 3, ...,
-    11000, 11001, ..., 18000, 18001, and ignore_index (generally 255).
-    Arguments:
-        ignore_index: Integer, the ignore label for semantic segmentation.
-        rgb2id: Function, panoptic label is encoded in a colored image; this function converts a color to the
-            corresponding panoptic label.
-    """
-
-    def __init__(self, ignore_index, rgb2id, label_divisor=1000):
-        self.ignore_index = ignore_index
-        self.rgb2id = rgb2id
-        self.label_divisor = label_divisor
-
-    def __call__(self, panoptic, segments):
-        """
-        Generates the raw panoptic target.
-
-        Args:
-            panoptic (numpy.array): colored image encoding panoptic label.
-            segments (list): A list of dictionaries containing information of every segment, with fields:
-                - id: panoptic id, after decoding `panoptic`.
-                - category_id: semantic class id.
-                - area: segment area.
-                - bbox: segment bounding box.
-                - iscrowd: crowd region.
-        Returns:
-            A dictionary with fields:
-                - panoptic: Tensor, panoptic label, shape=(H, W).
-        """
-        panoptic = self.rgb2id(panoptic)
-        raw_panoptic = np.zeros_like(panoptic) + self.ignore_index
-        for seg in segments:
-            cat_id = seg['category_id']
-            if seg['id'] < 1000:
-                raw_panoptic[panoptic == seg['id']] = cat_id
-            else:
-                ins_id = seg['id'] % self.label_divisor
-                raw_panoptic[panoptic ==
-                             seg['id']] = cat_id * self.label_divisor + ins_id
-        return dict(panoptic=raw_panoptic.astype('long'))
diff --git a/contrib/PanopticDeepLab/paddleseg/transforms/transforms.py b/contrib/PanopticDeepLab/paddleseg/transforms/transforms.py
deleted file mode 100644
index cb8f1b69bf..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/transforms/transforms.py
+++ /dev/null
@@ -1,888 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
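# --- Editor's note (not part of the original patch) ---------------------------
# The label_divisor convention used by RawPanopticTargetGenerator above:
# a "thing" pixel stores category_id * label_divisor + instance_id, while a
# "stuff" pixel stores the bare category_id (< label_divisor).
label_divisor = 1000

def encode(category_id, instance_id=None):
    if instance_id is None:          # stuff
        return category_id
    return category_id * label_divisor + instance_id

def decode(panoptic_id):
    if panoptic_id < label_divisor:  # stuff
        return panoptic_id, None
    return panoptic_id // label_divisor, panoptic_id % label_divisor

print(encode(11, 1))    # 11001
print(decode(11001))    # (11, 1)
print(decode(7))        # (7, None) -- a stuff class
# ------------------------------------------------------------------------------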
- -import random - -import cv2 -import numpy as np -from PIL import Image - -from paddleseg.cvlibs import manager -from paddleseg.transforms import functional - - -@manager.TRANSFORMS.add_component -class Compose: - """ - Do transformation on input data with corresponding pre-processing and augmentation operations. - The shape of input data to all operations is [height, width, channels]. - - Args: - transforms (list): A list contains data pre-processing or augmentation. Empty list means only reading images, no transformation. - to_rgb (bool, optional): If converting image to RGB color space. Default: True. - - Raises: - TypeError: When 'transforms' is not a list. - ValueError: when the length of 'transforms' is less than 1. - """ - - def __init__(self, transforms, to_rgb=True): - if not isinstance(transforms, list): - raise TypeError('The transforms must be a list!') - self.transforms = transforms - self.to_rgb = to_rgb - - def __call__(self, im, label=None): - """ - Args: - im (str|np.ndarray): It is either image path or image object. - label (str|np.ndarray): It is either label path or label ndarray. - - Returns: - (tuple). A tuple including image, image info, and label after transformation. - """ - if isinstance(im, str): - im = cv2.imread(im).astype('float32') - if isinstance(label, str): - label = np.asarray(Image.open(label)) - if im is None: - raise ValueError('Can\'t read The image file {}!'.format(im)) - if self.to_rgb: - im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) - - for op in self.transforms: - outputs = op(im, label) - im = outputs[0] - if len(outputs) == 2: - label = outputs[1] - im = np.transpose(im, (2, 0, 1)) - return (im, label) - - -@manager.TRANSFORMS.add_component -class RandomHorizontalFlip: - """ - Flip an image horizontally with a certain probability. - - Args: - prob (float, optional): A probability of horizontally flipping. Default: 0.5. - """ - - def __init__(self, prob=0.5): - self.prob = prob - - def __call__(self, im, label=None): - if random.random() < self.prob: - im = functional.horizontal_flip(im) - if label is not None: - label = functional.horizontal_flip(label) - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class RandomVerticalFlip: - """ - Flip an image vertically with a certain probability. - - Args: - prob (float, optional): A probability of vertical flipping. Default: 0.1. - """ - - def __init__(self, prob=0.1): - self.prob = prob - - def __call__(self, im, label=None): - if random.random() < self.prob: - im = functional.vertical_flip(im) - if label is not None: - label = functional.vertical_flip(label) - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class Resize: - """ - Resize an image. - - Args: - target_size (list|tuple, optional): The target size of image. Default: (512, 512). - interp (str, optional): The interpolation mode of resize is consistent with opencv. - ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM']. Note that when it is - 'RANDOM', a random interpolation mode would be specified. Default: "LINEAR". - - Raises: - TypeError: When 'target_size' type is neither list nor tuple. - ValueError: When "interp" is out of pre-defined methods ('NEAREST', 'LINEAR', 'CUBIC', - 'AREA', 'LANCZOS4', 'RANDOM'). 
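# --- Editor's note (not part of the original patch) ---------------------------
# A runnable stand-in for the Compose pipeline above: ops are applied in order
# and the image is finally transposed from HWC to CHW for the network. The
# transform classes themselves are the ones defined in this file; `hflip` here
# mimics functional.horizontal_flip.
import numpy as np

def hflip(im):
    return im[:, ::-1, :]

class MiniCompose:
    def __init__(self, transforms):
        self.transforms = transforms
    def __call__(self, im):
        for op in self.transforms:
            im = op(im)
        return np.transpose(im, (2, 0, 1))  # HWC -> CHW

im = np.zeros((4, 6, 3), dtype=np.float32)
print(MiniCompose([hflip])(im).shape)  # (3, 4, 6)
# ------------------------------------------------------------------------------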
- """ - - # The interpolation mode - interp_dict = { - 'NEAREST': cv2.INTER_NEAREST, - 'LINEAR': cv2.INTER_LINEAR, - 'CUBIC': cv2.INTER_CUBIC, - 'AREA': cv2.INTER_AREA, - 'LANCZOS4': cv2.INTER_LANCZOS4 - } - - def __init__(self, target_size=(512, 512), interp='LINEAR'): - self.interp = interp - if not (interp == "RANDOM" or interp in self.interp_dict): - raise ValueError("`interp` should be one of {}".format( - self.interp_dict.keys())) - if isinstance(target_size, list) or isinstance(target_size, tuple): - if len(target_size) != 2: - raise ValueError( - '`target_size` should include 2 elements, but it is {}'. - format(target_size)) - else: - raise TypeError( - "Type of `target_size` is invalid. It should be list or tuple, but it is {}" - .format(type(target_size))) - - self.target_size = target_size - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label), - - Raises: - TypeError: When the 'img' type is not numpy. - ValueError: When the length of "im" shape is not 3. - """ - - if not isinstance(im, np.ndarray): - raise TypeError("Resize: image type is not numpy.") - if len(im.shape) != 3: - raise ValueError('Resize: image is not 3-dimensional.') - if self.interp == "RANDOM": - interp = random.choice(list(self.interp_dict.keys())) - else: - interp = self.interp - im = functional.resize(im, self.target_size, self.interp_dict[interp]) - if label is not None: - label = functional.resize(label, self.target_size, - cv2.INTER_NEAREST) - - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class ResizeByLong: - """ - Resize the long side of an image to given size, and then scale the other side proportionally. - - Args: - long_size (int): The target size of long side. - """ - - def __init__(self, long_size): - self.long_size = long_size - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). - """ - - im = functional.resize_long(im, self.long_size) - if label is not None: - label = functional.resize_long(label, self.long_size, - cv2.INTER_NEAREST) - - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class LimitLong: - """ - Limit the long edge of image. - - If the long edge is larger than max_long, resize the long edge - to max_long, while scale the short edge proportionally. - - If the long edge is smaller than min_long, resize the long edge - to min_long, while scale the short edge proportionally. - - Args: - max_long (int, optional): If the long edge of image is larger than max_long, - it will be resize to max_long. Default: None. - min_long (int, optional): If the long edge of image is smaller than min_long, - it will be resize to min_long. Default: None. - """ - - def __init__(self, max_long=None, min_long=None): - if max_long is not None: - if not isinstance(max_long, int): - raise TypeError( - "Type of `max_long` is invalid. It should be int, but it is {}" - .format(type(max_long))) - if min_long is not None: - if not isinstance(min_long, int): - raise TypeError( - "Type of `min_long` is invalid. 
It should be int, but it is {}" - .format(type(min_long))) - if (max_long is not None) and (min_long is not None): - if min_long > max_long: - raise ValueError( - '`max_long should not smaller than min_long, but they are {} and {}' - .format(max_long, min_long)) - self.max_long = max_long - self.min_long = min_long - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). - """ - h, w = im.shape[0], im.shape[1] - long_edge = max(h, w) - target = long_edge - if (self.max_long is not None) and (long_edge > self.max_long): - target = self.max_long - elif (self.min_long is not None) and (long_edge < self.min_long): - target = self.min_long - - if target != long_edge: - im = functional.resize_long(im, target) - if label is not None: - label = functional.resize_long(label, target, cv2.INTER_NEAREST) - - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class ResizeRangeScaling: - """ - Resize the long side of an image into a range, and then scale the other side proportionally. - - Args: - min_value (int, optional): The minimum value of long side after resize. Default: 400. - max_value (int, optional): The maximum value of long side after resize. Default: 600. - """ - - def __init__(self, min_value=400, max_value=600): - if min_value > max_value: - raise ValueError('min_value must be less than max_value, ' - 'but they are {} and {}.'.format( - min_value, max_value)) - self.min_value = min_value - self.max_value = max_value - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). - """ - - if self.min_value == self.max_value: - random_size = self.max_value - else: - random_size = int( - np.random.uniform(self.min_value, self.max_value) + 0.5) - im = functional.resize_long(im, random_size, cv2.INTER_LINEAR) - if label is not None: - label = functional.resize_long(label, random_size, - cv2.INTER_NEAREST) - - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class ResizeStepScaling: - """ - Scale an image proportionally within a range. - - Args: - min_scale_factor (float, optional): The minimum scale. Default: 0.75. - max_scale_factor (float, optional): The maximum scale. Default: 1.25. - scale_step_size (float, optional): The scale interval. Default: 0.25. - - Raises: - ValueError: When min_scale_factor is smaller than max_scale_factor. - """ - - def __init__(self, - min_scale_factor=0.75, - max_scale_factor=1.25, - scale_step_size=0.25): - if min_scale_factor > max_scale_factor: - raise ValueError( - 'min_scale_factor must be less than max_scale_factor, ' - 'but they are {} and {}.'.format(min_scale_factor, - max_scale_factor)) - self.min_scale_factor = min_scale_factor - self.max_scale_factor = max_scale_factor - self.scale_step_size = scale_step_size - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). 
- """ - - if self.min_scale_factor == self.max_scale_factor: - scale_factor = self.min_scale_factor - - elif self.scale_step_size == 0: - scale_factor = np.random.uniform(self.min_scale_factor, - self.max_scale_factor) - - else: - num_steps = int((self.max_scale_factor - self.min_scale_factor) / - self.scale_step_size + 1) - scale_factors = np.linspace(self.min_scale_factor, - self.max_scale_factor, - num_steps).tolist() - np.random.shuffle(scale_factors) - scale_factor = scale_factors[0] - w = int(round(scale_factor * im.shape[1])) - h = int(round(scale_factor * im.shape[0])) - - im = functional.resize(im, (w, h), cv2.INTER_LINEAR) - if label is not None: - label = functional.resize(label, (w, h), cv2.INTER_NEAREST) - - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class Normalize: - """ - Normalize an image. - - Args: - mean (list, optional): The mean value of a data set. Default: [0.5, 0.5, 0.5]. - std (list, optional): The standard deviation of a data set. Default: [0.5, 0.5, 0.5]. - - Raises: - ValueError: When mean/std is not list or any value in std is 0. - """ - - def __init__(self, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)): - self.mean = mean - self.std = std - if not (isinstance(self.mean, (list, tuple)) - and isinstance(self.std, (list, tuple))): - raise ValueError( - "{}: input type is invalid. It should be list or tuple".format( - self)) - from functools import reduce - if reduce(lambda x, y: x * y, self.std) == 0: - raise ValueError('{}: std is invalid!'.format(self)) - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). - """ - - mean = np.array(self.mean)[np.newaxis, np.newaxis, :] - std = np.array(self.std)[np.newaxis, np.newaxis, :] - im = functional.normalize(im, mean, std) - - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class Padding: - """ - Add bottom-right padding to a raw image or annotation image. - - Args: - target_size (list|tuple): The target size after padding. - im_padding_value (list, optional): The padding value of raw image. - Default: [127.5, 127.5, 127.5]. - label_padding_value (int, optional): The padding value of annotation image. Default: 255. - - Raises: - TypeError: When target_size is neither list nor tuple. - ValueError: When the length of target_size is not 2. - """ - - def __init__(self, - target_size, - im_padding_value=(127.5, 127.5, 127.5), - label_padding_value=255): - if isinstance(target_size, list) or isinstance(target_size, tuple): - if len(target_size) != 2: - raise ValueError( - '`target_size` should include 2 elements, but it is {}'. - format(target_size)) - else: - raise TypeError( - "Type of target_size is invalid. It should be list or tuple, now is {}" - .format(type(target_size))) - self.target_size = target_size - self.im_padding_value = im_padding_value - self.label_padding_value = label_padding_value - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). 
- """ - - im_height, im_width = im.shape[0], im.shape[1] - if isinstance(self.target_size, int): - target_height = self.target_size - target_width = self.target_size - else: - target_height = self.target_size[1] - target_width = self.target_size[0] - pad_height = target_height - im_height - pad_width = target_width - im_width - if pad_height < 0 or pad_width < 0: - raise ValueError( - 'The size of image should be less than `target_size`, but the size of image ({}, {}) is larger than `target_size` ({}, {})' - .format(im_width, im_height, target_width, target_height)) - else: - im = cv2.copyMakeBorder( - im, - 0, - pad_height, - 0, - pad_width, - cv2.BORDER_CONSTANT, - value=self.im_padding_value) - if label is not None: - label = cv2.copyMakeBorder( - label, - 0, - pad_height, - 0, - pad_width, - cv2.BORDER_CONSTANT, - value=self.label_padding_value) - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class RandomPaddingCrop: - """ - Crop a sub-image from a raw image and annotation image randomly. If the target cropping size - is larger than original image, then the bottom-right padding will be added. - - Args: - crop_size (tuple, optional): The target cropping size. Default: (512, 512). - im_padding_value (list, optional): The padding value of raw image. - Default: [127.5, 127.5, 127.5]. - label_padding_value (int, optional): The padding value of annotation image. Default: 255. - - Raises: - TypeError: When crop_size is neither list nor tuple. - ValueError: When the length of crop_size is not 2. - """ - - def __init__(self, - crop_size=(512, 512), - im_padding_value=(127.5, 127.5, 127.5), - label_padding_value=255): - if isinstance(crop_size, list) or isinstance(crop_size, tuple): - if len(crop_size) != 2: - raise ValueError( - 'Type of `crop_size` is list or tuple. It should include 2 elements, but it is {}' - .format(crop_size)) - else: - raise TypeError( - "The type of `crop_size` is invalid. It should be list or tuple, but it is {}" - .format(type(crop_size))) - self.crop_size = crop_size - self.im_padding_value = im_padding_value - self.label_padding_value = label_padding_value - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). 
- """ - - if isinstance(self.crop_size, int): - crop_width = self.crop_size - crop_height = self.crop_size - else: - crop_width = self.crop_size[0] - crop_height = self.crop_size[1] - - img_height = im.shape[0] - img_width = im.shape[1] - - if img_height == crop_height and img_width == crop_width: - if label is None: - return (im, ) - else: - return (im, label) - else: - pad_height = max(crop_height - img_height, 0) - pad_width = max(crop_width - img_width, 0) - if (pad_height > 0 or pad_width > 0): - im = cv2.copyMakeBorder( - im, - 0, - pad_height, - 0, - pad_width, - cv2.BORDER_CONSTANT, - value=self.im_padding_value) - if label is not None: - label = cv2.copyMakeBorder( - label, - 0, - pad_height, - 0, - pad_width, - cv2.BORDER_CONSTANT, - value=self.label_padding_value) - img_height = im.shape[0] - img_width = im.shape[1] - - if crop_height > 0 and crop_width > 0: - h_off = np.random.randint(img_height - crop_height + 1) - w_off = np.random.randint(img_width - crop_width + 1) - - im = im[h_off:(crop_height + h_off), w_off:( - w_off + crop_width), :] - if label is not None: - label = label[h_off:(crop_height + h_off), w_off:( - w_off + crop_width)] - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class RandomBlur: - """ - Blurring an image by a Gaussian function with a certain probability. - - Args: - prob (float, optional): A probability of blurring an image. Default: 0.1. - """ - - def __init__(self, prob=0.1): - self.prob = prob - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). - """ - - if self.prob <= 0: - n = 0 - elif self.prob >= 1: - n = 1 - else: - n = int(1.0 / self.prob) - if n > 0: - if np.random.randint(0, n) == 0: - radius = np.random.randint(3, 10) - if radius % 2 != 1: - radius = radius + 1 - if radius > 9: - radius = 9 - im = cv2.GaussianBlur(im, (radius, radius), 0, 0) - - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class RandomRotation: - """ - Rotate an image randomly with padding. - - Args: - max_rotation (float, optional): The maximum rotation degree. Default: 15. - im_padding_value (list, optional): The padding value of raw image. - Default: [127.5, 127.5, 127.5]. - label_padding_value (int, optional): The padding value of annotation image. Default: 255. - """ - - def __init__(self, - max_rotation=15, - im_padding_value=(127.5, 127.5, 127.5), - label_padding_value=255): - self.max_rotation = max_rotation - self.im_padding_value = im_padding_value - self.label_padding_value = label_padding_value - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). 
- """ - - if self.max_rotation > 0: - (h, w) = im.shape[:2] - do_rotation = np.random.uniform(-self.max_rotation, - self.max_rotation) - pc = (w // 2, h // 2) - r = cv2.getRotationMatrix2D(pc, do_rotation, 1.0) - cos = np.abs(r[0, 0]) - sin = np.abs(r[0, 1]) - - nw = int((h * sin) + (w * cos)) - nh = int((h * cos) + (w * sin)) - - (cx, cy) = pc - r[0, 2] += (nw / 2) - cx - r[1, 2] += (nh / 2) - cy - dsize = (nw, nh) - im = cv2.warpAffine( - im, - r, - dsize=dsize, - flags=cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - borderValue=self.im_padding_value) - label = cv2.warpAffine( - label, - r, - dsize=dsize, - flags=cv2.INTER_NEAREST, - borderMode=cv2.BORDER_CONSTANT, - borderValue=self.label_padding_value) - - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class RandomScaleAspect: - """ - Crop a sub-image from an original image with a range of area ratio and aspect and - then scale the sub-image back to the size of the original image. - - Args: - min_scale (float, optional): The minimum area ratio of cropped image to the original image. Default: 0.5. - aspect_ratio (float, optional): The minimum aspect ratio. Default: 0.33. - """ - - def __init__(self, min_scale=0.5, aspect_ratio=0.33): - self.min_scale = min_scale - self.aspect_ratio = aspect_ratio - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). - """ - - if self.min_scale != 0 and self.aspect_ratio != 0: - img_height = im.shape[0] - img_width = im.shape[1] - for i in range(0, 10): - area = img_height * img_width - target_area = area * np.random.uniform(self.min_scale, 1.0) - aspectRatio = np.random.uniform(self.aspect_ratio, - 1.0 / self.aspect_ratio) - - dw = int(np.sqrt(target_area * 1.0 * aspectRatio)) - dh = int(np.sqrt(target_area * 1.0 / aspectRatio)) - if (np.random.randint(10) < 5): - tmp = dw - dw = dh - dh = tmp - - if (dh < img_height and dw < img_width): - h1 = np.random.randint(0, img_height - dh) - w1 = np.random.randint(0, img_width - dw) - - im = im[h1:(h1 + dh), w1:(w1 + dw), :] - label = label[h1:(h1 + dh), w1:(w1 + dw)] - im = cv2.resize( - im, (img_width, img_height), - interpolation=cv2.INTER_LINEAR) - label = cv2.resize( - label, (img_width, img_height), - interpolation=cv2.INTER_NEAREST) - break - if label is None: - return (im, ) - else: - return (im, label) - - -@manager.TRANSFORMS.add_component -class RandomDistort: - """ - Distort an image with random configurations. - - Args: - brightness_range (float, optional): A range of brightness. Default: 0.5. - brightness_prob (float, optional): A probability of adjusting brightness. Default: 0.5. - contrast_range (float, optional): A range of contrast. Default: 0.5. - contrast_prob (float, optional): A probability of adjusting contrast. Default: 0.5. - saturation_range (float, optional): A range of saturation. Default: 0.5. - saturation_prob (float, optional): A probability of adjusting saturation. Default: 0.5. - hue_range (int, optional): A range of hue. Default: 18. - hue_prob (float, optional): A probability of adjusting hue. Default: 0.5. 
- """ - - def __init__(self, - brightness_range=0.5, - brightness_prob=0.5, - contrast_range=0.5, - contrast_prob=0.5, - saturation_range=0.5, - saturation_prob=0.5, - hue_range=18, - hue_prob=0.5): - self.brightness_range = brightness_range - self.brightness_prob = brightness_prob - self.contrast_range = contrast_range - self.contrast_prob = contrast_prob - self.saturation_range = saturation_range - self.saturation_prob = saturation_prob - self.hue_range = hue_range - self.hue_prob = hue_prob - - def __call__(self, im, label=None): - """ - Args: - im (np.ndarray): The Image data. - label (np.ndarray, optional): The label data. Default: None. - - Returns: - (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). - """ - - brightness_lower = 1 - self.brightness_range - brightness_upper = 1 + self.brightness_range - contrast_lower = 1 - self.contrast_range - contrast_upper = 1 + self.contrast_range - saturation_lower = 1 - self.saturation_range - saturation_upper = 1 + self.saturation_range - hue_lower = -self.hue_range - hue_upper = self.hue_range - ops = [ - functional.brightness, functional.contrast, functional.saturation, - functional.hue - ] - random.shuffle(ops) - params_dict = { - 'brightness': { - 'brightness_lower': brightness_lower, - 'brightness_upper': brightness_upper - }, - 'contrast': { - 'contrast_lower': contrast_lower, - 'contrast_upper': contrast_upper - }, - 'saturation': { - 'saturation_lower': saturation_lower, - 'saturation_upper': saturation_upper - }, - 'hue': { - 'hue_lower': hue_lower, - 'hue_upper': hue_upper - } - } - prob_dict = { - 'brightness': self.brightness_prob, - 'contrast': self.contrast_prob, - 'saturation': self.saturation_prob, - 'hue': self.hue_prob - } - im = im.astype('uint8') - im = Image.fromarray(im) - for id in range(len(ops)): - params = params_dict[ops[id].__name__] - prob = prob_dict[ops[id].__name__] - params['im'] = im - if np.random.uniform(0, 1) < prob: - im = ops[id](**params) - im = np.asarray(im).astype('float32') - if label is None: - return (im, ) - else: - return (im, label) diff --git a/contrib/PanopticDeepLab/paddleseg/utils/__init__.py b/contrib/PanopticDeepLab/paddleseg/utils/__init__.py deleted file mode 100644 index 1d01505947..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/utils/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . import logger -from . import download -from . 
import metrics
-from .env import seg_env, get_sys_env
-from .utils import *
-from .timer import TimeAverager, calculate_eta
-from .visualize import cityscape_colormap
-from .visualize import visualize_semantic, visualize_instance, visualize_panoptic
-from .config_check import config_check
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/config_check.py b/contrib/PanopticDeepLab/paddleseg/utils/config_check.py
deleted file mode 100644
index 47a7049823..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/utils/config_check.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import numpy as np
-
-
-def config_check(cfg, train_dataset=None, val_dataset=None):
-    """
-    Check the config.
-
-    Args:
-        cfg (paddleseg.cvlibs.Config): An object of paddleseg.cvlibs.Config.
-        train_dataset (paddle.io.Dataset): Used to read and process training datasets.
-        val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets.
-    """
-
-    num_classes_check(cfg, train_dataset, val_dataset)
-
-
-def num_classes_check(cfg, train_dataset, val_dataset):
-    """
-    Check that num_classes is consistent across the model, train_dataset and val_dataset.
-    """
-    num_classes_set = set()
-    if train_dataset and hasattr(train_dataset, 'num_classes'):
-        num_classes_set.add(train_dataset.num_classes)
-    if val_dataset and hasattr(val_dataset, 'num_classes'):
-        num_classes_set.add(val_dataset.num_classes)
-    if cfg.dic.get('model', None) and cfg.dic['model'].get('num_classes', None):
-        num_classes_set.add(cfg.dic['model'].get('num_classes'))
-    if (not cfg.train_dataset) and (not cfg.val_dataset):
-        raise ValueError(
-            'One of `train_dataset` or `val_dataset` should be given, but there are none.'
-        )
-    if len(num_classes_set) == 0:
-        raise ValueError(
-            '`num_classes` is not found. Please set it in model, train_dataset or val_dataset.'
-        )
-    elif len(num_classes_set) > 1:
-        raise ValueError(
-            '`num_classes` is not consistent: {}. Please set it consistently in model, train_dataset and val_dataset.'
-            .format(num_classes_set))
-    else:
-        num_classes = num_classes_set.pop()
-        if train_dataset:
-            train_dataset.num_classes = num_classes
-        if val_dataset:
-            val_dataset.num_classes = num_classes
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/download.py b/contrib/PanopticDeepLab/paddleseg/utils/download.py
deleted file mode 100644
index 7b4a1c3a36..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/utils/download.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import functools -import os -import shutil -import sys -import tarfile -import time -import zipfile - -import requests - -lasttime = time.time() -FLUSH_INTERVAL = 0.1 - - -def progress(str, end=False): - global lasttime - if end: - str += "\n" - lasttime = 0 - if time.time() - lasttime >= FLUSH_INTERVAL: - sys.stdout.write("\r%s" % str) - lasttime = time.time() - sys.stdout.flush() - - -def _download_file(url, savepath, print_progress): - if print_progress: - print("Connecting to {}".format(url)) - r = requests.get(url, stream=True, timeout=15) - total_length = r.headers.get('content-length') - - if total_length is None: - with open(savepath, 'wb') as f: - shutil.copyfileobj(r.raw, f) - else: - with open(savepath, 'wb') as f: - dl = 0 - total_length = int(total_length) - starttime = time.time() - if print_progress: - print("Downloading %s" % os.path.basename(savepath)) - for data in r.iter_content(chunk_size=4096): - dl += len(data) - f.write(data) - if print_progress: - done = int(50 * dl / total_length) - progress("[%-50s] %.2f%%" % - ('=' * done, float(100 * dl) / total_length)) - if print_progress: - progress("[%-50s] %.2f%%" % ('=' * 50, 100), end=True) - - -def _uncompress_file_zip(filepath, extrapath): - files = zipfile.ZipFile(filepath, 'r') - filelist = files.namelist() - rootpath = filelist[0] - total_num = len(filelist) - for index, file in enumerate(filelist): - files.extract(file, extrapath) - yield total_num, index, rootpath - files.close() - yield total_num, index, rootpath - - -def _uncompress_file_tar(filepath, extrapath, mode="r:gz"): - files = tarfile.open(filepath, mode) - filelist = files.getnames() - total_num = len(filelist) - rootpath = filelist[0] - for index, file in enumerate(filelist): - files.extract(file, extrapath) - yield total_num, index, rootpath - files.close() - yield total_num, index, rootpath - - -def _uncompress_file(filepath, extrapath, delete_file, print_progress): - if print_progress: - print("Uncompress %s" % os.path.basename(filepath)) - - if filepath.endswith("zip"): - handler = _uncompress_file_zip - elif filepath.endswith("tgz"): - handler = functools.partial(_uncompress_file_tar, mode="r:*") - else: - handler = functools.partial(_uncompress_file_tar, mode="r") - - for total_num, index, rootpath in handler(filepath, extrapath): - if print_progress: - done = int(50 * float(index) / total_num) - progress( - "[%-50s] %.2f%%" % ('=' * done, float(100 * index) / total_num)) - if print_progress: - progress("[%-50s] %.2f%%" % ('=' * 50, 100), end=True) - - if delete_file: - os.remove(filepath) - - return rootpath - - -def download_file_and_uncompress(url, - savepath=None, - extrapath=None, - extraname=None, - print_progress=True, - cover=False, - delete_file=True): - if savepath is None: - savepath = "." - - if extrapath is None: - extrapath = "." 
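-
-    # Derive the local archive path from the URL; the extraction target
-    # defaults to the archive name (without extension) unless `extraname`
-    # is given.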
- - savename = url.split("/")[-1] - if not os.path.exists(savepath): - os.makedirs(savepath) - - savepath = os.path.join(savepath, savename) - savename = ".".join(savename.split(".")[:-1]) - savename = os.path.join(extrapath, savename) - extraname = savename if extraname is None else os.path.join( - extrapath, extraname) - - if cover: - if os.path.exists(savepath): - shutil.rmtree(savepath) - if os.path.exists(savename): - shutil.rmtree(savename) - if os.path.exists(extraname): - shutil.rmtree(extraname) - - if not os.path.exists(extraname): - if not os.path.exists(savename): - if not os.path.exists(savepath): - _download_file(url, savepath, print_progress) - - if (not tarfile.is_tarfile(savepath)) and ( - not zipfile.is_zipfile(savepath)): - if not os.path.exists(extraname): - os.makedirs(extraname) - shutil.move(savepath, extraname) - return extraname - - savename = _uncompress_file(savepath, extrapath, delete_file, - print_progress) - savename = os.path.join(extrapath, savename) - shutil.move(savename, extraname) - return extraname diff --git a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/__init__.py b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/__init__.py deleted file mode 100644 index 7c86ed1641..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .semantic import SemanticEvaluator -from .instance import InstanceEvaluator -from .panoptic import PanopticEvaluator diff --git a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/instance.py b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/instance.py deleted file mode 100644 index 1230c4d98e..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/instance.py +++ /dev/null @@ -1,345 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from collections import defaultdict, OrderedDict - -import numpy as np - - -class InstanceEvaluator(object): - """ - Refer to 'https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py' - Calculate the matching results of each image, each class, each IoU, and then get the final - matching results of each class and each IoU of dataset. Base on the matching results, the AP - and mAP can be calculated. 
-    We need two vectors for each class and for each overlap:
-    The first vector (y_true) is binary and is 1 where the ground truth says true,
-    and is 0 otherwise.
-    The second vector (y_score) is float [0...1] and represents the confidence of
-    the prediction.
-    We represent the following cases as:
-                                            | y_true | y_score
-        gt instance with matched prediction | 1      | confidence
-        gt instance w/o matched prediction  | 1      | 0.0
-        false positive prediction           | 0      | confidence
-    The current implementation only makes sense for an overlap threshold >= 0.5,
-    since only then a single prediction can either be ignored or matched, but
-    never both. Further, it can never match two gt instances.
-    For matching, we vary the overlap and do the following steps:
-        1.) remove all predictions that satisfy the overlap criterion with an ignore region (either void or *group)
-        2.) remove matches that do not satisfy the overlap
-        3.) mark non-matched predictions as false positives
-    In processing, 0 represents the first 'thing' class, so labels here are
-    offset by 1 relative to the dataset labels.
-    Args:
-        num_classes (int): The unique number of target classes, excluding the background class (usually labeled 0).
-        overlaps (float|list): The IoU threshold(s).
-        thing_list (list|None): Thing classes; AP is only calculated for these classes.
-    """
-
-    def __init__(self, num_classes, overlaps=0.5, thing_list=None):
-        super().__init__()
-        self.num_classes = num_classes
-        if isinstance(overlaps, float):
-            overlaps = [overlaps]
-        self.overlaps = overlaps
-        self.y_true = [[np.empty(0) for _i in range(len(overlaps))]
-                       for _j in range(num_classes)]
-        self.y_score = [[np.empty(0) for _i in range(len(overlaps))]
-                        for _j in range(num_classes)]
-        self.hard_fns = [[0] * len(overlaps) for _ in range(num_classes)]
-
-        if thing_list is None:
-            self.thing_list = list(range(num_classes))
-        else:
-            self.thing_list = thing_list
-
-    def update(self, preds, gts, ignore_mask=None):
-        """
-        Compute y_true and y_score for one image.
-        preds (list): tuple list [(label, confidence, mask), ...].
-        gts (list): tuple list [(label, mask), ...].
-        ignore_mask (np.ndarray): Mask to ignore.
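-
-        Example (a minimal sketch: one class, one gt instance and one exactly
-        matching prediction, so AP and mAP both come out as 100):
-
-            import numpy as np
-            evaluator = InstanceEvaluator(num_classes=1, overlaps=0.5)
-            mask = np.zeros((8, 8), dtype='int32')
-            mask[2:6, 2:6] = 1
-            evaluator.update(preds=[(0, 0.9, mask)], gts=[(0, mask)])
-            print(evaluator.evaluate())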
-        """
-
-        pred_instances, gt_instances = self.get_instances(
-            preds, gts, ignore_mask=ignore_mask)
-
-        for i in range(self.num_classes):
-            if i not in self.thing_list:
-                continue
-            for oi, oth in enumerate(self.overlaps):
-                cur_true = np.ones((len(gt_instances[i])))
-                cur_score = np.ones(len(gt_instances[i])) * (-float("inf"))
-                cur_match = np.zeros(len(gt_instances[i]), dtype=bool)
-                for gti, gt_instance in enumerate(gt_instances[i]):
-                    found_match = False
-                    for pred_instance in gt_instance['matched_pred']:
-                        overlap = float(pred_instance['intersection']) / (
-                            gt_instance['pixel_count'] +
-                            pred_instance['pixel_count'] -
-                            pred_instance['intersection'])
-                        if overlap > oth:
-                            confidence = pred_instance['confidence']
-
-                            # if we already have a prediction for this ground truth,
-                            # the prediction with the lower score is automatically a false positive
-                            if cur_match[gti]:
-                                max_score = max(cur_score[gti], confidence)
-                                min_score = min(cur_score[gti], confidence)
-                                cur_score[gti] = max_score
-                                # append false positive
-                                cur_true = np.append(cur_true, 0)
-                                cur_score = np.append(cur_score, min_score)
-                                cur_match = np.append(cur_match, True)
-                            # otherwise set score
-                            else:
-                                found_match = True
-                                cur_match[gti] = True
-                                cur_score[gti] = confidence
-
-                    if not found_match:
-                        self.hard_fns[i][oi] += 1
-                # remove not-matched ground truth instances
-                cur_true = cur_true[cur_match == True]
-                cur_score = cur_score[cur_match == True]
-
-                # collect not-matched predictions as false positive
-                for pred_instance in pred_instances[i]:
-                    found_gt = False
-                    for gt_instance in pred_instance['matched_gt']:
-                        overlap = float(gt_instance['intersection']) / (
-                            gt_instance['pixel_count'] +
-                            pred_instance['pixel_count'] -
-                            gt_instance['intersection'])
-                        if overlap > oth:
-                            found_gt = True
-                            break
-                    if not found_gt:
-                        proportion_ignore = 0
-                        if ignore_mask is not None:
-                            nb_ignore_pixels = pred_instance[
-                                'void_intersection']
-                            proportion_ignore = float(
-                                nb_ignore_pixels) / pred_instance['pixel_count']
-                        if proportion_ignore <= oth:
-                            cur_true = np.append(cur_true, 0)
-                            cur_score = np.append(cur_score,
-                                                  pred_instance['confidence'])
-                self.y_true[i][oi] = np.append(self.y_true[i][oi], cur_true)
-                self.y_score[i][oi] = np.append(self.y_score[i][oi], cur_score)
-
-    def evaluate(self):
-        ap = self.cal_ap()
-        map = self.cal_map()
-
-        res = {}
-        res["AP"] = [{i: ap[i] * 100} for i in self.thing_list]
-        res["mAP"] = 100 * map
-
-        results = OrderedDict({"ins_seg": res})
-        return results
-
-    def cal_ap(self):
-        """
-        Calculate AP for every class.
-        """
-        self.ap = [0] * self.num_classes
-        self.ap_overlap = [[0] * len(self.overlaps)
-                           for _ in range(self.num_classes)]
-        for i in range(self.num_classes):
-            if i not in self.thing_list:
-                continue
-            for j in range(len(self.overlaps)):
-                y_true = self.y_true[i][j]
-                y_score = self.y_score[i][j]
-                if len(y_true) == 0:
-                    self.ap_overlap[i][j] = 0
-                    continue
-                score_argsort = np.argsort(y_score)
-                y_score_sorted = y_score[score_argsort]
-                y_true_sorted = y_true[score_argsort]
-                y_true_sorted_cumsum = np.cumsum(y_true_sorted)
-
-                # unique thresholds
-                thresholds, unique_indices = np.unique(
-                    y_score_sorted, return_index=True)
-
-                # since we need to add an artificial point to the precision-recall curve
-                # increase its length by 1
-                nb_pr = len(unique_indices) + 1
-
-                # calculate precision and recall
-                nb_examples = len(y_score_sorted)
-                nb_true_examples = y_true_sorted_cumsum[-1]
-                precision = np.zeros(nb_pr)
-                recall = np.zeros(nb_pr)
-
-                # deal with the first point:
-                # the only thing we need to do is append a zero to the cumsum
-                # at the end; an index of -1 then uses that zero
-                y_true_sorted_cumsum = np.append(y_true_sorted_cumsum, 0)
-
-                # deal with remaining
-                for idx_res, idx_scores in enumerate(unique_indices):
-                    cumsum = y_true_sorted_cumsum[idx_scores - 1]
-                    tp = nb_true_examples - cumsum
-                    fp = nb_examples - idx_scores - tp
-                    fn = cumsum + self.hard_fns[i][j]
-                    p = float(tp) / (tp + fp)
-                    r = float(tp) / (tp + fn)
-                    precision[idx_res] = p
-                    recall[idx_res] = r
-
-                # add first point in curve
-                precision[-1] = 1.
-                # In some calculations, precision is made the running max after this point in the curve.
-                #precision = [np.max(precision[:i+1]) for i in range(len(precision))]
-                recall[-1] = 0.
-
-                # compute average of precision-recall curve
-                # integration is performed via zero order, or equivalently step-wise integration
-                # first compute the widths of each step:
-                # use a convolution with appropriate kernel, manually deal with the boundaries first
-                recall_for_conv = np.copy(recall)
-                recall_for_conv = np.append(recall_for_conv[0], recall_for_conv)
-                recall_for_conv = np.append(recall_for_conv, 0.)
-
-                step_widths = np.convolve(recall_for_conv, [-0.5, 0, 0.5],
-                                          'valid')
-
-                # integration is now simply a dot product
-                ap_current = np.dot(precision, step_widths)
-                self.ap_overlap[i][j] = ap_current
-
-        ap = [np.average(i) for i in self.ap_overlap]
-        self.ap = ap
-
-        return ap
-
-    def cal_map(self):
-        """
-        Calculate mAP over all thing classes.
-        """
-        self.cal_ap()
-        valid_ap = [self.ap[i] for i in self.thing_list]
-        map = np.mean(valid_ap)
-        self.map = map
-
-        return map
-
-    def get_instances(self, preds, gts, ignore_mask=None):
-        """
-        In this method, we create two dicts of lists:
-            - pred_instances: contains all predictions and their associated gt
-            - gt_instances: contains all gt instances and their associated predictions
-        Args:
-            preds (list): Predictions of the image.
-            gts (list): Ground truth of the image.
-        Return:
-            dict: pred_instances, the type is dict(list(dict)), e.g. {0: [{'pred_id': 0, 'label': 0,
-                'pixel_count': 100, 'confidence': 0.9, 'void_intersection': 0,
-                'matched_gt': [gt_instance0, gt_instance1, ...]}, ], 1: }
-            dict: gt_instances, the type is dict(list(dict)), e.g. {0: [{'inst_id': 0, 'label': 0,
-                'pixel_count': 100, 'mask': np.ndarray, 'matched_pred': [pred_instance0, pred_instance1, ...]}, ], 1: }
-        """
-
-        pred_instances = defaultdict(list)
-        gt_instances = defaultdict(list)
-
-        gt_inst_count = 0
-        for gt in gts:
-            label, mask = gt
-            gt_instance = defaultdict(list)
-            gt_instance['inst_id'] = gt_inst_count
-            gt_instance['label'] = label
-            gt_instance['pixel_count'] = np.count_nonzero(mask)
-            gt_instance['mask'] = mask
-            gt_instances[label].append(gt_instance)
-            gt_inst_count += 1
-
-        pred_inst_count = 0
-        for pred in preds:
-            label, conf, mask = pred
-            pred_instance = defaultdict(list)
-            pred_instance['label'] = label
-            pred_instance['pred_id'] = pred_inst_count
-            pred_instance['pixel_count'] = np.count_nonzero(mask)
-            pred_instance['confidence'] = conf
-            if ignore_mask is not None:
-                pred_instance['void_intersection'] = np.count_nonzero(
-                    np.logical_and(mask, ignore_mask))
-
-            # Loop through all ground truth instances with matching label
-            matched_gt = []
-            for gt_num, gt_instance in enumerate(gt_instances[label]):
-                intersection = np.count_nonzero(
-                    np.logical_and(mask, gt_instances[label][gt_num]['mask']))
-                if intersection > 0:
-                    gt_copy = gt_instance.copy()
-                    pred_copy = pred_instance.copy()
-
-                    gt_copy['intersection'] = intersection
-                    pred_copy['intersection'] = intersection
-
-                    matched_gt.append(gt_copy)
-                    gt_instances[label][gt_num]['matched_pred'].append(
-                        pred_copy)
-
-            pred_instance['matched_gt'] = matched_gt
-            pred_inst_count += 1
-            pred_instances[label].append(pred_instance)
-
-        return pred_instances, gt_instances
-
-    @staticmethod
-    def convert_gt_map(seg_map, ins_map):
-        """
-        Convert the ground truth with format (h*w) to the format that satisfies the AP calculation.
-        Args:
-            seg_map (np.ndarray): the semantic segmentation map with shape H * W. Value is 0, 1, 2, ...
-            ins_map (np.ndarray): the instance segmentation map with shape H * W. Value is 0, 1, 2, ...
-        Returns:
-            list: tuple list like: [(label, mask), ...]
-        """
-        gts = []
-        instance_cnt = np.unique(ins_map)
-        for i in instance_cnt:
-            if i == 0:
-                continue
-            mask = ins_map == i
-            label = seg_map[mask][0]
-            gts.append((label, mask.astype('int32')))
-        return gts
-
-    @staticmethod
-    def convert_pred_map(seg_pred, pan_pred):
-        """
-        Convert the predictions with format (h*w) to the format that satisfies the AP calculation.
-        Args:
-            seg_pred (np.ndarray): the semantic segmentation map with shape C * H * W. Value is probability.
-            pan_pred (np.ndarray): panoptic prediction; ids are void_label, stuff_id * label_divisor,
-                or thing_id * label_divisor + ins_id (ins_id >= 1).
-        Returns:
-            list: tuple list like: [(label, score, mask), ...]
-        """
-        preds = []
-        instance_cnt = np.unique(pan_pred)
-        for i in instance_cnt:
-            if (i < 1000) or (i % 1000 == 0):
-                continue
-            mask = pan_pred == i
-            label = i // 1000
-            score = np.mean(seg_pred[label][mask])
-            preds.append((label, score, mask.astype('int32')))
-        return preds
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/panoptic.py b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/panoptic.py
deleted file mode 100644
index 9c930bcc69..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/panoptic.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ------------------------------------------------------------------------------
-# Reference: https://github.com/mcordts/cityscapesScripts/blob/aeb7b82531f86185ce287705be28f452ba3ddbb8/cityscapesscripts/evaluation/evalPanopticSemanticLabeling.py
-# Modified by Guowei Chen
-# ------------------------------------------------------------------------------
-
-from collections import defaultdict, OrderedDict
-
-import numpy as np
-
-OFFSET = 256 * 256 * 256
-
-
-class PQStatCat():
-    def __init__(self):
-        self.iou = 0.0
-        self.tp = 0
-        self.fp = 0
-        self.fn = 0
-
-    def __iadd__(self, pq_stat_cat):
-        self.iou += pq_stat_cat.iou
-        self.tp += pq_stat_cat.tp
-        self.fp += pq_stat_cat.fp
-        self.fn += pq_stat_cat.fn
-        return self
-
-    def __repr__(self):
-        s = 'iou: ' + str(self.iou) + ' tp: ' + str(self.tp) + ' fp: ' + str(
-            self.fp) + ' fn: ' + str(self.fn)
-        return s
-
-
-class PQStat():
-    def __init__(self, num_classes):
-        self.pq_per_cat = defaultdict(PQStatCat)
-        self.num_classes = num_classes
-
-    def __getitem__(self, i):
-        return self.pq_per_cat[i]
-
-    def __iadd__(self, pq_stat):
-        for label, pq_stat_cat in pq_stat.pq_per_cat.items():
-            self.pq_per_cat[label] += pq_stat_cat
-        return self
-
-    def pq_average(self, isthing=None, thing_list=None):
-        """
-        Calculate the average PQ over classes as well as the per-class PQ.
-
-        Args:
-            isthing (bool|None): calculate the average PQ for thing classes if isthing is True,
-                for stuff classes if isthing is False, and for all classes if isthing is None. Default: None.
-            thing_list (list|None): A list of thing classes. It must be provided when isthing is True or False.
-        """
-        pq, sq, rq, n = 0, 0, 0, 0
-        per_class_results = {}
-        for label in range(self.num_classes):
-            if isthing is not None:
-                if isthing:
-                    if label not in thing_list:
-                        continue
-                else:
-                    if label in thing_list:
-                        continue
-            iou = self.pq_per_cat[label].iou
-            tp = self.pq_per_cat[label].tp
-            fp = self.pq_per_cat[label].fp
-            fn = self.pq_per_cat[label].fn
-            if tp + fp + fn == 0:
-                per_class_results[label] = {'pq': 0.0, 'sq': 0.0, 'rq': 0.0}
-                continue
-            n += 1
-            pq_class = iou / (tp + 0.5 * fp + 0.5 * fn)
-            sq_class = iou / tp if tp != 0 else 0
-            rq_class = tp / (tp + 0.5 * fp + 0.5 * fn)
-
-            per_class_results[label] = {
-                'pq': pq_class,
-                'sq': sq_class,
-                'rq': rq_class
-            }
-            pq += pq_class
-            sq += sq_class
-            rq += rq_class
-
-        return {
-            'pq': pq / n,
-            'sq': sq / n,
-            'rq': rq / n,
-            'n': n
-        }, per_class_results
-
-
-class PanopticEvaluator:
-    """
-    Evaluate panoptic segmentation.
-    """
-
-    def __init__(self,
-                 num_classes,
-                 thing_list,
-                 ignore_index=255,
-                 label_divisor=1000):
-        self.pq_stat = PQStat(num_classes)
-        self.num_classes = num_classes
-        self.thing_list = thing_list
-        self.ignore_index = ignore_index
-        self.label_divisor = label_divisor
-
-    def update(self, pred, gt):
-        # get the labels and counts for the pred and gt.
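-        # Panoptic ids are assumed to encode category and instance as
-        # id = category_id * label_divisor + instance_id for 'thing' instances,
-        # and id = category_id otherwise (see category_id below).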
-        gt_labels, gt_labels_counts = np.unique(gt, return_counts=True)
-        pred_labels, pred_labels_counts = np.unique(pred, return_counts=True)
-        gt_segms = defaultdict(dict)
-        pred_segms = defaultdict(dict)
-        for label, label_count in zip(gt_labels, gt_labels_counts):
-            category_id = label // self.label_divisor if label > self.label_divisor else label
-            gt_segms[label]['area'] = label_count
-            gt_segms[label]['category_id'] = category_id
-            gt_segms[label]['iscrowd'] = 1 if label in self.thing_list else 0
-        for label, label_count in zip(pred_labels, pred_labels_counts):
-            category_id = label // self.label_divisor if label > self.label_divisor else label
-            pred_segms[label]['area'] = label_count
-            pred_segms[label]['category_id'] = category_id
-
-        # confusion matrix calculation
-        pan_gt_pred = gt.astype(np.uint64) * OFFSET + pred.astype(np.uint64)
-        gt_pred_map = {}
-        labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True)
-        for label, intersection in zip(labels, labels_cnt):
-            gt_id = label // OFFSET
-            pred_id = label % OFFSET
-            gt_pred_map[(gt_id, pred_id)] = intersection
-
-        # count all matched pairs
-        gt_matched = set()
-        pred_matched = set()
-        for label_tuple, intersection in gt_pred_map.items():
-            gt_label, pred_label = label_tuple
-            if gt_label == self.ignore_index or pred_label == self.ignore_index:
-                continue
-            if gt_segms[gt_label]['iscrowd'] == 1:
-                continue
-            if gt_segms[gt_label]['category_id'] != pred_segms[pred_label][
-                    'category_id']:
-                continue
-            union = pred_segms[pred_label]['area'] + gt_segms[gt_label][
-                'area'] - intersection - gt_pred_map.get(
-                    (self.ignore_index, pred_label), 0)
-            iou = intersection / union
-            if iou > 0.5:
-                self.pq_stat[gt_segms[gt_label]['category_id']].tp += 1
-                self.pq_stat[gt_segms[gt_label]['category_id']].iou += iou
-                gt_matched.add(gt_label)
-                pred_matched.add(pred_label)
-
-        # count false negatives
-        crowd_labels_dict = {}
-        for gt_label, gt_info in gt_segms.items():
-            if gt_label in gt_matched:
-                continue
-            if gt_label == self.ignore_index:
-                continue
-            # ignore crowd
-            if gt_info['iscrowd'] == 1:
-                crowd_labels_dict[gt_info['category_id']] = gt_label
-                continue
-            self.pq_stat[gt_info['category_id']].fn += 1
-
-        # count false positives
-        for pred_label, pred_info in pred_segms.items():
-            if pred_label in pred_matched:
-                continue
-            if pred_label == self.ignore_index:
-                continue
-            # intersection of the segment with self.ignore_index
-            intersection = gt_pred_map.get((self.ignore_index, pred_label), 0)
-            if pred_info['category_id'] in crowd_labels_dict:
-                intersection += gt_pred_map.get(
-                    (crowd_labels_dict[pred_info['category_id']], pred_label),
-                    0)
-            # the predicted segment is ignored if more than half of it corresponds to self.ignore_index regions
-            if intersection / pred_info['area'] > 0.5:
-                continue
-            self.pq_stat[pred_info['category_id']].fp += 1
-
-    def evaluate(self):
-        metrics = [("All", None), ("Things", True), ("Stuff", False)]
-        results = {}
-        for name, isthing in metrics:
-            results[name], per_class_results = self.pq_stat.pq_average(
-                isthing=isthing, thing_list=self.thing_list)
-            if name == 'All':
-                results['per_class'] = per_class_results
-        return OrderedDict(pan_seg=results)
-
-
-if __name__ == '__main__':
-    panoptic_metric = PanopticEvaluator(2, [1])
-    pred = np.zeros((100, 100))
-    gt = np.zeros((100, 100))
-    pred[0:50, 0:50] = 1
-    gt[0:60, 0:60] = 1
-    panoptic_metric.update(pred, gt)
-    print(panoptic_metric.evaluate())
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/semantic.py b/contrib/PanopticDeepLab/paddleseg/utils/evaluation/semantic.py
deleted file mode 100644
index ca59a6a503..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/utils/evaluation/semantic.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# ------------------------------------------------------------------------------
-# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/master/segmentation/evaluation/semantic.py
-# Modified by Guowei Chen
-# ------------------------------------------------------------------------------
-
-from collections import OrderedDict
-
-import numpy as np
-
-
-class SemanticEvaluator:
-    """
-    Evaluate semantic segmentation
-    """
-
-    def __init__(self, num_classes, ignore_index=255):
-        """
-        Args:
-            num_classes (int): number of classes
-            ignore_index (int): value in semantic segmentation ground truth. Predictions for the
-                corresponding pixels should be ignored.
-        """
-        self._num_classes = num_classes
-        self._ignore_index = ignore_index
-        self._N = num_classes + 1  # store ignore label in the last class
-
-        self._conf_matrix = np.zeros((self._N, self._N), dtype=np.int64)
-
-    def update(self, pred, gt):
-        pred = pred.astype(np.int64)
-        gt = gt.astype(np.int64)
-        gt[gt == self._ignore_index] = self._num_classes
-
-        # row: pred, column: gt
-        self._conf_matrix += np.bincount(
-            self._N * pred.reshape(-1) + gt.reshape(-1),
-            minlength=self._N**2).reshape(self._N, self._N)
-
-    def evaluate(self):
-        """
-        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
-        * Mean intersection-over-union averaged across classes (mIoU)
-        * Frequency Weighted IoU (fwIoU)
-        * Mean pixel accuracy averaged across classes (mACC)
-        * Pixel Accuracy (pACC)
-        """
-        acc = np.zeros(self._num_classes, dtype=np.float64)
-        iou = np.zeros(self._num_classes, dtype=np.float64)
-        tp = self._conf_matrix.diagonal()[:-1].astype(np.float64)
-        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float64)
-        class_weights = pos_gt / np.sum(pos_gt)
-        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float64)
-
-        acc_valid = pos_pred > 0
-        acc[acc_valid] = tp[acc_valid] / pos_pred[acc_valid]
-        iou_valid = (pos_gt + pos_pred) > 0
-        union = pos_gt + pos_pred - tp
-        iou[iou_valid] = tp[iou_valid] / union[iou_valid]
-        macc = np.sum(acc) / np.sum(acc_valid)
-        miou = np.sum(iou) / np.sum(iou_valid)
-        fiou = np.sum(iou * class_weights)
-        pacc = np.sum(tp) / np.sum(pos_gt)
-
-        res = {}
-        res["mIoU"] = 100 * miou
-        res["fwIoU"] = 100 * fiou
-        res["mACC"] = 100 * macc
-        res["pACC"] = 100 * pacc
-
-        results = OrderedDict({"sem_seg": res})
-        return results
diff --git a/contrib/PanopticDeepLab/paddleseg/utils/logger.py b/contrib/PanopticDeepLab/paddleseg/utils/logger.py
deleted file mode 100644
index e7ef757635..0000000000
--- a/contrib/PanopticDeepLab/paddleseg/utils/logger.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -import time - -import paddle - -levels = {0: 'ERROR', 1: 'WARNING', 2: 'INFO', 3: 'DEBUG'} -log_level = 2 - - -def log(level=2, message=""): - if paddle.distributed.ParallelEnv().local_rank == 0: - current_time = time.time() - time_array = time.localtime(current_time) - current_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array) - if log_level >= level: - print( - "{} [{}]\t{}".format(current_time, levels[level], - message).encode("utf-8").decode("latin1")) - sys.stdout.flush() - - -def debug(message=""): - log(level=3, message=message) - - -def info(message=""): - log(level=2, message=message) - - -def warning(message=""): - log(level=1, message=message) - - -def error(message=""): - log(level=0, message=message) diff --git a/contrib/PanopticDeepLab/paddleseg/utils/metrics.py b/contrib/PanopticDeepLab/paddleseg/utils/metrics.py deleted file mode 100644 index ad5b3c9758..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/utils/metrics.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import paddle -import paddle.nn.functional as F - - -def calculate_area(pred, label, num_classes, ignore_index=255): - """ - Calculate intersect, prediction and label area - - Args: - pred (Tensor): The prediction by model. - label (Tensor): The ground truth of image. - num_classes (int): The unique number of target classes. - ignore_index (int): Specifies a target value that is ignored. Default: 255. - - Returns: - Tensor: The intersection area of prediction and the ground on all class. - Tensor: The prediction area on all class. 
- Tensor: The ground truth area on all class - """ - if len(pred.shape) == 4: - pred = paddle.squeeze(pred, axis=1) - if len(label.shape) == 4: - label = paddle.squeeze(label, axis=1) - if not pred.shape == label.shape: - raise ValueError('Shape of `pred` and `label should be equal, ' - 'but there are {} and {}.'.format( - pred.shape, label.shape)) - - # Delete ignore_index - mask = label != ignore_index - pred = pred + 1 - label = label + 1 - pred = pred * mask - label = label * mask - pred = F.one_hot(pred, num_classes + 1) - label = F.one_hot(label, num_classes + 1) - pred = pred[:, :, :, 1:] - label = label[:, :, :, 1:] - - pred_area = [] - label_area = [] - intersect_area = [] - - for i in range(num_classes): - pred_i = pred[:, :, :, i] - label_i = label[:, :, :, i] - pred_area_i = paddle.sum(pred_i) - label_area_i = paddle.sum(label_i) - intersect_area_i = paddle.sum(pred_i * label_i) - pred_area.append(pred_area_i) - label_area.append(label_area_i) - intersect_area.append(intersect_area_i) - pred_area = paddle.concat(pred_area) - label_area = paddle.concat(label_area) - intersect_area = paddle.concat(intersect_area) - return intersect_area, pred_area, label_area - - -def mean_iou(intersect_area, pred_area, label_area): - """ - Calculate iou. - - Args: - intersect_area (Tensor): The intersection area of prediction and ground truth on all classes. - pred_area (Tensor): The prediction area on all classes. - label_area (Tensor): The ground truth area on all classes. - - Returns: - np.ndarray: iou on all classes. - float: mean iou of all classes. - """ - intersect_area = intersect_area.numpy() - pred_area = pred_area.numpy() - label_area = label_area.numpy() - union = pred_area + label_area - intersect_area - class_iou = [] - for i in range(len(intersect_area)): - if union[i] == 0: - iou = 0 - else: - iou = intersect_area[i] / union[i] - class_iou.append(iou) - miou = np.mean(class_iou) - return np.array(class_iou), miou - - -def accuracy(intersect_area, pred_area): - """ - Calculate accuracy - - Args: - intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.. - pred_area (Tensor): The prediction area on all classes. - - Returns: - np.ndarray: accuracy on all classes. - float: mean accuracy. - """ - intersect_area = intersect_area.numpy() - pred_area = pred_area.numpy() - class_acc = [] - for i in range(len(intersect_area)): - if pred_area[i] == 0: - acc = 0 - else: - acc = intersect_area[i] / pred_area[i] - class_acc.append(acc) - macc = np.sum(intersect_area) / np.sum(pred_area) - return np.array(class_acc), macc - - -def kappa(intersect_area, pred_area, label_area): - """ - Calculate kappa coefficient - - Args: - intersect_area (Tensor): The intersection area of prediction and ground truth on all classes.. - pred_area (Tensor): The prediction area on all classes. - label_area (Tensor): The ground truth area on all classes. - - Returns: - float: kappa coefficient. 
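-
-    Example (a minimal sketch with two classes; the inputs are paddle Tensors,
-    typically produced by calculate_area):
-
-        import paddle
-        intersect_area = paddle.to_tensor([40., 30.])
-        pred_area = paddle.to_tensor([50., 50.])
-        label_area = paddle.to_tensor([60., 40.])
-        kappa(intersect_area, pred_area, label_area)  # po=0.7, pe=0.5 -> 0.4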
- """ - intersect_area = intersect_area.numpy() - pred_area = pred_area.numpy() - label_area = label_area.numpy() - total_area = np.sum(label_area) - po = np.sum(intersect_area) / total_area - pe = np.sum(pred_area * label_area) / (total_area * total_area) - kappa = (po - pe) / (1 - pe) - return kappa diff --git a/contrib/PanopticDeepLab/paddleseg/utils/paddle.py b/contrib/PanopticDeepLab/paddleseg/utils/paddle.py deleted file mode 100644 index c4f514b3a7..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/utils/paddle.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy - -import paddle - -dtype_map = { - paddle.fluid.core.VarDesc.VarType.FP32: "float32", - paddle.fluid.core.VarDesc.VarType.FP64: "float64", - paddle.fluid.core.VarDesc.VarType.FP16: "float16", - paddle.fluid.core.VarDesc.VarType.INT32: "int32", - paddle.fluid.core.VarDesc.VarType.INT16: "int16", - paddle.fluid.core.VarDesc.VarType.INT64: "int64", - paddle.fluid.core.VarDesc.VarType.BOOL: "bool", - paddle.fluid.core.VarDesc.VarType.INT16: "int16", - paddle.fluid.core.VarDesc.VarType.UINT8: "uint8", - paddle.fluid.core.VarDesc.VarType.INT8: "int8", -} - - -def convert_dtype_to_string(dtype: str) -> paddle.fluid.core.VarDesc.VarType: - if dtype in dtype_map: - return dtype_map[dtype] - raise TypeError("dtype shoule in %s" % list(dtype_map.keys())) - - -def get_variable_info(var: paddle.static.Variable) -> dict: - if not isinstance(var, paddle.static.Variable): - raise TypeError("var shoule be an instance of paddle.static.Variable") - - var_info = { - 'name': var.name, - 'stop_gradient': var.stop_gradient, - 'is_data': var.is_data, - 'error_clip': var.error_clip, - 'type': var.type - } - - try: - var_info['dtype'] = convert_dtype_to_string(var.dtype) - var_info['lod_level'] = var.lod_level - var_info['shape'] = var.shape - except: - pass - - var_info['persistable'] = var.persistable - - return var_info - - -def convert_syncbn_to_bn(model_filename): - """ - Since SyncBatchNorm does not have a cpu kernel, when exporting the model, the SyncBatchNorm - in the model needs to be converted to BatchNorm. 
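-
-    Usage sketch (the path is only an assumption; pass the exported
-    static-graph program file of the model):
-
-        convert_syncbn_to_bn('output/model.pdmodel')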
- """ - - def _copy_vars_and_ops_in_blocks(from_block: paddle.device.framework.Block, - to_block: paddle.device.framework.Block): - for var in from_block.vars: - var = from_block.var(var) - var_info = copy.deepcopy(get_variable_info(var)) - if isinstance(var, paddle.device.framework.Parameter): - to_block.create_parameter(**var_info) - else: - to_block.create_var(**var_info) - - for op in from_block.ops: - all_attrs = op.all_attrs() - if 'sub_block' in all_attrs: - _sub_block = to_block.program._create_block() - _copy_vars_and_ops_in_blocks(all_attrs['sub_block'], _sub_block) - to_block.program._rollback() - new_attrs = {'sub_block': _sub_block} - for key, value in all_attrs.items(): - if key == 'sub_block': - continue - new_attrs[key] = copy.deepcopy(value) - else: - new_attrs = copy.deepcopy(all_attrs) - - op_type = 'batch_norm' if op.type == 'sync_batch_norm' else op.type - op_info = { - 'type': op_type, - 'inputs': { - input: [ - to_block._find_var_recursive(var) - for var in op.input(input) - ] - for input in op.input_names - }, - 'outputs': { - output: [ - to_block._find_var_recursive(var) - for var in op.output(output) - ] - for output in op.output_names - }, - 'attrs': new_attrs - } - to_block.append_op(**op_info) - - paddle.enable_static() - with open(model_filename, 'rb') as file: - desc = file.read() - - origin_program = paddle.static.Program.parse_from_string(desc) - dest_program = paddle.static.Program() - _copy_vars_and_ops_in_blocks(origin_program.global_block(), - dest_program.global_block()) - dest_program = dest_program.clone(for_test=True) - - with open(model_filename, 'wb') as file: - file.write(dest_program.desc.serialize_to_string()) diff --git a/contrib/PanopticDeepLab/paddleseg/utils/progbar.py b/contrib/PanopticDeepLab/paddleseg/utils/progbar.py deleted file mode 100644 index 563cc5ebae..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/utils/progbar.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import sys -import time - -import numpy as np - - -class Progbar(object): - """ - Displays a progress bar. - It refers to https://github.com/keras-team/keras/blob/keras-2/keras/utils/generic_utils.py - - Args: - target (int): Total number of steps expected, None if unknown. - width (int): Progress bar width on screen. - verbose (int): Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose) - stateful_metrics (list|tuple): Iterable of string names of metrics that should *not* be - averaged over time. Metrics in this list will be displayed as-is. All - others will be averaged by the progbar before display. - interval (float): Minimum visual progress update interval (in seconds). - unit_name (str): Display name for step counts (usually "step" or "sample"). 
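-
-    Example (a minimal sketch):
-
-        import time
-        progbar = Progbar(target=100)
-        for step in range(100):
-            time.sleep(0.01)  # stand-in for real work
-            progbar.update(step + 1, values=[('loss', 0.5)])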
- """ - - def __init__(self, - target, - width=30, - verbose=1, - interval=0.05, - stateful_metrics=None, - unit_name='step'): - self.target = target - self.width = width - self.verbose = verbose - self.interval = interval - self.unit_name = unit_name - if stateful_metrics: - self.stateful_metrics = set(stateful_metrics) - else: - self.stateful_metrics = set() - - self._dynamic_display = ((hasattr(sys.stderr, 'isatty') - and sys.stderr.isatty()) - or 'ipykernel' in sys.modules - or 'posix' in sys.modules - or 'PYCHARM_HOSTED' in os.environ) - self._total_width = 0 - self._seen_so_far = 0 - # We use a dict + list to avoid garbage collection - # issues found in OrderedDict - self._values = {} - self._values_order = [] - self._start = time.time() - self._last_update = 0 - - def update(self, current, values=None, finalize=None): - """ - Updates the progress bar. - - Args: - current (int): Index of current step. - values (list): List of tuples: `(name, value_for_last_step)`. If `name` is in - `stateful_metrics`, `value_for_last_step` will be displayed as-is. - Else, an average of the metric over time will be displayed. - finalize (bool): Whether this is the last update for the progress bar. If - `None`, defaults to `current >= self.target`. - """ - - if finalize is None: - if self.target is None: - finalize = False - else: - finalize = current >= self.target - - values = values or [] - for k, v in values: - if k not in self._values_order: - self._values_order.append(k) - if k not in self.stateful_metrics: - # In the case that progress bar doesn't have a target value in the first - # epoch, both on_batch_end and on_epoch_end will be called, which will - # cause 'current' and 'self._seen_so_far' to have the same value. Force - # the minimal value to 1 here, otherwise stateful_metric will be 0s. - value_base = max(current - self._seen_so_far, 1) - if k not in self._values: - self._values[k] = [v * value_base, value_base] - else: - self._values[k][0] += v * value_base - self._values[k][1] += value_base - else: - # Stateful metrics output a numeric value. This representation - # means "take an average from a single value" but keeps the - # numeric formatting. - self._values[k] = [v, 1] - self._seen_so_far = current - - now = time.time() - info = ' - %.0fs' % (now - self._start) - if self.verbose == 1: - if now - self._last_update < self.interval and not finalize: - return - - prev_total_width = self._total_width - if self._dynamic_display: - sys.stderr.write('\b' * prev_total_width) - sys.stderr.write('\r') - else: - sys.stderr.write('\n') - - if self.target is not None: - numdigits = int(np.log10(self.target)) + 1 - bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target) - prog = float(current) / self.target - prog_width = int(self.width * prog) - if prog_width > 0: - bar += ('=' * (prog_width - 1)) - if current < self.target: - bar += '>' - else: - bar += '=' - bar += ('.' 
* (self.width - prog_width)) - bar += ']' - else: - bar = '%7d/Unknown' % current - - self._total_width = len(bar) - sys.stderr.write(bar) - - if current: - time_per_unit = (now - self._start) / current - else: - time_per_unit = 0 - - if self.target is None or finalize: - if time_per_unit >= 1 or time_per_unit == 0: - info += ' %.0fs/%s' % (time_per_unit, self.unit_name) - elif time_per_unit >= 1e-3: - info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name) - else: - info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name) - else: - eta = time_per_unit * (self.target - current) - if eta > 3600: - eta_format = '%d:%02d:%02d' % (eta // 3600, - (eta % 3600) // 60, eta % 60) - elif eta > 60: - eta_format = '%d:%02d' % (eta // 60, eta % 60) - else: - eta_format = '%ds' % eta - - info = ' - ETA: %s' % eta_format - - for k in self._values_order: - info += ' - %s:' % k - if isinstance(self._values[k], list): - avg = np.mean( - self._values[k][0] / max(1, self._values[k][1])) - if abs(avg) > 1e-3: - info += ' %.4f' % avg - else: - info += ' %.4e' % avg - else: - info += ' %s' % self._values[k] - - self._total_width += len(info) - if prev_total_width > self._total_width: - info += (' ' * (prev_total_width - self._total_width)) - - if finalize: - info += '\n' - - sys.stderr.write(info) - sys.stderr.flush() - - elif self.verbose == 2: - if finalize: - numdigits = int(np.log10(self.target)) + 1 - count = ('%' + str(numdigits) + 'd/%d') % (current, self.target) - info = count + info - for k in self._values_order: - info += ' - %s:' % k - avg = np.mean( - self._values[k][0] / max(1, self._values[k][1])) - if avg > 1e-3: - info += ' %.4f' % avg - else: - info += ' %.4e' % avg - info += '\n' - - sys.stderr.write(info) - sys.stderr.flush() - - self._last_update = now - - def add(self, n, values=None): - self.update(self._seen_so_far + n, values) diff --git a/contrib/PanopticDeepLab/paddleseg/utils/timer.py b/contrib/PanopticDeepLab/paddleseg/utils/timer.py deleted file mode 100644 index d7d74670d1..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/utils/timer.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import time - - -class TimeAverager(object): - def __init__(self): - self.reset() - - def reset(self): - self._cnt = 0 - self._total_time = 0 - self._total_samples = 0 - - def record(self, usetime, num_samples=None): - self._cnt += 1 - self._total_time += usetime - if num_samples: - self._total_samples += num_samples - - def get_average(self): - if self._cnt == 0: - return 0 - return self._total_time / float(self._cnt) - - def get_ips_average(self): - if not self._total_samples or self._cnt == 0: - return 0 - return float(self._total_samples) / self._total_time - - -def calculate_eta(remaining_step, speed): - if remaining_step < 0: - remaining_step = 0 - remaining_time = int(remaining_step * speed) - result = "{:0>2}:{:0>2}:{:0>2}" - arr = [] - for i in range(2, -1, -1): - arr.append(int(remaining_time / 60**i)) - remaining_time %= 60**i - return result.format(*arr) diff --git a/contrib/PanopticDeepLab/paddleseg/utils/utils.py b/contrib/PanopticDeepLab/paddleseg/utils/utils.py deleted file mode 100644 index 73a298d196..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/utils/utils.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import contextlib -import filelock -import math -import os -import tempfile -from urllib.parse import urlparse, unquote - -import paddle - -from paddleseg.utils import logger, seg_env -from paddleseg.utils.download import download_file_and_uncompress - - -@contextlib.contextmanager -def generate_tempdir(directory: str = None, **kwargs): - '''Generate a temporary directory''' - directory = seg_env.TMP_HOME if not directory else directory - with tempfile.TemporaryDirectory(dir=directory, **kwargs) as _dir: - yield _dir - - -def load_entire_model(model, pretrained): - if pretrained is not None: - load_pretrained_model(model, pretrained) - else: - logger.warning('Not all pretrained params of {} are loaded, ' \ - 'training from scratch or a pretrained backbone.'.format(model.__class__.__name__)) - - -def load_pretrained_model(model, pretrained_model): - if pretrained_model is not None: - logger.info('Loading pretrained model from {}'.format(pretrained_model)) - # download pretrained model from url - if urlparse(pretrained_model).netloc: - pretrained_model = unquote(pretrained_model) - savename = pretrained_model.split('/')[-1] - if not savename.endswith(('tgz', 'tar.gz', 'tar', 'zip')): - savename = pretrained_model.split('/')[-2] - else: - savename = savename.split('.')[0] - with generate_tempdir() as _dir: - with filelock.FileLock( - os.path.join(seg_env.TMP_HOME, savename)): - pretrained_model = download_file_and_uncompress( - pretrained_model, - savepath=_dir, - extrapath=seg_env.PRETRAINED_MODEL_HOME, - extraname=savename) - - pretrained_model = os.path.join(pretrained_model, - 'model.pdparams') - - if os.path.exists(pretrained_model): - para_state_dict = paddle.load(pretrained_model) - - model_state_dict = model.state_dict() - keys = model_state_dict.keys() - 
num_params_loaded = 0 - for k in keys: - if k not in para_state_dict: - logger.warning("{} is not in pretrained model".format(k)) - elif list(para_state_dict[k].shape) != list( - model_state_dict[k].shape): - logger.warning( - "[SKIP] Shape of pretrained params {} doesn't match.(Pretrained: {}, Actual: {})" - .format(k, para_state_dict[k].shape, - model_state_dict[k].shape)) - else: - model_state_dict[k] = para_state_dict[k] - num_params_loaded += 1 - model.set_dict(model_state_dict) - logger.info("There are {}/{} variables loaded into {}.".format( - num_params_loaded, len(model_state_dict), - model.__class__.__name__)) - - else: - raise ValueError( - 'The pretrained model directory is not Found: {}'.format( - pretrained_model)) - else: - logger.info( - 'No pretrained model to load, {} will be trained from scratch.'. - format(model.__class__.__name__)) - - -def resume(model, optimizer, resume_model): - if resume_model is not None: - logger.info('Resume model from {}'.format(resume_model)) - if os.path.exists(resume_model): - resume_model = os.path.normpath(resume_model) - ckpt_path = os.path.join(resume_model, 'model.pdparams') - para_state_dict = paddle.load(ckpt_path) - ckpt_path = os.path.join(resume_model, 'model.pdopt') - opti_state_dict = paddle.load(ckpt_path) - model.set_state_dict(para_state_dict) - optimizer.set_state_dict(opti_state_dict) - - iter = resume_model.split('_')[-1] - iter = int(iter) - return iter - else: - raise ValueError( - 'Directory of the model needed to resume is not Found: {}'. - format(resume_model)) - else: - logger.info('No model needed to resume.') diff --git a/contrib/PanopticDeepLab/paddleseg/utils/visualize.py b/contrib/PanopticDeepLab/paddleseg/utils/visualize.py deleted file mode 100644 index 27c950ec0b..0000000000 --- a/contrib/PanopticDeepLab/paddleseg/utils/visualize.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/master/segmentation/utils/save_annotation.py - -import os - -import cv2 -import numpy as np -from PIL import Image as PILImage - -# Refence: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L14 -_COLORS = np.array([ - 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, - 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, - 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000, - 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, - 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, - 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, - 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, - 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, - 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, - 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, - 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, - 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, - 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, - 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, - 0.333, 1.000, 1.000, 0.667, 1.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, - 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, - 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, - 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, - 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, - 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.857, 0.857, 0.857, 1.000, - 1.000, 1.000 -]).astype(np.float32).reshape(-1, 3) - - -def random_color(rgb=False, maximum=255): - """ - Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L111 - Args: - rgb (bool): whether to return RGB colors or BGR colors. - maximum (int): either 255 or 1 - Returns: - ndarray: a vector of 3 numbers - """ - idx = np.random.randint(0, len(_COLORS)) - ret = _COLORS[idx] * maximum - if not rgb: - ret = ret[::-1] - return ret - - -def cityscape_colormap(): - """Get CityScapes colormap""" - colormap = np.zeros((256, 3), dtype=np.uint8) - colormap[0] = [128, 64, 128] - colormap[1] = [244, 35, 232] - colormap[2] = [70, 70, 70] - colormap[3] = [102, 102, 156] - colormap[4] = [190, 153, 153] - colormap[5] = [153, 153, 153] - colormap[6] = [250, 170, 30] - colormap[7] = [220, 220, 0] - colormap[8] = [107, 142, 35] - colormap[9] = [152, 251, 152] - colormap[10] = [70, 130, 180] - colormap[11] = [220, 20, 60] - colormap[12] = [255, 0, 0] - colormap[13] = [0, 0, 142] - colormap[14] = [0, 0, 70] - colormap[15] = [0, 60, 100] - colormap[16] = [0, 80, 100] - colormap[17] = [0, 0, 230] - colormap[18] = [119, 11, 32] - colormap = colormap[:, ::-1] - return colormap - - -def visualize_semantic(semantic, save_path, colormap, image=None, weight=0.5): - """ - Save semantic segmentation results. - - Args: - semantic(np.ndarray): The result semantic segmenation results, shape is (h, w). - save_path(str): The save path. - colormap(np.ndarray): A color map for visualization. - image(np.ndarray, optional): Origin image to prediction, merge semantic with - image if provided. Default: None. 
- weight(float, optional): The image weight when merge semantic with image. Default: 0.6. - """ - semantic = semantic.astype('uint8') - colored_semantic = colormap[semantic] - if image is not None: - colored_semantic = cv2.addWeighted(image, weight, colored_semantic, - 1 - weight, 0) - cv2.imwrite(save_path, colored_semantic) - - -def visualize_instance(instance, save_path, stuff_id=0, image=None, weight=0.5): - """ - Save instance segmentation results. - - Args: - instance(np.ndarray): The instance segmentation results, shape is (h, w). - save_path(str): The save path. - stuff_id(int, optional): Id for background that not want to plot. - image(np.ndarray, optional): Origin image to prediction, merge instance with - image if provided. Default: None. - weight(float, optional): The image weight when merge instance with image. Default: 0.6. - """ - # Add color map for instance segmentation result. - ids = np.unique(instance) - num_colors = len(ids) - colormap = np.zeros((num_colors, 3), dtype=np.uint8) - # Maps label to continuous value - for i in range(num_colors): - instance[instance == ids[i]] = i - colormap[i, :] = random_color(maximum=255) - if ids[i] == stuff_id: - colormap[i, :] = np.array([0, 0, 0]) - colored_instance = colormap[instance] - - if image is not None: - colored_instance = cv2.addWeighted(image, weight, colored_instance, - 1 - weight, 0) - cv2.imwrite(save_path, colored_instance) - - -def visualize_panoptic(panoptic, - save_path, - label_divisor, - colormap, - image=None, - weight=0.5, - ignore_index=255): - """ - Save panoptic segmentation results. - - Args: - panoptic(np.ndarray): The panoptic segmentation results, shape is (h, w). - save_path(str): The save path. - label_divisor(int): Used to convert panoptic id = semantic id * label_divisor + instance_id. - colormap(np.ndarray): A color map for visualization. - image(np.ndarray, optional): Origin image to prediction, merge panoptic with - image if provided. Default: None. - weight(float, optional): The image weight when merge panoptic with image. Default: 0.6. - ignore_index(int, optional): Specifies a target value that is ignored. 
- """ - colored_panoptic = np.zeros((panoptic.shape[0], panoptic.shape[1], 3), - dtype=np.uint8) - taken_colors = set((0, 0, 0)) - - def _random_color(base, max_dist=30): - color = base + np.random.randint( - low=-max_dist, high=max_dist + 1, size=3) - return tuple(np.maximum(0, np.minimum(255, color))) - - for lab in np.unique(panoptic): - mask = panoptic == lab - - ignore_mask = panoptic == ignore_index - ins_mask = panoptic > label_divisor - if lab > label_divisor: - base_color = colormap[lab // label_divisor] - elif lab != ignore_index: - base_color = colormap[lab] - else: - continue - if tuple(base_color) not in taken_colors: - taken_colors.add(tuple(base_color)) - color = base_color - else: - while True: - color = _random_color(base_color) - if color not in taken_colors: - taken_colors.add(color) - break - colored_panoptic[mask] = color - - if image is not None: - colored_panoptic = cv2.addWeighted(image, weight, colored_panoptic, - 1 - weight, 0) - cv2.imwrite(save_path, colored_panoptic) From cc79ca6f57da2c3591233c5877747a8718a78c69 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 10 May 2021 17:36:05 +0800 Subject: [PATCH 100/210] update train process --- contrib/PanopticDeepLab/train.py | 6 ++++-- paddleseg/models/layers/pyramid_pool.py | 5 +++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/contrib/PanopticDeepLab/train.py b/contrib/PanopticDeepLab/train.py index 91b44245c9..a34f216377 100644 --- a/contrib/PanopticDeepLab/train.py +++ b/contrib/PanopticDeepLab/train.py @@ -15,10 +15,12 @@ import argparse import paddle - from paddleseg.cvlibs import manager, Config from paddleseg.utils import get_sys_env, logger, config_check -from paddleseg.core import train + +from core import train +from datasets import * +from models import * def parse_args(): diff --git a/paddleseg/models/layers/pyramid_pool.py b/paddleseg/models/layers/pyramid_pool.py index bb1c9bafda..6ce9958ee3 100644 --- a/paddleseg/models/layers/pyramid_pool.py +++ b/paddleseg/models/layers/pyramid_pool.py @@ -39,7 +39,8 @@ def __init__(self, out_channels, align_corners, use_sep_conv=False, - image_pooling=False): + image_pooling=False, + drop_rate=0.1): super().__init__() self.align_corners = align_corners @@ -74,7 +75,7 @@ def __init__(self, out_channels=out_channels, kernel_size=1) - self.dropout = nn.Dropout(p=0.1) # drop rate + self.dropout = nn.Dropout(p=drop_rate) # drop rate def forward(self, x): outputs = [] From de2b311757b601663fad01d3a0f3da37f30a3ed8 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Mon, 10 May 2021 17:56:13 +0800 Subject: [PATCH 101/210] update val process --- contrib/PanopticDeepLab/core/__init__.py | 20 + contrib/PanopticDeepLab/core/infer.py | 349 ++++++++++++++++++ contrib/PanopticDeepLab/core/predict.py | 188 ++++++++++ contrib/PanopticDeepLab/core/train.py | 315 ++++++++++++++++ contrib/PanopticDeepLab/core/val.py | 181 +++++++++ contrib/PanopticDeepLab/datasets/__init__.py | 15 + .../datasets/cityscapes_panoptic.py | 194 ++++++++++ contrib/PanopticDeepLab/models/__init__.py | 17 + .../PanopticDeepLab/models/losses/__init__.py | 17 + .../models/losses/cross_entropy_loss.py | 77 ++++ .../PanopticDeepLab/models/losses/l1_loss.py | 72 ++++ .../models/losses/mean_square_error_loss.py | 60 +++ .../models/panoptic_deeplab.py | 339 +++++++++++++++++ .../PanopticDeepLab/transforms/__init__.py | 15 + .../transforms/target_transforms.py | 281 ++++++++++++++ contrib/PanopticDeepLab/utils/__init__.py | 15 + .../utils/evaluation/__init__.py | 17 + 
.../utils/evaluation/instance.py | 345 +++++++++++++++++ .../utils/evaluation/panoptic.py | 220 +++++++++++ .../utils/evaluation/semantic.py | 85 +++++ contrib/PanopticDeepLab/utils/visualize.py | 195 ++++++++++ contrib/PanopticDeepLab/val.py | 11 +- 22 files changed, 3024 insertions(+), 4 deletions(-) create mode 100644 contrib/PanopticDeepLab/core/__init__.py create mode 100644 contrib/PanopticDeepLab/core/infer.py create mode 100644 contrib/PanopticDeepLab/core/predict.py create mode 100644 contrib/PanopticDeepLab/core/train.py create mode 100644 contrib/PanopticDeepLab/core/val.py create mode 100644 contrib/PanopticDeepLab/datasets/__init__.py create mode 100644 contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py create mode 100644 contrib/PanopticDeepLab/models/__init__.py create mode 100644 contrib/PanopticDeepLab/models/losses/__init__.py create mode 100644 contrib/PanopticDeepLab/models/losses/cross_entropy_loss.py create mode 100644 contrib/PanopticDeepLab/models/losses/l1_loss.py create mode 100644 contrib/PanopticDeepLab/models/losses/mean_square_error_loss.py create mode 100644 contrib/PanopticDeepLab/models/panoptic_deeplab.py create mode 100644 contrib/PanopticDeepLab/transforms/__init__.py create mode 100644 contrib/PanopticDeepLab/transforms/target_transforms.py create mode 100644 contrib/PanopticDeepLab/utils/__init__.py create mode 100644 contrib/PanopticDeepLab/utils/evaluation/__init__.py create mode 100644 contrib/PanopticDeepLab/utils/evaluation/instance.py create mode 100644 contrib/PanopticDeepLab/utils/evaluation/panoptic.py create mode 100644 contrib/PanopticDeepLab/utils/evaluation/semantic.py create mode 100644 contrib/PanopticDeepLab/utils/visualize.py diff --git a/contrib/PanopticDeepLab/core/__init__.py b/contrib/PanopticDeepLab/core/__init__.py new file mode 100644 index 0000000000..35189064a6 --- /dev/null +++ b/contrib/PanopticDeepLab/core/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .train import train +from .val import evaluate +from .predict import predict +from . import infer + +__all__ = ['train', 'evaluate', 'predict'] diff --git a/contrib/PanopticDeepLab/core/infer.py b/contrib/PanopticDeepLab/core/infer.py new file mode 100644 index 0000000000..069ed0cd92 --- /dev/null +++ b/contrib/PanopticDeepLab/core/infer.py @@ -0,0 +1,349 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import numpy as np
+import paddle
+import paddle.nn.functional as F
+
+
+def get_reverse_list(ori_shape, transforms):
+    """
+    Get the reverse list of transforms.
+    Args:
+        ori_shape (list): Origin shape of image.
+        transforms (list): List of transform.
+    Returns:
+        list: List of tuple, there are two formats:
+            ('resize', (h, w)) The image shape before resize,
+            ('padding', (h, w)) The image shape before padding.
+    """
+    reverse_list = []
+    h, w = ori_shape[0], ori_shape[1]
+    for op in transforms:
+        if op.__class__.__name__ in ['Resize']:
+            reverse_list.append(('resize', (h, w)))
+            h, w = op.target_size[0], op.target_size[1]
+        if op.__class__.__name__ in ['ResizeByLong']:
+            reverse_list.append(('resize', (h, w)))
+            long_edge = max(h, w)
+            short_edge = min(h, w)
+            short_edge = int(round(short_edge * op.long_size / long_edge))
+            long_edge = op.long_size
+            if h > w:
+                h = long_edge
+                w = short_edge
+            else:
+                w = long_edge
+                h = short_edge
+        if op.__class__.__name__ in ['Padding']:
+            reverse_list.append(('padding', (h, w)))
+            w, h = op.target_size[0], op.target_size[1]
+        if op.__class__.__name__ in ['LimitLong']:
+            long_edge = max(h, w)
+            short_edge = min(h, w)
+            if ((op.max_long is not None) and (long_edge > op.max_long)):
+                reverse_list.append(('resize', (h, w)))
+                long_edge = op.max_long
+                short_edge = int(round(short_edge * op.max_long / long_edge))
+            elif ((op.min_long is not None) and (long_edge < op.min_long)):
+                reverse_list.append(('resize', (h, w)))
+                long_edge = op.min_long
+                short_edge = int(round(short_edge * op.min_long / long_edge))
+            if h > w:
+                h = long_edge
+                w = short_edge
+            else:
+                w = long_edge
+                h = short_edge
+    return reverse_list
+
+
+def reverse_transform(pred, ori_shape, transforms):
+    """Recover pred to origin shape"""
+    reverse_list = get_reverse_list(ori_shape, transforms)
+    for item in reverse_list[::-1]:
+        if item[0] == 'resize':
+            h, w = item[1][0], item[1][1]
+            pred = F.interpolate(pred, (h, w), mode='nearest')
+        elif item[0] == 'padding':
+            h, w = item[1][0], item[1][1]
+            pred = pred[:, :, 0:h, 0:w]
+        else:
+            raise Exception("Unexpected info '{}' in im_info".format(item[0]))
+    return pred
+
+
+def find_instance_center(ctr_hmp, threshold=0.1, nms_kernel=3, top_k=None):
+    """
+    Find the center points from the center heatmap.
+    Args:
+        ctr_hmp (Tensor): A Tensor of shape [1, H, W] of raw center heatmap output.
+        threshold (float, optional): Threshold applied to center heatmap score. Default: 0.1.
+        nms_kernel (int, optional): NMS max pooling kernel size. Default: 3.
+        top_k (int, optional): An Integer, top k centers to keep. Default: None.
+    Returns:
+        Tensor: A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x).
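A quick sanity check of the recovery logic above, as a minimal sketch with invented shapes: suppose `get_reverse_list` returned `[('resize', (6, 6)), ('padding', (3, 3))]`, i.e. the image was resized away from 6x6 and then padded away from 3x3. Walking the list in reverse crops first, then resizes back:

```python
import paddle
import paddle.nn.functional as F

pred = paddle.rand([1, 1, 4, 4])                    # prediction at network resolution
pred = pred[:, :, 0:3, 0:3]                         # undo ('padding', (3, 3)): crop
pred = F.interpolate(pred, (6, 6), mode='nearest')  # undo ('resize', (6, 6))
print(pred.shape)                                   # [1, 1, 6, 6]
```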
+ """ + # thresholding, setting values below threshold to 0 + ctr_hmp = F.thresholded_relu(ctr_hmp, threshold) + + #NMS + nms_padding = (nms_kernel - 1) // 2 + ctr_hmp = ctr_hmp.unsqueeze(0) + ctr_hmp_max_pooled = F.max_pool2d( + ctr_hmp, kernel_size=nms_kernel, stride=1, padding=nms_padding) + ctr_hmp = ctr_hmp * (ctr_hmp_max_pooled == ctr_hmp) + + ctr_hmp = ctr_hmp.squeeze((0, 1)) + if len(ctr_hmp.shape) != 2: + raise ValueError('Something is wrong with center heatmap dimension.') + + if top_k is None: + top_k_score = 0 + else: + top_k_score, _ = paddle.topk(paddle.flatten(ctr_hmp), top_k) + top_k_score = top_k_score[-1] + # non-zero points are candidate centers + ctr_hmp_k = (ctr_hmp > top_k_score[-1]).astype('int64') + if ctr_hmp_k.sum() == 0: + ctr_all = None + else: + ctr_all = paddle.nonzero(ctr_hmp_k) + return ctr_all + + +def group_pixels(ctr, offsets): + """ + Gives each pixel in the image an instance id. + + Args: + ctr (Tensor): A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x). + offsets (Tensor): A Tensor of shape [2, H, W] of raw offset output, where N is the batch size, + for consistent, we only support N=1. The order of second dim is (offset_y, offset_x). + + Returns: + Tensor: A Tensor of shape [1, H, W], ins_id is 1, 2, ... + """ + height, width = offsets.shape[-2:] + y_coord = paddle.arange(height, dtype=offsets.dtype).reshape([1, -1, 1]) + y_coord = paddle.concat([y_coord] * width, axis=2) + x_coord = paddle.arange(width, dtype=offsets.dtype).reshape([1, 1, -1]) + x_coord = paddle.concat([x_coord] * height, axis=1) + coord = paddle.concat([y_coord, x_coord], axis=0) + + ctr_loc = coord + offsets + ctr_loc = ctr_loc.reshape((2, height * width)).transpose((1, 0)) + + # ctr: [K, 2] -> [K, 1, 2] + # ctr_loc = [H*W, 2] -> [1, H*W, 2] + ctr = ctr.unsqueeze(1) + ctr_loc = ctr_loc.unsqueeze(0) + + # distance: [K, H*W] + distance = paddle.norm((ctr - ctr_loc).astype('float32'), axis=-1) + + # finds center with minimum distance at each location, offset by 1, to reserve id=0 for stuff + instance_id = paddle.argmin( + distance, axis=0).reshape((1, height, width)) + 1 + + return instance_id + + +def get_instance_segmentation(semantic, + ctr_hmp, + offset, + thing_list, + threshold=0.1, + nms_kernel=3, + top_k=None): + """ + Post-processing for instance segmentation, gets class agnostic instance id map. + + Args: + semantic (Tensor): A Tensor of shape [1, H, W], predicted semantic label. + ctr_hmp (Tensor): A Tensor of shape [1, H, W] of raw center heatmap output, where N is the batch size, + for consistent, we only support N=1. + offsets (Tensor): A Tensor of shape [2, H, W] of raw offset output, where N is the batch size, + for consistent, we only support N=1. The order of second dim is (offset_y, offset_x). + thing_list (list): A List of thing class id. + threshold (float, optional): A Float, threshold applied to center heatmap score. Default: 0.1. + nms_kernel (int, optional): An Integer, NMS max pooling kernel size. Default: 3. + top_k (int, optional): An Integer, top k centers to keep. Default: None. + + Returns: + Tensor: Instance segmentation results which shape is [1, H, W]. + Tensor: A Tensor of shape [1, K, 2] where K is the number of center points. The order of second dim is (y, x). 
+ """ + thing_seg = paddle.zeros_like(semantic) + for thing_class in thing_list: + thing_seg = thing_seg + (semantic == thing_class).astype('int64') + thing_seg = (thing_seg > 0).astype('int64') + center = find_instance_center( + ctr_hmp, threshold=threshold, nms_kernel=nms_kernel, top_k=top_k) + if center is None: + return paddle.zeros_like(semantic), center + ins_seg = group_pixels(center, offset) + return thing_seg * ins_seg, center.unsqueeze(0) + + +def merge_semantic_and_instance(semantic, instance, label_divisor, thing_list, + stuff_area, ignore_index): + """ + Post-processing for panoptic segmentation, by merging semantic segmentation label and class agnostic + instance segmentation label. + + Args: + semantic (Tensor): A Tensor of shape [1, H, W], predicted semantic label. + instance (Tensor): A Tensor of shape [1, H, W], predicted instance label. + label_divisor (int): An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id. + thing_list (list): A List of thing class id. + stuff_area (int): An Integer, remove stuff whose area is less tan stuff_area. + ignore_index (int): Specifies a value that is ignored. + + Returns: + Tensor: A Tensor of shape [1, H, W] . The pixels whose value equaling ignore_index is ignored. + The stuff class is represented as format like class_id, while + thing class as class_id * label_divisor + ins_id and ins_id begin from 1. + """ + # In case thing mask does not align with semantic prediction + pan_seg = paddle.zeros_like(semantic) + ignore_index + thing_seg = instance > 0 + semantic_thing_seg = paddle.zeros_like(semantic) + for thing_class in thing_list: + semantic_thing_seg += semantic == thing_class + + # keep track of instance id for each class + class_id_tracker = {} + + # paste thing by majority voting + ins_ids = paddle.unique(instance) + for ins_id in ins_ids: + if ins_id == 0: + continue + # Make sure only do majority voting within semantic_thing_seg + thing_mask = paddle.logical_and(instance == ins_id, + semantic_thing_seg == 1) + if paddle.all(paddle.logical_not(thing_mask)): + continue + # get class id for instance of ins_id + sem_ins_id = paddle.gather( + semantic.reshape((-1, )), paddle.nonzero( + thing_mask.reshape((-1, )))) # equal to semantic[thing_mask] + v, c = paddle.unique(sem_ins_id, return_counts=True) + class_id = paddle.gather(v, c.argmax()) + class_id = class_id.numpy()[0] + if class_id in class_id_tracker: + new_ins_id = class_id_tracker[class_id] + else: + class_id_tracker[class_id] = 1 + new_ins_id = 1 + class_id_tracker[class_id] += 1 + + # pan_seg[thing_mask] = class_id * label_divisor + new_ins_id + pan_seg = pan_seg * (paddle.logical_not(thing_mask)) + ( + class_id * label_divisor + new_ins_id) * thing_mask.astype('int64') + + # paste stuff to unoccupied area + class_ids = paddle.unique(semantic) + for class_id in class_ids: + if class_id.numpy() in thing_list: + # thing class + continue + # calculate stuff area + stuff_mask = paddle.logical_and(semantic == class_id, + paddle.logical_not(thing_seg)) + area = paddle.sum(stuff_mask.astype('int64')) + if area >= stuff_area: + # pan_seg[stuff_mask] = class_id + pan_seg = pan_seg * (paddle.logical_not(stuff_mask) + ) + stuff_mask.astype('int64') * class_id + + return pan_seg + + +def inference( + model, + im, + transforms, + thing_list, + label_divisor, + stuff_area, + ignore_index, + threshold=0.1, + nms_kernel=3, + top_k=None, + ori_shape=None, +): + """ + Inference for image. + + Args: + model (paddle.nn.Layer): model to get logits of image. 
+ im (Tensor): the input image. + transforms (list): Transforms for image. + thing_list (list): A List of thing class id. + label_divisor (int): An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id. + stuff_area (int): An Integer, remove stuff whose area is less tan stuff_area. + ignore_index (int): Specifies a value that is ignored. + threshold (float, optional): A Float, threshold applied to center heatmap score. Default: 0.1. + nms_kernel (int, optional): An Integer, NMS max pooling kernel size. Default: 3. + top_k (int, optional): An Integer, top k centers to keep. Default: None. + ori_shape (list): Origin shape of image. + + Returns: + list: A list of [semantic, semantic_softmax, instance, panoptic, ctr_hmp]. + semantic: Semantic segmentation results with shape [1, 1, H, W], which value is 0, 1, 2... + semantic_softmax: A Tensor represent probabilities for each class, which shape is [1, num_classes, H, W]. + instance: Instance segmentation results with class agnostic, which value is 0, 1, 2, ..., and 0 is stuff. + panoptic: Panoptic segmentation results which value is ignore_index, stuff_id, thing_id * label_divisor + ins_id , ins_id >= 1. + """ + logits = model(im) + # semantic: [1, c, h, w], center: [1, 1, h, w], offset: [1, 2, h, w] + semantic, ctr_hmp, offset = logits + semantic = paddle.argmax(semantic, axis=1, keepdim=True) + semantic = semantic.squeeze(0) # shape: [1, h, w] + semantic_softmax = F.softmax(logits[0], axis=1).squeeze() + ctr_hmp = ctr_hmp.squeeze(0) # shape: [1, h, w] + offset = offset.squeeze(0) # shape: [2, h, w] + + instance, center = get_instance_segmentation( + semantic=semantic, + ctr_hmp=ctr_hmp, + offset=offset, + thing_list=thing_list, + threshold=threshold, + nms_kernel=nms_kernel, + top_k=top_k) + panoptic = merge_semantic_and_instance(semantic, instance, label_divisor, + thing_list, stuff_area, ignore_index) + + # Recover to origin shape + # semantic: 0, 1, 2, 3... + # instance: 0, 1, 2, 3, 4, 5... and the 0 is stuff. + # panoptic: ignore_index, stuff_id, thing_id * label_divisor + ins_id , ins_id >= 1. + results = [semantic, semantic_softmax, instance, panoptic, ctr_hmp] + if ori_shape is not None: + results = [i.unsqueeze(0) for i in results] + results = [ + reverse_transform(i, ori_shape=ori_shape, transforms=transforms) + for i in results + ] + + return results diff --git a/contrib/PanopticDeepLab/core/predict.py b/contrib/PanopticDeepLab/core/predict.py new file mode 100644 index 0000000000..eb0249033d --- /dev/null +++ b/contrib/PanopticDeepLab/core/predict.py @@ -0,0 +1,188 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
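For orientation, this is roughly how the evaluation and prediction code later in this patch calls `inference`; model and dataset construction are elided, so `model`, `im`, `ori_shape`, and `val_dataset` are assumed to exist:

```python
import paddle
from core import infer

model.eval()
with paddle.no_grad():
    semantic, semantic_softmax, instance, panoptic, ctr_hmp = infer.inference(
        model=model,
        im=im,                                   # [1, 3, H, W] preprocessed image
        transforms=val_dataset.transforms.transforms,
        thing_list=val_dataset.thing_list,       # [11, ..., 18] for Cityscapes
        label_divisor=val_dataset.label_divisor,
        stuff_area=val_dataset.stuff_area,
        ignore_index=val_dataset.ignore_index,
        threshold=0.1,
        nms_kernel=7,
        top_k=200,
        ori_shape=ori_shape)                     # e.g. (1024, 2048)
```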
+ +import os +import math + +import cv2 +import numpy as np +import paddle + +from paddleseg import utils +from paddleseg.core import infer +from paddleseg.utils import logger, progbar + + +def mkdir(path): + sub_dir = os.path.dirname(path) + if not os.path.exists(sub_dir): + os.makedirs(sub_dir) + + +def partition_list(arr, m): + """split the list 'arr' into m pieces""" + n = int(math.ceil(len(arr) / float(m))) + return [arr[i:i + n] for i in range(0, len(arr), n)] + + +def get_save_name(im_path, im_dir): + """get the saved name""" + if im_dir is not None: + im_file = im_path.replace(im_dir, '') + else: + im_file = os.path.basename(im_path) + if im_file[0] == '/': + im_file = im_file[1:] + return im_file + + +def add_info_to_save_path(save_path, info): + """Add more information to save path""" + fname, fextension = os.path.splitext(save_path) + fname = '_'.join([fname, info]) + save_path = ''.join([fname, fextension]) + return save_path + + +def predict(model, + model_path, + image_list, + transforms, + thing_list, + label_divisor, + stuff_area, + ignore_index, + image_dir=None, + save_dir='output', + threshold=0.1, + nms_kernel=7, + top_k=200): + """ + predict and visualize the image_list. + + Args: + model (nn.Layer): Used to predict for input image. + model_path (str): The path of pretrained model. + image_list (list): A list of image path to be predicted. + transforms (transform.Compose): Preprocess for input image. + thing_list (list): A List of thing class id. + label_divisor (int): An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id. + stuff_area (int): An Integer, remove stuff whose area is less tan stuff_area. + ignore_index (int): Specifies a value that is ignored. + image_dir (str, optional): The root directory of the images predicted. Default: None. + save_dir (str, optional): The directory to save the visualized results. Default: 'output'. + threshold(float, optional): Threshold applied to center heatmap score. Defalut: 0.1. + nms_kernel(int, optional): NMS max pooling kernel size. Default: 7. + top_k(int, optional): Top k centers to keep. Default: 200. + """ + utils.utils.load_entire_model(model, model_path) + model.eval() + nranks = paddle.distributed.get_world_size() + local_rank = paddle.distributed.get_rank() + if nranks > 1: + img_lists = partition_list(image_list, nranks) + else: + img_lists = [image_list] + + semantic_save_dir = os.path.join(save_dir, 'semantic') + instance_save_dir = os.path.join(save_dir, 'instance') + panoptic_save_dir = os.path.join(save_dir, 'panoptic') + + colormap = utils.cityscape_colormap() + + logger.info("Start to predict...") + progbar_pred = progbar.Progbar(target=len(img_lists[0]), verbose=1) + with paddle.no_grad(): + for i, im_path in enumerate(img_lists[local_rank]): + ori_im = cv2.imread(im_path) + ori_shape = ori_im.shape[:2] + im, _ = transforms(ori_im) + im = im[np.newaxis, ...] 
+ im = paddle.to_tensor(im) + + semantic, semantic_softmax, instance, panoptic, ctr_hmp = infer.inference( + model=model, + im=im, + transforms=transforms.transforms, + thing_list=thing_list, + label_divisor=label_divisor, + stuff_area=stuff_area, + ignore_index=ignore_index, + threshold=threshold, + nms_kernel=nms_kernel, + top_k=top_k, + ori_shape=ori_shape) + semantic = semantic.squeeze().numpy() + instance = instance.squeeze().numpy() + panoptic = panoptic.squeeze().numpy() + + im_file = get_save_name(im_path, image_dir) + + # visual semantic segmentation results + save_path = os.path.join(semantic_save_dir, im_file) + mkdir(save_path) + utils.visualize_semantic( + semantic, save_path=save_path, colormap=colormap) + # Save added image for semantic segmentation results + save_path_ = add_info_to_save_path(save_path, 'add') + utils.visualize_semantic( + semantic, save_path=save_path_, colormap=colormap, image=ori_im) + # panoptic to semantic + ins_mask = panoptic > label_divisor + pan_to_sem = panoptic.copy() + pan_to_sem[ins_mask] = pan_to_sem[ins_mask] // label_divisor + save_path_ = add_info_to_save_path(save_path, + 'panoptic_to_semantic') + utils.visualize_semantic( + pan_to_sem, save_path=save_path_, colormap=colormap) + save_path_ = add_info_to_save_path(save_path, + 'panoptic_to_semantic_added') + utils.visualize_semantic( + pan_to_sem, + save_path=save_path_, + colormap=colormap, + image=ori_im) + + # vusual instance segmentation results + pan_to_ins = panoptic.copy() + ins_mask = pan_to_ins > label_divisor + pan_to_ins[~ins_mask] = 0 + save_path = os.path.join(instance_save_dir, im_file) + mkdir(save_path) + utils.visualize_instance(pan_to_ins, save_path=save_path) + # Save added image for instance segmentation results + save_path_ = add_info_to_save_path(save_path, 'added') + utils.visualize_instance( + pan_to_ins, save_path=save_path_, image=ori_im) + + # visual panoptic segmentation results + save_path = os.path.join(panoptic_save_dir, im_file) + mkdir(save_path) + utils.visualize_panoptic( + panoptic, + save_path=save_path, + label_divisor=label_divisor, + colormap=colormap, + ignore_index=ignore_index) + # Save added image for panoptic segmentation results + save_path_ = add_info_to_save_path(save_path, 'added') + utils.visualize_panoptic( + panoptic, + save_path=save_path_, + label_divisor=label_divisor, + colormap=colormap, + image=ori_im, + ignore_index=ignore_index) + + progbar_pred.update(i + 1) diff --git a/contrib/PanopticDeepLab/core/train.py b/contrib/PanopticDeepLab/core/train.py new file mode 100644 index 0000000000..58ab85957d --- /dev/null +++ b/contrib/PanopticDeepLab/core/train.py @@ -0,0 +1,315 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
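The predict loop above derives its semantic and instance visualizations from the panoptic map alone; a small NumPy check of that decoding, with made-up ids and label_divisor = 1000:

```python
import numpy as np

label_divisor = 1000
panoptic = np.array([[10, 10, 13007],
                     [10, 13007, 13008]])  # stuff id 10 (sky), two car instances

ins_mask = panoptic > label_divisor
pan_to_sem = panoptic.copy()
pan_to_sem[ins_mask] //= label_divisor     # [[10, 10, 13], [10, 13, 13]]

pan_to_ins = panoptic.copy()
pan_to_ins[~ins_mask] = 0                  # stuff pixels carry instance id 0
```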
+
+import os
+import time
+from collections import deque
+import shutil
+
+import paddle
+import paddle.nn.functional as F
+from paddleseg.utils import TimeAverager, calculate_eta, resume, logger
+
+from core.val import evaluate
+
+
+def check_logits_losses(logits_list, losses):
+    len_logits = len(logits_list)
+    len_losses = len(losses['types'])
+    if len_logits != len_losses:
+        raise RuntimeError(
+            'The length of logits_list should equal to the types of loss config: {} != {}.'
+            .format(len_logits, len_losses))
+
+
+def loss_computation(logits_list, semantic, semantic_weights, center,
+                     center_weights, offset, offset_weights, losses):
+    # semantic loss
+    semantic_loss = losses['types'][0](logits_list[0], semantic,
+                                       semantic_weights)
+    semantic_loss = semantic_loss * losses['coef'][0]
+
+    # center loss
+    center_loss = losses['types'][1](logits_list[1], center)
+    center_weights = (center_weights.unsqueeze(1)).expand_as(center_loss)
+    center_loss = center_loss * center_weights
+    if center_loss.sum() > 0:
+        center_loss = center_loss.sum() / center_weights.sum()
+    else:
+        center_loss = center_loss.sum() * 0
+    center_loss = center_loss * losses['coef'][1]
+
+    # offset loss
+    offset_loss = losses['types'][2](logits_list[2], offset)
+    offset_weights = (offset_weights.unsqueeze(1)).expand_as(offset_loss)
+    offset_loss = offset_loss * offset_weights
+    if offset_weights.sum() > 0:
+        offset_loss = offset_loss.sum() / offset_weights.sum()
+    else:
+        offset_loss = offset_loss.sum() * 0
+    offset_loss = offset_loss * losses['coef'][2]
+
+    loss_list = [semantic_loss, center_loss, offset_loss]
+
+    return loss_list
+
+
+def train(model,
+          train_dataset,
+          val_dataset=None,
+          optimizer=None,
+          save_dir='output',
+          iters=10000,
+          batch_size=2,
+          resume_model=None,
+          save_interval=1000,
+          log_iters=10,
+          num_workers=0,
+          use_vdl=False,
+          losses=None,
+          keep_checkpoint_max=5,
+          threshold=0.1,
+          nms_kernel=7,
+          top_k=200):
+    """
+    Launch training.
+
+    Args:
+        model (nn.Layer): A semantic segmentation model.
+        train_dataset (paddle.io.Dataset): Used to read and process training datasets.
+        val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets.
+        optimizer (paddle.optimizer.Optimizer): The optimizer.
+        save_dir (str, optional): The directory for saving the model snapshot. Default: 'output'.
+        iters (int, optional): How many iters to train the model. Default: 10000.
+        batch_size (int, optional): Mini batch size of one gpu or cpu. Default: 2.
+        resume_model (str, optional): The path of resume model.
+        save_interval (int, optional): How many iters to save a model snapshot once during training. Default: 1000.
+        log_iters (int, optional): Display logging information at every log_iters. Default: 10.
+        num_workers (int, optional): Num workers for data loader. Default: 0.
+        use_vdl (bool, optional): Whether to record the data to VisualDL during training. Default: False.
+        losses (dict): A dict including 'types' and 'coef'. The length of coef should equal to 1 or len(losses['types']).
+            The 'types' item is a list of object of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficient.
+        keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5.
+        threshold (float, optional): A Float, threshold applied to center heatmap score. Default: 0.1.
+        nms_kernel (int, optional): An Integer, NMS max pooling kernel size. Default: 7.
+        top_k (int, optional): An Integer, top k centers to keep. Default: 200.
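Note that `loss_computation` above normalizes the masked center and offset losses by the weight mass rather than by the pixel count, so ignored pixels do not dilute the loss. A minimal sketch of that step with made-up values:

```python
import paddle

per_pixel = paddle.to_tensor([[0.2, 0.0],
                              [0.4, 0.1]])  # e.g. MSELoss(reduction='none') output
weights = paddle.to_tensor([[1.0, 0.0],
                            [1.0, 0.0]])    # zero where the target is ignored

masked = per_pixel * weights
if masked.sum() > 0:
    loss = masked.sum() / weights.sum()     # (0.2 + 0.4) / 2 = 0.3
else:
    loss = masked.sum() * 0                 # keeps the graph, contributes nothing
```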
+ """ + model.train() + nranks = paddle.distributed.ParallelEnv().nranks + local_rank = paddle.distributed.ParallelEnv().local_rank + + start_iter = 0 + if resume_model is not None: + start_iter = resume(model, optimizer, resume_model) + + if not os.path.isdir(save_dir): + if os.path.exists(save_dir): + os.remove(save_dir) + os.makedirs(save_dir) + + if nranks > 1: + # Initialize parallel environment if not done. + if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized( + ): + paddle.distributed.init_parallel_env() + ddp_model = paddle.DataParallel(model) + else: + ddp_model = paddle.DataParallel(model) + + batch_sampler = paddle.io.DistributedBatchSampler( + train_dataset, batch_size=batch_size, shuffle=True, drop_last=True) + + loader = paddle.io.DataLoader( + train_dataset, + batch_sampler=batch_sampler, + num_workers=num_workers, + return_list=True, + ) + + if use_vdl: + from visualdl import LogWriter + log_writer = LogWriter(save_dir) + + avg_loss = 0.0 + avg_loss_list = [] + iters_per_epoch = len(batch_sampler) + best_pq = -1.0 + best_model_iter = -1 + reader_cost_averager = TimeAverager() + batch_cost_averager = TimeAverager() + save_models = deque() + batch_start = time.time() + + iter = start_iter + while iter < iters: + for data in loader: + iter += 1 + if iter > iters: + break + reader_cost_averager.record(time.time() - batch_start) + images = data[0] + semantic = data[1] + semantic_weights = data[2] + center = data[3] + center_weights = data[4] + offset = data[5] + offset_weights = data[6] + foreground = data[7] + + if nranks > 1: + logits_list = ddp_model(images) + else: + logits_list = model(images) + + loss_list = loss_computation( + logits_list=logits_list, + losses=losses, + semantic=semantic, + semantic_weights=semantic_weights, + center=center, + center_weights=center_weights, + offset=offset, + offset_weights=offset_weights) + loss = sum(loss_list) + loss.backward() + + optimizer.step() + lr = optimizer.get_lr() + if isinstance(optimizer._learning_rate, + paddle.optimizer.lr.LRScheduler): + optimizer._learning_rate.step() + model.clear_gradients() + avg_loss += loss.numpy()[0] + if not avg_loss_list: + avg_loss_list = [l.numpy() for l in loss_list] + else: + for i in range(len(loss_list)): + avg_loss_list[i] += loss_list[i].numpy() + batch_cost_averager.record( + time.time() - batch_start, num_samples=batch_size) + + if (iter) % log_iters == 0 and local_rank == 0: + avg_loss /= log_iters + avg_loss_list = [l[0] / log_iters for l in avg_loss_list] + remain_iters = iters - iter + avg_train_batch_cost = batch_cost_averager.get_average() + avg_train_reader_cost = reader_cost_averager.get_average() + eta = calculate_eta(remain_iters, avg_train_batch_cost) + logger.info( + "[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}" + .format((iter - 1) // iters_per_epoch + 1, iter, iters, + avg_loss, lr, avg_train_batch_cost, + avg_train_reader_cost, + batch_cost_averager.get_ips_average(), eta)) + logger.info( + "[LOSS] loss={:.4f}, semantic_loss={:.4f}, center_loss={:.4f}, offset_loss={:.4f}" + .format(avg_loss, avg_loss_list[0], avg_loss_list[1], + avg_loss_list[2])) + if use_vdl: + log_writer.add_scalar('Train/loss', avg_loss, iter) + # Record all losses if there are more than 2 losses. 
+ if len(avg_loss_list) > 1: + avg_loss_dict = {} + for i, value in enumerate(avg_loss_list): + avg_loss_dict['loss_' + str(i)] = value + for key, value in avg_loss_dict.items(): + log_tag = 'Train/' + key + log_writer.add_scalar(log_tag, value, iter) + + log_writer.add_scalar('Train/lr', lr, iter) + log_writer.add_scalar('Train/batch_cost', + avg_train_batch_cost, iter) + log_writer.add_scalar('Train/reader_cost', + avg_train_reader_cost, iter) + + avg_loss = 0.0 + avg_loss_list = [] + reader_cost_averager.reset() + batch_cost_averager.reset() + + # save model + if (iter % save_interval == 0 or iter == iters) and local_rank == 0: + current_save_dir = os.path.join(save_dir, + "iter_{}".format(iter)) + if not os.path.isdir(current_save_dir): + os.makedirs(current_save_dir) + paddle.save(model.state_dict(), + os.path.join(current_save_dir, 'model.pdparams')) + paddle.save(optimizer.state_dict(), + os.path.join(current_save_dir, 'model.pdopt')) + save_models.append(current_save_dir) + if len(save_models) > keep_checkpoint_max > 0: + model_to_remove = save_models.popleft() + shutil.rmtree(model_to_remove) + + # eval model + if (iter % save_interval == 0 or iter == iters) and ( + val_dataset is + not None) and local_rank == 0 and iter > iters // 2: + num_workers = 1 if num_workers > 0 else 0 + panoptic_results, semantic_results, instance_results = evaluate( + model, + val_dataset, + threshold=threshold, + nms_kernel=nms_kernel, + top_k=top_k, + num_workers=num_workers, + print_detail=False) + pq = panoptic_results['pan_seg']['All']['pq'] + miou = semantic_results['sem_seg']['mIoU'] + map = instance_results['ins_seg']['mAP'] + map50 = instance_results['ins_seg']['mAP50'] + logger.info( + "[EVAL] PQ: {:.4f}, mIoU: {:.4f}, mAP: {:.4f}, mAP50: {:.4f}" + .format(pq, miou, map, map50)) + model.train() + + # save best model and add evaluate results to vdl + if (iter % save_interval == 0 or iter == iters) and local_rank == 0: + if val_dataset is not None and iter > iters // 2: + if pq > best_pq: + best_pq = pq + best_model_iter = iter + best_model_dir = os.path.join(save_dir, "best_model") + paddle.save( + model.state_dict(), + os.path.join(best_model_dir, 'model.pdparams')) + logger.info( + '[EVAL] The model with the best validation pq ({:.4f}) was saved at iter {}.' + .format(best_pq, best_model_iter)) + + if use_vdl: + log_writer.add_scalar('Evaluate/PQ', pq, iter) + log_writer.add_scalar('Evaluate/mIoU', miou, iter) + log_writer.add_scalar('Evaluate/mAP', map, iter) + log_writer.add_scalar('Evaluate/mAP50', map50, iter) + batch_start = time.time() + + # Calculate flops. + if local_rank == 0: + + def count_syncbn(m, x, y): + x = x[0] + nelements = x.numel() + m.total_ops += int(2 * nelements) + + _, c, h, w = images.shape + flops = paddle.flops( + model, [1, c, h, w], + custom_ops={paddle.nn.SyncBatchNorm: count_syncbn}) + + # Sleep for half a second to let dataloader release resources. + time.sleep(0.5) + if use_vdl: + log_writer.close() diff --git a/contrib/PanopticDeepLab/core/val.py b/contrib/PanopticDeepLab/core/val.py new file mode 100644 index 0000000000..472484f126 --- /dev/null +++ b/contrib/PanopticDeepLab/core/val.py @@ -0,0 +1,181 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from collections import OrderedDict
+
+import numpy as np
+import time
+import paddle
+import paddle.nn.functional as F
+from paddleseg.utils import TimeAverager, calculate_eta, logger, progbar
+
+from utils.evaluation import SemanticEvaluator, InstanceEvaluator, PanopticEvaluator
+from core import infer
+
+np.set_printoptions(suppress=True)
+
+
+def evaluate(model,
+             eval_dataset,
+             threshold=0.1,
+             nms_kernel=7,
+             top_k=200,
+             num_workers=0,
+             print_detail=True):
+    """
+    Launch evaluation.
+
+    Args:
+        model (nn.Layer): A semantic segmentation model.
+        eval_dataset (paddle.io.Dataset): Used to read and process validation datasets.
+        threshold (float, optional): Threshold applied to center heatmap score. Default: 0.1.
+        nms_kernel (int, optional): NMS max pooling kernel size. Default: 7.
+        top_k (int, optional): Top k centers to keep. Default: 200.
+        num_workers (int, optional): Num workers for data loader. Default: 0.
+        print_detail (bool, optional): Whether to print detailed information about the evaluation process. Default: True.
+
+    Returns:
+        dict: Panoptic evaluation results which includes PQ, RQ, SQ for all, each class, Things and stuff.
+        dict: Semantic evaluation results which includes mIoU, fwIoU, mACC and pACC.
+        dict: Instance evaluation results which includes mAP and mAP50, and also AP and AP50 for each class.
+
+    """
+    model.eval()
+    nranks = paddle.distributed.ParallelEnv().nranks
+    local_rank = paddle.distributed.ParallelEnv().local_rank
+    if nranks > 1:
+        # Initialize parallel environment if not done.
+        if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
+        ):
+            paddle.distributed.init_parallel_env()
+    batch_sampler = paddle.io.DistributedBatchSampler(
+        eval_dataset, batch_size=1, shuffle=False, drop_last=False)
+    loader = paddle.io.DataLoader(
+        eval_dataset,
+        batch_sampler=batch_sampler,
+        num_workers=num_workers,
+        return_list=True,
+    )
+
+    total_iters = len(loader)
+    semantic_metric = SemanticEvaluator(
+        eval_dataset.num_classes, ignore_index=eval_dataset.ignore_index)
+    instance_metric_AP50 = InstanceEvaluator(
+        eval_dataset.num_classes,
+        overlaps=0.5,
+        thing_list=eval_dataset.thing_list)
+    instance_metric_AP = InstanceEvaluator(
+        eval_dataset.num_classes,
+        overlaps=list(np.arange(0.5, 1.0, 0.05)),
+        thing_list=eval_dataset.thing_list)
+    panoptic_metric = PanopticEvaluator(
+        num_classes=eval_dataset.num_classes,
+        thing_list=eval_dataset.thing_list,
+        ignore_index=eval_dataset.ignore_index,
+        label_divisor=eval_dataset.label_divisor)
+
+    if print_detail:
+        logger.info(
+            "Start evaluating (total_samples={}, total_iters={})...".format(
+                len(eval_dataset), total_iters))
+    progbar_val = progbar.Progbar(target=total_iters, verbose=1)
+    reader_cost_averager = TimeAverager()
+    batch_cost_averager = TimeAverager()
+    batch_start = time.time()
+    with paddle.no_grad():
+        for iter, data in enumerate(loader):
+            reader_cost_averager.record(time.time() - batch_start)
+            im = data[0]
+            raw_semantic_label = data[1]  # raw semantic label.
+ raw_instance_label = data[2] + raw_panoptic_label = data[3] + ori_shape = raw_semantic_label.shape[-2:] + + semantic, semantic_softmax, instance, panoptic, ctr_hmp = infer.inference( + model=model, + im=im, + transforms=eval_dataset.transforms.transforms, + thing_list=eval_dataset.thing_list, + label_divisor=eval_dataset.label_divisor, + stuff_area=eval_dataset.stuff_area, + ignore_index=eval_dataset.ignore_index, + threshold=threshold, + nms_kernel=nms_kernel, + top_k=top_k, + ori_shape=ori_shape) + semantic = semantic.squeeze().numpy() + semantic_softmax = semantic_softmax.squeeze().numpy() + instance = instance.squeeze().numpy() + panoptic = panoptic.squeeze().numpy() + ctr_hmp = ctr_hmp.squeeze().numpy() + raw_semantic_label = raw_semantic_label.squeeze().numpy() + raw_instance_label = raw_instance_label.squeeze().numpy() + raw_panoptic_label = raw_panoptic_label.squeeze().numpy() + + # update metric for semantic, instance, panoptic + semantic_metric.update(semantic, raw_semantic_label) + + gts = instance_metric_AP.convert_gt_map(raw_semantic_label, + raw_instance_label) + # print([i[0] for i in gts]) + preds = instance_metric_AP.convert_pred_map(semantic_softmax, + panoptic) + # print([(i[0], i[1]) for i in preds ]) + ignore_mask = raw_semantic_label == eval_dataset.ignore_index + instance_metric_AP.update(preds, gts, ignore_mask=ignore_mask) + instance_metric_AP50.update(preds, gts, ignore_mask=ignore_mask) + + panoptic_metric.update(panoptic, raw_panoptic_label) + + batch_cost_averager.record( + time.time() - batch_start, num_samples=len(im)) + batch_cost = batch_cost_averager.get_average() + reader_cost = reader_cost_averager.get_average() + + if local_rank == 0: + progbar_val.update(iter + 1, [('batch_cost', batch_cost), + ('reader cost', reader_cost)]) + reader_cost_averager.reset() + batch_cost_averager.reset() + batch_start = time.time() + + semantic_results = semantic_metric.evaluate() + panoptic_results = panoptic_metric.evaluate() + instance_results = OrderedDict() + ins_ap = instance_metric_AP.evaluate() + ins_ap50 = instance_metric_AP50.evaluate() + instance_results['ins_seg'] = OrderedDict() + instance_results['ins_seg']['mAP'] = ins_ap['ins_seg']['mAP'] + instance_results['ins_seg']['AP'] = ins_ap['ins_seg']['AP'] + instance_results['ins_seg']['mAP50'] = ins_ap50['ins_seg']['mAP'] + instance_results['ins_seg']['AP50'] = ins_ap50['ins_seg']['AP'] + + if print_detail: + logger.info(panoptic_results) + print() + logger.info(semantic_results) + print() + logger.info(instance_results) + print() + + pq = panoptic_results['pan_seg']['All']['pq'] + miou = semantic_results['sem_seg']['mIoU'] + map = instance_results['ins_seg']['mAP'] + map50 = instance_results['ins_seg']['mAP50'] + logger.info( + "PQ: {:.4f}, mIoU: {:.4f}, mAP: {:.4f}, mAP50: {:.4f}".format( + pq, miou, map, map50)) + + return panoptic_results, semantic_results, instance_results diff --git a/contrib/PanopticDeepLab/datasets/__init__.py b/contrib/PanopticDeepLab/datasets/__init__.py new file mode 100644 index 0000000000..fefa6a07ea --- /dev/null +++ b/contrib/PanopticDeepLab/datasets/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
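To read the numbers `evaluate` reports: the PanopticEvaluator follows the standard panoptic quality definition, PQ = SQ x RQ over segments matched at IoU > 0.5. Worked with made-up counts:

```python
# Two matched segments (IoU 0.8 and 0.6), one false positive, one false negative.
iou_sum, tp, fp, fn = 0.8 + 0.6, 2, 1, 1

sq = iou_sum / tp                     # segmentation quality: 0.70
rq = tp / (tp + 0.5 * fp + 0.5 * fn)  # recognition quality:  0.666...
pq = sq * rq                          # panoptic quality:     0.466...
```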
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .cityscapes_panoptic import CityscapesPanoptic diff --git a/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py b/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py new file mode 100644 index 0000000000..c8a8f49d2d --- /dev/null +++ b/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py @@ -0,0 +1,194 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +import glob + +import numpy as np +import paddle +from paddleseg.cvlibs import manager +from paddleseg.transforms import Compose +import PIL.Image as Image + +from transforms import PanopticTargetGenerator, SemanticTargetGenerator, InstanceTargetGenerator, RawPanopticTargetGenerator + + +@manager.DATASETS.add_component +class CityscapesPanoptic(paddle.io.Dataset): + """ + Cityscapes dataset `https://www.cityscapes-dataset.com/`. + The folder structure is as follow: + + cityscapes/ + |--gtFine/ + | |--train/ + | | |--aachen/ + | | | |--*_color.png, *_instanceIds.png, *_labelIds.png, *_polygons.json, + | | | |--*_labelTrainIds.png + | | | |--... + | |--val/ + | |--test/ + | |--cityscapes_panoptic_train_trainId.json + | |--cityscapes_panoptic_train_trainId/ + | | |-- *_panoptic.png + | |--cityscapes_panoptic_val_trainId.json + | |--cityscapes_panoptic_val_trainId/ + | | |-- *_panoptic.png + |--leftImg8bit/ + | |--train/ + | |--val/ + | |--test/ + + Args: + transforms (list): Transforms for image. + dataset_root (str): Cityscapes dataset directory. + mode (str, optional): Which part of dataset to use. it is one of ('train', 'val'). Default: 'train'. + ignore_stuff_in_offset (bool, optional): Whether to ignore stuff region when training the offset branch. Default: False. + small_instance_area (int, optional): Instance which area less than given value is considered small. Default: 0. + small_instance_weight (int, optional): The loss weight for small instance. Default: 1. + stuff_area (int, optional): An Integer, remove stuff whose area is less tan stuff_area. Default: 2048. 
+ """ + + def __init__(self, + transforms, + dataset_root, + mode='train', + ignore_stuff_in_offset=False, + small_instance_area=0, + small_instance_weight=1, + stuff_area=2048): + self.dataset_root = dataset_root + self.transforms = Compose(transforms) + self.file_list = list() + self.ins_list = [] + mode = mode.lower() + self.mode = mode + self.num_classes = 19 + self.ignore_index = 255 + self.thing_list = [11, 12, 13, 14, 15, 16, 17, 18] + self.label_divisor = 1000 + self.stuff_area = stuff_area + + if mode not in ['train', 'val']: + raise ValueError( + "mode should be 'train' or 'val' , but got {}.".format(mode)) + + if self.transforms is None: + raise ValueError("`transforms` is necessary, but it is None.") + + img_dir = os.path.join(self.dataset_root, 'leftImg8bit') + label_dir = os.path.join(self.dataset_root, 'gtFine') + if self.dataset_root is None or not os.path.isdir( + self.dataset_root) or not os.path.isdir( + img_dir) or not os.path.isdir(label_dir): + raise ValueError( + "The dataset is not Found or the folder structure is nonconfoumance." + ) + json_filename = os.path.join( + self.dataset_root, 'gtFine', + 'cityscapes_panoptic_{}_trainId.json'.format(mode)) + dataset = json.load(open(json_filename)) + img_files = [] + label_files = [] + for img in dataset['images']: + img_file_name = img['file_name'] + img_files.append( + os.path.join(self.dataset_root, 'leftImg8bit', mode, + img_file_name.split('_')[0], + img_file_name.replace('_gtFine', ''))) + for ann in dataset['annotations']: + ann_file_name = ann['file_name'] + label_files.append( + os.path.join(self.dataset_root, 'gtFine', + 'cityscapes_panoptic_{}_trainId'.format(mode), + ann_file_name)) + self.ins_list.append(ann['segments_info']) + + self.file_list = [[ + img_path, label_path + ] for img_path, label_path in zip(img_files, label_files)] + + self.target_transform = PanopticTargetGenerator( + self.ignore_index, + self.rgb2id, + self.thing_list, + sigma=8, + ignore_stuff_in_offset=ignore_stuff_in_offset, + small_instance_area=small_instance_area, + small_instance_weight=small_instance_weight) + + self.raw_semantic_generator = SemanticTargetGenerator( + ignore_index=self.ignore_index, rgb2id=self.rgb2id) + self.raw_instance_generator = InstanceTargetGenerator(self.rgb2id) + self.raw_panoptic_generator = RawPanopticTargetGenerator( + ignore_index=self.ignore_index, + rgb2id=self.rgb2id, + label_divisor=self.label_divisor) + + @staticmethod + def rgb2id(color): + """Converts the color to panoptic label. + Color is created by `color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]`. + Args: + color: Ndarray or a tuple, color encoded image. + Returns: + Panoptic label. 
+ """ + if isinstance(color, np.ndarray) and len(color.shape) == 3: + if color.dtype == np.uint8: + color = color.astype(np.int32) + return color[:, :, + 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] + return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) + + def __getitem__(self, idx): + image_path, label_path = self.file_list[idx] + dataset_dict = {} + im, label = self.transforms(im=image_path, label=label_path) + label_dict = self.target_transform(label, self.ins_list[idx]) + for key in label_dict.keys(): + dataset_dict[key] = label_dict[key] + dataset_dict['image'] = im + if self.mode == 'val': + raw_label = np.asarray(Image.open(label_path)) + dataset_dict['raw_semantic_label'] = self.raw_semantic_generator( + raw_label, self.ins_list[idx])['semantic'] + dataset_dict['raw_instance_label'] = self.raw_instance_generator( + raw_label)['instance'] + dataset_dict['raw_panoptic_label'] = self.raw_panoptic_generator( + raw_label, self.ins_list[idx])['panoptic'] + + image = np.array(dataset_dict['image']) + semantic = np.array(dataset_dict['semantic']) + semantic_weights = np.array(dataset_dict['semantic_weights']) + center = np.array(dataset_dict['center']) + center_weights = np.array(dataset_dict['center_weights']) + offset = np.array(dataset_dict['offset']) + offset_weights = np.array(dataset_dict['offset_weights']) + foreground = np.array(dataset_dict['foreground']) + if self.mode == 'train': + return image, semantic, semantic_weights, center, center_weights, offset, offset_weights, foreground + elif self.mode == 'val': + raw_semantic_label = np.array(dataset_dict['raw_semantic_label']) + raw_instance_label = np.array(dataset_dict['raw_instance_label']) + raw_panoptic_label = np.array(dataset_dict['raw_panoptic_label']) + return image, raw_semantic_label, raw_instance_label, raw_panoptic_label + else: + raise ValueError( + '{} is not surpported, please set it one of ("train", "val")'. + format(self.mode)) + + def __len__(self): + return len(self.file_list) diff --git a/contrib/PanopticDeepLab/models/__init__.py b/contrib/PanopticDeepLab/models/__init__.py new file mode 100644 index 0000000000..28dda451ab --- /dev/null +++ b/contrib/PanopticDeepLab/models/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .losses import * + +from .panoptic_deeplab import PanopticDeepLab diff --git a/contrib/PanopticDeepLab/models/losses/__init__.py b/contrib/PanopticDeepLab/models/losses/__init__.py new file mode 100644 index 0000000000..e4d5cc9e76 --- /dev/null +++ b/contrib/PanopticDeepLab/models/losses/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .cross_entropy_loss import CrossEntropyLoss +from .mean_square_error_loss import MSELoss +from .l1_loss import L1Loss diff --git a/contrib/PanopticDeepLab/models/losses/cross_entropy_loss.py b/contrib/PanopticDeepLab/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000000..11b57337d3 --- /dev/null +++ b/contrib/PanopticDeepLab/models/losses/cross_entropy_loss.py @@ -0,0 +1,77 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +from paddle import nn +import paddle.nn.functional as F + +from paddleseg.cvlibs import manager + +# Repetition in manager.LOSSES, remove before adding. +manager.LOSSES.components_dict.pop('CrossEntropyLoss') + + +@manager.LOSSES.add_component +class CrossEntropyLoss(nn.Layer): + """ + Implements the cross entropy loss function. + + Args: + ignore_index (int64): Specifies a target value that is ignored + and does not contribute to the input gradient. Default ``255``. + """ + + def __init__(self, ignore_index=255, top_k_percent_pixels=1.0): + super(CrossEntropyLoss, self).__init__() + self.ignore_index = ignore_index + self.top_k_percent_pixels = top_k_percent_pixels + self.EPS = 1e-5 + + def forward(self, logit, label, semantic_weights): + """ + Forward computation. + + Args: + logit (Tensor): Logit tensor, the data type is float32, float64. Shape is + (N, C), where C is number of classes, and if shape is more than 2D, this + is (N, C, D1, D2,..., Dk), k >= 1. + label (Tensor): Label tensor, the data type is int64. Shape is (N), where each + value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is + (N, D1, D2,..., Dk), k >= 1. 
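These loss classes are wired into `train` through its `losses` dict, one entry per output head (semantic, center, offset). A sketch of that configuration: the per-pixel reductions are required so `loss_computation` can apply its own weighting, while the 1 : 200 : 0.01 coefficients follow the Panoptic-DeepLab paper and are an assumption here:

```python
from models.losses import CrossEntropyLoss, MSELoss, L1Loss

losses = {
    'types': [
        CrossEntropyLoss(),         # semantic head
        MSELoss(reduction='none'),  # center heatmap head
        L1Loss(reduction='none'),   # offset head
    ],
    'coef': [1, 200, 0.01],
}
```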
+        """
+        if len(label.shape) != len(logit.shape):
+            label = paddle.unsqueeze(label, 1)
+
+        logit = paddle.transpose(logit, [0, 2, 3, 1])
+        label = paddle.transpose(label, [0, 2, 3, 1])
+        loss = F.softmax_with_cross_entropy(
+            logit, label, ignore_index=self.ignore_index, axis=-1)
+
+        mask = label != self.ignore_index
+        mask = paddle.cast(mask, 'float32')
+        loss = loss * mask
+        if semantic_weights is not None:
+            loss = loss.squeeze(-1)
+            loss = loss * semantic_weights
+
+        label.stop_gradient = True
+        mask.stop_gradient = True
+        if self.top_k_percent_pixels == 1.0:
+            avg_loss = paddle.mean(loss) / (paddle.mean(mask) + self.EPS)
+            return avg_loss
+
+        loss = loss.reshape((-1, ))
+        top_k_pixels = int(self.top_k_percent_pixels * loss.numel())
+        loss, _ = paddle.topk(loss, top_k_pixels)
+        return loss.mean()
diff --git a/contrib/PanopticDeepLab/models/losses/l1_loss.py b/contrib/PanopticDeepLab/models/losses/l1_loss.py
new file mode 100644
index 0000000000..5fbbae2880
--- /dev/null
+++ b/contrib/PanopticDeepLab/models/losses/l1_loss.py
@@ -0,0 +1,72 @@
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+from paddle import nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+
+
+@manager.LOSSES.add_component
+class L1Loss(nn.L1Loss):
+    r"""
+    This interface is used to construct a callable object of the ``L1Loss`` class.
+    The L1Loss layer calculates the L1 Loss of ``input`` and ``label`` as follows.
+    If `reduction` set to ``'none'``, the loss is:
+    .. math::
+        Out = \lvert input - label \rvert
+    If `reduction` set to ``'mean'``, the loss is:
+    .. math::
+        Out = MEAN(\lvert input - label \rvert)
+    If `reduction` set to ``'sum'``, the loss is:
+    .. math::
+        Out = SUM(\lvert input - label \rvert)
+
+    Parameters:
+        reduction (str, optional): Indicate the reduction to apply to the loss,
+            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
+            If `reduction` is ``'none'``, the unreduced loss is returned;
+            If `reduction` is ``'mean'``, the reduced mean loss is returned.
+            If `reduction` is ``'sum'``, the reduced sum loss is returned.
+            Default is ``'mean'``.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+    Shape:
+        input (Tensor): The input tensor. The shape is [N, *], where N is batch size and `*` means any number of additional dimensions. Its data type should be float32, float64, int32, int64.
+        label (Tensor): label. The shape is [N, *], same shape as ``input``. Its data type should be float32, float64, int32, int64.
+        output (Tensor): The L1 Loss of ``input`` and ``label``.
+            If `reduction` is ``'none'``, the shape of the output loss is [N, *], the same as ``input``.
+            If `reduction` is ``'mean'`` or ``'sum'``, the shape of the output loss is [1].
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import numpy as np
+
+            input_data = np.array([[1.5, 0.8], [0.2, 1.3]]).astype("float32")
+            label_data = np.array([[1.7, 1], [0.4, 0.5]]).astype("float32")
+            input = paddle.to_tensor(input_data)
+            label = paddle.to_tensor(label_data)
+
+            l1_loss = paddle.nn.L1Loss()
+            output = l1_loss(input, label)
+            print(output.numpy())
+            # [0.35]
+
+            l1_loss = paddle.nn.L1Loss(reduction='sum')
+            output = l1_loss(input, label)
+            print(output.numpy())
+            # [1.4]
+
+            l1_loss = paddle.nn.L1Loss(reduction='none')
+            output = l1_loss(input, label)
+            print(output)
+            # [[0.20000005 0.19999999]
+            #  [0.2        0.79999995]]
+    """
+
+    def __init__(self, reduction='mean', ignore_index=255):
+        super().__init__(reduction=reduction)
diff --git a/contrib/PanopticDeepLab/models/losses/mean_square_error_loss.py b/contrib/PanopticDeepLab/models/losses/mean_square_error_loss.py
new file mode 100644
index 0000000000..fa66c9c5f3
--- /dev/null
+++ b/contrib/PanopticDeepLab/models/losses/mean_square_error_loss.py
@@ -0,0 +1,60 @@
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+from paddle import nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+
+
+@manager.LOSSES.add_component
+class MSELoss(nn.MSELoss):
+    r"""
+    **Mean Square Error Loss**
+    Computes the mean square error (squared L2 norm) of given input and label.
+    If :attr:`reduction` is set to ``'none'``, loss is calculated as:
+    .. math::
+        Out = (input - label)^2
+    If :attr:`reduction` is set to ``'mean'``, loss is calculated as:
+    .. math::
+        Out = \operatorname{mean}((input - label)^2)
+    If :attr:`reduction` is set to ``'sum'``, loss is calculated as:
+    .. math::
+        Out = \operatorname{sum}((input - label)^2)
+    where `input` and `label` are `float32` tensors of the same shape.
+
+    Parameters:
+        reduction (string, optional): The reduction method for the output,
+            could be 'none' | 'mean' | 'sum'.
+            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned.
+            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
+            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
+            Default is ``'mean'``.
+    Shape:
+        input (Tensor): Input tensor, the data type is float32 or float64
+        label (Tensor): Label tensor, the data type is float32 or float64
+        output (Tensor): output tensor storing the MSE loss of input and label, the data type is same as input.
+    Examples:
+        .. code-block:: python
+
+            import numpy as np
+            import paddle
+
+            input_data = np.array([1.5]).astype("float32")
+            label_data = np.array([1.7]).astype("float32")
+            mse_loss = paddle.nn.loss.MSELoss()
+            input = paddle.to_tensor(input_data)
+            label = paddle.to_tensor(label_data)
+            output = mse_loss(input, label)
+            print(output)
+            # [0.04000002]
+    """
+
+    def __init__(self, reduction='mean', ignore_index=255):
+        super().__init__(reduction=reduction)
diff --git a/contrib/PanopticDeepLab/models/panoptic_deeplab.py b/contrib/PanopticDeepLab/models/panoptic_deeplab.py
new file mode 100644
index 0000000000..923340bf32
--- /dev/null
+++ b/contrib/PanopticDeepLab/models/panoptic_deeplab.py
@@ -0,0 +1,339 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import OrderedDict
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+from paddleseg.models import layers
+from paddleseg.utils import utils
+
+__all__ = ['PanopticDeepLab']
+
+
+@manager.MODELS.add_component
+class PanopticDeepLab(nn.Layer):
+    """
+    The Panoptic-DeepLab implementation based on PaddlePaddle.
+
+    The original article refers to
+    Bowen Cheng, et al. "Panoptic-DeepLab: A Simple, Strong, and Fast Baseline for Bottom-Up Panoptic Segmentation"
+    (https://arxiv.org/abs/1911.10194)
+
+    Args:
+        num_classes (int): The unique number of target classes.
+        backbone (paddle.nn.Layer): Backbone network, currently support Resnet50_vd/Resnet101_vd/Xception65.
+        backbone_indices (tuple, optional): The indices of backbone output feature maps. The last index selects
+            the input of the ASPP module; the remaining indices select the low-level features fused in the
+            decoders, ordered from the deepest skip to the shallowest. Default: (2, 1, 0, 3).
+        aspp_ratios (tuple, optional): The dilation rates used in the ASPP module.
+            If output_stride=16, aspp_ratios should be set as (1, 6, 12, 18).
+            If output_stride=8, aspp_ratios is (1, 12, 24, 36).
+            Default: (1, 6, 12, 18).
+        aspp_out_channels (int, optional): The output channels of ASPP module. Default: 256.
+        align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+            e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+        pretrained (str, optional): The path or url of pretrained model. Default: None.
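+
+    Example:
+        A minimal construction sketch mirroring the smoke test at the bottom of
+        this file (the channel sizes are illustrative, not prescriptive):
+
+        .. code-block:: python
+
+            import paddle
+            from paddleseg.models.backbones import ResNet50_vd
+
+            backbone = ResNet50_vd(output_stride=32)
+            model = PanopticDeepLab(
+                num_classes=19,
+                backbone=backbone,
+                backbone_indices=(2, 1, 0, 3),
+                low_level_channels_projects=[128, 64, 32],
+                instance_aspp_out_channels=256,
+                instance_decoder_channels=128,
+                instance_low_level_channels_projects=[64, 32, 16],
+                instance_num_classes=[1, 2],
+                instance_head_channels=32,
+                instance_class_key=['center', 'offset'])
+            # forward returns [semantic, center, offset] logits at input resolution
+            semantic, center, offset = model(paddle.rand((1, 3, 512, 1024)))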
+    """
+
+    def __init__(self,
+                 num_classes,
+                 backbone,
+                 backbone_indices=(2, 1, 0, 3),
+                 aspp_ratios=(1, 6, 12, 18),
+                 aspp_out_channels=256,
+                 decoder_channels=256,
+                 low_level_channels_projects=None,
+                 align_corners=False,
+                 pretrained=None,
+                 **kwargs):
+        super().__init__()
+
+        self.backbone = backbone
+        backbone_channels = [
+            backbone.feat_channels[i] for i in backbone_indices
+        ]
+
+        self.head = PanopticDeepLabHead(
+            num_classes, backbone_indices, backbone_channels, aspp_ratios,
+            aspp_out_channels, decoder_channels, align_corners,
+            low_level_channels_projects, **kwargs)
+
+        self.align_corners = align_corners
+        self.pretrained = pretrained
+        self.init_weight()
+
+    def _upsample_predictions(self, pred, input_shape):
+        """Upsamples the final predictions, with special handling for offset.
+
+        Args:
+            pred (dict): stores all output of the segmentation model.
+            input_shape (tuple): spatial resolution of the desired shape.
+        Returns:
+            result (OrderedDict): upsampled dictionary.
+        """
+        # Override upsample method to correctly handle `offset`
+        result = OrderedDict()
+        for key in pred.keys():
+            out = F.interpolate(
+                pred[key],
+                size=input_shape,
+                mode='bilinear',
+                align_corners=self.align_corners)
+            if 'offset' in key:
+                if input_shape[0] % 2 == 0:
+                    scale = input_shape[0] // pred[key].shape[2]
+                else:
+                    scale = (input_shape[0] - 1) // (pred[key].shape[2] - 1)
+                out *= scale
+            result[key] = out
+        return result
+
+    def forward(self, x):
+        feat_list = self.backbone(x)
+        logit_dict = self.head(feat_list)
+        results = self._upsample_predictions(logit_dict, x.shape[-2:])
+
+        logit_list = [
+            results['semantic'], results['center'], results['offset']
+        ]
+        return logit_list
+
+    def init_weight(self):
+        if self.pretrained is not None:
+            utils.load_entire_model(self, self.pretrained)
+
+
+class PanopticDeepLabHead(nn.Layer):
+    """
+    The Panoptic-DeepLab head: a semantic branch and an instance branch, each
+    consisting of its own decoder and prediction head.
+
+    Args:
+        num_classes (int): The unique number of target classes.
+        backbone_indices (tuple): The indices of backbone output feature maps. The last index
+            is taken as the input of the ASPP module; the remaining indices are taken as the
+            low-level features used in the decoders, ordered from the deepest skip to the shallowest.
+        backbone_channels (tuple): The same length with "backbone_indices". It indicates the channels of corresponding index.
+        aspp_ratios (tuple): The dilation rates used in the ASPP module.
+        aspp_out_channels (int): The output channels of ASPP module.
+        align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature
+            is even, e.g. 1024x512, otherwise it is True, e.g. 769x769.
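+        decoder_channels (int): The channels of the semantic decoder output.
+        low_level_channels_projects (list): The channels each low-level feature is
+            projected to before fusion in the decoder.
+        **kwargs: The instance-branch configuration, i.e. 'instance_aspp_out_channels',
+            'instance_decoder_channels', 'instance_low_level_channels_projects',
+            'instance_num_classes', 'instance_head_channels' and 'instance_class_key'.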
+    """
+
+    def __init__(self, num_classes, backbone_indices, backbone_channels,
+                 aspp_ratios, aspp_out_channels, decoder_channels,
+                 align_corners, low_level_channels_projects, **kwargs):
+        super().__init__()
+        self.semantic_decoder = SinglePanopticDeepLabDecoder(
+            backbone_indices=backbone_indices,
+            backbone_channels=backbone_channels,
+            aspp_ratios=aspp_ratios,
+            aspp_out_channels=aspp_out_channels,
+            decoder_channels=decoder_channels,
+            align_corners=align_corners,
+            low_level_channels_projects=low_level_channels_projects)
+        self.semantic_head = SinglePanopticDeepLabHead(
+            num_classes=[num_classes],
+            decoder_channels=decoder_channels,
+            head_channels=decoder_channels,
+            class_key=['semantic'])
+        self.instance_decoder = SinglePanopticDeepLabDecoder(
+            backbone_indices=backbone_indices,
+            backbone_channels=backbone_channels,
+            aspp_ratios=aspp_ratios,
+            aspp_out_channels=kwargs['instance_aspp_out_channels'],
+            decoder_channels=kwargs['instance_decoder_channels'],
+            align_corners=align_corners,
+            low_level_channels_projects=kwargs[
+                'instance_low_level_channels_projects'])
+        self.instance_head = SinglePanopticDeepLabHead(
+            num_classes=kwargs['instance_num_classes'],
+            decoder_channels=kwargs['instance_decoder_channels'],
+            head_channels=kwargs['instance_head_channels'],
+            class_key=kwargs['instance_class_key'])
+
+    def forward(self, features):
+        pred = {}
+
+        # Semantic branch
+        semantic = self.semantic_decoder(features)
+        semantic = self.semantic_head(semantic)
+        for key in semantic.keys():
+            pred[key] = semantic[key]
+
+        # Instance branch
+        instance = self.instance_decoder(features)
+        instance = self.instance_head(instance)
+        for key in instance.keys():
+            pred[key] = instance[key]
+
+        return pred
+
+
+class SinglePanopticDeepLabDecoder(nn.Layer):
+    """
+    A single Panoptic-DeepLab decoder: ASPP on the deepest backbone feature,
+    followed by repeated project-upsample-fuse steps over the low-level features.
+
+    Args:
+        backbone_indices (tuple): The indices of backbone output feature maps. The last index
+            is taken as the input of the ASPP module; the remaining indices are taken as the
+            low-level features used in the decoder, ordered from the deepest skip to the shallowest.
+        backbone_channels (tuple): The same length with "backbone_indices". It indicates the channels of corresponding index.
+        aspp_ratios (tuple): The dilation rates used in the ASPP module.
+        aspp_out_channels (int): The output channels of ASPP module.
+        align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature
+            is even, e.g. 1024x512, otherwise it is True, e.g. 769x769.
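+        decoder_channels (int): The channels of the fused decoder output.
+        low_level_channels_projects (list): The channels each low-level feature is
+            projected to before fusion; its length must equal len(backbone_indices) - 1.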
+    """
+
+    def __init__(self, backbone_indices, backbone_channels, aspp_ratios,
+                 aspp_out_channels, decoder_channels, align_corners,
+                 low_level_channels_projects):
+        super().__init__()
+        self.aspp = layers.ASPPModule(
+            aspp_ratios,
+            backbone_channels[-1],
+            aspp_out_channels,
+            align_corners,
+            use_sep_conv=False,
+            image_pooling=True,
+            drop_rate=0.5)
+        self.backbone_indices = backbone_indices
+        self.decoder_stage = len(low_level_channels_projects)
+        if self.decoder_stage != len(self.backbone_indices) - 1:
+            raise ValueError(
+                "len(low_level_channels_projects) != len(backbone_indices) - 1, they are {} and {}"
+                .format(low_level_channels_projects, backbone_indices))
+        self.align_corners = align_corners
+
+        # Transform low-level feature
+        project = []
+        # Fuse
+        fuse = []
+        # Top-down direction, i.e. starting from largest stride
+        for i in range(self.decoder_stage):
+            project.append(
+                layers.ConvBNReLU(
+                    backbone_channels[i],
+                    low_level_channels_projects[i],
+                    1,
+                    bias_attr=False))
+            if i == 0:
+                fuse_in_channels = aspp_out_channels + low_level_channels_projects[
+                    i]
+            else:
+                fuse_in_channels = decoder_channels + low_level_channels_projects[
+                    i]
+            fuse.append(
+                layers.SeparableConvBNReLU(
+                    fuse_in_channels,
+                    decoder_channels,
+                    5,
+                    padding=2,
+                    bias_attr=False))
+        self.project = nn.LayerList(project)
+        self.fuse = nn.LayerList(fuse)
+
+    def forward(self, feat_list):
+        x = feat_list[self.backbone_indices[-1]]
+        x = self.aspp(x)
+
+        for i in range(self.decoder_stage):
+            l = feat_list[self.backbone_indices[i]]
+            l = self.project[i](l)
+            x = F.interpolate(
+                x,
+                size=l.shape[-2:],
+                mode='bilinear',
+                align_corners=self.align_corners)
+            x = paddle.concat([x, l], axis=1)
+            x = self.fuse[i](x)
+
+        return x
+
+
+class SinglePanopticDeepLabHead(nn.Layer):
+    """
+    A single Panoptic-DeepLab prediction head. It applies one classifier per
+    entry in `class_key` to the shared decoder feature.
+
+    Args:
+        num_classes (list): The number of output channels of each classifier.
+        decoder_channels (int): The number of input channels from the decoder.
+        head_channels (int): The intermediate channels of each classifier.
+        class_key (list): The dictionary keys of the outputs, e.g. ['semantic'].
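+
+        For example, the instance branch is built with num_classes=[1, 2] and
+        class_key=['center', 'offset'], so its forward() returns an OrderedDict
+        with 'center' of shape (N, 1, H, W) and 'offset' of shape (N, 2, H, W).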
+ """ + + def __init__(self, num_classes, decoder_channels, head_channels, class_key): + super(SinglePanopticDeepLabHead, self).__init__() + self.num_head = len(num_classes) + if self.num_head != len(class_key): + raise ValueError( + "len(num_classes) != len(class_key), they are {} and {}".format( + num_classes, class_key)) + + classifier = [] + for i in range(self.num_head): + classifier.append( + nn.Sequential( + layers.SeparableConvBNReLU( + decoder_channels, + head_channels, + 5, + padding=2, + bias_attr=False), + nn.Conv2D(head_channels, num_classes[i], 1))) + + self.classifier = nn.LayerList(classifier) + self.class_key = class_key + + def forward(self, x): + pred = OrderedDict() + # build classifier + for i, key in enumerate(self.class_key): + pred[key] = self.classifier[i](x) + + return pred + + +if __name__ == '__main__': + paddle.set_device('cpu') + from paddleseg.models.backbones import ResNet50_vd + backbone = ResNet50_vd(output_stride=32) + model = PanopticDeepLab( + num_classes=2, + backbone=backbone, + backbone_indices=(2, 1, 0, 3), + aspp_ratios=(1, 3, 6, 9), + aspp_out_channels=256, + decoder_channels=256, + low_level_channels_projects=[128, 64, 32], + align_corners=True, + instance_aspp_out_channels=256, + instance_decoder_channels=128, + instance_low_level_channels_projects=[64, 32, 16], + instance_num_classes=[1, 2], + instance_head_channels=32, + instance_class_key=["center", "offset"]) + flop = paddle.flops(model, (1, 3, 512, 1024), print_detail=True) + x = paddle.rand((1, 3, 512, 1024)) + result = model(x) + print(result) diff --git a/contrib/PanopticDeepLab/transforms/__init__.py b/contrib/PanopticDeepLab/transforms/__init__.py new file mode 100644 index 0000000000..af018889ae --- /dev/null +++ b/contrib/PanopticDeepLab/transforms/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .target_transforms import PanopticTargetGenerator, SemanticTargetGenerator, InstanceTargetGenerator, RawPanopticTargetGenerator diff --git a/contrib/PanopticDeepLab/transforms/target_transforms.py b/contrib/PanopticDeepLab/transforms/target_transforms.py new file mode 100644 index 0000000000..ce646f5ea1 --- /dev/null +++ b/contrib/PanopticDeepLab/transforms/target_transforms.py @@ -0,0 +1,281 @@ +import numpy as np + + +class PanopticTargetGenerator(object): + """ + Generates panoptic training target for Panoptic-DeepLab. + Annotation is assumed to have Cityscapes format. + Arguments: + ignore_index: Integer, the ignore label for semantic segmentation. + rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the + corresponding panoptic label. + thing_list: List, a list of thing classes + sigma: the sigma for Gaussian kernel. + ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch. + small_instance_area: Integer, indicates largest area for small instances. 
+        small_instance_weight: Integer, indicates semantic loss weight for small instances.
+        ignore_crowd_in_semantic: Boolean, whether to ignore crowd regions in the semantic
+            segmentation branch; crowd regions are ignored in the original TensorFlow implementation.
+    """
+
+    def __init__(self,
+                 ignore_index,
+                 rgb2id,
+                 thing_list,
+                 sigma=8,
+                 ignore_stuff_in_offset=False,
+                 small_instance_area=0,
+                 small_instance_weight=1,
+                 ignore_crowd_in_semantic=False):
+        self.ignore_index = ignore_index
+        self.rgb2id = rgb2id
+        self.thing_list = thing_list
+        self.ignore_stuff_in_offset = ignore_stuff_in_offset
+        self.small_instance_area = small_instance_area
+        self.small_instance_weight = small_instance_weight
+        self.ignore_crowd_in_semantic = ignore_crowd_in_semantic
+
+        self.sigma = sigma
+        size = 6 * sigma + 3
+        x = np.arange(0, size, 1, float)
+        y = x[:, np.newaxis]
+        x0, y0 = 3 * sigma + 1, 3 * sigma + 1
+        self.g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))
+
+    def __call__(self, panoptic, segments):
+        """Generates the training target.
+        reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py
+        reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18
+
+        Args:
+            panoptic: numpy.array, colored image encoding panoptic label.
+            segments: List, a list of dictionaries containing information of every segment, with fields:
+                - id: panoptic id, after decoding `panoptic`.
+                - category_id: semantic class id.
+                - area: segment area.
+                - bbox: segment bounding box.
+                - iscrowd: crowd region.
+        Returns:
+            A dictionary with fields:
+                - semantic: Tensor, semantic label, shape=(H, W).
+                - foreground: Tensor, foreground mask label, shape=(H, W).
+                - center: Tensor, center heatmap, shape=(1, H, W).
+                - center_points: List, center coordinates, with tuple (y-coord, x-coord).
+                - offset: Tensor, offset, shape=(2, H, W), first dim is (offset_y, offset_x).
+                - semantic_weights: Tensor, loss weight for semantic prediction, shape=(H, W).
+                - center_weights: Tensor, ignore region of center prediction, shape=(H, W), used as
+                    weights for center regression: 0 means ignore, 1 means the pixel belongs to an
+                    instance. Multiply this mask into the loss.
+                - offset_weights: Tensor, ignore region of offset prediction, shape=(H, W), used as
+                    weights for offset regression: 0 means ignore, 1 means the pixel belongs to an
+                    instance. Multiply this mask into the loss.
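+
+        For example, with the default sigma=8 each visible instance center is stamped
+        into `center` with a 51x51 Gaussian (size = 6 * sigma + 3), taking the
+        pixel-wise maximum where instances overlap, and every pixel of an instance
+        stores in `offset` the (y, x) displacement from itself to the instance's
+        center of mass (the mean pixel coordinate of its mask).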
+ """ + panoptic = self.rgb2id(panoptic) + height, width = panoptic.shape[0], panoptic.shape[1] + semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_index + foreground = np.zeros_like(panoptic, dtype=np.uint8) + center = np.zeros((1, height, width), dtype=np.float32) + center_pts = [] + offset = np.zeros((2, height, width), dtype=np.float32) + y_coord = np.ones_like(panoptic, dtype=np.float32) + x_coord = np.ones_like(panoptic, dtype=np.float32) + y_coord = np.cumsum(y_coord, axis=0) - 1 + x_coord = np.cumsum(x_coord, axis=1) - 1 + # Generate pixel-wise loss weights + semantic_weights = np.ones_like(panoptic, dtype=np.uint8) + # 0: ignore, 1: has instance + # three conditions for a region to be ignored for instance branches: + # (1) It is labeled as `ignore_index` + # (2) It is crowd region (iscrowd=1) + # (3) (Optional) It is stuff region (for offset branch) + center_weights = np.zeros_like(panoptic, dtype=np.uint8) + offset_weights = np.zeros_like(panoptic, dtype=np.uint8) + for seg in segments: + cat_id = seg["category_id"] + if self.ignore_crowd_in_semantic: + if not seg['iscrowd']: + semantic[panoptic == seg["id"]] = cat_id + else: + semantic[panoptic == seg["id"]] = cat_id + if cat_id in self.thing_list: + foreground[panoptic == seg["id"]] = 1 + if not seg['iscrowd']: + # Ignored regions are not in `segments`. + # Handle crowd region. + center_weights[panoptic == seg["id"]] = 1 + if self.ignore_stuff_in_offset: + # Handle stuff region. + if cat_id in self.thing_list: + offset_weights[panoptic == seg["id"]] = 1 + else: + offset_weights[panoptic == seg["id"]] = 1 + if cat_id in self.thing_list: + # find instance center + mask_index = np.where(panoptic == seg["id"]) + if len(mask_index[0]) == 0: + # the instance is completely cropped + continue + + # Find instance area + ins_area = len(mask_index[0]) + if ins_area < self.small_instance_area: + semantic_weights[panoptic == + seg["id"]] = self.small_instance_weight + + center_y, center_x = np.mean(mask_index[0]), np.mean( + mask_index[1]) + center_pts.append([center_y, center_x]) + + # generate center heatmap + y, x = int(center_y), int(center_x) + # outside image boundary + if x < 0 or y < 0 or \ + x >= width or y >= height: + continue + sigma = self.sigma + # upper left + ul = int(np.round(x - 3 * sigma - 1)), int( + np.round(y - 3 * sigma - 1)) + # bottom right + br = int(np.round(x + 3 * sigma + 2)), int( + np.round(y + 3 * sigma + 2)) + + c, d = max(0, -ul[0]), min(br[0], width) - ul[0] + a, b = max(0, -ul[1]), min(br[1], height) - ul[1] + + cc, dd = max(0, ul[0]), min(br[0], width) + aa, bb = max(0, ul[1]), min(br[1], height) + center[0, aa:bb, cc:dd] = np.maximum(center[0, aa:bb, cc:dd], + self.g[a:b, c:d]) + + # generate offset (2, h, w) -> (y-dir, x-dir) + offset_y_index = (np.zeros_like(mask_index[0]), mask_index[0], + mask_index[1]) + offset_x_index = (np.ones_like(mask_index[0]), mask_index[0], + mask_index[1]) + offset[offset_y_index] = center_y - y_coord[mask_index] + offset[offset_x_index] = center_x - x_coord[mask_index] + + return dict( + semantic=semantic.astype('long'), + foreground=foreground.astype('long'), + center=center.astype(np.float32), + center_points=center_pts, + offset=offset.astype(np.float32), + semantic_weights=semantic_weights.astype(np.float32), + center_weights=center_weights.astype(np.float32), + offset_weights=offset_weights.astype(np.float32)) + + +class SemanticTargetGenerator(object): + """ + Generates semantic training target only for Panoptic-DeepLab (no instance). 
+    Annotation is assumed to have Cityscapes format.
+
+    Arguments:
+        ignore_index: Integer, the ignore label for semantic segmentation.
+        rgb2id: Function, panoptic label is encoded in a colored image; this function converts
+            a color to the corresponding panoptic label.
+    """
+
+    def __init__(self, ignore_index, rgb2id):
+        self.ignore_index = ignore_index
+        self.rgb2id = rgb2id
+
+    def __call__(self, panoptic, segments):
+        """Generates the training target.
+        reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py
+        reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18
+
+        Args:
+            panoptic: numpy.array, colored image encoding panoptic label.
+            segments: List, a list of dictionaries containing information of every segment, with fields:
+                - id: panoptic id, after decoding `panoptic`.
+                - category_id: semantic class id.
+                - area: segment area.
+                - bbox: segment bounding box.
+                - iscrowd: crowd region.
+        Returns:
+            A dictionary with fields:
+                - semantic: Tensor, semantic label, shape=(H, W).
+        """
+        panoptic = self.rgb2id(panoptic)
+        semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_index
+        for seg in segments:
+            cat_id = seg["category_id"]
+            semantic[panoptic == seg["id"]] = cat_id
+
+        return dict(semantic=semantic.astype('long'))
+
+
+class InstanceTargetGenerator(object):
+    """
+    Generates instance target only for Panoptic-DeepLab.
+    Annotation is assumed to have Cityscapes format.
+
+    Arguments:
+        rgb2id: Function, panoptic label is encoded in a colored image; this function converts
+            a color to the corresponding panoptic label.
+    """
+
+    def __init__(self, rgb2id):
+        self.rgb2id = rgb2id
+
+    def __call__(self, panoptic):
+        """Generates the instance target.
+
+        Args:
+            panoptic: numpy.array, colored image encoding panoptic label.
+        Returns:
+            A dictionary with fields:
+                - instance: Tensor, shape=(H, W). 0 is background; 1, 2, 3, ... are instances, so it is class agnostic.
+        """
+        panoptic = self.rgb2id(panoptic)
+        instance = np.zeros_like(panoptic, dtype=np.int64)
+        ids = np.unique(panoptic)
+        ins_id = 1
+        for id in ids:
+            if id > 1000:
+                instance[panoptic == id] = ins_id
+                ins_id += 1
+
+        return dict(instance=instance)
+
+
+class RawPanopticTargetGenerator(object):
+    """
+    Generates the raw panoptic ground truth for evaluation, where values are 0, 1, 2, 3, ...,
+    11000, 11001, ..., 18000, 18001, and ignore_index (generally 255).
+
+    Arguments:
+        ignore_index: Integer, the ignore label for semantic segmentation.
+        rgb2id: Function, panoptic label is encoded in a colored image; this function converts
+            a color to the corresponding panoptic label.
+    """
+
+    def __init__(self, ignore_index, rgb2id, label_divisor=1000):
+        self.ignore_index = ignore_index
+        self.rgb2id = rgb2id
+        self.label_divisor = label_divisor
+
+    def __call__(self, panoptic, segments):
+        """
+        Generates the raw panoptic target.
+
+        Args:
+            panoptic (numpy.array): colored image encoding panoptic label.
+            segments (list): A list of dictionaries containing information of every segment, with fields:
+                - id: panoptic id, after decoding `panoptic`.
+                - category_id: semantic class id.
+                - area: segment area.
+                - bbox: segment bounding box.
+                - iscrowd: crowd region.
+        Returns:
+            A dictionary with fields:
+                - panoptic: Tensor, panoptic label, shape=(H, W).
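+
+            For example, with the default label_divisor of 1000, a stuff segment of
+            class 10 keeps the id 10, while instance 1 of thing class 26 is encoded
+            as 26 * 1000 + 1 = 26001 (illustrative ids).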
+        """
+        panoptic = self.rgb2id(panoptic)
+        raw_panoptic = np.zeros_like(panoptic) + self.ignore_index
+        for seg in segments:
+            cat_id = seg['category_id']
+            if seg['id'] < self.label_divisor:
+                raw_panoptic[panoptic == seg['id']] = cat_id
+            else:
+                ins_id = seg['id'] % self.label_divisor
+                raw_panoptic[panoptic ==
+                             seg['id']] = cat_id * self.label_divisor + ins_id
+        return dict(panoptic=raw_panoptic.astype('long'))
diff --git a/contrib/PanopticDeepLab/utils/__init__.py b/contrib/PanopticDeepLab/utils/__init__.py
new file mode 100644
index 0000000000..eb73861bcd
--- /dev/null
+++ b/contrib/PanopticDeepLab/utils/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .visualize import visualize_semantic, visualize_instance, visualize_panoptic
diff --git a/contrib/PanopticDeepLab/utils/evaluation/__init__.py b/contrib/PanopticDeepLab/utils/evaluation/__init__.py
new file mode 100644
index 0000000000..7c86ed1641
--- /dev/null
+++ b/contrib/PanopticDeepLab/utils/evaluation/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .semantic import SemanticEvaluator
+from .instance import InstanceEvaluator
+from .panoptic import PanopticEvaluator
diff --git a/contrib/PanopticDeepLab/utils/evaluation/instance.py b/contrib/PanopticDeepLab/utils/evaluation/instance.py
new file mode 100644
index 0000000000..1230c4d98e
--- /dev/null
+++ b/contrib/PanopticDeepLab/utils/evaluation/instance.py
@@ -0,0 +1,345 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections import defaultdict, OrderedDict
+
+import numpy as np
+
+
+class InstanceEvaluator(object):
+    """
+    Refer to 'https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py'
+
+    Calculates the matching results of each image, each class and each IoU threshold,
+    then aggregates them into the final matching results of each class and each IoU
+    threshold over the whole dataset. Based on the matching results, the AP and mAP
+    can be calculated.
+    We need two vectors for each class and for each overlap threshold:
+    The first vector (y_true) is binary and is 1 where the ground truth says true,
+    and is 0 otherwise.
+    The second vector (y_score) is float [0...1] and represents the confidence of
+    the prediction.
+    We represent the following cases as:
+                                              | y_true | y_score
+        gt instance with matched prediction   |   1    | confidence
+        gt instance w/o matched prediction    |   1    | 0.0
+        false positive prediction             |   0    | confidence
+    The current implementation only makes sense for an overlap threshold >= 0.5,
+    since only then a single prediction can either be ignored or matched, but
+    never both, and it can never match two gt instances.
+    For matching, we vary the overlap and do the following steps:
+        1.) remove all predictions that satisfy the overlap criterion with an ignore region (either void or *group)
+        2.) remove matches that do not satisfy the overlap
+        3.) mark non-matched predictions as false positive
+    Here, label 0 represents the first 'thing' class, so labels are offset by 1
+    relative to the dataset labels.
+
+    Args:
+        num_classes (int): The unique number of target classes, excluding the
+            background class (usually labeled 0).
+        overlaps (float|list): The IoU threshold(s).
+        thing_list (list|None): Thing classes; AP is only calculated for thing classes.
+    """
+
+    def __init__(self, num_classes, overlaps=0.5, thing_list=None):
+        super().__init__()
+        self.num_classes = num_classes
+        if isinstance(overlaps, float):
+            overlaps = [overlaps]
+        self.overlaps = overlaps
+        self.y_true = [[np.empty(0) for _i in range(len(overlaps))]
+                       for _j in range(num_classes)]
+        self.y_score = [[np.empty(0) for _i in range(len(overlaps))]
+                        for _j in range(num_classes)]
+        self.hard_fns = [[0] * len(overlaps) for _ in range(num_classes)]
+
+        if thing_list is None:
+            self.thing_list = list(range(num_classes))
+        else:
+            self.thing_list = thing_list
+
+    def update(self, preds, gts, ignore_mask=None):
+        """
+        Computes y_true and y_score for one image.
+
+        Args:
+            preds (list): tuple list [(label, confidence, mask), ...].
+            gts (list): tuple list [(label, mask), ...].
+            ignore_mask (np.ndarray): Mask to ignore.
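+
+        Example (hypothetical values): preds = [(0, 0.9, pred_mask)] and
+        gts = [(0, gt_mask)], where each mask is a binary (H, W) np.ndarray and
+        label 0 denotes the first thing class.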
+        """
+
+        pred_instances, gt_instances = self.get_instances(
+            preds, gts, ignore_mask=ignore_mask)
+
+        for i in range(self.num_classes):
+            if i not in self.thing_list:
+                continue
+            for oi, oth in enumerate(self.overlaps):
+                cur_true = np.ones((len(gt_instances[i])))
+                cur_score = np.ones(len(gt_instances[i])) * (-float("inf"))
+                cur_match = np.zeros(len(gt_instances[i]), dtype=bool)
+                for gti, gt_instance in enumerate(gt_instances[i]):
+                    found_match = False
+                    for pred_instance in gt_instance['matched_pred']:
+                        overlap = float(pred_instance['intersection']) / (
+                            gt_instance['pixel_count'] +
+                            pred_instance['pixel_count'] -
+                            pred_instance['intersection'])
+                        if overlap > oth:
+                            confidence = pred_instance['confidence']
+
+                            # if we already have a prediction for this ground truth,
+                            # the prediction with the lower score is automatically a false positive
+                            if cur_match[gti]:
+                                max_score = max(cur_score[gti], confidence)
+                                min_score = min(cur_score[gti], confidence)
+                                cur_score[gti] = max_score
+                                # append false positive
+                                cur_true = np.append(cur_true, 0)
+                                cur_score = np.append(cur_score, min_score)
+                                cur_match = np.append(cur_match, True)
+                            # otherwise set score
+                            else:
+                                found_match = True
+                                cur_match[gti] = True
+                                cur_score[gti] = confidence
+
+                    if not found_match:
+                        self.hard_fns[i][oi] += 1
+                # remove not-matched ground truth instances
+                cur_true = cur_true[cur_match == True]
+                cur_score = cur_score[cur_match == True]
+
+                # collect not-matched predictions as false positive
+                for pred_instance in pred_instances[i]:
+                    found_gt = False
+                    for gt_instance in pred_instance['matched_gt']:
+                        overlap = float(gt_instance['intersection']) / (
+                            gt_instance['pixel_count'] +
+                            pred_instance['pixel_count'] -
+                            gt_instance['intersection'])
+                        if overlap > oth:
+                            found_gt = True
+                            break
+                    if not found_gt:
+                        proportion_ignore = 0
+                        if ignore_mask is not None:
+                            nb_ignore_pixels = pred_instance[
+                                'void_intersection']
+                            proportion_ignore = float(
+                                nb_ignore_pixels) / pred_instance['pixel_count']
+                        if proportion_ignore <= oth:
+                            cur_true = np.append(cur_true, 0)
+                            cur_score = np.append(cur_score,
+                                                  pred_instance['confidence'])
+                self.y_true[i][oi] = np.append(self.y_true[i][oi], cur_true)
+                self.y_score[i][oi] = np.append(self.y_score[i][oi], cur_score)
+
+    def evaluate(self):
+        ap = self.cal_ap()
+        map = self.cal_map()
+
+        res = {}
+        res["AP"] = [{i: ap[i] * 100} for i in self.thing_list]
+        res["mAP"] = 100 * map
+
+        results = OrderedDict({"ins_seg": res})
+        return results
+
+    def cal_ap(self):
+        """
+        Calculates AP for every class.
+        """
+        self.ap = [0] * self.num_classes
+        self.ap_overlap = [[0] * len(self.overlaps)
+                           for _ in range(self.num_classes)]
+        for i in range(self.num_classes):
+            if i not in self.thing_list:
+                continue
+            for j in range(len(self.overlaps)):
+                y_true = self.y_true[i][j]
+                y_score = self.y_score[i][j]
+                if len(y_true) == 0:
+                    self.ap_overlap[i][j] = 0
+                    continue
+                score_argsort = np.argsort(y_score)
+                y_score_sorted = y_score[score_argsort]
+                y_true_sorted = y_true[score_argsort]
+                y_true_sorted_cumsum = np.cumsum(y_true_sorted)
+
+                # unique thresholds
+                thresholds, unique_indices = np.unique(
+                    y_score_sorted, return_index=True)
+
+                # since we need to add an artificial point to the precision-recall curve
+                # increase its length by 1
+                nb_pr = len(unique_indices) + 1
+
+                # calculate precision and recall
+                nb_examples = len(y_score_sorted)
+                nb_true_examples = y_true_sorted_cumsum[-1]
+                precision = np.zeros(nb_pr)
+                recall = np.zeros(nb_pr)
+
+                # deal with the first point:
+                # the only thing
+                # we need to do is append a zero to the cumsum at the end;
+                # an index of -1 then uses that zero
+                y_true_sorted_cumsum = np.append(y_true_sorted_cumsum, 0)
+
+                # deal with remaining
+                for idx_res, idx_scores in enumerate(unique_indices):
+                    cumsum = y_true_sorted_cumsum[idx_scores - 1]
+                    tp = nb_true_examples - cumsum
+                    fp = nb_examples - idx_scores - tp
+                    fn = cumsum + self.hard_fns[i][j]
+                    p = float(tp) / (tp + fp)
+                    r = float(tp) / (tp + fn)
+                    precision[idx_res] = p
+                    recall[idx_res] = r
+
+                # add first point in curve
+                precision[-1] = 1.
+                # Optionally, make precision monotonically non-increasing along the curve:
+                # precision = [np.max(precision[:i+1]) for i in range(len(precision))]
+                recall[-1] = 0.
+
+                # compute average of precision-recall curve
+                # integration is performed via zero order, or equivalently step-wise integration
+                # first compute the widths of each step:
+                # use a convolution with appropriate kernel, manually deal with the boundaries first
+                recall_for_conv = np.copy(recall)
+                recall_for_conv = np.append(recall_for_conv[0], recall_for_conv)
+                recall_for_conv = np.append(recall_for_conv, 0.)
+
+                step_widths = np.convolve(recall_for_conv, [-0.5, 0, 0.5],
+                                          'valid')
+
+                # integration is now simply a dot product
+                ap_current = np.dot(precision, step_widths)
+                self.ap_overlap[i][j] = ap_current
+
+        ap = [np.average(i) for i in self.ap_overlap]
+        self.ap = ap
+
+        return ap
+
+    def cal_map(self):
+        """
+        Calculates mAP over all thing classes.
+        """
+        self.cal_ap()
+        valid_ap = [self.ap[i] for i in self.thing_list]
+        map = np.mean(valid_ap)
+        self.map = map
+
+        return map
+
+    def get_instances(self, preds, gts, ignore_mask=None):
+        """
+        In this method, we create two dicts of lists:
+            - pred_instances: contains all predictions and their associated gt
+            - gt_instances: contains all gt instances and their associated predictions
+
+        Args:
+            preds (list): Predictions of the image.
+            gts (list): Ground truth of the image.
+        Returns:
+            dict: pred_instances, the type is dict(list(dict)), e.g. {0: [{'pred_id': 0, 'label': 0,
+                'pixel_count': 100, 'confidence': 0.9, 'void_intersection': 0,
+                'matched_gt': [gt_instance0, gt_instance1, ...]}, ...], 1: ...}
+            dict: gt_instances, the type is dict(list(dict)), e.g.
+                {0: [{'inst_id': 0, 'label': 0, 'pixel_count': 100, 'mask': np.ndarray,
+                'matched_pred': [pred_instance0, pred_instance1, ...]}, ...], 1: ...}
+        """
+
+        pred_instances = defaultdict(list)
+        gt_instances = defaultdict(list)
+
+        gt_inst_count = 0
+        for gt in gts:
+            label, mask = gt
+            gt_instance = defaultdict(list)
+            gt_instance['inst_id'] = gt_inst_count
+            gt_instance['label'] = label
+            gt_instance['pixel_count'] = np.count_nonzero(mask)
+            gt_instance['mask'] = mask
+            gt_instances[label].append(gt_instance)
+            gt_inst_count += 1
+
+        pred_inst_count = 0
+        for pred in preds:
+            label, conf, mask = pred
+            pred_instance = defaultdict(list)
+            pred_instance['label'] = label
+            pred_instance['pred_id'] = pred_inst_count
+            pred_instance['pixel_count'] = np.count_nonzero(mask)
+            pred_instance['confidence'] = conf
+            if ignore_mask is not None:
+                pred_instance['void_intersection'] = np.count_nonzero(
+                    np.logical_and(mask, ignore_mask))
+
+            # Loop through all ground truth instances with matching label
+            matched_gt = []
+            for gt_num, gt_instance in enumerate(gt_instances[label]):
+                intersection = np.count_nonzero(
+                    np.logical_and(mask, gt_instances[label][gt_num]['mask']))
+                if intersection > 0:
+                    gt_copy = gt_instance.copy()
+                    pred_copy = pred_instance.copy()
+
+                    gt_copy['intersection'] = intersection
+                    pred_copy['intersection'] = intersection
+
+                    matched_gt.append(gt_copy)
+                    gt_instances[label][gt_num]['matched_pred'].append(
+                        pred_copy)
+
+            pred_instance['matched_gt'] = matched_gt
+            pred_inst_count += 1
+            pred_instances[label].append(pred_instance)
+
+        return pred_instances, gt_instances
+
+    @staticmethod
+    def convert_gt_map(seg_map, ins_map):
+        """
+        Converts the ground truth with format (h*w) to the format that satisfies the AP calculation.
+
+        Args:
+            seg_map (np.ndarray): the semantic segmentation map with shape H * W. Value is 0, 1, 2, ...
+            ins_map (np.ndarray): the instance segmentation map with shape H * W. Value is 0, 1, 2, ...
+        Returns:
+            list: tuple list like [(label, mask), ...]
+        """
+        gts = []
+        instance_cnt = np.unique(ins_map)
+        for i in instance_cnt:
+            if i == 0:
+                continue
+            mask = ins_map == i
+            label = seg_map[mask][0]
+            gts.append((label, mask.astype('int32')))
+        return gts
+
+    @staticmethod
+    def convert_pred_map(seg_pred, pan_pred):
+        """
+        Converts the predictions with format (h*w) to the format that satisfies the AP calculation.
+
+        Args:
+            seg_pred (np.ndarray): the semantic segmentation map with shape C * H * W. Value is probability.
+            pan_pred (np.ndarray): panoptic predictions, void_label, stuff_id * label_divisor, thing_id * label_divisor
+                + ins_id, ins_id >= 1.
+        Returns:
+            list: tuple list like [(label, score, mask), ...]
+        """
+        preds = []
+        instance_cnt = np.unique(pan_pred)
+        for i in instance_cnt:
+            if (i < 1000) or (i % 1000 == 0):
+                continue
+            mask = pan_pred == i
+            label = i // 1000
+            score = np.mean(seg_pred[label][mask])
+            preds.append((label, score, mask.astype('int32')))
+        return preds
diff --git a/contrib/PanopticDeepLab/utils/evaluation/panoptic.py b/contrib/PanopticDeepLab/utils/evaluation/panoptic.py
new file mode 100644
index 0000000000..9c930bcc69
--- /dev/null
+++ b/contrib/PanopticDeepLab/utils/evaluation/panoptic.py
@@ -0,0 +1,220 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/mcordts/cityscapesScripts/blob/aeb7b82531f86185ce287705be28f452ba3ddbb8/cityscapesscripts/evaluation/evalPanopticSemanticLabeling.py
+# Modified by Guowei Chen
+# ------------------------------------------------------------------------------
+
+from collections import defaultdict, OrderedDict
+
+import numpy as np
+
+OFFSET = 256 * 256 * 256
+
+
+class PQStatCat():
+    def __init__(self):
+        self.iou = 0.0
+        self.tp = 0
+        self.fp = 0
+        self.fn = 0
+
+    def __iadd__(self, pq_stat_cat):
+        self.iou += pq_stat_cat.iou
+        self.tp += pq_stat_cat.tp
+        self.fp += pq_stat_cat.fp
+        self.fn += pq_stat_cat.fn
+        return self
+
+    def __repr__(self):
+        s = 'iou: ' + str(self.iou) + ' tp: ' + str(self.tp) + ' fp: ' + str(
+            self.fp) + ' fn: ' + str(self.fn)
+        return s
+
+
+class PQStat():
+    def __init__(self, num_classes):
+        self.pq_per_cat = defaultdict(PQStatCat)
+        self.num_classes = num_classes
+
+    def __getitem__(self, i):
+        return self.pq_per_cat[i]
+
+    def __iadd__(self, pq_stat):
+        for label, pq_stat_cat in pq_stat.pq_per_cat.items():
+            self.pq_per_cat[label] += pq_stat_cat
+        return self
+
+    def pq_average(self, isthing=None, thing_list=None):
+        """
+        Calculates the average pq for all classes and for every class.
+
+        Args:
+            isthing (bool|None): calculate average pq for thing classes if isthing is True,
+                for stuff classes if isthing is False, and for all classes if isthing is None.
+                Default: None.
+            thing_list (list|None): A list of thing classes. It should be provided when
+                isthing is True or False.
+        """
+        pq, sq, rq, n = 0, 0, 0, 0
+        per_class_results = {}
+        for label in range(self.num_classes):
+            if isthing is not None:
+                if isthing:
+                    if label not in thing_list:
+                        continue
+                else:
+                    if label in thing_list:
+                        continue
+            iou = self.pq_per_cat[label].iou
+            tp = self.pq_per_cat[label].tp
+            fp = self.pq_per_cat[label].fp
+            fn = self.pq_per_cat[label].fn
+            if tp + fp + fn == 0:
+                per_class_results[label] = {'pq': 0.0, 'sq': 0.0, 'rq': 0.0}
+                continue
+            n += 1
+            pq_class = iou / (tp + 0.5 * fp + 0.5 * fn)
+            sq_class = iou / tp if tp != 0 else 0
+            rq_class = tp / (tp + 0.5 * fp + 0.5 * fn)
+
+            per_class_results[label] = {
+                'pq': pq_class,
+                'sq': sq_class,
+                'rq': rq_class
+            }
+            pq += pq_class
+            sq += sq_class
+            rq += rq_class
+
+        return {
+            'pq': pq / n,
+            'sq': sq / n,
+            'rq': rq / n,
+            'n': n
+        }, per_class_results
+
+
+class PanopticEvaluator:
+    """
+    Evaluates panoptic segmentation.
+    """
+
+    def __init__(self,
+                 num_classes,
+                 thing_list,
+                 ignore_index=255,
+                 label_divisor=1000):
+        self.pq_stat = PQStat(num_classes)
+        self.num_classes = num_classes
+        self.thing_list = thing_list
+        self.ignore_index = ignore_index
+        self.label_divisor = label_divisor
+
+    def update(self, pred, gt):
+        # get the labels and counts for the pred and gt
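+        # `pred` and `gt` are panoptic id maps in the format produced by
+        # RawPanopticTargetGenerator: stuff pixels store the class id directly,
+        # thing pixels store class_id * label_divisor + instance_id, and ignored
+        # pixels store ignore_index.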
+        gt_labels, gt_labels_counts = np.unique(gt, return_counts=True)
+        pred_labels, pred_labels_counts = np.unique(pred, return_counts=True)
+        gt_segms = defaultdict(dict)
+        pred_segms = defaultdict(dict)
+        for label, label_count in zip(gt_labels, gt_labels_counts):
+            category_id = label // self.label_divisor if label > self.label_divisor else label
+            gt_segms[label]['area'] = label_count
+            gt_segms[label]['category_id'] = category_id
+            gt_segms[label]['iscrowd'] = 1 if label in self.thing_list else 0
+        for label, label_count in zip(pred_labels, pred_labels_counts):
+            category_id = label // self.label_divisor if label > self.label_divisor else label
+            pred_segms[label]['area'] = label_count
+            pred_segms[label]['category_id'] = category_id
+
+        # confusion matrix calculation
+        pan_gt_pred = gt.astype(np.uint64) * OFFSET + pred.astype(np.uint64)
+        gt_pred_map = {}
+        labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True)
+        for label, intersection in zip(labels, labels_cnt):
+            gt_id = label // OFFSET
+            pred_id = label % OFFSET
+            gt_pred_map[(gt_id, pred_id)] = intersection
+
+        # count all matched pairs
+        gt_matched = set()
+        pred_matched = set()
+        for label_tuple, intersection in gt_pred_map.items():
+            gt_label, pred_label = label_tuple
+            if gt_label == self.ignore_index or pred_label == self.ignore_index:
+                continue
+            if gt_segms[gt_label]['iscrowd'] == 1:
+                continue
+            if gt_segms[gt_label]['category_id'] != pred_segms[pred_label][
+                    'category_id']:
+                continue
+            union = pred_segms[pred_label]['area'] + gt_segms[gt_label][
+                'area'] - intersection - gt_pred_map.get(
+                    (self.ignore_index, pred_label), 0)
+            iou = intersection / union
+            if iou > 0.5:
+                self.pq_stat[gt_segms[gt_label]['category_id']].tp += 1
+                self.pq_stat[gt_segms[gt_label]['category_id']].iou += iou
+                gt_matched.add(gt_label)
+                pred_matched.add(pred_label)
+
+        # count false negatives
+        crowd_labels_dict = {}
+        for gt_label, gt_info in gt_segms.items():
+            if gt_label in gt_matched:
+                continue
+            if gt_label == self.ignore_index:
+                continue
+            # ignore crowd
+            if gt_info['iscrowd'] == 1:
+                crowd_labels_dict[gt_info['category_id']] = gt_label
+                continue
+            self.pq_stat[gt_info['category_id']].fn += 1
+
+        # count false positives
+        for pred_label, pred_info in pred_segms.items():
+            if pred_label in pred_matched:
+                continue
+            if pred_label == self.ignore_index:
+                continue
+            # intersection of the segment with self.ignore_index
+            intersection = gt_pred_map.get((self.ignore_index, pred_label), 0)
+            if pred_info['category_id'] in crowd_labels_dict:
+                intersection += gt_pred_map.get(
+                    (crowd_labels_dict[pred_info['category_id']], pred_label),
+                    0)
+            # a predicted segment is ignored if more than half of it corresponds to self.ignore_index regions
+            if intersection / pred_info['area'] > 0.5:
+                continue
+            self.pq_stat[pred_info['category_id']].fp += 1
+
+    def evaluate(self):
+        metrics = [("All", None), ("Things", True), ("Stuff", False)]
+        results = {}
+        for name, isthing in metrics:
+            results[name], per_class_results = self.pq_stat.pq_average(
+                isthing=isthing, thing_list=self.thing_list)
+            if name == 'All':
+                results['per_class'] = per_class_results
+        return OrderedDict(pan_seg=results)
+
+
+if __name__ == '__main__':
+    panoptic_metric = PanopticEvaluator(2, [1])
+    pred = np.zeros((100, 100))
+    gt = np.zeros((100, 100))
+    pred[0:50, 0:50] = 1
+    gt[0:60, 0:60] = 1
+    panoptic_metric.update(pred, gt)
+    print(panoptic_metric.evaluate())
diff --git a/contrib/PanopticDeepLab/utils/evaluation/semantic.py b/contrib/PanopticDeepLab/utils/evaluation/semantic.py
new file mode 100644
index 0000000000..ca59a6a503
--- /dev/null
+++ b/contrib/PanopticDeepLab/utils/evaluation/semantic.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ------------------------------------------------------------------------------
+# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/master/segmentation/evaluation/semantic.py
+# Modified by Guowei Chen
+# ------------------------------------------------------------------------------
+
+from collections import OrderedDict
+
+import numpy as np
+
+
+class SemanticEvaluator:
+    """
+    Evaluates semantic segmentation.
+    """
+
+    def __init__(self, num_classes, ignore_index=255):
+        """
+        Args:
+            num_classes (int): number of classes
+            ignore_index (int): value in semantic segmentation ground truth. Predictions for the
+                corresponding pixels should be ignored.
+        """
+        self._num_classes = num_classes
+        self._ignore_index = ignore_index
+        self._N = num_classes + 1  # store ignore label in the last class
+
+        self._conf_matrix = np.zeros((self._N, self._N), dtype=np.int64)
+
+    def update(self, pred, gt):
+        pred = pred.astype(np.int64)
+        gt = gt.astype(np.int64)
+        gt[gt == self._ignore_index] = self._num_classes
+
+        # row: pred, column: gt
+        self._conf_matrix += np.bincount(
+            self._N * pred.reshape(-1) + gt.reshape(-1),
+            minlength=self._N**2).reshape(self._N, self._N)
+
+    def evaluate(self):
+        """
+        Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
+        * Mean intersection-over-union averaged across classes (mIoU)
+        * Frequency Weighted IoU (fwIoU)
+        * Mean pixel accuracy averaged across classes (mACC)
+        * Pixel Accuracy (pACC)
+        """
+        acc = np.zeros(self._num_classes, dtype=np.float64)
+        iou = np.zeros(self._num_classes, dtype=np.float64)
+        tp = self._conf_matrix.diagonal()[:-1].astype(np.float64)
+        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float64)
+        class_weights = pos_gt / np.sum(pos_gt)
+        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float64)
+
+        acc_valid = pos_pred > 0
+        acc[acc_valid] = tp[acc_valid] / pos_pred[acc_valid]
+        iou_valid = (pos_gt + pos_pred) > 0
+        union = pos_gt + pos_pred - tp
+        iou[acc_valid] = tp[acc_valid] / union[acc_valid]
+        macc = np.sum(acc) / np.sum(acc_valid)
+        miou = np.sum(iou) / np.sum(iou_valid)
+        fiou = np.sum(iou * class_weights)
+        pacc = np.sum(tp) / np.sum(pos_gt)
+
+        res = {}
+        res["mIoU"] = 100 * miou
+        res["fwIoU"] = 100 * fiou
+        res["mACC"] = 100 * macc
+        res["pACC"] = 100 * pacc
+
+        results = OrderedDict({"sem_seg": res})
+        return results
diff --git a/contrib/PanopticDeepLab/utils/visualize.py b/contrib/PanopticDeepLab/utils/visualize.py
new file mode 100644
index 0000000000..27c950ec0b
--- /dev/null
+++ b/contrib/PanopticDeepLab/utils/visualize.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Reference: https://github.com/bowenc0221/panoptic-deeplab/blob/master/segmentation/utils/save_annotation.py
+
+import os
+
+import cv2
+import numpy as np
+from PIL import Image as PILImage
+
+# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L14
+_COLORS = np.array([
+    0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184,
+    0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300,
+    0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000,
+    0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000,
+    1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667,
+    0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000,
+    1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667,
+    0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333,
+    0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500,
+    0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333,
+    0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000,
+    0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000,
+    0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333,
+    1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000,
+    0.333, 1.000, 1.000, 0.667, 1.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000,
+    0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167,
+    0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000,
+    0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333,
+    0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000,
+    1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.857, 0.857, 0.857, 1.000,
+    1.000, 1.000
+]).astype(np.float32).reshape(-1, 3)
+
+
+def random_color(rgb=False, maximum=255):
+    """
+    Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L111
+
+    Args:
+        rgb (bool): whether to return RGB colors or BGR colors.
+ maximum (int): either 255 or 1 + Returns: + ndarray: a vector of 3 numbers + """ + idx = np.random.randint(0, len(_COLORS)) + ret = _COLORS[idx] * maximum + if not rgb: + ret = ret[::-1] + return ret + + +def cityscape_colormap(): + """Get CityScapes colormap""" + colormap = np.zeros((256, 3), dtype=np.uint8) + colormap[0] = [128, 64, 128] + colormap[1] = [244, 35, 232] + colormap[2] = [70, 70, 70] + colormap[3] = [102, 102, 156] + colormap[4] = [190, 153, 153] + colormap[5] = [153, 153, 153] + colormap[6] = [250, 170, 30] + colormap[7] = [220, 220, 0] + colormap[8] = [107, 142, 35] + colormap[9] = [152, 251, 152] + colormap[10] = [70, 130, 180] + colormap[11] = [220, 20, 60] + colormap[12] = [255, 0, 0] + colormap[13] = [0, 0, 142] + colormap[14] = [0, 0, 70] + colormap[15] = [0, 60, 100] + colormap[16] = [0, 80, 100] + colormap[17] = [0, 0, 230] + colormap[18] = [119, 11, 32] + colormap = colormap[:, ::-1] + return colormap + + +def visualize_semantic(semantic, save_path, colormap, image=None, weight=0.5): + """ + Save semantic segmentation results. + + Args: + semantic(np.ndarray): The result semantic segmenation results, shape is (h, w). + save_path(str): The save path. + colormap(np.ndarray): A color map for visualization. + image(np.ndarray, optional): Origin image to prediction, merge semantic with + image if provided. Default: None. + weight(float, optional): The image weight when merge semantic with image. Default: 0.6. + """ + semantic = semantic.astype('uint8') + colored_semantic = colormap[semantic] + if image is not None: + colored_semantic = cv2.addWeighted(image, weight, colored_semantic, + 1 - weight, 0) + cv2.imwrite(save_path, colored_semantic) + + +def visualize_instance(instance, save_path, stuff_id=0, image=None, weight=0.5): + """ + Save instance segmentation results. + + Args: + instance(np.ndarray): The instance segmentation results, shape is (h, w). + save_path(str): The save path. + stuff_id(int, optional): Id for background that not want to plot. + image(np.ndarray, optional): Origin image to prediction, merge instance with + image if provided. Default: None. + weight(float, optional): The image weight when merge instance with image. Default: 0.6. + """ + # Add color map for instance segmentation result. + ids = np.unique(instance) + num_colors = len(ids) + colormap = np.zeros((num_colors, 3), dtype=np.uint8) + # Maps label to continuous value + for i in range(num_colors): + instance[instance == ids[i]] = i + colormap[i, :] = random_color(maximum=255) + if ids[i] == stuff_id: + colormap[i, :] = np.array([0, 0, 0]) + colored_instance = colormap[instance] + + if image is not None: + colored_instance = cv2.addWeighted(image, weight, colored_instance, + 1 - weight, 0) + cv2.imwrite(save_path, colored_instance) + + +def visualize_panoptic(panoptic, + save_path, + label_divisor, + colormap, + image=None, + weight=0.5, + ignore_index=255): + """ + Save panoptic segmentation results. + + Args: + panoptic(np.ndarray): The panoptic segmentation results, shape is (h, w). + save_path(str): The save path. + label_divisor(int): Used to convert panoptic id = semantic id * label_divisor + instance_id. + colormap(np.ndarray): A color map for visualization. + image(np.ndarray, optional): Origin image to prediction, merge panoptic with + image if provided. Default: None. + weight(float, optional): The image weight when merge panoptic with image. Default: 0.6. + ignore_index(int, optional): Specifies a target value that is ignored. 
+    """
+    colored_panoptic = np.zeros((panoptic.shape[0], panoptic.shape[1], 3),
+                                dtype=np.uint8)
+    taken_colors = {(0, 0, 0)}  # set of used RGB tuples; black is reserved
+
+    def _random_color(base, max_dist=30):
+        color = base + np.random.randint(
+            low=-max_dist, high=max_dist + 1, size=3)
+        return tuple(np.maximum(0, np.minimum(255, color)))
+
+    for lab in np.unique(panoptic):
+        mask = panoptic == lab
+
+        ignore_mask = panoptic == ignore_index
+        ins_mask = panoptic > label_divisor
+        if lab > label_divisor:
+            base_color = colormap[lab // label_divisor]
+        elif lab != ignore_index:
+            base_color = colormap[lab]
+        else:
+            continue
+        if tuple(base_color) not in taken_colors:
+            taken_colors.add(tuple(base_color))
+            color = base_color
+        else:
+            while True:
+                color = _random_color(base_color)
+                if color not in taken_colors:
+                    taken_colors.add(color)
+                    break
+        colored_panoptic[mask] = color
+
+    if image is not None:
+        colored_panoptic = cv2.addWeighted(image, weight, colored_panoptic,
+                                           1 - weight, 0)
+    cv2.imwrite(save_path, colored_panoptic)
diff --git a/contrib/PanopticDeepLab/val.py b/contrib/PanopticDeepLab/val.py
index b662ad3404..2553fbbec1 100644
--- a/contrib/PanopticDeepLab/val.py
+++ b/contrib/PanopticDeepLab/val.py
@@ -16,10 +16,13 @@
 import os
 
 import paddle
-
+import paddleseg
 from paddleseg.cvlibs import manager, Config
-from paddleseg.core import evaluate
-from paddleseg.utils import get_sys_env, logger, config_check, utils
+from paddleseg.utils import get_sys_env, logger, config_check
+
+from core import evaluate
+from datasets import *
+from models import *
 
 
 def parse_args():
@@ -89,7 +92,7 @@ def main(args):
 
     model = cfg.model
     if args.model_path:
-        utils.load_entire_model(model, args.model_path)
+        paddleseg.utils.utils.load_entire_model(model, args.model_path)
         logger.info('Loaded trained params of model successfully')
 
     config_check(cfg, val_dataset=val_dataset)

From 175dde706e142a8443e1110eb1acf2487691c97d Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Mon, 10 May 2021 19:26:21 +0800
Subject: [PATCH 102/210] update predict process

---
 contrib/PanopticDeepLab/core/predict.py   | 9 +++++----
 contrib/PanopticDeepLab/predict.py        | 6 ++++--
 contrib/PanopticDeepLab/utils/__init__.py | 2 +-
 3 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/contrib/PanopticDeepLab/core/predict.py b/contrib/PanopticDeepLab/core/predict.py
index eb0249033d..df10de48a6 100644
--- a/contrib/PanopticDeepLab/core/predict.py
+++ b/contrib/PanopticDeepLab/core/predict.py
@@ -18,11 +18,12 @@
 import cv2
 import numpy as np
 import paddle
-
-from paddleseg import utils
-from paddleseg.core import infer
+import paddleseg
 from paddleseg.utils import logger, progbar
 
+from core import infer
+import utils
+
 
 def mkdir(path):
     sub_dir = os.path.dirname(path)
@@ -86,7 +87,7 @@ def predict(model,
         nms_kernel(int, optional): NMS max pooling kernel size. Default: 7.
         top_k(int, optional): Top k centers to keep. Default: 200.
""" - utils.utils.load_entire_model(model, model_path) + paddleseg.utils.utils.load_entire_model(model, model_path) model.eval() nranks = paddle.distributed.get_world_size() local_rank = paddle.distributed.get_rank() diff --git a/contrib/PanopticDeepLab/predict.py b/contrib/PanopticDeepLab/predict.py index 95644295d3..a7dc73c6b1 100644 --- a/contrib/PanopticDeepLab/predict.py +++ b/contrib/PanopticDeepLab/predict.py @@ -16,10 +16,12 @@ import os import paddle - from paddleseg.cvlibs import manager, Config from paddleseg.utils import get_sys_env, logger, config_check -from paddleseg.core import predict + +from core import predict +from datasets import * +from models import * def parse_args(): diff --git a/contrib/PanopticDeepLab/utils/__init__.py b/contrib/PanopticDeepLab/utils/__init__.py index eb73861bcd..eede34cc67 100644 --- a/contrib/PanopticDeepLab/utils/__init__.py +++ b/contrib/PanopticDeepLab/utils/__init__.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .visualize import visualize_semantic, visualize_instance, visualize_panoptic +from .visualize import visualize_semantic, visualize_instance, visualize_panoptic, cityscape_colormap From c492f8777254bab258ca432e2d19e6cbd940581b Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 11 May 2021 10:06:48 +0800 Subject: [PATCH 103/210] update models --- contrib/PanopticDeepLab/core/infer.py | 2 - .../models/panoptic_deeplab.py | 118 +++++++++++++++++- 2 files changed, 115 insertions(+), 5 deletions(-) diff --git a/contrib/PanopticDeepLab/core/infer.py b/contrib/PanopticDeepLab/core/infer.py index 069ed0cd92..65d26b4869 100644 --- a/contrib/PanopticDeepLab/core/infer.py +++ b/contrib/PanopticDeepLab/core/infer.py @@ -20,8 +20,6 @@ import paddle import paddle.nn.functional as F -debug = False - def get_reverse_list(ori_shape, transforms): """ diff --git a/contrib/PanopticDeepLab/models/panoptic_deeplab.py b/contrib/PanopticDeepLab/models/panoptic_deeplab.py index 923340bf32..7d29cc0260 100644 --- a/contrib/PanopticDeepLab/models/panoptic_deeplab.py +++ b/contrib/PanopticDeepLab/models/panoptic_deeplab.py @@ -187,6 +187,118 @@ def forward(self, features): return pred +class SeparableConvBNReLU(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size, + padding='same', + **kwargs): + super().__init__() + self.depthwise_conv = layers.ConvBNReLU( + in_channels, + out_channels=in_channels, + kernel_size=kernel_size, + padding=padding, + groups=in_channels, + **kwargs) + self.piontwise_conv = layers.ConvBNReLU( + in_channels, out_channels, kernel_size=1, groups=1, bias_attr=False) + + def forward(self, x): + x = self.depthwise_conv(x) + x = self.piontwise_conv(x) + return x + + +class ASPPModule(nn.Layer): + """ + Atrous Spatial Pyramid Pooling. + + Args: + aspp_ratios (tuple): The dilation rate using in ASSP module. + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature + is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. + use_sep_conv (bool, optional): If using separable conv in ASPP module. Default: False. + image_pooling (bool, optional): If augmented with image-level features. 
Default: False + """ + + def __init__(self, + aspp_ratios, + in_channels, + out_channels, + align_corners, + use_sep_conv=False, + image_pooling=False, + drop_rate=0.1): + super().__init__() + + self.align_corners = align_corners + self.aspp_blocks = nn.LayerList() + + for ratio in aspp_ratios: + if use_sep_conv and ratio > 1: + conv_func = SeparableConvBNReLU + else: + conv_func = layers.ConvBNReLU + + block = conv_func( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1 if ratio == 1 else 3, + dilation=ratio, + padding=0 if ratio == 1 else ratio, + bias_attr=False) + self.aspp_blocks.append(block) + + out_size = len(self.aspp_blocks) + + if image_pooling: + self.global_avg_pool = nn.Sequential( + nn.AdaptiveAvgPool2D(output_size=(1, 1)), + layers.ConvBNReLU( + in_channels, out_channels, kernel_size=1, bias_attr=False)) + out_size += 1 + self.image_pooling = image_pooling + + self.conv_bn_relu = layers.ConvBNReLU( + in_channels=out_channels * out_size, + out_channels=out_channels, + kernel_size=1, + bias_attr=False) + + self.dropout = nn.Dropout(p=drop_rate) # drop rate + + def forward(self, x): + outputs = [] + for block in self.aspp_blocks: + y = block(x) + interpolate_shape = x.shape[2:] + y = F.interpolate( + y, + interpolate_shape, + mode='bilinear', + align_corners=self.align_corners) + outputs.append(y) + + if self.image_pooling: + img_avg = self.global_avg_pool(x) + img_avg = F.interpolate( + img_avg, + interpolate_shape, + mode='bilinear', + align_corners=self.align_corners) + outputs.append(img_avg) + + x = paddle.concat(outputs, axis=1) + x = self.conv_bn_relu(x) + x = self.dropout(x) + + return x + + class SinglePanopticDeepLabDecoder(nn.Layer): """ The DeepLabV3PHead implementation based on PaddlePaddle. @@ -211,7 +323,7 @@ def __init__(self, backbone_indices, backbone_channels, aspp_ratios, aspp_out_channels, decoder_channels, align_corners, low_level_channels_projects): super().__init__() - self.aspp = layers.ASPPModule( + self.aspp = ASPPModule( aspp_ratios, backbone_channels[-1], aspp_out_channels, @@ -246,7 +358,7 @@ def __init__(self, backbone_indices, backbone_channels, aspp_ratios, fuse_in_channels = decoder_channels + low_level_channels_projects[ i] fuse.append( - layers.SeparableConvBNReLU( + SeparableConvBNReLU( fuse_in_channels, decoder_channels, 5, @@ -294,7 +406,7 @@ def __init__(self, num_classes, decoder_channels, head_channels, class_key): for i in range(self.num_head): classifier.append( nn.Sequential( - layers.SeparableConvBNReLU( + SeparableConvBNReLU( decoder_channels, head_channels, 5, From b8f3fac1dd229d167246367913c39e5793fae135 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 11 May 2021 10:07:45 +0800 Subject: [PATCH 104/210] recover to origin --- paddleseg/models/layers/pyramid_pool.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/paddleseg/models/layers/pyramid_pool.py b/paddleseg/models/layers/pyramid_pool.py index 6ce9958ee3..bb1c9bafda 100644 --- a/paddleseg/models/layers/pyramid_pool.py +++ b/paddleseg/models/layers/pyramid_pool.py @@ -39,8 +39,7 @@ def __init__(self, out_channels, align_corners, use_sep_conv=False, - image_pooling=False, - drop_rate=0.1): + image_pooling=False): super().__init__() self.align_corners = align_corners @@ -75,7 +74,7 @@ def __init__(self, out_channels=out_channels, kernel_size=1) - self.dropout = nn.Dropout(p=drop_rate) # drop rate + self.dropout = nn.Dropout(p=0.1) # drop rate def forward(self, x): outputs = [] From 69ac997bfbbe8708c048c8b7eef1b1a1b9d1a0d4 Mon 
Sep 17 00:00:00 2001
From: chenguowei01
Date: Tue, 11 May 2021 10:24:01 +0800
Subject: [PATCH 105/210] update README.md

---
 contrib/PanopticDeepLab/README.md | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/contrib/PanopticDeepLab/README.md b/contrib/PanopticDeepLab/README.md
index f234947f8b..7c8ba47161 100644
--- a/contrib/PanopticDeepLab/README.md
+++ b/contrib/PanopticDeepLab/README.md
@@ -27,14 +27,15 @@ Panoptic DeepLab was the first to demonstrate that a bottom-up algorithm can achieve state-of-the-art
 git clone https://github.com/PaddlePaddle/PaddleSeg
 ```
 
-3. Enter the PaddleSeg/contrib/PanopticDeepLab directory
+3. Install paddleseg
 ```shell
-cd PaddleSeg/contrib/PanopticDeepLab
+cd PaddleSeg
+pip install -e .
 ```
 
-4. Add the current directory to PYTHONPATH
+4. Enter the PaddleSeg/contrib/PanopticDeepLab directory
 ```shell
-export PYTHONPATH=`pwd`:$PYTHONPATH
+cd contrib/PanopticDeepLab
 ```
 
 ## Dataset Preparation
@@ -43,7 +44,7 @@
 
 ### Cityscapes
 
-Go to the [CityScapes website](https://www.cityscapes-dataset.com/), download the dataset, and organize it into the following structure.
+Go to the [CityScapes website](https://www.cityscapes-dataset.com/), download the dataset, and organize it into the following structure:
 
 ```
 cityscapes/
@@ -73,7 +74,7 @@
 pip install git+https://github.com/mcordts/cityscapesScripts.git
 ```
 
-Command for generating *_panoptic.png:
+Command for generating `*_panoptic.png` (you need to locate the `createPanopticImgs.py` file):
 ```shell
 python /path/to/cityscapesscripts/preparation/createPanopticImgs.py \
        --dataset-folder data/cityscapes/gtFine/ \
@@ -94,7 +95,7 @@ python -m paddle.distributed.launch train.py \
 
 **note:** --do_eval slows down training and increases GPU memory usage; enable or disable it as needed.
 
-Run the following command to see more parameter options
+Run the following command to see more parameter options:
 ```shell
 python train.py --help
 ```
@@ -105,7 +106,9 @@ python val.py \
     --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
     --model_path output/iter_90000/model.pdparams
 ```
-Run the following command to see more parameter options
+You can also download the model we provide and evaluate it directly.
+
+Run the following command to see more parameter options:
 ```shell
 python val.py --help
 ```
@@ -119,11 +122,13 @@ python -m paddle.distributed.launch predict.py \
     --image_path data/cityscapes/leftImg8bit/val/ \
     --save_dir ./output/result
 ```
-Run the following command to see more parameter options
+You can also download the model we provide and run prediction directly.
+
+Run the following command to see more parameter options:
 ```shell
 python predict.py --help
 ```
-Panoptic segmentation results
+Panoptic segmentation results:

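
Aside: the README above and the annotation patches below all lean on the same two label conventions, both stated in the diffs themselves: a combined panoptic id is `semantic id * label_divisor + instance id` (with `label_divisor` defaulting to 1000), and the `*_panoptic.png` ground truth encodes that id into RGB as `color = [id % 256, id // 256, id // 256 // 256]`. A minimal, self-contained sketch of the two conversions; the repo defines its own `rgb2id` in `datasets/cityscapes_panoptic.py`, and the helper names below are illustrative only, not the repo's API:

```python
import numpy as np

LABEL_DIVISOR = 1000  # default used by RawPanopticTargetGenerator


def rgb2id(color):
    """Decode a panoptic PNG color back to its integer segment id,
    inverting color = [id % 256, id // 256, id // 256 // 256]."""
    color = np.asarray(color, dtype=np.uint32)
    return color[..., 0] + 256 * color[..., 1] + 256 * 256 * color[..., 2]


def split_panoptic_id(panoptic_id, label_divisor=LABEL_DIVISOR):
    """Split panoptic id = semantic id * label_divisor + instance id."""
    return panoptic_id // label_divisor, panoptic_id % label_divisor


# e.g. instance 3 of Cityscapes train id 12 ("rider"):
assert split_panoptic_id(12 * LABEL_DIVISOR + 3) == (12, 3)
```
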
      From b70979184ed4ed722d99d3d7409d7ca419acfe98 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 11 May 2021 10:31:20 +0800 Subject: [PATCH 106/210] rm some useless --- .../models/panoptic_deeplab.py | 25 ------------------- .../utils/evaluation/panoptic.py | 10 -------- 2 files changed, 35 deletions(-) diff --git a/contrib/PanopticDeepLab/models/panoptic_deeplab.py b/contrib/PanopticDeepLab/models/panoptic_deeplab.py index 7d29cc0260..f40216777a 100644 --- a/contrib/PanopticDeepLab/models/panoptic_deeplab.py +++ b/contrib/PanopticDeepLab/models/panoptic_deeplab.py @@ -424,28 +424,3 @@ def forward(self, x): pred[key] = self.classifier[i](x) return pred - - -if __name__ == '__main__': - paddle.set_device('cpu') - from paddleseg.models.backbones import ResNet50_vd - backbone = ResNet50_vd(output_stride=32) - model = PanopticDeepLab( - num_classes=2, - backbone=backbone, - backbone_indices=(2, 1, 0, 3), - aspp_ratios=(1, 3, 6, 9), - aspp_out_channels=256, - decoder_channels=256, - low_level_channels_projects=[128, 64, 32], - align_corners=True, - instance_aspp_out_channels=256, - instance_decoder_channels=128, - instance_low_level_channels_projects=[64, 32, 16], - instance_num_classes=[1, 2], - instance_head_channels=32, - instance_class_key=["center", "offset"]) - flop = paddle.flops(model, (1, 3, 512, 1024), print_detail=True) - x = paddle.rand((1, 3, 512, 1024)) - result = model(x) - print(result) diff --git a/contrib/PanopticDeepLab/utils/evaluation/panoptic.py b/contrib/PanopticDeepLab/utils/evaluation/panoptic.py index 9c930bcc69..16666c6465 100644 --- a/contrib/PanopticDeepLab/utils/evaluation/panoptic.py +++ b/contrib/PanopticDeepLab/utils/evaluation/panoptic.py @@ -208,13 +208,3 @@ def evaluate(self): if name == 'All': results['per_class'] = per_class_results return OrderedDict(pan_seg=results) - - -if __name__ == '__main__': - panoptic_metirc = PanopticEvaluator(2, [1]) - pred = np.zeros((100, 100)) - gt = np.zeros((100, 100)) - pred[0:50, 0:50] = 1 - gt[0:60, 0:60] = 1 - panoptic_metirc.update(pred, gt) - print(panoptic_metirc.evaluate()) From fe2e107db13106681f6ef88da3002271c6791c1e Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 11 May 2021 11:05:17 +0800 Subject: [PATCH 107/210] mv loss to paddleseg --- contrib/PanopticDeepLab/models/__init__.py | 2 - .../PanopticDeepLab/models/losses/__init__.py | 17 ---- .../models/losses/cross_entropy_loss.py | 77 ------------------- paddleseg/models/losses/__init__.py | 2 + paddleseg/models/losses/cross_entropy_loss.py | 20 ++++- .../models/losses/l1_loss.py | 0 .../models/losses/mean_square_error_loss.py | 0 7 files changed, 18 insertions(+), 100 deletions(-) delete mode 100644 contrib/PanopticDeepLab/models/losses/__init__.py delete mode 100644 contrib/PanopticDeepLab/models/losses/cross_entropy_loss.py rename {contrib/PanopticDeepLab => paddleseg}/models/losses/l1_loss.py (100%) rename {contrib/PanopticDeepLab => paddleseg}/models/losses/mean_square_error_loss.py (100%) diff --git a/contrib/PanopticDeepLab/models/__init__.py b/contrib/PanopticDeepLab/models/__init__.py index 28dda451ab..af512e6bed 100644 --- a/contrib/PanopticDeepLab/models/__init__.py +++ b/contrib/PanopticDeepLab/models/__init__.py @@ -12,6 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from .losses import * - from .panoptic_deeplab import PanopticDeepLab diff --git a/contrib/PanopticDeepLab/models/losses/__init__.py b/contrib/PanopticDeepLab/models/losses/__init__.py deleted file mode 100644 index e4d5cc9e76..0000000000 --- a/contrib/PanopticDeepLab/models/losses/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .cross_entropy_loss import CrossEntropyLoss -from .mean_square_error_loss import MSELoss -from .l1_loss import L1Loss diff --git a/contrib/PanopticDeepLab/models/losses/cross_entropy_loss.py b/contrib/PanopticDeepLab/models/losses/cross_entropy_loss.py deleted file mode 100644 index 11b57337d3..0000000000 --- a/contrib/PanopticDeepLab/models/losses/cross_entropy_loss.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -from paddle import nn -import paddle.nn.functional as F - -from paddleseg.cvlibs import manager - -# Repetition in manager.LOSSES, remove before adding. -manager.LOSSES.components_dict.pop('CrossEntropyLoss') - - -@manager.LOSSES.add_component -class CrossEntropyLoss(nn.Layer): - """ - Implements the cross entropy loss function. - - Args: - ignore_index (int64): Specifies a target value that is ignored - and does not contribute to the input gradient. Default ``255``. - """ - - def __init__(self, ignore_index=255, top_k_percent_pixels=1.0): - super(CrossEntropyLoss, self).__init__() - self.ignore_index = ignore_index - self.top_k_percent_pixels = top_k_percent_pixels - self.EPS = 1e-5 - - def forward(self, logit, label, semantic_weights): - """ - Forward computation. - - Args: - logit (Tensor): Logit tensor, the data type is float32, float64. Shape is - (N, C), where C is number of classes, and if shape is more than 2D, this - is (N, C, D1, D2,..., Dk), k >= 1. - label (Tensor): Label tensor, the data type is int64. Shape is (N), where each - value is 0 <= label[i] <= C-1, and if shape is more than 2D, this is - (N, D1, D2,..., Dk), k >= 1. 
- """ - if len(label.shape) != len(logit.shape): - label = paddle.unsqueeze(label, 1) - - logit = paddle.transpose(logit, [0, 2, 3, 1]) - label = paddle.transpose(label, [0, 2, 3, 1]) - loss = F.softmax_with_cross_entropy( - logit, label, ignore_index=self.ignore_index, axis=-1) - - mask = label != self.ignore_index - mask = paddle.cast(mask, 'float32') - loss = loss * mask - if semantic_weights is not None: - loss = loss.squeeze(-1) - loss = loss * semantic_weights - - label.stop_gradient = True - mask.stop_gradient = True - if self.top_k_percent_pixels == 1.0: - avg_loss = paddle.mean(loss) / (paddle.mean(mask) + self.EPS) - return avg_loss - - loss = loss.reshape((-1, )) - top_k_pixels = int(self.top_k_percent_pixels * loss.numel()) - loss, _ = paddle.topk(loss, top_k_pixels) - return loss.mean() diff --git a/paddleseg/models/losses/__init__.py b/paddleseg/models/losses/__init__.py index a0410448e8..b704e3dc4a 100644 --- a/paddleseg/models/losses/__init__.py +++ b/paddleseg/models/losses/__init__.py @@ -23,3 +23,5 @@ from .ohem_cross_entropy_loss import OhemCrossEntropyLoss from .decoupledsegnet_relax_boundary_loss import RelaxBoundaryLoss from .ohem_edge_attention_loss import OhemEdgeAttentionLoss +from .l1_loss import L1Loss +from .mean_square_error_loss import MSELoss diff --git a/paddleseg/models/losses/cross_entropy_loss.py b/paddleseg/models/losses/cross_entropy_loss.py index 9502e507b2..40117ba1ff 100644 --- a/paddleseg/models/losses/cross_entropy_loss.py +++ b/paddleseg/models/losses/cross_entropy_loss.py @@ -30,17 +30,20 @@ class CrossEntropyLoss(nn.Layer): Default ``None``. ignore_index (int64, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Default ``255``. + top_k_percent_pixels (float, optional): the value lies in [0.0, 1.0]. When its value < 1.0, only compute the loss for + the top k percent pixels (e.g., the top 20% pixels). This is useful for hard pixel mining. """ - def __init__(self, weight=None, ignore_index=255): + def __init__(self, weight=None, ignore_index=255, top_k_percent_pixels=1.0): super(CrossEntropyLoss, self).__init__() if weight is not None: weight = paddle.to_tensor(weight, dtype='float32') self.weight = weight self.ignore_index = ignore_index + self.top_k_percent_pixels = top_k_percent_pixels self.EPS = 1e-8 - def forward(self, logit, label): + def forward(self, logit, label, semantic_weights=None): """ Forward computation. 
@@ -74,8 +77,17 @@ def forward(self, logit, label):
         mask = label != self.ignore_index
         mask = paddle.cast(mask, 'float32')
         loss = loss * mask
-        avg_loss = paddle.mean(loss) / (paddle.mean(mask) + self.EPS)
+        if semantic_weights is not None:
+            loss = loss * semantic_weights
 
         label.stop_gradient = True
         mask.stop_gradient = True
-        return avg_loss
+        if self.top_k_percent_pixels == 1.0:
+            avg_loss = paddle.mean(loss) / (paddle.mean(mask) + self.EPS)
+            return avg_loss
+
+        loss = loss.reshape((-1, ))
+        top_k_pixels = int(self.top_k_percent_pixels * loss.numel())
+        loss, _ = paddle.topk(loss, top_k_pixels)
+
+        return loss.mean()
diff --git a/contrib/PanopticDeepLab/models/losses/l1_loss.py b/paddleseg/models/losses/l1_loss.py
similarity index 100%
rename from contrib/PanopticDeepLab/models/losses/l1_loss.py
rename to paddleseg/models/losses/l1_loss.py
diff --git a/contrib/PanopticDeepLab/models/losses/mean_square_error_loss.py b/paddleseg/models/losses/mean_square_error_loss.py
similarity index 100%
rename from contrib/PanopticDeepLab/models/losses/mean_square_error_loss.py
rename to paddleseg/models/losses/mean_square_error_loss.py

From c3abf0cd4f3bf5f105bba742a2bd55d5ced09cbb Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Wed, 19 May 2021 16:04:17 +0800
Subject: [PATCH 108/210] update copyright

---
 contrib/PanopticDeepLab/README.md                  |  2 +-
 contrib/PanopticDeepLab/core/__init__.py           |  2 +-
 contrib/PanopticDeepLab/core/infer.py              |  2 +-
 contrib/PanopticDeepLab/core/predict.py            |  2 +-
 contrib/PanopticDeepLab/core/train.py              |  2 +-
 contrib/PanopticDeepLab/core/val.py                |  2 +-
 contrib/PanopticDeepLab/datasets/__init__.py       |  2 +-
 .../datasets/cityscapes_panoptic.py                |  2 +-
 contrib/PanopticDeepLab/models/__init__.py         |  2 +-
 contrib/PanopticDeepLab/models/panoptic_deeplab.py |  2 +-
 contrib/PanopticDeepLab/predict.py                 |  2 +-
 contrib/PanopticDeepLab/train.py                   |  2 +-
 contrib/PanopticDeepLab/transforms/__init__.py     |  2 +-
 .../transforms/target_transforms.py                | 14 ++++++++++++++
 contrib/PanopticDeepLab/utils/__init__.py          |  2 +-
 .../PanopticDeepLab/utils/evaluation/__init__.py   |  2 +-
 .../PanopticDeepLab/utils/evaluation/instance.py   |  2 +-
 .../PanopticDeepLab/utils/evaluation/semantic.py   |  2 +-
 contrib/PanopticDeepLab/utils/visualize.py         |  2 +-
 contrib/PanopticDeepLab/val.py                     |  2 +-
 20 files changed, 33 insertions(+), 19 deletions(-)

diff --git a/contrib/PanopticDeepLab/README.md b/contrib/PanopticDeepLab/README.md
index 7c8ba47161..d4faa744fa 100644
--- a/contrib/PanopticDeepLab/README.md
+++ b/contrib/PanopticDeepLab/README.md
@@ -1,7 +1,7 @@
 # Panoptic DeepLab
 
-An implementation of the [Panoptic Deeplab](https://arxiv.org/abs/1911.10194) panoptic segmentation algorithm, based on paddle.
+An implementation of the [Panoptic Deeplab](https://arxiv.org/abs/1911.10194) panoptic segmentation algorithm, based on PaddlePaddle.
 
 Panoptic DeepLab was the first to demonstrate that a bottom-up algorithm can achieve state-of-the-art performance. It predicts three outputs: Semantic Segmentation, Center Prediction and Center Regression. Pixels of instance classes are grouped to their nearest instance center to produce the instance segmentation; the semantic and instance segmentation results are then fused by the majority-vote rule to obtain the final panoptic segmentation.
 It performs segmentation by assigning every pixel to either a class or an instance.
diff --git a/contrib/PanopticDeepLab/core/__init__.py b/contrib/PanopticDeepLab/core/__init__.py
index 35189064a6..3358db4d38 100644
--- a/contrib/PanopticDeepLab/core/__init__.py
+++ b/contrib/PanopticDeepLab/core/__init__.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/contrib/PanopticDeepLab/core/infer.py b/contrib/PanopticDeepLab/core/infer.py index 65d26b4869..c2da37c7ad 100644 --- a/contrib/PanopticDeepLab/core/infer.py +++ b/contrib/PanopticDeepLab/core/infer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/core/predict.py b/contrib/PanopticDeepLab/core/predict.py index df10de48a6..78b9b54ec2 100644 --- a/contrib/PanopticDeepLab/core/predict.py +++ b/contrib/PanopticDeepLab/core/predict.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/core/train.py b/contrib/PanopticDeepLab/core/train.py index 58ab85957d..a3bdaf966c 100644 --- a/contrib/PanopticDeepLab/core/train.py +++ b/contrib/PanopticDeepLab/core/train.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/core/val.py b/contrib/PanopticDeepLab/core/val.py index 472484f126..9e0f90b97b 100644 --- a/contrib/PanopticDeepLab/core/val.py +++ b/contrib/PanopticDeepLab/core/val.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/datasets/__init__.py b/contrib/PanopticDeepLab/datasets/__init__.py index fefa6a07ea..4f0f3a9500 100644 --- a/contrib/PanopticDeepLab/datasets/__init__.py +++ b/contrib/PanopticDeepLab/datasets/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py b/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py index c8a8f49d2d..6a56d7bcf7 100644 --- a/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py +++ b/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/models/__init__.py b/contrib/PanopticDeepLab/models/__init__.py index af512e6bed..44b46327e4 100644 --- a/contrib/PanopticDeepLab/models/__init__.py +++ b/contrib/PanopticDeepLab/models/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/models/panoptic_deeplab.py b/contrib/PanopticDeepLab/models/panoptic_deeplab.py index f40216777a..dfe13f1b4c 100644 --- a/contrib/PanopticDeepLab/models/panoptic_deeplab.py +++ b/contrib/PanopticDeepLab/models/panoptic_deeplab.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/predict.py b/contrib/PanopticDeepLab/predict.py index a7dc73c6b1..15e24d87c8 100644 --- a/contrib/PanopticDeepLab/predict.py +++ b/contrib/PanopticDeepLab/predict.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/train.py b/contrib/PanopticDeepLab/train.py index a34f216377..49d8a3e46f 100644 --- a/contrib/PanopticDeepLab/train.py +++ b/contrib/PanopticDeepLab/train.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/transforms/__init__.py b/contrib/PanopticDeepLab/transforms/__init__.py index af018889ae..67b27709ba 100644 --- a/contrib/PanopticDeepLab/transforms/__init__.py +++ b/contrib/PanopticDeepLab/transforms/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/transforms/target_transforms.py b/contrib/PanopticDeepLab/transforms/target_transforms.py index ce646f5ea1..6230ec1d98 100644 --- a/contrib/PanopticDeepLab/transforms/target_transforms.py +++ b/contrib/PanopticDeepLab/transforms/target_transforms.py @@ -1,3 +1,17 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import numpy as np diff --git a/contrib/PanopticDeepLab/utils/__init__.py b/contrib/PanopticDeepLab/utils/__init__.py index eede34cc67..894d8a7adf 100644 --- a/contrib/PanopticDeepLab/utils/__init__.py +++ b/contrib/PanopticDeepLab/utils/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/utils/evaluation/__init__.py b/contrib/PanopticDeepLab/utils/evaluation/__init__.py index 7c86ed1641..8cd9f71a3b 100644 --- a/contrib/PanopticDeepLab/utils/evaluation/__init__.py +++ b/contrib/PanopticDeepLab/utils/evaluation/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/utils/evaluation/instance.py b/contrib/PanopticDeepLab/utils/evaluation/instance.py index 1230c4d98e..9a604ccbfc 100644 --- a/contrib/PanopticDeepLab/utils/evaluation/instance.py +++ b/contrib/PanopticDeepLab/utils/evaluation/instance.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/utils/evaluation/semantic.py b/contrib/PanopticDeepLab/utils/evaluation/semantic.py index ca59a6a503..d47e6eef20 100644 --- a/contrib/PanopticDeepLab/utils/evaluation/semantic.py +++ b/contrib/PanopticDeepLab/utils/evaluation/semantic.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/utils/visualize.py b/contrib/PanopticDeepLab/utils/visualize.py index 27c950ec0b..02fffaac94 100644 --- a/contrib/PanopticDeepLab/utils/visualize.py +++ b/contrib/PanopticDeepLab/utils/visualize.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/PanopticDeepLab/val.py b/contrib/PanopticDeepLab/val.py index 2553fbbec1..2af624ceb7 100644 --- a/contrib/PanopticDeepLab/val.py +++ b/contrib/PanopticDeepLab/val.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
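
Aside: patch 107 above folds hard-pixel mining into paddleseg's CrossEntropyLoss via `top_k_percent_pixels`. When the value is below 1.0, only the largest k percent of per-pixel losses are kept before averaging. A toy NumPy illustration of just that selection step, with made-up loss values (not the library code itself):

```python
import numpy as np

# Made-up per-pixel cross-entropy values for ten pixels.
per_pixel_loss = np.array([0.10, 0.05, 2.30, 0.20, 1.70, 0.02, 0.40, 3.10, 0.30, 0.15])

# top_k_percent_pixels = 0.2 keeps only the hardest 20% of the pixels.
k = int(0.2 * per_pixel_loss.size)       # -> 2 pixels
hardest = np.sort(per_pixel_loss)[-k:]   # -> [2.3, 3.1]

print(hardest.mean())         # 2.7, the mined loss
print(per_pixel_loss.mean())  # 0.832, the plain mean for comparison
```
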
From def908a48f7d950886da8f514af9b73a736e760d Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 19 May 2021 16:06:34 +0800 Subject: [PATCH 109/210] format import --- contrib/PanopticDeepLab/predict.py | 4 ++-- contrib/PanopticDeepLab/train.py | 4 ++-- contrib/PanopticDeepLab/val.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/contrib/PanopticDeepLab/predict.py b/contrib/PanopticDeepLab/predict.py index 15e24d87c8..69b1c0b5f4 100644 --- a/contrib/PanopticDeepLab/predict.py +++ b/contrib/PanopticDeepLab/predict.py @@ -20,8 +20,8 @@ from paddleseg.utils import get_sys_env, logger, config_check from core import predict -from datasets import * -from models import * +from datasets import CityscapesPanoptic +from models import PanopticDeepLab def parse_args(): diff --git a/contrib/PanopticDeepLab/train.py b/contrib/PanopticDeepLab/train.py index 49d8a3e46f..7adf32edde 100644 --- a/contrib/PanopticDeepLab/train.py +++ b/contrib/PanopticDeepLab/train.py @@ -19,8 +19,8 @@ from paddleseg.utils import get_sys_env, logger, config_check from core import train -from datasets import * -from models import * +from datasets import CityscapesPanoptic +from models import PanopticDeepLab def parse_args(): diff --git a/contrib/PanopticDeepLab/val.py b/contrib/PanopticDeepLab/val.py index 2af624ceb7..5d33ecb8db 100644 --- a/contrib/PanopticDeepLab/val.py +++ b/contrib/PanopticDeepLab/val.py @@ -21,8 +21,8 @@ from paddleseg.utils import get_sys_env, logger, config_check from core import evaluate -from datasets import * -from models import * +from datasets import CityscapesPanoptic +from models import PanopticDeepLab def parse_args(): From d831eba80c4152344f42c87a81525dba90bf6dd1 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 19 May 2021 16:17:40 +0800 Subject: [PATCH 110/210] update annotations --- contrib/PanopticDeepLab/core/infer.py | 6 +++++- .../datasets/cityscapes_panoptic.py | 2 ++ .../transforms/target_transforms.py | 19 +++++++++++++++---- .../utils/evaluation/instance.py | 7 +++++++ .../utils/evaluation/semantic.py | 11 +++++------ contrib/PanopticDeepLab/utils/visualize.py | 2 ++ 6 files changed, 36 insertions(+), 11 deletions(-) diff --git a/contrib/PanopticDeepLab/core/infer.py b/contrib/PanopticDeepLab/core/infer.py index c2da37c7ad..98a3e8a2ff 100644 --- a/contrib/PanopticDeepLab/core/infer.py +++ b/contrib/PanopticDeepLab/core/infer.py @@ -24,9 +24,11 @@ def get_reverse_list(ori_shape, transforms): """ get reverse list of transform. + Args: ori_shape (list): Origin shape of image. transforms (list): List of transform. + Returns: list: List of tuple, there are two format: ('resize', (h, w)) The image shape before resize, @@ -91,11 +93,13 @@ def reverse_transform(pred, ori_shape, transforms): def find_instance_center(ctr_hmp, threshold=0.1, nms_kernel=3, top_k=None): """ Find the center points from the center heatmap. - Arguments: + + Args: ctr_hmp (Tensor): A Tensor of shape [1, H, W] of raw center heatmap output. threshold (float, optional): Threshold applied to center heatmap score. Default: 0.1. nms_kernel (int, optional): NMS max pooling kernel size. Default: 3. top_k (int, optional): An Integer, top k centers to keep. Default: None + Returns: Tensor: A Tensor of shape [K, 2] where K is the number of center points. The order of second dim is (y, x). 
""" diff --git a/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py b/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py index 6a56d7bcf7..59141367c0 100644 --- a/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py +++ b/contrib/PanopticDeepLab/datasets/cityscapes_panoptic.py @@ -141,8 +141,10 @@ def __init__(self, def rgb2id(color): """Converts the color to panoptic label. Color is created by `color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]`. + Args: color: Ndarray or a tuple, color encoded image. + Returns: Panoptic label. """ diff --git a/contrib/PanopticDeepLab/transforms/target_transforms.py b/contrib/PanopticDeepLab/transforms/target_transforms.py index 6230ec1d98..9b914c297a 100644 --- a/contrib/PanopticDeepLab/transforms/target_transforms.py +++ b/contrib/PanopticDeepLab/transforms/target_transforms.py @@ -19,7 +19,8 @@ class PanopticTargetGenerator(object): """ Generates panoptic training target for Panoptic-DeepLab. Annotation is assumed to have Cityscapes format. - Arguments: + + Args: ignore_index: Integer, the ignore label for semantic segmentation. rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the corresponding panoptic label. @@ -60,6 +61,7 @@ def __call__(self, panoptic, segments): """Generates the training target. reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18 + Args: panoptic: numpy.array, colored image encoding panoptic label. segments: List, a list of dictionary containing information of every segment, it has fields: @@ -68,6 +70,7 @@ def __call__(self, panoptic, segments): - area: segment area. - bbox: segment bounding box. - iscrowd: crowd region. + Returns: A dictionary with fields: - semantic: Tensor, semantic label, shape=(H, W). @@ -182,7 +185,8 @@ class SemanticTargetGenerator(object): """ Generates semantic training target only for Panoptic-DeepLab (no instance). Annotation is assumed to have Cityscapes format. - Arguments: + + Args: ignore_index: Integer, the ignore label for semantic segmentation. rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the corresponding panoptic label. @@ -196,6 +200,7 @@ def __call__(self, panoptic, segments): """Generates the training target. reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18 + Args: panoptic: numpy.array, colored image encoding panoptic label. segments: List, a list of dictionary containing information of every segment, it has fields: @@ -204,6 +209,7 @@ def __call__(self, panoptic, segments): - area: segment area. - bbox: segment bounding box. - iscrowd: crowd region. + Returns: A dictionary with fields: - semantic: Tensor, semantic label, shape=(H, W). @@ -221,7 +227,8 @@ class InstanceTargetGenerator(object): """ Generates instance target only for Panoptic-DeepLab. Annotation is assumed to have Cityscapes format. - Arguments: + + Args: rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the corresponding panoptic label. """ @@ -231,8 +238,10 @@ def __init__(self, rgb2id): def __call__(self, panoptic): """Generates the instance target. 
+ Args: panoptic: numpy.array, colored image encoding panoptic label. + Returns: A dictionary with fields: - instance: Tensor, shape=(H, W). 0 is background. 1, 2, 3 ... is instance, so it is class agnostic. @@ -253,7 +262,8 @@ class RawPanopticTargetGenerator(object): """ Generator the panoptc ground truth for evaluation, where values are 0,1,2,3,... 11000, 11001, ..., 18000, 18001, ignore_index(general 255). - Arguments: + + Args: ignore_index: Integer, the ignore label for semantic segmentation. rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the corresponding panoptic label. @@ -276,6 +286,7 @@ def __call__(self, panoptic, segments): - area: segment area. - bbox: segment bounding box. - iscrowd: crowd region. + Returns: A dictionary with fields: - panoptic: Tensor, panoptic label, shape=(H, W). diff --git a/contrib/PanopticDeepLab/utils/evaluation/instance.py b/contrib/PanopticDeepLab/utils/evaluation/instance.py index 9a604ccbfc..c00cf89e87 100644 --- a/contrib/PanopticDeepLab/utils/evaluation/instance.py +++ b/contrib/PanopticDeepLab/utils/evaluation/instance.py @@ -41,6 +41,7 @@ class InstanceEvaluator(object): 2.) remove matches that do not satisfy the overlap 3.) mark non-matched predictions as false positive In the processing, 0 represent the first class of 'thing'. So the label will less 1 than the dataset. + Args: num_classes (int): The unique number of target classes. Exclude background class, labeled 0 usually. overlaps (float|list): The threshold of IoU. @@ -243,9 +244,11 @@ def get_instances(self, preds, gts, ignore_mask=None): In this method, we create two dicts of list - pred_instances: contains all predictions and their associated gt - gtInstances: contains all gt instances and their associated predictions + Args: preds (list): Prediction of image. gts (list): Ground truth of image. + Return: dict: pred_instances, the type is dict(list(dict))), e.g. {0: [{'pred_id':0, 'label':0', 'pixel_count':100, 'confidence': 0.9, 'void_intersection': 0, @@ -307,9 +310,11 @@ def get_instances(self, preds, gts, ignore_mask=None): def convert_gt_map(seg_map, ins_map): """ Convet the ground truth with format (h*w) to the format that satisfies the AP calculation. + Args: seg_map (np.ndarray): the sementic segmentation map with shape H * W. Value is 0, 1, 2, ... ins_map (np.ndarray): the instance segmentation map with shape H * W. Value is 0, 1, 2, ... + Returns: list: tuple list like: [(label, mask), ...] """ @@ -327,9 +332,11 @@ def convert_gt_map(seg_map, ins_map): def convert_pred_map(seg_pred, pan_pred): """ Convet the predictions with format (h*w) to the format that satisfies the AP calculation. + Args: seg_pred (np.ndarray): the sementic segmentation map with shape C * H * W. Value is probability. pan_pred (np.ndarray): panoptic predictions, void_label, stuff_id * label_divisor, thing_id * label_divisor + ins_id , ins_id >= 1. + Returns: list: tuple list like: [(label, score, mask), ...] """ diff --git a/contrib/PanopticDeepLab/utils/evaluation/semantic.py b/contrib/PanopticDeepLab/utils/evaluation/semantic.py index d47e6eef20..43f999442a 100644 --- a/contrib/PanopticDeepLab/utils/evaluation/semantic.py +++ b/contrib/PanopticDeepLab/utils/evaluation/semantic.py @@ -25,15 +25,14 @@ class SemanticEvaluator: """ Evaluate semantic segmentation + + Args: + num_classes (int): number of classes + ignore_index (int): value in semantic segmentation ground truth. Predictions for the + corresponding pixels should be ignored. 
""" def __init__(self, num_classes, ignore_index=255): - """ - Args: - num_classes (int): number of classes - ignore_index (int): value in semantic segmentation ground truth. Predictions for the - corresponding pixels should be ignored. - """ self._num_classes = num_classes self._ignore_index = ignore_index self._N = num_classes + 1 # store ignore label in the last class diff --git a/contrib/PanopticDeepLab/utils/visualize.py b/contrib/PanopticDeepLab/utils/visualize.py index 02fffaac94..fe88940210 100644 --- a/contrib/PanopticDeepLab/utils/visualize.py +++ b/contrib/PanopticDeepLab/utils/visualize.py @@ -49,9 +49,11 @@ def random_color(rgb=False, maximum=255): """ Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L111 + Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 + Returns: ndarray: a vector of 3 numbers """ From a377547fa0317a442a0236d140dcf86a4e276d6e Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 19 May 2021 16:52:41 +0800 Subject: [PATCH 111/210] update annotations --- contrib/PanopticDeepLab/core/infer.py | 2 +- .../models/panoptic_deeplab.py | 20 +++++++--- .../transforms/target_transforms.py | 39 ++++++++++--------- .../utils/evaluation/instance.py | 5 ++- .../utils/evaluation/panoptic.py | 6 +-- .../utils/evaluation/semantic.py | 4 +- contrib/PanopticDeepLab/utils/visualize.py | 12 +++--- 7 files changed, 50 insertions(+), 38 deletions(-) diff --git a/contrib/PanopticDeepLab/core/infer.py b/contrib/PanopticDeepLab/core/infer.py index 98a3e8a2ff..8ac1d800fe 100644 --- a/contrib/PanopticDeepLab/core/infer.py +++ b/contrib/PanopticDeepLab/core/infer.py @@ -307,7 +307,7 @@ def inference( threshold (float, optional): A Float, threshold applied to center heatmap score. Default: 0.1. nms_kernel (int, optional): An Integer, NMS max pooling kernel size. Default: 3. top_k (int, optional): An Integer, top k centers to keep. Default: None. - ori_shape (list): Origin shape of image. + ori_shape (list. optional): Origin shape of image. Default: None. Returns: list: A list of [semantic, semantic_softmax, instance, panoptic, ctr_hmp]. diff --git a/contrib/PanopticDeepLab/models/panoptic_deeplab.py b/contrib/PanopticDeepLab/models/panoptic_deeplab.py index dfe13f1b4c..27f041b9c7 100644 --- a/contrib/PanopticDeepLab/models/panoptic_deeplab.py +++ b/contrib/PanopticDeepLab/models/panoptic_deeplab.py @@ -38,12 +38,14 @@ class PanopticDeepLab(nn.Layer): num_classes (int): The unique number of target classes. backbone (paddle.nn.Layer): Backbone network, currently support Resnet50_vd/Resnet101_vd/Xception65. backbone_indices (tuple, optional): Two values in the tuple indicate the indices of output of backbone. - Default: (0, 3). + Default: (2, 1, 0, 3). aspp_ratios (tuple, optional): The dilation rate using in ASSP module. If output_stride=16, aspp_ratios should be set as (1, 6, 12, 18). If output_stride=8, aspp_ratios is (1, 12, 24, 36). Default: (1, 6, 12, 18). aspp_out_channels (int, optional): The output channels of ASPP module. Default: 256. + decoder_channels (int, optional): The channels of Decoder. Default: 256. + low_level_channels_projects (list, opitonal). The channels of low level features to output. Defualt: None. align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. pretrained (str, optional): The path or url of pretrained model. Default: None. 
@@ -78,9 +80,11 @@ def __init__(self,
 
     def _upsample_predictions(self, pred, input_shape):
         """Upsamples final prediction, with special handling to offset.
+
        Args:
            pred (dict): stores all output of the segmentation model.
            input_shape (tuple): spatial resolution of the desired shape.
+
        Returns:
            result (OrderedDict): upsampled dictionary.
        """
@@ -132,8 +136,10 @@ class PanopticDeepLabHead(nn.Layer):
         backbone_channels (tuple): The same length with "backbone_indices". It indicates the channels of corresponding index.
         aspp_ratios (tuple): The dilation rates using in ASSP module.
         aspp_out_channels (int): The output channels of ASPP module.
-        align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature
-            is even, e.g. 1024x512, otherwise it is True, e.g. 769x769.
+        decoder_channels (int, optional): The channels of Decoder. Default: 256.
+        align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
+            e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
+        low_level_channels_projects (list, optional): The channels of low level features to output. Default: None.
     """
 
     def __init__(self, num_classes, backbone_indices, backbone_channels,
@@ -223,6 +229,7 @@ class ASPPModule(nn.Layer):
         is even, e.g. 1024x512, otherwise it is True, e.g. 769x769.
         use_sep_conv (bool, optional): If using separable conv in ASPP module. Default: False.
         image_pooling (bool, optional): If augmented with image-level features. Default: False
+        drop_rate (float, optional): The drop rate. Default: 0.1.
     """
 
     def __init__(self,
@@ -304,7 +311,6 @@ class SinglePanopticDeepLabDecoder(nn.Layer):
     The DeepLabV3PHead implementation based on PaddlePaddle.
 
     Args:
-        num_classes (int): The unique number of target classes.
         backbone_indices (tuple): Two values in the tuple indicate the indices of output of backbone.
             the first index will be taken as a low-level feature in Decoder component; the second one will be taken as
             input of ASPP component.
@@ -315,8 +321,10 @@ class SinglePanopticDeepLabDecoder(nn.Layer):
         backbone_channels (tuple): The same length with "backbone_indices". It indicates the channels of corresponding index.
         aspp_ratios (tuple): The dilation rates using in ASSP module.
         aspp_out_channels (int): The output channels of ASPP module.
+        decoder_channels (int): The channels of decoder.
         align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature
             is even, e.g. 1024x512, otherwise it is True, e.g. 769x769.
+        low_level_channels_projects (list): The channels of low level features to output.
@@ -391,7 +399,9 @@ class SinglePanopticDeepLabHead(nn.Layer):
 
     Args:
         num_classes (int): The number of classes.
-        in_channels (int): The number of input channels in decoder module.
+        decoder_channels (int): The channels of decoder.
+        head_channels (int): The channels of head.
+        class_key (list): The key name of output by classifier.
diff --git a/contrib/PanopticDeepLab/transforms/target_transforms.py b/contrib/PanopticDeepLab/transforms/target_transforms.py
index 9b914c297a..8479093762 100644
--- a/contrib/PanopticDeepLab/transforms/target_transforms.py
+++ b/contrib/PanopticDeepLab/transforms/target_transforms.py
@@ -21,16 +21,16 @@ class PanopticTargetGenerator(object):
     Annotation is assumed to have Cityscapes format.
Args: - ignore_index: Integer, the ignore label for semantic segmentation. - rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the + ignore_index (int): The ignore label for semantic segmentation. + rgb2id (Function): Function, panoptic label is encoded in a colored image, this function convert color to the corresponding panoptic label. - thing_list: List, a list of thing classes - sigma: the sigma for Gaussian kernel. - ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch. - small_instance_area: Integer, indicates largest area for small instances. - small_instance_weight: Integer, indicates semantic loss weights for small instances. - ignore_crowd_in_semantic: Boolean, whether to ignore crowd region in semantic segmentation branch, - crowd region is ignored in the original TensorFlow implementation. + thing_list (list): A list of thing classes + sigma (int, optional): The sigma for Gaussian kernel. Default: 8. + ignore_stuff_in_offset (bool, optional): Whether to ignore stuff region when training the offset branch. Default: False. + small_instance_area (int, optional): Indicates largest area for small instances. Default: 0. + small_instance_weight (int, optional): Indicates semantic loss weights for small instances. Default: 1. + ignore_crowd_in_semantic (bool, optional): Whether to ignore crowd region in semantic segmentation branch, + crowd region is ignored in the original TensorFlow implementation. Default: False. """ def __init__(self, @@ -63,8 +63,8 @@ def __call__(self, panoptic, segments): reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18 Args: - panoptic: numpy.array, colored image encoding panoptic label. - segments: List, a list of dictionary containing information of every segment, it has fields: + panoptic (np.ndarray): Colored image encoding panoptic label. + segments (list): A list of dictionary containing information of every segment, it has fields: - id: panoptic id, after decoding `panoptic`. - category_id: semantic class id. - area: segment area. @@ -187,8 +187,8 @@ class SemanticTargetGenerator(object): Annotation is assumed to have Cityscapes format. Args: - ignore_index: Integer, the ignore label for semantic segmentation. - rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the + ignore_index (int): The ignore label for semantic segmentation. + rgb2id (function): Function, panoptic label is encoded in a colored image, this function convert color to the corresponding panoptic label. """ @@ -202,8 +202,8 @@ def __call__(self, panoptic, segments): reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18 Args: - panoptic: numpy.array, colored image encoding panoptic label. - segments: List, a list of dictionary containing information of every segment, it has fields: + panoptic (np.ndarray): Colored image encoding panoptic label. + segments (list): A list of dictionary containing information of every segment, it has fields: - id: panoptic id, after decoding `panoptic`. - category_id: semantic class id. - area: segment area. @@ -229,7 +229,7 @@ class InstanceTargetGenerator(object): Annotation is assumed to have Cityscapes format. 
 
     Args:
-        rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the
+        rgb2id (function): Function, panoptic label is encoded in a colored image, this function convert color to the
             corresponding panoptic label.
     """
 
@@ -240,7 +240,7 @@ def __call__(self, panoptic):
         """Generates the instance target.
 
         Args:
-            panoptic: numpy.array, colored image encoding panoptic label.
+            panoptic (np.ndarray): Colored image encoding panoptic label.
 
         Returns:
             A dictionary with fields:
@@ -264,9 +264,10 @@ class RawPanopticTargetGenerator(object):
     11000, 11001, ..., 18000, 18001, ignore_index(general 255).
 
     Args:
-        ignore_index: Integer, the ignore label for semantic segmentation.
-        rgb2id: Function, panoptic label is encoded in a colored image, this function convert color to the
+        ignore_index (int): The ignore label for semantic segmentation.
+        rgb2id (function): Function, panoptic label is encoded in a colored image, this function convert color to the
             corresponding panoptic label.
+        label_divisor (int, optional): An integer used to convert panoptic id = semantic id * label_divisor + instance_id. Default: 1000.
     """
 
     def __init__(self, ignore_index, rgb2id, label_divisor=1000):
diff --git a/contrib/PanopticDeepLab/utils/evaluation/instance.py b/contrib/PanopticDeepLab/utils/evaluation/instance.py
index c00cf89e87..97e27d6018 100644
--- a/contrib/PanopticDeepLab/utils/evaluation/instance.py
+++ b/contrib/PanopticDeepLab/utils/evaluation/instance.py
@@ -44,8 +44,8 @@ class InstanceEvaluator(object):
 
     Args:
         num_classes (int): The unique number of target classes. Exclude background class, labeled 0 usually.
-        overlaps (float|list): The threshold of IoU.
-        thing_list (list|None): Thing class, only calculate AP for the thing class.
+        overlaps (float|list, optional): The threshold of IoU. Default: 0.5.
+        thing_list (list|None, optional): Thing class, only calculate AP for the thing class. Default: None.
     """
 
     def __init__(self, num_classes, overlaps=0.5, thing_list=None):
@@ -248,6 +248,7 @@ def get_instances(self, preds, gts, ignore_mask=None):
         Args:
             preds (list): Prediction of image.
             gts (list): Ground truth of image.
+            ignore_mask (np.ndarray, optional): Ignore mask. Default: None.
 
         Return:
             dict: pred_instances, the type is dict(list(dict))), e.g. {0: [{'pred_id':0, 'label':0',
diff --git a/contrib/PanopticDeepLab/utils/evaluation/panoptic.py b/contrib/PanopticDeepLab/utils/evaluation/panoptic.py
index 16666c6465..01fd6f75b9 100644
--- a/contrib/PanopticDeepLab/utils/evaluation/panoptic.py
+++ b/contrib/PanopticDeepLab/utils/evaluation/panoptic.py
@@ -63,9 +63,9 @@ def pq_average(self, isthing=None, thing_list=None):
 
         Args:
             num_classes (int): number of classes.
-            isthing (bool|None): calculate average pq for thing class if isthing is True,
-                for stuff class if isthing is False and for all if isthing is None. Default: None.
-            thing_list (list|None): A list of thing class. It should be provided when isthing is equal to True or False
+            isthing (bool|None, optional): calculate average pq for thing class if isthing is True,
+                for stuff class if isthing is False and for all if isthing is None. Default: None.
+            thing_list (list|None, optional): A list of thing class. It should be provided when isthing is equal to True or False. Default: None.
""" pq, sq, rq, n = 0, 0, 0, 0 per_class_results = {} diff --git a/contrib/PanopticDeepLab/utils/evaluation/semantic.py b/contrib/PanopticDeepLab/utils/evaluation/semantic.py index 43f999442a..79a004124d 100644 --- a/contrib/PanopticDeepLab/utils/evaluation/semantic.py +++ b/contrib/PanopticDeepLab/utils/evaluation/semantic.py @@ -28,8 +28,8 @@ class SemanticEvaluator: Args: num_classes (int): number of classes - ignore_index (int): value in semantic segmentation ground truth. Predictions for the - corresponding pixels should be ignored. + ignore_index (int, optional): value in semantic segmentation ground truth. Predictions for the + corresponding pixels should be ignored. Default: 255. """ def __init__(self, num_classes, ignore_index=255): diff --git a/contrib/PanopticDeepLab/utils/visualize.py b/contrib/PanopticDeepLab/utils/visualize.py index fe88940210..6b14215c87 100644 --- a/contrib/PanopticDeepLab/utils/visualize.py +++ b/contrib/PanopticDeepLab/utils/visualize.py @@ -51,8 +51,8 @@ def random_color(rgb=False, maximum=255): Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L111 Args: - rgb (bool): whether to return RGB colors or BGR colors. - maximum (int): either 255 or 1 + rgb (bool, optional): whether to return RGB colors or BGR colors. Default: False. + maximum (int, optional): either 255 or 1. Default: 255. Returns: ndarray: a vector of 3 numbers @@ -100,7 +100,7 @@ def visualize_semantic(semantic, save_path, colormap, image=None, weight=0.5): colormap(np.ndarray): A color map for visualization. image(np.ndarray, optional): Origin image to prediction, merge semantic with image if provided. Default: None. - weight(float, optional): The image weight when merge semantic with image. Default: 0.6. + weight(float, optional): The image weight when merge semantic with image. Default: 0.5. """ semantic = semantic.astype('uint8') colored_semantic = colormap[semantic] @@ -120,7 +120,7 @@ def visualize_instance(instance, save_path, stuff_id=0, image=None, weight=0.5): stuff_id(int, optional): Id for background that not want to plot. image(np.ndarray, optional): Origin image to prediction, merge instance with image if provided. Default: None. - weight(float, optional): The image weight when merge instance with image. Default: 0.6. + weight(float, optional): The image weight when merge instance with image. Default: 0.5. """ # Add color map for instance segmentation result. ids = np.unique(instance) @@ -157,8 +157,8 @@ def visualize_panoptic(panoptic, colormap(np.ndarray): A color map for visualization. image(np.ndarray, optional): Origin image to prediction, merge panoptic with image if provided. Default: None. - weight(float, optional): The image weight when merge panoptic with image. Default: 0.6. - ignore_index(int, optional): Specifies a target value that is ignored. + weight(float, optional): The image weight when merge panoptic with image. Default: 0.5. + ignore_index(int, optional): Specifies a target value that is ignored. Default: 255. 
""" colored_panoptic = np.zeros((panoptic.shape[0], panoptic.shape[1], 3), dtype=np.uint8) From 4f5e3013eab36967c2b8ed3a8a41ce1272f3cd5c Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 19 May 2021 16:56:33 +0800 Subject: [PATCH 112/210] update copyright and annotations --- paddleseg/models/losses/l1_loss.py | 8 ++++++-- paddleseg/models/losses/mean_square_error_loss.py | 7 ++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/paddleseg/models/losses/l1_loss.py b/paddleseg/models/losses/l1_loss.py index 5fbbae2880..f0f58454b8 100644 --- a/paddleseg/models/losses/l1_loss.py +++ b/paddleseg/models/losses/l1_loss.py @@ -1,3 +1,6 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # @@ -30,14 +33,15 @@ class L1Loss(nn.MSELoss): If `reduction` set to ``'sum'``, the loss is: .. math:: Out = SUM(\lvert input - label\rvert) - Parameters: + + Args: reduction (str, optional): Indicate the reduction to apply to the loss, the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. If `reduction` is ``'none'``, the unreduced loss is returned; If `reduction` is ``'mean'``, the reduced mean loss is returned. If `reduction` is ``'sum'``, the reduced sum loss is returned. Default is ``'mean'``. - name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. + ignore_index (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Default: 255. Shape: input (Tensor): The input tensor. The shapes is [N, *], where N is batch size and `*` means any number of additional dimensions. It's data type should be float32, float64, int32, int64. label (Tensor): label. The shapes is [N, *], same shape as ``input`` . It's data type should be float32, float64, int32, int64. diff --git a/paddleseg/models/losses/mean_square_error_loss.py b/paddleseg/models/losses/mean_square_error_loss.py index fa66c9c5f3..e6fc8918c2 100644 --- a/paddleseg/models/losses/mean_square_error_loss.py +++ b/paddleseg/models/losses/mean_square_error_loss.py @@ -1,3 +1,6 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # @@ -31,13 +34,15 @@ class MSELoss(nn.MSELoss): .. math:: Out = \operatorname{sum}((input - label)^2) where `input` and `label` are `float32` tensors of same shape. - Parameters: + + Args: reduction (string, optional): The reduction method for the output, could be 'none' | 'mean' | 'sum'. If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned. If :attr:`size_average` is ``'sum'``, the reduced sum loss is returned. If :attr:`reduction` is ``'none'``, the unreduced loss is returned. Default is ``'mean'``. + ignore_index (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Default: 255. 
Shape: input (Tensor): Input tensor, the data type is float32 or float64 label (Tensor): Label tensor, the data type is float32 or float64 From 42c248ce3511a61bbb4b0b3ffa6d25b90f488ff4 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 4 Jun 2021 20:14:01 +0800 Subject: [PATCH 113/210] first add matting --- contrib/matting/core/__init__.py | 1 + contrib/matting/core/train.py | 301 ++++++++++++++++++ contrib/matting/dataset.py | 128 ++++++++ contrib/matting/model/__init__.py | 17 + contrib/matting/model/dim.py | 153 +++++++++ contrib/matting/model/loss.py | 47 +++ contrib/matting/model/vgg.py | 168 ++++++++++ contrib/matting/tools/gen_dataset/gen_bg.py | 107 +++++++ .../matting/tools/gen_dataset/gen_dataset.py | 224 +++++++++++++ .../matting/tools/gen_dataset/gen_fg_alpha.py | 58 ++++ contrib/matting/tools/update_vgg16_params.py | 57 ++++ contrib/matting/train.py | 84 +++++ contrib/matting/transforms.py | 187 +++++++++++ contrib/matting/utils.py | 24 ++ 14 files changed, 1556 insertions(+) create mode 100644 contrib/matting/core/__init__.py create mode 100644 contrib/matting/core/train.py create mode 100644 contrib/matting/dataset.py create mode 100644 contrib/matting/model/__init__.py create mode 100644 contrib/matting/model/dim.py create mode 100644 contrib/matting/model/loss.py create mode 100644 contrib/matting/model/vgg.py create mode 100644 contrib/matting/tools/gen_dataset/gen_bg.py create mode 100644 contrib/matting/tools/gen_dataset/gen_dataset.py create mode 100644 contrib/matting/tools/gen_dataset/gen_fg_alpha.py create mode 100644 contrib/matting/tools/update_vgg16_params.py create mode 100644 contrib/matting/train.py create mode 100644 contrib/matting/transforms.py create mode 100644 contrib/matting/utils.py diff --git a/contrib/matting/core/__init__.py b/contrib/matting/core/__init__.py new file mode 100644 index 0000000000..6e3eb12999 --- /dev/null +++ b/contrib/matting/core/__init__.py @@ -0,0 +1 @@ +from .train import train diff --git a/contrib/matting/core/train.py b/contrib/matting/core/train.py new file mode 100644 index 0000000000..af7ccaba3c --- /dev/null +++ b/contrib/matting/core/train.py @@ -0,0 +1,301 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
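The training code in this new file supervises only the trimap's unknown region: trimaps here use 0 for background, 255 for foreground, and 128 for the unknown band (see gen_trimap in dataset.py below). A tiny NumPy sketch of that masking convention, with made-up values:

    import numpy as np

    # A toy trimap: background / unknown / foreground columns.
    trimap = np.array([[0, 128, 255],
                       [0, 128, 255]])
    mask = trimap == 128     # matches `label_dict['trimap'] == 128` below
    print(mask.sum())        # 2: only the unknown pixels contribute to the loss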
+
+import os
+import time
+from collections import deque
+import shutil
+
+import paddle
+import paddle.nn.functional as F
+from paddleseg.utils import TimeAverager, calculate_eta, resume, logger
+
+# from core.val import evaluate
+
+
+def loss_computation(logit_dict, label_dict, losses):
+    """
+    According to the losses, select the corresponding logits and labels.
+    """
+    loss_list = []
+    # raw alpha
+    mask = label_dict['trimap'] == 128
+    alpha_raw_loss = losses['types'][0](logit_dict['alpha_raw'],
+                                        label_dict['alpha'] / 255, mask)
+    alpha_raw_loss = losses['coef'][0] * alpha_raw_loss
+    loss_list.append(alpha_raw_loss)
+
+    # comp loss (the second entry of `losses` is the compositional term)
+    comp_pred = logit_dict['alpha_raw'] * label_dict['fg'] + (
+        1 - logit_dict['alpha_raw']) * label_dict['bg']
+    comp_loss = losses['types'][1](comp_pred, label_dict['img'], mask)
+    comp_loss = losses['coef'][1] * comp_loss
+    loss_list.append(comp_loss)
+
+    # pred alpha
+    alpha_pred_loss = losses['types'][2](logit_dict['alpha_pred'],
+                                         label_dict['alpha'] / 255, mask)
+    alpha_pred_loss = losses['coef'][2] * alpha_pred_loss
+    loss_list.append(alpha_pred_loss)
+
+    return loss_list
+
+
+def train(model,
+          train_dataset,
+          val_dataset=None,
+          optimizer=None,
+          save_dir='output',
+          iters=10000,
+          batch_size=2,
+          resume_model=None,
+          save_interval=1000,
+          log_iters=10,
+          num_workers=0,
+          use_vdl=False,
+          losses=None,
+          keep_checkpoint_max=5):
+    """
+    Launch training.
+
+    Args:
+        model(nn.Layer): A semantic segmentation model.
+        train_dataset (paddle.io.Dataset): Used to read and process training datasets.
+        val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets.
+        optimizer (paddle.optimizer.Optimizer): The optimizer.
+        save_dir (str, optional): The directory for saving the model snapshot. Default: 'output'.
+        iters (int, optional): How many iters to train the model. Default: 10000.
+        batch_size (int, optional): Mini batch size of one gpu or cpu. Default: 2.
+        resume_model (str, optional): The path of resume model.
+        save_interval (int, optional): How many iters to save a model snapshot once during training. Default: 1000.
+        log_iters (int, optional): Display logging information at every log_iters. Default: 10.
+        num_workers (int, optional): Num workers for data loader. Default: 0.
+        use_vdl (bool, optional): Whether to record the data to VisualDL during training. Default: False.
+        losses (dict): A dict including 'types' and 'coef'. The length of coef should equal to 1 or len(losses['types']).
+            The 'types' item is a list of object of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficient.
+        keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5.
+    """
+    model.train()
+    nranks = paddle.distributed.ParallelEnv().nranks
+    local_rank = paddle.distributed.ParallelEnv().local_rank
+
+    start_iter = 0
+    if resume_model is not None:
+        start_iter = resume(model, optimizer, resume_model)
+
+    if not os.path.isdir(save_dir):
+        if os.path.exists(save_dir):
+            os.remove(save_dir)
+        os.makedirs(save_dir)
+
+    if nranks > 1:
+        # Initialize parallel environment if not done.
+        if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
+        ):
+            paddle.distributed.init_parallel_env()
+            ddp_model = paddle.DataParallel(model)
+        else:
+            ddp_model = paddle.DataParallel(model)
+
+    batch_sampler = paddle.io.DistributedBatchSampler(
+        train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
+
+    loader = paddle.io.DataLoader(
+        train_dataset,
+        batch_sampler=batch_sampler,
+        num_workers=num_workers,
+        return_list=True,
+    )
+
+    if use_vdl:
+        from visualdl import LogWriter
+        log_writer = LogWriter(save_dir)
+
+    avg_loss = 0.0
+    avg_loss_list = []
+    iters_per_epoch = len(batch_sampler)
+    # best_pq = -1.0
+    # best_model_iter = -1
+    reader_cost_averager = TimeAverager()
+    batch_cost_averager = TimeAverager()
+    save_models = deque()
+    batch_start = time.time()
+
+    iter = start_iter
+    while iter < iters:
+        for data in loader:
+            iter += 1
+            if iter > iters:
+                break
+            reader_cost_averager.record(time.time() - batch_start)
+
+            # model input
+            if nranks > 1:
+                logit_dict = ddp_model(data)
+            else:
+                logit_dict = model(data)
+
+            # Obtain logit_dict and label_dict (data) for loss computation.
+            loss_list = loss_computation(logit_dict, data, losses)
+            loss = sum(loss_list)
+            loss.backward()
+
+            optimizer.step()
+            lr = optimizer.get_lr()
+            if isinstance(optimizer._learning_rate,
+                          paddle.optimizer.lr.LRScheduler):
+                optimizer._learning_rate.step()
+            model.clear_gradients()
+            avg_loss += loss.numpy()[0]
+            if not avg_loss_list:
+                avg_loss_list = [l.numpy() for l in loss_list]
+            else:
+                for i in range(len(loss_list)):
+                    avg_loss_list[i] += loss_list[i].numpy()
+            batch_cost_averager.record(
+                time.time() - batch_start, num_samples=batch_size)
+
+            if (iter) % log_iters == 0 and local_rank == 0:
+                avg_loss /= log_iters
+                avg_loss_list = [l[0] / log_iters for l in avg_loss_list]
+                remain_iters = iters - iter
+                avg_train_batch_cost = batch_cost_averager.get_average()
+                avg_train_reader_cost = reader_cost_averager.get_average()
+                eta = calculate_eta(remain_iters, avg_train_batch_cost)
+                logger.info(
+                    "[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}"
+                    .format((iter - 1) // iters_per_epoch + 1, iter, iters,
+                            avg_loss, lr, avg_train_batch_cost,
+                            avg_train_reader_cost,
+                            batch_cost_averager.get_ips_average(), eta))
+                logger.info(
+                    "[LOSS] loss={:.4f}, alpha_raw_loss={:.4f}, comp_loss={:.4f}, alpha_pred_loss={:.4f}"
+                    .format(avg_loss, avg_loss_list[0], avg_loss_list[1],
+                            avg_loss_list[2]))
+                if use_vdl:
+                    log_writer.add_scalar('Train/loss', avg_loss, iter)
+                    # Record all losses if there are more than 2 losses.
+                    if len(avg_loss_list) > 1:
+                        avg_loss_dict = {}
+                        for i, value in enumerate(avg_loss_list):
+                            avg_loss_dict['loss_' + str(i)] = value
+                        for key, value in avg_loss_dict.items():
+                            log_tag = 'Train/' + key
+                            log_writer.add_scalar(log_tag, value, iter)
+
+                    log_writer.add_scalar('Train/lr', lr, iter)
+                    log_writer.add_scalar('Train/batch_cost',
+                                          avg_train_batch_cost, iter)
+                    log_writer.add_scalar('Train/reader_cost',
+                                          avg_train_reader_cost, iter)
+
+                    # Also visualize the input image and the alpha results.
+                    ori_img = data['img'][0]
+                    ori_img = paddle.transpose(ori_img, [1, 2, 0])
+                    ori_img = (ori_img * 0.5 + 0.5) * 255
+
+                    alpha = (data['alpha'][0]).unsqueeze(-1)
+                    trimap = (data['trimap'][0]).unsqueeze(-1)
+
+                    alpha_raw = (logit_dict['alpha_raw'][0] * 255).transpose(
+                        [1, 2, 0])
+                    alpha_pred = (logit_dict['alpha_pred'][0] * 255).transpose(
+                        [1, 2, 0])
+
+                    log_writer.add_image(
+                        tag='ground truth/ori_img',
+                        img=ori_img.numpy(),
+                        step=iter)
+                    log_writer.add_image(
+                        tag='ground truth/alpha', img=alpha.numpy(), step=iter)
+                    log_writer.add_image(
+                        tag='ground truth/trimap',
+                        img=trimap.numpy(),
+                        step=iter)
+                    log_writer.add_image(
+                        tag='prediction/alpha_raw',
+                        img=alpha_raw.numpy(),
+                        step=iter)
+                    log_writer.add_image(
+                        tag='prediction/alpha_pred',
+                        img=alpha_pred.numpy(),
+                        step=iter)
+
+                avg_loss = 0.0
+                avg_loss_list = []
+                reader_cost_averager.reset()
+                batch_cost_averager.reset()
+
+            # save model
+            if (iter % save_interval == 0 or iter == iters) and local_rank == 0:
+                current_save_dir = os.path.join(save_dir,
+                                                "iter_{}".format(iter))
+                if not os.path.isdir(current_save_dir):
+                    os.makedirs(current_save_dir)
+                paddle.save(model.state_dict(),
+                            os.path.join(current_save_dir, 'model.pdparams'))
+                paddle.save(optimizer.state_dict(),
+                            os.path.join(current_save_dir, 'model.pdopt'))
+                save_models.append(current_save_dir)
+                if len(save_models) > keep_checkpoint_max > 0:
+                    model_to_remove = save_models.popleft()
+                    shutil.rmtree(model_to_remove)
+            """
+            # eval model
+            if (iter % save_interval == 0 or iter == iters) and (
+                    val_dataset is
+                    not None) and local_rank == 0 and iter > iters // 2:
+                num_workers = 1 if num_workers > 0 else 0
+                panoptic_results, semantic_results, instance_results = evaluate(
+                    model,
+                    val_dataset,
+                    threshold=threshold,
+                    nms_kernel=nms_kernel,
+                    top_k=top_k,
+                    num_workers=num_workers,
+                    print_detail=False)
+                pq = panoptic_results['pan_seg']['All']['pq']
+                miou = semantic_results['sem_seg']['mIoU']
+                map = instance_results['ins_seg']['mAP']
+                map50 = instance_results['ins_seg']['mAP50']
+                logger.info(
+                    "[EVAL] PQ: {:.4f}, mIoU: {:.4f}, mAP: {:.4f}, mAP50: {:.4f}"
+                    .format(pq, miou, map, map50))
+                model.train()
+
+            # save best model and add evaluate results to vdl
+            if (iter % save_interval == 0 or iter == iters) and local_rank == 0:
+                if val_dataset is not None and iter > iters // 2:
+                    if pq > best_pq:
+                        best_pq = pq
+                        best_model_iter = iter
+                        best_model_dir = os.path.join(save_dir, "best_model")
+                        paddle.save(
+                            model.state_dict(),
+                            os.path.join(best_model_dir, 'model.pdparams'))
+                    logger.info(
+                        '[EVAL] The model with the best validation pq ({:.4f}) was saved at iter {}.'
+                        .format(best_pq, best_model_iter))
+
+                if use_vdl:
+                    log_writer.add_scalar('Evaluate/PQ', pq, iter)
+                    log_writer.add_scalar('Evaluate/mIoU', miou, iter)
+                    log_writer.add_scalar('Evaluate/mAP', map, iter)
+                    log_writer.add_scalar('Evaluate/mAP50', map50, iter)
+            """
+            batch_start = time.time()
+
+    # Sleep for half a second to let dataloader release resources.
+    time.sleep(0.5)
+    if use_vdl:
+        log_writer.close()
diff --git a/contrib/matting/dataset.py b/contrib/matting/dataset.py
new file mode 100644
index 0000000000..e6371f7448
--- /dev/null
+++ b/contrib/matting/dataset.py
@@ -0,0 +1,128 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import cv2
+import numpy as np
+import random
+import paddle
+
+from utils import get_files
+import transforms as T
+
+
+def gen_trimap(alpha):
+    k_size = random.choice(range(2, 5))
+    iterations = np.random.randint(5, 15)
+    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k_size, k_size))
+    dilated = cv2.dilate(alpha, kernel, iterations=iterations)
+    eroded = cv2.erode(alpha, kernel, iterations=iterations)
+    trimap = np.zeros(alpha.shape)
+    trimap.fill(128)
+    trimap[eroded >= 255] = 255
+    trimap[dilated <= 0] = 0
+
+    return trimap
+
+
+class Dataset(paddle.io.Dataset):
+    def __init__(self):
+        self.png_file = '/mnt/chenguowei01/datasets/matting/PhotoMatte85/0051115Q_000001_0041.png'
+        self.background = np.zeros((3, 320, 320), dtype='float32')
+        self.background[1, :, :] = 255
+
+    def __getitem__(self, idx):
+        img_png = cv2.imread(self.png_file, cv2.IMREAD_UNCHANGED)
+        img_png = cv2.resize(img_png, (320, 320))
+        img_png = np.transpose(img_png, [2, 0, 1])
+        alpha = img_png[-1, :, :].astype('float32')
+
+        img = img_png[:-1, :, :].astype('float32')
+        img = img[::-1, :, :]
+        img = (img / 255 - 0.5) / 0.5
+        # img = (alpha/255) * img + (1-alpha/255) * self.background
+
+        trimap = gen_trimap(alpha).astype('float32')
+
+        return img, alpha, trimap
+
+    def __len__(self):
+        return 1000
+
+
+class HumanDataset(paddle.io.Dataset):
+    def __init__(
+            self,
+            dataset_root,
+            transforms,
+            mode='train',
+    ):
+        super().__init__()
+        self.dataset_root = dataset_root
+        self.transforms = T.Compose(transforms)
+
+        img_dir = os.path.join(dataset_root, mode, 'image')
+        self.img_list = get_files(img_dir)  # a list
+        self.alpha_list = [f.replace('image', 'alpha') for f in self.img_list]
+        self.fg_list = [f.replace('image', 'fg') for f in self.img_list]
+        self.bg_list = [f.replace('image', 'bg') for f in self.img_list]
+
+    def __getitem__(self, idx):
+        data = {}
+        data['img'] = self.img_list[idx]
+        data['alpha'] = self.alpha_list[idx]
+        data['fg'] = self.fg_list[idx]
+        data['bg'] = self.bg_list[idx]
+        data['gt_fields'] = ['alpha', 'fg', 'bg']
+
+        data = self.transforms(data)
+        data['img'] = data['img'].astype('float32')
+        for key in data.get('gt_fields', []):
+            data[key] = data[key].astype('float32')
+        data['trimap'] = gen_trimap(data['alpha']).astype('float32')
+
+        return data
+
+    def __len__(self):
+        return len(self.img_list)
+
+    @staticmethod
+    def gen_trimap(alpha):
+        k_size = random.choice(range(2, 5))
+        iterations = np.random.randint(5, 15)
+        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k_size, k_size))
+        dilated = cv2.dilate(alpha, kernel, iterations=iterations)
+        eroded = cv2.erode(alpha, kernel, iterations=iterations)
+        trimap = np.zeros(alpha.shape)
+        trimap.fill(128)
+        trimap[eroded >= 255] = 255
+        trimap[dilated <= 0] = 0
+
+        return trimap
+
+
+if __name__ == '__main__':
+    t = [T.LoadImages(), T.Resize(), T.Normalize()]
+    train_dataset = HumanDataset(
+        dataset_root='/mnt/chenguowei01/datasets/matting/human_matting/',
+        transforms=t,
+        mode='val')
+    print(train_dataset.img_list[0], len(train_dataset.img_list),
+          len(train_dataset.alpha_list), len(train_dataset.fg_list),
+          len(train_dataset.bg_list))
+    data = train_dataset[0]
+    print(np.min(data['img']), np.max(data['img']))
+    print(data['img'].shape, data['fg'].shape, data['bg'].shape,
+          data['alpha'].shape, data['trimap'].shape)
diff --git a/contrib/matting/model/__init__.py b/contrib/matting/model/__init__.py
new file mode 100644
index 0000000000..99a5ffe53e
--- /dev/null
+++ b/contrib/matting/model/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .vgg import *
+from .dim import DIM
+from .loss import MRSD
diff --git a/contrib/matting/model/dim.py b/contrib/matting/model/dim.py
new file mode 100644
index 0000000000..f42065b8ac
--- /dev/null
+++ b/contrib/matting/model/dim.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from paddleseg.models import layers
+from paddleseg import utils
+from paddleseg.cvlibs import manager
+
+
+@manager.MODELS.add_component
+class DIM(nn.Layer):
+    """
+    The DIM implementation based on PaddlePaddle.
+
+    The original article refers to
+    Ning Xu, et al. "Deep Image Matting"
+    (https://arxiv.org/pdf/1703.03872.pdf).
+
+    Args:
+        backbone (nn.Layer): The backbone network. It should return a feature list and
+            a max-pooling indices list, e.g. the VGG16 in this directory with input_channels=4.
+        backbone_indices (tuple, optional): The index of the backbone feature fed into
+            the decoder. Default: (-1, ).
+        pretrained (str, optional): The path of pretrained model. Default: None.
+    """
+
+    def __init__(self, backbone, backbone_indices=(-1, ), pretrained=None):
+        super().__init__()
+        self.backbone = backbone
+        self.backbone_indices = backbone_indices
+        self.pretrained = pretrained
+
+        self.decoder = Decoder(input_channels=512)
+        self.refine = Refine()
+        self.init_weight()
+
+    def forward(self, inputs):
+        x = paddle.concat([inputs['img'], inputs['trimap'].unsqueeze(1)],
+                          axis=1)
+        fea_list, ids_list = self.backbone(x)
+
+        # decoder stage
+        up_shape = []
+        up_shape.append(x.shape[-2:])
+        for i in range(4):
+            up_shape.append(fea_list[i].shape[-2:])
+        alpha_raw = self.decoder(fea_list[self.backbone_indices[0]], up_shape)
+
+        # refine stage
+        alpha_raw_ = alpha_raw * 255
+        refine_input = paddle.concat([x[:, :3, :, :], alpha_raw_], axis=1)
+        alpha_refine = self.refine(refine_input)
+
+        # final alpha
+        alpha_pred = alpha_refine + alpha_raw
+        alpha_pred = paddle.clip(alpha_pred, min=0, max=1)
+
+        logit_dict = {'alpha_pred': alpha_pred, 'alpha_raw': alpha_raw}
+        return logit_dict
+
+    def init_weight(self):
+        if self.pretrained is not None:
+            utils.load_entire_model(self, self.pretrained)
+
+
+class Up(nn.Layer):
+    def __init__(self, input_channels, output_channels):
+        super().__init__()
+        self.conv = layers.ConvBNReLU(
+            input_channels,
+            output_channels,
+            kernel_size=5,
+            padding=2,
+            bias_attr=False)
+
+    def forward(self, x, output_shape):
+        x = F.interpolate(
+            x, size=output_shape, mode='bilinear', align_corners=False)
+        x = self.conv(x)
+        return x
+
+
+class Decoder(nn.Layer):
+    def __init__(self, input_channels):
+        super().__init__()
+        self.deconv6 = nn.Conv2D(
+            input_channels, 512, kernel_size=1, bias_attr=False)
+        self.deconv5 = Up(512, 512)
+        self.deconv4 = Up(512, 256)
+        self.deconv3 = Up(256, 128)
+        self.deconv2 = Up(128, 64)
+        self.deconv1 = Up(64, 64)
+
+        self.alpha_conv = nn.Conv2D(
+            64, 1, kernel_size=5, padding=2, bias_attr=False)
+
+    def forward(self, x, shape_list):
+        x = self.deconv6(x)
+        x = self.deconv5(x, shape_list[4])
+        x = self.deconv4(x, shape_list[3])
+        x = self.deconv3(x, shape_list[2])
+        x = self.deconv2(x, shape_list[1])
+        x = self.deconv1(x, shape_list[0])
+        alpha = self.alpha_conv(x)
+        alpha = F.sigmoid(alpha)
+
+        return alpha
+
+
+class Refine(nn.Layer):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = layers.ConvBNReLU(
+            4, 64, kernel_size=3, padding=1, bias_attr=False)
+        self.conv2 = layers.ConvBNReLU(
+            64, 64, kernel_size=3, padding=1, bias_attr=False)
+        self.conv3 = layers.ConvBNReLU(
+            64, 64, kernel_size=3, padding=1, bias_attr=False)
+        self.alpha_pred = layers.ConvBNReLU(
+            64, 1, kernel_size=3, padding=1, bias_attr=False)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.conv2(x)
+        x = self.conv3(x)
+        alpha = self.alpha_pred(x)
+        alpha = F.sigmoid(alpha)
+
+        return alpha
+
+
+if __name__ == "__main__":
+    from vgg import VGG16
+    backbone = VGG16(input_channels=4)
+    model = DIM(backbone=backbone)
+
+    # DIM.forward expects a dict with an 'img' and a 'trimap' entry.
+    model_input = {
+        'img': paddle.randint(0, 256, (1, 3, 320, 320)).astype('float32'),
+        'trimap': paddle.randint(0, 256, (1, 320, 320)).astype('float32')
+    }
+    logit_dict = model(model_input)
+
+    print(model)
+
+    print(logit_dict['alpha_pred'].shape, logit_dict['alpha_raw'].shape)
diff --git a/contrib/matting/model/loss.py b/contrib/matting/model/loss.py
new file mode 100644
index 0000000000..c85eceaad0
--- /dev/null
+++ b/contrib/matting/model/loss.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle
+import paddle.nn as nn
+import paddle.nn.functional as F
+
+from paddleseg.cvlibs import manager
+
+
+@manager.LOSSES.add_component
+class MRSD(nn.Layer):
+    def __init__(self, eps=1e-6):
+        super().__init__()
+        self.eps = eps
+
+    def forward(self, logit, label, mask=None):
+        """
+        Forward computation.
+
+        Args:
+            logit (Tensor): Logit tensor, the data type is float32, float64.
+            label (Tensor): Label tensor, the data type is float32, float64. The shape should equal to logit.
+            mask (Tensor, optional): The mask where the loss is valid. Default: None.
+        """
+        sd = paddle.square(logit - label)
+        loss = paddle.sqrt(sd + self.eps)
+        mask = mask.astype('float32')
+        if len(mask.shape) == 3:
+            mask = mask.unsqueeze(1)
+        loss = loss * mask
+        loss = loss.sum() / (mask.sum() + self.eps)
+
+        mask.stop_gradient = True
+
+        return loss
diff --git a/contrib/matting/model/vgg.py b/contrib/matting/model/vgg.py
new file mode 100644
index 0000000000..05fca07658
--- /dev/null
+++ b/contrib/matting/model/vgg.py
@@ -0,0 +1,168 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
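As a quick sanity check, a hedged usage sketch of the MRSD loss defined above (random tensors stand in for real predictions; in training, the mask plays the role of `trimap == 128`):

    import paddle

    logit = paddle.rand([2, 1, 320, 320])
    label = paddle.rand([2, 1, 320, 320])
    mask = paddle.randint(0, 2, [2, 1, 320, 320])

    loss = MRSD()(logit, label, mask)
    # Scalar: mean of sqrt((logit - label)^2 + eps) over the pixels where mask == 1.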
+
+import paddle
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
+from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
+
+from paddleseg.cvlibs import manager
+from paddleseg.utils import utils
+
+
+class ConvBlock(nn.Layer):
+    def __init__(self, input_channels, output_channels, groups, name=None):
+        super(ConvBlock, self).__init__()
+
+        self.groups = groups
+        self._conv_1 = Conv2D(
+            in_channels=input_channels,
+            out_channels=output_channels,
+            kernel_size=3,
+            stride=1,
+            padding=1,
+            weight_attr=ParamAttr(name=name + "1_weights"),
+            bias_attr=False)
+        if groups == 2 or groups == 3 or groups == 4:
+            self._conv_2 = Conv2D(
+                in_channels=output_channels,
+                out_channels=output_channels,
+                kernel_size=3,
+                stride=1,
+                padding=1,
+                weight_attr=ParamAttr(name=name + "2_weights"),
+                bias_attr=False)
+        if groups == 3 or groups == 4:
+            self._conv_3 = Conv2D(
+                in_channels=output_channels,
+                out_channels=output_channels,
+                kernel_size=3,
+                stride=1,
+                padding=1,
+                weight_attr=ParamAttr(name=name + "3_weights"),
+                bias_attr=False)
+        if groups == 4:
+            self._conv_4 = Conv2D(
+                in_channels=output_channels,
+                out_channels=output_channels,
+                kernel_size=3,
+                stride=1,
+                padding=1,
+                weight_attr=ParamAttr(name=name + "4_weights"),
+                bias_attr=False)
+
+        self._pool = MaxPool2D(
+            kernel_size=2, stride=2, padding=0, return_mask=True)
+
+    def forward(self, inputs):
+        x = self._conv_1(inputs)
+        x = F.relu(x)
+        if self.groups == 2 or self.groups == 3 or self.groups == 4:
+            x = self._conv_2(x)
+            x = F.relu(x)
+        if self.groups == 3 or self.groups == 4:
+            x = self._conv_3(x)
+            x = F.relu(x)
+        if self.groups == 4:
+            x = self._conv_4(x)
+            x = F.relu(x)
+        x, max_indices = self._pool(x)
+        return x, max_indices
+
+
+class VGGNet(nn.Layer):
+    def __init__(self, input_channels=3, layers=11, pretrained=None):
+        super(VGGNet, self).__init__()
+        self.pretrained = pretrained
+
+        self.layers = layers
+        self.vgg_configure = {
+            11: [1, 1, 2, 2, 2],
+            13: [2, 2, 2, 2, 2],
+            16: [2, 2, 3, 3, 3],
+            19: [2, 2, 4, 4, 4]
+        }
+        assert self.layers in self.vgg_configure.keys(), \
+            "supported layers are {} but input layer is {}".format(
+                self.vgg_configure.keys(), layers)
+        self.groups = self.vgg_configure[self.layers]
+
+        # For matting, the first conv layer takes a 4-channel input;
+        # the extra channel is simply initialized to zero.
+        self._conv_block_1 = ConvBlock(
+            input_channels, 64, self.groups[0], name="conv1_")
+        self._conv_block_2 = ConvBlock(64, 128, self.groups[1], name="conv2_")
+        self._conv_block_3 = ConvBlock(128, 256, self.groups[2], name="conv3_")
+        self._conv_block_4 = ConvBlock(256, 512, self.groups[3], name="conv4_")
+        self._conv_block_5 = ConvBlock(512, 512, self.groups[4], name="conv5_")
+
+        # This layer should be initialized from the converted VGG fc6 weights;
+        # that initialization can be skipped for now.
+        self._conv_6 = Conv2D(
+            512, 512, kernel_size=3, padding=1, bias_attr=False)
+
+        self.init_weight()
+
+    def forward(self, inputs):
+        fea_list = []
+        ids_list = []
+        x, ids = self._conv_block_1(inputs)
+        fea_list.append(x)
+        ids_list.append(ids)
+        x, ids = self._conv_block_2(x)
+        fea_list.append(x)
+        ids_list.append(ids)
+        x, ids = self._conv_block_3(x)
+        fea_list.append(x)
+        ids_list.append(ids)
+        x, ids = self._conv_block_4(x)
+        fea_list.append(x)
+        ids_list.append(ids)
+        x, ids = self._conv_block_5(x)
+        fea_list.append(x)
+        ids_list.append(ids)
+        x = F.relu(self._conv_6(x))
+        fea_list.append(x)
+        return fea_list, ids_list
+
+    def init_weight(self):
+        if self.pretrained is not None:
+            # Initialization needs some special handling:
+            # self.load_pretrained_model(self.pretrained)
+            # for now, do not use fc14 for initialization.
+            utils.load_pretrained_model(self, self.pretrained)
+
+
+@manager.BACKBONES.add_component
+def VGG11(**args):
+    model = VGGNet(layers=11, **args)
+    return model
+
+
+@manager.BACKBONES.add_component
+def VGG13(**args):
+    model = VGGNet(layers=13, **args)
+    return model
+
+
+@manager.BACKBONES.add_component
+def VGG16(**args):
+    model = VGGNet(layers=16, **args)
+    return model
+
+
+@manager.BACKBONES.add_component
+def VGG19(**args):
+    model = VGGNet(layers=19, **args)
+    return model
diff --git a/contrib/matting/tools/gen_dataset/gen_bg.py b/contrib/matting/tools/gen_dataset/gen_bg.py
new file mode 100644
index 0000000000..14ba895ef6
--- /dev/null
+++ b/contrib/matting/tools/gen_dataset/gen_bg.py
@@ -0,0 +1,107 @@
+import os
+import shutil
+from multiprocessing import Pool
+from functools import partial
+
+import cv2
+import numpy as np
+from tqdm import tqdm
+"""
+Get background images from MSCOCO_17 and PascalVOC12 and exclude the images containing persons.
+"""
+
+
+def get_bg_from_pascal_voc(
+        data_path='/mnt/chenguowei01/datasets/VOCdevkit/VOC2012',
+        save_path='bg/pascal_val12'):
+    """
+    extract background
+    """
+    person_train_txt = os.path.join(data_path,
+                                    "ImageSets/Main/person_train.txt")
+    train_save_path = os.path.join(save_path, 'train')
+    person_val_txt = os.path.join(data_path, "ImageSets/Main/person_val.txt")
+    val_save_path = os.path.join(save_path, 'val')
+    if not os.path.exists(train_save_path):
+        os.makedirs(train_save_path)
+    if not os.path.exists(val_save_path):
+        os.makedirs(val_save_path)
+
+    # training dataset
+    f = open(person_train_txt, 'r')
+    train_images = f.read().splitlines()
+    f.close()
+    print('there are {} images in training dataset.'.format(len(train_images)))
+    num = 0
+    for line in train_images:
+        image_name, id = line.split()
+        if id == '-1':
+            num += 1
+            ori_img = os.path.join(data_path, 'JPEGImages', image_name + '.jpg')
+            shutil.copy(ori_img, train_save_path)
+    print('there are {} images without person in the training dataset'.format(
+        num))
+
+    # val dataset
+    f = open(person_val_txt, 'r')
+    val_images = f.read().splitlines()
+    f.close()
+    print('there are {} images in val dataset.'.format(len(val_images)))
+    num = 0
+    for line in val_images:
+        image_name, id = line.split()
+        if id == '-1':
+            num += 1
+            ori_img = os.path.join(data_path, 'JPEGImages', image_name + '.jpg')
+            shutil.copy(ori_img, val_save_path)
+    print('there are {} images without person in the val dataset'.format(num))
+
+
+def cp(line, data_path, save_path):
+    image_name, anno_name = line.split('|')
+    anno = cv2.imread(os.path.join(data_path, anno_name), cv2.IMREAD_UNCHANGED)
+    classes = np.unique(anno)
+    if 0 not in classes:
+        shutil.copy(os.path.join(data_path, image_name), save_path)
+
+
+def get_bg_from_coco_17(data_path='/mnt/chenguowei01/datasets/coco_17',
+                        save_path='bg/coco_17'):
+    train_txt = os.path.join(data_path, 'train2017.txt')
+    train_save_path = os.path.join(save_path, 'train')
+    val_txt = os.path.join(data_path, 'val2017.txt')
+    val_save_path = os.path.join(save_path, 'val')
+    if not os.path.exists(train_save_path):
+        os.makedirs(train_save_path)
+    if not os.path.exists(val_save_path):
+        os.makedirs(val_save_path)
+
+    # training dataset
+    partial_train_cp = partial(
+        cp, data_path=data_path, save_path=train_save_path)
+    with open(train_txt, 'r') as f:
+        train_list = f.read().splitlines()
+    max_ = len(train_list)
+    with Pool(40) as pool:
+        with tqdm(total=max_) as pbar:
+            for i, _ in tqdm(
+                    enumerate(
+                        pool.imap_unordered(partial_train_cp, train_list))):
+                pbar.update()
+
+    # val dataset
+    partial_val_cp = partial(cp, data_path=data_path, save_path=val_save_path)
+    with open(val_txt, 'r') as f:
+        val_list = f.read().splitlines()
+    max_ = len(val_list)
+    with Pool(40) as pool:
+        with tqdm(total=max_) as pbar:
+            for i, _ in tqdm(
+                    enumerate(pool.imap_unordered(partial_val_cp, val_list))):
+                pbar.update()
+
+
+if __name__ == "__main__":
+    # get_bg_from_pascal_voc(save_path="/mnt/chenguowei01/datasets/matting/gather/bg/pascal_voc12")
+    get_bg_from_coco_17(
+        save_path="/mnt/chenguowei01/datasets/matting/gather/bg/coco_17")
diff --git a/contrib/matting/tools/gen_dataset/gen_dataset.py b/contrib/matting/tools/gen_dataset/gen_dataset.py
new file mode 100644
index 0000000000..95086ccaf4
--- /dev/null
+++ b/contrib/matting/tools/gen_dataset/gen_dataset.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+to generate the human matting dataset. The directory is as follows:
+    human_matting
+        train
+            image
+                000001.png
+                ...
+            fg
+                000001.png
+                ...
+            bg
+                000001.png
+                ...
+            alpha
+                000001.png
+                ....
+            trimap
+                000001.png
+        val
+            image
+                000001.png
+                ...
+            fg
+                000001.png
+                ...
+            bg
+                000001.png
+                ...
+            alpha
+                000001.png
+                ....
+            trimap
+                000001.png
+        ...
+For video, one frame is taken every 5 frames and composited with one background.
+For image, one image is composited with 5 backgrounds.
+"""
+
+import os
+import math
+import time
+
+import cv2
+import numpy as np
+from multiprocessing import Pool
+from tqdm import tqdm
+
+
+def get_files(root_path):
+    res = []
+    for root, dirs, files in os.walk(root_path, followlinks=True):
+        for f in files:
+            if f.endswith(('.jpg', '.png', '.jpeg', 'JPG', '.mp4')):
+                res.append(os.path.join(root, f))
+    return res
+
+
+ori_dataset_root = "/mnt/chenguowei01/datasets/matting/gather"
+ori_fg_path = os.path.join(ori_dataset_root, 'fg')
+ori_alpha_path = os.path.join(ori_dataset_root, 'alpha')
+ori_bg_path = os.path.join(ori_dataset_root, 'bg')
+
+fg_list = get_files(ori_fg_path)
+alpha_list = [f.replace('fg', 'alpha') for f in fg_list]
+bg_list = get_files(ori_bg_path)
+len_bg_list = len(bg_list)
+
+dataset_root = '/ssd3/chenguowei01/datasets/matting/human_matting'
+
+
+def im_write(save_path, img):
+    dir_name = os.path.dirname(save_path)
+    if not os.path.exists(dir_name):
+        os.makedirs(dir_name)
+    cv2.imwrite(save_path, img)
+
+
+def composite(fg, alpha, ori_bg):
+    fg_h, fg_w = fg.shape[:2]
+    ori_bg_h, ori_bg_w = ori_bg.shape[:2]
+
+    wratio = fg_w / ori_bg_w
+    hratio = fg_h / ori_bg_h
+    ratio = wratio if wratio > hratio else hratio
+
+    # Resize ori_bg if it is smaller than fg.
+    if ratio > 1:
+        resize_h = math.ceil(ori_bg_h * ratio)
+        resize_w = math.ceil(ori_bg_w * ratio)
+        bg = cv2.resize(
+            ori_bg, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR)
+    else:
+        bg = ori_bg
+
+    bg = bg[0:fg_h, 0:fg_w, :]
+    alpha = alpha / 255
+    alpha = np.expand_dims(alpha, axis=2)
+    image = alpha * fg + (1 - alpha) * bg
+    image = image.astype(np.uint8)
+    return image, bg
+
+
+def video_comp(fg_file, alpha_file, bg_index_list, interval=5, mode='train'):
+    fg_video_capture = cv2.VideoCapture(fg_file)
+    alpha_video_capture = cv2.VideoCapture(alpha_file)
+    frames = fg_video_capture.get(cv2.CAP_PROP_FRAME_COUNT)
+    print("there are {} frames in video {}".format(frames, fg_file))
+
+    f_index = 0
+    while True:
+        if f_index >= frames:
+            break
+        fg_video_capture.set(cv2.CAP_PROP_POS_FRAMES, f_index)
+        fg_ret, fg_frame = fg_video_capture.retrieve()  # get foreground
+        alpha_video_capture.set(cv2.CAP_PROP_POS_FRAMES, f_index)
+        alpha_ret, alpha_frame = alpha_video_capture.retrieve()  # get alpha
+        ret = fg_ret and alpha_ret
+        if not ret:
+            break
+        if len(alpha_frame.shape) == 3:
+            alpha_frame = alpha_frame[:, :, 0]
+
+        file_name = os.path.basename(fg_file)
+        file_name = os.path.splitext(file_name)[0]
+        fg_save_name = os.path.join(dataset_root, mode, 'fg', file_name,
+                                    '{:0>5d}'.format(f_index) + '.png')
+        alpha_save_name = fg_save_name.replace('fg', 'alpha')
+        bg_save_name = fg_save_name.replace('fg', 'bg')
+        image_save_name = fg_save_name.replace('fg', 'image')
+
+        ori_bg = cv2.imread(
+            bg_list[bg_index_list[f_index % len_bg_list]])  # get background
+        image, bg = composite(
+            fg_frame, alpha_frame,
+            ori_bg)  # get composition image and the corresponding background
+
+        # save fg, alpha, bg, image
+        im_write(fg_save_name, fg_frame)
+        im_write(alpha_save_name, alpha_frame)
+        im_write(image_save_name, image)
+        im_write(bg_save_name, bg)
+
+        f_index += interval
+
+
+def image_comp(fg_file, alpha_file, bg_index_list, num_bgs=5, mode='train'):
+    fg = cv2.imread(fg_file)
+    alpha = cv2.imread(alpha_file, cv2.IMREAD_UNCHANGED)
+    print('Composition for ', fg_file)
+
+    for i in range(num_bgs):
+        bg_index = bg_index_list[i]
+        ori_bg = cv2.imread(bg_list[bg_index])  # get background
+        image, bg = composite(fg, alpha, ori_bg)
+
+        file_name = os.path.basename(fg_file)
+        file_name = os.path.splitext(file_name)[0]
+        file_name = '_'.join([file_name, '{:0>3d}'.format(i)])
+        fg_save_name = os.path.join(dataset_root, mode, 'fg',
+                                    file_name + '.png')
+        alpha_save_name = fg_save_name.replace('fg', 'alpha')
+        bg_save_name = fg_save_name.replace('fg', 'bg')
+        image_save_name = fg_save_name.replace('fg', 'image')
+
+        im_write(fg_save_name, fg)
+        im_write(alpha_save_name, alpha)
+        im_write(image_save_name, image)
+        im_write(bg_save_name, bg)
+
+
+def comp_one(fa_index):
+    """
+    Composite foreground and background.
+
+    Args:
+        fa_index (int): The index of the foreground/alpha pair. The background indices
+            are shuffled per process: if the foreground is a video, one frame is taken
+            every 5 frames and composited with one background; if the foreground is an
+            image, it is composited with 5 backgrounds.
+ """ + fg_file = fg_list[fa_index] + alpha_file = alpha_list[fa_index] + mode = 'train' if 'train' in fg_file else 'val' + + # Randomly bg index + np.random.seed(int(os.getpid() * time.time()) % + (2**30)) # make different for each process + + len_bg = len(bg_list) + bg_index_list = list(range(len_bg)) + np.random.shuffle(bg_index_list) + + if os.path.splitext(fg_file)[-1] in ['.mp4']: + video_comp( + fg_file=fg_file, + alpha_file=alpha_file, + bg_index_list=bg_index_list, + mode=mode) + # else: + # image_comp(fg_file=fg_file, alpha_file=alpha_file, bg_index_list=bg_index_list, mode=mode) + + +def comp_pool(): + len_fa = len(fg_list) + + with Pool(20) as pool: + with tqdm(total=len_fa) as pbar: + for i, _ in tqdm( + enumerate(pool.imap_unordered(comp_one, range(len_fa)))): + pbar.update() + + +if __name__ == '__main__': + comp_pool() diff --git a/contrib/matting/tools/gen_dataset/gen_fg_alpha.py b/contrib/matting/tools/gen_dataset/gen_fg_alpha.py new file mode 100644 index 0000000000..6589bf2375 --- /dev/null +++ b/contrib/matting/tools/gen_dataset/gen_fg_alpha.py @@ -0,0 +1,58 @@ +import os +import random + +import cv2 + + +def get_from_pm85(data_path="/mnt/chenguowei01/datasets/matting/PhotoMatte85", + save_path="/mnt/chenguowei01/datasets/matting/gather"): + """ + Get matte from PhotoMatte85 + """ + + files = os.listdir(data_path) + files = [os.path.join(data_path, f) for f in files] + random.seed(1) + random.shuffle(files) + train_files = files[:-10] + val_files = files[-10:] + + # training dataset + fg_save_path = os.path.join(save_path, 'fg', 'PhotoMatte85', 'train') + alpha_save_path = fg_save_path.replace('fg', 'alpha') + if not os.path.exists(fg_save_path): + os.makedirs(fg_save_path) + if not os.path.exists(alpha_save_path): + os.makedirs(alpha_save_path) + for f in train_files: + png_img = cv2.imread(f, cv2.IMREAD_UNCHANGED) + fg = png_img[:, :, :3] + alpha = png_img[:, :, -1] + if alpha[0, 0] != 0: + alpha[:100, :] = 0 + fg[:100, :, :] = 0 + basename = os.path.basename(f) + cv2.imwrite(os.path.join(fg_save_path, basename), fg) + cv2.imwrite(os.path.join(alpha_save_path, basename), alpha) + + # val dataset + fg_save_path = os.path.join(save_path, 'fg', 'PhotoMatte85', 'val') + alpha_save_path = fg_save_path.replace('fg', 'alpha') + if not os.path.exists(fg_save_path): + os.makedirs(fg_save_path) + if not os.path.exists(alpha_save_path): + os.makedirs(alpha_save_path) + for f in val_files: + png_img = cv2.imread(f, cv2.IMREAD_UNCHANGED) + fg = png_img[:, :, :3] + alpha = png_img[:, :, -1] + if alpha[0, 0] != 0: + alpha[:100, :] = 0 + fg[:100, :, :] = 0 + basename = os.path.basename(f) + cv2.imwrite(os.path.join(fg_save_path, basename), fg) + cv2.imwrite(os.path.join(alpha_save_path, basename), alpha) + + +if __name__ == "__main__": + get_from_pm85() diff --git a/contrib/matting/tools/update_vgg16_params.py b/contrib/matting/tools/update_vgg16_params.py new file mode 100644 index 0000000000..15be91b9e9 --- /dev/null +++ b/contrib/matting/tools/update_vgg16_params.py @@ -0,0 +1,57 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle + + +def update_vgg16_params(model_path): + param_state_dict = paddle.load(model_path) + # first conv weight name _conv_block_1._conv_1.weight, shape is [64, 3, ,3, 3] + # first fc weight name: _fc1.weight, shape is [25088, 4096] + for k, v in param_state_dict.items(): + print(k, v.shape) + + # # first weight + weight = param_state_dict['_conv_block_1._conv_1.weight'] # [64, 3,3,3] + print('ori shape: ', weight.shape) + zeros_pad = paddle.zeros((64, 1, 3, 3)) + param_state_dict['_conv_block_1._conv_1.weight'] = paddle.concat( + [weight, zeros_pad], axis=1) + print('shape after padding', + param_state_dict['_conv_block_1._conv_1.weight'].shape) + + # fc1 + weight = param_state_dict['_fc1.weight'] + weight = paddle.transpose(weight, [1, 0]) + print('after transpose: ', weight.shape) + weight = paddle.reshape(weight, (4096, 512, 7, 7)) + print('after reshape: ', weight.shape) + weight = weight[0:512, :, 2:5, 2:5] + print('after crop: ', weight.shape) + param_state_dict['_conv_6.weight'] = weight + + del param_state_dict['_fc1.weight'] + del param_state_dict['_fc1.bias'] + del param_state_dict['_fc2.weight'] + del param_state_dict['_fc2.bias'] + del param_state_dict['_out.weight'] + del param_state_dict['_out.bias'] + + paddle.save(param_state_dict, 'VGG16_pretrained.pdparams') + + +if __name__ == "__main__": + paddle.set_device('cpu') + model_path = '/mnt/chenguowei01/.paddleseg/pretrained_model/dygraph/VGG16_pretrained.pdparams' + update_vgg16_params(model_path) diff --git a/contrib/matting/train.py b/contrib/matting/train.py new file mode 100644 index 0000000000..b96ee41dcf --- /dev/null +++ b/contrib/matting/train.py @@ -0,0 +1,84 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
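The reshape in update_vgg16_params works because VGG16's first fully connected layer sees a flattened 512x7x7 feature map, so its weight matrix can be read as 4096 filters of shape 512x7x7. A quick NumPy shape check (zeros stand in for the real weights):

    import numpy as np

    fc1 = np.zeros((25088, 4096), dtype='float32')  # _fc1.weight; 25088 == 512 * 7 * 7
    w = fc1.T.reshape(4096, 512, 7, 7)              # one 512x7x7 filter per output unit
    w = w[0:512, :, 2:5, 2:5]                       # keep 512 filters, central 3x3 window
    print(w.shape)                                  # (512, 512, 3, 3), matching _conv_6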
+
+import argparse
+
+import paddle
+
+from core import train
+from model import *
+from dataset import HumanDataset
+import transforms as T
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Model training")
+
+    return parser.parse_args()
+
+
+def main(args):
+    # Assemble the components.
+    # train_dataset
+    # Simply build a data reader.
+    # train_dataset = Dataset()
+    t = [
+        T.LoadImages(),
+        T.RandomCropByAlpha(crop_size=((320, 320), (480, 480), (640, 640))),
+        T.Resize(target_size=(320, 320)),
+        T.Normalize()
+    ]
+
+    train_dataset = HumanDataset(
+        dataset_root='/mnt/chenguowei01/datasets/matting/human_matting/',
+        transforms=t,
+        mode='train')
+
+    # loss
+    losses = {'types': [], 'coef': []}
+    # encoder-decoder alpha loss
+    losses['types'].append(MRSD())
+    losses['coef'].append(0.5)
+    # compositional loss
+    losses['types'].append(MRSD())
+    losses['coef'].append(0.5)
+    # refine alpha loss
+    losses['types'].append(MRSD())
+    losses['coef'].append(1)
+
+    # model
+    # VGG16 pretrained model url: 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/VGG16_pretrained.pdparams'
+    backbone = VGG16(input_channels=4, pretrained='./VGG16_pretrained.pdparams')
+    model = DIM(backbone=backbone)
+
+    # optimizer
+    # Simply build an optimizer first.
+    lr = paddle.optimizer.lr.PolynomialDecay(
+        0.001, decay_steps=200000, end_lr=0.0, power=0.9)
+    optimizer = paddle.optimizer.Adam(
+        learning_rate=lr, parameters=model.parameters())
+
+    # Call the train function to start training.
+    train(
+        model=model,
+        train_dataset=train_dataset,
+        optimizer=optimizer,
+        losses=losses,
+        iters=20000,
+        batch_size=4,
+        num_workers=5,
+        use_vdl=True)
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    main(args)
diff --git a/contrib/matting/transforms.py b/contrib/matting/transforms.py
new file mode 100644
index 0000000000..b01a509740
--- /dev/null
+++ b/contrib/matting/transforms.py
@@ -0,0 +1,187 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+
+import cv2
+import numpy as np
+from paddleseg.transforms import functional
+
+
+class Compose:
+    """
+    Do transformation on input data with corresponding pre-processing and augmentation operations.
+    The shape of input data to all operations is [height, width, channels].
+    """
+
+    def __init__(self, transforms, to_rgb=True):
+        if not isinstance(transforms, list):
+            raise TypeError('The transforms must be a list!')
+        self.transforms = transforms
+        self.to_rgb = to_rgb
+
+    def __call__(self, data):
+        """
+        Args:
+            data (dict): The data to transform.
+ + Returns: + dict: Data after transformation + """ + for op in self.transforms: + data = op(data) + if data is None: + return None + + data['img'] = np.transpose(data['img'], (2, 0, 1)) + for key in data.get('gt_fields', []): + if len(data[key].shape) == 2: + continue + data[key] = np.transpose(data[key], (2, 0, 1)) + + return data + + +class LoadImages: + def __init__(self, to_rgb=True): + self.to_rgb = to_rgb + + def __call__(self, data): + data['img'] = cv2.imread(data['img']) + for key in data.get('gt_fields', []): + data[key] = cv2.imread(data[key], cv2.IMREAD_UNCHANGED) + + if self.to_rgb: + data['img'] = cv2.cvtColor(data['img'], cv2.COLOR_BGR2RGB) + for key in data.get('gt_fields', []): + if len(data[key].shape) == 2: + continue + data[key] = cv2.cvtColor(data[key], cv2.COLOR_BGR2RGB) + + return data + + +class Resize: + def __init__(self, target_size=(512, 512)): + if isinstance(target_size, list) or isinstance(target_size, tuple): + if len(target_size) != 2: + raise ValueError( + '`target_size` should include 2 elements, but it is {}'. + format(target_size)) + else: + raise TypeError( + "Type of `target_size` is invalid. It should be list or tuple, but it is {}" + .format(type(target_size))) + + self.target_size = target_size + + def __call__(self, data): + data['img'] = functional.resize(data['img'], self.target_size) + for key in data.get('gt_fields', []): + data[key] = functional.resize(data[key], self.target_size) + return data + + +class Normalize: + """ + Normalize an image. + + Args: + mean (list, optional): The mean value of a data set. Default: [0.5, 0.5, 0.5]. + std (list, optional): The standard deviation of a data set. Default: [0.5, 0.5, 0.5]. + + Raises: + ValueError: When mean/std is not list or any value in std is 0. + """ + + def __init__(self, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)): + self.mean = mean + self.std = std + if not (isinstance(self.mean, (list, tuple)) + and isinstance(self.std, (list, tuple))): + raise ValueError( + "{}: input type is invalid. It should be list or tuple".format( + self)) + from functools import reduce + if reduce(lambda x, y: x * y, self.std) == 0: + raise ValueError('{}: std is invalid!'.format(self)) + + def __call__(self, data): + mean = np.array(self.mean)[np.newaxis, np.newaxis, :] + std = np.array(self.std)[np.newaxis, np.newaxis, :] + data['img'] = functional.normalize(data['img'], mean, std) + if 'fg' in data.get('gt_fields', []): + data['fg'] = functional.normalize(data['fg'], mean, std) + if 'bg' in data.get('gt_fields', []): + data['bg'] = functional.normalize(data['bg'], mean, std) + + return data + + +class RandomCropByAlpha: + """ + Randomly crop with uncertain area as the center + + Args: + crop_size (tuple|list): The size you want to crop from image. 
+    """
+
+    def __init__(self, crop_size=((320, 320), (480, 480), (640, 640))):
+        self.crop_size = crop_size
+
+    def __call__(self, data):
+        idx = np.random.randint(low=0, high=len(self.crop_size))
+        crop_size = self.crop_size[idx]
+        # Center the crop on a random pixel from the unknown (0 < alpha < 255) region.
+        crop_center = np.where((data['alpha'] > 0) & (data['alpha'] < 255))
+        center_h_array, center_w_array = crop_center
+        delta_h = crop_size[1] // 2
+        delta_w = crop_size[0] // 2
+
+        if len(center_h_array) == 0:
+            return data
+
+        rand_ind = np.random.randint(len(center_h_array))
+        center_h = center_h_array[rand_ind]
+        center_w = center_w_array[rand_ind]
+
+        start_h = max(0, center_h - delta_h)
+        start_w = max(0, center_w - delta_w)
+        end_h = min(data['img'].shape[0], start_h + crop_size[1])
+        end_w = min(data['img'].shape[1], start_w + crop_size[0])
+
+        data['img'] = data['img'][start_h:end_h, start_w:end_w]
+        for key in data.get('gt_fields', []):
+            data[key] = data[key][start_h:end_h, start_w:end_w]
+
+        return data
+
+
+if __name__ == "__main__":
+    transforms = [LoadImages(), RandomCropByAlpha()]
+    transforms = Compose(transforms)
+    img_path = '/mnt/chenguowei01/github/PaddleSeg/data/matting/human_matting/train/image/0051115Q_000001_0062_001.png'
+    bg_path = img_path.replace('image', 'bg')
+    fg_path = img_path.replace('image', 'fg')
+    alpha_path = img_path.replace('image', 'alpha')
+    data = {}
+    data['img'] = img_path
+    data['fg'] = fg_path
+    data['bg'] = bg_path
+    data['alpha'] = alpha_path
+    data['gt_fields'] = ['fg', 'bg', 'alpha']
+    data = transforms(data)
+    print(np.min(data['img']), np.max(data['img']))
+    print(data['img'].shape, data['fg'].shape, data['bg'].shape,
+          data['alpha'].shape)
+    cv2.imwrite('crop_img.png', data['img'].transpose((1, 2, 0)))
diff --git a/contrib/matting/utils.py b/contrib/matting/utils.py
new file mode 100644
index 0000000000..d82cdbaeec
--- /dev/null
+++ b/contrib/matting/utils.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
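+
+# File-system helpers shared by the matting scripts. An illustrative call,
+# assuming the dataset directory exists:
+#     get_files('data/matting/human_matting/train/image')
+# returns every image path found under that directory tree.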
+
+import os
+
+
+def get_files(root_path):
+    res = []
+    for root, dirs, files in os.walk(root_path, followlinks=True):
+        for f in files:
+            if f.endswith(('.jpg', '.png', '.jpeg', '.JPG')):
+                res.append(os.path.join(root, f))
+    return res

From 1767f955bdd99561d3fd9b1e1499a719a8595853 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Sun, 6 Jun 2021 23:55:48 +0800
Subject: [PATCH 114/210] update train.py

---
 contrib/matting/train.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/contrib/matting/train.py b/contrib/matting/train.py
index b96ee41dcf..e301398a5a 100644
--- a/contrib/matting/train.py
+++ b/contrib/matting/train.py
@@ -27,6 +27,8 @@ def parse_args():
 
 
 def main(args):
+    paddle.set_device('gpu')
+
     # Assemble the components
     # train_dataset
     # Build a simple data reader
     # train_dataset = Dataset()
@@ -73,9 +75,9 @@ def main(args):
         train_dataset=train_dataset,
         optimizer=optimizer,
         losses=losses,
-        iters=20000,
-        batch_size=4,
-        num_workers=5,
+        iters=100000,
+        batch_size=16,
+        num_workers=16,
         use_vdl=True)
 
 
From 8951da97a538ba0d5f9a3d83ca54d71218f230e1 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Mon, 7 Jun 2021 16:52:26 +0800
Subject: [PATCH 115/210] add stage params and add argparse to train

---
 contrib/matting/core/train.py |  77 +++++++++++++------------
 contrib/matting/model/dim.py  |  19 ++++++-
 contrib/matting/train.py      | 103 +++++++++++++++++++++++++++++++---
 3 files changed, 152 insertions(+), 47 deletions(-)

diff --git a/contrib/matting/core/train.py b/contrib/matting/core/train.py
index af7ccaba3c..0ec00a764d 100644
--- a/contrib/matting/core/train.py
+++ b/contrib/matting/core/train.py
@@ -24,30 +24,34 @@
 # from core.val import evaluate
 
 
-def loss_computation(logit_dict, label_dict, losses):
+def loss_computation(logit_dict, label_dict, losses, stage=3):
     """
     Select the matching logits and labels according to the losses
     """
     loss_list = []
-    # raw alpha
     mask = label_dict['trimap'] == 128
-    alpha_raw_loss = losses['types'][0](logit_dict['alpha_raw'],
-                                        label_dict['alpha'] / 255, mask)
-    alpha_raw_loss = losses['coef'][0] * alpha_raw_loss
-    loss_list.append(alpha_raw_loss)
-
-    # comp loss
-    comp_pred = logit_dict['alpha_raw'] * label_dict['fg'] + (
-        1 - logit_dict['alpha_raw']) * label_dict['bg']
-    comp_loss = losses['types'][2](comp_pred, label_dict['img'], mask)
-    comp_loss = losses['coef'][2] * comp_loss
-    loss_list.append(comp_loss)
-
-    # pred alpha
-    alpha_pred_loss = losses['types'][2](logit_dict['alpha_pred'],
-                                         label_dict['alpha'] / 255, mask)
-    alpha_pred_loss = losses['coef'][2] * alpha_pred_loss
-    loss_list.append(alpha_pred_loss)
+
+    if stage != 2:
+        # raw alpha
+        alpha_raw_loss = losses['types'][0](logit_dict['alpha_raw'],
+                                            label_dict['alpha'] / 255, mask)
+        alpha_raw_loss = losses['coef'][0] * alpha_raw_loss
+        loss_list.append(alpha_raw_loss)
+
+    if stage == 1 or stage == 3:
+        # comp loss
+        comp_pred = logit_dict['alpha_raw'] * label_dict['fg'] + (
+            1 - logit_dict['alpha_raw']) * label_dict['bg']
+        comp_loss = losses['types'][2](comp_pred, label_dict['img'], mask)
+        comp_loss = losses['coef'][2] * comp_loss
+        loss_list.append(comp_loss)
+
+    if stage == 2 or stage == 3:
+        # pred alpha
+        alpha_pred_loss = losses['types'][2](logit_dict['alpha_pred'],
+                                             label_dict['alpha'] / 255, mask)
+        alpha_pred_loss = losses['coef'][2] * alpha_pred_loss
+        loss_list.append(alpha_pred_loss)
 
     return loss_list
 
@@ -65,7 +69,8 @@ def train(model,
           num_workers=0,
           use_vdl=False,
           losses=None,
-          keep_checkpoint_max=5):
+          keep_checkpoint_max=5,
+          stage=3):
     """
     Launch training.
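 
     The losses dict pairs each loss op in losses['types'] with a weight in
     losses['coef']; loss_computation above decides which of those pairs are
     active for the given stage.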
@@ -147,7 +152,7 @@ def train(model, logit_dict = model(data) # 获取logit_dict, label_dict - loss_list = loss_computation(logit_dict, data, losses) + loss_list = loss_computation(logit_dict, data, losses, stage=stage) loss = sum(loss_list) loss.backward() @@ -179,9 +184,10 @@ def train(model, avg_loss, lr, avg_train_batch_cost, avg_train_reader_cost, batch_cost_averager.get_ips_average(), eta)) - logger.info( - "[LOSS] loss={:.4f}, alpha_raw_loss={:.4f}, alpha_pred_loss={:.4f}," - .format(avg_loss, avg_loss_list[0], avg_loss_list[1])) + # logger.info( + # "[LOSS] loss={:.4f}, alpha_raw_loss={:.4f}, alpha_pred_loss={:.4f}," + # .format(avg_loss, avg_loss_list[0], avg_loss_list[1])) + logger.info(avg_loss_list) if use_vdl: log_writer.add_scalar('Train/loss', avg_loss, iter) # Record all losses if there are more than 2 losses. @@ -203,15 +209,8 @@ def train(model, ori_img = data['img'][0] ori_img = paddle.transpose(ori_img, [1, 2, 0]) ori_img = (ori_img * 0.5 + 0.5) * 255 - alpha = (data['alpha'][0]).unsqueeze(-1) trimap = (data['trimap'][0]).unsqueeze(-1) - - alpha_raw = (logit_dict['alpha_raw'][0] * 255).transpose( - [1, 2, 0]) - alpha_pred = (logit_dict['alpha_pred'][0] * 255).transpose( - [1, 2, 0]) - log_writer.add_image( tag='ground truth/ori_img', img=ori_img.numpy(), @@ -222,14 +221,22 @@ def train(model, tag='ground truth/trimap', img=trimap.numpy(), step=iter) + + alpha_raw = (logit_dict['alpha_raw'][0] * 255).transpose( + [1, 2, 0]) log_writer.add_image( tag='prediction/alpha_raw', img=alpha_raw.numpy(), step=iter) - log_writer.add_image( - tag='prediction/alpha_pred', - img=alpha_pred.numpy(), - step=iter) + + if stage >= 2: + alpha_pred = ( + logit_dict['alpha_pred'][0] * 255).transpose( + [1, 2, 0]) + log_writer.add_image( + tag='prediction/alpha_pred', + img=alpha_pred.numpy(), + step=iter) avg_loss = 0.0 avg_loss_list = [] diff --git a/contrib/matting/model/dim.py b/contrib/matting/model/dim.py index f42065b8ac..efc4e5109b 100644 --- a/contrib/matting/model/dim.py +++ b/contrib/matting/model/dim.py @@ -35,18 +35,28 @@ class DIM(nn.Layer): """ - def __init__(self, backbone, backbone_indices=(-1, ), pretrained=None): + def __init__(self, + backbone, + backbone_indices=(-1, ), + pretrained=None, + stage=3): super().__init__() self.backbone = backbone self.backbone_indices = backbone_indices self.pretrained = pretrained + self.stage = stage self.decoder = Decoder(input_channels=512) + if self.stage == 2: + for param in self.backbone.parameters(): + param.stop_gradient = True + for param in self.decoder.parameters(): + param.stop_gradient = True self.refine = Refine() self.init_weight() def forward(self, inputs): - x = paddle.concat([inputs['img'], inputs['trimap'].unsqueeze(1)], + x = paddle.concat([inputs['img'], inputs['trimap'].unsqueeze(1) / 255], axis=1) fea_list, ids_list = self.backbone(x) @@ -56,6 +66,9 @@ def forward(self, inputs): for i in range(4): up_shape.append(fea_list[i].shape[-2:]) alpha_raw = self.decoder(fea_list[self.backbone_indices[0]], up_shape) + logit_dict = {'alpha_raw': alpha_raw} + if self.stage < 2: + return logit_dict # refine stage alpha_raw_ = alpha_raw * 255 @@ -66,7 +79,7 @@ def forward(self, inputs): alpha_pred = alpha_refine + alpha_raw alpha_pred = paddle.clip(alpha_pred, min=0, max=1) - logit_dict = {'alpha_pred': alpha_pred, 'alpha_raw': alpha_raw} + logit_dict['alpha_pred'] = alpha_pred return logit_dict def init_weight(self): diff --git a/contrib/matting/train.py b/contrib/matting/train.py index e301398a5a..e0e2d7b780 100644 --- 
a/contrib/matting/train.py
+++ b/contrib/matting/train.py
@@ -21,7 +21,86 @@
 
 
 def parse_args():
-    parser = argparse.ArgumentParser(description="Model training")
+    parser = argparse.ArgumentParser(description='Model training')
+    # params of training
+    # parser.add_argument(
+    #     "--config", dest="cfg", help="The config file.", default=None, type=str)
+    parser.add_argument(
+        '--iters',
+        dest='iters',
+        help='iters for training',
+        type=int,
+        default=None)
+    parser.add_argument(
+        '--batch_size',
+        dest='batch_size',
+        help='Mini batch size of one gpu or cpu',
+        type=int,
+        default=None)
+    parser.add_argument(
+        '--learning_rate',
+        dest='learning_rate',
+        help='Learning rate',
+        type=float,
+        default=None)
+    parser.add_argument(
+        '--save_interval',
+        dest='save_interval',
+        help='How many iters to save a model snapshot once during training.',
+        type=int,
+        default=1000)
+    parser.add_argument(
+        '--resume_model',
+        dest='resume_model',
+        help='The path of resume model',
+        type=str,
+        default=None)
+    parser.add_argument(
+        '--save_dir',
+        dest='save_dir',
+        help='The directory for saving the model snapshot',
+        type=str,
+        default='./output')
+    parser.add_argument(
+        '--keep_checkpoint_max',
+        dest='keep_checkpoint_max',
+        help='Maximum number of checkpoints to save',
+        type=int,
+        default=5)
+    parser.add_argument(
+        '--num_workers',
+        dest='num_workers',
+        help='Num workers for data loader',
+        type=int,
+        default=0)
+    parser.add_argument(
+        '--do_eval',
+        dest='do_eval',
+        help='Eval while training',
+        action='store_true')
+    parser.add_argument(
+        '--log_iters',
+        dest='log_iters',
+        help='Display logging information at every log_iters',
+        default=10,
+        type=int)
+    parser.add_argument(
+        '--use_vdl',
+        dest='use_vdl',
+        help='Whether to record the data to VisualDL during training',
+        action='store_true')
+    parser.add_argument(
+        '--stage',
+        dest='stage',
+        help='training stage: 0(simple loss), 1, 2, 3(whole net)',
+        type=int,
+        required=True,
+        choices=[0, 1, 2, 3])
+    parser.add_argument(
+        '--pretrained_model',
+        dest='pretrained_model',
+        help='the pretrained model',
+        type=str)
 
     return parser.parse_args()
 
@@ -60,14 +139,16 @@ def main(args):
     # model
     # vgg16 pretrained model: 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/VGG16_pretrained.pdparams'
     backbone = VGG16(input_channels=4, pretrained='./VGG16_pretrained.pdparams')
-    model = DIM(backbone=backbone)
+    model = DIM(
+        backbone=backbone, stage=args.stage, pretrained=args.pretrained_model)
+    print(model.parameters())
 
     # optimizer
     # Build a simple optimizer for now
-    lr = paddle.optimizer.lr.PolynomialDecay(
-        0.001, decay_steps=200000, end_lr=0.0, power=0.9)
+    # lr = paddle.optimizer.lr.PolynomialDecay(
+    #     0.001, decay_steps=200000, end_lr=0.0, power=0.9)
     optimizer = paddle.optimizer.Adam(
-        learning_rate=lr, parameters=model.parameters())
+        learning_rate=args.learning_rate, parameters=model.parameters())
 
     # Call train() to start training
     train(
         model=model,
         train_dataset=train_dataset,
         optimizer=optimizer,
         losses=losses,
-        iters=100000,
-        batch_size=16,
-        num_workers=16,
-        use_vdl=True)
+        iters=args.iters,
+        batch_size=args.batch_size,
+        num_workers=args.num_workers,
+        use_vdl=args.use_vdl,
+        save_interval=args.save_interval,
+        resume_model=args.resume_model,
+        stage=args.stage,
+        save_dir=args.save_dir)
 
 
 if __name__ == '__main__':
From 31520a0578687af58ad89d40fc2959497fb2a8a3 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Mon, 7 Jun 2021 19:34:30 +0800
Subject: [PATCH 116/210] upsample by deconv

---
 contrib/matting/model/dim.py | 24 
+++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/contrib/matting/model/dim.py b/contrib/matting/model/dim.py index efc4e5109b..a364e3d276 100644 --- a/contrib/matting/model/dim.py +++ b/contrib/matting/model/dim.py @@ -90,17 +90,23 @@ def init_weight(self): class Up(nn.Layer): def __init__(self, input_channels, output_channels): super().__init__() - self.conv = layers.ConvBNReLU( - input_channels, - output_channels, - kernel_size=5, - padding=2, - bias_attr=False) + # self.conv = layers.ConvBNReLU( + # input_channels, + # output_channels, + # kernel_size=5, + # padding=2, + # bias_attr=False) + + self.deconv = nn.Conv2DTranspose( + input_channels, output_channels, kernel_size=4, stride=2, padding=1) def forward(self, x, output_shape): - x = F.interpolate( - x, size=output_shape, mode='bilinear', align_corners=False) - x = self.conv(x) + # x = F.interpolate( + # x, size=output_shape, mode='bilinear', align_corners=False) + # x = self.conv(x) + x = self.deconv(x) + x = F.relu(x) + return x From 890ff4b7291644af5ba45a5f731b25b2f4e82502 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 8 Jun 2021 11:48:36 +0800 Subject: [PATCH 117/210] add evaluation process --- contrib/matting/core/__init__.py | 1 + contrib/matting/core/val.py | 131 +++++++++++++++++++++++++++++++ contrib/matting/dataset.py | 68 +++++++--------- contrib/matting/metric.py | 91 +++++++++++++++++++++ contrib/matting/model/dim.py | 5 ++ contrib/matting/transforms.py | 1 + contrib/matting/val.py | 87 ++++++++++++++++++++ 7 files changed, 346 insertions(+), 38 deletions(-) create mode 100644 contrib/matting/core/val.py create mode 100644 contrib/matting/metric.py create mode 100644 contrib/matting/val.py diff --git a/contrib/matting/core/__init__.py b/contrib/matting/core/__init__.py index 6e3eb12999..2e0309c2bf 100644 --- a/contrib/matting/core/__init__.py +++ b/contrib/matting/core/__init__.py @@ -1 +1,2 @@ from .train import train +from .val import evaluate diff --git a/contrib/matting/core/val.py b/contrib/matting/core/val.py new file mode 100644 index 0000000000..fcc53f63a8 --- /dev/null +++ b/contrib/matting/core/val.py @@ -0,0 +1,131 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
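+
+# Single-card evaluation loop: run the model over eval_dataset, accumulate
+# MSE/SAD over the unknown (trimap == 128) region, and optionally save the
+# predicted alpha mattes to disk.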
+
+import os
+
+import cv2
+import numpy as np
+import time
+import paddle
+import paddle.nn.functional as F
+from paddleseg.utils import TimeAverager, calculate_eta, logger, progbar
+
+import metric
+
+np.set_printoptions(suppress=True)
+
+
+def save_alpha_pred(alpha, path):
+    """
+    The value of alpha is in the range [0, 1]; the shape should be [h, w].
+    """
+    dirname = os.path.dirname(path)
+    if not os.path.exists(dirname):
+        os.makedirs(dirname)
+
+    alpha = (alpha * 255).astype('uint8')
+    cv2.imwrite(path, alpha)
+
+
+def reverse_transform(alpha, trans_info):
+    """Recover the prediction to its original shape"""
+    for item in trans_info[::-1]:
+        if item[0] == 'resize':
+            h, w = item[1][0], item[1][1]
+            alpha = F.interpolate(alpha, (h, w), mode='bilinear')
+        elif item[0] == 'padding':
+            h, w = item[1][0], item[1][1]
+            alpha = alpha[:, :, 0:h, 0:w]
+        else:
+            raise Exception("Unexpected info '{}' in im_info".format(item[0]))
+    return alpha
+
+
+def evaluate(model,
+             eval_dataset,
+             num_workers=0,
+             print_detail=True,
+             save_dir='output/results',
+             save_results=True):
+    model.eval()
+    nranks = paddle.distributed.ParallelEnv().nranks
+    local_rank = paddle.distributed.ParallelEnv().local_rank
+    if nranks > 1:
+        # Initialize parallel environment if not done.
+        if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized(
+        ):
+            paddle.distributed.init_parallel_env()
+    batch_sampler = paddle.io.DistributedBatchSampler(
+        eval_dataset, batch_size=1, shuffle=False, drop_last=False)
+    loader = paddle.io.DataLoader(
+        eval_dataset,
+        batch_sampler=batch_sampler,
+        num_workers=num_workers,
+        return_list=True,
+    )
+
+    total_iters = len(loader)
+    mse_metric = metric.MSE()
+    sad_metric = metric.SAD()
+
+    if print_detail:
+        logger.info(
+            "Start evaluating (total_samples: {}, total_iters: {})...".format(
+                len(eval_dataset), total_iters))
+    progbar_val = progbar.Progbar(target=total_iters, verbose=1)
+    reader_cost_averager = TimeAverager()
+    batch_cost_averager = TimeAverager()
+    batch_start = time.time()
+
+    with paddle.no_grad():
+        for iter, data in enumerate(loader):
+            reader_cost_averager.record(time.time() - batch_start)
+            logit_dict = model(data)
+
+            # Compute metrics and save results (single-card implementation for now)
+            if model.stage <= 1:
+                alpha_pred = logit_dict['alpha_raw'].numpy()
+            else:
+                alpha_pred = logit_dict['alpha_pred'].numpy()
+            alpha_gt = data['alpha'].numpy()
+            trimap = data['trimap'].numpy()
+            mse_metric.update(alpha_pred.squeeze(1), alpha_gt, trimap)
+            sad_metric.update(alpha_pred.squeeze(1), alpha_gt, trimap)
+
+            if save_results:
+                alpha_pred_one = alpha_pred[0].squeeze()
+                trimap = trimap.squeeze().astype('uint8')
+                alpha_pred_one[trimap == 255] = 1
+                alpha_pred_one[trimap == 0] = 0
+                save_alpha_pred(alpha_pred_one,
+                                os.path.join(save_dir, data['img_name'][0]))
+
+            batch_cost_averager.record(
+                time.time() - batch_start, num_samples=len(alpha_gt))
+            batch_cost = batch_cost_averager.get_average()
+            reader_cost = reader_cost_averager.get_average()
+
+            if local_rank == 0 and print_detail:
+                progbar_val.update(iter + 1, [('batch_cost', batch_cost),
+                                              ('reader cost', reader_cost)])
+
+            reader_cost_averager.reset()
+            batch_cost_averager.reset()
+            batch_start = time.time()
+
+    # Report the metrics
+    mse = mse_metric.evaluate()
+    sad = sad_metric.evaluate()
+
+    logger.info('MSE: {:.4f}, SAD: {:.4f}'.format(mse, sad))
diff --git a/contrib/matting/dataset.py b/contrib/matting/dataset.py
index e6371f7448..b59c012ef2 100644
--- a/contrib/matting/dataset.py
+++ b/contrib/matting/dataset.py
@@ -23,45 +23,29 @@
 import transforms as T
 
 
-def gen_trimap(alpha):
-    
k_size = random.choice(range(2, 5)) - iterations = np.random.randint(5, 15) - kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k_size, k_size)) - dilated = cv2.dilate(alpha, kernel, iterations=iterations) - eroded = cv2.erode(alpha, kernel, iterations=iterations) - trimap = np.zeros(alpha.shape) - trimap.fill(128) - trimap[eroded >= 255] = 255 - trimap[dilated <= 0] = 0 +def gen_trimap(alpha, mode='train', eval_kernel=7): + if mode == 'train': + k_size = random.choice(range(2, 5)) + iterations = np.random.randint(5, 15) + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k_size, k_size)) + dilated = cv2.dilate(alpha, kernel, iterations=iterations) + eroded = cv2.erode(alpha, kernel, iterations=iterations) + trimap = np.zeros(alpha.shape) + trimap.fill(128) + trimap[eroded >= 255] = 255 + trimap[dilated <= 0] = 0 + else: + k_size = eval_kernel + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k_size, k_size)) + dilated = cv2.dilate(alpha, kernel) + trimap = np.zeros(alpha.shape) + trimap.fill(128) + trimap[alpha >= 255] = 255 + trimap[dilated <= 0] = 0 return trimap -class Dataset(paddle.io.Dataset): - def __init__(self): - self.png_file = '/mnt/chenguowei01/datasets/matting/PhotoMatte85/0051115Q_000001_0041.png' - self.background = np.zeros((3, 320, 320), dtype='float32') - self.background[1, :, :] = 255 - - def __getitem__(self, idx): - img_png = cv2.imread(self.png_file, cv2.IMREAD_UNCHANGED) - img_png = cv2.resize(img_png, (320, 320)) - img_png = np.transpose(img_png, [2, 0, 1]) - alpha = img_png[-1, :, :].astype('float32') - - img = img_png[:-1, :, :].astype('float32') - img = img[::-1, :, :] - img = (img / 255 - 0.5) / 0.5 - # img = (alpha/255) * img + (1-alpha/255) * self.background - - trimap = gen_trimap(alpha).astype('float32') - - return img, alpha, trimap - - def __len__(self): - return 1000 - - class HumanDataset(paddle.io.Dataset): def __init__( self, @@ -72,6 +56,7 @@ def __init__( super().__init__() self.dataset_root = dataset_root self.transforms = T.Compose(transforms) + self.mode = mode img_dir = os.path.join(dataset_root, mode, 'image') self.img_list = get_files(img_dir) # a list @@ -85,13 +70,20 @@ def __getitem__(self, idx): data['alpha'] = self.alpha_list[idx] data['fg'] = self.fg_list[idx] data['bg'] = self.bg_list[idx] - data['gt_fields'] = ['alpha', 'fg', 'bg'] - + if self.mode == 'train': + data['gt_fields'] = ['alpha', 'fg', 'bg'] + else: + data['gt_fields'] = ['alpha'] + data['img_name'] = self.img_list[idx].lstrip( + self.dataset_root) # using in save prediction results + + data['trans_info'] = [] # Record shape change information data = self.transforms(data) data['img'] = data['img'].astype('float32') for key in data.get('gt_fields', []): data[key] = data[key].astype('float32') - data['trimap'] = gen_trimap(data['alpha']).astype('float32') + data['trimap'] = gen_trimap( + data['alpha'], mode=self.mode).astype('float32') return data diff --git a/contrib/matting/metric.py b/contrib/matting/metric.py new file mode 100644 index 0000000000..c05482703b --- /dev/null +++ b/contrib/matting/metric.py @@ -0,0 +1,91 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+
+
+class MSE():
+    """
+    Only calculate the unknown region if trimap provided.
+    """
+
+    def __init__(self):
+        self.mse_diffs = 0
+        self.count = 0
+
+    def update(self, pred, gt, trimap=None):
+        """
+        Update the metric.
+
+        Args:
+            pred (np.ndarray): The value range is [0., 1.].
+            gt (np.ndarray): The value range is [0, 255].
+            trimap (np.ndarray, optional): The value is in {0, 128, 255}. Default: None.
+        """
+        if trimap is None:
+            trimap = np.ones_like(gt) * 128
+        if not (pred.shape == gt.shape == trimap.shape):
+            raise ValueError(
+                'The shape of `pred`, `gt` and `trimap` should be equal. '
+                'but they are {}, {} and {}'.format(pred.shape, gt.shape,
+                                                    trimap.shape))
+        mask = trimap == 128
+        pixels = float(mask.sum())
+        gt = gt / 255.
+        diff = (pred - gt) * mask
+        mse_diff = (diff**2).sum() / pixels if pixels > 0 else 0
+
+        self.mse_diffs += mse_diff
+        self.count += 1
+
+    def evaluate(self):
+        return self.mse_diffs / self.count
+
+
+class SAD():
+    """
+    Only calculate the unknown region if trimap provided.
+    """
+
+    def __init__(self):
+        self.sad_diffs = 0
+        self.count = 0
+
+    def update(self, pred, gt, trimap=None):
+        """
+        Update the metric.
+
+        Args:
+            pred (np.ndarray): The value range is [0., 1.].
+            gt (np.ndarray): The value range is [0, 255].
+            trimap (np.ndarray, optional): The value is in {0, 128, 255}. Default: None.
+        """
+        if trimap is None:
+            trimap = np.ones_like(gt) * 128
+        if not (pred.shape == gt.shape == trimap.shape):
+            raise ValueError(
+                'The shape of `pred`, `gt` and `trimap` should be equal. '
+                'but they are {}, {} and {}'.format(pred.shape, gt.shape,
+                                                    trimap.shape))
+
+        mask = trimap == 128
+        gt = gt / 255.
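+        # Unlike MSE above, SAD accumulates a raw sum of absolute differences
+        # over the unknown region; it is not normalized by the pixel count.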
+ diff = (pred - gt) * mask + sad_diff = (np.abs(diff)).sum() + + self.sad_diffs += sad_diff + self.count += 1 + + def evaluate(self): + return self.sad_diffs / self.count diff --git a/contrib/matting/model/dim.py b/contrib/matting/model/dim.py index a364e3d276..ddcd0bd0d2 100644 --- a/contrib/matting/model/dim.py +++ b/contrib/matting/model/dim.py @@ -56,6 +56,7 @@ def __init__(self, self.init_weight() def forward(self, inputs): + input_shape = inputs['img'].shape[-2:] x = paddle.concat([inputs['img'], inputs['trimap'].unsqueeze(1) / 255], axis=1) fea_list, ids_list = self.backbone(x) @@ -66,6 +67,8 @@ def forward(self, inputs): for i in range(4): up_shape.append(fea_list[i].shape[-2:]) alpha_raw = self.decoder(fea_list[self.backbone_indices[0]], up_shape) + alpha_raw = F.interpolate( + alpha_raw, input_shape, mode='bilinear', align_corners=False) logit_dict = {'alpha_raw': alpha_raw} if self.stage < 2: return logit_dict @@ -78,6 +81,8 @@ def forward(self, inputs): # finally alpha alpha_pred = alpha_refine + alpha_raw alpha_pred = paddle.clip(alpha_pred, min=0, max=1) + alpha_pred = F.interpolate( + alpha_pred, input_shape, mode='bilinear', align_corners=False) logit_dict['alpha_pred'] = alpha_pred return logit_dict diff --git a/contrib/matting/transforms.py b/contrib/matting/transforms.py index b01a509740..a48d1077e2 100644 --- a/contrib/matting/transforms.py +++ b/contrib/matting/transforms.py @@ -87,6 +87,7 @@ def __init__(self, target_size=(512, 512)): self.target_size = target_size def __call__(self, data): + data['tans_info'].append(('resize', data['img'].shape[-2:])) data['img'] = functional.resize(data['img'], self.target_size) for key in data.get('gt_fields', []): data[key] = functional.resize(data[key], self.target_size) diff --git a/contrib/matting/val.py b/contrib/matting/val.py new file mode 100644 index 0000000000..0fc09e7b61 --- /dev/null +++ b/contrib/matting/val.py @@ -0,0 +1,87 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
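+
+# Evaluation entry point: builds the val split and the DIM model, then hands
+# them to core.evaluate.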
+
+import argparse
+
+from core import evaluate
+from model import *
+from dataset import HumanDataset
+import transforms as T
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Model evaluation')
+    # params of evaluation
+    # parser.add_argument(
+    #     "--config", dest="cfg", help="The config file.", default=None, type=str)
+
+    parser.add_argument(
+        '--model_path',
+        dest='model_path',
+        help='The path of model for evaluation',
+        type=str,
+        default=None)
+    parser.add_argument(
+        '--save_dir',
+        dest='save_dir',
+        help='The directory for saving evaluation results',
+        type=str,
+        default='./output/results')
+    parser.add_argument(
+        '--num_workers',
+        dest='num_workers',
+        help='Num workers for data loader',
+        type=int,
+        default=0)
+    parser.add_argument(
+        '--stage',
+        dest='stage',
+        help='training stage: 0(simple loss), 1, 2, 3(whole net)',
+        type=int,
+        required=True,
+        choices=[0, 1, 2, 3])
+
+    return parser.parse_args()
+
+
+def main(args):
+    paddle.set_device('gpu')
+
+    # Assemble the components
+    # eval_dataset
+    # Build a simple data reader
+    # eval_dataset = Dataset()
+    t = [T.LoadImages(), T.Normalize()]
+
+    eval_dataset = HumanDataset(
+        dataset_root='/mnt/chenguowei01/datasets/matting/human_matting/',
+        transforms=t,
+        mode='val')
+
+    # model
+    backbone = VGG16(input_channels=4)
+    model = DIM(backbone=backbone, stage=args.stage, pretrained=args.model_path)
+
+    # Call evaluate() to start evaluation
+    evaluate(
+        model=model,
+        eval_dataset=eval_dataset,
+        num_workers=args.num_workers,
+        save_dir=args.save_dir,
+        save_results=True)
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    main(args)
From d888cb37312cf332cc17d9e8d3b7b444a7af5e89 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Tue, 8 Jun 2021 15:40:39 +0800
Subject: [PATCH 118/210] using trimap if existing during evaluation

---
 contrib/matting/dataset.py    | 21 ++++++++++++---------
 contrib/matting/transforms.py |  2 +-
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/contrib/matting/dataset.py b/contrib/matting/dataset.py
index b59c012ef2..b5703ed27b 100644
--- a/contrib/matting/dataset.py
+++ b/contrib/matting/dataset.py
@@ -70,20 +70,28 @@ def __getitem__(self, idx):
         data['alpha'] = self.alpha_list[idx]
         data['fg'] = self.fg_list[idx]
         data['bg'] = self.bg_list[idx]
+        data['gt_field'] = []
+
         if self.mode == 'train':
             data['gt_fields'] = ['alpha', 'fg', 'bg']
         else:
             data['gt_fields'] = ['alpha']
             data['img_name'] = self.img_list[idx].lstrip(
                 self.dataset_root)  # using in save prediction results
+        # If has trimap, use it
+        trimap_path = data['alpha'].replace('alpha', 'trimap')
+        if os.path.exists(trimap_path):
+            data['trimap'] = trimap_path
+            data['gt_fields'].append('trimap')
 
         data['trans_info'] = []  # Record shape change information
         data = self.transforms(data)
         data['img'] = data['img'].astype('float32')
         for key in data.get('gt_fields', []):
             data[key] = data[key].astype('float32')
-        data['trimap'] = gen_trimap(
-            data['alpha'], mode=self.mode).astype('float32')
+        if 'trimap' not in data:
+            data['trimap'] = gen_trimap(
+                data['alpha'], mode=self.mode).astype('float32')
 
         return data
 
@@ -111,10 +119,5 @@ def gen_trimap(alpha):
         dataset_root='/mnt/chenguowei01/datasets/matting/human_matting/',
         transforms=t,
         mode='val')
-    print(train_dataset.img_list[0], len(train_dataset.img_list),
-          len(train_dataset.alpha_list), len(train_dataset.fg_list),
-          len(train_dataset.bg_list))
-    data = train_dataset[0]
-    print(np.min(data['img']), np.max(data['img']))
-    print(data['img'].shape, data['fg'].shape, data['bg'].shape,
-          data['alpha'].shape, 
data['trimap'].shape) + for data in train_dataset: + continue diff --git a/contrib/matting/transforms.py b/contrib/matting/transforms.py index a48d1077e2..e063ab7460 100644 --- a/contrib/matting/transforms.py +++ b/contrib/matting/transforms.py @@ -87,7 +87,7 @@ def __init__(self, target_size=(512, 512)): self.target_size = target_size def __call__(self, data): - data['tans_info'].append(('resize', data['img'].shape[-2:])) + data['trans_info'].append(('resize', data['img'].shape[-2:])) data['img'] = functional.resize(data['img'], self.target_size) for key in data.get('gt_fields', []): data[key] = functional.resize(data[key], self.target_size) From 1acaf1d4930d1b7f3c15c48ea53700442f92c312 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 8 Jun 2021 16:43:50 +0800 Subject: [PATCH 119/210] update transforms.py --- contrib/matting/transforms.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/contrib/matting/transforms.py b/contrib/matting/transforms.py index e063ab7460..a1fa820f5f 100644 --- a/contrib/matting/transforms.py +++ b/contrib/matting/transforms.py @@ -61,6 +61,10 @@ def __call__(self, data): data['img'] = cv2.imread(data['img']) for key in data.get('gt_fields', []): data[key] = cv2.imread(data[key], cv2.IMREAD_UNCHANGED) + # if alpha and trimap has 3 channels, extract one. + if key in ['alpha', 'trimap']: + if len(data[key].shape) > 2: + data[key] = data[key][:, :, 0] if self.to_rgb: data['img'] = cv2.cvtColor(data['img'], cv2.COLOR_BGR2RGB) From 344c70acca8f90b0ff9e40cf915779494680e660 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 8 Jun 2021 23:37:42 +0800 Subject: [PATCH 120/210] update dataset.py --- contrib/matting/dataset.py | 66 +++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 37 deletions(-) diff --git a/contrib/matting/dataset.py b/contrib/matting/dataset.py index b5703ed27b..fd1f26ef15 100644 --- a/contrib/matting/dataset.py +++ b/contrib/matting/dataset.py @@ -23,29 +23,6 @@ import transforms as T -def gen_trimap(alpha, mode='train', eval_kernel=7): - if mode == 'train': - k_size = random.choice(range(2, 5)) - iterations = np.random.randint(5, 15) - kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k_size, k_size)) - dilated = cv2.dilate(alpha, kernel, iterations=iterations) - eroded = cv2.erode(alpha, kernel, iterations=iterations) - trimap = np.zeros(alpha.shape) - trimap.fill(128) - trimap[eroded >= 255] = 255 - trimap[dilated <= 0] = 0 - else: - k_size = eval_kernel - kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k_size, k_size)) - dilated = cv2.dilate(alpha, kernel) - trimap = np.zeros(alpha.shape) - trimap.fill(128) - trimap[alpha >= 255] = 255 - trimap[dilated <= 0] = 0 - - return trimap - - class HumanDataset(paddle.io.Dataset): def __init__( self, @@ -90,7 +67,7 @@ def __getitem__(self, idx): for key in data.get('gt_fields', []): data[key] = data[key].astype('float32') if 'trimap' not in data: - data['trimap'] = gen_trimap( + data['trimap'] = self.gen_trimap( data['alpha'], mode=self.mode).astype('float32') return data @@ -98,17 +75,28 @@ def __getitem__(self, idx): def __len__(self): return len(self.img_list) - @property - def gen_trimap(alpha): - k_size = random.choice(range(2, 5)) - iterations = np.random.randint(5, 15) - kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k_size, k_size)) - dilated = cv2.dilate(alpha, kernel, iterations=iterations) - eroded = cv2.erode(alpha, kernel, iterations=iterations) - trimap = np.zeros(alpha.shape) - trimap.fill(128) - trimap[eroded >= 255] = 255 - 
trimap[dilated <= 0] = 0 + @staticmethod + def gen_trimap(alpha, mode='train', eval_kernel=7): + if mode == 'train': + k_size = random.choice(range(2, 5)) + iterations = np.random.randint(5, 15) + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, + (k_size, k_size)) + dilated = cv2.dilate(alpha, kernel, iterations=iterations) + eroded = cv2.erode(alpha, kernel, iterations=iterations) + trimap = np.zeros(alpha.shape) + trimap.fill(128) + trimap[eroded > 254.5] = 255 + trimap[dilated < 0.5] = 0 + else: + k_size = eval_kernel + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, + (k_size, k_size)) + dilated = cv2.dilate(alpha, kernel) + trimap = np.zeros(alpha.shape) + trimap.fill(128) + trimap[alpha >= 250] = 255 + trimap[dilated <= 5] = 0 return trimap @@ -119,5 +107,9 @@ def gen_trimap(alpha): dataset_root='/mnt/chenguowei01/datasets/matting/human_matting/', transforms=t, mode='val') - for data in train_dataset: - continue + + for i in range(10): + idx = np.random.randint(len(train_dataset)) + data = train_dataset[idx] + trimap = data['trimap'] + cv2.imwrite(str(idx) + '.png', trimap.astype('uint8')) From 9d1ce7ef2a48173b23f80ea0942f5c67f8a70219 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 9 Jun 2021 20:18:33 +0800 Subject: [PATCH 121/210] add skip connec and update refinenet --- contrib/matting/core/train.py | 6 +-- contrib/matting/model/dim.py | 80 +++++++++++++++++++++-------------- contrib/matting/model/vgg.py | 25 +++++------ contrib/matting/train.py | 3 +- contrib/matting/val.py | 2 +- 5 files changed, 67 insertions(+), 49 deletions(-) diff --git a/contrib/matting/core/train.py b/contrib/matting/core/train.py index 0ec00a764d..6d9f40c9b2 100644 --- a/contrib/matting/core/train.py +++ b/contrib/matting/core/train.py @@ -42,8 +42,8 @@ def loss_computation(logit_dict, label_dict, losses, stage=3): # comp loss comp_pred = logit_dict['alpha_raw'] * label_dict['fg'] + ( 1 - logit_dict['alpha_raw']) * label_dict['bg'] - comp_loss = losses['types'][2](comp_pred, label_dict['img'], mask) - comp_loss = losses['coef'][2] * comp_loss + comp_loss = losses['types'][1](comp_pred, label_dict['img'], mask) + comp_loss = losses['coef'][1] * comp_loss loss_list.append(comp_loss) if stage == 2 or stage == 3: @@ -235,7 +235,7 @@ def train(model, [1, 2, 0]) log_writer.add_image( tag='prediction/alpha_pred', - img=alpha_pred.numpy(), + img=alpha_pred.numpy().astype('uint8'), step=iter) avg_loss = 0.0 diff --git a/contrib/matting/model/dim.py b/contrib/matting/model/dim.py index ddcd0bd0d2..d214711ef7 100644 --- a/contrib/matting/model/dim.py +++ b/contrib/matting/model/dim.py @@ -37,12 +37,10 @@ class DIM(nn.Layer): def __init__(self, backbone, - backbone_indices=(-1, ), pretrained=None, stage=3): super().__init__() self.backbone = backbone - self.backbone_indices = backbone_indices self.pretrained = pretrained self.stage = stage @@ -52,7 +50,8 @@ def __init__(self, param.stop_gradient = True for param in self.decoder.parameters(): param.stop_gradient = True - self.refine = Refine() + if self.stage >= 2: + self.refine = Refine() self.init_weight() def forward(self, inputs): @@ -63,10 +62,9 @@ def forward(self, inputs): # decoder stage up_shape = [] - up_shape.append(x.shape[-2:]) - for i in range(4): + for i in range(5): up_shape.append(fea_list[i].shape[-2:]) - alpha_raw = self.decoder(fea_list[self.backbone_indices[0]], up_shape) + alpha_raw = self.decoder(fea_list, up_shape) alpha_raw = F.interpolate( alpha_raw, input_shape, mode='bilinear', align_corners=False) logit_dict = {'alpha_raw': 
alpha_raw} @@ -74,15 +72,15 @@ def forward(self, inputs): return logit_dict # refine stage - alpha_raw_ = alpha_raw * 255 - refine_input = paddle.concat([x[:, :3, :, :], alpha_raw_], axis=1) + refine_input = paddle.concat([inputs['img'], alpha_raw], axis=1) alpha_refine = self.refine(refine_input) # finally alpha alpha_pred = alpha_refine + alpha_raw - alpha_pred = paddle.clip(alpha_pred, min=0, max=1) alpha_pred = F.interpolate( alpha_pred, input_shape, mode='bilinear', align_corners=False) + if not self.training: + alpha_pred = paddle.clip(alpha_pred, min=0, max=1) logit_dict['alpha_pred'] = alpha_pred return logit_dict @@ -92,24 +90,44 @@ def init_weight(self): utils.load_entire_model(self, self.pretrained) +# class Up(nn.Layer): +# def __init__(self, input_channels, output_channels): +# super().__init__() +# # self.conv = layers.ConvBNReLU( +# # input_channels, +# # output_channels, +# # kernel_size=5, +# # padding=2, +# # bias_attr=False) + +# self.deconv = nn.Conv2DTranspose( +# input_channels, output_channels, kernel_size=4, stride=2, padding=1) + +# def forward(self, x, output_shape): +# # x = F.interpolate( +# # x, size=output_shape, mode='bilinear', align_corners=False) +# # x = self.conv(x) +# x = self.deconv(x) +# x = F.relu(x) + +# return x + +# bilinear interpolate skip connect class Up(nn.Layer): def __init__(self, input_channels, output_channels): super().__init__() - # self.conv = layers.ConvBNReLU( - # input_channels, - # output_channels, - # kernel_size=5, - # padding=2, - # bias_attr=False) - - self.deconv = nn.Conv2DTranspose( - input_channels, output_channels, kernel_size=4, stride=2, padding=1) - - def forward(self, x, output_shape): - # x = F.interpolate( - # x, size=output_shape, mode='bilinear', align_corners=False) - # x = self.conv(x) - x = self.deconv(x) + self.conv = layers.ConvBNReLU( + input_channels, + output_channels, + kernel_size=5, + padding=2, + bias_attr=False) + + def forward(self, x, skip, output_shape): + x = F.interpolate( + x, size=output_shape, mode='bilinear', align_corners=False) + x = x + skip + x = self.conv(x) x = F.relu(x) return x @@ -129,13 +147,14 @@ def __init__(self, input_channels): self.alpha_conv = nn.Conv2D( 64, 1, kernel_size=5, padding=2, bias_attr=False) - def forward(self, x, shape_list): + def forward(self, fea_list, shape_list): + x = fea_list[-1] x = self.deconv6(x) - x = self.deconv5(x, shape_list[4]) - x = self.deconv4(x, shape_list[3]) - x = self.deconv3(x, shape_list[2]) - x = self.deconv2(x, shape_list[1]) - x = self.deconv1(x, shape_list[0]) + x = self.deconv5(x, fea_list[4], shape_list[4]) + x = self.deconv4(x, fea_list[3], shape_list[3]) + x = self.deconv3(x, fea_list[2], shape_list[2]) + x = self.deconv2(x, fea_list[1], shape_list[1]) + x = self.deconv1(x, fea_list[0], shape_list[0]) alpha = self.alpha_conv(x) alpha = F.sigmoid(alpha) @@ -159,7 +178,6 @@ def forward(self, x): x = self.conv2(x) x = self.conv3(x) alpha = self.alpha_pred(x) - alpha = F.sigmoid(alpha) return alpha diff --git a/contrib/matting/model/vgg.py b/contrib/matting/model/vgg.py index 05fca07658..f490ad88ec 100644 --- a/contrib/matting/model/vgg.py +++ b/contrib/matting/model/vgg.py @@ -79,8 +79,9 @@ def forward(self, inputs): if self.groups == 4: x = self._conv_4(x) x = F.relu(x) + skip = x x, max_indices = self._pool(x) - return x, max_indices + return x, max_indices, skip class VGGNet(nn.Layer): @@ -117,20 +118,20 @@ def __init__(self, input_channels=3, layers=11, pretrained=None): def forward(self, inputs): fea_list = [] ids_list = [] - x, 
ids = self._conv_block_1(inputs) - fea_list.append(x) - ids_list.append(x) - x, ids = self._conv_block_2(x) - fea_list.append(x) + x, ids, skip = self._conv_block_1(inputs) + fea_list.append(skip) ids_list.append(ids) - x, ids = self._conv_block_3(x) - fea_list.append(x) + x, ids, skip = self._conv_block_2(x) + fea_list.append(skip) ids_list.append(ids) - x, ids = self._conv_block_4(x) - fea_list.append(x) + x, ids, skip = self._conv_block_3(x) + fea_list.append(skip) ids_list.append(ids) - x, ids = self._conv_block_5(x) - fea_list.append(x) + x, ids, skip = self._conv_block_4(x) + fea_list.append(skip) + ids_list.append(ids) + x, ids, skip = self._conv_block_5(x) + fea_list.append(skip) ids_list.append(ids) x = F.relu(self._conv_6(x)) fea_list.append(x) diff --git a/contrib/matting/train.py b/contrib/matting/train.py index e0e2d7b780..d54d11abc9 100644 --- a/contrib/matting/train.py +++ b/contrib/matting/train.py @@ -120,7 +120,7 @@ def main(args): ] train_dataset = HumanDataset( - dataset_root='/mnt/chenguowei01/datasets/matting/human_matting/', + dataset_root='data/matting/human_matting/', transforms=t, mode='train') @@ -141,7 +141,6 @@ def main(args): backbone = VGG16(input_channels=4, pretrained='./VGG16_pretrained.pdparams') model = DIM( backbone=backbone, stage=args.stage, pretrained=args.pretrained_model) - print(model.parameters()) # optimizer # 简单的先构建一个优化器 diff --git a/contrib/matting/val.py b/contrib/matting/val.py index 0fc09e7b61..68ffbd6d92 100644 --- a/contrib/matting/val.py +++ b/contrib/matting/val.py @@ -65,7 +65,7 @@ def main(args): t = [T.LoadImages(), T.Normalize()] eval_dataset = HumanDataset( - dataset_root='/mnt/chenguowei01/datasets/matting/human_matting/', + dataset_root='data/matting/human_matting/', transforms=t, mode='val') From 9681a07ce777b2e51b1296d1c00cd9df4bd1aa65 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 11 Jun 2021 19:57:25 +0800 Subject: [PATCH 122/210] update static config location --- benchmark/README.md | 6 +++--- .../{ => benchmark}/deeplabv3p_resnet50_vd_cityscapes.yaml | 0 .../{ => benchmark}/hrnetw18_cityscapes_1024x512_215.yaml | 0 3 files changed, 3 insertions(+), 3 deletions(-) rename legacy/configs/{ => benchmark}/deeplabv3p_resnet50_vd_cityscapes.yaml (100%) rename legacy/configs/{ => benchmark}/hrnetw18_cityscapes_1024x512_215.yaml (100%) diff --git a/benchmark/README.md b/benchmark/README.md index 5078391f28..0d383cdd17 100644 --- a/benchmark/README.md +++ b/benchmark/README.md @@ -37,17 +37,17 @@ benchmark/deeplabv3p.yml ``` cd legacy export CUDA_VISIBLE_DEVICES=0 -python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 2 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True +python pdseg/train.py --cfg configs/benchmark/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 2 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True ``` 单机多卡使用如下命令进行训练: ``` export CUDA_VISIBLE_DEVICES=0,1 -fleetrun pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True +fleetrun pdseg/train.py --cfg configs/benchmark/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True ``` deeplabv3p模型的配置文件为: -configs/deeplabv3p_resnet50_vd_cityscapes.yaml +configs/benchmark/deeplabv3p_resnet50_vd_cityscapes.yaml **注意** 静态图中的BATCH_SIZE为总的batch size。 diff --git a/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml 
b/legacy/configs/benchmark/deeplabv3p_resnet50_vd_cityscapes.yaml similarity index 100% rename from legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml rename to legacy/configs/benchmark/deeplabv3p_resnet50_vd_cityscapes.yaml diff --git a/legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml b/legacy/configs/benchmark/hrnetw18_cityscapes_1024x512_215.yaml similarity index 100% rename from legacy/configs/hrnetw18_cityscapes_1024x512_215.yaml rename to legacy/configs/benchmark/hrnetw18_cityscapes_1024x512_215.yaml From 7f9140ac6806519024ffdadd44651965530457d2 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Tue, 15 Jun 2021 11:03:55 +0800 Subject: [PATCH 123/210] add origin legacy thing --- legacy/configs/cityscape_fast_scnn.yaml | 53 ++ legacy/configs/deepglobe_road_extraction.yaml | 45 ++ .../configs/deeplabv3p_mobilenet-1-0_pet.yaml | 47 ++ .../deeplabv3p_mobilenetv2_cityscapes.yaml | 47 ++ ...eplabv3p_mobilenetv3_large_cityscapes.yaml | 58 +++ .../deeplabv3p_resnet50_vd_cityscapes.yaml | 47 ++ .../deeplabv3p_xception65_cityscapes.yaml | 44 ++ .../deeplabv3p_xception65_optic_kunlun.yaml | 34 ++ legacy/configs/fast_scnn_pet.yaml | 43 ++ legacy/configs/fcn.yaml | 39 ++ legacy/configs/hrnet_optic.yaml | 39 ++ legacy/configs/icnet_optic.yaml | 35 ++ ...ovasz_hinge_deeplabv3p_mobilenet_road.yaml | 50 ++ ...z_softmax_deeplabv3p_mobilenet_pascal.yaml | 49 ++ legacy/configs/ocrnet_w18_bn_cityscapes.yaml | 54 ++ legacy/configs/pspnet_optic.yaml | 35 ++ legacy/configs/unet_optic.yaml | 32 ++ legacy/pdseg/loss.py | 123 ++++- legacy/pdseg/models/backbone/mobilenet_v2.py | 315 +++++++++++ legacy/pdseg/models/backbone/mobilenet_v3.py | 363 +++++++++++++ legacy/pdseg/models/backbone/resnet.py | 341 ++++++++++++ legacy/pdseg/models/backbone/vgg.py | 82 +++ legacy/pdseg/models/backbone/xception.py | 317 +++++++++++ legacy/pdseg/models/model_builder.py | 35 +- legacy/pdseg/models/modeling/deeplab.py | 88 +++- legacy/pdseg/models/modeling/fast_scnn.py | 304 +++++++++++ legacy/pdseg/models/modeling/icnet.py | 206 ++++++++ legacy/pdseg/models/modeling/ocrnet.py | 493 ++++++++++++++++++ legacy/pdseg/models/modeling/pspnet.py | 115 ++++ legacy/pdseg/models/modeling/unet.py | 135 +++++ legacy/pdseg/solver.py | 46 ++ 31 files changed, 3704 insertions(+), 10 deletions(-) create mode 100644 legacy/configs/cityscape_fast_scnn.yaml create mode 100644 legacy/configs/deepglobe_road_extraction.yaml create mode 100644 legacy/configs/deeplabv3p_mobilenet-1-0_pet.yaml create mode 100644 legacy/configs/deeplabv3p_mobilenetv2_cityscapes.yaml create mode 100644 legacy/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml create mode 100644 legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml create mode 100644 legacy/configs/deeplabv3p_xception65_cityscapes.yaml create mode 100644 legacy/configs/deeplabv3p_xception65_optic_kunlun.yaml create mode 100644 legacy/configs/fast_scnn_pet.yaml create mode 100644 legacy/configs/fcn.yaml create mode 100644 legacy/configs/hrnet_optic.yaml create mode 100644 legacy/configs/icnet_optic.yaml create mode 100644 legacy/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml create mode 100755 legacy/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml create mode 100644 legacy/configs/ocrnet_w18_bn_cityscapes.yaml create mode 100644 legacy/configs/pspnet_optic.yaml create mode 100644 legacy/configs/unet_optic.yaml create mode 100644 legacy/pdseg/models/backbone/mobilenet_v2.py create mode 100644 legacy/pdseg/models/backbone/mobilenet_v3.py create mode 100644 
legacy/pdseg/models/backbone/resnet.py create mode 100644 legacy/pdseg/models/backbone/vgg.py create mode 100644 legacy/pdseg/models/backbone/xception.py create mode 100644 legacy/pdseg/models/modeling/fast_scnn.py create mode 100644 legacy/pdseg/models/modeling/icnet.py create mode 100644 legacy/pdseg/models/modeling/ocrnet.py create mode 100644 legacy/pdseg/models/modeling/pspnet.py create mode 100644 legacy/pdseg/models/modeling/unet.py diff --git a/legacy/configs/cityscape_fast_scnn.yaml b/legacy/configs/cityscape_fast_scnn.yaml new file mode 100644 index 0000000000..34bd76be31 --- /dev/null +++ b/legacy/configs/cityscape_fast_scnn.yaml @@ -0,0 +1,53 @@ +EVAL_CROP_SIZE: (2048, 1024) # (width, height), for unpadding rangescaling and stepscaling +TRAIN_CROP_SIZE: (1024, 1024) # (width, height), for unpadding rangescaling and stepscaling +AUG: + AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling + FIX_RESIZE_SIZE: (640, 640) # (width, height), for unpadding + INF_RESIZE_VALUE: 500 # for rangescaling + MAX_RESIZE_VALUE: 600 # for rangescaling + MIN_RESIZE_VALUE: 400 # for rangescaling + MAX_SCALE_FACTOR: 2.0 # for stepscaling + MIN_SCALE_FACTOR: 0.5 # for stepscaling + SCALE_STEP_SIZE: 0.25 # for stepscaling + MIRROR: True + FLIP: False + FLIP_RATIO: 0.2 + RICH_CROP: + ENABLE: True + ASPECT_RATIO: 0.0 + BLUR: False + BLUR_RATIO: 0.1 + MAX_ROTATION: 0 + MIN_AREA_RATIO: 0.0 + BRIGHTNESS_JITTER_RATIO: 0.4 + CONTRAST_JITTER_RATIO: 0.4 + SATURATION_JITTER_RATIO: 0.4 +BATCH_SIZE: 12 +MEAN: [0.5, 0.5, 0.5] +STD: [0.5, 0.5, 0.5] +DATASET: + DATA_DIR: "./dataset/cityscapes/" + IMAGE_TYPE: "rgb" # choice rgb or rgba + NUM_CLASSES: 19 + TEST_FILE_LIST: "dataset/cityscapes/val.list" + TRAIN_FILE_LIST: "dataset/cityscapes/train.list" + VAL_FILE_LIST: "dataset/cityscapes/val.list" + VIS_FILE_LIST: "dataset/cityscapes/val.list" + IGNORE_INDEX: 255 +FREEZE: + MODEL_FILENAME: "model" + PARAMS_FILENAME: "params" +MODEL: + DEFAULT_NORM_TYPE: "bn" + MODEL_NAME: "fast_scnn" + +TEST: + TEST_MODEL: "snapshots/cityscape_fast_scnn/final/" +TRAIN: + MODEL_SAVE_DIR: "snapshots/cityscape_fast_scnn/" + SNAPSHOT_EPOCH: 10 +SOLVER: + LR: 0.001 + LR_POLICY: "poly" + OPTIMIZER: "sgd" + NUM_EPOCHS: 100 diff --git a/legacy/configs/deepglobe_road_extraction.yaml b/legacy/configs/deepglobe_road_extraction.yaml new file mode 100644 index 0000000000..d6770287a3 --- /dev/null +++ b/legacy/configs/deepglobe_road_extraction.yaml @@ -0,0 +1,45 @@ +EVAL_CROP_SIZE: (1025, 1025) # (width, height), for unpadding rangescaling and stepscaling +TRAIN_CROP_SIZE: (769, 769) # (width, height), for unpadding rangescaling and stepscaling +AUG: + AUG_METHOD: u"stepscaling" # choice unpadding rangescaling and stepscaling + FIX_RESIZE_SIZE: (640, 640) # (width, height), for unpadding + INF_RESIZE_VALUE: 500 # for rangescaling + MAX_RESIZE_VALUE: 600 # for rangescaling + MIN_RESIZE_VALUE: 400 # for rangescaling + MAX_SCALE_FACTOR: 2.0 # for stepscaling + MIN_SCALE_FACTOR: 0.5 # for stepscaling + SCALE_STEP_SIZE: 0.25 # for stepscaling +BATCH_SIZE: 8 +DATASET: + DATA_DIR: "./dataset/MiniDeepGlobeRoadExtraction/" + IMAGE_TYPE: "rgb" # choice rgb or rgba + NUM_CLASSES: 2 + TEST_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt" + TRAIN_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/train.txt" + VAL_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt" + VIS_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt" + IGNORE_INDEX: 255 + SEPARATOR: '|' +FREEZE: + MODEL_FILENAME: "model" + PARAMS_FILENAME: "params" + 
SAVE_DIR: "freeze_model" +MODEL: + DEFAULT_NORM_TYPE: "bn" + MODEL_NAME: "deeplabv3p" + DEEPLAB: + BACKBONE: "mobilenetv2" + DEPTH_MULTIPLIER: 1.0 + ENCODER_WITH_ASPP: False + ENABLE_DECODER: False +TEST: + TEST_MODEL: "./saved_model/deeplabv3p_mobilenetv2-1-0_bn_deepglobe_road_extraction/final" +TRAIN: + MODEL_SAVE_DIR: "./saved_model/deeplabv3p_mobilenetv2-1-0_bn_deepglobe_road_extraction/" + PRETRAINED_MODEL_DIR: "./pretrained_model/deeplabv3p_mobilenetv2-1-0_bn_coco/" + SNAPSHOT_EPOCH: 10 +SOLVER: + LR: 0.001 + LR_POLICY: "poly" + OPTIMIZER: "adam" + NUM_EPOCHS: 300 diff --git a/legacy/configs/deeplabv3p_mobilenet-1-0_pet.yaml b/legacy/configs/deeplabv3p_mobilenet-1-0_pet.yaml new file mode 100644 index 0000000000..7578034ddc --- /dev/null +++ b/legacy/configs/deeplabv3p_mobilenet-1-0_pet.yaml @@ -0,0 +1,47 @@ +TRAIN_CROP_SIZE: (512, 512) # (width, height), for unpadding rangescaling and stepscaling +EVAL_CROP_SIZE: (512, 512) # (width, height), for unpadding rangescaling and stepscaling +AUG: + AUG_METHOD: "unpadding" # choice unpadding rangescaling and stepscaling + FIX_RESIZE_SIZE: (512, 512) # (width, height), for unpadding + + INF_RESIZE_VALUE: 500 # for rangescaling + MAX_RESIZE_VALUE: 600 # for rangescaling + MIN_RESIZE_VALUE: 400 # for rangescaling + + MAX_SCALE_FACTOR: 1.25 # for stepscaling + MIN_SCALE_FACTOR: 0.75 # for stepscaling + SCALE_STEP_SIZE: 0.25 # for stepscaling + MIRROR: True +BATCH_SIZE: 4 +DATASET: + DATA_DIR: "./dataset/mini_pet/" + IMAGE_TYPE: "rgb" # choice rgb or rgba + NUM_CLASSES: 3 + TEST_FILE_LIST: "./dataset/mini_pet/file_list/test_list.txt" + TRAIN_FILE_LIST: "./dataset/mini_pet/file_list/train_list.txt" + VAL_FILE_LIST: "./dataset/mini_pet/file_list/val_list.txt" + VIS_FILE_LIST: "./dataset/mini_pet/file_list/test_list.txt" + IGNORE_INDEX: 255 + SEPARATOR: " " +FREEZE: + MODEL_FILENAME: "__model__" + PARAMS_FILENAME: "__params__" +MODEL: + MODEL_NAME: "deeplabv3p" + DEFAULT_NORM_TYPE: "bn" + DEEPLAB: + BACKBONE: "mobilenetv2" + DEPTH_MULTIPLIER: 1.0 + ENCODER_WITH_ASPP: False + ENABLE_DECODER: False +TRAIN: + PRETRAINED_MODEL_DIR: "./pretrained_model/deeplabv3p_mobilenetv2-1-0_bn_cityscapes/" + MODEL_SAVE_DIR: "./saved_model/deeplabv3p_mobilenetv2-1-0_bn_pet/" + SNAPSHOT_EPOCH: 10 +TEST: + TEST_MODEL: "./saved_model/deeplabv3p_mobilenetv2-1-0_bn_pet/final" +SOLVER: + NUM_EPOCHS: 100 + LR: 0.005 + LR_POLICY: "poly" + OPTIMIZER: "sgd" diff --git a/legacy/configs/deeplabv3p_mobilenetv2_cityscapes.yaml b/legacy/configs/deeplabv3p_mobilenetv2_cityscapes.yaml new file mode 100644 index 0000000000..8a7808525d --- /dev/null +++ b/legacy/configs/deeplabv3p_mobilenetv2_cityscapes.yaml @@ -0,0 +1,47 @@ +EVAL_CROP_SIZE: (2049, 1025) # (width, height), for unpadding rangescaling and stepscaling +TRAIN_CROP_SIZE: (769, 769) # (width, height), for unpadding rangescaling and stepscaling +AUG: + AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling + FIX_RESIZE_SIZE: (2048, 1024) # (width, height), for unpadding + INF_RESIZE_VALUE: 500 # for rangescaling + MAX_RESIZE_VALUE: 600 # for rangescaling + MIN_RESIZE_VALUE: 400 # for rangescaling + MAX_SCALE_FACTOR: 2.0 # for stepscaling + MIN_SCALE_FACTOR: 0.5 # for stepscaling + SCALE_STEP_SIZE: 0.25 # for stepscaling + MIRROR: True +BATCH_SIZE: 4 +DATASET: + DATA_DIR: "./dataset/cityscapes/" + IMAGE_TYPE: "rgb" # choice rgb or rgba + NUM_CLASSES: 19 + TEST_FILE_LIST: "dataset/cityscapes/val.list" + TRAIN_FILE_LIST: "dataset/cityscapes/train.list" + VAL_FILE_LIST: "dataset/cityscapes/val.list" + 
VIS_FILE_LIST: "dataset/cityscapes/val.list" + IGNORE_INDEX: 255 + SEPARATOR: " " +FREEZE: + MODEL_FILENAME: "model" + PARAMS_FILENAME: "params" +MODEL: + DEFAULT_NORM_TYPE: "bn" + MODEL_NAME: "deeplabv3p" + DEEPLAB: + BACKBONE: "mobilenetv2" + ASPP_WITH_SEP_CONV: True + DECODER_USE_SEP_CONV: True + ENCODER_WITH_ASPP: False + ENABLE_DECODER: False +TRAIN: + PRETRAINED_MODEL_DIR: u"pretrained_model/deeplabv3p_mobilenetv2-1-0_bn_coco" + MODEL_SAVE_DIR: "saved_model/deeplabv3p_mobilenetv2_cityscapes" + SNAPSHOT_EPOCH: 10 + SYNC_BATCH_NORM: True +TEST: + TEST_MODEL: "saved_model/deeplabv3p_mobilenetv2_cityscapes/final" +SOLVER: + LR: 0.01 + LR_POLICY: "poly" + OPTIMIZER: "sgd" + NUM_EPOCHS: 100 diff --git a/legacy/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml b/legacy/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml new file mode 100644 index 0000000000..a844e28c19 --- /dev/null +++ b/legacy/configs/deeplabv3p_mobilenetv3_large_cityscapes.yaml @@ -0,0 +1,58 @@ +EVAL_CROP_SIZE: (2049, 1025) # (width, height), for unpadding rangescaling and stepscaling +TRAIN_CROP_SIZE: (769, 769) # (width, height), for unpadding rangescaling and stepscaling +AUG: + AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling + MAX_SCALE_FACTOR: 2.0 # for stepscaling + MIN_SCALE_FACTOR: 0.5 # for stepscaling + SCALE_STEP_SIZE: 0.25 # for stepscaling + MIRROR: True +BATCH_SIZE: 32 +DATASET: + DATA_DIR: "./dataset/cityscapes/" + IMAGE_TYPE: "rgb" # choice rgb or rgba + NUM_CLASSES: 19 + TEST_FILE_LIST: "dataset/cityscapes/val.list" + TRAIN_FILE_LIST: "dataset/cityscapes/train.list" + VAL_FILE_LIST: "dataset/cityscapes/val.list" + VIS_FILE_LIST: "dataset/cityscapes/val.list" + IGNORE_INDEX: 255 + SEPARATOR: " " +FREEZE: + MODEL_FILENAME: "model" + PARAMS_FILENAME: "params" +MODEL: + DEFAULT_NORM_TYPE: "bn" + MODEL_NAME: "deeplabv3p" + DEEPLAB: + BACKBONE: "mobilenetv3_large" + ASPP_WITH_SEP_CONV: True + DECODER_USE_SEP_CONV: True + ENCODER_WITH_ASPP: True + ENABLE_DECODER: True + OUTPUT_STRIDE: 32 + BACKBONE_LR_MULT_LIST: [0.15,0.35,0.65,0.85,1] + ENCODER: + POOLING_STRIDE: (4, 5) + POOLING_CROP_SIZE: (769, 769) + ASPP_WITH_SE: True + SE_USE_QSIGMOID: True + ASPP_CONVS_FILTERS: 128 + ASPP_WITH_CONCAT_PROJECTION: False + ADD_IMAGE_LEVEL_FEATURE: False + DECODER: + USE_SUM_MERGE: True + CONV_FILTERS: 19 + OUTPUT_IS_LOGITS: True + +TRAIN: + PRETRAINED_MODEL_DIR: u"pretrained_model/mobilenetv3-1-0_large_bn_imagenet" + MODEL_SAVE_DIR: "saved_model/deeplabv3p_mobilenetv3_large_cityscapes" + SNAPSHOT_EPOCH: 1 + SYNC_BATCH_NORM: True +TEST: + TEST_MODEL: "saved_model/deeplabv3p_mobilenetv3_large_cityscapes/final" +SOLVER: + LR: 0.2 + LR_POLICY: "poly" + OPTIMIZER: "sgd" + NUM_EPOCHS: 850 diff --git a/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml new file mode 100644 index 0000000000..41b39ee978 --- /dev/null +++ b/legacy/configs/deeplabv3p_resnet50_vd_cityscapes.yaml @@ -0,0 +1,47 @@ +EVAL_CROP_SIZE: (2049, 1025) # (width, height), for unpadding rangescaling and stepscaling +TRAIN_CROP_SIZE: (769, 769) # (width, height), for unpadding rangescaling and stepscaling +AUG: + AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling + FIX_RESIZE_SIZE: (2048, 1024) # (width, height), for unpadding + INF_RESIZE_VALUE: 500 # for rangescaling + MAX_RESIZE_VALUE: 600 # for rangescaling + MIN_RESIZE_VALUE: 400 # for rangescaling + MAX_SCALE_FACTOR: 2.0 # for stepscaling + MIN_SCALE_FACTOR: 0.5 # for stepscaling + SCALE_STEP_SIZE: 
0.25  # for stepscaling
+    MIRROR: True
+    TO_RGB: True
+BATCH_SIZE: 16
+DATASET:
+    DATA_DIR: "./dataset/cityscapes/"
+    IMAGE_TYPE: "rgb"  # choice rgb or rgba
+    NUM_CLASSES: 19
+    TEST_FILE_LIST: "dataset/cityscapes/val.list"
+    TRAIN_FILE_LIST: "dataset/cityscapes/train.list"
+    VAL_FILE_LIST: "dataset/cityscapes/val.list"
+    VIS_FILE_LIST: "dataset/cityscapes/val.list"
+    IGNORE_INDEX: 255
+    SEPARATOR: " "
+FREEZE:
+    MODEL_FILENAME: "model"
+    PARAMS_FILENAME: "params"
+MODEL:
+    DEFAULT_NORM_TYPE: "bn"
+    MODEL_NAME: "deeplabv3p"
+    DEEPLAB:
+        ASPP_WITH_SEP_CONV: True
+        DECODER_USE_SEP_CONV: True
+        BACKBONE: "resnet_vd_50"
+        BACKBONE_LR_MULT_LIST: [0.1, 0.1, 0.2, 0.2, 1.0]
+TRAIN:
+    PRETRAINED_MODEL_DIR: u"pretrained_model/resnet50_vd_imagenet"
+    MODEL_SAVE_DIR: "saved_model/deeplabv3p_resnet50_vd_bn_cityscapes"
+    SNAPSHOT_EPOCH: 10
+    SYNC_BATCH_NORM: True
+TEST:
+    TEST_MODEL: "saved_model/deeplabv3p_resnet50_vd_bn_cityscapes/final"
+SOLVER:
+    LR: 0.05
+    LR_POLICY: "poly"
+    OPTIMIZER: "sgd"
+    NUM_EPOCHS: 700
diff --git a/legacy/configs/deeplabv3p_xception65_cityscapes.yaml b/legacy/configs/deeplabv3p_xception65_cityscapes.yaml
new file mode 100644
index 0000000000..1dce747745
--- /dev/null
+++ b/legacy/configs/deeplabv3p_xception65_cityscapes.yaml
@@ -0,0 +1,44 @@
+EVAL_CROP_SIZE: (2049, 1025)  # (width, height), for unpadding rangescaling and stepscaling
+TRAIN_CROP_SIZE: (769, 769)  # (width, height), for unpadding rangescaling and stepscaling
+AUG:
+    AUG_METHOD: "stepscaling"  # choice unpadding rangescaling and stepscaling
+    FIX_RESIZE_SIZE: (2048, 1024)  # (width, height), for unpadding
+    INF_RESIZE_VALUE: 500  # for rangescaling
+    MAX_RESIZE_VALUE: 600  # for rangescaling
+    MIN_RESIZE_VALUE: 400  # for rangescaling
+    MAX_SCALE_FACTOR: 2.0  # for stepscaling
+    MIN_SCALE_FACTOR: 0.5  # for stepscaling
+    SCALE_STEP_SIZE: 0.25  # for stepscaling
+    MIRROR: True
+BATCH_SIZE: 4
+DATASET:
+    DATA_DIR: "./dataset/cityscapes/"
+    IMAGE_TYPE: "rgb"  # choice rgb or rgba
+    NUM_CLASSES: 19
+    TEST_FILE_LIST: "dataset/cityscapes/val.list"
+    TRAIN_FILE_LIST: "dataset/cityscapes/train.list"
+    VAL_FILE_LIST: "dataset/cityscapes/val.list"
+    VIS_FILE_LIST: "dataset/cityscapes/val.list"
+    IGNORE_INDEX: 255
+    SEPARATOR: " "
+FREEZE:
+    MODEL_FILENAME: "model"
+    PARAMS_FILENAME: "params"
+MODEL:
+    DEFAULT_NORM_TYPE: "bn"
+    MODEL_NAME: "deeplabv3p"
+    DEEPLAB:
+        ASPP_WITH_SEP_CONV: True
+        DECODER_USE_SEP_CONV: True
+TRAIN:
+    PRETRAINED_MODEL_DIR: u"pretrained_model/deeplabv3p_xception65_bn_coco"
+    MODEL_SAVE_DIR: "saved_model/deeplabv3p_xception65_bn_cityscapes"
+    SNAPSHOT_EPOCH: 10
+    SYNC_BATCH_NORM: True
+TEST:
+    TEST_MODEL: "saved_model/deeplabv3p_xception65_bn_cityscapes/final"
+SOLVER:
+    LR: 0.01
+    LR_POLICY: "poly"
+    OPTIMIZER: "sgd"
+    NUM_EPOCHS: 100
diff --git a/legacy/configs/deeplabv3p_xception65_optic_kunlun.yaml b/legacy/configs/deeplabv3p_xception65_optic_kunlun.yaml
new file mode 100644
index 0000000000..bad5c9b04c
--- /dev/null
+++ b/legacy/configs/deeplabv3p_xception65_optic_kunlun.yaml
@@ -0,0 +1,34 @@
+# Dataset configuration
+DATASET:
+    DATA_DIR: "./dataset/optic_disc_seg/"
+    NUM_CLASSES: 2
+    TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+    TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt"
+    VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt"
+    VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+
+# Pretrained model configuration
+MODEL:
+    MODEL_NAME: "deeplabv3p"
+    DEFAULT_NORM_TYPE: "bn"
+    DEEPLAB:
+        BACKBONE: "xception_65"
+
+# Other settings
+TRAIN_CROP_SIZE: (512, 512)
+EVAL_CROP_SIZE: (512, 512)
+AUG:
+    AUG_METHOD: "unpadding"
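+    # Editor's note (assumption, not in the original config): with the
+    # "unpadding" method every image is resized to FIX_RESIZE_SIZE below,
+    # which is why TRAIN_CROP_SIZE and EVAL_CROP_SIZE match it here.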
+    FIX_RESIZE_SIZE: (512, 512)
+BATCH_SIZE: 1
+TRAIN:
+    PRETRAINED_MODEL_DIR: "./pretrained_model/deeplabv3p_xception65_bn_coco/"
+    MODEL_SAVE_DIR: "./saved_model/deeplabv3p_xception65_bn_optic/"
+    SNAPSHOT_EPOCH: 2
+TEST:
+    TEST_MODEL: "./saved_model/deeplabv3p_xception65_bn_optic/final"
+SOLVER:
+    NUM_EPOCHS: 20
+    LR: 0.001
+    LR_POLICY: "poly"
+    OPTIMIZER: "adam"
diff --git a/legacy/configs/fast_scnn_pet.yaml b/legacy/configs/fast_scnn_pet.yaml
new file mode 100644
index 0000000000..2b9b659f18
--- /dev/null
+++ b/legacy/configs/fast_scnn_pet.yaml
@@ -0,0 +1,43 @@
+TRAIN_CROP_SIZE: (512, 512)  # (width, height), for unpadding rangescaling and stepscaling
+EVAL_CROP_SIZE: (512, 512)  # (width, height), for unpadding rangescaling and stepscaling
+AUG:
+    AUG_METHOD: "unpadding"  # choice unpadding rangescaling and stepscaling
+    FIX_RESIZE_SIZE: (512, 512)  # (width, height), for unpadding
+
+    INF_RESIZE_VALUE: 500  # for rangescaling
+    MAX_RESIZE_VALUE: 600  # for rangescaling
+    MIN_RESIZE_VALUE: 400  # for rangescaling
+
+    MAX_SCALE_FACTOR: 1.25  # for stepscaling
+    MIN_SCALE_FACTOR: 0.75  # for stepscaling
+    SCALE_STEP_SIZE: 0.25  # for stepscaling
+    MIRROR: True
+BATCH_SIZE: 4
+DATASET:
+    DATA_DIR: "./dataset/mini_pet/"
+    IMAGE_TYPE: "rgb"  # choice rgb or rgba
+    NUM_CLASSES: 3
+    TEST_FILE_LIST: "./dataset/mini_pet/file_list/test_list.txt"
+    TRAIN_FILE_LIST: "./dataset/mini_pet/file_list/train_list.txt"
+    VAL_FILE_LIST: "./dataset/mini_pet/file_list/val_list.txt"
+    VIS_FILE_LIST: "./dataset/mini_pet/file_list/test_list.txt"
+    IGNORE_INDEX: 255
+    SEPARATOR: " "
+FREEZE:
+    MODEL_FILENAME: "__model__"
+    PARAMS_FILENAME: "__params__"
+MODEL:
+    MODEL_NAME: "fast_scnn"
+    DEFAULT_NORM_TYPE: "bn"
+
+TRAIN:
+    PRETRAINED_MODEL_DIR: "./pretrained_model/fast_scnn_cityscapes/"
+    MODEL_SAVE_DIR: "./saved_model/fast_scnn_pet/"
+    SNAPSHOT_EPOCH: 10
+TEST:
+    TEST_MODEL: "./saved_model/fast_scnn_pet/final"
+SOLVER:
+    NUM_EPOCHS: 100
+    LR: 0.005
+    LR_POLICY: "poly"
+    OPTIMIZER: "sgd"
diff --git a/legacy/configs/fcn.yaml b/legacy/configs/fcn.yaml
new file mode 100644
index 0000000000..726350b734
--- /dev/null
+++ b/legacy/configs/fcn.yaml
@@ -0,0 +1,39 @@
+# Dataset configuration
+DATASET:
+    DATA_DIR: "./dataset/optic_disc_seg/"
+    NUM_CLASSES: 2
+    TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+    TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt"
+    VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt"
+    VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+
+# Pretrained model configuration
+MODEL:
+    MODEL_NAME: "hrnet"
+    DEFAULT_NORM_TYPE: "bn"
+    HRNET:
+        STAGE2:
+            NUM_CHANNELS: [18, 36]
+        STAGE3:
+            NUM_CHANNELS: [18, 36, 72]
+        STAGE4:
+            NUM_CHANNELS: [18, 36, 72, 144]
+
+# Other settings
+TRAIN_CROP_SIZE: (512, 512)
+EVAL_CROP_SIZE: (512, 512)
+AUG:
+    AUG_METHOD: "unpadding"
+    FIX_RESIZE_SIZE: (512, 512)
+BATCH_SIZE: 1
+TRAIN:
+    PRETRAINED_MODEL_DIR: "./pretrained_model/hrnet_w18_bn_cityscapes/"
+    MODEL_SAVE_DIR: "./saved_model/hrnet_optic/"
+    SNAPSHOT_EPOCH: 1
+TEST:
+    TEST_MODEL: "./saved_model/hrnet_optic/final"
+SOLVER:
+    NUM_EPOCHS: 10
+    LR: 0.001
+    LR_POLICY: "poly"
+    OPTIMIZER: "adam"
diff --git a/legacy/configs/hrnet_optic.yaml b/legacy/configs/hrnet_optic.yaml
new file mode 100644
index 0000000000..7154bceeea
--- /dev/null
+++ b/legacy/configs/hrnet_optic.yaml
@@ -0,0 +1,39 @@
+# Dataset configuration
+DATASET:
+    DATA_DIR: "./dataset/optic_disc_seg/"
+    NUM_CLASSES: 2
+    TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+    TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt"
+    VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt"
+    VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+
+# Pretrained model configuration
+MODEL:
+    MODEL_NAME: "hrnet"
+    DEFAULT_NORM_TYPE: "bn"
+    HRNET:
+        STAGE2:
+            NUM_CHANNELS: [18, 36]
+        STAGE3:
+            NUM_CHANNELS: [18, 36, 72]
+        STAGE4:
+            NUM_CHANNELS: [18, 36, 72, 144]
+
+# Other settings
+TRAIN_CROP_SIZE: (512, 512)
+EVAL_CROP_SIZE: (512, 512)
+AUG:
+    AUG_METHOD: "unpadding"
+    FIX_RESIZE_SIZE: (512, 512)
+BATCH_SIZE: 4
+TRAIN:
+    PRETRAINED_MODEL_DIR: "./pretrained_model/hrnet_w18_bn_cityscapes/"
+    MODEL_SAVE_DIR: "./saved_model/hrnet_optic/"
+    SNAPSHOT_EPOCH: 5
+TEST:
+    TEST_MODEL: "./saved_model/hrnet_optic/final"
+SOLVER:
+    NUM_EPOCHS: 10
+    LR: 0.001
+    LR_POLICY: "poly"
+    OPTIMIZER: "adam"
diff --git a/legacy/configs/icnet_optic.yaml b/legacy/configs/icnet_optic.yaml
new file mode 100644
index 0000000000..0f2742e6cf
--- /dev/null
+++ b/legacy/configs/icnet_optic.yaml
@@ -0,0 +1,35 @@
+# Dataset configuration
+DATASET:
+    DATA_DIR: "./dataset/optic_disc_seg/"
+    NUM_CLASSES: 2
+    TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+    TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt"
+    VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt"
+    VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+
+# Pretrained model configuration
+MODEL:
+    MODEL_NAME: "icnet"
+    DEFAULT_NORM_TYPE: "bn"
+    MULTI_LOSS_WEIGHT: "[1.0, 0.4, 0.16]"
+    ICNET:
+        DEPTH_MULTIPLIER: 0.5
+
+# Other settings
+TRAIN_CROP_SIZE: (512, 512)
+EVAL_CROP_SIZE: (512, 512)
+AUG:
+    AUG_METHOD: "unpadding"
+    FIX_RESIZE_SIZE: (512, 512)
+BATCH_SIZE: 4
+TRAIN:
+    PRETRAINED_MODEL_DIR: "./pretrained_model/icnet_bn_cityscapes/"
+    MODEL_SAVE_DIR: "./saved_model/icnet_optic/"
+    SNAPSHOT_EPOCH: 5
+TEST:
+    TEST_MODEL: "./saved_model/icnet_optic/final"
+SOLVER:
+    NUM_EPOCHS: 10
+    LR: 0.001
+    LR_POLICY: "poly"
+    OPTIMIZER: "adam"
diff --git a/legacy/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml b/legacy/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml
new file mode 100644
index 0000000000..45f5fc724a
--- /dev/null
+++ b/legacy/configs/lovasz_hinge_deeplabv3p_mobilenet_road.yaml
@@ -0,0 +1,50 @@
+EVAL_CROP_SIZE: (1025, 1025)  # (width, height), for unpadding rangescaling and stepscaling
+TRAIN_CROP_SIZE: (769, 769)  # (width, height), for unpadding rangescaling and stepscaling
+AUG:
+    AUG_METHOD: u"stepscaling"  # choice unpadding rangescaling and stepscaling
+    FIX_RESIZE_SIZE: (640, 640)  # (width, height), for unpadding
+    INF_RESIZE_VALUE: 500  # for rangescaling
+    MAX_RESIZE_VALUE: 600  # for rangescaling
+    MIN_RESIZE_VALUE: 400  # for rangescaling
+    MAX_SCALE_FACTOR: 2.0  # for stepscaling
+    MIN_SCALE_FACTOR: 0.5  # for stepscaling
+    SCALE_STEP_SIZE: 0.25  # for stepscaling
+    FLIP: True
+BATCH_SIZE: 24
+DATASET:
+    DATA_DIR: "./dataset/MiniDeepGlobeRoadExtraction/"
+    IMAGE_TYPE: "rgb"  # choice rgb or rgba
+    NUM_CLASSES: 2
+    TEST_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt"
+    TRAIN_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/train.txt"
+    VAL_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt"
+    VIS_FILE_LIST: "dataset/MiniDeepGlobeRoadExtraction/val.txt"
+    IGNORE_INDEX: 255
+    SEPARATOR: '|'
+FREEZE:
+    MODEL_FILENAME: "model"
+    PARAMS_FILENAME: "params"
+    SAVE_DIR: "freeze_model"
+MODEL:
+    DEFAULT_NORM_TYPE: "bn"
+    MODEL_NAME: "deeplabv3p"
+    DEEPLAB:
+        BACKBONE: "mobilenetv2"
+        DEPTH_MULTIPLIER: 1.0
+        ENCODER_WITH_ASPP: False
+        ENABLE_DECODER: False
+TEST:
+    TEST_MODEL: "./saved_model/lovasz_hinge_deeplabv3p_mobilenet_road/final"
+TRAIN:
+    MODEL_SAVE_DIR: "./saved_model/lovasz_hinge_deeplabv3p_mobilenet_road/"
"./pretrained_model/deeplabv3p_mobilenetv2-1-0_bn_coco/" + SNAPSHOT_EPOCH: 10 +SOLVER: + LR: 0.1 + LR_POLICY: "poly" + OPTIMIZER: "sgd" + NUM_EPOCHS: 300 + LOSS: ["lovasz_hinge_loss","bce_loss"] + LOSS_WEIGHT: + LOVASZ_HINGE_LOSS: 0.5 + BCE_LOSS: 0.5 diff --git a/legacy/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml b/legacy/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml new file mode 100755 index 0000000000..b1c6ff7990 --- /dev/null +++ b/legacy/configs/lovasz_softmax_deeplabv3p_mobilenet_pascal.yaml @@ -0,0 +1,49 @@ +TRAIN_CROP_SIZE: (500, 500) # (width, height), for unpadding rangescaling and stepscaling #训练时图像裁剪尺寸(宽,高) +EVAL_CROP_SIZE: (500, 500) # (width, height), for unpadding rangescaling and stepscaling #验证时图像裁剪尺寸(宽,高) +AUG: + AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling + FIX_RESIZE_SIZE: (500, 500) # (width, height), for unpadding + + INF_RESIZE_VALUE: 500 # for rangescaling + MAX_RESIZE_VALUE: 600 # for rangescaling + MIN_RESIZE_VALUE: 400 # for rangescaling + + MAX_SCALE_FACTOR: 1.25 # for stepscaling + MIN_SCALE_FACTOR: 0.75 # for stepscaling + SCALE_STEP_SIZE: 0.05 # for stepscaling + MIRROR: True + FLIP: True +BATCH_SIZE: 16 #批处理大小 +DATASET: + DATA_DIR: "./dataset/VOCtrainval_11-May-2012/VOC2012/" #图片路径 + IMAGE_TYPE: "rgb" # choice rgb or rgba #图片类别“RGB” + NUM_CLASSES: 21 #类别数(包括背景类别) + TEST_FILE_LIST: "dataset/VOCtrainval_11-May-2012/VOC2012/ImageSets/Segmentation/val.list" + TRAIN_FILE_LIST: "dataset/VOCtrainval_11-May-2012/VOC2012/ImageSets/Segmentation/train.list" + VAL_FILE_LIST: "dataset/VOCtrainval_11-May-2012/VOC2012/ImageSets/Segmentation/val.list" + VIS_FILE_LIST: "dataset/VOCtrainval_11-May-2012/VOC2012/ImageSets/Segmentation/val.list" + IGNORE_INDEX: 255 + SEPARATOR: " " +MODEL: + MODEL_NAME: "deeplabv3p" + DEFAULT_NORM_TYPE: "bn" #指定norm的类型,此处提供bn和gn(默认)两种选择,分别指batch norm和group norm。 + DEEPLAB: + BACKBONE: "mobilenetv2" + DEPTH_MULTIPLIER: 1.0 + ENCODER_WITH_ASPP: False + ENABLE_DECODER: False +TRAIN: + PRETRAINED_MODEL_DIR: "./pretrained_model/deeplabv3p_mobilenetv2-1-0_bn_coco/" + MODEL_SAVE_DIR: "./saved_model/lovasz-softmax-voc" #模型保存路径 + SNAPSHOT_EPOCH: 10 +TEST: + TEST_MODEL: "./saved_model/lovasz-softmax-voc/final" #为测试模型路径 +SOLVER: + NUM_EPOCHS: 100 #训练epoch数,正整数 + LR: 0.0001 #初始学习率 + LR_POLICY: "poly" #学习率下降方法, 选项为poly、piecewise和cosine + OPTIMIZER: "sgd" #优化算法, 选项为sgd和adam + LOSS: ["lovasz_softmax_loss","softmax_loss"] + LOSS_WEIGHT: + LOVASZ_SOFTMAX_LOSS: 0.2 + SOFTMAX_LOSS: 0.8 diff --git a/legacy/configs/ocrnet_w18_bn_cityscapes.yaml b/legacy/configs/ocrnet_w18_bn_cityscapes.yaml new file mode 100644 index 0000000000..15fb92ad5a --- /dev/null +++ b/legacy/configs/ocrnet_w18_bn_cityscapes.yaml @@ -0,0 +1,54 @@ +EVAL_CROP_SIZE: (2048, 1024) # (width, height), for unpadding rangescaling and stepscaling +TRAIN_CROP_SIZE: (1024, 512) # (width, height), for unpadding rangescaling and stepscaling +AUG: +# AUG_METHOD: "unpadding" # choice unpadding rangescaling and stepscaling + AUG_METHOD: "stepscaling" # choice unpadding rangescaling and stepscaling + FIX_RESIZE_SIZE: (1024, 512) # (width, height), for unpadding + INF_RESIZE_VALUE: 500 # for rangescaling + MAX_RESIZE_VALUE: 600 # for rangescaling + MIN_RESIZE_VALUE: 400 # for rangescaling + MAX_SCALE_FACTOR: 2.0 # for stepscaling + MIN_SCALE_FACTOR: 0.5 # for stepscaling + SCALE_STEP_SIZE: 0.25 # for stepscaling + MIRROR: True +BATCH_SIZE: 4 +#BATCH_SIZE: 4 +DATASET: + DATA_DIR: "./dataset/cityscapes/" + IMAGE_TYPE: "rgb" # choice rgb or rgba + NUM_CLASSES: 19 
+    TEST_FILE_LIST: "./dataset/cityscapes/val.list"
+    TRAIN_FILE_LIST: "./dataset/cityscapes/train.list"
+    VAL_FILE_LIST: "./dataset/cityscapes/val.list"
+    VIS_FILE_LIST: "./dataset/cityscapes/val.list"
+    IGNORE_INDEX: 255
+    SEPARATOR: " "
+FREEZE:
+    MODEL_FILENAME: "model"
+    PARAMS_FILENAME: "params"
+MODEL:
+    MODEL_NAME: "ocrnet"
+    DEFAULT_NORM_TYPE: "bn"
+    HRNET:
+        STAGE2:
+            NUM_CHANNELS: [18, 36]
+        STAGE3:
+            NUM_CHANNELS: [18, 36, 72]
+        STAGE4:
+            NUM_CHANNELS: [18, 36, 72, 144]
+    OCR:
+        OCR_MID_CHANNELS: 512
+        OCR_KEY_CHANNELS: 256
+    MULTI_LOSS_WEIGHT: [1.0, 1.0]
+TRAIN:
+    PRETRAINED_MODEL_DIR: u"./pretrained_model/ocrnet_w18_cityscape/best_model"
+    MODEL_SAVE_DIR: "output/ocrnet_w18_bn_cityscapes"
+    SNAPSHOT_EPOCH: 1
+    SYNC_BATCH_NORM: True
+TEST:
+    TEST_MODEL: "output/ocrnet_w18_bn_cityscapes/first"
+SOLVER:
+    LR: 0.01
+    LR_POLICY: "poly"
+    OPTIMIZER: "sgd"
+    NUM_EPOCHS: 500
diff --git a/legacy/configs/pspnet_optic.yaml b/legacy/configs/pspnet_optic.yaml
new file mode 100644
index 0000000000..589e2b53cc
--- /dev/null
+++ b/legacy/configs/pspnet_optic.yaml
@@ -0,0 +1,35 @@
+# Dataset configuration
+DATASET:
+    DATA_DIR: "./dataset/optic_disc_seg/"
+    NUM_CLASSES: 2
+    TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+    TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt"
+    VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt"
+    VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+
+# Pretrained model configuration
+MODEL:
+    MODEL_NAME: "pspnet"
+    DEFAULT_NORM_TYPE: "bn"
+    PSPNET:
+        DEPTH_MULTIPLIER: 1
+        LAYERS: 50
+
+# Other settings
+TRAIN_CROP_SIZE: (512, 512)
+EVAL_CROP_SIZE: (512, 512)
+AUG:
+    AUG_METHOD: "unpadding"
+    FIX_RESIZE_SIZE: (512, 512)
+BATCH_SIZE: 4
+TRAIN:
+    PRETRAINED_MODEL_DIR: "./pretrained_model/pspnet50_bn_cityscapes/"
+    MODEL_SAVE_DIR: "./saved_model/pspnet_optic/"
+    SNAPSHOT_EPOCH: 5
+TEST:
+    TEST_MODEL: "./saved_model/pspnet_optic/final"
+SOLVER:
+    NUM_EPOCHS: 10
+    LR: 0.001
+    LR_POLICY: "poly"
+    OPTIMIZER: "adam"
diff --git a/legacy/configs/unet_optic.yaml b/legacy/configs/unet_optic.yaml
new file mode 100644
index 0000000000..cd564817c7
--- /dev/null
+++ b/legacy/configs/unet_optic.yaml
@@ -0,0 +1,32 @@
+# Dataset configuration
+DATASET:
+    DATA_DIR: "./dataset/optic_disc_seg/"
+    NUM_CLASSES: 2
+    TEST_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+    TRAIN_FILE_LIST: "./dataset/optic_disc_seg/train_list.txt"
+    VAL_FILE_LIST: "./dataset/optic_disc_seg/val_list.txt"
+    VIS_FILE_LIST: "./dataset/optic_disc_seg/test_list.txt"
+
+# Pretrained model configuration
+MODEL:
+    MODEL_NAME: "unet"
+    DEFAULT_NORM_TYPE: "bn"
+
+# Other settings
+TRAIN_CROP_SIZE: (512, 512)
+EVAL_CROP_SIZE: (512, 512)
+AUG:
+    AUG_METHOD: "unpadding"
+    FIX_RESIZE_SIZE: (512, 512)
+BATCH_SIZE: 4
+TRAIN:
+    PRETRAINED_MODEL_DIR: "./pretrained_model/unet_bn_coco/"
+    MODEL_SAVE_DIR: "./saved_model/unet_optic/"
+    SNAPSHOT_EPOCH: 5
+TEST:
+    TEST_MODEL: "./saved_model/unet_optic/final"
+SOLVER:
+    NUM_EPOCHS: 10
+    LR: 0.001
+    LR_POLICY: "poly"
+    OPTIMIZER: "adam"
diff --git a/legacy/pdseg/loss.py b/legacy/pdseg/loss.py
index 5f657b0ba7..97d69d0032 100644
--- a/legacy/pdseg/loss.py
+++ b/legacy/pdseg/loss.py
@@ -18,7 +18,6 @@
 import paddle
 import paddle.nn.functional as F
 import numpy as np
-import importlib
 
 from utils.config import cfg
 
@@ -35,11 +34,46 @@ def softmax_with_loss(logit,
     label = paddle.reshape(label, [-1, 1])
     label = paddle.cast(label, 'int64')
     ignore_mask = paddle.reshape(ignore_mask, [-1, 1])
-    loss, probs = F.softmax_with_cross_entropy(
-        logit,
-        label,
-        ignore_index=cfg.DATASET.IGNORE_INDEX,
-        return_softmax=True)
+    if weight is None:
+        loss, probs = F.softmax_with_cross_entropy(
+            logit,
+            label,
+            ignore_index=cfg.DATASET.IGNORE_INDEX,
+            return_softmax=True)
+    else:
+        label = paddle.squeeze(label, axes=[-1])
+        label_one_hot = F.one_hot(input=label, num_classes=num_classes)
+        if isinstance(weight, list):
+            assert len(
+                weight
+            ) == num_classes, "weight length must equal num of classes"
+            weight = paddle.assign(np.array([weight], dtype='float32'))
+        elif isinstance(weight, str):
+            assert weight.lower(
+            ) == 'dynamic', 'if weight is string, must be dynamic!'
+            tmp = []
+            total_num = paddle.cast(paddle.shape(label)[0], 'float32')
+            for i in range(num_classes):
+                cls_pixel_num = paddle.sum(label_one_hot[:, i])
+                ratio = total_num / (cls_pixel_num + 1)
+                tmp.append(ratio)
+            weight = paddle.concat(tmp)
+            weight = weight / paddle.sum(weight) * num_classes
+        elif isinstance(weight, paddle.Tensor):
+            pass
+        else:
+            raise ValueError(
+                'Expect weight is a list, string or Variable, but receive {}'.
+                format(type(weight)))
+        weight = paddle.reshape(weight, [1, num_classes])
+        weighted_label_one_hot = label_one_hot * weight
+        probs = F.softmax(logit)
+        loss = F.cross_entropy(
+            probs,
+            weighted_label_one_hot,
+            soft_label=True,
+            ignore_index=cfg.DATASET.IGNORE_INDEX)
+        weighted_label_one_hot.stop_gradient = True
 
     loss = loss * ignore_mask
     avg_loss = paddle.mean(loss) / (
@@ -50,6 +84,45 @@ def softmax_with_loss(logit,
     return avg_loss
 
 
+# TODO: decide how to apply the ignore index and ignore mask here
+def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
+    if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
+        raise Exception(
+            "dice loss is only applicable to one-channel classification")
+    ignore_mask = paddle.cast(ignore_mask, 'float32')
+    logit = paddle.transpose(logit, [0, 2, 3, 1])
+    label = paddle.transpose(label, [0, 2, 3, 1])
+    label = paddle.cast(label, 'int64')
+    ignore_mask = paddle.transpose(ignore_mask, [0, 2, 3, 1])
+    logit = F.sigmoid(logit)
+    logit = logit * ignore_mask
+    label = label * ignore_mask
+    reduce_dim = list(range(1, len(logit.shape)))
+    inse = paddle.sum(logit * label, dim=reduce_dim)
+    dice_denominator = paddle.sum(
+        logit, dim=reduce_dim) + paddle.sum(
+            label, dim=reduce_dim)
+    dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
+    label.stop_gradient = True
+    ignore_mask.stop_gradient = True
+    return paddle.mean(dice_score)
+
+
+def bce_loss(logit, label, ignore_mask=None):
+    if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
+        raise Exception("bce loss is only applicable to binary classification")
+    label = paddle.cast(label, 'float32')
+    loss = paddle.sigmoid_cross_entropy_with_logits(
+        x=logit,
+        label=label,
+        ignore_index=cfg.DATASET.IGNORE_INDEX,
+        normalize=True)  # or False
+    loss = paddle.sum(loss)
+    label.stop_gradient = True
+    ignore_mask.stop_gradient = True
+    return loss
+
+
 def multi_softmax_with_loss(logits,
                             label,
                             ignore_mask=None,
@@ -73,3 +146,41 @@ def multi_softmax_with_loss(logits,
     avg_loss = softmax_with_loss(
         logits, label, ignore_mask, num_classes, weight=weight)
     return avg_loss
+
+
+def multi_dice_loss(logits, label, ignore_mask=None):
+    if isinstance(logits, tuple):
+        avg_loss = 0
+        for i, logit in enumerate(logits):
+            if label.shape[2] != logit.shape[2] or label.shape[
+                    3] != logit.shape[3]:
+                logit_label = paddle.fluid.layers.resize_nearest(
+                    label, logit.shape[2:])
+            else:
+                logit_label = label
+            logit_mask = (logit_label.astype('int32') !=
+                          cfg.DATASET.IGNORE_INDEX).astype('int32')
+            loss = dice_loss(logit, logit_label,
logit_mask) + avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss + else: + avg_loss = dice_loss(logits, label, ignore_mask) + return avg_loss + + +def multi_bce_loss(logits, label, ignore_mask=None): + if isinstance(logits, tuple): + avg_loss = 0 + for i, logit in enumerate(logits): + if label.shape[2] != logit.shape[2] or label.shape[ + 3] != logit.shape[3]: + logit_label = paddle.fluid.layers.resize_nearest( + label, logit.shape[2:]) + else: + logit_label = label + logit_mask = (logit_label.astype('int32') != + cfg.DATASET.IGNORE_INDEX).astype('int32') + loss = bce_loss(logit, logit_label, logit_mask) + avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss + else: + avg_loss = bce_loss(logits, label, ignore_mask) + return avg_loss diff --git a/legacy/pdseg/models/backbone/mobilenet_v2.py b/legacy/pdseg/models/backbone/mobilenet_v2.py new file mode 100644 index 0000000000..eefeba8d15 --- /dev/null +++ b/legacy/pdseg/models/backbone/mobilenet_v2.py @@ -0,0 +1,315 @@ +# coding: utf8 +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import paddle.fluid as fluid +from paddle.fluid.initializer import MSRA +from paddle.fluid.param_attr import ParamAttr +from utils.config import cfg + +__all__ = [ + 'MobileNetV2', 'MobileNetV2_x0_25', 'MobileNetV2_x0_5', 'MobileNetV2_x1_0', + 'MobileNetV2_x1_5', 'MobileNetV2_x2_0', 'MobileNetV2_scale' +] + +train_parameters = { + "input_size": [3, 224, 224], + "input_mean": [0.485, 0.456, 0.406], + "input_std": [0.229, 0.224, 0.225], + "learning_strategy": { + "name": "piecewise_decay", + "batch_size": 256, + "epochs": [30, 60, 90], + "steps": [0.1, 0.01, 0.001, 0.0001] + } +} + + +class MobileNetV2(): + def __init__(self, scale=1.0, change_depth=False, output_stride=None): + self.params = train_parameters + self.scale = scale + self.change_depth = change_depth + self.bottleneck_params_list = [ + (1, 16, 1, 1), + (6, 24, 2, 2), + (6, 32, 3, 2), + (6, 64, 4, 2), + (6, 96, 3, 1), + (6, 160, 3, 2), + (6, 320, 1, 1), + ] if change_depth == False else [ + (1, 16, 1, 1), + (6, 24, 2, 2), + (6, 32, 5, 2), + (6, 64, 7, 2), + (6, 96, 5, 1), + (6, 160, 3, 2), + (6, 320, 1, 1), + ] + self.modify_bottle_params(output_stride) + + def modify_bottle_params(self, output_stride=None): + if output_stride is not None and output_stride % 2 != 0: + raise Exception("output stride must to be even number") + if output_stride is None: + return + else: + stride = 2 + for i, layer_setting in enumerate(self.bottleneck_params_list): + t, c, n, s = layer_setting + stride = stride * s + if stride > output_stride: + s = 1 + self.bottleneck_params_list[i] = (t, c, n, s) + + def net(self, input, class_dim=1000, end_points=None, decode_points=None): + scale = self.scale + change_depth = self.change_depth + #if change_depth is True, the new depth is 1.4 times as deep as before. 
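+        # Editorial note (added commentary, not in the original patch): each
+        # entry of bottleneck_params_list below is (t, c, n, s): expansion
+        # factor, output channels, number of repeats, and the stride of the
+        # first repeat, as consumed by invresi_blocks().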
+ bottleneck_params_list = self.bottleneck_params_list + decode_ends = dict() + + def check_points(count, points): + if points is None: + return False + else: + if isinstance(points, list): + return (True if count in points else False) + else: + return (True if count == points else False) + + #conv1 + input = self.conv_bn_layer( + input, + num_filters=int(32 * scale), + filter_size=3, + stride=2, + padding=1, + if_act=True, + name='conv1_1') + layer_count = 1 + + #print("node test:", layer_count, input.shape) + + if check_points(layer_count, decode_points): + decode_ends[layer_count] = input + + if check_points(layer_count, end_points): + return input, decode_ends + + # bottleneck sequences + i = 1 + in_c = int(32 * scale) + for layer_setting in bottleneck_params_list: + t, c, n, s = layer_setting + i += 1 + input, depthwise_output = self.invresi_blocks( + input=input, + in_c=in_c, + t=t, + c=int(c * scale), + n=n, + s=s, + name='conv' + str(i)) + in_c = int(c * scale) + layer_count += n + + #print("node test:", layer_count, input.shape) + if check_points(layer_count, decode_points): + decode_ends[layer_count] = depthwise_output + + if check_points(layer_count, end_points): + return input, decode_ends + + #last_conv + input = self.conv_bn_layer( + input=input, + num_filters=int(1280 * scale) if scale > 1.0 else 1280, + filter_size=1, + stride=1, + padding=0, + if_act=True, + name='conv9') + + input = fluid.layers.pool2d( + input=input, + pool_size=7, + pool_stride=1, + pool_type='avg', + global_pooling=True) + + output = fluid.layers.fc( + input=input, + size=class_dim, + param_attr=ParamAttr(name='fc10_weights'), + bias_attr=ParamAttr(name='fc10_offset')) + return output + + def conv_bn_layer(self, + input, + filter_size, + num_filters, + stride, + padding, + channels=None, + num_groups=1, + if_act=True, + name=None, + use_cudnn=True): + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + act=None, + use_cudnn=use_cudnn, + param_attr=ParamAttr(name=name + '_weights'), + bias_attr=False) + bn_name = name + '_bn' + bn = fluid.layers.batch_norm( + input=conv, + param_attr=ParamAttr(name=bn_name + "_scale"), + bias_attr=ParamAttr(name=bn_name + "_offset"), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + if if_act: + return fluid.layers.relu6(bn) + else: + return bn + + def shortcut(self, input, data_residual): + return fluid.layers.elementwise_add(input, data_residual) + + def inverted_residual_unit(self, + input, + num_in_filter, + num_filters, + ifshortcut, + stride, + filter_size, + padding, + expansion_factor, + name=None): + num_expfilter = int(round(num_in_filter * expansion_factor)) + + channel_expand = self.conv_bn_layer( + input=input, + num_filters=num_expfilter, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + if_act=True, + name=name + '_expand') + + bottleneck_conv = self.conv_bn_layer( + input=channel_expand, + num_filters=num_expfilter, + filter_size=filter_size, + stride=stride, + padding=padding, + num_groups=num_expfilter, + if_act=True, + name=name + '_dwise', + use_cudnn=False) + + depthwise_output = bottleneck_conv + + linear_out = self.conv_bn_layer( + input=bottleneck_conv, + num_filters=num_filters, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + if_act=False, + name=name + '_linear') + + if ifshortcut: + out = self.shortcut(input=input, data_residual=linear_out) + return out, depthwise_output + else: + 
return linear_out, depthwise_output + + def invresi_blocks(self, input, in_c, t, c, n, s, name=None): + first_block, depthwise_output = self.inverted_residual_unit( + input=input, + num_in_filter=in_c, + num_filters=c, + ifshortcut=False, + stride=s, + filter_size=3, + padding=1, + expansion_factor=t, + name=name + '_1') + + last_residual_block = first_block + last_c = c + + for i in range(1, n): + last_residual_block, depthwise_output = self.inverted_residual_unit( + input=last_residual_block, + num_in_filter=last_c, + num_filters=c, + ifshortcut=True, + stride=1, + filter_size=3, + padding=1, + expansion_factor=t, + name=name + '_' + str(i + 1)) + return last_residual_block, depthwise_output + + +def MobileNetV2_x0_25(): + model = MobileNetV2(scale=0.25) + return model + + +def MobileNetV2_x0_5(): + model = MobileNetV2(scale=0.5) + return model + + +def MobileNetV2_x1_0(): + model = MobileNetV2(scale=1.0) + return model + + +def MobileNetV2_x1_5(): + model = MobileNetV2(scale=1.5) + return model + + +def MobileNetV2_x2_0(): + model = MobileNetV2(scale=2.0) + return model + + +def MobileNetV2_scale(): + model = MobileNetV2(scale=1.2, change_depth=True) + return model + + +if __name__ == '__main__': + image_shape = [-1, 3, 224, 224] + image = fluid.data(name='image', shape=image_shape, dtype='float32') + model = MobileNetV2_x1_0() + logit, decode_ends = model.net(image) + #print("logit:", logit.shape) diff --git a/legacy/pdseg/models/backbone/mobilenet_v3.py b/legacy/pdseg/models/backbone/mobilenet_v3.py new file mode 100644 index 0000000000..e0a6a8df3c --- /dev/null +++ b/legacy/pdseg/models/backbone/mobilenet_v3.py @@ -0,0 +1,363 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
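+
+# Editorial usage sketch (added commentary, not part of the original patch):
+# as a segmentation backbone this module is typically driven roughly as
+#
+#     model = MobileNetV3_large_x1_0(output_stride=32,
+#                                    lr_mult_list=[0.15, 0.35, 0.65, 0.85, 1.0])
+#     features, decode_shortcut = model.net(image)
+#
+# where net() returns the last conv_bn_layer output together with the
+# depthwise feature captured at stage 5 (self.decode_point) for the decoder.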
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr + +__all__ = [ + 'MobileNetV3', 'MobileNetV3_small_x0_35', 'MobileNetV3_small_x0_5', + 'MobileNetV3_small_x0_75', 'MobileNetV3_small_x1_0', + 'MobileNetV3_small_x1_25', 'MobileNetV3_large_x0_35', + 'MobileNetV3_large_x0_5', 'MobileNetV3_large_x0_75', + 'MobileNetV3_large_x1_0', 'MobileNetV3_large_x1_25' +] + + +class MobileNetV3(): + def __init__(self, + scale=1.0, + model_name='small', + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], + output_stride=None): + self.scale = scale + self.inplanes = 16 + + self.lr_mult_list = lr_mult_list + assert len(self.lr_mult_list) == 5, \ + "lr_mult_list length in MobileNetV3 must be 5 but got {}!!".format( + len(self.lr_mult_list)) + self.curr_stage = 0 + self.decode_point = None + self.end_point = None + + if model_name == "large": + self.cfg = [ + # k, exp, c, se, nl, s, + [3, 16, 16, False, 'relu', 1], + [3, 64, 24, False, 'relu', 2], + [3, 72, 24, False, 'relu', 1], + [5, 72, 40, True, 'relu', 2], + [5, 120, 40, True, 'relu', 1], + [5, 120, 40, True, 'relu', 1], + [3, 240, 80, False, 'hard_swish', 2], + [3, 200, 80, False, 'hard_swish', 1], + [3, 184, 80, False, 'hard_swish', 1], + [3, 184, 80, False, 'hard_swish', 1], + [3, 480, 112, True, 'hard_swish', 1], + [3, 672, 112, True, 'hard_swish', 1], + # The number of channels in the last 4 stages is reduced by a + # factor of 2 compared to the standard implementation. + [5, 336, 80, True, 'hard_swish', 2], + [5, 480, 80, True, 'hard_swish', 1], + [5, 480, 80, True, 'hard_swish', 1], + ] + self.cls_ch_squeeze = 480 + self.cls_ch_expand = 1280 + self.lr_interval = 3 + elif model_name == "small": + self.cfg = [ + # k, exp, c, se, nl, s, + [3, 16, 16, True, 'relu', 2], + [3, 72, 24, False, 'relu', 2], + [3, 88, 24, False, 'relu', 1], + [5, 96, 40, True, 'hard_swish', 2], + [5, 240, 40, True, 'hard_swish', 1], + [5, 240, 40, True, 'hard_swish', 1], + [5, 120, 48, True, 'hard_swish', 1], + [5, 144, 48, True, 'hard_swish', 1], + # The number of channels in the last 4 stages is reduced by a + # factor of 2 compared to the standard implementation. 
+ [5, 144, 48, True, 'hard_swish', 2], + [5, 288, 48, True, 'hard_swish', 1], + [5, 288, 48, True, 'hard_swish', 1], + ] + self.cls_ch_squeeze = 288 + self.cls_ch_expand = 1280 + self.lr_interval = 2 + else: + raise NotImplementedError( + "mode[{}_model] is not implemented!".format(model_name)) + + self.modify_bottle_params(output_stride) + + def modify_bottle_params(self, output_stride=None): + if output_stride is not None and output_stride % 2 != 0: + raise Exception("output stride must to be even number") + if output_stride is None: + return + else: + stride = 2 + for i, _cfg in enumerate(self.cfg): + stride = stride * _cfg[-1] + if stride > output_stride: + s = 1 + self.cfg[i][-1] = s + + def net(self, input, class_dim=1000, end_points=None, decode_points=None): + scale = self.scale + inplanes = self.inplanes + cfg = self.cfg + cls_ch_squeeze = self.cls_ch_squeeze + cls_ch_expand = self.cls_ch_expand + + # conv1 + conv = self.conv_bn_layer( + input, + filter_size=3, + num_filters=self.make_divisible(inplanes * scale), + stride=2, + padding=1, + num_groups=1, + if_act=True, + act='hard_swish', + name='conv1') + + i = 0 + inplanes = self.make_divisible(inplanes * scale) + for layer_cfg in cfg: + conv = self.residual_unit( + input=conv, + num_in_filter=inplanes, + num_mid_filter=self.make_divisible(scale * layer_cfg[1]), + num_out_filter=self.make_divisible(scale * layer_cfg[2]), + act=layer_cfg[4], + stride=layer_cfg[5], + filter_size=layer_cfg[0], + use_se=layer_cfg[3], + name='conv' + str(i + 2)) + inplanes = self.make_divisible(scale * layer_cfg[2]) + i += 1 + self.curr_stage = i + + conv = self.conv_bn_layer( + input=conv, + filter_size=1, + num_filters=self.make_divisible(scale * cls_ch_squeeze), + stride=1, + padding=0, + num_groups=1, + if_act=True, + act='hard_swish', + name='conv_last') + + return conv, self.decode_point + + conv = fluid.layers.pool2d( + input=conv, pool_type='avg', global_pooling=True, use_cudnn=False) + conv = fluid.layers.conv2d( + input=conv, + num_filters=cls_ch_expand, + filter_size=1, + stride=1, + padding=0, + act=None, + param_attr=ParamAttr(name='last_1x1_conv_weights'), + bias_attr=False) + conv = fluid.layers.hard_swish(conv) + drop = fluid.layers.dropout(x=conv, dropout_prob=0.2) + out = fluid.layers.fc( + input=drop, + size=class_dim, + param_attr=ParamAttr(name='fc_weights'), + bias_attr=ParamAttr(name='fc_offset')) + return out + + def conv_bn_layer(self, + input, + filter_size, + num_filters, + stride, + padding, + num_groups=1, + if_act=True, + act=None, + name=None, + use_cudnn=True, + res_last_bn_init=False): + lr_idx = self.curr_stage // self.lr_interval + lr_idx = min(lr_idx, len(self.lr_mult_list) - 1) + lr_mult = self.lr_mult_list[lr_idx] + + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + act=None, + use_cudnn=use_cudnn, + param_attr=ParamAttr(name=name + '_weights', learning_rate=lr_mult), + bias_attr=False) + bn_name = name + '_bn' + bn = fluid.layers.batch_norm( + input=conv, + param_attr=ParamAttr( + name=bn_name + "_scale", + regularizer=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.0)), + bias_attr=ParamAttr( + name=bn_name + "_offset", + regularizer=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.0)), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + if if_act: + if act == 'relu': + bn = fluid.layers.relu(bn) + elif act == 'hard_swish': + bn = 
fluid.layers.hard_swish(bn) + return bn + + def make_divisible(self, v, divisor=8, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + if new_v < 0.9 * v: + new_v += divisor + return new_v + + def se_block(self, input, num_out_filter, ratio=4, name=None): + lr_idx = self.curr_stage // self.lr_interval + lr_idx = min(lr_idx, len(self.lr_mult_list) - 1) + lr_mult = self.lr_mult_list[lr_idx] + + num_mid_filter = num_out_filter // ratio + pool = fluid.layers.pool2d( + input=input, pool_type='avg', global_pooling=True, use_cudnn=False) + conv1 = fluid.layers.conv2d( + input=pool, + filter_size=1, + num_filters=num_mid_filter, + act='relu', + param_attr=ParamAttr( + name=name + '_1_weights', learning_rate=lr_mult), + bias_attr=ParamAttr(name=name + '_1_offset', learning_rate=lr_mult)) + conv2 = fluid.layers.conv2d( + input=conv1, + filter_size=1, + num_filters=num_out_filter, + act='hard_sigmoid', + param_attr=ParamAttr( + name=name + '_2_weights', learning_rate=lr_mult), + bias_attr=ParamAttr(name=name + '_2_offset', learning_rate=lr_mult)) + scale = fluid.layers.elementwise_mul(x=input, y=conv2, axis=0) + return scale + + def residual_unit(self, + input, + num_in_filter, + num_mid_filter, + num_out_filter, + stride, + filter_size, + act=None, + use_se=False, + name=None): + + conv0 = self.conv_bn_layer( + input=input, + filter_size=1, + num_filters=num_mid_filter, + stride=1, + padding=0, + if_act=True, + act=act, + name=name + '_expand') + + conv1 = self.conv_bn_layer( + input=conv0, + filter_size=filter_size, + num_filters=num_mid_filter, + stride=stride, + padding=int((filter_size - 1) // 2), + if_act=True, + act=act, + num_groups=num_mid_filter, + use_cudnn=False, + name=name + '_depthwise') + + if self.curr_stage == 5: + self.decode_point = conv1 + if use_se: + conv1 = self.se_block( + input=conv1, num_out_filter=num_mid_filter, name=name + '_se') + + conv2 = self.conv_bn_layer( + input=conv1, + filter_size=1, + num_filters=num_out_filter, + stride=1, + padding=0, + if_act=False, + name=name + '_linear', + res_last_bn_init=True) + if num_in_filter != num_out_filter or stride != 1: + return conv2 + else: + return fluid.layers.elementwise_add(x=input, y=conv2, act=None) + + +def MobileNetV3_small_x0_35(): + model = MobileNetV3(model_name='small', scale=0.35) + return model + + +def MobileNetV3_small_x0_5(): + model = MobileNetV3(model_name='small', scale=0.5) + return model + + +def MobileNetV3_small_x0_75(): + model = MobileNetV3(model_name='small', scale=0.75) + return model + + +def MobileNetV3_small_x1_0(**args): + model = MobileNetV3(model_name='small', scale=1.0, **args) + return model + + +def MobileNetV3_small_x1_25(): + model = MobileNetV3(model_name='small', scale=1.25) + return model + + +def MobileNetV3_large_x0_35(): + model = MobileNetV3(model_name='large', scale=0.35) + return model + + +def MobileNetV3_large_x0_5(): + model = MobileNetV3(model_name='large', scale=0.5) + return model + + +def MobileNetV3_large_x0_75(): + model = MobileNetV3(model_name='large', scale=0.75) + return model + + +def MobileNetV3_large_x1_0(**args): + model = MobileNetV3(model_name='large', scale=1.0, **args) + return model + + +def MobileNetV3_large_x1_25(): + model = MobileNetV3(model_name='large', scale=1.25) + return model diff --git a/legacy/pdseg/models/backbone/resnet.py b/legacy/pdseg/models/backbone/resnet.py new file mode 100644 index 0000000000..60a7bc5dcc --- /dev/null +++ 
b/legacy/pdseg/models/backbone/resnet.py @@ -0,0 +1,341 @@ +# coding: utf8 +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import numpy as np +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr + +__all__ = [ + "ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152" +] + +train_parameters = { + "input_size": [3, 224, 224], + "input_mean": [0.485, 0.456, 0.406], + "input_std": [0.229, 0.224, 0.225], + "learning_strategy": { + "name": "piecewise_decay", + "batch_size": 256, + "epochs": [30, 60, 90], + "steps": [0.1, 0.01, 0.001, 0.0001] + } +} + + +class ResNet(): + def __init__(self, layers=50, scale=1.0, stem=None): + self.params = train_parameters + self.layers = layers + self.scale = scale + self.stem = stem + + def net(self, + input, + class_dim=1000, + end_points=None, + decode_points=None, + resize_points=None, + dilation_dict=None): + layers = self.layers + supported_layers = [18, 34, 50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format(supported_layers, layers) + + decode_ends = dict() + + def check_points(count, points): + if points is None: + return False + else: + if isinstance(points, list): + return (True if count in points else False) + else: + return (True if count == points else False) + + def get_dilated_rate(dilation_dict, idx): + if dilation_dict is None or idx not in dilation_dict: + return 1 + else: + return dilation_dict[idx] + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + num_filters = [64, 128, 256, 512] + + if self.stem == 'icnet' or self.stem == 'pspnet': + conv = self.conv_bn_layer( + input=input, + num_filters=int(64 * self.scale), + filter_size=3, + stride=2, + act='relu', + name="conv1_1") + conv = self.conv_bn_layer( + input=conv, + num_filters=int(64 * self.scale), + filter_size=3, + stride=1, + act='relu', + name="conv1_2") + conv = self.conv_bn_layer( + input=conv, + num_filters=int(128 * self.scale), + filter_size=3, + stride=1, + act='relu', + name="conv1_3") + else: + conv = self.conv_bn_layer( + input=input, + num_filters=int(64 * self.scale), + filter_size=7, + stride=2, + act='relu', + name="conv1") + + conv = fluid.layers.pool2d( + input=conv, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max') + + layer_count = 1 + if check_points(layer_count, decode_points): + decode_ends[layer_count] = conv + + if check_points(layer_count, end_points): + return conv, decode_ends + + if layers >= 50: + for block in range(len(depth)): + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) 
+ else: + conv_name = "res" + str(block + 2) + chr(97 + i) + dilation_rate = get_dilated_rate(dilation_dict, block) + + conv = self.bottleneck_block( + input=conv, + num_filters=int(num_filters[block] * self.scale), + stride=2 + if i == 0 and block != 0 and dilation_rate == 1 else 1, + name=conv_name, + dilation=dilation_rate) + layer_count += 3 + + if check_points(layer_count, decode_points): + decode_ends[layer_count] = conv + + if check_points(layer_count, end_points): + return conv, decode_ends + + if check_points(layer_count, resize_points): + conv = self.interp( + conv, + np.ceil( + np.array(conv.shape[2:]).astype('int32') / 2)) + + pool = fluid.layers.pool2d( + input=conv, pool_size=7, pool_type='avg', global_pooling=True) + stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) + out = fluid.layers.fc( + input=pool, + size=class_dim, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv))) + else: + for block in range(len(depth)): + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + conv = self.basic_block( + input=conv, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + is_first=block == i == 0, + name=conv_name) + layer_count += 2 + if check_points(layer_count, decode_points): + decode_ends[layer_count] = conv + + if check_points(layer_count, end_points): + return conv, decode_ends + + pool = fluid.layers.pool2d( + input=conv, pool_size=7, pool_type='avg', global_pooling=True) + stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) + out = fluid.layers.fc( + input=pool, + size=class_dim, + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.Uniform(-stdv, stdv))) + return out + + def zero_padding(self, input, padding): + return fluid.layers.pad( + input, [0, 0, 0, 0, padding, padding, padding, padding]) + + def interp(self, input, out_shape): + out_shape = list(out_shape.astype("int32")) + return fluid.layers.resize_bilinear(input, out_shape=out_shape) + + def conv_bn_layer(self, + input, + num_filters, + filter_size, + stride=1, + dilation=1, + groups=1, + act=None, + name=None): + + if self.stem == 'pspnet': + bias_attr = ParamAttr(name=name + "_biases") + else: + bias_attr = False + + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2 if dilation == 1 else 0, + dilation=dilation, + groups=groups, + act=None, + param_attr=ParamAttr(name=name + "_weights"), + bias_attr=bias_attr, + name=name + '.conv2d.output.1') + + if name == "conv1": + bn_name = "bn_" + name + else: + bn_name = "bn" + name[3:] + return fluid.layers.batch_norm( + input=conv, + act=act, + name=bn_name + '.output.1', + param_attr=ParamAttr(name=bn_name + '_scale'), + bias_attr=ParamAttr(bn_name + '_offset'), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance', + ) + + def shortcut(self, input, ch_out, stride, is_first, name): + ch_in = input.shape[1] + if ch_in != ch_out or stride != 1 or is_first == True: + return self.conv_bn_layer(input, ch_out, 1, stride, name=name) + else: + return input + + def bottleneck_block(self, input, num_filters, stride, name, dilation=1): + if self.stem == 'pspnet' and self.layers == 101: + strides = [1, stride] + else: + strides = [stride, 1] + + conv0 = self.conv_bn_layer( + input=input, + num_filters=num_filters, + filter_size=1, + dilation=1, + stride=strides[0], + act='relu', + name=name + "_branch2a") + if dilation > 1: + conv0 = self.zero_padding(conv0, 
dilation) + conv1 = self.conv_bn_layer( + input=conv0, + num_filters=num_filters, + filter_size=3, + dilation=dilation, + stride=strides[1], + act='relu', + name=name + "_branch2b") + conv2 = self.conv_bn_layer( + input=conv1, + num_filters=num_filters * 4, + dilation=1, + filter_size=1, + act=None, + name=name + "_branch2c") + + short = self.shortcut( + input, + num_filters * 4, + stride, + is_first=False, + name=name + "_branch1") + + return fluid.layers.elementwise_add( + x=short, y=conv2, act='relu', name=name + ".add.output.5") + + def basic_block(self, input, num_filters, stride, is_first, name): + conv0 = self.conv_bn_layer( + input=input, + num_filters=num_filters, + filter_size=3, + act='relu', + stride=stride, + name=name + "_branch2a") + conv1 = self.conv_bn_layer( + input=conv0, + num_filters=num_filters, + filter_size=3, + act=None, + name=name + "_branch2b") + short = self.shortcut( + input, num_filters, stride, is_first, name=name + "_branch1") + return fluid.layers.elementwise_add(x=short, y=conv1, act='relu') + + +def ResNet18(): + model = ResNet(layers=18) + return model + + +def ResNet34(): + model = ResNet(layers=34) + return model + + +def ResNet50(): + model = ResNet(layers=50) + return model + + +def ResNet101(): + model = ResNet(layers=101) + return model + + +def ResNet152(): + model = ResNet(layers=152) + return model diff --git a/legacy/pdseg/models/backbone/vgg.py b/legacy/pdseg/models/backbone/vgg.py new file mode 100644 index 0000000000..443fa43556 --- /dev/null +++ b/legacy/pdseg/models/backbone/vgg.py @@ -0,0 +1,82 @@ +# coding: utf8 +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
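+
+# Editorial usage sketch (added commentary, not part of the original patch):
+# net() can be cut short through end_points, for example
+#
+#     model = VGGNet(layers=16)
+#     features, short_cuts = model.net(image, end_points=13,
+#                                      decode_points=[4, 7])
+#
+# For VGG-16 the layer counter takes the values 2, 4, 7, 10, 13, and
+# short_cuts maps each requested count to its feature map; if end_points is
+# never hit, only the final conv output is returned.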
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.fluid as fluid +from paddle.fluid import ParamAttr + +__all__ = ["VGGNet"] + + +def check_points(count, points): + if points is None: + return False + else: + if isinstance(points, list): + return (True if count in points else False) + else: + return (True if count == points else False) + + +class VGGNet(): + def __init__(self, layers=16): + self.layers = layers + + def net(self, input, class_dim=1000, end_points=None, decode_points=None): + short_cuts = dict() + layers_count = 0 + layers = self.layers + vgg_spec = { + 11: ([1, 1, 2, 2, 2]), + 13: ([2, 2, 2, 2, 2]), + 16: ([2, 2, 3, 3, 3]), + 19: ([2, 2, 4, 4, 4]) + } + assert layers in vgg_spec.keys(), \ + "supported layers are {} but input layer is {}".format(vgg_spec.keys(), layers) + + nums = vgg_spec[layers] + channels = [64, 128, 256, 512, 512] + conv = input + for i in range(len(nums)): + conv = self.conv_block( + conv, channels[i], nums[i], name="conv" + str(i + 1) + "_") + layers_count += nums[i] + if check_points(layers_count, decode_points): + short_cuts[layers_count] = conv + if check_points(layers_count, end_points): + return conv, short_cuts + + return conv + + def conv_block(self, input, num_filter, groups, name=None): + conv = input + for i in range(groups): + conv = fluid.layers.conv2d( + input=conv, + num_filters=num_filter, + filter_size=3, + stride=1, + padding=1, + act='relu', + param_attr=fluid.param_attr.ParamAttr( + name=name + str(i + 1) + "_weights"), + bias_attr=False) + return fluid.layers.pool2d( + input=conv, pool_size=2, pool_type='max', pool_stride=2) diff --git a/legacy/pdseg/models/backbone/xception.py b/legacy/pdseg/models/backbone/xception.py new file mode 100644 index 0000000000..d45adc21af --- /dev/null +++ b/legacy/pdseg/models/backbone/xception.py @@ -0,0 +1,317 @@ +# coding: utf8 +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
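+
+# Editorial usage sketch (added commentary, not part of the original patch):
+# for DeepLabv3+ this backbone is built roughly as
+#
+#     model = xception_65()
+#     features, short_cuts = model.net(image, output_stride=16,
+#                                      end_points=21, decode_points=2)
+#
+# check_stride() below freezes further downsampling once the accumulated
+# stride would exceed output_stride, and short_cuts[2] feeds the decoder.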
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import contextlib +import paddle +import math +import paddle.fluid as fluid +from models.libs.model_libs import scope, name_scope +from models.libs.model_libs import bn, bn_relu, relu +from models.libs.model_libs import conv +from models.libs.model_libs import separate_conv + +__all__ = ['xception_65', 'xception_41', 'xception_71'] + + +def check_data(data, number): + if type(data) == int: + return [data] * number + assert len(data) == number + return data + + +def check_stride(s, os): + if s <= os: + return True + else: + return False + + +def check_points(count, points): + if points is None: + return False + else: + if isinstance(points, list): + return (True if count in points else False) + else: + return (True if count == points else False) + + +class Xception(): + def __init__(self, backbone="xception_65"): + self.bottleneck_params = self.gen_bottleneck_params(backbone) + self.backbone = backbone + + def gen_bottleneck_params(self, backbone='xception_65'): + if backbone == 'xception_65': + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, + 2048]]) + } + elif backbone == 'xception_41': + bottleneck_params = { + "entry_flow": (3, [2, 2, 2], [128, 256, 728]), + "middle_flow": (8, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, + 2048]]) + } + elif backbone == 'xception_71': + bottleneck_params = { + "entry_flow": (5, [2, 1, 2, 1, 2], [128, 256, 256, 728, 728]), + "middle_flow": (16, 1, 728), + "exit_flow": (2, [2, 1], [[728, 1024, 1024], [1536, 1536, + 2048]]) + } + else: + raise Exception( + "xception backbont only support xception_41/xception_65/xception_71" + ) + return bottleneck_params + + def net(self, + input, + output_stride=32, + num_classes=1000, + end_points=None, + decode_points=None): + self.stride = 2 + self.block_point = 0 + self.output_stride = output_stride + self.decode_points = decode_points + self.short_cuts = dict() + with scope(self.backbone): + # Entry flow + data = self.entry_flow(input) + if check_points(self.block_point, end_points): + return data, self.short_cuts + + # Middle flow + data = self.middle_flow(data) + if check_points(self.block_point, end_points): + return data, self.short_cuts + + # Exit flow + data = self.exit_flow(data) + if check_points(self.block_point, end_points): + return data, self.short_cuts + + data = fluid.layers.reduce_mean(data, [2, 3], keep_dim=True) + data = fluid.layers.dropout(data, 0.5) + stdv = 1.0 / math.sqrt(data.shape[1] * 1.0) + with scope("logit"): + out = fluid.layers.fc( + input=data, + size=num_classes, + act='softmax', + param_attr=fluid.param_attr.ParamAttr( + name='weights', + initializer=fluid.initializer.Uniform(-stdv, stdv)), + bias_attr=fluid.param_attr.ParamAttr(name='bias')) + + return out + + def entry_flow(self, data): + param_attr = fluid.ParamAttr( + name=name_scope + 'weights', + regularizer=None, + initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.09)) + with scope("entry_flow"): + with scope("conv1"): + data = bn_relu( + conv( + data, 32, 3, stride=2, padding=1, + param_attr=param_attr)) + with scope("conv2"): + data = bn_relu( + conv( + data, 64, 3, stride=1, padding=1, + param_attr=param_attr)) + + # get entry flow params + block_num = self.bottleneck_params["entry_flow"][0] + strides = self.bottleneck_params["entry_flow"][1] + chns = 
self.bottleneck_params["entry_flow"][2] + strides = check_data(strides, block_num) + chns = check_data(chns, block_num) + + # params to control your flow + s = self.stride + block_point = self.block_point + output_stride = self.output_stride + with scope("entry_flow"): + for i in range(block_num): + block_point = block_point + 1 + with scope("block" + str(i + 1)): + stride = strides[i] if check_stride(s * strides[i], + output_stride) else 1 + data, short_cuts = self.xception_block( + data, chns[i], [1, 1, stride]) + s = s * stride + if check_points(block_point, self.decode_points): + self.short_cuts[block_point] = short_cuts[1] + + self.stride = s + self.block_point = block_point + return data + + def middle_flow(self, data): + block_num = self.bottleneck_params["middle_flow"][0] + strides = self.bottleneck_params["middle_flow"][1] + chns = self.bottleneck_params["middle_flow"][2] + strides = check_data(strides, block_num) + chns = check_data(chns, block_num) + + # params to control your flow + s = self.stride + block_point = self.block_point + output_stride = self.output_stride + with scope("middle_flow"): + for i in range(block_num): + block_point = block_point + 1 + with scope("block" + str(i + 1)): + stride = strides[i] if check_stride(s * strides[i], + output_stride) else 1 + data, short_cuts = self.xception_block( + data, chns[i], [1, 1, strides[i]], skip_conv=False) + s = s * stride + if check_points(block_point, self.decode_points): + self.short_cuts[block_point] = short_cuts[1] + + self.stride = s + self.block_point = block_point + return data + + def exit_flow(self, data): + block_num = self.bottleneck_params["exit_flow"][0] + strides = self.bottleneck_params["exit_flow"][1] + chns = self.bottleneck_params["exit_flow"][2] + strides = check_data(strides, block_num) + chns = check_data(chns, block_num) + + assert (block_num == 2) + # params to control your flow + s = self.stride + block_point = self.block_point + output_stride = self.output_stride + with scope("exit_flow"): + with scope('block1'): + block_point += 1 + stride = strides[0] if check_stride(s * strides[0], + output_stride) else 1 + data, short_cuts = self.xception_block(data, chns[0], + [1, 1, stride]) + s = s * stride + if check_points(block_point, self.decode_points): + self.short_cuts[block_point] = short_cuts[1] + with scope('block2'): + block_point += 1 + stride = strides[1] if check_stride(s * strides[1], + output_stride) else 1 + data, short_cuts = self.xception_block( + data, + chns[1], [1, 1, stride], + dilation=2, + has_skip=False, + activation_fn_in_separable_conv=True) + s = s * stride + if check_points(block_point, self.decode_points): + self.short_cuts[block_point] = short_cuts[1] + + self.stride = s + self.block_point = block_point + return data + + def xception_block(self, + input, + channels, + strides=1, + filters=3, + dilation=1, + skip_conv=True, + has_skip=True, + activation_fn_in_separable_conv=False): + repeat_number = 3 + channels = check_data(channels, repeat_number) + filters = check_data(filters, repeat_number) + strides = check_data(strides, repeat_number) + data = input + results = [] + for i in range(repeat_number): + with scope('separable_conv' + str(i + 1)): + if not activation_fn_in_separable_conv: + data = relu(data) + data = separate_conv( + data, + channels[i], + strides[i], + filters[i], + dilation=dilation) + else: + data = separate_conv( + data, + channels[i], + strides[i], + filters[i], + dilation=dilation, + act=relu) + results.append(data) + if not has_skip: + return data, 
results + if skip_conv: + param_attr = fluid.ParamAttr( + name=name_scope + 'weights', + regularizer=None, + initializer=fluid.initializer.TruncatedNormal( + loc=0.0, scale=0.09)) + with scope('shortcut'): + skip = bn( + conv( + input, + channels[-1], + 1, + strides[-1], + groups=1, + padding=0, + param_attr=param_attr)) + else: + skip = input + return data + skip, results + + +def xception_65(): + model = Xception("xception_65") + return model + + +def xception_41(): + model = Xception("xception_41") + return model + + +def xception_71(): + model = Xception("xception_71") + return model + + +if __name__ == '__main__': + image_shape = [-1, 3, 224, 224] + image = fluid.data(name='image', shape=image_shape, dtype='float32') + model = xception_65() + logit = model.net(image) diff --git a/legacy/pdseg/models/model_builder.py b/legacy/pdseg/models/model_builder.py index 4f999b1d79..cde61e1e4b 100644 --- a/legacy/pdseg/models/model_builder.py +++ b/legacy/pdseg/models/model_builder.py @@ -24,7 +24,10 @@ import solver from utils.config import cfg from loss import multi_softmax_with_loss -from models.modeling import deeplab, hrnet +from loss import multi_dice_loss +from loss import multi_bce_loss +from lovasz_losses import multi_lovasz_hinge_loss, multi_lovasz_softmax_loss +from models.modeling import deeplab, unet, icnet, pspnet, hrnet, fast_scnn, ocrnet class ModelPhase(object): @@ -71,10 +74,20 @@ def is_valid_phase(phase): def seg_model(image, class_num): model_name = cfg.MODEL.MODEL_NAME - if model_name == 'deeplabv3p': + if model_name == 'unet': + logits = unet.unet(image, class_num) + elif model_name == 'deeplabv3p': logits = deeplab.deeplabv3p(image, class_num) + elif model_name == 'icnet': + logits = icnet.icnet(image, class_num) + elif model_name == 'pspnet': + logits = pspnet.pspnet(image, class_num) elif model_name == 'hrnet': logits = hrnet.hrnet(image, class_num) + elif model_name == 'fast_scnn': + logits = fast_scnn.fast_scnn(image, class_num) + elif model_name == 'ocrnet': + logits = ocrnet.ocrnet(image, class_num) else: raise Exception( "unknow model name, only support unet, deeplabv3p, icnet, pspnet, hrnet, fast_scnn" @@ -169,6 +182,24 @@ def build_model(main_prog, start_prog, phase=ModelPhase.TRAIN): weight)) loss_valid = True valid_loss.append("softmax_loss") + if "dice_loss" in loss_type: + avg_loss_list.append(multi_dice_loss(logits, label, mask)) + loss_valid = True + valid_loss.append("dice_loss") + if "bce_loss" in loss_type: + avg_loss_list.append(multi_bce_loss(logits, label, mask)) + loss_valid = True + valid_loss.append("bce_loss") + if "lovasz_hinge_loss" in loss_type: + avg_loss_list.append( + multi_lovasz_hinge_loss(logits, label, mask)) + loss_valid = True + valid_loss.append("lovasz_hinge_loss") + if "lovasz_softmax_loss" in loss_type: + avg_loss_list.append( + multi_lovasz_softmax_loss(logits, label, mask)) + loss_valid = True + valid_loss.append("lovasz_softmax_loss") if not loss_valid: raise Exception( "SOLVER.LOSS: {} is set wrong. 
it should " diff --git a/legacy/pdseg/models/modeling/deeplab.py b/legacy/pdseg/models/modeling/deeplab.py index b98f3b1fe8..baae59d0bc 100644 --- a/legacy/pdseg/models/modeling/deeplab.py +++ b/legacy/pdseg/models/modeling/deeplab.py @@ -26,6 +26,9 @@ from models.libs.model_libs import bn, bn_relu, relu, qsigmoid from models.libs.model_libs import conv from models.libs.model_libs import separate_conv +from models.backbone.mobilenet_v2 import MobileNetV2 as mobilenet_v2_backbone +from models.backbone.mobilenet_v3 import MobileNetV3 as mobilenet_v3_backbone +from models.backbone.xception import Xception as xception_backbone from models.backbone.resnet_vd import ResNet as resnet_vd_backbone @@ -273,6 +276,78 @@ def decoder(encode_data, decode_shortcut): return _decoder_with_concat(encode_data, decode_shortcut, param_attr) +def mobilenet(input): + if 'v3' in cfg.MODEL.DEEPLAB.BACKBONE: + model_name = 'large' if 'large' in cfg.MODEL.DEEPLAB.BACKBONE else 'small' + return _mobilenetv3(input, model_name) + return _mobilenetv2(input) + + +def _mobilenetv3(input, model_name='large'): + # Backbone: mobilenetv3结构配置 + # DEPTH_MULTIPLIER: mobilenetv3的scale设置,默认1.0 + # OUTPUT_STRIDE:下采样倍数 + scale = cfg.MODEL.DEEPLAB.DEPTH_MULTIPLIER + output_stride = cfg.MODEL.DEEPLAB.OUTPUT_STRIDE + lr_mult_list = cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST + if lr_mult_list is None: + lr_mult_list = [1.0, 1.0, 1.0, 1.0, 1.0] + model = mobilenet_v3_backbone( + scale=scale, + output_stride=output_stride, + model_name=model_name, + lr_mult_list=lr_mult_list) + data, decode_shortcut = model.net(input) + return data, decode_shortcut + + +def _mobilenetv2(input): + # Backbone: mobilenetv2结构配置 + # DEPTH_MULTIPLIER: mobilenetv2的scale设置,默认1.0 + # OUTPUT_STRIDE:下采样倍数 + # end_points: mobilenetv2的block数 + # decode_point: 从mobilenetv2中引出分支所在block数, 作为decoder输入 + if cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST is not None: + print( + 'mobilenetv2 backbone do not support BACKBONE_LR_MULT_LIST setting') + + scale = cfg.MODEL.DEEPLAB.DEPTH_MULTIPLIER + output_stride = cfg.MODEL.DEEPLAB.OUTPUT_STRIDE + model = mobilenet_v2_backbone(scale=scale, output_stride=output_stride) + end_points = 18 + decode_point = 4 + data, decode_shortcuts = model.net( + input, end_points=end_points, decode_points=decode_point) + decode_shortcut = decode_shortcuts[decode_point] + return data, decode_shortcut + + +def xception(input): + # Backbone: Xception结构配置, xception_65, xception_41, xception_71三种可选 + # decode_point: 从Xception中引出分支所在block数,作为decoder输入 + # end_point:Xception的block数 + cfg.MODEL.DEFAULT_EPSILON = 1e-3 + model = xception_backbone(cfg.MODEL.DEEPLAB.BACKBONE) + backbone = cfg.MODEL.DEEPLAB.BACKBONE + output_stride = cfg.MODEL.DEEPLAB.OUTPUT_STRIDE + if '65' in backbone: + decode_point = 2 + end_points = 21 + if '41' in backbone: + decode_point = 2 + end_points = 13 + if '71' in backbone: + decode_point = 3 + end_points = 23 + data, decode_shortcuts = model.net( + input, + output_stride=output_stride, + end_points=end_points, + decode_points=decode_point) + decode_shortcut = decode_shortcuts[decode_point] + return data, decode_shortcut + + def resnet_vd(input): # backbone: resnet_vd, 可选resnet50_vd, resnet101_vd # end_points: resnet终止层数 @@ -310,10 +385,19 @@ def resnet_vd(input): def deeplabv3p(img, num_classes): # Backbone设置:xception 或 mobilenetv2 - if 'resnet' in cfg.MODEL.DEEPLAB.BACKBONE: + if 'xception' in cfg.MODEL.DEEPLAB.BACKBONE: + data, decode_shortcut = xception(img) + if cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST is not None: + print( + 
'xception backbone do not support BACKBONE_LR_MULT_LIST setting' + ) + elif 'mobilenet' in cfg.MODEL.DEEPLAB.BACKBONE: + data, decode_shortcut = mobilenet(img) + elif 'resnet' in cfg.MODEL.DEEPLAB.BACKBONE: data, decode_shortcut = resnet_vd(img) else: - raise Exception("deeplab only support resnet_vd backbone") + raise Exception( + "deeplab only support xception, mobilenet, and resnet_vd backbone") # 编码器解码器设置 cfg.MODEL.DEFAULT_EPSILON = 1e-5 diff --git a/legacy/pdseg/models/modeling/fast_scnn.py b/legacy/pdseg/models/modeling/fast_scnn.py new file mode 100644 index 0000000000..99019dc6e0 --- /dev/null +++ b/legacy/pdseg/models/modeling/fast_scnn.py @@ -0,0 +1,304 @@ +# coding: utf8 +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle.fluid as fluid +from models.libs.model_libs import scope +from models.libs.model_libs import bn, bn_relu, relu, conv_bn_layer +from models.libs.model_libs import conv, avg_pool +from models.libs.model_libs import separate_conv +from utils.config import cfg + + +def learning_to_downsample(x, dw_channels1=32, dw_channels2=48, + out_channels=64): + x = relu(bn(conv(x, dw_channels1, 3, 2))) + with scope('dsconv1'): + x = separate_conv( + x, dw_channels2, stride=2, filter=3, act=fluid.layers.relu) + with scope('dsconv2'): + x = separate_conv( + x, out_channels, stride=2, filter=3, act=fluid.layers.relu) + return x + + +def shortcut(input, data_residual): + return fluid.layers.elementwise_add(input, data_residual) + + +def dropout2d(input, prob, is_train=False): + if not is_train: + return input + channels = input.shape[1] + keep_prob = 1.0 - prob + shape = fluid.layers.shape(input) + random_tensor = keep_prob + fluid.layers.uniform_random( + [shape[0], channels, 1, 1], min=0., max=1.) 
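+    # floor(keep_prob + U[0, 1)) yields a per-channel 0/1 mask that keeps each
+    # channel with probability keep_prob; dividing by keep_prob rescales the
+    # kept channels (inverted dropout), so no rescaling is needed at inference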
+ binary_tensor = fluid.layers.floor(random_tensor) + output = input / keep_prob * binary_tensor + return output + + +def inverted_residual_unit(input, + num_in_filter, + num_filters, + ifshortcut, + stride, + filter_size, + padding, + expansion_factor, + name=None): + num_expfilter = int(round(num_in_filter * expansion_factor)) + + channel_expand = conv_bn_layer( + input=input, + num_filters=num_expfilter, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + if_act=True, + name=name + '_expand') + + bottleneck_conv = conv_bn_layer( + input=channel_expand, + num_filters=num_expfilter, + filter_size=filter_size, + stride=stride, + padding=padding, + num_groups=num_expfilter, + if_act=True, + name=name + '_dwise', + use_cudnn=False) + + depthwise_output = bottleneck_conv + + linear_out = conv_bn_layer( + input=bottleneck_conv, + num_filters=num_filters, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + if_act=False, + name=name + '_linear') + + if ifshortcut: + out = shortcut(input=input, data_residual=linear_out) + return out, depthwise_output + else: + return linear_out, depthwise_output + + +def inverted_blocks(input, in_c, t, c, n, s, name=None): + first_block, depthwise_output = inverted_residual_unit( + input=input, + num_in_filter=in_c, + num_filters=c, + ifshortcut=False, + stride=s, + filter_size=3, + padding=1, + expansion_factor=t, + name=name + '_1') + + last_residual_block = first_block + last_c = c + + for i in range(1, n): + last_residual_block, depthwise_output = inverted_residual_unit( + input=last_residual_block, + num_in_filter=last_c, + num_filters=c, + ifshortcut=True, + stride=1, + filter_size=3, + padding=1, + expansion_factor=t, + name=name + '_' + str(i + 1)) + return last_residual_block, depthwise_output + + +def psp_module(input, out_features): + + cat_layers = [] + sizes = (1, 2, 3, 6) + for size in sizes: + psp_name = "psp" + str(size) + with scope(psp_name): + pool = fluid.layers.adaptive_pool2d( + input, + pool_size=[size, size], + pool_type='avg', + name=psp_name + '_adapool') + data = conv( + pool, + out_features, + filter_size=1, + bias_attr=False, + name=psp_name + '_conv') + data_bn = bn(data, act='relu') + interp = fluid.layers.resize_bilinear( + data_bn, + out_shape=input.shape[2:], + name=psp_name + '_interp', + align_mode=0) + cat_layers.append(interp) + cat_layers = [input] + cat_layers + out = fluid.layers.concat(cat_layers, axis=1, name='psp_cat') + + return out + + +class FeatureFusionModule: + """Feature fusion module""" + + def __init__(self, + higher_in_channels, + lower_in_channels, + out_channels, + scale_factor=4): + self.higher_in_channels = higher_in_channels + self.lower_in_channels = lower_in_channels + self.out_channels = out_channels + self.scale_factor = scale_factor + + def net(self, higher_res_feature, lower_res_feature): + h, w = higher_res_feature.shape[2:] + lower_res_feature = fluid.layers.resize_bilinear( + lower_res_feature, [h, w], align_mode=0) + + with scope('dwconv'): + lower_res_feature = relu( + bn(conv(lower_res_feature, self.out_channels, + 1))) #(lower_res_feature) + with scope('conv_lower_res'): + lower_res_feature = bn( + conv(lower_res_feature, self.out_channels, 1, bias_attr=True)) + with scope('conv_higher_res'): + higher_res_feature = bn( + conv(higher_res_feature, self.out_channels, 1, bias_attr=True)) + out = higher_res_feature + lower_res_feature + + return relu(out) + + +class GlobalFeatureExtractor(): + """Global feature extractor module""" + + def __init__(self, + in_channels=64, + 
block_channels=(64, 96, 128), + out_channels=128, + t=6, + num_blocks=(3, 3, 3)): + self.in_channels = in_channels + self.block_channels = block_channels + self.out_channels = out_channels + self.t = t + self.num_blocks = num_blocks + + def net(self, x): + x, _ = inverted_blocks(x, self.in_channels, self.t, + self.block_channels[0], self.num_blocks[0], 2, + 'inverted_block_1') + x, _ = inverted_blocks(x, self.block_channels[0], self.t, + self.block_channels[1], self.num_blocks[1], 2, + 'inverted_block_2') + x, _ = inverted_blocks(x, self.block_channels[1], self.t, + self.block_channels[2], self.num_blocks[2], 1, + 'inverted_block_3') + x = psp_module(x, self.block_channels[2] // 4) + with scope('out'): + x = relu(bn(conv(x, self.out_channels, 1))) + return x + + +class Classifier: + """Classifier""" + + def __init__(self, dw_channels, num_classes, stride=1): + self.dw_channels = dw_channels + self.num_classes = num_classes + self.stride = stride + + def net(self, x): + with scope('dsconv1'): + x = separate_conv( + x, + self.dw_channels, + stride=self.stride, + filter=3, + act=fluid.layers.relu) + with scope('dsconv2'): + x = separate_conv( + x, + self.dw_channels, + stride=self.stride, + filter=3, + act=fluid.layers.relu) + + x = dropout2d(x, 0.1, is_train=cfg.PHASE == 'train') + x = conv(x, self.num_classes, 1, bias_attr=True) + return x + + +def aux_layer(x, num_classes): + x = relu(bn(conv(x, 32, 3, padding=1))) + x = dropout2d(x, 0.1, is_train=(cfg.PHASE == 'train')) + with scope('logit'): + x = conv(x, num_classes, 1, bias_attr=True) + return x + + +def fast_scnn(img, num_classes): + size = img.shape[2:] + classifier = Classifier(128, num_classes) + + global_feature_extractor = GlobalFeatureExtractor(64, [64, 96, 128], 128, 6, + [3, 3, 3]) + feature_fusion = FeatureFusionModule(64, 128, 128) + + with scope('learning_to_downsample'): + higher_res_features = learning_to_downsample(img, 32, 48, 64) + with scope('global_feature_extractor'): + lower_res_feature = global_feature_extractor.net(higher_res_features) + with scope('feature_fusion'): + x = feature_fusion.net(higher_res_features, lower_res_feature) + with scope('classifier'): + logit = classifier.net(x) + logit = fluid.layers.resize_bilinear(logit, size, align_mode=0) + + if len(cfg.MODEL.MULTI_LOSS_WEIGHT) == 3: + with scope('aux_layer_higher'): + higher_logit = aux_layer(higher_res_features, num_classes) + higher_logit = fluid.layers.resize_bilinear( + higher_logit, size, align_mode=0) + with scope('aux_layer_lower'): + lower_logit = aux_layer(lower_res_feature, num_classes) + lower_logit = fluid.layers.resize_bilinear( + lower_logit, size, align_mode=0) + return logit, higher_logit, lower_logit + elif len(cfg.MODEL.MULTI_LOSS_WEIGHT) == 2: + with scope('aux_layer_higher'): + higher_logit = aux_layer(higher_res_features, num_classes) + higher_logit = fluid.layers.resize_bilinear( + higher_logit, size, align_mode=0) + return logit, higher_logit + + return logit diff --git a/legacy/pdseg/models/modeling/icnet.py b/legacy/pdseg/models/modeling/icnet.py new file mode 100644 index 0000000000..2f4a393e4d --- /dev/null +++ b/legacy/pdseg/models/modeling/icnet.py @@ -0,0 +1,206 @@ +# coding: utf8 +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle.fluid as fluid +from utils.config import cfg +from models.libs.model_libs import scope +from models.libs.model_libs import bn, avg_pool, conv +from models.backbone.resnet import ResNet as resnet_backbone +import numpy as np + + +def interp(input, out_shape): + out_shape = list(out_shape.astype("int32")) + return fluid.layers.resize_bilinear(input, out_shape=out_shape) + + +def pyramis_pooling(input, input_shape): + shape = np.ceil(input_shape / 32).astype("int32") + h, w = shape + pool1 = avg_pool(input, [h, w], [h, w]) + pool1_interp = interp(pool1, shape) + pool2 = avg_pool(input, [h // 2, w // 2], [h // 2, w // 2]) + pool3 = avg_pool(input, [h // 3, w // 3], [h // 3, w // 3]) + pool4 = avg_pool(input, [h // 4, w // 4], [h // 4, w // 4]) + # official caffe repo eval use following hyparam + # pool2 = avg_pool(input, [17, 33], [16, 32]) + # pool3 = avg_pool(input, [13, 25], [10, 20]) + # pool4 = avg_pool(input, [8, 15], [5, 10]) + pool2_interp = interp(pool2, shape) + pool3_interp = interp(pool3, shape) + pool4_interp = interp(pool4, shape) + conv5_3_sum = input + pool4_interp + pool3_interp + pool2_interp + pool1_interp + return conv5_3_sum + + +def zero_padding(input, padding): + return fluid.layers.pad(input, + [0, 0, 0, 0, padding, padding, padding, padding]) + + +def sub_net_4(input, input_shape): + tmp = pyramis_pooling(input, input_shape) + with scope("conv5_4_k1"): + tmp = conv(tmp, 256, 1, 1) + tmp = bn(tmp, act='relu') + tmp = interp(tmp, out_shape=np.ceil(input_shape / 16)) + return tmp + + +def sub_net_2(input): + with scope("conv3_1_sub2_proj"): + tmp = conv(input, 128, 1, 1) + tmp = bn(tmp) + return tmp + + +def sub_net_1(input): + with scope("conv1_sub1"): + tmp = conv(input, 32, 3, 2, padding=1) + tmp = bn(tmp, act='relu') + with scope("conv2_sub1"): + tmp = conv(tmp, 32, 3, 2, padding=1) + tmp = bn(tmp, act='relu') + with scope("conv3_sub1"): + tmp = conv(tmp, 64, 3, 2, padding=1) + tmp = bn(tmp, act='relu') + with scope("conv3_sub1_proj"): + tmp = conv(tmp, 128, 1, 1) + tmp = bn(tmp) + return tmp + + +def CCF24(sub2_out, sub4_out, input_shape): + with scope("conv_sub4"): + tmp = conv(sub4_out, 128, 3, dilation=2, padding=2) + tmp = bn(tmp) + tmp = tmp + sub2_out + tmp = fluid.layers.relu(tmp) + tmp = interp(tmp, np.ceil(input_shape / 8)) + return tmp + + +def CCF124(sub1_out, sub24_out, input_shape): + tmp = zero_padding(sub24_out, padding=2) + with scope("conv_sub2"): + tmp = conv(tmp, 128, 3, dilation=2) + tmp = bn(tmp) + tmp = tmp + sub1_out + tmp = fluid.layers.relu(tmp) + tmp = interp(tmp, input_shape // 4) + return tmp + + +def resnet(input): + # ICNET backbone: resnet, 默认resnet50 + # end_points: resnet终止层数 + # decode_point: backbone引出分支所在层数 + # resize_point:backbone所在的该层卷积尺寸缩小至1/2 + # dilation_dict: resnet block数及对应的膨胀卷积尺度 + scale = cfg.MODEL.ICNET.DEPTH_MULTIPLIER + layers = cfg.MODEL.ICNET.LAYERS + model = resnet_backbone(scale=scale, layers=layers, stem='icnet') + if layers >= 50: + end_points = layers - 1 + decode_point = 13 
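+        # for resnet50 and deeper, the decoder branch is taken at block 13,
+        # which is also where the feature map is shrunk to 1/2 (resize_point)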
+ resize_point = 13 + elif layers == 18: + end_points = 13 + decode_point = 9 + resize_point = 9 + elif layers == 34: + end_points = 27 + decode_point = 15 + resize_point = 15 + dilation_dict = {2: 2, 3: 4} + data, decode_shortcuts = model.net( + input, + end_points=end_points, + decode_points=decode_point, + resize_points=resize_point, + dilation_dict=dilation_dict) + return data, decode_shortcuts[decode_point] + + +def encoder(data13, data49, input, input_shape): + # ICENT encoder配置 + # sub_net_4:对resnet49层数据进行pyramis_pooling操作 + # sub_net_2:对resnet13层数据进行卷积操作 + # sub_net_1: 对原始尺寸图像进行3次下采样卷积操作 + sub4_out = sub_net_4(data49, input_shape) + sub2_out = sub_net_2(data13) + sub1_out = sub_net_1(input) + return sub1_out, sub2_out, sub4_out + + +def decoder(sub1_out, sub2_out, sub4_out, input_shape): + # ICENT decoder配置 + # CCF: Cascade Feature Fusion 级联特征融合 + sub24_out = CCF24(sub2_out, sub4_out, input_shape) + sub124_out = CCF124(sub1_out, sub24_out, input_shape) + return sub24_out, sub124_out + + +def get_logit(data, num_classes, name="logit"): + param_attr = fluid.ParamAttr( + name=name + 'weights', + regularizer=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.0), + initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.01)) + + with scope(name): + data = conv( + data, + num_classes, + 1, + stride=1, + padding=0, + param_attr=param_attr, + bias_attr=True) + return data + + +def icnet(input, num_classes): + # Backbone resnet: 输入 image_sub2: 图片尺寸缩小至1/2 + # 输出 data49: resnet第49层数据,原始尺寸1/32 + # data13:resnet第13层数据, 原始尺寸1/16 + input_shape = input.shape[2:] + input_shape = np.array(input_shape).astype("float32") + image_sub2 = interp(input, out_shape=np.ceil(input_shape * 0.5)) + data49, data13 = resnet(image_sub2) + + # encoder:输入:input, data13, data49,分别进行下采样,卷积和金字塔pooling操作 + # 输出:分别对应sub1_out, sub2_out, sub4_out + sub1_out, sub2_out, sub4_out = encoder(data13, data49, input, input_shape) + + # decoder: 对编码器三个分支结果进行级联特征融合 + sub24_out, sub124_out = decoder(sub1_out, sub2_out, sub4_out, input_shape) + + # get_logit: 根据类别数决定最后一层卷积输出 + logit124 = get_logit(sub124_out, num_classes, "logit124") + logit4 = get_logit(sub4_out, num_classes, "logit4") + logit24 = get_logit(sub24_out, num_classes, "logit24") + return logit124, logit24, logit4 + + +if __name__ == '__main__': + image_shape = [-1, 3, 320, 320] + image = fluid.data(name='image', shape=image_shape, dtype='float32') + logit = icnet(image, 4) + print("logit:", logit.shape) diff --git a/legacy/pdseg/models/modeling/ocrnet.py b/legacy/pdseg/models/modeling/ocrnet.py new file mode 100644 index 0000000000..8ab8925eb5 --- /dev/null +++ b/legacy/pdseg/models/modeling/ocrnet.py @@ -0,0 +1,493 @@ +# coding: utf8 +# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
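+#
+# OCRNet: an HRNet backbone followed by an object-contextual representation
+# (OCR) head. A coarse segmentation from aux_head is used to gather one
+# context vector per class (ocr_gather_head), and object_context_block
+# redistributes that context to every pixel via query/key/value attention.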
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle +import paddle.fluid as fluid +from paddle.fluid.initializer import MSRA +from paddle.fluid.param_attr import ParamAttr + +from utils.config import cfg + + +def conv_bn_layer(input, + filter_size, + num_filters, + stride=1, + padding=1, + num_groups=1, + if_act=True, + name=None): + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=num_groups, + act=None, + # param_attr=ParamAttr(initializer=MSRA(), learning_rate=1.0, name=name + '_weights'), + param_attr=ParamAttr( + initializer=fluid.initializer.Normal(scale=0.001), + learning_rate=1.0, + name=name + '_weights'), + bias_attr=False) + bn_name = name + '_bn' + bn = fluid.layers.batch_norm( + input=conv, + param_attr=ParamAttr( + name=bn_name + "_scale", + initializer=fluid.initializer.Constant(1.0)), + bias_attr=ParamAttr( + name=bn_name + "_offset", + initializer=fluid.initializer.Constant(0.0)), + moving_mean_name=bn_name + '_mean', + moving_variance_name=bn_name + '_variance') + if if_act: + bn = fluid.layers.relu(bn) + return bn + + +def basic_block(input, num_filters, stride=1, downsample=False, name=None): + residual = input + conv = conv_bn_layer( + input=input, + filter_size=3, + num_filters=num_filters, + stride=stride, + name=name + '_conv1') + conv = conv_bn_layer( + input=conv, + filter_size=3, + num_filters=num_filters, + if_act=False, + name=name + '_conv2') + if downsample: + residual = conv_bn_layer( + input=input, + filter_size=1, + num_filters=num_filters, + if_act=False, + name=name + '_downsample') + return fluid.layers.elementwise_add(x=residual, y=conv, act='relu') + + +def bottleneck_block(input, num_filters, stride=1, downsample=False, name=None): + residual = input + conv = conv_bn_layer( + input=input, + filter_size=1, + num_filters=num_filters, + name=name + '_conv1') + conv = conv_bn_layer( + input=conv, + filter_size=3, + num_filters=num_filters, + stride=stride, + name=name + '_conv2') + conv = conv_bn_layer( + input=conv, + filter_size=1, + num_filters=num_filters * 4, + if_act=False, + name=name + '_conv3') + if downsample: + residual = conv_bn_layer( + input=input, + filter_size=1, + num_filters=num_filters * 4, + if_act=False, + name=name + '_downsample') + return fluid.layers.elementwise_add(x=residual, y=conv, act='relu') + + +def fuse_layers(x, channels, multi_scale_output=True, name=None): + out = [] + for i in range(len(channels) if multi_scale_output else 1): + residual = x[i] + shape = residual.shape + width = shape[-1] + height = shape[-2] + for j in range(len(channels)): + if j > i: + y = conv_bn_layer( + x[j], + filter_size=1, + num_filters=channels[i], + if_act=False, + name=name + '_layer_' + str(i + 1) + '_' + str(j + 1)) + y = fluid.layers.resize_bilinear( + input=y, out_shape=[height, width]) + residual = fluid.layers.elementwise_add( + x=residual, y=y, act=None) + elif j < i: + y = x[j] + for k in range(i - j): + if k == i - j - 1: + y = conv_bn_layer( + y, + filter_size=3, + num_filters=channels[i], + stride=2, + if_act=False, + name=name + '_layer_' + str(i + 1) + '_' + + str(j + 1) + '_' + str(k + 1)) + else: + y = conv_bn_layer( + y, + filter_size=3, + num_filters=channels[j], + stride=2, + name=name + '_layer_' + str(i + 1) + '_' + + str(j + 1) + '_' + str(k + 1)) + residual = fluid.layers.elementwise_add( + x=residual, y=y, act=None) + + residual = 
fluid.layers.relu(residual) + out.append(residual) + return out + + +def branches(x, block_num, channels, name=None): + out = [] + for i in range(len(channels)): + residual = x[i] + for j in range(block_num): + residual = basic_block( + residual, + channels[i], + name=name + '_branch_layer_' + str(i + 1) + '_' + str(j + 1)) + out.append(residual) + return out + + +def high_resolution_module(x, channels, multi_scale_output=True, name=None): + residual = branches(x, 4, channels, name=name) + out = fuse_layers( + residual, channels, multi_scale_output=multi_scale_output, name=name) + return out + + +def transition_layer(x, in_channels, out_channels, name=None): + num_in = len(in_channels) + num_out = len(out_channels) + out = [] + for i in range(num_out): + if i < num_in: + if in_channels[i] != out_channels[i]: + residual = conv_bn_layer( + x[i], + filter_size=3, + num_filters=out_channels[i], + name=name + '_layer_' + str(i + 1)) + out.append(residual) + else: + out.append(x[i]) + else: + residual = conv_bn_layer( + x[-1], + filter_size=3, + num_filters=out_channels[i], + stride=2, + name=name + '_layer_' + str(i + 1)) + out.append(residual) + return out + + +def stage(x, num_modules, channels, multi_scale_output=True, name=None): + out = x + for i in range(num_modules): + if i == num_modules - 1 and multi_scale_output == False: + out = high_resolution_module( + out, + channels, + multi_scale_output=False, + name=name + '_' + str(i + 1)) + else: + out = high_resolution_module( + out, channels, name=name + '_' + str(i + 1)) + + return out + + +def layer1(input, name=None): + conv = input + for i in range(4): + conv = bottleneck_block( + conv, + num_filters=64, + downsample=True if i == 0 else False, + name=name + '_' + str(i + 1)) + return conv + + +def aux_head(input, last_inp_channels, num_classes): + x = conv_bn_layer( + input=input, + filter_size=1, + num_filters=last_inp_channels, + stride=1, + padding=0, + name='aux_head_conv1') + x = fluid.layers.conv2d( + input=x, + num_filters=num_classes, + filter_size=1, + stride=1, + padding=0, + act=None, + # param_attr=ParamAttr(initializer=MSRA(), learning_rate=1.0, name='aux_head_conv2_weights'), + param_attr=ParamAttr( + initializer=fluid.initializer.Normal(scale=0.001), + learning_rate=1.0, + name='aux_head_conv2_weights'), + bias_attr=ParamAttr( + initializer=fluid.initializer.Constant(0.0), + name="aux_head_conv2_bias")) + return x + + +def conv3x3_ocr(input, ocr_mid_channels): + x = conv_bn_layer( + input=input, + filter_size=3, + num_filters=ocr_mid_channels, + stride=1, + padding=1, + name='conv3x3_ocr') + return x + + +def f_pixel(input, key_channels): + x = conv_bn_layer( + input=input, + filter_size=1, + num_filters=key_channels, + stride=1, + padding=0, + name='f_pixel_conv1') + x = conv_bn_layer( + input=x, + filter_size=1, + num_filters=key_channels, + stride=1, + padding=0, + name='f_pixel_conv2') + return x + + +def f_object(input, key_channels): + x = conv_bn_layer( + input=input, + filter_size=1, + num_filters=key_channels, + stride=1, + padding=0, + name='f_object_conv1') + x = conv_bn_layer( + input=x, + filter_size=1, + num_filters=key_channels, + stride=1, + padding=0, + name='f_object_conv2') + return x + + +def f_down(input, key_channels): + x = conv_bn_layer( + input=input, + filter_size=1, + num_filters=key_channels, + stride=1, + padding=0, + name='f_down_conv') + return x + + +def f_up(input, in_channels): + x = conv_bn_layer( + input=input, + filter_size=1, + num_filters=in_channels, + stride=1, + padding=0, + 
name='f_up_conv') + return x + + +def object_context_block(x, proxy, in_channels, key_channels, scale): + batch_size, _, h, w = x.shape + if scale > 1: + x = fluid.layers.pool2d(x, pool_size=[scale, scale], pool_type='max') + + query = f_pixel(x, key_channels) + query = fluid.layers.reshape( + query, + shape=[batch_size, key_channels, query.shape[2] * query.shape[3]]) + query = fluid.layers.transpose(query, perm=[0, 2, 1]) + + key = f_object(proxy, key_channels) + key = fluid.layers.reshape( + key, shape=[batch_size, key_channels, key.shape[2] * key.shape[3]]) + + value = f_down(proxy, key_channels) + value = fluid.layers.reshape( + value, + shape=[batch_size, key_channels, value.shape[2] * value.shape[3]]) + value = fluid.layers.transpose(value, perm=[0, 2, 1]) + + sim_map = fluid.layers.matmul(query, key) + sim_map = (key_channels**-.5) * sim_map + sim_map = fluid.layers.softmax(sim_map, axis=-1) + + context = fluid.layers.matmul(sim_map, value) + context = fluid.layers.transpose(context, perm=[0, 2, 1]) + context = fluid.layers.reshape( + context, shape=[batch_size, key_channels, x.shape[2], x.shape[3]]) + context = f_up(context, in_channels) + + if scale > 1: + context = fluid.layers.resize_bilinear(context, out_shape=[h, w]) + + return context + + +def ocr_gather_head(feats, probs, scale=1): + feats = fluid.layers.reshape( + feats, + shape=[feats.shape[0], feats.shape[1], feats.shape[2] * feats.shape[3]]) + feats = fluid.layers.transpose(feats, perm=[0, 2, 1]) + probs = fluid.layers.reshape( + probs, + shape=[probs.shape[0], probs.shape[1], probs.shape[2] * probs.shape[3]]) + probs = fluid.layers.softmax(scale * probs, axis=2) + ocr_context = fluid.layers.matmul(probs, feats) + ocr_context = fluid.layers.transpose(ocr_context, perm=[0, 2, 1]) + ocr_context = fluid.layers.unsqueeze(ocr_context, axes=[3]) + return ocr_context + + +def ocr_distri_head(feats, + proxy_feats, + ocr_mid_channels, + ocr_key_channels, + scale=1, + dropout=0.05): + context = object_context_block(feats, proxy_feats, ocr_mid_channels, + ocr_key_channels, scale) + x = fluid.layers.concat([context, feats], axis=1) + x = conv_bn_layer( + input=x, + filter_size=1, + num_filters=ocr_mid_channels, + stride=1, + padding=0, + name='spatial_ocr_conv') + x = fluid.layers.dropout(x, dropout_prob=dropout) + return x + + +def cls_head(input, num_classes): + x = fluid.layers.conv2d( + input=input, + num_filters=num_classes, + filter_size=1, + stride=1, + padding=0, + act=None, + # param_attr=ParamAttr(initializer=MSRA(), learning_rate=1.0, name='cls_head_conv_weights'), + param_attr=ParamAttr( + initializer=fluid.initializer.Normal(scale=0.001), + learning_rate=1.0, + name='cls_head_conv_weights'), + bias_attr=ParamAttr( + initializer=fluid.initializer.Constant(0.0), + name="cls_head_conv_bias")) + return x + + +def ocr_module(input, last_inp_channels, num_classes, ocr_mid_channels, + ocr_key_channels): + out_aux = aux_head(input, last_inp_channels, num_classes) + feats = conv3x3_ocr(input, ocr_mid_channels) + context = ocr_gather_head(feats, out_aux) + feats = ocr_distri_head(feats, context, ocr_mid_channels, ocr_key_channels) + out = cls_head(feats, num_classes) + return out, out_aux + + +def high_resolution_ocr_net(input, num_classes): + + channels_2 = cfg.MODEL.HRNET.STAGE2.NUM_CHANNELS + channels_3 = cfg.MODEL.HRNET.STAGE3.NUM_CHANNELS + channels_4 = cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS + + num_modules_2 = cfg.MODEL.HRNET.STAGE2.NUM_MODULES + num_modules_3 = cfg.MODEL.HRNET.STAGE3.NUM_MODULES + num_modules_4 = 
cfg.MODEL.HRNET.STAGE4.NUM_MODULES + + ocr_mid_channels = cfg.MODEL.OCR.OCR_MID_CHANNELS + ocr_key_channels = cfg.MODEL.OCR.OCR_KEY_CHANNELS + + last_inp_channels = sum(channels_4) + + x = conv_bn_layer( + input=input, + filter_size=3, + num_filters=64, + stride=2, + if_act=True, + name='layer1_1') + x = conv_bn_layer( + input=x, + filter_size=3, + num_filters=64, + stride=2, + if_act=True, + name='layer1_2') + + la1 = layer1(x, name='layer2') + tr1 = transition_layer([la1], [256], channels_2, name='tr1') + st2 = stage(tr1, num_modules_2, channels_2, name='st2') + tr2 = transition_layer(st2, channels_2, channels_3, name='tr2') + st3 = stage(tr2, num_modules_3, channels_3, name='st3') + tr3 = transition_layer(st3, channels_3, channels_4, name='tr3') + st4 = stage(tr3, num_modules_4, channels_4, name='st4') + + # upsample + shape = st4[0].shape + height, width = shape[-2], shape[-1] + st4[1] = fluid.layers.resize_bilinear(st4[1], out_shape=[height, width]) + st4[2] = fluid.layers.resize_bilinear(st4[2], out_shape=[height, width]) + st4[3] = fluid.layers.resize_bilinear(st4[3], out_shape=[height, width]) + + feats = fluid.layers.concat(st4, axis=1) + + out, out_aux = ocr_module(feats, last_inp_channels, num_classes, + ocr_mid_channels, ocr_key_channels) + + out = fluid.layers.resize_bilinear(out, input.shape[2:]) + out_aux = fluid.layers.resize_bilinear(out_aux, input.shape[2:]) + + return out, out_aux + + +def ocrnet(input, num_classes): + logit = high_resolution_ocr_net(input, num_classes) + return logit diff --git a/legacy/pdseg/models/modeling/pspnet.py b/legacy/pdseg/models/modeling/pspnet.py new file mode 100644 index 0000000000..f8c2943af4 --- /dev/null +++ b/legacy/pdseg/models/modeling/pspnet.py @@ -0,0 +1,115 @@ +# coding: utf8 +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
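+#
+# PSPNet: a dilated ResNet backbone followed by a pyramid pooling module
+# that adaptively average-pools the feature map to 1x1, 2x2, 3x3 and 6x6,
+# upsamples each branch back to the input size, and concatenates all
+# branches with the backbone feature before the final conv and classifier.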
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle.fluid as fluid +from paddle.fluid.param_attr import ParamAttr +from models.libs.model_libs import scope, name_scope +from models.libs.model_libs import avg_pool, conv, bn +from models.backbone.resnet import ResNet as resnet_backbone +from utils.config import cfg + + +def get_logit_interp(input, num_classes, out_shape, name="logit"): + # 根据类别数决定最后一层卷积输出, 并插值回原始尺寸 + param_attr = fluid.ParamAttr( + name=name + 'weights', + regularizer=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.0), + initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.01)) + + with scope(name): + logit = conv( + input, + num_classes, + filter_size=1, + param_attr=param_attr, + bias_attr=True, + name=name + '_conv') + logit_interp = fluid.layers.resize_bilinear( + logit, out_shape=out_shape, name=name + '_interp') + return logit_interp + + +def psp_module(input, out_features): + # Pyramid Scene Parsing 金字塔池化模块 + # 输入:backbone输出的特征 + # 输出:对输入进行不同尺度pooling, 卷积操作后插值回原始尺寸,并concat + # 最后进行一个卷积及BN操作 + + cat_layers = [] + sizes = (1, 2, 3, 6) + for size in sizes: + psp_name = "psp" + str(size) + with scope(psp_name): + pool = fluid.layers.adaptive_pool2d( + input, + pool_size=[size, size], + pool_type='avg', + name=psp_name + '_adapool') + data = conv( + pool, + out_features, + filter_size=1, + bias_attr=True, + name=psp_name + '_conv') + data_bn = bn(data, act='relu') + interp = fluid.layers.resize_bilinear( + data_bn, out_shape=input.shape[2:], name=psp_name + '_interp') + cat_layers.append(interp) + cat_layers = [input] + cat_layers[::-1] + cat = fluid.layers.concat(cat_layers, axis=1, name='psp_cat') + + psp_end_name = "psp_end" + with scope(psp_end_name): + data = conv( + cat, + out_features, + filter_size=3, + padding=1, + bias_attr=True, + name=psp_end_name) + out = bn(data, act='relu') + + return out + + +def resnet(input): + # PSPNET backbone: resnet, 默认resnet50 + # end_points: resnet终止层数 + # dilation_dict: resnet block数及对应的膨胀卷积尺度 + scale = cfg.MODEL.PSPNET.DEPTH_MULTIPLIER + layers = cfg.MODEL.PSPNET.LAYERS + end_points = layers - 1 + dilation_dict = {2: 2, 3: 4} + model = resnet_backbone(layers, scale, stem='pspnet') + data, _ = model.net( + input, end_points=end_points, dilation_dict=dilation_dict) + + return data + + +def pspnet(input, num_classes): + # Backbone: ResNet + res = resnet(input) + # PSP模块 + psp = psp_module(res, 512) + dropout = fluid.layers.dropout(psp, dropout_prob=0.1, name="dropout") + # 根据类别数决定最后一层卷积输出, 并插值回原始尺寸 + logit = get_logit_interp(dropout, num_classes, input.shape[2:]) + return logit diff --git a/legacy/pdseg/models/modeling/unet.py b/legacy/pdseg/models/modeling/unet.py new file mode 100644 index 0000000000..be9b8e5bb2 --- /dev/null +++ b/legacy/pdseg/models/modeling/unet.py @@ -0,0 +1,135 @@ +# coding: utf8 +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
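+#
+# U-Net: a symmetric encoder-decoder. Each encoder block halves the spatial
+# size with max-pooling before a double 3x3 conv-bn-relu; each decoder block
+# upsamples (bilinear resize or deconv, per cfg.MODEL.UNET.UPSAMPLE_MODE) and
+# concatenates the matching encoder feature map before the same double conv.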
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import contextlib +import paddle +import paddle.fluid as fluid +from utils.config import cfg +from models.libs.model_libs import scope, name_scope +from models.libs.model_libs import bn, bn_relu, relu +from models.libs.model_libs import conv, max_pool, deconv + + +def double_conv(data, out_ch): + param_attr = fluid.ParamAttr( + name='weights', + regularizer=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.0), + initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.33)) + with scope("conv0"): + data = bn_relu( + conv(data, out_ch, 3, stride=1, padding=1, param_attr=param_attr)) + with scope("conv1"): + data = bn_relu( + conv(data, out_ch, 3, stride=1, padding=1, param_attr=param_attr)) + return data + + +def down(data, out_ch): + # 下采样:max_pool + 2个卷积 + with scope("down"): + data = max_pool(data, 2, 2, 0) + data = double_conv(data, out_ch) + return data + + +def up(data, short_cut, out_ch): + # 上采样:data上采样(resize或deconv), 并与short_cut concat + param_attr = fluid.ParamAttr( + name='weights', + regularizer=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.0), + initializer=fluid.initializer.XavierInitializer(), + ) + with scope("up"): + if cfg.MODEL.UNET.UPSAMPLE_MODE == 'bilinear': + data = fluid.layers.resize_bilinear(data, short_cut.shape[2:]) + else: + data = deconv( + data, + out_ch // 2, + filter_size=2, + stride=2, + padding=0, + param_attr=param_attr) + data = fluid.layers.concat([data, short_cut], axis=1) + data = double_conv(data, out_ch) + return data + + +def encode(data): + # 编码器设置 + short_cuts = [] + with scope("encode"): + with scope("block1"): + data = double_conv(data, 64) + short_cuts.append(data) + with scope("block2"): + data = down(data, 128) + short_cuts.append(data) + with scope("block3"): + data = down(data, 256) + short_cuts.append(data) + with scope("block4"): + data = down(data, 512) + short_cuts.append(data) + with scope("block5"): + data = down(data, 512) + return data, short_cuts + + +def decode(data, short_cuts): + # 解码器设置,与编码器对称 + with scope("decode"): + with scope("decode1"): + data = up(data, short_cuts[3], 256) + with scope("decode2"): + data = up(data, short_cuts[2], 128) + with scope("decode3"): + data = up(data, short_cuts[1], 64) + with scope("decode4"): + data = up(data, short_cuts[0], 64) + return data + + +def get_logit(data, num_classes): + # 根据类别数设置最后一个卷积层输出 + param_attr = fluid.ParamAttr( + name='weights', + regularizer=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.0), + initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.01)) + with scope("logit"): + data = conv( + data, num_classes, 3, stride=1, padding=1, param_attr=param_attr) + return data + + +def unet(input, num_classes): + # UNET网络配置,对称的编码器解码器 + encode_data, short_cuts = encode(input) + decode_data = decode(encode_data, short_cuts) + logit = get_logit(decode_data, num_classes) + return logit + + +if __name__ == '__main__': + image_shape = [-1, 3, 320, 320] + image = fluid.data(name='image', shape=image_shape, dtype='float32') + logit = unet(image, 4) + print("logit:", logit.shape) diff --git a/legacy/pdseg/solver.py b/legacy/pdseg/solver.py index a519910f2f..531a42d4b9 100644 --- a/legacy/pdseg/solver.py +++ b/legacy/pdseg/solver.py @@ -33,6 +33,45 @@ def __init__(self, main_prog, start_prog): self.total_step = cfg.SOLVER.NUM_EPOCHS * self.step_per_epoch self.main_prog = main_prog self.start_prog = start_prog + 
self.warmup_step = cfg.SOLVER.LR_WARMUP_STEPS if cfg.SOLVER.LR_WARMUP else -1 + self.decay_step = self.total_step - self.warmup_step + self.decay_epochs = cfg.SOLVER.NUM_EPOCHS - self.warmup_step / self.step_per_epoch + + def lr_warmup(self, learning_rate, start_lr, end_lr): + linear_step = end_lr - start_lr + lr = paddle.fluid.layers.tensor.create_global_var( + shape=[1], + value=0.0, + dtype='float32', + persistable=True, + name="learning_rate_warmup") + + global_step = paddle.fluid.layers.learning_rate_scheduler._decay_step_counter( + ) + warmup_counter = paddle.fluid.layers.autoincreased_step_counter( + counter_name='@LR_DECAY_COUNTER_WARMUP_IN_SEG@', begin=1, step=1) + global_counter = paddle.fluid.default_main_program().global_block( + ).vars['@LR_DECAY_COUNTER@'] + warmup_counter = paddle.fluid.layers.cast(warmup_counter, 'float32') + + with paddle.fluid.layers.control_flow.Switch() as switch: + with switch.case(warmup_counter <= self.warmup_step): + decayed_lr = start_lr + linear_step * ( + warmup_counter / self.warmup_step) + paddle.fluid.layers.tensor.assign(decayed_lr, lr) + # hold the global_step to 0 during the warm-up phase + paddle.fluid.layers.increment(global_counter, value=-1) + with switch.default(): + paddle.fluid.layers.tensor.assign(learning_rate, lr) + return lr + + def piecewise_decay(self): + gamma = cfg.SOLVER.GAMMA + bd = [self.step_per_epoch * e for e in cfg.SOLVER.DECAY_EPOCH] + lr = [cfg.SOLVER.LR * (gamma**i) for i in range(len(bd) + 1)] + decayed_lr = paddle.fluid.layers.piecewise_decay( + boundaries=bd, values=lr) + return decayed_lr def poly_decay(self): power = cfg.SOLVER.POWER @@ -40,6 +79,11 @@ def poly_decay(self): cfg.SOLVER.LR, self.total_step, end_lr=0, power=power) return decayed_lr + def cosine_decay(self): + decayed_lr = paddle.fluid.layers.cosine_decay( + cfg.SOLVER.LR, self.step_per_epoch, self.decay_epochs) + return decayed_lr + def get_lr(self, lr_policy): if lr_policy.lower() == 'poly': decayed_lr = self.poly_decay() @@ -51,6 +95,8 @@ def get_lr(self, lr_policy): raise Exception( "unsupport learning decay policy! 
 only support poly,piecewise,cosine"
             )
+
+        decayed_lr = self.lr_warmup(decayed_lr, 0, cfg.SOLVER.LR)
         return decayed_lr
 
     def sgd_optimizer(self, lr_policy, loss):

From 2042499f57b1e6fc255c5bbbf670fe030fc62ddb Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Tue, 15 Jun 2021 15:13:12 +0800
Subject: [PATCH 124/210] rm warmup and cosine_decay

---
 legacy/pdseg/solver.py | 48 +++++++++---------------------------------
 1 file changed, 10 insertions(+), 38 deletions(-)

diff --git a/legacy/pdseg/solver.py b/legacy/pdseg/solver.py
index 531a42d4b9..d410cae63a 100644
--- a/legacy/pdseg/solver.py
+++ b/legacy/pdseg/solver.py
@@ -37,39 +37,11 @@ def __init__(self, main_prog, start_prog):
         self.decay_step = self.total_step - self.warmup_step
         self.decay_epochs = cfg.SOLVER.NUM_EPOCHS - self.warmup_step / self.step_per_epoch
 
-    def lr_warmup(self, learning_rate, start_lr, end_lr):
-        linear_step = end_lr - start_lr
-        lr = paddle.fluid.layers.tensor.create_global_var(
-            shape=[1],
-            value=0.0,
-            dtype='float32',
-            persistable=True,
-            name="learning_rate_warmup")
-
-        global_step = paddle.fluid.layers.learning_rate_scheduler._decay_step_counter(
-        )
-        warmup_counter = paddle.fluid.layers.autoincreased_step_counter(
-            counter_name='@LR_DECAY_COUNTER_WARMUP_IN_SEG@', begin=1, step=1)
-        global_counter = paddle.fluid.default_main_program().global_block(
-        ).vars['@LR_DECAY_COUNTER@']
-        warmup_counter = paddle.fluid.layers.cast(warmup_counter, 'float32')
-
-        with paddle.fluid.layers.control_flow.Switch() as switch:
-            with switch.case(warmup_counter <= self.warmup_step):
-                decayed_lr = start_lr + linear_step * (
-                    warmup_counter / self.warmup_step)
-                paddle.fluid.layers.tensor.assign(decayed_lr, lr)
-                # hold the global_step to 0 during the warm-up phase
-                paddle.fluid.layers.increment(global_counter, value=-1)
-            with switch.default():
-                paddle.fluid.layers.tensor.assign(learning_rate, lr)
-        return lr
-
     def piecewise_decay(self):
         gamma = cfg.SOLVER.GAMMA
         bd = [self.step_per_epoch * e for e in cfg.SOLVER.DECAY_EPOCH]
         lr = [cfg.SOLVER.LR * (gamma**i) for i in range(len(bd) + 1)]
-        decayed_lr = paddle.fluid.layers.piecewise_decay(
+        decayed_lr = paddle.optimizer.lr.PiecewiseDecay(
             boundaries=bd, values=lr)
         return decayed_lr
 
@@ -79,24 +51,24 @@ def poly_decay(self):
             cfg.SOLVER.LR, self.total_step, end_lr=0, power=power)
         return decayed_lr
 
-    def cosine_decay(self):
-        decayed_lr = paddle.fluid.layers.cosine_decay(
-            cfg.SOLVER.LR, self.step_per_epoch, self.decay_epochs)
-        return decayed_lr
+    # There is no paddle.optimizer.lr.CosineDecay in the paddle 2.1 version
+    # def cosine_decay(self):
+    #     decayed_lr = paddle.fluid.layers.cosine_decay(
+    #         cfg.SOLVER.LR, self.step_per_epoch, self.decay_epochs)
+    #     decayed_lr = paddle.optimizer.lr.cosine_decay()
+    #     return decayed_lr
 
     def get_lr(self, lr_policy):
         if lr_policy.lower() == 'poly':
             decayed_lr = self.poly_decay()
         elif lr_policy.lower() == 'piecewise':
             decayed_lr = self.piecewise_decay()
-        elif lr_policy.lower() == 'cosine':
-            decayed_lr = self.cosine_decay()
+        # elif lr_policy.lower() == 'cosine':
+        #     decayed_lr = self.cosine_decay()
         else:
             raise Exception(
-                "unsupport learning decay policy! only support poly,piecewise,cosine"
-            )
+                "unsupport learning decay policy! 
only support poly,piecewise") - decayed_lr = self.lr_warmup(decayed_lr, 0, cfg.SOLVER.LR) return decayed_lr def sgd_optimizer(self, lr_policy, loss): From dd70e90176f5d25580db1fbf4355f46bda96f5e4 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 16 Jun 2021 09:15:26 +0800 Subject: [PATCH 125/210] alignalign dygraph --- benchmark/hrnet.yml | 1 + benchmark/hrnet48.yml | 1 + paddleseg/models/backbones/hrnet.py | 84 ++++++++++++++++++----------- 3 files changed, 55 insertions(+), 31 deletions(-) diff --git a/benchmark/hrnet.yml b/benchmark/hrnet.yml index 2ed509d485..d75140b605 100644 --- a/benchmark/hrnet.yml +++ b/benchmark/hrnet.yml @@ -28,6 +28,7 @@ model: backbone: type: HRNet_W18 pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz + padding_same: False num_classes: 19 backbone_indices: [-1] bias: False diff --git a/benchmark/hrnet48.yml b/benchmark/hrnet48.yml index 702caf4c41..88ecbf6f85 100644 --- a/benchmark/hrnet48.yml +++ b/benchmark/hrnet48.yml @@ -28,6 +28,7 @@ model: backbone: type: HRNet_W48 pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz + padding_same: False num_classes: 19 backbone_indices: [-1] bias: False diff --git a/paddleseg/models/backbones/hrnet.py b/paddleseg/models/backbones/hrnet.py index ba75bb573e..bb72a25162 100644 --- a/paddleseg/models/backbones/hrnet.py +++ b/paddleseg/models/backbones/hrnet.py @@ -70,7 +70,8 @@ def __init__(self, stage4_num_blocks=(4, 4, 4, 4), stage4_num_channels=(18, 36, 72, 144), has_se=False, - align_corners=False): + align_corners=False, + padding_same=True): super(HRNet, self).__init__() self.pretrained = pretrained self.stage1_num_modules = stage1_num_modules @@ -94,7 +95,7 @@ def __init__(self, out_channels=64, kernel_size=3, stride=2, - padding=1, + padding=1 if not padding_same else 'same', bias_attr=False) self.conv_layer1_2 = layers.ConvBNReLU( @@ -102,7 +103,7 @@ def __init__(self, out_channels=64, kernel_size=3, stride=2, - padding=1, + padding=1 if not padding_same else 'same', bias_attr=False) self.la1 = Layer1( @@ -110,12 +111,14 @@ def __init__(self, num_blocks=self.stage1_num_blocks[0], num_filters=self.stage1_num_channels[0], has_se=has_se, - name="layer2") + name="layer2", + padding_same=padding_same) self.tr1 = TransitionLayer( in_channels=[self.stage1_num_channels[0] * 4], out_channels=self.stage2_num_channels, - name="tr1") + name="tr1", + padding_same=padding_same) self.st2 = Stage( num_channels=self.stage2_num_channels, @@ -124,12 +127,14 @@ def __init__(self, num_filters=self.stage2_num_channels, has_se=self.has_se, name="st2", - align_corners=align_corners) + align_corners=align_corners, + padding_same=padding_same) self.tr2 = TransitionLayer( in_channels=self.stage2_num_channels, out_channels=self.stage3_num_channels, - name="tr2") + name="tr2", + padding_same=padding_same) self.st3 = Stage( num_channels=self.stage3_num_channels, num_modules=self.stage3_num_modules, @@ -137,12 +142,14 @@ def __init__(self, num_filters=self.stage3_num_channels, has_se=self.has_se, name="st3", - align_corners=align_corners) + align_corners=align_corners, + padding_same=padding_same) self.tr3 = TransitionLayer( in_channels=self.stage3_num_channels, out_channels=self.stage4_num_channels, - name="tr3") + name="tr3", + padding_same=padding_same) self.st4 = Stage( num_channels=self.stage4_num_channels, num_modules=self.stage4_num_modules, @@ -150,7 +157,9 @@ def __init__(self, num_filters=self.stage4_num_channels, has_se=self.has_se, name="st4", - align_corners=align_corners) + 
align_corners=align_corners, + padding_same=padding_same) + self.init_weight() def forward(self, x): @@ -196,7 +205,8 @@ def __init__(self, num_filters, num_blocks, has_se=False, - name=None): + name=None, + padding_same=True): super(Layer1, self).__init__() self.bottleneck_block_list = [] @@ -210,7 +220,8 @@ def __init__(self, has_se=has_se, stride=1, downsample=True if i == 0 else False, - name=name + '_' + str(i + 1))) + name=name + '_' + str(i + 1), + padding_same=padding_same)) self.bottleneck_block_list.append(bottleneck_block) def forward(self, x): @@ -221,7 +232,7 @@ def forward(self, x): class TransitionLayer(nn.Layer): - def __init__(self, in_channels, out_channels, name=None): + def __init__(self, in_channels, out_channels, name=None, padding_same=True): super(TransitionLayer, self).__init__() num_in = len(in_channels) @@ -237,7 +248,7 @@ def __init__(self, in_channels, out_channels, name=None): in_channels=in_channels[i], out_channels=out_channels[i], kernel_size=3, - padding=1, + padding=1 if not padding_same else 'same', bias_attr=False)) else: residual = self.add_sublayer( @@ -247,7 +258,7 @@ def __init__(self, in_channels, out_channels, name=None): out_channels=out_channels[i], kernel_size=3, stride=2, - padding=1, + padding=1 if not padding_same else 'same', bias_attr=False)) self.conv_bn_func_list.append(residual) @@ -270,7 +281,8 @@ def __init__(self, in_channels, out_channels, has_se=False, - name=None): + name=None, + padding_same=True): super(Branches, self).__init__() self.basic_block_list = [] @@ -286,7 +298,8 @@ def __init__(self, num_filters=out_channels[i], has_se=has_se, name=name + '_branch_layer_' + str(i + 1) + '_' + - str(j + 1))) + str(j + 1), + padding_same=padding_same)) self.basic_block_list[i].append(basic_block_func) def forward(self, x): @@ -306,7 +319,8 @@ def __init__(self, has_se, stride=1, downsample=False, - name=None): + name=None, + padding_same=True): super(BottleneckBlock, self).__init__() self.has_se = has_se @@ -323,7 +337,7 @@ def __init__(self, out_channels=num_filters, kernel_size=3, stride=stride, - padding=1, + padding=1 if not padding_same else 'same', bias_attr=False) self.conv3 = layers.ConvBN( @@ -370,7 +384,8 @@ def __init__(self, stride=1, has_se=False, downsample=False, - name=None): + name=None, + padding_same=True): super(BasicBlock, self).__init__() self.has_se = has_se @@ -381,13 +396,13 @@ def __init__(self, out_channels=num_filters, kernel_size=3, stride=stride, - padding=1, + padding=1 if not padding_same else 'same', bias_attr=False) self.conv2 = layers.ConvBN( in_channels=num_filters, out_channels=num_filters, kernel_size=3, - padding=1, + padding=1 if not padding_same else 'same', bias_attr=False) if self.downsample: @@ -465,7 +480,8 @@ def __init__(self, has_se=False, multi_scale_output=True, name=None, - align_corners=False): + align_corners=False, + padding_same=True): super(Stage, self).__init__() self._num_modules = num_modules @@ -482,7 +498,8 @@ def __init__(self, has_se=has_se, multi_scale_output=False, name=name + '_' + str(i + 1), - align_corners=align_corners)) + align_corners=align_corners, + padding_same=padding_same)) else: stage_func = self.add_sublayer( "stage_{}_{}".format(name, i + 1), @@ -492,7 +509,8 @@ def __init__(self, num_filters=num_filters, has_se=has_se, name=name + '_' + str(i + 1), - align_corners=align_corners)) + align_corners=align_corners, + padding_same=padding_same)) self.stage_func_list.append(stage_func) @@ -511,7 +529,8 @@ def __init__(self, has_se=False, multi_scale_output=True, 
name=None, - align_corners=False): + align_corners=False, + padding_same=True): super(HighResolutionModule, self).__init__() self.branches_func = Branches( @@ -519,14 +538,16 @@ def __init__(self, in_channels=num_channels, out_channels=num_filters, has_se=has_se, - name=name) + name=name, + padding_same=padding_same) self.fuse_func = FuseLayers( in_channels=num_filters, out_channels=num_filters, multi_scale_output=multi_scale_output, name=name, - align_corners=align_corners) + align_corners=align_corners, + padding_same=padding_same) def forward(self, x): out = self.branches_func(x) @@ -540,7 +561,8 @@ def __init__(self, out_channels, multi_scale_output=True, name=None, - align_corners=False): + align_corners=False, + padding_same=True): super(FuseLayers, self).__init__() self._actual_ch = len(in_channels) if multi_scale_output else 1 @@ -571,7 +593,7 @@ def __init__(self, out_channels=out_channels[i], kernel_size=3, stride=2, - padding=1, + padding=1 if not padding_same else 'same', bias_attr=False)) pre_num_filters = out_channels[i] else: @@ -583,7 +605,7 @@ def __init__(self, out_channels=out_channels[j], kernel_size=3, stride=2, - padding=1, + padding=1 if not padding_same else 'same', bias_attr=False)) pre_num_filters = out_channels[j] self.residual_func_list.append(residual_func) From 5d07585bf421c319f7f1f6094f1da5f59d882199 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 16 Jun 2021 16:40:01 +0800 Subject: [PATCH 126/210] align dygraph --- .../deeplabv3p_resnet50_vd_cityscapes.yaml | 5 ++ .../hrnetw18_cityscapes_1024x512_215.yaml | 4 +- legacy/pdseg/models/backbone/resnet_vd.py | 6 +- legacy/pdseg/models/libs/model_libs.py | 12 +++- legacy/pdseg/models/modeling/deeplab.py | 71 ++++++++++++++----- legacy/pdseg/models/modeling/hrnet.py | 22 ++++-- legacy/pdseg/utils/config.py | 7 +- 7 files changed, 96 insertions(+), 31 deletions(-) diff --git a/legacy/configs/benchmark/deeplabv3p_resnet50_vd_cityscapes.yaml b/legacy/configs/benchmark/deeplabv3p_resnet50_vd_cityscapes.yaml index 76214f1f31..e4a3d7f59a 100644 --- a/legacy/configs/benchmark/deeplabv3p_resnet50_vd_cityscapes.yaml +++ b/legacy/configs/benchmark/deeplabv3p_resnet50_vd_cityscapes.yaml @@ -32,6 +32,11 @@ MODEL: DECODER_USE_SEP_CONV: True BACKBONE: "resnet_vd_50" OUTPUT_STRIDE: 8 + BIAS: null + ALIGN_CORNERS: False + BENCHMARK: True + DECODER: + ACT: False TRAIN: PRETRAINED_MODEL_DIR: u"pretrained_model/resnet50_vd_imagenet" MODEL_SAVE_DIR: "output/deeplabv3p_resnet50_vd_bn_cityscapes" diff --git a/legacy/configs/benchmark/hrnetw18_cityscapes_1024x512_215.yaml b/legacy/configs/benchmark/hrnetw18_cityscapes_1024x512_215.yaml index f02f1c7af9..cc0e69ae6a 100644 --- a/legacy/configs/benchmark/hrnetw18_cityscapes_1024x512_215.yaml +++ b/legacy/configs/benchmark/hrnetw18_cityscapes_1024x512_215.yaml @@ -33,8 +33,8 @@ MODEL: NUM_CHANNELS: [18, 36, 72] STAGE4: NUM_CHANNELS: [18, 36, 72, 144] - BIAS: - False + BIAS: False + ALIGN_CORNERS: False TRAIN: PRETRAINED_MODEL_DIR: u"./pretrained_model/hrnet_w18_ssld" diff --git a/legacy/pdseg/models/backbone/resnet_vd.py b/legacy/pdseg/models/backbone/resnet_vd.py index f5f50f7f10..5e308b1f31 100644 --- a/legacy/pdseg/models/backbone/resnet_vd.py +++ b/legacy/pdseg/models/backbone/resnet_vd.py @@ -47,7 +47,8 @@ def __init__(self, layers=50, scale=1.0, stem=None, - lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0]): + lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0], + align_corners=True): self.params = train_parameters self.layers = layers self.scale = scale @@ -58,6 +59,7 @@ def __init__(self, ) == 5, 
"lr_mult_list length in ResNet must be 5 but got {}!!".format( len(self.lr_mult_list)) self.curr_stage = 0 + self.align_corners = align_corners def net(self, input, @@ -216,7 +218,7 @@ def zero_padding(self, input, padding): def interp(self, input, out_shape): out_shape = list(out_shape.astype("int32")) return F.interpolate( - input, out_shape, mode='bilinear', align_corners=False) + input, out_shape, mode='bilinear', align_corners=self.align_corners) def conv_bn_layer(self, input, diff --git a/legacy/pdseg/models/libs/model_libs.py b/legacy/pdseg/models/libs/model_libs.py index 834361a4af..0dac259937 100644 --- a/legacy/pdseg/models/libs/model_libs.py +++ b/legacy/pdseg/models/libs/model_libs.py @@ -130,7 +130,13 @@ def deconv(*args, **kargs): return static.nn.conv2d_transpose(*args, **kargs) -def separate_conv(input, channel, stride, filter, dilation=1, act=None): +def separate_conv(input, + channel, + stride, + filter, + dilation=1, + act=None, + bias_attr=False): param_attr = paddle.ParamAttr( name=name_scope + 'weights', regularizer=paddle.regularizer.L2Decay(coeff=0.0), @@ -146,7 +152,7 @@ def separate_conv(input, channel, stride, filter, dilation=1, act=None): dilation=dilation, use_cudnn=False, param_attr=param_attr, - bias_attr=None) + bias_attr=bias_attr) input = bn(input) if act: input = act(input) @@ -163,7 +169,7 @@ def separate_conv(input, channel, stride, filter, dilation=1, act=None): groups=1, padding=0, param_attr=param_attr, - bias_attr=None) + bias_attr=bias_attr) input = bn(input) if act: input = act(input) return input diff --git a/legacy/pdseg/models/modeling/deeplab.py b/legacy/pdseg/models/modeling/deeplab.py index baae59d0bc..3ea3a876b6 100644 --- a/legacy/pdseg/models/modeling/deeplab.py +++ b/legacy/pdseg/models/modeling/deeplab.py @@ -72,7 +72,7 @@ def encoder(input): image_avg, input.shape[2:], mode='bilinear', - align_corners=False) + align_corners=cfg.MODEL.DEEPLAB.ALIGN_CORNERS) if cfg.MODEL.DEEPLAB.ENCODER.ADD_IMAGE_LEVEL_FEATURE: concat_logits.append(image_avg) @@ -86,16 +86,25 @@ def encoder(input): groups=1, padding=0, param_attr=param_attr, - bias_attr=None)) + bias_attr=cfg.MODEL.DEEPLAB.BIAS)) aspp0 = F.interpolate( - aspp0, input.shape[2:], mode='bilinear', align_corners=False) + aspp0, + input.shape[2:], + mode='bilinear', + align_corners=cfg.MODEL.DEEPLAB.ALIGN_CORNERS) concat_logits.append(aspp0) if aspp_ratios: with scope("aspp1"): if cfg.MODEL.DEEPLAB.ASPP_WITH_SEP_CONV: aspp1 = separate_conv( - input, channel, 1, 3, dilation=aspp_ratios[0], act=relu) + input, + channel, + 1, + 3, + dilation=aspp_ratios[0], + act=relu, + bias_attr=cfg.MODEL.DEEPLAB.BIAS) else: aspp1 = bn_relu( conv( @@ -110,12 +119,18 @@ def encoder(input): aspp1, input.shape[2:], mode='bilinear', - align_corners=False) + align_corners=cfg.MODEL.DEEPLAB.ALIGN_CORNERS) concat_logits.append(aspp1) with scope("aspp2"): if cfg.MODEL.DEEPLAB.ASPP_WITH_SEP_CONV: aspp2 = separate_conv( - input, channel, 1, 3, dilation=aspp_ratios[1], act=relu) + input, + channel, + 1, + 3, + dilation=aspp_ratios[1], + act=relu, + bias_attr=cfg.MODEL.DEEPLAB.BIAS) else: aspp2 = bn_relu( conv( @@ -130,12 +145,18 @@ def encoder(input): aspp2, input.shape[2:], mode='bilinear', - align_corners=False) + align_corners=cfg.MODEL.DEEPLAB.ALIGN_CORNERS) concat_logits.append(aspp2) with scope("aspp3"): if cfg.MODEL.DEEPLAB.ASPP_WITH_SEP_CONV: aspp3 = separate_conv( - input, channel, 1, 3, dilation=aspp_ratios[2], act=relu) + input, + channel, + 1, + 3, + dilation=aspp_ratios[2], + act=relu, + 
bias_attr=cfg.MODEL.DEEPLAB.BIAS) else: aspp3 = bn_relu( conv( @@ -150,7 +171,7 @@ def encoder(input): aspp3, input.shape[2:], mode='bilinear', - align_corners=False) + align_corners=cfg.MODEL.DEEPLAB.ALIGN_CORNERS) concat_logits.append(aspp3) with scope("concat"): @@ -165,8 +186,12 @@ def encoder(input): groups=1, padding=0, param_attr=param_attr, - bias_attr=None)) - data = F.dropout(data, 0.1, mode='downscale_in_infer') + bias_attr=cfg.MODEL.DEEPLAB.BIAS)) + + if cfg.MODEL.DEEPLAB.BENCHMARK: + data = F.dropout(data, 0.1, mode='downscale_in_infer') + else: + data = paddle.fluid.layers.dropout(data, 0.9) if cfg.MODEL.DEEPLAB.ENCODER.ASPP_WITH_SE: data = data * image_avg @@ -178,7 +203,7 @@ def _decoder_with_sum_merge(encode_data, decode_shortcut, param_attr): encode_data, decode_shortcut.shape[2:], mode='bilinear', - align_corners=False) + align_corners=cfg.MODEL.DEEPLAB.ALIGN_CORNERS) encode_data = conv( encode_data, cfg.MODEL.DEEPLAB.DECODER.CONV_FILTERS, @@ -212,13 +237,13 @@ def _decoder_with_concat(encode_data, decode_shortcut, param_attr): groups=1, padding=0, param_attr=param_attr, - bias_attr=None)) + bias_attr=cfg.MODEL.DEEPLAB.BIAS)) encode_data = F.interpolate( encode_data, decode_shortcut.shape[2:], mode='bilinear', - align_corners=False) + align_corners=cfg.MODEL.DEEPLAB.ALIGN_CORNERS) encode_data = paddle.concat([encode_data, decode_shortcut], axis=1) if cfg.MODEL.DEEPLAB.DECODER_USE_SEP_CONV: with scope("separable_conv1"): @@ -227,14 +252,18 @@ def _decoder_with_concat(encode_data, decode_shortcut, param_attr): cfg.MODEL.DEEPLAB.DECODER.CONV_FILTERS, 1, 3, - dilation=1) + dilation=1, + act=relu if cfg.MODEL.DEEPLAB.DECODER.ACT else None, + bias_attr=cfg.MODEL.DEEPLAB.BIAS) with scope("separable_conv2"): encode_data = separate_conv( encode_data, cfg.MODEL.DEEPLAB.DECODER.CONV_FILTERS, 1, 3, - dilation=1) + dilation=1, + act=relu if cfg.MODEL.DEEPLAB.DECODER.ACT else None, + bias_attr=cfg.MODEL.DEEPLAB.BIAS) else: with scope("decoder_conv1"): encode_data = bn_relu( @@ -372,7 +401,10 @@ def resnet_vd(input): if lr_mult_list is None: lr_mult_list = [1.0, 1.0, 1.0, 1.0, 1.0] model = resnet_vd_backbone( - layers, stem='deeplab', lr_mult_list=lr_mult_list) + layers, + stem='deeplab', + lr_mult_list=lr_mult_list, + align_corners=cfg.MODEL.DEEPLAB.ALIGN_CORNERS) data, decode_shortcuts = model.net( input, end_points=end_points, @@ -427,5 +459,8 @@ def deeplabv3p(img, num_classes): logit = data logit = F.interpolate( - logit, img.shape[2:], mode='bilinear', align_corners=False) + logit, + img.shape[2:], + mode='bilinear', + align_corners=cfg.MODEL.DEEPLAB.ALIGN_CORNERS) return logit diff --git a/legacy/pdseg/models/modeling/hrnet.py b/legacy/pdseg/models/modeling/hrnet.py index 88ad31360d..888659a0ca 100644 --- a/legacy/pdseg/models/modeling/hrnet.py +++ b/legacy/pdseg/models/modeling/hrnet.py @@ -133,7 +133,7 @@ def fuse_layers(x, channels, multi_scale_output=True, name=None): y, size=[height, width], mode='bilinear', - align_corners=False) + align_corners=cfg.MODEL.HRNET.ALIGN_CORNERS) residual = residual + y elif j < i: y = x[j] @@ -272,11 +272,20 @@ def high_resolution_net(input, num_classes): shape = st4[0].shape height, width = shape[-2], shape[-1] st4[1] = F.interpolate( - st4[1], size=[height, width], mode='bilinear', align_corners=False) + st4[1], + size=[height, width], + mode='bilinear', + align_corners=cfg.MODEL.HRNET.ALIGN_CORNERS) st4[2] = F.interpolate( - st4[2], size=[height, width], mode='bilinear', align_corners=False) + st4[2], + size=[height, width], + 
mode='bilinear', + align_corners=cfg.MODEL.HRNET.ALIGN_CORNERS) st4[3] = F.interpolate( - st4[3], size=[height, width], mode='bilinear', align_corners=False) + st4[3], + size=[height, width], + mode='bilinear', + align_corners=cfg.MODEL.HRNET.ALIGN_CORNERS) out = paddle.concat(st4, axis=1) last_channels = sum(channels_4) @@ -302,7 +311,10 @@ def high_resolution_net(input, num_classes): bias_attr=cfg.MODEL.HRNET.BIAS) out = F.interpolate( - out, size=input.shape[2:], mode='bilinear', align_corners=False) + out, + size=input.shape[2:], + mode='bilinear', + align_corners=cfg.MODEL.HRNET.ALIGN_CORNERS) return out diff --git a/legacy/pdseg/utils/config.py b/legacy/pdseg/utils/config.py index 0df689e37b..e56c40dfa9 100644 --- a/legacy/pdseg/utils/config.py +++ b/legacy/pdseg/utils/config.py @@ -215,12 +215,16 @@ cfg.MODEL.DEEPLAB.DECODER.USE_SUM_MERGE = False cfg.MODEL.DEEPLAB.DECODER.CONV_FILTERS = 256 cfg.MODEL.DEEPLAB.DECODER.OUTPUT_IS_LOGITS = False +cfg.MODEL.DEEPLAB.DECODER.ACT = True # ASPP是否使用可分离卷积 cfg.MODEL.DEEPLAB.ASPP_WITH_SEP_CONV = True # 解码器是否使用可分离卷积 cfg.MODEL.DEEPLAB.DECODER_USE_SEP_CONV = True # Backbone分阶段学习率 cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST = None +cfg.MODEL.DEEPLAB.BIAS = False +cfg.MODEL.DEEPLAB.ALIGN_CORNERS = True +cfg.MODEL.DEEPLAB.BENCHMARK = False ########################## UNET模型配置 ####################################### # 上采样方式, 默认为双线性插值 @@ -249,7 +253,8 @@ cfg.MODEL.HRNET.STAGE4.NUM_MODULES = 3 cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS = [40, 80, 160, 320] # FCN Head的卷积是否用bias -cfg.MODEL.HRNET.BIAS = None +cfg.MODEL.HRNET.BIAS = False +cfg.MODEL.HRNET.ALIGN_CORNERS = True ########################## OCNET模型配置 ###################################### cfg.MODEL.OCR.OCR_MID_CHANNELS = 512 From 543248015c05800f6a16cfeb41255e1043bce834 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 16 Jun 2021 19:52:29 +0800 Subject: [PATCH 127/210] update README.md --- README.md | 62 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 32 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index eefdf07990..29e8982f7e 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,12 @@ -# PaddleSeg Benchmark with AMP +English | [简体中文](README_CN.md) -## 动态图 -数据集cityscapes 放置于data目录下, 下载链接:https://paddleseg.bj.bcebos.com/dataset/cityscapes.tar +# PaddleSeg -通过 **--fp16** 开启amp训练。 +[![Build Status](https://travis-ci.org/PaddlePaddle/PaddleSeg.svg?branch=master)](https://travis-ci.org/PaddlePaddle/PaddleSeg) +[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE) +[![Version](https://img.shields.io/github/release/PaddlePaddle/PaddleSeg.svg)](https://github.com/PaddlePaddle/PaddleSeg/releases) +![python version](https://img.shields.io/badge/python-3.6+-orange.svg) +![support os](https://img.shields.io/badge/os-linux%2C%20win%2C%20mac-yellow.svg) *[2021-02-26] PaddleSeg has released the v2.0 version, which supports the dynamic graph by default. The static-graph codes have been moved to [legacy](./legacy). See detailed [release notes](./docs/release_notes.md).* @@ -49,45 +52,44 @@ Welcome to PaddleSeg! PaddleSeg is an end-to-end image segmentation development ## Dataset -DeepLabv3+ 模型的配置文件为: -benchmark/deeplabv3p.yml +- [x] Cityscapes +- [x] Pascal VOC +- [x] ADE20K +- [x] Pascal Context +- [x] COCO stuff -**注意** +## Installation -* 动态图中batch_size设置为每卡的batch_size -* DeepLabv3+ 支持通过传入 **--data_format NHWC**进行‘NHWC’数据格式的训练。 +#### step 1. 
Install PaddlePaddle +System Requirements: +* PaddlePaddle >= 2.0.0 +* Python >= 3.6+ +Highly recommend you install the GPU version of PaddlePaddle, due to large overhead of segmentation models, otherwise it could be out of memory while running the models. For more detailed installation tutorials, please refer to the official website of [PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/2.0/install/)。 -## 静态图 -数据集cityscapes 放置于legacy/dataset目录下 -通过 **MODEL.FP16 True** 开启amp训练 -单机单卡使用如下命令进行训练: -``` -cd legacy -export CUDA_VISIBLE_DEVICES=0 -python pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 2 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True -``` +#### step 2. Install PaddleSeg +Support to construct a customized segmentation framework with *API Calling* method for flexible development. -单机多卡使用如下命令进行训练: -``` -export CUDA_VISIBLE_DEVICES=0,1 -fleetrun pdseg/train.py --cfg configs/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True +```shell +pip install paddleseg ``` -deeplabv3p模型的配置文件为: -configs/deeplabv3p_resnet50_vd_cityscapes.yaml -**注意** -静态图中的BATCH_SIZE为总的batch size。 +#### step 3. Download PaddleSeg repo +Support to complete the whole process segmentation application with *Configuration Drive* method, simple and fast. -## 竞品 -竞品为[mmsegmentation](https://github.com/open-mmlab/mmsegmentation) +```shell +git clone https://github.com/PaddlePaddle/PaddleSeg +``` -对应竞品配置文件为: +#### step 4. Verify installation +Run the following command. If you can train normally, you have installed it successfully. -configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py +```shell +python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml +``` ## Tutorials From 28f8a5318acac1253d1a818cf6497c0f67ac5e38 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 16 Jun 2021 19:58:17 +0800 Subject: [PATCH 128/210] update solver.py --- legacy/pdseg/solver.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/legacy/pdseg/solver.py b/legacy/pdseg/solver.py index d410cae63a..ee98474009 100644 --- a/legacy/pdseg/solver.py +++ b/legacy/pdseg/solver.py @@ -51,11 +51,10 @@ def poly_decay(self): cfg.SOLVER.LR, self.total_step, end_lr=0, power=power) return decayed_lr - # There is not paddle.optimizer.lr.PolynomialDecaypaddle.optimizer.lr.CosineDecay in paddle2.1 version + # There is not paddle.optimizer.lr.CosineDecay in paddle2.1 version # def cosine_decay(self): - # decayed_lr = paddle.fluid.layers.cosine_decay( + # decayed_lr = paddle.fluid.layers.cosine_decay() # cfg.SOLVER.LR, self.step_per_epoch, self.decay_epochs) - # decayed_lr = paddle.optimizer.lr.cosine_decay() # return decayed_lr def get_lr(self, lr_policy): From 5ef6a757f13677e0f5794608e2359be6061f4a2b Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 18 Jun 2021 17:06:01 +0800 Subject: [PATCH 129/210] update train.py --- paddleseg/core/train.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/paddleseg/core/train.py b/paddleseg/core/train.py index ce8a84fdca..30fc6573e0 100644 --- a/paddleseg/core/train.py +++ b/paddleseg/core/train.py @@ -83,7 +83,7 @@ def train(model, The 'types' item is a list of object of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficient. keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5. test_config(dict, optional): Evaluation config. 
- fp16 (bool, optional): Whther to use amp. + fp16 (bool, optional): Whether to use amp. """ model.train() nranks = paddle.distributed.ParallelEnv().nranks @@ -98,15 +98,6 @@ def train(model, os.remove(save_dir) os.makedirs(save_dir) - # if nranks > 1: - # # Initialize parallel environment if not done. - # if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized( - # ): - # paddle.distributed.init_parallel_env() - # ddp_model = paddle.DataParallel(model) - # else: - # ddp_model = paddle.DataParallel(model) - if nranks > 1: paddle.distributed.fleet.init(is_collective=True) optimizer = paddle.distributed.fleet.distributed_optimizer( From d93247d28740c1c4683c16c6e4799ec6c3c3d72a Mon Sep 17 00:00:00 2001 From: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com> Date: Tue, 22 Jun 2021 10:19:34 +0800 Subject: [PATCH 130/210] Update model_libs.py --- legacy/pdseg/models/libs/model_libs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/legacy/pdseg/models/libs/model_libs.py b/legacy/pdseg/models/libs/model_libs.py index 0dac259937..7a966c33c3 100644 --- a/legacy/pdseg/models/libs/model_libs.py +++ b/legacy/pdseg/models/libs/model_libs.py @@ -42,7 +42,7 @@ def max_pool(input, kernel, stride, padding): def avg_pool(input, kernel, stride, padding=0): - data = F.avg_pool(input, kernel_size=kernel, stride=stride, padding=padding) + data = F.avg_pool2d(input, kernel_size=kernel, stride=stride, padding=padding) return data From ff2388671059dccafab5375e0f8d74b11419f7ad Mon Sep 17 00:00:00 2001 From: wuzewu Date: Tue, 22 Jun 2021 11:17:08 +0800 Subject: [PATCH 131/210] Update README.md --- benchmark/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/benchmark/README.md b/benchmark/README.md index 0d383cdd17..6c02126f4f 100644 --- a/benchmark/README.md +++ b/benchmark/README.md @@ -1,7 +1,8 @@ # PaddleSeg Benchmark with AMP ## 动态图 -数据集cityscapes 放置于data目录下, 下载链接:https://paddleseg.bj.bcebos.com/dataset/cityscapes.tar + +数据集cityscapes 放置于data目录下 通过 **--fp16** 开启amp训练。 From 462b30911a51342941c99e6caf3cf9bcd3d59e9e Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 22 Jun 2021 15:00:50 +0800 Subject: [PATCH 132/210] evaluation while training --- contrib/matting/core/train.py | 115 +++++++++++++++++----------------- contrib/matting/core/val.py | 3 +- contrib/matting/dataset.py | 19 +++--- contrib/matting/metric.py | 8 ++- contrib/matting/train.py | 25 ++++++-- contrib/matting/val.py | 16 +++-- 6 files changed, 107 insertions(+), 79 deletions(-) diff --git a/contrib/matting/core/train.py b/contrib/matting/core/train.py index 6d9f40c9b2..38d4929588 100644 --- a/contrib/matting/core/train.py +++ b/contrib/matting/core/train.py @@ -17,11 +17,12 @@ from collections import deque import shutil +import numpy as np import paddle import paddle.nn.functional as F from paddleseg.utils import TimeAverager, calculate_eta, resume, logger -# from core.val import evaluate +from core.val import evaluate def loss_computation(logit_dict, label_dict, losses, stage=3): @@ -70,7 +71,8 @@ def train(model, use_vdl=False, losses=None, keep_checkpoint_max=5, - stage=3): + stage=3, + save_begin_iters=None): """ Launch training. 
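For reference, the `fp16` option documented in the paddleseg train.py hunk above enables automatic mixed precision. A minimal sketch of the Paddle 2.x AMP pattern behind it (`model`, `loader`, `loss_fn` and `optimizer` are illustrative placeholders, not names taken from this patch series):

```python
import paddle

# GradScaler guards against fp16 gradient underflow by scaling the loss.
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)

for data in loader:
    with paddle.amp.auto_cast():           # ops run in fp16 where it is safe
        logits = model(data['img'])
        loss = loss_fn(logits, data['label'])
    scaled = scaler.scale(loss)            # scale the loss before backward
    scaled.backward()
    scaler.minimize(optimizer, scaled)     # unscale, skip step on inf/nan, then update
    optimizer.clear_grad()
```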
@@ -130,8 +132,8 @@ def train(model, avg_loss = 0.0 avg_loss_list = [] iters_per_epoch = len(batch_sampler) - # best_pq = -1.0 - # best_model_iter = -1 + best_sad = np.inf + best_model_iter = -1 reader_cost_averager = TimeAverager() batch_cost_averager = TimeAverager() save_models = deque() @@ -205,39 +207,43 @@ def train(model, log_writer.add_scalar('Train/reader_cost', avg_train_reader_cost, iter) - # 增加图片和alpha的显示 - ori_img = data['img'][0] - ori_img = paddle.transpose(ori_img, [1, 2, 0]) - ori_img = (ori_img * 0.5 + 0.5) * 255 - alpha = (data['alpha'][0]).unsqueeze(-1) - trimap = (data['trimap'][0]).unsqueeze(-1) - log_writer.add_image( - tag='ground truth/ori_img', - img=ori_img.numpy(), - step=iter) - log_writer.add_image( - tag='ground truth/alpha', img=alpha.numpy(), step=iter) - log_writer.add_image( - tag='ground truth/trimap', - img=trimap.numpy(), - step=iter) - - alpha_raw = (logit_dict['alpha_raw'][0] * 255).transpose( - [1, 2, 0]) - log_writer.add_image( - tag='prediction/alpha_raw', - img=alpha_raw.numpy(), - step=iter) - - if stage >= 2: - alpha_pred = ( - logit_dict['alpha_pred'][0] * 255).transpose( + if False: #主要为调试时候的观察,真正训练的时候可以省略 + # 增加图片和alpha的显示 + ori_img = data['img'][0] + ori_img = paddle.transpose(ori_img, [1, 2, 0]) + ori_img = (ori_img * 0.5 + 0.5) * 255 + alpha = (data['alpha'][0]).unsqueeze(-1) + trimap = (data['trimap'][0]).unsqueeze(-1) + log_writer.add_image( + tag='ground truth/ori_img', + img=ori_img.numpy(), + step=iter) + log_writer.add_image( + tag='ground truth/alpha', + img=alpha.numpy(), + step=iter) + log_writer.add_image( + tag='ground truth/trimap', + img=trimap.numpy(), + step=iter) + + alpha_raw = ( + logit_dict['alpha_raw'][0] * 255).transpose( [1, 2, 0]) log_writer.add_image( - tag='prediction/alpha_pred', - img=alpha_pred.numpy().astype('uint8'), + tag='prediction/alpha_raw', + img=alpha_raw.numpy(), step=iter) + if stage >= 2: + alpha_pred = ( + logit_dict['alpha_pred'][0] * 255).transpose( + [1, 2, 0]) + log_writer.add_image( + tag='prediction/alpha_pred', + img=alpha_pred.numpy().astype('uint8'), + step=iter) + avg_loss = 0.0 avg_loss_list = [] reader_cost_averager.reset() @@ -257,49 +263,40 @@ def train(model, if len(save_models) > keep_checkpoint_max > 0: model_to_remove = save_models.popleft() shutil.rmtree(model_to_remove) - """ + # eval model + if save_begin_iters is None: + save_begin_iters = iters // 2 if (iter % save_interval == 0 or iter == iters) and ( val_dataset is - not None) and local_rank == 0 and iter > iters // 2: + not None) and local_rank == 0 and iter >= save_begin_iters: num_workers = 1 if num_workers > 0 else 0 - panoptic_results, semantic_results, instance_results = evaluate( + sad, mse = evaluate( model, val_dataset, - threshold=threshold, - nms_kernel=nms_kernel, - top_k=top_k, - num_workers=num_workers, - print_detail=False) - pq = panoptic_results['pan_seg']['All']['pq'] - miou = semantic_results['sem_seg']['mIoU'] - map = instance_results['ins_seg']['mAP'] - map50 = instance_results['ins_seg']['mAP50'] - logger.info( - "[EVAL] PQ: {:.4f}, mIoU: {:.4f}, mAP: {:.4f}, mAP50: {:.4f}" - .format(pq, miou, map, map50)) + num_workers=0, + print_detail=True, + save_results=False) model.train() - # save best model and add evaluate results to vdl + # save best model and add evaluation results to vdl if (iter % save_interval == 0 or iter == iters) and local_rank == 0: - if val_dataset is not None and iter > iters // 2: - if pq > best_pq: - best_pq = pq + if val_dataset is not None and iter >= save_begin_iters: + if sad 
< best_sad: + best_sad = sad best_model_iter = iter best_model_dir = os.path.join(save_dir, "best_model") paddle.save( model.state_dict(), os.path.join(best_model_dir, 'model.pdparams')) logger.info( - '[EVAL] The model with the best validation pq ({:.4f}) was saved at iter {}.' - .format(best_pq, best_model_iter)) + '[EVAL] The model with the best validation sad ({:.4f}) was saved at iter {}.' + .format(best_sad, best_model_iter)) if use_vdl: - log_writer.add_scalar('Evaluate/PQ', pq, iter) - log_writer.add_scalar('Evaluate/mIoU', miou, iter) - log_writer.add_scalar('Evaluate/mAP', map, iter) - log_writer.add_scalar('Evaluate/mAP50', map50, iter) - """ + log_writer.add_scalar('Evaluate/SAD', sad, iter) + log_writer.add_scalar('Evaluate/MSE', mse, iter) + batch_start = time.time() # Sleep for half a second to let dataloader release resources. diff --git a/contrib/matting/core/val.py b/contrib/matting/core/val.py index fcc53f63a8..c15d703970 100644 --- a/contrib/matting/core/val.py +++ b/contrib/matting/core/val.py @@ -128,4 +128,5 @@ def evaluate(model, mse = mse_metric.evaluate() sad = sad_metric.evaluate() - logger.info('MSE: {:.4f}, SAD: {:.4f}'.format(mse, sad)) + logger.info('[EVAL] SAD: {:.4f}, MSE: {:.4f}'.format(sad, mse)) + return sad, mse diff --git a/contrib/matting/dataset.py b/contrib/matting/dataset.py index fd1f26ef15..183bb9fada 100644 --- a/contrib/matting/dataset.py +++ b/contrib/matting/dataset.py @@ -104,12 +104,15 @@ def gen_trimap(alpha, mode='train', eval_kernel=7): if __name__ == '__main__': t = [T.LoadImages(), T.Resize(), T.Normalize()] train_dataset = HumanDataset( - dataset_root='/mnt/chenguowei01/datasets/matting/human_matting/', - transforms=t, - mode='val') - - for i in range(10): - idx = np.random.randint(len(train_dataset)) + dataset_root='data/matting/composition_1k/', transforms=t, mode='val') + print(len(train_dataset)) + for i in range(1065): + # idx = np.random.randint(len(train_dataset)) + idx = i + print(train_dataset.img_list[idx]) data = train_dataset[idx] - trimap = data['trimap'] - cv2.imwrite(str(idx) + '.png', trimap.astype('uint8')) + print(data['img_name'], data['img'].shape, data['alpha'].shape, + data['trimap'].shape) +# print(data) +# trimap = data['trimap'] +# cv2.imwrite(str(idx) + '.png', trimap.astype('uint8')) diff --git a/contrib/matting/metric.py b/contrib/matting/metric.py index c05482703b..52044032ca 100644 --- a/contrib/matting/metric.py +++ b/contrib/matting/metric.py @@ -31,7 +31,7 @@ def update(self, pred, gt, trimap=None): Args: pred (np.ndarray): The value range is [0., 1.]. gt (np.ndarray): The value range is [0, 255]. - trimap (np.ndarray, optional)L The value is in {0, 128, 255}. Default: None. + trimap (np.ndarray, optional) The value is in {0, 128, 255}. Default: None. 
""" if trimap is None: trimap = np.ones_like(gt) * 128 @@ -50,7 +50,8 @@ def update(self, pred, gt, trimap=None): self.count += 1 def evaluate(self): - return self.mse_diffs / self.count + mse = self.mse_diffs / self.count if self.count > 0 else 0 + return mse class SAD(): @@ -88,4 +89,5 @@ def update(self, pred, gt, trimap=None): self.count += 1 def evaluate(self): - return self.sad_diffs / self.count + sad = self.sad_diffs / self.count if self.count > 0 else 0 + return sad diff --git a/contrib/matting/train.py b/contrib/matting/train.py index d54d11abc9..1903af2799 100644 --- a/contrib/matting/train.py +++ b/contrib/matting/train.py @@ -101,6 +101,17 @@ def parse_args(): dest='pretrained_model', help='the pretrained model', type=str) + parser.add_argument( + '--dataset_root', + dest='dataset_root', + help='the dataset root directory', + type=str) + parser.add_argument( + '--save_begin_iters', + dest='save_begin_iters', + help='The iters saving begin', + default=None, + type=int) return parser.parse_args() @@ -120,9 +131,13 @@ def main(args): ] train_dataset = HumanDataset( - dataset_root='data/matting/human_matting/', - transforms=t, - mode='train') + dataset_root=args.dataset_root, transforms=t, mode='train') + if args.do_eval: + t = [T.LoadImages(), T.Normalize()] + val_dataset = HumanDataset( + dataset_root=args.dataset_root, transforms=t, mode='val') + else: + val_dataset = None # loss losses = {'types': [], 'coef': []} @@ -153,6 +168,7 @@ def main(args): train( model=model, train_dataset=train_dataset, + val_dataset=val_dataset, optimizer=optimizer, losses=losses, iters=args.iters, @@ -162,7 +178,8 @@ def main(args): save_interval=args.save_interval, resume_model=args.resume_model, stage=args.stage, - save_dir=args.save_dir) + save_dir=args.save_dir, + save_begin_iters=args.save_begin_iters) if __name__ == '__main__': diff --git a/contrib/matting/val.py b/contrib/matting/val.py index 68ffbd6d92..fa146c62a0 100644 --- a/contrib/matting/val.py +++ b/contrib/matting/val.py @@ -51,6 +51,16 @@ def parse_args(): type=int, required=True, choices=[0, 1, 2, 3]) + parser.add_argument( + '--dataset_root', + dest='dataset_root', + help='the dataset root directory', + type=str) + parser.add_argument( + '--save_results', + dest='save_results', + help='save prediction alphe while evaluation', + action='store_true') return parser.parse_args() @@ -65,9 +75,7 @@ def main(args): t = [T.LoadImages(), T.Normalize()] eval_dataset = HumanDataset( - dataset_root='data/matting/human_matting/', - transforms=t, - mode='val') + dataset_root=args.dataset_root, transforms=t, mode='val') # model backbone = VGG16(input_channels=4) @@ -79,7 +87,7 @@ def main(args): eval_dataset=eval_dataset, num_workers=args.num_workers, save_dir=args.save_dir, - save_results=True) + save_results=args.save_results) if __name__ == '__main__': From 97b46aedfcd79c98e9657591828bacd71b051511 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Wed, 7 Jul 2021 10:43:55 +0800 Subject: [PATCH 133/210] add panoptice segmentation link --- README.md | 3 ++- README_CN.md | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 48e738484b..91a8fed5e4 100644 --- a/README.md +++ b/README.md @@ -97,7 +97,7 @@ Welcome to PaddleSeg! 
PaddleSeg is an end-to-end image segmentation development * [Data Augmentation](./docs/module/data/data.md) * [Loss Description](./docs/module/loss/lovasz_loss.md) * [Tricks](./docs/module/tricks/tricks.md) -* Description of Classical Models +* Description of Classical Models * [DeeplabV3](./docs/models/deeplabv3.md) * [UNet](./docs/models/unet.md) * [OCRNet](./docs/models/ocrnet.md) @@ -143,6 +143,7 @@ python train.py --config configs/quick_start/bisenet_optic_disc_512x512_1k.yml * [HumanSeg](./contrib/HumanSeg) * [Cityscapes SOTA](./contrib/CityscapesSOTA) +* [PanopticSegmentation](./contrib/PanopticDeepLab) ## Feedbacks and Contact * The dynamic version is still under development, if you find any issue or have an idea on new features, please don't hesitate to contact us via [GitHub Issues](https://github.com/PaddlePaddle/PaddleSeg/issues). diff --git a/README_CN.md b/README_CN.md index 4b0576a8d0..6b9557ad0a 100644 --- a/README_CN.md +++ b/README_CN.md @@ -103,6 +103,7 @@ PaddleSeg是基于飞桨[PaddlePaddle](https://www.paddlepaddle.org.cn)开发的 * [人像分割](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.1/contrib/HumanSeg) * [医疗图像](./docs/solution/medical/medical.md) * [遥感分割](https://github.com/PaddlePaddle/PaddleSeg/tree/release/2.1/contrib/remote_sensing) +* [全景分割](./contrib/PanopticDeepLab) ## 代码贡献 From bf50598d86611aecc75acc597397d93010bc2478 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 7 Jul 2021 11:18:45 +0800 Subject: [PATCH 134/210] add resnet backbone --- contrib/matting/model/__init__.py | 1 + contrib/matting/model/dim.py | 53 ++-- contrib/matting/model/resnet_vd.py | 372 +++++++++++++++++++++++++++++ contrib/matting/model/vgg.py | 2 +- contrib/matting/train.py | 25 +- 5 files changed, 417 insertions(+), 36 deletions(-) create mode 100644 contrib/matting/model/resnet_vd.py diff --git a/contrib/matting/model/__init__.py b/contrib/matting/model/__init__.py index 99a5ffe53e..0b49f005ee 100644 --- a/contrib/matting/model/__init__.py +++ b/contrib/matting/model/__init__.py @@ -13,5 +13,6 @@ # limitations under the License. from .vgg import * +from .resnet_vd import * from .dim import DIM from .loss import MRSD diff --git a/contrib/matting/model/dim.py b/contrib/matting/model/dim.py index d214711ef7..8746bdb030 100644 --- a/contrib/matting/model/dim.py +++ b/contrib/matting/model/dim.py @@ -31,20 +31,29 @@ class DIM(nn.Layer): (https://arxiv.org/pdf/1908.07919.pdf). Args: - + backbone: backbone model. + pretrained(str, optional): The path of pretrianed model. Defautl: None. + stage (int, optional): The stage of model. Defautl: 3. + decoder_input_channels(int, optional): The channel os decoder input. Defautl: 512. 
""" def __init__(self, backbone, pretrained=None, - stage=3): + stage=3, + decoder_input_channels=512): super().__init__() self.backbone = backbone self.pretrained = pretrained self.stage = stage - self.decoder = Decoder(input_channels=512) + decoder_output_channels = [64, 128, 256, 512] + if backbone.__class__.__name__ == 'ResNet_vd': + decoder_output_channels = [64, 256, 512, 1024] + self.decoder = Decoder( + input_channels=decoder_input_channels, + output_channels=decoder_output_channels) if self.stage == 2: for param in self.backbone.parameters(): param.stop_gradient = True @@ -58,7 +67,7 @@ def forward(self, inputs): input_shape = inputs['img'].shape[-2:] x = paddle.concat([inputs['img'], inputs['trimap'].unsqueeze(1) / 255], axis=1) - fea_list, ids_list = self.backbone(x) + fea_list = self.backbone(x) # decoder stage up_shape = [] @@ -90,28 +99,6 @@ def init_weight(self): utils.load_entire_model(self, self.pretrained) -# class Up(nn.Layer): -# def __init__(self, input_channels, output_channels): -# super().__init__() -# # self.conv = layers.ConvBNReLU( -# # input_channels, -# # output_channels, -# # kernel_size=5, -# # padding=2, -# # bias_attr=False) - -# self.deconv = nn.Conv2DTranspose( -# input_channels, output_channels, kernel_size=4, stride=2, padding=1) - -# def forward(self, x, output_shape): -# # x = F.interpolate( -# # x, size=output_shape, mode='bilinear', align_corners=False) -# # x = self.conv(x) -# x = self.deconv(x) -# x = F.relu(x) - -# return x - # bilinear interpolate skip connect class Up(nn.Layer): def __init__(self, input_channels, output_channels): @@ -134,15 +121,15 @@ def forward(self, x, skip, output_shape): class Decoder(nn.Layer): - def __init__(self, input_channels): + def __init__(self, input_channels, output_channels=(64, 128, 256, 512)): super().__init__() self.deconv6 = nn.Conv2D( - input_channels, 512, kernel_size=1, bias_attr=False) - self.deconv5 = Up(512, 512) - self.deconv4 = Up(512, 256) - self.deconv3 = Up(256, 128) - self.deconv2 = Up(128, 64) - self.deconv1 = Up(64, 64) + input_channels, input_channels, kernel_size=1, bias_attr=False) + self.deconv5 = Up(input_channels, output_channels[-1]) + self.deconv4 = Up(output_channels[-1], output_channels[-2]) + self.deconv3 = Up(output_channels[-2], output_channels[-3]) + self.deconv2 = Up(output_channels[-3], output_channels[-4]) + self.deconv1 = Up(output_channels[-4], 64) self.alpha_conv = nn.Conv2D( 64, 1, kernel_size=5, padding=2, bias_attr=False) diff --git a/contrib/matting/model/resnet_vd.py b/contrib/matting/model/resnet_vd.py new file mode 100644 index 0000000000..793de43894 --- /dev/null +++ b/contrib/matting/model/resnet_vd.py @@ -0,0 +1,372 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddleseg.cvlibs import manager +from paddleseg.models import layers +from paddleseg.utils import utils + +__all__ = [ + "ResNet18_vd", "ResNet34_vd", "ResNet50_vd", "ResNet101_vd", "ResNet152_vd" +] + +# delete the compoment in manager.BACKBONED if existing. +for i in __all__: + if i in manager.BACKBONES._components_dict: + manager.BACKBONES._components_dict.pop(i) + + +class ConvBNLayer(nn.Layer): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + dilation=1, + groups=1, + is_vd_mode=False, + act=None, + ): + super(ConvBNLayer, self).__init__() + + self.is_vd_mode = is_vd_mode + self._pool2d_avg = nn.AvgPool2D( + kernel_size=2, stride=2, padding=0, ceil_mode=True) + self._conv = nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=(kernel_size - 1) // 2 if dilation == 1 else 0, + dilation=dilation, + groups=groups, + bias_attr=False) + + self._batch_norm = layers.SyncBatchNorm(out_channels) + self._act_op = layers.Activation(act=act) + + def forward(self, inputs): + if self.is_vd_mode: + inputs = self._pool2d_avg(inputs) + y = self._conv(inputs) + y = self._batch_norm(y) + y = self._act_op(y) + + return y + + +class BottleneckBlock(nn.Layer): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False, + dilation=1): + super(BottleneckBlock, self).__init__() + + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + act='relu') + + self.dilation = dilation + + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu', + dilation=dilation) + self.conv2 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels * 4, + kernel_size=1, + act=None) + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels * 4, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first or stride == 1 else True) + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + + #################################################################### + # If given dilation rate > 1, using corresponding padding. + # The performance drops down without the follow padding. 
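        # (ConvBNLayer builds the 3x3 conv with padding=0 whenever dilation != 1,
        # so the explicit F.pad below supplies the `dilation` pixels of padding
        # per side that a 3x3 dilated conv needs to preserve the spatial size.)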
+ if self.dilation > 1: + padding = self.dilation + y = F.pad(y, [padding, padding, padding, padding]) + ##################################################################### + + conv1 = self.conv1(y) + conv2 = self.conv2(conv1) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + + y = paddle.add(x=short, y=conv2) + y = F.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + in_channels, + out_channels, + stride, + shortcut=True, + if_first=False): + super(BasicBlock, self).__init__() + self.stride = stride + self.conv0 = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + act='relu') + self.conv1 = ConvBNLayer( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=3, + act=None) + + if not shortcut: + self.short = ConvBNLayer( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + is_vd_mode=False if if_first else True) + + self.shortcut = shortcut + + def forward(self, inputs): + y = self.conv0(inputs) + conv1 = self.conv1(y) + + if self.shortcut: + short = inputs + else: + short = self.short(inputs) + y = paddle.add(x=short, y=conv1) + y = F.relu(y) + + return y + + +class ResNet_vd(nn.Layer): + """ + The ResNet_vd implementation based on PaddlePaddle. + + The original article refers to Jingdong + Tong He, et, al. "Bag of Tricks for Image Classification with Convolutional Neural Networks" + (https://arxiv.org/pdf/1812.01187.pdf). + + Args: + layers (int, optional): The layers of ResNet_vd. The supported layers are (18, 34, 50, 101, 152, 200). Default: 50. + output_stride (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 8. + multi_grid (tuple|list, optional): The grid of stage4. Defult: (1, 1, 1). + pretrained (str, optional): The path of pretrained model. 
+ + """ + + def __init__(self, + input_channels=3, + layers=50, + output_stride=8, + multi_grid=(1, 1, 1), + pretrained=None): + super(ResNet_vd, self).__init__() + + self.conv1_logit = None # for gscnn shape stream + self.layers = layers + supported_layers = [18, 34, 50, 101, 152, 200] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format( + supported_layers, layers) + + if layers == 18: + depth = [2, 2, 2, 2] + elif layers == 34 or layers == 50: + depth = [3, 4, 6, 3] + elif layers == 101: + depth = [3, 4, 23, 3] + elif layers == 152: + depth = [3, 8, 36, 3] + elif layers == 200: + depth = [3, 12, 48, 3] + num_channels = [64, 256, 512, 1024 + ] if layers >= 50 else [64, 64, 128, 256] + num_filters = [64, 128, 256, 512] + + # for channels of four returned stages + self.feat_channels = [c * 4 for c in num_filters + ] if layers >= 50 else num_filters + + dilation_dict = None + if output_stride == 8: + dilation_dict = {2: 2, 3: 4} + elif output_stride == 16: + dilation_dict = {3: 2} + + self.conv1_1 = ConvBNLayer( + in_channels=input_channels, + out_channels=32, + kernel_size=3, + stride=2, + act='relu') + self.conv1_2 = ConvBNLayer( + in_channels=32, + out_channels=32, + kernel_size=3, + stride=1, + act='relu') + self.conv1_3 = ConvBNLayer( + in_channels=32, + out_channels=64, + kernel_size=3, + stride=1, + act='relu') + self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) + + # self.block_list = [] + self.stage_list = [] + if layers >= 50: + for block in range(len(depth)): + shortcut = False + block_list = [] + for i in range(depth[block]): + if layers in [101, 152] and block == 2: + if i == 0: + conv_name = "res" + str(block + 2) + "a" + else: + conv_name = "res" + str(block + 2) + "b" + str(i) + else: + conv_name = "res" + str(block + 2) + chr(97 + i) + + ############################################################################### + # Add dilation rate for some segmentation tasks, if dilation_dict is not None. 
+ dilation_rate = dilation_dict[ + block] if dilation_dict and block in dilation_dict else 1 + + # Actually block here is 'stage', and i is 'block' in 'stage' + # At the stage 4, expand the the dilation_rate if given multi_grid + if block == 3: + dilation_rate = dilation_rate * multi_grid[i] + ############################################################################### + + bottleneck_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BottleneckBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block] * 4, + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 + and dilation_rate == 1 else 1, + shortcut=shortcut, + if_first=block == i == 0, + dilation=dilation_rate)) + + block_list.append(bottleneck_block) + shortcut = True + self.stage_list.append(block_list) + else: + for block in range(len(depth)): + shortcut = False + block_list = [] + for i in range(depth[block]): + conv_name = "res" + str(block + 2) + chr(97 + i) + basic_block = self.add_sublayer( + 'bb_%d_%d' % (block, i), + BasicBlock( + in_channels=num_channels[block] + if i == 0 else num_filters[block], + out_channels=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + shortcut=shortcut, + if_first=block == i == 0)) + block_list.append(basic_block) + shortcut = True + self.stage_list.append(block_list) + + self.pretrained = pretrained + self.init_weight() + + def forward(self, inputs): + y = self.conv1_1(inputs) + y = self.conv1_2(y) + y = self.conv1_3(y) + self.conv1_logit = y.clone() + y = self.pool2d_max(y) + + # A feature list saves the output feature map of each stage. + feat_list = [] + feat_list.append(y) + for stage in self.stage_list: + for block in stage: + y = block(y) + feat_list.append(y) + + return feat_list + + def init_weight(self): + utils.load_pretrained_model(self, self.pretrained) + + +@manager.BACKBONES.add_component +def ResNet18_vd(**args): + model = ResNet_vd(layers=18, **args) + return model + + +def ResNet34_vd(**args): + model = ResNet_vd(layers=34, **args) + return model + + +@manager.BACKBONES.add_component +def ResNet50_vd(**args): + model = ResNet_vd(layers=50, **args) + return model + + +@manager.BACKBONES.add_component +def ResNet101_vd(**args): + model = ResNet_vd(layers=101, **args) + return model + + +def ResNet152_vd(**args): + model = ResNet_vd(layers=152, **args) + return model + + +def ResNet200_vd(**args): + model = ResNet_vd(layers=200, **args) + return model diff --git a/contrib/matting/model/vgg.py b/contrib/matting/model/vgg.py index f490ad88ec..4adee6f7b1 100644 --- a/contrib/matting/model/vgg.py +++ b/contrib/matting/model/vgg.py @@ -135,7 +135,7 @@ def forward(self, inputs): ids_list.append(ids) x = F.relu(self._conv_6(x)) fea_list.append(x) - return fea_list, ids_list + return fea_list def init_weight(self): if self.pretrained is not None: diff --git a/contrib/matting/train.py b/contrib/matting/train.py index 1903af2799..4be6316141 100644 --- a/contrib/matting/train.py +++ b/contrib/matting/train.py @@ -13,6 +13,7 @@ # limitations under the License. import argparse +import os from core import train from model import * @@ -112,6 +113,13 @@ def parse_args(): help='The iters saving begin', default=None, type=int) + parser.add_argument( + '--backbone', + dest='backbone', + help= + 'The backbone of model. 
It is one of (VGG16, ResNet18_vd, ResNet34_vd, ResNet50_vd, ResNet101_vd, ResNet152_vd)', + required=True, + type=str) return parser.parse_args() @@ -152,10 +160,22 @@ def main(args): losses['coef'].append(1) # model + #bulid backbone # vgg16预训练模型地址: 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/VGG16_pretrained.pdparams') - backbone = VGG16(input_channels=4, pretrained='./VGG16_pretrained.pdparams') + pretrained_model = './pretrained_models/' + args.backbone + '_pretrained.pdparams' + if not os.path.exists(pretrained_model): + pretrained_model = None + backbone = eval(args.backbone)( + input_channels=4, pretrained=pretrained_model) + + decoder_input_channels = 512 + if args.backbone in ['ResNet50_vd', 'ResNet101_vd', 'ResNet152_vd']: + decoder_input_channels = 2048 model = DIM( - backbone=backbone, stage=args.stage, pretrained=args.pretrained_model) + backbone=backbone, + stage=args.stage, + pretrained=args.pretrained_model, + decoder_input_channels=decoder_input_channels) # optimizer # 简单的先构建一个优化器 @@ -176,6 +196,7 @@ def main(args): num_workers=args.num_workers, use_vdl=args.use_vdl, save_interval=args.save_interval, + log_iters=args.log_iters, resume_model=args.resume_model, stage=args.stage, save_dir=args.save_dir, From de2ebf1fac91e8b21a76c5736f335b0c709cd53e Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Thu, 8 Jul 2021 17:17:28 +0800 Subject: [PATCH 135/210] update shape --- paddleseg/models/deeplab.py | 4 ++-- paddleseg/models/layers/pyramid_pool.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/paddleseg/models/deeplab.py b/paddleseg/models/deeplab.py index cd17894bdb..c12ae3e820 100644 --- a/paddleseg/models/deeplab.py +++ b/paddleseg/models/deeplab.py @@ -82,9 +82,9 @@ def forward(self, x): feat_list = self.backbone(x) logit_list = self.head(feat_list) if self.data_format == 'NCHW': - ori_shape = x.shape[2:] + ori_shape = paddle.shape(x)[2:] else: - ori_shape = x.shape[1:3] + ori_shape = paddle.shape(x)[1:3] return [ F.interpolate( logit, diff --git a/paddleseg/models/layers/pyramid_pool.py b/paddleseg/models/layers/pyramid_pool.py index ebf433643a..b1cbc0c71a 100644 --- a/paddleseg/models/layers/pyramid_pool.py +++ b/paddleseg/models/layers/pyramid_pool.py @@ -88,10 +88,10 @@ def __init__(self, def forward(self, x): outputs = [] if self.data_format == 'NCHW': - interpolate_shape = x.shape[2:] + interpolate_shape = paddle.shape(x)[2:] axis = 1 else: - interpolate_shape = x.shape[1:3] + interpolate_shape = paddle.shape(x)[1:3] axis = -1 for block in self.aspp_blocks: y = block(x) From 258a4183a656e267f75f825b772c919c1c106890 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Fri, 9 Jul 2021 08:52:37 +0800 Subject: [PATCH 136/210] add grad and conn evaluation --- contrib/matting/core/train.py | 4 +- contrib/matting/core/val.py | 14 +++- contrib/matting/metric.py | 148 ++++++++++++++++++++++++++++++++++ 3 files changed, 163 insertions(+), 3 deletions(-) diff --git a/contrib/matting/core/train.py b/contrib/matting/core/train.py index 38d4929588..97a7c25e99 100644 --- a/contrib/matting/core/train.py +++ b/contrib/matting/core/train.py @@ -271,7 +271,7 @@ def train(model, val_dataset is not None) and local_rank == 0 and iter >= save_begin_iters: num_workers = 1 if num_workers > 0 else 0 - sad, mse = evaluate( + sad, mse, grad, conn = evaluate( model, val_dataset, num_workers=0, @@ -296,6 +296,8 @@ def train(model, if use_vdl: log_writer.add_scalar('Evaluate/SAD', sad, iter) log_writer.add_scalar('Evaluate/MSE', mse, iter) + 
log_writer.add_scalar('Evaluate/Grad', grad, iter) + log_writer.add_scalar('Evaluate/Conn', conn, iter) batch_start = time.time() diff --git a/contrib/matting/core/val.py b/contrib/matting/core/val.py index c15d703970..44ad190f48 100644 --- a/contrib/matting/core/val.py +++ b/contrib/matting/core/val.py @@ -78,6 +78,8 @@ def evaluate(model, total_iters = len(loader) mse_metric = metric.MSE() sad_metric = metric.SAD() + grad_metric = metric.Grad() + conn_metric = metric.Conn() if print_detail: logger.info( @@ -102,6 +104,10 @@ def evaluate(model, trimap = data['trimap'].numpy() mse_metric.update(alpha_pred.squeeze(1), alpha_gt, trimap) sad_metric.update(alpha_pred.squeeze(1), alpha_gt, trimap) + grad_metric.update(alpha_pred.squeeze(), alpha_gt.squeeze(), + trimap.squeeze()) + conn_metric.update(alpha_pred.squeeze(), alpha_gt.squeeze(), + trimap.squeeze()) if save_results: alpha_pred_one = alpha_pred[0].squeeze() @@ -127,6 +133,10 @@ def evaluate(model, # 指标输出 mse = mse_metric.evaluate() sad = sad_metric.evaluate() + grad = grad_metric.evaluate() + conn = conn_metric.evaluate() - logger.info('[EVAL] SAD: {:.4f}, MSE: {:.4f}'.format(sad, mse)) - return sad, mse + logger.info( + '[EVAL] SAD: {:.4f}, MSE: {:.4f}, Grad: {:.4f}, Conn: {:.4f}'.format( + sad, mse, grad, conn)) + return sad, mse, grad, conn diff --git a/contrib/matting/metric.py b/contrib/matting/metric.py index 52044032ca..565ddbebe6 100644 --- a/contrib/matting/metric.py +++ b/contrib/matting/metric.py @@ -12,7 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Grad and Conn is refer to https://github.com/yucornetto/MGMatting/blob/main/code-base/utils/evaluate.py +# Output of `Grad` is sightly different from the MATLAB version provided by Adobe (less than 0.1%) +# Output of `Conn` is smaller than the MATLAB version (~5%, maybe MATLAB has a different algorithm) +# So do not report results calculated by these functions in your paper. +# Evaluate your inference with the MATLAB file `DIM_evaluation_code/evaluate.m`. + +import numpy as np +import scipy.ndimage import numpy as np +from skimage.measure import label class MSE(): @@ -91,3 +100,142 @@ def update(self, pred, gt, trimap=None): def evaluate(self): sad = self.sad_diffs / self.count if self.count > 0 else 0 return sad + + +class Grad(): + """ + Only calculate the unknown region if trimap provided. + + Refer to: https://github.com/yucornetto/MGMatting/blob/main/code-base/utils/evaluate.py#L46 + """ + + def __init__(self): + self.grad_diffs = 0 + self.count = 0 + + def gauss(self, x, sigma): + y = np.exp(-x**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi)) + return y + + def dgauss(self, x, sigma): + y = -x * self.gauss(x, sigma) / (sigma**2) + return y + + def gaussgradient(self, im, sigma): + epsilon = 1e-2 + halfsize = np.ceil(sigma * np.sqrt( + -2 * np.log(np.sqrt(2 * np.pi) * sigma * epsilon))).astype(np.int32) + size = 2 * halfsize + 1 + hx = np.zeros((size, size)) + for i in range(0, size): + for j in range(0, size): + u = [i - halfsize, j - halfsize] + hx[i, j] = self.gauss(u[0], sigma) * self.dgauss(u[1], sigma) + + hx = hx / np.sqrt(np.sum(np.abs(hx) * np.abs(hx))) + hy = hx.transpose() + + gx = scipy.ndimage.convolve(im, hx, mode='nearest') + gy = scipy.ndimage.convolve(im, hy, mode='nearest') + + return gx, gy + + def update(self, pred, gt, trimap=None): + """ + update metric. + + Args: + pred (np.ndarray): The value range is [0., 1.]. + gt (np.ndarray): The value range is [0, 255]. 
+ trimap (np.ndarray, optional)L The value is in {0, 128, 255}. Default: None. + """ + if trimap is None: + trimap = np.ones_like(gt) * 128 + if not (pred.shape == gt.shape == trimap.shape): + raise ValueError( + 'The shape of `pred`, `gt` and `trimap` should be equal. ' + 'but they are {}, {} and {}'.format(pred.shape, gt.shape, + trimap.shape)) + + mask = trimap == 128 + gt = gt / 255. + + pred_x, pred_y = self.gaussgradient(pred, 1.4) + gt_x, gt_y = self.gaussgradient(gt, 1.4) + + pred_amp = np.sqrt(pred_x**2 + pred_y**2) + gt_amp = np.sqrt(gt_x**2 + gt_y**2) + + error_map = (pred_amp - gt_amp)**2 + diff = np.sum(error_map[mask]) + + self.grad_diffs += diff / 1000. + self.count += 1 + + def evaluate(self): + grad = self.grad_diffs / self.count if self.count > 0 else 0 + return grad + + +class Conn(): + """ + Only calculate the unknown region if trimap provided. + + Refer to: https://github.com/yucornetto/MGMatting/blob/main/code-base/utils/evaluate.py#L69 + """ + + def __init__(self): + self.conn_diffs = 0 + self.count = 0 + + def getLargestCC(self, segmentation): + labels = label(segmentation, connectivity=1) + largestCC = labels == np.argmax(np.bincount(labels.flat)) + return largestCC + + def update(self, pred, gt, trimap=None, step=0.1): + """ + update metric. + + Args: + pred (np.ndarray): The value range is [0., 1.]. + gt (np.ndarray): The value range is [0, 255]. + trimap (np.ndarray, optional)L The value is in {0, 128, 255}. Default: None. + """ + if trimap is None: + trimap = np.ones_like(gt) * 128 + if not (pred.shape == gt.shape == trimap.shape): + raise ValueError( + 'The shape of `pred`, `gt` and `trimap` should be equal. ' + 'but they are {}, {} and {}'.format(pred.shape, gt.shape, + trimap.shape)) + + mask = trimap == 128 + gt = gt / 255. + h, w = pred.shape + + thresh_steps = list(np.arange(0, 1 + step, step)) + l_map = np.ones_like(pred, dtype=np.float) * -1 + for i in range(1, len(thresh_steps)): + pred_alpha_thresh = (pred >= thresh_steps[i]).astype(np.int) + gt_alpha_thresh = (gt >= thresh_steps[i]).astype(np.int) + + omega = self.getLargestCC( + pred_alpha_thresh * gt_alpha_thresh).astype(np.int) + flag = ((l_map == -1) & (omega == 0)).astype(np.int) + l_map[flag == 1] = thresh_steps[i - 1] + + l_map[l_map == -1] = 1 + + pred_d = pred - l_map + gt_d = gt - l_map + pred_phi = 1 - pred_d * (pred_d >= 0.15).astype(np.int) + gt_phi = 1 - gt_d * (gt_d >= 0.15).astype(np.int) + diff = np.sum(np.abs(pred_phi - gt_phi)[mask]) + + self.conn_diffs += diff / 1000. + self.count += 1 + + def evaluate(self): + conn = self.conn_diffs / self.count if self.count > 0 else 0 + return conn From f1b79c698508beb686cd651af2165ae801e96399 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 12 Jul 2021 21:18:06 +0800 Subject: [PATCH 137/210] update loss.py --- contrib/matting/model/loss.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/contrib/matting/model/loss.py b/contrib/matting/model/loss.py index c85eceaad0..5adca48e31 100644 --- a/contrib/matting/model/loss.py +++ b/contrib/matting/model/loss.py @@ -34,14 +34,18 @@ def forward(self, logit, label, mask=None): label (Tensor): Label tensor, the data type is float32, float64. The shape should equal to logit. mask (Tensor): The mask where the loss valid. 
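
        Returns:
            Tensor: sqrt((logit - label)**2 + eps), averaged over `mask` when a
                mask is given, otherwise averaged over all elements.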
""" + if len(label.shape) == 3: + label = label.unsqueeze(1) sd = paddle.square(logit - label) loss = paddle.sqrt(sd + self.eps) - mask = mask.astype('float32') - if len(mask.shape) == 3: - mask = mask.unsqueeze(1) - loss = loss * mask - loss = loss.sum() / (mask.sum() + self.eps) - - mask.stop_gradient = True + if mask is not None: + mask = mask.astype('float32') + if len(mask.shape) == 3: + mask = mask.unsqueeze(1) + loss = loss * mask + loss = loss.sum() / (mask.sum() + self.eps) + mask.stop_gradient = True + else: + loss = loss.mean() return loss From f43cf04546c4da71c3ac240cad36a75816a2e288 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 13 Jul 2021 17:43:08 +0800 Subject: [PATCH 138/210] rm grad and conn evaluation --- contrib/matting/core/train.py | 4 +--- contrib/matting/core/val.py | 14 ++------------ 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/contrib/matting/core/train.py b/contrib/matting/core/train.py index 97a7c25e99..38d4929588 100644 --- a/contrib/matting/core/train.py +++ b/contrib/matting/core/train.py @@ -271,7 +271,7 @@ def train(model, val_dataset is not None) and local_rank == 0 and iter >= save_begin_iters: num_workers = 1 if num_workers > 0 else 0 - sad, mse, grad, conn = evaluate( + sad, mse = evaluate( model, val_dataset, num_workers=0, @@ -296,8 +296,6 @@ def train(model, if use_vdl: log_writer.add_scalar('Evaluate/SAD', sad, iter) log_writer.add_scalar('Evaluate/MSE', mse, iter) - log_writer.add_scalar('Evaluate/Grad', grad, iter) - log_writer.add_scalar('Evaluate/Conn', conn, iter) batch_start = time.time() diff --git a/contrib/matting/core/val.py b/contrib/matting/core/val.py index 44ad190f48..c15d703970 100644 --- a/contrib/matting/core/val.py +++ b/contrib/matting/core/val.py @@ -78,8 +78,6 @@ def evaluate(model, total_iters = len(loader) mse_metric = metric.MSE() sad_metric = metric.SAD() - grad_metric = metric.Grad() - conn_metric = metric.Conn() if print_detail: logger.info( @@ -104,10 +102,6 @@ def evaluate(model, trimap = data['trimap'].numpy() mse_metric.update(alpha_pred.squeeze(1), alpha_gt, trimap) sad_metric.update(alpha_pred.squeeze(1), alpha_gt, trimap) - grad_metric.update(alpha_pred.squeeze(), alpha_gt.squeeze(), - trimap.squeeze()) - conn_metric.update(alpha_pred.squeeze(), alpha_gt.squeeze(), - trimap.squeeze()) if save_results: alpha_pred_one = alpha_pred[0].squeeze() @@ -133,10 +127,6 @@ def evaluate(model, # 指标输出 mse = mse_metric.evaluate() sad = sad_metric.evaluate() - grad = grad_metric.evaluate() - conn = conn_metric.evaluate() - logger.info( - '[EVAL] SAD: {:.4f}, MSE: {:.4f}, Grad: {:.4f}, Conn: {:.4f}'.format( - sad, mse, grad, conn)) - return sad, mse, grad, conn + logger.info('[EVAL] SAD: {:.4f}, MSE: {:.4f}'.format(sad, mse)) + return sad, mse From 63b29ff91f1f012df74cdeb7dd6720d49e50e0a6 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Fri, 23 Jul 2021 11:16:12 +0800 Subject: [PATCH 139/210] rm README.md --- benchmark/README.md | 65 --------------------------------------------- 1 file changed, 65 deletions(-) delete mode 100644 benchmark/README.md diff --git a/benchmark/README.md b/benchmark/README.md deleted file mode 100644 index 6c02126f4f..0000000000 --- a/benchmark/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# PaddleSeg Benchmark with AMP - -## 动态图 - -数据集cityscapes 放置于data目录下 - -通过 **--fp16** 开启amp训练。 - -单机单卡使用如下命令进行训练: -``` -export CUDA_VISIBLE_DEVICES=0 -python train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 -``` - -单机多卡使用如下命令进行训练: -``` -export CUDA_VISIBLE_DEVICES=0,1 
-python -m paddle.distributed.launch train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 -# fleet开启多卡训练 -fleetrun train.py --config benchmark/hrnet.yml --iters 2000 --log_iters 10 --fp16 -``` - -DeepLabv3+ 模型的配置文件为: -benchmark/deeplabv3p.yml - -**注意** - -* 动态图中batch_size设置为每卡的batch_size -* DeepLabv3+ 支持通过传入 **--data_format NHWC**进行‘NHWC’数据格式的训练。 - - - -## 静态图 -数据集cityscapes 放置于legacy/dataset目录下 - -通过 **MODEL.FP16 True** 开启amp训练 -单机单卡使用如下命令进行训练: -``` -cd legacy -export CUDA_VISIBLE_DEVICES=0 -python pdseg/train.py --cfg configs/benchmark/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 2 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True -``` - -单机多卡使用如下命令进行训练: -``` -export CUDA_VISIBLE_DEVICES=0,1 -fleetrun pdseg/train.py --cfg configs/benchmark/hrnetw18_cityscapes_1024x512_215.yaml --use_gpu --use_mpio --log_steps 10 BATCH_SIZE 4 SOLVER.NUM_EPOCHS 3 MODEL.FP16 True -``` - -deeplabv3p模型的配置文件为: -configs/benchmark/deeplabv3p_resnet50_vd_cityscapes.yaml - -**注意** -静态图中的BATCH_SIZE为总的batch size。 - -## 竞品 -竞品为[mmsegmentation](https://github.com/open-mmlab/mmsegmentation) - -对应竞品配置文件为: - -configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py - -configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py - -相关执行方式请参考其官方仓库。 From 41f41c78d8aed8cbf3d8ae6a0a24dee8173f1a18 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 26 Jul 2021 09:27:37 +0800 Subject: [PATCH 140/210] update metric --- contrib/matting/core/val.py | 12 +++++++----- contrib/matting/metric.py | 3 +++ 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/contrib/matting/core/val.py b/contrib/matting/core/val.py index c15d703970..9a5f8f2f43 100644 --- a/contrib/matting/core/val.py +++ b/contrib/matting/core/val.py @@ -34,7 +34,7 @@ def save_alpha_pred(alpha, path): if not os.path.exists(dirname): os.makedirs(dirname) - alpha = (alpha * 255).astype('uint8') + alpha = (alpha).astype('uint8') cv2.imwrite(path, alpha) @@ -99,14 +99,16 @@ def evaluate(model, else: alpha_pred = logit_dict['alpha_pred'].numpy() alpha_gt = data['alpha'].numpy() - trimap = data['trimap'].numpy() - mse_metric.update(alpha_pred.squeeze(1), alpha_gt, trimap) - sad_metric.update(alpha_pred.squeeze(1), alpha_gt, trimap) + trimap = data['trimap'].numpy().astype('uint8') + alpha_pred = alpha_pred.squeeze(1) + alpha_pred = (alpha_pred * 255) + mse_metric.update(alpha_pred, alpha_gt, trimap) + sad_metric.update(alpha_pred, alpha_gt, trimap) if save_results: alpha_pred_one = alpha_pred[0].squeeze() trimap = trimap.squeeze().astype('uint8') - alpha_pred_one[trimap == 255] = 1 + alpha_pred_one[trimap == 255] = 255 alpha_pred_one[trimap == 0] = 0 save_alpha_pred(alpha_pred_one, os.path.join(save_dir, data['img_name'][0])) diff --git a/contrib/matting/metric.py b/contrib/matting/metric.py index 565ddbebe6..9177d72fb9 100644 --- a/contrib/matting/metric.py +++ b/contrib/matting/metric.py @@ -51,6 +51,7 @@ def update(self, pred, gt, trimap=None): trimap.shape)) mask = trimap == 128 pixels = float(mask.sum()) + pred = pred / 255. gt = gt / 255. diff = (pred - gt) * mask mse_diff = (diff**2).sum() / pixels if pixels > 0 else 0 @@ -90,9 +91,11 @@ def update(self, pred, gt, trimap=None): trimap.shape)) mask = trimap == 128 + pred = pred / 255. gt = gt / 255. 
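        # The absolute error below is summed over the unknown (trimap == 128)
        # region only; the `/ 1000` rescaling added just after reports SAD in
        # the conventional "thousands" unit used in matting evaluation.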
diff = (pred - gt) * mask sad_diff = (np.abs(diff)).sum() + sad_diff /= 1000 self.sad_diffs += sad_diff self.count += 1 From de60dee27505d40fa2f73ace52adb2419dd54e92 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 26 Jul 2021 17:39:32 +0800 Subject: [PATCH 141/210] update gen_dataset.py --- contrib/matting/tools/gen_dataset/gen_dataset.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/matting/tools/gen_dataset/gen_dataset.py b/contrib/matting/tools/gen_dataset/gen_dataset.py index 95086ccaf4..4871dbea14 100644 --- a/contrib/matting/tools/gen_dataset/gen_dataset.py +++ b/contrib/matting/tools/gen_dataset/gen_dataset.py @@ -102,6 +102,8 @@ def composite(fg, alpha, ori_bg): resize_w = math.ceil(ori_bg_w * ratio) bg = cv2.resize( ori_bg, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR) + else: + bg = ori_bg bg = bg[0:fg_h, 0:fg_w, :] alpha = alpha / 255 From ce052fc6b3b87efd4e93953f00aeec378f4884c0 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 26 Jul 2021 17:52:56 +0800 Subject: [PATCH 142/210] add human matte dataset --- contrib/matting/{ => dataset}/dataset.py | 34 ++-- .../matting/dataset/human_matte_dataset.py | 185 ++++++++++++++++++ contrib/matting/transforms.py | 6 +- 3 files changed, 210 insertions(+), 15 deletions(-) rename contrib/matting/{ => dataset}/dataset.py (86%) create mode 100644 contrib/matting/dataset/human_matte_dataset.py diff --git a/contrib/matting/dataset.py b/contrib/matting/dataset/dataset.py similarity index 86% rename from contrib/matting/dataset.py rename to contrib/matting/dataset/dataset.py index 183bb9fada..3f9547f326 100644 --- a/contrib/matting/dataset.py +++ b/contrib/matting/dataset/dataset.py @@ -23,7 +23,25 @@ import transforms as T -class HumanDataset(paddle.io.Dataset): +class Dataset(paddle.io.Dataset): + """ + The dataset folder should be as follow: + root + |__train + | |__image + | |__fg + | |__bg + | |__alpha + | + |__val + | |__image + | |__fg + | |__bg + | |__alpha + | |__[trimap] + + """ + def __init__( self, dataset_root, @@ -103,16 +121,6 @@ def gen_trimap(alpha, mode='train', eval_kernel=7): if __name__ == '__main__': t = [T.LoadImages(), T.Resize(), T.Normalize()] - train_dataset = HumanDataset( - dataset_root='data/matting/composition_1k/', transforms=t, mode='val') + train_dataset = Dataset( + dataset_root='data/matting/human_matte/', transforms=t, mode='train') print(len(train_dataset)) - for i in range(1065): - # idx = np.random.randint(len(train_dataset)) - idx = i - print(train_dataset.img_list[idx]) - data = train_dataset[idx] - print(data['img_name'], data['img'].shape, data['alpha'].shape, - data['trimap'].shape) -# print(data) -# trimap = data['trimap'] -# cv2.imwrite(str(idx) + '.png', trimap.astype('uint8')) diff --git a/contrib/matting/dataset/human_matte_dataset.py b/contrib/matting/dataset/human_matte_dataset.py new file mode 100644 index 0000000000..09c30b08ea --- /dev/null +++ b/contrib/matting/dataset/human_matte_dataset.py @@ -0,0 +1,185 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import math
+
+import cv2
+import numpy as np
+import random
+import paddle
+
+from utils import get_files
+import transforms as T
+
+
+class HumanMatteDataset(paddle.io.Dataset):
+    """
+    human_matting
+    |__Composition-1k(origin dataset name)
+    |    |__train
+    |    |    |__fg
+    |    |    |__alpha
+    |    |__val
+    |       |__fg
+    |       |__alpha
+    |       |__trimap
+    |__Distinctions-646
+    |
+    |__bg (background)
+    |    |__coco_17
+    |    |__pascal_voc12
+    |
+    |__train.txt
+    |__val.txt
+
+
+    Args:
+        dataset_root (str): The root path of the dataset.
+        transforms (list): Transforms for image.
+        mode (str, optional): Which part of the dataset to use. It is one of ('train', 'val', 'trainval'). Default: 'train'.
+        train_file (str|list, optional): File list used for training. Each line should be `foreground_image.png background_image.png`
+            or `foreground_image.png`. It should be provided if mode is 'train'. Default: None.
+        val_file (str|list, optional): File list used for evaluation. Each line should be `foreground_image.png background_image.png`
+            or `foreground_image.png`. It should be provided if mode is 'val'. Default: None.
+
+    """
+
+    def __init__(self,
+                 dataset_root,
+                 transforms,
+                 mode='train',
+                 train_file=None,
+                 val_file=None):
+        super().__init__()
+        self.dataset_root = dataset_root
+        self.transforms = T.Compose(transforms)
+        self.mode = mode
+
+        # check file
+        if mode == 'train' or mode == 'trainval':
+            if train_file is None:
+                raise ValueError(
+                    "When `mode` is 'train', `train_file` must be provided!")
+            if isinstance(train_file, str):
+                train_file = [train_file]
+            file_list = train_file
+
+        if mode == 'val' or mode == 'trainval':
+            if val_file is None:
+                raise ValueError(
+                    "When `mode` is 'val', `val_file` must be provided!")
+            if isinstance(val_file, str):
+                val_file = [val_file]
+            file_list = val_file
+
+        if mode == 'trainval':
+            file_list = train_file + val_file
+
+        # read file
+        self.fg_bg_list = []
+        for file in file_list:
+            file = os.path.join(dataset_root, file)
+            with open(file, 'r') as f:
+                lines = f.readlines()
+                for line in lines:
+                    line = line.strip()
+                    self.fg_bg_list.append(line)
+
+    def __getitem__(self, idx):
+        data = {}
+        fg_bg_file = self.fg_bg_list[idx]
+        fg_bg_file = fg_bg_file.split(' ')
+        fg_file = os.path.join(self.dataset_root, fg_bg_file[0])
+        alpha_file = fg_file.replace('fg', 'alpha')
+        fg = cv2.imread(fg_file)
+        alpha = cv2.imread(alpha_file, 0)
+        data['alpha'] = alpha
+        data['gt_fields'] = ['alpha']
+
+        if len(fg_bg_file) == 2:
+            bg_file = os.path.join(self.dataset_root, fg_bg_file[1])
+            bg = cv2.imread(bg_file)
+            data['img'], data['bg'] = self.composite(fg, alpha, bg)
+            data['fg'] = fg
+            if self.mode in ['train', 'trainval']:
+                data['gt_fields'].append('fg')
+                data['gt_fields'].append('bg')
+
+        else:
+            data['img'] = data['fg']
+
+        data['trans_info'] = []  # Record shape change information
+        data = self.transforms(data)
+        data['img'] = data['img'].astype('float32')
+        for key in data.get('gt_fields', []):
+            data[key] = data[key].astype('float32')
+
+        return data
+
+    def __len__(self):
+        return len(self.fg_bg_list)
+
+    def composite(self, fg, alpha, ori_bg):
+        fg_h, fg_w = fg.shape[:2]
+        ori_bg_h, ori_bg_w = ori_bg.shape[:2]
+
+        wratio = fg_w / ori_bg_w
+        hratio = fg_h / ori_bg_h
+        ratio = wratio if wratio > hratio else hratio
+
+        # Resize ori_bg if it is smaller than fg. 
+ if ratio > 1: + resize_h = math.ceil(ori_bg_h * ratio) + resize_w = math.ceil(ori_bg_w * ratio) + bg = cv2.resize( + ori_bg, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR) + else: + bg = ori_bg + + bg = bg[0:fg_h, 0:fg_w, :] + alpha = alpha / 255 + alpha = np.expand_dims(alpha, axis=2) + image = alpha * fg + (1 - alpha) * bg + image = image.astype(np.uint8) + return image, bg + + +if __name__ == '__main__': + t = [T.LoadImages(to_rgb=False), T.Resize(), T.Normalize()] + train_dataset = HumanMatteDataset( + dataset_root='../data/matting/human_matte/', + transforms=t, + mode='val', + train_file=['Composition-1k_train.txt', 'Distinctions-646_train.txt'], + val_file=['Composition-1k_val.txt', 'Distinctions-646_val.txt']) + data = train_dataset[21] + print(data.keys()) + print(data['gt_fields']) + + data['img'] = np.transpose(data['img'], (1, 2, 0)) + for key in data.get('gt_fields', []): + if len(data[key].shape) == 2: + continue + data[key] = np.transpose(data[key], (1, 2, 0)) + + data['img'] = ((data['img'] * 0.5 + 0.5) * 255).astype('uint8') + for key in data['gt_fields']: + if key == 'alpha': + continue + data[key] = ((data[key] * 0.5 + 0.5) * 255).astype('uint8') + + cv2.imwrite('img.png', data['img']) + for key in data['gt_fields']: + cv2.imwrite(key + '.png', data[key]) diff --git a/contrib/matting/transforms.py b/contrib/matting/transforms.py index a1fa820f5f..0ac197ec1f 100644 --- a/contrib/matting/transforms.py +++ b/contrib/matting/transforms.py @@ -58,9 +58,11 @@ def __init__(self, to_rgb=True): self.to_rgb = to_rgb def __call__(self, data): - data['img'] = cv2.imread(data['img']) + if isinstance(data['img'], str): + data['img'] = cv2.imread(data['img']) for key in data.get('gt_fields', []): - data[key] = cv2.imread(data[key], cv2.IMREAD_UNCHANGED) + if isinstance(data[key], str): + data[key] = cv2.imread(data[key], cv2.IMREAD_UNCHANGED) # if alpha and trimap has 3 channels, extract one. if key in ['alpha', 'trimap']: if len(data[key].shape) > 2: From 52ebbcf8a16756346de9a16123eb5ef220c97934 Mon Sep 17 00:00:00 2001 From: chenguowei01 Date: Thu, 5 Aug 2021 16:44:57 +0800 Subject: [PATCH 143/210] update cross_entropy_loss.py --- paddleseg/models/losses/cross_entropy_loss.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddleseg/models/losses/cross_entropy_loss.py b/paddleseg/models/losses/cross_entropy_loss.py index 55b755b46f..de07866ad5 100644 --- a/paddleseg/models/losses/cross_entropy_loss.py +++ b/paddleseg/models/losses/cross_entropy_loss.py @@ -68,8 +68,8 @@ def forward(self, logit, label, semantic_weights=None): raise ValueError( 'The number of weights = {} must be the same as the number of classes = {}.' 
.format(len(self.weight), logit.shape[1])) - - logit = paddle.transpose(logit, [0, 2, 3, 1]) + if channel_axis == 1: + logit = paddle.transpose(logit, [0, 2, 3, 1]) loss = F.cross_entropy( logit, label, From 4c7ed0a343f0c4cbf1d49d8af3fa0e1c61a1c769 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 11 Aug 2021 10:46:48 +0800 Subject: [PATCH 144/210] add modnet --- contrib/matting/core/train.py | 126 ++--- contrib/matting/core/val.py | 34 +- contrib/matting/dataset/__init__.py | 16 + contrib/matting/dataset/dataset.py | 2 + ...te_dataset.py => human_matting_dataset.py} | 64 ++- contrib/matting/metric.py | 6 +- contrib/matting/model/__init__.py | 2 + contrib/matting/model/mobilenet_v2.py | 264 ++++++++++ contrib/matting/model/modnet.py | 459 ++++++++++++++++++ contrib/matting/train.py | 99 ++-- contrib/matting/transforms.py | 180 ++++++- contrib/matting/val.py | 42 +- paddleseg/cvlibs/param_init.py | 29 ++ 13 files changed, 1137 insertions(+), 186 deletions(-) create mode 100644 contrib/matting/dataset/__init__.py rename contrib/matting/dataset/{human_matte_dataset.py => human_matting_dataset.py} (72%) create mode 100644 contrib/matting/model/mobilenet_v2.py create mode 100644 contrib/matting/model/modnet.py diff --git a/contrib/matting/core/train.py b/contrib/matting/core/train.py index 38d4929588..07fd225c95 100644 --- a/contrib/matting/core/train.py +++ b/contrib/matting/core/train.py @@ -14,7 +14,7 @@ import os import time -from collections import deque +from collections import deque, defaultdict import shutil import numpy as np @@ -25,38 +25,6 @@ from core.val import evaluate -def loss_computation(logit_dict, label_dict, losses, stage=3): - """ - Acoording the losses to select logit and label - """ - loss_list = [] - mask = label_dict['trimap'] == 128 - - if stage != 2: - # raw alpha - alpha_raw_loss = losses['types'][0](logit_dict['alpha_raw'], - label_dict['alpha'] / 255, mask) - alpha_raw_loss = losses['coef'][0] * alpha_raw_loss - loss_list.append(alpha_raw_loss) - - if stage == 1 or stage == 3: - # comp loss - comp_pred = logit_dict['alpha_raw'] * label_dict['fg'] + ( - 1 - logit_dict['alpha_raw']) * label_dict['bg'] - comp_loss = losses['types'][1](comp_pred, label_dict['img'], mask) - comp_loss = losses['coef'][1] * comp_loss - loss_list.append(comp_loss) - - if stage == 2 or stage == 3: - # pred alpha - alpha_pred_loss = losses['types'][2](logit_dict['alpha_pred'], - label_dict['alpha'] / 255, mask) - alpha_pred_loss = losses['coef'][2] * alpha_pred_loss - loss_list.append(alpha_pred_loss) - - return loss_list - - def train(model, train_dataset, val_dataset=None, @@ -71,11 +39,9 @@ def train(model, use_vdl=False, losses=None, keep_checkpoint_max=5, - stage=3, save_begin_iters=None): """ Launch training. - Args: model(nn.Layer): A sementic segmentation model. train_dataset (paddle.io.Dataset): Used to read and process training datasets. 
@@ -129,8 +95,7 @@ def train(model, from visualdl import LogWriter log_writer = LogWriter(save_dir) - avg_loss = 0.0 - avg_loss_list = [] + avg_loss = defaultdict(float) iters_per_epoch = len(batch_sampler) best_sad = np.inf best_model_iter = -1 @@ -150,13 +115,12 @@ def train(model, # model input if nranks > 1: logit_dict = ddp_model(data) + loss_dict = ddp_model.loss(logit_dict, data, losses) else: logit_dict = model(data) + loss_dict = model.loss(logit_dict, data, losses) - # 获取logit_dict, label_dict - loss_list = loss_computation(logit_dict, data, losses, stage=stage) - loss = sum(loss_list) - loss.backward() + loss_dict['all'].backward() optimizer.step() lr = optimizer.get_lr() @@ -164,18 +128,15 @@ def train(model, paddle.optimizer.lr.LRScheduler): optimizer._learning_rate.step() model.clear_gradients() - avg_loss += loss.numpy()[0] - if not avg_loss_list: - avg_loss_list = [l.numpy() for l in loss_list] - else: - for i in range(len(loss_list)): - avg_loss_list[i] += loss_list[i].numpy() + + for key, value in loss_dict.items(): + avg_loss[key] += value.numpy()[0] batch_cost_averager.record( time.time() - batch_start, num_samples=batch_size) if (iter) % log_iters == 0 and local_rank == 0: - avg_loss /= log_iters - avg_loss_list = [l[0] / log_iters for l in avg_loss_list] + for key, value in avg_loss.items(): + avg_loss[key] = value / log_iters remain_iters = iters - iter avg_train_batch_cost = batch_cost_averager.get_average() avg_train_reader_cost = reader_cost_averager.get_average() @@ -183,23 +144,21 @@ def train(model, logger.info( "[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.5f}, ips={:.4f} samples/sec | ETA {}" .format((iter - 1) // iters_per_epoch + 1, iter, iters, - avg_loss, lr, avg_train_batch_cost, + avg_loss['all'], lr, avg_train_batch_cost, avg_train_reader_cost, batch_cost_averager.get_ips_average(), eta)) - # logger.info( - # "[LOSS] loss={:.4f}, alpha_raw_loss={:.4f}, alpha_pred_loss={:.4f}," - # .format(avg_loss, avg_loss_list[0], avg_loss_list[1])) - logger.info(avg_loss_list) + # print loss + loss_str = '[TRAIN] [LOSS] ' + loss_str = loss_str + 'all={:.4f}'.format(avg_loss['all']) + for key, value in avg_loss.items(): + if key != 'all': + loss_str = loss_str + ' ' + key + '={:.4f}'.format( + value) + logger.info(loss_str) if use_vdl: - log_writer.add_scalar('Train/loss', avg_loss, iter) - # Record all losses if there are more than 2 losses. 
- if len(avg_loss_list) > 1: - avg_loss_dict = {} - for i, value in enumerate(avg_loss_list): - avg_loss_dict['loss_' + str(i)] = value - for key, value in avg_loss_dict.items(): - log_tag = 'Train/' + key - log_writer.add_scalar(log_tag, value, iter) + for key, value in avg_loss.items(): + log_tag = 'Train/' + key + log_writer.add_scalar(log_tag, value, iter) log_writer.add_scalar('Train/lr', lr, iter) log_writer.add_scalar('Train/batch_cost', @@ -207,13 +166,15 @@ def train(model, log_writer.add_scalar('Train/reader_cost', avg_train_reader_cost, iter) - if False: #主要为调试时候的观察,真正训练的时候可以省略 + if True: #主要为调试时候的观察,真正训练的时候可以省略 # 增加图片和alpha的显示 ori_img = data['img'][0] ori_img = paddle.transpose(ori_img, [1, 2, 0]) ori_img = (ori_img * 0.5 + 0.5) * 255 - alpha = (data['alpha'][0]).unsqueeze(-1) - trimap = (data['trimap'][0]).unsqueeze(-1) + alpha = (data['alpha'][0]) + alpha = paddle.transpose(alpha, [1, 2, 0]) * 255 + trimap = (data['trimap'][0]) + trimap = paddle.transpose(trimap, [1, 2, 0]) log_writer.add_image( tag='ground truth/ori_img', img=ori_img.numpy(), @@ -227,25 +188,26 @@ def train(model, img=trimap.numpy(), step=iter) - alpha_raw = ( - logit_dict['alpha_raw'][0] * 255).transpose( - [1, 2, 0]) + semantic = (logit_dict['semantic'][0] * 255).transpose( + [1, 2, 0]) log_writer.add_image( - tag='prediction/alpha_raw', - img=alpha_raw.numpy(), + tag='prediction/semantic', + img=semantic.numpy().astype('uint8'), + step=iter) + detail = (logit_dict['detail'][0] * 255).transpose( + [1, 2, 0]) + log_writer.add_image( + tag='prediction/detail', + img=detail.numpy().astype('uint8'), + step=iter) + cm = (logit_dict['matte'][0] * 255).transpose([1, 2, 0]) + log_writer.add_image( + tag='prediction/alpha', + img=cm.numpy().astype('uint8'), step=iter) - if stage >= 2: - alpha_pred = ( - logit_dict['alpha_pred'][0] * 255).transpose( - [1, 2, 0]) - log_writer.add_image( - tag='prediction/alpha_pred', - img=alpha_pred.numpy().astype('uint8'), - step=iter) - - avg_loss = 0.0 - avg_loss_list = [] + for key in avg_loss.keys(): + avg_loss[key] = 0. 
reader_cost_averager.reset() batch_cost_averager.reset() diff --git a/contrib/matting/core/val.py b/contrib/matting/core/val.py index 9a5f8f2f43..bc1229d1f8 100644 --- a/contrib/matting/core/val.py +++ b/contrib/matting/core/val.py @@ -41,10 +41,10 @@ def save_alpha_pred(alpha, path): def reverse_transform(alpha, trans_info): """recover pred to origin shape""" for item in trans_info[::-1]: - if item[0] == 'resize': + if item[0][0] == 'resize': h, w = item[1][0], item[1][1] - alpha = F.interpolate(alpha, (h, w), mode='bilinear') - elif item[0] == 'padding': + alpha = F.interpolate(alpha, [h, w], mode='bilinear') + elif item[0][0] == 'padding': h, w = item[1][0], item[1][1] alpha = alpha[:, :, 0:h, 0:w] else: @@ -91,25 +91,25 @@ def evaluate(model, with paddle.no_grad(): for iter, data in enumerate(loader): reader_cost_averager.record(time.time() - batch_start) - logit_dict = model(data) - - # 指标计算 结果保存 先实现单卡的 - if model.stage <= 1: - alpha_pred = logit_dict['alpha_raw'].numpy() - else: - alpha_pred = logit_dict['alpha_pred'].numpy() - alpha_gt = data['alpha'].numpy() - trimap = data['trimap'].numpy().astype('uint8') - alpha_pred = alpha_pred.squeeze(1) - alpha_pred = (alpha_pred * 255) + alpha_pred = model(data) + + alpha_pred = reverse_transform(alpha_pred, data['trans_info']) + alpha_pred = alpha_pred.numpy() + + alpha_gt = data['alpha'].numpy() * 255 + trimap = data.get('trimap') + if trimap is not None: + trimap = data['trimap'].numpy().astype('uint8') + alpha_pred = np.round(alpha_pred * 255) mse_metric.update(alpha_pred, alpha_gt, trimap) sad_metric.update(alpha_pred, alpha_gt, trimap) if save_results: alpha_pred_one = alpha_pred[0].squeeze() - trimap = trimap.squeeze().astype('uint8') - alpha_pred_one[trimap == 255] = 255 - alpha_pred_one[trimap == 0] = 0 + if trimap is not None: + trimap = trimap.squeeze().astype('uint8') + alpha_pred_one[trimap == 255] = 255 + alpha_pred_one[trimap == 0] = 0 save_alpha_pred(alpha_pred_one, os.path.join(save_dir, data['img_name'][0])) diff --git a/contrib/matting/dataset/__init__.py b/contrib/matting/dataset/__init__.py new file mode 100644 index 0000000000..6091bf7680 --- /dev/null +++ b/contrib/matting/dataset/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .dataset import Dataset +from .human_matting_dataset import HumanMattingDataset diff --git a/contrib/matting/dataset/dataset.py b/contrib/matting/dataset/dataset.py index 3f9547f326..0025281d74 100644 --- a/contrib/matting/dataset/dataset.py +++ b/contrib/matting/dataset/dataset.py @@ -88,6 +88,8 @@ def __getitem__(self, idx): data['trimap'] = self.gen_trimap( data['alpha'], mode=self.mode).astype('float32') + data['alpha'] = data['alpha'] / 255. 
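+        # alpha is kept in [0, 1] from here on; core/val.py rescales
+        # predictions back to [0, 255] before the SAD/MSE metrics, which
+        # expect that range.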
+ return data def __len__(self): diff --git a/contrib/matting/dataset/human_matte_dataset.py b/contrib/matting/dataset/human_matting_dataset.py similarity index 72% rename from contrib/matting/dataset/human_matte_dataset.py rename to contrib/matting/dataset/human_matting_dataset.py index 09c30b08ea..870026ef09 100644 --- a/contrib/matting/dataset/human_matte_dataset.py +++ b/contrib/matting/dataset/human_matting_dataset.py @@ -20,11 +20,10 @@ import random import paddle -from utils import get_files import transforms as T -class HumanMatteDataset(paddle.io.Dataset): +class HumanMattingDataset(paddle.io.Dataset): """ human_matting |__Composition-1k(origin dataset name) @@ -61,11 +60,13 @@ def __init__(self, transforms, mode='train', train_file=None, - val_file=None): + val_file=None, + get_trimap=True): super().__init__() self.dataset_root = dataset_root self.transforms = T.Compose(transforms) self.mode = mode + self.get_trimap = get_trimap # check file if mode == 'train' or mode == 'trainval': @@ -101,12 +102,13 @@ def __getitem__(self, idx): data = {} fg_bg_file = self.fg_bg_list[idx] fg_bg_file = fg_bg_file.split(' ') + data['img_name'] = fg_bg_file[0] # using in save prediction results fg_file = os.path.join(self.dataset_root, fg_bg_file[0]) alpha_file = fg_file.replace('fg', 'alpha') fg = cv2.imread(fg_file) alpha = cv2.imread(alpha_file, 0) data['alpha'] = alpha - data['gt_fields'] = ['alpha'] + data['gt_fields'] = [] if len(fg_bg_file) == 2: bg_file = os.path.join(self.dataset_root, fg_bg_file[1]) @@ -116,15 +118,34 @@ def __getitem__(self, idx): if self.mode in ['train', 'trainval']: data['gt_fields'].append('fg') data['gt_fields'].append('bg') - + data['gt_fields'].append('alpha') else: - data['img'] = data['fg'] + data['img'] = fg data['trans_info'] = [] # Record shape change information data = self.transforms(data) + + # When evaluation, gt should not be transforms. + if self.mode == 'val': + data['gt_fields'].append('alpha') + data['img'] = data['img'].astype('float32') for key in data.get('gt_fields', []): data[key] = data[key].astype('float32') + if self.get_trimap: + # Trimap read from file only happening in evaluation. + if self.mode == 'val': + trimap_path = alpha_file.replace('alpha', 'trimap') + if os.path.exists(trimap_path): + data['trimap'] = trimap_path + data['gt_fields'].append('trimap') + + if 'trimap' not in data: + data['trimap'] = self.gen_trimap( + data['alpha'], mode=self.mode).astype('float32') + data['trimap'] = data['trimap'][np.newaxis, :, :] + + data['alpha'] = data['alpha'][np.newaxis, :, :] / 255. 
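+        # The trimap keeps its raw values {0, 128, 255}: 0 is background,
+        # 128 is the unknown/transition region, 255 is foreground, while
+        # alpha is normalized to [0, 1] with a leading channel axis.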
return data @@ -155,16 +176,41 @@ def composite(self, fg, alpha, ori_bg): image = image.astype(np.uint8) return image, bg + @staticmethod + def gen_trimap(alpha, mode='train', eval_kernel=7): + if mode == 'train': + k_size = random.choice(range(2, 5)) + iterations = np.random.randint(5, 15) + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, + (k_size, k_size)) + dilated = cv2.dilate(alpha, kernel, iterations=iterations) + eroded = cv2.erode(alpha, kernel, iterations=iterations) + trimap = np.zeros(alpha.shape) + trimap.fill(128) + trimap[eroded > 254.5] = 255 + trimap[dilated < 0.5] = 0 + else: + k_size = eval_kernel + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, + (k_size, k_size)) + dilated = cv2.dilate(alpha, kernel) + trimap = np.zeros(alpha.shape) + trimap.fill(128) + trimap[alpha >= 250] = 255 + trimap[dilated <= 5] = 0 + + return trimap + if __name__ == '__main__': t = [T.LoadImages(to_rgb=False), T.Resize(), T.Normalize()] - train_dataset = HumanMatteDataset( + train_dataset = HumanMattingDataset( dataset_root='../data/matting/human_matte/', transforms=t, mode='val', train_file=['Composition-1k_train.txt', 'Distinctions-646_train.txt'], val_file=['Composition-1k_val.txt', 'Distinctions-646_val.txt']) - data = train_dataset[21] + data = train_dataset[81] print(data.keys()) print(data['gt_fields']) @@ -183,3 +229,5 @@ def composite(self, fg, alpha, ori_bg): cv2.imwrite('img.png', data['img']) for key in data['gt_fields']: cv2.imwrite(key + '.png', data[key]) + + cv2.imwrite('trimap.png', data['trimap'].astype('uint8')) diff --git a/contrib/matting/metric.py b/contrib/matting/metric.py index 9177d72fb9..5e021cd405 100644 --- a/contrib/matting/metric.py +++ b/contrib/matting/metric.py @@ -38,7 +38,7 @@ def update(self, pred, gt, trimap=None): update metric. Args: - pred (np.ndarray): The value range is [0., 1.]. + pred (np.ndarray): The value range is [0., 255.]. gt (np.ndarray): The value range is [0, 255]. trimap (np.ndarray, optional) The value is in {0, 128, 255}. Default: None. """ @@ -78,8 +78,8 @@ def update(self, pred, gt, trimap=None): update metric. Args: - pred (np.ndarray): The value range is [0., 1.]. - gt (np.ndarray): The value range is [0, 255]. + pred (np.ndarray): The value range is [0., 255.]. + gt (np.ndarray): The value range is [0., 255.]. trimap (np.ndarray, optional)L The value is in {0, 128, 255}. Default: None. """ if trimap is None: diff --git a/contrib/matting/model/__init__.py b/contrib/matting/model/__init__.py index 0b49f005ee..61da2293c4 100644 --- a/contrib/matting/model/__init__.py +++ b/contrib/matting/model/__init__.py @@ -14,5 +14,7 @@ from .vgg import * from .resnet_vd import * +from .mobilenet_v2 import * from .dim import DIM from .loss import MRSD +from .modnet import MODNet diff --git a/contrib/matting/model/mobilenet_v2.py b/contrib/matting/model/mobilenet_v2.py new file mode 100644 index 0000000000..88ffa57ddf --- /dev/null +++ b/contrib/matting/model/mobilenet_v2.py @@ -0,0 +1,264 @@ +# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +import numpy as np +import paddle +from paddle import ParamAttr +import paddle.nn as nn +import paddle.nn.functional as F +from paddle.nn import Conv2D, BatchNorm, Linear, Dropout +from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D + +from paddleseg import utils + +MODEL_URLS = { + "MobileNetV2_x0_25": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_25_pretrained.pdparams", + "MobileNetV2_x0_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_5_pretrained.pdparams", + "MobileNetV2_x0_75": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x0_75_pretrained.pdparams", + "MobileNetV2": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_pretrained.pdparams", + "MobileNetV2_x1_5": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x1_5_pretrained.pdparams", + "MobileNetV2_x2_0": + "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x2_0_pretrained.pdparams" +} + +__all__ = list(MODEL_URLS.keys()) + + +class ConvBNLayer(nn.Layer): + def __init__(self, + num_channels, + filter_size, + num_filters, + stride, + padding, + channels=None, + num_groups=1, + name=None, + use_cudnn=True): + super(ConvBNLayer, self).__init__() + + self._conv = Conv2D( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=filter_size, + stride=stride, + padding=padding, + groups=num_groups, + weight_attr=ParamAttr(name=name + "_weights"), + bias_attr=False) + + self._batch_norm = BatchNorm( + num_filters, + param_attr=ParamAttr(name=name + "_bn_scale"), + bias_attr=ParamAttr(name=name + "_bn_offset"), + moving_mean_name=name + "_bn_mean", + moving_variance_name=name + "_bn_variance") + + def forward(self, inputs, if_act=True): + y = self._conv(inputs) + y = self._batch_norm(y) + if if_act: + y = F.relu6(y) + return y + + +class InvertedResidualUnit(nn.Layer): + def __init__(self, num_channels, num_in_filter, num_filters, stride, + filter_size, padding, expansion_factor, name): + super(InvertedResidualUnit, self).__init__() + num_expfilter = int(round(num_in_filter * expansion_factor)) + self._expand_conv = ConvBNLayer( + num_channels=num_channels, + num_filters=num_expfilter, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + name=name + "_expand") + + self._bottleneck_conv = ConvBNLayer( + num_channels=num_expfilter, + num_filters=num_expfilter, + filter_size=filter_size, + stride=stride, + padding=padding, + num_groups=num_expfilter, + use_cudnn=False, + name=name + "_dwise") + + self._linear_conv = ConvBNLayer( + num_channels=num_expfilter, + num_filters=num_filters, + filter_size=1, + stride=1, + padding=0, + num_groups=1, + name=name + "_linear") + + def forward(self, inputs, ifshortcut): + y = self._expand_conv(inputs, if_act=True) + y = self._bottleneck_conv(y, if_act=True) + y = self._linear_conv(y, if_act=False) + if ifshortcut: + y = paddle.add(inputs, y) + return y + + +class InvresiBlocks(nn.Layer): + def __init__(self, in_c, t, c, n, s, name): + super(InvresiBlocks, self).__init__() + + self._first_block = InvertedResidualUnit( + num_channels=in_c, + num_in_filter=in_c, + num_filters=c, + stride=s, + filter_size=3, + padding=1, + expansion_factor=t, + name=name + "_1") + + self._block_list = [] + for i in range(1, n): + block = self.add_sublayer( + name + "_" + str(i + 1), + sublayer=InvertedResidualUnit( + 
num_channels=c, + num_in_filter=c, + num_filters=c, + stride=1, + filter_size=3, + padding=1, + expansion_factor=t, + name=name + "_" + str(i + 1))) + self._block_list.append(block) + + def forward(self, inputs): + y = self._first_block(inputs, ifshortcut=False) + for block in self._block_list: + y = block(y, ifshortcut=True) + return y + + +class MobileNet(nn.Layer): + def __init__(self, + input_channels=3, + scale=1.0, + pretrained=None, + prefix_name=""): + super(MobileNet, self).__init__() + self.scale = scale + + bottleneck_params_list = [ + (1, 16, 1, 1), + (6, 24, 2, 2), + (6, 32, 3, 2), + (6, 64, 4, 2), + (6, 96, 3, 1), + (6, 160, 3, 2), + (6, 320, 1, 1), + ] + + self.conv1 = ConvBNLayer( + num_channels=input_channels, + num_filters=int(32 * scale), + filter_size=3, + stride=2, + padding=1, + name=prefix_name + "conv1_1") + + self.block_list = [] + i = 1 + in_c = int(32 * scale) + for layer_setting in bottleneck_params_list: + t, c, n, s = layer_setting + i += 1 + block = self.add_sublayer( + prefix_name + "conv" + str(i), + sublayer=InvresiBlocks( + in_c=in_c, + t=t, + c=int(c * scale), + n=n, + s=s, + name=prefix_name + "conv" + str(i))) + self.block_list.append(block) + in_c = int(c * scale) + + self.out_c = int(1280 * scale) if scale > 1.0 else 1280 + self.conv9 = ConvBNLayer( + num_channels=in_c, + num_filters=self.out_c, + filter_size=1, + stride=1, + padding=0, + name=prefix_name + "conv9") + + self.feat_channels = [int(i * scale) for i in [16, 24, 32, 96, 1280]] + self.pretrained = pretrained + self.init_weight() + + def forward(self, inputs): + feat_list = [] + y = self.conv1(inputs, if_act=True) + + block_index = 0 + for block in self.block_list: + y = block(y) + if block_index in [0, 1, 2, 4]: + feat_list.append(y) + block_index += 1 + y = self.conv9(y, if_act=True) + feat_list.append(y) + return feat_list + + def init_weight(self): + utils.load_pretrained_model(self, self.pretrained) + + +def MobileNetV2_x0_25(**kwargs): + model = MobileNet(scale=0.25, **kwargs) + return model + + +def MobileNetV2_x0_5(**kwargs): + model = MobileNet(scale=0.5, **kwargs) + return model + + +def MobileNetV2_x0_75(**kwargs): + model = MobileNet(scale=0.75, **kwargs) + return model + + +def MobileNetV2(**kwargs): + model = MobileNet(scale=1.0, **kwargs) + return model + + +def MobileNetV2_x1_5(**kwargs): + model = MobileNet(scale=1.5, **kwargs) + return model + + +def MobileNetV2_x2_0(**kwargs): + model = MobileNet(scale=2.0, **kwargs) + return model diff --git a/contrib/matting/model/modnet.py b/contrib/matting/model/modnet.py new file mode 100644 index 0000000000..6e5469b688 --- /dev/null +++ b/contrib/matting/model/modnet.py @@ -0,0 +1,459 @@ +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import defaultdict +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import paddleseg +from paddleseg.models import layers, losses +from paddleseg import utils +from paddleseg.cvlibs import manager, param_init +import numpy as np +import scipy + + +@manager.MODELS.add_component +class MODNet(nn.Layer): + def __init__(self, backbone, hr_channels=32, pretrained=None): + super().__init__() + self.backbone = backbone + self.pretrained = pretrained + + self.head = MODNetHead( + hr_channels=hr_channels, backbone_channels=backbone.feat_channels) + self.init_weight() + self.blurer = GaussianBlurLayer(1, 3) + + def forward(self, inputs): + """ + If training, return a dict. + If evaluation, return the final alpha prediction. + """ + x = inputs['img'] + feat_list = self.backbone(x) + y = self.head(inputs=inputs, feat_list=feat_list) + + return y + + def loss(self, logit_dict, label_dict, loss_func_dict=None): + if loss_func_dict is None: + loss_func_dict = defaultdict(list) + loss_func_dict['semantic'].append(paddleseg.models.MSELoss()) + loss_func_dict['detail'].append(paddleseg.modells.L1Loss()) + loss_func_dict['fusion'].append(paddleseg.modells.L1Loss()) + loss_func_dict['fusion'].append(paddleseg.modells.L1Loss()) + + loss = {} + # semantic loss + semantic_gt = F.interpolate( + label_dict['alpha'], + scale_factor=1 / 16, + mode='bilinear', + align_corners=False) + semantic_gt = self.blurer(semantic_gt) + # semantic_gt.stop_gradient=True + loss['semantic'] = loss_func_dict['semantic'][0](logit_dict['semantic'], + semantic_gt) + + # detail loss + trimap = label_dict['trimap'] + mask = (trimap == 128).astype('float32') + logit_detail = logit_dict['detail'] * mask + label_detail = label_dict['alpha'] * mask + loss_detail = loss_func_dict['detail'][0](logit_detail, label_detail) + loss_detail = loss_detail / mask.mean() + loss['detail'] = loss_detail + + # fusion loss + matte = logit_dict['matte'] + alpha = label_dict['alpha'] + transition_mask = label_dict['trimap'] == 128 + matte_boundary = paddle.where(transition_mask, matte, alpha) + loss_fusion_l1 = loss_func_dict['fusion'][0]( + matte, + alpha) + 4 * loss_func_dict['fusion'][0](matte_boundary, alpha) + loss_fusion_comp = loss_func_dict['fusion'][1]( + matte * label_dict['img'], + alpha * label_dict['img']) + 4 * loss_func_dict['fusion'][1]( + matte_boundary * label_dict['img'], alpha * label_dict['img']) + loss_fusion = loss_fusion_l1 + loss_fusion_comp + loss['fusion'] = loss_fusion + + loss['all'] = loss['semantic'] + loss['detail'] + loss['fusion'] + + return loss + + def init_weight(self): + if self.pretrained is not None: + utils.load_entire_model(self, self.pretrained) + + +class MODNetHead(nn.Layer): + def __init__(self, hr_channels, backbone_channels): + super().__init__() + + self.lr_branch = LRBranch(backbone_channels) + self.hr_branch = HRBranch(hr_channels, backbone_channels) + self.f_branch = FusionBranch(hr_channels, backbone_channels) + self.init_weight() + + def forward(self, inputs, feat_list): + pred_semantic, lr8x, [enc2x, enc4x] = self.lr_branch(feat_list) + pred_detail, hr2x = self.hr_branch(inputs['img'], enc2x, enc4x, lr8x) + pred_matte = self.f_branch(inputs['img'], lr8x, hr2x) + + if self.training: + logit_dict = { + 'semantic': pred_semantic, + 'detail': pred_detail, + 'matte': pred_matte + } + return logit_dict + else: + return pred_matte + + def init_weight(self): + for layer in self.sublayers(): + if isinstance(layer, nn.Conv2D): + param_init.kaiming_uniform(layer.weight) 
+ elif isinstance( + layer, (nn.BatchNorm, nn.InstanceNorm2D, nn.SyncBatchNorm)): + param_init.constant_init(layer.weight, value=1.0) + param_init.constant_init(layer.bias, value=0.0) + + +class FusionBranch(nn.Layer): + def __init__(self, hr_channels, enc_channels): + super().__init__() + self.conv_lr4x = Conv2dIBNormRelu( + enc_channels[2], hr_channels, 5, stride=1, padding=2) + + self.conv_f2x = Conv2dIBNormRelu( + 2 * hr_channels, hr_channels, 3, stride=1, padding=1) + self.conv_f = nn.Sequential( + Conv2dIBNormRelu( + hr_channels + 3, int(hr_channels / 2), 3, stride=1, padding=1), + Conv2dIBNormRelu( + int(hr_channels / 2), + 1, + 1, + stride=1, + padding=0, + with_ibn=False, + with_relu=False)) + + def forward(self, img, lr8x, hr2x): + lr4x = F.interpolate( + lr8x, scale_factor=2, mode='bilinear', align_corners=False) + lr4x = self.conv_lr4x(lr4x) + lr2x = F.interpolate( + lr4x, scale_factor=2, mode='bilinear', align_corners=False) + + f2x = self.conv_f2x(paddle.concat((lr2x, hr2x), axis=1)) + f = F.interpolate( + f2x, scale_factor=2, mode='bilinear', align_corners=False) + f = self.conv_f(paddle.concat((f, img), axis=1)) + pred_matte = F.sigmoid(f) + + return pred_matte + + +class HRBranch(nn.Layer): + """ + High Resolution Branch of MODNet + """ + + def __init__(self, hr_channels, enc_channels): + super().__init__() + + self.tohr_enc2x = Conv2dIBNormRelu( + enc_channels[0], hr_channels, 1, stride=1, padding=0) + self.conv_enc2x = Conv2dIBNormRelu( + hr_channels + 3, hr_channels, 3, stride=2, padding=1) + + self.tohr_enc4x = Conv2dIBNormRelu( + enc_channels[1], hr_channels, 1, stride=1, padding=0) + self.conv_enc4x = Conv2dIBNormRelu( + 2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1) + + self.conv_hr4x = nn.Sequential( + Conv2dIBNormRelu( + 2 * hr_channels + enc_channels[2] + 3, + 2 * hr_channels, + 3, + stride=1, + padding=1), + Conv2dIBNormRelu( + 2 * hr_channels, hr_channels, 3, stride=1, padding=1), + Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1), + Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1)) + + self.conv_hr2x = nn.Sequential( + Conv2dIBNormRelu( + 2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1), + Conv2dIBNormRelu( + 2 * hr_channels, hr_channels, 3, stride=1, padding=1), + Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1), + Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1)) + + self.conv_hr = nn.Sequential( + Conv2dIBNormRelu( + hr_channels + 3, hr_channels, 3, stride=1, padding=1), + Conv2dIBNormRelu( + hr_channels, + 1, + 1, + stride=1, + padding=0, + with_ibn=False, + with_relu=False)) + + def forward(self, img, enc2x, enc4x, lr8x): + img2x = F.interpolate( + img, scale_factor=1 / 2, mode='bilinear', align_corners=False) + img4x = F.interpolate( + img, scale_factor=1 / 4, mode='bilinear', align_corners=False) + + enc2x = self.tohr_enc2x(enc2x) + hr4x = self.conv_enc2x(paddle.concat((img2x, enc2x), axis=1)) + + enc4x = self.tohr_enc4x(enc4x) + hr4x = self.conv_enc4x(paddle.concat((hr4x, enc4x), axis=1)) + + lr4x = F.interpolate( + lr8x, scale_factor=2, mode='bilinear', align_corners=False) + hr4x = self.conv_hr4x(paddle.concat((hr4x, lr4x, img4x), axis=1)) + + hr2x = F.interpolate( + hr4x, scale_factor=2, mode='bilinear', align_corners=False) + hr2x = self.conv_hr2x(paddle.concat((hr2x, enc2x), axis=1)) + + pred_detail = None + if self.training: + hr = F.interpolate( + hr2x, scale_factor=2, mode='bilinear', align_corners=False) + hr = self.conv_hr(paddle.concat((hr, img), 
axis=1))
+            pred_detail = F.sigmoid(hr)
+
+        return pred_detail, hr2x
+
+
+class LRBranch(nn.Layer):
+    def __init__(self, backbone_channels):
+        super().__init__()
+        self.se_block = SEBlock(backbone_channels[4], reduction=4)
+        self.conv_lr16x = Conv2dIBNormRelu(
+            backbone_channels[4], backbone_channels[3], 5, stride=1, padding=2)
+        self.conv_lr8x = Conv2dIBNormRelu(
+            backbone_channels[3], backbone_channels[2], 5, stride=1, padding=2)
+        self.conv_lr = Conv2dIBNormRelu(
+            backbone_channels[2],
+            1,
+            3,
+            stride=2,
+            padding=1,
+            with_ibn=False,
+            with_relu=False)
+
+    def forward(self, feat_list):
+        enc2x, enc4x, enc32x = feat_list[0], feat_list[1], feat_list[4]
+
+        enc32x = self.se_block(enc32x)
+        lr16x = F.interpolate(
+            enc32x, scale_factor=2, mode='bilinear', align_corners=False)
+        lr16x = self.conv_lr16x(lr16x)
+        lr8x = F.interpolate(
+            lr16x, scale_factor=2, mode='bilinear', align_corners=False)
+        lr8x = self.conv_lr8x(lr8x)
+
+        pred_semantic = None
+        if self.training:
+            lr = self.conv_lr(lr8x)
+            pred_semantic = F.sigmoid(lr)
+
+        return pred_semantic, lr8x, [enc2x, enc4x]
+
+
+class IBNorm(nn.Layer):
+    """
+    Combine Instance Norm and Batch Norm into one layer.
+    """
+
+    def __init__(self, in_channels):
+        super().__init__()
+        self.bnorm_channels = in_channels // 2
+        self.inorm_channels = in_channels - self.bnorm_channels
+
+        self.bnorm = nn.BatchNorm2D(self.bnorm_channels)
+        self.inorm = nn.InstanceNorm2D(self.inorm_channels)
+
+    def forward(self, x):
+        bn_x = self.bnorm(x[:, :self.bnorm_channels, :, :])
+        in_x = self.inorm(x[:, self.bnorm_channels:, :, :])
+
+        return paddle.concat((bn_x, in_x), 1)
+
+
+class Conv2dIBNormRelu(nn.Layer):
+    """
+    Convolution + IBNorm + ReLU
+    """
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride=1,
+                 padding=0,
+                 dilation=1,
+                 groups=1,
+                 bias_attr=None,
+                 with_ibn=True,
+                 with_relu=True):
+
+        super().__init__()
+
+        layers = [
+            nn.Conv2D(
+                in_channels,
+                out_channels,
+                kernel_size,
+                stride=stride,
+                padding=padding,
+                dilation=dilation,
+                groups=groups,
+                bias_attr=bias_attr)
+        ]
+
+        if with_ibn:
+            layers.append(IBNorm(out_channels))
+
+        if with_relu:
+            layers.append(nn.ReLU())
+
+        self.layers = nn.Sequential(*layers)
+
+    def forward(self, x):
+        return self.layers(x)
+
+
+class SEBlock(nn.Layer):
+    """
+    SE Block Proposed in https://arxiv.org/pdf/1709.01507.pdf
+    """
+
+    def __init__(self, num_channels, reduction=1):
+        super().__init__()
+        self.pool = nn.AdaptiveAvgPool2D(1)
+        self.conv = nn.Sequential(
+            nn.Conv2D(
+                num_channels,
+                int(num_channels // reduction),
+                1,
+                bias_attr=False), nn.ReLU(),
+            nn.Conv2D(
+                int(num_channels // reduction),
+                num_channels,
+                1,
+                bias_attr=False), nn.Sigmoid())
+
+    def forward(self, x):
+        w = self.pool(x)
+        w = self.conv(w)
+        return w * x
+
+
+class GaussianBlurLayer(nn.Layer):
+    """ Add Gaussian blur to a 4D tensor.
+    This layer takes a 4D tensor of {N, C, H, W} as input.
+    The Gaussian blur is applied to each of the C channels separately. 
+ """ + + def __init__(self, channels, kernel_size): + """ + Args: + channels (int): Channel for input tensor + kernel_size (int): Size of the kernel used in blurring + """ + + super(GaussianBlurLayer, self).__init__() + self.channels = channels + self.kernel_size = kernel_size + assert self.kernel_size % 2 != 0 + + self.op = nn.Sequential( + nn.Pad2D(int(self.kernel_size / 2), mode='reflect'), + nn.Conv2D( + channels, + channels, + self.kernel_size, + stride=1, + padding=0, + bias_attr=False, + groups=channels)) + + self._init_kernel() + self.op[1].weight.stop_gradient = True + + def forward(self, x): + """ + Args: + x (paddle.Tensor): input 4D tensor + Returns: + paddle.Tensor: Blurred version of the input + """ + + if not len(list(x.shape)) == 4: + print('\'GaussianBlurLayer\' requires a 4D tensor as input\n') + exit() + elif not x.shape[1] == self.channels: + print('In \'GaussianBlurLayer\', the required channel ({0}) is' + 'not the same as input ({1})\n'.format( + self.channels, x.shape[1])) + exit() + + return self.op(x) + + def _init_kernel(self): + sigma = 0.3 * ((self.kernel_size - 1) * 0.5 - 1) + 0.8 + + n = np.zeros((self.kernel_size, self.kernel_size)) + i = int(self.kernel_size / 2) + n[i, i] = 1 + kernel = scipy.ndimage.gaussian_filter(n, sigma) + kernel = kernel.astype('float32') + kernel = kernel[np.newaxis, np.newaxis, :, :] + paddle.assign(kernel, self.op[1].weight) + + +if __name__ == '__main__': + paddle.set_device('cpu') + from mobilenet_v2 import MobileNetV2 + from paddleseg import utils + + backbone = MobileNetV2( + pretrained="../pretrained_models/MobileNetV2_pretrained.pdparams") + model = MODNet(backbone=backbone) + model.eval() + + x = paddle.randint(0, 256, (1, 3, 320, 320)).astype('float32') + + inputs = {} + inputs['img'] = x / 255. + + logit = model(inputs) + print(logit) + +# for name, param in blurer.named_parameters(): +# print(name, param) +# print(blurer.op[1].weight) +# print(blurer.op.1.weight) diff --git a/contrib/matting/train.py b/contrib/matting/train.py index 4be6316141..77fbea232f 100644 --- a/contrib/matting/train.py +++ b/contrib/matting/train.py @@ -14,10 +14,15 @@ import argparse import os +from collections import defaultdict + +import paddle +import paddle.nn as nn +import paddleseg from core import train from model import * -from dataset import HumanDataset +from dataset import HumanMattingDataset import transforms as T @@ -90,13 +95,6 @@ def parse_args(): dest='use_vdl', help='Whether to record the data to VisualDL during training', action='store_true') - parser.add_argument( - '--stage', - dest='stage', - help='training stage: 0(simple loss), 1, 2, 3(whole net)', - type=int, - required=True, - choices=[0, 1, 2, 3]) parser.add_argument( '--pretrained_model', dest='pretrained_model', @@ -116,10 +114,23 @@ def parse_args(): parser.add_argument( '--backbone', dest='backbone', - help= - 'The backbone of model. It is one of (VGG16, ResNet18_vd, ResNet34_vd, ResNet50_vd, ResNet101_vd, ResNet152_vd)', + help='The backbone of model. 
It is one of (MobileNetV2)', required=True, type=str) + parser.add_argument( + '--train_file', + dest='train_file', + nargs='+', + help='Image list for traiing', + type=str, + default='train.txt') + parser.add_argument( + '--val_file', + dest='val_file', + nargs='+', + help='Image list for evaluation', + type=str, + default='val.txt') return parser.parse_args() @@ -133,56 +144,64 @@ def main(args): # train_dataset = Dataset() t = [ T.LoadImages(), - T.RandomCropByAlpha(crop_size=((320, 320), (480, 480), (640, 640))), - T.Resize(target_size=(320, 320)), + T.RandomCropByAlpha(crop_size=((512, 512), (640, 640), (800, 800))), + T.Resize(target_size=(512, 512)), T.Normalize() ] - train_dataset = HumanDataset( - dataset_root=args.dataset_root, transforms=t, mode='train') + train_dataset = HumanMattingDataset( + dataset_root=args.dataset_root, + transforms=t, + mode='train', + train_file=args.train_file) if args.do_eval: - t = [T.LoadImages(), T.Normalize()] - val_dataset = HumanDataset( - dataset_root=args.dataset_root, transforms=t, mode='val') + t = [T.LoadImages(), T.ResizeToIntMult(mult_int=32), T.Normalize()] + val_dataset = HumanMattingDataset( + dataset_root=args.dataset_root, + transforms=t, + mode='val', + val_file=args.val_file, + get_trimap=False) else: val_dataset = None # loss - losses = {'types': [], 'coef': []} - # encoder-decoder alpha loss - losses['types'].append(MRSD()) - losses['coef'].append(0.5) - # compositionnal loss - losses['types'].append(MRSD()) - losses['coef'].append(0.5) - # refine alpha loss - losses['types'].append(MRSD()) - losses['coef'].append(1) + losses = defaultdict(list) + losses['semantic'].append(paddleseg.models.MSELoss()) + losses['detail'].append(paddleseg.models.L1Loss()) + losses['fusion'].append(paddleseg.models.L1Loss()) + losses['fusion'].append(paddleseg.models.L1Loss()) # model #bulid backbone - # vgg16预训练模型地址: 'https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/VGG16_pretrained.pdparams') pretrained_model = './pretrained_models/' + args.backbone + '_pretrained.pdparams' if not os.path.exists(pretrained_model): pretrained_model = None backbone = eval(args.backbone)( - input_channels=4, pretrained=pretrained_model) + input_channels=3, pretrained=pretrained_model) - decoder_input_channels = 512 - if args.backbone in ['ResNet50_vd', 'ResNet101_vd', 'ResNet152_vd']: - decoder_input_channels = 2048 - model = DIM( - backbone=backbone, - stage=args.stage, - pretrained=args.pretrained_model, - decoder_input_channels=decoder_input_channels) + model = MODNet(backbone=backbone, pretrained=args.pretrained_model) # optimizer # 简单的先构建一个优化器 # lr = paddle.optimizer.lr.PolynomialDecay( # 0.001, decay_steps=200000, end_lr=0.0, power=0.9) - optimizer = paddle.optimizer.Adam( - learning_rate=args.learning_rate, parameters=model.parameters()) + # use adam + # optimizer = paddle.optimizer.Adam( + # learning_rate=args.learning_rate, parameters=model.parameters()) + + # lr = paddle.optimizer.lr.StepDecay(args.learning_rate, step_size=1000, gamma=0.1, last_epoch=-1, verbose=False) + boundaries = [50000, 100000, 200000] + values = [ + args.learning_rate * 0.1**scale for scale in range(len(boundaries) + 1) + ] + lr = paddle.optimizer.lr.PiecewiseDecay( + boundaries=boundaries, values=values, last_epoch=-1, verbose=False) + optimizer = paddle.optimizer.Momentum( + learning_rate=lr, + momentum=0.9, + parameters=model.parameters(), + weight_decay=4e-5) # 调用train函数进行训练 train( @@ -198,11 +217,11 @@ def main(args): save_interval=args.save_interval, 
         log_iters=args.log_iters,
         resume_model=args.resume_model,
-        stage=args.stage,
         save_dir=args.save_dir,
         save_begin_iters=args.save_begin_iters)
 
 
 if __name__ == '__main__':
     args = parse_args()
+    print(args)
     main(args)
diff --git a/contrib/matting/transforms.py b/contrib/matting/transforms.py
index 0ac197ec1f..6fa8d94111 100644
--- a/contrib/matting/transforms.py
+++ b/contrib/matting/transforms.py
@@ -93,13 +93,53 @@ def __init__(self, target_size=(512, 512)):
         self.target_size = target_size
 
     def __call__(self, data):
-        data['trans_info'].append(('resize', data['img'].shape[-2:]))
+        data['trans_info'].append(('resize', data['img'].shape[0:2]))
         data['img'] = functional.resize(data['img'], self.target_size)
         for key in data.get('gt_fields', []):
             data[key] = functional.resize(data[key], self.target_size)
         return data
 
 
+class ResizeByLong:
+    """
+    Resize the long side of an image to the given size, and then scale the other side proportionally.
+
+    Args:
+        long_size (int): The target size of the long side.
+    """
+
+    def __init__(self, long_size):
+        self.long_size = long_size
+
+    def __call__(self, data):
+        data['trans_info'].append(('resize', data['img'].shape[0:2]))
+        data['img'] = functional.resize_long(data['img'], self.long_size)
+        for key in data.get('gt_fields', []):
+            data[key] = functional.resize_long(data[key], self.long_size)
+        return data
+
+
+class ResizeToIntMult:
+    """
+    Resize an image so that each side is an integer multiple of `mult_int`, e.g. 32.
+    """
+
+    def __init__(self, mult_int=32):
+        self.mult_int = mult_int
+
+    def __call__(self, data):
+        data['trans_info'].append(('resize', data['img'].shape[0:2]))
+
+        h, w = data['img'].shape[0:2]
+        # Round each side down to the nearest multiple of mult_int.
+        rw = w - w % self.mult_int
+        rh = h - h % self.mult_int
+        data['img'] = functional.resize(data['img'], (rw, rh))
+        for key in data.get('gt_fields', []):
+            data[key] = functional.resize(data[key], (rw, rh))
+
+        return data
+
+
 class Normalize:
     """
     Normalize an image.
@@ -138,7 +178,57 @@ def __call__(self, data):
 
 class RandomCropByAlpha:
     """
-    Randomly crop with uncertain area as the center
+    Randomly crop, centering the crop on an uncertain (transition) area with a certain probability.
+
+    Args:
+        crop_size (tuple|list): The size you want to crop from image.
+        p (float): The probability that the crop is centered on an uncertain area.
+
+    """
+
+    def __init__(self, crop_size=((320, 320), (480, 480), (640, 640)), p=0.5):
+        self.crop_size = crop_size
+        self.p = p
+
+    def __call__(self, data):
+        idex = np.random.randint(low=0, high=len(self.crop_size))
+        crop_w, crop_h = self.crop_size[idex]
+
+        img_h = data['img'].shape[0]
+        img_w = data['img'].shape[1]
+        if np.random.rand() < self.p:
+            crop_center = np.where((data['alpha'] > 0) & (data['alpha'] < 255))
+            center_h_array, center_w_array = crop_center
+            if len(center_h_array) == 0:
+                return data
+            rand_ind = np.random.randint(len(center_h_array))
+            center_h = center_h_array[rand_ind]
+            center_w = center_w_array[rand_ind]
+            delta_h = crop_h // 2
+            delta_w = crop_w // 2
+            start_h = max(0, center_h - delta_h)
+            start_w = max(0, center_w - delta_w)
+        else:
+            start_h = 0
+            start_w = 0
+            if img_h > crop_h:
+                start_h = np.random.randint(img_h - crop_h + 1)
+            if img_w > crop_w:
+                start_w = np.random.randint(img_w - crop_w + 1)
+
+        end_h = min(img_h, start_h + crop_h)
+        end_w = min(img_w, start_w + crop_w)
+
+        data['img'] = data['img'][start_h:end_h, start_w:end_w]
+        for key in data.get('gt_fields', []):
+            data[key] = data[key][start_h:end_h, start_w:end_w]
+
+        return data
+
+
+class RandomCrop:
+    """
+    Randomly crop at a random position.
 
     Args:
         crop_size (tuple|list): The size you want to crop from image. 
@@ -149,23 +239,18 @@ def __init__(self, crop_size=((320, 320), (480, 480), (640, 640))):
 
     def __call__(self, data):
         idex = np.random.randint(low=0, high=len(self.crop_size))
-        crop_size = self.crop_size[idex]
-        crop_center = np.where((data['alpha'] > 0) & (data['alpha'] < 255))
-        center_h_array, center_w_array = crop_center
-        delta_h = crop_size[1] // 2
-        delta_w = crop_size[0] // 2
-
-        if len(center_h_array) == 0:
-            return data
+        crop_w, crop_h = self.crop_size[idex]
+        img_h, img_w = data['img'].shape[0:2]
 
-        rand_ind = np.random.randint(len(center_h_array))
-        center_h = center_h_array[rand_ind]
-        center_w = center_w_array[rand_ind]
+        start_h = 0
+        start_w = 0
+        if img_h > crop_h:
+            start_h = np.random.randint(img_h - crop_h + 1)
+        if img_w > crop_w:
+            start_w = np.random.randint(img_w - crop_w + 1)
 
-        start_h = max(0, center_h - delta_h)
-        start_w = max(0, center_w - delta_w)
-        end_h = min(data['img'].shape[0], start_h + crop_size[1])
-        end_w = min(data['img'].shape[1], start_w + crop_size[0])
+        end_h = min(img_h, start_h + crop_h)
+        end_w = min(img_w, start_w + crop_w)
 
         data['img'] = data['img'][start_h:end_h, start_w:end_w]
         for key in data.get('gt_fields', []):
@@ -174,10 +259,67 @@ def __call__(self, data):
         return data
 
 
+class LimitLong:
+    """
+    Limit the long edge of an image.
+
+    If the long edge is larger than max_long, it is resized to max_long,
+    while the short edge is scaled proportionally.
+
+    If the long edge is smaller than min_long, it is resized to min_long,
+    while the short edge is scaled proportionally.
+
+    Args:
+        max_long (int, optional): If the long edge of the image is larger than max_long,
+            it will be resized to max_long. Default: None.
+        min_long (int, optional): If the long edge of the image is smaller than min_long,
+            it will be resized to min_long. Default: None.
+    """
+
+    def __init__(self, max_long=None, min_long=None):
+        if max_long is not None:
+            if not isinstance(max_long, int):
+                raise TypeError(
+                    "Type of `max_long` is invalid. It should be int, but it is {}"
+                    .format(type(max_long)))
+        if min_long is not None:
+            if not isinstance(min_long, int):
+                raise TypeError(
+                    "Type of `min_long` is invalid. 
It should be int, but it is {}" + .format(type(min_long))) + if (max_long is not None) and (min_long is not None): + if min_long > max_long: + raise ValueError( + '`max_long should not smaller than min_long, but they are {} and {}' + .format(max_long, min_long)) + self.max_long = max_long + self.min_long = min_long + + def __call__(self, data): + h, w = data['img'].shape[:2] + long_edge = max(h, w) + target = long_edge + if (self.max_long is not None) and (long_edge > self.max_long): + target = self.max_long + elif (self.min_long is not None) and (long_edge < self.min_long): + target = self.min_long + + if target != long_edge: + data['trans_info'].append(('resize', data['img'].shape[0:2])) + data['img'] = functional.resize_long(data['img'], target) + for key in data.get('gt_fields', []): + data[key] = functional.resize_long(data[key], self.long_size) + + return data + + if __name__ == "__main__": - transforms = [LoadImages(), RandomCropByAlpha()] + transforms = [ + LoadImages(), + RandomCropByAlpha(((640, 640), (1280, 1280)), p=0.5) + ] transforms = Compose(transforms) - img_path = '/mnt/chenguowei01/github/PaddleSeg/data/matting/human_matting/train/image/0051115Q_000001_0062_001.png' + img_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/matting/data/matting/human_matting_old/train/image/0051115Q_000001_0062_001.png' bg_path = img_path.replace('image', 'bg') fg_path = img_path.replace('image', 'fg') alpha_path = img_path.replace('image', 'alpha') diff --git a/contrib/matting/val.py b/contrib/matting/val.py index fa146c62a0..3c20604ae2 100644 --- a/contrib/matting/val.py +++ b/contrib/matting/val.py @@ -16,7 +16,7 @@ from core import evaluate from model import * -from dataset import HumanDataset +from dataset import HumanMattingDataset import transforms as T @@ -45,17 +45,23 @@ def parse_args(): type=int, default=0) parser.add_argument( - '--stage', - dest='stage', - help='training stage: 0(simple loss), 1, 2, 3(whole net)', - type=int, + '--backbone', + dest='backbone', + help='The backbone of model. 
It is one of (MobileNetV2)', required=True, - choices=[0, 1, 2, 3]) + type=str) parser.add_argument( '--dataset_root', dest='dataset_root', help='the dataset root directory', type=str) + parser.add_argument( + '--val_file', + dest='val_file', + nargs='+', + help='Image list for evaluation', + type=str, + default='val.txt') parser.add_argument( '--save_results', dest='save_results', @@ -67,21 +73,23 @@ def parse_args(): def main(args): paddle.set_device('gpu') + # T.ResizeByLong(long_size=1024), + t = [T.LoadImages(), T.ResizeToIntMult(mult_int=32), T.Normalize()] + # t = [T.LoadImages(), T.LimitLong(max_long=2048), T.ResizeToIntMult(mult_int=32), T.Normalize()] - # 一些模块的组建 - # train_dataset - # 简单的建立一个数据读取器 - # train_dataset = Dataset() - t = [T.LoadImages(), T.Normalize()] - - eval_dataset = HumanDataset( - dataset_root=args.dataset_root, transforms=t, mode='val') + eval_dataset = HumanMattingDataset( + dataset_root=args.dataset_root, + transforms=t, + mode='val', + val_file=args.val_file, + get_trimap=False) # model - backbone = VGG16(input_channels=4) - model = DIM(backbone=backbone, stage=args.stage, pretrained=args.model_path) + backbone = eval(args.backbone)(input_channels=3) + + model = MODNet(backbone=backbone, pretrained=args.model_path) - # 调用train函数进行训练 + # 调用evaluate函数进行训练 evaluate( model=model, eval_dataset=eval_dataset, diff --git a/paddleseg/cvlibs/param_init.py b/paddleseg/cvlibs/param_init.py index 335281242e..8148783679 100644 --- a/paddleseg/cvlibs/param_init.py +++ b/paddleseg/cvlibs/param_init.py @@ -89,3 +89,32 @@ def kaiming_normal_init(param, **kwargs): """ initializer = nn.initializer.KaimingNormal(**kwargs) initializer(param, param.block) + + +def kaiming_uniform(param, **kwargs): + r"""Implements the Kaiming Uniform initializer + This class implements the weight initialization from the paper + `Delving Deep into Rectifiers: Surpassing Human-Level Performance on + ImageNet Classification `_ + by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a + robust initialization method that particularly considers the rectifier + nonlinearities. + + In case of Uniform distribution, the range is [-x, x], where + .. math:: + x = \sqrt{\\frac{6.0}{fan\_in}} + + Args: + param (Tensor): Tensor that needs to be initialized. 
+
+    Examples:
+
+        from paddleseg.cvlibs import param_init
+        import paddle.nn as nn
+
+        linear = nn.Linear(2, 4)
+        param_init.kaiming_uniform(linear.weight)
+    """
+
+    initializer = nn.initializer.KaimingUniform(**kwargs)
+    initializer(param, param.block)

From d5dbdf5ee6019ddc4df76becdc9912fb13eeae24 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Wed, 11 Aug 2021 16:28:57 +0800
Subject: [PATCH 145/210] update modnet.py

---
 contrib/matting/model/modnet.py | 17 ++++------------
 1 file changed, 4 insertions(+), 13 deletions(-)

diff --git a/contrib/matting/model/modnet.py b/contrib/matting/model/modnet.py
index 6e5469b688..a28809c377 100644
--- a/contrib/matting/model/modnet.py
+++ b/contrib/matting/model/modnet.py
@@ -125,10 +125,6 @@ def init_weight(self):
         for layer in self.sublayers():
             if isinstance(layer, nn.Conv2D):
                 param_init.kaiming_uniform(layer.weight)
-            elif isinstance(
-                    layer, (nn.BatchNorm, nn.InstanceNorm2D, nn.SyncBatchNorm)):
-                param_init.constant_init(layer.weight, value=1.0)
-                param_init.constant_init(layer.bias, value=0.0)
 
 
 class FusionBranch(nn.Layer):
@@ -193,9 +189,9 @@ def __init__(self, hr_channels, enc_channels):
                 stride=1,
                 padding=1),
             Conv2dIBNormRelu(
-                2 * hr_channels, hr_channels, 3, stride=1, padding=1),
-            Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1),
-            Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1))
+                2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1),
+            Conv2dIBNormRelu(
+                2 * hr_channels, hr_channels, 3, stride=1, padding=1))
 
         self.conv_hr2x = nn.Sequential(
             Conv2dIBNormRelu(
@@ -445,15 +441,10 @@ def _init_kernel(self):
     model = MODNet(backbone=backbone)
     model.eval()
 
-    x = paddle.randint(0, 256, (1, 3, 320, 320)).astype('float32')
+    x = paddle.randint(0, 256, (1, 3, 512, 512)).astype('float32')
     inputs = {}
     inputs['img'] = x / 255.
     logit = model(inputs)
     print(logit)
-
-# for name, param in blurer.named_parameters():
-#     print(name, param)
-#    print(blurer.op[1].weight)
-#    print(blurer.op.1.weight)
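With the norm-layer branch removed above, init_weight now re-initializes only the convolution weights and leaves norm layers at their framework defaults (weight=1, bias=0). A minimal sketch of the resulting pattern, using a hypothetical TinyHead module that is not part of the patch:

    import paddle.nn as nn
    from paddleseg.cvlibs import param_init

    class TinyHead(nn.Layer):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2D(16, 16, 3, padding=1)
            self.bn = nn.BatchNorm2D(16)
            self.init_weight()

        def init_weight(self):
            # only Conv2D weights are re-initialized; self.bn keeps the
            # default constant initialization provided by the framework
            for layer in self.sublayers():
                if isinstance(layer, nn.Conv2D):
                    param_init.kaiming_uniform(layer.weight)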
From a8c0c67cee8b71022301970381097c11e76703b9 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Fri, 13 Aug 2021 14:40:48 +0800
Subject: [PATCH 146/210] add consistent loss and update L1Loss

---
 contrib/matting/model/modnet.py    | 35 +++++++++++++++++++++++++++++-
 contrib/matting/train.py           |  2 +-
 paddleseg/models/losses/l1_loss.py |  2 +-
 3 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/contrib/matting/model/modnet.py b/contrib/matting/model/modnet.py
index a28809c377..d415581e26 100644
--- a/contrib/matting/model/modnet.py
+++ b/contrib/matting/model/modnet.py
@@ -78,15 +78,48 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None):
         alpha = label_dict['alpha']
         transition_mask = label_dict['trimap'] == 128
         matte_boundary = paddle.where(transition_mask, matte, alpha)
+        # L1 loss
         loss_fusion_l1 = loss_func_dict['fusion'][0](
             matte,
             alpha) + 4 * loss_func_dict['fusion'][0](matte_boundary, alpha)
+        # composition loss
         loss_fusion_comp = loss_func_dict['fusion'][1](
             matte * label_dict['img'], alpha *
             label_dict['img']) + 4 * loss_func_dict['fusion'][1](
                 matte_boundary * label_dict['img'], alpha * label_dict['img'])
-        loss_fusion = loss_fusion_l1 + loss_fusion_comp
+        # consistency loss with the semantic branch
+        transition_mask = F.interpolate(
+            label_dict['trimap'],
+            scale_factor=1 / 16,
+            mode='nearest',
+            align_corners=False)
+        transition_mask = transition_mask == 128
+        matte_con_sem = F.interpolate(
+            matte, scale_factor=1 / 16, mode='bilinear', align_corners=False)
+        matte_con_sem = self.blurer(matte_con_sem)
+        matte_con_sem = paddle.where(transition_mask, logit_dict['semantic'],
+                                     matte_con_sem)
+        if False:
+            import cv2
+            matte_con_sem_num = matte_con_sem.numpy()
+            matte_con_sem_num = matte_con_sem_num[0].squeeze()
+            matte_con_sem_num = (matte_con_sem_num * 255).astype('uint8')
+            semantic = logit_dict['semantic'].numpy()
+            semantic = semantic[0].squeeze()
+            semantic = (semantic * 255).astype('uint8')
+            transition_mask = transition_mask.astype('uint8')
+            transition_mask = transition_mask.numpy()
+            transition_mask = (transition_mask[0].squeeze()) * 255
+            cv2.imwrite('matte_con.png', matte_con_sem_num)
+            cv2.imwrite('semantic.png', semantic)
+            cv2.imwrite('transition.png', transition_mask)
+        mse_loss = paddleseg.models.MSELoss()
+        loss_fusion_con_sem = mse_loss(matte_con_sem, logit_dict['semantic'])
+        loss_fusion = loss_fusion_l1 + loss_fusion_comp + loss_fusion_con_sem
         loss['fusion'] = loss_fusion
+        loss['fusion_l1'] = loss_fusion_l1
+        loss['fusion_comp'] = loss_fusion_comp
+        loss['fusion_con_sem'] = loss_fusion_con_sem
 
         loss['all'] = loss['semantic'] + loss['detail'] + loss['fusion']
 
diff --git a/contrib/matting/train.py b/contrib/matting/train.py
index 77fbea232f..31f99d6cd0 100644
--- a/contrib/matting/train.py
+++ b/contrib/matting/train.py
@@ -191,7 +191,7 @@ def main(args):
     #     learning_rate=args.learning_rate, parameters=model.parameters())
 
     # lr = paddle.optimizer.lr.StepDecay(args.learning_rate, step_size=1000, gamma=0.1, last_epoch=-1, verbose=False)
-    boundaries = [50000, 100000, 200000]
+    boundaries = [20000, 50000, 80000]
     values = [
         args.learning_rate * 0.1**scale for scale in range(len(boundaries) + 1)
     ]
diff --git a/paddleseg/models/losses/l1_loss.py b/paddleseg/models/losses/l1_loss.py
index f0f58454b8..125c55d406 100644
@@ -20,7 
+20,7 @@ @manager.LOSSES.add_component -class L1Loss(nn.MSELoss): +class L1Loss(nn.L1Loss): r""" This interface is used to construct a callable object of the ``L1Loss`` class. The L1Loss layer calculates the L1 Loss of ``input`` and ``label`` as follows. From afecce33ef1dc39a87c942c80b1e9777aea5997d Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 16 Aug 2021 17:54:55 +0800 Subject: [PATCH 147/210] update human_matting_dataset.py --- contrib/matting/dataset/human_matting_dataset.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/matting/dataset/human_matting_dataset.py b/contrib/matting/dataset/human_matting_dataset.py index 870026ef09..dc7bf1f0a9 100644 --- a/contrib/matting/dataset/human_matting_dataset.py +++ b/contrib/matting/dataset/human_matting_dataset.py @@ -121,6 +121,8 @@ def __getitem__(self, idx): data['gt_fields'].append('alpha') else: data['img'] = fg + if self.mode in ['train', 'trainval']: + data['gt_fields'].append('alpha') data['trans_info'] = [] # Record shape change information data = self.transforms(data) From 8f10a4756b2bad38ced47dbf2033a24c5b8a59a3 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 16 Aug 2021 21:06:37 +0800 Subject: [PATCH 148/210] update dataset --- contrib/matting/dataset/human_matting_dataset.py | 4 ++++ contrib/matting/train.py | 10 ++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/contrib/matting/dataset/human_matting_dataset.py b/contrib/matting/dataset/human_matting_dataset.py index dc7bf1f0a9..50e1fa3c9d 100644 --- a/contrib/matting/dataset/human_matting_dataset.py +++ b/contrib/matting/dataset/human_matting_dataset.py @@ -122,6 +122,10 @@ def __getitem__(self, idx): else: data['img'] = fg if self.mode in ['train', 'trainval']: + data['fg'] = fg.copy() + data['bg'] = fg.copy() + data['gt_fields'].append('fg') + data['gt_fields'].append('bg') data['gt_fields'].append('alpha') data['trans_info'] = [] # Record shape change information diff --git a/contrib/matting/train.py b/contrib/matting/train.py index 31f99d6cd0..886d79d137 100644 --- a/contrib/matting/train.py +++ b/contrib/matting/train.py @@ -155,7 +155,12 @@ def main(args): mode='train', train_file=args.train_file) if args.do_eval: - t = [T.LoadImages(), T.ResizeToIntMult(mult_int=32), T.Normalize()] + t = [ + T.LoadImages(), + T.LimitLong(2048), + T.ResizeToIntMult(mult_int=32), + T.Normalize() + ] val_dataset = HumanMattingDataset( dataset_root=args.dataset_root, transforms=t, @@ -164,6 +169,7 @@ def main(args): get_trimap=False) else: val_dataset = None + print(len(train_dataset)) # loss losses = defaultdict(list) @@ -191,7 +197,7 @@ def main(args): # learning_rate=args.learning_rate, parameters=model.parameters()) # lr = paddle.optimizer.lr.StepDecay(args.learning_rate, step_size=1000, gamma=0.1, last_epoch=-1, verbose=False) - boundaries = [20000, 500000, 80000] + boundaries = [50000, 100000, 150000] values = [ args.learning_rate * 0.1**scale for scale in range(len(boundaries) + 1) ] From ecd870ea1b9e5e31a1c0f36bba0317d903e5e658 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 18 Aug 2021 21:17:32 +0800 Subject: [PATCH 149/210] add resize by short --- paddleseg/core/infer.py | 12 +++++++++++ paddleseg/transforms/functional.py | 11 ++++++++++ paddleseg/transforms/transforms.py | 33 ++++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+) diff --git a/paddleseg/core/infer.py b/paddleseg/core/infer.py index af7388a662..0bfdf50b46 100644 --- a/paddleseg/core/infer.py +++ b/paddleseg/core/infer.py @@ -52,6 +52,18 @@ def 
get_reverse_list(ori_shape, transforms): else: w = long_edge h = short_edge + if op.__class__.__name__ in ['ResizeByShort']: + reverse_list.append(('resize', (h, w))) + long_edge = max(h, w) + short_edge = min(h, w) + long_edge = int(round(long_edge * op.short_size / short_edge)) + short_edge = op.short_size + if h > w: + h = long_edge + w = short_edge + else: + w = long_edge + h = short_edge if op.__class__.__name__ in ['Padding']: reverse_list.append(('padding', (h, w))) w, h = op.target_size[0], op.target_size[1] diff --git a/paddleseg/transforms/functional.py b/paddleseg/transforms/functional.py index d53fa8b84f..f8bd08880b 100644 --- a/paddleseg/transforms/functional.py +++ b/paddleseg/transforms/functional.py @@ -47,6 +47,17 @@ def resize_long(im, long_size=224, interpolation=cv2.INTER_LINEAR): return im +def resize_short(im, short_size=224, interpolation=cv2.INTER_LINEAR): + value = min(im.shape[0], im.shape[1]) + scale = float(short_size) / float(value) + resized_width = int(round(im.shape[1] * scale)) + resized_height = int(round(im.shape[0] * scale)) + + im = cv2.resize( + im, (resized_width, resized_height), interpolation=interpolation) + return im + + def horizontal_flip(im): if len(im.shape) == 3: im = im[:, ::-1, :] diff --git a/paddleseg/transforms/transforms.py b/paddleseg/transforms/transforms.py index ef4c029039..19f5eb5e40 100644 --- a/paddleseg/transforms/transforms.py +++ b/paddleseg/transforms/transforms.py @@ -225,6 +225,39 @@ def __call__(self, im, label=None): return (im, label) +@manager.TRANSFORMS.add_component +class ResizeByShort: + """ + Resize the short side of an image to given size, and then scale the other side proportionally. + + Args: + short_size (int): The target size of short side. + """ + + def __init__(self, short_size): + self.short_size = short_size + + def __call__(self, im, label=None): + """ + Args: + im (np.ndarray): The Image data. + label (np.ndarray, optional): The label data. Default: None. + + Returns: + (tuple). When label is None, it returns (im, ), otherwise it returns (im, label). 
+ """ + + im = functional.resize_short(im, self.short_size) + if label is not None: + label = functional.resize_short(label, self.short_size, + cv2.INTER_NEAREST) + + if label is None: + return (im, ) + else: + return (im, label) + + @manager.TRANSFORMS.add_component class LimitLong: """ From a47a5260784cbacfb22b76a75b89ae983a546a97 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Fri, 20 Aug 2021 16:12:41 +0800 Subject: [PATCH 150/210] add resizebyshort transform --- contrib/matting/train.py | 15 +++--------- contrib/matting/transforms.py | 46 ++++++++++++++++++++++++++++++++--- contrib/matting/val.py | 9 +++++-- 3 files changed, 53 insertions(+), 17 deletions(-) diff --git a/contrib/matting/train.py b/contrib/matting/train.py index 886d79d137..54040ab8c7 100644 --- a/contrib/matting/train.py +++ b/contrib/matting/train.py @@ -144,8 +144,8 @@ def main(args): # train_dataset = Dataset() t = [ T.LoadImages(), - T.RandomCropByAlpha(crop_size=((512, 512), (640, 640), (800, 800))), - T.Resize(target_size=(512, 512)), + T.RandomCrop(crop_size=((512, 512), )), + T.RandomHorizontalFlip(), T.Normalize() ] @@ -157,7 +157,7 @@ def main(args): if args.do_eval: t = [ T.LoadImages(), - T.LimitLong(2048), + T.ResizeByShort(512), T.ResizeToIntMult(mult_int=32), T.Normalize() ] @@ -188,15 +188,6 @@ def main(args): model = MODNet(backbone=backbone, pretrained=args.pretrained_model) - # optimizer - # 简单的先构建一个优化器 - # lr = paddle.optimizer.lr.PolynomialDecay( - # 0.001, decay_steps=200000, end_lr=0.0, power=0.9) - # use adam - # optimizer = paddle.optimizer.Adam( - # learning_rate=args.learning_rate, parameters=model.parameters()) - - # lr = paddle.optimizer.lr.StepDecay(args.learning_rate, step_size=1000, gamma=0.1, last_epoch=-1, verbose=False) boundaries = [50000, 100000, 150000] values = [ args.learning_rate * 0.1**scale for scale in range(len(boundaries) + 1) diff --git a/contrib/matting/transforms.py b/contrib/matting/transforms.py index 6fa8d94111..79f09d0366 100644 --- a/contrib/matting/transforms.py +++ b/contrib/matting/transforms.py @@ -119,6 +119,25 @@ def __call__(self, data): return data +class ResizeByShort: + """ + Resize the short side of an image to given size, and then scale the other side proportionally. + + Args: + short_size (int): The target size of short side. + """ + + def __init__(self, short_size): + self.short_size = short_size + + def __call__(self, data): + data['trans_info'].append(('resize', data['img'].shape[0:2])) + data['img'] = functional.resize_short(data['img'], self.short_size) + for key in data.get('gt_fields', []): + data[key] = functional.resize_short(data[key], self.short_size) + return data + + class ResizeToIntMult: """ Resize to some int muitple, d.g. 32. @@ -186,9 +205,10 @@ class RandomCropByAlpha: """ - def __init__(self, crop_size=((320, 320), (480, 480), (640, 640)), p=0.5): + def __init__(self, crop_size=((320, 320), (480, 480), (640, 640)), + prob=0.5): self.crop_size = crop_size - self.p = p + self.prob = prob def __call__(self, data): idex = np.random.randint(low=0, high=len(self.crop_size)) @@ -196,7 +216,7 @@ def __call__(self, data): img_h = data['img'].shape[0] img_w = data['img'].shape[1] - if np.random.rand() < self.p: + if np.random.rand() < self.prob: crop_center = np.where((data['alpha'] > 0) & (data['alpha'] < 255)) center_h_array, center_w_array = crop_center if len(center_h_array) == 0: @@ -313,6 +333,26 @@ def __call__(self, data): return data +class RandomHorizontalFlip: + """ + Flip an image horizontally with a certain probability. 
+ + Args: + prob (float, optional): A probability of horizontally flipping. Default: 0.5. + """ + + def __init__(self, prob=0.5): + self.prob = prob + + def __call__(self, data): + if random.random() < self.prob: + data['img'] = functional.horizontal_flip(data['img']) + for key in data.get('gt_fields', []): + data[key] = functional.horizontal_flip(data[key]) + + return data + + if __name__ == "__main__": transforms = [ LoadImages(), diff --git a/contrib/matting/val.py b/contrib/matting/val.py index 3c20604ae2..5ebecd834f 100644 --- a/contrib/matting/val.py +++ b/contrib/matting/val.py @@ -74,8 +74,13 @@ def parse_args(): def main(args): paddle.set_device('gpu') # T.ResizeByLong(long_size=1024), - t = [T.LoadImages(), T.ResizeToIntMult(mult_int=32), T.Normalize()] - # t = [T.LoadImages(), T.LimitLong(max_long=2048), T.ResizeToIntMult(mult_int=32), T.Normalize()] + # t = [T.LoadImages(), T.ResizeToIntMult(mult_int=32), T.Normalize()] + t = [ + T.LoadImages(), + T.ResizeByShort(640), + T.ResizeToIntMult(mult_int=32), + T.Normalize() + ] eval_dataset = HumanMattingDataset( dataset_root=args.dataset_root, From de97bab07ea9d1106780829b74f51f7bbd2d5dae Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Fri, 27 Aug 2021 17:32:38 +0800 Subject: [PATCH 151/210] d10, fcsstop --- contrib/matting/model/modnet.py | 6 ++++-- contrib/matting/train.py | 2 +- contrib/matting/val.py | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/contrib/matting/model/modnet.py b/contrib/matting/model/modnet.py index d415581e26..c24dc85220 100644 --- a/contrib/matting/model/modnet.py +++ b/contrib/matting/model/modnet.py @@ -71,7 +71,7 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None): label_detail = label_dict['alpha'] * mask loss_detail = loss_func_dict['detail'][0](logit_detail, label_detail) loss_detail = loss_detail / mask.mean() - loss['detail'] = loss_detail + loss['detail'] = 10 * loss_detail # fusion loss matte = logit_dict['matte'] @@ -97,7 +97,9 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None): matte_con_sem = F.interpolate( matte, scale_factor=1 / 16, mode='bilinear', align_corners=False) matte_con_sem = self.blurer(matte_con_sem) - matte_con_sem = paddle.where(transition_mask, logit_dict['semantic'], + logit_semantic = logit_dict['semantic'].clone() + logit_semantic.stop_gradient = True + matte_con_sem = paddle.where(transition_mask, logit_semantic, matte_con_sem) if False: import cv2 diff --git a/contrib/matting/train.py b/contrib/matting/train.py index 54040ab8c7..8f40388447 100644 --- a/contrib/matting/train.py +++ b/contrib/matting/train.py @@ -188,7 +188,7 @@ def main(args): model = MODNet(backbone=backbone, pretrained=args.pretrained_model) - boundaries = [50000, 100000, 150000] + boundaries = [100000, 150000] values = [ args.learning_rate * 0.1**scale for scale in range(len(boundaries) + 1) ] diff --git a/contrib/matting/val.py b/contrib/matting/val.py index 5ebecd834f..2e8152164b 100644 --- a/contrib/matting/val.py +++ b/contrib/matting/val.py @@ -77,7 +77,7 @@ def main(args): # t = [T.LoadImages(), T.ResizeToIntMult(mult_int=32), T.Normalize()] t = [ T.LoadImages(), - T.ResizeByShort(640), + T.ResizeByShort(512), T.ResizeToIntMult(mult_int=32), T.Normalize() ] From e98b76256b39c5d3ae3267910f425bb4aa312ed7 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 22 Sep 2021 11:58:58 +0800 Subject: [PATCH 152/210] add RandomBlur aug --- .../matting/dataset/human_matting_dataset.py | 2 +- contrib/matting/train.py | 2 +- contrib/matting/transforms.py | 63 
++++++++++++++----- 3 files changed, 49 insertions(+), 18 deletions(-) diff --git a/contrib/matting/dataset/human_matting_dataset.py b/contrib/matting/dataset/human_matting_dataset.py index 50e1fa3c9d..22c3cc0299 100644 --- a/contrib/matting/dataset/human_matting_dataset.py +++ b/contrib/matting/dataset/human_matting_dataset.py @@ -104,7 +104,7 @@ def __getitem__(self, idx): fg_bg_file = fg_bg_file.split(' ') data['img_name'] = fg_bg_file[0] # using in save prediction results fg_file = os.path.join(self.dataset_root, fg_bg_file[0]) - alpha_file = fg_file.replace('fg', 'alpha') + alpha_file = fg_file.replace('/fg', '/alpha') fg = cv2.imread(fg_file) alpha = cv2.imread(alpha_file, 0) data['alpha'] = alpha diff --git a/contrib/matting/train.py b/contrib/matting/train.py index 8f40388447..4cf861b442 100644 --- a/contrib/matting/train.py +++ b/contrib/matting/train.py @@ -188,7 +188,7 @@ def main(args): model = MODNet(backbone=backbone, pretrained=args.pretrained_model) - boundaries = [100000, 150000] + boundaries = [10000, 30000] values = [ args.learning_rate * 0.1**scale for scale in range(len(boundaries) + 1) ] diff --git a/contrib/matting/transforms.py b/contrib/matting/transforms.py index 79f09d0366..a670b427b5 100644 --- a/contrib/matting/transforms.py +++ b/contrib/matting/transforms.py @@ -353,24 +353,55 @@ def __call__(self, data): return data +class RandomBlur: + """ + Blurring an image by a Gaussian function with a certain probability. + + Args: + prob (float, optional): A probability of blurring an image. Default: 0.1. + """ + + def __init__(self, prob=0.1): + self.prob = prob + + def __call__(self, data): + if self.prob <= 0: + n = 0 + elif self.prob >= 1: + n = 1 + else: + n = int(1.0 / self.prob) + if n > 0: + if np.random.randint(0, n) == 0: + radius = np.random.randint(3, 10) + if radius % 2 != 1: + radius = radius + 1 + if radius > 9: + radius = 9 + data['img'] = cv2.GaussianBlur(data['img'], (radius, radius), 0, + 0) + for key in data.get('gt_fields', []): + data[key] = cv2.GaussianBlur(data[key], (radius, radius), 0, + 0) + return data + + if __name__ == "__main__": - transforms = [ - LoadImages(), - RandomCropByAlpha(((640, 640), (1280, 1280)), p=0.5) - ] + transforms = [RandomBlur(prob=1)] transforms = Compose(transforms) - img_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/matting/data/matting/human_matting_old/train/image/0051115Q_000001_0062_001.png' - bg_path = img_path.replace('image', 'bg') - fg_path = img_path.replace('image', 'fg') - alpha_path = img_path.replace('image', 'alpha') + fg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/matting/data/matting/human_matting/Distinctions-646/train/fg/13(2).png' + alpha_path = fg_path.replace('fg', 'alpha') + bg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/matting/data/matting/human_matting/bg/unsplash_bg/attic/photo-1443884590026-2e4d21aee71c?crop=entropy&cs=tinysrgb&fit=max&fm=jpg&ixid=MnwxMjA3fDB8MXxzZWFyY2h8Nzh8fGF0dGljfGVufDB8fHx8MTYyOTY4MDcxNQ&ixlib=rb-1.2.1&q=80&w=400.jpg' data = {} - data['img'] = img_path - data['fg'] = fg_path - data['bg'] = bg_path - data['alpha'] = alpha_path + data['fg'] = cv2.imread(fg_path) + data['bg'] = cv2.imread(bg_path) + h, w, c = data['fg'].shape + data['bg'] = cv2.resize(data['bg'], (w, h)) + alpha = cv2.imread(alpha_path) + data['alpha'] = alpha[:, :, 0] + alpha = alpha / 255. 
+ data['img'] = alpha * data['fg'] + (1 - alpha) * data['bg'] + data['gt_fields'] = ['fg', 'bg', 'alpha'] data = transforms(data) - print(np.min(data['img']), np.max(data['img'])) - print(data['img'].shape, data['fg'].shape, data['bg'].shape, - data['alpha'].shape) - cv2.imwrite('crop_img.png', data['img'].transpose((1, 2, 0))) + cv2.imwrite('blur_alpha.png', data['alpha']) From 1e06fc902e2995e4f765adce81e8169bd3e9e733 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Thu, 23 Sep 2021 15:14:14 +0800 Subject: [PATCH 153/210] add distort aug --- contrib/matting/transforms.py | 109 +++++++++++++++++++++++++++++++++- 1 file changed, 106 insertions(+), 3 deletions(-) diff --git a/contrib/matting/transforms.py b/contrib/matting/transforms.py index a670b427b5..443c5e2687 100644 --- a/contrib/matting/transforms.py +++ b/contrib/matting/transforms.py @@ -17,6 +17,7 @@ import cv2 import numpy as np from paddleseg.transforms import functional +from PIL import Image class Compose: @@ -386,8 +387,104 @@ def __call__(self, data): return data +class RandomDistort: + """ + Distort an image with random configurations. + + Args: + brightness_range (float, optional): A range of brightness. Default: 0.5. + brightness_prob (float, optional): A probability of adjusting brightness. Default: 0.5. + contrast_range (float, optional): A range of contrast. Default: 0.5. + contrast_prob (float, optional): A probability of adjusting contrast. Default: 0.5. + saturation_range (float, optional): A range of saturation. Default: 0.5. + saturation_prob (float, optional): A probability of adjusting saturation. Default: 0.5. + hue_range (int, optional): A range of hue. Default: 18. + hue_prob (float, optional): A probability of adjusting hue. Default: 0.5. + """ + + def __init__(self, + brightness_range=0.5, + brightness_prob=0.5, + contrast_range=0.5, + contrast_prob=0.5, + saturation_range=0.5, + saturation_prob=0.5, + hue_range=18, + hue_prob=0.5): + self.brightness_range = brightness_range + self.brightness_prob = brightness_prob + self.contrast_range = contrast_range + self.contrast_prob = contrast_prob + self.saturation_range = saturation_range + self.saturation_prob = saturation_prob + self.hue_range = hue_range + self.hue_prob = hue_prob + + def __call__(self, data): + brightness_lower = 1 - self.brightness_range + brightness_upper = 1 + self.brightness_range + contrast_lower = 1 - self.contrast_range + contrast_upper = 1 + self.contrast_range + saturation_lower = 1 - self.saturation_range + saturation_upper = 1 + self.saturation_range + hue_lower = -self.hue_range + hue_upper = self.hue_range + ops = [ + functional.brightness, functional.contrast, functional.saturation, + functional.hue + ] + random.shuffle(ops) + params_dict = { + 'brightness': { + 'brightness_lower': brightness_lower, + 'brightness_upper': brightness_upper + }, + 'contrast': { + 'contrast_lower': contrast_lower, + 'contrast_upper': contrast_upper + }, + 'saturation': { + 'saturation_lower': saturation_lower, + 'saturation_upper': saturation_upper + }, + 'hue': { + 'hue_lower': hue_lower, + 'hue_upper': hue_upper + } + } + prob_dict = { + 'brightness': self.brightness_prob, + 'contrast': self.contrast_prob, + 'saturation': self.saturation_prob, + 'hue': self.hue_prob + } + + im = data['img'].astype('uint8') + im = Image.fromarray(im) + for id in range(len(ops)): + params = params_dict[ops[id].__name__] + params['im'] = im + prob = prob_dict[ops[id].__name__] + if np.random.uniform(0, 1) < prob: + im = ops[id](**params) + data['img'] = np.asarray(im) + 
+ for key in data.get('gt_fields', []): + if key != 'alpha': + im = data[key].astype('uint8') + im = Image.fromarray(im) + for id in range(len(ops)): + params = params_dict[ops[id].__name__] + params['im'] = im + prob = prob_dict[ops[id].__name__] + if np.random.uniform(0, 1) < prob: + im = ops[id](**params) + data[key] = np.asarray(im) + return data + + if __name__ == "__main__": - transforms = [RandomBlur(prob=1)] + transforms = [RandomDistort()] transforms = Compose(transforms) fg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/matting/data/matting/human_matting/Distinctions-646/train/fg/13(2).png' alpha_path = fg_path.replace('fg', 'alpha') @@ -402,6 +499,12 @@ def __call__(self, data): alpha = alpha / 255. data['img'] = alpha * data['fg'] + (1 - alpha) * data['bg'] - data['gt_fields'] = ['fg', 'bg', 'alpha'] + data['gt_fields'] = ['fg', 'bg'] + print(data['img'].shape) + for key in data['gt_fields']: + print(data[key].shape) +# import pdb +# pdb.set_trace() data = transforms(data) - cv2.imwrite('blur_alpha.png', data['alpha']) + print(data['img'].dtype, data['img'].shape) + cv2.imwrite('distort_img.jpg', data['img'].transpose([1, 2, 0])) From b84291521365cca45b52bb3867a38f5d4304d229 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Thu, 23 Sep 2021 15:19:23 +0800 Subject: [PATCH 154/210] add predict function --- contrib/matting/core/__init__.py | 1 + contrib/matting/core/predict.py | 152 +++++++++++++++++++++++++++++++ contrib/matting/predict.py | 132 +++++++++++++++++++++++++++ 3 files changed, 285 insertions(+) create mode 100644 contrib/matting/core/predict.py create mode 100644 contrib/matting/predict.py diff --git a/contrib/matting/core/__init__.py b/contrib/matting/core/__init__.py index 2e0309c2bf..21afe12cb6 100644 --- a/contrib/matting/core/__init__.py +++ b/contrib/matting/core/__init__.py @@ -1,2 +1,3 @@ from .train import train from .val import evaluate +from .predict import predict diff --git a/contrib/matting/core/predict.py b/contrib/matting/core/predict.py new file mode 100644 index 0000000000..242b0c2384 --- /dev/null +++ b/contrib/matting/core/predict.py @@ -0,0 +1,152 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import math +import time + +import cv2 +import numpy as np +import paddle +import paddle.nn.functional as F + +from paddleseg import utils +from paddleseg.core import infer +from paddleseg.utils import logger, progbar, TimeAverager + + +def mkdir(path): + sub_dir = os.path.dirname(path) + if not os.path.exists(sub_dir): + os.makedirs(sub_dir) + + +def partition_list(arr, m): + """split the list 'arr' into m pieces""" + n = int(math.ceil(len(arr) / float(m))) + return [arr[i:i + n] for i in range(0, len(arr), n)] + + +def save_alpha_pred(alpha, path): + """ + The value of alpha is range [0, 1], shape should be [h,w] + """ + dirname = os.path.dirname(path) + if not os.path.exists(dirname): + os.makedirs(dirname) + + alpha = (alpha).astype('uint8') + cv2.imwrite(path, alpha) + + +def reverse_transform(alpha, trans_info): + """recover pred to origin shape""" + for item in trans_info[::-1]: + if item[0] == 'resize': + h, w = item[1][0], item[1][1] + alpha = F.interpolate(alpha, [h, w], mode='bilinear') + elif item[0] == 'padding': + h, w = item[1][0], item[1][1] + alpha = alpha[:, :, 0:h, 0:w] + else: + raise Exception("Unexpected info '{}' in im_info".format(item[0])) + return alpha + + +def preprocess(img, transforms, trimap=None): + data = {} + data['img'] = img + data['trans_info'] = [] + data = transforms(data) + data['img'] = paddle.to_tensor(data['img']) + data['img'] = data['img'].unsqueeze(0) + + return data + + +def predict(model, + model_path, + transforms, + image_list, + image_dir=None, + save_dir='output'): + """ + predict and visualize the image_list. + Args: + model (nn.Layer): Used to predict for input image. + model_path (str): The path of pretrained model. + transforms (transform.Compose): Preprocess for input image. + image_list (list): A list of image path to be predicted. + image_dir (str, optional): The root directory of the images predicted. Default: None. + save_dir (str, optional): The directory to save the visualized results. Default: 'output'. 
+ """ + utils.utils.load_entire_model(model, model_path) + model.eval() + nranks = paddle.distributed.get_world_size() + local_rank = paddle.distributed.get_rank() + if nranks > 1: + img_lists = partition_list(image_list, nranks) + else: + img_lists = [image_list] + + added_saved_dir = os.path.join(save_dir, 'added_prediction') + pred_saved_dir = os.path.join(save_dir, 'pseudo_color_prediction') + + logger.info("Start to predict...") + progbar_pred = progbar.Progbar(target=len(img_lists[0]), verbose=1) + preprocess_cost_averager = TimeAverager() + infer_cost_averager = TimeAverager() + postprocess_cost_averager = TimeAverager() + batch_start = time.time() + with paddle.no_grad(): + for i, im_path in enumerate(img_lists[local_rank]): + preprocess_start = time.time() + data = preprocess(img=im_path, transforms=transforms) + preprocess_cost_averager.record(time.time() - preprocess_start) + + infer_start = time.time() + alpha_pred = model(data) + infer_cost_averager.record(time.time() - infer_start) + + postprocess_start = time.time() + alpha_pred = reverse_transform(alpha_pred, data['trans_info']) + alpha_pred = (alpha_pred.numpy()).squeeze() + alpha_pred = (alpha_pred * 255).astype('uint8') + + # get the saved name + if image_dir is not None: + im_file = im_path.replace(image_dir, '') + else: + im_file = os.path.basename(im_path) + if im_file[0] == '/' or im_file[0] == '\\': + im_file = im_file[1:] + + save_path = os.path.join(save_dir, im_file) + mkdir(save_path) + save_alpha_pred(alpha_pred, save_path) + + postprocess_cost_averager.record(time.time() - postprocess_start) + + preprocess_cost = preprocess_cost_averager.get_average() + infer_cost = infer_cost_averager.get_average() + postprocess_cost = postprocess_cost_averager.get_average() + if local_rank == 0: + progbar_pred.update(i + 1, + [('preprocess_cost', preprocess_cost), + ('infer_cost cost', infer_cost), + ('postprocess_cost', postprocess_cost)]) + + preprocess_cost_averager.reset() + infer_cost_averager.reset() + postprocess_cost_averager.reset() diff --git a/contrib/matting/predict.py b/contrib/matting/predict.py new file mode 100644 index 0000000000..a860d7aa07 --- /dev/null +++ b/contrib/matting/predict.py @@ -0,0 +1,132 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import argparse
+import os
+
+import paddle
+
+from paddleseg.utils import logger
+
+from core import predict
+from model import *
+from dataset import HumanMattingDataset
+import transforms as T
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Model prediction')
+    # params of prediction
+    # parser.add_argument(
+    #     "--config", dest="cfg", help="The config file.", default=None, type=str)
+
+    parser.add_argument(
+        '--model_path',
+        dest='model_path',
+        help='The path of model for prediction',
+        type=str,
+        default=None)
+    parser.add_argument(
+        '--image_path',
+        dest='image_path',
+        help=
+        'The path of image, it can be a file or a directory including images',
+        type=str,
+        default=None)
+    parser.add_argument(
+        '--save_dir',
+        dest='save_dir',
+        help='The directory for saving the predicted results',
+        type=str,
+        default='./output/results')
+
+    parser.add_argument(
+        '--backbone',
+        dest='backbone',
+        help='The backbone of model. It is one of (MobileNetV2)',
+        required=True,
+        type=str)
+
+    return parser.parse_args()
+
+
+def get_image_list(image_path):
+    """Get image list"""
+    valid_suffix = [
+        '.JPEG', '.jpeg', '.JPG', '.jpg', '.BMP', '.bmp', '.PNG', '.png'
+    ]
+    image_list = []
+    image_dir = None
+    if os.path.isfile(image_path):
+        if os.path.splitext(image_path)[-1] in valid_suffix:
+            image_list.append(image_path)
+        else:
+            image_dir = os.path.dirname(image_path)
+            with open(image_path, 'r') as f:
+                for line in f:
+                    line = line.strip()
+                    if len(line.split()) > 1:
+                        raise RuntimeError(
+                            'There should be only one image path per line in `--image_path` file. Wrong line: {}'
+                            .format(line))
+                    image_list.append(os.path.join(image_dir, line))
+    elif os.path.isdir(image_path):
+        image_dir = image_path
+        for root, dirs, files in os.walk(image_path):
+            for f in files:
+                if '.ipynb_checkpoints' in root:
+                    continue
+                if os.path.splitext(f)[-1] in valid_suffix:
+                    image_list.append(os.path.join(root, f))
+    else:
+        raise FileNotFoundError(
+            '`--image_path` is not found. It should be an image file or a directory including images'
+        )
+
+    if len(image_list) == 0:
+        raise RuntimeError('There is no image file in `--image_path`')
+
+    return image_list, image_dir
+
+
+def main(args):
+    paddle.set_device('gpu')
+
+    t = [
+        T.LoadImages(),
+        # T.ResizeByShort(512),
+        T.Resize((512, 512)),
+        T.ResizeToIntMult(mult_int=32),
+        T.Normalize()
+    ]
+
+    transforms = T.Compose(t)
+
+    # model
+    backbone = eval(args.backbone)(input_channels=3)
+    model = MODNet(backbone=backbone, pretrained=args.model_path)
+
+    image_list, image_dir = get_image_list(args.image_path)
+    logger.info('Number of predict images = {}'.format(len(image_list)))
+
+    predict(
+        model,
+        model_path=args.model_path,
+        transforms=transforms,
+        image_list=image_list,
+        image_dir=image_dir,
+        save_dir=args.save_dir)
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    main(args)
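A hypothetical driver that mirrors main() above, with placeholder checkpoint and image paths; it reuses get_image_list from this script and assumes MODNet and MobileNetV2 are exported by the model package, as the star import suggests:

    import paddle
    import transforms as T
    from core import predict
    from model import MODNet, MobileNetV2

    paddle.set_device('gpu')
    t = [
        T.LoadImages(),
        T.Resize((512, 512)),
        T.ResizeToIntMult(mult_int=32),
        T.Normalize()
    ]

    backbone = MobileNetV2(input_channels=3)
    model = MODNet(backbone=backbone,
                   pretrained='output/best_model/model.pdparams')

    image_list, image_dir = get_image_list('data/test_images')
    predict(
        model,
        model_path='output/best_model/model.pdparams',
        transforms=T.Compose(t),
        image_list=image_list,
        image_dir=image_dir,
        save_dir='output/results')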
From 1f4450dcce2086d62e7a04ade00efe9d8a41f857 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Sun, 26 Sep 2021 12:05:33 +0800
Subject: [PATCH 155/210] merge dim network

---
 contrib/matting/core/train.py                     |  2 +-
 .../matting/dataset/human_matting_dataset.py      | 25 ++++---
 contrib/matting/model/dim.py                      | 70 ++++++++++++++-----
 contrib/matting/val.py                            |  2 -
 4 files changed, 69 insertions(+), 30 deletions(-)

diff --git a/contrib/matting/core/train.py b/contrib/matting/core/train.py
index 07fd225c95..42d092be26 100644
--- a/contrib/matting/core/train.py
+++ b/contrib/matting/core/train.py
@@ -166,7 +166,7 @@ def train(model,
                     log_writer.add_scalar('Train/reader_cost',
                                           avg_train_reader_cost, iter)
 
-                if True:  # For debugging observation only; can be omitted in real training
+                if False:  # For debugging observation only; can be omitted in real training
                     # Show the image and alpha for inspection
                     ori_img = data['img'][0]
                     ori_img = paddle.transpose(ori_img, [1, 2, 0])
diff --git a/contrib/matting/dataset/human_matting_dataset.py b/contrib/matting/dataset/human_matting_dataset.py
index 22c3cc0299..73b7bbb6a4 100644
--- a/contrib/matting/dataset/human_matting_dataset.py
+++ b/contrib/matting/dataset/human_matting_dataset.py
@@ -61,12 +61,14 @@ def __init__(self,
                  mode='train',
                  train_file=None,
                  val_file=None,
-                 get_trimap=True):
+                 get_trimap=True,
+                 separator=' '):
         super().__init__()
         self.dataset_root = dataset_root
         self.transforms = T.Compose(transforms)
         self.mode = mode
         self.get_trimap = get_trimap
+        self.separator = separator
 
         # check file
         if mode == 'train' or mode == 'trainval':
@@ -101,7 +103,7 @@ def __init__(self,
     def __getitem__(self, idx):
         data = {}
         fg_bg_file = self.fg_bg_list[idx]
-        fg_bg_file = fg_bg_file.split(' ')
+        fg_bg_file = fg_bg_file.split(self.separator)
         data['img_name'] = fg_bg_file[0]  # used when saving prediction results
         fg_file = os.path.join(self.dataset_root, fg_bg_file[0])
         alpha_file = fg_file.replace('/fg', '/alpha')
@@ -110,7 +112,8 @@ def __getitem__(self, idx):
         data['alpha'] = alpha
         data['gt_fields'] = []
 
-        if len(fg_bg_file) == 2:
+        # line is: fg [bg] [trimap]
+        if len(fg_bg_file) >= 2:
             bg_file = os.path.join(self.dataset_root, fg_bg_file[1])
             bg = cv2.imread(bg_file)
             data['img'], data['bg'] = self.composite(fg, alpha, bg)
@@ -119,6 +122,15 @@ def __getitem__(self, idx):
             data['gt_fields'].append('fg')
             data['gt_fields'].append('bg')
             data['gt_fields'].append('alpha')
+            if len(fg_bg_file) == 3 and self.get_trimap:
+                if self.mode == 'val':
+                    trimap_path = os.path.join(self.dataset_root, fg_bg_file[2])
+                    if os.path.exists(trimap_path):
+                        data['trimap'] = trimap_path
+                        data['gt_fields'].append('trimap')
+                    else:
+                        raise FileNotFoundError(
+                            'trimap is not found: {}'.format(fg_bg_file[2]))
         else:
             data['img'] = fg
             if self.mode in ['train', 
'trainval']:
@@ -139,13 +151,6 @@ def __getitem__(self, idx):
         for key in data.get('gt_fields', []):
             data[key] = data[key].astype('float32')
         if self.get_trimap:
-            # Trimap read from file only happening in evaluation.
-            if self.mode == 'val':
-                trimap_path = alpha_file.replace('alpha', 'trimap')
-                if os.path.exists(trimap_path):
-                    data['trimap'] = trimap_path
-                    data['gt_fields'].append('trimap')
-
             if 'trimap' not in data:
                 data['trimap'] = self.gen_trimap(
                     data['alpha'], mode=self.mode).astype('float32')
diff --git a/contrib/matting/model/dim.py b/contrib/matting/model/dim.py
index 8746bdb030..0f536545cb 100644
--- a/contrib/matting/model/dim.py
+++ b/contrib/matting/model/dim.py
@@ -12,14 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from collections import defaultdict
 
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
-
 from paddleseg.models import layers
 from paddleseg import utils
 from paddleseg.cvlibs import manager
 
+from .loss import MRSD
+
 
 @manager.MODELS.add_component
 class DIM(nn.Layer):
@@ -34,7 +36,7 @@ class DIM(nn.Layer):
         backbone: backbone model.
         pretrained(str, optional): The path of pretrained model. Default: None.
         stage (int, optional): The stage of model. Default: 3.
-        decoder_input_channels(int, optional): The channel os decoder input. Defautl: 512.
+        decoder_input_channels(int, optional): The channel of decoder input. Default: 512.
 
     """
 
@@ -65,8 +67,7 @@ def __init__(self,
 
     def forward(self, inputs):
         input_shape = inputs['img'].shape[-2:]
-        x = paddle.concat([inputs['img'], inputs['trimap'].unsqueeze(1) / 255],
-                          axis=1)
+        x = paddle.concat([inputs['img'], inputs['trimap'] / 255], axis=1)
         fea_list = self.backbone(x)
 
         # decoder stage
@@ -80,19 +81,54 @@ def forward(self, inputs):
         if self.stage < 2:
             return logit_dict
 
-        # refine stage
-        refine_input = paddle.concat([inputs['img'], alpha_raw], axis=1)
-        alpha_refine = self.refine(refine_input)
-
-        # finally alpha
-        alpha_pred = alpha_refine + alpha_raw
-        alpha_pred = F.interpolate(
-            alpha_pred, input_shape, mode='bilinear', align_corners=False)
-        if not self.training:
-            alpha_pred = paddle.clip(alpha_pred, min=0, max=1)
-
-        logit_dict['alpha_pred'] = alpha_pred
-        return logit_dict
+        if self.stage >= 2:
+            # refine stage
+            refine_input = paddle.concat([inputs['img'], alpha_raw], axis=1)
+            alpha_refine = self.refine(refine_input)
+
+            # finally alpha
+            alpha_pred = alpha_refine + alpha_raw
+            alpha_pred = F.interpolate(
+                alpha_pred, input_shape, mode='bilinear', align_corners=False)
+            if not self.training:
+                alpha_pred = paddle.clip(alpha_pred, min=0, max=1)
+            logit_dict['alpha_pred'] = alpha_pred
+        if self.training:
+            return logit_dict
+        else:
+            return alpha_pred
+
+    def loss(self, logit_dict, label_dict, loss_func_dict=None):
+        if loss_func_dict is None:
+            loss_func_dict = defaultdict(list)
+            loss_func_dict['alpha_raw'].append(MRSD())
+            loss_func_dict['comp'].append(MRSD())
+            loss_func_dict['alpha_pred'].append(MRSD())
+
+        loss = {}
+        mask = label_dict['trimap'] == 128
+        loss['all'] = 0
+
+        if self.stage != 2:
+            loss['alpha_raw'] = loss_func_dict['alpha_raw'][0](
+                logit_dict['alpha_raw'], label_dict['alpha'], mask)
+            loss['alpha_raw'] = 0.5 * loss['alpha_raw']
+            loss['all'] = loss['all'] + loss['alpha_raw']
+
+        if self.stage == 1 or self.stage == 3:
+            comp_pred = logit_dict['alpha_raw'] * label_dict['fg'] + \
+                (1 - logit_dict['alpha_raw']) * label_dict['bg']
+            loss['comp'] = loss_func_dict['comp'][0](comp_pred,
+                                                     label_dict['img'], mask)
+
loss['comp'] = 0.5 * loss['comp'] + loss['all'] = loss['all'] + loss['comp'] + + if self.stage == 2 or self.stage == 3: + loss['alpha_pred'] = loss_func_dict['alpha_pred'][0]( + logit_dict['alpha_pred'], label_dict['alpha'], mask) + loss['all'] = loss['all'] + loss['alpha_pred'] + + return loss def init_weight(self): if self.pretrained is not None: diff --git a/contrib/matting/val.py b/contrib/matting/val.py index 2e8152164b..e512126592 100644 --- a/contrib/matting/val.py +++ b/contrib/matting/val.py @@ -73,8 +73,6 @@ def parse_args(): def main(args): paddle.set_device('gpu') - # T.ResizeByLong(long_size=1024), - # t = [T.LoadImages(), T.ResizeToIntMult(mult_int=32), T.Normalize()] t = [ T.LoadImages(), T.ResizeByShort(512), From 9cd6b740842f4220e5fb23424ae40d3007cea08b Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Sun, 26 Sep 2021 17:52:00 +0800 Subject: [PATCH 156/210] add configs of modnet --- .../matting/dataset/human_matting_dataset.py | 2 + contrib/matting/model/mobilenet_v2.py | 31 +--- contrib/matting/model/modnet.py | 6 +- contrib/matting/train.py | 153 ++++++------------ contrib/matting/transforms.py | 16 ++ contrib/matting/val.py | 75 ++++----- paddleseg/cvlibs/config.py | 8 +- paddleseg/cvlibs/manager.py | 8 +- paddleseg/models/backbones/mobilenetv2.py | 2 +- 9 files changed, 123 insertions(+), 178 deletions(-) diff --git a/contrib/matting/dataset/human_matting_dataset.py b/contrib/matting/dataset/human_matting_dataset.py index 73b7bbb6a4..2dc8adfe85 100644 --- a/contrib/matting/dataset/human_matting_dataset.py +++ b/contrib/matting/dataset/human_matting_dataset.py @@ -19,10 +19,12 @@ import numpy as np import random import paddle +from paddleseg.cvlibs import manager import transforms as T +@manager.DATASETS.add_component class HumanMattingDataset(paddle.io.Dataset): """ human_matting diff --git a/contrib/matting/model/mobilenet_v2.py b/contrib/matting/model/mobilenet_v2.py index 88ffa57ddf..7e8b584143 100644 --- a/contrib/matting/model/mobilenet_v2.py +++ b/contrib/matting/model/mobilenet_v2.py @@ -1,4 +1,4 @@ -# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -23,6 +23,7 @@ from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D from paddleseg import utils +from paddleseg.cvlibs import manager MODEL_URLS = { "MobileNetV2_x0_25": @@ -39,7 +40,7 @@ "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV2_x2_0_pretrained.pdparams" } -__all__ = list(MODEL_URLS.keys()) +__all__ = ["MobileNetV2"] class ConvBNLayer(nn.Layer): @@ -234,31 +235,7 @@ def init_weight(self): utils.load_pretrained_model(self, self.pretrained) -def MobileNetV2_x0_25(**kwargs): - model = MobileNet(scale=0.25, **kwargs) - return model - - -def MobileNetV2_x0_5(**kwargs): - model = MobileNet(scale=0.5, **kwargs) - return model - - -def MobileNetV2_x0_75(**kwargs): - model = MobileNet(scale=0.75, **kwargs) - return model - - +@manager.BACKBONES.add_component def MobileNetV2(**kwargs): model = MobileNet(scale=1.0, **kwargs) return model - - -def MobileNetV2_x1_5(**kwargs): - model = MobileNet(scale=1.5, **kwargs) - return model - - -def MobileNetV2_x2_0(**kwargs): - model = MobileNet(scale=2.0, **kwargs) - return model diff --git a/contrib/matting/model/modnet.py b/contrib/matting/model/modnet.py index c24dc85220..555d214885 100644 --- a/contrib/matting/model/modnet.py +++ b/contrib/matting/model/modnet.py @@ -48,9 +48,9 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None): if loss_func_dict is None: loss_func_dict = defaultdict(list) loss_func_dict['semantic'].append(paddleseg.models.MSELoss()) - loss_func_dict['detail'].append(paddleseg.modells.L1Loss()) - loss_func_dict['fusion'].append(paddleseg.modells.L1Loss()) - loss_func_dict['fusion'].append(paddleseg.modells.L1Loss()) + loss_func_dict['detail'].append(paddleseg.models.L1Loss()) + loss_func_dict['fusion'].append(paddleseg.models.L1Loss()) + loss_func_dict['fusion'].append(paddleseg.models.L1Loss()) loss = {} # semantic loss diff --git a/contrib/matting/train.py b/contrib/matting/train.py index 4cf861b442..35a1d32ab7 100644 --- a/contrib/matting/train.py +++ b/contrib/matting/train.py @@ -18,7 +18,8 @@ import paddle import paddle.nn as nn -import paddleseg +from paddleseg.cvlibs import manager, Config +from paddleseg.utils import get_sys_env, logger from core import train from model import * @@ -29,8 +30,8 @@ def parse_args(): parser = argparse.ArgumentParser(description='Model training') # params of training - # parser.add_argument( - # "--config", dest="cfg", help="The config file.", default=None, type=str) + parser.add_argument( + "--config", dest="cfg", help="The config file.", default=None, type=str) parser.add_argument( '--iters', dest='iters', @@ -95,16 +96,6 @@ def parse_args(): dest='use_vdl', help='Whether to record the data to VisualDL during training', action='store_true') - parser.add_argument( - '--pretrained_model', - dest='pretrained_model', - help='the pretrained model', - type=str) - parser.add_argument( - '--dataset_root', - dest='dataset_root', - help='the dataset root directory', - type=str) parser.add_argument( '--save_begin_iters', dest='save_begin_iters', @@ -112,103 +103,64 @@ def parse_args(): default=None, type=int) parser.add_argument( - '--backbone', - dest='backbone', - help='The backbone of model. 
It is one of (MobileNetV2)', - required=True, - type=str) - parser.add_argument( - '--train_file', - dest='train_file', - nargs='+', - help='Image list for traiing', - type=str, - default='train.txt') - parser.add_argument( - '--val_file', - dest='val_file', - nargs='+', - help='Image list for evaluation', - type=str, - default='val.txt') + '--seed', + dest='seed', + help='Set the random seed during training.', + default=None, + type=int) return parser.parse_args() def main(args): - paddle.set_device('gpu') - - # 一些模块的组建 - # train_dataset - # 简单的建立一个数据读取器 - # train_dataset = Dataset() - t = [ - T.LoadImages(), - T.RandomCrop(crop_size=((512, 512), )), - T.RandomHorizontalFlip(), - T.Normalize() - ] - - train_dataset = HumanMattingDataset( - dataset_root=args.dataset_root, - transforms=t, - mode='train', - train_file=args.train_file) - if args.do_eval: - t = [ - T.LoadImages(), - T.ResizeByShort(512), - T.ResizeToIntMult(mult_int=32), - T.Normalize() - ] - val_dataset = HumanMattingDataset( - dataset_root=args.dataset_root, - transforms=t, - mode='val', - val_file=args.val_file, - get_trimap=False) - else: - val_dataset = None - print(len(train_dataset)) - - # loss - losses = defaultdict(list) - losses['semantic'].append(paddleseg.models.MSELoss()) - losses['detail'].append(paddleseg.models.L1Loss()) - losses['fusion'].append(paddleseg.models.L1Loss()) - losses['fusion'].append(paddleseg.models.L1Loss()) - - # model - #bulid backbone - pretrained_model = './pretrained_models/' + args.backbone + '_pretrained.pdparams' - if not os.path.exists(pretrained_model): - pretrained_model = None - backbone = eval(args.backbone)( - input_channels=3, pretrained=pretrained_model) - - model = MODNet(backbone=backbone, pretrained=args.pretrained_model) - - boundaries = [10000, 30000] - values = [ - args.learning_rate * 0.1**scale for scale in range(len(boundaries) + 1) - ] - lr = paddle.optimizer.lr.PiecewiseDecay( - boundaries=boundaries, values=values, last_epoch=-1, verbose=False) - optimizer = paddle.optimizer.Momentum( - learning_rate=lr, - momentum=0.9, - parameters=model.parameters(), - weight_decay=4e-5) + if args.seed is not None: + paddle.seed(args.seed) + np.random.seed(args.seed) + random.seed(args.seed) + + env_info = get_sys_env() + info = ['{}: {}'.format(k, v) for k, v in env_info.items()] + info = '\n'.join(['', format('Environment Information', '-^48s')] + info + + ['-' * 48]) + logger.info(info) + + place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[ + 'GPUs used'] else 'cpu' + + paddle.set_device(place) + if not args.cfg: + raise RuntimeError('No configuration file specified.') + + cfg = Config( + args.cfg, + learning_rate=args.learning_rate, + iters=args.iters, + batch_size=args.batch_size) + + train_dataset = cfg.train_dataset + if train_dataset is None: + raise RuntimeError( + 'The training dataset is not specified in the configuration file.') + elif len(train_dataset) == 0: + raise ValueError( + 'The length of train_dataset is 0. 
Please check if your dataset is valid' + ) + + val_dataset = cfg.val_dataset if args.do_eval else None + + msg = '\n---------------Config Information---------------\n' + msg += str(cfg) + msg += '------------------------------------------------' + logger.info(msg) # 调用train函数进行训练 train( - model=model, + model=cfg.model, train_dataset=train_dataset, val_dataset=val_dataset, - optimizer=optimizer, - losses=losses, - iters=args.iters, - batch_size=args.batch_size, + optimizer=cfg.optimizer, + iters=cfg.iters, + batch_size=cfg.batch_size, num_workers=args.num_workers, use_vdl=args.use_vdl, save_interval=args.save_interval, @@ -220,5 +172,4 @@ def main(args): if __name__ == '__main__': args = parse_args() - print(args) main(args) diff --git a/contrib/matting/transforms.py b/contrib/matting/transforms.py index 443c5e2687..2852356def 100644 --- a/contrib/matting/transforms.py +++ b/contrib/matting/transforms.py @@ -17,9 +17,11 @@ import cv2 import numpy as np from paddleseg.transforms import functional +from paddleseg.cvlibs import manager from PIL import Image +@manager.TRANSFORMS.add_component class Compose: """ Do transformation on input data with corresponding pre-processing and augmentation operations. @@ -54,6 +56,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class LoadImages: def __init__(self, to_rgb=True): self.to_rgb = to_rgb @@ -79,6 +82,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class Resize: def __init__(self, target_size=(512, 512)): if isinstance(target_size, list) or isinstance(target_size, tuple): @@ -101,6 +105,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class ResizeByLong: """ Resize the long side of an image to given size, and then scale the other side proportionally. @@ -120,6 +125,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class ResizeByShort: """ Resize the short side of an image to given size, and then scale the other side proportionally. @@ -139,6 +145,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class ResizeToIntMult: """ Resize to some int muitple, d.g. 32. @@ -160,6 +167,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class Normalize: """ Normalize an image. @@ -196,6 +204,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class RandomCropByAlpha: """ Randomly crop while centered on uncertain area by a certain probability. @@ -247,6 +256,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class RandomCrop: """ Randomly crop @@ -256,6 +266,8 @@ class RandomCrop: """ def __init__(self, crop_size=((320, 320), (480, 480), (640, 640))): + if not isinstance(crop_size[0], (list, tuple)): + crop_size = [crop_size] self.crop_size = crop_size def __call__(self, data): @@ -280,6 +292,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class LimitLong: """ Limit the long edge of image. @@ -334,6 +347,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class RandomHorizontalFlip: """ Flip an image horizontally with a certain probability. @@ -354,6 +368,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class RandomBlur: """ Blurring an image by a Gaussian function with a certain probability. @@ -387,6 +402,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class RandomDistort: """ Distort an image with random configurations. 
diff --git a/contrib/matting/val.py b/contrib/matting/val.py
index e512126592..25a7a16fe1 100644
--- a/contrib/matting/val.py
+++ b/contrib/matting/val.py
@@ -14,18 +14,20 @@
 
 import argparse
 
+import paddle
+import paddleseg
+from paddleseg.cvlibs import manager, Config
+from paddleseg.utils import get_sys_env, logger, config_check, utils
+
 from core import evaluate
 from model import *
 from dataset import HumanMattingDataset
-import transforms as T
 
 
 def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
-    # params of training
-    # parser.add_argument(
-    #     "--config", dest="cfg", help="The config file.", default=None, type=str)
-
+    parser.add_argument(
+        "--config", dest="cfg", help="The config file.", default=None, type=str)
     parser.add_argument(
         '--model_path',
         dest='model_path',
@@ -44,24 +46,6 @@ def parse_args():
         help='Num workers for data loader',
         type=int,
         default=0)
-    parser.add_argument(
-        '--backbone',
-        dest='backbone',
-        help='The backbone of model. It is one of (MobileNetV2)',
-        required=True,
-        type=str)
-    parser.add_argument(
-        '--dataset_root',
-        dest='dataset_root',
-        help='the dataset root directory',
-        type=str)
-    parser.add_argument(
-        '--val_file',
-        dest='val_file',
-        nargs='+',
-        help='Image list for evaluation',
-        type=str,
-        default='val.txt')
     parser.add_argument(
         '--save_results',
         dest='save_results',
@@ -72,30 +56,39 @@ def parse_args():
 
 
 def main(args):
-    paddle.set_device('gpu')
-    t = [
-        T.LoadImages(),
-        T.ResizeByShort(512),
-        T.ResizeToIntMult(mult_int=32),
-        T.Normalize()
-    ]
+    env_info = get_sys_env()
+    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
+        'GPUs used'] else 'cpu'
+
+    paddle.set_device(place)
+    if not args.cfg:
+        raise RuntimeError('No configuration file specified.')
 
-    eval_dataset = HumanMattingDataset(
-        dataset_root=args.dataset_root,
-        transforms=t,
-        mode='val',
-        val_file=args.val_file,
-        get_trimap=False)
+    cfg = Config(args.cfg)
+    val_dataset = cfg.val_dataset
+    if val_dataset is None:
+        raise RuntimeError(
+            'The validation dataset is not specified in the configuration file.'
+        )
+    elif len(val_dataset) == 0:
+        raise ValueError(
+            'The length of val_dataset is 0. Please check if your dataset is valid'
+        )
 
-    # model
-    backbone = eval(args.backbone)(input_channels=3)
+    msg = '\n---------------Config Information---------------\n'
+    msg += str(cfg)
+    msg += '------------------------------------------------'
+    logger.info(msg)
 
-    model = MODNet(backbone=backbone, pretrained=args.model_path)
+    model = cfg.model
+    if args.model_path:
+        utils.load_entire_model(model, args.model_path)
+        logger.info('Loaded trained params of model successfully')
 
     # Call the evaluate function to run evaluation
     evaluate(
-        model=model,
-        eval_dataset=eval_dataset,
+        model,
+        val_dataset,
         num_workers=args.num_workers,
         save_dir=args.save_dir,
         save_results=args.save_results)
diff --git a/paddleseg/cvlibs/config.py b/paddleseg/cvlibs/config.py
index cdcbb049bd..8e99cb9a26 100644
--- a/paddleseg/cvlibs/config.py
+++ b/paddleseg/cvlibs/config.py
@@ -307,11 +307,11 @@ def model(self) -> paddle.nn.Layer:
                 num_classes = self.val_dataset.num_classes
 
         if not num_classes:
-            raise ValueError(
-                '`num_classes` is not found. Please set it in model, train_dataset or val_dataset'
+            logger.warning(
+                '`num_classes` is not found. Please confirm whether it is needed.'
            )
-
-        model_cfg['num_classes'] = num_classes
+        else:
+            model_cfg['num_classes'] = num_classes
 
         if not self._model:
             self._model = self._load_object(model_cfg)
diff --git a/paddleseg/cvlibs/manager.py b/paddleseg/cvlibs/manager.py
index cd1d105a5e..5c502ea82d 100644
--- a/paddleseg/cvlibs/manager.py
+++ b/paddleseg/cvlibs/manager.py
@@ -15,6 +15,8 @@
 import inspect
 from collections.abc import Sequence
 
+from paddleseg.utils import logger
+
 
 class ComponentManager:
     """
@@ -109,7 +111,11 @@ def _add_single_component(self, component):
 
         # Check whether the component was added already
         if component_name in self._components_dict.keys():
-            raise KeyError("{} exists already!".format(component_name))
+            logger.warning(
+                "{} already exists! It is now updated to {}.".format(
+                    component_name, component))
+            self._components_dict[component_name] = component
+
         else:
             # Take the internal name of the component as its key
             self._components_dict[component_name] = component
diff --git a/paddleseg/models/backbones/mobilenetv2.py b/paddleseg/models/backbones/mobilenetv2.py
index b5da14b5ba..0ae4e960d7 100644
--- a/paddleseg/models/backbones/mobilenetv2.py
+++ b/paddleseg/models/backbones/mobilenetv2.py
@@ -18,7 +18,7 @@
 from paddleseg import utils
 
 
-@manager.MODELS.add_component
+@manager.BACKBONES.add_component
 class MobileNetV2(nn.Layer):
     """
     The MobileNetV2 implementation based on PaddlePaddle.
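With the relaxed manager above, registering a component under an existing name now logs a warning and overrides the previous entry instead of raising a KeyError; this is what allows contrib/matting to re-register transform names such as Compose that paddleseg already defines. A minimal sketch of the new behavior, using a hypothetical Foo component that is not part of the patch (and assuming the manager's components_dict property):

    from paddleseg.cvlibs import manager

    @manager.TRANSFORMS.add_component
    class Foo:
        pass

    @manager.TRANSFORMS.add_component  # previously raised KeyError; now it
    class Foo:                         # warns and replaces the registration
        pass

    assert manager.TRANSFORMS.components_dict['Foo'] is Foo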
) - - model_cfg['num_classes'] = num_classes + else: + model_cfg['num_classes'] = num_classes if not self._model: self._model = self._load_object(model_cfg) diff --git a/paddleseg/cvlibs/manager.py b/paddleseg/cvlibs/manager.py index cd1d105a5e..5c502ea82d 100644 --- a/paddleseg/cvlibs/manager.py +++ b/paddleseg/cvlibs/manager.py @@ -15,6 +15,8 @@ import inspect from collections.abc import Sequence +from paddleseg.utils import logger + class ComponentManager: """ @@ -109,7 +111,11 @@ def _add_single_component(self, component): # Check whether the component was added already if component_name in self._components_dict.keys(): - raise KeyError("{} exists already!".format(component_name)) + logger.warning( + "{} exists already!. it is now update to {} !!!".format( + component_name, component)) + self._components_dict[component_name] = component + else: # Take the internal name of the component as its key self._components_dict[component_name] = component diff --git a/paddleseg/models/backbones/mobilenetv2.py b/paddleseg/models/backbones/mobilenetv2.py index b5da14b5ba..0ae4e960d7 100644 --- a/paddleseg/models/backbones/mobilenetv2.py +++ b/paddleseg/models/backbones/mobilenetv2.py @@ -18,7 +18,7 @@ from paddleseg import utils -@manager.MODELS.add_component +@manager.BACKBONES.add_component class MobileNetV2(nn.Layer): """ The MobileNetV2 implementation based on PaddlePaddle. From e397f9960c1f582e2e18ae702a893050b189757f Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Sun, 26 Sep 2021 20:42:13 +0800 Subject: [PATCH 157/210] update config.py --- paddleseg/cvlibs/config.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/paddleseg/cvlibs/config.py b/paddleseg/cvlibs/config.py index 8e99cb9a26..0764585d54 100644 --- a/paddleseg/cvlibs/config.py +++ b/paddleseg/cvlibs/config.py @@ -168,6 +168,11 @@ def learning_rate(self) -> paddle.optimizer.lr.LRScheduler: lr_scheduler: type: PolynomialDecay learning_rate: 0.01''') + + lr = self.dic.get('learning_rate', {}) + if isinstance(lr, float): + return lr + _learning_rate = self.dic.get('learning_rate', {}).get('value') if not _learning_rate: raise RuntimeError( From f49cd3d250541601916a5f6632fbcef6ab0caaf5 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Sun, 26 Sep 2021 20:48:26 +0800 Subject: [PATCH 158/210] rm tools some --- contrib/matting/tools/gen_dataset/gen_bg.py | 107 --------- .../matting/tools/gen_dataset/gen_dataset.py | 226 ------------------ .../matting/tools/gen_dataset/gen_fg_alpha.py | 58 ----- contrib/matting/tools/update_vgg16_params.py | 2 +- 4 files changed, 1 insertion(+), 392 deletions(-) delete mode 100644 contrib/matting/tools/gen_dataset/gen_bg.py delete mode 100644 contrib/matting/tools/gen_dataset/gen_dataset.py delete mode 100644 contrib/matting/tools/gen_dataset/gen_fg_alpha.py diff --git a/contrib/matting/tools/gen_dataset/gen_bg.py b/contrib/matting/tools/gen_dataset/gen_bg.py deleted file mode 100644 index 14ba895ef6..0000000000 --- a/contrib/matting/tools/gen_dataset/gen_bg.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import shutil -from multiprocessing import Pool -from functools import partial - -import cv2 -import numpy as np -from tqdm import tqdm -""" -Get background from MSCOCO_17 and PascalVOC12 and exclude The images with person. 
-""" - - -def get_bg_from_pascal_voc( - data_path='/mnt/chenguowei01/datasets/VOCdevkit/VOC2012', - save_path='bg/pascal_val12'): - """ - extract background - """ - person_train_txt = os.path.join(data_path, - "ImageSets/Main/person_train.txt") - train_save_path = os.path.join(save_path, 'train') - person_val_txt = os.path.join(data_path, "ImageSets/Main/person_val.txt") - val_save_path = os.path.join(save_path, 'val') - if not os.path.exists(train_save_path): - os.makedirs(train_save_path) - if not os.path.exists(val_save_path): - os.makedirs(val_save_path) - - # training dataset - f = open(person_train_txt, 'r') - train_images = f.read().splitlines() - f.close() - print('there are {} images in training dataset.'.format(len(train_images))) - num = 0 - for line in train_images: - image_name, id = line.split() - if id == '-1': - num += 1 - ori_img = os.path.join(data_path, 'JPEGImages', image_name + '.jpg') - shutil.copy(ori_img, train_save_path) - print('there are {} images without person in the training dataset'.format( - num)) - - # val dataset - f = open(person_val_txt, 'r') - val_images = f.read().splitlines() - f.close() - print('there are {} images in val dataset.'.format(len(val_images))) - num = 0 - for line in val_images: - image_name, id = line.split() - if id == '-1': - num += 1 - ori_img = os.path.join(data_path, 'JPEGImages', image_name + '.jpg') - shutil.copy(ori_img, val_save_path) - print('there are {} images without person in the val dataset'.format(num)) - - -def cp(line, data_path, save_path): - image_name, anno_name = line.split('|') - anno = cv2.imread(os.path.join(data_path, anno_name), cv2.IMREAD_UNCHANGED) - classes = np.unique(anno) - if 0 not in classes: - shutil.copy(os.path.join(data_path, image_name), save_path) - - -def get_bg_from_coco_17(data_path='/mnt/chenguowei01/datasets/coco_17', - save_path='bg/coco_17'): - train_txt = os.path.join(data_path, 'train2017.txt') - train_save_path = os.path.join(save_path, 'train') - val_txt = os.path.join(data_path, 'val2017.txt') - val_save_path = os.path.join(save_path, 'val') - if not os.path.exists(train_save_path): - os.makedirs(train_save_path) - if not os.path.exists(val_save_path): - os.makedirs(val_save_path) - - # training dataset - partial_train_cp = partial( - cp, data_path=data_path, save_path=train_save_path) - with open(train_txt, 'r') as f: - train_list = f.read().splitlines() - max_ = len(train_list) - with Pool(40) as pool: - with tqdm(total=max_) as pbar: - for i, _ in tqdm( - enumerate( - pool.imap_unordered(partial_train_cp, train_list))): - pbar.update() - - # val dataset - partial_val_cp = partial(cp, data_path=data_path, save_path=val_save_path) - with open(val_txt, 'r') as f: - val_list = f.read().splitlines() - max_ = len(val_list) - with Pool(40) as pool: - with tqdm(total=max_) as pbar: - for i, _ in tqdm( - enumerate(pool.imap_unordered(partial_val_cp, val_list))): - pbar.update() - - -if __name__ == "__main__": - # get_bg_from_pascal_voc(save_path="/mnt/chenguowei01/datasets/matting/gather/bg/pascal_voc12") - get_bg_from_coco_17( - save_path="/mnt/chenguowei01/datasets/matting/gather/bg/coco_17") diff --git a/contrib/matting/tools/gen_dataset/gen_dataset.py b/contrib/matting/tools/gen_dataset/gen_dataset.py deleted file mode 100644 index 4871dbea14..0000000000 --- a/contrib/matting/tools/gen_dataset/gen_dataset.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -to generator the human matting dataset. The directory is as follow: - human_matting - train - image - 000001.png - ... - fg - 000001.png - ... - bg - 000001.png - ... - alpha - 000001.png - .... - trimap - 000001.png - val - image - 000001.png - ... - fg - 000001.png - ... - bg - 000001.png - ... - alpha - 000001.png - .... - trimap - 000001.png - ... -For video, get one every 5 frames, and composite it with one background. -For image, one image is composited with 5 background. -""" - -import os -import math -import time - -import cv2 -import numpy as np -from multiprocessing import Pool -from tqdm import tqdm - - -def get_files(root_path): - res = [] - for root, dirs, files in os.walk(root_path, followlinks=True): - for f in files: - if f.endswith(('.jpg', '.png', '.jpeg', 'JPG', '.mp4')): - res.append(os.path.join(root, f)) - return res - - -ori_dataset_root = "/mnt/chenguowei01/datasets/matting/gather" -ori_fg_path = os.path.join(ori_dataset_root, 'fg') -ori_alpha_path = os.path.join(ori_dataset_root, 'alpha') -ori_bg_path = os.path.join(ori_dataset_root, 'bg') - -fg_list = get_files(ori_fg_path) -alpha_list = [f.replace('fg', 'alpha') for f in fg_list] -bg_list = get_files(ori_bg_path) -len_bg_list = len(bg_list) - -dataset_root = '/ssd3/chenguowei01/datasets/matting/human_matting' - - -def im_write(save_path, img): - dir_name = os.path.dirname(save_path) - if not os.path.exists(dir_name): - os.makedirs(dir_name) - cv2.imwrite(save_path, img) - - -def composite(fg, alpha, ori_bg): - fg_h, fg_w = fg.shape[:2] - ori_bg_h, ori_bg_w = ori_bg.shape[:2] - - wratio = fg_w / ori_bg_w - hratio = fg_h / ori_bg_h - ratio = wratio if wratio > hratio else hratio - - # Resize ori_bg if it is smaller than fg. 
- if ratio > 1: - resize_h = math.ceil(ori_bg_h * ratio) - resize_w = math.ceil(ori_bg_w * ratio) - bg = cv2.resize( - ori_bg, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR) - else: - bg = ori_bg - - bg = bg[0:fg_h, 0:fg_w, :] - alpha = alpha / 255 - alpha = np.expand_dims(alpha, axis=2) - image = alpha * fg + (1 - alpha) * bg - image = image.astype(np.uint8) - return image, bg - - -def video_comp(fg_file, alpha_file, bg_index_list, interval=5, mode='train'): - fg_video_capture = cv2.VideoCapture(fg_file) - alpha_video_capture = cv2.VideoCapture(alpha_file) - frames = fg_video_capture.get(cv2.CAP_PROP_FRAME_COUNT) - print("there are {} frames in video {}".format(frames, fg_file)) - - f_index = 0 - while True: - if f_index >= frames: - break - fg_video_capture.set(cv2.CAP_PROP_POS_FRAMES, f_index) - fg_ret, fg_frame = fg_video_capture.retrieve() # get foreground - alpha_video_capture.set(cv2.CAP_PROP_POS_FRAMES, f_index) - alpha_ret, alpha_frame = alpha_video_capture.retrieve() # get alpha - ret = fg_ret and alpha_ret - if not ret: - break - if len(alpha_frame.shape) == 3: - alpha_frame = alpha_frame[:, :, 0] - - file_name = os.path.basename(fg_file) - file_name = os.path.splitext(file_name)[0] - fg_save_name = os.path.join(dataset_root, mode, 'fg', file_name, - '{:0>5d}'.format(f_index) + '.png') - alpha_save_name = fg_save_name.replace('fg', 'alpha') - bg_save_name = fg_save_name.replace('fg', 'bg') - image_save_name = fg_save_name.replace('fg', 'image') - - ori_bg = cv2.imread( - bg_list[bg_index_list[f_index % len_bg_list]]) # get background - image, bg = composite( - fg_frame, alpha_frame, - ori_bg) # get composition image and the response background - - # save fg, alpha, bg, image - im_write(fg_save_name, fg_frame) - im_write(alpha_save_name, alpha_frame) - im_write(image_save_name, image) - im_write(bg_save_name, bg) - - f_index += interval - - -def image_comp(fg_file, alpha_file, bg_index_list, num_bgs=5, mode='train'): - fg = cv2.imread(fg_file) - alpha = cv2.imread(alpha_file, cv2.IMREAD_UNCHANGED) - print('Composition for ', fg_file) - - for i in range(num_bgs): - bg_index = bg_index_list[i] - ori_bg = cv2.imread(bg_list[bg_index]) # get background - image, bg = composite(fg, alpha, ori_bg) - - file_name = os.path.basename(fg_file) - file_name = os.path.splitext(file_name)[0] - file_name = '_'.join([file_name, '{:0>3d}'.format(i)]) - fg_save_name = os.path.join(dataset_root, mode, 'fg', - file_name + '.png') - alpha_save_name = fg_save_name.replace('fg', 'alpha') - bg_save_name = fg_save_name.replace('fg', 'bg') - image_save_name = fg_save_name.replace('fg', 'image') - - im_write(fg_save_name, fg) - im_write(alpha_save_name, alpha) - im_write(image_save_name, image) - im_write(bg_save_name, bg) - - -def comp_one(fa_index): - """ - Composite foreground and background. - - Args: - fa_index: The index of foreground and alpha. - bg_index: The index of background, if foreground is video, get one every 5 frames, and composite it with one background, - if foreground is image, one image is composited with 5 background. 
- """ - fg_file = fg_list[fa_index] - alpha_file = alpha_list[fa_index] - mode = 'train' if 'train' in fg_file else 'val' - - # Randomly bg index - np.random.seed(int(os.getpid() * time.time()) % - (2**30)) # make different for each process - - len_bg = len(bg_list) - bg_index_list = list(range(len_bg)) - np.random.shuffle(bg_index_list) - - if os.path.splitext(fg_file)[-1] in ['.mp4']: - video_comp( - fg_file=fg_file, - alpha_file=alpha_file, - bg_index_list=bg_index_list, - mode=mode) - # else: - # image_comp(fg_file=fg_file, alpha_file=alpha_file, bg_index_list=bg_index_list, mode=mode) - - -def comp_pool(): - len_fa = len(fg_list) - - with Pool(20) as pool: - with tqdm(total=len_fa) as pbar: - for i, _ in tqdm( - enumerate(pool.imap_unordered(comp_one, range(len_fa)))): - pbar.update() - - -if __name__ == '__main__': - comp_pool() diff --git a/contrib/matting/tools/gen_dataset/gen_fg_alpha.py b/contrib/matting/tools/gen_dataset/gen_fg_alpha.py deleted file mode 100644 index 6589bf2375..0000000000 --- a/contrib/matting/tools/gen_dataset/gen_fg_alpha.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import random - -import cv2 - - -def get_from_pm85(data_path="/mnt/chenguowei01/datasets/matting/PhotoMatte85", - save_path="/mnt/chenguowei01/datasets/matting/gather"): - """ - Get matte from PhotoMatte85 - """ - - files = os.listdir(data_path) - files = [os.path.join(data_path, f) for f in files] - random.seed(1) - random.shuffle(files) - train_files = files[:-10] - val_files = files[-10:] - - # training dataset - fg_save_path = os.path.join(save_path, 'fg', 'PhotoMatte85', 'train') - alpha_save_path = fg_save_path.replace('fg', 'alpha') - if not os.path.exists(fg_save_path): - os.makedirs(fg_save_path) - if not os.path.exists(alpha_save_path): - os.makedirs(alpha_save_path) - for f in train_files: - png_img = cv2.imread(f, cv2.IMREAD_UNCHANGED) - fg = png_img[:, :, :3] - alpha = png_img[:, :, -1] - if alpha[0, 0] != 0: - alpha[:100, :] = 0 - fg[:100, :, :] = 0 - basename = os.path.basename(f) - cv2.imwrite(os.path.join(fg_save_path, basename), fg) - cv2.imwrite(os.path.join(alpha_save_path, basename), alpha) - - # val dataset - fg_save_path = os.path.join(save_path, 'fg', 'PhotoMatte85', 'val') - alpha_save_path = fg_save_path.replace('fg', 'alpha') - if not os.path.exists(fg_save_path): - os.makedirs(fg_save_path) - if not os.path.exists(alpha_save_path): - os.makedirs(alpha_save_path) - for f in val_files: - png_img = cv2.imread(f, cv2.IMREAD_UNCHANGED) - fg = png_img[:, :, :3] - alpha = png_img[:, :, -1] - if alpha[0, 0] != 0: - alpha[:100, :] = 0 - fg[:100, :, :] = 0 - basename = os.path.basename(f) - cv2.imwrite(os.path.join(fg_save_path, basename), fg) - cv2.imwrite(os.path.join(alpha_save_path, basename), alpha) - - -if __name__ == "__main__": - get_from_pm85() diff --git a/contrib/matting/tools/update_vgg16_params.py b/contrib/matting/tools/update_vgg16_params.py index 15be91b9e9..211a8bb638 100644 --- a/contrib/matting/tools/update_vgg16_params.py +++ b/contrib/matting/tools/update_vgg16_params.py @@ -53,5 +53,5 @@ def update_vgg16_params(model_path): if __name__ == "__main__": paddle.set_device('cpu') - model_path = '/mnt/chenguowei01/.paddleseg/pretrained_model/dygraph/VGG16_pretrained.pdparams' + model_path = '~/.paddleseg/pretrained_model/dygraph/VGG16_pretrained.pdparams' update_vgg16_params(model_path) From 75d69f91f5f80485f56d2a64fcfab3819a317391 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Sun, 26 Sep 2021 20:55:14 +0800 Subject: [PATCH 159/210] update dataset name --- 
contrib/matting/dataset/__init__.py | 3 +- contrib/matting/dataset/dataset.py | 128 ------------------ ..._matting_dataset.py => matting_dataset.py} | 33 +---- contrib/matting/train.py | 3 +- contrib/matting/val.py | 2 +- 5 files changed, 4 insertions(+), 165 deletions(-) delete mode 100644 contrib/matting/dataset/dataset.py rename contrib/matting/dataset/{human_matting_dataset.py => matting_dataset.py} (87%) diff --git a/contrib/matting/dataset/__init__.py b/contrib/matting/dataset/__init__.py index 6091bf7680..b827b0cc2c 100644 --- a/contrib/matting/dataset/__init__.py +++ b/contrib/matting/dataset/__init__.py @@ -12,5 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .dataset import Dataset -from .human_matting_dataset import HumanMattingDataset +from .matting_dataset import MattingDataset diff --git a/contrib/matting/dataset/dataset.py b/contrib/matting/dataset/dataset.py deleted file mode 100644 index 0025281d74..0000000000 --- a/contrib/matting/dataset/dataset.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -import cv2 -import numpy as np -import random -import paddle - -from utils import get_files -import transforms as T - - -class Dataset(paddle.io.Dataset): - """ - The dataset folder should be as follow: - root - |__train - | |__image - | |__fg - | |__bg - | |__alpha - | - |__val - | |__image - | |__fg - | |__bg - | |__alpha - | |__[trimap] - - """ - - def __init__( - self, - dataset_root, - transforms, - mode='train', - ): - super().__init__() - self.dataset_root = dataset_root - self.transforms = T.Compose(transforms) - self.mode = mode - - img_dir = os.path.join(dataset_root, mode, 'image') - self.img_list = get_files(img_dir) # a list - self.alpha_list = [f.replace('image', 'alpha') for f in self.img_list] - self.fg_list = [f.replace('image', 'fg') for f in self.img_list] - self.bg_list = [f.replace('image', 'bg') for f in self.img_list] - - def __getitem__(self, idx): - data = {} - data['img'] = self.img_list[idx] - data['alpha'] = self.alpha_list[idx] - data['fg'] = self.fg_list[idx] - data['bg'] = self.bg_list[idx] - data['gt_field'] = [] - - if self.mode == 'train': - data['gt_fields'] = ['alpha', 'fg', 'bg'] - else: - data['gt_fields'] = ['alpha'] - data['img_name'] = self.img_list[idx].lstrip( - self.dataset_root) # using in save prediction results - # If has trimap, use it - trimap_path = data['alpha'].replace('alpha', 'trimap') - if os.path.exists(trimap_path): - data['trimap'] = trimap_path - data['gt_fields'].append('trimap') - - data['trans_info'] = [] # Record shape change information - data = self.transforms(data) - data['img'] = data['img'].astype('float32') - for key in data.get('gt_fields', []): - data[key] = data[key].astype('float32') - if 'trimap' not in data: - data['trimap'] = self.gen_trimap( - data['alpha'], mode=self.mode).astype('float32') - - data['alpha'] = data['alpha'] / 
255. - - return data - - def __len__(self): - return len(self.img_list) - - @staticmethod - def gen_trimap(alpha, mode='train', eval_kernel=7): - if mode == 'train': - k_size = random.choice(range(2, 5)) - iterations = np.random.randint(5, 15) - kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, - (k_size, k_size)) - dilated = cv2.dilate(alpha, kernel, iterations=iterations) - eroded = cv2.erode(alpha, kernel, iterations=iterations) - trimap = np.zeros(alpha.shape) - trimap.fill(128) - trimap[eroded > 254.5] = 255 - trimap[dilated < 0.5] = 0 - else: - k_size = eval_kernel - kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, - (k_size, k_size)) - dilated = cv2.dilate(alpha, kernel) - trimap = np.zeros(alpha.shape) - trimap.fill(128) - trimap[alpha >= 250] = 255 - trimap[dilated <= 5] = 0 - - return trimap - - -if __name__ == '__main__': - t = [T.LoadImages(), T.Resize(), T.Normalize()] - train_dataset = Dataset( - dataset_root='data/matting/human_matte/', transforms=t, mode='train') - print(len(train_dataset)) diff --git a/contrib/matting/dataset/human_matting_dataset.py b/contrib/matting/dataset/matting_dataset.py similarity index 87% rename from contrib/matting/dataset/human_matting_dataset.py rename to contrib/matting/dataset/matting_dataset.py index 2dc8adfe85..38e50a8f50 100644 --- a/contrib/matting/dataset/human_matting_dataset.py +++ b/contrib/matting/dataset/matting_dataset.py @@ -25,7 +25,7 @@ @manager.DATASETS.add_component -class HumanMattingDataset(paddle.io.Dataset): +class MattingDataset(paddle.io.Dataset): """ human_matting |__Composition-1k(origin dataset name) @@ -213,34 +213,3 @@ def gen_trimap(alpha, mode='train', eval_kernel=7): trimap[dilated <= 5] = 0 return trimap - - -if __name__ == '__main__': - t = [T.LoadImages(to_rgb=False), T.Resize(), T.Normalize()] - train_dataset = HumanMattingDataset( - dataset_root='../data/matting/human_matte/', - transforms=t, - mode='val', - train_file=['Composition-1k_train.txt', 'Distinctions-646_train.txt'], - val_file=['Composition-1k_val.txt', 'Distinctions-646_val.txt']) - data = train_dataset[81] - print(data.keys()) - print(data['gt_fields']) - - data['img'] = np.transpose(data['img'], (1, 2, 0)) - for key in data.get('gt_fields', []): - if len(data[key].shape) == 2: - continue - data[key] = np.transpose(data[key], (1, 2, 0)) - - data['img'] = ((data['img'] * 0.5 + 0.5) * 255).astype('uint8') - for key in data['gt_fields']: - if key == 'alpha': - continue - data[key] = ((data[key] * 0.5 + 0.5) * 255).astype('uint8') - - cv2.imwrite('img.png', data['img']) - for key in data['gt_fields']: - cv2.imwrite(key + '.png', data[key]) - - cv2.imwrite('trimap.png', data['trimap'].astype('uint8')) diff --git a/contrib/matting/train.py b/contrib/matting/train.py index 35a1d32ab7..edfb9544a0 100644 --- a/contrib/matting/train.py +++ b/contrib/matting/train.py @@ -23,8 +23,7 @@ from core import train from model import * -from dataset import HumanMattingDataset -import transforms as T +from dataset import MattingDataset def parse_args(): diff --git a/contrib/matting/val.py b/contrib/matting/val.py index 25a7a16fe1..d7bd9d3b46 100644 --- a/contrib/matting/val.py +++ b/contrib/matting/val.py @@ -21,7 +21,7 @@ from core import evaluate from model import * -from dataset import HumanMattingDataset +from dataset import MattingDataset def parse_args(): From bea3e1ddb5924b652f3fc38cc2272dff7813fabb Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Sun, 26 Sep 2021 21:01:50 +0800 Subject: [PATCH 160/210] update models --- contrib/matting/model/dim.py 
| 15 --------------- contrib/matting/model/resnet_vd.py | 5 ----- 2 files changed, 20 deletions(-) diff --git a/contrib/matting/model/dim.py b/contrib/matting/model/dim.py index 0f536545cb..e963cd16de 100644 --- a/contrib/matting/model/dim.py +++ b/contrib/matting/model/dim.py @@ -51,8 +51,6 @@ def __init__(self, self.stage = stage decoder_output_channels = [64, 128, 256, 512] - if backbone.__class__.__name__ == 'ResNet_vd': - decoder_output_channels = [64, 256, 512, 1024] self.decoder = Decoder( input_channels=decoder_input_channels, output_channels=decoder_output_channels) @@ -203,16 +201,3 @@ def forward(self, x): alpha = self.alpha_pred(x) return alpha - - -if __name__ == "__main__": - from vgg import VGG16 - backbone = VGG16(input_channels=4) - model = DIM(backbone=backbone) - - model_input = paddle.randint(0, 256, (1, 4, 320, 320)).astype('float32') - alpha_pred, alpha_raw = model(model_input) - - print(model) - - print(alpha_pred.shape, alpha_raw.shape) diff --git a/contrib/matting/model/resnet_vd.py b/contrib/matting/model/resnet_vd.py index 793de43894..c5124492f1 100644 --- a/contrib/matting/model/resnet_vd.py +++ b/contrib/matting/model/resnet_vd.py @@ -24,11 +24,6 @@ "ResNet18_vd", "ResNet34_vd", "ResNet50_vd", "ResNet101_vd", "ResNet152_vd" ] -# delete the compoment in manager.BACKBONED if existing. -for i in __all__: - if i in manager.BACKBONES._components_dict: - manager.BACKBONES._components_dict.pop(i) - class ConvBNLayer(nn.Layer): def __init__( From b1b0056608efb08c5ae9b46af1062e20fd78acaf Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 27 Sep 2021 11:21:28 +0800 Subject: [PATCH 161/210] update modnet.py --- contrib/matting/model/modnet.py | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/contrib/matting/model/modnet.py b/contrib/matting/model/modnet.py index 555d214885..9345967bd0 100644 --- a/contrib/matting/model/modnet.py +++ b/contrib/matting/model/modnet.py @@ -464,22 +464,3 @@ def _init_kernel(self): kernel = kernel.astype('float32') kernel = kernel[np.newaxis, np.newaxis, :, :] paddle.assign(kernel, self.op[1].weight) - - -if __name__ == '__main__': - paddle.set_device('cpu') - from mobilenet_v2 import MobileNetV2 - from paddleseg import utils - - backbone = MobileNetV2( - pretrained="../pretrained_models/MobileNetV2_pretrained.pdparams") - model = MODNet(backbone=backbone) - model.eval() - - x = paddle.randint(0, 256, (1, 3, 512, 512)).astype('float32') - - inputs = {} - inputs['img'] = x / 255. - - logit = model(inputs) - print(logit) From 31a07ab701acef3bdf8da1dd39990c96471c52f2 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 27 Sep 2021 17:13:04 +0800 Subject: [PATCH 162/210] realize prediction --- contrib/matting/core/predict.py | 27 +++++++++--- contrib/matting/model/modnet.py | 1 + contrib/matting/predict.py | 75 ++++++++++++++++++++------------- contrib/matting/val.py | 3 +- paddleseg/cvlibs/config.py | 12 ++---- paddleseg/cvlibs/manager.py | 6 +-- 6 files changed, 75 insertions(+), 49 deletions(-) diff --git a/contrib/matting/core/predict.py b/contrib/matting/core/predict.py index 242b0c2384..ff9d5ca5c6 100644 --- a/contrib/matting/core/predict.py +++ b/contrib/matting/core/predict.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -38,7 +38,7 @@ def partition_list(arr, m):
     return [arr[i:i + n] for i in range(0, len(arr), n)]


-def save_alpha_pred(alpha, path):
+def save_alpha_pred(alpha, path, trimap=None):
     """
     The value of alpha is in range [0, 1]; the shape should be [h, w].
     """
@@ -46,6 +46,10 @@ def save_alpha_pred(alpha, path):
     if not os.path.exists(dirname):
         os.makedirs(dirname)

+    if trimap is not None:
+        trimap = cv2.imread(trimap, 0)
+        alpha[trimap == 0] = 0
+        alpha[trimap == 255] = 255
     alpha = (alpha).astype('uint8')
     cv2.imwrite(path, alpha)

@@ -67,10 +70,16 @@ def reverse_transform(alpha, trans_info):
 def preprocess(img, transforms, trimap=None):
     data = {}
     data['img'] = img
+    if trimap is not None:
+        data['trimap'] = trimap
+        data['gt_fields'] = ['trimap']
     data['trans_info'] = []
     data = transforms(data)
     data['img'] = paddle.to_tensor(data['img'])
     data['img'] = data['img'].unsqueeze(0)
+    if trimap is not None:
+        data['trimap'] = paddle.to_tensor(data['trimap'])
+        data['trimap'] = data['trimap'].unsqueeze((0, 1))

     return data

@@ -80,6 +89,7 @@ def predict(model,
             transforms,
             image_list,
             image_dir=None,
+            trimap_list=None,
             save_dir='output'):
     """
     predict and visualize the image_list.
@@ -89,6 +99,7 @@ def predict(model,
         transforms (transform.Compose): Preprocess for input image.
         image_list (list): A list of image path to be predicted.
         image_dir (str, optional): The root directory of the images predicted. Default: None.
+        trimap_list (list): A list of trimap of image_list. Default: None.
         save_dir (str, optional): The directory to save the visualized results. Default: 'output'.
     """
     utils.utils.load_entire_model(model, model_path)
@@ -97,11 +108,11 @@ def predict(model,
     local_rank = paddle.distributed.get_rank()
     if nranks > 1:
         img_lists = partition_list(image_list, nranks)
+        trimap_lists = partition_list(
+            trimap_list, nranks) if trimap_list is not None else None
     else:
         img_lists = [image_list]
-
-    added_saved_dir = os.path.join(save_dir, 'added_prediction')
-    pred_saved_dir = os.path.join(save_dir, 'pseudo_color_prediction')
+        trimap_lists = [trimap_list] if trimap_list is not None else None

     logger.info("Start to predict...")
     progbar_pred = progbar.Progbar(target=len(img_lists[0]), verbose=1)
@@ -112,7 +123,9 @@ def predict(model,
     with paddle.no_grad():
         for i, im_path in enumerate(img_lists[local_rank]):
             preprocess_start = time.time()
-            data = preprocess(img=im_path, transforms=transforms)
+            trimap = trimap_lists[local_rank][
+                i] if trimap_list is not None else None
+            data = preprocess(img=im_path, transforms=transforms, trimap=trimap)
             preprocess_cost_averager.record(time.time() - preprocess_start)

             infer_start = time.time()
@@ -134,7 +147,7 @@ def predict(model,
             save_path = os.path.join(save_dir, im_file)
             mkdir(save_path)

-            save_alpha_pred(alpha_pred, save_path)
+            save_alpha_pred(alpha_pred, save_path, trimap=trimap)

             postprocess_cost_averager.record(time.time() - postprocess_start)

diff --git a/contrib/matting/model/modnet.py b/contrib/matting/model/modnet.py
index 9345967bd0..d0320d5a42 100644
--- a/contrib/matting/model/modnet.py
+++ b/contrib/matting/model/modnet.py
@@ -10,6 +10,7 @@
 # limitations under the License.
 from collections import defaultdict
+
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
diff --git a/contrib/matting/predict.py b/contrib/matting/predict.py
index a860d7aa07..c6e5f160d8 100644
--- a/contrib/matting/predict.py
+++ b/contrib/matting/predict.py
@@ -15,19 +15,18 @@
 import argparse
 import os

-from paddleseg.utils import logger
+from paddleseg.cvlibs import manager, Config
+from paddleseg.utils import get_sys_env, logger

 from core import predict
 from model import *
-from dataset import HumanMattingDataset
-import transforms as T
+from dataset import MattingDataset


 def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
-    # params of training
-    # parser.add_argument(
-    #     "--config", dest="cfg", help="The config file.", default=None, type=str)
+    parser.add_argument(
+        "--config", dest="cfg", help="The config file.", default=None, type=str)

     parser.add_argument(
         '--model_path',
@@ -42,6 +41,14 @@ def parse_args():
         'The path of image, it can be a file or a directory including images',
         type=str,
         default=None)
+    parser.add_argument(
+        '--trimap_path',
+        dest='trimap_path',
+        help=
+        'The path of trimap, it can be a file or a directory including images. '
+        'The image should be the same as image when it is a directory.',
+        type=str,
+        default=None)
     parser.add_argument(
         '--save_dir',
         dest='save_dir',
@@ -49,13 +56,6 @@ def parse_args():
         type=str,
         default='./output/results')

-    parser.add_argument(
-        '--backbone',
-        dest='backbone',
-        help='The backbone of model. It is one of (MobileNetV2)',
-        required=True,
-        type=str)
-
     return parser.parse_args()


@@ -76,7 +76,7 @@ def get_image_list(image_path):
             line = line.strip()
             if len(line.split()) > 1:
                 raise RuntimeError(
-                    'There should be only one image path per line in `--image_path` file. Wrong line: {}'
+                    'There should be only one image path per line in `image_path` file. Wrong line: {}'
                     .format(line))
             image_list.append(os.path.join(image_dir, line))
     elif os.path.isdir(image_path):
@@ -87,35 +87,51 @@ def get_image_list(image_path):
                     continue
                 if os.path.splitext(f)[-1] in valid_suffix:
                     image_list.append(os.path.join(root, f))
+        image_list.sort()
     else:
         raise FileNotFoundError(
-            '`--image_path` is not found. it should be an image file or a directory including images'
+            '`image_path` is not found. It should be an image file or a directory including images'
        )

     if len(image_list) == 0:
-        raise RuntimeError('There are not image file in `--image_path`')
+        raise RuntimeError('There is no image file in `image_path`')

     return image_list, image_dir


 def main(args):
-    paddle.set_device('gpu')
-
-    t = [
-        T.LoadImages(),
-        # T.ResizeByShort(512),
-        T.Resize((512, 512)),
-        T.ResizeToIntMult(mult_int=32),
-        T.Normalize()
-    ]
+    env_info = get_sys_env()
+    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
+        'GPUs used'] else 'cpu'
+
+    paddle.set_device(place)
+    if not args.cfg:
+        raise RuntimeError('No configuration file specified.')
+
+    cfg = Config(args.cfg)
+    val_dataset = cfg.val_dataset
+    if val_dataset is None:
+        raise RuntimeError(
+            'The validation dataset is not specified in the configuration file.'
+        )
+    elif len(val_dataset) == 0:
+        raise ValueError(
+            'The length of val_dataset is 0.
Please check if your dataset is valid'
+        )

-    transforms = T.Compose(t)
+    msg = '\n---------------Config Information---------------\n'
+    msg += str(cfg)
+    msg += '------------------------------------------------'
+    logger.info(msg)

-    # model
-    backbone = eval(args.backbone)(input_channels=3)
-    model = MODNet(backbone=backbone, pretrained=args.model_path)
+    model = cfg.model
+    transforms = val_dataset.transforms

     image_list, image_dir = get_image_list(args.image_path)
+    if args.trimap_path is None:
+        trimap_list = None
+    else:
+        trimap_list, _ = get_image_list(args.trimap_path)
     logger.info('Number of predict images = {}'.format(len(image_list)))

     predict(
@@ -124,6 +140,7 @@ def main(args):
         transforms=transforms,
         image_list=image_list,
         image_dir=image_dir,
+        trimap_list=trimap_list,
         save_dir=args.save_dir)
diff --git a/contrib/matting/val.py b/contrib/matting/val.py
index d7bd9d3b46..ca2f6b3be3 100644
--- a/contrib/matting/val.py
+++ b/contrib/matting/val.py
@@ -17,7 +17,7 @@
 import paddle
 import paddleseg
 from paddleseg.cvlibs import manager, Config
-from paddleseg.utils import get_sys_env, logger, config_check, utils
+from paddleseg.utils import get_sys_env, logger, utils

 from core import evaluate
 from model import *
@@ -85,7 +85,6 @@ def main(args):
         utils.load_entire_model(model, args.model_path)
         logger.info('Loaded trained params of model successfully')

-    # Call the evaluate function to launch evaluation
     evaluate(
         model,
         val_dataset,
diff --git a/paddleseg/cvlibs/config.py b/paddleseg/cvlibs/config.py
index 0764585d54..6ac22bbe17 100644
--- a/paddleseg/cvlibs/config.py
+++ b/paddleseg/cvlibs/config.py
@@ -169,9 +169,9 @@ def learning_rate(self) -> paddle.optimizer.lr.LRScheduler:
                     type: PolynomialDecay
                     learning_rate: 0.01''')

-        lr = self.dic.get('learning_rate', {})
-        if isinstance(lr, float):
-            return lr
+        _learning_rate = self.dic.get('learning_rate', {})
+        if isinstance(_learning_rate, float):
+            return _learning_rate

         _learning_rate = self.dic.get('learning_rate', {}).get('value')
         if not _learning_rate:
@@ -311,11 +311,7 @@ def model(self) -> paddle.nn.Layer:
         elif hasattr(self.val_dataset, 'num_classes'):
             num_classes = self.val_dataset.num_classes

-        if not num_classes:
-            logger.warning(
-                '`num_classes` is not found. Please confirm whether you need it !!!'
-            )
-        else:
+        if num_classes is not None:
             model_cfg['num_classes'] = num_classes

         if not self._model:
diff --git a/paddleseg/cvlibs/manager.py b/paddleseg/cvlibs/manager.py
index 5c502ea82d..4faab060ce 100644
--- a/paddleseg/cvlibs/manager.py
+++ b/paddleseg/cvlibs/manager.py
@@ -15,7 +15,7 @@
 import inspect
 from collections.abc import Sequence

-from paddleseg.utils import logger
+import warnings


 class ComponentManager:
@@ -111,8 +111,8 @@ def _add_single_component(self, component):

         # Check whether the component was added already
         if component_name in self._components_dict.keys():
-            logger.warning(
-                "{} exists already!. it is now update to {} !!!".format(
+            warnings.warn(
+                "{} exists already!
It is now updated to {} !!!".format(
                    component_name, component))
            self._components_dict[component_name] = component

From 38706299db70a827e8bfaced78800a92cbb78800 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Mon, 27 Sep 2021 22:52:40 +0800
Subject: [PATCH 163/210] update training process

---
 contrib/matting/core/train.py | 11 ++++++-----
 contrib/matting/train.py      |  9 +++++----
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/contrib/matting/core/train.py b/contrib/matting/core/train.py
index 42d092be26..a00c02151f 100644
--- a/contrib/matting/core/train.py
+++ b/contrib/matting/core/train.py
@@ -39,7 +39,7 @@ def train(model,
           use_vdl=False,
           losses=None,
           keep_checkpoint_max=5,
-          save_begin_iters=None):
+          eval_begin_iters=None):
     """
     Launch training.
     Args:
@@ -58,6 +58,7 @@ def train(model,
         losses (dict): A dict including 'types' and 'coef'. The length of coef should equal to 1 or len(losses['types']).
             The 'types' item is a list of object of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficient.
         keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5.
+        eval_begin_iters (int): The iters begin evaluation. It will evaluate at iters/2 if it is None. Default: None.
     """
     model.train()
     nranks = paddle.distributed.ParallelEnv().nranks
@@ -227,11 +228,11 @@ def train(model,
                         shutil.rmtree(model_to_remove)

             # eval model
-            if save_begin_iters is None:
-                save_begin_iters = iters // 2
+            if eval_begin_iters is None:
+                eval_begin_iters = iters // 2
             if (iter % save_interval == 0 or iter == iters) and (
                     val_dataset is
-                    not None) and local_rank == 0 and iter >= save_begin_iters:
+                    not None) and local_rank == 0 and iter >= eval_begin_iters:
                 num_workers = 1 if num_workers > 0 else 0
                 sad, mse = evaluate(
                     model,
@@ -243,7 +244,7 @@ def train(model,

             # save best model and add evaluation results to vdl
             if (iter % save_interval == 0 or iter == iters) and local_rank == 0:
-                if val_dataset is not None and iter >= save_begin_iters:
+                if val_dataset is not None and iter >= eval_begin_iters:
                     if sad < best_sad:
                         best_sad = sad
                         best_model_iter = iter
diff --git a/contrib/matting/train.py b/contrib/matting/train.py
index edfb9544a0..1bef03fa14 100644
--- a/contrib/matting/train.py
+++ b/contrib/matting/train.py
@@ -96,9 +96,10 @@ def parse_args():
         help='Whether to record the data to VisualDL during training',
         action='store_true')
     parser.add_argument(
-        '--save_begin_iters',
-        dest='save_begin_iters',
-        help='The iters saving begin',
+        '--eval_begin_iters',
+        dest='eval_begin_iters',
+        help=
+        'The iters begin evaluation. It will evaluate at iters/2 if it is None.',
         default=None,
         type=int)
     parser.add_argument(
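The scheduling this patch introduces can be summarized as a small predicate: evaluate on save boundaries, but only once training has reached `eval_begin_iters` (defaulting to `iters // 2` when unset). A minimal sketch of that logic — the function name and the values below are illustrative, not part of the patch:

```python
def should_eval(cur_iter, iters, save_interval, eval_begin_iters=None):
    # Mirror of the condition in core/train.py above.
    if eval_begin_iters is None:
        eval_begin_iters = iters // 2
    return (cur_iter % save_interval == 0
            or cur_iter == iters) and cur_iter >= eval_begin_iters


iters, save_interval = 100000, 5000
eval_points = [i for i in range(1, iters + 1)
               if should_eval(i, iters, save_interval)]
print(eval_points[0], eval_points[-1])  # 50000 100000
```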
From efc8b36e2254483595532b87fe01fd1cca820e26 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Mon, 27 Sep 2021 22:53:42 +0800
Subject: [PATCH 164/210] add docs and configs

---
 contrib/matting/README.md                     | 139 ++++++++++++++++++
 contrib/matting/configs/dim/dim_vgg16.yml     |  43 ++++++
 .../configs/modnet/modnet_mobilenetv2.yml     |  47 ++++++
 3 files changed, 229 insertions(+)
 create mode 100644 contrib/matting/README.md
 create mode 100644 contrib/matting/configs/dim/dim_vgg16.yml
 create mode 100644 contrib/matting/configs/modnet/modnet_mobilenetv2.yml

diff --git a/contrib/matting/README.md b/contrib/matting/README.md
new file mode 100644
index 0000000000..89542d76be
--- /dev/null
+++ b/contrib/matting/README.md
@@ -0,0 +1,139 @@
+# Matting
+Matting (fine-grained segmentation / background removal / image cutout) refers to the technique of extracting the foreground from an image by computing its color and transparency. It can be used for background replacement, image compositing, and visual effects, and is widely used in the film industry. Every pixel in an image has a value that represents the transparency of its foreground, called the alpha value; the set of all alpha values in an image is called the alpha matte. Extracting the part of the image covered by the matte completes the separation of the foreground.
+
+
## Contents

## Model Download

[MODNet-MobileNetV2](https://paddleseg.bj.bcebos.com/matting/models/modnet-mobilenetv2.pdparams)

[DIM-VGG16](https://paddleseg.bj.bcebos.com/matting/models/dim-vgg16.pdparams)

## Data Preparation

We use the [PPM-100](https://github.com/ZHKKKe/PPM) dataset open-sourced by MODNet as the example dataset for this tutorial.

Organize the dataset into the following structure and place it under the data directory.

```
PPM-100/
|--train/
|  |--fg/
|  |--alpha/
|
|--val/
|  |--fg/
|  |--alpha
|
|--train.txt
|
|--val.txt
```
The image names under the fg directory must correspond one-to-one to the names under the alpha directory.

The contents of train.txt and val.txt are as follows:
```
train/fg/14299313536_ea3e61076c_o.jpg
train/fg/14429083354_23c8fddff5_o.jpg
train/fg/14559969490_d33552a324_o.jpg
...
```
You can directly download the organized [PPM-100](https://paddleseg.bj.bcebos.com/matting/datasets/PPM-100.zip) data for the rest of this tutorial.


For a dataset whose full images need to be composited from foregrounds and backgrounds, such as the Composition-1k dataset used in the [Deep Image Matting](https://arxiv.org/pdf/1703.03872.pdf) paper, the dataset should be organized as follows:
```
Composition-1k/
|--bg/
|
|--train/
|  |--fg/
|  |--alpha/
|
|--val/
|  |--fg/
|  |--alpha/
|  |--trimap/ (if present)
|
|--train.txt
|
|--val.txt
```
The content of train.txt is as follows:
```
train/fg/fg1.jpg bg/bg1.jpg
train/fg/fg2.jpg bg/bg2.jpg
train/fg/fg3.jpg bg/bg3.jpg
...
```

The content of val.txt is as follows. If a corresponding trimap does not exist, the third column can be omitted and the code will generate it automatically.
```
val/fg/fg1.jpg bg/bg1.jpg val/trimap/trimap1.jpg
val/fg/fg2.jpg bg/bg2.jpg val/trimap/trimap2.jpg
val/fg/fg3.jpg bg/bg3.jpg val/trimap/trimap3.jpg
...
```


## Training
```shell
export CUDA_VISIBLE_DEVICES=0
python train.py \
       --config configs/modnet/modnet_mobilenetv2.yml \
       --do_eval \
       --use_vdl \
       --save_interval 5000 \
       --num_workers 5 \
       --save_dir output
```

**note:** Using --do_eval affects training speed and increases GPU memory consumption; enable or disable it as needed.

`--num_workers` enables multi-process data reading to speed up data preprocessing.

Run the following command for more parameter information:
```shell
python train.py --help
```
To use multiple GPUs, launch with `python -m paddle.distributed.launch`.

## Evaluation
```shell
export CUDA_VISIBLE_DEVICES=0
python val.py \
       --config configs/modnet/modnet_mobilenetv2.yml \
       --model_path output/best_model/model.pdparams \
       --save_results
```
Enabling `--save_results` keeps the prediction results of the images; you can disable it to speed up evaluation.

You can directly download the model we provide for evaluation.

Run the following command for more parameter information:
```shell
python val.py --help
```

## Prediction and Saving Visualized Results
```shell
export CUDA_VISIBLE_DEVICES=0
python predict.py \
    --config configs/modnet/modnet_mobilenetv2.yml \
    --model_path output/iter_90000/model.pdparams \
    --image_path data/cityscapes/leftImg8bit/val/ \
    --save_dir ./output/result
```
If the model requires trimap information, pass the trimap path via `--trimap_path`.

You can directly download the model we provide for prediction.

Run the following command for more parameter information:
```shell
python predict.py --help
```
diff --git a/contrib/matting/configs/dim/dim_vgg16.yml b/contrib/matting/configs/dim/dim_vgg16.yml
new file mode 100644
index 0000000000..24cf72d773
--- /dev/null
+++ b/contrib/matting/configs/dim/dim_vgg16.yml
@@ -0,0 +1,43 @@
+batch_size: 16
+iters: 100000
+
+train_dataset:
+  type: MattingDataset
+  dataset_root: data/PPM-100
+  train_file: train.txt
+  transforms:
+    - type: LoadImages
+    - type: RandomCropByAlpha
+      crop_size: [[320, 320], [480, 480], [640, 640]]
+    - type: Resize
+      target_size: [320, 320]
+    - type: RandomDistort
+    - type: RandomBlur
+    - type: RandomHorizontalFlip
+    - type: Normalize
+  mode: train
+  get_trimap: True
+
+val_dataset:
+  type: MattingDataset
+  dataset_root: data/PPM-100
+  val_file: val.txt
+  transforms:
+    - type: LoadImages
+    - type: Normalize
+  mode: val
+  get_trimap: True
+
+model:
+  type: DIM
+  backbone:
+    type: VGG16
+    input_channels: 4
+    pretrained: pretrained_models/VGG16_pretrained.pdparams
+  pretrained: Null
+
+optimizer:
+  type: adam
+
+learning_rate:
+  value: 0.001
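Configs like `dim_vgg16.yml` above are consumed by `paddleseg.cvlibs.Config`, which resolves nested `type:` entries into objects (the `_load_object` call seen earlier in this series). A rough, self-contained sketch of that resolution, assuming PyYAML is available and using stand-in classes rather than the real DIM/VGG16 models:

```python
import yaml

REGISTRY = {}


def register(cls):
    REGISTRY[cls.__name__] = cls
    return cls


@register
class VGG16:
    def __init__(self, input_channels=3, pretrained=None):
        self.input_channels = input_channels


@register
class DIM:
    def __init__(self, backbone, stage=3, pretrained=None):
        self.backbone = backbone


def load_object(cfg):
    # Recursively build any nested dict that itself declares a 'type' key,
    # so the 'backbone' sub-dict becomes a VGG16 instance before DIM is built.
    kwargs = {}
    for key, value in cfg.items():
        if key == 'type':
            continue
        if isinstance(value, dict) and 'type' in value:
            value = load_object(value)
        kwargs[key] = value
    return REGISTRY[cfg['type']](**kwargs)


snippet = yaml.safe_load("""
model:
  type: DIM
  backbone:
    type: VGG16
    input_channels: 4
""")
model = load_object(snippet['model'])
print(type(model).__name__, type(model.backbone).__name__)  # DIM VGG16
```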
diff --git a/contrib/matting/configs/modnet/modnet_mobilenetv2.yml b/contrib/matting/configs/modnet/modnet_mobilenetv2.yml
new file mode 100644
index 0000000000..1fe689d878
--- /dev/null
+++ b/contrib/matting/configs/modnet/modnet_mobilenetv2.yml
@@ -0,0 +1,47 @@
+batch_size: 16
+iters: 100000
+
+train_dataset:
+  type: MattingDataset
+  dataset_root: data/PPM-100
+  train_file: train.txt
+  transforms:
+    - type: LoadImages
+    - type: RandomCrop
+      crop_size: [512, 512]
+    - type: RandomDistort
+    - type: RandomBlur
+    - type: RandomHorizontalFlip
+    - type: Normalize
+  mode: train
+
+val_dataset:
+  type: MattingDataset
+  dataset_root: data/PPM-100
+  val_file: val.txt
+  transforms:
+    - type: LoadImages
+    - type: ResizeByShort
+      short_size: 512
+    - type: ResizeToIntMult
+      mult_int: 32
+    - type: Normalize
+  mode: val
+  get_trimap: False
+
+model:
+  type: MODNet
+  backbone:
+    type: MobileNetV2
+    pretrained: pretrained_models/MobileNetV2_pretrained.pdparams
+  pretrained: Null
+
+optimizer:
+  type: sgd
+  momentum: 0.9
+  weight_decay: 4.0e-5
+
+lr_scheduler:
+  type: PiecewiseDecay
+  boundaries: [40000, 80000]
+  values: [0.02, 0.002, 0.0002]

From 8b3de26dbea1e0ed371e08fc01dcbff40e752f98 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Mon, 27 Sep 2021 23:01:04 +0800
Subject: [PATCH 165/210] update README.md

---
 contrib/matting/README.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/contrib/matting/README.md b/contrib/matting/README.md
index 89542d76be..bb7547eb0e 100644
--- a/contrib/matting/README.md
+++ b/contrib/matting/README.md
@@ -7,6 +7,11 @@ Matting (fine-grained segmentation / background removal / image cutout) refers to the technique of extracting the foreground from an image by computing its color and transparency. It can be used for background replacement, image compositing, and visual effects, and is widely used in the film industry. Every pixel in an image has a value that represents the transparency of its foreground, called the alpha value; the set of all alpha values in an image is called the alpha matte. Extracting the part of the image covered by the matte completes the separation of the foreground.

 ## Contents
+- [Model Download](#model-download)
+- [Data Preparation](#data-preparation)
+- [Training](#training)
+- [Evaluation](#evaluation)
+- [Prediction and Saving Visualized Results](#prediction-and-saving-visualized-results)

 ## Model Download

From 581fac84271159f36dd7323d07a520a0efa397fb Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Tue, 28 Sep 2021 11:05:58 +0800
Subject: [PATCH 166/210] update some

---
 contrib/matting/core/predict.py            |  5 ++-
 contrib/matting/core/train.py              | 45 +-------------------
 contrib/matting/dataset/matting_dataset.py | 49 ++++++++++++----------
 contrib/matting/model/dim.py               |  8 ++--
 contrib/matting/model/loss.py              |  2 +-
 contrib/matting/model/modnet.py            | 14 +++++++
 contrib/matting/model/vgg.py               |  3 ---
 contrib/matting/predict.py                 |  2 +-
 contrib/matting/train.py                   |  4 +-
 contrib/matting/val.py                     |  2 +-
 10 files changed, 53 insertions(+), 81 deletions(-)

diff --git a/contrib/matting/core/predict.py b/contrib/matting/core/predict.py
index ff9d5ca5c6..02bbdaf602 100644
--- a/contrib/matting/core/predict.py
+++ b/contrib/matting/core/predict.py
@@ -93,13 +93,14 @@ def predict(model,
             save_dir='output'):
     """
     predict and visualize the image_list.
+
     Args:
         model (nn.Layer): Used to predict for input image.
         model_path (str): The path of pretrained model.
-        transforms (transform.Compose): Preprocess for input image.
+        transforms (transforms.Compose): Preprocess for input image.
         image_list (list): A list of image path to be predicted.
         image_dir (str, optional): The root directory of the images predicted. Default: None.
-        trimap_list (list): A list of trimap of image_list. Default: None.
+        trimap_list (list, optional): A list of trimap of image_list. Default: None.
         save_dir (str, optional): The directory to save the visualized results. Default: 'output'.
     """
     utils.utils.load_entire_model(model, model_path)
diff --git a/contrib/matting/core/train.py b/contrib/matting/core/train.py
index a00c02151f..f96bbe1641 100644
--- a/contrib/matting/core/train.py
+++ b/contrib/matting/core/train.py
@@ -43,7 +43,7 @@ def train(model,
     """
     Launch training.
     Args:
-        model(nn.Layer): A sementic segmentation model.
+        model(nn.Layer): A matting model.
         train_dataset (paddle.io.Dataset): Used to read and process training datasets.
         val_dataset (paddle.io.Dataset, optional): Used to read and process validation datasets.
         optimizer (paddle.optimizer.Optimizer): The optimizer.
@@ -55,8 +55,7 @@ def train(model,
         log_iters (int, optional): Display logging information at every log_iters. Default: 10.
         num_workers (int, optional): Num workers for data loader. Default: 0.
         use_vdl (bool, optional): Whether to record the data to VisualDL during training. Default: False.
-        losses (dict): A dict including 'types' and 'coef'. The length of coef should equal to 1 or len(losses['types']).
-            The 'types' item is a list of object of paddleseg.models.losses while the 'coef' item is a list of the relevant coefficient.
+        losses (dict, optional): A dict of loss, refer to the loss function of the model for details. Default: None.
         keep_checkpoint_max (int, optional): Maximum number of checkpoints to save. Default: 5.
         eval_begin_iters (int): The iters begin evaluation. It will evaluate at iters/2 if it is None. Default: None.
     """
     model.train()
     nranks = paddle.distributed.ParallelEnv().nranks
@@ -167,46 +166,6 @@ def train(model,
                     log_writer.add_scalar('Train/reader_cost',
                                           avg_train_reader_cost, iter)

-                if False:  # mainly for observation while debugging; can be omitted in real training
-                    # add display of the image and the alpha
-                    ori_img = data['img'][0]
-                    ori_img = paddle.transpose(ori_img, [1, 2, 0])
-                    ori_img = (ori_img * 0.5 + 0.5) * 255
-                    alpha = (data['alpha'][0])
-                    alpha = paddle.transpose(alpha, [1, 2, 0]) * 255
-                    trimap = (data['trimap'][0])
-                    trimap = paddle.transpose(trimap, [1, 2, 0])
-                    log_writer.add_image(
-                        tag='ground truth/ori_img',
-                        img=ori_img.numpy(),
-                        step=iter)
-                    log_writer.add_image(
-                        tag='ground truth/alpha',
-                        img=alpha.numpy(),
-                        step=iter)
-                    log_writer.add_image(
-                        tag='ground truth/trimap',
-                        img=trimap.numpy(),
-                        step=iter)
-
-                    semantic = (logit_dict['semantic'][0] * 255).transpose(
-                        [1, 2, 0])
-                    log_writer.add_image(
-                        tag='prediction/semantic',
-                        img=semantic.numpy().astype('uint8'),
-                        step=iter)
-                    detail = (logit_dict['detail'][0] * 255).transpose(
-                        [1, 2, 0])
-                    log_writer.add_image(
-                        tag='prediction/detail',
-                        img=detail.numpy().astype('uint8'),
-                        step=iter)
-                    cm = (logit_dict['matte'][0] * 255).transpose([1, 2, 0])
-                    log_writer.add_image(
-                        tag='prediction/alpha',
-                        img=cm.numpy().astype('uint8'),
-                        step=iter)
-
                 for key in avg_loss.keys():
                     avg_loss[key] = 0.
                 reader_cost_averager.reset()
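The docstring change above drops the concrete description of the losses dict, but the removed `{'types': ..., 'coef': ...}` contract still describes how a weighted total loss is formed. A minimal sketch of that reduction — the loss callables here are stand-ins, not PaddleSeg losses:

```python
def weighted_loss(logit, label, losses):
    # A 'coef' of length 1 applies one weight to every loss type; otherwise
    # it must provide one coefficient per entry in 'types'.
    coef = losses['coef']
    if len(coef) == 1:
        coef = coef * len(losses['types'])
    return sum(c * fn(logit, label) for fn, c in zip(losses['types'], coef))


def l1(a, b):
    return abs(a - b)


def sq(a, b):
    return (a - b) ** 2


print(weighted_loss(0.8, 1.0, {'types': [l1, sq], 'coef': [1.0, 0.5]}))  # ~0.22
```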
""" @@ -167,46 +166,6 @@ def train(model, log_writer.add_scalar('Train/reader_cost', avg_train_reader_cost, iter) - if False: #主要为调试时候的观察,真正训练的时候可以省略 - # 增加图片和alpha的显示 - ori_img = data['img'][0] - ori_img = paddle.transpose(ori_img, [1, 2, 0]) - ori_img = (ori_img * 0.5 + 0.5) * 255 - alpha = (data['alpha'][0]) - alpha = paddle.transpose(alpha, [1, 2, 0]) * 255 - trimap = (data['trimap'][0]) - trimap = paddle.transpose(trimap, [1, 2, 0]) - log_writer.add_image( - tag='ground truth/ori_img', - img=ori_img.numpy(), - step=iter) - log_writer.add_image( - tag='ground truth/alpha', - img=alpha.numpy(), - step=iter) - log_writer.add_image( - tag='ground truth/trimap', - img=trimap.numpy(), - step=iter) - - semantic = (logit_dict['semantic'][0] * 255).transpose( - [1, 2, 0]) - log_writer.add_image( - tag='prediction/semantic', - img=semantic.numpy().astype('uint8'), - step=iter) - detail = (logit_dict['detail'][0] * 255).transpose( - [1, 2, 0]) - log_writer.add_image( - tag='prediction/detail', - img=detail.numpy().astype('uint8'), - step=iter) - cm = (logit_dict['matte'][0] * 255).transpose([1, 2, 0]) - log_writer.add_image( - tag='prediction/alpha', - img=cm.numpy().astype('uint8'), - step=iter) - for key in avg_loss.keys(): avg_loss[key] = 0. reader_cost_averager.reset() diff --git a/contrib/matting/dataset/matting_dataset.py b/contrib/matting/dataset/matting_dataset.py index 38e50a8f50..f35a9a3f94 100644 --- a/contrib/matting/dataset/matting_dataset.py +++ b/contrib/matting/dataset/matting_dataset.py @@ -27,34 +27,35 @@ @manager.DATASETS.add_component class MattingDataset(paddle.io.Dataset): """ - human_matting - |__Composition-1k(origin dataset name) - | |__train - | | |__fg - | | |__alpha - | |__val - | |__fg - | |__alpha - | |__trimap - |__Distinctions-646 - | - |__bg (background) - | |__coco_17 - | |__pascal_voc12 - | - |__train.txt - |__val.tat - + Pass in a dataset that conforms to the format. + matting_dataset/ + |--bg/ + | + |--train/ + | |--fg/ + | |--alpha/ + | + |--val/ + | |--fg/ + | |--alpha/ + | |--trimap/ (如果存在) + | + |--train.txt + | + |--val.txt + See README.md for more information of dataset. Args: - dataset_root(str): The root path of dataset + dataset_root(str): The root path of dataset. transforms(list): Transforms for image. mode (str, optional): which part of dataset to use. it is one of ('train', 'val', 'trainval'). Default: 'train'. train_file (str|list, optional): File list is used to train. It should be `foreground_image.png background_image.png` or `foreground_image.png`. It shold be provided if mode equal to 'train'. Default: None. val_file (str|list, optional): File list is used to evaluation. It should be `foreground_image.png background_image.png` - or `foreground_image.png`. It shold be provided if mode equal to 'val'. Default: None. - + or `foreground_image.png` or ``foreground_image.png background_image.png trimap_image.png`. + It shold be provided if mode equal to 'val'. Default: None. + get_trimap (bool, optional): Whether to get triamp. Default: True. + separator (str, optional): The separator of train_file or val_file. If file name contains ' ', '|' may be perfect. Default: ' '. """ def __init__(self, @@ -76,7 +77,8 @@ def __init__(self, if mode == 'train' or mode == 'trainval': if train_file is None: raise ValueError( - "When `mode` is 'train', `train_file must be provided!") + "When `mode` is 'train' or 'trainval', `train_file must be provided!" 
diff --git a/contrib/matting/model/dim.py b/contrib/matting/model/dim.py
index e963cd16de..ae55b3d50c 100644
--- a/contrib/matting/model/dim.py
+++ b/contrib/matting/model/dim.py
@@ -34,17 +34,17 @@ class DIM(nn.Layer):

     Args:
         backbone: backbone model.
-        pretrained(str, optional): The path of pretrianed model. Defautl: None.
         stage (int, optional): The stage of the model. Default: 3.
-        decoder_input_channels(int, optional): The channel of decoder input. Defautl: 512.
+        decoder_input_channels(int, optional): The channel of decoder input. Default: 512.
+        pretrained (str, optional): The path of the pretrained model. Default: None.

     """

     def __init__(self,
                  backbone,
-                 pretrained=None,
-                 stage=3,
-                 decoder_input_channels=512):
+                 stage=3,
+                 decoder_input_channels=512,
+                 pretrained=None):
         super().__init__()
         self.backbone = backbone
         self.pretrained = pretrained
diff --git a/contrib/matting/model/loss.py b/contrib/matting/model/loss.py
index 5adca48e31..684c35ba2e 100644
--- a/contrib/matting/model/loss.py
+++ b/contrib/matting/model/loss.py
@@ -32,7 +32,7 @@ def forward(self, logit, label, mask=None):
         Args:
             logit (Tensor): Logit tensor, the data type is float32, float64.
             label (Tensor): Label tensor, the data type is float32, float64. The shape should equal to logit.
-            mask (Tensor): The mask where the loss valid.
+            mask (Tensor, optional): The mask where the loss is valid. Default: None.
         """
         if len(label.shape) == 3:
             label = label.unsqueeze(1)
diff --git a/contrib/matting/model/modnet.py b/contrib/matting/model/modnet.py
index d0320d5a42..0afcf5193a 100644
--- a/contrib/matting/model/modnet.py
+++ b/contrib/matting/model/modnet.py
@@ -24,6 +24,20 @@

 @manager.MODELS.add_component
 class MODNet(nn.Layer):
+    """
+    The MODNet implementation based on PaddlePaddle.
+
+    The original article refers to
+    Zhanghan Ke, et al. "Is a Green Screen Really Necessary for Real-Time Portrait Matting?"
+    (https://arxiv.org/pdf/2011.11961.pdf).
+
+    Args:
+        backbone: backbone model.
+        hr_channels (int, optional): The channels of the high-resolution branch. Default: 32.
+        pretrained (str, optional): The path of the pretrained model. Default: None.
+
+    """
+
     def __init__(self, backbone, hr_channels=32, pretrained=None):
         super().__init__()
         self.backbone = backbone
diff --git a/contrib/matting/model/vgg.py b/contrib/matting/model/vgg.py
index 4adee6f7b1..64b529bf0c 100644
--- a/contrib/matting/model/vgg.py
+++ b/contrib/matting/model/vgg.py
@@ -139,9 +139,6 @@ def forward(self, inputs):

     def init_weight(self):
         if self.pretrained is not None:
-            # Initialization needs some special handling here
-            # self.load_pretrained_model(self.pretrained)
-            # fc14 is not used for initialization at that point
             utils.load_pretrained_model(self, self.pretrained)
diff --git a/contrib/matting/predict.py b/contrib/matting/predict.py
index c6e5f160d8..1c26a60065 100644
--- a/contrib/matting/predict.py
+++ b/contrib/matting/predict.py
@@ -31,7 +31,7 @@ def parse_args():
     parser.add_argument(
         '--model_path',
         dest='model_path',
-        help='The path of model for evaluation',
+        help='The path of model for prediction',
         type=str,
         default=None)
diff --git a/contrib/matting/train.py b/contrib/matting/train.py
index 1bef03fa14..e5ea740bfb 100644
--- a/contrib/matting/train.py
+++ b/contrib/matting/train.py
@@ -28,7 +28,6 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Model training')

-    # params of training
     parser.add_argument(
         "--config", dest="cfg", help="The config file.", default=None, type=str)
@@ -99,7 +98,7 @@ def parse_args():
         '--eval_begin_iters',
         dest='eval_begin_iters',
         help=
-        'The iters begin evaluation. It will evaluate at iters/2 if it is None.',
+        'The iters begin evaluation. It will begin evaluating at iters/2 if it is None.',
         default=None,
         type=int)
     parser.add_argument(
@@ -153,7 +152,6 @@ def main(args):
     msg += '------------------------------------------------'
     logger.info(msg)

-    # Call the train function to launch training
     train(
         model=cfg.model,
         train_dataset=train_dataset,
diff --git a/contrib/matting/val.py b/contrib/matting/val.py
index ca2f6b3be3..2c06470237 100644
--- a/contrib/matting/val.py
+++ b/contrib/matting/val.py
@@ -49,7 +49,7 @@ def parse_args():
     parser.add_argument(
         '--save_results',
         dest='save_results',
-        help='save prediction alphe while evaluation',
+        help='save prediction alpha while evaluating',
         action='store_true')

     return parser.parse_args()

From cbe241973b5addef96a1660355ed403b61bd1c93 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Tue, 28 Sep 2021 11:07:51 +0800
Subject: [PATCH 167/210] rename matting to Matting

---
 contrib/{matting => Matting}/README.md | 0
 contrib/{matting => Matting}/configs/dim/dim_vgg16.yml | 0
 .../{matting => Matting}/configs/modnet/modnet_mobilenetv2.yml | 0
 contrib/{matting => Matting}/core/__init__.py | 0
 contrib/{matting => Matting}/core/predict.py | 0
 contrib/{matting => Matting}/core/train.py | 0
 contrib/{matting => Matting}/core/val.py | 0
 contrib/{matting => Matting}/dataset/__init__.py | 0
 contrib/{matting => Matting}/dataset/matting_dataset.py | 0
 contrib/{matting => Matting}/metric.py | 0
 contrib/{matting => Matting}/model/__init__.py | 0
 contrib/{matting => Matting}/model/dim.py | 0
 contrib/{matting => Matting}/model/loss.py | 0
 contrib/{matting => Matting}/model/mobilenet_v2.py | 0
 contrib/{matting => Matting}/model/modnet.py | 0
 contrib/{matting => Matting}/model/resnet_vd.py | 0
 contrib/{matting => Matting}/model/vgg.py | 0
 contrib/{matting => Matting}/predict.py | 0
 contrib/{matting => Matting}/tools/update_vgg16_params.py | 0
 contrib/{matting => Matting}/train.py | 0
 contrib/{matting => Matting}/transforms.py | 0
 contrib/{matting => Matting}/utils.py | 0
 contrib/{matting => Matting}/val.py | 0
 23 files changed, 0 insertions(+),
0 deletions(-) rename contrib/{matting => Matting}/README.md (100%) rename contrib/{matting => Matting}/configs/dim/dim_vgg16.yml (100%) rename contrib/{matting => Matting}/configs/modnet/modnet_mobilenetv2.yml (100%) rename contrib/{matting => Matting}/core/__init__.py (100%) rename contrib/{matting => Matting}/core/predict.py (100%) rename contrib/{matting => Matting}/core/train.py (100%) rename contrib/{matting => Matting}/core/val.py (100%) rename contrib/{matting => Matting}/dataset/__init__.py (100%) rename contrib/{matting => Matting}/dataset/matting_dataset.py (100%) rename contrib/{matting => Matting}/metric.py (100%) rename contrib/{matting => Matting}/model/__init__.py (100%) rename contrib/{matting => Matting}/model/dim.py (100%) rename contrib/{matting => Matting}/model/loss.py (100%) rename contrib/{matting => Matting}/model/mobilenet_v2.py (100%) rename contrib/{matting => Matting}/model/modnet.py (100%) rename contrib/{matting => Matting}/model/resnet_vd.py (100%) rename contrib/{matting => Matting}/model/vgg.py (100%) rename contrib/{matting => Matting}/predict.py (100%) rename contrib/{matting => Matting}/tools/update_vgg16_params.py (100%) rename contrib/{matting => Matting}/train.py (100%) rename contrib/{matting => Matting}/transforms.py (100%) rename contrib/{matting => Matting}/utils.py (100%) rename contrib/{matting => Matting}/val.py (100%) diff --git a/contrib/matting/README.md b/contrib/Matting/README.md similarity index 100% rename from contrib/matting/README.md rename to contrib/Matting/README.md diff --git a/contrib/matting/configs/dim/dim_vgg16.yml b/contrib/Matting/configs/dim/dim_vgg16.yml similarity index 100% rename from contrib/matting/configs/dim/dim_vgg16.yml rename to contrib/Matting/configs/dim/dim_vgg16.yml diff --git a/contrib/matting/configs/modnet/modnet_mobilenetv2.yml b/contrib/Matting/configs/modnet/modnet_mobilenetv2.yml similarity index 100% rename from contrib/matting/configs/modnet/modnet_mobilenetv2.yml rename to contrib/Matting/configs/modnet/modnet_mobilenetv2.yml diff --git a/contrib/matting/core/__init__.py b/contrib/Matting/core/__init__.py similarity index 100% rename from contrib/matting/core/__init__.py rename to contrib/Matting/core/__init__.py diff --git a/contrib/matting/core/predict.py b/contrib/Matting/core/predict.py similarity index 100% rename from contrib/matting/core/predict.py rename to contrib/Matting/core/predict.py diff --git a/contrib/matting/core/train.py b/contrib/Matting/core/train.py similarity index 100% rename from contrib/matting/core/train.py rename to contrib/Matting/core/train.py diff --git a/contrib/matting/core/val.py b/contrib/Matting/core/val.py similarity index 100% rename from contrib/matting/core/val.py rename to contrib/Matting/core/val.py diff --git a/contrib/matting/dataset/__init__.py b/contrib/Matting/dataset/__init__.py similarity index 100% rename from contrib/matting/dataset/__init__.py rename to contrib/Matting/dataset/__init__.py diff --git a/contrib/matting/dataset/matting_dataset.py b/contrib/Matting/dataset/matting_dataset.py similarity index 100% rename from contrib/matting/dataset/matting_dataset.py rename to contrib/Matting/dataset/matting_dataset.py diff --git a/contrib/matting/metric.py b/contrib/Matting/metric.py similarity index 100% rename from contrib/matting/metric.py rename to contrib/Matting/metric.py diff --git a/contrib/matting/model/__init__.py b/contrib/Matting/model/__init__.py similarity index 100% rename from contrib/matting/model/__init__.py rename to 
contrib/Matting/model/__init__.py diff --git a/contrib/matting/model/dim.py b/contrib/Matting/model/dim.py similarity index 100% rename from contrib/matting/model/dim.py rename to contrib/Matting/model/dim.py diff --git a/contrib/matting/model/loss.py b/contrib/Matting/model/loss.py similarity index 100% rename from contrib/matting/model/loss.py rename to contrib/Matting/model/loss.py diff --git a/contrib/matting/model/mobilenet_v2.py b/contrib/Matting/model/mobilenet_v2.py similarity index 100% rename from contrib/matting/model/mobilenet_v2.py rename to contrib/Matting/model/mobilenet_v2.py diff --git a/contrib/matting/model/modnet.py b/contrib/Matting/model/modnet.py similarity index 100% rename from contrib/matting/model/modnet.py rename to contrib/Matting/model/modnet.py diff --git a/contrib/matting/model/resnet_vd.py b/contrib/Matting/model/resnet_vd.py similarity index 100% rename from contrib/matting/model/resnet_vd.py rename to contrib/Matting/model/resnet_vd.py diff --git a/contrib/matting/model/vgg.py b/contrib/Matting/model/vgg.py similarity index 100% rename from contrib/matting/model/vgg.py rename to contrib/Matting/model/vgg.py diff --git a/contrib/matting/predict.py b/contrib/Matting/predict.py similarity index 100% rename from contrib/matting/predict.py rename to contrib/Matting/predict.py diff --git a/contrib/matting/tools/update_vgg16_params.py b/contrib/Matting/tools/update_vgg16_params.py similarity index 100% rename from contrib/matting/tools/update_vgg16_params.py rename to contrib/Matting/tools/update_vgg16_params.py diff --git a/contrib/matting/train.py b/contrib/Matting/train.py similarity index 100% rename from contrib/matting/train.py rename to contrib/Matting/train.py diff --git a/contrib/matting/transforms.py b/contrib/Matting/transforms.py similarity index 100% rename from contrib/matting/transforms.py rename to contrib/Matting/transforms.py diff --git a/contrib/matting/utils.py b/contrib/Matting/utils.py similarity index 100% rename from contrib/matting/utils.py rename to contrib/Matting/utils.py diff --git a/contrib/matting/val.py b/contrib/Matting/val.py similarity index 100% rename from contrib/matting/val.py rename to contrib/Matting/val.py From f1b7298df5926e0a8f2991fb8f052a7f08981d15 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 28 Sep 2021 11:17:59 +0800 Subject: [PATCH 168/210] update train.py --- contrib/Matting/train.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/contrib/Matting/train.py b/contrib/Matting/train.py index e5ea740bfb..9f4a87145b 100644 --- a/contrib/Matting/train.py +++ b/contrib/Matting/train.py @@ -97,9 +97,8 @@ def parse_args(): parser.add_argument( '--eval_begin_iters', dest='eval_begin_iters', - help= - 'The iters begin evaluation. 
It will begin evaluating at iters/2 if it is None.', - default=None, + help='The iters begin evaluation.', + default=0, type=int) parser.add_argument( '--seed', From 9232bffdaec3206468837f4eb28dfedf1cdfd383 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 28 Sep 2021 14:46:12 +0800 Subject: [PATCH 169/210] update some --- contrib/Matting/README.md | 6 +++--- contrib/Matting/configs/dim/dim_vgg16.yml | 2 ++ contrib/Matting/core/val.py | 5 ++--- contrib/Matting/dataset/matting_dataset.py | 21 ++++++++++++++++----- contrib/Matting/transforms.py | 6 ++++-- 5 files changed, 27 insertions(+), 13 deletions(-) diff --git a/contrib/Matting/README.md b/contrib/Matting/README.md index bb7547eb0e..b3c726d2d6 100644 --- a/contrib/Matting/README.md +++ b/contrib/Matting/README.md @@ -130,9 +130,9 @@ python val.py --help export CUDA_VISIBLE_DEVICES=0 python predict.py \ --config configs/modnet/modnet_movilenetv2.yml \ - --model_path output/iter_90000/model.pdparams \ - --image_path data/cityscapes/leftImg8bit/val/ \ - --save_dir ./output/result + --model_path output/best_model/model.pdparams \ + --image_path data/PPM-100/val/fg/ \ + --save_dir ./output/results ``` 如模型需要trimap信息,需要通过`--trimap_path`传入trimap路径。 diff --git a/contrib/Matting/configs/dim/dim_vgg16.yml b/contrib/Matting/configs/dim/dim_vgg16.yml index 24cf72d773..68694765cb 100644 --- a/contrib/Matting/configs/dim/dim_vgg16.yml +++ b/contrib/Matting/configs/dim/dim_vgg16.yml @@ -24,6 +24,8 @@ val_dataset: val_file: val.txt transforms: - type: LoadImages + - type: LimitLong + max_long: 3840 - type: Normalize mode: val get_trimap: True diff --git a/contrib/Matting/core/val.py b/contrib/Matting/core/val.py index bc1229d1f8..855a389586 100644 --- a/contrib/Matting/core/val.py +++ b/contrib/Matting/core/val.py @@ -97,9 +97,9 @@ def evaluate(model, alpha_pred = alpha_pred.numpy() alpha_gt = data['alpha'].numpy() * 255 - trimap = data.get('trimap') + trimap = data.get('ori_trimap') if trimap is not None: - trimap = data['trimap'].numpy().astype('uint8') + trimap = trimap.numpy().astype('uint8') alpha_pred = np.round(alpha_pred * 255) mse_metric.update(alpha_pred, alpha_gt, trimap) sad_metric.update(alpha_pred, alpha_gt, trimap) @@ -126,7 +126,6 @@ def evaluate(model, batch_cost_averager.reset() batch_start = time.time() - # 指标输出 mse = mse_metric.evaluate() sad = sad_metric.evaluate() diff --git a/contrib/Matting/dataset/matting_dataset.py b/contrib/Matting/dataset/matting_dataset.py index f35a9a3f94..aebf2cb332 100644 --- a/contrib/Matting/dataset/matting_dataset.py +++ b/contrib/Matting/dataset/matting_dataset.py @@ -117,7 +117,7 @@ def __getitem__(self, idx): data['alpha'] = alpha data['gt_fields'] = [] - # lineis: fg [bg] [trimap] + # line is: fg [bg] [trimap] if len(fg_bg_file) >= 2: bg_file = os.path.join(self.dataset_root, fg_bg_file[1]) bg = cv2.imread(bg_file) @@ -133,6 +133,7 @@ def __getitem__(self, idx): if os.path.exists(trimap_path): data['trimap'] = trimap_path data['gt_fields'].append('trimap') + data['ori_trimap'] = cv2.imread(trimap_path, 0) else: raise FileNotFoundError( 'trimap is not Found: {}'.format(fg_bg_file[2])) @@ -146,6 +147,16 @@ def __getitem__(self, idx): data['gt_fields'].append('alpha') data['trans_info'] = [] # Record shape change information + + # Generate trimap from alpha if no trimap file provided + if self.get_trimap: + if 'trimap' not in data: + data['trimap'] = self.gen_trimap( + data['alpha'], mode=self.mode).astype('float32') + data['gt_fields'].append('trimap') + if self.mode == 'val': + 
data['ori_trimap'] = data['trimap'].copy() + data = self.transforms(data) # When evaluation, gt should not be transforms. @@ -155,11 +166,11 @@ def __getitem__(self, idx): data['img'] = data['img'].astype('float32') for key in data.get('gt_fields', []): data[key] = data[key].astype('float32') - if self.get_trimap: - if 'trimap' not in data: - data['trimap'] = self.gen_trimap( - data['alpha'], mode=self.mode).astype('float32') + + if 'trimap' in data: data['trimap'] = data['trimap'][np.newaxis, :, :] + if 'ori_trimap' in data: + data['ori_trimap'] = data['ori_trimap'][np.newaxis, :, :] data['alpha'] = data['alpha'][np.newaxis, :, :] / 255. diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index 2852356def..641c13768e 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -342,7 +342,7 @@ def __call__(self, data): data['trans_info'].append(('resize', data['img'].shape[0:2])) data['img'] = functional.resize_long(data['img'], target) for key in data.get('gt_fields', []): - data[key] = functional.resize_long(data[key], self.long_size) + data[key] = functional.resize_long(data[key], target) return data @@ -486,7 +486,9 @@ def __call__(self, data): data['img'] = np.asarray(im) for key in data.get('gt_fields', []): - if key != 'alpha': + if key in ['alpha', 'trimap']: + continue + else: im = data[key].astype('uint8') im = Image.fromarray(im) for id in range(len(ops)): From 3575858fa83165da0286474a7bc5cef265bcce05 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 28 Sep 2021 16:37:23 +0800 Subject: [PATCH 170/210] update README.md --- contrib/Matting/README.md | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/contrib/Matting/README.md b/contrib/Matting/README.md index b3c726d2d6..9d0bdd2d2b 100644 --- a/contrib/Matting/README.md +++ b/contrib/Matting/README.md @@ -7,12 +7,41 @@ Matting(精细化分割/影像去背/抠图)是指借由计算前景的颜

      ## 目录 +- [环境配置](#环境配置) - [模型下载](#模型下载) - [数据准备](#数据准备) - [训练](#训练) - [评估](#评估) - [预测及可视化结果保存](#预测及可视化结果保存) + +## 环境配置 + +#### 1. 安装PaddlePaddle + +版本要求 + +* PaddlePaddle >= 2.0.2 + +* Python >= 3.7+ + +由于图像分割模型计算开销大,推荐在GPU版本的PaddlePaddle下使用PaddleSeg。推荐安装10.0以上的CUDA环境。安装教程请见[PaddlePaddle官网](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html)。 + +#### 2. 下载PaddleSeg仓库 + +```shell +git clone https://github.com/PaddlePaddle/PaddleSeg +``` + +#### 3. 安装 + +```shell +cd PaddleSeg +pip install -e . +pip install scikit-image +cd contrib/Matting +``` + ## 模型下载 [MODNet-MobileNetV2](https://paddleseg.bj.bcebos.com/matting/models/modnet-mobilenetv2.pdparams) @@ -85,7 +114,6 @@ val/fg/fg3.jpg bg/bg3.jpg val/trimap/trimap3.jpg ... ``` - ## 训练 ```shell export CUDA_VISIBLE_DEVICES=0 From f3f389dcf19f9746f633ec07db33a3aa263d3ab2 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 28 Sep 2021 17:33:37 +0800 Subject: [PATCH 171/210] update pretrained model and distributed training --- contrib/Matting/configs/dim/dim_vgg16.yml | 2 +- contrib/Matting/configs/modnet/modnet_mobilenetv2.yml | 2 +- contrib/Matting/core/train.py | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/contrib/Matting/configs/dim/dim_vgg16.yml b/contrib/Matting/configs/dim/dim_vgg16.yml index 68694765cb..6f76ecfbdd 100644 --- a/contrib/Matting/configs/dim/dim_vgg16.yml +++ b/contrib/Matting/configs/dim/dim_vgg16.yml @@ -35,7 +35,7 @@ model: backbone: type: VGG16 input_channels: 4 - pretrained: pretrained_models/VGG16_pretrained.pdparams + pretrained: https://paddleseg.bj.bcebos.com/matting/models/DIM_VGG16_pretrained/model.pdparams pretrained: Null optimizer: diff --git a/contrib/Matting/configs/modnet/modnet_mobilenetv2.yml b/contrib/Matting/configs/modnet/modnet_mobilenetv2.yml index 1fe689d878..d8e00352d6 100644 --- a/contrib/Matting/configs/modnet/modnet_mobilenetv2.yml +++ b/contrib/Matting/configs/modnet/modnet_mobilenetv2.yml @@ -33,7 +33,7 @@ model: type: MODNet backbone: type: MobileNetV2 - pretrained: pretrained_models/MobileNetV2_pretrained.pdparams + pretrained: https://paddleseg.bj.bcebos.com/matting/models/MobileNetV2_pretrained/model.pdparams pretrained: Null optimizer: diff --git a/contrib/Matting/core/train.py b/contrib/Matting/core/train.py index f96bbe1641..4b1fd66c39 100644 --- a/contrib/Matting/core/train.py +++ b/contrib/Matting/core/train.py @@ -115,10 +115,9 @@ def train(model, # model input if nranks > 1: logit_dict = ddp_model(data) - loss_dict = ddp_model.loss(logit_dict, data, losses) else: logit_dict = model(data) - loss_dict = model.loss(logit_dict, data, losses) + loss_dict = model.loss(logit_dict, data, losses) loss_dict['all'].backward() From beecdfa56807f9e372093b2c3f43fbe487ebf1fa Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 12 Oct 2021 16:37:40 +0800 Subject: [PATCH 172/210] add hrnet and resnet backbone --- contrib/Matting/core/val.py | 16 +- contrib/Matting/model/__init__.py | 1 + contrib/Matting/model/hrnet.py | 835 +++++++++++++++++++++++++++++ contrib/Matting/model/resnet_vd.py | 9 +- 4 files changed, 854 insertions(+), 7 deletions(-) create mode 100644 contrib/Matting/model/hrnet.py diff --git a/contrib/Matting/core/val.py b/contrib/Matting/core/val.py index 855a389586..96c02f5454 100644 --- a/contrib/Matting/core/val.py +++ b/contrib/Matting/core/val.py @@ -66,11 +66,21 @@ def evaluate(model, if not paddle.distributed.parallel.parallel_helper._is_parallel_ctx_initialized( ): 
paddle.distributed.init_parallel_env() - batch_sampler = paddle.io.DistributedBatchSampler( - eval_dataset, batch_size=1, shuffle=False, drop_last=False) + + +# batch_sampler = paddle.io.DistributedBatchSampler( +# eval_dataset, batch_size=1, shuffle=False, drop_last=False) +# loader = paddle.io.DataLoader( +# eval_dataset, +# batch_sampler=batch_sampler, +# num_workers=num_workers, +# return_list=True, +# ) +# eval not distributed loader = paddle.io.DataLoader( eval_dataset, - batch_sampler=batch_sampler, + batch_size=1, + drop_last=False, num_workers=num_workers, return_list=True, ) diff --git a/contrib/Matting/model/__init__.py b/contrib/Matting/model/__init__.py index 61da2293c4..9e906c1567 100644 --- a/contrib/Matting/model/__init__.py +++ b/contrib/Matting/model/__init__.py @@ -15,6 +15,7 @@ from .vgg import * from .resnet_vd import * from .mobilenet_v2 import * +from .hrnet import * from .dim import DIM from .loss import MRSD from .modnet import MODNet diff --git a/contrib/Matting/model/hrnet.py b/contrib/Matting/model/hrnet.py new file mode 100644 index 0000000000..def34566d0 --- /dev/null +++ b/contrib/Matting/model/hrnet.py @@ -0,0 +1,835 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from paddleseg.cvlibs import manager, param_init +from paddleseg.models import layers +from paddleseg.utils import utils + +__all__ = [ + "HRNet_W18_Small_V1", "HRNet_W18_Small_V2", "HRNet_W18", "HRNet_W30", + "HRNet_W32", "HRNet_W40", "HRNet_W44", "HRNet_W48", "HRNet_W60", "HRNet_W64" +] + + +class HRNet(nn.Layer): + """ + The HRNet implementation based on PaddlePaddle. + + The original article refers to + Jingdong Wang, et, al. "HRNet:Deep High-Resolution Representation Learning for Visual Recognition" + (https://arxiv.org/pdf/1908.07919.pdf). + + Args: + pretrained (str, optional): The path of pretrained model. + stage1_num_modules (int, optional): Number of modules for stage1. Default 1. + stage1_num_blocks (list, optional): Number of blocks per module for stage1. Default (4). + stage1_num_channels (list, optional): Number of channels per branch for stage1. Default (64). + stage2_num_modules (int, optional): Number of modules for stage2. Default 1. + stage2_num_blocks (list, optional): Number of blocks per module for stage2. Default (4, 4). + stage2_num_channels (list, optional): Number of channels per branch for stage2. Default (18, 36). + stage3_num_modules (int, optional): Number of modules for stage3. Default 4. + stage3_num_blocks (list, optional): Number of blocks per module for stage3. Default (4, 4, 4). + stage3_num_channels (list, optional): Number of channels per branch for stage3. Default [18, 36, 72). + stage4_num_modules (int, optional): Number of modules for stage4. Default 3. + stage4_num_blocks (list, optional): Number of blocks per module for stage4. Default (4, 4, 4, 4). 
+ stage4_num_channels (list, optional): Number of channels per branch for stage4. Default (18, 36, 72. 144). + has_se (bool, optional): Whether to use Squeeze-and-Excitation module. Default False. + align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even, + e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. + """ + + def __init__(self, + input_channels=3, + pretrained=None, + stage1_num_modules=1, + stage1_num_blocks=(4, ), + stage1_num_channels=(64, ), + stage2_num_modules=1, + stage2_num_blocks=(4, 4), + stage2_num_channels=(18, 36), + stage3_num_modules=4, + stage3_num_blocks=(4, 4, 4), + stage3_num_channels=(18, 36, 72), + stage4_num_modules=3, + stage4_num_blocks=(4, 4, 4, 4), + stage4_num_channels=(18, 36, 72, 144), + has_se=False, + align_corners=False, + padding_same=True): + super(HRNet, self).__init__() + self.pretrained = pretrained + self.stage1_num_modules = stage1_num_modules + self.stage1_num_blocks = stage1_num_blocks + self.stage1_num_channels = stage1_num_channels + self.stage2_num_modules = stage2_num_modules + self.stage2_num_blocks = stage2_num_blocks + self.stage2_num_channels = stage2_num_channels + self.stage3_num_modules = stage3_num_modules + self.stage3_num_blocks = stage3_num_blocks + self.stage3_num_channels = stage3_num_channels + self.stage4_num_modules = stage4_num_modules + self.stage4_num_blocks = stage4_num_blocks + self.stage4_num_channels = stage4_num_channels + self.has_se = has_se + self.align_corners = align_corners + + self.feat_channels = [i for i in stage4_num_channels] + self.feat_channels = [64] + self.feat_channels + + self.conv_layer1_1 = layers.ConvBNReLU( + in_channels=input_channels, + out_channels=64, + kernel_size=3, + stride=2, + padding=1 if not padding_same else 'same', + bias_attr=False) + + self.conv_layer1_2 = layers.ConvBNReLU( + in_channels=64, + out_channels=64, + kernel_size=3, + stride=2, + padding=1 if not padding_same else 'same', + bias_attr=False) + + self.la1 = Layer1( + num_channels=64, + num_blocks=self.stage1_num_blocks[0], + num_filters=self.stage1_num_channels[0], + has_se=has_se, + name="layer2", + padding_same=padding_same) + + self.tr1 = TransitionLayer( + in_channels=[self.stage1_num_channels[0] * 4], + out_channels=self.stage2_num_channels, + name="tr1", + padding_same=padding_same) + + self.st2 = Stage( + num_channels=self.stage2_num_channels, + num_modules=self.stage2_num_modules, + num_blocks=self.stage2_num_blocks, + num_filters=self.stage2_num_channels, + has_se=self.has_se, + name="st2", + align_corners=align_corners, + padding_same=padding_same) + + self.tr2 = TransitionLayer( + in_channels=self.stage2_num_channels, + out_channels=self.stage3_num_channels, + name="tr2", + padding_same=padding_same) + self.st3 = Stage( + num_channels=self.stage3_num_channels, + num_modules=self.stage3_num_modules, + num_blocks=self.stage3_num_blocks, + num_filters=self.stage3_num_channels, + has_se=self.has_se, + name="st3", + align_corners=align_corners, + padding_same=padding_same) + + self.tr3 = TransitionLayer( + in_channels=self.stage3_num_channels, + out_channels=self.stage4_num_channels, + name="tr3", + padding_same=padding_same) + self.st4 = Stage( + num_channels=self.stage4_num_channels, + num_modules=self.stage4_num_modules, + num_blocks=self.stage4_num_blocks, + num_filters=self.stage4_num_channels, + has_se=self.has_se, + name="st4", + align_corners=align_corners, + padding_same=padding_same) + + self.init_weight() + + def 
forward(self, x): + feat_list = [] + conv1 = self.conv_layer1_1(x) + feat_list.append(conv1) + conv2 = self.conv_layer1_2(conv1) + + la1 = self.la1(conv2) + + tr1 = self.tr1([la1]) + st2 = self.st2(tr1) + + tr2 = self.tr2(st2) + st3 = self.st3(tr2) + + tr3 = self.tr3(st3) + st4 = self.st4(tr3) + + feat_list = feat_list + st4 + + return feat_list + + def init_weight(self): + for layer in self.sublayers(): + if isinstance(layer, nn.Conv2D): + param_init.normal_init(layer.weight, std=0.001) + elif isinstance(layer, (nn.BatchNorm, nn.SyncBatchNorm)): + param_init.constant_init(layer.weight, value=1.0) + param_init.constant_init(layer.bias, value=0.0) + if self.pretrained is not None: + utils.load_pretrained_model(self, self.pretrained) + + +class Layer1(nn.Layer): + def __init__(self, + num_channels, + num_filters, + num_blocks, + has_se=False, + name=None, + padding_same=True): + super(Layer1, self).__init__() + + self.bottleneck_block_list = [] + + for i in range(num_blocks): + bottleneck_block = self.add_sublayer( + "bb_{}_{}".format(name, i + 1), + BottleneckBlock( + num_channels=num_channels if i == 0 else num_filters * 4, + num_filters=num_filters, + has_se=has_se, + stride=1, + downsample=True if i == 0 else False, + name=name + '_' + str(i + 1), + padding_same=padding_same)) + self.bottleneck_block_list.append(bottleneck_block) + + def forward(self, x): + conv = x + for block_func in self.bottleneck_block_list: + conv = block_func(conv) + return conv + + +class TransitionLayer(nn.Layer): + def __init__(self, in_channels, out_channels, name=None, padding_same=True): + super(TransitionLayer, self).__init__() + + num_in = len(in_channels) + num_out = len(out_channels) + self.conv_bn_func_list = [] + for i in range(num_out): + residual = None + if i < num_in: + if in_channels[i] != out_channels[i]: + residual = self.add_sublayer( + "transition_{}_layer_{}".format(name, i + 1), + layers.ConvBNReLU( + in_channels=in_channels[i], + out_channels=out_channels[i], + kernel_size=3, + padding=1 if not padding_same else 'same', + bias_attr=False)) + else: + residual = self.add_sublayer( + "transition_{}_layer_{}".format(name, i + 1), + layers.ConvBNReLU( + in_channels=in_channels[-1], + out_channels=out_channels[i], + kernel_size=3, + stride=2, + padding=1 if not padding_same else 'same', + bias_attr=False)) + self.conv_bn_func_list.append(residual) + + def forward(self, x): + outs = [] + for idx, conv_bn_func in enumerate(self.conv_bn_func_list): + if conv_bn_func is None: + outs.append(x[idx]) + else: + if idx < len(x): + outs.append(conv_bn_func(x[idx])) + else: + outs.append(conv_bn_func(x[-1])) + return outs + + +class Branches(nn.Layer): + def __init__(self, + num_blocks, + in_channels, + out_channels, + has_se=False, + name=None, + padding_same=True): + super(Branches, self).__init__() + + self.basic_block_list = [] + + for i in range(len(out_channels)): + self.basic_block_list.append([]) + for j in range(num_blocks[i]): + in_ch = in_channels[i] if j == 0 else out_channels[i] + basic_block_func = self.add_sublayer( + "bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1), + BasicBlock( + num_channels=in_ch, + num_filters=out_channels[i], + has_se=has_se, + name=name + '_branch_layer_' + str(i + 1) + '_' + + str(j + 1), + padding_same=padding_same)) + self.basic_block_list[i].append(basic_block_func) + + def forward(self, x): + outs = [] + for idx, input in enumerate(x): + conv = input + for basic_block_func in self.basic_block_list[idx]: + conv = basic_block_func(conv) + outs.append(conv) + 
return outs + + +class BottleneckBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + has_se, + stride=1, + downsample=False, + name=None, + padding_same=True): + super(BottleneckBlock, self).__init__() + + self.has_se = has_se + self.downsample = downsample + + self.conv1 = layers.ConvBNReLU( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=1, + bias_attr=False) + + self.conv2 = layers.ConvBNReLU( + in_channels=num_filters, + out_channels=num_filters, + kernel_size=3, + stride=stride, + padding=1 if not padding_same else 'same', + bias_attr=False) + + self.conv3 = layers.ConvBN( + in_channels=num_filters, + out_channels=num_filters * 4, + kernel_size=1, + bias_attr=False) + + if self.downsample: + self.conv_down = layers.ConvBN( + in_channels=num_channels, + out_channels=num_filters * 4, + kernel_size=1, + bias_attr=False) + + if self.has_se: + self.se = SELayer( + num_channels=num_filters * 4, + num_filters=num_filters * 4, + reduction_ratio=16, + name=name + '_fc') + + self.add = layers.Add() + self.relu = layers.Activation("relu") + + def forward(self, x): + residual = x + conv1 = self.conv1(x) + conv2 = self.conv2(conv1) + conv3 = self.conv3(conv2) + + if self.downsample: + residual = self.conv_down(x) + + if self.has_se: + conv3 = self.se(conv3) + + y = self.add(conv3, residual) + y = self.relu(y) + return y + + +class BasicBlock(nn.Layer): + def __init__(self, + num_channels, + num_filters, + stride=1, + has_se=False, + downsample=False, + name=None, + padding_same=True): + super(BasicBlock, self).__init__() + + self.has_se = has_se + self.downsample = downsample + + self.conv1 = layers.ConvBNReLU( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=3, + stride=stride, + padding=1 if not padding_same else 'same', + bias_attr=False) + self.conv2 = layers.ConvBN( + in_channels=num_filters, + out_channels=num_filters, + kernel_size=3, + padding=1 if not padding_same else 'same', + bias_attr=False) + + if self.downsample: + self.conv_down = layers.ConvBNReLU( + in_channels=num_channels, + out_channels=num_filters, + kernel_size=1, + bias_attr=False) + + if self.has_se: + self.se = SELayer( + num_channels=num_filters, + num_filters=num_filters, + reduction_ratio=16, + name=name + '_fc') + + self.add = layers.Add() + self.relu = layers.Activation("relu") + + def forward(self, x): + residual = x + conv1 = self.conv1(x) + conv2 = self.conv2(conv1) + + if self.downsample: + residual = self.conv_down(x) + + if self.has_se: + conv2 = self.se(conv2) + + y = self.add(conv2, residual) + y = self.relu(y) + return y + + +class SELayer(nn.Layer): + def __init__(self, num_channels, num_filters, reduction_ratio, name=None): + super(SELayer, self).__init__() + + self.pool2d_gap = nn.AdaptiveAvgPool2D(1) + + self._num_channels = num_channels + + med_ch = int(num_channels / reduction_ratio) + stdv = 1.0 / math.sqrt(num_channels * 1.0) + self.squeeze = nn.Linear( + num_channels, + med_ch, + weight_attr=paddle.ParamAttr( + initializer=nn.initializer.Uniform(-stdv, stdv))) + + stdv = 1.0 / math.sqrt(med_ch * 1.0) + self.excitation = nn.Linear( + med_ch, + num_filters, + weight_attr=paddle.ParamAttr( + initializer=nn.initializer.Uniform(-stdv, stdv))) + + def forward(self, x): + pool = self.pool2d_gap(x) + pool = paddle.reshape(pool, shape=[-1, self._num_channels]) + squeeze = self.squeeze(pool) + squeeze = F.relu(squeeze) + excitation = self.excitation(squeeze) + excitation = F.sigmoid(excitation) + excitation = paddle.reshape( + excitation, 
shape=[-1, self._num_channels, 1, 1]) + out = x * excitation + return out + + +class Stage(nn.Layer): + def __init__(self, + num_channels, + num_modules, + num_blocks, + num_filters, + has_se=False, + multi_scale_output=True, + name=None, + align_corners=False, + padding_same=True): + super(Stage, self).__init__() + + self._num_modules = num_modules + + self.stage_func_list = [] + for i in range(num_modules): + if i == num_modules - 1 and not multi_scale_output: + stage_func = self.add_sublayer( + "stage_{}_{}".format(name, i + 1), + HighResolutionModule( + num_channels=num_channels, + num_blocks=num_blocks, + num_filters=num_filters, + has_se=has_se, + multi_scale_output=False, + name=name + '_' + str(i + 1), + align_corners=align_corners, + padding_same=padding_same)) + else: + stage_func = self.add_sublayer( + "stage_{}_{}".format(name, i + 1), + HighResolutionModule( + num_channels=num_channels, + num_blocks=num_blocks, + num_filters=num_filters, + has_se=has_se, + name=name + '_' + str(i + 1), + align_corners=align_corners, + padding_same=padding_same)) + + self.stage_func_list.append(stage_func) + + def forward(self, x): + out = x + for idx in range(self._num_modules): + out = self.stage_func_list[idx](out) + return out + + +class HighResolutionModule(nn.Layer): + def __init__(self, + num_channels, + num_blocks, + num_filters, + has_se=False, + multi_scale_output=True, + name=None, + align_corners=False, + padding_same=True): + super(HighResolutionModule, self).__init__() + + self.branches_func = Branches( + num_blocks=num_blocks, + in_channels=num_channels, + out_channels=num_filters, + has_se=has_se, + name=name, + padding_same=padding_same) + + self.fuse_func = FuseLayers( + in_channels=num_filters, + out_channels=num_filters, + multi_scale_output=multi_scale_output, + name=name, + align_corners=align_corners, + padding_same=padding_same) + + def forward(self, x): + out = self.branches_func(x) + out = self.fuse_func(out) + return out + + +class FuseLayers(nn.Layer): + def __init__(self, + in_channels, + out_channels, + multi_scale_output=True, + name=None, + align_corners=False, + padding_same=True): + super(FuseLayers, self).__init__() + + self._actual_ch = len(in_channels) if multi_scale_output else 1 + self._in_channels = in_channels + self.align_corners = align_corners + + self.residual_func_list = [] + for i in range(self._actual_ch): + for j in range(len(in_channels)): + if j > i: + residual_func = self.add_sublayer( + "residual_{}_layer_{}_{}".format(name, i + 1, j + 1), + layers.ConvBN( + in_channels=in_channels[j], + out_channels=out_channels[i], + kernel_size=1, + bias_attr=False)) + self.residual_func_list.append(residual_func) + elif j < i: + pre_num_filters = in_channels[j] + for k in range(i - j): + if k == i - j - 1: + residual_func = self.add_sublayer( + "residual_{}_layer_{}_{}_{}".format( + name, i + 1, j + 1, k + 1), + layers.ConvBN( + in_channels=pre_num_filters, + out_channels=out_channels[i], + kernel_size=3, + stride=2, + padding=1 if not padding_same else 'same', + bias_attr=False)) + pre_num_filters = out_channels[i] + else: + residual_func = self.add_sublayer( + "residual_{}_layer_{}_{}_{}".format( + name, i + 1, j + 1, k + 1), + layers.ConvBNReLU( + in_channels=pre_num_filters, + out_channels=out_channels[j], + kernel_size=3, + stride=2, + padding=1 if not padding_same else 'same', + bias_attr=False)) + pre_num_filters = out_channels[j] + self.residual_func_list.append(residual_func) + + def forward(self, x): + outs = [] + residual_func_idx = 0 + for i 
in range(self._actual_ch): + residual = x[i] + residual_shape = paddle.shape(residual)[-2:] + for j in range(len(self._in_channels)): + if j > i: + y = self.residual_func_list[residual_func_idx](x[j]) + residual_func_idx += 1 + + y = F.interpolate( + y, + residual_shape, + mode='bilinear', + align_corners=self.align_corners) + residual = residual + y + elif j < i: + y = x[j] + for k in range(i - j): + y = self.residual_func_list[residual_func_idx](y) + residual_func_idx += 1 + + residual = residual + y + + residual = F.relu(residual) + outs.append(residual) + + return outs + + +@manager.BACKBONES.add_component +def HRNet_W18_Small_V1(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[1], + stage1_num_channels=[32], + stage2_num_modules=1, + stage2_num_blocks=[2, 2], + stage2_num_channels=[16, 32], + stage3_num_modules=1, + stage3_num_blocks=[2, 2, 2], + stage3_num_channels=[16, 32, 64], + stage4_num_modules=1, + stage4_num_blocks=[2, 2, 2, 2], + stage4_num_channels=[16, 32, 64, 128], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W18_Small_V2(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[2], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[2, 2], + stage2_num_channels=[18, 36], + stage3_num_modules=3, + stage3_num_blocks=[2, 2, 2], + stage3_num_channels=[18, 36, 72], + stage4_num_modules=2, + stage4_num_blocks=[2, 2, 2, 2], + stage4_num_channels=[18, 36, 72, 144], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W18(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[18, 36], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[18, 36, 72], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[18, 36, 72, 144], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W30(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[30, 60], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[30, 60, 120], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[30, 60, 120, 240], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W32(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[32, 64], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[32, 64, 128], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[32, 64, 128, 256], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W40(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[40, 80], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[40, 80, 160], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[40, 80, 160, 320], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W44(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + 
stage2_num_blocks=[4, 4], + stage2_num_channels=[44, 88], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[44, 88, 176], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[44, 88, 176, 352], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W48(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[48, 96], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[48, 96, 192], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[48, 96, 192, 384], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W60(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[60, 120], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[60, 120, 240], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[60, 120, 240, 480], + **kwargs) + return model + + +@manager.BACKBONES.add_component +def HRNet_W64(**kwargs): + model = HRNet( + stage1_num_modules=1, + stage1_num_blocks=[4], + stage1_num_channels=[64], + stage2_num_modules=1, + stage2_num_blocks=[4, 4], + stage2_num_channels=[64, 128], + stage3_num_modules=4, + stage3_num_blocks=[4, 4, 4], + stage3_num_channels=[64, 128, 256], + stage4_num_modules=3, + stage4_num_blocks=[4, 4, 4, 4], + stage4_num_channels=[64, 128, 256, 512], + **kwargs) + return model diff --git a/contrib/Matting/model/resnet_vd.py b/contrib/Matting/model/resnet_vd.py index c5124492f1..045bae2087 100644 --- a/contrib/Matting/model/resnet_vd.py +++ b/contrib/Matting/model/resnet_vd.py @@ -194,7 +194,7 @@ class ResNet_vd(nn.Layer): def __init__(self, input_channels=3, layers=50, - output_stride=8, + output_stride=32, multi_grid=(1, 1, 1), pretrained=None): super(ResNet_vd, self).__init__() @@ -223,6 +223,7 @@ def __init__(self, # for channels of four returned stages self.feat_channels = [c * 4 for c in num_filters ] if layers >= 50 else num_filters + self.feat_channels = [64] + self.feat_channels dilation_dict = None if output_stride == 8: @@ -314,15 +315,15 @@ def __init__(self, self.init_weight() def forward(self, inputs): + feat_list = [] y = self.conv1_1(inputs) y = self.conv1_2(y) y = self.conv1_3(y) - self.conv1_logit = y.clone() + feat_list.append(y) + y = self.pool2d_max(y) # A feature list saves the output feature map of each stage. 
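With this change the backbone returns five feature levels instead of four: the new first entry is the 64-channel stem output taken before pool2d_max, matching the 64 prepended to feat_channels above. A minimal sketch of the resulting contract, assuming a ResNet34_vd backbone with output_stride=32 and a 320x320 input (the checker below is illustrative, not part of the repo):

```python
# Expected (channels, stride) per feature level after this patch. The first
# entry is the stride-2 stem feature kept before pooling; the remaining four
# are the residual stages at strides 4, 8, 16 and 32.
EXPECTED = [(64, 2), (64, 4), (128, 8), (256, 16), (512, 32)]

def check_feat_list(feat_list, in_hw=(320, 320)):
    assert len(feat_list) == len(EXPECTED)
    for feat, (c, s) in zip(feat_list, EXPECTED):
        _, ch, h, w = feat.shape
        assert (ch, h, w) == (c, in_hw[0] // s, in_hw[1] // s)

# Usage sketch, assuming the Matting package is importable:
#   import paddle
#   from model.resnet_vd import ResNet34_vd
#   check_feat_list(ResNet34_vd(output_stride=32)(paddle.randn([1, 3, 320, 320])))
```

The decoder introduced in ziyan.py below indexes this list from the back (fea_list[-1] down to fea_list[-5]), so it relies on exactly this ordering.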
- feat_list = [] - feat_list.append(y) for stage in self.stage_list: for block in stage: y = block(y) From ca685dc24b2207d0d3b776c95c35f2d3f6a412c8 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 19 Oct 2021 10:55:05 +0800 Subject: [PATCH 173/210] first add ziyan --- contrib/Matting/model/__init__.py | 1 + contrib/Matting/model/resnet_vd.py | 2 +- contrib/Matting/model/ziyan.py | 258 +++++++++++++++++++++++++++++ 3 files changed, 260 insertions(+), 1 deletion(-) create mode 100644 contrib/Matting/model/ziyan.py diff --git a/contrib/Matting/model/__init__.py b/contrib/Matting/model/__init__.py index 9e906c1567..c83fe61527 100644 --- a/contrib/Matting/model/__init__.py +++ b/contrib/Matting/model/__init__.py @@ -19,3 +19,4 @@ from .dim import DIM from .loss import MRSD from .modnet import MODNet +from .ziyan import ZiYan diff --git a/contrib/Matting/model/resnet_vd.py b/contrib/Matting/model/resnet_vd.py index 045bae2087..55662bad97 100644 --- a/contrib/Matting/model/resnet_vd.py +++ b/contrib/Matting/model/resnet_vd.py @@ -1,4 +1,4 @@ -# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/contrib/Matting/model/ziyan.py b/contrib/Matting/model/ziyan.py new file mode 100644 index 0000000000..f5c383533e --- /dev/null +++ b/contrib/Matting/model/ziyan.py @@ -0,0 +1,258 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
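The new file below implements a glance-and-focus decomposition: a glance decoder classifies every pixel as foreground, transition or background, a focus decoder regresses alpha, and the two are fused so that only the predicted transition band keeps the regressed values. A standalone sketch of that fusion rule, mirroring the fusion() method defined further down (the names here are illustrative):

```python
import paddle
import paddle.nn.functional as F

def fuse(glance_logit, focus_alpha):
    """glance_logit: [N, 3, H, W] scores for (fg, transition, bg);
    focus_alpha: [N, 1, H, W] regressed alpha in [0, 1]."""
    index = paddle.argmax(F.softmax(glance_logit, axis=1), axis=1, keepdim=True)
    transition = (index == 1).astype('float32')  # keep regressed alpha here
    fg = (index == 0).astype('float32')          # confident foreground -> alpha 1
    return focus_alpha * transition + fg         # confident background stays 0
```

The loss defined in this file follows the same split: an NLLLoss supervises the glance branch against a trimap-derived three-way label, while MRSD terms supervise the focus branch inside the transition region and the fused alpha (plus its composition against fg/bg) globally.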
+ +from collections import defaultdict + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +from paddleseg.models import layers +from paddleseg import utils +from paddleseg.cvlibs import manager + +from model import MRSD + + +def conv_up_psp(in_channels, out_channels, up_sample): + return nn.Sequential( + layers.ConvBNReLU(in_channels, out_channels, 3, padding=1), + nn.Upsample( + scale_factor=up_sample, mode='bilinear', align_corners=False)) + + +@manager.MODELS.add_component +class ZiYan(nn.Layer): + def __init__(self, backbone, pretrained=None): + super().__init__() + self.backbone = backbone + self.pretrained = pretrained + + ###################### + ### Decoder part - Glance + ###################### + self.psp_module = layers.PPModule( + 512, + 512, + bin_sizes=(1, 3, 5), + dim_reduction=False, + align_corners=False) + self.psp4 = conv_up_psp(512, 256, 2) + self.psp3 = conv_up_psp(512, 128, 4) + self.psp2 = conv_up_psp(512, 64, 8) + self.psp1 = conv_up_psp(512, 64, 16) + # stage 5g + self.decoder5_g = nn.Sequential( + layers.ConvBNReLU(1024, 512, 3, padding=1), + layers.ConvBNReLU(512, 512, 3, padding=2, dilation=2), + layers.ConvBNReLU(512, 256, 3, padding=2, dilation=2), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 4g + self.decoder4_g = nn.Sequential( + layers.ConvBNReLU(512, 256, 3, padding=1), + layers.ConvBNReLU(256, 256, 3, padding=1), + layers.ConvBNReLU(256, 128, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 3g + self.decoder3_g = nn.Sequential( + layers.ConvBNReLU(256, 128, 3, padding=1), + layers.ConvBNReLU(128, 128, 3, padding=1), + layers.ConvBNReLU(128, 64, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 2g + self.decoder2_g = nn.Sequential( + layers.ConvBNReLU(128, 128, 3, padding=1), + layers.ConvBNReLU(128, 128, 3, padding=1), + layers.ConvBNReLU(128, 64, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 1g + self.decoder1_g = nn.Sequential( + layers.ConvBNReLU(128, 64, 3, padding=1), + layers.ConvBNReLU(64, 64, 3, padding=1), + layers.ConvBNReLU(64, 64, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 0g + self.decoder0_g = nn.Sequential( + layers.ConvBNReLU(64, 64, 3, padding=1), + layers.ConvBNReLU(64, 64, 3, padding=1), + nn.Conv2D(64, 3, 3, padding=1)) + + ########################## + ### Decoder part - FOCUS + ########################## + self.bridge_block = nn.Sequential( + layers.ConvBNReLU(512, 512, 3, dilation=2, padding=2), + layers.ConvBNReLU(512, 512, 3, dilation=2, padding=2), + layers.ConvBNReLU(512, 512, 3, dilation=2, padding=2)) + # stage 5f + self.decoder5_f = nn.Sequential( + layers.ConvBNReLU(1024, 512, 3, padding=1), + layers.ConvBNReLU(512, 512, 3, padding=2, dilation=2), + layers.ConvBNReLU(512, 256, 3, padding=2, dilation=2), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 4f + self.decoder4_f = nn.Sequential( + layers.ConvBNReLU(512, 256, 3, padding=1), + layers.ConvBNReLU(256, 256, 3, padding=1), + layers.ConvBNReLU(256, 128, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 3f + self.decoder3_f = nn.Sequential( + layers.ConvBNReLU(256, 128, 3, padding=1), + layers.ConvBNReLU(128, 128, 3, padding=1), + layers.ConvBNReLU(128, 64, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 2f + 
self.decoder2_f = nn.Sequential( + layers.ConvBNReLU(128, 128, 3, padding=1), + layers.ConvBNReLU(128, 128, 3, padding=1), + layers.ConvBNReLU(128, 64, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 1f + self.decoder1_f = nn.Sequential( + layers.ConvBNReLU(128, 64, 3, padding=1), + layers.ConvBNReLU(64, 64, 3, padding=1), + layers.ConvBNReLU(64, 64, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 0g + self.decoder0_f = nn.Sequential( + layers.ConvBNReLU(64, 64, 3, padding=1), + layers.ConvBNReLU(64, 64, 3, padding=1), + nn.Conv2D(64, 1, 3, padding=1)) + + def forward(self, inputs): + x = inputs['img'] + # input fea_list shape [N, 64, H/2, W/2] [N, 64, H/4, W/4] + # [N, 128, H/8, W/8] [N, 256, H/16, W/16] [N, 512, H/32, W/32] + fea_list = self.backbone(x) + + ########################## + ### Decoder part - GLANCE + ########################## + #psp: N, 512, H/32, W/32 + psp = self.psp_module(fea_list[-1]) + #d6_g: N, 512, H/16, W/16 + d5_g = self.decoder5_g(paddle.concat((psp, fea_list[-1]), 1)) + #d5_g: N, 512, H/8, W/8 + d4_g = self.decoder4_g(paddle.concat((self.psp4(psp), d5_g), 1)) + #d4_g: N, 256, H/4, W/4 + d3_g = self.decoder3_g(paddle.concat((self.psp3(psp), d4_g), 1)) + #d4_g: N, 128, H/2, W/2 + d2_g = self.decoder2_g(paddle.concat((self.psp2(psp), d3_g), 1)) + #d2_g: N, 64, H, W + d1_g = self.decoder1_g(paddle.concat((self.psp1(psp), d2_g), 1)) + #d0_g: N, 3, H, W + d0_g = self.decoder0_g(d1_g) + # The 1st channel is foreground. The 2nd is transition region. The 3rd is background. + # glance_sigmoid = F.sigmoid(d0_g) + glance_sigmoid = F.softmax(d0_g, axis=1) + + ########################## + ### Decoder part - FOCUS + ########################## + bb = self.bridge_block(fea_list[-1]) + #bg: N, 512, H/32, W/32 + d5_f = self.decoder5_f(paddle.concat((bb, fea_list[-1]), 1)) + #d5_f: N, 256, H/16, W/16 + d4_f = self.decoder4_f(paddle.concat((d5_f, fea_list[-2]), 1)) + #d4_f: N, 128, H/8, W/8 + d3_f = self.decoder3_f(paddle.concat((d4_f, fea_list[-3]), 1)) + #d3_f: N, 64, H/4, W/4 + d2_f = self.decoder2_f(paddle.concat((d3_f, fea_list[-4]), 1)) + #d2_f: N, 64, H/2, W/2 + d1_f = self.decoder1_f(paddle.concat((d2_f, fea_list[-5]), 1)) + #d1_f: N, 64, H, W + d0_f = self.decoder0_f(d1_f) + #d0_f: N, 1, H, W + focus_sigmoid = F.sigmoid(d0_f) + + fusion_sigmoid = self.fusion(glance_sigmoid, focus_sigmoid) + + if self.training: + logit_dict = { + 'glance': glance_sigmoid, + 'focus': focus_sigmoid, + 'fusion': fusion_sigmoid + } + return logit_dict + else: + return fusion_sigmoid + + def loss(self, logit_dict, label_dict, loss_func_dict=None): + if loss_func_dict is None: + loss_func_dict = defaultdict(list) + loss_func_dict['glance'].append(nn.NLLLoss()) + loss_func_dict['focus'].append(MRSD()) + loss_func_dict['cm'].append(MRSD()) + loss_func_dict['cm'].append(MRSD()) + + loss = {} + + # glance loss computation + # get glance label + glance_label = label_dict['trimap'] + glance_label_trans = (glance_label == 128).astype('int64') + glance_label_bg = (glance_label == 0).astype('int64') + glance_label = glance_label_trans + glance_label_bg * 2 + loss_glance = loss_func_dict['glance'][0](paddle.log( + logit_dict['glance']), glance_label.squeeze(1)) + loss['glance'] = loss_glance + # TODO: validate the glance label + + # focus loss computation + loss_focus = loss_func_dict['focus'][0](logit_dict['focus'], + label_dict['alpha'], + label_dict['trimap'] == 128) + loss['focus'] = loss_focus + + # collaborative matting
loss + loss_cm_func = loss_func_dict['cm'] + # fusion_sigmoid loss + loss_cm = loss_cm_func[0](logit_dict['fusion'], label_dict['alpha']) + # composition loss + comp_pred = logit_dict['fusion'] * label_dict['fg'] + ( + 1 - logit_dict['fusion']) * label_dict['bg'] + loss_cm = loss_cm + loss_cm_func[1](comp_pred, label_dict['img']) + loss['cm'] = loss_cm + + loss['all'] = 0.25 * loss_glance + 0.25 * loss_focus + 0.25 * loss['cm'] + + return loss + + def fusion(self, glance_sigmoid, focus_sigmoid): + # glance_sigmoid [N, 3, H, W] + # In index, 0 is foreground, 1 is transition, 2 is background + # After fusion, the foreground is 1, the background is 0, and the transition is between [0, 1] + index = paddle.argmax(glance_sigmoid, axis=1, keepdim=True) + transition_mask = (index == 1).astype('float32') + fg = (index == 0).astype('float32') + fusion_sigmoid = focus_sigmoid * transition_mask + fg + return fusion_sigmoid + + +if __name__ == '__main__': + from resnet_vd import ResNet34_vd + backbone = ResNet34_vd(output_stride=32) + x = paddle.randint(0, 256, (1, 3, 320, 320)).astype('float32') + inputs = {} + inputs['img'] = x + model = ZiYan(backbone=backbone) + logit_dict = model(inputs) + print(logit_dict) + +# paddle.set_device('cpu') +# glance_sigmoid = paddle.rand((1, 3, 4, 4)) +# focus_sigmoid = paddle.rand((1, 1, 4, 4)) +# print(glance_sigmoid) +# print(focus_sigmoid) +# # fusion(glance_sigmoid, focus_sigmoid) From b983c7b956b879a8cbd2a3ce96f82440b207eda3 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 20 Oct 2021 11:30:54 +0800 Subject: [PATCH 174/210] update model --- contrib/Matting/model/resnet_vd.py | 1 + contrib/Matting/model/ziyan.py | 30 ++++++++++++++++++++++-------- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/contrib/Matting/model/resnet_vd.py b/contrib/Matting/model/resnet_vd.py index 55662bad97..3fbeb328ca 100644 --- a/contrib/Matting/model/resnet_vd.py +++ b/contrib/Matting/model/resnet_vd.py @@ -341,6 +341,7 @@ def ResNet18_vd(**args): return model +@manager.BACKBONES.add_component def ResNet34_vd(**args): model = ResNet_vd(layers=34, **args) return model diff --git a/contrib/Matting/model/ziyan.py b/contrib/Matting/model/ziyan.py index f5c383533e..d829c8ffa8 100644 --- a/contrib/Matting/model/ziyan.py +++ b/contrib/Matting/model/ziyan.py @@ -38,11 +38,12 @@ def __init__(self, backbone, pretrained=None): self.backbone = backbone self.pretrained = pretrained + self.backbone_channels = backbone.feat_channels ###################### ### Decoder part - Glance ###################### self.psp_module = layers.PPModule( - 512, + self.backbone_channels[-1], 512, bin_sizes=(1, 3, 5), dim_reduction=False, align_corners=False) @@ -53,7 +54,8 @@ def __init__(self, backbone, pretrained=None): self.psp1 = conv_up_psp(512, 64, 16) # stage 5g self.decoder5_g = nn.Sequential( - layers.ConvBNReLU(1024, 512, 3, padding=1), + layers.ConvBNReLU( + 512 + self.backbone_channels[-1], 512, 3, padding=1), layers.ConvBNReLU(512, 512, 3, padding=2, dilation=2), layers.ConvBNReLU(512, 256, 3, padding=2, dilation=2), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) @@ -91,36 +93,42 @@ def __init__(self, backbone, pretrained=None): ### Decoder part - FOCUS ########################## self.bridge_block = nn.Sequential( - layers.ConvBNReLU(512, 512, 3, dilation=2, padding=2), + layers.ConvBNReLU( + self.backbone_channels[-1], 512, 3, dilation=2, padding=2), layers.ConvBNReLU(512, 512, 3, dilation=2, padding=2), layers.ConvBNReLU(512, 512, 3, dilation=2, padding=2)) # stage 5f self.decoder5_f =
nn.Sequential( - layers.ConvBNReLU(1024, 512, 3, padding=1), + layers.ConvBNReLU( + 512 + self.backbone_channels[-1], 512, 3, padding=1), layers.ConvBNReLU(512, 512, 3, padding=2, dilation=2), layers.ConvBNReLU(512, 256, 3, padding=2, dilation=2), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) # stage 4f self.decoder4_f = nn.Sequential( - layers.ConvBNReLU(512, 256, 3, padding=1), + layers.ConvBNReLU( + 256 + self.backbone_channels[-2], 256, 3, padding=1), layers.ConvBNReLU(256, 256, 3, padding=1), layers.ConvBNReLU(256, 128, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) # stage 3f self.decoder3_f = nn.Sequential( - layers.ConvBNReLU(256, 128, 3, padding=1), + layers.ConvBNReLU( + 128 + self.backbone_channels[-3], 128, 3, padding=1), layers.ConvBNReLU(128, 128, 3, padding=1), layers.ConvBNReLU(128, 64, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) # stage 2f self.decoder2_f = nn.Sequential( - layers.ConvBNReLU(128, 128, 3, padding=1), + layers.ConvBNReLU( + 64 + self.backbone_channels[-4], 128, 3, padding=1), layers.ConvBNReLU(128, 128, 3, padding=1), layers.ConvBNReLU(128, 64, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) # stage 1f self.decoder1_f = nn.Sequential( - layers.ConvBNReLU(128, 64, 3, padding=1), + layers.ConvBNReLU( + 64 + self.backbone_channels[-5], 64, 3, padding=1), layers.ConvBNReLU(64, 64, 3, padding=1), layers.ConvBNReLU(64, 64, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) @@ -130,6 +138,8 @@ def __init__(self, backbone, pretrained=None): layers.ConvBNReLU(64, 64, 3, padding=1), nn.Conv2D(64, 1, 3, padding=1)) + self.init_weight() + def forward(self, inputs): x = inputs['img'] # input fea_list shape [N, 64, H/2, W/2] [N, 64, H/4, W/4] @@ -239,6 +249,10 @@ def fusion(self, glance_sigmoid, focus_sigmoid): fusion_sigmoid = focus_sigmoid * transition_mask + fg return fusion_sigmoid + def init_weight(self): + if self.pretrained is not None: + utils.load_entire_model(self, self.pretrained) + if __name__ == '__main__': From 3bd58509deafe9f1c9d1b9beb0766e6ae530640d Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 27 Oct 2021 17:02:24 +0800 Subject: [PATCH 175/210] add RandomResize transform --- contrib/Matting/transforms.py | 56 +++++++++++++++++++++++++++++++++-- 1 file changed, 53 insertions(+), 3 deletions(-) diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index 641c13768e..fa49f762e3 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -105,6 +105,56 @@ def __call__(self, data): return data + +@manager.TRANSFORMS.add_component +class RandomResize: + """ + Resize image to a size determined by `scale` and `size`. + + Args: + size(tuple|list): The reference size to resize. A tuple or list with length 2. + scale(tuple|list, optional): A range of scale based on `size`. A tuple or list with length 2. Default: None. + """ + + def __init__(self, size, scale=None): + if isinstance(size, list) or isinstance(size, tuple): + if len(size) != 2: + raise ValueError( + '`size` should include 2 elements, but it is {}'.format( + size)) + else: + raise TypeError( + "Type of `size` is invalid. It should be list or tuple, but it is {}" + .format(type(size))) + + if scale is not None: + if isinstance(scale, list) or isinstance(scale, tuple): + if len(scale) != 2: + raise ValueError( + '`scale` should include 2 elements, but it is {}'.
+ format(scale)) + else: + raise TypeError( + "Type of `scale` is invalid. It should be list or tuple, but it is {}" + .format(type(scale))) + self.size = size + self.scale = scale + + def __call__(self, data): + h, w = data['img'].shape[:2] + scale = np.random.uniform(self.scale[0], self.scale[1]) + scale_factor = max(self.size[0] / w, self.size[1] / h) + print(scale) + print(scale_factor) + scale = scale * scale_factor + + w = int(round(w * scale)) + h = int(round(h * scale)) + data['img'] = functional.resize(data['img'], (w, h)) + for key in data.get('gt_fields', []): + data[key] = functional.resize(data[key], (w, h)) + return data + + @manager.TRANSFORMS.add_component class ResizeByLong: """ @@ -502,11 +552,11 @@ def __call__(self, data): if __name__ == "__main__": - transforms = [RandomDistort()] + transforms = [RandomResize(size=(512, 512), scale=(0.5, 1.0))] transforms = Compose(transforms) - fg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/matting/data/matting/human_matting/Distinctions-646/train/fg/13(2).png' + fg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/Matting/data/matting/human_matting/Distinctions-646/train/fg/13(2).png' alpha_path = fg_path.replace('fg', 'alpha') - bg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/matting/data/matting/human_matting/bg/unsplash_bg/attic/photo-1443884590026-2e4d21aee71c?crop=entropy&cs=tinysrgb&fit=max&fm=jpg&ixid=MnwxMjA3fDB8MXxzZWFyY2h8Nzh8fGF0dGljfGVufDB8fHx8MTYyOTY4MDcxNQ&ixlib=rb-1.2.1&q=80&w=400.jpg' + bg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/Matting/data/matting/human_matting/bg/unsplash_bg/attic/photo-1443884590026-2e4d21aee71c?crop=entropy&cs=tinysrgb&fit=max&fm=jpg&ixid=MnwxMjA3fDB8MXxzZWFyY2h8Nzh8fGF0dGljfGVufDB8fHx8MTYyOTY4MDcxNQ&ixlib=rb-1.2.1&q=80&w=400.jpg' data = {} data['fg'] = cv2.imread(fg_path) data['bg'] = cv2.imread(bg_path) From bec24f12ed08e688b60fdf21449a26d85178ce63 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 27 Oct 2021 17:45:56 +0800 Subject: [PATCH 176/210] add Padding transform --- contrib/Matting/transforms.py | 72 +++++++++++++++++++++++++++++++++-- 1 file changed, 69 insertions(+), 3 deletions(-) diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index fa49f762e3..a9ecdc281f 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -143,8 +143,6 @@ def __call__(self, data): h, w = data['img'].shape[:2] scale = np.random.uniform(self.scale[0], self.scale[1]) scale_factor = max(self.size[0] / w, self.size[1] / h) - print(scale) - print(scale_factor) scale = scale * scale_factor w = int(round(w * scale)) @@ -551,8 +549,74 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component +class Padding: + """ + Add bottom-right padding to a raw image or annotation image. + + Args: + target_size (list|tuple): The target size after padding. + im_padding_value (list, optional): The padding value of raw image. + Default: [127.5, 127.5, 127.5]. + label_padding_value (int, optional): The padding value of annotation image. Default: 255. + + Raises: + TypeError: When target_size is neither list nor tuple. + ValueError: When the length of target_size is not 2. + """ + + def __init__(self, target_size, im_padding_value=(127.5, 127.5, 127.5)): + if isinstance(target_size, list) or isinstance(target_size, tuple): + if len(target_size) != 2: + raise ValueError( + '`target_size` should include 2 elements, but it is {}'. + format(target_size)) + else: + raise TypeError( + "Type of target_size is invalid. 
It should be list or tuple, now is {}" + .format(type(target_size))) + + self.target_size = target_size + self.im_padding_value = im_padding_value + + def __call__(self, data): + im_height, im_width = data['img'].shape[0], data['img'].shape[1] + target_height = self.target_size[1] + target_width = self.target_size[0] + pad_height = target_height - im_height + pad_width = target_width - im_width + if pad_height < 0 or pad_width < 0: + raise ValueError( + 'The size of image should be less than `target_size`, but the size of image ({}, {}) is larger than `target_size` ({}, {})' + .format(im_width, im_height, target_width, target_height)) + else: + data['img'] = cv2.copyMakeBorder( + data['img'], + 0, + pad_height, + 0, + pad_width, + cv2.BORDER_CONSTANT, + value=self.im_padding_value) + for key in data.get('gt_fields', []): + if key in ['trimap', 'alpha']: + value = 0 + else: + value = self.im_padding_value + data[key] = cv2.copyMakeBorder( + data[key], + 0, + pad_height, + 0, + pad_width, + cv2.BORDER_CONSTANT, + value=self.im_padding_value) + + return data + + if __name__ == "__main__": - transforms = [RandomResize(size=(512, 512), scale=(0.5, 1.0))] + transforms = [Padding(target_size=(1200, 1200))] transforms = Compose(transforms) fg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/Matting/data/matting/human_matting/Distinctions-646/train/fg/13(2).png' alpha_path = fg_path.replace('fg', 'alpha') @@ -575,4 +639,6 @@ def __call__(self, data): # pdb.set_trace() data = transforms(data) print(data['img'].dtype, data['img'].shape) + for key in data['gt_fields']: + print(data[key].shape) cv2.imwrite('distort_img.jpg', data['img'].transpose([1, 2, 0])) From 6e8c61bc622f29f44e70124052ebbc3d11327b1d Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Thu, 28 Oct 2021 17:05:22 +0800 Subject: [PATCH 177/210] add refinement module --- contrib/Matting/model/__init__.py | 2 +- contrib/Matting/model/ziyan.py | 483 +++++++++++++++++++++++++++++- 2 files changed, 472 insertions(+), 13 deletions(-) diff --git a/contrib/Matting/model/__init__.py b/contrib/Matting/model/__init__.py index c83fe61527..55f2016f77 100644 --- a/contrib/Matting/model/__init__.py +++ b/contrib/Matting/model/__init__.py @@ -19,4 +19,4 @@ from .dim import DIM from .loss import MRSD from .modnet import MODNet -from .ziyan import ZiYan +from .ziyan import ZiYan, ZiYanRefine diff --git a/contrib/Matting/model/ziyan.py b/contrib/Matting/model/ziyan.py index d829c8ffa8..9533cfbcc4 100644 --- a/contrib/Matting/model/ziyan.py +++ b/contrib/Matting/model/ziyan.py @@ -13,10 +13,12 @@ # limitations under the License. 
from collections import defaultdict +import time import paddle import paddle.nn as nn import paddle.nn.functional as F +import paddleseg from paddleseg.models import layers from paddleseg import utils from paddleseg.cvlibs import manager @@ -132,7 +134,7 @@ def __init__(self, backbone, pretrained=None): layers.ConvBNReLU(64, 64, 3, padding=1), layers.ConvBNReLU(64, 64, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) - # stage 0g + # stage 0f self.decoder0_f = nn.Sequential( layers.ConvBNReLU(64, 64, 3, padding=1), layers.ConvBNReLU(64, 64, 3, padding=1), @@ -254,19 +256,476 @@ def init_weight(self): utils.load_entire_model(self, self.pretrained) +@manager.MODELS.add_component +class ZiYanRefine(ZiYan): + def __init__(self, + backbone, + pretrained=None, + backbone_scale=0.25, + refine_mode='sampling', + refine_sample_pixels=80000, + refine_threshold=0.1, + refine_kernel_size=3, + refine_prevent_oversampling=True, + if_refine=True): + if if_refine: + if backbone_scale > 0.5: + raise ValueError( + 'Backbone_scale should not be greater than 1/2, but it is {}' + .format(backbone_scale)) + else: + backbone_scale = 1 + super().__init__(backbone, pretrained) + + self.backbone_scale = backbone_scale + self.if_refine = if_refine + self.refiner = Refiner( + mode=refine_mode, + sample_pixels=refine_sample_pixels, + threshold=refine_threshold, + kernel_size=refine_kernel_size, + prevent_oversampling=refine_prevent_oversampling) + # stage 0f recontain + self.decoder0_f = nn.Sequential( + layers.ConvBNReLU(64, 64, 3, padding=1), + layers.ConvBNReLU(64, 64, 3, padding=1), + nn.Conv2D(64, 1 + 1 + 32, 3, padding=1)) + + def forward(self, data): + src = data['img'] + src_h, src_w = src.shape[2:] + if self.if_refine: + if (src_h % 4 != 0) or (src_w % 4) != 0: + raise ValueError( + 'The input image must have width and height that are divisible by 4' + ) + + # Downsample src for backbone + src_sm = F.interpolate( + src, + scale_factor=self.backbone_scale, + mode='bilinear', + align_corners=False) + + # Base + fea_list = self.backbone(src_sm) + ########################## + ### Decoder part - GLANCE + ########################## + #psp: N, 512, H/32, W/32 + psp = self.psp_module(fea_list[-1]) + #d6_g: N, 512, H/16, W/16 + d5_g = self.decoder5_g(paddle.concat((psp, fea_list[-1]), 1)) + #d5_g: N, 512, H/8, W/8 + d4_g = self.decoder4_g(paddle.concat((self.psp4(psp), d5_g), 1)) + #d4_g: N, 256, H/4, W/4 + d3_g = self.decoder3_g(paddle.concat((self.psp3(psp), d4_g), 1)) + #d4_g: N, 128, H/2, W/2 + d2_g = self.decoder2_g(paddle.concat((self.psp2(psp), d3_g), 1)) + #d2_g: N, 64, H, W + d1_g = self.decoder1_g(paddle.concat((self.psp1(psp), d2_g), 1)) + #d0_g: N, 3, H, W + d0_g = self.decoder0_g(d1_g) + # The 1st channel is foreground. The 2nd is transition region. The 3rd is background. 
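+        # Note (illustrative, based on the fusion() method defined above):
+        # the softmax below gives per-pixel probabilities over the three
+        # glance classes, and fusion() later combines the two branches as
+        #     fusion_sigmoid = focus_sigmoid * transition_mask + fg
+        # so the focus prediction only fills pixels predicted as transition
+        # region, while sure-foreground pixels keep alpha = 1.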
+ # glance_sigmoid = F.sigmoid(d0_g) + glance_sigmoid = F.softmax(d0_g, axis=1) + + ########################## + ### Decoder part - FOCUS + ########################## + bb = self.bridge_block(fea_list[-1]) + #bg: N, 512, H/32, W/32 + d5_f = self.decoder5_f(paddle.concat((bb, fea_list[-1]), 1)) + #d5_f: N, 256, H/16, W/16 + d4_f = self.decoder4_f(paddle.concat((d5_f, fea_list[-2]), 1)) + #d4_f: N, 128, H/8, W/8 + d3_f = self.decoder3_f(paddle.concat((d4_f, fea_list[-3]), 1)) + #d3_f: N, 64, H/4, W/4 + d2_f = self.decoder2_f(paddle.concat((d3_f, fea_list[-4]), 1)) + #d2_f: N, 64, H/2, W/2 + d1_f = self.decoder1_f(paddle.concat((d2_f, fea_list[-5]), 1)) + #d1_f: N, 64, H, W + d0_f = self.decoder0_f(d1_f) + #d0_f: N, 1, H, W + focus_sigmoid = F.sigmoid(d0_f[:, 0:1, :, :]) + pha_sm = self.fusion(glance_sigmoid, focus_sigmoid) + err_sm = d0_f[:, 1:2, :, :] + hid_sm = F.relu(d0_f[:, 2:, :, :]) + + # Refiner + if self.if_refine: + pha, ref_sm = self.refiner( + src=src, pha=pha_sm, err=err_sm, hid=hid_sm, tri=glance_sigmoid) + # Clamp outputs + pha = paddle.clip(pha, 0., 1.) + + if self.training: + logit_dict = { + 'glance': glance_sigmoid, + 'focus': focus_sigmoid, + 'fusion': pha_sm, + 'error': err_sm + } + if self.if_refine: + logit_dict['refine'] = pha + return logit_dict + else: + return pha if self.if_refine else pha_sm + + def loss(self, logit_dict, label_dict, loss_func_dict=None): + if loss_func_dict is None: + loss_func_dict = defaultdict(list) + loss_func_dict['glance'].append(nn.NLLLoss()) + loss_func_dict['focus'].append(MRSD()) + loss_func_dict['cm'].append(MRSD()) + loss_func_dict['err'].append(paddleseg.models.MSELoss()) + loss_func_dict['refine'].append(paddleseg.models.L1Loss()) + + loss = {} + + # glance loss computation + # get glance label + glance_label = F.interpolate( + label_dict['trimap'], + logit_dict['glance'].shape[2:], + mode='nearest', + align_corners=False) + glance_label_trans = (glance_label == 128).astype('int64') + glance_label_bg = (glance_label == 0).astype('int64') + glance_label = glance_label_trans + glance_label_bg * 2 + loss_glance = loss_func_dict['glance'][0](paddle.log( + logit_dict['glance']), glance_label.squeeze(1)) + loss['glance'] = loss_glance + # TODO glance label 的验证 + + # focus loss computation + focus_label = F.interpolate( + label_dict['alpha'], + logit_dict['focus'].shape[2:], + mode='bilinear', + align_corners=False) + loss_focus = loss_func_dict['focus'][0](logit_dict['focus'], + focus_label, glance_label_trans) + loss['focus'] = loss_focus + + # collaborative matting loss + loss_cm_func = loss_func_dict['cm'] + # fusion_sigmoid loss + loss_cm = loss_cm_func[0](logit_dict['fusion'], focus_label) + loss['cm'] = loss_cm + + # error loss + err = F.interpolate( + logit_dict['error'], + label_dict['alpha'].shape[2:], + mode='bilinear', + align_corners=False) + err_label = (F.interpolate( + logit_dict['fusion'], + label_dict['alpha'].shape[2:], + mode='bilinear', + align_corners=False) - label_dict['alpha']).abs() + loss_err = loss_func_dict['err'][0](err, err_label) + loss['err'] = loss_err + + loss_all = 0.25 * loss_glance + 0.25 * loss_focus + 0.25 * loss_cm + loss_err + + # refine loss + if self.if_refine: + loss_refine = loss_func_dict['refine'][0](logit_dict['alpha'], + label_dict['alpha']) + loss['refine'] = loss_refine + loss_all = loss_all + loss_refine + + loss['all'] = loss_all + return loss + + +class Refiner(nn.Layer): + ''' + Refiner refines the coarse output to full resolution. + + Args: + mode: area selection mode. 
Options: + "full" - No area selection, refine everywhere using regular Conv2d. + "sampling" - Refine fixed amount of pixels ranked by the top most errors. + "thresholding" - Refine varying amount of pixels that have greater error than the threshold. + sample_pixels: number of pixels to refine. Only used when mode == "sampling". + threshold: error threshold ranged from 0 ~ 1. Refine where err > threshold. Only used when mode == "thresholding". + kernel_size: The convolution kernel_size. Options: [1, 3]. Default: 3. + prevent_oversampling: True for regular cases, False for speedtest.Default: True. + ''' + + def __init__(self, + mode, + sample_pixels, + threshold, + kernel_size=3, + prevent_oversampling=True): + super().__init__() + if mode not in ['full', 'sampling', 'thresholding']: + raise ValueError( + "mode must be in ['full', 'sampling', 'thresholding']") + if kernel_size not in [1, 3]: + raise ValueError("kernel_size must be in [1, 3]") + + self.mode = mode + self.sample_pixels = sample_pixels + self.threshold = threshold + self.kernel_size = kernel_size + self.prevent_oversampling = prevent_oversampling + + channels = [32, 24, 16, 12, 1] + self.conv1 = layers.ConvBNReLU( + channels[0] + 4 + 3, + channels[1], + kernel_size, + padding=0, + bias_attr=False) + self.conv2 = layers.ConvBNReLU( + channels[1], channels[2], kernel_size, padding=0, bias_attr=False) + self.conv3 = layers.ConvBNReLU( + channels[2] + 3, + channels[3], + kernel_size, + padding=0, + bias_attr=False) + self.conv4 = nn.Conv2D( + channels[3], channels[4], kernel_size, padding=0, bias_attr=True) + + def forward(self, src, pha, err, hid, tri): + ''' + Args: + src: (B, 3, H, W) full resolution source image. + pha: (B, 1, Hc, Wc) coarse alpha prediction. + err: (B, 1, Hc, Hc) coarse error prediction. + hid: (B, 32, Hc, Hc) coarse hidden encoding. + tri: (B, 1, Hc, Hc) trimap prediction. 
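+
+        Returns (inferred from the implementation below, for reference):
+            Tuple of (pha, ref): the refined full-resolution alpha and the
+            map of regions that were selected for refinement.
+
+        Example (illustrative only):
+            pha, ref = refiner(src, pha_sm, err_sm, hid_sm, glance_sigmoid)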
+ ''' + h_full, w_full = src.shape[2:] + h_half, w_half = h_full // 2, w_full // 2 + h_quat, w_quat = h_full // 4, w_full // 4 + + if self.mode != 'full': + err = F.interpolate( + err, (h_quat, w_quat), mode='bilinear', align_corners=False) + ref = self.select_refinement_regions(err) + idx = paddle.nonzero(ref.squeeze(1)) + idx = idx[:, 0], idx[:, 1], idx[:, 2] + + if idx[0].shape[0] > 0: + x = paddle.concat([hid, pha, tri], axis=1) + x = F.interpolate( + x, (h_half, w_half), mode='bilinear', align_corners=False) + start = time.time() + x = self.crop_patch(x, idx, 2, + 3 if self.kernel_size == 3 else 0) + print('first crop_patch time:', time.time() - start) + + y = F.interpolate( + src, (h_half, w_half), mode='bilinear', align_corners=False) + start = time.time() + y = self.crop_patch(y, idx, 2, + 3 if self.kernel_size == 3 else 0) + print('second crop_patch time:', time.time() - start) + + x = self.conv1(paddle.concat([x, y], axis=1)) + x = self.conv2(x) + + x = F.interpolate( + x, (8, 8) if self.kernel_size == 3 else (4, 4), + mode='nearest') + start = time.time() + y = self.crop_patch(src, idx, 4, + 2 if self.kernel_size == 3 else 0) + print('third crop_patch time:', time.time() - start) + + x = self.conv3(paddle.concat([x, y], axis=1)) + x = self.conv4(x) + + pha = F.interpolate( + pha, (h_full, w_full), mode='bilinear', align_corners=False) + start = time.time() + pha = self.replace_patch(pha, x, idx) + print('replace_patch:', time.time() - start) + else: + pha = F.interpolate( + pha, (h_full, w_full), mode='bilinear', align_corners=False) + + else: + x = paddle.concat([hid, pha, tri], axis=1) + x = F.interpolate( + x, (h_half, w_half), mode='bilinear', align_corners=False) + y = F.interpolate( + src, (h_half, w_half), mode='bilinear', align_corners=False) + + if self.kernel_size == 3: + x = F.pad(x, [3, 3, 3, 3]) + y = F.pad(y, [3, 3, 3, 3]) + + x = self.conv1(paddle.concat([x, y], axis=1)) + x = self.conv2(x) + + if self.kernel_size == 3: + x = F.interpolate(x, (h_full + 4, w_full + 4)) + y = F.pad(src, [2, 2, 2, 2]) + else: + x = F.interpolate(x, (h_full, w_full), mode='nearest') + y = src + + x = self.conv3(paddle.concat([x, y], axis=1)) + x = self.conv4(x) + + pha = x + ref = paddle.ones((src.shape[0], 1, h_quat, w_quat)) + return pha, ref + + def select_refinement_regions(self, err): + ''' + select refinement regions. + + Args: + err: error map (B, 1, H, W). + + Returns: + Teosor: refinement regions (B, 1, H, W). 1 is selected, 0 is not. + ''' + err.stop_gradient = True + if self.mode == 'sampling': + b, _, h, w = err.shape + err = paddle.reshape(err, (b, -1)) + _, idx = err.topk(self.sample_pixels // 16, axis=1, sorted=False) + ref = paddle.zeros_like(err) + update = paddle.ones_like(idx, dtype='float32') + for i in range(b): + ref[i] = paddle.scatter(ref[i], idx[i], update[i]) + if self.prevent_oversampling: + ref = ref * ((err > 0).astype('float32')) + ref = ref.reshape((b, 1, h, w)) + else: + ref = (err > self.threshold).astype('float32') + return ref + + def crop_patch(self, x, idx, size, padding): + """ + Crops selected patches from image given indices. + + Inputs: + x: image (B, C, H, W). + idx: selection indices Tuple[(P,), (P,), (P,),], where the 3 values are (B, H, W) index. + size: center size of the patch, also stride of the crop. + padding: expansion size of the patch. + Output: + patch: (P, C, h, w), where h = w = size + 2 * padding. 
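+            For example (illustrative): forward() calls this with size=2 and
+            padding=3 when kernel_size == 3, so each patch is 8 x 8.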
+ """ + b, c, h, w = x.shape + # if padding != 0: + # x = F.pad(x, (padding,) * 4) + kernel_size = size + 2 * padding + x = F.unfold( + x, kernel_sizes=kernel_size, strides=size, paddings=padding) + hout = int((h + 2 * padding - kernel_size) / size + 1) + wout = int((w + 2 * padding - kernel_size) / size + 1) + x = x.reshape((b, c, kernel_size, kernel_size, hout, wout)) + x = x.transpose((0, 4, 5, 1, 2, 3)) + patchs = [] + start = time.time() + for i, j, k in zip(idx[0], idx[1], idx[2]): + patchs.append(x[i, j, k]) + print('crop for loop: ', time.time() - start) + return paddle.to_tensor(patchs) + + def replace_patch(self, x, y, idx): + ''' + Replaces patches back into image given index. + + Args: + x: image (B, C, H, W) + y: patches (P, C, h, w) + idx: selection indices Tuple[(P,), (P,), (P,)] where the 3 values are (B, H, W) index. + + Returns: + Tensor: (B, C, H, W), where patches at idx locations are replaced with y. + ''' + bx, cx, hx, wx = x.shape + by, cy, hy, wy = y.shape + + x = x.reshape((bx, cx, hx // hy, hy, wx // wy, wy)) + x = x.transpose((0, 2, 4, 1, 3, 5)) + start = time.time() + for numy, (i, j, k) in enumerate(zip(idx[0], idx[1], idx[2])): + x[i, j, k] = y[numy] + print('replace for loop: ', time.time() - start) + x = x.transpose((0, 3, 1, 4, 2, 5)) + x = x.reshape((bx, cx, hx, wx)) + return x + + if __name__ == '__main__': + # paddle.set_device('cpu') + import time from resnet_vd import ResNet34_vd backbone = ResNet34_vd(output_stride=32) - x = paddle.randint(0, 256, (1, 3, 320, 320)).astype('float32') + x = paddle.randint(0, 256, (2, 3, 1024, 1024)).astype('float32') inputs = {} inputs['img'] = x - model = ZiYan(backbone=backbone) - logit_dict = model(inputs) - print(logit_dict) - -# paddle.set_device('cpu') -# glance_sigmoid = paddle.rand((1, 3, 4, 4)) -# focus_sigmoid = paddle.rand((1, 1, 4, 4)) -# print(glance_sigmoid) -# print(focus_sigmoid) -# # fusion(glance_sigmoid, focus_sigmoid) + + model = ZiYanRefine( + backbone=backbone, + pretrained=None, + backbone_scale=0.25, + refine_mode='sampling', + refine_sample_pixels=5000, + refine_threshold=0.1, + refine_kernel_size=3, + refine_prevent_oversampling=True, + if_refine=False) + start = time.time() + output = model(inputs) + print('model infer time: ', time.time() - start) + for k, v in output.items(): + print(k) + print(v) + +# refiner = Refiner(mode='thresholding', +# sample_pixels=80, +# threshold=0.1, +# kernel_size=3, +# prevent_oversampling=True) +# # check select_refinement_regions, succeed +# err = paddle.rand((2, 1, 5, 5)) +# ref = refiner.select_refinement_regions(err) +# print(err) +# print(ref) + +# check crop_patch, succeed +# x = paddle.rand((1, 1, 12, 12)) +# idx = ((0, 0, 0), (0 ,1, 2), (0, 1, 2)) +# size = 4 +# padding= 2 +# p = refiner.crop_patch(x, idx, size, padding) + +# check replace_patch, succeed +# p = p+1 +# p = p[:, :, 2:6, 2:6] +# refinement = refiner.replace_patch(x, p, idx) +# print(refinement) + +# # check refine, succeed +# src = paddle.rand((2, 3, 16, 16)) +# pha = paddle.rand((2, 1, 4, 4)) +# err = paddle.rand((2, 1, 4, 4)) +# hid = paddle.rand((2, 32, 4, 4)) +# tri = paddle.rand((2, 3, 4, 4)) + +# pha_ref, ref = refiner(src, pha, err, hid, tri) +# print('err') +# print(err[1]) +# print('ref') +# print(ref[1]) +# print('pha') +# pha = F.interpolate(pha, (16, 16), mode='bilinear', align_corners=False) +# print(pha[1,0,:,:]) +# print('pha_ref') +# print(pha_ref[1,0,:,:]) +# print(pha_ref.shape, ref.shape) From bb50840db87d6d3d183c7f9e578784bf30f47578 Mon Sep 17 00:00:00 2001 From: 
wuyefeilin Date: Fri, 29 Oct 2021 17:59:52 +0800 Subject: [PATCH 178/210] fix bug --- contrib/Matting/model/ziyan.py | 20 ++++++++++++-------- contrib/Matting/transforms.py | 4 ++-- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/contrib/Matting/model/ziyan.py b/contrib/Matting/model/ziyan.py index 9533cfbcc4..35b9bd51de 100644 --- a/contrib/Matting/model/ziyan.py +++ b/contrib/Matting/model/ziyan.py @@ -275,21 +275,25 @@ def __init__(self, .format(backbone_scale)) else: backbone_scale = 1 - super().__init__(backbone, pretrained) + super().__init__(backbone) self.backbone_scale = backbone_scale + self.pretrained = pretrained self.if_refine = if_refine - self.refiner = Refiner( - mode=refine_mode, - sample_pixels=refine_sample_pixels, - threshold=refine_threshold, - kernel_size=refine_kernel_size, - prevent_oversampling=refine_prevent_oversampling) + if if_refine: + self.refiner = Refiner( + mode=refine_mode, + sample_pixels=refine_sample_pixels, + threshold=refine_threshold, + kernel_size=refine_kernel_size, + prevent_oversampling=refine_prevent_oversampling) + # stage 0f recontain self.decoder0_f = nn.Sequential( layers.ConvBNReLU(64, 64, 3, padding=1), layers.ConvBNReLU(64, 64, 3, padding=1), nn.Conv2D(64, 1 + 1 + 32, 3, padding=1)) + self.init_weight() def forward(self, data): src = data['img'] @@ -432,7 +436,7 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None): # refine loss if self.if_refine: - loss_refine = loss_func_dict['refine'][0](logit_dict['alpha'], + loss_refine = loss_func_dict['refine'][0](logit_dict['refine'], label_dict['alpha']) loss['refine'] = loss_refine loss_all = loss_all + loss_refine diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index a9ecdc281f..136b7c90a8 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -206,8 +206,8 @@ def __call__(self, data): data['trans_info'].append(('resize', data['img'].shape[0:2])) h, w = data['img'].shape[0:2] - rw = w - w % 32 - rh = h - h % 32 + rw = w - w % self.mult_int + rh = h - h % self.mult_int data['img'] = functional.resize(data['img'], (rw, rh)) for key in data.get('gt_fields', []): data[key] = functional.resize(data[key], (rw, rh)) From 2592e11bf66d274593081bc6b4c3559dd8a4cfea Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 1 Nov 2021 11:03:51 +0800 Subject: [PATCH 179/210] update glance loss --- contrib/Matting/model/ziyan.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/Matting/model/ziyan.py b/contrib/Matting/model/ziyan.py index 35b9bd51de..af5399726c 100644 --- a/contrib/Matting/model/ziyan.py +++ b/contrib/Matting/model/ziyan.py @@ -216,8 +216,8 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None): glance_label_trans = (glance_label == 128).astype('int64') glance_label_bg = (glance_label == 0).astype('int64') glance_label = glance_label_trans + glance_label_bg * 2 - loss_glance = loss_func_dict['glance'][0](paddle.log( - logit_dict['glance']), glance_label.squeeze(1)) + loss_glance = loss_func_dict['glance'][0]( + paddle.log(logit_dict['glance'] + 1e-6), glance_label.squeeze(1)) loss['glance'] = loss_glance # TODO glance label 的验证 From 48be067772ecec9377d4524c18cc35bb59b155e2 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 1 Nov 2021 17:11:57 +0800 Subject: [PATCH 180/210] optimize crop_patch and replace_patch --- contrib/Matting/model/ziyan.py | 90 ++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 42 deletions(-) diff --git a/contrib/Matting/model/ziyan.py 
b/contrib/Matting/model/ziyan.py index af5399726c..8e2f07372a 100644 --- a/contrib/Matting/model/ziyan.py +++ b/contrib/Matting/model/ziyan.py @@ -397,8 +397,8 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None): glance_label_trans = (glance_label == 128).astype('int64') glance_label_bg = (glance_label == 0).astype('int64') glance_label = glance_label_trans + glance_label_bg * 2 - loss_glance = loss_func_dict['glance'][0](paddle.log( - logit_dict['glance']), glance_label.squeeze(1)) + loss_glance = loss_func_dict['glance'][0]( + paddle.log(logit_dict['glance'] + 1e-6), glance_label.squeeze(1)) loss['glance'] = loss_glance # TODO glance label 的验证 @@ -515,23 +515,18 @@ def forward(self, src, pha, err, hid, tri): err, (h_quat, w_quat), mode='bilinear', align_corners=False) ref = self.select_refinement_regions(err) idx = paddle.nonzero(ref.squeeze(1)) - idx = idx[:, 0], idx[:, 1], idx[:, 2] - if idx[0].shape[0] > 0: + if idx.shape[0] > 0: x = paddle.concat([hid, pha, tri], axis=1) x = F.interpolate( x, (h_half, w_half), mode='bilinear', align_corners=False) - start = time.time() x = self.crop_patch(x, idx, 2, 3 if self.kernel_size == 3 else 0) - print('first crop_patch time:', time.time() - start) y = F.interpolate( src, (h_half, w_half), mode='bilinear', align_corners=False) - start = time.time() y = self.crop_patch(y, idx, 2, 3 if self.kernel_size == 3 else 0) - print('second crop_patch time:', time.time() - start) x = self.conv1(paddle.concat([x, y], axis=1)) x = self.conv2(x) @@ -539,19 +534,15 @@ def forward(self, src, pha, err, hid, tri): x = F.interpolate( x, (8, 8) if self.kernel_size == 3 else (4, 4), mode='nearest') - start = time.time() y = self.crop_patch(src, idx, 4, 2 if self.kernel_size == 3 else 0) - print('third crop_patch time:', time.time() - start) x = self.conv3(paddle.concat([x, y], axis=1)) x = self.conv4(x) pha = F.interpolate( pha, (h_full, w_full), mode='bilinear', align_corners=False) - start = time.time() pha = self.replace_patch(pha, x, idx) - print('replace_patch:', time.time() - start) else: pha = F.interpolate( pha, (h_full, w_full), mode='bilinear', align_corners=False) @@ -616,7 +607,7 @@ def crop_patch(self, x, idx, size, padding): Inputs: x: image (B, C, H, W). - idx: selection indices Tuple[(P,), (P,), (P,),], where the 3 values are (B, H, W) index. + idx: selection indices shape is (p, 3), where the 3 values are (B, H, W) index. size: center size of the patch, also stride of the crop. padding: expansion size of the patch. Output: @@ -633,11 +624,8 @@ def crop_patch(self, x, idx, size, padding): x = x.reshape((b, c, kernel_size, kernel_size, hout, wout)) x = x.transpose((0, 4, 5, 1, 2, 3)) patchs = [] - start = time.time() - for i, j, k in zip(idx[0], idx[1], idx[2]): - patchs.append(x[i, j, k]) - print('crop for loop: ', time.time() - start) - return paddle.to_tensor(patchs) + patchs = paddle.gather_nd(x, idx) + return patchs def replace_patch(self, x, y, idx): ''' @@ -646,7 +634,7 @@ def replace_patch(self, x, y, idx): Args: x: image (B, C, H, W) y: patches (P, C, h, w) - idx: selection indices Tuple[(P,), (P,), (P,)] where the 3 values are (B, H, W) index. + idx: selection indices shape is (p, 3), where the 3 values are (B, H, W) index. Returns: Tensor: (B, C, H, W), where patches at idx locations are replaced with y. 
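# Note on the hunks above and below (illustrative, relying only on Paddle's
# documented gather_nd/scatter_nd semantics): after transposing x to
# (B, hout, wout, C, k, k), an idx of shape (P, 3) holding (batch, row, col)
# triples lets
#     patchs = paddle.gather_nd(x, idx)
# fetch all (P, C, k, k) patches in one vectorized call; replace_patch below
# does the inverse with scatter_nd/scatter_nd_add. Both remove the per-index
# Python loops and their per-iteration synchronization cost.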
@@ -656,10 +644,11 @@ def replace_patch(self, x, y, idx): x = x.reshape((bx, cx, hx // hy, hy, wx // wy, wy)) x = x.transpose((0, 2, 4, 1, 3, 5)) - start = time.time() - for numy, (i, j, k) in enumerate(zip(idx[0], idx[1], idx[2])): - x[i, j, k] = y[numy] - print('replace for loop: ', time.time() - start) + ones = paddle.ones((idx.shape[0], cx, hy, wy)) + flag = paddle.scatter_nd( + idx, ones, shape=x.shape) # Get the index which should be replace + x = x * (1 - flag) + x = paddle.scatter_nd_add(x, idx, y) x = x.transpose((0, 3, 1, 4, 2, 5)) x = x.reshape((bx, cx, hx, wx)) return x @@ -670,7 +659,7 @@ def replace_patch(self, x, y, idx): import time from resnet_vd import ResNet34_vd backbone = ResNet34_vd(output_stride=32) - x = paddle.randint(0, 256, (2, 3, 1024, 1024)).astype('float32') + x = paddle.randint(0, 256, (1, 3, 2048, 2048)).astype('float32') inputs = {} inputs['img'] = x @@ -679,40 +668,57 @@ def replace_patch(self, x, y, idx): pretrained=None, backbone_scale=0.25, refine_mode='sampling', - refine_sample_pixels=5000, + refine_sample_pixels=80000, refine_threshold=0.1, refine_kernel_size=3, refine_prevent_oversampling=True, - if_refine=False) + if_refine=True) + model.eval() start = time.time() - output = model(inputs) - print('model infer time: ', time.time() - start) - for k, v in output.items(): - print(k) - print(v) - -# refiner = Refiner(mode='thresholding', -# sample_pixels=80, + times = 0 + for i in range(10): + # x = paddle.randint(0, 256, (1, 3, 2048, 2048)).astype('float32') + # inputs = {} + # inputs['img'] = x + time_refien = model(inputs) + times += time_refien + print('model infer time: ', (time.time() - start) / 10) + print('model refine time: ', times / 10) +# for k, v in output.items(): +# print(k) +# print(v) + +# refiner = Refiner(mode='sampling', +# sample_pixels=5000, # threshold=0.1, # kernel_size=3, # prevent_oversampling=True) -# # check select_refinement_regions, succeed -# err = paddle.rand((2, 1, 5, 5)) -# ref = refiner.select_refinement_regions(err) +# check select_refinement_regions, succeed +# err = paddle.rand((2, 1, 512, 512)) +# start = time.time() +# ref = refiner.select_refinement_regions_(err) +# print('old time comsumn: ',time.time() - start) +# print('old err') # print(err) +# print('old ref') # print(ref) # check crop_patch, succeed -# x = paddle.rand((1, 1, 12, 12)) -# idx = ((0, 0, 0), (0 ,1, 2), (0, 1, 2)) -# size = 4 -# padding= 2 +# x = paddle.rand((2, 3, 256, 256)) +# err = paddle.rand((2, 1, 128, 128)) +# ref = refiner.select_refinement_regions(err) +# idx = paddle.nonzero(ref.squeeze(1)) +# idx = idx[:, 0], idx[:, 1], idx[:, 2] +# size = 2 +# padding= 3 # p = refiner.crop_patch(x, idx, size, padding) # check replace_patch, succeed # p = p+1 -# p = p[:, :, 2:6, 2:6] +# p = p[:, :, 3:5, 3:5] +# start = time.time() # refinement = refiner.replace_patch(x, p, idx) +# print('replace_patch time:', time.time() - start) # print(refinement) # # check refine, succeed From 6709be6182de9ef4d36de359ba3b786a66255cf7 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 1 Nov 2021 20:56:18 +0800 Subject: [PATCH 181/210] update ziyan.py --- contrib/Matting/model/ziyan.py | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/contrib/Matting/model/ziyan.py b/contrib/Matting/model/ziyan.py index 8e2f07372a..ef6a2d8a1e 100644 --- a/contrib/Matting/model/ziyan.py +++ b/contrib/Matting/model/ziyan.py @@ -354,6 +354,7 @@ def forward(self, data): focus_sigmoid = F.sigmoid(d0_f[:, 0:1, :, :]) pha_sm = 
self.fusion(glance_sigmoid, focus_sigmoid) err_sm = d0_f[:, 1:2, :, :] + err_sm = paddle.clip(err_sm, 0., 1.) hid_sm = F.relu(d0_f[:, 2:, :, :]) # Refiner @@ -614,16 +615,15 @@ def crop_patch(self, x, idx, size, padding): patch: (P, C, h, w), where h = w = size + 2 * padding. """ b, c, h, w = x.shape - # if padding != 0: - # x = F.pad(x, (padding,) * 4) kernel_size = size + 2 * padding x = F.unfold( x, kernel_sizes=kernel_size, strides=size, paddings=padding) hout = int((h + 2 * padding - kernel_size) / size + 1) wout = int((w + 2 * padding - kernel_size) / size + 1) x = x.reshape((b, c, kernel_size, kernel_size, hout, wout)) - x = x.transpose((0, 4, 5, 1, 2, 3)) - patchs = [] + x = paddle.transpose( + x, (0, 4, 5, 1, 2, 3) + ) # If size is lager (4, 512, 512, 36, 8, 8), it will result OSError: (External) Cuda error(700), an illegal memory access was encountered. idx will illgegal. patchs = paddle.gather_nd(x, idx) return patchs @@ -673,17 +673,10 @@ def replace_patch(self, x, y, idx): refine_kernel_size=3, refine_prevent_oversampling=True, if_refine=True) - model.eval() - start = time.time() - times = 0 - for i in range(10): - # x = paddle.randint(0, 256, (1, 3, 2048, 2048)).astype('float32') - # inputs = {} - # inputs['img'] = x - time_refien = model(inputs) - times += time_refien - print('model infer time: ', (time.time() - start) / 10) - print('model refine time: ', times / 10) + # model.eval() + for i in range(1): + pha = model(inputs) + print(pha) # for k, v in output.items(): # print(k) # print(v) From 779cbaeaa6aafa26072dc3ced5c3614166f3e250 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 2 Nov 2021 10:54:12 +0800 Subject: [PATCH 182/210] update Padding --- contrib/Matting/transforms.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index 136b7c90a8..7da7950875 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -583,12 +583,11 @@ def __call__(self, data): im_height, im_width = data['img'].shape[0], data['img'].shape[1] target_height = self.target_size[1] target_width = self.target_size[0] - pad_height = target_height - im_height - pad_width = target_width - im_width - if pad_height < 0 or pad_width < 0: - raise ValueError( - 'The size of image should be less than `target_size`, but the size of image ({}, {}) is larger than `target_size` ({}, {})' - .format(im_width, im_height, target_width, target_height)) + pad_height = max(0, target_height - im_height) + pad_width = max(0, target_width - im_width) + data['trans_info'].append(('padding', data['img'].shape[0:2])) + if (pad_height == 0) and (pad_width == 0): + return data else: data['img'] = cv2.copyMakeBorder( data['img'], @@ -611,7 +610,6 @@ def __call__(self, data): pad_width, cv2.BORDER_CONSTANT, value=self.im_padding_value) - return data From 927cfdd2e08698608f81182f3af542e9d79e3011 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 2 Nov 2021 11:24:04 +0800 Subject: [PATCH 183/210] add LimitShort --- contrib/Matting/transforms.py | 62 +++++++++++++++++++++++++++++++++-- 1 file changed, 59 insertions(+), 3 deletions(-) diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index 7da7950875..2bee48ac5a 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -386,8 +386,8 @@ def __call__(self, data): elif (self.min_long is not None) and (long_edge < self.min_long): target = self.min_long + data['trans_info'].append(('resize', data['img'].shape[0:2])) if 
target != long_edge:
-            data['trans_info'].append(('resize', data['img'].shape[0:2]))
             data['img'] = functional.resize_long(data['img'], target)
             for key in data.get('gt_fields', []):
                 data[key] = functional.resize_long(data[key], target)
@@ -395,6 +395,61 @@ def __call__(self, data):
         return data
 
 
+@manager.TRANSFORMS.add_component
+class LimitShort:
+    """
+    Limit the short edge of an image.
+
+    If the short edge is larger than max_short, resize the short edge
+    to max_short, while scaling the long edge proportionally.
+
+    If the short edge is smaller than min_short, resize the short edge
+    to min_short, while scaling the long edge proportionally.
+
+    Args:
+        max_short (int, optional): If the short edge of the image is larger than max_short,
+            it will be resized to max_short. Default: None.
+        min_short (int, optional): If the short edge of the image is smaller than min_short,
+            it will be resized to min_short. Default: None.
+    """
+
+    def __init__(self, max_short=None, min_short=None):
+        if max_short is not None:
+            if not isinstance(max_short, int):
+                raise TypeError(
+                    "Type of `max_short` is invalid. It should be int, but it is {}"
+                    .format(type(max_short)))
+        if min_short is not None:
+            if not isinstance(min_short, int):
+                raise TypeError(
+                    "Type of `min_short` is invalid. It should be int, but it is {}"
+                    .format(type(min_short)))
+        if (max_short is not None) and (min_short is not None):
+            if min_short > max_short:
+                raise ValueError(
+                    '`max_short` should not be smaller than `min_short`, but they are {} and {}'
+                    .format(max_short, min_short))
+        self.max_short = max_short
+        self.min_short = min_short
+
+    def __call__(self, data):
+        h, w = data['img'].shape[:2]
+        short_edge = min(h, w)
+        target = short_edge
+        if (self.max_short is not None) and (short_edge > self.max_short):
+            target = self.max_short
+        elif (self.min_short is not None) and (short_edge < self.min_short):
+            target = self.min_short
+
+        data['trans_info'].append(('resize', data['img'].shape[0:2]))
+        if target != short_edge:
+            data['img'] = functional.resize_short(data['img'], target)
+            for key in data.get('gt_fields', []):
+                data[key] = functional.resize_short(data[key], target)
+
+        return data
+
+
 @manager.TRANSFORMS.add_component
 class RandomHorizontalFlip:
     """
@@ -614,12 +669,13 @@ def __call__(self, data):
 
 if __name__ == "__main__":
-    transforms = [Padding(target_size=(1200, 1200))]
+    transforms = [LimitShort(min_short=256, max_short=512)]
     transforms = Compose(transforms)
     fg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/Matting/data/matting/human_matting/Distinctions-646/train/fg/13(2).png'
     alpha_path = fg_path.replace('fg', 'alpha')
     bg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/Matting/data/matting/human_matting/bg/unsplash_bg/attic/photo-1443884590026-2e4d21aee71c?crop=entropy&cs=tinysrgb&fit=max&fm=jpg&ixid=MnwxMjA3fDB8MXxzZWFyY2h8Nzh8fGF0dGljfGVufDB8fHx8MTYyOTY4MDcxNQ&ixlib=rb-1.2.1&q=80&w=400.jpg'
     data = {}
+    data['trans_info'] = []
     data['fg'] = cv2.imread(fg_path)
     data['bg'] = cv2.imread(bg_path)
     h, w, c = data['fg'].shape
@@ -639,4 +695,4 @@ def __call__(self, data):
     print(data['img'].dtype, data['img'].shape)
     for key in data['gt_fields']:
         print(data[key].shape)
-    cv2.imwrite('distort_img.jpg', data['img'].transpose([1, 2, 0]))
+# cv2.imwrite('distort_img.jpg', data['img'].transpose([1, 2, 0]))

From e2ea87f7ba5dc5f09c709f8db661c06680ce00cd Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Tue, 2 Nov 2021 11:35:26 +0800
Subject: [PATCH 184/210] update RandomResize

---
 contrib/Matting/transforms.py | 16 
+++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index 2bee48ac5a..d4198a8c53 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -115,13 +115,13 @@ class RandomResize: scale(tupel|list, optional): A range of scale base on `size`. A tuple or list with length 2. Default: None. """ - def __init__(self, size, scale=None): + def __init__(self, size=None, scale=None): if isinstance(size, list) or isinstance(size, tuple): if len(size) != 2: raise ValueError( '`size` should include 2 elements, but it is {}'.format( size)) - else: + elif size is not None: raise TypeError( "Type of `size` is invalid. It should be list or tuple, but it is {}" .format(type(size))) @@ -141,8 +141,14 @@ def __init__(self, size, scale=None): def __call__(self, data): h, w = data['img'].shape[:2] - scale = np.random.uniform(self.scale[0], self.scale[1]) - scale_factor = max(self.size[0] / w, self.size[1] / h) + if self.scale is not None: + scale = np.random.uniform(self.scale[0], self.scale[1]) + else: + scale = 1. + if self.size is not None: + scale_factor = max(self.size[0] / w, self.size[1] / h) + else: + scale_factor = 1 scale = scale * scale_factor w = int(round(w * scale)) @@ -669,7 +675,7 @@ def __call__(self, data): if __name__ == "__main__": - transforms = [LimitShort(min_short=256, max_short=512)] + transforms = [RandomResize(size=(512, 512), scale=None)] transforms = Compose(transforms) fg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/Matting/data/matting/human_matting/Distinctions-646/train/fg/13(2).png' alpha_path = fg_path.replace('fg', 'alpha') From ae1e2b6bcf2897f59304662946405d92b667cf68 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 3 Nov 2021 17:05:08 +0800 Subject: [PATCH 185/210] fix trimap resize --- contrib/Matting/transforms.py | 44 ++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index d4198a8c53..e84bf23a99 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -101,7 +101,11 @@ def __call__(self, data): data['trans_info'].append(('resize', data['img'].shape[0:2])) data['img'] = functional.resize(data['img'], self.target_size) for key in data.get('gt_fields', []): - data[key] = functional.resize(data[key], self.target_size) + if key == 'trimap': + data[key] = functional.resize(data[key], self.target_size, + cv2.INTER_NEAREST) + else: + data[key] = functional.resize(data[key], self.target_size) return data @@ -155,7 +159,11 @@ def __call__(self, data): h = int(round(h * scale)) data['img'] = functional.resize(data['img'], (w, h)) for key in data.get('gt_fields', []): - data[key] = functional.resize(data[key], (w, h)) + if key == 'trimap': + data[key] = functional.resize(data[key], (w, h), + cv2.INTER_NEAREST) + else: + data[key] = functional.resize(data[key], (w, h)) return data @@ -175,7 +183,11 @@ def __call__(self, data): data['trans_info'].append(('resize', data['img'].shape[0:2])) data['img'] = functional.resize_long(data['img'], self.long_size) for key in data.get('gt_fields', []): - data[key] = functional.resize_long(data[key], self.long_size) + if key == 'trimap': + data[key] = functional.resize_long(data[key], self.long_size, + cv2.INTER_NEAREST) + else: + data[key] = functional.resize_long(data[key], self.long_size) return data @@ -195,7 +207,11 @@ def __call__(self, data): data['trans_info'].append(('resize', 
data['img'].shape[0:2]))
         data['img'] = functional.resize_short(data['img'], self.short_size)
         for key in data.get('gt_fields', []):
-            data[key] = functional.resize_short(data[key], self.short_size)
+            if key == 'trimap':
+                data[key] = functional.resize_short(data[key], self.short_size,
+                                                    cv2.INTER_NEAREST)
+            else:
+                data[key] = functional.resize_short(data[key], self.short_size)
 
         return data
 
@@ -216,7 +232,11 @@ def __call__(self, data):
         rh = h - h % self.mult_int
         data['img'] = functional.resize(data['img'], (rw, rh))
         for key in data.get('gt_fields', []):
-            data[key] = functional.resize(data[key], (rw, rh))
+            if key == 'trimap':
+                data[key] = functional.resize(data[key], (rw, rh),
+                                              cv2.INTER_NEAREST)
+            else:
+                data[key] = functional.resize(data[key], (rw, rh))
 
         return data
 
@@ -396,7 +416,11 @@ def __call__(self, data):
         if target != long_edge:
             data['img'] = functional.resize_long(data['img'], target)
             for key in data.get('gt_fields', []):
-                data[key] = functional.resize_long(data[key], target)
+                if key == 'trimap':
+                    data[key] = functional.resize_long(data[key], target,
+                                                       cv2.INTER_NEAREST)
+                else:
+                    data[key] = functional.resize_long(data[key], target)
 
         return data
 
@@ -451,7 +475,11 @@ def __call__(self, data):
         if target != short_edge:
             data['img'] = functional.resize_short(data['img'], target)
             for key in data.get('gt_fields', []):
-                data[key] = functional.resize_short(data[key], target)
+                if key == 'trimap':
+                    data[key] = functional.resize_short(data[key], target,
+                                                        cv2.INTER_NEAREST)
+                else:
+                    data[key] = functional.resize_short(data[key], target)
 
         return data
 
@@ -670,7 +698,7 @@ def __call__(self, data):
                     0,
                     pad_width,
                     cv2.BORDER_CONSTANT,
-                    value=self.im_padding_value)
+                    value=value)
 
         return data

From 1c600c09d7cdb8aab744b3a9ff4329330f785e85 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Thu, 4 Nov 2021 20:55:22 +0800
Subject: [PATCH 186/210] add vdl visualization

---
 contrib/Matting/core/train.py | 44 +++++++++++++++++++++++++++++++++++
 contrib/Matting/transforms.py |  2 ++
 2 files changed, 46 insertions(+)

diff --git a/contrib/Matting/core/train.py b/contrib/Matting/core/train.py
index 4b1fd66c39..dabe0254c8 100644
--- a/contrib/Matting/core/train.py
+++ b/contrib/Matting/core/train.py
@@ -25,6 +25,34 @@
 from core.val import evaluate
 
 
+def visual_in_traning(log_writer, vis_dict, step):
+    """
+    Visualize tensors in VisualDL during training.
+
+    Args:
+        log_writer (LogWriter): The log writer of VisualDL.
+        vis_dict (dict): Dict of tensors to visualize. The shape of each tensor is (C, H, W).
+        step (int): The current training step.
+    """
+    for key, value in vis_dict.items():
+        value_shape = value.shape
+        if value_shape[0] not in [1, 3]:
+            value = value[0]
+            value = value.unsqueeze(0)
+        value = paddle.transpose(value, (1, 2, 0))
+        min_v = paddle.min(value)
+        max_v = paddle.max(value)
+        if (min_v > 0) and (max_v < 1):
+            value = value * 255
+        elif (min_v < 0 and min_v >= -1) and (max_v <= 1):
+            value = (1 + value) / 2 * 255
+        else:
+            value = (value - min_v) / (max_v - min_v) * 255
+
+        value = value.astype('uint8')
+        value = value.numpy()
+        log_writer.add_image(tag=key, img=value, step=step)
+
+
 def train(model,
           train_dataset,
           val_dataset=None,
@@ -35,6 +63,7 @@ def train(model,
           resume_model=None,
           save_interval=1000,
           log_iters=10,
+          log_image_iters=1000,
           num_workers=0,
           use_vdl=False,
           losses=None,
@@ -53,6 +82,7 @@ def train(model,
         resume_model (str, optional): The path of resume model.
         save_interval (int, optional): How many iters to save a model snapshot once during training. Default: 1000.
         log_iters (int, optional): Display logging information at every log_iters. Default: 10. 
+        log_image_iters (int, optional): Log images to VisualDL every log_image_iters. Default: 1000.
         num_workers (int, optional): Num workers for data loader. Default: 0.
         use_vdl (bool, optional): Whether to record the data to VisualDL during training. Default: False.
         losses (dict, optional): A dict of loss, refer to the loss function of the model for details. Default: None.
@@ -164,6 +194,20 @@ def train(model,
                                           avg_train_batch_cost, iter)
                     log_writer.add_scalar('Train/reader_cost',
                                           avg_train_reader_cost, iter)
+                    if iter % log_image_iters == 0:
+                        vis_dict = {}
+                        # ground truth
+                        vis_dict['ground truth/img'] = data['img'][0]
+                        for key in data['gt_fields']:
+                            key = key[0]
+                            vis_dict['/'.join(['ground truth',
+                                               key])] = data[key][0]
+                        # predict
+                        for key, value in logit_dict.items():
+                            vis_dict['/'.join(['predict',
+                                               key])] = logit_dict[key][0]
+                        visual_in_traning(
+                            log_writer=log_writer, vis_dict=vis_dict, step=iter)
 
                 for key in avg_loss.keys():
                     avg_loss[key] = 0.
diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py
index e84bf23a99..0bc61228dd 100644
--- a/contrib/Matting/transforms.py
+++ b/contrib/Matting/transforms.py
@@ -534,6 +534,8 @@ def __call__(self, data):
             data['img'] = cv2.GaussianBlur(data['img'], (radius, radius), 0, 0)
             for key in data.get('gt_fields', []):
+                if key == 'trimap':
+                    continue
                 data[key] = cv2.GaussianBlur(data[key], (radius, radius), 0, 0)
         return data

From e2231cdde83b8e44b4a560dc2c0fa0b3ddab850a Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Sun, 7 Nov 2021 15:37:38 +0800
Subject: [PATCH 187/210] delete unnecessary keys

---
 contrib/Matting/dataset/matting_dataset.py | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/contrib/Matting/dataset/matting_dataset.py b/contrib/Matting/dataset/matting_dataset.py
index aebf2cb332..e0cef13899 100644
--- a/contrib/Matting/dataset/matting_dataset.py
+++ b/contrib/Matting/dataset/matting_dataset.py
@@ -56,6 +56,7 @@ class MattingDataset(paddle.io.Dataset):
         It should be provided if mode is equal to 'val'. Default: None.
         get_trimap (bool, optional): Whether to get trimap. Default: True.
         separator (str, optional): The separator of train_file or val_file. If file names contain ' ', '|' may be a better choice. Default: ' '.
+        key_del (tuple|list, optional): Keys that are not needed, which will be deleted to accelerate data reading. Default: None.
     """
 
     def __init__(self,
@@ -65,13 +66,15 @@ def __init__(self,
                  train_file=None,
                  val_file=None,
                  get_trimap=True,
-                 separator=' '):
+                 separator=' ',
+                 key_del=None):
         super().__init__()
         self.dataset_root = dataset_root
         self.transforms = T.Compose(transforms)
         self.mode = mode
         self.get_trimap = get_trimap
         self.separator = separator
+        self.key_del = key_del
 
         # check file
         if mode == 'train' or mode == 'trainval':
@@ -157,6 +160,13 @@ def __getitem__(self, idx):
 
         if self.mode == 'val':
             data['ori_trimap'] = data['trimap'].copy()
 
+        # Delete keys which are not needed
+        if self.key_del is not None:
+            for key in self.key_del:
+                if key in data.keys():
+                    data.pop(key)
+                if key in data['gt_fields']:
+                    data['gt_fields'].remove(key)
         data = self.transforms(data)
 
         # During evaluation, gt should not be transformed.
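# Usage sketch for the new `key_del` argument (illustrative; the dataset
# root and the transform choice are assumptions, not taken from the patch):
#
#     train_dataset = MattingDataset(
#         dataset_root='data/matting/human_matting',
#         transforms=[RandomResize(size=(512, 512), scale=(0.5, 1.0))],
#         mode='train',
#         key_del=('fg', 'bg'))  # drop fg/bg tensors if the model does not need them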
From c085ff830fd02cbc6beb921937e082e57efc3ad4 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 10 Nov 2021 10:32:33 +0800 Subject: [PATCH 188/210] update ziyan.py --- contrib/Matting/model/ziyan.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/Matting/model/ziyan.py b/contrib/Matting/model/ziyan.py index ef6a2d8a1e..3492b21f50 100644 --- a/contrib/Matting/model/ziyan.py +++ b/contrib/Matting/model/ziyan.py @@ -590,7 +590,9 @@ def select_refinement_regions(self, err): if self.mode == 'sampling': b, _, h, w = err.shape err = paddle.reshape(err, (b, -1)) - _, idx = err.topk(self.sample_pixels // 16, axis=1, sorted=False) + num_total = err.shape[-1] * err.shape[-2] + k = min(num_total, self.sample_pixels // 16) + _, idx = err.topk(k, axis=1, sorted=False) ref = paddle.zeros_like(err) update = paddle.ones_like(idx, dtype='float32') for i in range(b): From 4af4999f888d3c12a48c75bb0af4c71c44300b2f Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 23 Nov 2021 15:24:39 +0800 Subject: [PATCH 189/210] add gcb of gcnet --- contrib/Matting/model/__init__.py | 2 +- contrib/Matting/model/ziyan.py | 566 +++++------------------------- 2 files changed, 86 insertions(+), 482 deletions(-) diff --git a/contrib/Matting/model/__init__.py b/contrib/Matting/model/__init__.py index 55f2016f77..a4097e22fb 100644 --- a/contrib/Matting/model/__init__.py +++ b/contrib/Matting/model/__init__.py @@ -19,4 +19,4 @@ from .dim import DIM from .loss import MRSD from .modnet import MODNet -from .ziyan import ZiYan, ZiYanRefine +from .ziyan import ZiYanAM diff --git a/contrib/Matting/model/ziyan.py b/contrib/Matting/model/ziyan.py index 3492b21f50..9062dfd69f 100644 --- a/contrib/Matting/model/ziyan.py +++ b/contrib/Matting/model/ziyan.py @@ -34,11 +34,12 @@ def conv_up_psp(in_channels, out_channels, up_sample): @manager.MODELS.add_component -class ZiYan(nn.Layer): +class ZiYanAM(nn.Layer): def __init__(self, backbone, pretrained=None): super().__init__() self.backbone = backbone self.pretrained = pretrained + ratio = 0.25 self.backbone_channels = backbone.feat_channels ###################### @@ -61,30 +62,35 @@ def __init__(self, backbone, pretrained=None): layers.ConvBNReLU(512, 512, 3, padding=2, dilation=2), layers.ConvBNReLU(512, 256, 3, padding=2, dilation=2), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + self.gcb_5 = GlobalContextBlock(256, ratio=ratio) # stage 4g self.decoder4_g = nn.Sequential( layers.ConvBNReLU(512, 256, 3, padding=1), layers.ConvBNReLU(256, 256, 3, padding=1), layers.ConvBNReLU(256, 128, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + self.gcb_4 = GlobalContextBlock(128, ratio=ratio) # stage 3g self.decoder3_g = nn.Sequential( layers.ConvBNReLU(256, 128, 3, padding=1), layers.ConvBNReLU(128, 128, 3, padding=1), layers.ConvBNReLU(128, 64, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + self.gcb_3 = GlobalContextBlock(64, ratio=ratio) # stage 2g self.decoder2_g = nn.Sequential( layers.ConvBNReLU(128, 128, 3, padding=1), layers.ConvBNReLU(128, 128, 3, padding=1), layers.ConvBNReLU(128, 64, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + self.gcb_2 = GlobalContextBlock(64, ratio=ratio) # stage 1g self.decoder1_g = nn.Sequential( layers.ConvBNReLU(128, 64, 3, padding=1), layers.ConvBNReLU(64, 64, 3, padding=1), layers.ConvBNReLU(64, 64, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + self.gcb_1 = 
GlobalContextBlock(64, ratio=ratio) # stage 0g self.decoder0_g = nn.Sequential( layers.ConvBNReLU(64, 64, 3, padding=1), @@ -109,34 +115,34 @@ def __init__(self, backbone, pretrained=None): # stage 4f self.decoder4_f = nn.Sequential( layers.ConvBNReLU( - 256 + self.backbone_channels[-2], 256, 3, padding=1), + 256 + self.backbone_channels[-2] + 256, 256, 3, padding=1), layers.ConvBNReLU(256, 256, 3, padding=1), layers.ConvBNReLU(256, 128, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) # stage 3f self.decoder3_f = nn.Sequential( layers.ConvBNReLU( - 128 + self.backbone_channels[-3], 128, 3, padding=1), + 128 + self.backbone_channels[-3] + 128, 128, 3, padding=1), layers.ConvBNReLU(128, 128, 3, padding=1), layers.ConvBNReLU(128, 64, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) # stage 2f self.decoder2_f = nn.Sequential( layers.ConvBNReLU( - 64 + self.backbone_channels[-4], 128, 3, padding=1), + 64 + self.backbone_channels[-4] + 64, 128, 3, padding=1), layers.ConvBNReLU(128, 128, 3, padding=1), layers.ConvBNReLU(128, 64, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) # stage 1f self.decoder1_f = nn.Sequential( layers.ConvBNReLU( - 64 + self.backbone_channels[-5], 64, 3, padding=1), + 64 + self.backbone_channels[-5] + 64, 64, 3, padding=1), layers.ConvBNReLU(64, 64, 3, padding=1), layers.ConvBNReLU(64, 64, 3, padding=1), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) # stage 0f self.decoder0_f = nn.Sequential( - layers.ConvBNReLU(64, 64, 3, padding=1), + layers.ConvBNReLU(64 + 64, 64, 3, padding=1), layers.ConvBNReLU(64, 64, 3, padding=1), nn.Conv2D(64, 1, 3, padding=1)) @@ -153,16 +159,21 @@ def forward(self, inputs): ########################## #psp: N, 512, H/32, W/32 psp = self.psp_module(fea_list[-1]) - #d6_g: N, 512, H/16, W/16 + #d5_g: N, 512, H/16, W/16 d5_g = self.decoder5_g(paddle.concat((psp, fea_list[-1]), 1)) - #d5_g: N, 512, H/8, W/8 + gcb_5 = self.gcb_5(d5_g) + #d4_g: N, 512, H/8, W/8 d4_g = self.decoder4_g(paddle.concat((self.psp4(psp), d5_g), 1)) - #d4_g: N, 256, H/4, W/4 + gcb_4 = self.gcb_4(d4_g) + #d3_g: N, 256, H/4, W/4 d3_g = self.decoder3_g(paddle.concat((self.psp3(psp), d4_g), 1)) - #d4_g: N, 128, H/2, W/2 + gcb_3 = self.gcb_3(d3_g) + #d2_g: N, 128, H/2, W/2 d2_g = self.decoder2_g(paddle.concat((self.psp2(psp), d3_g), 1)) - #d2_g: N, 64, H, W + gcb_2 = self.gcb_2(d2_g) + #d1_g: N, 64, H, W d1_g = self.decoder1_g(paddle.concat((self.psp1(psp), d2_g), 1)) + gcb_1 = self.gcb_1(d1_g) #d0_g: N, 3, H, W d0_g = self.decoder0_g(d1_g) # The 1st channel is foreground. The 2nd is transition region. The 3rd is background. 
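# Note on the wiring above (illustrative summary): each glance decoder stage
# now feeds a GlobalContextBlock (gcb_5 ... gcb_1), and the next hunk
# concatenates those attention features into the matching focus decoder
# stages, which is why the decoder*_f input widths in the earlier hunk grew
# by the corresponding gcb channel counts.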
@@ -176,15 +187,15 @@ def forward(self, inputs): #bg: N, 512, H/32, W/32 d5_f = self.decoder5_f(paddle.concat((bb, fea_list[-1]), 1)) #d5_f: N, 256, H/16, W/16 - d4_f = self.decoder4_f(paddle.concat((d5_f, fea_list[-2]), 1)) + d4_f = self.decoder4_f(paddle.concat((d5_f, fea_list[-2], gcb_5), 1)) #d4_f: N, 128, H/8, W/8 - d3_f = self.decoder3_f(paddle.concat((d4_f, fea_list[-3]), 1)) + d3_f = self.decoder3_f(paddle.concat((d4_f, fea_list[-3], gcb_4), 1)) #d3_f: N, 64, H/4, W/4 - d2_f = self.decoder2_f(paddle.concat((d3_f, fea_list[-4]), 1)) + d2_f = self.decoder2_f(paddle.concat((d3_f, fea_list[-4], gcb_3), 1)) #d2_f: N, 64, H/2, W/2 - d1_f = self.decoder1_f(paddle.concat((d2_f, fea_list[-5]), 1)) + d1_f = self.decoder1_f(paddle.concat((d2_f, fea_list[-5], gcb_2), 1)) #d1_f: N, 64, H, W - d0_f = self.decoder0_f(d1_f) + d0_f = self.decoder0_f(paddle.concat((d1_f, gcb_1), 1)) #d0_f: N, 1, H, W focus_sigmoid = F.sigmoid(d0_f) @@ -256,404 +267,62 @@ def init_weight(self): utils.load_entire_model(self, self.pretrained) -@manager.MODELS.add_component -class ZiYanRefine(ZiYan): - def __init__(self, - backbone, - pretrained=None, - backbone_scale=0.25, - refine_mode='sampling', - refine_sample_pixels=80000, - refine_threshold=0.1, - refine_kernel_size=3, - refine_prevent_oversampling=True, - if_refine=True): - if if_refine: - if backbone_scale > 0.5: - raise ValueError( - 'Backbone_scale should not be greater than 1/2, but it is {}' - .format(backbone_scale)) - else: - backbone_scale = 1 - super().__init__(backbone) - - self.backbone_scale = backbone_scale - self.pretrained = pretrained - self.if_refine = if_refine - if if_refine: - self.refiner = Refiner( - mode=refine_mode, - sample_pixels=refine_sample_pixels, - threshold=refine_threshold, - kernel_size=refine_kernel_size, - prevent_oversampling=refine_prevent_oversampling) - - # stage 0f recontain - self.decoder0_f = nn.Sequential( - layers.ConvBNReLU(64, 64, 3, padding=1), - layers.ConvBNReLU(64, 64, 3, padding=1), - nn.Conv2D(64, 1 + 1 + 32, 3, padding=1)) - self.init_weight() - - def forward(self, data): - src = data['img'] - src_h, src_w = src.shape[2:] - if self.if_refine: - if (src_h % 4 != 0) or (src_w % 4) != 0: - raise ValueError( - 'The input image must have width and height that are divisible by 4' - ) - - # Downsample src for backbone - src_sm = F.interpolate( - src, - scale_factor=self.backbone_scale, - mode='bilinear', - align_corners=False) - - # Base - fea_list = self.backbone(src_sm) - ########################## - ### Decoder part - GLANCE - ########################## - #psp: N, 512, H/32, W/32 - psp = self.psp_module(fea_list[-1]) - #d6_g: N, 512, H/16, W/16 - d5_g = self.decoder5_g(paddle.concat((psp, fea_list[-1]), 1)) - #d5_g: N, 512, H/8, W/8 - d4_g = self.decoder4_g(paddle.concat((self.psp4(psp), d5_g), 1)) - #d4_g: N, 256, H/4, W/4 - d3_g = self.decoder3_g(paddle.concat((self.psp3(psp), d4_g), 1)) - #d4_g: N, 128, H/2, W/2 - d2_g = self.decoder2_g(paddle.concat((self.psp2(psp), d3_g), 1)) - #d2_g: N, 64, H, W - d1_g = self.decoder1_g(paddle.concat((self.psp1(psp), d2_g), 1)) - #d0_g: N, 3, H, W - d0_g = self.decoder0_g(d1_g) - # The 1st channel is foreground. The 2nd is transition region. The 3rd is background. 
- # glance_sigmoid = F.sigmoid(d0_g) - glance_sigmoid = F.softmax(d0_g, axis=1) - - ########################## - ### Decoder part - FOCUS - ########################## - bb = self.bridge_block(fea_list[-1]) - #bg: N, 512, H/32, W/32 - d5_f = self.decoder5_f(paddle.concat((bb, fea_list[-1]), 1)) - #d5_f: N, 256, H/16, W/16 - d4_f = self.decoder4_f(paddle.concat((d5_f, fea_list[-2]), 1)) - #d4_f: N, 128, H/8, W/8 - d3_f = self.decoder3_f(paddle.concat((d4_f, fea_list[-3]), 1)) - #d3_f: N, 64, H/4, W/4 - d2_f = self.decoder2_f(paddle.concat((d3_f, fea_list[-4]), 1)) - #d2_f: N, 64, H/2, W/2 - d1_f = self.decoder1_f(paddle.concat((d2_f, fea_list[-5]), 1)) - #d1_f: N, 64, H, W - d0_f = self.decoder0_f(d1_f) - #d0_f: N, 1, H, W - focus_sigmoid = F.sigmoid(d0_f[:, 0:1, :, :]) - pha_sm = self.fusion(glance_sigmoid, focus_sigmoid) - err_sm = d0_f[:, 1:2, :, :] - err_sm = paddle.clip(err_sm, 0., 1.) - hid_sm = F.relu(d0_f[:, 2:, :, :]) - - # Refiner - if self.if_refine: - pha, ref_sm = self.refiner( - src=src, pha=pha_sm, err=err_sm, hid=hid_sm, tri=glance_sigmoid) - # Clamp outputs - pha = paddle.clip(pha, 0., 1.) - - if self.training: - logit_dict = { - 'glance': glance_sigmoid, - 'focus': focus_sigmoid, - 'fusion': pha_sm, - 'error': err_sm - } - if self.if_refine: - logit_dict['refine'] = pha - return logit_dict - else: - return pha if self.if_refine else pha_sm - - def loss(self, logit_dict, label_dict, loss_func_dict=None): - if loss_func_dict is None: - loss_func_dict = defaultdict(list) - loss_func_dict['glance'].append(nn.NLLLoss()) - loss_func_dict['focus'].append(MRSD()) - loss_func_dict['cm'].append(MRSD()) - loss_func_dict['err'].append(paddleseg.models.MSELoss()) - loss_func_dict['refine'].append(paddleseg.models.L1Loss()) - - loss = {} - - # glance loss computation - # get glance label - glance_label = F.interpolate( - label_dict['trimap'], - logit_dict['glance'].shape[2:], - mode='nearest', - align_corners=False) - glance_label_trans = (glance_label == 128).astype('int64') - glance_label_bg = (glance_label == 0).astype('int64') - glance_label = glance_label_trans + glance_label_bg * 2 - loss_glance = loss_func_dict['glance'][0]( - paddle.log(logit_dict['glance'] + 1e-6), glance_label.squeeze(1)) - loss['glance'] = loss_glance - # TODO glance label 的验证 - - # focus loss computation - focus_label = F.interpolate( - label_dict['alpha'], - logit_dict['focus'].shape[2:], - mode='bilinear', - align_corners=False) - loss_focus = loss_func_dict['focus'][0](logit_dict['focus'], - focus_label, glance_label_trans) - loss['focus'] = loss_focus - - # collaborative matting loss - loss_cm_func = loss_func_dict['cm'] - # fusion_sigmoid loss - loss_cm = loss_cm_func[0](logit_dict['fusion'], focus_label) - loss['cm'] = loss_cm - - # error loss - err = F.interpolate( - logit_dict['error'], - label_dict['alpha'].shape[2:], - mode='bilinear', - align_corners=False) - err_label = (F.interpolate( - logit_dict['fusion'], - label_dict['alpha'].shape[2:], - mode='bilinear', - align_corners=False) - label_dict['alpha']).abs() - loss_err = loss_func_dict['err'][0](err, err_label) - loss['err'] = loss_err - - loss_all = 0.25 * loss_glance + 0.25 * loss_focus + 0.25 * loss_cm + loss_err - - # refine loss - if self.if_refine: - loss_refine = loss_func_dict['refine'][0](logit_dict['refine'], - label_dict['alpha']) - loss['refine'] = loss_refine - loss_all = loss_all + loss_refine - - loss['all'] = loss_all - return loss - - -class Refiner(nn.Layer): - ''' - Refiner refines the coarse output to full resolution. 
+class GlobalContextBlock(nn.Layer): + """ + Global Context Block implementation. Args: - mode: area selection mode. Options: - "full" - No area selection, refine everywhere using regular Conv2d. - "sampling" - Refine fixed amount of pixels ranked by the top most errors. - "thresholding" - Refine varying amount of pixels that have greater error than the threshold. - sample_pixels: number of pixels to refine. Only used when mode == "sampling". - threshold: error threshold ranged from 0 ~ 1. Refine where err > threshold. Only used when mode == "thresholding". - kernel_size: The convolution kernel_size. Options: [1, 3]. Default: 3. - prevent_oversampling: True for regular cases, False for speedtest.Default: True. - ''' - - def __init__(self, - mode, - sample_pixels, - threshold, - kernel_size=3, - prevent_oversampling=True): - super().__init__() - if mode not in ['full', 'sampling', 'thresholding']: - raise ValueError( - "mode must be in ['full', 'sampling', 'thresholding']") - if kernel_size not in [1, 3]: - raise ValueError("kernel_size must be in [1, 3]") - - self.mode = mode - self.sample_pixels = sample_pixels - self.threshold = threshold - self.kernel_size = kernel_size - self.prevent_oversampling = prevent_oversampling - - channels = [32, 24, 16, 12, 1] - self.conv1 = layers.ConvBNReLU( - channels[0] + 4 + 3, - channels[1], - kernel_size, - padding=0, - bias_attr=False) - self.conv2 = layers.ConvBNReLU( - channels[1], channels[2], kernel_size, padding=0, bias_attr=False) - self.conv3 = layers.ConvBNReLU( - channels[2] + 3, - channels[3], - kernel_size, - padding=0, - bias_attr=False) - self.conv4 = nn.Conv2D( - channels[3], channels[4], kernel_size, padding=0, bias_attr=True) - - def forward(self, src, pha, err, hid, tri): - ''' - Args: - src: (B, 3, H, W) full resolution source image. - pha: (B, 1, Hc, Wc) coarse alpha prediction. - err: (B, 1, Hc, Hc) coarse error prediction. - hid: (B, 32, Hc, Hc) coarse hidden encoding. - tri: (B, 1, Hc, Hc) trimap prediction. - ''' - h_full, w_full = src.shape[2:] - h_half, w_half = h_full // 2, w_full // 2 - h_quat, w_quat = h_full // 4, w_full // 4 - - if self.mode != 'full': - err = F.interpolate( - err, (h_quat, w_quat), mode='bilinear', align_corners=False) - ref = self.select_refinement_regions(err) - idx = paddle.nonzero(ref.squeeze(1)) - - if idx.shape[0] > 0: - x = paddle.concat([hid, pha, tri], axis=1) - x = F.interpolate( - x, (h_half, w_half), mode='bilinear', align_corners=False) - x = self.crop_patch(x, idx, 2, - 3 if self.kernel_size == 3 else 0) - - y = F.interpolate( - src, (h_half, w_half), mode='bilinear', align_corners=False) - y = self.crop_patch(y, idx, 2, - 3 if self.kernel_size == 3 else 0) - - x = self.conv1(paddle.concat([x, y], axis=1)) - x = self.conv2(x) - - x = F.interpolate( - x, (8, 8) if self.kernel_size == 3 else (4, 4), - mode='nearest') - y = self.crop_patch(src, idx, 4, - 2 if self.kernel_size == 3 else 0) - - x = self.conv3(paddle.concat([x, y], axis=1)) - x = self.conv4(x) - - pha = F.interpolate( - pha, (h_full, w_full), mode='bilinear', align_corners=False) - pha = self.replace_patch(pha, x, idx) - else: - pha = F.interpolate( - pha, (h_full, w_full), mode='bilinear', align_corners=False) + in_channels (int): The input channels of Global Context Block. + ratio (float): The channels of attention map. 
+ """ - else: - x = paddle.concat([hid, pha, tri], axis=1) - x = F.interpolate( - x, (h_half, w_half), mode='bilinear', align_corners=False) - y = F.interpolate( - src, (h_half, w_half), mode='bilinear', align_corners=False) - - if self.kernel_size == 3: - x = F.pad(x, [3, 3, 3, 3]) - y = F.pad(y, [3, 3, 3, 3]) - - x = self.conv1(paddle.concat([x, y], axis=1)) - x = self.conv2(x) - - if self.kernel_size == 3: - x = F.interpolate(x, (h_full + 4, w_full + 4)) - y = F.pad(src, [2, 2, 2, 2]) - else: - x = F.interpolate(x, (h_full, w_full), mode='nearest') - y = src - - x = self.conv3(paddle.concat([x, y], axis=1)) - x = self.conv4(x) - - pha = x - ref = paddle.ones((src.shape[0], 1, h_quat, w_quat)) - return pha, ref - - def select_refinement_regions(self, err): - ''' - select refinement regions. - - Args: - err: error map (B, 1, H, W). - - Returns: - Teosor: refinement regions (B, 1, H, W). 1 is selected, 0 is not. - ''' - err.stop_gradient = True - if self.mode == 'sampling': - b, _, h, w = err.shape - err = paddle.reshape(err, (b, -1)) - num_total = err.shape[-1] * err.shape[-2] - k = min(num_total, self.sample_pixels // 16) - _, idx = err.topk(k, axis=1, sorted=False) - ref = paddle.zeros_like(err) - update = paddle.ones_like(idx, dtype='float32') - for i in range(b): - ref[i] = paddle.scatter(ref[i], idx[i], update[i]) - if self.prevent_oversampling: - ref = ref * ((err > 0).astype('float32')) - ref = ref.reshape((b, 1, h, w)) - else: - ref = (err > self.threshold).astype('float32') - return ref - - def crop_patch(self, x, idx, size, padding): - """ - Crops selected patches from image given indices. - - Inputs: - x: image (B, C, H, W). - idx: selection indices shape is (p, 3), where the 3 values are (B, H, W) index. - size: center size of the patch, also stride of the crop. - padding: expansion size of the patch. - Output: - patch: (P, C, h, w), where h = w = size + 2 * padding. - """ - b, c, h, w = x.shape - kernel_size = size + 2 * padding - x = F.unfold( - x, kernel_sizes=kernel_size, strides=size, paddings=padding) - hout = int((h + 2 * padding - kernel_size) / size + 1) - wout = int((w + 2 * padding - kernel_size) / size + 1) - x = x.reshape((b, c, kernel_size, kernel_size, hout, wout)) - x = paddle.transpose( - x, (0, 4, 5, 1, 2, 3) - ) # If size is lager (4, 512, 512, 36, 8, 8), it will result OSError: (External) Cuda error(700), an illegal memory access was encountered. idx will illgegal. - patchs = paddle.gather_nd(x, idx) - return patchs - - def replace_patch(self, x, y, idx): - ''' - Replaces patches back into image given index. - - Args: - x: image (B, C, H, W) - y: patches (P, C, h, w) - idx: selection indices shape is (p, 3), where the 3 values are (B, H, W) index. - - Returns: - Tensor: (B, C, H, W), where patches at idx locations are replaced with y. 
- ''' - bx, cx, hx, wx = x.shape - by, cy, hy, wy = y.shape - - x = x.reshape((bx, cx, hx // hy, hy, wx // wy, wy)) - x = x.transpose((0, 2, 4, 1, 3, 5)) - ones = paddle.ones((idx.shape[0], cx, hy, wy)) - flag = paddle.scatter_nd( - idx, ones, shape=x.shape) # Get the index which should be replace - x = x * (1 - flag) - x = paddle.scatter_nd_add(x, idx, y) - x = x.transpose((0, 3, 1, 4, 2, 5)) - x = x.reshape((bx, cx, hx, wx)) - return x + def __init__(self, in_channels, ratio=0.25): + super().__init__() + self.in_channels = in_channels + + self.conv_mask = nn.Conv2D( + in_channels=in_channels, out_channels=1, kernel_size=1) + + self.softmax = nn.Softmax(axis=2) + + inter_channels = int(in_channels * ratio) + self.channel_add_conv = nn.Sequential( + nn.Conv2D( + in_channels=in_channels, + out_channels=inter_channels, + kernel_size=1), + nn.LayerNorm(normalized_shape=[inter_channels, 1, 1]), nn.ReLU(), + nn.Conv2D( + in_channels=inter_channels, + out_channels=in_channels, + kernel_size=1)) + + def global_context_block(self, x): + x_shape = paddle.shape(x) + + # [N, C, H * W] + input_x = paddle.reshape(x, shape=[0, self.in_channels, -1]) + # [N, 1, C, H * W] + input_x = paddle.unsqueeze(input_x, axis=1) + # [N, 1, H, W] + context_mask = self.conv_mask(x) + # [N, 1, H * W] + context_mask = paddle.reshape(context_mask, shape=[0, 1, -1]) + context_mask = self.softmax(context_mask) + # [N, 1, H * W, 1] + context_mask = paddle.unsqueeze(context_mask, axis=-1) + # [N, 1, C, 1] + context = paddle.matmul(input_x, context_mask) + # [N, C, 1, 1] + context = paddle.reshape(context, shape=[0, self.in_channels, 1, 1]) + + return context + + def forward(self, x): + context = self.global_context_block(x) + channel_add_term = self.channel_add_conv(context) + out = x + channel_add_term + return out if __name__ == '__main__': @@ -661,76 +330,11 @@ def replace_patch(self, x, y, idx): import time from resnet_vd import ResNet34_vd backbone = ResNet34_vd(output_stride=32) - x = paddle.randint(0, 256, (1, 3, 2048, 2048)).astype('float32') + x = paddle.randint(0, 256, (1, 3, 512, 512)).astype('float32') inputs = {} inputs['img'] = x - model = ZiYanRefine( - backbone=backbone, - pretrained=None, - backbone_scale=0.25, - refine_mode='sampling', - refine_sample_pixels=80000, - refine_threshold=0.1, - refine_kernel_size=3, - refine_prevent_oversampling=True, - if_refine=True) - # model.eval() - for i in range(1): - pha = model(inputs) - print(pha) -# for k, v in output.items(): -# print(k) -# print(v) - -# refiner = Refiner(mode='sampling', -# sample_pixels=5000, -# threshold=0.1, -# kernel_size=3, -# prevent_oversampling=True) -# check select_refinement_regions, succeed -# err = paddle.rand((2, 1, 512, 512)) -# start = time.time() -# ref = refiner.select_refinement_regions_(err) -# print('old time comsumn: ',time.time() - start) -# print('old err') -# print(err) -# print('old ref') -# print(ref) - -# check crop_patch, succeed -# x = paddle.rand((2, 3, 256, 256)) -# err = paddle.rand((2, 1, 128, 128)) -# ref = refiner.select_refinement_regions(err) -# idx = paddle.nonzero(ref.squeeze(1)) -# idx = idx[:, 0], idx[:, 1], idx[:, 2] -# size = 2 -# padding= 3 -# p = refiner.crop_patch(x, idx, size, padding) - -# check replace_patch, succeed -# p = p+1 -# p = p[:, :, 3:5, 3:5] -# start = time.time() -# refinement = refiner.replace_patch(x, p, idx) -# print('replace_patch time:', time.time() - start) -# print(refinement) - -# # check refine, succeed -# src = paddle.rand((2, 3, 16, 16)) -# pha = paddle.rand((2, 1, 4, 4)) -# err 
= paddle.rand((2, 1, 4, 4)) -# hid = paddle.rand((2, 32, 4, 4)) -# tri = paddle.rand((2, 3, 4, 4)) - -# pha_ref, ref = refiner(src, pha, err, hid, tri) -# print('err') -# print(err[1]) -# print('ref') -# print(ref[1]) -# print('pha') -# pha = F.interpolate(pha, (16, 16), mode='bilinear', align_corners=False) -# print(pha[1,0,:,:]) -# print('pha_ref') -# print(pha_ref[1,0,:,:]) -# print(pha_ref.shape, ref.shape) + model = ZiYanAM(backbone=backbone, pretrained=None) + + results = model(inputs) + print(results) From c0511acf491079c0c6af418bc076ef6d3443f8cc Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 24 Nov 2021 15:01:03 +0800 Subject: [PATCH 190/210] add gate --- contrib/Matting/model/__init__.py | 1 + contrib/Matting/model/ziyan_gate.py | 293 ++++++++++++++++++++++++++++ 2 files changed, 294 insertions(+) create mode 100644 contrib/Matting/model/ziyan_gate.py diff --git a/contrib/Matting/model/__init__.py b/contrib/Matting/model/__init__.py index a4097e22fb..6bac17f552 100644 --- a/contrib/Matting/model/__init__.py +++ b/contrib/Matting/model/__init__.py @@ -20,3 +20,4 @@ from .loss import MRSD from .modnet import MODNet from .ziyan import ZiYanAM +from .ziyan_gate import ZiYanGate diff --git a/contrib/Matting/model/ziyan_gate.py b/contrib/Matting/model/ziyan_gate.py new file mode 100644 index 0000000000..ecac074c90 --- /dev/null +++ b/contrib/Matting/model/ziyan_gate.py @@ -0,0 +1,293 @@ +# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
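+
+# ZiYanGate couples two decoders: a glance decoder that predicts a coarse
+# foreground/transition/background map from PSP features, and a focus branch
+# that refines the alpha matte inside the transition region through gated
+# spatial convolutions driven by side outputs of the glance decoder.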
+ +from collections import defaultdict +import time + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import paddleseg +from paddleseg.models import layers +from paddleseg import utils +from paddleseg.cvlibs import manager + +from model import MRSD +from model import resnet_vd + + +def conv_up_psp(in_channels, out_channels, up_sample): + return nn.Sequential( + layers.ConvBNReLU(in_channels, out_channels, 3, padding=1), + nn.Upsample( + scale_factor=up_sample, mode='bilinear', align_corners=False)) + + +@manager.MODELS.add_component +class ZiYanGate(nn.Layer): + def __init__(self, backbone, pretrained=None): + super().__init__() + self.backbone = backbone + self.pretrained = pretrained + + self.backbone_channels = backbone.feat_channels + ###################### + ### Decoder part - Glance + ###################### + self.psp_module = layers.PPModule( + self.backbone_channels[-1], + 512, + bin_sizes=(1, 3, 5), + dim_reduction=False, + align_corners=False) + self.psp4 = conv_up_psp(512, 256, 2) + self.psp3 = conv_up_psp(512, 128, 4) + self.psp2 = conv_up_psp(512, 64, 8) + self.psp1 = conv_up_psp(512, 64, 16) + # stage 5g + self.decoder5_g = nn.Sequential( + layers.ConvBNReLU( + 512 + self.backbone_channels[-1], 512, 3, padding=1), + layers.ConvBNReLU(512, 512, 3, padding=2, dilation=2), + layers.ConvBNReLU(512, 256, 3, padding=2, dilation=2), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 4g + self.decoder4_g = nn.Sequential( + layers.ConvBNReLU(512, 256, 3, padding=1), + layers.ConvBNReLU(256, 256, 3, padding=1), + layers.ConvBNReLU(256, 128, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 3g + self.decoder3_g = nn.Sequential( + layers.ConvBNReLU(256, 128, 3, padding=1), + layers.ConvBNReLU(128, 128, 3, padding=1), + layers.ConvBNReLU(128, 64, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 2g + self.decoder2_g = nn.Sequential( + layers.ConvBNReLU(128, 128, 3, padding=1), + layers.ConvBNReLU(128, 128, 3, padding=1), + layers.ConvBNReLU(128, 64, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 1g + self.decoder1_g = nn.Sequential( + layers.ConvBNReLU(128, 64, 3, padding=1), + layers.ConvBNReLU(64, 64, 3, padding=1), + layers.ConvBNReLU(64, 64, 3, padding=1), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)) + # stage 0g + self.decoder0_g = nn.Sequential( + layers.ConvBNReLU(64, 64, 3, padding=1), + layers.ConvBNReLU(64, 64, 3, padding=1), + nn.Conv2D(64, 3, 3, padding=1)) + + ########################## + ### Decoder part - FOCUS + ########################## + self.dsn1 = nn.Conv2D(256, 1, kernel_size=1) + self.dsn2 = nn.Conv2D(64, 1, kernel_size=1) + self.dsn3 = nn.Conv2D(64, 1, kernel_size=1) + + self.res1 = resnet_vd.BasicBlock(64, 64, stride=1) + self.d1 = nn.Conv2D(64, 32, kernel_size=1) + self.gate1 = GatedSpatailConv2d(32, 32) + self.res2 = resnet_vd.BasicBlock(32, 32, stride=1) + self.d2 = nn.Conv2D(32, 16, kernel_size=1) + self.gate2 = GatedSpatailConv2d(16, 16) + self.res3 = resnet_vd.BasicBlock(16, 16, stride=1) + self.d3 = nn.Conv2D(16, 8, kernel_size=1) + self.gate3 = GatedSpatailConv2d(8, 8) + self.focus = nn.Conv2D(8, 1, kernel_size=1, bias_attr=False) + + self.init_weight() + + def forward(self, inputs): + x = inputs['img'] + input_shape = paddle.shape(x) + # input fea_list shape [N, 64, H/2, W/2] [N, 64, H/4, W/4] + # [N, 128, H/8, W/8] [N, 256, H/16, W/16] [N, 512, 
H/32, W/32]
+        fea_list = self.backbone(x)
+
+        ##########################
+        ### Decoder part - GLANCE
+        ##########################
+        #psp: N, 512, H/32, W/32
+        psp = self.psp_module(fea_list[-1])
+        #d5_g: N, 256, H/16, W/16
+        d5_g = self.decoder5_g(paddle.concat((psp, fea_list[-1]), 1))
+        #d4_g: N, 128, H/8, W/8
+        d4_g = self.decoder4_g(paddle.concat((self.psp4(psp), d5_g), 1))
+        #d3_g: N, 64, H/4, W/4
+        d3_g = self.decoder3_g(paddle.concat((self.psp3(psp), d4_g), 1))
+        #d2_g: N, 64, H/2, W/2
+        d2_g = self.decoder2_g(paddle.concat((self.psp2(psp), d3_g), 1))
+        #d1_g: N, 64, H, W
+        d1_g = self.decoder1_g(paddle.concat((self.psp1(psp), d2_g), 1))
+        #d0_g: N, 3, H, W
+        d0_g = self.decoder0_g(d1_g)
+        # The 1st channel is foreground. The 2nd is transition region. The 3rd is background.
+        # glance_sigmoid = F.sigmoid(d0_g)
+        glance_sigmoid = F.softmax(d0_g, axis=1)
+
+        ##########################
+        ### Decoder part - FOCUS
+        ##########################
+        s1 = F.interpolate(
+            self.dsn1(d5_g),
+            input_shape[2:],
+            mode='bilinear',
+            align_corners=False)
+        s2 = F.interpolate(
+            self.dsn2(d3_g),
+            input_shape[2:],
+            mode='bilinear',
+            align_corners=False)
+        s3 = F.interpolate(
+            self.dsn3(d1_g),
+            input_shape[2:],
+            mode='bilinear',
+            align_corners=False)
+
+        df = F.interpolate(
+            fea_list[0], input_shape[2:], mode='bilinear', align_corners=False)
+        df = self.res1(df)
+        df = self.d1(df)
+        df = self.gate1(df, s1)
+
+        df = self.res2(df)
+        df = self.d2(df)
+        df = self.gate2(df, s2)
+
+        df = self.res3(df)
+        df = self.d3(df)
+        df = self.gate3(df, s3)
+
+        focus = self.focus(df)
+        focus_sigmoid = F.sigmoid(focus)
+
+        fusion_sigmoid = self.fusion(glance_sigmoid, focus_sigmoid)
+
+        if self.training:
+            logit_dict = {
+                'glance': glance_sigmoid,
+                'focus': focus_sigmoid,
+                'fusion': fusion_sigmoid
+            }
+            return logit_dict
+        else:
+            return fusion_sigmoid
+
+    def loss(self, logit_dict, label_dict, loss_func_dict=None):
+        if loss_func_dict is None:
+            loss_func_dict = defaultdict(list)
+            loss_func_dict['glance'].append(nn.NLLLoss())
+            loss_func_dict['focus'].append(MRSD())
+            loss_func_dict['cm'].append(MRSD())
+            loss_func_dict['cm'].append(MRSD())
+
+        loss = {}
+
+        # glance loss computation
+        # get glance label: 1 for the transition region, 2 for the background
+        glance_label = label_dict['trimap']
+        glance_label_trans = (glance_label == 128).astype('int64')
+        glance_label_bg = (glance_label == 0).astype('int64')
+        glance_label = glance_label_trans + glance_label_bg * 2
+        loss_glance = loss_func_dict['glance'][0](
+            paddle.log(logit_dict['glance'] + 1e-6), glance_label.squeeze(1))
+        loss['glance'] = loss_glance
+        # TODO: verify the glance label
+
+        # focus loss computation
+        loss_focus = loss_func_dict['focus'][0](logit_dict['focus'],
+                                                label_dict['alpha'],
+                                                label_dict['trimap'] == 128)
+        loss['focus'] = loss_focus
+
+        # collaborative matting loss
+        loss_cm_func = loss_func_dict['cm']
+        # fusion_sigmoid loss
+        loss_cm = loss_cm_func[0](logit_dict['fusion'], label_dict['alpha'])
+        # composition loss
+        comp_pred = logit_dict['fusion'] * label_dict['fg'] + (
+            1 - logit_dict['fusion']) * label_dict['bg']
+        loss_cm = loss_cm + loss_cm_func[1](comp_pred, label_dict['img'])
+        loss['cm'] = loss_cm
+
+        loss['all'] = 0.25 * loss_glance + 0.25 * loss_focus + 0.25 * loss['cm']
+
+        return loss
+
+    def fusion(self, glance_sigmoid, focus_sigmoid):
+        # glance_sigmoid [N, 3, H, W]
+        # In index, 0 is foreground, 1 is transition, 2 is background
+        # After fusion, the foreground is 1, the background is 0, and the transition is between [0, 1]
+        index = paddle.argmax(glance_sigmoid, axis=1,
keepdim=True) + transition_mask = (index == 1).astype('float32') + fg = (index == 0).astype('float32') + fusion_sigmoid = focus_sigmoid * transition_mask + fg + return fusion_sigmoid + + def init_weight(self): + if self.pretrained is not None: + utils.load_entire_model(self, self.pretrained) + + +class GatedSpatailConv2d(nn.Layer): + def __init__(self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + dilation=1, + groups=1, + bias_attr=False): + super().__init__() + self._gate_conv = nn.Sequential( + layers.SyncBatchNorm(in_channels + 1), + nn.Conv2D(in_channels + 1, in_channels + 1, kernel_size=1), + nn.ReLU(), nn.Conv2D(in_channels + 1, 1, kernel_size=1), + layers.SyncBatchNorm(1), nn.Sigmoid()) + self.conv = nn.Conv2D( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias_attr=bias_attr) + + def forward(self, input_features, gating_features): + cat = paddle.concat([input_features, gating_features], axis=1) + alphas = self._gate_conv(cat) + x = input_features * (alphas + 1) + x = self.conv(x) + return x + + +if __name__ == '__main__': + # paddle.set_device('cpu') + import time + from resnet_vd import ResNet34_vd + backbone = ResNet34_vd(output_stride=32) + x = paddle.randint(0, 256, (1, 3, 512, 512)).astype('float32') + inputs = {} + inputs['img'] = x + + model = ZiYanGate(backbone=backbone, pretrained=None) + + results = model(inputs) + print(results) From 76678853d3e05a4c3fa54f1e17320d22af2923ab Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 29 Nov 2021 10:43:51 +0800 Subject: [PATCH 191/210] pp-matting-v0.4.1 --- contrib/Matting/model/ziyan_gate.py | 43 +++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/contrib/Matting/model/ziyan_gate.py b/contrib/Matting/model/ziyan_gate.py index ecac074c90..52ea22c6b0 100644 --- a/contrib/Matting/model/ziyan_gate.py +++ b/contrib/Matting/model/ziyan_gate.py @@ -98,6 +98,22 @@ def __init__(self, backbone, pretrained=None): self.dsn1 = nn.Conv2D(256, 1, kernel_size=1) self.dsn2 = nn.Conv2D(64, 1, kernel_size=1) self.dsn3 = nn.Conv2D(64, 1, kernel_size=1) + self.dsn_backbone1 = nn.Conv2D( + self.backbone_channels[2], 1, kernel_size=1) + self.dsn_backbone2 = nn.Conv2D( + self.backbone_channels[3], 1, kernel_size=1) + self.dsn_backbone3 = nn.Conv2D( + self.backbone_channels[4], 1, kernel_size=1) + + self.res_backbone1 = resnet_vd.BasicBlock(64, 64, stride=1) + self.d_backbone1 = nn.Conv2D(64, 64, kernel_size=1) + self.gate_backbone1 = GatedSpatailConv2d(64, 64, stride=1) + self.res_backbone2 = resnet_vd.BasicBlock(64, 64, stride=1) + self.d_backbone2 = nn.Conv2D(64, 64, kernel_size=1) + self.gate_backbone2 = GatedSpatailConv2d(64, 64, stride=1) + self.res_backbone3 = resnet_vd.BasicBlock(64, 64, stride=1) + self.d_backbone3 = nn.Conv2D(64, 64, kernel_size=1) + self.gate_backbone3 = GatedSpatailConv2d(64, 64, stride=1) self.res1 = resnet_vd.BasicBlock(64, 64, stride=1) self.d1 = nn.Conv2D(64, 32, kernel_size=1) @@ -158,9 +174,36 @@ def forward(self, inputs): input_shape[2:], mode='bilinear', align_corners=False) + s_backbone1 = F.interpolate( + self.dsn_backbone1(fea_list[2]), + input_shape[2:], + mode='bilinear', + align_corners=False) + s_backbone2 = F.interpolate( + self.dsn_backbone2(fea_list[3]), + input_shape[2:], + mode='bilinear', + align_corners=False) + s_backbone3 = F.interpolate( + self.dsn_backbone3(fea_list[4]), + input_shape[2:], + mode='bilinear', + align_corners=False) df = F.interpolate( 
fea_list[0], input_shape[2:], mode='bilinear', align_corners=False) + df = self.res_backbone1(df) + df = self.d_backbone1(df) + df = self.gate_backbone1(df, s_backbone1) + + df = self.res_backbone2(df) + df = self.d_backbone2(df) + df = self.gate_backbone2(df, s_backbone2) + + df = self.res_backbone3(df) + df = self.d_backbone3(df) + df = self.gate_backbone3(df, s_backbone3) + df = self.res1(df) df = self.d1(df) df = self.gate1(df, s1) From 28118f679a9be6f1613e9f24edde269cf4760585 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 29 Nov 2021 11:55:19 +0800 Subject: [PATCH 192/210] pp-matting-v0.4.2 --- contrib/Matting/model/resnet_vd.py | 2 +- contrib/Matting/model/ziyan_gate.py | 60 +++++++---------------------- 2 files changed, 14 insertions(+), 48 deletions(-) diff --git a/contrib/Matting/model/resnet_vd.py b/contrib/Matting/model/resnet_vd.py index 3fbeb328ca..ae0ee11cd1 100644 --- a/contrib/Matting/model/resnet_vd.py +++ b/contrib/Matting/model/resnet_vd.py @@ -157,7 +157,7 @@ def __init__(self, out_channels=out_channels, kernel_size=1, stride=1, - is_vd_mode=False if if_first else True) + is_vd_mode=False if if_first or stride == 1 else True) self.shortcut = shortcut diff --git a/contrib/Matting/model/ziyan_gate.py b/contrib/Matting/model/ziyan_gate.py index 52ea22c6b0..6d0b63f7bd 100644 --- a/contrib/Matting/model/ziyan_gate.py +++ b/contrib/Matting/model/ziyan_gate.py @@ -98,24 +98,12 @@ def __init__(self, backbone, pretrained=None): self.dsn1 = nn.Conv2D(256, 1, kernel_size=1) self.dsn2 = nn.Conv2D(64, 1, kernel_size=1) self.dsn3 = nn.Conv2D(64, 1, kernel_size=1) - self.dsn_backbone1 = nn.Conv2D( - self.backbone_channels[2], 1, kernel_size=1) - self.dsn_backbone2 = nn.Conv2D( - self.backbone_channels[3], 1, kernel_size=1) - self.dsn_backbone3 = nn.Conv2D( - self.backbone_channels[4], 1, kernel_size=1) - - self.res_backbone1 = resnet_vd.BasicBlock(64, 64, stride=1) - self.d_backbone1 = nn.Conv2D(64, 64, kernel_size=1) - self.gate_backbone1 = GatedSpatailConv2d(64, 64, stride=1) - self.res_backbone2 = resnet_vd.BasicBlock(64, 64, stride=1) - self.d_backbone2 = nn.Conv2D(64, 64, kernel_size=1) - self.gate_backbone2 = GatedSpatailConv2d(64, 64, stride=1) - self.res_backbone3 = resnet_vd.BasicBlock(64, 64, stride=1) - self.d_backbone3 = nn.Conv2D(64, 64, kernel_size=1) - self.gate_backbone3 = GatedSpatailConv2d(64, 64, stride=1) - - self.res1 = resnet_vd.BasicBlock(64, 64, stride=1) + + self.res1 = resnet_vd.BasicBlock( + self.backbone_channels[0] + self.backbone_channels[1], + 64, + stride=1, + shortcut=False) self.d1 = nn.Conv2D(64, 32, kernel_size=1) self.gate1 = GatedSpatailConv2d(32, 32) self.res2 = resnet_vd.BasicBlock(32, 32, stride=1) @@ -174,36 +162,12 @@ def forward(self, inputs): input_shape[2:], mode='bilinear', align_corners=False) - s_backbone1 = F.interpolate( - self.dsn_backbone1(fea_list[2]), - input_shape[2:], - mode='bilinear', - align_corners=False) - s_backbone2 = F.interpolate( - self.dsn_backbone2(fea_list[3]), - input_shape[2:], - mode='bilinear', - align_corners=False) - s_backbone3 = F.interpolate( - self.dsn_backbone3(fea_list[4]), - input_shape[2:], - mode='bilinear', - align_corners=False) - df = F.interpolate( + df0 = F.interpolate( fea_list[0], input_shape[2:], mode='bilinear', align_corners=False) - df = self.res_backbone1(df) - df = self.d_backbone1(df) - df = self.gate_backbone1(df, s_backbone1) - - df = self.res_backbone2(df) - df = self.d_backbone2(df) - df = self.gate_backbone2(df, s_backbone2) - - df = self.res_backbone3(df) - df = 
self.d_backbone3(df) - df = self.gate_backbone3(df, s_backbone3) - + df1 = F.interpolate( + fea_list[1], input_shape[2:], mode='bilinear', align_corners=False) + df = paddle.concat([df0, df1], 1) df = self.res1(df) df = self.d1(df) df = self.gate1(df, s1) @@ -325,7 +289,9 @@ def forward(self, input_features, gating_features): # paddle.set_device('cpu') import time from resnet_vd import ResNet34_vd - backbone = ResNet34_vd(output_stride=32) + from hrnet import HRNet_W18 + # backbone = ResNet34_vd(output_stride=32) + backbone = HRNet_W18() x = paddle.randint(0, 256, (1, 3, 512, 512)).astype('float32') inputs = {} inputs['img'] = x From 065ddebffd8f59242501118227387108bd3ebe45 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 15 Dec 2021 17:36:29 +0800 Subject: [PATCH 193/210] add RandomSharpen RandomNoise RandomReJpeg transforms --- contrib/Matting/transforms.py | 80 ++++++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 2 deletions(-) diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index 0bc61228dd..d4501ae25d 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import random +import string import cv2 import numpy as np from paddleseg.transforms import functional from paddleseg.cvlibs import manager +from paddleseg.utils import seg_env from PIL import Image @@ -704,8 +707,79 @@ def __call__(self, data): return data +class RandomSharpen: + def __init__(self, prob=0.1): + if prob < 0: + self.prob = 0 + elif prob > 1: + self.prob = 1 + else: + self.prob = prob + + def __call__(self, data): + if np.random.rand() > self.prob: + return data + + radius = np.random.choice([0, 3, 5, 7, 9]) + w = np.random.uniform(0.1, 0.5) + blur_img = cv2.GaussianBlur(data['img'], (radius, radius), 5) + data['img'] = cv2.addWeighted(data['img'], 1 + w, blur_img, -w, 0) + for key in data.get('gt_fields', []): + if key == 'trimap' or key == 'alpha': + continue + blur_img = cv2.GaussianBlur(data[key], (0, 0), 5) + data[key] = cv2.addWeighted(data[key], 1.5, blur_img, -0.5, 0) + + return data + + +class RandomNoise: + def __init__(self, prob=0.1): + if prob < 0: + self.prob = 0 + elif prob > 1: + self.prob = 1 + else: + self.prob = prob + + def __call__(self, data): + if np.random.rand() > self.prob: + return data + mean = np.random.uniform(0, 0.04) + var = np.random.uniform(0, 0.001) + noise = np.random.normal(mean, var**0.5, data['img'].shape) * 255 + data['img'] = data['img'] + noise + data['img'] = np.clip(data['img'], 0, 255) + + return data + + +class RandomReJpeg: + def __init__(self, prob=0.1): + if prob < 0: + self.prob = 0 + elif prob > 1: + self.prob = 1 + else: + self.prob = prob + + def __call__(self, data): + if np.random.rand() > self.prob: + return data + q = np.random.randint(70, 95) + img = data['img'].astype('uint8') + + # Ensure no conflicts between processes + tmp_name = str(os.getpid()) + '.jpg' + tmp_name = os.path.join(seg_env.TMP_HOME, tmp_name) + cv2.imwrite(tmp_name, img, [int(cv2.IMWRITE_JPEG_QUALITY), q]) + data['img'] = cv2.imread(tmp_name) + + return data + + if __name__ == "__main__": - transforms = [RandomResize(size=(512, 512), scale=None)] + transforms = [RandomReJpeg(prob=1)] transforms = Compose(transforms) fg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/Matting/data/matting/human_matting/Distinctions-646/train/fg/13(2).png' alpha_path = fg_path.replace('fg', 'alpha') 
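RandomReJpeg above simulates JPEG re-compression artifacts by writing a per-process temporary file under `seg_env.TMP_HOME` and reading it back. The same augmentation can be done fully in memory; a minimal sketch of that alternative (illustrative only, not part of this patch) using OpenCV's `imencode`/`imdecode`:

```python
import cv2
import numpy as np


def rejpeg_in_memory(img, quality=80):
    """Recompress an image in memory to simulate JPEG artifacts."""
    # Encode to a JPEG byte buffer at the given quality, then decode it back,
    # so no temporary file (and no per-process file naming) is needed.
    ok, buf = cv2.imencode('.jpg', img.astype(np.uint8),
                           [int(cv2.IMWRITE_JPEG_QUALITY), int(quality)])
    if not ok:
        raise RuntimeError('JPEG encoding failed')
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)
```

The name `rejpeg_in_memory` and the default quality are illustrative; the temp-file variant in the patch produces the same artifacts, at the cost of filesystem I/O and the per-process naming workaround.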
@@ -727,8 +801,10 @@ def __call__(self, data):
         print(data[key].shape)
     # import pdb
     # pdb.set_trace()
+
+    cv2.imwrite('ori.png', data['img'])
     data = transforms(data)
     print(data['img'].dtype, data['img'].shape)
     for key in data['gt_fields']:
         print(data[key].shape)
-# cv2.imwrite('distort_img.jpg', data['img'].transpose([1, 2, 0]))
+    cv2.imwrite('rejpeg.png', data['img'].transpose([1, 2, 0]))

From 0ca02db9e43558ee53b8036c8ddec115025cd4e7 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Fri, 17 Dec 2021 14:45:33 +0800
Subject: [PATCH 194/210] add RSSN

---
 contrib/Matting/dataset/matting_dataset.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/contrib/Matting/dataset/matting_dataset.py b/contrib/Matting/dataset/matting_dataset.py
index e0cef13899..ba10c91c41 100644
--- a/contrib/Matting/dataset/matting_dataset.py
+++ b/contrib/Matting/dataset/matting_dataset.py
@@ -190,6 +190,12 @@ def __len__(self):
         return len(self.fg_bg_list)

     def composite(self, fg, alpha, ori_bg):
+        if np.random.rand() < 0.5:
+            fg = cv2.fastNlMeansDenoisingColored(fg, None, 3, 3, 7, 21)
+            ori_bg = cv2.fastNlMeansDenoisingColored(ori_bg, None, 3, 3, 7, 21)
+        if np.random.rand() < 0.5:
+            radius = np.random.choice([19, 29, 39, 49, 59])
+            ori_bg = cv2.GaussianBlur(ori_bg, (radius, radius), 0, 0)
         fg_h, fg_w = fg.shape[:2]
         ori_bg_h, ori_bg_w = ori_bg.shape[:2]

From 6ac700444396737091844531704bafade0d0e221 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Wed, 22 Dec 2021 15:09:50 +0800
Subject: [PATCH 195/210] add `if_rssn` param

---
 contrib/Matting/dataset/matting_dataset.py | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/contrib/Matting/dataset/matting_dataset.py b/contrib/Matting/dataset/matting_dataset.py
index ba10c91c41..02ebaff1d5 100644
--- a/contrib/Matting/dataset/matting_dataset.py
+++ b/contrib/Matting/dataset/matting_dataset.py
@@ -57,6 +57,7 @@ class MattingDataset(paddle.io.Dataset):
         get_trimap (bool, optional): Whether to get the trimap. Default: True.
         separator (str, optional): The separator of train_file or val_file. If a file name contains ' ', '|' may be a better choice. Default: ' '.
         key_del (tuple|list, optional): Keys that are not needed and will be deleted to accelerate the data reader. Default: None.
+        if_rssn (bool, optional): Whether to use RSSN when compositing images, including denoising and blurring. Default: False.
""" def __init__(self, @@ -67,7 +68,8 @@ def __init__(self, val_file=None, get_trimap=True, separator=' ', - key_del=None): + key_del=None, + if_rssn=False): super().__init__() self.dataset_root = dataset_root self.transforms = T.Compose(transforms) @@ -75,6 +77,7 @@ def __init__(self, self.get_trimap = get_trimap self.separator = separator self.key_del = key_del + self.if_rssn = if_rssn # check file if mode == 'train' or mode == 'trainval': @@ -190,12 +193,14 @@ def __len__(self): return len(self.fg_bg_list) def composite(self, fg, alpha, ori_bg): - if np.random.rand() < 0.5: - fg = cv2.fastNlMeansDenoisingColored(fg, None, 3, 3, 7, 21) - ori_bg = cv2.fastNlMeansDenoisingColored(ori_bg, None, 3, 3, 7, 21) - if np.random.rand() < 0.5: - radius = np.random.choice([19, 29, 39, 49, 59]) - ori_bg = cv2.GaussianBlur(ori_bg, (radius, radius), 0, 0) + if self.if_rssn: + if np.random.rand() < 0.5: + fg = cv2.fastNlMeansDenoisingColored(fg, None, 3, 3, 7, 21) + ori_bg = cv2.fastNlMeansDenoisingColored( + ori_bg, None, 3, 3, 7, 21) + if np.random.rand() < 0.5: + radius = np.random.choice([19, 29, 39, 49, 59]) + ori_bg = cv2.GaussianBlur(ori_bg, (radius, radius), 0, 0) fg_h, fg_w = fg.shape[:2] ori_bg_h, ori_bg_w = ori_bg.shape[:2] From b31e2e7f840b6a53f281b4495789022c38aaeec0 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Thu, 23 Dec 2021 14:26:12 +0800 Subject: [PATCH 196/210] update composition loss --- contrib/Matting/model/ziyan_gate.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/Matting/model/ziyan_gate.py b/contrib/Matting/model/ziyan_gate.py index 6d0b63f7bd..935b6d1354 100644 --- a/contrib/Matting/model/ziyan_gate.py +++ b/contrib/Matting/model/ziyan_gate.py @@ -229,7 +229,9 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None): # composion loss comp_pred = logit_dict['fusion'] * label_dict['fg'] + ( 1 - logit_dict['fusion']) * label_dict['bg'] - loss_cm = loss_cm + loss_cm_func[1](comp_pred, label_dict['img']) + comp_gt = label_dict['alpha'] * label_dict['fg'] + ( + 1 - label_dict['alpha']) * label_dict['bg'] + loss_cm = loss_cm + loss_cm_func[1](comp_pred, comp_gt) loss['cm'] = loss_cm loss['all'] = 0.25 * loss_glance + 0.25 * loss_focus + 0.25 * loss['cm'] From 8ab26726e2c4572f47f246f5ca8881e96260dd51 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Fri, 24 Dec 2021 11:42:57 +0800 Subject: [PATCH 197/210] add gradient loss and fix model loss_func_dict generation --- contrib/Matting/model/__init__.py | 2 +- contrib/Matting/model/loss.py | 67 +++++++++++++++++++++++++++++ contrib/Matting/model/ziyan_gate.py | 52 ++++++++++++++++------ 3 files changed, 106 insertions(+), 15 deletions(-) diff --git a/contrib/Matting/model/__init__.py b/contrib/Matting/model/__init__.py index 6bac17f552..f89af61773 100644 --- a/contrib/Matting/model/__init__.py +++ b/contrib/Matting/model/__init__.py @@ -17,7 +17,7 @@ from .mobilenet_v2 import * from .hrnet import * from .dim import DIM -from .loss import MRSD +from .loss import MRSD, GradientLoss from .modnet import MODNet from .ziyan import ZiYanAM from .ziyan_gate import ZiYanGate diff --git a/contrib/Matting/model/loss.py b/contrib/Matting/model/loss.py index 684c35ba2e..009b5de3a2 100644 --- a/contrib/Matting/model/loss.py +++ b/contrib/Matting/model/loss.py @@ -17,6 +17,7 @@ import paddle.nn.functional as F from paddleseg.cvlibs import manager +import cv2 @manager.LOSSES.add_component @@ -49,3 +50,69 @@ def forward(self, logit, label, mask=None): loss = loss.mean() return loss + +@manager.LOSSES.add_component 
+class GradientLoss(nn.Layer):
+    def __init__(self, eps=1e-6):
+        super().__init__()
+        self.kernel_x, self.kernel_y = self.sobel_kernel()
+        self.eps = eps
+
+    def forward(self, logit, label, mask=None):
+        if len(label.shape) == 3:
+            label = label.unsqueeze(1)
+        if mask is not None:
+            if len(mask.shape) == 3:
+                mask = mask.unsqueeze(1)
+            logit = logit * mask
+            label = label * mask
+            loss = paddle.sum(F.l1_loss(self.sobel(logit), self.sobel(label), 'none')) / (mask.sum() + self.eps)
+        else:
+            loss = F.l1_loss(self.sobel(logit), self.sobel(label), 'mean')
+
+        return loss
+
+    def sobel(self, input):
+        """Compute the gradient with a Sobel filter and return its magnitude."""
+        if len(input.shape) != 4:
+            raise ValueError("Invalid input shape, expected NCHW, but got {}".format(input.shape))
+
+        n, c, h, w = input.shape
+
+        input_pad = paddle.reshape(input, (n * c, 1, h, w))
+        input_pad = F.pad(input_pad, pad=[1, 1, 1, 1], mode='replicate')
+
+        grad_x = F.conv2d(input_pad, self.kernel_x, padding=0)
+        grad_y = F.conv2d(input_pad, self.kernel_y, padding=0)
+
+        mag = paddle.sqrt(grad_x * grad_x + grad_y * grad_y + self.eps)
+        mag = paddle.reshape(mag, (n, c, h, w))
+
+        return mag
+
+    def sobel_kernel(self):
+        kernel_x = paddle.to_tensor([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]]).astype('float32')
+        kernel_x = kernel_x / kernel_x.abs().sum()
+        kernel_y = kernel_x.transpose([1, 0])
+        kernel_x = kernel_x.unsqueeze(0).unsqueeze(0)
+        kernel_y = kernel_y.unsqueeze(0).unsqueeze(0)
+        kernel_x.stop_gradient = True
+        kernel_y.stop_gradient = True
+        return kernel_x, kernel_y
+
+
+if __name__ == "__main__":
+    gradiend_loss = GradientLoss()
+    import cv2
+    import numpy as np
+    alpha = cv2.imread('/home/aistudio/data/Distinctions-646/val/alpha/test_0.png', 0)
+    alpha = alpha / 255.
+    h, w = alpha.shape
+    pred = np.clip(np.random.rand(h, w) / 5. + alpha, 0., 1.)
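+    # A noisy copy of the ground-truth alpha acts as the fake prediction for
+    # this smoke test; both maps are lifted to NCHW tensors before the loss.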
+ alpha = paddle.to_tensor(alpha) + pred = paddle.to_tensor(pred) + alpha = alpha.unsqueeze(0).unsqueeze(0).astype('float32') + pred = pred.unsqueeze(0).unsqueeze(0).astype('float32') + print(alpha.shape, pred.shape) + loss = gradiend_loss(pred, alpha) + print(loss) \ No newline at end of file diff --git a/contrib/Matting/model/ziyan_gate.py b/contrib/Matting/model/ziyan_gate.py index 935b6d1354..06e5317753 100644 --- a/contrib/Matting/model/ziyan_gate.py +++ b/contrib/Matting/model/ziyan_gate.py @@ -23,7 +23,7 @@ from paddleseg import utils from paddleseg.cvlibs import manager -from model import MRSD +from model import MRSD, GradientLoss from model import resnet_vd @@ -36,10 +36,14 @@ def conv_up_psp(in_channels, out_channels, up_sample): @manager.MODELS.add_component class ZiYanGate(nn.Layer): - def __init__(self, backbone, pretrained=None): + def __init__(self, backbone, pretrained=None, loss_func_dict=None): super().__init__() self.backbone = backbone self.pretrained = pretrained + if loss_func_dict is not None: + self.loss_func_dict = loss_func_dict + else: + self.loss_func_dict = self.get_loss_func_dict() self.backbone_channels = backbone.feat_channels ###################### @@ -194,15 +198,19 @@ def forward(self, inputs): return logit_dict else: return fusion_sigmoid + + def get_loss_func_dict(self): + loss_func_dict = defaultdict(list) + loss_func_dict['glance'].append(nn.NLLLoss()) + loss_func_dict['focus'].append(MRSD()) + loss_func_dict['focus'].append(GradientLoss()) + loss_func_dict['cm'].append(MRSD()) + loss_func_dict['cm'].append(MRSD()) + loss_func_dict['cm'].append(GradientLoss()) + return loss_func_dict def loss(self, logit_dict, label_dict, loss_func_dict=None): - if loss_func_dict is None: - loss_func_dict = defaultdict(list) - loss_func_dict['glance'].append(nn.NLLLoss()) - loss_func_dict['focus'].append(MRSD()) - loss_func_dict['cm'].append(MRSD()) - loss_func_dict['cm'].append(MRSD()) - + loss_func_dict = self.loss_func_dict loss = {} # glance loss computation @@ -217,24 +225,40 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None): # TODO glance label 的验证 # focus loss computation - loss_focus = loss_func_dict['focus'][0](logit_dict['focus'], + transparent = label_dict['trimap'] == 128 + focus_alpha_loss = loss_func_dict['focus'][0](logit_dict['focus'], label_dict['alpha'], - label_dict['trimap'] == 128) + transparent) + # gradient loss + focus_gradient_loss = loss_func_dict['focus'][1](logit_dict['focus'], + label_dict['alpha'], + transparent) + loss_focus = focus_alpha_loss + focus_gradient_loss loss['focus'] = loss_focus + loss['focus_alpha'] = focus_alpha_loss + loss['focus_gradient'] = focus_gradient_loss # collaborative matting loss loss_cm_func = loss_func_dict['cm'] # fusion_sigmoid loss - loss_cm = loss_cm_func[0](logit_dict['fusion'], label_dict['alpha']) + cm_alpha_loss = loss_cm_func[0](logit_dict['fusion'], label_dict['alpha']) # composion loss comp_pred = logit_dict['fusion'] * label_dict['fg'] + ( 1 - logit_dict['fusion']) * label_dict['bg'] comp_gt = label_dict['alpha'] * label_dict['fg'] + ( 1 - label_dict['alpha']) * label_dict['bg'] - loss_cm = loss_cm + loss_cm_func[1](comp_pred, comp_gt) + cm_composition_loss = loss_cm_func[1](comp_pred, comp_gt) + # grandient loss + cm_grad_loss = loss_cm_func[2](logit_dict['fusion'], label_dict['alpha']) + # cm loss + loss_cm = cm_alpha_loss + cm_composition_loss + cm_grad_loss loss['cm'] = loss_cm + loss['cm_alpha'] = cm_alpha_loss + loss['cm_composition'] = cm_composition_loss + loss['cm_gradient'] 
= cm_grad_loss + - loss['all'] = 0.25 * loss_glance + 0.25 * loss_focus + 0.25 * loss['cm'] + loss['all'] = 0.25 * loss_glance + 0.25 * loss_focus + 0.25 * loss_cm return loss From f2aaaa99a44c191c85ff943d03fa431a725fc1a5 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 28 Dec 2021 16:53:01 +0800 Subject: [PATCH 198/210] Resize add interp random --- contrib/Matting/transforms.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index d4501ae25d..6581874098 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -87,7 +87,7 @@ def __call__(self, data): @manager.TRANSFORMS.add_component class Resize: - def __init__(self, target_size=(512, 512)): + def __init__(self, target_size=(512, 512), random_interp=False): if isinstance(target_size, list) or isinstance(target_size, tuple): if len(target_size) != 2: raise ValueError( @@ -99,16 +99,23 @@ def __init__(self, target_size=(512, 512)): .format(type(target_size))) self.target_size = target_size + self.random_interp = random_interp + self.interps = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC] def __call__(self, data): + if self.random_interp: + interp = np.random.choice(self.interps) + else: + interp = cv2.INTER_LINEAR data['trans_info'].append(('resize', data['img'].shape[0:2])) - data['img'] = functional.resize(data['img'], self.target_size) + data['img'] = functional.resize(data['img'], self.target_size, interp) for key in data.get('gt_fields', []): if key == 'trimap': data[key] = functional.resize(data[key], self.target_size, cv2.INTER_NEAREST) else: - data[key] = functional.resize(data[key], self.target_size) + data[key] = functional.resize(data[key], self.target_size, + interp) return data @@ -779,11 +786,11 @@ def __call__(self, data): if __name__ == "__main__": - transforms = [RandomReJpeg(prob=1)] + transforms = [Resize((512, 512), random_interp=False)] transforms = Compose(transforms) - fg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/Matting/data/matting/human_matting/Distinctions-646/train/fg/13(2).png' + fg_path = 'data/matting/Distinctions-646/val/fg/test_0.png' alpha_path = fg_path.replace('fg', 'alpha') - bg_path = '/ssd1/home/chenguowei01/github/PaddleSeg/contrib/Matting/data/matting/human_matting/bg/unsplash_bg/attic/photo-1443884590026-2e4d21aee71c?crop=entropy&cs=tinysrgb&fit=max&fm=jpg&ixid=MnwxMjA3fDB8MXxzZWFyY2h8Nzh8fGF0dGljfGVufDB8fHx8MTYyOTY4MDcxNQ&ixlib=rb-1.2.1&q=80&w=400.jpg' + bg_path = 'data/matting/Distinctions-646/bg/VOC2012/2012_004331.jpg' data = {} data['trans_info'] = [] data['fg'] = cv2.imread(fg_path) @@ -807,4 +814,5 @@ def __call__(self, data): print(data['img'].dtype, data['img'].shape) for key in data['gt_fields']: print(data[key].shape) - cv2.imwrite('rejpeg.png', data['img'].transpose([1, 2, 0])) + print('sum after resize: ', np.sum(data['img'])) + cv2.imwrite('resize.png', data['img'].transpose([1, 2, 0])) From cc8c581b1c84d6efe89d40aa64d278dddff0d2c7 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Fri, 7 Jan 2022 17:42:18 +0800 Subject: [PATCH 199/210] add grad and conn metric, save best metic to best.txt --- contrib/Matting/core/train.py | 37 +++++-- contrib/Matting/core/val.py | 47 ++++++--- contrib/Matting/metric.py | 189 +++++++++++++++++++++------------- 3 files changed, 180 insertions(+), 93 deletions(-) diff --git a/contrib/Matting/core/train.py b/contrib/Matting/core/train.py index dabe0254c8..c4a730981d 100644 --- 
a/contrib/Matting/core/train.py +++ b/contrib/Matting/core/train.py @@ -15,6 +15,7 @@ import os import time from collections import deque, defaultdict +import pickle import shutil import numpy as np @@ -53,6 +54,18 @@ def visual_in_traning(log_writer, vis_dict, step): log_writer.add_image(tag=key, img=value, step=step) +def get_best(best_file, resume_model=None): + '''Get best sad, mse, grad, conn adn iter from file''' + if os.path.exists(best_file) and (resume_model is not None): + with open(best_file, 'rb') as f: + best_sad, best_sad_mse, best_sad_grad, best_sad_conn, best_iter = pickle.load( + f) + else: + best_sad = best_sad_mse = best_sad_grad = best_sad_conn = np.inf + best_iter = -1 + return best_sad, best_sad_mse, best_sad_grad, best_sad_conn, best_iter + + def train(model, train_dataset, val_dataset=None, @@ -125,10 +138,11 @@ def train(model, from visualdl import LogWriter log_writer = LogWriter(save_dir) + best_sad, best_sad_mse, best_sad_grad, best_sad_conn, best_iter = get_best( + os.path.join(save_dir, 'best_model', 'best_sad.txt'), + resume_model=resume_model) avg_loss = defaultdict(float) iters_per_epoch = len(batch_sampler) - best_sad = np.inf - best_model_iter = -1 reader_cost_averager = TimeAverager() batch_cost_averager = TimeAverager() save_models = deque() @@ -236,7 +250,7 @@ def train(model, val_dataset is not None) and local_rank == 0 and iter >= eval_begin_iters: num_workers = 1 if num_workers > 0 else 0 - sad, mse = evaluate( + sad, mse, grad, conn = evaluate( model, val_dataset, num_workers=0, @@ -249,18 +263,29 @@ def train(model, if val_dataset is not None and iter >= eval_begin_iters: if sad < best_sad: best_sad = sad - best_model_iter = iter + best_iter = iter + best_sad_mse = mse + best_sad_grad = grad + best_sad_conn = conn best_model_dir = os.path.join(save_dir, "best_model") paddle.save( model.state_dict(), os.path.join(best_model_dir, 'model.pdparams')) + with open( + os.path.join(best_model_dir, 'best_sad.txt'), + 'wb') as f: + pickle.dump((best_sad, best_sad_mse, best_sad_grad, + best_sad_conn, best_iter), f) logger.info( - '[EVAL] The model with the best validation sad ({:.4f}) was saved at iter {}.' - .format(best_sad, best_model_iter)) + '[EVAL] The model with the best validation SAD ({:.4f}) was saved at iter {}. 
While MSE: {:.4f}, Grad: {:.4f}, Conn: {:.4f}' + .format(best_sad, best_iter, best_sad_mse, + best_sad_grad, best_sad_conn)) if use_vdl: log_writer.add_scalar('Evaluate/SAD', sad, iter) log_writer.add_scalar('Evaluate/MSE', mse, iter) + log_writer.add_scalar('Evaluate/Grad', grad, iter) + log_writer.add_scalar('Evaluate/Conn', conn, iter) batch_start = time.time() diff --git a/contrib/Matting/core/val.py b/contrib/Matting/core/val.py index 96c02f5454..794fc8bc8b 100644 --- a/contrib/Matting/core/val.py +++ b/contrib/Matting/core/val.py @@ -67,16 +67,6 @@ def evaluate(model, ): paddle.distributed.init_parallel_env() - -# batch_sampler = paddle.io.DistributedBatchSampler( -# eval_dataset, batch_size=1, shuffle=False, drop_last=False) -# loader = paddle.io.DataLoader( -# eval_dataset, -# batch_sampler=batch_sampler, -# num_workers=num_workers, -# return_list=True, -# ) -# eval not distributed loader = paddle.io.DataLoader( eval_dataset, batch_size=1, @@ -88,6 +78,8 @@ def evaluate(model, total_iters = len(loader) mse_metric = metric.MSE() sad_metric = metric.SAD() + grad_metric = metric.Grad() + conn_metric = metric.Conn() if print_detail: logger.info( @@ -98,6 +90,8 @@ def evaluate(model, batch_cost_averager = TimeAverager() batch_start = time.time() + img_name = '' + i = 0 with paddle.no_grad(): for iter, data in enumerate(loader): reader_cost_averager.record(time.time() - batch_start) @@ -111,8 +105,10 @@ def evaluate(model, if trimap is not None: trimap = trimap.numpy().astype('uint8') alpha_pred = np.round(alpha_pred * 255) - mse_metric.update(alpha_pred, alpha_gt, trimap) - sad_metric.update(alpha_pred, alpha_gt, trimap) + mse = mse_metric.update(alpha_pred, alpha_gt, trimap) + sad = sad_metric.update(alpha_pred, alpha_gt, trimap) + grad = grad_metric.update(alpha_pred, alpha_gt, trimap) + conn = conn_metric.update(alpha_pred, alpha_gt, trimap) if save_results: alpha_pred_one = alpha_pred[0].squeeze() @@ -120,8 +116,19 @@ def evaluate(model, trimap = trimap.squeeze().astype('uint8') alpha_pred_one[trimap == 255] = 255 alpha_pred_one[trimap == 0] = 0 - save_alpha_pred(alpha_pred_one, - os.path.join(save_dir, data['img_name'][0])) + + save_name = data['img_name'][0] + name, ext = os.path.splitext(save_name) + if save_name == img_name: + save_name = name + '_' + str(i) + ext + i += 1 + else: + img_name = save_name + save_name = name + '_' + str(i) + ext + i = 1 + + save_alpha_pred(alpha_pred_one, os.path.join( + save_dir, save_name)) batch_cost_averager.record( time.time() - batch_start, num_samples=len(alpha_gt)) @@ -129,7 +136,9 @@ def evaluate(model, reader_cost = reader_cost_averager.get_average() if local_rank == 0 and print_detail: - progbar_val.update(iter + 1, [('batch_cost', batch_cost), + progbar_val.update(iter + 1, [('SAD', sad), ('MSE', mse), + ('Grad', grad), ('Conn', conn), + ('batch_cost', batch_cost), ('reader cost', reader_cost)]) reader_cost_averager.reset() @@ -138,6 +147,10 @@ def evaluate(model, mse = mse_metric.evaluate() sad = sad_metric.evaluate() + grad = grad_metric.evaluate() + conn = conn_metric.evaluate() - logger.info('[EVAL] SAD: {:.4f}, MSE: {:.4f}'.format(sad, mse)) - return sad, mse + logger.info( + '[EVAL] SAD: {:.4f}, MSE: {:.4f}, Grad: {:.4f}, Conn: {:.4f}'.format( + sad, mse, grad, conn)) + return sad, mse, grad, conn diff --git a/contrib/Matting/metric.py b/contrib/Matting/metric.py index 5e021cd405..b42ef15c93 100644 --- a/contrib/Matting/metric.py +++ b/contrib/Matting/metric.py @@ -18,9 +18,10 @@ # So do not report results calculated by these 
functions in your paper.
 # Evaluate your inference with the MATLAB file `DIM_evaluation_code/evaluate.m`.

+import cv2
 import numpy as np
-import scipy.ndimage
-import numpy as np
+from scipy.ndimage.filters import convolve
+from scipy.special import gamma
 from skimage.measure import label


@@ -49,6 +50,11 @@ def update(self, pred, gt, trimap=None):
             'The shape of `pred`, `gt` and `trimap` should be equal. '
             'but they are {}, {} and {}'.format(pred.shape, gt.shape,
                                                 trimap.shape))
+        if not ((pred[trimap == 0] == 0).all() and
+                (pred[trimap == 255] == 255).all()):
+            raise ValueError(
+                'pred should be masked by trimap before evaluation')
+
         mask = trimap == 128
         pixels = float(mask.sum())
         pred = pred / 255.
@@ -59,6 +65,8 @@
         self.mse_diffs += mse_diff
         self.count += 1

+        return mse_diff
+
     def evaluate(self):
         mse = self.mse_diffs / self.count if self.count > 0 else 0
         return mse
@@ -89,17 +97,23 @@
             'The shape of `pred`, `gt` and `trimap` should be equal. '
             'but they are {}, {} and {}'.format(pred.shape, gt.shape,
                                                 trimap.shape))
+        if not ((pred[trimap == 0] == 0).all() and
+                (pred[trimap == 255] == 255).all()):
+            raise ValueError(
+                'pred should be masked by trimap before evaluation')
         mask = trimap == 128
         pred = pred / 255.
         gt = gt / 255.
         diff = (pred - gt) * mask
         sad_diff = (np.abs(diff)).sum()
-        sad_diff /= 1000
+        sad_diff /= 1000
         self.sad_diffs += sad_diff
         self.count += 1

+        return sad_diff
+
     def evaluate(self):
         sad = self.sad_diffs / self.count if self.count > 0 else 0
         return sad
@@ -109,41 +123,48 @@ class Grad():
     """
     Only calculate the unknown region if trimap provided.

-    Refer to: https://github.com/yucornetto/MGMatting/blob/main/code-base/utils/evaluate.py#L46
+    Refer to: https://github.com/open-mmlab/mmediting/blob/master/mmedit/core/evaluation/metrics.py
     """

     def __init__(self):
         self.grad_diffs = 0
         self.count = 0

-    def gauss(self, x, sigma):
-        y = np.exp(-x**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi))
-        return y
-
-    def dgauss(self, x, sigma):
-        y = -x * self.gauss(x, sigma) / (sigma**2)
-        return y
-
-    def gaussgradient(self, im, sigma):
-        epsilon = 1e-2
-        halfsize = np.ceil(sigma * np.sqrt(
-            -2 * np.log(np.sqrt(2 * np.pi) * sigma * epsilon))).astype(np.int32)
-        size = 2 * halfsize + 1
-        hx = np.zeros((size, size))
-        for i in range(0, size):
-            for j in range(0, size):
-                u = [i - halfsize, j - halfsize]
-                hx[i, j] = self.gauss(u[0], sigma) * self.dgauss(u[1], sigma)
-
-        hx = hx / np.sqrt(np.sum(np.abs(hx) * np.abs(hx)))
-        hy = hx.transpose()
-
-        gx = scipy.ndimage.convolve(im, hx, mode='nearest')
-        gy = scipy.ndimage.convolve(im, hy, mode='nearest')
-
-        return gx, gy
-
-    def update(self, pred, gt, trimap=None):
+    def gaussian(self, x, sigma):
+        return np.exp(-x**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi))
+
+    def dgaussian(self, x, sigma):
+        return -x * self.gaussian(x, sigma) / sigma**2
+
+    def gauss_filter(self, sigma, epsilon=1e-2):
+        half_size = np.ceil(
+            sigma * np.sqrt(-2 * np.log(np.sqrt(2 * np.pi) * sigma * epsilon)))
+        size = int(2 * half_size + 1)
+
+        # create filter in x axis
+        filter_x = np.zeros((size, size))
+        for i in range(size):
+            for j in range(size):
+                filter_x[i, j] = self.gaussian(i - half_size,
+                                               sigma) * self.dgaussian(
+                                                   j - half_size, sigma)
+
+        # normalize filter
+        norm = np.sqrt((filter_x**2).sum())
+        filter_x = filter_x / norm
+        filter_y = np.transpose(filter_x)
+
+        return filter_x, filter_y
+
+    def gauss_gradient(self, img, sigma):
+        filter_x, filter_y = self.gauss_filter(sigma)
+        img_filtered_x = cv2.filter2D(
+            img, -1, filter_x, borderType=cv2.BORDER_REPLICATE)
+        img_filtered_y = cv2.filter2D(
+            img, -1, filter_y, borderType=cv2.BORDER_REPLICATE)
+        return np.sqrt(img_filtered_x**2 + img_filtered_y**2)
+
+    def update(self, pred, gt, trimap=None, sigma=1.4):
         """
         update metric.

@@ -151,6 +172,7 @@ def update(self, pred, gt, trimap=None):
             pred (np.ndarray): The value range is [0., 1.].
             gt (np.ndarray): The value range is [0, 255].
             trimap (np.ndarray, optional): The value is in {0, 128, 255}. Default: None.
+            sigma (float, optional): Standard deviation of the Gaussian kernel. Default: 1.4.
         """
         if trimap is None:
             trimap = np.ones_like(gt) * 128
@@ -159,22 +181,31 @@
             'The shape of `pred`, `gt` and `trimap` should be equal. '
             'but they are {}, {} and {}'.format(pred.shape, gt.shape,
                                                 trimap.shape))
+        if not ((pred[trimap == 0] == 0).all() and
+                (pred[trimap == 255] == 255).all()):
+            raise ValueError(
+                'pred should be masked by trimap before evaluation')

-        mask = trimap == 128
-        gt = gt / 255.
-
-        pred_x, pred_y = self.gaussgradient(pred, 1.4)
-        gt_x, gt_y = self.gaussgradient(gt, 1.4)
+        gt = gt.squeeze()
+        pred = pred.squeeze()
+        gt = gt.astype(np.float64)
+        pred = pred.astype(np.float64)
+        gt_normed = np.zeros_like(gt)
+        pred_normed = np.zeros_like(pred)
+        cv2.normalize(gt, gt_normed, 1., 0., cv2.NORM_MINMAX)
+        cv2.normalize(pred, pred_normed, 1., 0., cv2.NORM_MINMAX)

-        pred_amp = np.sqrt(pred_x**2 + pred_y**2)
-        gt_amp = np.sqrt(gt_x**2 + gt_y**2)
+        gt_grad = self.gauss_gradient(gt_normed, sigma).astype(np.float32)
+        pred_grad = self.gauss_gradient(pred_normed, sigma).astype(np.float32)

-        error_map = (pred_amp - gt_amp)**2
-        diff = np.sum(error_map[mask])
+        grad_diff = ((gt_grad - pred_grad)**2 * (trimap == 128)).sum()

-        self.grad_diffs += diff / 1000.
+        grad_diff /= 1000
+        self.grad_diffs += grad_diff
         self.count += 1

+        return grad_diff
+
     def evaluate(self):
         grad = self.grad_diffs / self.count if self.count > 0 else 0
         return grad
@@ -184,18 +215,13 @@ class Conn():
     """
     Only calculate the unknown region if trimap provided.

-    Refer to: https://github.com/yucornetto/MGMatting/blob/main/code-base/utils/evaluate.py#L69
+    Refer to: https://github.com/open-mmlab/mmediting/blob/master/mmedit/core/evaluation/metrics.py
     """

     def __init__(self):
         self.conn_diffs = 0
         self.count = 0

-    def getLargestCC(self, segmentation):
-        labels = label(segmentation, connectivity=1)
-        largestCC = labels == np.argmax(np.bincount(labels.flat))
-        return largestCC
-
     def update(self, pred, gt, trimap=None, step=0.1):
         """
         update metric.

@@ -204,6 +230,8 @@
             pred (np.ndarray): The value range is [0., 1.].
             gt (np.ndarray): The value range is [0, 255].
             trimap (np.ndarray, optional): The value is in {0, 128, 255}. Default: None.
+            step (float, optional): Step of the threshold when computing the intersection between
+                `gt` and `pred`. Default: 0.1.
         """
         if trimap is None:
             trimap = np.ones_like(gt) * 128
@@ -212,33 +240,54 @@
             'The shape of `pred`, `gt` and `trimap` should be equal. '
             'but they are {}, {} and {}'.format(pred.shape, gt.shape,
                                                 trimap.shape))
+        if not ((pred[trimap == 0] == 0).all() and
+                (pred[trimap == 255] == 255).all()):
+            raise ValueError(
+                'pred should be masked by trimap before evaluation')

-        mask = trimap == 128
-        gt = gt / 255.
-        h, w = pred.shape
+        gt = gt.squeeze()
+        pred = pred.squeeze()
+        gt = gt.astype(np.float32) / 255
+        pred = pred.astype(np.float32) / 255

-        thresh_steps = list(np.arange(0, 1 + step, step))
-        l_map = np.ones_like(pred, dtype=np.float) * -1
+        thresh_steps = np.arange(0, 1 + step, step)
+        round_down_map = -np.ones_like(gt)
         for i in range(1, len(thresh_steps)):
-            pred_alpha_thresh = (pred >= thresh_steps[i]).astype(np.int)
-            gt_alpha_thresh = (gt >= thresh_steps[i]).astype(np.int)
-
-            omega = self.getLargestCC(
-                pred_alpha_thresh * gt_alpha_thresh).astype(np.int)
-            flag = ((l_map == -1) & (omega == 0)).astype(np.int)
-            l_map[flag == 1] = thresh_steps[i - 1]
-
-        l_map[l_map == -1] = 1
-
-        pred_d = pred - l_map
-        gt_d = gt - l_map
-        pred_phi = 1 - pred_d * (pred_d >= 0.15).astype(np.int)
-        gt_phi = 1 - gt_d * (gt_d >= 0.15).astype(np.int)
-        diff = np.sum(np.abs(pred_phi - gt_phi)[mask])
-
-        self.conn_diffs += diff / 1000.
+            gt_thresh = gt >= thresh_steps[i]
+            pred_thresh = pred >= thresh_steps[i]
+            intersection = (gt_thresh & pred_thresh).astype(np.uint8)
+
+            # connected components
+            _, output, stats, _ = cv2.connectedComponentsWithStats(
+                intersection, connectivity=4)
+            # start from 1 in dim 0 to exclude background
+            size = stats[1:, -1]
+
+            # largest connected component of the intersection
+            omega = np.zeros_like(gt)
+            if len(size) != 0:
+                max_id = np.argmax(size)
+                # plus one to include background
+                omega[output == max_id + 1] = 1
+
+            mask = (round_down_map == -1) & (omega == 0)
+            round_down_map[mask] = thresh_steps[i - 1]
+        round_down_map[round_down_map == -1] = 1
+
+        gt_diff = gt - round_down_map
+        pred_diff = pred - round_down_map
+        # only calculate difference larger than or equal to 0.15
+        gt_phi = 1 - gt_diff * (gt_diff >= 0.15)
+        pred_phi = 1 - pred_diff * (pred_diff >= 0.15)
+
+        conn_diff = np.sum(np.abs(gt_phi - pred_phi) * (trimap == 128))
+
+        conn_diff /= 1000
+        self.conn_diffs += conn_diff
         self.count += 1

+        return conn_diff
+
     def evaluate(self):
         conn = self.conn_diffs / self.count if self.count > 0 else 0
         return conn
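
For reference, this is how the patched metric classes fit together: each `update()` call now returns the per-image value, and `evaluate()` averages over all images seen so far. A minimal usage sketch — the import path and file names are illustrative assumptions, not part of the patch:

```python
import cv2
from metrics import MSE, SAD, Grad, Conn  # hypothetical import path

mse, sad, grad, conn = MSE(), SAD(), Grad(), Conn()

# uint8 alpha mattes in [0, 255]; trimap values are {0, 128, 255}
pred = cv2.imread('pred_alpha.png', cv2.IMREAD_GRAYSCALE)
gt = cv2.imread('gt_alpha.png', cv2.IMREAD_GRAYSCALE)
trimap = cv2.imread('trimap.png', cv2.IMREAD_GRAYSCALE)

# pred must already be masked by the trimap (known foreground copied in as
# 255, known background as 0), otherwise update() raises ValueError
for metric in (mse, sad, grad, conn):
    metric.update(pred, gt, trimap)

print(mse.evaluate(), sad.evaluate(), grad.evaluate(), conn.evaluate())
```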
From a9073aa44e8812ae42674b862a4460e9cf116321 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Mon, 10 Jan 2022 11:09:43 +0800
Subject: [PATCH 200/210] add english doc

---
 benchmark/README.md | 29 +--
 benchmark/README_CN.md | 39 ++++
 .../human_matting_android_demo/README.md | 112 ++++++------
 .../human_matting_android_demo/README_CN.md | 141 +++++++++++++++
 contrib/PanopticDeepLab/README.md | 55 +++---
 contrib/PanopticDeepLab/README_CN.md | 144 +++++++++++++++
 docs/data_prepare.md | 94 +++++-----
 docs/data_prepare_cn.md | 171 ++++++++++++++++++
 docs/export/export/index.rst | 7 -
 docs/export/export/model_export.md | 46 -----
 docs/model_export.md | 67 ++++---
 docs/model_export_cn.md | 65 +++++++
 12 files changed, 745 insertions(+), 225 deletions(-)
 create mode 100644 benchmark/README_CN.md
 create mode 100644 contrib/Matting/deploy/human_matting_android_demo/README_CN.md
 create mode 100644 contrib/PanopticDeepLab/README_CN.md
 create mode 100644 docs/data_prepare_cn.md
 delete mode 100644 docs/export/export/index.rst
 delete mode 100644 docs/export/export/model_export.md
 create mode 100644 docs/model_export_cn.md

diff --git a/benchmark/README.md b/benchmark/README.md
index 6ce7939e74..e899711958 100644
--- a/benchmark/README.md
+++ b/benchmark/README.md
@@ -1,5 +1,10 @@
-# PaddleSeg 下benchmark模型执行说明
+English | [简体中文](README_CN.md)

+# Benchmark Introduction
+
+The content is as follows:
+
+```
├── README.md
├── configs
│   ├── cityscapes_30imgs.yml
│   ├── fastscnn.yml
│   ├── ocrnet_hrnetw48.yml
│   └── segformer_b0.yml
├── deeplabv3p.yml
├── hrnet.yml
├── hrnet48.yml
├── run_all.sh
├── run_benchmark.sh
├── run_fp16.sh
└── run_fp32.sh
-## 环境
-使用Docker配置Paddle的GPU环境。
+```
+
+## Environment
+Use Docker to configure the environment.
* docker image: paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7
* CUDA 10.1 + cudnn7
* paddle=2.1.2
* py=37
-## 测试步骤
-### 执行训练Benchmark测试
+
+## Test
+### Training Test
```
git clone https://github.com/PaddlePaddle/PaddleSeg.git
cd PaddleSeg
bash benchmark/run_all.sh
```
-### Profiling开关使用方式
-```bash
- # 调用train.py时加上该参数 --profiler_options="batch_range=[50, 60]; profile_path=model.profile"
-```
-
-
-
+### How to Enable Profiling
+ Add the following parameter when training:
+ `--profiler_options="batch_range=[50, 60]; profile_path=model.profile"`
diff --git a/benchmark/README_CN.md b/benchmark/README_CN.md
new file mode 100644
index 0000000000..c59d5ee776
--- /dev/null
+++ b/benchmark/README_CN.md
@@ -0,0 +1,39 @@
+简体中文 | [English](README.md)
+
+# Benchmark说明
+
+目录结构如下:
+```
+├── README.md
+├── configs
+│   ├── cityscapes_30imgs.yml
+│   ├── fastscnn.yml
+│   ├── ocrnet_hrnetw48.yml
+│   └── segformer_b0.yml
+├── deeplabv3p.yml
+├── hrnet.yml
+├── hrnet48.yml
+├── run_all.sh
+├── run_benchmark.sh
+├── run_fp16.sh
+└── run_fp32.sh
+```
+## 环境
+使用Docker配置环境。
+* docker image: paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7
+* CUDA 10.1 + cudnn7
+* paddle=2.1.2
+* py=37
+
+## 测试
+
+### 训练测试
+
+```
+git clone https://github.com/PaddlePaddle/PaddleSeg.git
+cd PaddleSeg
+bash benchmark/run_all.sh
+```
+### Profiling开关使用方式
+训练时添加如下参数
+ `--profiler_options="batch_range=[50, 60]; profile_path=model.profile"`
diff --git a/contrib/Matting/deploy/human_matting_android_demo/README.md b/contrib/Matting/deploy/human_matting_android_demo/README.md
index 7a37dc3a56..af958cb73f 100644
--- a/contrib/Matting/deploy/human_matting_android_demo/README.md
+++ b/contrib/Matting/deploy/human_matting_android_demo/README.md
@@ -1,67 +1,69 @@
+English | [简体中文](README_CN.md)
+
# Human Matting Android Demo
Based on the [MODNet](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib/Matting) algorithm from [PaddleSeg](https://github.com/paddlepaddle/paddleseg/tree/develop), this demo implements human matting on Android.

You can directly download and install the [apk](https://paddleseg.bj.bcebos.com/matting/models/deploy/app-debug.apk) from this example project to try it out.

## 1. Results
<div align="center"
      - +
      -

## 2. Android Demo Instructions

### 2.1 Requirements
* Android Studio 3.4;
* An Android phone;

### 2.2 Installation
* Open Android Studio. On the "Welcome to Android Studio" window, click "Open an existing Android Studio project". In the path selection window that is displayed, select the folder corresponding to this Android demo, then click the "Open" button in the lower right corner to import the project. The Lite prediction library required by the demo is downloaded automatically during the build process.
* Connect an Android phone via USB;
* After loading the project, click the Run->Run 'App' button on the menu bar, select the connected Android device in the "Select Deployment Target" window, and then click the "OK" button;

*Note: this Android demo is based on [Paddle-Lite](https://paddlelite.paddlepaddle.org.cn/); the Paddle-Lite version used is 2.8.0.*

### 2.3 Prediction
* In the human matting demo, a portrait image is loaded by default, and the CPU prediction result and prediction latency are shown below the image;
* In the human matting demo, you can also load test images from the album or camera by clicking the "Open local album" and "Open camera to take photos" buttons in the upper right corner, and then run prediction.

*Note: when taking a photo in the demo, the photo is compressed automatically. If you want to test the effect of the original photo, take a photo with the phone's camera app and open it from the album for prediction.*

## 3. Secondary Development
The inference library or the model can be updated as needed for secondary development. Updating the model involves two steps: model export and model conversion.

### 3.1 Update Inference Library
The [Paddle-Lite website](https://paddlelite.paddlepaddle.org.cn/) provides a precompiled version of the Android inference library; it can also be compiled from source by following the official website.

The Paddle-Lite inference library on Android mainly contains three files:

* PaddlePredictor.jar;
* arm64-v8a/libpaddle_lite_jni.so;
* armeabi-v7a/libpaddle_lite_jni.so;

The following introduces two ways to obtain them:

* Use a precompiled version of the inference library. For the latest precompiled files, see [release](https://github.com/PaddlePaddle/Paddle-Lite/releases/).
 This demo uses this [version](https://paddlelite-demo.bj.bcebos.com/libs/android/paddle_lite_libs_v2_8_0.tar.gz).

  After uncompressing the archive, PaddlePredictor.jar is at java/PaddlePredictor.jar;

  the arm64-v8a `.so` file is in java/libs/arm64-v8a;

  the armeabi-v7a `.so` file is in java/libs/armeabi-v7a;

* Manually compile the Paddle-Lite inference library.
For development environment preparation and compilation steps, refer to [Paddle-Lite source code compilation](https://paddle-lite.readthedocs.io/zh/release-v2.8/source_compile/compile_env.html).

Once the above files are ready, refer to the [java_api](https://paddle-lite.readthedocs.io/zh/release-v2.8/api_reference/java_api_doc.html) documentation to run inference on Android. For details on how to use the inference library, refer to the Update Inference Library section of [Paddle-Lite-Demo](https://github.com/PaddlePaddle/Paddle-Lite-Demo).

### 3.2 Model Export
This demo uses the MODNet model with an HRNet_W18 backbone to perform human matting. Please refer to the official [training tutorial](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib/Matting). Three models with different backbones are provided: MobileNetV2, ResNet50_vd and HRNet_W18. Balancing accuracy and speed, this Android demo uses HRNet_W18 as the backbone. The trained dynamic graph model can be downloaded directly from the official website for algorithm verification.

In order to run inference on Android phones, the dynamic graph model needs to be exported as a static graph model, with the image input size fixed at export time.

First, update the [PaddleSeg](https://github.com/paddlepaddle/paddleseg/tree/develop) repository and `cd` to the `PaddleSeg/contrib/Matting` directory. Then put the downloaded modnet-hrnet_w18.pdparams (training it yourself is also fine) in the current directory (`PaddleSeg/contrib/Matting`).
 After that, modify the config file `configs/modnet_mobilenetv2.yml` (note: although the hrnet18 model is used, its config file `modnet_hrnet_w18.yml` is based on `modnet_mobilenetv2.yml`), changing the val_dataset field as follows:

``` yml
val_dataset:
  type: MattingDataset
  dataset_root: data/PPM-100
  val_file: val.txt
  transforms:
    - type: LoadImages
    - type: ResizeByShort
      short_size: 256
    - type: ResizeToIntMult
      mult_int: 32
    - type: Normalize
  mode: val
  get_trimap: False
```

In the above modification, pay special attention to the short_size: 256 field, which directly determines the input size used at inference time. If this value is set too small, prediction accuracy suffers; if it is set too large, inference on the phone slows down (the phone may even crash due to performance limits). In practical testing, 256 works well for hrnet18.

After modifying the configuration file, run the following command to export the static graph:
``` shell
python export.py \
    --config configs/modnet/modnet_hrnet_w18.yml \
    --model_path modnet-hrnet_w18.pdparams \
    --save_dir output
```

After the conversion, the `output` folder will be generated in the current directory, and the files in this folder are the exported static graph files.

### 3.3 Model Conversion

#### 3.3.1 Model Conversion Tool
Once you have the static graph model and parameter files exported from PaddleSeg ready, you need to optimize the model using the opt tool provided with Paddle-Lite and convert it to the file format supported by Paddle-Lite.

First, install PaddleLite:

``` shell
pip install paddlelite==2.8.0
```

Then use the following Python script to convert:

``` python
# Import the Paddle-Lite library
from paddlelite.lite import *

# 1. Create an opt instance
opt = Opt()

# 2. Specify the static model path
opt.set_model_file('./output/model.pdmodel')
opt.set_param_file('./output/model.pdiparams')

# 3. Specify the conversion target: arm, x86, opencl or npu
opt.set_valid_places("arm")
# 4. Specify the model storage format: naive_buffer or protobuf
opt.set_model_type("naive_buffer")
# 5. Set the output model path
opt.set_optimize_out("./output/hrnet_w18")
# 6. Perform model optimization
opt.run()
```

After conversion, the `hrnet_w18.nb` file will be generated in the `output` directory.

#### 3.3.2 Update Model
Use the optimized `.nb` file to replace the file under `app/src/main/assets/image_matting/models/modnet` in the Android project.

Then change the image input size in the project: open the string.xml file and edit it as in the following example:
``` xml
1,3,256,256
```
1,3,256,256 are the batch size, channels, height and width of the input. Generally only height and width need to be changed, and they must match the size that was set when exporting the static graph model. 
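
Before packaging the converted model into the app, it can be worth smoke-testing the `.nb` file with the Paddle-Lite Python API on a desktop machine. A minimal sketch, assuming the `hrnet_w18.nb` produced above and the 1,3,256,256 input shape configured in string.xml:

```python
from paddlelite.lite import MobileConfig, create_paddle_predictor

config = MobileConfig()
config.set_model_from_file('./output/hrnet_w18.nb')
predictor = create_paddle_predictor(config)

# feed a dummy image with the same shape configured in string.xml
input_tensor = predictor.get_input(0)
input_tensor.resize([1, 3, 256, 256])
input_tensor.set_float_data([0.0] * (3 * 256 * 256))

predictor.run()
output_tensor = predictor.get_output(0)
print(output_tensor.shape())  # the alpha output should match the input size
```

If this runs and prints a sensible shape, the model file itself is fine and any remaining issues are on the app side.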
+

The entire Android demo is implemented in Java, without embedded C++ code, so it is relatively easy to build and run. In the future, this demo could also be ported to a Java web project to perform human matting in the browser.
diff --git a/contrib/Matting/deploy/human_matting_android_demo/README_CN.md b/contrib/Matting/deploy/human_matting_android_demo/README_CN.md
new file mode 100644
index 0000000000..48142462e1
--- /dev/null
+++ b/contrib/Matting/deploy/human_matting_android_demo/README_CN.md
@@ -0,0 +1,141 @@
+简体中文 | [English](README.md)
+
+# Human Matting Android Demo
+基于[PaddleSeg](https://github.com/paddlepaddle/paddleseg/tree/develop)的[MODNet](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib/Matting)算法实现人像抠图(安卓版demo)。
+
+可以直接下载安装本示例工程中的[apk](https://paddleseg.bj.bcebos.com/matting/models/deploy/app-debug.apk)进行体验。
+
+## 1. 效果展示
+<div align="center"
      + + +
      + + +## 2. 安卓Demo使用说明 + +### 2.1 要求 +* Android Studio 3.4; +* Android手机; + +### 2.2 一键安装 +* 打开Android Studio,在"Welcome to Android Studio"窗口点击"Open an existing Android Studio project",在弹出的路径选择窗口中选择本安卓demo对应的文件夹,然后点击右下角的"Open"按钮即可导入工程,构建工程的过程中会自动下载demo需要的Lite预测库; +* 通过USB连接Android手机; +* 载入工程后,点击菜单栏的Run->Run 'App'按钮,在弹出的"Select Deployment Target"窗口选择已经连接的Android设备,然后点击"OK"按钮; + +*注:此安卓demo基于[Paddle-Lite](https://paddlelite.paddlepaddle.org.cn/)开发,PaddleLite版本为2.8.0。* + +### 2.3 预测 +* 在人像抠图Demo中,默认会载入一张人像图像,并会在图像下方给出CPU的预测结果和预测时延; +* 在人像抠图Demo中,你还可以通过右上角的"打开本地相册"和"打开摄像头拍照"按钮分别从相册或相机中加载测试图像然后进行预测推理; + +*注意:demo中拍照时照片会自动压缩,想测试拍照原图效果,可使用手机相机拍照后从相册中打开进行预测。* + +## 3. 二次开发 +可按需要更新预测库或模型进行二次开发,其中更新模型分为模型导出和模型转换两个步骤。 + +### 3.1 更新预测库 +[Paddle-Lite官网](https://paddlelite.paddlepaddle.org.cn/)提供了预编译版本的安卓预测库,也可以参考官网自行编译。 + +Paddle-Lite在安卓端的预测库主要包括三个文件: + +* PaddlePredictor.jar; +* arm64-v8a/libpaddle_lite_jni.so; +* armeabi-v7a/libpaddle_lite_jni.so; + +下面分别介绍两种方法: + +* 使用预编译版本的预测库,最新的预编译文件参考:[release](https://github.com/PaddlePaddle/Paddle-Lite/releases/),此demo使用的[版本](https://paddlelite-demo.bj.bcebos.com/libs/android/paddle_lite_libs_v2_8_0.tar.gz) + + 解压上面文件,PaddlePredictor.jar位于:java/PaddlePredictor.jar; + + arm64-v8a相关so位于:java/libs/arm64-v8a; + + armeabi-v7a相关so位于:java/libs/armeabi-v7a; + +* 手动编译Paddle-Lite预测库 +开发环境的准备和编译方法参考:[Paddle-Lite源码编译](https://paddle-lite.readthedocs.io/zh/release-v2.8/source_compile/compile_env.html)。 + +准备好上述文件,即可参考[java_api](https://paddle-lite.readthedocs.io/zh/release-v2.8/api_reference/java_api_doc.html)在安卓端进行推理。具体使用预测库的方法可参考[Paddle-Lite-Demo](https://github.com/PaddlePaddle/Paddle-Lite-Demo)中更新预测库部分的文档。 + +### 3.2 模型导出 +此demo的人像抠图采用Backbone为HRNet_W18的MODNet模型,模型[训练教程](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib/Matting)请参考官网,官网提供了3种不同性能的Backone:MobileNetV2、ResNet50_vd和HRNet_W18。本安卓demo综合考虑精度和速度要求,采用了HRNet_W18作为Backone。可以直接从官网下载训练好的动态图模型进行算法验证。 + +为了能够在安卓手机上进行推理,需要将动态图模型导出为静态图模型,导出时固定图像输入尺寸即可。 + +首先git最新的[PaddleSeg](https://github.com/paddlepaddle/paddleseg/tree/develop)项目,然后cd进入到PaddleSeg/contrib/Matting目录。将下载下来的modnet-hrnet_w18.pdparams动态图模型文件(也可以自行训练得到)放置在当前文件夹(PaddleSeg/contrib/Matting)下面。然后修改配置文件 configs/modnet_mobilenetv2.yml(注意:虽然采用hrnet18模型,但是该模型依赖的配置文件modnet_hrnet_w18.yml本身依赖modnet_mobilenetv2.yml),修改其中的val_dataset字段如下: + +``` yml +val_dataset: + type: MattingDataset + dataset_root: data/PPM-100 + val_file: val.txt + transforms: + - type: LoadImages + - type: ResizeByShort + short_size: 256 + - type: ResizeToIntMult + mult_int: 32 + - type: Normalize + mode: val + get_trimap: False +``` +上述修改中尤其注意short_size: 256这个字段,这个值直接决定我们最终的推理图像采用的尺寸大小。这个字段值设置太小会影响预测精度,设置太大会影响手机推理速度(甚至造成手机因性能问题无法完成推理而奔溃)。经过实际测试,对于hrnet18,该字段设置为256较好。 + +完成配置文件修改后,采用下面的命令进行静态图导出: +``` shell +python export.py \ + --config configs/modnet/modnet_hrnet_w18.yml \ + --model_path modnet-hrnet_w18.pdparams \ + --save_dir output +``` + +转换完成后在当前目录下会生成output文件夹,该文件夹中的文件即为转出来的静态图文件。 + +### 3.3 模型转换 + +#### 3.3.1 模型转换工具 +准备好PaddleSeg导出来的静态图模型和参数文件后,需要使用Paddle-Lite提供的opt对模型进行优化,并转换成Paddle-Lite支持的文件格式。 + +首先安装PaddleLite: + +``` shell +pip install paddlelite==2.8.0 +``` + +然后使用下面的python脚本进行转换: + +``` python +# 引用Paddlelite预测库 +from paddlelite.lite import * + +# 1. 创建opt实例 +opt=Opt() + +# 2. 指定静态模型路径 +opt.set_model_file('./output/model.pdmodel') +opt.set_param_file('./output/model.pdiparams') + +# 3. 指定转化类型: arm、x86、opencl、npu +opt.set_valid_places("arm") +# 4. 指定模型转化类型: naive_buffer、protobuf +opt.set_model_type("naive_buffer") +# 5. 
输出模型地址
+opt.set_optimize_out("./output/hrnet_w18")
+# 6. 执行模型优化
+opt.run()
+```
+
+转换完成后在output目录下会生成对应的hrnet_w18.nb文件。
+
+#### 3.3.2 更新模型
+将优化好的`.nb`文件,替换安卓程序中的 app/src/main/assets/image_matting/
+models/modnet下面的文件即可。
+
+然后在工程中修改图像输入尺寸:打开string.xml文件,修改示例如下:
+``` xml
+1,3,256,256
+```
+1,3,256,256分别表示图像对应的batchsize、channel、height、width,我们一般修改height和width即可,这里的height和width需要和静态图导出时设置的尺寸一致。
+
+整个安卓demo采用java实现,没有内嵌C++代码,构建和执行比较简单。未来也可以将本demo移植到java web项目中实现web版人像抠图。
diff --git a/contrib/PanopticDeepLab/README.md b/contrib/PanopticDeepLab/README.md
index e4e8ff6ba2..6bc5d154ec 100644
--- a/contrib/PanopticDeepLab/README.md
+++ b/contrib/PanopticDeepLab/README.md
+English | [简体中文](README_CN.md)
# Panoptic DeepLab

The implementation of the [Panoptic Deeplab](https://arxiv.org/abs/1911.10194) panoptic segmentation algorithm based on PaddlePaddle.

Panoptic DeepLab was the first to demonstrate that a bottom-up algorithm can achieve state-of-the-art results. It predicts three outputs: Semantic Segmentation, Center Prediction and Center Regression. Pixels of instance categories are grouped to their nearest predicted instance center to obtain the instance segmentation result. Finally, the semantic and instance segmentation results are fused following the majority-vote rule to obtain the final panoptic segmentation result.
It achieves segmentation by assigning each pixel to one category or instance.
![](./docs/panoptic_deeplab.jpg)

## Model Baselines

### Cityscapes
| Backbone | Batch Size |Resolution | Training Iters | PQ | SQ | RQ | AP | mIoU | Links |
|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
|ResNet50_OS32| 8 | 2049x1025|90000|58.35%|80.03%|71.52%|25.80%|79.18%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/train.log)|
|ResNet50_OS32| 64 | 1025x513|90000|60.32%|80.56%|73.56%|26.77%|79.67%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/train.log)|

## Environment Installation

1. System environment
* PaddlePaddle >= 2.0.0
* Python >= 3.6
The GPU version of PaddlePaddle is recommended. For a detailed installation tutorial, please refer to the official [PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/windows-pip.html) website.

2. Download the PaddleSeg repository
```shell
git clone https://github.com/PaddlePaddle/PaddleSeg
```

3. Install paddleseg
```shell
cd PaddleSeg
pip install -e .
```

4. 
Enter the PaddleSeg/contrib/PanopticDeepLab directory
```shell
cd contrib/PanopticDeepLab
```

## Dataset Preparation

Please put the dataset in the `data` directory under `PaddleSeg/contrib/PanopticDeepLab`.

### Cityscapes

Go to the [CityScapes website](https://www.cityscapes-dataset.com/) to download the dataset and organize it into the following structure:

```
cityscapes/
|--gtFine/
| |--train/
| | |--aachen/
| | | |--*_color.png, *_instanceIds.png, *_labelIds.png, *_polygons.json,
| | | |--*_labelTrainIds.png
| | | |--...
| |--val/
| |--test/
| |--cityscapes_panoptic_train_trainId.json
| |--cityscapes_panoptic_train_trainId/
| | |-- *_panoptic.png
| |--cityscapes_panoptic_val_trainId.json
| |--cityscapes_panoptic_val_trainId/
| | |-- *_panoptic.png
|--leftImg8bit/
| |--train/
| |--val/
| |--test/

```

Install CityscapesScripts
```shell
pip install git+https://github.com/mcordts/cityscapesScripts.git
```

Command to generate `*_panoptic.png` (you need to locate the `createPanopticImgs.py` file):
```shell
python /path/to/cityscapesscripts/preparation/createPanopticImgs.py \
       --dataset-folder data/cityscapes/gtFine/ \
       --output-folder data/cityscapes/gtFine/ \
       --use-train-id
```

## Training
```shell
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # Set the number of GPUs as required.
python -m paddle.distributed.launch train.py \
       --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
       --do_eval \
       --use_vdl \
       --save_interval 5000 \
       --save_dir output
```

**note:** using --do_eval slows down training and increases GPU memory consumption; turn it on or off as needed.

To view more information about the parameters, run the following command:
```shell
python train.py --help
```

## Evaluation
```shell
python val.py \
       --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
       --model_path output/iter_90000/model.pdparams
```
You can directly download the pretrained model we provide for evaluation.

To view more information about the parameters, run the following command:
```shell
python val.py --help
```

## Prediction
```shell
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # Set the number of GPUs as required.
python -m paddle.distributed.launch predict.py \
       --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \
       --model_path output/iter_90000/model.pdparams \
       --image_path data/cityscapes/leftImg8bit/val/ \
       --save_dir ./output/result
```
You can directly download the pretrained model we provide for prediction.

To view more information about the parameters, run the following command:
```shell
python predict.py --help
```
Panoptic segmentation results:
<div align="center"
      -

Semantic segmentation results:
<div align="center"
      -

Instance segmentation results:
<div align="center"
      diff --git a/contrib/PanopticDeepLab/README_CN.md b/contrib/PanopticDeepLab/README_CN.md new file mode 100644 index 0000000000..34b118b5e0 --- /dev/null +++ b/contrib/PanopticDeepLab/README_CN.md @@ -0,0 +1,144 @@ +简体中文 | [English](README.md) +# Panoptic DeepLab + +基于PaddlePaddle实现[Panoptic Deeplab](https://arxiv.org/abs/1911.10194)全景分割算法。 + +Panoptic DeepLab首次证实了bottem-up算法能够达到state-of-the-art的效果。Panoptic DeepLab预测三个输出:Semantic Segmentation, Center Prediction 和 Center Regression。实例类别像素根据最近距离原则聚集到实例中心点得到实例分割结果。最后按照majority-vote规则融合语义分割结果和实例分割结果,得到最终的全景分割结果。 +其通过将每一个像素赋值给某一个类别或实例达到分割的效果。 +![](./docs/panoptic_deeplab.jpg) + +## Model Baselines + +### Cityscapes +| Backbone | Batch Size |Resolution | Training Iters | PQ | SQ | RQ | AP | mIoU | Links | +|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +|ResNet50_OS32| 8 | 2049x1025|90000|58.35%|80.03%|71.52%|25.80%|79.18%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_2049x1025_bs1_90k_lr00005/train.log)| +|ResNet50_OS32| 64 | 1025x513|90000|60.32%|80.56%|73.56%|26.77%|79.67%|[model](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/model.pdparams) \| [log](https://bj.bcebos.com/paddleseg/dygraph/pnoptic_segmentation/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005/train.log)| + +## 环境安装 + +1. 系统环境 +* PaddlePaddle >= 2.0.0 +* Python >= 3.6+ +推荐使用GPU版本的PaddlePaddle版本。详细安装教程请参考官方网站[PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/windows-pip.html) + +2. 下载PaddleSeg repository +```shell +git clone https://github.com/PaddlePaddle/PaddleSeg +``` + +3. 安装paddleseg +```shell +cd PaddleSeg +pip install -e . +``` + +4. 进入PaddleSeg/contrib/PanopticDeepLab目录 +```shell +cd contrib/PanopticDeepLab +``` + +## 数据集准备 + +将数据集放置于`PaddleSeg/contrib/PanopticDeepLab`目录下的`data`目录下。 + +### Cityscapes + +前往[CityScapes官网](https://www.cityscapes-dataset.com/)下载数据集并整理成如下结构: + +``` +cityscapes/ +|--gtFine/ +| |--train/ +| | |--aachen/ +| | | |--*_color.png, *_instanceIds.png, *_labelIds.png, *_polygons.json, +| | | |--*_labelTrainIds.png +| | | |--... 
+| |--val/ +| |--test/ +| |--cityscapes_panoptic_train_trainId.json +| |--cityscapes_panoptic_train_trainId/ +| | |-- *_panoptic.png +| |--cityscapes_panoptic_val_trainId.json +| |--cityscapes_panoptic_val_trainId/ +| | |-- *_panoptic.png +|--leftImg8bit/ +| |--train/ +| |--val/ +| |--test/ + +``` + +安装CityscapesScripts +```shell +pip install git+https://github.com/mcordts/cityscapesScripts.git +``` + +`*_panoptic.png` 生成命令(需找到`createPanopticImgs.py`文件): +```shell +python /path/to/cityscapesscripts/preparation/createPanopticImgs.py \ + --dataset-folder data/cityscapes/gtFine/ \ + --output-folder data/cityscapes/gtFine/ \ + --use-train-id +``` + +## 训练 +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # 根据实际情况进行显卡数量的设置 +python -m paddle.distributed.launch train.py \ + --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \ + --do_eval \ + --use_vdl \ + --save_interval 5000 \ + --save_dir output +``` + +**note:** 使用--do_eval会影响训练速度及增加显存消耗,根据选择进行开闭。 + +更多参数信息请运行如下命令进行查看: +```shell +python train.py --help +``` + +## 评估 +```shell +python val.py \ + --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \ + --model_path output/iter_90000/model.pdparams +``` +你可以直接下载我们提供的模型进行评估。 + +更多参数信息请运行如下命令进行查看: +```shell +python val.py --help +``` + +## 预测 +```shell +export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 # 根据实际情况进行显卡数量的设置 +python -m paddle.distributed.launch predict.py \ + --config configs/panoptic_deeplab/panoptic_deeplab_resnet50_os32_cityscapes_1025x513_bs8_90k_lr00005.yml \ + --model_path output/iter_90000/model.pdparams \ + --image_path data/cityscapes/leftImg8bit/val/ \ + --save_dir ./output/result +``` +你可以直接下载我们提供的模型进行预测。 + +更多参数信息请运行如下命令进行查看: +```shell +python predict.py --help +``` +全景分割结果: +
      + +
      + +语义分割结果: +
      + +
      + +实例分割结果: +
      + +
      diff --git a/docs/data_prepare.md b/docs/data_prepare.md index 33e7cde416..8a20a5da8f 100644 --- a/docs/data_prepare.md +++ b/docs/data_prepare.md @@ -1,14 +1,17 @@ -# 数据集准备 +English | [简体中文](data_prepare_cn.md) -PaddleSeg目前支持CityScapes、ADE20K、Pascal VOC等数据集的加载,在加载数据集时,如若本地不存在对应数据,则会自动触发下载(除Cityscapes数据集). +# Dataset Preparation -## 关于CityScapes数据集 -Cityscapes是关于城市街道场景的语义理解图片数据集。它主要包含来自50个不同城市的街道场景, -拥有5000张(2048 x 1024)城市驾驶场景的高质量像素级注释图像,包含19个类别。其中训练集2975张, 验证集500张和测试集1525张。 +PaddleSeg currently supports the dataset of CityScapes, ADE20K, Pascal VOC and so on. +When loading a dataset, the system automatically triggers the download (except for Cityscapes dataset) if the data does not exist locally. -由于协议限制,请自行前往[CityScapes官网](https://www.cityscapes-dataset.com/)下载数据集, -我们建议您将数据集存放于`PaddleSeg/data`中,以便与我们配置文件完全兼容。数据集下载后请组织成如下结构: +## CityScapes Dataset +Cityscapes is a dataset of semantically understood images of urban street scenes. It mainly contains street scenes from 50 different cities, with 5000 (2048 x 1024) high quality pixel-level annotated images of urban driving scenes. It contains 19 categories. There are 2975 training sets, 500 validation sets and 1525 test sets. +Due to restrictions, please visit [CityScapes website](https://www.cityscapes-dataset.com/)to download dataset. +We recommend that you store dataset in `PaddleSeg/data` for full compatibility with our config files. Please organize the dataset into the following structure after downloading: + +``` cityscapes | |--leftImg8bit @@ -20,40 +23,42 @@ Cityscapes是关于城市街道场景的语义理解图片数据集。它主要 | |--train | |--val | |--test +``` -运行下列命令进行标签转换: +Run the following command to convert labels: ```shell pip install cityscapesscripts python tools/convert_cityscapes.py --cityscapes_path data/cityscapes --num_workers 8 ``` -其中`cityscapes_path`应根据实际数据集路径进行调整。 `num_workers`决定启动的进程数,可根据实际情况进行调整大小。 +where `cityscapes_path` should be adjusted according to the actual dataset path. `num_workers` determines the number of processes to be started. The value can be adjusted as required. -## 关于Pascal VOC 2012数据集 -[Pascal VOC 2012](http://host.robots.ox.ac.uk/pascal/VOC/)数据集以对象分割为主,包含20个类别和背景类,其中训练集1464张,验证集1449张。 -通常情况下会利用[SBD(Semantic Boundaries Dataset)](http://home.bharathh.info/pubs/codes/SBD/download.html)进行扩充,扩充后训练集10582张。 -运行下列命令进行SBD数据集下载并进行扩充: +## Pascal VOC 2012 dataset +[Pascal VOC 2012](http://host.robots.ox.ac.uk/pascal/VOC/) is mainly object segmentation, including 20 categories and background classes, including 1464 training sets and 1449 validation sets. +Generally, we will use [SBD(Semantic Boundaries Dataset)](http://home.bharathh.info/pubs/codes/SBD/download.html) to expand the dataset. Theer are 10582 training sets after expanding. +Run the following commands to download the SBD dataset and use it to expand: ```shell python tools/voc_augment.py --voc_path data/VOCdevkit --num_workers 8 ``` -其中`voc_path`应根据实际数据集路径进行调整。 +where `voc_path`should be adjusted according to the actual dataset path. 
-**注意** 运行前请确保在PaddleSeg目录下执行过下列命令: +**Note** Before running, make sure you have executed the following commands in the PaddleSeg directory: ```shell export PYTHONPATH=`pwd` -# windows下请执行相面的命令 +# In Windows, run the following command # set PYTHONPATH=%cd% ``` -## 关于ADE20K数据集 -[ADE20K](http://sceneparsing.csail.mit.edu/)由MIT发布的可用于场景感知、分割和多物体识别等多种任务的数据集。 -其涵盖了150个语义类别,包括训练集20210张,验证集2000张。 +## ADE20K Dataset +[ADE20K](http://sceneparsing.csail.mit.edu/) published by MIT that can be used for a variety of tasks such as scene perception, segmentation, and multi-object recognition. +It covers 150 semantic categories, including 20210 training sets and 2000 validation sets. -## 关于Coco Stuff数据集 -Coco Stuff是基于Coco数据集的像素级别语义分割数据集。它主要覆盖172个类别,包含80个'thing',91个'stuff'和1个'unlabeled',我们忽略'unlabeled'类别,并将其index设为255,不记录损失。因此提供的训练版本为171个类别。其中,训练集118k, 验证集5k. +## Coco Stuff Dataset +Coco Stuff is a pixel-level semantically segmented dataset based on Coco datasets. It covers 172 catefories, including 80 'thing' classes, 91 'stuff' classes amd one 'unlabeled' classes. 'unlabeled' is ignored and the index is set to 255 which has not contribution to loss. The training version is therefore provided in 171 categories. There are 118k training sets, 5k validation sets. -在使用Coco Stuff数据集前, 请自行前往[COCO-Stuff主页](https://github.com/nightrome/cocostuff)下载数据集,或者下载[coco2017训练集原图](http://images.cocodataset.org/zips/train2017.zip), [coco2017验证集原图](http://images.cocodataset.org/zips/val2017.zip)及[标注图](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) -我们建议您将数据集存放于`PaddleSeg/data`中,以便与我们配置文件完全兼容。数据集下载后请组织成如下结构: +Before using Coco Stuff dataset, please go to [COCO-Stuff website](https://github.com/nightrome/cocostuff) to download dataset or download [coco2017 training sets with origin images](http://images.cocodataset.org/zips/train2017.zip), [coco2017 validation sets with origin images](http://images.cocodataset.org/zips/val2017.zip) and [annotations images](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) +We recommend that you store dataset in `PaddleSeg/data` for full compatibility with our config files. Please organize the dataset into the following structure after downloading: +``` cocostuff | |--images @@ -63,24 +68,25 @@ Coco Stuff是基于Coco数据集的像素级别语义分割数据集。它主要 |--annotations | |--train2017 | |--val2017 +``` -运行下列命令进行标签转换: +Run the following command to convert labels: ```shell python tools/convert_cocostuff.py --annotation_path /PATH/TO/ANNOTATIONS --save_path /PATH/TO/CONVERT_ANNOTATIONS ``` -其中`annotation_path`应根据下载cocostuff/annotations文件夹的实际路径填写。 `save_path`决定转换后标签的存放位置。 - +where `annotation_path` should be filled according to the `cocostuff/annotations` actual path. `save_path` determines the location of the converted label. -其中,标注图像的标签从0,1依次取值,不可间隔。若有需要忽略的像素,则按255进行标注。 +Where, the labels of the labeled images are taken in sequence from 0, 1, ... and cannot be separated. If there are pixels that need to be ignored, they should be labeled to 255. -## 关于Pascal Context数据集 -Pascal Context是基于PASCAL VOC 2010数据集额外标注的像素级别的语义分割数据集。我们提供的转换脚本支持60个类别,index为0是背景类别。该数据集中中训练集4996, 验证集5104张. +## Pascal Context Dataset +Pascal Context is a pixel-level semantically segmented dataset based on the Pascal VOC 2010 dataset with additional annotations. The conversion script we provide supports 60 categories, with index 0 being the background category. There are 4996 training sets and 5104 verification sets in this dataset. 
-在使用Pascal Context数据集前, 请先下载[VOC2010](http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar),随后自行前往[Pascal-Context主页](https://www.cs.stanford.edu/~roozbeh/pascal-context/)下载数据集及[标注](https://codalabuser.blob.core.windows.net/public/trainval_merged.json) -我们建议您将数据集存放于`PaddleSeg/data`中,以便与我们配置文件完全兼容。数据集下载后请组织成如下结构: +Before using Pascal Context dataset, Please download [VOC2010](http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar) firstly,then go to [Pascal-Context home page](https://www.cs.stanford.edu/~roozbeh/pascal-context/)to download dataset and [annotations](https://codalabuser.blob.core.windows.net/public/trainval_merged.json) +We recommend that you store dataset in `PaddleSeg/data` for full compatibility with our config files. Please organize the dataset into the following structure after downloading: +``` VOC2010 | |--Annotations @@ -94,25 +100,26 @@ Pascal Context是基于PASCAL VOC 2010数据集额外标注的像素级别的语 |--SegmentationObject | |--trainval_merged.json - - -运行下列命令进行标签转换: +``` + +Run the following command to convert labels: ```shell python tools/convert_voc2010.py --voc_path /PATH/TO/VOC ----annotation_path /PATH/TO/JSON ``` -其中`voc_path`应根据下载VOC2010文件夹的实际路径填写。 `annotation_path`决定下载trainval_merged.json的存放位置。 +where `voc_path` should be filled according to the voc2010 actual path. `annotation_path` is the trainval_merged.json saved path. +Where, the labels of the labeled images are taken in sequence from 0, 1, 2, ... and cannot be separated. If there are pixels that need to be ignored, they should be labeled to 255 (default ignored value). When using Pascal Context dataset, [Detail](https://github.com/zhanghang1989/detail-api) need to be installed. -其中,标注图像的标签从0,1,2依次取值,不可间隔。若有需要忽略的像素,则按255(默认的忽略值)进行标注。在使用Pascal Context数据集时,需要安装[Detail](https://github.com/zhanghang1989/detail-api). -## 自定义数据集 +## Custom Dataset -如果您需要使用自定义数据集进行训练,请按照以下步骤准备数据. +If you need to use a custom dataset for training, prepare the data as following steps. -1.推荐整理成如下结构 +1.The following structure is recommended +``` custom_dataset | |--images @@ -130,16 +137,19 @@ python tools/convert_voc2010.py --voc_path /PATH/TO/VOC ----annotation_path /PAT |--val.txt | |--test.txt +``` -其中train.txt和val.txt的内容如下所示: +The content of train.txt and val.txt is as following: +``` images/image1.jpg labels/label1.png images/image2.jpg labels/label2.png ... +``` -2.标注图像的标签从0,1依次取值,不可间隔。若有需要忽略的像素,则按255进行标注。 +2.The labels of the labeled images are taken in sequence from 0, 1, ... and cannot be separated. If there are pixels that need to be ignored, they should be labeled to 255. -可按如下方式对自定义数据集进行配置: +You can configure a custom dataset as following: ```yaml train_dataset: type: Dataset diff --git a/docs/data_prepare_cn.md b/docs/data_prepare_cn.md new file mode 100644 index 0000000000..302e5334e0 --- /dev/null +++ b/docs/data_prepare_cn.md @@ -0,0 +1,171 @@ +简体中文 | [English](data_prepare.md) + +# 数据集准备 + +PaddleSeg目前支持CityScapes、ADE20K、Pascal VOC等数据集的加载,在加载数据集时,如若本地不存在对应数据,则会自动触发下载(除Cityscapes数据集). 
+ +## CityScapes数据集 +Cityscapes是关于城市街道场景的语义理解图片数据集。它主要包含来自50个不同城市的街道场景, +拥有5000张(2048 x 1024)城市驾驶场景的高质量像素级注释图像,包含19个类别。其中训练集2975张, 验证集500张和测试集1525张。 + +由于协议限制,请自行前往[CityScapes官网](https://www.cityscapes-dataset.com/)下载数据集, +我们建议您将数据集存放于`PaddleSeg/data`中,以便与我们配置文件完全兼容。数据集下载后请组织成如下结构: + +``` + cityscapes + | + |--leftImg8bit + | |--train + | |--val + | |--test + | + |--gtFine + | |--train + | |--val + | |--test +``` + +运行下列命令进行标签转换: +```shell +pip install cityscapesscripts +python tools/convert_cityscapes.py --cityscapes_path data/cityscapes --num_workers 8 +``` +其中`cityscapes_path`应根据实际数据集路径进行调整。 `num_workers`决定启动的进程数,可根据实际情况进行调整大小。 + +## Pascal VOC 2012数据集 +[Pascal VOC 2012](http://host.robots.ox.ac.uk/pascal/VOC/)数据集以对象分割为主,包含20个类别和背景类,其中训练集1464张,验证集1449张。 +通常情况下会利用[SBD(Semantic Boundaries Dataset)](http://home.bharathh.info/pubs/codes/SBD/download.html)进行扩充,扩充后训练集10582张。 +运行下列命令进行SBD数据集下载并进行扩充: +```shell +python tools/voc_augment.py --voc_path data/VOCdevkit --num_workers 8 +``` +其中`voc_path`应根据实际数据集路径进行调整。 + +**注意** 运行前请确保在PaddleSeg目录下执行过下列命令: +```shell +export PYTHONPATH=`pwd` +# windows下请执行下面的命令 +# set PYTHONPATH=%cd% +``` + +## ADE20K数据集 +[ADE20K](http://sceneparsing.csail.mit.edu/)由MIT发布的可用于场景感知、分割和多物体识别等多种任务的数据集。 +其涵盖了150个语义类别,包括训练集20210张,验证集2000张。 + +## Coco Stuff数据集 +Coco Stuff是基于Coco数据集的像素级别语义分割数据集。它主要覆盖172个类别,包含80个'thing',91个'stuff'和1个'unlabeled',我们忽略'unlabeled'类别,并将其index设为255,不记录损失。因此提供的训练版本为171个类别。其中,训练集118k, 验证集5k. + +在使用Coco Stuff数据集前, 请自行前往[COCO-Stuff主页](https://github.com/nightrome/cocostuff)下载数据集,或者下载[coco2017训练集原图](http://images.cocodataset.org/zips/train2017.zip), [coco2017验证集原图](http://images.cocodataset.org/zips/val2017.zip)及[标注图](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) +我们建议您将数据集存放于`PaddleSeg/data`中,以便与我们配置文件完全兼容。数据集下载后请组织成如下结构: + +``` + cocostuff + | + |--images + | |--train2017 + | |--val2017 + | + |--annotations + | |--train2017 + | |--val2017 +``` + + +运行下列命令进行标签转换: + +```shell +python tools/convert_cocostuff.py --annotation_path /PATH/TO/ANNOTATIONS --save_path /PATH/TO/CONVERT_ANNOTATIONS +``` +其中`annotation_path`应根据下载cocostuff/annotations文件夹的实际路径填写。 `save_path`决定转换后标签的存放位置。 + + +其中,标注图像的标签从0,1依次取值,不可间隔。若有需要忽略的像素,则按255进行标注。 + +## Pascal Context数据集 +Pascal Context是基于PASCAL VOC 2010数据集额外标注的像素级别的语义分割数据集。我们提供的转换脚本支持60个类别,index为0是背景类别。该数据集中训练集4996, 验证集5104张. + + +在使用Pascal Context数据集前, 请先下载[VOC2010](http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar),随后自行前往[Pascal-Context主页](https://www.cs.stanford.edu/~roozbeh/pascal-context/)下载数据集及[标注](https://codalabuser.blob.core.windows.net/public/trainval_merged.json) +我们建议您将数据集存放于`PaddleSeg/data`中,以便与我们配置文件完全兼容。数据集下载后请组织成如下结构: + +``` + VOC2010 + | + |--Annotations + | + |--ImageSets + | + |--SegmentationClass + | + |--JPEGImages + | + |--SegmentationObject + | + |--trainval_merged.json +``` + +运行下列命令进行标签转换: + +```shell +python tools/convert_voc2010.py --voc_path /PATH/TO/VOC ----annotation_path /PATH/TO/JSON +``` +其中`voc_path`应根据下载VOC2010文件夹的实际路径填写。 `annotation_path`决定下载trainval_merged.json的存放位置。 + + + +其中,标注图像的标签从0,1,2依次取值,不可间隔。若有需要忽略的像素,则按255(默认的忽略值)进行标注。在使用Pascal Context数据集时,需要安装[Detail](https://github.com/zhanghang1989/detail-api). + +## 自定义数据集 + +如果您需要使用自定义数据集进行训练,请按照以下步骤准备数据. + +1.推荐整理成如下结构 + +``` + custom_dataset + | + |--images + | |--image1.jpg + | |--image2.jpg + | |--... + | + |--labels + | |--label1.jpg + | |--label2.png + | |--... 
+ | + |--train.txt + | + |--val.txt + | + |--test.txt +``` + +其中train.txt和val.txt的内容如下所示: + +``` + images/image1.jpg labels/label1.png + images/image2.jpg labels/label2.png + ... +``` + +2.标注图像的标签从0,1依次取值,不可间隔。若有需要忽略的像素,则按255进行标注。 + +可按如下方式对自定义数据集进行配置: +```yaml +train_dataset: + type: Dataset + dataset_root: custom_dataset + train_path: custom_dataset/train.txt + num_classes: 2 + transforms: + - type: ResizeStepScaling + min_scale_factor: 0.5 + max_scale_factor: 2.0 + scale_step_size: 0.25 + - type: RandomPaddingCrop + crop_size: [512, 512] + - type: RandomHorizontalFlip + - type: Normalize + mode: train +``` diff --git a/docs/export/export/index.rst b/docs/export/export/index.rst deleted file mode 100644 index c859c5c384..0000000000 --- a/docs/export/export/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -模型导出 -============================ - -.. toctree:: - :maxdepth: 1 - - model_export.md diff --git a/docs/export/export/model_export.md b/docs/export/export/model_export.md deleted file mode 100644 index acefc4a022..0000000000 --- a/docs/export/export/model_export.md +++ /dev/null @@ -1,46 +0,0 @@ -# 模型导出 - -本教程提供了一个将训练好的动态图模型转化为静态图模型并进行部署的例子 - - -## 获取预训练模型 - -*注意:下述例子为Linux或者Mac上执行的例子,windows请自行在浏览器下载[参数](https://paddleseg.bj.bcebos.com/dygraph/cityscapes/bisenet_cityscapes_1024x1024_160k/model.pdparams)并存放到所创建的目录* -```shell -mkdir bisenet && cd bisenet -wget https://paddleseg.bj.bcebos.com/dygraph/cityscapes/bisenet_cityscapes_1024x1024_160k/model.pdparams -cd .. -``` - -## 将模型导出为静态图模型 - -请确保完成了PaddleSeg的安装工作,并且位于PaddleSeg目录下,执行以下脚本: - -```shell -export CUDA_VISIBLE_DEVICES=0 # 设置1张可用的卡 -# windows下请执行以下命令 -# set CUDA_VISIBLE_DEVICES=0 -python export.py \ - --config configs/bisenet/bisenet_cityscapes_1024x1024_160k.yml \ - --model_path bisenet/model.pdparams -``` - -### 导出脚本参数解释 - -|参数名|用途|是否必选项|默认值| -|-|-|-|-| -|config|配置文件|是|-| -|save_dir|模型和visualdl日志文件的保存根路径|否|output| -|model_path|预训练模型参数的路径|否|配置文件中指定值| -|with_softmax|在网络末端添加softmax算子。由于PaddleSeg组网默认返回logits,如果想要部署模型获取概率值,可以置为True|否|False| -|without_argmax|是否不在网络末端添加argmax算子。由于PaddleSeg组网默认返回logits,为部署模型可以直接获取预测结果,我们默认在网络末端添加argmax算子|否|False| - -## 结果文件 - -```shell -output - ├── deploy.yaml # 部署相关的配置文件 - ├── model.pdiparams # 静态图模型参数 - ├── model.pdiparams.info # 参数额外信息,一般无需关注 - └── model.pdmodel # 静态图模型文件 -``` diff --git a/docs/model_export.md b/docs/model_export.md index b4a10df78f..d270e102f0 100644 --- a/docs/model_export.md +++ b/docs/model_export.md @@ -1,65 +1,64 @@ -# 导出预测模型 +English | [简体中文](model_export_cn.md) -PaddleSeg训练好模型后,需要将模型导出为预测模型,才可以进行模型部署。 +# Model Export -本教程提供一个示例介绍模型导出的方法。 +The trained model needs to be exported as a prediction model before deployment. -## 1. 获取预训练权重 +This tutorial will show how to export a trained model。 -大家使用PaddleSeg训练好模型后,输出目录下的best_model文件保存测试精度最高的预训练权重。 -本示例中,我们使用BiseNetV2模型,大家执行如下命令或者点击[链接](https://paddleseg.bj.bcebos.com/dygraph/cityscapes/bisenet_cityscapes_1024x1024_160k/model.pdparams)下载模型预训练权重。 +## Acquire the Pre-training Model +In this example,BiseNetV2 model will be used. Run the following command or click [link](https://paddleseg.bj.bcebos.com/dygraph/cityscapes/bisenet_cityscapes_1024x1024_160k/model.pdparams) to download the pretrained model. ```shell mkdir bisenet && cd bisenet wget https://paddleseg.bj.bcebos.com/dygraph/cityscapes/bisenet_cityscapes_1024x1024_160k/model.pdparams cd .. ``` -## 2. 导出预测模型 +## Export the prediction Model -确保正确安装PaddleSeg后,在PaddleSeg目录下执行如下命令,则预测模型会保存在output文件夹。 +Make sure you have installed PaddleSeg and are in the PaddleSeg directory. 
+

+Run the following command, and the prediction model will be saved in the `output` directory.

```shell
export CUDA_VISIBLE_DEVICES=0 # Set a usable GPU.
# On Windows, run the following command instead:
# set CUDA_VISIBLE_DEVICES=0
python export.py \
       --config configs/bisenet/bisenet_cityscapes_1024x1024_160k.yml \
       --model_path bisenet/model.pdparams \
       --save_dir output
```

### Description of Exported Script Parameters

|parameter|purpose|required|default|
|-|-|-|-|
|config|Config file|yes|-|
|save_dir|Path to save the prediction model|no|output|
|model_path|Path of the pretrained model parameters|no|The value specified in the config file|
|with_softmax|Add a softmax operator at the end of the network. Since PaddleSeg networks return logits by default, set this to True if you want the deployed model to output probability values|no|False|
|without_argmax|Whether to skip adding an argmax operator at the end of the network. Since PaddleSeg networks return logits by default, we add an argmax operator at the end of the network by default so the deployed model can directly output prediction results|no|False|
|input_shape| Set the input shape of the exported model, such as `--input_shape 1 3 1024 1024`. If input_shape is not provided, the default input shape of the exported model is [-1, 3, -1, -1] | no | None |

## Prediction Model Files

```shell
output
  ├── deploy.yaml            # Deployment config file, mainly describing data preprocessing
  ├── model.pdiparams        # Parameters of the static model
  ├── model.pdiparams.info   # Additional parameter information, which can generally be ignored
  └── model.pdmodel          # Static model file
```

After exporting the prediction model, it can be deployed in the following ways.

|Deployment scenario|Inference library|Tutorial|
|-|-|-|
|Server (Nvidia GPU and X86 CPU) Python deployment|Paddle Inference|[doc](../deploy/python/)|
|Server (Nvidia GPU and X86 CPU) C++ deployment|Paddle Inference|[doc](../deploy/cpp/)|
|Mobile deployment|Paddle Lite|[doc](../deploy/lite/)|
|Service-oriented deployment|Paddle Serving|[doc](../deploy/serving/)|
|Web deployment|Paddle JS|[doc](../deploy/web/)|
diff --git a/docs/model_export_cn.md b/docs/model_export_cn.md
new file mode 100644
index 0000000000..90fd5d4659
--- /dev/null
+++ b/docs/model_export_cn.md
@@ -0,0 +1,65 @@
+简体中文 | [English](model_export.md)
+
+# 导出预测模型
+
+PaddleSeg训练好模型后,需要将模型导出为预测模型,才可以进行模型部署。
+
+本教程提供一个示例介绍模型导出的过程。
+
+## 1. 
获取预训练莫模型 + +本示例中,我们使用BiseNetV2模型,大家执行如下命令或者点击[链接](https://paddleseg.bj.bcebos.com/dygraph/cityscapes/bisenet_cityscapes_1024x1024_160k/model.pdparams)下载模型预训练权重。 + +```shell +mkdir bisenet && cd bisenet +wget https://paddleseg.bj.bcebos.com/dygraph/cityscapes/bisenet_cityscapes_1024x1024_160k/model.pdparams +cd .. +``` + +## 2. 导出预测模型 + +确保正确安装PaddleSeg后,在PaddleSeg目录下执行如下命令,则预测模型会保存在output文件夹。 + +```shell +# 设置1张可用的卡 +export CUDA_VISIBLE_DEVICES=0 +# windows下请执行以下命令 +# set CUDA_VISIBLE_DEVICES=0 +python export.py \ + --config configs/bisenet/bisenet_cityscapes_1024x1024_160k.yml \ + --model_path bisenet/model.pdparams \ + --save_dir output +``` + +### 导出脚本参数解释 + +|参数名|用途|是否必选项|默认值| +|-|-|-|-| +|config|配置文件|是|-| +|model_path|预训练权重的路径|否|配置文件中指定的预训练权重路径| +|save_dir|保存预测模型的路径|否|output| +|input_shape| 设置导出模型的输入shape,比如传入`--input_shape 1 3 1024 1024`。如果不设置input_shape,默认导出模型的输入shape是[-1, 3, -1, -1] | 否 | None | +|with_softmax|在网络末端添加softmax算子。由于PaddleSeg组网默认返回logits,如果想要部署模型获取概率值,可以置为True|否|False| +|without_argmax|是否不在网络末端添加argmax算子。由于PaddleSeg组网默认返回logits,为部署模型可以直接获取预测结果,我们默认在网络末端添加argmax算子|否|False| + +## 3. 预测模型文件 + +如下是导出的预测模型文件。 + +```shell +output + ├── deploy.yaml # 部署相关的配置文件,主要说明数据预处理的方式 + ├── model.pdmodel # 预测模型的拓扑结构文件 + ├── model.pdiparams # 预测模型的权重文件 + └── model.pdiparams.info # 参数额外信息,一般无需关注 +``` + +导出预测模型后,我们可以使用以下方式部署模型: + +|部署场景|使用预测库|教程| +|-|-|-| +|服务器端(Nvidia GPU和X86 CPU) Python部署|Paddle Inference|[文档](../deploy/python/)| +|服务器端(Nvidia GPU和X86 CPU) C++端部署|Paddle Inference|[文档](../deploy/cpp/)| +|移动端部署|Paddle Lite|[文档](../deploy/lite/)| +|服务化部署|Paddle Serving|[文档](../deploy/serving/)| +|前端部署|Paddle JS|[文档](../deploy/web/)| From 94b1a833bd71e535866b2af6c4214a53b7202edc Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 11 Jan 2022 19:43:15 +0800 Subject: [PATCH 201/210] update transforms.py --- contrib/Matting/transforms.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/Matting/transforms.py b/contrib/Matting/transforms.py index 6581874098..72898c5e29 100644 --- a/contrib/Matting/transforms.py +++ b/contrib/Matting/transforms.py @@ -714,6 +714,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class RandomSharpen: def __init__(self, prob=0.1): if prob < 0: @@ -740,6 +741,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class RandomNoise: def __init__(self, prob=0.1): if prob < 0: @@ -761,6 +763,7 @@ def __call__(self, data): return data +@manager.TRANSFORMS.add_component class RandomReJpeg: def __init__(self, prob=0.1): if prob < 0: From a529fe3b6ef14959a022524c10a46ebc90122299 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Wed, 9 Feb 2022 16:12:33 +0800 Subject: [PATCH 202/210] add random shuffle for file list --- contrib/Matting/dataset/matting_dataset.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/Matting/dataset/matting_dataset.py b/contrib/Matting/dataset/matting_dataset.py index 02ebaff1d5..2d95e88028 100644 --- a/contrib/Matting/dataset/matting_dataset.py +++ b/contrib/Matting/dataset/matting_dataset.py @@ -110,6 +110,8 @@ def __init__(self, for line in lines: line = line.strip() self.fg_bg_list.append(line) + if mode != 'val': + random.shuffle(self.fg_bg_list) def __getitem__(self, idx): data = {} From e9b0ba54beee4bd51583a51a6978c201a17c6a50 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Mon, 17 Jan 2022 14:29:26 +0800 Subject: [PATCH 203/210] update matting_dataset.py --- contrib/Matting/dataset/matting_dataset.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git 
a/contrib/Matting/dataset/matting_dataset.py b/contrib/Matting/dataset/matting_dataset.py index 2d95e88028..770dcbd557 100644 --- a/contrib/Matting/dataset/matting_dataset.py +++ b/contrib/Matting/dataset/matting_dataset.py @@ -129,8 +129,7 @@ def __getitem__(self, idx): if len(fg_bg_file) >= 2: bg_file = os.path.join(self.dataset_root, fg_bg_file[1]) bg = cv2.imread(bg_file) - data['img'], data['bg'] = self.composite(fg, alpha, bg) - data['fg'] = fg + data['img'], data['fg'], data['bg'] = self.composite(fg, alpha, bg) if self.mode in ['train', 'trainval']: data['gt_fields'].append('fg') data['gt_fields'].append('bg') @@ -224,7 +223,7 @@ def composite(self, fg, alpha, ori_bg): alpha = np.expand_dims(alpha, axis=2) image = alpha * fg + (1 - alpha) * bg image = image.astype(np.uint8) - return image, bg + return image, fg, bg @staticmethod def gen_trimap(alpha, mode='train', eval_kernel=7): From 52fb663a4b335863453440914ea1da849e4c9292 Mon Sep 17 00:00:00 2001 From: wuyefeilin Date: Tue, 22 Mar 2022 17:31:17 +0800 Subject: [PATCH 204/210] tag pp-matting --- .../pp-matting_hrnet_w18_distinctions.yml | 56 +++++++++++++++++++ .../pp-matting_hrnet_w48_distinctions.yml | 56 +++++++++++++++++++ contrib/Matting/model/__init__.py | 3 +- .../model/{ziyan_gate.py => ppmatting.py} | 23 ++++---- 4 files changed, 124 insertions(+), 14 deletions(-) create mode 100644 contrib/Matting/configs/pp-matting/pp-matting_hrnet_w18_distinctions.yml create mode 100644 contrib/Matting/configs/pp-matting/pp-matting_hrnet_w48_distinctions.yml rename contrib/Matting/model/{ziyan_gate.py => ppmatting.py} (94%) diff --git a/contrib/Matting/configs/pp-matting/pp-matting_hrnet_w18_distinctions.yml b/contrib/Matting/configs/pp-matting/pp-matting_hrnet_w18_distinctions.yml new file mode 100644 index 0000000000..fc0c61ef80 --- /dev/null +++ b/contrib/Matting/configs/pp-matting/pp-matting_hrnet_w18_distinctions.yml @@ -0,0 +1,56 @@ +batch_size: 4 +iters: 300000 + +train_dataset: + type: MattingDataset + dataset_root: data/matting/Distinctions-646 + train_file: train.txt + transforms: + - type: LoadImages + - type: Padding + target_size: [512, 512] + - type: RandomCrop + crop_size: [[512, 512],[640, 640], [800, 800]] + - type: Resize + target_size: [512, 512] + - type: RandomDistort + - type: RandomBlur + prob: 0.1 + - type: RandomHorizontalFlip + - type: Normalize + mode: train + separator: '|' + +val_dataset: + type: MattingDataset + dataset_root: data/matting/Distinctions-646 + val_file: val.txt + transforms: + - type: LoadImages + - type: LimitShort + max_short: 1536 + - type: ResizeToIntMult + mult_int: 32 + - type: Normalize + mode: val + get_trimap: False + separator: '|' + +model: + type: PPMatting + # type: ZiYanGate + backbone: + type: HRNet_W18 + pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz + pretrained: Null + +optimizer: + type: sgd + momentum: 0.9 + weight_decay: 4.0e-5 + +lr_scheduler: + type: PolynomialDecay + learning_rate: 0.01 + end_lr: 0 + power: 0.9 diff --git a/contrib/Matting/configs/pp-matting/pp-matting_hrnet_w48_distinctions.yml b/contrib/Matting/configs/pp-matting/pp-matting_hrnet_w48_distinctions.yml new file mode 100644 index 0000000000..ed08e5e08f --- /dev/null +++ b/contrib/Matting/configs/pp-matting/pp-matting_hrnet_w48_distinctions.yml @@ -0,0 +1,56 @@ +batch_size: 4 +iters: 300000 + +train_dataset: + type: MattingDataset + dataset_root: data/matting/Distinctions-646 + train_file: train.txt + transforms: + - type: LoadImages + - type: Padding + target_size: [512, 512] 
+    - type: RandomCrop
+      crop_size: [[512, 512], [640, 640], [800, 800]]
+    - type: Resize
+      target_size: [512, 512]
+    - type: RandomDistort
+    - type: RandomBlur
+      prob: 0.1
+    - type: RandomHorizontalFlip
+    - type: Normalize
+  mode: train
+  separator: '|'
+
+val_dataset:
+  type: MattingDataset
+  dataset_root: data/matting/Distinctions-646
+  val_file: val.txt
+  transforms:
+    - type: LoadImages
+    - type: LimitShort
+      max_short: 1536
+    - type: ResizeToIntMult
+      mult_int: 32
+    - type: Normalize
+  mode: val
+  get_trimap: False
+  separator: '|'
+
+model:
+  type: PPMatting
+  # type: ZiYanGate
+  backbone:
+    type: HRNet_W48
+    pretrained: https://bj.bcebos.com/paddleseg/dygraph/hrnet_w48_ssld.tar.gz
+  pretrained: Null
+
+optimizer:
+  type: sgd
+  momentum: 0.9
+  weight_decay: 4.0e-5
+
+lr_scheduler:
+  type: PolynomialDecay
+  learning_rate: 0.01
+  end_lr: 0
+  power: 0.9
diff --git a/contrib/Matting/model/__init__.py b/contrib/Matting/model/__init__.py
index f89af61773..83f007ae26 100644
--- a/contrib/Matting/model/__init__.py
+++ b/contrib/Matting/model/__init__.py
@@ -19,5 +19,4 @@
 from .dim import DIM
 from .loss import MRSD, GradientLoss
 from .modnet import MODNet
-from .ziyan import ZiYanAM
-from .ziyan_gate import ZiYanGate
+from .ppmatting import PPMatting
diff --git a/contrib/Matting/model/ziyan_gate.py b/contrib/Matting/model/ppmatting.py
similarity index 94%
rename from contrib/Matting/model/ziyan_gate.py
rename to contrib/Matting/model/ppmatting.py
index 06e5317753..67b16cb883 100644
--- a/contrib/Matting/model/ziyan_gate.py
+++ b/contrib/Matting/model/ppmatting.py
@@ -35,7 +35,7 @@ def conv_up_psp(in_channels, out_channels, up_sample):
 
 @manager.MODELS.add_component
-class ZiYanGate(nn.Layer):
+class PPMatting(nn.Layer):
     def __init__(self, backbone, pretrained=None, loss_func_dict=None):
         super().__init__()
         self.backbone = backbone
@@ -198,7 +198,7 @@ def forward(self, inputs):
             return logit_dict
         else:
             return fusion_sigmoid
-    
+
     def get_loss_func_dict(self):
         loss_func_dict = defaultdict(list)
         loss_func_dict['glance'].append(nn.NLLLoss())
@@ -226,13 +226,11 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None):
         # focus loss computation
         transparent = label_dict['trimap'] == 128
-        focus_alpha_loss = loss_func_dict['focus'][0](logit_dict['focus'],
-                                                      label_dict['alpha'],
-                                                      transparent)
+        focus_alpha_loss = loss_func_dict['focus'][0](
+            logit_dict['focus'], label_dict['alpha'], transparent)
         # gradient loss
-        focus_gradient_loss = loss_func_dict['focus'][1](logit_dict['focus'],
-                                                         label_dict['alpha'],
-                                                         transparent)
+        focus_gradient_loss = loss_func_dict['focus'][1](
+            logit_dict['focus'], label_dict['alpha'], transparent)
         loss_focus = focus_alpha_loss + focus_gradient_loss
         loss['focus'] = loss_focus
         loss['focus_alpha'] = focus_alpha_loss
@@ -241,7 +239,8 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None):
         # collaborative matting loss
         loss_cm_func = loss_func_dict['cm']
         # fusion_sigmoid loss
-        cm_alpha_loss = loss_cm_func[0](logit_dict['fusion'], label_dict['alpha'])
+        cm_alpha_loss = loss_cm_func[0](logit_dict['fusion'],
+                                        label_dict['alpha'])
         # composition loss
         comp_pred = logit_dict['fusion'] * label_dict['fg'] + (
             1 - logit_dict['fusion']) * label_dict['bg']
         comp_gt = label_dict['alpha'] * label_dict['fg'] + (
             1 - label_dict['alpha']) * label_dict['bg']
         cm_composition_loss = loss_cm_func[1](comp_pred, comp_gt)
         # gradient loss
-        cm_grad_loss = loss_cm_func[2](logit_dict['fusion'], label_dict['alpha'])
+        cm_grad_loss = loss_cm_func[2](logit_dict['fusion'],
+                                       label_dict['alpha'])
         # cm loss
         loss_cm = cm_alpha_loss + cm_composition_loss + cm_grad_loss
         loss['cm'] = loss_cm
@@ -257,7 +257,6 @@ def loss(self, logit_dict, label_dict, loss_func_dict=None):
         loss['cm_alpha'] = cm_alpha_loss
         loss['cm_composition'] = cm_composition_loss
         loss['cm_gradient'] = cm_grad_loss
-
         loss['all'] = 0.25 * loss_glance + 0.25 * loss_focus + 0.25 * loss_cm
         return loss
@@ -322,7 +321,7 @@ def forward(self, input_features, gating_features):
     inputs = {}
     inputs['img'] = x
-    model = ZiYanGate(backbone=backbone, pretrained=None)
+    model = PPMatting(backbone=backbone, pretrained=None)
     results = model(inputs)
     print(results)

From 0b772488e01863b7ef4134e5c75c7893249f506a Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Wed, 23 Mar 2022 15:55:21 +0800
Subject: [PATCH 205/210] add export

---
 contrib/Matting/export.py | 111 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 111 insertions(+)
 create mode 100644 contrib/Matting/export.py

diff --git a/contrib/Matting/export.py b/contrib/Matting/export.py
new file mode 100644
index 0000000000..1953a2643d
--- /dev/null
+++ b/contrib/Matting/export.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+
+import paddle
+import yaml
+
+from paddleseg.cvlibs import Config
+from paddleseg.utils import logger
+
+import dataset
+import model
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Model export.')
+    # params of model export
+    parser.add_argument(
+        "--config",
+        dest="cfg",
+        help="The config file.",
+        default=None,
+        type=str,
+        required=True)
+    parser.add_argument(
+        '--save_dir',
+        dest='save_dir',
+        help='The directory for saving the exported model',
+        type=str,
+        default='./output')
+    parser.add_argument(
+        '--model_path',
+        dest='model_path',
+        help='The path of model for export',
+        type=str,
+        default=None)
+    parser.add_argument(
+        '--trimap',
+        dest='trimap',
+        help='Whether to input trimap',
+        action='store_true')
+    parser.add_argument(
+        "--input_shape",
+        nargs='+',
+        help="Export the model with fixed input shape, such as 1 3 1024 1024.",
+        type=int,
+        default=None)
+
+    return parser.parse_args()
+
+
+def main(args):
+    os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'
+    cfg = Config(args.cfg)
+
+    net = cfg.model
+    net.eval()
+    if args.model_path:
+        para_state_dict = paddle.load(args.model_path)
+        net.set_dict(para_state_dict)
+        logger.info('Loaded trained params of model successfully.')
+
+    if args.input_shape is None:
+        shape = [None, 3, None, None]
+    else:
+        shape = args.input_shape
+
+    input_spec = [{"img": paddle.static.InputSpec(shape=shape, name='img')}]
+    if args.trimap:
+        shape[1] = 1
+        input_spec[0]['trimap'] = paddle.static.InputSpec(
+            shape=shape, name='trimap')
+
+    net = paddle.jit.to_static(net, input_spec=input_spec)
+    save_path = os.path.join(args.save_dir, 'model')
+    paddle.jit.save(net, save_path)
+
+    yml_file = os.path.join(args.save_dir, 'deploy.yaml')
+    with open(yml_file, 'w') as file:
+        transforms = cfg.val_dataset_config.get('transforms',
+                                                [{
+                                                    'type': 'Normalize'
+                                                }])
+        data = {
+            'Deploy': {
+                'transforms': transforms,
+                'model': 'model.pdmodel',
+                'params': 'model.pdiparams'
+            }
+        }
+        yaml.dump(data, file)
+
+    logger.info(f'Model is saved in {args.save_dir}.')
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    main(args)

From 751c415c73080e178c9de426004e6dcbaa95cdde Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Sat, 2 Apr 2022 14:47:40 +0800
Subject: [PATCH 206/210] update some annotation

---
 Matting/core/train.py  | 2 +-
 Matting/model/hrnet.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Matting/core/train.py b/Matting/core/train.py
index d84861a303..2ad20ff3eb 100644
--- a/Matting/core/train.py
+++ b/Matting/core/train.py
@@ -55,7 +55,7 @@ def visual_in_traning(log_writer, vis_dict, step):
 
 def get_best(best_file, resume_model=None):
-    '''Get best sad, mse, grad, conn adn iter from file'''
+    '''Get best sad, mse, grad, conn and iter from file'''
     if os.path.exists(best_file) and (resume_model is not None):
         with open(best_file, 'rb') as f:
             best_sad, best_sad_mse, best_sad_grad, best_sad_conn, best_iter = pickle.load(
diff --git a/Matting/model/hrnet.py b/Matting/model/hrnet.py
index def34566d0..96e23a77e6 100644
--- a/Matting/model/hrnet.py
+++ b/Matting/model/hrnet.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

From 211c59b45cbc75bfb9cf5fb721d55e048e42d713 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Sat, 2 Apr 2022 16:32:10 +0800
Subject: [PATCH 207/210] update README

---
 Matting/README_CN.md | 38 ++++++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 14 deletions(-)

diff --git a/Matting/README_CN.md b/Matting/README_CN.md
index 369a62f5a7..a94c3faa06 100644
--- a/Matting/README_CN.md
+++ b/Matting/README_CN.md
@@ -9,6 +9,11 @@ Matting(精细化分割/影像去背/抠图)是指借由计算前景的颜

 ## 更新动态
+2022.04
+【1】新增PPMatting模型,
+【2】PPHumanMatting高分辨人像抠图模型
+【3】新增Grad, Conn评估指标
+【4】新增前景评估功能,利用[ML](https://arxiv.org/pdf/2006.14970.pdf)算法在预测和背景替换时进行前景评估。
 2021.11 Matting项目开源, 实现图像抠图功能。
 【1】支持Matting模型:DIM, MODNet。
 【2】支持模型导出及Python部署。
@@ -54,20 +59,25 @@ cd contrib/Matting
 ```
 
 ## 模型
-
-[PP-HumanMatting](https://paddleseg.bj.bcebos.com/matting/models/human_matting-resnet34_vd.pdparams)
-
-[DIM-VGG16](https://paddleseg.bj.bcebos.com/matting/models/dim-vgg16.pdparams)
-
-MODNet在[PPM-100](https://github.com/ZHKKKe/PPM)数据集上的性能
-
-| Backbone | SAD | MSE | Params(M) | FLOPs(G) | FPS | Link |
-|-|-|-|-|-|-|-|
-|MobileNetV2|112.73|0.0098|6.5|15.7|67.5|[model](https://paddleseg.bj.bcebos.com/matting/models/modnet-mobilenetv2.pdparams)|
-|ResNet50_vd|104.14|0.0090|92.2|151.6|28.6|[model](https://paddleseg.bj.bcebos.com/matting/models/modnet-resnet50_vd.pdparams)|
-|HRNet_W18|77.96|0.0054|10.2|28.5|10.9|[model](https://paddleseg.bj.bcebos.com/matting/models/modnet-hrnet_w18.pdparams)|
-
-注意:模型输入大小为(512, 512), GPU为Tesla V100 32G。
+提供多种场景人像抠图模型, 可根据实际情况选择相应模型,我们提供了Inference Model,您可直接下载进行[部署应用](#应用部署)。
+
+模型推荐:
+- 追求精度:PP-Matting, 低分辨率使用PP-Matting-512, 高分辨率使用PP-Matting-1024。
+- 追求速度:ModNet-MobileNetV2。
+- 高分辨率(>2048)简单背景人像抠图:PP-HumanMatting。
+- 提供trimap:DIM-VGG16。
+
+| 模型 | 模型说明 | Params(M) | FLOPs(G) | FPS | Checkpoint | Inference Model |
+| - | - | - | -| - | - | - |
+| PP-Matting-512 | - | - | - | - | - | [model inference]() |
+| PP-Matting-1024 | - | - | - | - | - | [model inference]() |
+| PP-HumanMatting | - | - | - | - | [model](https://paddleseg.bj.bcebos.com/matting/models/human_matting-resnet34_vd.pdparams) | [model inference]() |
+| ModNet-MobileNetV2 | - | 6.5 | 15.7 | 67.5 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-mobilenetv2.pdparams) | [model inference]() |
+| ModNet-ResNet50_vd | - | 92.2 | 151.6 | 28.6 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-resnet50_vd.pdparams) | [model inference]() |
+| ModNet-HRNet_W18 | - | 10.2 | 28.5 | 10.9 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-hrnet_w18.pdparams) | [model inference]() |
+| DIM-VGG16 | - | - | -| - | [model](https://paddleseg.bj.bcebos.com/matting/models/dim-vgg16.pdparams) | [model inference]() |
+
+注意:FLOPs计算时模型输入大小为(512, 512), GPU为Tesla V100 32G。
 
 ## 数据准备

From 0b3cd86645ad5fd2914aef8f4f884aef33ee80cd Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Wed, 6 Apr 2022 11:35:02 +0800
Subject: [PATCH 208/210] update README

---
 Matting/README.md    | 42 ++++++++++++++++++++++++++----------------
 Matting/README_CN.md | 33 ++++++++++++++++-----------------
 2 files changed, 42 insertions(+), 33 deletions(-)

diff --git a/Matting/README.md b/Matting/README.md
index 463d650d84..e471ac2f84 100644
--- a/Matting/README.md
+++ b/Matting/README.md
@@ -8,6 +8,12 @@ Image Matting is the technique of extracting foreground from an image by calcula

 ## Update Notes
+2022.04
+[1] Add PPMatting model.
+[2] Add PPHumanMatting high-resolution human matting model.
+[3] Add Grad, Conn evaluation metrics.
+[4] Add foreground evaluation function. Use the [ML](https://arxiv.org/pdf/2006.14970.pdf) algorithm to estimate the foreground during prediction and background replacement.
+
 2021.11 Matting Project is released.
 [1] Support Matting models: DIM, MODNet.
 [2] Support model export and python deployment.
@@ -52,19 +58,25 @@ cd contrib/Matting
 ```
 
 ## Models
-[PP-HumanMatting](https://paddleseg.bj.bcebos.com/matting/models/human_matting-resnet34_vd.pdparams)
-
-[DIM-VGG16](https://paddleseg.bj.bcebos.com/matting/models/dim-vgg16.pdparams)
-
-MODNet performance on [PPM-100](https://github.com/ZHKKKe/PPM).
-
-| Backbone | SAD | MSE | Params(M) | FLOPs(G) | FPS | Link |
-|-|-|-|-|-|-|-|
-|MobileNetV2|112.73|0.0098|6.5|15.7|67.5|[model](https://paddleseg.bj.bcebos.com/matting/models/modnet-mobilenetv2.pdparams)|
-|ResNet50_vd|104.14|0.0090|92.2|151.6|28.6|[model](https://paddleseg.bj.bcebos.com/matting/models/modnet-resnet50_vd.pdparams)|
-|HRNet_W18|77.96|0.0054|10.2|28.5|10.9|[model](https://paddleseg.bj.bcebos.com/matting/models/modnet-hrnet_w18.pdparams)|
-
-Note: The model input size is (512, 512) and the GPU is Tesla V100 32G.
+A variety of human matting models are provided; choose one according to the actual scenario.
+
+Model recommendations:
+- For accuracy: PP-Matting; use PP-Matting-512 for low-resolution inputs and PP-Matting-1024 for high-resolution inputs.
+- For speed: ModNet-MobileNetV2.
+- For high-resolution (>2048) human matting with a simple background: PP-HumanMatting.
+- When a trimap is available: DIM-VGG16.
+
+| Model | Params(M) | FLOPs(G) | FPS | Checkpoint | Inference Model |
+| - | - | - | - | - | - |
+| PP-Matting-512 | 24.5 | 91.28 | 32.1 | - | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/pp-matting-hrnet_w18-human_512.zip) |
+| PP-Matting-1024 | 24.5 | 91.28 | 18.6 (1024x1024) | - | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/pp-matting-hrnet_w18-human_1024.zip) |
+| PP-HumanMatting | 63.9 | 135.8 (2048x2048) | 35.7 (2048x2048) | [model](https://paddleseg.bj.bcebos.com/matting/models/human_matting-resnet34_vd.pdparams) | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/pp-humanmatting-resnet34_vd.zip) |
+| ModNet-MobileNetV2 | 6.5 | 15.7 | 151.6 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-mobilenetv2.pdparams) | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/modnet-mobilenetv2.zip) |
+| ModNet-ResNet50_vd | 92.2 | 151.6 | 142.8 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-resnet50_vd.pdparams) | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/modnet-resnet50_vd.zip) |
+| ModNet-HRNet_W18 | 10.2 | 28.5 | 39.1 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-hrnet_w18.pdparams) | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/modnet-hrnet_w18.zip) |
+| DIM-VGG16 | 28.4 | 175.5 | 32.2 | [model](https://paddleseg.bj.bcebos.com/matting/models/dim-vgg16.pdparams) | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/dim-vgg16.zip) |
+
+Note: FLOPs and FPS are measured with a (512, 512) input unless stated otherwise, on a Tesla V100 32G GPU.
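The Params/FLOPs figures above depend on a fixed input size and can be spot-checked with `paddle.flops`. Below is a minimal sketch (not part of the patch series), assuming PaddlePaddle 2.x and that it is run from the `Matting/` directory so the local `model` package registers its components, as `export.py` does; the config path is one of the files added in PATCH 204, and the small wrapper reflects the `inputs['img']` calling convention visible in `ppmatting.py`:

```python
import paddle
from paddleseg.cvlibs import Config

import model  # noqa: F401  registers the Matting models, as in export.py


class DictInputWrapper(paddle.nn.Layer):
    """paddle.flops feeds the network a bare tensor, while these Matting
    models read inputs['img'], so adapt the calling convention here."""

    def __init__(self, net):
        super().__init__()
        self.net = net

    def forward(self, x):
        return self.net({'img': x})


cfg = Config('configs/pp-matting/pp-matting_hrnet_w18_distinctions.yml')
net = cfg.model
net.eval()

wrapped = DictInputWrapper(net)
wrapped.eval()

# Prints total FLOPs and parameter count for the (512, 512) input size
# used by the table above.
paddle.flops(wrapped, input_size=[1, 3, 512, 512])
```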
 
 ## Dataset preparation
@@ -245,7 +257,5 @@ python deploy/python/infer.py --help
 
 ## Contributors
 Thanks
-[wuyefeilin](https://github.com/wuyefeilin),
-[Qian bin](https://github.com/qianbin1989228),
-[yzl19940819](https://github.com/yzl19940819)
+[Qian bin](https://github.com/qianbin1989228)
 for their contributions.
diff --git a/Matting/README_CN.md b/Matting/README_CN.md
index a94c3faa06..6d7da55cc3 100644
--- a/Matting/README_CN.md
+++ b/Matting/README_CN.md
@@ -10,10 +10,11 @@
 ## 更新动态
 2022.04
-【1】新增PPMatting模型,
-【2】PPHumanMatting高分辨人像抠图模型
-【3】新增Grad, Conn评估指标
+【1】新增PPMatting模型。
+【2】新增PPHumanMatting高分辨人像抠图模型。
+【3】新增Grad, Conn评估指标。
 【4】新增前景评估功能,利用[ML](https://arxiv.org/pdf/2006.14970.pdf)算法在预测和背景替换时进行前景评估。
+
 2021.11 Matting项目开源, 实现图像抠图功能。
 【1】支持Matting模型:DIM, MODNet。
 【2】支持模型导出及Python部署。
@@ -64,20 +65,20 @@ cd contrib/Matting
 模型推荐:
 - 追求精度:PP-Matting, 低分辨率使用PP-Matting-512, 高分辨率使用PP-Matting-1024。
 - 追求速度:ModNet-MobileNetV2。
-- 高分辨率(>2048)简单背景人像抠图:PP-HumanMatting。
-- 提供trimap:DIM-VGG16。
+- 高分辨率(>2048)简单背景人像抠图:PP-HumanMatting。
+- 提供trimap:DIM-VGG16。
-| 模型 | 模型说明 | Params(M) | FLOPs(G) | FPS | Checkpoint | Inference Model |
-| - | - | - | -| - | - | - |
-| PP-Matting-512 | - | - | - | - | - | [model inference]() |
-| PP-Matting-1024 | - | - | - | - | - | [model inference]() |
-| PP-HumanMatting | - | - | - | - | [model](https://paddleseg.bj.bcebos.com/matting/models/human_matting-resnet34_vd.pdparams) | [model inference]() |
-| ModNet-MobileNetV2 | - | 6.5 | 15.7 | 67.5 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-mobilenetv2.pdparams) | [model inference]() |
-| ModNet-ResNet50_vd | - | 92.2 | 151.6 | 28.6 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-resnet50_vd.pdparams) | [model inference]() |
-| ModNet-HRNet_W18 | - | 10.2 | 28.5 | 10.9 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-hrnet_w18.pdparams) | [model inference]() |
-| DIM-VGG16 | - | - | -| - | [model](https://paddleseg.bj.bcebos.com/matting/models/dim-vgg16.pdparams) | [model inference]() |
+| 模型 | Params(M) | FLOPs(G) | FPS | Checkpoint | Inference Model |
+| - | - | - | - | - | - |
+| PP-Matting-512 | 24.5 | 91.28 | 32.1 | - | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/pp-matting-hrnet_w18-human_512.zip) |
+| PP-Matting-1024 | 24.5 | 91.28 | 18.6 (1024x1024) | - | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/pp-matting-hrnet_w18-human_1024.zip) |
+| PP-HumanMatting | 63.9 | 135.8 (2048x2048) | 35.7 (2048x2048) | [model](https://paddleseg.bj.bcebos.com/matting/models/human_matting-resnet34_vd.pdparams) | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/pp-humanmatting-resnet34_vd.zip) |
+| ModNet-MobileNetV2 | 6.5 | 15.7 | 151.6 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-mobilenetv2.pdparams) | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/modnet-mobilenetv2.zip) |
+| ModNet-ResNet50_vd | 92.2 | 151.6 | 142.8 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-resnet50_vd.pdparams) | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/modnet-resnet50_vd.zip) |
+| ModNet-HRNet_W18 | 10.2 | 28.5 | 39.1 | [model](https://paddleseg.bj.bcebos.com/matting/models/modnet-hrnet_w18.pdparams) | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/modnet-hrnet_w18.zip) |
+| DIM-VGG16 | 28.4 | 175.5 | 32.2 | [model](https://paddleseg.bj.bcebos.com/matting/models/dim-vgg16.pdparams) | [model inference](https://paddleseg.bj.bcebos.com/matting/models/deploy/dim-vgg16.zip) |
+
-注意:FLOPs计算时模型输入大小为(512, 512), GPU为Tesla V100 32G。
+注意:FLOPs和FPS计算默认模型输入大小为(512, 512), GPU为Tesla V100 32G。
 
 ## 数据准备
@@ -260,7 +261,5 @@ python deploy/python/infer.py --help
 
 ## 贡献者
 感谢
-[wuyefeilin](https://github.com/wuyefeilin)、
-[钱彬(Qianbin)](https://github.com/qianbin1989228)、
-[yzl19940819](https://github.com/yzl19940819)
+[钱彬(Qianbin)](https://github.com/qianbin1989228)
 等开发者的贡献

From 65812ef9c34814e3d1c745bf52aed6b62bff2005 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Wed, 6 Apr 2022 12:55:00 +0800
Subject: [PATCH 209/210] update README

---
 Matting/README.md    | 2 ++
 Matting/README_CN.md | 4 +++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/Matting/README.md b/Matting/README.md
index e471ac2f84..ad7c2ddbaa 100644
--- a/Matting/README.md
+++ b/Matting/README.md
@@ -13,6 +13,8 @@ Image Matting is the technique of extracting foreground from an image by calcula
 [2] Add PPHumanMatting high-resolution human matting model.
 [3] Add Grad, Conn evaluation metrics.
 [4] Add foreground evaluation function. Use the [ML](https://arxiv.org/pdf/2006.14970.pdf) algorithm to estimate the foreground during prediction and background replacement.
+[5] Add GradientLoss and LaplacianLoss.
+[6] Add RandomSharpen, RandomReJpeg, and RSSN data augmentation strategies.
 
 2021.11 Matting Project is released.
 [1] Support Matting models: DIM, MODNet.
 [2] Support model export and python deployment.
diff --git a/Matting/README_CN.md b/Matting/README_CN.md
index 6d7da55cc3..963f672bc4 100644
--- a/Matting/README_CN.md
+++ b/Matting/README_CN.md
@@ -12,8 +12,10 @@
 2022.04
 【1】新增PPMatting模型。
-【2】新增PPHumanMatting高分辨人像抠图模型。
-【3】新增Grad, Conn评估指标。
+【2】新增PPHumanMatting高分辨率人像抠图模型。
+【3】新增Grad、Conn评估指标。
 【4】新增前景评估功能,利用[ML](https://arxiv.org/pdf/2006.14970.pdf)算法在预测和背景替换时进行前景评估。
+【5】新增GradientLoss和LaplacianLoss。
+【6】新增RandomSharpen、RandomReJpeg、RSSN数据增强策略。
 
 2021.11 Matting项目开源, 实现图像抠图功能。
 【1】支持Matting模型:DIM, MODNet。
 【2】支持模型导出及Python部署。

From 1303f03aeebb92c6ecdc305e96f89e5628ef9b38 Mon Sep 17 00:00:00 2001
From: wuyefeilin
Date: Tue, 19 Apr 2022 18:16:05 +0800
Subject: [PATCH 210/210] fix opencv-python version

---
 Matting/README.md        | 5 ++---
 Matting/README_CN.md     | 5 ++---
 Matting/requirements.txt | 3 +++
 3 files changed, 7 insertions(+), 6 deletions(-)
 create mode 100644 Matting/requirements.txt

diff --git a/Matting/README.md b/Matting/README.md
index ad7c2ddbaa..24b7781377 100644
--- a/Matting/README.md
+++ b/Matting/README.md
@@ -54,9 +54,8 @@ git clone https://github.com/PaddlePaddle/PaddleSeg
 ```shell
 cd PaddleSeg
 pip install -e .
-pip install scikit-image
-pip install numba
-cd contrib/Matting
+cd Matting
+pip install -r requirements.txt
 ```
 
 ## Models
diff --git a/Matting/README_CN.md b/Matting/README_CN.md
index 963f672bc4..cc6e61c4f4 100644
--- a/Matting/README_CN.md
+++ b/Matting/README_CN.md
@@ -56,9 +56,8 @@ git clone https://github.com/PaddlePaddle/PaddleSeg
 ```shell
 cd PaddleSeg
 pip install -e .
-pip install scikit-image
-pip install numba
-cd contrib/Matting
+cd Matting
+pip install -r requirements.txt
 ```
 
 ## 模型
diff --git a/Matting/requirements.txt b/Matting/requirements.txt
new file mode 100644
index 0000000000..1fa4dd4adf
--- /dev/null
+++ b/Matting/requirements.txt
@@ -0,0 +1,3 @@
+scikit-image
+numba
+opencv-python==4.5.4.60
\ No newline at end of file
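A closing note on the identity several of these patches lean on: `MattingDataset.composite()` (the hunk at the top of this section) and the `cm_composition` loss in `ppmatting.py` are both instances of the standard alpha-compositing equation `I = alpha * F + (1 - alpha) * B`. Below is a self-contained NumPy sketch of that equation, using toy data rather than code from the patches:

```python
import numpy as np


def composite(fg, alpha, bg):
    """Blend fg over bg with a per-pixel alpha: I = alpha*F + (1 - alpha)*B."""
    alpha = np.expand_dims(alpha, axis=2)  # (H, W) -> (H, W, 1), as in the dataset code
    image = alpha * fg.astype(np.float32) + (1 - alpha) * bg.astype(np.float32)
    return image.astype(np.uint8)


fg = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
bg = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)

# alpha == 1 keeps the foreground, alpha == 0 keeps the background.
assert (composite(fg, np.ones((4, 4)), bg) == fg).all()
assert (composite(fg, np.zeros((4, 4)), bg) == bg).all()
```

The `cm_composition` loss applies this identity twice, once with the predicted alpha and once with the ground-truth alpha, and penalizes the difference between the two composites; that is why the dataset change above returns `fg` from `composite()` alongside the composited image.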