From 989f52d67d05445ccd030d8f13d6cc53e297fb91 Mon Sep 17 00:00:00 2001 From: facebook-github-bot Date: Thu, 10 Oct 2019 09:03:54 -0700 Subject: [PATCH] Initial commit fbshipit-source-id: 1f2160ca15bd4e3ccced40e2bfac4025f15f2f11 --- .clang-format | 85 ++ .flake8 | 9 + .github/Detectron2-Logo-Horz.svg | 1 + .github/ISSUE_TEMPLATE/feature-request.md | 16 + .../ISSUE_TEMPLATE/questions-help-support.md | 14 + .../unexpected-problems-bugs.md | 29 + .github/pull_request_template.md | 6 + .gitignore | 34 + CODE_OF_CONDUCT.md | 5 + CONTRIBUTING.md | 31 + GETTING_STARTED.md | 62 + INSTALL.md | 53 + LICENSE | 201 +++ MODEL_ZOO.md | 807 +++++++++++ README.md | 46 + configs/Base-RCNN-C4.yaml | 18 + configs/Base-RCNN-DilatedC5.yaml | 30 + configs/Base-RCNN-FPN.yaml | 41 + configs/Base-RetinaNet.yaml | 23 + .../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml | 17 + .../faster_rcnn_R_101_C4_3x.yaml | 9 + .../faster_rcnn_R_101_DC5_3x.yaml | 9 + .../faster_rcnn_R_101_FPN_3x.yaml | 9 + .../faster_rcnn_R_50_C4_1x.yaml | 6 + .../faster_rcnn_R_50_C4_3x.yaml | 9 + .../faster_rcnn_R_50_DC5_1x.yaml | 6 + .../faster_rcnn_R_50_DC5_3x.yaml | 9 + .../faster_rcnn_R_50_FPN_1x.yaml | 6 + .../faster_rcnn_R_50_FPN_3x.yaml | 9 + .../faster_rcnn_X_101_32x8d_FPN_3x.yaml | 13 + .../retinanet_R_101_FPN_3x.yaml | 8 + .../COCO-Detection/retinanet_R_50_FPN_1x.yaml | 5 + .../COCO-Detection/retinanet_R_50_FPN_3x.yaml | 8 + configs/COCO-Detection/rpn_R_50_C4_1x.yaml | 10 + configs/COCO-Detection/rpn_R_50_FPN_1x.yaml | 9 + .../mask_rcnn_R_101_C4_3x.yaml | 9 + .../mask_rcnn_R_101_DC5_3x.yaml | 9 + .../mask_rcnn_R_101_FPN_3x.yaml | 9 + .../mask_rcnn_R_50_C4_1x.yaml | 6 + .../mask_rcnn_R_50_C4_3x.yaml | 9 + .../mask_rcnn_R_50_DC5_1x.yaml | 6 + .../mask_rcnn_R_50_DC5_3x.yaml | 9 + .../mask_rcnn_R_50_FPN_1x.yaml | 6 + .../mask_rcnn_R_50_FPN_3x.yaml | 9 + .../mask_rcnn_X_101_32x8d_FPN_3x.yaml | 13 + .../Base-Keypoint-RCNN-FPN.yaml | 15 + .../keypoint_rcnn_R_101_FPN_3x.yaml | 8 + .../keypoint_rcnn_R_50_FPN_1x.yaml | 5 + .../keypoint_rcnn_R_50_FPN_3x.yaml | 8 + .../keypoint_rcnn_X_101_32x8d_FPN_3x.yaml | 12 + .../Base-Panoptic-FPN.yaml | 9 + .../panoptic_fpn_R_101_3x.yaml | 8 + .../panoptic_fpn_R_50_1x.yaml | 5 + .../panoptic_fpn_R_50_3x.yaml | 8 + configs/Cityscapes/mask_rcnn_R_50_FPN.yaml | 25 + configs/Detectron1-Comparisons/README.md | 77 + .../faster_rcnn_R_50_FPN_noaug_1x.yaml | 17 + .../keypoint_rcnn_R_50_FPN_1x.yaml | 27 + .../mask_rcnn_R_50_FPN_noaug_1x.yaml | 20 + .../mask_rcnn_R_101_FPN_1x.yaml | 19 + .../mask_rcnn_R_50_FPN_1x.yaml | 19 + .../mask_rcnn_X_101_32x8d_FPN_1x.yaml | 23 + .../Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml | 12 + ...sk_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml | 36 + .../mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml | 8 + configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml | 21 + ...anoptic_fpn_R_101_dconv_cascade_gn_3x.yaml | 26 + .../scratch_mask_rcnn_R_50_FPN_3x_gn.yaml | 11 + .../faster_rcnn_R_50_C4.yaml | 18 + .../faster_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml | 8 + .../dcn/faster_rcnn_R_50_FPN_1x_dconv_c5.yaml | 8 + .../faster_rcnn_R_50_FPN_1x_mdconv_c3-c5.yaml | 8 + .../faster_rcnn_R_50_FPN_1x_mdconv_c5.yaml | 9 + .../mask_rcnn_R_50_FPN_1x_mdconv_c3-c5.yaml | 8 + .../mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml | 10 + configs/quick_schedules/README.md | 51 + ...fast_rcnn_R_50_FPN_inference_acc_test.yaml | 7 + .../fast_rcnn_R_50_FPN_instant_test.yaml | 15 + ...oint_rcnn_R_50_FPN_inference_acc_test.yaml | 7 + .../keypoint_rcnn_R_50_FPN_instant_test.yaml | 14 + ...R_50_FPN_normalized_training_acc_test.yaml | 30 + 
...point_rcnn_R_50_FPN_training_acc_test.yaml | 28 + .../mask_rcnn_R_50_C4_inference_acc_test.yaml | 7 + .../mask_rcnn_R_50_C4_instant_test.yaml | 14 + .../mask_rcnn_R_50_C4_training_acc_test.yaml | 22 + ...mask_rcnn_R_50_DC5_inference_acc_test.yaml | 7 + ...mask_rcnn_R_50_FPN_inference_acc_test.yaml | 11 + .../mask_rcnn_R_50_FPN_instant_test.yaml | 14 + .../mask_rcnn_R_50_FPN_training_acc_test.yaml | 21 + .../panoptic_fpn_R_50_inference_acc_test.yaml | 7 + .../panoptic_fpn_R_50_instant_test.yaml | 19 + .../panoptic_fpn_R_50_training_acc_test.yaml | 20 + ...retinanet_R_50_FPN_inference_acc_test.yaml | 7 + .../retinanet_R_50_FPN_instant_test.yaml | 13 + .../rpn_R_50_FPN_inference_acc_test.yaml | 7 + .../rpn_R_50_FPN_instant_test.yaml | 13 + .../semantic_R_50_FPN_inference_acc_test.yaml | 10 + .../semantic_R_50_FPN_instant_test.yaml | 18 + .../semantic_R_50_FPN_training_acc_test.yaml | 20 + configs/semantic_R_101_FPN_1x.yaml | 11 + configs/semantic_R_50_FPN_1x.yaml | 11 + datasets/README.md | 80 ++ datasets/prepare_for_tests.sh | 22 + datasets/prepare_panoptic_fpn.py | 116 ++ demo/README.md | 16 + demo/demo.py | 142 ++ demo/predictor.py | 218 +++ detectron2/__init__.py | 8 + detectron2/checkpoint/__init__.py | 10 + detectron2/checkpoint/c2_model_loading.py | 313 ++++ detectron2/checkpoint/detection_checkpoint.py | 59 + detectron2/checkpoint/model_zoo.py | 132 ++ detectron2/config/__init__.py | 12 + detectron2/config/compat.py | 229 +++ detectron2/config/config.py | 98 ++ detectron2/config/defaults.py | 561 ++++++++ detectron2/data/__init__.py | 18 + detectron2/data/build.py | 417 ++++++ detectron2/data/catalog.py | 207 +++ detectron2/data/common.py | 81 ++ detectron2/data/dataset_mapper.py | 144 ++ detectron2/data/datasets/README.md | 9 + detectron2/data/datasets/__init__.py | 9 + detectron2/data/datasets/builtin.py | 213 +++ detectron2/data/datasets/builtin_meta.py | 267 ++++ detectron2/data/datasets/cityscapes.py | 317 +++++ detectron2/data/datasets/coco.py | 315 ++++ detectron2/data/datasets/lvis.py | 205 +++ .../data/datasets/lvis_v0_5_categories.py | 13 + detectron2/data/datasets/pascal_voc.py | 78 + detectron2/data/datasets/register_coco.py | 124 ++ detectron2/data/detection_utils.py | 397 ++++++ detectron2/data/samplers/__init__.py | 10 + .../data/samplers/distributed_sampler.py | 199 +++ .../data/samplers/grouped_batch_sampler.py | 46 + detectron2/data/transforms/__init__.py | 6 + detectron2/data/transforms/transform.py | 183 +++ detectron2/data/transforms/transform_gen.py | 447 ++++++ detectron2/engine/__init__.py | 12 + detectron2/engine/defaults.py | 425 ++++++ detectron2/engine/hooks.py | 404 ++++++ detectron2/engine/launch.py | 83 ++ detectron2/engine/train_loop.py | 255 ++++ detectron2/evaluation/__init__.py | 11 + .../evaluation/cityscapes_evaluation.py | 114 ++ detectron2/evaluation/coco_evaluation.py | 465 ++++++ detectron2/evaluation/evaluator.py | 160 +++ detectron2/evaluation/lvis_evaluation.py | 360 +++++ detectron2/evaluation/panoptic_evaluation.py | 171 +++ .../evaluation/pascal_voc_evaluation.py | 292 ++++ detectron2/evaluation/sem_seg_evaluation.py | 162 +++ detectron2/evaluation/testing.py | 77 + detectron2/layers/__init__.py | 11 + detectron2/layers/batch_norm.py | 180 +++ detectron2/layers/csrc/README.md | 7 + detectron2/layers/csrc/ROIAlign/ROIAlign.h | 126 ++ .../layers/csrc/ROIAlign/ROIAlign_cpu.cpp | 495 +++++++ .../layers/csrc/ROIAlign/ROIAlign_cuda.cu | 418 ++++++ .../csrc/ROIAlignRotated/ROIAlignRotated.h | 115 ++ .../ROIAlignRotated/ROIAlignRotated_cpu.cpp 
| 519 +++++++ .../ROIAlignRotated/ROIAlignRotated_cuda.cu | 435 ++++++ .../csrc/box_iou_rotated/box_iou_rotated.h | 35 + .../box_iou_rotated/box_iou_rotated_cpu.cpp | 46 + .../box_iou_rotated/box_iou_rotated_cuda.cu | 103 ++ .../box_iou_rotated/box_iou_rotated_utils.h | 342 +++++ .../layers/csrc/deformable/deform_conv.h | 373 +++++ .../csrc/deformable/deform_conv_cuda.cu | 1126 +++++++++++++++ .../deformable/deform_conv_cuda_kernel.cu | 1268 +++++++++++++++++ .../layers/csrc/nms_rotated/nms_rotated.h | 38 + .../csrc/nms_rotated/nms_rotated_cpu.cpp | 73 + .../csrc/nms_rotated/nms_rotated_cuda.cu | 132 ++ detectron2/layers/csrc/vision.cpp | 71 + detectron2/layers/deform_conv.py | 494 +++++++ detectron2/layers/mask_ops.py | 241 ++++ detectron2/layers/nms.py | 146 ++ detectron2/layers/roi_align.py | 105 ++ detectron2/layers/roi_align_rotated.py | 88 ++ detectron2/layers/rotated_boxes.py | 24 + detectron2/layers/shape_spec.py | 20 + detectron2/layers/wrappers.py | 154 ++ detectron2/modeling/__init__.py | 54 + detectron2/modeling/anchor_generator.py | 352 +++++ detectron2/modeling/backbone/__init__.py | 8 + detectron2/modeling/backbone/backbone.py | 70 + detectron2/modeling/backbone/build.py | 26 + detectron2/modeling/backbone/fpn.py | 244 ++++ detectron2/modeling/backbone/resnet.py | 479 +++++++ detectron2/modeling/box_regression.py | 214 +++ detectron2/modeling/matcher.py | 128 ++ detectron2/modeling/meta_arch/__init__.py | 11 + detectron2/modeling/meta_arch/build.py | 15 + detectron2/modeling/meta_arch/panoptic_fpn.py | 216 +++ detectron2/modeling/meta_arch/rcnn.py | 204 +++ detectron2/modeling/meta_arch/retinanet.py | 430 ++++++ detectron2/modeling/meta_arch/semantic_seg.py | 170 +++ detectron2/modeling/poolers.py | 194 +++ detectron2/modeling/postprocessing.py | 79 + .../modeling/proposal_generator/__init__.py | 3 + .../modeling/proposal_generator/build.py | 21 + .../proposal_generator/proposal_utils.py | 57 + detectron2/modeling/proposal_generator/rpn.py | 188 +++ .../proposal_generator/rpn_outputs.py | 436 ++++++ .../modeling/proposal_generator/rrpn.py | 87 ++ .../proposal_generator/rrpn_outputs.py | 240 ++++ detectron2/modeling/roi_heads/__init__.py | 7 + detectron2/modeling/roi_heads/box_head.py | 91 ++ detectron2/modeling/roi_heads/cascade_rcnn.py | 243 ++++ detectron2/modeling/roi_heads/fast_rcnn.py | 371 +++++ .../modeling/roi_heads/keypoint_head.py | 166 +++ detectron2/modeling/roi_heads/mask_head.py | 204 +++ detectron2/modeling/roi_heads/roi_heads.py | 816 +++++++++++ detectron2/modeling/sampling.py | 45 + detectron2/modeling/test_time_augmentation.py | 244 ++++ detectron2/solver/__init__.py | 5 + detectron2/solver/build.py | 60 + detectron2/solver/lr_scheduler.py | 116 ++ detectron2/structures/__init__.py | 10 + detectron2/structures/boxes.py | 291 ++++ detectron2/structures/image_list.py | 96 ++ detectron2/structures/instances.py | 184 +++ detectron2/structures/keypoints.py | 200 +++ detectron2/structures/masks.py | 346 +++++ detectron2/structures/rotated_boxes.py | 404 ++++++ detectron2/utils/README.md | 5 + detectron2/utils/__init__.py | 1 + detectron2/utils/collect_env.py | 65 + detectron2/utils/colormap.py | 140 ++ detectron2/utils/comm.py | 263 ++++ detectron2/utils/env.py | 105 ++ detectron2/utils/events.py | 325 +++++ detectron2/utils/logger.py | 196 +++ detectron2/utils/registry.py | 62 + detectron2/utils/serialize.py | 29 + detectron2/utils/video_visualizer.py | 235 +++ detectron2/utils/visualizer.py | 974 +++++++++++++ dev/README.md | 6 + dev/linter.sh | 26 + 
dev/parse_results.sh | 44 + dev/run_inference_tests.sh | 44 + dev/run_instant_tests.sh | 27 + docs/.gitignore | 1 + docs/Makefile | 19 + docs/README.md | 5 + docs/conf.py | 238 ++++ docs/index.rst | 16 + docs/modules/checkpoint.rst | 7 + docs/modules/config.rst | 7 + docs/modules/data.datasets.rst | 7 + docs/modules/data.rst | 26 + docs/modules/data.samplers.rst | 7 + docs/modules/data.transforms.rst | 7 + docs/modules/engine.rst | 25 + docs/modules/evaluation.rst | 7 + docs/modules/index.rst | 15 + docs/modules/layers.rst | 7 + docs/modules/modeling.rst | 7 + docs/modules/solver.rst | 7 + docs/modules/structures.rst | 7 + docs/modules/utils.rst | 63 + docs/notes/benchmarks.md | 202 +++ docs/notes/changelog.md | 12 + docs/notes/compatibility.md | 75 + docs/notes/index.rst | 9 + docs/requirements.txt | 13 + docs/tutorials/configs.md | 26 + docs/tutorials/data_loading.md | 81 ++ docs/tutorials/datasets.md | 152 ++ docs/tutorials/extend.md | 45 + docs/tutorials/getting_started.md | 1 + docs/tutorials/index.rst | 14 + docs/tutorials/install.md | 1 + docs/tutorials/models.md | 32 + docs/tutorials/training.md | 17 + projects/DensePose/README.md | 64 + projects/DensePose/apply_net.py | 299 ++++ .../configs/Base-DensePose-RCNN.yaml | 43 + .../configs/densepose_R_50_FPN_s1x.yaml | 12 + ...densepose_R_50_FPN_inference_acc_test.yaml | 8 + .../densepose_R_50_FPN_instant_test.yaml | 14 + .../densepose_R_50_FPN_training_acc_test.yaml | 25 + projects/DensePose/densepose/__init__.py | 8 + projects/DensePose/densepose/config.py | 35 + projects/DensePose/densepose/dataset.py | 37 + .../DensePose/densepose/dataset_mapper.py | 118 ++ .../densepose/densepose_coco_evaluation.py | 953 +++++++++++++ .../DensePose/densepose/densepose_head.py | 626 ++++++++ projects/DensePose/densepose/evaluator.py | 136 ++ projects/DensePose/densepose/roi_head.py | 108 ++ projects/DensePose/densepose/structures.py | 519 +++++++ .../DensePose/densepose/utils/dbhelper.py | 145 ++ projects/DensePose/densepose/utils/logger.py | 13 + projects/DensePose/densepose/vis/base.py | 190 +++ .../DensePose/densepose/vis/bounding_box.py | 36 + projects/DensePose/densepose/vis/densepose.py | 581 ++++++++ projects/DensePose/densepose/vis/extractor.py | 152 ++ projects/DensePose/doc/TOOL_APPLY_NET.md | 93 ++ projects/DensePose/doc/TOOL_QUERY_DB.md | 105 ++ .../doc/images/res_bbox_dp_contour.jpg | Bin 0 -> 91492 bytes .../DensePose/doc/images/res_bbox_dp_segm.jpg | Bin 0 -> 156777 bytes .../DensePose/doc/images/res_bbox_dp_u.jpg | Bin 0 -> 158784 bytes .../DensePose/doc/images/res_bbox_dp_v.jpg | Bin 0 -> 158204 bytes .../DensePose/doc/images/vis_bbox_dp_i.jpg | Bin 0 -> 86262 bytes .../DensePose/doc/images/vis_bbox_dp_pts.jpg | Bin 0 -> 83397 bytes .../DensePose/doc/images/vis_bbox_dp_segm.jpg | Bin 0 -> 79599 bytes .../DensePose/doc/images/vis_bbox_dp_u.jpg | Bin 0 -> 87042 bytes .../DensePose/doc/images/vis_bbox_dp_v.jpg | Bin 0 -> 86995 bytes projects/DensePose/query_db.py | 247 ++++ projects/DensePose/train_net.py | 81 ++ projects/README.md | 9 + projects/TridentNet/README.md | 48 + .../configs/Base-TridentNet-Fast-C4.yaml | 28 + .../configs/tridentnet_fast_R_101_C4_3x.yaml | 9 + .../configs/tridentnet_fast_R_50_C4_1x.yaml | 6 + projects/TridentNet/train_net.py | 68 + projects/TridentNet/tridentnet/__init__.py | 9 + projects/TridentNet/tridentnet/config.py | 26 + .../TridentNet/tridentnet/trident_backbone.py | 223 +++ .../TridentNet/tridentnet/trident_conv.py | 107 ++ .../TridentNet/tridentnet/trident_rcnn.py | 110 ++ 
projects/TridentNet/tridentnet/trident_rpn.py | 32 + setup.cfg | 23 + setup.py | 78 + tests/__init__.py | 1 + tests/test_anchor_generator.py | 90 ++ tests/test_box2box_transform.py | 58 + tests/test_boxes.py | 63 + tests/test_checkpoint.py | 48 + tests/test_config.py | 58 + tests/test_data_transform.py | 79 + tests/test_fast_rcnn.py | 102 ++ tests/test_mask_ops.py | 174 +++ tests/test_nms_rotated.py | 159 +++ tests/test_roi_align.py | 86 ++ tests/test_roi_align_rotated.py | 176 +++ tests/test_roi_heads.py | 108 ++ tests/test_roi_pooler.py | 85 ++ tests/test_rotated_boxes.py | 578 ++++++++ tests/test_rpn.py | 206 +++ tests/test_visualizer.py | 75 + tools/benchmark.py | 138 ++ tools/train_net.py | 155 ++ tools/visualize_coco_results.py | 88 ++ tools/visualize_data.py | 99 ++ 343 files changed, 38107 insertions(+) create mode 100644 .clang-format create mode 100644 .flake8 create mode 100644 .github/Detectron2-Logo-Horz.svg create mode 100644 .github/ISSUE_TEMPLATE/feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/questions-help-support.md create mode 100644 .github/ISSUE_TEMPLATE/unexpected-problems-bugs.md create mode 100644 .github/pull_request_template.md create mode 100644 .gitignore create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 GETTING_STARTED.md create mode 100644 INSTALL.md create mode 100644 LICENSE create mode 100644 MODEL_ZOO.md create mode 100644 README.md create mode 100644 configs/Base-RCNN-C4.yaml create mode 100644 configs/Base-RCNN-DilatedC5.yaml create mode 100644 configs/Base-RCNN-FPN.yaml create mode 100644 configs/Base-RetinaNet.yaml create mode 100644 configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml create mode 100644 configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml create mode 100644 configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml create mode 100644 configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml create mode 100644 configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml create mode 100644 configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml create mode 100644 configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml create mode 100644 configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml create mode 100644 configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml create mode 100644 configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml create mode 100644 configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml create mode 100644 configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml create mode 100644 configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml create mode 100644 configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml create mode 100644 configs/COCO-Detection/rpn_R_50_C4_1x.yaml create mode 100644 configs/COCO-Detection/rpn_R_50_FPN_1x.yaml create mode 100644 configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml create mode 100644 configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml create mode 100644 configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml create mode 100644 configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml create mode 100644 configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml create mode 100644 configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml create mode 100644 configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml create mode 100644 configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml create mode 100644 configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml create mode 100644 
configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml create mode 100644 configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml create mode 100644 configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml create mode 100644 configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml create mode 100644 configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml create mode 100644 configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml create mode 100644 configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml create mode 100644 configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml create mode 100644 configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml create mode 100644 configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml create mode 100644 configs/Cityscapes/mask_rcnn_R_50_FPN.yaml create mode 100644 configs/Detectron1-Comparisons/README.md create mode 100644 configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml create mode 100644 configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml create mode 100644 configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml create mode 100644 configs/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml create mode 100644 configs/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml create mode 100644 configs/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml create mode 100644 configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml create mode 100644 configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml create mode 100644 configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml create mode 100644 configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml create mode 100644 configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml create mode 100644 configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml create mode 100644 configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml create mode 100644 configs/dcn/faster_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml create mode 100644 configs/dcn/faster_rcnn_R_50_FPN_1x_dconv_c5.yaml create mode 100644 configs/dcn/faster_rcnn_R_50_FPN_1x_mdconv_c3-c5.yaml create mode 100644 configs/dcn/faster_rcnn_R_50_FPN_1x_mdconv_c5.yaml create mode 100644 configs/dcn/mask_rcnn_R_50_FPN_1x_mdconv_c3-c5.yaml create mode 100644 configs/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml create mode 100644 configs/quick_schedules/README.md create mode 100644 configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml create mode 100644 configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml create mode 100644 configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml create mode 100644 configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml create mode 100644 configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml create mode 100644 configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml create mode 100644 configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml create mode 100644 configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml create mode 100644 configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml create mode 100644 configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml create mode 100644 configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml create mode 100644 configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml create mode 100644 configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml create mode 100644 configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml create 
mode 100644 configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml create mode 100644 configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml create mode 100644 configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml create mode 100644 configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml create mode 100644 configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml create mode 100644 configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml create mode 100644 configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml create mode 100644 configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml create mode 100644 configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml create mode 100644 configs/semantic_R_101_FPN_1x.yaml create mode 100644 configs/semantic_R_50_FPN_1x.yaml create mode 100644 datasets/README.md create mode 100755 datasets/prepare_for_tests.sh create mode 100644 datasets/prepare_panoptic_fpn.py create mode 100644 demo/README.md create mode 100644 demo/demo.py create mode 100644 demo/predictor.py create mode 100644 detectron2/__init__.py create mode 100644 detectron2/checkpoint/__init__.py create mode 100644 detectron2/checkpoint/c2_model_loading.py create mode 100644 detectron2/checkpoint/detection_checkpoint.py create mode 100644 detectron2/checkpoint/model_zoo.py create mode 100644 detectron2/config/__init__.py create mode 100644 detectron2/config/compat.py create mode 100644 detectron2/config/config.py create mode 100644 detectron2/config/defaults.py create mode 100644 detectron2/data/__init__.py create mode 100644 detectron2/data/build.py create mode 100644 detectron2/data/catalog.py create mode 100644 detectron2/data/common.py create mode 100644 detectron2/data/dataset_mapper.py create mode 100644 detectron2/data/datasets/README.md create mode 100644 detectron2/data/datasets/__init__.py create mode 100644 detectron2/data/datasets/builtin.py create mode 100644 detectron2/data/datasets/builtin_meta.py create mode 100644 detectron2/data/datasets/cityscapes.py create mode 100644 detectron2/data/datasets/coco.py create mode 100644 detectron2/data/datasets/lvis.py create mode 100644 detectron2/data/datasets/lvis_v0_5_categories.py create mode 100644 detectron2/data/datasets/pascal_voc.py create mode 100644 detectron2/data/datasets/register_coco.py create mode 100644 detectron2/data/detection_utils.py create mode 100644 detectron2/data/samplers/__init__.py create mode 100644 detectron2/data/samplers/distributed_sampler.py create mode 100644 detectron2/data/samplers/grouped_batch_sampler.py create mode 100644 detectron2/data/transforms/__init__.py create mode 100644 detectron2/data/transforms/transform.py create mode 100644 detectron2/data/transforms/transform_gen.py create mode 100644 detectron2/engine/__init__.py create mode 100644 detectron2/engine/defaults.py create mode 100644 detectron2/engine/hooks.py create mode 100644 detectron2/engine/launch.py create mode 100644 detectron2/engine/train_loop.py create mode 100644 detectron2/evaluation/__init__.py create mode 100644 detectron2/evaluation/cityscapes_evaluation.py create mode 100644 detectron2/evaluation/coco_evaluation.py create mode 100644 detectron2/evaluation/evaluator.py create mode 100644 detectron2/evaluation/lvis_evaluation.py create mode 100644 detectron2/evaluation/panoptic_evaluation.py create mode 100644 detectron2/evaluation/pascal_voc_evaluation.py create mode 100644 detectron2/evaluation/sem_seg_evaluation.py create mode 100644 detectron2/evaluation/testing.py 
create mode 100644 detectron2/layers/__init__.py create mode 100644 detectron2/layers/batch_norm.py create mode 100644 detectron2/layers/csrc/README.md create mode 100644 detectron2/layers/csrc/ROIAlign/ROIAlign.h create mode 100644 detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp create mode 100644 detectron2/layers/csrc/ROIAlign/ROIAlign_cuda.cu create mode 100644 detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h create mode 100644 detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp create mode 100644 detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu create mode 100644 detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h create mode 100644 detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp create mode 100644 detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu create mode 100644 detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h create mode 100644 detectron2/layers/csrc/deformable/deform_conv.h create mode 100644 detectron2/layers/csrc/deformable/deform_conv_cuda.cu create mode 100644 detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu create mode 100644 detectron2/layers/csrc/nms_rotated/nms_rotated.h create mode 100644 detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp create mode 100644 detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu create mode 100644 detectron2/layers/csrc/vision.cpp create mode 100644 detectron2/layers/deform_conv.py create mode 100644 detectron2/layers/mask_ops.py create mode 100644 detectron2/layers/nms.py create mode 100644 detectron2/layers/roi_align.py create mode 100644 detectron2/layers/roi_align_rotated.py create mode 100644 detectron2/layers/rotated_boxes.py create mode 100644 detectron2/layers/shape_spec.py create mode 100644 detectron2/layers/wrappers.py create mode 100644 detectron2/modeling/__init__.py create mode 100644 detectron2/modeling/anchor_generator.py create mode 100644 detectron2/modeling/backbone/__init__.py create mode 100644 detectron2/modeling/backbone/backbone.py create mode 100644 detectron2/modeling/backbone/build.py create mode 100644 detectron2/modeling/backbone/fpn.py create mode 100644 detectron2/modeling/backbone/resnet.py create mode 100644 detectron2/modeling/box_regression.py create mode 100644 detectron2/modeling/matcher.py create mode 100644 detectron2/modeling/meta_arch/__init__.py create mode 100644 detectron2/modeling/meta_arch/build.py create mode 100644 detectron2/modeling/meta_arch/panoptic_fpn.py create mode 100644 detectron2/modeling/meta_arch/rcnn.py create mode 100644 detectron2/modeling/meta_arch/retinanet.py create mode 100644 detectron2/modeling/meta_arch/semantic_seg.py create mode 100644 detectron2/modeling/poolers.py create mode 100644 detectron2/modeling/postprocessing.py create mode 100644 detectron2/modeling/proposal_generator/__init__.py create mode 100644 detectron2/modeling/proposal_generator/build.py create mode 100644 detectron2/modeling/proposal_generator/proposal_utils.py create mode 100644 detectron2/modeling/proposal_generator/rpn.py create mode 100644 detectron2/modeling/proposal_generator/rpn_outputs.py create mode 100644 detectron2/modeling/proposal_generator/rrpn.py create mode 100644 detectron2/modeling/proposal_generator/rrpn_outputs.py create mode 100644 detectron2/modeling/roi_heads/__init__.py create mode 100644 detectron2/modeling/roi_heads/box_head.py create mode 100644 detectron2/modeling/roi_heads/cascade_rcnn.py create mode 100644 detectron2/modeling/roi_heads/fast_rcnn.py create mode 100644 
detectron2/modeling/roi_heads/keypoint_head.py create mode 100644 detectron2/modeling/roi_heads/mask_head.py create mode 100644 detectron2/modeling/roi_heads/roi_heads.py create mode 100644 detectron2/modeling/sampling.py create mode 100644 detectron2/modeling/test_time_augmentation.py create mode 100644 detectron2/solver/__init__.py create mode 100644 detectron2/solver/build.py create mode 100644 detectron2/solver/lr_scheduler.py create mode 100644 detectron2/structures/__init__.py create mode 100644 detectron2/structures/boxes.py create mode 100644 detectron2/structures/image_list.py create mode 100644 detectron2/structures/instances.py create mode 100644 detectron2/structures/keypoints.py create mode 100644 detectron2/structures/masks.py create mode 100644 detectron2/structures/rotated_boxes.py create mode 100644 detectron2/utils/README.md create mode 100644 detectron2/utils/__init__.py create mode 100644 detectron2/utils/collect_env.py create mode 100644 detectron2/utils/colormap.py create mode 100644 detectron2/utils/comm.py create mode 100644 detectron2/utils/env.py create mode 100644 detectron2/utils/events.py create mode 100644 detectron2/utils/logger.py create mode 100644 detectron2/utils/registry.py create mode 100644 detectron2/utils/serialize.py create mode 100644 detectron2/utils/video_visualizer.py create mode 100644 detectron2/utils/visualizer.py create mode 100644 dev/README.md create mode 100755 dev/linter.sh create mode 100755 dev/parse_results.sh create mode 100755 dev/run_inference_tests.sh create mode 100755 dev/run_instant_tests.sh create mode 100644 docs/.gitignore create mode 100644 docs/Makefile create mode 100644 docs/README.md create mode 100644 docs/conf.py create mode 100644 docs/index.rst create mode 100644 docs/modules/checkpoint.rst create mode 100644 docs/modules/config.rst create mode 100644 docs/modules/data.datasets.rst create mode 100644 docs/modules/data.rst create mode 100644 docs/modules/data.samplers.rst create mode 100644 docs/modules/data.transforms.rst create mode 100644 docs/modules/engine.rst create mode 100644 docs/modules/evaluation.rst create mode 100644 docs/modules/index.rst create mode 100644 docs/modules/layers.rst create mode 100644 docs/modules/modeling.rst create mode 100644 docs/modules/solver.rst create mode 100644 docs/modules/structures.rst create mode 100644 docs/modules/utils.rst create mode 100644 docs/notes/benchmarks.md create mode 100644 docs/notes/changelog.md create mode 100644 docs/notes/compatibility.md create mode 100644 docs/notes/index.rst create mode 100644 docs/requirements.txt create mode 100644 docs/tutorials/configs.md create mode 100644 docs/tutorials/data_loading.md create mode 100644 docs/tutorials/datasets.md create mode 100644 docs/tutorials/extend.md create mode 120000 docs/tutorials/getting_started.md create mode 100644 docs/tutorials/index.rst create mode 120000 docs/tutorials/install.md create mode 100644 docs/tutorials/models.md create mode 100644 docs/tutorials/training.md create mode 100644 projects/DensePose/README.md create mode 100644 projects/DensePose/apply_net.py create mode 100644 projects/DensePose/configs/Base-DensePose-RCNN.yaml create mode 100644 projects/DensePose/configs/densepose_R_50_FPN_s1x.yaml create mode 100644 projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_inference_acc_test.yaml create mode 100644 projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_instant_test.yaml create mode 100644 
projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_training_acc_test.yaml create mode 100644 projects/DensePose/densepose/__init__.py create mode 100644 projects/DensePose/densepose/config.py create mode 100644 projects/DensePose/densepose/dataset.py create mode 100644 projects/DensePose/densepose/dataset_mapper.py create mode 100644 projects/DensePose/densepose/densepose_coco_evaluation.py create mode 100644 projects/DensePose/densepose/densepose_head.py create mode 100644 projects/DensePose/densepose/evaluator.py create mode 100644 projects/DensePose/densepose/roi_head.py create mode 100644 projects/DensePose/densepose/structures.py create mode 100644 projects/DensePose/densepose/utils/dbhelper.py create mode 100644 projects/DensePose/densepose/utils/logger.py create mode 100644 projects/DensePose/densepose/vis/base.py create mode 100644 projects/DensePose/densepose/vis/bounding_box.py create mode 100644 projects/DensePose/densepose/vis/densepose.py create mode 100644 projects/DensePose/densepose/vis/extractor.py create mode 100644 projects/DensePose/doc/TOOL_APPLY_NET.md create mode 100644 projects/DensePose/doc/TOOL_QUERY_DB.md create mode 100644 projects/DensePose/doc/images/res_bbox_dp_contour.jpg create mode 100644 projects/DensePose/doc/images/res_bbox_dp_segm.jpg create mode 100644 projects/DensePose/doc/images/res_bbox_dp_u.jpg create mode 100644 projects/DensePose/doc/images/res_bbox_dp_v.jpg create mode 100644 projects/DensePose/doc/images/vis_bbox_dp_i.jpg create mode 100644 projects/DensePose/doc/images/vis_bbox_dp_pts.jpg create mode 100644 projects/DensePose/doc/images/vis_bbox_dp_segm.jpg create mode 100644 projects/DensePose/doc/images/vis_bbox_dp_u.jpg create mode 100644 projects/DensePose/doc/images/vis_bbox_dp_v.jpg create mode 100644 projects/DensePose/query_db.py create mode 100644 projects/DensePose/train_net.py create mode 100644 projects/README.md create mode 100644 projects/TridentNet/README.md create mode 100644 projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml create mode 100644 projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml create mode 100644 projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml create mode 100644 projects/TridentNet/train_net.py create mode 100644 projects/TridentNet/tridentnet/__init__.py create mode 100644 projects/TridentNet/tridentnet/config.py create mode 100644 projects/TridentNet/tridentnet/trident_backbone.py create mode 100644 projects/TridentNet/tridentnet/trident_conv.py create mode 100644 projects/TridentNet/tridentnet/trident_rcnn.py create mode 100644 projects/TridentNet/tridentnet/trident_rpn.py create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 tests/__init__.py create mode 100644 tests/test_anchor_generator.py create mode 100644 tests/test_box2box_transform.py create mode 100644 tests/test_boxes.py create mode 100644 tests/test_checkpoint.py create mode 100644 tests/test_config.py create mode 100644 tests/test_data_transform.py create mode 100644 tests/test_fast_rcnn.py create mode 100644 tests/test_mask_ops.py create mode 100644 tests/test_nms_rotated.py create mode 100644 tests/test_roi_align.py create mode 100644 tests/test_roi_align_rotated.py create mode 100644 tests/test_roi_heads.py create mode 100644 tests/test_roi_pooler.py create mode 100644 tests/test_rotated_boxes.py create mode 100644 tests/test_rpn.py create mode 100644 tests/test_visualizer.py create mode 100755 tools/benchmark.py create mode 100755 tools/train_net.py create mode 100755 
tools/visualize_coco_results.py create mode 100755 tools/visualize_data.py diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000..a757d4fff0 --- /dev/null +++ b/.clang-format @@ -0,0 +1,85 @@ +AccessModifierOffset: -1 +AlignAfterOpenBracket: AlwaysBreak +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlinesLeft: true +AlignOperands: false +AlignTrailingComments: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: true +BinPackArguments: false +BinPackParameters: false +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: false +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ForEachMacros: [ FOR_EACH, FOR_EACH_ENUMERATE, FOR_EACH_KV, FOR_EACH_R, FOR_EACH_RANGE, ] +IncludeCategories: + - Regex: '^<.*\.h(pp)?>' + Priority: 1 + - Regex: '^<.*' + Priority: 2 + - Regex: '.*' + Priority: 3 +IndentCaseLabels: true +IndentWidth: 2 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: false +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PointerAlignment: Left +ReflowComments: true +SortIncludes: true +SpaceAfterCStyleCast: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp11 +TabWidth: 8 +UseTab: Never diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000000..74ff132c14 --- /dev/null +++ b/.flake8 @@ -0,0 +1,9 @@ +# This is an example .flake8 config, used when developing *Black* itself. +# Keep in sync with setup.cfg which is used for source packages. 
+
+[flake8]
+ignore = W503, E203, E221, C901
+max-line-length = 100
+max-complexity = 18
+select = B,C,E,F,W,T4,B9
+exclude = build,__init__.py
diff --git a/.github/Detectron2-Logo-Horz.svg b/.github/Detectron2-Logo-Horz.svg
new file mode 100644
index 0000000000..eb2d643ddd
--- /dev/null
+++ b/.github/Detectron2-Logo-Horz.svg
@@ -0,0 +1 @@
+Detectron2-Logo-Horz
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
new file mode 100644
index 0000000000..5bd0aab250
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature-request.md
@@ -0,0 +1,16 @@
+---
+name: "\U0001F680Feature Request"
+about: Submit a proposal/request for a new detectron2 feature
+
+---
+
+## 🚀 Feature
+
+
+## Motivation
+
+
+
+## Pitch
+
+
diff --git a/.github/ISSUE_TEMPLATE/questions-help-support.md b/.github/ISSUE_TEMPLATE/questions-help-support.md
new file mode 100644
index 0000000000..67aa105124
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/questions-help-support.md
@@ -0,0 +1,14 @@
+---
+name: "❓Questions/Help/Support"
+about: Do you need support?
+
+---
+
+## ❓ Questions and Help
+
+General questions about detectron2.
+
+NOTE:
+
+If you encounter an unexpected error when using detectron2,
+please use the "Unexpected Problems / Bugs" issue category instead.
diff --git a/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md b/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md
new file mode 100644
index 0000000000..133a94270b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md
@@ -0,0 +1,29 @@
+---
+name: "Unexpected Problems / Bugs"
+about: Report unexpected problems or bugs in detectron2
+
+---
+
+If you do not know the root cause of the problem / bug and would like someone to help you, please
+include:
+
+
+## To Reproduce
+
+1. what changes you made / what code you wrote
+2. what command you ran
+3. what you observed (full logs are preferred)
+
+## Expected behavior
+
+If there is no obvious error in "what you observed" above,
+please tell us the expected behavior.
+
+If you expect the model to work better, we will only help in one of these two cases:
+(1) you are unable to reproduce the results documented in the detectron2 model zoo, or
+(2) the behavior indicates a detectron2 bug.
+
+## Environment
+
+Please paste the output of `python -m detectron2.utils.collect_env`.
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000000..34d04b5011
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,6 @@
+Thanks for your contribution!
+
+If you're sending a large PR (e.g., >50 lines),
+please open an issue first about the feature / bug, and indicate how you want to contribute.
+
+Before submitting a PR, please run `dev/linter.sh` to lint the code.
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000..a82738aded
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,34 @@
+# output dir
+output
+instant_test_output
+
+# compilation and distribution
+__pycache__
+_ext
+*.pyc
+*.so
+detectron2.egg-info/
+build/
+dist/
+
+# pytorch/python/numpy formats
+*.pth
+*.pkl
+*.npy
+
+# ipython/jupyter notebooks
+*.ipynb
+**/.ipynb_checkpoints/
+
+# Editor temporaries
+*.swn
+*.swo
+*.swp
+*~
+
+# Pycharm editor settings
+.idea
+
+# project dirs
+/datasets
+/models
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..0f7ad8bfc1
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,5 @@
+# Code of Conduct
+
+Facebook has adopted a Code of Conduct that we expect project participants to adhere to.
+Please read the [full text](https://code.fb.com/codeofconduct/)
+so that you can understand what actions will and will not be tolerated.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..23ba08d62c
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to detectron2
+We want to make contributing to this project as easy and transparent as
+possible.
+
+## Pull Requests
+We actively welcome your pull requests.
+
+1. Fork the repo and create your branch from `master`.
+2. If you've added code that should be tested, add tests.
+3. If you've changed APIs, update the documentation.
+4. Ensure the test suite passes.
+5. Make sure your code lints.
+6. If you haven't already, complete the Contributor License Agreement ("CLA").
+
+## Contributor License Agreement ("CLA")
+In order to accept your pull request, we need you to submit a CLA. You only need
+to do this once to work on any of Facebook's open source projects.
+
+Complete your CLA here: <https://code.facebook.com/cla>
+
+## Issues
+We use GitHub issues to track public bugs. Please ensure your description is
+clear and has sufficient instructions to be able to reproduce the issue.
+
+Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
+disclosure of security bugs. In those cases, please go through the process
+outlined on that page and do not file a public issue.
+
+## License
+By contributing to detectron2, you agree that your contributions will be licensed
+under the LICENSE file in the root directory of this source tree.
diff --git a/GETTING_STARTED.md b/GETTING_STARTED.md
new file mode 100644
index 0000000000..4a264f68e9
--- /dev/null
+++ b/GETTING_STARTED.md
@@ -0,0 +1,62 @@
+
+## Getting Started with Detectron2
+
+This document provides a brief introduction to the builtin command-line tools in detectron2.
+
+For a tutorial that involves actual coding with the API,
+see our [Colab Notebook](TODO), which covers how to run inference with an
+existing model and how to train a builtin model on a custom dataset.
+
+For more advanced tutorials, refer to our [documentation](https://detectron2.readthedocs.io/tutorials/extend.html).
+
+
+### Inference with Pre-trained Models
+
+1. Pick a model and its config file from the
+   [model zoo](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md),
+   for example, `mask_rcnn_R_50_FPN_3x.yaml`.
+2. Run the demo with
+```
+python demo/demo.py --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \
+  --input input1.jpg input2.jpg \
+  --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl
+```
+This will run inference and show the visualizations in an OpenCV window.
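The demo is a thin wrapper around the Python API. For reference, here is a minimal sketch of the same inference step done in code; it assumes `get_cfg` and `DefaultPredictor` are importable from `detectron2.config` and `detectron2.engine` as used by this commit's `demo/predictor.py`, and reuses the illustrative config and weight paths from the command above.

```python
# Minimal inference sketch (assumes the get_cfg / DefaultPredictor APIs from this
# commit; config and weight paths are the illustrative ones from the demo command).
import cv2

from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

cfg = get_cfg()
cfg.merge_from_file("configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.WEIGHTS = (
    "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # only keep reasonably confident detections

predictor = DefaultPredictor(cfg)            # loads the weights and handles preprocessing
image = cv2.imread("input1.jpg")             # BGR image, as the demo expects
outputs = predictor(image)                   # dict with an "instances" field
print(outputs["instances"].pred_classes)     # predicted class ids
print(outputs["instances"].pred_boxes)       # predicted boxes
```

The predictor applies the same resizing and normalization that the config specifies for inference, so a raw `cv2.imread` result is all it needs.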
+
+To run on webcam, replace `--input files` with `--webcam`.
+To run on a video, replace `--input files` with `--video-input video.mp4`.
+To save outputs to a directory (for images) or a file (for webcam or video), use `--output`.
+
+
+### Train a Standard Model
+
+We provide a script, "tools/train_net.py", that trains
+all the configs provided in detectron2.
+You may want to use it as a reference to write your own training script for new research.
+
+To train a model with "train_net.py", do:
+```
+python tools/train_net.py --num-gpus 8 \
+  --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
+```
+
+The configs are made for 8-GPU training. To train on 1 GPU, use:
+```
+python tools/train_net.py \
+  --config-file configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
+  SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025
+```
+
+(Note that we applied the [linear learning rate scaling rule](https://arxiv.org/abs/1706.02677)
+when changing the batch size: the base config uses 16 images per batch with a learning rate of 0.02,
+so a batch of 2 images uses 0.02 × 2/16 = 0.0025.)
+
+For more options, see `python tools/train_net.py -h`.
+
+### Use Detectron2 in Your Code
+
+See our [Colab Notebook](TODO) to learn how to use detectron2 APIs to:
+1. run inference with an existing model
+2. train a builtin model on a custom dataset
+
+See [detectron2/projects](https://github.com/facebookresearch/detectron2/tree/master/projects)
+for more ways to build your project on detectron2.
diff --git a/INSTALL.md b/INSTALL.md
new file mode 100644
index 0000000000..7233ecbef8
--- /dev/null
+++ b/INSTALL.md
@@ -0,0 +1,53 @@
+## Installation
+
+Our [Colab Notebook](TODO) also has step-by-step instructions that install detectron2.
+
+### Requirements:
+- Python >= 3.6
+- PyTorch 1.3
+- [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation.
+  You can install them together at [pytorch.org](http://pytorch.org) to make sure of this.
+- OpenCV, needed by the demo and visualization
+- [fvcore](https://github.com/facebookresearch/fvcore/): `pip install 'git+https://github.com/facebookresearch/fvcore'`
+- pycocotools: `pip install cython; pip install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'`
+- GCC >= 4.9
+
+
+### Build detectron2
+
+After installing the above dependencies, run:
+```
+git clone git@github.com:facebookresearch/detectron2.git
+cd detectron2
+python setup.py build develop
+
+# or if you are on macOS
+# MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py build develop
+
+# or, as an alternative to `setup.py`, do
+# pip install .
+```
+
+### Common Installation Issues
+
++ Undefined torch/aten symbols, or segmentation fault immediately when running the library.
+  This usually means one of the following:
+
+  * detectron2 or torchvision is not compiled with the version of PyTorch you're running.
+
+    If you use a pre-built torchvision, uninstall torchvision & pytorch, and reinstall them
+    following [pytorch.org](http://pytorch.org).
+    If you manually build detectron2 or torchvision, remove the files you built (`build/`, `**/*.so`)
+    and rebuild them.
+
+  * detectron2 or torchvision is not compiled using gcc >= 4.9.
+
+    You'll see a warning message during compilation in this case. Please remove the files you built,
+    and rebuild them.
+    Technically, you need the identical compiler that was used to build pytorch to guarantee
+    compatibility. But in practice, gcc >= 4.9 should work OK.
+
++ Undefined CUDA symbols. The version of NVCC you used to build detectron2 or torchvision does
+  not match the version of CUDA you are running with.
+ This happens sometimes when using anaconda. + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..5a90478a33 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "[]" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright 2019, Facebook, Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/MODEL_ZOO.md b/MODEL_ZOO.md new file mode 100644 index 0000000000..8c821147e8 --- /dev/null +++ b/MODEL_ZOO.md @@ -0,0 +1,807 @@ +# Detectron2 Model Zoo and Baselines + +## Introduction + +This file documents a large collection of baselines trained +with detectron2 in Sep-Oct, 2019. +The corresponding configurations for all models can be found under the `configs/` directory. +Unless otherwise noted, the following settings are used for all runs: + +#### Common Settings +* All models were trained on [Big Basin](https://engineering.fb.com/data-center-engineering/introducing-big-basin-our-next-generation-ai-hardware/) + servers with 8 NVIDIA V100 GPUs, with data-parallel sync SGD and a total minibatch size of 16 images. +* All models were trained with CUDA 9.2, cuDNN 7.4.2 or 7.6.3 (the difference on performance is found to be negligible). +* The default settings are not directly comparable with Detectron. + For example, our default training data augmentation uses scale jittering in addition to horizontal flipping. + For configs that are closer to Detectron's settings, see + [Detectron1-Comparisons](configs/Detectron1-Comparisons/). +* No test-time augmentation is used for inference. +* Inference time is measured with batch size 1. It contains the time taken to postprocess results for evaluation. Therefore it does not accurately + reflect time-to-results. +* The *model id* column is provided for ease of reference. +* To check downloaded file integrity: any model on this page contains its md5 prefix in its file name. +* All COCO models were trained on `train2017` and evaluated on `val2017`. +* For Faster/Mask R-CNN, we provide baselines based on __3 different backbone combinations__: + * __FPN__: Use a ResNet+FPN backbone with standard conv and FC heads for mask and box prediction, + respectively. It obtains the best + speed/accuracy tradeoff, but the other two are still useful for research. + * __C4__: Use a ResNet conv4 backbone with conv5 head. The original baseline in the Faster R-CNN paper. + * __DC5__ (Dilated-C5): Use a ResNet conv5 backbone with dilations in conv5, and standard conv and FC heads + for mask and box prediction, respectively. + This is used by the Deformable ConvNet paper. +* Most models are trained with the 3x schedule (~37 COCO epochs). + Although 1x models are heavily under-trained, we provide some ResNet-50 models with the 1x (~12 COCO epochs) + training schedule for comparison when doing quick research iteration. + +#### ImageNet Pretrained Models + +We provide backbone models pretrained on ImageNet-1k dataset. +These models are __different__ from those provided in Detectron: we do not fuse BatchNorm into an affine layer. 
+* [R-50.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl): converted copy of MSRA's original ResNet-50 model +* [R-101.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-101.pkl): converted copy of MSRA's original ResNet-101 model +* [X-101-32x8d.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/FAIR/X-101-32x8d.pkl): ResNeXt-101-32x8d model trained with Caffe2 at FB + +Pretrained models in Detectron's format can still be used. For example: +* [X-152-32x8d-IN5k.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl): + ResNeXt-152-32x8d model trained on ImageNet-5k with Caffe2 at FB (see ResNeXt paper for details on ImageNet-5k). +* [R-50-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47261647/R-50-GN.pkl): + ResNet-50 with Group Normalization. +* [R-101-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47592356/R-101-GN.pkl): + ResNet-101 with Group Normalization. + +#### License + +All models available for download through this document are licensed under the +[Creative Commons Attribution-ShareAlike 3.0 license](https://creativecommons.org/licenses/by-sa/3.0/). + +### COCO Object Detection Baselines + +#### Faster R-CNN: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| R50-C4 | 1x | 0.593 | 0.110 | 4.8 | 35.7 | 137257644 | model \| metrics |
+| R50-DC5 | 1x | 0.380 | 0.089 | 5.0 | 37.3 | 137847829 | model \| metrics |
+| R50-FPN | 1x | 0.210 | 0.060 | 3.0 | 37.9 | 137257794 | model \| metrics |
+| R50-C4 | 3x | 0.589 | 0.108 | 4.8 | 38.4 | 137849393 | model \| metrics |
+| R50-DC5 | 3x | 0.378 | 0.095 | 5.0 | 39.0 | 137849425 | model \| metrics |
+| R50-FPN | 3x | 0.209 | 0.058 | 3.0 | 40.2 | 137849458 | model \| metrics |
+| R101-C4 | 3x | 0.656 | 0.137 | 5.9 | 41.1 | 138204752 | model \| metrics |
+| R101-DC5 | 3x | 0.452 | 0.103 | 6.1 | 40.6 | 138204841 | model \| metrics |
+| R101-FPN | 3x | 0.286 | 0.071 | 4.1 | 42.0 | 137851257 | model \| metrics |
+| X101-FPN | 3x | 0.638 | 0.139 | 6.7 | 43.0 | 139173657 | model \| metrics |
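
As noted in the common settings above, every checkpoint file name embeds an md5 prefix. A minimal integrity-check sketch; the local path and the assumption that the trailing hex token is the leading digits of the file's md5 are for illustration only:

```python
import hashlib
import re

def md5_prefix_matches(path: str) -> bool:
    """Check that a downloaded checkpoint's name matches its content hash."""
    # Names look like "model_final_f10217.pkl"; the trailing hex token is
    # assumed to be the leading digits of the file's md5.
    m = re.search(r"model_final_([0-9a-f]+)\.pkl$", path)
    if m is None:
        raise ValueError(f"unexpected checkpoint name: {path}")
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            md5.update(chunk)
    return md5.hexdigest().startswith(m.group(1))

# e.g. md5_prefix_matches("model_final_f10217.pkl") after downloading that file
```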
+
+#### RetinaNet:
+
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| R50 | 1x | 0.200 | 0.082 | 3.9 | 36.5 | 137593951 | model \| metrics |
+| R50 | 3x | 0.201 | 0.081 | 3.9 | 37.9 | 137849486 | model \| metrics |
+| R101 | 3x | 0.280 | 0.087 | 5.1 | 39.9 | 138363263 | model \| metrics |
+
+#### RPN & Fast R-CNN:
+
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | prop. AR | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| RPN R50-C4 | 1x | 0.130 | 0.056 | 1.5 | | 51.6 | 137258005 | model \| metrics |
+| RPN R50-FPN | 1x | 0.186 | 0.053 | 2.7 | | 58.0 | 137258492 | model \| metrics |
+| Fast R-CNN R50-FPN | 1x | 0.140 | 0.056 | 2.6 | 37.8 | | 137635226 | model \| metrics |
+
+### COCO Instance Segmentation Baselines with Mask R-CNN
+
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| R50-C4 | 1x | 0.621 | 0.140 | 5.2 | 36.8 | 32.2 | 137259246 | model \| metrics |
+| R50-DC5 | 1x | 0.471 | 0.126 | 6.5 | 38.3 | 34.2 | 137260150 | model \| metrics |
+| R50-FPN | 1x | 0.261 | 0.087 | 3.4 | 38.6 | 35.2 | 137260431 | model \| metrics |
+| R50-C4 | 3x | 0.622 | 0.137 | 5.2 | 39.8 | 34.4 | 137849525 | model \| metrics |
+| R50-DC5 | 3x | 0.470 | 0.111 | 6.5 | 40.0 | 35.9 | 137849551 | model \| metrics |
+| R50-FPN | 3x | 0.261 | 0.079 | 3.4 | 41.0 | 37.2 | 137849600 | model \| metrics |
+| R101-C4 | 3x | 0.691 | 0.163 | 6.3 | 42.6 | 36.7 | 138363239 | model \| metrics |
+| R101-DC5 | 3x | 0.545 | 0.129 | 7.6 | 41.9 | 37.3 | 138363294 | model \| metrics |
+| R101-FPN | 3x | 0.340 | 0.092 | 4.6 | 42.9 | 38.6 | 138205316 | model \| metrics |
+| X101-FPN | 3x | 0.690 | 0.155 | 7.2 | 44.3 | 39.5 | 139653917 | model \| metrics |
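
Each entry pairs a config under `configs/` with trained weights addressable through the same `detectron2://` scheme used by the config files in this patch. A minimal inference sketch for the R50-FPN 3x row; the input image path is a placeholder and this is an illustrative sketch, not the demo script shipped in this commit:

```python
import cv2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

cfg = get_cfg()
cfg.merge_from_file("configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
# Weight path in the same "detectron2://" scheme used by the Cityscapes config below.
cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"

predictor = DefaultPredictor(cfg)
image = cv2.imread("input.jpg")         # placeholder input image
outputs = predictor(image)              # dict with an "instances" field
print(outputs["instances"].pred_boxes)  # predicted boxes, masks and scores live here
```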
+
+### COCO Person Keypoint Detection Baselines with Keypoint R-CNN
+
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | kp. AP | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| R50-FPN | 1x | 0.315 | 0.102 | 5.0 | 53.6 | 64.0 | 137261548 | model \| metrics |
+| R50-FPN | 3x | 0.316 | 0.095 | 5.0 | 55.4 | 65.5 | 137849621 | model \| metrics |
+| R101-FPN | 3x | 0.390 | 0.106 | 6.1 | 56.4 | 66.1 | 138363331 | model \| metrics |
+| X101-FPN | 3x | 0.738 | 0.168 | 8.7 | 57.3 | 66.0 | 139686956 | model \| metrics |
+
+### COCO Panoptic Segmentation Baselines with Panoptic FPN
+
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | PQ | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| R50-FPN | 1x | 0.304 | 0.129 | 4.8 | 37.6 | 34.7 | 39.4 | 139514544 | model \| metrics |
+| R50-FPN | 3x | 0.302 | 0.127 | 4.8 | 40.0 | 36.5 | 41.5 | 139514569 | model \| metrics |
+| R101-FPN | 3x | 0.392 | 0.137 | 6.0 | 42.4 | 38.5 | 43.0 | 139514519 | model \| metrics |
+
+
+### LVIS Instance Segmentation Baselines with Mask R-CNN
+
+Mask R-CNN baselines on the [LVIS dataset](https://lvisdataset.org), v0.5.
+These baselines are described in Table 3(c) of the [LVIS paper](https://arxiv.org/abs/1908.03195).
+
+NOTE: the 1x schedule here has the same number of __iterations__ as the COCO baselines,
+which corresponds to roughly 24 epochs of LVIS v0.5 data.
+
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| R50-FPN | 1x | 0.319 | 0.369 | 6.6 | 24.0 | 24.4 | 134714017 | model \| metrics |
+| R101-FPN | 1x | 0.395 | 0.385 | 7.6 | 25.8 | 26.1 | 134807205 | model \| metrics |
+| X101-FPN | 1x | 1.330 | 0.461 | 10.0 | 27.3 | 27.9 | 135397361 | model \| metrics |
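
The epoch counts quoted in this document follow from the solver settings in the base configs (90k iterations at 16 images per batch for the 1x schedule). A worked sketch of the arithmetic; the dataset sizes (~118k COCO `train2017` images, ~57k LVIS v0.5 training images) are approximate figures assumed for illustration, not taken from this file:

```python
def epochs(max_iter: int, ims_per_batch: int, num_train_images: int) -> float:
    # One epoch = one pass over the training set.
    return max_iter * ims_per_batch / num_train_images

print(epochs(90_000, 16, 118_000))   # ~12 epochs: COCO 1x schedule
print(epochs(270_000, 16, 118_000))  # ~37 epochs: COCO 3x schedule
print(epochs(90_000, 16, 57_000))    # ~25 epochs: LVIS v0.5 1x (roughly the "24 epochs" above)
```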
+
+
+### Cityscapes & Pascal VOC Baselines
+
+Simple baselines for:
+* Mask R-CNN on Cityscapes instance segmentation (trained on fine annotations only)
+* Faster R-CNN on PASCAL VOC object detection (trained on VOC 2007 train+val + VOC 2012 train+val, tested on VOC 2007 using 11-point interpolated AP)
+
+| Name | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | box AP50 | mask AP | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| R50-FPN, Cityscapes | 0.240 | 0.397 | 4.4 | | | 36.5 | 142423278 | model \| metrics |
+| R50-C4, VOC | 0.537 | 0.096 | 4.8 | 51.9 | 80.3 | | 142202221 | model \| metrics |
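
The VOC entry above is scored with 11-point interpolated AP: precision is taken at its maximum over all recalls ≥ t and averaged over t ∈ {0.0, 0.1, …, 1.0}. A minimal standalone sketch of that metric, not the evaluation code shipped in this commit:

```python
import numpy as np

def voc_11_point_ap(recall: np.ndarray, precision: np.ndarray) -> float:
    """11-point interpolated AP over recall thresholds 0.0, 0.1, ..., 1.0."""
    ap = 0.0
    for i in range(11):
        t = i / 10.0
        above = precision[recall >= t]  # interpolated precision at recall >= t
        ap += (above.max() if above.size else 0.0) / 11.0
    return ap

# Toy precision/recall curve from a ranked detection list:
recall = np.array([0.1, 0.4, 0.7, 1.0])
precision = np.array([1.0, 0.8, 0.6, 0.5])
print(voc_11_point_ap(recall, precision))  # ~0.70
```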
+
+
+### Other Settings
+
+Ablations for Deformable Conv and Cascade R-CNN:
+
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| Baseline R50-FPN | 1x | 0.261 | 0.087 | 3.4 | 38.6 | 35.2 | 137260431 | model \| metrics |
+| Deformable Conv | 1x | 0.342 | 0.084 | 3.5 | 41.5 | 37.5 | 138602867 | model \| metrics |
+| Cascade R-CNN | 1x | 0.317 | 0.090 | 4.0 | 42.1 | 36.4 | 138602847 | model \| metrics |
+
+
+Ablations for GroupNorm:
+
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| Baseline R50-FPN | 3x | 0.261 | 0.079 | 3.4 | 41.0 | 37.2 | 137849600 | model \| metrics |
+| GroupNorm | 3x | 0.356 | 0.102 | 7.3 | 42.6 | 38.6 | 138602888 | model \| metrics |
+| GroupNorm (scratch) | 3x | 0.400 | 0.106 | 9.8 | 39.9 | 36.6 | 138602908 | model \| metrics |
+
+A few very large models trained for a long time, for demo purposes.
+
+| Name | inference time (s/im) | train mem (GB) | box AP | mask AP | PQ | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| Panoptic FPN R101 | 0.172 | 11.4 | 47.4 | 41.3 | 46.1 | 139797668 | model \| metrics |
+| Mask R-CNN X152 | 0.278 | 15.1 | 49.3 | 43.2 | | 18131413 | model \| metrics |
+| above + test-time aug. | | | 51.4 | 45.5 | | | |
diff --git a/README.md b/README.md new file mode 100644 index 0000000000..d95e65d240 --- /dev/null +++ b/README.md @@ -0,0 +1,46 @@ + + +Detectron2 is Facebook AI Research's next generation software system +that implements state-of-the-art object detection algorithms. +It is a ground-up rewrite of the previous version, +[Detectron](https://github.com/facebookresearch/Detectron/), +and it originates from [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/). +It is written in Python and powered by the [PyTorch](https://pytorch.org) deep +learning framework. + +
+ +
+ + +## Installation + +See [INSTALL.md](INSTALL.md). + +## Quick Start + +See [GETTING_STARTED.md](GETTING_STARTED.md), or our [Colab Notebook](TODO). + + +## Model Zoo and Baselines + +We provide a large set of baseline results and trained models available for download in the [Detectron2 Model Zoo](MODEL_ZOO.md). + + +## License + +Detectron2 is released under the [Apache 2.0 license](LICENSE). + +## Citing Detectron + +If you use Detectron2 in your research or wish to refer to the baseline results published in the [Model Zoo](MODEL_ZOO.md), please use the following BibTeX entry. + +``` +@misc{wu2019detectron2, + author = {Yuxin Wu and Alexander Kirillov and Francisco Massa and + Wan-Yen Lo and Ross Girshick}, + title = {Detectron2}, + howpublished = {\url{https://github.com/facebookresearch/detectron2}}, + year = {2019} +} +``` diff --git a/configs/Base-RCNN-C4.yaml b/configs/Base-RCNN-C4.yaml new file mode 100644 index 0000000000..f44bc6a265 --- /dev/null +++ b/configs/Base-RCNN-C4.yaml @@ -0,0 +1,18 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + RPN: + PRE_NMS_TOPK_TEST: 6000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "Res5ROIHeads" +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) + diff --git a/configs/Base-RCNN-DilatedC5.yaml b/configs/Base-RCNN-DilatedC5.yaml new file mode 100644 index 0000000000..458a7785e0 --- /dev/null +++ b/configs/Base-RCNN-DilatedC5.yaml @@ -0,0 +1,30 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + RESNETS: + OUT_FEATURES: ["res5"] + RES5_DILATION: 2 + RPN: + IN_FEATURES: ["res5"] + PRE_NMS_TOPK_TEST: 6000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "StandardROIHeads" + IN_FEATURES: ["res5"] + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + ROI_MASK_HEAD: + NAME: "MaskRCNNConvUpsampleHead" + NUM_CONV: 4 + POOLER_RESOLUTION: 14 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) diff --git a/configs/Base-RCNN-FPN.yaml b/configs/Base-RCNN-FPN.yaml new file mode 100644 index 0000000000..f61493dab0 --- /dev/null +++ b/configs/Base-RCNN-FPN.yaml @@ -0,0 +1,41 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map + ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) + RPN: + IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] + PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level + PRE_NMS_TOPK_TEST: 1000 # Per FPN level + # Detectron1 uses 2000 proposals per-batch, + # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) + # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. 
+ POST_NMS_TOPK_TRAIN: 1000 + POST_NMS_TOPK_TEST: 1000 + ROI_HEADS: + NAME: "StandardROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + ROI_MASK_HEAD: + NAME: "MaskRCNNConvUpsampleHead" + NUM_CONV: 4 + POOLER_RESOLUTION: 14 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) diff --git a/configs/Base-RetinaNet.yaml b/configs/Base-RetinaNet.yaml new file mode 100644 index 0000000000..3f47c64c3d --- /dev/null +++ b/configs/Base-RetinaNet.yaml @@ -0,0 +1,23 @@ +MODEL: + META_ARCHITECTURE: "RetinaNet" + BACKBONE: + NAME: "build_retinanet_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: !!python/object/apply:eval ["[[x, x * 2**(1.0/3), x * 2**(2.0/3) ] for x in [32, 64, 128, 256, 512 ]]"] + FPN: + IN_FEATURES: ["res3", "res4", "res5"] + RETINANET: + IOU_THRESHOLDS: [0.4, 0.5] + IOU_LABELS: [0, -1, 1] +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.01 # Note that RetinaNet uses a different default learning rate + STEPS: (60000, 80000) + MAX_ITER: 90000 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) diff --git a/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml b/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000..773ac10e87 --- /dev/null +++ b/configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,17 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + LOAD_PROPOSALS: True + RESNETS: + DEPTH: 50 + PROPOSAL_GENERATOR: + NAME: "PrecomputedProposals" +DATASETS: + TRAIN: ("coco_2017_train",) + PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_train_box_proposals_21bc3a.pkl", ) + TEST: ("coco_2017_val",) + PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) +DATALOADER: + # proposals are part of the dataset_dicts, and take a lot of RAM + NUM_WORKERS: 2 diff --git a/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml b/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml new file mode 100644 index 0000000000..db142cd671 --- /dev/null +++ b/configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml b/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml new file mode 100644 index 0000000000..bceb6b3436 --- /dev/null +++ b/configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml b/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml new file mode 100644 index 0000000000..57a098f53e --- /dev/null +++ b/configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: False + 
RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml b/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml new file mode 100644 index 0000000000..f96130105c --- /dev/null +++ b/configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml b/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml new file mode 100644 index 0000000000..bc51bce390 --- /dev/null +++ b/configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml b/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml new file mode 100644 index 0000000000..0fe96f57fe --- /dev/null +++ b/configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml b/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml new file mode 100644 index 0000000000..33fadeb87d --- /dev/null +++ b/configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml b/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000..3262019a12 --- /dev/null +++ b/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 diff --git a/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml b/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000000..41395182bf --- /dev/null +++ b/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml b/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml new file mode 100644 index 0000000000..9c9b5ab771 --- /dev/null +++ b/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml @@ -0,0 +1,13 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: False + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml b/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml new file mode 100644 index 0000000000..4abb1b9a54 --- /dev/null +++ b/configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RetinaNet.yaml" +MODEL: + WEIGHTS: 
"detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml b/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml new file mode 100644 index 0000000000..4a24ce3a9a --- /dev/null +++ b/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "../Base-RetinaNet.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git a/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml b/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml new file mode 100644 index 0000000000..3b5412d4a7 --- /dev/null +++ b/configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RetinaNet.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Detection/rpn_R_50_C4_1x.yaml b/configs/COCO-Detection/rpn_R_50_C4_1x.yaml new file mode 100644 index 0000000000..e04821156b --- /dev/null +++ b/configs/COCO-Detection/rpn_R_50_C4_1x.yaml @@ -0,0 +1,10 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + META_ARCHITECTURE: "ProposalNetwork" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + RPN: + PRE_NMS_TOPK_TEST: 12000 + POST_NMS_TOPK_TEST: 2000 diff --git a/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml b/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000..dc9c95203b --- /dev/null +++ b/configs/COCO-Detection/rpn_R_50_FPN_1x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "ProposalNetwork" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + RPN: + POST_NMS_TOPK_TEST: 2000 diff --git a/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml b/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml new file mode 100644 index 0000000000..1a94cc45a0 --- /dev/null +++ b/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml b/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml new file mode 100644 index 0000000000..67b70cf4be --- /dev/null +++ b/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml b/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml new file mode 100644 index 0000000000..1935a302d2 --- /dev/null +++ b/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml new file mode 100644 index 0000000000..a9aeb4eac3 --- /dev/null +++ 
b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 diff --git a/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml new file mode 100644 index 0000000000..38ed867d89 --- /dev/null +++ b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml new file mode 100644 index 0000000000..b13eefab2a --- /dev/null +++ b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 diff --git a/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml new file mode 100644 index 0000000000..d401016358 --- /dev/null +++ b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-DilatedC5.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000..d50fb866ca --- /dev/null +++ b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,6 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 diff --git a/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000000..be7d06b8e0 --- /dev/null +++ b/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml b/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml new file mode 100644 index 0000000000..d14c63f743 --- /dev/null +++ b/configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml @@ -0,0 +1,13 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: True + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml b/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml new file mode 100644 index 0000000000..4e03944a42 --- /dev/null +++ b/configs/COCO-Keypoints/Base-Keypoint-RCNN-FPN.yaml @@ -0,0 +1,15 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + KEYPOINT_ON: True + ROI_HEADS: + NUM_CLASSES: 1 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 0.5 # Keypoint AP degrades (though box AP 
improves) when using plain L1 loss + RPN: + # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2. + # 1000 proposals per-image is found to hurt box AP. + # Therefore we increase it to 1500 per-image. + POST_NMS_TOPK_TRAIN: 1500 +DATASETS: + TRAIN: ("keypoints_coco_2017_train",) + TEST: ("keypoints_coco_2017_val",) diff --git a/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml b/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml new file mode 100644 index 0000000000..9309535c57 --- /dev/null +++ b/configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml b/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000..7bf85cf745 --- /dev/null +++ b/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git a/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml b/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml new file mode 100644 index 0000000000..a07f243f65 --- /dev/null +++ b/configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml b/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml new file mode 100644 index 0000000000..d4bfa20a98 --- /dev/null +++ b/configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml b/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml new file mode 100644 index 0000000000..755c12018c --- /dev/null +++ b/configs/COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "PanopticFPN" + MASK_ON: True + SEM_SEG_HEAD: + LOSS_WEIGHT: 0.5 +DATASETS: + TRAIN: ("coco_2017_train_panoptic_separated",) + TEST: ("coco_2017_val_panoptic_separated",) diff --git a/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml b/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml new file mode 100644 index 0000000000..0e01f6fb31 --- /dev/null +++ b/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml b/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml new file mode 100644 index 0000000000..6afa2c1cc9 --- /dev/null +++ b/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml @@ -0,0 +1,5 @@ +_BASE_: "Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: 
"detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 diff --git a/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml b/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml new file mode 100644 index 0000000000..b956b3f673 --- /dev/null +++ b/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml @@ -0,0 +1,8 @@ +_BASE_: "Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +SOLVER: + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml b/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml new file mode 100644 index 0000000000..d6e7014526 --- /dev/null +++ b/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml @@ -0,0 +1,25 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + # WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + # For better, more stable performance initialize from COCO + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" + MASK_ON: True + ROI_HEADS: + NUM_CLASSES: 8 +# This is the setting used in Mask R-CNN paper, Appendix A +INPUT: + MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024) + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 1024 + MAX_SIZE_TRAIN: 2048 + MAX_SIZE_TEST: 2048 +DATASETS: + TRAIN: ("cityscapes_fine_instance_seg_train",) + TEST: ("cityscapes_fine_instance_seg_val",) +SOLVER: + BASE_LR: 0.01 + STEPS: (18000,) + MAX_ITER: 24000 + IMS_PER_BATCH: 8 +TEST: + EVAL_PERIOD: 8000 diff --git a/configs/Detectron1-Comparisons/README.md b/configs/Detectron1-Comparisons/README.md new file mode 100644 index 0000000000..18d13e9574 --- /dev/null +++ b/configs/Detectron1-Comparisons/README.md @@ -0,0 +1,77 @@ + +Detectron2's default settings and a few implementation details are different from Detectron. + +The differences in implementation details are shared in +[Compatibility with Other Libraries](../../docs/notes/compatibility.md). + +The differences in default config includes: +* Use scale augmentation during training. +* Use L1 loss instead of smooth L1 loss. +* Use `POOLER_SAMPLING_RATIO=0` instead of 2. +* Use `ROIAlignV2`. + +In this directory, we provide a few configs that mimic Detectron's behavior as close as possible. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | kp. AP | model id | download |
+| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+| Faster R-CNN | 1x | 0.219 | 0.058 | 3.1 | 36.9 | | | 137781054 | model \| metrics |
+| Keypoint R-CNN | 1x | 0.313 | 0.103 | 5.0 | 53.1 | | 64.2 | 137781195 | model \| metrics |
+| Mask R-CNN | 1x | 0.273 | 0.084 | 3.4 | 37.8 | 34.9 | | 137781281 | model \| metrics |
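
The default-config differences listed above map directly onto config keys; the `*_noaug_1x.yaml` files below set them in YAML. A minimal sketch of applying the same overrides programmatically — the config path and override values are taken from those files, and the yacs-style `merge_from_list` mechanism is assumed here:

```python
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file("configs/Base-RCNN-FPN.yaml")
# Move back toward Detectron1 behavior: no scale augmentation, smooth L1 losses,
# sampling ratio 2 and the original ROIAlign (mirrors faster_rcnn_R_50_FPN_noaug_1x.yaml).
cfg.merge_from_list([
    "INPUT.MIN_SIZE_TRAIN", (800,),
    "MODEL.RPN.SMOOTH_L1_BETA", 0.1111,
    "MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA", 1.0,
    "MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO", 2,
    "MODEL.ROI_BOX_HEAD.POOLER_TYPE", "ROIAlign",
])
```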
+ +## Comparisons: + +* Faster R-CNN: Detectron's AP is 36.7, similar to ours. +* Keypoint R-CNN: Detectron's AP is box 53.6, keypoint 64.2. Fixing a Detectron's + [bug](https://github.com/facebookresearch/Detectron/issues/459) lead to a drop in box AP, and can be + compensated back by some parameter tuning. +* Mask R-CNN: Detectron's AP is box 37.7, mask 33.9. We're 1 AP better in mask AP, due to more correct implementation. diff --git a/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml b/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml new file mode 100644 index 0000000000..6ce77f137f --- /dev/null +++ b/configs/Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x.yaml @@ -0,0 +1,17 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + # Detectron1 uses smooth L1 loss with some magic beta values. + # The defaults are changed to L1 loss in Detectron2. + RPN: + SMOOTH_L1_BETA: 0.1111 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" +INPUT: + # no scale augmentation + MIN_SIZE_TRAIN: (800, ) diff --git a/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml b/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000..aacf868ba5 --- /dev/null +++ b/configs/Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,27 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 1 + ROI_KEYPOINT_HEAD: + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + # Detectron1 uses smooth L1 loss with some magic beta values. + # The defaults are changed to L1 loss in Detectron2. + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + RPN: + SMOOTH_L1_BETA: 0.1111 + # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2 + # 1000 proposals per-image is found to hurt box AP. + # Therefore we increase it to 1500 per-image. + POST_NMS_TOPK_TRAIN: 1500 +DATASETS: + TRAIN: ("keypoints_coco_2017_train",) + TEST: ("keypoints_coco_2017_val",) diff --git a/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml b/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml new file mode 100644 index 0000000000..4ea86a8d8e --- /dev/null +++ b/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml @@ -0,0 +1,20 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + # Detectron1 uses smooth L1 loss with some magic beta values. + # The defaults are changed to L1 loss in Detectron2. 
+ RPN: + SMOOTH_L1_BETA: 0.1111 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + ROI_MASK_HEAD: + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" +INPUT: + # no scale augmentation + MIN_SIZE_TRAIN: (800, ) diff --git a/configs/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml b/configs/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml new file mode 100644 index 0000000000..f0c3a1bbc0 --- /dev/null +++ b/configs/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + MASK_ON: True + RESNETS: + DEPTH: 101 + ROI_HEADS: + NUM_CLASSES: 1230 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v0.5_train",) + TEST: ("lvis_v0.5_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/configs/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml b/configs/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000..64b4caa4ef --- /dev/null +++ b/configs/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,19 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 1230 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v0.5_train",) + TEST: ("lvis_v0.5_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/configs/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml b/configs/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml new file mode 100644 index 0000000000..c8b822c6c0 --- /dev/null +++ b/configs/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml @@ -0,0 +1,23 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/FAIR/X-101-32x8d.pkl" + PIXEL_STD: [57.375, 57.120, 58.395] + MASK_ON: True + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 101 + ROI_HEADS: + NUM_CLASSES: 1230 + SCORE_THRESH_TEST: 0.0001 +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +DATASETS: + TRAIN: ("lvis_v0.5_train",) + TEST: ("lvis_v0.5_val",) +TEST: + DETECTIONS_PER_IMAGE: 300 # LVIS allows up to 300 +DATALOADER: + SAMPLER_TRAIN: "RepeatFactorTrainingSampler" + REPEAT_THRESHOLD: 0.001 diff --git a/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml b/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml new file mode 100644 index 0000000000..abb33b6189 --- /dev/null +++ b/configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + NAME: CascadeROIHeads + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + RPN: + POST_NMS_TOPK_TRAIN: 2000 diff --git a/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml b/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml new file mode 100644 index 0000000000..fc117f6b5e --- /dev/null +++ b/configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml @@ -0,0 +1,36 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: True + WEIGHTS: 
"catalog://ImageNetPretrained/FAIR/X-152-32x8d-IN5k" + RESNETS: + STRIDE_IN_1X1: False # this is a C2 model + NUM_GROUPS: 32 + WIDTH_PER_GROUP: 8 + DEPTH: 152 + DEFORM_ON_PER_STAGE: [False, True, True, True] + ROI_HEADS: + NAME: "CascadeROIHeads" + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_CONV: 4 + NUM_FC: 1 + NORM: "GN" + CLS_AGNOSTIC_BBOX_REG: True + ROI_MASK_HEAD: + NUM_CONV: 8 + NORM: "GN" + RPN: + POST_NMS_TOPK_TRAIN: 2000 +SOLVER: + IMS_PER_BATCH: 128 + STEPS: (35000, 45000) + MAX_ITER: 50000 + BASE_LR: 0.16 +INPUT: + MIN_SIZE_TRAIN: (640, 864) + MIN_SIZE_TRAIN_SAMPLING: "range" + MAX_SIZE_TRAIN: 1440 + CROP: + ENABLED: True +TEST: + EVAL_PERIOD: 2500 diff --git a/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml b/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml new file mode 100644 index 0000000000..04ff988d07 --- /dev/null +++ b/configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3,Res4,Res5 + DEFORM_MODULATED: False diff --git a/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml b/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml new file mode 100644 index 0000000000..74d274e5a5 --- /dev/null +++ b/configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml @@ -0,0 +1,21 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-50-GN" + MASK_ON: True + RESNETS: + DEPTH: 50 + NORM: "GN" + STRIDE_IN_1X1: False + FPN: + NORM: "GN" + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_CONV: 4 + NUM_FC: 1 + NORM: "GN" + ROI_MASK_HEAD: + NORM: "GN" +SOLVER: + # 3x schedule + STEPS: (210000, 250000) + MAX_ITER: 270000 diff --git a/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml b/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml new file mode 100644 index 0000000000..34016cea3c --- /dev/null +++ b/configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml @@ -0,0 +1,26 @@ +# A large PanopticFPN for demo purposes. +# Use GN on backbone to support semantic seg. +# Use Cascade + Deform Conv to improve localization. +_BASE_: "../COCO-PanopticSegmentation/Base-Panoptic-FPN.yaml" +MODEL: + WEIGHTS: "catalog://ImageNetPretrained/FAIR/R-101-GN" + RESNETS: + DEPTH: 101 + NORM: "GN" + DEFORM_ON_PER_STAGE: [False, True, True, True] + STRIDE_IN_1X1: False + FPN: + NORM: "GN" + ROI_HEADS: + NAME: CascadeROIHeads + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + ROI_MASK_HEAD: + NORM: "GN" + RPN: + POST_NMS_TOPK_TRAIN: 2000 +SOLVER: + STEPS: (105000, 125000) + MAX_ITER: 135000 + IMS_PER_BATCH: 32 + BASE_LR: 0.04 diff --git a/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml b/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml new file mode 100644 index 0000000000..c963606c44 --- /dev/null +++ b/configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml @@ -0,0 +1,11 @@ +_BASE_: "mask_rcnn_R_50_FPN_3x_gn.yaml" +# INPUT: + # It makes sense to divide by STD when training from scratch + # But it seems to make no difference on the results and C2's models did't do this. + # So we keep things consistent with C2. 
+ # PIXEL_STD: [57.375, 57.12, 58.395] +MODEL: + WEIGHTS: "" + MASK_ON: True + BACKBONE: + FREEZE_AT: 0 diff --git a/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml b/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml new file mode 100644 index 0000000000..ea2a6baaeb --- /dev/null +++ b/configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + ROI_HEADS: + NUM_CLASSES: 20 +INPUT: + MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) + MIN_SIZE_TEST: 800 +DATASETS: + TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') + TEST: ('voc_2007_test',) +SOLVER: + STEPS: (12000, 16000) + MAX_ITER: 18000 # 17.4 epochs + WARMUP_ITERS: 100 diff --git a/configs/dcn/faster_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml b/configs/dcn/faster_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml new file mode 100644 index 0000000000..2fda6bb5c5 --- /dev/null +++ b/configs/dcn/faster_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3, Res4,Res5 + DEFORM_MODULATED: False diff --git a/configs/dcn/faster_rcnn_R_50_FPN_1x_dconv_c5.yaml b/configs/dcn/faster_rcnn_R_50_FPN_1x_dconv_c5.yaml new file mode 100644 index 0000000000..ba104554cf --- /dev/null +++ b/configs/dcn/faster_rcnn_R_50_FPN_1x_dconv_c5.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + DEFORM_ON_PER_STAGE: [False, False, False, True] # on Res5 + DEFORM_MODULATED: False diff --git a/configs/dcn/faster_rcnn_R_50_FPN_1x_mdconv_c3-c5.yaml b/configs/dcn/faster_rcnn_R_50_FPN_1x_mdconv_c3-c5.yaml new file mode 100644 index 0000000000..4922df01be --- /dev/null +++ b/configs/dcn/faster_rcnn_R_50_FPN_1x_mdconv_c3-c5.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3, Res4,Res5 + DEFORM_MODULATED: True diff --git a/configs/dcn/faster_rcnn_R_50_FPN_1x_mdconv_c5.yaml b/configs/dcn/faster_rcnn_R_50_FPN_1x_mdconv_c5.yaml new file mode 100644 index 0000000000..258d8e2990 --- /dev/null +++ b/configs/dcn/faster_rcnn_R_50_FPN_1x_mdconv_c5.yaml @@ -0,0 +1,9 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: False + RESNETS: + DEPTH: 50 + DEFORM_ON_PER_STAGE: [False, False, False, True] # on Res5 + DEFORM_MODULATED: True + diff --git a/configs/dcn/mask_rcnn_R_50_FPN_1x_mdconv_c3-c5.yaml b/configs/dcn/mask_rcnn_R_50_FPN_1x_mdconv_c3-c5.yaml new file mode 100644 index 0000000000..87fa9013c8 --- /dev/null +++ b/configs/dcn/mask_rcnn_R_50_FPN_1x_mdconv_c3-c5.yaml @@ -0,0 +1,8 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + DEFORM_ON_PER_STAGE: [False, True, True, True] # on Res3, Res4,Res5 + DEFORM_MODULATED: True diff --git a/configs/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml b/configs/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml new file mode 100644 index 0000000000..685589cc03 --- /dev/null +++ b/configs/mask_rcnn_R_50_FPN_1x_cls_agnostic.yaml @@ -0,0 +1,10 @@ +_BASE_: "Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: 
"detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + ROI_BOX_HEAD: + CLS_AGNOSTIC_BBOX_REG: True + ROI_MASK_HEAD: + CLS_AGNOSTIC_MASK: True diff --git a/configs/quick_schedules/README.md b/configs/quick_schedules/README.md new file mode 100644 index 0000000000..569cb5bec2 --- /dev/null +++ b/configs/quick_schedules/README.md @@ -0,0 +1,51 @@ +These are quick configs for performance or accuracy regression tracking purposes. + +## Perf testing configs: + +### Inference + +Reference devgpu configuration: + + - 48 core Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz + - 2x M40 (12GB) + - buck build @mode/dev-nosan + +configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml +``` +# Before https://github.com/fairinternal/detectron2/pull/84 +Total inference time: 0:00:30.808294 (0.6161658811569214 s / img per device, on 2 devices) +# After https://github.com/fairinternal/detectron2/pull/84 +Total inference time: 0:00:36.952044 (0.7390408849716187 s / img per device, on 2 devices) +``` + +configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml +``` +# Before https://github.com/fairinternal/detectron2/pull/84 +Total inference time: 0:00:21.773355 (0.435467095375061 s / img per device, on 2 devices) +# After https://github.com/fairinternal/detectron2/pull/84 +Total inference time: 0:00:28.766723 (0.5753344583511353 s / img per device, on 2 devices) +``` + +### Training + +TODO + +They are equivalent to the standard C4 / FPN models, only with extremely short schedules. + +Metrics to look at: + +``` +INFO: Total training time: 0:3:20.276231 +... +INFO: Total inference time: 0:01:20.276231 +``` + + +## Accuracy testing configs: + +They are simplified versions of standard models, trained and tested on the same +minival dataset, with short schedules. + +The schedule is designed to provide a stable enough mAP within minimal amount of training time. + +Metrics to look at: mAPs. 
diff --git a/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml b/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000..a2f37e5e2c --- /dev/null +++ b/configs/quick_schedules/fast_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Detection/fast_rcnn_R_50_FPN_1x/137635226/model_final_e5f7ce.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 45.70, 0.02]] diff --git a/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml b/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000..52fc0ec03c --- /dev/null +++ b/configs/quick_schedules/fast_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,15 @@ +_BASE_: "../COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("coco_2017_val_100",) + PROPOSAL_FILES_TRAIN: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) + TEST: ("coco_2017_val_100",) + PROPOSAL_FILES_TEST: ("detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/coco_2017_val_box_proposals_ee0dad.pkl", ) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml b/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000..14cf2aa82a --- /dev/null +++ b/configs/quick_schedules/keypoint_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x/137849621/model_final_a6e10b.pkl" +DATASETS: + TEST: ("keypoints_coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 52.47, 0.02], ["keypoints", "AP", 67.36, 0.02]] diff --git a/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml b/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000..dc09034bdd --- /dev/null +++ b/configs/quick_schedules/keypoint_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,14 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True +DATASETS: + TRAIN: ("keypoints_coco_2017_val_100",) + TEST: ("keypoints_coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml b/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml new file mode 100644 index 0000000000..4b92392f1c --- /dev/null +++ b/configs/quick_schedules/keypoint_rcnn_R_50_FPN_normalized_training_acc_test.yaml @@ -0,0 +1,30 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + NUM_CLASSES: 1 + ROI_KEYPOINT_HEAD: + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS: False + LOSS_WEIGHT: 4.0 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss + RPN: + SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss +DATASETS: + TRAIN: ("keypoints_coco_2017_val",) + TEST: 
("keypoints_coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +SOLVER: + WARMUP_FACTOR: 0.33333333 + WARMUP_ITERS: 100 + STEPS: (5500, 5800) + MAX_ITER: 6000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 55.35, 1.0], ["keypoints", "AP", 76.91, 1.0]] diff --git a/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml b/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000000..9bd962878f --- /dev/null +++ b/configs/quick_schedules/keypoint_rcnn_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,28 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + KEYPOINT_ON: True + RESNETS: + DEPTH: 50 + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + NUM_CLASSES: 1 + ROI_KEYPOINT_HEAD: + POOLER_RESOLUTION: 14 + POOLER_SAMPLING_RATIO: 2 + ROI_BOX_HEAD: + SMOOTH_L1_BETA: 1.0 # Keypoint AP degrades when using plain L1 loss + RPN: + SMOOTH_L1_BETA: 0.2 # Keypoint AP degrades when using plain L1 loss +DATASETS: + TRAIN: ("keypoints_coco_2017_val",) + TEST: ("keypoints_coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +SOLVER: + WARMUP_FACTOR: 0.33333333 + WARMUP_ITERS: 100 + STEPS: (5500, 5800) + MAX_ITER: 6000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 53.5, 1.0], ["keypoints", "AP", 72.4, 1.0]] diff --git a/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml b/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml new file mode 100644 index 0000000000..b2d5b7ff87 --- /dev/null +++ b/configs/quick_schedules/mask_rcnn_R_50_C4_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x/137849525/model_final_4ce675.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 47.37, 0.02], ["segm", "AP", 40.99, 0.02]] diff --git a/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml b/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml new file mode 100644 index 0000000000..6c4f1214ef --- /dev/null +++ b/configs/quick_schedules/mask_rcnn_R_50_C4_instant_test.yaml @@ -0,0 +1,14 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.001 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml b/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml new file mode 100644 index 0000000000..f68dd8f96c --- /dev/null +++ b/configs/quick_schedules/mask_rcnn_R_50_C4_training_acc_test.yaml @@ -0,0 +1,22 @@ +_BASE_: "../Base-RCNN-C4.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val",) + TEST: ("coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (600,) + MAX_SIZE_TRAIN: 1000 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1000 +SOLVER: + IMS_PER_BATCH: 8 # base uses 16 + WARMUP_FACTOR: 0.33333 + WARMUP_ITERS: 100 + STEPS: (11000, 11600) + MAX_ITER: 12000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 41.88, 0.7], ["segm", "AP", 33.79, 0.5]] diff --git a/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml b/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml new file mode 100644 index 
0000000000..e3ce6cf922 --- /dev/null +++ b/configs/quick_schedules/mask_rcnn_R_50_DC5_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x/137849551/model_final_84107b.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 47.44, 0.02], ["segm", "AP", 42.94, 0.02]] diff --git a/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml b/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000..a41dc53b41 --- /dev/null +++ b/configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,11 @@ +_BASE_: "../COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 47.34, 0.02], ["segm", "AP", 42.67, 0.02]] + # expected results do not use test-time augmentation. TTA results are not verified. + AUG: + ENABLED: True + MIN_SIZES: (400, 500) # to save some time diff --git a/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml b/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000..6dbfcde0bf --- /dev/null +++ b/configs/quick_schedules/mask_rcnn_R_50_FPN_instant_test.yaml @@ -0,0 +1,14 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml b/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000000..4992104a53 --- /dev/null +++ b/configs/quick_schedules/mask_rcnn_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,21 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_HEADS: + BATCH_SIZE_PER_IMAGE: 256 + MASK_ON: True +DATASETS: + TRAIN: ("coco_2017_val",) + TEST: ("coco_2017_val",) +INPUT: + MIN_SIZE_TRAIN: (600,) + MAX_SIZE_TRAIN: 1000 + MIN_SIZE_TEST: 800 + MAX_SIZE_TEST: 1000 +SOLVER: + WARMUP_FACTOR: 0.3333333 + WARMUP_ITERS: 100 + STEPS: (5500, 5800) + MAX_ITER: 6000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 42.8, 0.8], ["segm", "AP", 35.7, 0.8]] diff --git a/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml b/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml new file mode 100644 index 0000000000..70874e3a92 --- /dev/null +++ b/configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-PanopticSegmentation/panoptic_fpn_R_50_3x/139514569/model_final_c10459.pkl" +DATASETS: + TEST: ("coco_2017_val_100_panoptic_separated",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 46.47, 0.02], ["segm", "AP", 43.39, 0.02], ["sem_seg", "mIoU", 42.55, 0.02], ["panoptic_seg", "PQ", 38.99, 0.02]] diff --git a/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml b/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml new file mode 100644 index 0000000000..26f3b140c6 --- /dev/null +++ b/configs/quick_schedules/panoptic_fpn_R_50_instant_test.yaml @@ -0,0 
+1,19 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "PanopticFPN" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + SEM_SEG_HEAD: + LOSS_WEIGHT: 0.5 +DATASETS: + TRAIN: ("coco_2017_val_100_panoptic_separated",) + TEST: ("coco_2017_val_100_panoptic_separated",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml b/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml new file mode 100644 index 0000000000..8aeba40f82 --- /dev/null +++ b/configs/quick_schedules/panoptic_fpn_R_50_training_acc_test.yaml @@ -0,0 +1,20 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "PanopticFPN" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + RESNETS: + DEPTH: 50 + SEM_SEG_HEAD: + LOSS_WEIGHT: 0.5 +DATASETS: + TRAIN: ("coco_2017_val_panoptic_separated",) + TEST: ("coco_2017_val_panoptic_separated",) +SOLVER: + BASE_LR: 0.01 + WARMUP_FACTOR: 0.001 + WARMUP_ITERS: 500 + STEPS: (5500,) + MAX_ITER: 7000 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 46.80, 1.1], ["segm", "AP", 38.93, 0.7], ["sem_seg", "mIoU", 63.99, 0.9], ["panoptic_seg", "PQ", 48.23, 0.8]] diff --git a/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml b/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000..36b998833b --- /dev/null +++ b/configs/quick_schedules/retinanet_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Detection/retinanet_R_50_FPN_3x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Detection/retinanet_R_50_FPN_3x/137849486/model_final_4cafe0.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 44.36, 0.02]] diff --git a/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml b/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000..8d95c1f614 --- /dev/null +++ b/configs/quick_schedules/retinanet_R_50_FPN_instant_test.yaml @@ -0,0 +1,13 @@ +_BASE_: "../COCO-Detection/retinanet_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml b/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000..c7c3f908a9 --- /dev/null +++ b/configs/quick_schedules/rpn_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,7 @@ +_BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://COCO-Detection/rpn_R_50_FPN_1x/137258492/model_final_02ce48.pkl" +DATASETS: + TEST: ("coco_2017_val_100",) +TEST: + EXPECTED_RESULTS: [["box_proposals", "AR@1000", 58.16, 0.02]] diff --git a/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml b/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000..402d432477 --- /dev/null +++ b/configs/quick_schedules/rpn_R_50_FPN_instant_test.yaml @@ -0,0 +1,13 @@ +_BASE_: "../COCO-Detection/rpn_R_50_FPN_1x.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +DATASETS: + TRAIN: ("coco_2017_val_100",) + TEST: ("coco_2017_val_100",) +SOLVER: + STEPS: (30,) + MAX_ITER: 40 + BASE_LR: 0.005 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 
2 diff --git a/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml b/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000..bca74987d5 --- /dev/null +++ b/configs/quick_schedules/semantic_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,10 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://semantic_R_50_FPN_1x/111802073/model_final_c18079783c55a94968edc28b7101c5f0.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TEST: ("coco_2017_val_100_panoptic_stuffonly",) +TEST: + EXPECTED_RESULTS: [["sem_seg", "mIoU", 39.53, 0.02], ["sem_seg", "mACC", 51.50, 0.02]] diff --git a/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml b/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml new file mode 100644 index 0000000000..14ab606f21 --- /dev/null +++ b/configs/quick_schedules/semantic_R_50_FPN_instant_test.yaml @@ -0,0 +1,18 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TRAIN: ("coco_2017_val_100_panoptic_stuffonly",) + TEST: ("coco_2017_val_100_panoptic_stuffonly",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +SOLVER: + BASE_LR: 0.005 + STEPS: (30,) + MAX_ITER: 40 + IMS_PER_BATCH: 4 +DATALOADER: + NUM_WORKERS: 2 diff --git a/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml b/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000000..1f78d77588 --- /dev/null +++ b/configs/quick_schedules/semantic_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,20 @@ +_BASE_: "../Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TRAIN: ("coco_2017_val_panoptic_stuffonly",) + TEST: ("coco_2017_val_panoptic_stuffonly",) +SOLVER: + BASE_LR: 0.01 + WARMUP_FACTOR: 0.001 + WARMUP_ITERS: 300 + STEPS: (5500,) + MAX_ITER: 7000 +TEST: + EXPECTED_RESULTS: [["sem_seg", "mIoU", 76.51, 1.0], ["sem_seg", "mACC", 83.25, 1.0]] +INPUT: + # no scale augmentation + MIN_SIZE_TRAIN: (800, ) diff --git a/configs/semantic_R_101_FPN_1x.yaml b/configs/semantic_R_101_FPN_1x.yaml new file mode 100644 index 0000000000..ba8eb98119 --- /dev/null +++ b/configs/semantic_R_101_FPN_1x.yaml @@ -0,0 +1,11 @@ +_BASE_: "Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl" + RESNETS: + DEPTH: 101 +DATASETS: + TRAIN: ("coco_2017_train_panoptic_stuffonly",) + TEST: ("coco_2017_val_panoptic_stuffonly",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) diff --git a/configs/semantic_R_50_FPN_1x.yaml b/configs/semantic_R_50_FPN_1x.yaml new file mode 100644 index 0000000000..fcc3ba2d74 --- /dev/null +++ b/configs/semantic_R_50_FPN_1x.yaml @@ -0,0 +1,11 @@ +_BASE_: "Base-RCNN-FPN.yaml" +MODEL: + META_ARCHITECTURE: "SemanticSegmentor" + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + RESNETS: + DEPTH: 50 +DATASETS: + TRAIN: ("coco_2017_train_panoptic_stuffonly",) + TEST: ("coco_2017_val_panoptic_stuffonly",) +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) diff --git a/datasets/README.md b/datasets/README.md new file mode 100644 index 0000000000..cde1919164 --- /dev/null +++ b/datasets/README.md @@ -0,0 +1,80 @@ + +For a few datasets that detectron2 natively supports, +the datasets are assumed to exist in a directory called 
+"datasets/", under the directory where you launch the program. +with the following directory structure: + +## Expected dataset structure for COCO instance/keypoint detection: + +``` +coco/ + annotations/ + instances_{train,val}2017.json + person_keypoints_{train,val}2017.json + {train,val}2017/ + # image files that are mentioned in the corresponding json +``` + +You can use the 2014 version of the dataset as well. + +Some of the builtin tests (`run_*_tests.sh`) uses a tiny version of the COCO dataset, +which you can download with `./prepare_for_tests.sh`. + +## Expected dataset structure for PanopticFPN: + +``` +coco/ + annotations/ + panoptic_{train,val}2017.json + panoptic_{train,val}2017/ + # png annotations +``` + +Install panopticapi by: +``` +pip install git+https://github.com/cocodataset/panopticapi.git +``` +Then, run `./prepare_panoptic_fpn.py`, to extract semantic annotations from panoptic annotations. + +## Expected dataset structure for LVIS instance detection/segmentation: +``` +coco/ + {train,val,test}2017/ +lvis/ + lvis_v0.5_{train,val}.json + lvis_v0.5_image_info_test.json +``` + +Install lvis-api by: +``` +pip install git+https://github.com/lvis-dataset/lvis-api.git +``` + +## Expected dataset structure for cityscapes: +``` +cityscapes/ + gtFine/ + train/ + aachen/ + color.png, instanceIds.png, labelIds.png, polygons.json + labelTrainIds.png (created by cityscapesscripts/preparation/createTrainIdLabelImgs.py) + ... + val/ + test/ + leftImg8bit/ + train/ + val/ + test/ +``` +Install cityscapes scripts by: +``` +pip install git+https://github.com/mcordts/cityscapesScripts.git +``` + +## Expected dataset structure for Pascal VOC: +``` +VOC20{07,12}/ + Annotations/ + ImageSets/ + JPEGImages/ +``` diff --git a/datasets/prepare_for_tests.sh b/datasets/prepare_for_tests.sh new file mode 100755 index 0000000000..d59b5643c9 --- /dev/null +++ b/datasets/prepare_for_tests.sh @@ -0,0 +1,22 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# Download some files needed for running tests. + +cd "${0%/*}" + +BASE=https://dl.fbaipublicfiles.com/detectron2 +mkdir -p coco/annotations + +for anno in instances_val2017_100 \ + person_keypoints_val2017_100 \ + instances_minival2014_100 \ + person_keypoints_minival2014_100; do + + dest=coco/annotations/$anno.json + [[ -s $dest ]] && { + echo "$dest exists. Skipping ..." + } || { + wget $BASE/annotations/coco/$anno.json -O $dest + } +done diff --git a/datasets/prepare_panoptic_fpn.py b/datasets/prepare_panoptic_fpn.py new file mode 100644 index 0000000000..88007b3ba2 --- /dev/null +++ b/datasets/prepare_panoptic_fpn.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import time +import functools +import json +import multiprocessing as mp +import numpy as np +import os +from PIL import Image + +from detectron2.data.datasets.builtin import COCO_CATEGORIES +from fvcore.common.download import download + +from panopticapi.utils import rgb2id + + +def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map): + panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32) + panoptic = rgb2id(panoptic) + output = np.zeros_like(panoptic, dtype=np.uint8) + 255 + for seg in segments: + cat_id = seg["category_id"] + new_cat_id = id_map[cat_id] + output[panoptic == seg["id"]] = new_cat_id + Image.fromarray(output).save(output_semantic) + + +def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories): + """ + Create semantic segmentation annotations from panoptic segmentation + annotations, to be used by PanopticFPN. + + It maps all thing categories to class 0, and maps all unlabeled pixels to class 255. + It maps all stuff categories to contiguous ids starting from 1. + + Args: + panoptic_json (str): path to the panoptic json file, in COCO's format. + panoptic_root (str): a directory with panoptic annotation files, in COCO's format. + sem_seg_root (str): a directory to output semantic annotation files + categories (list[dict]): category metadata. Each dict needs to have: + "id": corresponds to the "category_id" in the json annotations + "isthing": 0 or 1 + """ + os.makedirs(sem_seg_root, exist_ok=True) + + stuff_ids = [k["id"] for k in categories if k["isthing"] == 0] + thing_ids = [k["id"] for k in categories if k["isthing"] == 1] + id_map = {} # map from category id to id in the output semantic annotation + assert len(stuff_ids) <= 254 + for i, stuff_id in enumerate(stuff_ids): + id_map[stuff_id] = i + 1 + for thing_id in thing_ids: + id_map[thing_id] = 0 + id_map[0] = 255 + + with open(panoptic_json) as f: + obj = json.load(f) + + pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4)) + + def iter_annotations(): + for anno in obj["annotations"]: + file_name = anno["file_name"] + segments = anno["segments_info"] + input = os.path.join(panoptic_root, file_name) + output = os.path.join(sem_seg_root, file_name) + yield input, output, segments + + print("Start writing to {} ...".format(sem_seg_root)) + start = time.time() + pool.starmap( + functools.partial(_process_panoptic_to_semantic, id_map=id_map), + iter_annotations(), + chunksize=100, + ) + print("Finished. 
time: {:.2f}s".format(time.time() - start)) + + +if __name__ == "__main__": + dataset_dir = os.path.join(os.path.dirname(__file__), "coco") + for s in ["val2017", "train2017"]: + separate_coco_semantic_from_panoptic( + os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)), + os.path.join(dataset_dir, "panoptic_{}".format(s)), + os.path.join(dataset_dir, "panoptic_stuff_{}".format(s)), + COCO_CATEGORIES, + ) + + # Prepare val2017_100 for quick testing: + + dest_dir = os.path.join(dataset_dir, "annotations/") + URL_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" + download(URL_PREFIX + "annotations/coco/panoptic_val2017_100.json", dest_dir) + with open(os.path.join(dest_dir, "panoptic_val2017_100.json")) as f: + obj = json.load(f) + + def link_val100(dir_full, dir_100): + print("Creating " + dir_100 + " ...") + os.makedirs(dir_100, exist_ok=True) + for img in obj["images"]: + basename = os.path.splitext(img["file_name"])[0] + src = os.path.join(dir_full, basename + ".png") + dst = os.path.join(dir_100, basename + ".png") + src = os.path.relpath(src, start=dir_100) + os.symlink(src, dst) + + link_val100( + os.path.join(dataset_dir, "panoptic_val2017"), + os.path.join(dataset_dir, "panoptic_val2017_100"), + ) + + link_val100( + os.path.join(dataset_dir, "panoptic_stuff_val2017"), + os.path.join(dataset_dir, "panoptic_stuff_val2017_100"), + ) diff --git a/demo/README.md b/demo/README.md new file mode 100644 index 0000000000..359affedce --- /dev/null +++ b/demo/README.md @@ -0,0 +1,16 @@ +## Webcam and Jupyter notebook demo + +This folder contains a simple webcam demo that illustrates how you can use `detectron2` for inference. + +You can start it by running it from this folder, using one of the following commands: +```bash +# by default, it runs on the GPU +# for best results, use min-image-size 800 +python webcam.py --min-image-size 800 +# can also run it on the CPU +python webcam.py --min-image-size 300 MODEL.DEVICE cpu +# or change the model that you want to use +python webcam.py --config-file ../configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml --min-image-size 300 MODEL.DEVICE cpu +# in order to see the probability heatmaps, pass --show-mask-heatmaps +python webcam.py --min-image-size 300 --show-mask-heatmaps MODEL.DEVICE cpu +``` diff --git a/demo/demo.py b/demo/demo.py new file mode 100644 index 0000000000..26f4c487c7 --- /dev/null +++ b/demo/demo.py @@ -0,0 +1,142 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import argparse +import glob +import multiprocessing as mp +import os +import time +import cv2 +import tqdm + +from detectron2.config import get_cfg +from detectron2.data.detection_utils import read_image +from detectron2.utils.logger import setup_logger + +from predictor import VisualizationDemo + + +def setup_cfg(args): + # load config from file and command-line arguments + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + # Set score_threshold for builtin models + cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold + cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold + cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold + cfg.freeze() + return cfg + + +def get_parser(): + parser = argparse.ArgumentParser(description="Detectron2 Demo") + parser.add_argument( + "--config-file", + default="configs/quick_schedules/e2e_mask_rcnn_R_50_FPN_inference_acc_test.yaml", + metavar="FILE", + help="path to config file", + ) + parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.") + parser.add_argument("--video-input", help="Path to video file.") + parser.add_argument("--input", nargs="+", help="A list of space separated input images") + parser.add_argument( + "--output", + help="A file or directory to save output visualizations. " + "If not given, will show output in an OpenCV window.", + ) + + parser.add_argument( + "--confidence-threshold", + type=float, + default=0.5, + help="Minimum score for instance predictions to be shown", + ) + parser.add_argument( + "--opts", + help="Modify model config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + return parser + + +if __name__ == "__main__": + mp.set_start_method("spawn", force=True) + args = get_parser().parse_args() + logger = setup_logger() + logger.info("Arguments: " + str(args)) + + cfg = setup_cfg(args) + + demo = VisualizationDemo(cfg) + + if args.input: + if len(args.input) == 1: + args.input = glob.glob(os.path.expanduser(args.input[0])) + for path in tqdm.tqdm(args.input, disable=not args.output): + # use PIL, to be consistent with evaluation + img = read_image(path, format="BGR") + start_time = time.time() + predictions, visualized_output = demo.run_on_image(img) + logger.info( + "{}: detected {} instances in {:.2f}s".format( + path, len(predictions["instances"]), time.time() - start_time + ) + ) + + if args.output: + if os.path.isdir(args.output): + assert os.path.isdir(args.output), args.output + out_filename = os.path.join(args.output, os.path.basename(path)) + else: + assert len(args.input) == 1, "Please specify a directory with args.output" + out_filename = args.output + visualized_output.save(out_filename) + else: + cv2.imshow("COCO detections", visualized_output.get_image()[:, :, ::-1]) + if cv2.waitKey(0) == 27: + break # esc to quit + elif args.webcam: + assert args.input is None, "Cannot have both --input and --webcam!" 
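+        # Open the default camera (device 0) and stream visualized predictions frame by frame;
+        # press Esc in the display window to stop.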
+ cam = cv2.VideoCapture(0) + for vis in tqdm.tqdm(demo.run_on_video(cam)): + cv2.imshow("COCO detections", vis) + if cv2.waitKey(1) == 27: + break # esc to quit + cv2.destroyAllWindows() + elif args.video_input: + video = cv2.VideoCapture(args.video_input) + width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) + frames_per_second = video.get(cv2.CAP_PROP_FPS) + num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) + basename = os.path.basename(args.video_input) + + if args.output: + if os.path.isdir(args.output): + output_fname = os.path.join(args.output, basename) + output_fname = os.path.splitext(output_fname)[0] + ".mkv" + else: + output_fname = args.output + assert not os.path.isfile(output_fname), output_fname + output_file = cv2.VideoWriter( + filename=output_fname, + # some installation of opencv may not support x264 (due to its license), + # you can try other format (e.g. MPEG) + fourcc=cv2.VideoWriter_fourcc(*"x264"), + fps=float(frames_per_second), + frameSize=(width, height), + isColor=True, + ) + assert os.path.isfile(args.video_input) + for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames): + if args.output: + output_file.write(vis_frame) + else: + cv2.imshow(basename, vis_frame) + if cv2.waitKey(1) == 27: + break # esc to quit + video.release() + if args.output: + output_file.release() + else: + cv2.destroyAllWindows() diff --git a/demo/predictor.py b/demo/predictor.py new file mode 100644 index 0000000000..e470128616 --- /dev/null +++ b/demo/predictor.py @@ -0,0 +1,218 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import atexit +import bisect +import multiprocessing as mp +from collections import deque +import cv2 +import torch + +from detectron2.data import MetadataCatalog +from detectron2.engine.defaults import DefaultPredictor +from detectron2.utils.video_visualizer import VideoVisualizer +from detectron2.utils.visualizer import ColorMode, Visualizer + + +class VisualizationDemo(object): + def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False): + """ + Args: + cfg (CfgNode): + instance_mode (ColorMode): + parallel (bool): whether to run the model in different processes from visualization. + Useful since the visualization logic can be slow. + """ + self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0]) + self.cpu_device = torch.device("cpu") + self.instance_mode = instance_mode + + self.parallel = parallel + if parallel: + num_gpu = torch.cuda.device_count() + self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu) + else: + self.predictor = DefaultPredictor(cfg) + + def run_on_image(self, image): + """ + Args: + image (np.ndarray): an image of shape (H, W, C) (in BGR order). + This is the format used by OpenCV. + + Returns: + predictions (dict): the output of the model. + vis_output (VisImage): the visualized image output. + """ + vis_output = None + predictions = self.predictor(image) + # Convert image from OpenCV BGR format to Matplotlib RGB format. 
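+        # (a negative-step slice over the channel axis reverses BGR into RGB, as a view rather than a copy)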
+ image = image[:, :, ::-1] + visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode) + if "panoptic_seg" in predictions: + panoptic_seg, segments_info = predictions["panoptic_seg"] + vis_output = visualizer.draw_panoptic_seg_predictions( + panoptic_seg.to(self.cpu_device), segments_info + ) + else: + if "sem_seg" in predictions: + vis_output = visualizer.draw_sem_seg( + predictions["sem_seg"].argmax(dim=0).to(self.cpu_device) + ) + if "instances" in predictions: + instances = predictions["instances"].to(self.cpu_device) + vis_output = visualizer.draw_instance_predictions(predictions=instances) + + return predictions, vis_output + + def _frame_from_video(self, video): + while video.isOpened(): + success, frame = video.read() + if success: + yield frame + else: + break + + def run_on_video(self, video): + """ + Visualizes predictions on frames of the input video. + + Args: + video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be + either a webcam or a video file. + + Yields: + ndarray: BGR visualizations of each video frame. + """ + video_visualizer = VideoVisualizer(self.metadata, self.instance_mode) + + def process_predictions(frame, predictions): + frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + if "panoptic_seg" in predictions: + panoptic_seg, segments_info = predictions["panoptic_seg"] + vis_frame = video_visualizer.draw_panoptic_seg_predictions( + frame, panoptic_seg.to(self.cpu_device), segments_info + ) + elif "instances" in predictions: + predictions = predictions["instances"].to(self.cpu_device) + vis_frame = video_visualizer.draw_instance_predictions(frame, predictions) + elif "sem_seg" in predictions: + vis_frame = video_visualizer.draw_sem_seg( + frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device) + ) + + # Converts Matplotlib RGB format to OpenCV BGR format + vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR) + return vis_frame + + frame_gen = self._frame_from_video(video) + if self.parallel: + buffer_size = self.predictor.default_buffer_size + + frame_data = deque() + + for cnt, frame in enumerate(frame_gen): + frame_data.append(frame) + self.predictor.put(frame) + + if cnt >= buffer_size: + frame = frame_data.popleft() + predictions = self.predictor.get() + yield process_predictions(frame, predictions) + + while len(frame_data): + frame = frame_data.popleft() + predictions = self.predictor.get() + yield process_predictions(frame, predictions) + else: + for frame in frame_gen: + yield process_predictions(frame, self.predictor(frame)) + + +class AsyncPredictor: + """ + A predictor that runs the model asynchronously, possibly on >1 GPUs. + Because rendering the visualization takes a considerable amount of time, + this helps improve throughput when rendering videos.
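+
+    A rough usage sketch (assuming ``cfg`` is an ordinary detectron2 config)::
+
+        predictor = AsyncPredictor(cfg, num_gpus=1)
+        predictor.put(frame)       # submit a frame for inference
+        outputs = predictor.get()  # results are returned in submission order
+        # or, for a single frame: outputs = predictor(frame)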
+ """ + + class _StopToken: + pass + + class _PredictWorker(mp.Process): + def __init__(self, cfg, task_queue, result_queue): + self.cfg = cfg + self.task_queue = task_queue + self.result_queue = result_queue + super().__init__() + + def run(self): + predictor = DefaultPredictor(self.cfg) + + while True: + task = self.task_queue.get() + if isinstance(task, AsyncPredictor._StopToken): + break + idx, data = task + result = predictor(data) + self.result_queue.put((idx, result)) + + def __init__(self, cfg, num_gpus: int = 1): + """ + Args: + cfg (CfgNode): + num_gpus (int): if 0, will run on CPU + """ + num_workers = max(num_gpus, 1) + self.task_queue = mp.Queue(maxsize=num_workers * 3) + self.result_queue = mp.Queue(maxsize=num_workers * 3) + self.procs = [] + for gpuid in range(max(num_gpus, 1)): + cfg = cfg.clone() + cfg.defrost() + cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu" + self.procs.append( + AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue) + ) + + self.put_idx = 0 + self.get_idx = 0 + self.result_rank = [] + self.result_data = [] + + for p in self.procs: + p.start() + atexit.register(self.shutdown) + + def put(self, image): + self.put_idx += 1 + self.task_queue.put((self.put_idx, image)) + + def get(self): + self.get_idx += 1 # the index needed for this request + if len(self.result_rank) and self.result_rank[0] == self.get_idx: + res = self.result_data[0] + del self.result_data[0], self.result_rank[0] + return res + + while True: + # make sure the results are returned in the correct order + idx, res = self.result_queue.get() + if idx == self.get_idx: + return res + insert = bisect.bisect(self.result_rank, idx) + self.result_rank.insert(insert, idx) + self.result_data.insert(insert, res) + + def __len__(self): + return self.put_idx - self.get_idx + + def __call__(self, image): + self.put(image) + return self.get() + + def shutdown(self): + for _ in self.procs: + self.task_queue.put(AsyncPredictor._StopToken()) + + @property + def default_buffer_size(self): + return len(self.procs) * 5 diff --git a/detectron2/__init__.py b/detectron2/__init__.py new file mode 100644 index 0000000000..d1f75b3ba5 --- /dev/null +++ b/detectron2/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +from .utils.env import setup_environment + +setup_environment() + + +__version__ = "0.1" diff --git a/detectron2/checkpoint/__init__.py b/detectron2/checkpoint/__init__.py new file mode 100644 index 0000000000..a6022946c6 --- /dev/null +++ b/detectron2/checkpoint/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# File: + + +from . import model_zoo as _UNUSED # register the handler +from .detection_checkpoint import DetectionCheckpointer +from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer + +__all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"] diff --git a/detectron2/checkpoint/c2_model_loading.py b/detectron2/checkpoint/c2_model_loading.py new file mode 100644 index 0000000000..10703358b8 --- /dev/null +++ b/detectron2/checkpoint/c2_model_loading.py @@ -0,0 +1,313 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import copy +import logging +import re +import torch +from fvcore.common.checkpoint import ( + get_missing_parameters_message, + get_unexpected_parameters_message, +) + + +def convert_basic_c2_names(original_keys): + """ + Apply some basic name conversion to names in C2 weights. + It only deals with typical backbone models. + + Args: + original_keys (list[str]): + Returns: + list[str]: The same number of strings matching those in original_keys. + """ + layer_keys = copy.deepcopy(original_keys) + layer_keys = [ + {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys + ] # some hard-coded mappings + + layer_keys = [k.replace("_", ".") for k in layer_keys] + layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] + layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] + # Uniform both bn and gn names to "norm" + layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] + layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] + layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] + + # stem + layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] + # to avoid mis-matching with "conv1" in other components (e.g. detection head) + layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] + + # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) + # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] + # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] + # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] + # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] + + # blocks + layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] + layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] + layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] + layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] + + # DensePose substitutions + layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] + layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] + layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] + layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] + layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] + return layer_keys + + +def convert_c2_detectron_names(weights): + """ + Map Caffe2 Detectron weight names to Detectron2 names. 
+ + Args: + weights (dict): name -> tensor + + Returns: + dict: detectron2 names -> tensor + dict: detectron2 names -> C2 names + """ + logger = logging.getLogger(__name__) + logger.info("Remapping C2 weights ......") + original_keys = sorted(weights.keys()) + layer_keys = copy.deepcopy(original_keys) + + layer_keys = convert_basic_c2_names(layer_keys) + + # -------------------------------------------------------------------------- + # RPN hidden representation conv + # -------------------------------------------------------------------------- + # FPN case + # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then + # shared for all other levels, hence the appearance of "fpn2" + layer_keys = [ + k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys + ] + # Non-FPN case + layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] + + # -------------------------------------------------------------------------- + # RPN box transformation conv + # -------------------------------------------------------------------------- + # FPN case (see note above about "fpn2") + layer_keys = [ + k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") + for k in layer_keys + ] + layer_keys = [ + k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") + for k in layer_keys + ] + # Non-FPN case + layer_keys = [ + k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys + ] + layer_keys = [ + k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") + for k in layer_keys + ] + + # -------------------------------------------------------------------------- + # Fast R-CNN box head + # -------------------------------------------------------------------------- + layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] + layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] + layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] + layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] + # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s + layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] + + # -------------------------------------------------------------------------- + # FPN lateral and output convolutions + # -------------------------------------------------------------------------- + def fpn_map(name): + """ + Look for keys with the following patterns: + 1) Starts with "fpn.inner." 
+ Example: "fpn.inner.res2.2.sum.lateral.weight" + Meaning: These are lateral pathway convolutions + 2) Starts with "fpn.res" + Example: "fpn.res2.2.sum.weight" + Meaning: These are FPN output convolutions + """ + splits = name.split(".") + norm = ".norm" if "norm" in splits else "" + if name.startswith("fpn.inner."): + # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] + stage = int(splits[2][len("res") :]) + return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) + elif name.startswith("fpn.res"): + # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] + stage = int(splits[1][len("res") :]) + return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) + return name + + layer_keys = [fpn_map(k) for k in layer_keys] + + # -------------------------------------------------------------------------- + # Mask R-CNN mask head + # -------------------------------------------------------------------------- + # roi_heads.StandardROIHeads case + layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] + layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] + layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] + # roi_heads.Res5ROIHeads case + layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] + + # -------------------------------------------------------------------------- + # Keypoint R-CNN head + # -------------------------------------------------------------------------- + # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" + layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] + layer_keys = [ + k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys + ] + layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] + + # -------------------------------------------------------------------------- + # Done with replacements + # -------------------------------------------------------------------------- + assert len(set(layer_keys)) == len(layer_keys) + assert len(original_keys) == len(layer_keys) + + new_weights = {} + new_keys_to_original_keys = {} + for orig, renamed in zip(original_keys, layer_keys): + new_keys_to_original_keys[renamed] = orig + if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): + # remove the meaningless prediction weight for background class + new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 + new_weights[renamed] = weights[orig][new_start_idx:] + logger.info( + "Remove prediction weight for background class in {}. The shape changes from " + "{} to {}.".format( + renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) + ) + ) + elif renamed.startswith("cls_score."): + # move weights of bg class from original index 0 to last index + logger.info( + "Move classification weights for background class in {} from index 0 to " + "index {}.".format(renamed, weights[orig].shape[0] - 1) + ) + new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) + else: + new_weights[renamed] = weights[orig] + + return new_weights, new_keys_to_original_keys + + +# Note the current matching is not symmetric. +# it assumes model_state_dict will have longer names. 
+def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): + """ + Match names between the two state-dict, and update the values of model_state_dict in-place with + copies of the matched tensor in ckpt_state_dict. + If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 + model and will be renamed at first. + + Strategy: suppose that the models that we will create will have prefixes appended + to each of its keys, for example due to an extra level of nesting that the original + pre-trained weights from ImageNet won't contain. For example, model.state_dict() + might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains + res2.conv1.weight. We thus want to match both parameters together. + For that, we look for each model weight, look among all loaded keys if there is one + that is a suffix of the current weight name, and use it if that's the case. + If multiple matches exist, take the one with longest size + of the corresponding name. For example, for the same model as before, the pretrained + weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, + we want to match backbone[0].body.conv1.weight to conv1.weight, and + backbone[0].body.res2.conv1.weight to res2.conv1.weight. + """ + model_keys = sorted(list(model_state_dict.keys())) + if c2_conversion: + ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) + # original_keys: the name in the original dict (before renaming) + else: + original_keys = {x: x for x in ckpt_state_dict.keys()} + ckpt_keys = sorted(list(ckpt_state_dict.keys())) + + def match(a, b): + # Matched ckpt_key should be a complete (starts with '.') suffix. + # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, + # but matches whatever_conv1 or mesh_head.whatever_conv1. + return a == b or a.endswith("." + b) + + # get a matrix of string matches, where each (i, j) entry correspond to the size of the + # ckpt_key string, if it matches + match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] + match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) + # use the matched one with longest size in case of multiple matches + max_match_size, idxs = match_matrix.max(1) + # remove indices that correspond to no-match + idxs[max_match_size == 0] = -1 + + # used for logging + max_len_model = max(len(key) for key in model_keys) if model_keys else 1 + max_len_ckpt = max(len(key) for key in ckpt_keys) if ckpt_keys else 1 + log_str_template = "{: <{}} loaded from {: <{}} of shape {}" + logger = logging.getLogger(__name__) + # matched_pairs (matched checkpoint key --> matched model key) + matched_keys = {} + for idx_model, idx_ckpt in enumerate(idxs.tolist()): + if idx_ckpt == -1: + continue + key_model = model_keys[idx_model] + key_ckpt = ckpt_keys[idx_ckpt] + value_ckpt = ckpt_state_dict[key_ckpt] + shape_in_model = model_state_dict[key_model].shape + + if shape_in_model != value_ckpt.shape: + logger.warning( + "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( + key_ckpt, value_ckpt.shape, key_model, shape_in_model + ) + ) + logger.warning( + "{} will not be loaded. Please double check and see if this is desired.".format( + key_ckpt + ) + ) + continue + + model_state_dict[key_model] = value_ckpt.clone() + if key_ckpt in matched_keys: # already added to matched_keys + logger.error( + "Ambiguity found for {} in checkpoint!" 
+ "It matches at least two keys in the model ({} and {}).".format( + key_ckpt, key_model, matched_keys[key_ckpt] + ) + ) + raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") + + matched_keys[key_ckpt] = key_model + logger.info( + log_str_template.format( + key_model, + max_len_model, + original_keys[key_ckpt], + max_len_ckpt, + tuple(shape_in_model), + ) + ) + matched_model_keys = matched_keys.values() + matched_ckpt_keys = matched_keys.keys() + # print warnings about unmatched keys on both side + unmatched_model_keys = [k for k in model_keys if k not in matched_model_keys] + if len(unmatched_model_keys): + logger.info(get_missing_parameters_message(unmatched_model_keys)) + + unmatched_ckpt_keys = [k for k in ckpt_keys if k not in matched_ckpt_keys] + if len(unmatched_ckpt_keys): + logger.info( + get_unexpected_parameters_message(original_keys[x] for x in unmatched_ckpt_keys) + ) diff --git a/detectron2/checkpoint/detection_checkpoint.py b/detectron2/checkpoint/detection_checkpoint.py new file mode 100644 index 0000000000..4e0a35e04f --- /dev/null +++ b/detectron2/checkpoint/detection_checkpoint.py @@ -0,0 +1,59 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import pickle +from fvcore.common.checkpoint import Checkpointer +from fvcore.common.file_io import PathManager + +import detectron2.utils.comm as comm + +from .c2_model_loading import align_and_update_state_dicts + + +class DetectionCheckpointer(Checkpointer): + """ + Same as :class:`Checkpointer`, but is able to handle models in detectron & detectron2 + model zoo, and apply conversions for legacy models. + """ + + def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables): + is_main_process = comm.is_main_process() + super().__init__( + model, + save_dir, + save_to_disk=is_main_process if save_to_disk is None else save_to_disk, + **checkpointables, + ) + + def _load_file(self, filename): + if filename.endswith(".pkl"): + with PathManager.open(filename, "rb") as f: + data = pickle.load(f, encoding="latin1") + if "model" in data and "__author__" in data: + # file is in Detectron2 model zoo format + self.logger.info("Reading a file from '{}'".format(data["__author__"])) + return data + else: + # assume file is from Caffe2 / Detectron1 model zoo + if "blobs" in data: + # Detection models have "blobs", but ImageNet models don't + data = data["blobs"] + data = {k: v for k, v in data.items() if not k.endswith("_momentum")} + return {"model": data, "__author__": "Caffe2", "matching_heuristics": True} + + loaded = super()._load_file(filename) # load native pth checkpoint + if "model" not in loaded: + loaded = {"model": loaded} + return loaded + + def _load_model(self, checkpoint): + if checkpoint.get("matching_heuristics", False): + self._convert_ndarray_to_tensor(checkpoint["model"]) + # convert weights by name-matching heuristics + model_state_dict = self.model.state_dict() + align_and_update_state_dicts( + model_state_dict, + checkpoint["model"], + c2_conversion=checkpoint.get("__author__", None) == "Caffe2", + ) + checkpoint["model"] = model_state_dict + # for non-caffe2 models, use standard ways to load it + super()._load_model(checkpoint) diff --git a/detectron2/checkpoint/model_zoo.py b/detectron2/checkpoint/model_zoo.py new file mode 100644 index 0000000000..42591e7207 --- /dev/null +++ b/detectron2/checkpoint/model_zoo.py @@ -0,0 +1,132 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +from fvcore.common.file_io import PathHandler, PathManager + + +class ModelCatalog(object): + """ + Store mappings from names to third-party models. + """ + + S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron" + + # MSRA models have STRIDE_IN_1X1=True. False otherwise. + # NOTE: all BN models here have fused BN into an affine layer. + # As a result, you should only load them to a model with "FrozenBN". + # Loading them to a model with regular BN or SyncBN is wrong. + # Even when loaded to FrozenBN, it is still different from affine by an epsilon, + # which should be negligible for training. + # NOTE: all models here uses PIXEL_STD=[1,1,1] + C2_IMAGENET_MODELS = { + "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl", + "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl", + "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl", + "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl", + "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl", + "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl", + "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl", + } + + C2_DETECTRON_PATH_FORMAT = ( + "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" + ) # noqa B950 + + C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival" + C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival" + + # format: {model_name} -> part of the url + C2_DETECTRON_MODELS = { + "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950 + "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950 + "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950 + "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950 + "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950 + "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950 + "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950 + "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950 + "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950 + "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950 + "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950 + "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950 + "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950 + } + + @staticmethod + def get(name): + if name.startswith("Caffe2Detectron/COCO"): + return ModelCatalog._get_c2_detectron_baseline(name) + if name.startswith("ImageNetPretrained/"): + return ModelCatalog._get_c2_imagenet_pretrained(name) + raise RuntimeError("model not present in the catalog: {}".format(name)) + 
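+    # For example (illustrative):
+    #   ModelCatalog.get("ImageNetPretrained/MSRA/R-50")
+    #   -> "https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl"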
+ @staticmethod + def _get_c2_imagenet_pretrained(name): + prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX + name = name[len("ImageNetPretrained/") :] + name = ModelCatalog.C2_IMAGENET_MODELS[name] + url = "/".join([prefix, name]) + return url + + @staticmethod + def _get_c2_detectron_baseline(name): + name = name[len("Caffe2Detectron/COCO/") :] + url = ModelCatalog.C2_DETECTRON_MODELS[name] + if "keypoint_rcnn" in name: + dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS + else: + dataset = ModelCatalog.C2_DATASET_COCO + + if "35998355/rpn_R-50-C4_1x" in name: + # this one model is somehow different from others .. + type = "rpn" + else: + type = "generalized_rcnn" + + # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`. + url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format( + prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset + ) + return url + + +class ModelCatalogHandler(PathHandler): + """ + Resolve URL like catalog://. + """ + + PREFIX = "catalog://" + + def _get_supported_prefixes(self): + return [self.PREFIX] + + def _get_local_path(self, path): + logger = logging.getLogger(__name__) + catalog_path = ModelCatalog.get(path[len(self.PREFIX) :]) + logger.info("Catalog entry {} points to {}".format(path, catalog_path)) + return PathManager.get_local_path(catalog_path) + + def _open(self, path, mode="r"): + return PathManager.open(self._get_local_path(path), mode) + + +class Detectron2Handler(PathHandler): + """ + Resolve anything that's in Detectron2 model zoo. + """ + + PREFIX = "detectron2://" + S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" + + def _get_supported_prefixes(self): + return [self.PREFIX] + + def _get_local_path(self, path): + name = path[len(self.PREFIX) :] + return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name) + + def _open(self, path, mode="r"): + return PathManager.open(self._get_local_path(path), mode) + + +PathManager.register_handler(ModelCatalogHandler()) +PathManager.register_handler(Detectron2Handler()) diff --git a/detectron2/config/__init__.py b/detectron2/config/__init__.py new file mode 100644 index 0000000000..e6f3f5eee5 --- /dev/null +++ b/detectron2/config/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .compat import downgrade_config, upgrade_config +from .config import CfgNode, get_cfg, global_cfg, set_global_cfg + +__all__ = [ + "CfgNode", + "get_cfg", + "global_cfg", + "set_global_cfg", + "downgrade_config", + "upgrade_config", +] diff --git a/detectron2/config/compat.py b/detectron2/config/compat.py new file mode 100644 index 0000000000..1e4be0390f --- /dev/null +++ b/detectron2/config/compat.py @@ -0,0 +1,229 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Backward compatibility of configs. + +Instructions to bump version: ++ It's not needed to bump version if new keys are added. + It's only needed when backward-incompatible changes happen + (i.e., some existing keys disappear, or the meaning of a key changes) ++ To bump version, do the following: + 1. Increment _C.VERSION in defaults.py + 2. Add a converter in this file. + + Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X, + and a function "downgrade" which in-place downgrades config from X to X-1 + + In each function, VERSION is left unchanged. + + Each converter assumes that its input has the relevant keys + (i.e., the input is not a partial config). + 3. 
Run the tests (test_config.py) to make sure the upgrade & downgrade + functions are consistent. +""" + +import logging +from typing import List, Optional, Tuple + +from .config import CfgNode as CN +from .defaults import _C + +__all__ = ["upgrade_config", "downgrade_config"] + + +def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN: + """ + Upgrade a config from its current version to a newer version. + + Args: + cfg (CfgNode): + to_version (int): defaults to the latest version. + """ + cfg = cfg.clone() + if to_version is None: + to_version = _C.VERSION + + assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format( + cfg.VERSION, to_version + ) + for k in range(cfg.VERSION, to_version): + converter = globals()["ConverterV" + str(k + 1)] + converter.upgrade(cfg) + cfg.VERSION = k + 1 + return cfg + + +def downgrade_config(cfg: CN, to_version: int) -> CN: + """ + Downgrade a config from its current version to an older version. + + Args: + cfg (CfgNode): + to_version (int): + + Note: + A general downgrade of arbitrary configs is not always possible due to the + different functionailities in different versions. + The purpose of downgrade is only to recover the defaults in old versions, + allowing it to load an old partial yaml config. + Therefore, the implementation only needs to fill in the default values + in the old version when a general downgrade is not possible. + """ + cfg = cfg.clone() + assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format( + cfg.VERSION, to_version + ) + for k in range(cfg.VERSION, to_version, -1): + converter = globals()["ConverterV" + str(k)] + converter.downgrade(cfg) + cfg.VERSION = k - 1 + return cfg + + +def guess_version(cfg: CN, filename: str) -> int: + """ + Guess the version of a partial config where the VERSION field is not specified. + Returns the version, or the latest if cannot make a guess. + + This makes it easier for users to migrate. + """ + logger = logging.getLogger(__name__) + + def _has(name: str) -> bool: + cur = cfg + for n in name.split("."): + if n not in cur: + return False + cur = cur[n] + return True + + # Most users' partial configs have "MODEL.WEIGHT", so guess on it + ret = None + if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"): + ret = 1 + + if ret is not None: + logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret)) + else: + ret = _C.VERSION + logger.warning( + "Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format( + filename, ret + ) + ) + return ret + + +def _rename(cfg: CN, old: str, new: str) -> None: + old_keys = old.split(".") + new_keys = new.split(".") + + def _set(key_seq: List[str], val: str) -> None: + cur = cfg + for k in key_seq[:-1]: + if k not in cur: + cur[k] = CN() + cur = cur[k] + cur[key_seq[-1]] = val + + def _get(key_seq: List[str]) -> CN: + cur = cfg + for k in key_seq: + cur = cur[k] + return cur + + def _del(key_seq: List[str]) -> None: + cur = cfg + for k in key_seq[:-1]: + cur = cur[k] + del cur[key_seq[-1]] + if len(cur) == 0 and len(key_seq) > 1: + _del(key_seq[:-1]) + + _set(new_keys, _get(old_keys)) + _del(old_keys) + + +class _RenameConverter: + """ + A converter that handles simple rename. 
+ """ + + RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name) + + @classmethod + def upgrade(cls, cfg: CN) -> None: + for old, new in cls.RENAME: + _rename(cfg, old, new) + + @classmethod + def downgrade(cls, cfg: CN) -> None: + for old, new in cls.RENAME[::-1]: + _rename(cfg, new, old) + + +class ConverterV1(_RenameConverter): + RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")] + + +class ConverterV2(_RenameConverter): + """ + A large bulk of rename, before public release. + """ + + RENAME = [ + ("MODEL.WEIGHT", "MODEL.WEIGHTS"), + ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"), + ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"), + ( + "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD", + "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH", + ), + ( + "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT", + "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT", + ), + ( + "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD", + "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH", + ), + ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"), + ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"), + ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"), + ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"), + ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"), + ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"), + ("TEST.AUG_ON", "TEST.AUG.ENABLED"), + ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"), + ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"), + ("TEST.AUG_FLIP", "TEST.AUG.FLIP"), + ] + + @classmethod + def upgrade(cls, cfg: CN) -> None: + super().upgrade(cfg) + + if cfg.MODEL.META_ARCHITECTURE == "RetinaNet": + _rename( + cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS" + ) + _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") + del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"] + del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"] + else: + _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS") + _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") + del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"] + del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"] + del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"] + + @classmethod + def downgrade(cls, cfg: CN) -> None: + super().downgrade(cfg) + + _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS") + _rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES") + cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS + cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES + cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version diff --git a/detectron2/config/config.py b/detectron2/config/config.py new file mode 100644 index 0000000000..f9d01cb42c --- /dev/null +++ b/detectron2/config/config.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import logging +from fvcore.common.config import CfgNode as _CfgNode + + +class CfgNode(_CfgNode): + """ + The same as `fvcore.common.config.CfgNode`, but different in: + + 1. Use unsafe yaml loading by default. 
+ Note that this may lead to arbitrary code execution: you must not + load a config file from untrusted sources before manually inspecting + the content of the file. + 2. Support config versioning. + When attempting to merge an old config, it will convert the old config automatically. + + """ + + # Note that the default value of allow_unsafe is changed to True + def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: + loaded_cfg = _CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) + loaded_cfg = type(self)(loaded_cfg) + + # defaults.py needs to import CfgNode + from .defaults import _C + + latest_ver = _C.VERSION + assert ( + latest_ver == self.VERSION + ), "CfgNode.merge_from_file is only allowed on a config of latest version!" + + logger = logging.getLogger(__name__) + + loaded_ver = loaded_cfg.get("VERSION", None) + if loaded_ver is None: + from .compat import guess_version + + loaded_ver = guess_version(loaded_cfg, cfg_filename) + assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( + loaded_ver, self.VERSION + ) + + if loaded_ver == self.VERSION: + self.merge_from_other_cfg(loaded_cfg) + else: + # compat.py needs to import CfgNode + from .compat import upgrade_config, downgrade_config + + logger.warning( + "Loading an old v{} config file '{}' by automatically upgrading to v{}. " + "See docs/CHANGELOG.md for instructions to update your files.".format( + loaded_ver, cfg_filename, self.VERSION + ) + ) + # To convert, first obtain a full config at an old version + old_self = downgrade_config(self, to_version=loaded_ver) + old_self.merge_from_other_cfg(loaded_cfg) + new_config = upgrade_config(old_self) + self.clear() + self.update(new_config) + + +global_cfg = CfgNode() + + +def get_cfg() -> CfgNode: + """ + Get a copy of the default config. + + Returns: + a detectron2 CfgNode instance. + """ + from .defaults import _C + + return _C.clone() + + +def set_global_cfg(cfg: CfgNode) -> None: + """ + Let the global config point to the given cfg. + + Assume that the given "cfg" has the key "KEY", after calling + `set_global_cfg(cfg)`, the key can be accessed by: + + .. code-block:: python + + from detectron2.config import global_cfg + print(global_cfg.KEY) + + By using a hacky global config, you can access these configs anywhere, + without having to pass the config object or the values deep into the code. + This is a hacky feature introduced for quick prototyping / research exploration. + """ + global global_cfg + global_cfg.clear() + global_cfg.update(cfg) diff --git a/detectron2/config/defaults.py b/detectron2/config/defaults.py new file mode 100644 index 0000000000..fa1b617048 --- /dev/null +++ b/detectron2/config/defaults.py @@ -0,0 +1,561 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .config import CfgNode as CN + +# ----------------------------------------------------------------------------- +# Convention about Training / Test specific parameters +# ----------------------------------------------------------------------------- +# Whenever an argument can be either used for training or for testing, the +# corresponding name will be post-fixed by a _TRAIN for a training parameter, +# or _TEST for a test-specific parameter. 
+# For example, the number of images during training will be
+# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
+# IMAGES_PER_BATCH_TEST
+
+# -----------------------------------------------------------------------------
+# Config definition
+# -----------------------------------------------------------------------------
+
+_C = CN()
+
+_C.VERSION = 2
+
+_C.MODEL = CN()
+_C.MODEL.LOAD_PROPOSALS = False
+_C.MODEL.MASK_ON = False
+_C.MODEL.KEYPOINT_ON = False
+_C.MODEL.DEVICE = "cuda"
+_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
+
+# If WEIGHTS starts with "catalog://" (e.g. a catalog entry for R-50), the code will look for
+# the path in ModelCatalog. Otherwise, it will be used as the specified absolute
+# path
+_C.MODEL.WEIGHTS = ""
+
+# Values to be used for image normalization (BGR order)
+# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
+_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
+# When using pre-trained models in Detectron1 or any MSRA models,
+# std has been absorbed into its conv1 weights, so the std needs to be set to 1.
+# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
+_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
+
+
+# -----------------------------------------------------------------------------
+# INPUT
+# -----------------------------------------------------------------------------
+_C.INPUT = CN()
+# Size of the smallest side of the image during training
+_C.INPUT.MIN_SIZE_TRAIN = (800,)
+# Sample the size of the smallest side by choice or random selection from the range given by
+# INPUT.MIN_SIZE_TRAIN
+_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
+# Maximum size of the side of the image during training
+_C.INPUT.MAX_SIZE_TRAIN = 1333
+# Size of the smallest side of the image during testing. Set to zero to disable resize in testing.
+_C.INPUT.MIN_SIZE_TEST = 800
+# Maximum size of the side of the image during testing
+_C.INPUT.MAX_SIZE_TEST = 1333
+
+# `True` if cropping is used for data augmentation during training
+_C.INPUT.CROP = CN({"ENABLED": False})
+# Cropping type:
+# - "relative": crop a (H * CROP.SIZE[0], W * CROP.SIZE[1]) part of an input of size (H, W)
+# - "relative_range": uniformly sample a relative crop size between [CROP.SIZE[0], CROP.SIZE[1]]
+#   and [1, 1], then use it as in the "relative" scenario.
+# - "absolute": crop a part of the input with absolute size (CROP.SIZE[0], CROP.SIZE[1]).
+_C.INPUT.CROP.TYPE = "relative_range"
+# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range", and in number of
+# pixels if CROP.TYPE is "absolute"
+_C.INPUT.CROP.SIZE = [0.9, 0.9]
+
+
+# Whether the model needs RGB, YUV, HSV etc.
+# Should be one of the modes defined here, as we use PIL to read the image:
+# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
+# with BGR being the one exception. One can set the image format to BGR; we will
+# internally use RGB for conversion and flip the channels over.
+_C.INPUT.FORMAT = "BGR"
+_C.INPUT.MASK_FORMAT = "polygon"  # alternative: "bitmask"
+
+
+# -----------------------------------------------------------------------------
+# Dataset
+# -----------------------------------------------------------------------------
+_C.DATASETS = CN()
+# List of the dataset names for training. Must be registered in DatasetCatalog
+_C.DATASETS.TRAIN = ()
+# List of the pre-computed proposal files for training, which must be consistent
+# with datasets listed in DATASETS.TRAIN.
+_C.DATASETS.PROPOSAL_FILES_TRAIN = () +# Number of top scoring precomputed proposals to keep for training +_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000 +# List of the dataset names for testing. Must be registered in DatasetCatalog +_C.DATASETS.TEST = () +# List of the pre-computed proposal files for test, which must be consistent +# with datasets listed in DATASETS.TEST. +_C.DATASETS.PROPOSAL_FILES_TEST = () +# Number of top scoring precomputed proposals to keep for test +_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000 + +# ----------------------------------------------------------------------------- +# DataLoader +# ----------------------------------------------------------------------------- +_C.DATALOADER = CN() +# Number of data loading threads +_C.DATALOADER.NUM_WORKERS = 4 +# If True, each batch should contain only images for which the aspect ratio +# is compatible. This groups portrait images together, and landscape images +# are not batched with portrait images. +_C.DATALOADER.ASPECT_RATIO_GROUPING = True +# Options: TrainingSampler, RepeatFactorTrainingSampler +_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler" +# Repeat threshold for RepeatFactorTrainingSampler +_C.DATALOADER.REPEAT_THRESHOLD = 0.0 + + +# ---------------------------------------------------------------------------- # +# Backbone options +# ---------------------------------------------------------------------------- # +_C.MODEL.BACKBONE = CN() + +_C.MODEL.BACKBONE.NAME = "build_resnet_backbone" +# Add StopGrad at a specified stage so the bottom layers are frozen +_C.MODEL.BACKBONE.FREEZE_AT = 2 + + +# ---------------------------------------------------------------------------- # +# FPN options +# ---------------------------------------------------------------------------- # +_C.MODEL.FPN = CN() +# Names of the input feature maps to be used by FPN +# They must have contiguous power of 2 strides +# e.g., ["res2", "res3", "res4", "res5"] +_C.MODEL.FPN.IN_FEATURES = [] +_C.MODEL.FPN.OUT_CHANNELS = 256 + +# Options: "" (no norm), "GN" +_C.MODEL.FPN.NORM = "" + +# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg" +_C.MODEL.FPN.FUSE_TYPE = "sum" + + +# ---------------------------------------------------------------------------- # +# Proposal generator options +# ---------------------------------------------------------------------------- # +_C.MODEL.PROPOSAL_GENERATOR = CN() +# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals" +_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN" +# Proposal height and width both need to be greater than MIN_SIZE +# (a the scale used during training or inference) +_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0 + + +# ---------------------------------------------------------------------------- # +# Anchor generator options +# ---------------------------------------------------------------------------- # +_C.MODEL.ANCHOR_GENERATOR = CN() +# The generator can be any name in the ANCHOR_GENERATOR registry +_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator" +# anchor sizes given in absolute pixels w.r.t. the scaled network input. +# Format: list of lists of sizes. SIZES[i] specifies the list of sizes +# to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true, +# or len(SIZES) == 1 is true and size list SIZES[0] is used for all +# IN_FEATURES. +_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]] +# Anchor aspect ratios. +# Format is list of lists of sizes. 
ASPECT_RATIOS[i] specifies the list of aspect ratios +# to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true, +# or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used +# for all IN_FEATURES. +_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]] +# Anchor angles. +# list[float], the angle in degrees, for each input feature map. +# ANGLES[i] specifies the list of angles for IN_FEATURES[i]. +_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]] + + +# ---------------------------------------------------------------------------- # +# RPN options +# ---------------------------------------------------------------------------- # +_C.MODEL.RPN = CN() +_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY + +# Names of the input feature maps to be used by RPN +# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN +_C.MODEL.RPN.IN_FEATURES = ["res4"] +# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels +# Set to -1 or a large value, e.g. 100000, to disable pruning anchors +_C.MODEL.RPN.BOUNDARY_THRESH = -1 +# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD] +# Minimum overlap required between an anchor and ground-truth box for the +# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD +# ==> positive RPN example: 1) +# Maximum overlap allowed between an anchor and ground-truth box for the +# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD +# ==> negative RPN example: 0) +# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD) +# are ignored (-1) +_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7] +_C.MODEL.RPN.IOU_LABELS = [0, -1, 1] +# Total number of RPN examples per image +_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256 +# Target fraction of foreground (positive) examples per RPN minibatch +_C.MODEL.RPN.POSITIVE_FRACTION = 0.5 +# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets +_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0) +# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. +_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0 +_C.MODEL.RPN.LOSS_WEIGHT = 1.0 +# Number of top scoring RPN proposals to keep before applying NMS +# When FPN is used, this is *per FPN level* (not total) +_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000 +_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000 +# Number of top scoring RPN proposals to keep after applying NMS +# When FPN is used, this limit is applied per level and then again to the union +# of proposals from all levels +# NOTE: When FPN is used, the meaning of this config is different from Detectron1. +# It means per-batch topk in Detectron1, but per-image topk here. +# See "modeling/rpn/rpn_outputs.py" for details. +_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000 +_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000 +# NMS threshold used on RPN proposals +_C.MODEL.RPN.NMS_THRESH = 0.7 + +# ---------------------------------------------------------------------------- # +# ROI HEADS options +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_HEADS = CN() +_C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads" +# Number of foreground classes +_C.MODEL.ROI_HEADS.NUM_CLASSES = 80 +# Names of the input feature maps to be used by ROI heads +# Currently all heads (box, mask, ...) 
use the same input feature map list +# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN +_C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"] +# IOU overlap ratios [IOU_THRESHOLD] +# Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD) +# Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD) +_C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5] +_C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1] +# RoI minibatch size *per image* (number of regions of interest [ROIs]) +# Total number of RoIs per training minibatch = +# ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH +# E.g., a common configuration is: 512 * 16 = 8192 +_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 +# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0) +_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 + +# Only used on test mode + +# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to +# balance obtaining high recall with not having too many low precision +# detections that will slow down inference post processing steps (like NMS) +# A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down +# inference. +_C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05 +# Overlap threshold used for non-maximum suppression (suppress boxes with +# IoU >= this threshold) +_C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5 +# If True, augment proposals with ground-truth boxes before sampling proposals to +# train ROI heads. +_C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True + + +# ---------------------------------------------------------------------------- # +# Box Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_BOX_HEAD = CN() +# C4 don't use head name option +# Options for non-C4 models: FastRCNNConvFCHead, +_C.MODEL.ROI_BOX_HEAD.NAME = "" +# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets +# These are empirically chosen to approximately lead to unit variance targets +_C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0) +# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1. +_C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0 +_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 +_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0 +# Type of pooling operation applied to the incoming feature map for each RoI +_C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" + +_C.MODEL.ROI_BOX_HEAD.NUM_FC = 0 +# Hidden layer dimension for FC layers in the RoI box head +_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024 +_C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0 +# Channel dimension for Conv layers in the RoI box head +_C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256 +# Normalization method for the convolution layers. +# Options: "" (no norm), "GN", "SyncBN". +_C.MODEL.ROI_BOX_HEAD.NORM = "" +# Whether to use class agnostic for bbox regression +_C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False + +# ---------------------------------------------------------------------------- # +# Cascaded Box Head +# ---------------------------------------------------------------------------- # +_C.MODEL.ROI_BOX_CASCADE_HEAD = CN() +# The number of cascade stages is implicitly defined by the length of the following two configs. 
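+# (With the defaults below this gives a three-stage cascade: len(BBOX_REG_WEIGHTS) == len(IOUS) == 3,
+# with per-stage foreground IoU thresholds increasing from 0.5 to 0.7.)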
+_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = (
+    (10.0, 10.0, 5.0, 5.0),
+    (20.0, 20.0, 10.0, 10.0),
+    (30.0, 30.0, 15.0, 15.0),
+)
+_C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7)
+
+
+# ---------------------------------------------------------------------------- #
+# Mask Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_MASK_HEAD = CN()
+_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
+_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
+_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
+_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0  # The number of convs in the mask head
+_C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256
+# Normalization method for the convolution layers.
+# Options: "" (no norm), "GN", "SyncBN".
+_C.MODEL.ROI_MASK_HEAD.NORM = ""
+# Whether to use class-agnostic mask prediction
+_C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False
+# Type of pooling operation applied to the incoming feature map for each RoI
+_C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+
+# ---------------------------------------------------------------------------- #
+# Keypoint Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_KEYPOINT_HEAD = CN()
+_C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead"
+_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
+_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
+_C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8))
+_C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17  # 17 is the number of keypoints in COCO.
+
+# Images with too few (or no) keypoints are excluded from training.
+_C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1
+# Normalize by the total number of visible keypoints in the minibatch if True.
+# Otherwise, normalize by the total number of keypoints that could ever exist
+# in the minibatch.
+# The keypoint softmax loss is only calculated on visible keypoints.
+# Since the number of visible keypoints can vary significantly between
+# minibatches, this has the effect of up-weighting the importance of
+# minibatches with few visible keypoints. (Imagine the extreme case of
+# only one visible keypoint versus N: in the case of N, each one
+# contributes 1/N to the gradient compared to the single keypoint
+# determining the gradient direction). Instead, we can normalize the
+# loss by the total number of keypoints, if it were the case that all
+# keypoints were visible in a full minibatch. (Returning to the example,
+# this means that the one visible keypoint contributes as much as each
+# of the N keypoints.)
+_C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True
+# Multi-task loss weight to use for keypoints
+# Recommended values:
+# - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True
+# - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False
+_C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0
+# Type of pooling operation applied to the incoming feature map for each RoI
+_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+# ---------------------------------------------------------------------------- #
+# Semantic Segmentation Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.SEM_SEG_HEAD = CN()
+_C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead"
+_C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
+# Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
+# the corresponding pixel.
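+# (A value of 255 is a common convention for marking "ignore"/unlabeled pixels in semantic
+# segmentation ground truth.)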
+_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
+# Number of classes in the semantic segmentation head
+_C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54
+# Number of channels in the 3x3 convs inside semantic-FPN heads.
+_C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128
+# Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
+_C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
+# Normalization method for the convolution layers. Options: "" (no norm), "GN".
+_C.MODEL.SEM_SEG_HEAD.NORM = "GN"
+_C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0
+
+_C.MODEL.PANOPTIC_FPN = CN()
+# Scaling of all losses from instance detection / segmentation head.
+_C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0
+
+# options when combining instance & semantic segmentation outputs
+_C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True})
+_C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5
+_C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096
+_C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
+
+
+# ---------------------------------------------------------------------------- #
+# RetinaNet Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.RETINANET = CN()
+
+# This is the number of foreground classes.
+_C.MODEL.RETINANET.NUM_CLASSES = 80
+
+_C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
+
+# Convolutions to use in the cls and bbox tower
+# NOTE: this doesn't include the last conv for logits
+_C.MODEL.RETINANET.NUM_CONVS = 4
+
+# IoU overlap ratio [bg, fg] for labeling anchors.
+# Anchors with < bg are labeled negative (0)
+# Anchors with >= bg and < fg are ignored (-1)
+# Anchors with >= fg are labeled positive (1)
+_C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
+_C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
+
+# Prior prob for rare case (i.e. foreground) at the beginning of training.
+# This is used to set the bias for the logits layer of the classifier subnet.
+# This improves training stability in the case of heavy class imbalance.
+_C.MODEL.RETINANET.PRIOR_PROB = 0.01
+
+# Inference cls score threshold: only anchors with score > SCORE_THRESH_TEST are
+# considered for inference (to improve speed)
+_C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05
+_C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
+_C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5
+
+# Weights on (dx, dy, dw, dh) for normalizing RetinaNet anchor regression targets
+_C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
+
+# Loss parameters
+_C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
+_C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
+_C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
+
+
+# ---------------------------------------------------------------------------- #
+# ResNe[X]t options (ResNets = {ResNet, ResNeXt})
+# Note that parts of a resnet may be used for both the backbone and the head
+# These options apply to both
+# ---------------------------------------------------------------------------- #
+_C.MODEL.RESNETS = CN()
+
+_C.MODEL.RESNETS.DEPTH = 50
+_C.MODEL.RESNETS.OUT_FEATURES = ["res4"]  # res4 for C4 backbone, res2..5 for FPN backbone
+
+# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
+_C.MODEL.RESNETS.NUM_GROUPS = 1
+
+# Options: FrozenBN, GN, "SyncBN", "BN"
+_C.MODEL.RESNETS.NORM = "FrozenBN"
+
+# Baseline width of each group.
+# Scaling this parameter will scale the width of all bottleneck layers.
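+# (For example, the X_101_32x8d configs in this repo correspond to NUM_GROUPS = 32 with
+# WIDTH_PER_GROUP = 8, i.e. ResNeXt-101 32x8d.)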
+_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64 + +# Place the stride 2 conv on the 1x1 filter +# Use True only for the original MSRA ResNet; use False for C2 and Torch models +_C.MODEL.RESNETS.STRIDE_IN_1X1 = True + +# Apply dilation in stage "res5" +_C.MODEL.RESNETS.RES5_DILATION = 1 + +# Output width of res2. Scaling this parameters will scale the width of all 1x1 convs in ResNet +_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256 +_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64 + +# Apply Deformable Convolution in stages +# Specify if apply deform_conv on Res2, Res3, Res4, Res5 +_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False] +# Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168); +# Use False for DeformableV1. +_C.MODEL.RESNETS.DEFORM_MODULATED = False +# Number of groups in deformable conv. +_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1 + + +# ---------------------------------------------------------------------------- # +# Solver +# ---------------------------------------------------------------------------- # +_C.SOLVER = CN() + +# See detectron2/solver/build.py for LR scheduler options +_C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR" + +_C.SOLVER.MAX_ITER = 40000 + +_C.SOLVER.BASE_LR = 0.001 + +_C.SOLVER.MOMENTUM = 0.9 + +_C.SOLVER.WEIGHT_DECAY = 0.0001 +# The weight decay that's applied to parameters of normalization layers +# (typically the affine transformation) +_C.SOLVER.WEIGHT_DECAY_NORM = 0.0 + +_C.SOLVER.GAMMA = 0.1 +_C.SOLVER.STEPS = (30000,) + +_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000 +_C.SOLVER.WARMUP_ITERS = 1000 +_C.SOLVER.WARMUP_METHOD = "linear" + +_C.SOLVER.CHECKPOINT_PERIOD = 5000 + +# Number of images per batch across all machines. +# If we have 16 GPUs and IMS_PER_BATCH = 32, +# each GPU will see 2 images per batch. +_C.SOLVER.IMS_PER_BATCH = 16 + +# Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for +# biases. This is not useful (at least for recent models). You should avoid +# changing these and they exist only to reproduce Detectron v1 training if +# desired. +_C.SOLVER.BIAS_LR_FACTOR = 1.0 +_C.SOLVER.WEIGHT_DECAY_BIAS = _C.SOLVER.WEIGHT_DECAY + +# ---------------------------------------------------------------------------- # +# Specific test options +# ---------------------------------------------------------------------------- # +_C.TEST = CN() +# For end-to-end tests to verify the expected accuracy. +# Each item is [task, metric, value, tolerance] +# e.g.: [['bbox', 'AP', 38.5, 0.2]] +_C.TEST.EXPECTED_RESULTS = [] +# The period (in terms of steps) to evaluate the model during training. +# Set to 0 to disable. +_C.TEST.EVAL_PERIOD = 0 +# The sigmas used to calculate keypoint OKS. +# When empty it will use the defaults in COCO. +# Otherwise it should have the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. +_C.TEST.KEYPOINT_OKS_SIGMAS = [] +# Maximum number of detections to return per image during inference (100 is +# based on the limit established for the COCO dataset). 
+_C.TEST.DETECTIONS_PER_IMAGE = 100 + +_C.TEST.AUG = CN({"ENABLED": False}) +_C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200) +_C.TEST.AUG.MAX_SIZE = 4000 +_C.TEST.AUG.FLIP = True + +_C.TEST.PRECISE_BN = CN({"ENABLED": False}) +_C.TEST.PRECISE_BN.NUM_ITER = 200 + +# ---------------------------------------------------------------------------- # +# Misc options +# ---------------------------------------------------------------------------- # +# Directory where output files are written +_C.OUTPUT_DIR = "./output" +# Set seed to negative to fully randomize everything. +# Set seed to positive to use a fixed seed. Note that a fixed seed does not +# guarantee fully deterministic behavior. +_C.SEED = -1 +# Benchmark different cudnn algorithms. It has large overhead for about 10k +# iterations. It usually hurts total time, but can benefit for certain models. +_C.CUDNN_BENCHMARK = False + +# global config is for quick hack purposes. +# You can set them in command line or config files, +# and access it with: +# +# from detectron2.config import global_cfg +# print(global_cfg.HACK) +# +# Do not commit any configs into it. +_C.GLOBAL = CN() +_C.GLOBAL.HACK = 1.0 diff --git a/detectron2/data/__init__.py b/detectron2/data/__init__.py new file mode 100644 index 0000000000..b04d1dc275 --- /dev/null +++ b/detectron2/data/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from . import transforms # isort:skip + +from .build import ( + build_detection_test_loader, + build_detection_train_loader, + get_detection_dataset_dicts, + load_proposals_into_dataset, + print_instances_class_histogram, +) +from .catalog import DatasetCatalog, MetadataCatalog +from .common import DatasetFromList, MapDataset +from .dataset_mapper import DatasetMapper + +# ensure the builtin datasets are registered +from . import datasets, samplers # isort:skip + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/detectron2/data/build.py b/detectron2/data/build.py new file mode 100644 index 0000000000..730d26093a --- /dev/null +++ b/detectron2/data/build.py @@ -0,0 +1,417 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import bisect +import copy +import itertools +import logging +import numpy as np +import pickle +import torch.utils.data +from fvcore.common.file_io import PathManager +from tabulate import tabulate +from termcolor import colored + +from detectron2.structures import BoxMode +from detectron2.utils.comm import get_world_size +from detectron2.utils.env import seed_all_rng +from detectron2.utils.logger import log_first_n + +from . import samplers +from .catalog import DatasetCatalog, MetadataCatalog +from .common import DatasetFromList, MapDataset +from .dataset_mapper import DatasetMapper +from .detection_utils import check_metadata_consistency + +""" +This file contains the default logic to build a dataloader for training or testing. +""" + +__all__ = [ + "build_detection_train_loader", + "build_detection_test_loader", + "get_detection_dataset_dicts", + "load_proposals_into_dataset", + "print_instances_class_histogram", +] + + +def filter_images_with_only_crowd_annotations(dataset_dicts): + """ + Filter out images with none annotations or only crowd annotations + (i.e., images without non-crowd annotations). + A common training-time preprocessing on COCO dataset. + + Args: + dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. + + Returns: + list[dict]: the same format, but filtered. 
+ """ + num_before = len(dataset_dicts) + + def valid(anns): + for ann in anns: + if ann.get("iscrowd", 0) == 0: + return True + return False + + dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] + num_after = len(dataset_dicts) + logger = logging.getLogger(__name__) + logger.info( + "Removed {} images with no usable annotations. {} images left.".format( + num_before - num_after, num_after + ) + ) + return dataset_dicts + + +def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image): + """ + Filter out images with too few number of keypoints. + + Args: + dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. + + Returns: + list[dict]: the same format as dataset_dicts, but filtered. + """ + num_before = len(dataset_dicts) + + def visible_keypoints_in_image(dic): + # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility + annotations = dic["annotations"] + return sum( + (np.array(ann["keypoints"][2::3]) > 0).sum() + for ann in annotations + if "keypoints" in ann + ) + + dataset_dicts = [ + x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image + ] + num_after = len(dataset_dicts) + logger = logging.getLogger(__name__) + logger.info( + "Removed {} images with fewer than {} keypoints.".format( + num_before - num_after, min_keypoints_per_image + ) + ) + return dataset_dicts + + +def load_proposals_into_dataset(dataset_dicts, proposal_file): + """ + Load precomputed object proposals into the dataset. + + Args: + dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. + proposal_file (str): file path of pre-computed proposals, in pkl format. + + Returns: + list[dict]: the same format as dataset_dicts, but added proposal field. + """ + logger = logging.getLogger(__name__) + logger.info("Loading proposals from: {}".format(proposal_file)) + + with PathManager.open(proposal_file, "rb") as f: + proposals = pickle.load(f, encoding="latin1") + + # Rename the key names in D1 proposal files + rename_keys = {"indexes": "ids", "scores": "objectness_logits"} + for key in rename_keys: + if key in proposals: + proposals[rename_keys[key]] = proposals.pop(key) + + # Remove proposals whose ids are not in dataset + img_ids = set({entry["image_id"] for entry in dataset_dicts}) + keep = [i for i, id in enumerate(proposals["ids"]) if id in img_ids] + # Sort proposals by ids following the image order in dataset + keep = sorted(keep) + for key in ["boxes", "ids", "objectness_logits"]: + proposals[key] = [proposals[key][i] for i in keep] + # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS' + bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS + + for i, record in enumerate(dataset_dicts): + # Sanity check that these proposals are for the correct image id + assert record["image_id"] == proposals["ids"][i] + + boxes = proposals["boxes"][i] + objectness_logits = proposals["objectness_logits"][i] + # Sort the proposals in descending order of the scores + inds = objectness_logits.argsort()[::-1] + record["proposal_boxes"] = boxes[inds] + record["proposal_objectness_logits"] = objectness_logits[inds] + record["proposal_bbox_mode"] = bbox_mode + + return dataset_dicts + + +def _quantize(x, bin_edges): + bin_edges = copy.copy(bin_edges) + bin_edges = sorted(bin_edges) + quantized = list(map(lambda y: bisect.bisect_right(bin_edges, y), x)) + return quantized + + +def print_instances_class_histogram(dataset_dicts, class_names): + """ + Args: + 
dataset_dicts (list[dict]): list of dataset dicts. + class_names (list[str]): list of class names (zero-indexed). + """ + num_classes = len(class_names) + hist_bins = np.arange(num_classes + 1) + histogram = np.zeros((num_classes,), dtype=np.int) + for entry in dataset_dicts: + annos = entry["annotations"] + classes = [x["category_id"] for x in annos if not x.get("iscrowd", 0)] + histogram += np.histogram(classes, bins=hist_bins)[0] + + N_COLS = min(6, len(class_names) * 2) + + def short_name(x): + # make long class names shorter. useful for lvis + if len(x) > 13: + return x[:11] + ".." + return x + + data = list( + itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)]) + ) + total_num_instances = sum(data[1::2]) + data.extend([None] * (N_COLS - (len(data) % N_COLS))) + if num_classes > 1: + data.extend(["total", total_num_instances]) + data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)]) + table = tabulate( + data, + headers=["category", "#instances"] * (N_COLS // 2), + tablefmt="pipe", + numalign="left", + stralign="center", + ) + log_first_n( + logging.INFO, + "Distribution of training instances among all {} categories:\n".format(num_classes) + + colored(table, "cyan"), + key="message", + ) + + +def build_batch_data_sampler( + sampler, images_per_batch, group_bin_edges=None, grouping_features=None +): + """ + Return a dataset index sampler that batches dataset indices possibly with + grouping to improve training efficiency. + + Args: + sampler (torch.utils.data.sampler.Sampler): any subclass of + :class:`torch.utils.data.sampler.Sampler`. + images_per_batch (int): the batch size. Note that the sampler may return + batches that have between 1 and images_per_batch (inclusive) elements + because the underlying index set (and grouping partitions, if grouping + is used) may not be divisible by images_per_batch. + group_bin_edges (None, list[number], tuple[number]): If None, then grouping + is disabled. If a list or tuple is given, the values are used as bin + edges for defining len(group_bin_edges) + 1 groups. When batches are + sampled, only elements from the same group are returned together. + grouping_features (None, list[number], tuple[number]): If None, then grouping + is disabled. If a list or tuple is given, it must specify for each index + in the underlying dataset the value to be used for placing that dataset + index into one of the grouping bins. + + Returns: + A BatchSampler or subclass of BatchSampler. + """ + if group_bin_edges and grouping_features: + assert isinstance(group_bin_edges, (list, tuple)) + assert isinstance(grouping_features, (list, tuple)) + group_ids = _quantize(grouping_features, group_bin_edges) + batch_sampler = samplers.GroupedBatchSampler(sampler, group_ids, images_per_batch) + else: + batch_sampler = torch.utils.data.sampler.BatchSampler( + sampler, images_per_batch, drop_last=True + ) # drop last so the batch always have the same size + # NOTE when we add batch inference support, make sure not to use this. + return batch_sampler + + +def get_detection_dataset_dicts( + dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None +): + """ + Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation. + + Args: + dataset_names (list[str]): a list of dataset names + filter_empty (bool): whether to filter out images without instance annotations + min_keypoints (int): filter out images with fewer keypoints than + `min_keypoints`. Set to 0 to do nothing. 
+ proposal_files (list[str]): if given, a list of object proposal files + that match each dataset in `dataset_names`. + """ + assert len(dataset_names) + dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names] + + if proposal_files is not None: + assert len(dataset_names) == len(proposal_files) + # load precomputed proposals from proposal files + dataset_dicts = [ + load_proposals_into_dataset(dataset_i_dicts, proposal_file) + for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files) + ] + + dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) + + has_instances = "annotations" in dataset_dicts[0] + # Keep images without instance-level GT if the dataset has semantic labels. + if filter_empty and has_instances and "sem_seg_file_name" not in dataset_dicts[0]: + dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) + + if min_keypoints > 0 and has_instances: + dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) + + if has_instances: + try: + class_names = MetadataCatalog.get(dataset_names[0]).thing_classes + check_metadata_consistency("thing_classes", dataset_names) + print_instances_class_histogram(dataset_dicts, class_names) + except AttributeError: # class names are not available for this dataset + pass + return dataset_dicts + + +def build_detection_train_loader(cfg, mapper=None): + """ + A data loader is created by the following steps: + + 1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts. + 2. Start workers to work on the dicts. Each worker will: + * Map each metadata dict into another format to be consumed by the model. + * Batch them by simply putting dicts into a list. + The batched ``list[mapped_dict]`` is what this dataloader will return. + + Args: + cfg (CfgNode): the config + mapper (callable): a callable which takes a sample (dict) from dataset and + returns the format to be consumed by the model. + By default it will be `DatasetMapper(cfg, True)`. + + Returns: + a torch DataLoader object + """ + num_workers = get_world_size() + images_per_batch = cfg.SOLVER.IMS_PER_BATCH + assert ( + images_per_batch % num_workers == 0 + ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format( + images_per_batch, num_workers + ) + assert ( + images_per_batch >= num_workers + ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format( + images_per_batch, num_workers + ) + images_per_worker = images_per_batch // num_workers + + dataset_dicts = get_detection_dataset_dicts( + cfg.DATASETS.TRAIN, + filter_empty=True, + min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE + if cfg.MODEL.KEYPOINT_ON + else 0, + proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, + ) + dataset = DatasetFromList(dataset_dicts, copy=False) + + # Bin edges for batching images with similar aspect ratios. If ASPECT_RATIO_GROUPING + # is enabled, we define two bins with an edge at height / width = 1. 
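+    # For example, a 640x480 landscape image has height / width = 0.75 and falls in the first bin,
+    # while a 480x640 portrait image (ratio ~1.33) falls in the second; the grouped batch sampler
+    # then only batches indices from the same bin together.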
+ group_bin_edges = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else [] + aspect_ratios = [float(img["height"]) / float(img["width"]) for img in dataset] + + if mapper is None: + mapper = DatasetMapper(cfg, True) + dataset = MapDataset(dataset, mapper) + + sampler_name = cfg.DATALOADER.SAMPLER_TRAIN + logger = logging.getLogger(__name__) + logger.info("Using training sampler {}".format(sampler_name)) + if sampler_name == "TrainingSampler": + sampler = samplers.TrainingSampler(len(dataset)) + elif sampler_name == "RepeatFactorTrainingSampler": + sampler = samplers.RepeatFactorTrainingSampler( + dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD + ) + else: + raise ValueError("Unknown training sampler: {}".format(sampler_name)) + batch_sampler = build_batch_data_sampler( + sampler, images_per_worker, group_bin_edges, aspect_ratios + ) + + data_loader = torch.utils.data.DataLoader( + dataset, + num_workers=cfg.DATALOADER.NUM_WORKERS, + batch_sampler=batch_sampler, + collate_fn=trivial_batch_collator, + worker_init_fn=worker_init_reset_seed, + ) + return data_loader + + +def build_detection_test_loader(cfg, dataset_name, mapper=None): + """ + Similar to `build_detection_train_loader`. + But this function uses the given `dataset_name` argument (instead of the names in cfg), + and uses batch size 1. + + Args: + cfg: a detectron2 CfgNode + dataset_name (str): a name of the dataset that's available in the DatasetCatalog + mapper (callable): a callable which takes a sample (dict) from dataset + and returns the format to be consumed by the model. + By default it will be `DatasetMapper(cfg, False)`. + + Returns: + DataLoader: a torch DataLoader, that loads the given detection + dataset, with test-time transformation and batching. + """ + dataset_dicts = get_detection_dataset_dicts( + [dataset_name], + filter_empty=False, + proposal_files=[ + cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)] + ] + if cfg.MODEL.LOAD_PROPOSALS + else None, + ) + + dataset = DatasetFromList(dataset_dicts) + if mapper is None: + mapper = DatasetMapper(cfg, False) + dataset = MapDataset(dataset, mapper) + + sampler = samplers.InferenceSampler(len(dataset)) + # Always use 1 image per worker during inference since this is the + # standard when reporting inference time in papers. + batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False) + + data_loader = torch.utils.data.DataLoader( + dataset, + num_workers=cfg.DATALOADER.NUM_WORKERS, + batch_sampler=batch_sampler, + collate_fn=trivial_batch_collator, + ) + return data_loader + + +def trivial_batch_collator(batch): + """ + A batch collator that does nothing. + """ + return batch + + +def worker_init_reset_seed(worker_id): + seed_all_rng(np.random.randint(2 ** 31) + worker_id) diff --git a/detectron2/data/catalog.py b/detectron2/data/catalog.py new file mode 100644 index 0000000000..9019d79784 --- /dev/null +++ b/detectron2/data/catalog.py @@ -0,0 +1,207 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import logging +import types +from typing import List + +from detectron2.utils.logger import log_first_n + +__all__ = ["DatasetCatalog", "MetadataCatalog"] + + +class DatasetCatalog(object): + """ + A catalog that stores information about the datasets and how to obtain them. + + It contains a mapping from strings + (which are names that identify a dataset, e.g. "coco_2014_train") + to a function which parses the dataset and returns the samples in the + format of `list[dict]`. 
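+
+    A minimal, illustrative example (the loader and dataset name below are hypothetical):
+
+    .. code-block:: python
+
+        def load_my_dataset():
+            # returns annotations in Detectron2 Dataset format
+            return [{"file_name": "image1.jpg", "height": 480, "width": 640, "annotations": []}]
+
+        DatasetCatalog.register("my_dataset", load_my_dataset)
+        dicts = DatasetCatalog.get("my_dataset")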
+
+    The returned dicts should be in Detectron2 Dataset format (see DATASETS.md for details)
+    if used with the data loader functionalities in `data/build.py, data/detection_transform.py`.
+
+    The purpose of having this catalog is to make it easy to choose
+    different datasets, by just using the strings in the config.
+    """
+
+    _REGISTERED = {}
+
+    @staticmethod
+    def register(name, func):
+        """
+        Args:
+            name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+            func (callable): a callable which takes no arguments and returns a list of dicts.
+        """
+        DatasetCatalog._REGISTERED[name] = func
+
+    @staticmethod
+    def get(name):
+        """
+        Call the registered function and return its results.
+
+        Args:
+            name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+
+        Returns:
+            list[dict]: dataset annotations.
+        """
+        try:
+            f = DatasetCatalog._REGISTERED[name]
+        except KeyError:
+            raise KeyError(
+                "Dataset '{}' is not registered! Available datasets are: {}".format(
+                    name, ", ".join(DatasetCatalog._REGISTERED.keys())
+                )
+            )
+        return f()
+
+    @staticmethod
+    def list() -> List[str]:
+        """
+        List all registered datasets.
+
+        Returns:
+            list[str]
+        """
+        return list(DatasetCatalog._REGISTERED.keys())
+
+    @staticmethod
+    def clear():
+        """
+        Remove all registered datasets.
+        """
+        DatasetCatalog._REGISTERED.clear()
+
+
+class Metadata(types.SimpleNamespace):
+    """
+    A class that supports simple attribute setter/getter.
+    It is intended for storing metadata of a dataset and making it accessible globally.
+
+    Examples:
+
+    .. code-block:: python
+
+        # somewhere when you load the data:
+        MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"]
+
+        # somewhere when you print statistics or visualize:
+        classes = MetadataCatalog.get("mydataset").thing_classes
+    """
+
+    # the name of the dataset
+    # set default to N/A so that `self.name` in the errors will not trigger getattr again
+    name: str = "N/A"
+
+    _RENAMED = {
+        "class_names": "thing_classes",
+        "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id",
+        "stuff_class_names": "stuff_classes",
+    }
+
+    def __getattr__(self, key):
+        if key in self._RENAMED:
+            log_first_n(
+                logging.WARNING,
+                "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
+                n=10,
+            )
+            return getattr(self, self._RENAMED[key])
+
+        raise AttributeError(
+            "Attribute '{}' does not exist in the metadata of '{}'. Available keys are {}.".format(
+                key, self.name, str(self.__dict__.keys())
+            )
+        )
+
+    def __setattr__(self, key, val):
+        if key in self._RENAMED:
+            log_first_n(
+                logging.WARNING,
+                "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
+                n=10,
+            )
+            setattr(self, self._RENAMED[key], val)
+
+        # Ensure that metadata of the same name stays consistent
+        try:
+            oldval = getattr(self, key)
+            assert oldval == val, (
+                "Attribute '{}' in the metadata of '{}' cannot be set "
+                "to a different value!\n{} != {}".format(key, self.name, oldval, val)
+            )
+        except AttributeError:
+            super().__setattr__(key, val)
+
+    def as_dict(self):
+        """
+        Returns all the metadata as a dict.
+        Note that modifications to the returned dict will not reflect on the Metadata object.
+        """
+        return copy.copy(self.__dict__)
+
+    def set(self, **kwargs):
+        """
+        Set multiple metadata with kwargs.
+        """
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+        return self
+
+    def get(self, key, default=None):
+        """
+        Access an attribute and return its value if it exists.
+        Otherwise return default.
+ """ + try: + return getattr(self, key) + except AttributeError: + return default + + +class MetadataCatalog: + """ + MetadataCatalog provides access to "Metadata" of a given dataset. + + The metadata associated with a certain name is a singleton: once created, + the metadata will stay alive and will be returned by future calls to `get(name)`. + + It's like global variables, so don't abuse it. + It's meant for storing knowledge that's constant and shared across the execution + of the program, e.g.: the class names in COCO. + """ + + _NAME_TO_META = {} + + @staticmethod + def get(name): + """ + Args: + name (str): name of a dataset (e.g. coco_2014_train). + + Returns: + Metadata: The :class:`Metadata` instance associated with this name, + or create an empty one if none is available. + """ + assert len(name) + if name in MetadataCatalog._NAME_TO_META: + ret = MetadataCatalog._NAME_TO_META[name] + # TODO this is for the BC breaking change in D15247032. + # Remove this in the future. + if hasattr(ret, "dataset_name"): + logger = logging.getLogger() + logger.warning( + """ +The 'dataset_name' key in metadata is no longer used for +sharing metadata among splits after D15247032! Add +metadata to each split (now called dataset) separately! + """ + ) + parent_meta = MetadataCatalog.get(ret.dataset_name).as_dict() + ret.set(**parent_meta) + return ret + else: + m = MetadataCatalog._NAME_TO_META[name] = Metadata(name=name) + return m diff --git a/detectron2/data/common.py b/detectron2/data/common.py new file mode 100644 index 0000000000..6512c3d04a --- /dev/null +++ b/detectron2/data/common.py @@ -0,0 +1,81 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import logging +import random +import torch.utils.data as data + +from detectron2.utils.serialize import PicklableWrapper + +__all__ = ["MapDataset", "DatasetFromList"] + + +class MapDataset(data.Dataset): + """ + Map a function over the elements in a dataset. + + Args: + dataset: a dataset where map function is applied. + map_func: a callable which maps the element in dataset. map_func is + responsible for error handling, when error happens, it needs to + return None so the MapDataset will randomly use other + elements from the dataset. + """ + + def __init__(self, dataset, map_func): + self._dataset = dataset + self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work + + self._rng = random.Random(42) + self._fallback_candidates = set(range(len(dataset))) + + def __len__(self): + return len(self._dataset) + + def __getitem__(self, idx): + retry_count = 0 + cur_idx = int(idx) + + while True: + data = self._map_func(self._dataset[cur_idx]) + if data is not None: + self._fallback_candidates.add(cur_idx) + return data + + # _map_func fails for this idx, use a random new index from the pool + retry_count += 1 + self._fallback_candidates.discard(cur_idx) + cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0] + + if retry_count >= 3: + logger = logging.getLogger(__name__) + logger.warning( + "Failed to apply `_map_func` for idx: {}, retry count: {}".format( + idx, retry_count + ) + ) + + +class DatasetFromList(data.Dataset): + """ + Wrap a list to a torch Dataset. It produces elements of the list as data. + """ + + def __init__(self, lst: list, copy: bool = True): + """ + Args: + lst (list): a list which contains elements to produce. 
+ copy (bool): whether to deepcopy the element when producing it, + so that the result can be modified in place without affecting the + source in the list. + """ + self._lst = lst + self._copy = copy + + def __len__(self): + return len(self._lst) + + def __getitem__(self, idx): + if self._copy: + return copy.deepcopy(self._lst[idx]) + else: + return self._lst[idx] diff --git a/detectron2/data/dataset_mapper.py b/detectron2/data/dataset_mapper.py new file mode 100644 index 0000000000..0ca4dee284 --- /dev/null +++ b/detectron2/data/dataset_mapper.py @@ -0,0 +1,144 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import numpy as np +import torch +from fvcore.common.file_io import PathManager +from PIL import Image + +from . import detection_utils as utils +from . import transforms as T + +""" +This file contains the default mapping that's applied to "dataset dicts". +""" + +__all__ = ["DatasetMapper"] + + +class DatasetMapper: + """ + A callable which takes a dataset dict in Detectron2 Dataset format, + and map it into a format used by the model. + + This is the default callable to be used to map your dataset dict into training data. + You may need to follow it to implement your own one for customized logic. + + The callable currently does the following: + 1. Read the image from "file_name" + 2. Applies cropping/geometric transforms to the image and annotations + 3. Prepare data and annotations to Tensor and :class:`Instances` + """ + + def __init__(self, cfg, is_train=True): + self.tfm_gens = utils.build_transform_gen(cfg, is_train) + + if cfg.INPUT.CROP.ENABLED and is_train: + self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE) + else: + self.crop_gen = None + + # fmt: off + self.img_format = cfg.INPUT.FORMAT + self.mask_on = cfg.MODEL.MASK_ON + self.mask_format = cfg.INPUT.MASK_FORMAT + self.keypoint_on = cfg.MODEL.KEYPOINT_ON + self.load_proposals = cfg.MODEL.LOAD_PROPOSALS + # fmt: on + if self.keypoint_on and is_train: + # Flip only makes sense in training + self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) + else: + self.keypoint_hflip_indices = None + + if self.load_proposals: + self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE + self.proposal_topk = ( + cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN + if is_train + else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST + ) + self.is_train = is_train + + def __call__(self, dataset_dict): + """ + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. + + Returns: + dict: a format that builtin models in detectron2 accept + """ + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + # USER: Write your own image loading if it's not from a file + image = utils.read_image(dataset_dict["file_name"], format=self.img_format) + utils.check_image_size(dataset_dict, image) + + if "annotations" not in dataset_dict: + image, transforms = T.apply_transform_gens( + ([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image + ) + else: + # Crop around an instance if there are instances in the image. 
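+            # (Note: the crop transform is generated so that the center of the randomly chosen
+            # instance is guaranteed to lie inside the cropped region.)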
+            # USER: Remove if you don't use cropping
+            if self.crop_gen:
+                crop_tfm = utils.gen_crop_transform_with_instance(
+                    self.crop_gen.get_crop_size(image.shape[:2]),
+                    image.shape[:2],
+                    np.random.choice(dataset_dict["annotations"]),
+                )
+                image = crop_tfm.apply_image(image)
+            image, transforms = T.apply_transform_gens(self.tfm_gens, image)
+            if self.crop_gen:
+                transforms = crop_tfm + transforms
+
+        image_shape = image.shape[:2]  # h, w
+
+        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
+        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
+        # Therefore it's important to use torch.Tensor.
+        dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
+        # Can use uint8 if it turns out to be slow some day
+
+        # USER: Remove if you don't use pre-computed proposals.
+        if self.load_proposals:
+            utils.transform_proposals(
+                dataset_dict, image_shape, transforms, self.min_box_side_len, self.proposal_topk
+            )
+
+        if not self.is_train:
+            dataset_dict.pop("annotations", None)
+            dataset_dict.pop("sem_seg_file_name", None)
+            return dataset_dict
+
+        if "annotations" in dataset_dict:
+            # USER: Modify this if you want to keep them for some reason.
+            for anno in dataset_dict["annotations"]:
+                if not self.mask_on:
+                    anno.pop("segmentation", None)
+                if not self.keypoint_on:
+                    anno.pop("keypoints", None)
+
+            # USER: Implement additional transformations if you have other types of data
+            annos = [
+                utils.transform_instance_annotations(
+                    obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
+                )
+                for obj in dataset_dict.pop("annotations")
+                if obj.get("iscrowd", 0) == 0
+            ]
+            instances = utils.annotations_to_instances(
+                annos, image_shape, mask_format=self.mask_format
+            )
+            # Create a tight bounding box from masks, useful when image is cropped
+            if self.crop_gen and instances.has("gt_masks"):
+                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
+            dataset_dict["instances"] = utils.filter_empty_instances(instances)
+
+        # USER: Remove if you don't do semantic/panoptic segmentation.
+        if "sem_seg_file_name" in dataset_dict:
+            with PathManager.open(dataset_dict.pop("sem_seg_file_name"), "rb") as f:
+                sem_seg_gt = Image.open(f)
+                sem_seg_gt = np.asarray(sem_seg_gt, dtype="uint8")
+            sem_seg_gt = transforms.apply_segmentation(sem_seg_gt)
+            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
+            dataset_dict["sem_seg"] = sem_seg_gt
+        return dataset_dict
diff --git a/detectron2/data/datasets/README.md b/detectron2/data/datasets/README.md
new file mode 100644
index 0000000000..17730263a6
--- /dev/null
+++ b/detectron2/data/datasets/README.md
@@ -0,0 +1,9 @@
+
+
+### Common Datasets
+
+The datasets implemented here do not need to load the data into the final format.
+They should provide the minimal data structure needed to use the dataset, so it can be very efficient.
+
+For example, for an image dataset, just provide the file names and labels, but don't read the images.
+Let the downstream code decide how to read them.
diff --git a/detectron2/data/datasets/__init__.py b/detectron2/data/datasets/__init__.py
new file mode 100644
index 0000000000..a2bfbea6bc
--- /dev/null
+++ b/detectron2/data/datasets/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
All Rights Reserved +from .cityscapes import load_cityscapes_instances +from .coco import load_coco_json, load_sem_seg +from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta +from .register_coco import register_coco_instances, register_coco_panoptic_separated +from . import builtin # ensure the builtin datasets are registered + + +__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")] diff --git a/detectron2/data/datasets/builtin.py b/detectron2/data/datasets/builtin.py new file mode 100644 index 0000000000..9969e878ad --- /dev/null +++ b/detectron2/data/datasets/builtin.py @@ -0,0 +1,213 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + + +""" +This file registers pre-defined datasets at hard-coded paths, and their metadata. + +We hard-code metadata for common datasets. This will enable: +1. Consistency check when loading the datasets +2. Use models on these standard datasets directly and run demos, + without having to download the dataset annotations + +We hard-code some paths to the dataset that's assumed to +exist in "./datasets/". + +Users SHOULD NOT use this file to create new dataset / metadata for new dataset. +To add new dataset, refer to the tutorial "docs/DATASETS.md". +""" + +import os + +from detectron2.data import MetadataCatalog, DatasetCatalog +from .register_coco import register_coco_instances, register_coco_panoptic_separated +from .lvis import register_lvis_instances, get_lvis_instances_meta +from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic +from .pascal_voc import register_pascal_voc +from .builtin_meta import _get_builtin_metadata + + +# ==== Predefined datasets and splits for COCO ========== + +_PREDEFINED_SPLITS_COCO = {} +_PREDEFINED_SPLITS_COCO["coco"] = { + "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"), + "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"), + "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"), + "coco_2014_minival_100": ("coco/val2014", "coco/annotations/instances_minival2014_100.json"), + "coco_2014_valminusminival": ( + "coco/val2014", + "coco/annotations/instances_valminusminival2014.json", + ), + "coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"), + "coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"), + "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"), +} + +_PREDEFINED_SPLITS_COCO["coco_person"] = { + "keypoints_coco_2014_train": ( + "coco/train2014", + "coco/annotations/person_keypoints_train2014.json", + ), + "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"), + "keypoints_coco_2014_minival": ( + "coco/val2014", + "coco/annotations/person_keypoints_minival2014.json", + ), + "keypoints_coco_2014_valminusminival": ( + "coco/val2014", + "coco/annotations/person_keypoints_valminusminival2014.json", + ), + "keypoints_coco_2014_minival_100": ( + "coco/val2014", + "coco/annotations/person_keypoints_minival2014_100.json", + ), + "keypoints_coco_2017_train": ( + "coco/train2017", + "coco/annotations/person_keypoints_train2017.json", + ), + "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"), + "keypoints_coco_2017_val_100": ( + "coco/val2017", + "coco/annotations/person_keypoints_val2017_100.json", + ), +} + + 
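+# Registering a new COCO-format dataset is done from user code (see "docs/DATASETS.md"),
+# not by editing this file. A minimal sketch, with hypothetical paths and dataset name:
+#
+#   from detectron2.data.datasets import register_coco_instances
+#   register_coco_instances(
+#       "my_dataset_train",            # any unused dataset name
+#       {},                            # extra metadata; may be left empty
+#       "path/to/annotations.json",    # COCO-format annotation file
+#       "path/to/image_root",          # directory containing the images
+#   )
+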
+_PREDEFINED_SPLITS_COCO_PANOPTIC = { + "coco_2017_train_panoptic": ( + # This is the original panoptic annotation directory + "coco/panoptic_train2017", + "coco/annotations/panoptic_train2017.json", + # This directory contains semantic annotations that are + # converted from panoptic annotations. + # It is used by PanopticFPN. + # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py + # to create these directories. + "coco/panoptic_stuff_train2017", + ), + "coco_2017_val_panoptic": ( + "coco/panoptic_val2017", + "coco/annotations/panoptic_val2017.json", + "coco/panoptic_stuff_val2017", + ), + "coco_2017_val_100_panoptic": ( + "coco/panoptic_val2017_100", + "coco/annotations/panoptic_val2017_100.json", + "coco/panoptic_stuff_val2017_100", + ), +} + + +def register_all_coco(root="datasets"): + for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items(): + for key, (image_root, json_file) in splits_per_dataset.items(): + # Assume pre-defined datasets live in `./datasets`. + register_coco_instances( + key, + _get_builtin_metadata(dataset_name), + os.path.join(root, json_file) if "://" not in json_file else json_file, + os.path.join(root, image_root), + ) + + for ( + prefix, + (panoptic_root, panoptic_json, semantic_root), + ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items(): + prefix_instances = prefix[: -len("_panoptic")] + instances_meta = MetadataCatalog.get(prefix_instances) + image_root, instances_json = instances_meta.image_root, instances_meta.json_file + register_coco_panoptic_separated( + prefix, + _get_builtin_metadata("coco_panoptic_separated"), + image_root, + os.path.join(root, panoptic_root), + os.path.join(root, panoptic_json), + os.path.join(root, semantic_root), + instances_json, + ) + + +# ==== Predefined datasets and splits for LVIS ========== + + +_PREDEFINED_SPLITS_LVIS = { + "lvis_v0.5": { + "lvis_v0.5_train": ("coco/train2017", "lvis/lvis_v0.5_train.json"), + "lvis_v0.5_val": ("coco/val2017", "lvis/lvis_v0.5_val.json"), + "lvis_v0.5_val_rand_100": ("coco/val2017", "lvis/lvis_v0.5_val_rand_100.json"), + "lvis_v0.5_test": ("coco/test2017", "lvis/lvis_v0.5_image_info_test.json"), + } +} + + +def register_all_lvis(root="datasets"): + for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items(): + for key, (image_root, json_file) in splits_per_dataset.items(): + # Assume pre-defined datasets live in `./datasets`. 
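+ # (As in `register_all_coco` above, a json path that already carries a URI
+ # scheme ("://") is used verbatim instead of being joined with `root`.)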
+ register_lvis_instances( + key, + get_lvis_instances_meta(dataset_name), + os.path.join(root, json_file) if "://" not in json_file else json_file, + os.path.join(root, image_root), + ) + + +# ==== Predefined splits for raw cityscapes images =========== + + +_RAW_CITYSCAPES_SPLITS = { + "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train", "cityscapes/gtFine/train"), + "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val", "cityscapes/gtFine/val"), + "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test", "cityscapes/gtFine/test"), +} + + +def register_all_cityscapes(root="datasets"): + for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items(): + meta = _get_builtin_metadata("cityscapes") + image_dir = os.path.join(root, image_dir) + gt_dir = os.path.join(root, gt_dir) + + inst_key = key.format(task="instance_seg") + DatasetCatalog.register( + inst_key, + lambda x=image_dir, y=gt_dir: load_cityscapes_instances( + x, y, from_json=True, to_polygons=True + ), + ) + MetadataCatalog.get(inst_key).set( + image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes", **meta + ) + + sem_key = key.format(task="sem_seg") + DatasetCatalog.register( + sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y) + ) + MetadataCatalog.get(sem_key).set( + image_dir=image_dir, gt_dir=gt_dir, evaluator_type="sem_seg", **meta + ) + + +# ==== Predefined splits for PASCAL VOC =========== +def register_all_pascal_voc(root="datasets"): + SPLITS = [ + ("voc_2007_trainval", "VOC2007", "trainval"), + ("voc_2007_train", "VOC2007", "train"), + ("voc_2007_val", "VOC2007", "val"), + ("voc_2007_test", "VOC2007", "test"), + ("voc_2012_trainval", "VOC2012", "trainval"), + ("voc_2012_train", "VOC2012", "train"), + ("voc_2012_val", "VOC2012", "val"), + ] + for name, dirname, split in SPLITS: + year = 2007 if "2007" in name else 2012 + register_pascal_voc(name, os.path.join(root, dirname), split, year) + MetadataCatalog.get(name).evaluator_type = "pascal_voc" + + +# Register them all under "./datasets" +register_all_coco() +register_all_lvis() +register_all_cityscapes() +register_all_pascal_voc() diff --git a/detectron2/data/datasets/builtin_meta.py b/detectron2/data/datasets/builtin_meta.py new file mode 100644 index 0000000000..74c79863a9 --- /dev/null +++ b/detectron2/data/datasets/builtin_meta.py @@ -0,0 +1,267 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + + +# All coco categories, together with their nice-looking visualization colors +# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json +COCO_CATEGORIES = [ + {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, + {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, + {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, + {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, + {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, + {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, + {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, + {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, + {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, + {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, + {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, + {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, + {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, + {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, + {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, + {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, + {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, + {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, + {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, + {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, + {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, + {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, + {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, + {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, + {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, + {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, + {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, + {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, + {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, + {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, + {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, + {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, + {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, + {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, + {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, + {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, + {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, + {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, + {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, + {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, + {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, + {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, + {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, + {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, + {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, + {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, + {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, + 
{"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, + {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, + {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, + {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, + {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, + {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, + {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, + {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, + {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, + {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, + {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, + {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, + {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, + {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, + {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, + {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, + {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, + {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, + {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, + {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, + {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, + {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, + {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, + {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, + {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, + {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, + {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"}, + {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, + {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, + {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, + {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, + {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, + {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, + {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"}, + {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"}, + {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"}, + {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"}, + {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"}, + {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"}, + {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"}, + {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"}, + {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"}, + {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"}, + {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"}, + {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"}, + {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"}, + {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"}, + {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"}, + {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"}, + {"color": [255, 180, 195], "isthing": 0, 
"id": 144, "name": "platform"}, + {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"}, + {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"}, + {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"}, + {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"}, + {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"}, + {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"}, + {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"}, + {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"}, + {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"}, + {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"}, + {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"}, + {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"}, + {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"}, + {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"}, + {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"}, + {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"}, + {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"}, + {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"}, + {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"}, + {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"}, + {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"}, + {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"}, + {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"}, + {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"}, + {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"}, + {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"}, + {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"}, + {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"}, + {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"}, + {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"}, + {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"}, + {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"}, + {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"}, + {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"}, + {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"}, + {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"}, +] + +# fmt: off +COCO_PERSON_KEYPOINT_NAMES = ( + "nose", + "left_eye", "right_eye", + "left_ear", "right_ear", + "left_shoulder", "right_shoulder", + "left_elbow", "right_elbow", + "left_wrist", "right_wrist", + "left_hip", "right_hip", + "left_knee", "right_knee", + "left_ankle", "right_ankle", +) +# fmt: on + +# Pairs of keypoints that should be exchanged under horizontal flipping +COCO_PERSON_KEYPOINT_FLIP_MAP = ( + ("left_eye", "right_eye"), + ("left_ear", "right_ear"), + ("left_shoulder", "right_shoulder"), + ("left_elbow", "right_elbow"), + ("left_wrist", "right_wrist"), + ("left_hip", "right_hip"), + ("left_knee", "right_knee"), + ("left_ankle", "right_ankle"), +) + +# rules for pairs of keypoints to draw a line between, and the line color to use. 
+KEYPOINT_CONNECTION_RULES = [ + # face + ("left_ear", "left_eye", (102, 204, 255)), + ("right_ear", "right_eye", (51, 153, 255)), + ("left_eye", "nose", (102, 0, 204)), + ("nose", "right_eye", (51, 102, 255)), + # upper-body + ("left_shoulder", "right_shoulder", (255, 128, 0)), + ("left_shoulder", "left_elbow", (153, 255, 204)), + ("right_shoulder", "right_elbow", (128, 229, 255)), + ("left_elbow", "left_wrist", (153, 255, 153)), + ("right_elbow", "right_wrist", (102, 255, 224)), + # lower-body + ("left_hip", "right_hip", (255, 102, 0)), + ("left_hip", "left_knee", (255, 255, 77)), + ("right_hip", "right_knee", (153, 255, 204)), + ("left_knee", "left_ankle", (191, 255, 128)), + ("right_knee", "right_ankle", (255, 195, 77)), +] + + +def _get_coco_instances_meta(): + thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1] + thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1] + assert len(thing_ids) == 80, len(thing_ids) + # Mapping from the incontiguous COCO category id to an id in [0, 79] + thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)} + thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1] + ret = { + "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id, + "thing_classes": thing_classes, + "thing_colors": thing_colors, + } + return ret + + +def _get_coco_panoptic_separated_meta(): + """ + Returns metadata for "separated" version of the panoptic segmentation dataset. + """ + stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0] + assert len(stuff_ids) == 53, len(stuff_ids) + + # For semantic segmentation, this mapping maps from contiguous stuff id + # (in [0, 53], used in models) to ids in the dataset (used for processing results) + # The id 0 is mapped to an extra category "thing". 
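+ # Worked example: the first stuff entry in COCO_CATEGORIES ("banner", dataset id 92)
+ # gets contiguous id 1, "blanket" (93) gets 2, and so on; contiguous id 0 is reserved
+ # for the merged "things" category handled below.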
+ stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)} + # When converting COCO panoptic annotations to semantic annotations + # We label the "thing" category to 0 + stuff_dataset_id_to_contiguous_id[0] = 0 + + # 54 names for COCO stuff categories (including "things") + stuff_classes = ["things"] + [ + k["name"].replace("-other", "").replace("-merged", "") + for k in COCO_CATEGORIES + if k["isthing"] == 0 + ] + + # NOTE: I randomly picked a color for things + stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0] + ret = { + "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id, + "stuff_classes": stuff_classes, + "stuff_colors": stuff_colors, + } + ret.update(_get_coco_instances_meta()) + return ret + + +def _get_builtin_metadata(dataset_name): + if dataset_name == "coco": + return _get_coco_instances_meta() + if dataset_name == "coco_panoptic_separated": + return _get_coco_panoptic_separated_meta() + elif dataset_name == "coco_person": + return { + "thing_classes": ["person"], + "keypoint_names": COCO_PERSON_KEYPOINT_NAMES, + "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP, + "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES, + } + elif dataset_name == "cityscapes": + # fmt: off + CITYSCAPES_THING_CLASSES = [ + "person", "rider", "car", "truck", + "bus", "train", "motorcycle", "bicycle", + ] + CITYSCAPES_STUFF_CLASSES = [ + "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", + "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", + "truck", "bus", "train", "motorcycle", "bicycle", "license plate", + ] + # fmt: on + return { + "thing_classes": CITYSCAPES_THING_CLASSES, + "stuff_classes": CITYSCAPES_STUFF_CLASSES, + } + raise KeyError("No built-in metadata for dataset {}".format(dataset_name)) diff --git a/detectron2/data/datasets/cityscapes.py b/detectron2/data/datasets/cityscapes.py new file mode 100644 index 0000000000..f649dfa613 --- /dev/null +++ b/detectron2/data/datasets/cityscapes.py @@ -0,0 +1,317 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import functools +import glob +import json +import logging +import multiprocessing as mp +import numpy as np +import os +from itertools import chain +import pycocotools.mask as mask_util +from PIL import Image + +from detectron2.structures import BoxMode +from detectron2.utils.logger import setup_logger +from detectron2.utils.comm import get_world_size +from fvcore.common.file_io import PathManager + +try: + import cv2 # noqa +except ImportError: + # OpenCV is an optional dependency at the moment + pass + + +def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train". + gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train". + from_json (bool): whether to read annotations from the raw json file or the png files. + to_polygons (bool): whether to represent the segmentation as polygons + (COCO's format) instead of masks (cityscapes's format). + + Returns: + list[dict]: a list of dicts in "Detectron2 Dataset" format. (See DATASETS.md) + """ + if from_json: + assert to_polygons, ( + "Cityscapes's json annotations are in polygon format. " + "Converting to mask format is not supported now." 
+ ) + files = [] + for image_file in glob.glob(os.path.join(image_dir, "**/*.png")): + suffix = "leftImg8bit.png" + assert image_file.endswith(suffix) + prefix = image_dir + instance_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_instanceIds.png" + assert os.path.isfile(instance_file), instance_file + + label_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_labelIds.png" + assert os.path.isfile(label_file), label_file + + json_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_polygons.json" + files.append((image_file, instance_file, label_file, json_file)) + assert len(files), "No images found in {}".format(image_dir) + + logger = logging.getLogger(__name__) + logger.info("Preprocessing cityscapes annotations ...") + # This is still not fast: all workers will execute duplicate works and will + # take up to 10m on a 8GPU server. + pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4)) + + ret = pool.map( + functools.partial(cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons), + files, + ) + logger.info("Loaded {} images from {}".format(len(ret), image_dir)) + + # Map cityscape ids to contiguous ids + from cityscapesscripts.helpers.labels import labels + + labels = [l for l in labels if l.hasInstances and not l.ignoreInEval] + dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)} + for dict_per_image in ret: + for anno in dict_per_image["annotations"]: + anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]] + return ret + + +def load_cityscapes_semantic(image_dir, gt_dir): + """ + Args: + image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train". + gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train". + + Returns: + list[dict]: a list of dict, each has "file_name" and + "sem_seg_file_name". + """ + ret = [] + for image_file in glob.glob(os.path.join(image_dir, "**/*.png")): + suffix = "leftImg8bit.png" + assert image_file.endswith(suffix) + prefix = image_dir + + label_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_labelTrainIds.png" + assert os.path.isfile( + label_file + ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa + + json_file = gt_dir + image_file[len(prefix) : -len(suffix)] + "gtFine_polygons.json" + + with PathManager.open(json_file, "r") as f: + jsonobj = json.load(f) + ret.append( + { + "file_name": image_file, + "sem_seg_file_name": label_file, + "height": jsonobj["imgHeight"], + "width": jsonobj["imgWidth"], + } + ) + return ret + + +def cityscapes_files_to_dict(files, from_json, to_polygons): + """ + Parse cityscapes annotation files to a dict. + + Args: + files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file) + from_json (bool): whether to read annotations from the raw json file or the png files. + to_polygons (bool): whether to represent the segmentation as polygons + (COCO's format) instead of masks (cityscapes's format). + + Returns: + A dict in Detectron2 Dataset format. 
+ """ + from cityscapesscripts.helpers.labels import id2label, name2label + + image_file, instance_id_file, _, json_file = files + + annos = [] + + if from_json: + from shapely.geometry import MultiPolygon, Polygon + + with PathManager.open(json_file, "r") as f: + jsonobj = json.load(f) + ret = { + "file_name": image_file, + "image_id": os.path.basename(image_file), + "height": jsonobj["imgHeight"], + "width": jsonobj["imgWidth"], + } + + # `polygons_union` contains the union of all valid polygons. + polygons_union = Polygon() + + # CityscapesScripts draw the polygons in sequential order + # and each polygon *overwrites* existing ones. See + # (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa + # We use reverse order, and each polygon *avoids* early ones. + # This will resolve the ploygon overlaps in the same way as CityscapesScripts. + for obj in jsonobj["objects"][::-1]: + if "deleted" in obj: # cityscapes data format specific + continue + label_name = obj["label"] + + try: + label = name2label[label_name] + except KeyError: + if label_name.endswith("group"): # crowd area + label = name2label[label_name[: -len("group")]] + else: + raise + if label.id < 0: # cityscapes data format + continue + + # Cityscapes's raw annotations uses integer coordinates + # Therefore +0.5 here + poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5 + # CityscapesScript uses PIL.ImageDraw.polygon to rasterize + # polygons for evaluation. This function operates in integer space + # and draws each pixel whose center falls into the polygon. + # Therefore it draws a polygon which is 0.5 "fatter" in expectation. + # We therefore dilate the input polygon by 0.5 as our input. + poly = Polygon(poly_coord).buffer(0.5, resolution=4) + + if not label.hasInstances or label.ignoreInEval: + # even if we won't store the polygon it still contributes to overlaps resolution + polygons_union = polygons_union.union(poly) + continue + + # Take non-overlapping part of the polygon + poly_wo_overlaps = poly.difference(polygons_union) + if poly_wo_overlaps.is_empty: + continue + polygons_union = polygons_union.union(poly) + + anno = {} + anno["iscrowd"] = label_name.endswith("group") + anno["category_id"] = label.id + + if isinstance(poly_wo_overlaps, Polygon): + poly_list = [poly_wo_overlaps] + elif isinstance(poly_wo_overlaps, MultiPolygon): + poly_list = poly_wo_overlaps.geoms + else: + raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps)) + + poly_coord = [] + for poly_el in poly_list: + # COCO API can work only with exterior boundaries now, hence we store only them. + # TODO: store both exterior and interior boundaries once other parts of the + # codebase support holes in polygons. 
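+ # `exterior.coords` yields the polygon's (x, y) vertices; chaining flattens them
+ # into the [x0, y0, x1, y1, ...] layout used for COCO-style polygon segmentations.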
+ poly_coord.append(list(chain(*poly_el.exterior.coords))) + anno["segmentation"] = poly_coord + (xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds + + anno["bbox"] = (xmin, ymin, xmax, ymax) + anno["bbox_mode"] = BoxMode.XYXY_ABS + + annos.append(anno) + else: + # See also the official annotation parsing scripts at + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa + with PathManager.open(instance_id_file, "rb") as f: + inst_image = np.asarray(Image.open(f), order="F") + # ids < 24 are stuff labels (filtering them first is about 5% faster) + flattened_ids = np.unique(inst_image[inst_image >= 24]) + + ret = { + "file_name": image_file, + "image_id": os.path.basename(image_file), + "height": inst_image.shape[0], + "width": inst_image.shape[1], + } + + for instance_id in flattened_ids: + # For non-crowd annotations, instance_id // 1000 is the label_id + # Crowd annotations have <1000 instance ids + label_id = instance_id // 1000 if instance_id >= 1000 else instance_id + label = id2label[label_id] + if not label.hasInstances or label.ignoreInEval: + continue + + anno = {} + anno["iscrowd"] = instance_id < 1000 + anno["category_id"] = label.id + + mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F") + + inds = np.nonzero(mask) + ymin, ymax = inds[0].min(), inds[0].max() + xmin, xmax = inds[1].min(), inds[1].max() + anno["bbox"] = (xmin, ymin, xmax, ymax) + if xmax <= xmin or ymax <= ymin: + continue + anno["bbox_mode"] = BoxMode.XYXY_ABS + if to_polygons: + # This conversion comes from D4809743 and D5171122, + # when Mask-RCNN was first developed. + contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[ + -2 + ] + polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3] + # opencv's can produce invalid polygons + if len(polygons) == 0: + continue + anno["segmentation"] = polygons + else: + anno["segmentation"] = mask_util.encode(mask[:, :, None])[0] + annos.append(anno) + ret["annotations"] = annos + return ret + + +if __name__ == "__main__": + """ + Test the cityscapes dataset loader. 
+ + Usage: + python -m detectron2.data.datasets.cityscapes \ + cityscapes/leftImg8bit/train cityscapes/gtFine/train + """ + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("image_dir") + parser.add_argument("gt_dir") + parser.add_argument("--type", choices=["instance", "semantic"], default="instance") + args = parser.parse_args() + from detectron2.data.catalog import Metadata + from detectron2.utils.visualizer import Visualizer + from cityscapesscripts.helpers.labels import labels + + logger = setup_logger(name=__name__) + + dirname = "cityscapes-data-vis" + os.makedirs(dirname, exist_ok=True) + + if args.type == "instance": + dicts = load_cityscapes_instances( + args.image_dir, args.gt_dir, from_json=True, to_polygons=True + ) + logger.info("Done loading {} samples.".format(len(dicts))) + + thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval] + meta = Metadata().set(thing_classes=thing_classes) + + else: + dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir) + logger.info("Done loading {} samples.".format(len(dicts))) + + stuff_names = [k.name for k in labels if k.trainId != 255] + stuff_colors = [k.color for k in labels if k.trainId != 255] + meta = Metadata().set(stuff_names=stuff_names, stuff_colors=stuff_colors) + + for d in dicts: + img = np.array(Image.open(d["file_name"])) + visualizer = Visualizer(img, metadata=meta) + vis = visualizer.draw_dataset_dict(d) + # cv2.imshow("a", vis.get_image()[:, :, ::-1]) + # cv2.waitKey() + fpath = os.path.join(dirname, os.path.basename(d["file_name"])) + vis.save(fpath) diff --git a/detectron2/data/datasets/coco.py b/detectron2/data/datasets/coco.py new file mode 100644 index 0000000000..3b17a6f408 --- /dev/null +++ b/detectron2/data/datasets/coco.py @@ -0,0 +1,315 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import io +import logging +import contextlib +import os +from PIL import Image + +from fvcore.common.timer import Timer +from detectron2.structures import BoxMode +from fvcore.common.file_io import PathManager + +from .. import MetadataCatalog, DatasetCatalog + +""" +This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format". +""" + + +logger = logging.getLogger(__name__) + +__all__ = ["load_coco_json", "load_sem_seg"] + + +def load_coco_json(json_file, image_root, dataset_name=None): + """ + Load a json file with COCO's instances annotation format. + Currently supports instance detection, instance segmentation, + person keypoints and densepose annotations. + + Args: + json_file (str): full path to the json file in COCO instances annotation format. + image_root (str): the directory where the images in this json file exists. + dataset_name (str): the name of the dataset (e.g., coco_2017_train). + If provided, this function will also put "thing_classes" into + the metadata associated with this dataset. + + Returns: + list[dict]: a list of dicts in "Detectron2 Dataset" format. (See DATASETS.md) + + Notes: + 1. This function does not read the image files. + The results do not have the "image" field. 
+ """ + from pycocotools.coco import COCO + + timer = Timer() + json_file = PathManager.get_local_path(json_file) + with contextlib.redirect_stdout(io.StringIO()): + coco_api = COCO(json_file) + if timer.seconds() > 1: + logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) + + id_map = None + if dataset_name is not None: + meta = MetadataCatalog.get(dataset_name) + cat_ids = sorted(coco_api.getCatIds()) + cats = coco_api.loadCats(cat_ids) + # The categories in a custom json file may not be sorted. + thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] + meta.thing_classes = thing_classes + + # In COCO, certain category ids are artificially removed, + # and by convention they are always ignored. + # We deal with COCO's id issue and translate + # the category ids to contiguous ids in [0, 80). + + # It works by looking at the "categories" field in the json, therefore + # if users' own json also have incontiguous ids, we'll + # apply this mapping as well but print a warning. + if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): + if "coco" not in dataset_name: + logger.warning( + """ +Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. +""" + ) + id_map = {v: i for i, v in enumerate(cat_ids)} + meta.thing_dataset_id_to_contiguous_id = id_map + + # sort indices for reproducible results + img_ids = sorted(list(coco_api.imgs.keys())) + # imgs is a list of dicts, each looks something like: + # {'license': 4, + # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', + # 'file_name': 'COCO_val2014_000000001268.jpg', + # 'height': 427, + # 'width': 640, + # 'date_captured': '2013-11-17 05:57:24', + # 'id': 1268} + imgs = coco_api.loadImgs(img_ids) + # anns is a list[list[dict]], where each dict is an annotation + # record for an object. The inner list enumerates the objects in an image + # and the outer list enumerates over images. Example of anns[0]: + # [{'segmentation': [[192.81, + # 247.09, + # ... + # 219.03, + # 249.06]], + # 'area': 1035.749, + # 'iscrowd': 0, + # 'image_id': 1268, + # 'bbox': [192.81, 224.8, 74.73, 33.43], + # 'category_id': 16, + # 'id': 42986}, + # ...] + anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] + + if "minival" not in json_file: + # The popular valminusminival & minival annotations for COCO2014 contain this bug. + # However the ratio of buggy annotations there is tiny and does not affect accuracy. + # Therefore we explicitly white-list them. + ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( + json_file + ) + + imgs_anns = list(zip(imgs, anns)) + + logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) + + dataset_dicts = [] + + # TODO: refactoring candidate, one should not have to alter DB reader + # every time new data type is added + DENSEPOSE_KEYS = ["dp_x", "dp_y", "dp_I", "dp_U", "dp_V", "dp_masks"] + + num_instances_without_valid_segmentation = 0 + + for (img_dict, anno_dict_list) in imgs_anns: + record = {} + record["file_name"] = os.path.join(image_root, img_dict["file_name"]) + record["height"] = img_dict["height"] + record["width"] = img_dict["width"] + image_id = record["image_id"] = img_dict["id"] + + objs = [] + for anno in anno_dict_list: + # Check that the image_id in this annotation is the same as + # the image_id we're looking at. 
+ # This fails only when the data parsing logic or the annotation file is buggy. + + # The original COCO valminusminival2014 & minival2014 annotation files + # actually contains bugs that, together with certain ways of using COCO API, + # can trigger this assertion. + assert anno["image_id"] == image_id + + assert anno.get("ignore", 0) == 0 + + obj = { + field: anno[field] + for field in ["iscrowd", "bbox", "keypoints", "category_id"] + DENSEPOSE_KEYS + if field in anno + } + + segm = anno.get("segmentation", None) + if segm: # either list[list[float]] or dict(RLE) + if not isinstance(segm, dict): + # filter out invalid polygons (< 3 points) + segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] + if len(segm) == 0: + num_instances_without_valid_segmentation += 1 + continue # ignore this instance + obj["segmentation"] = segm + + keypts = anno.get("keypoints", None) + if keypts: # list[int] + for idx, v in enumerate(keypts): + if idx % 3 != 2: + # COCO's segmentation coordinates are floating points in [0, H or W], + # but keypoint coordinates are integers in [0, H-1 or W-1] + # Therefore we assume the coordinates are "pixel indices" and + # add 0.5 to convert to floating point coordinates. + keypts[idx] = v + 0.5 + obj["keypoints"] = keypts + + obj["bbox_mode"] = BoxMode.XYWH_ABS + if id_map: + obj["category_id"] = id_map[obj["category_id"]] + objs.append(obj) + record["annotations"] = objs + dataset_dicts.append(record) + + if num_instances_without_valid_segmentation > 0: + logger.warn( + "Filtered out {} instances without valid segmentation. " + "There might be issues in your dataset generation process.".format( + num_instances_without_valid_segmentation + ) + ) + return dataset_dicts + + +# TODO this function is not specific to COCO, except for the "image_id" logic. +def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"): + """ + Load semantic segmenation datasets. All files under "gt_root" with "gt_ext" extension are + treated as ground truth annotations and all files under "image_root" with "image_ext" extension + as input images. Ground truth and input images are matched using file paths relative to + "gt_root" and "image_root" respectively without taking into account file extensions. + + Args: + gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation + annotations are stored as images with integer values in pixels that represent + corresponding semantic labels. + image_root (str): the directory where the input images are. + gt_ext (str): file extension for ground truth annotations. + image_ext (str): file extension for input images. + + Returns: + list[dict]: + a list of dicts in detectron2 standard format without instance-level + annotation. + + Notes: + 1. This function does not read the image and ground truth files. + The results do not have the "image" and "sem_seg" fields. + """ + + # We match input images with ground truth based on their raltive filepaths (without file + # extensions) starting from 'image_root' and 'gt_root' respectively. COCO API works with integer + # IDs, hence, we try to convert these paths to int if possible. + def file2id(folder_path, file_path): + # TODO id is not used. 
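+ # (Example: file2id("coco/val2017", "coco/val2017/000000001268.jpg") returns the
+ # integer 1268, while a stem that is not numeric is kept as a string.)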
+ # extract realtive path starting from `folder_path` + image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path)) + # remove file extension + image_id = os.path.splitext(image_id)[0] + try: + image_id = int(image_id) + except ValueError: + pass + return image_id + + input_files = sorted( + (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), + key=lambda file_path: file2id(image_root, file_path), + ) + gt_files = sorted( + (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), + key=lambda file_path: file2id(gt_root, file_path), + ) + + assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) + + # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images + if len(input_files) != len(gt_files): + logger.warn( + "Directory {} and {} has {} and {} files, respectively.".format( + image_root, gt_root, len(input_files), len(gt_files) + ) + ) + input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] + gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] + intersect = list(set(input_basenames) & set(gt_basenames)) + # sort, otherwise each worker may obtain a list[dict] in different order + intersect = sorted(intersect) + logger.warn("Will use their intersection of {} files.".format(len(intersect))) + input_files = [os.path.join(image_root, f + image_ext) for f in intersect] + gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] + + logger.info( + "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) + ) + + dataset_dicts = [] + for (img_path, gt_path) in zip(input_files, gt_files): + record = {} + record["file_name"] = img_path + record["sem_seg_file_name"] = gt_path + record["image_id"] = file2id(image_root, img_path) + assert record["image_id"] == file2id( + gt_root, gt_path + ), "there is no ground truth for {}".format(img_path) + with PathManager.open(gt_path, "rb") as f: + img = Image.open(f) + w, h = img.size + record["height"] = h + record["width"] = w + dataset_dicts.append(record) + + return dataset_dicts + + +if __name__ == "__main__": + """ + Test the COCO json dataset loader. + + Usage: + python -m detectron2.data.datasets.coco \ + path/to/json path/to/image_root dataset_name + + "dataset_name" can be "coco_2014_minival_100", or other + pre-registered ones + """ + import numpy as np + from detectron2.utils.logger import setup_logger + from detectron2.utils.visualizer import Visualizer + import detectron2.data.datasets # noqa # add pre-defined metadata + import sys + + logger = setup_logger(name=__name__) + assert sys.argv[3] in DatasetCatalog.list() + meta = MetadataCatalog.get(sys.argv[3]) + + dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3]) + logger.info("Done loading {} samples.".format(len(dicts))) + + dirname = "coco-data-vis" + os.makedirs(dirname, exist_ok=True) + for d in dicts: + img = np.array(Image.open(d["file_name"])) + visualizer = Visualizer(img, metadata=meta) + vis = visualizer.draw_dataset_dict(d) + fpath = os.path.join(dirname, os.path.basename(d["file_name"])) + vis.save(fpath) diff --git a/detectron2/data/datasets/lvis.py b/detectron2/data/datasets/lvis.py new file mode 100644 index 0000000000..9fc39b1ead --- /dev/null +++ b/detectron2/data/datasets/lvis.py @@ -0,0 +1,205 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import os + +from fvcore.common.timer import Timer +from detectron2.structures import BoxMode +from fvcore.common.file_io import PathManager +from detectron2.data import DatasetCatalog, MetadataCatalog + +from .lvis_v0_5_categories import LVIS_CATEGORIES + +""" +This file contains functions to parse LVIS-format annotations into dicts in the +"Detectron2 format". +""" + +logger = logging.getLogger(__name__) + +__all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"] + + +def register_lvis_instances(name, metadata, json_file, image_root): + """ + Register a dataset in LVIS's json annotation format for instance detection and segmentation. + + Args: + name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train". + metadata (dict): extra metadata associated with this dataset. It can be an empty dict. + json_file (str): path to the json instance annotation file. + image_root (str): directory which contains all the images. + """ + DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name)) + MetadataCatalog.get(name).set( + json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata + ) + + +def load_lvis_json(json_file, image_root, dataset_name=None): + """ + Load a json file in LVIS's annotation format. + + Args: + json_file (str): full path to the LVIS json annotation file. + image_root (str): the directory where the images in this json file exists. + dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train"). + If provided, this function will put "thing_classes" into the metadata + associated with this dataset. + + Returns: + list[dict]: a list of dicts in "Detectron2 Dataset" format. (See DATASETS.md) + + Notes: + 1. This function does not read the image files. + The results do not have the "image" field. + """ + from lvis import LVIS + + json_file = PathManager.get_local_path(json_file) + + timer = Timer() + lvis_api = LVIS(json_file) + if timer.seconds() > 1: + logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) + + if dataset_name is not None: + meta = get_lvis_instances_meta(dataset_name) + MetadataCatalog.get(dataset_name).set(**meta) + + # sort indices for reproducible results + img_ids = sorted(list(lvis_api.imgs.keys())) + # imgs is a list of dicts, each looks something like: + # {'license': 4, + # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', + # 'file_name': 'COCO_val2014_000000001268.jpg', + # 'height': 427, + # 'width': 640, + # 'date_captured': '2013-11-17 05:57:24', + # 'id': 1268} + imgs = lvis_api.load_imgs(img_ids) + # anns is a list[list[dict]], where each dict is an annotation + # record for an object. The inner list enumerates the objects in an image + # and the outer list enumerates over images. Example of anns[0]: + # [{'segmentation': [[192.81, + # 247.09, + # ... + # 219.03, + # 249.06]], + # 'area': 1035.749, + # 'image_id': 1268, + # 'bbox': [192.81, 224.8, 74.73, 33.43], + # 'category_id': 16, + # 'id': 42986}, + # ...] 
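+ # (The LVIS API exposes this image-id -> annotations mapping as `img_ann_map`,
+ # playing the role of `imgToAnns` in the COCO API used in coco.py.)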
+ anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] + + # Sanity check that each annotation has a unique id + ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] + assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format( + json_file + ) + + imgs_anns = list(zip(imgs, anns)) + + logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file)) + + dataset_dicts = [] + + for (img_dict, anno_dict_list) in imgs_anns: + record = {} + file_name = img_dict["file_name"] + if img_dict["file_name"].startswith("COCO"): + # Convert form the COCO 2014 file naming convention of + # COCO_[train/val/test]2014_000000000000.jpg to the 2017 naming convention of + # 000000000000.jpg (LVIS v1 will fix this naming issue) + file_name = file_name[-16:] + record["file_name"] = os.path.join(image_root, file_name) + record["height"] = img_dict["height"] + record["width"] = img_dict["width"] + record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", []) + record["neg_category_ids"] = img_dict.get("neg_category_ids", []) + image_id = record["image_id"] = img_dict["id"] + + objs = [] + for anno in anno_dict_list: + # Check that the image_id in this annotation is the same as + # the image_id we're looking at. + # This fails only when the data parsing logic or the annotation file is buggy. + assert anno["image_id"] == image_id + obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS} + obj["category_id"] = anno["category_id"] - 1 # Convert 1-indexed to 0-indexed + segm = anno["segmentation"] # list[list[float]] + # filter out invalid polygons (< 3 points) + valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] + assert len(segm) == len( + valid_segm + ), "Annotation contains an invalid polygon with < 3 points" + assert len(segm) > 0 + obj["segmentation"] = segm + objs.append(obj) + record["annotations"] = objs + dataset_dicts.append(record) + + return dataset_dicts + + +def get_lvis_instances_meta(dataset_name): + """ + Load LVIS metadata. + + Args: + dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5"). + + Returns: + dict: LVIS metadata with keys: thing_classes + """ + if "v0.5" in dataset_name: + return _get_lvis_instances_meta_v0_5() + # There will be a v1 in the future + # elif dataset_name == "lvis_v1": + # return get_lvis_instances_meta_v1() + raise ValueError("No built-in metadata for dataset {}".format(dataset_name)) + + +def _get_lvis_instances_meta_v0_5(): + assert len(LVIS_CATEGORIES) == 1230 + cat_ids = [k["id"] for k in LVIS_CATEGORIES] + assert min(cat_ids) == 1 and max(cat_ids) == len( + cat_ids + ), "Category ids are not in [1, #categories], as expected" + # Ensure that the category list is sorted by id + lvis_categories = [k for k in sorted(LVIS_CATEGORIES, key=lambda x: x["id"])] + thing_classes = [k["synonyms"][0] for k in lvis_categories] + meta = {"thing_classes": thing_classes} + return meta + + +if __name__ == "__main__": + """ + Test the LVIS json dataset loader. 
+ + Usage: + python -m detectron2.data.datasets.lvis \ + path/to/json path/to/image_root dataset_name vis_limit + """ + import sys + import numpy as np + from detectron2.utils.logger import setup_logger + from PIL import Image + import detectron2.data.datasets # noqa # add pre-defined metadata + from detectron2.utils.visualizer import Visualizer + + logger = setup_logger(name=__name__) + meta = MetadataCatalog.get(sys.argv[3]) + + dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3]) + logger.info("Done loading {} samples.".format(len(dicts))) + + dirname = "lvis-data-vis" + os.makedirs(dirname, exist_ok=True) + for d in dicts[: int(sys.argv[4])]: + img = np.array(Image.open(d["file_name"])) + visualizer = Visualizer(img, metadata=meta) + vis = visualizer.draw_dataset_dict(d) + fpath = os.path.join(dirname, os.path.basename(d["file_name"])) + vis.save(fpath) diff --git a/detectron2/data/datasets/lvis_v0_5_categories.py b/detectron2/data/datasets/lvis_v0_5_categories.py new file mode 100644 index 0000000000..8205e605f8 --- /dev/null +++ b/detectron2/data/datasets/lvis_v0_5_categories.py @@ -0,0 +1,13 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# Autogen with +# with open("lvis_v0.5_val.json", "r") as f: +# a = json.load(f) +# c = a["categories"] +# for x in c: +# del x["image_count"] +# del x["instance_count"] +# LVIS_CATEGORIES = repr(c) + " # noqa" + +# fmt: off +LVIS_CATEGORIES = [{'frequency': 'r', 'id': 1, 'synset': 'acorn.n.01', 'synonyms': ['acorn'], 'def': 'nut from an oak tree', 'name': 'acorn'}, {'frequency': 'c', 'id': 2, 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'id': 3, 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'id': 4, 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'c', 'id': 5, 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'id': 6, 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'r', 'id': 7, 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'id': 8, 'synset': 'almond.n.02', 'synonyms': ['almond'], 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'id': 9, 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'r', 'id': 10, 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'id': 11, 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'id': 12, 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 
'id': 13, 'synset': 'apple.n.01', 'synonyms': ['apple'], 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'id': 14, 'synset': 'apple_juice.n.01', 'synonyms': ['apple_juice'], 'def': 'the juice of apples', 'name': 'apple_juice'}, {'frequency': 'r', 'id': 15, 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'id': 16, 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'id': 17, 'synset': 'apron.n.01', 'synonyms': ['apron'], 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'id': 18, 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'c', 'id': 19, 'synset': 'armband.n.02', 'synonyms': ['armband'], 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'id': 20, 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'id': 21, 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'def': 'a large wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'id': 22, 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'id': 23, 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'id': 24, 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'id': 25, 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'id': 26, 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'id': 27, 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'c', 'id': 28, 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'id': 29, 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'id': 30, 'synset': 'awning.n.01', 'synonyms': ['awning'], 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'id': 31, 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'f', 'id': 32, 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 
'name': 'baby_buggy'}, {'frequency': 'c', 'id': 33, 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'id': 34, 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'id': 35, 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'id': 36, 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'id': 37, 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'id': 38, 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'id': 39, 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'id': 40, 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'id': 41, 'synset': 'ball.n.06', 'synonyms': ['ball'], 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'id': 42, 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'id': 43, 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'id': 44, 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'id': 45, 'synset': 'banana.n.02', 'synonyms': ['banana'], 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'r', 'id': 46, 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'id': 47, 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'c', 'id': 48, 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'id': 49, 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'id': 50, 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'id': 51, 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'id': 52, 'synset': 'barge.n.01', 'synonyms': ['barge'], 'def': 'a flatbottom boat for carrying heavy 
loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'id': 53, 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'id': 54, 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'id': 55, 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'id': 56, 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'id': 57, 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 'f', 'id': 58, 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'id': 59, 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'id': 60, 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'id': 61, 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'id': 62, 'synset': 'basket.n.03', 'synonyms': ['basketball_hoop'], 'def': 'metal hoop supporting a net through which players try to throw the basketball', 'name': 'basketball_hoop'}, {'frequency': 'c', 'id': 63, 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'id': 64, 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'r', 'id': 65, 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'id': 66, 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'id': 67, 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'id': 68, 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'id': 69, 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'id': 70, 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'id': 71, 'synset': 'battery.n.02', 'synonyms': ['battery'], 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'id': 72, 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'def': 'large and light ball; for play at the seaside', 'name': 
'beachball'}, {'frequency': 'c', 'id': 73, 'synset': 'bead.n.01', 'synonyms': ['bead'], 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'r', 'id': 74, 'synset': 'beaker.n.01', 'synonyms': ['beaker'], 'def': 'a flatbottomed jar made of glass or plastic; used for chemistry', 'name': 'beaker'}, {'frequency': 'c', 'id': 75, 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'id': 76, 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'id': 77, 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'id': 78, 'synset': 'bear.n.01', 'synonyms': ['bear'], 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'id': 79, 'synset': 'bed.n.01', 'synonyms': ['bed'], 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'c', 'id': 80, 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'id': 81, 'synset': 'beef.n.01', 'synonyms': ['cow'], 'def': 'cattle that are reared for their meat', 'name': 'cow'}, {'frequency': 'c', 'id': 82, 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'id': 83, 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'id': 84, 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'id': 85, 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'id': 86, 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'id': 87, 'synset': 'bell.n.01', 'synonyms': ['bell'], 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'id': 88, 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'id': 89, 'synset': 'belt.n.02', 'synonyms': ['belt'], 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'id': 90, 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'id': 91, 'synset': 'bench.n.01', 'synonyms': ['bench'], 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'id': 92, 'synset': 'beret.n.01', 'synonyms': ['beret'], 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'id': 93, 'synset': 'bib.n.02', 'synonyms': ['bib'], 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'id': 94, 'synset': 'bible.n.01', 'synonyms': ['Bible'], 
'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'id': 95, 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'id': 96, 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'c', 'id': 97, 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'id': 98, 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'id': 99, 'synset': 'bird.n.01', 'synonyms': ['bird'], 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'r', 'id': 100, 'synset': 'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'r', 'id': 101, 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'id': 102, 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'id': 103, 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'id': 104, 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'id': 105, 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'id': 106, 'synset': 'biscuit.n.01', 'synonyms': ['biscuit_(bread)'], 'def': 'small round bread leavened with baking-powder or soda', 'name': 'biscuit_(bread)'}, {'frequency': 'r', 'id': 107, 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'id': 108, 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'id': 109, 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'id': 110, 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'id': 111, 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'id': 112, 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'id': 113, 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'c', 'id': 114, 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'def': 'a light that flashes on and off; used as a signal or to send 
messages', 'name': 'blinker'}, {'frequency': 'c', 'id': 115, 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'id': 116, 'synset': 'boar.n.02', 'synonyms': ['boar'], 'def': 'an uncastrated male hog', 'name': 'boar'}, {'frequency': 'r', 'id': 117, 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'id': 118, 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'c', 'id': 119, 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'def': 'a thing around which thread/tape/film or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'r', 'id': 120, 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'id': 121, 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'id': 122, 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'id': 123, 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'id': 124, 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'id': 125, 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'id': 126, 'synset': 'book.n.01', 'synonyms': ['book'], 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'r', 'id': 127, 'synset': 'book_bag.n.01', 'synonyms': ['book_bag'], 'def': 'a bag in which students carry their books', 'name': 'book_bag'}, {'frequency': 'c', 'id': 128, 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'id': 129, 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'id': 130, 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'id': 131, 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'id': 132, 'synset': 'boot.n.01', 'synonyms': ['boot'], 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'id': 133, 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'id': 134, 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'id': 135, 'synset': 'bouquet.n.01', 
'synonyms': ['bouquet'], 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'id': 136, 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'id': 137, 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'id': 138, 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'id': 139, 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'id': 140, 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 'id': 141, 'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'id': 142, 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'r', 'id': 143, 'synset': 'bowling_pin.n.01', 'synonyms': ['bowling_pin'], 'def': 'a club-shaped wooden object used in bowling', 'name': 'bowling_pin'}, {'frequency': 'r', 'id': 144, 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'id': 145, 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'id': 146, 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'id': 147, 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'id': 148, 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'id': 149, 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'r', 'id': 150, 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'c', 'id': 151, 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'id': 152, 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'c', 'id': 153, 'synset': 'bristle_brush.n.01', 'synonyms': ['bristle_brush'], 'def': 'a brush that is made with the short stiff hairs of an animal or plant', 'name': 'bristle_brush'}, {'frequency': 'f', 'id': 154, 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'id': 155, 'synset': 'brooch.n.01', 
'synonyms': ['broach'], 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'id': 156, 'synset': 'broom.n.01', 'synonyms': ['broom'], 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'id': 157, 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'id': 158, 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'id': 159, 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'id': 160, 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, {'frequency': 'r', 'id': 161, 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'id': 162, 'synset': 'bull.n.11', 'synonyms': ['bull'], 'def': 'mature male cow', 'name': 'bull'}, {'frequency': 'r', 'id': 163, 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'id': 164, 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'id': 165, 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'id': 166, 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'id': 167, 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'id': 168, 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'r', 'id': 169, 'synset': 'bully_beef.n.01', 'synonyms': ['corned_beef', 'corn_beef'], 'def': 'beef cured or pickled in brine', 'name': 'corned_beef'}, {'frequency': 'f', 'id': 170, 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'id': 171, 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'id': 172, 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'id': 173, 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'id': 174, 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'id': 175, 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'def': "a card on which are 
printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'c', 'id': 176, 'synset': 'butcher_knife.n.01', 'synonyms': ['butcher_knife'], 'def': 'a large sharp knife for cutting or trimming meat', 'name': 'butcher_knife'}, {'frequency': 'c', 'id': 177, 'synset': 'butter.n.01', 'synonyms': ['butter'], 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'id': 178, 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'id': 179, 'synset': 'button.n.01', 'synonyms': ['button'], 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'id': 180, 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'id': 181, 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'r', 'id': 182, 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'id': 183, 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'id': 184, 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'id': 185, 'synset': 'cake.n.03', 'synonyms': ['cake'], 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'id': 186, 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'id': 187, 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'id': 188, 'synset': 'calf.n.01', 'synonyms': ['calf'], 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'id': 189, 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'id': 190, 'synset': 'camel.n.01', 'synonyms': ['camel'], 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'id': 191, 'synset': 'camera.n.01', 'synonyms': ['camera'], 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'id': 192, 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'id': 193, 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'id': 194, 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, 
{'frequency': 'c', 'id': 195, 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'r', 'id': 196, 'synset': 'candelabrum.n.01', 'synonyms': ['candelabrum', 'candelabra'], 'def': 'branched candlestick; ornamental; has several lights', 'name': 'candelabrum'}, {'frequency': 'f', 'id': 197, 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'id': 198, 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'id': 199, 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'id': 200, 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'id': 201, 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'id': 202, 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'r', 'id': 203, 'synset': 'cannon.n.02', 'synonyms': ['cannon'], 'def': 'heavy gun fired from a tank', 'name': 'cannon'}, {'frequency': 'c', 'id': 204, 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 'name': 'canoe'}, {'frequency': 'r', 'id': 205, 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'id': 206, 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'c', 'id': 207, 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'id': 208, 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'r', 'id': 209, 'synset': 'cape.n.02', 'synonyms': ['cape'], 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'id': 210, 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'id': 211, 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'id': 212, 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'def': 'a wheeled vehicle adapted to the rails of railroad', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'id': 213, 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'id': 214, 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'id': 215, 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'def': 'a card 
certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'id': 216, 'synset': 'card.n.03', 'synonyms': ['card'], 'def': 'a rectangular piece of paper used to send messages (e.g. greetings or pictures)', 'name': 'card'}, {'frequency': 'r', 'id': 217, 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'id': 218, 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'id': 219, 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'id': 220, 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'id': 221, 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'c', 'id': 222, 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'id': 223, 'synset': 'cart.n.01', 'synonyms': ['cart'], 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'id': 224, 'synset': 'carton.n.02', 'synonyms': ['carton'], 'def': 'a box made of cardboard; opens by flaps on top', 'name': 'carton'}, {'frequency': 'c', 'id': 225, 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'id': 226, 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'id': 227, 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'id': 228, 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'id': 229, 'synset': 'cat.n.01', 'synonyms': ['cat'], 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'c', 'id': 230, 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'r', 'id': 231, 'synset': 'caviar.n.01', 'synonyms': ['caviar', 'caviare'], 'def': "salted roe of sturgeon or other large fish; usually served as an hors d'oeuvre", 'name': 'caviar'}, {'frequency': 'c', 'id': 232, 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'id': 233, 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'c', 'id': 234, 'synset': 'celery.n.01', 'synonyms': ['celery'], 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'id': 235, 'synset': 'cellular_telephone.n.01', 'synonyms': 
['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'id': 236, 'synset': 'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 'chain_mail'}, {'frequency': 'f', 'id': 237, 'synset': 'chair.n.01', 'synonyms': ['chair'], 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'id': 238, 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'id': 239, 'synset': 'champagne.n.01', 'synonyms': ['champagne'], 'def': 'a white sparkling wine produced in Champagne or resembling that produced there', 'name': 'champagne'}, {'frequency': 'f', 'id': 240, 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'id': 241, 'synset': 'chap.n.04', 'synonyms': ['chap'], 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'id': 242, 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'id': 243, 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'id': 244, 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'id': 245, 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'r', 'id': 246, 'synset': 'chest_of_drawers.n.01', 'synonyms': ['chest_of_drawers_(furniture)', 'bureau_(furniture)', 'chest_(furniture)'], 'def': 'furniture with drawers for keeping clothes', 'name': 'chest_of_drawers_(furniture)'}, {'frequency': 'c', 'id': 247, 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'id': 248, 'synset': 'chicken_wire.n.01', 'synonyms': ['chicken_wire'], 'def': 'a galvanized wire network with a hexagonal mesh; used to build fences', 'name': 'chicken_wire'}, {'frequency': 'r', 'id': 249, 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'r', 'id': 250, 'synset': 'chihuahua.n.03', 'synonyms': ['Chihuahua'], 'def': 'an old breed of tiny short-haired dog with protruding eyes from Mexico', 'name': 'Chihuahua'}, {'frequency': 'r', 'id': 251, 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'id': 252, 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'id': 253, 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'def': 'dishware made of high quality porcelain', 'name': 
'chinaware'}, {'frequency': 'c', 'id': 254, 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, {'frequency': 'r', 'id': 255, 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'id': 256, 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'id': 257, 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'id': 258, 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'id': 259, 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 'id': 260, 'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'def': 'necklace that fits tightly around the neck', 'name': 'choker'}, {'frequency': 'f', 'id': 261, 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'c', 'id': 262, 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'id': 263, 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'id': 264, 'synset': 'chute.n.02', 'synonyms': ['slide'], 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'id': 265, 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'id': 266, 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'c', 'id': 267, 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'id': 268, 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'id': 269, 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'id': 270, 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'r', 'id': 271, 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'id': 272, 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'id': 273, 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'id': 274, 'synset': 'clip.n.03', 
'synonyms': ['clip'], 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'id': 275, 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'def': 'a small writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'f', 'id': 276, 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'id': 277, 'synset': 'clock_tower.n.01', 'synonyms': ['clock_tower'], 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'id': 278, 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'id': 279, 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'id': 280, 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'id': 281, 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'id': 282, 'synset': 'coat.n.01', 'synonyms': ['coat'], 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'id': 283, 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'r', 'id': 284, 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'id': 285, 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'c', 'id': 286, 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'r', 'id': 287, 'synset': 'coffee_filter.n.01', 'synonyms': ['coffee_filter'], 'def': 'filter (usually of paper) that passes the coffee and retains the coffee grounds', 'name': 'coffee_filter'}, {'frequency': 'f', 'id': 288, 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'id': 289, 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'id': 290, 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'id': 291, 'synset': 'coil.n.05', 'synonyms': ['coil'], 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'id': 292, 'synset': 'coin.n.01', 'synonyms': ['coin'], 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'r', 'id': 293, 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 
'colander'}, {'frequency': 'c', 'id': 294, 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'id': 295, 'synset': 'coloring_material.n.01', 'synonyms': ['coloring_material', 'colouring_material'], 'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'id': 296, 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'id': 297, 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'id': 298, 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'f', 'id': 299, 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'r', 'id': 300, 'synset': 'concrete_mixer.n.01', 'synonyms': ['concrete_mixer', 'cement_mixer'], 'def': 'a machine with a large revolving drum in which cement/concrete is mixed', 'name': 'concrete_mixer'}, {'frequency': 'f', 'id': 301, 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'id': 302, 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'id': 303, 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'id': 304, 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'c', 'id': 305, 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'id': 306, 'synset': 'cookie_jar.n.01', 'synonyms': ['cookie_jar', 'cooky_jar'], 'def': 'a jar in which cookies are kept (and sometimes money is hidden)', 'name': 'cookie_jar'}, {'frequency': 'r', 'id': 307, 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'id': 308, 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'c', 'id': 309, 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'id': 310, 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'r', 'id': 311, 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'c', 'id': 312, 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'def': 'ears of corn that can be prepared and served for human food', 'name': 'edible_corn'}, 
{'frequency': 'r', 'id': 313, 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 'id': 314, 'synset': 'cornet.n.01', 'synonyms': ['cornet', 'horn', 'trumpet'], 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'id': 315, 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'id': 316, 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'r', 'id': 317, 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'r', 'id': 318, 'synset': 'cos.n.02', 'synonyms': ['romaine_lettuce'], 'def': 'lettuce with long dark-green leaves in a loosely packed elongated head', 'name': 'romaine_lettuce'}, {'frequency': 'c', 'id': 319, 'synset': 'costume.n.04', 'synonyms': ['costume'], 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'id': 320, 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'id': 321, 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'r', 'id': 322, 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'id': 323, 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'r', 'id': 324, 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'c', 'id': 325, 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'id': 326, 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'id': 327, 'synset': 'crate.n.01', 'synonyms': ['crate'], 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'r', 'id': 328, 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'id': 329, 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'r', 'id': 330, 'synset': 'credit_card.n.01', 'synonyms': ['credit_card', 'charge_card', 'debit_card'], 'def': 'a card, usually plastic, used to pay for goods and services', 'name': 'credit_card'}, {'frequency': 'c', 'id': 331, 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'id': 332, 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'def': 'baby bed with high sides made of slats', 'name': 
'crib'}, {'frequency': 'c', 'id': 333, 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'def': 'an earthen jar (made of baked clay)', 'name': 'crock_pot'}, {'frequency': 'f', 'id': 334, 'synset': 'crossbar.n.01', 'synonyms': ['crossbar'], 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'id': 335, 'synset': 'crouton.n.01', 'synonyms': ['crouton'], 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'r', 'id': 336, 'synset': 'crow.n.01', 'synonyms': ['crow'], 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'c', 'id': 337, 'synset': 'crown.n.04', 'synonyms': ['crown'], 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'id': 338, 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'id': 339, 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'id': 340, 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'c', 'id': 341, 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'r', 'id': 342, 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'id': 343, 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'r', 'id': 344, 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'id': 345, 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'id': 346, 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'id': 347, 'synset': 'cup.n.01', 'synonyms': ['cup'], 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'id': 348, 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'def': 'a metal vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'c', 'id': 349, 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'id': 350, 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'id': 351, 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'id': 352, 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, {'frequency': 
'f', 'id': 353, 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'id': 354, 'synset': 'custard.n.01', 'synonyms': ['custard'], 'def': 'sweetened mixture of milk and eggs baked or boiled or frozen', 'name': 'custard'}, {'frequency': 'c', 'id': 355, 'synset': 'cutter.n.06', 'synonyms': ['cutting_tool'], 'def': 'a cutting implement; a tool for cutting', 'name': 'cutting_tool'}, {'frequency': 'r', 'id': 356, 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'id': 357, 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'id': 358, 'synset': 'dachshund.n.01', 'synonyms': ['dachshund', 'dachsie', 'badger_dog'], 'def': 'small long-bodied short-legged breed of dog having a short sleek coat and long drooping ears', 'name': 'dachshund'}, {'frequency': 'r', 'id': 359, 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'id': 360, 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'id': 361, 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'id': 362, 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'id': 363, 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'id': 364, 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'id': 365, 'synset': 'desk.n.01', 'synonyms': ['desk'], 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'id': 366, 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'id': 367, 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'id': 368, 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'def': 'a daily written record of (usually personal) experiences and observations', 'name': 'diary'}, {'frequency': 'r', 'id': 369, 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'id': 370, 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'id': 371, 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'id': 372, 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 
'tuxedo'], 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'c', 'id': 373, 'synset': 'dish.n.01', 'synonyms': ['dish'], 'def': 'a piece of dishware normally used as a container for holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'id': 374, 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, {'frequency': 'c', 'id': 375, 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'def': 'a cloth for washing dishes', 'name': 'dishrag'}, {'frequency': 'c', 'id': 376, 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'id': 377, 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'id': 378, 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid'], 'def': 'a low-sudsing detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'r', 'id': 379, 'synset': 'diskette.n.01', 'synonyms': ['diskette', 'floppy', 'floppy_disk'], 'def': 'a small plastic magnetic disk enclosed in a stiff envelope used to store data', 'name': 'diskette'}, {'frequency': 'c', 'id': 380, 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'c', 'id': 381, 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'id': 382, 'synset': 'dog.n.01', 'synonyms': ['dog'], 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'id': 383, 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'c', 'id': 384, 'synset': 'doll.n.01', 'synonyms': ['doll'], 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'id': 385, 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'id': 386, 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'id': 387, 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'r', 'id': 388, 'synset': 'domino.n.03', 'synonyms': ['eye_mask'], 'def': 'a mask covering the upper part of the face but with holes for the eyes', 'name': 'eye_mask'}, {'frequency': 'r', 'id': 389, 'synset': 'doorbell.n.01', 'synonyms': ['doorbell', 'buzzer'], 'def': 'a button at an outer door that gives a ringing or buzzing signal when pushed', 'name': 'doorbell'}, {'frequency': 'f', 'id': 390, 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'id': 391, 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 
'id': 392, 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'id': 393, 'synset': 'dove.n.01', 'synonyms': ['dove'], 'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'id': 394, 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'id': 395, 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'def': 'a boxlike container in a piece of furniture; made so as to slide in and out', 'name': 'drawer'}, {'frequency': 'c', 'id': 396, 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'id': 397, 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'id': 398, 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'c', 'id': 399, 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'c', 'id': 400, 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'id': 401, 'synset': 'drill.n.01', 'synonyms': ['drill'], 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'id': 402, 'synset': 'drinking_fountain.n.01', 'synonyms': ['drinking_fountain'], 'def': 'a public fountain to provide a jet of drinking water', 'name': 'drinking_fountain'}, {'frequency': 'r', 'id': 403, 'synset': 'drone.n.04', 'synonyms': ['drone'], 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'id': 404, 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'id': 405, 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'id': 406, 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'id': 407, 'synset': 'duck.n.01', 'synonyms': ['duck'], 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'r', 'id': 408, 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'id': 409, 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'id': 410, 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'def': 'a large cylindrical bag of heavy cloth', 'name': 'duffel_bag'}, {'frequency': 'r', 'id': 411, 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'id': 412, 
'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'id': 413, 'synset': 'dustpan.n.02', 'synonyms': ['dustpan'], 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'r', 'id': 414, 'synset': 'dutch_oven.n.02', 'synonyms': ['Dutch_oven'], 'def': 'iron or earthenware cooking pot; used for stews', 'name': 'Dutch_oven'}, {'frequency': 'c', 'id': 415, 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'id': 416, 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'id': 417, 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'def': 'a soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'id': 418, 'synset': 'earring.n.01', 'synonyms': ['earring'], 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'id': 419, 'synset': 'easel.n.01', 'synonyms': ['easel'], 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'id': 420, 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'id': 421, 'synset': 'eel.n.01', 'synonyms': ['eel'], 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'id': 422, 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'id': 423, 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'id': 424, 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'id': 425, 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'id': 426, 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'id': 427, 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'id': 428, 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'id': 429, 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'r', 'id': 430, 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'id': 431, 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'id': 432, 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 
'r', 'id': 433, 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 'id': 434, 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'id': 435, 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'id': 436, 'synset': 'fan.n.01', 'synonyms': ['fan'], 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'id': 437, 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'id': 438, 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'id': 439, 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'id': 440, 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'r', 'id': 441, 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'id': 442, 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'id': 443, 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'id': 444, 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'id': 445, 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'id': 446, 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'id': 447, 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'c', 'id': 448, 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'c', 'id': 449, 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'id': 450, 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'id': 451, 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'def': 'an 
open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'id': 452, 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 'hydrant'], 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'c', 'id': 453, 'synset': 'fish.n.01', 'synonyms': ['fish'], 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'r', 'id': 454, 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'id': 455, 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'def': 'a transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'r', 'id': 456, 'synset': 'fishing_boat.n.01', 'synonyms': ['fishing_boat', 'fishing_vessel'], 'def': 'a vessel for fishing', 'name': 'fishing_boat'}, {'frequency': 'c', 'id': 457, 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'id': 458, 'synset': 'flag.n.01', 'synonyms': ['flag'], 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'id': 459, 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'id': 460, 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'id': 461, 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'r', 'id': 462, 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'id': 463, 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'id': 464, 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'id': 465, 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'id': 466, 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'id': 467, 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'id': 468, 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'r', 'id': 469, 'synset': 'foal.n.01', 'synonyms': ['foal'], 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'id': 470, 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'id': 471, 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'def': 'a 
kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'id': 472, 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'id': 473, 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'id': 474, 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'id': 475, 'synset': 'fork.n.01', 'synonyms': ['fork'], 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'r', 'id': 476, 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'r', 'id': 477, 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'r', 'id': 478, 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'id': 479, 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'def': 'anything that freshens', 'name': 'freshener'}, {'frequency': 'f', 'id': 480, 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'id': 481, 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'def': 'a tailless stout-bodied amphibians with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'id': 482, 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'r', 'id': 483, 'synset': 'fruit_salad.n.01', 'synonyms': ['fruit_salad'], 'def': 'salad composed of fruits', 'name': 'fruit_salad'}, {'frequency': 'c', 'id': 484, 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'id': 485, 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'id': 486, 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'c', 'id': 487, 'synset': 'futon.n.01', 'synonyms': ['futon'], 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'id': 488, 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'id': 489, 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'id': 490, 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'id': 491, 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'def': 'a hose used for watering a lawn or garden', 'name': 
'garden_hose'}, {'frequency': 'c', 'id': 492, 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 'gargle'}, {'frequency': 'r', 'id': 493, 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'id': 494, 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'id': 495, 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'r', 'id': 496, 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'def': 'small swift graceful antelope of Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'id': 497, 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'id': 498, 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'c', 'id': 499, 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'id': 500, 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'id': 501, 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'id': 502, 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'id': 503, 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'id': 504, 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'id': 505, 'synset': 'globe.n.03', 'synonyms': ['globe'], 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'id': 506, 'synset': 'glove.n.02', 'synonyms': ['glove'], 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'id': 507, 'synset': 'goat.n.01', 'synonyms': ['goat'], 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'id': 508, 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'id': 509, 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'r', 'id': 510, 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'id': 511, 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'def': 'a small motor vehicle in which golfers can 
ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'id': 512, 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'def': 'long narrow flat-bottomed boat propelled by sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'id': 513, 'synset': 'goose.n.01', 'synonyms': ['goose'], 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'id': 514, 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'id': 515, 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'r', 'id': 516, 'synset': 'gown.n.04', 'synonyms': ['surgical_gown', 'scrubs_(surgical_clothing)'], 'def': 'protective garment worn by surgeons during operations', 'name': 'surgical_gown'}, {'frequency': 'f', 'id': 517, 'synset': 'grape.n.01', 'synonyms': ['grape'], 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'r', 'id': 518, 'synset': 'grasshopper.n.01', 'synonyms': ['grasshopper'], 'def': 'plant-eating insect with hind legs adapted for leaping', 'name': 'grasshopper'}, {'frequency': 'c', 'id': 519, 'synset': 'grater.n.01', 'synonyms': ['grater'], 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'id': 520, 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'id': 521, 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'c', 'id': 522, 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'c', 'id': 523, 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'id': 524, 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'r', 'id': 525, 'synset': 'grillroom.n.01', 'synonyms': ['grillroom', 'grill_(restaurant)'], 'def': 'a restaurant where food is cooked on a grill', 'name': 'grillroom'}, {'frequency': 'r', 'id': 526, 'synset': 'grinder.n.04', 'synonyms': ['grinder_(tool)'], 'def': 'a machine tool that polishes metal', 'name': 'grinder_(tool)'}, {'frequency': 'r', 'id': 527, 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'id': 528, 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'id': 529, 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'r', 'id': 530, 'synset': 'guacamole.n.01', 'synonyms': ['guacamole'], 'def': 'a dip made of mashed avocado mixed with chopped onions and other seasonings', 'name': 'guacamole'}, {'frequency': 'f', 'id': 531, 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'def': 'a 
stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'id': 532, 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'id': 533, 'synset': 'gun.n.01', 'synonyms': ['gun'], 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'r', 'id': 534, 'synset': 'hair_spray.n.01', 'synonyms': ['hair_spray'], 'def': 'substance sprayed on the hair to hold it in place', 'name': 'hair_spray'}, {'frequency': 'c', 'id': 535, 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'id': 536, 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'id': 537, 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'f', 'id': 538, 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 'id': 539, 'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'id': 540, 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'r', 'id': 541, 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'id': 542, 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'r', 'id': 543, 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'c', 'id': 544, 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'id': 545, 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'id': 546, 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'id': 547, 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'id': 548, 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'id': 549, 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'id': 550, 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'id': 551, 
'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'id': 552, 'synset': 'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'id': 553, 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'id': 554, 'synset': 'hat.n.01', 'synonyms': ['hat'], 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'id': 555, 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'r', 'id': 556, 'synset': 'hatch.n.03', 'synonyms': ['hatch'], 'def': 'a movable barrier covering a hatchway', 'name': 'hatch'}, {'frequency': 'c', 'id': 557, 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'def': 'a garment that covers the head and face', 'name': 'veil'}, {'frequency': 'f', 'id': 558, 'synset': 'headband.n.01', 'synonyms': ['headband'], 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'id': 559, 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'def': 'a vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'id': 560, 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'id': 561, 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'id': 562, 'synset': 'headset.n.01', 'synonyms': ['headset'], 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'id': 563, 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'r', 'id': 564, 'synset': 'hearing_aid.n.02', 'synonyms': ['hearing_aid'], 'def': 'an acoustic device used to direct sound to the ear of a hearing-impaired person', 'name': 'hearing_aid'}, {'frequency': 'c', 'id': 565, 'synset': 'heart.n.02', 'synonyms': ['heart'], 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'id': 566, 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'id': 567, 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'id': 568, 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'id': 569, 'synset': 'heron.n.02', 'synonyms': ['heron'], 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'id': 570, 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'def': 'a chair for feeding a very 
young child', 'name': 'highchair'}, {'frequency': 'f', 'id': 571, 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'def': 'a joint that holds two parts together so that one can swing relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'id': 572, 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'id': 573, 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'id': 574, 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'id': 575, 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'id': 576, 'synset': 'honey.n.01', 'synonyms': ['honey'], 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'id': 577, 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'id': 578, 'synset': 'hook.n.05', 'synonyms': ['hook'], 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'f', 'id': 579, 'synset': 'horse.n.01', 'synonyms': ['horse'], 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'id': 580, 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'id': 581, 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'id': 582, 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'id': 583, 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'id': 584, 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'id': 585, 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'r', 'id': 586, 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'id': 587, 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'c', 'id': 588, 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'id': 589, 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'id': 590, 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'def': 'ice cream or water ice on a small wooden stick', 'name': 
'popsicle'}, {'frequency': 'c', 'id': 591, 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'def': 'an appliance included in some electric refrigerators for making ice cubes', 'name': 'ice_maker'}, {'frequency': 'r', 'id': 592, 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'id': 593, 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'r', 'id': 594, 'synset': 'ice_tea.n.01', 'synonyms': ['ice_tea', 'iced_tea'], 'def': 'strong tea served over ice', 'name': 'ice_tea'}, {'frequency': 'c', 'id': 595, 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'id': 596, 'synset': 'incense.n.01', 'synonyms': ['incense'], 'def': 'a substance that produces a fragrant odor when burned', 'name': 'incense'}, {'frequency': 'r', 'id': 597, 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'c', 'id': 598, 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'id': 599, 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 'smoothing_iron_(for_clothing)'], 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'r', 'id': 600, 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'id': 601, 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'r', 'id': 602, 'synset': 'jam.n.01', 'synonyms': ['jam'], 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'id': 603, 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'id': 604, 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'id': 605, 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'id': 606, 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'id': 607, 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'c', 'id': 608, 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'id': 609, 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'r', 'id': 610, 'synset': 'jump_suit.n.01', 
'synonyms': ['jumpsuit'], 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'id': 611, 'synset': 'kayak.n.01', 'synonyms': ['kayak'], 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'id': 612, 'synset': 'keg.n.02', 'synonyms': ['keg'], 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'id': 613, 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'id': 614, 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'id': 615, 'synset': 'key.n.01', 'synonyms': ['key'], 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'id': 616, 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'r', 'id': 617, 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'def': 'a knee-length pleated tartan skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'id': 618, 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'id': 619, 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'c', 'id': 620, 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'id': 621, 'synset': 'kite.n.03', 'synonyms': ['kite'], 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'id': 622, 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'id': 623, 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'id': 624, 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'id': 625, 'synset': 'knife.n.01', 'synonyms': ['knife'], 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'id': 626, 'synset': 'knight.n.02', 'synonyms': ['knight_(chess_piece)', 'horse_(chess_piece)'], 'def': 'a chess game piece shaped to resemble the head of a horse', 'name': 'knight_(chess_piece)'}, {'frequency': 'r', 'id': 627, 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'id': 628, 'synset': 'knob.n.02', 'synonyms': ['knob'], 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'id': 629, 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'id': 630, 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'def': 'sluggish tailless Australian marsupial with grey 
furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'id': 631, 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'def': 'a light coat worn to protect clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'id': 632, 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'id': 633, 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'r', 'id': 634, 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'c', 'id': 635, 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'id': 636, 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'id': 637, 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'id': 638, 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'def': 'a metal post supporting an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'id': 639, 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'id': 640, 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'id': 641, 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'id': 642, 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'id': 643, 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'c', 'id': 644, 'synset': 'latch.n.02', 'synonyms': ['latch'], 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'id': 645, 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'id': 646, 'synset': 'leather.n.01', 'synonyms': ['leather'], 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'id': 647, 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'id': 648, 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'f', 'id': 649, 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'id': 650, 'synset': 'lemonade.n.01', 'synonyms': 
['lemonade'], 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'id': 651, 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'def': 'leafy plant commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'id': 652, 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'id': 653, 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'id': 654, 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'id': 655, 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'def': 'glass bulb or tube shaped electric device that emits light (DO NOT MARK LAMPS AS A WHOLE)', 'name': 'lightbulb'}, {'frequency': 'r', 'id': 656, 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'def': 'a metallic conductor that is attached to a high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'c', 'id': 657, 'synset': 'lime.n.06', 'synonyms': ['lime'], 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'id': 658, 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'r', 'id': 659, 'synset': 'linen.n.02', 'synonyms': ['linen_paper'], 'def': 'a high-quality paper made of linen fibers or with a linen finish', 'name': 'linen_paper'}, {'frequency': 'c', 'id': 660, 'synset': 'lion.n.01', 'synonyms': ['lion'], 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'id': 661, 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'c', 'id': 662, 'synset': 'lipstick.n.01', 'synonyms': ['lipstick', 'lip_rouge'], 'def': 'makeup that is used to color the lips', 'name': 'lipstick'}, {'frequency': 'r', 'id': 663, 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'def': 'an alcoholic beverage that is distilled rather than fermented', 'name': 'liquor'}, {'frequency': 'r', 'id': 664, 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'r', 'id': 665, 'synset': 'loafer.n.02', 'synonyms': ['Loafer_(type_of_shoe)'], 'def': 'a low leather step-in shoe', 'name': 'Loafer_(type_of_shoe)'}, {'frequency': 'f', 'id': 666, 'synset': 'log.n.01', 'synonyms': ['log'], 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'id': 667, 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'c', 'id': 668, 'synset': 'lotion.n.01', 'synonyms': ['lotion'], 'def': 'any of various cosmetic preparations that are applied to the skin', 'name': 'lotion'}, {'frequency': 'f', 'id': 669, 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 
'speaker_(stero_equipment)'}, {'frequency': 'c', 'id': 670, 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 'r', 'id': 671, 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'id': 672, 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'id': 673, 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'r', 'id': 674, 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'c', 'id': 675, 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'id': 676, 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'id': 677, 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'c', 'id': 678, 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'id': 679, 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'def': 'a container (usually in a barn or stable) from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'id': 680, 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'c', 'id': 681, 'synset': 'map.n.01', 'synonyms': ['map'], 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'c', 'id': 682, 'synset': 'marker.n.03', 'synonyms': ['marker'], 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'id': 683, 'synset': 'martini.n.01', 'synonyms': ['martini'], 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'id': 684, 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'id': 685, 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'id': 686, 'synset': 'masher.n.02', 'synonyms': ['masher'], 'def': 'a kitchen utensil used for mashing (e.g. 
potatoes)', 'name': 'masher'}, {'frequency': 'f', 'id': 687, 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'id': 688, 'synset': 'mast.n.01', 'synonyms': ['mast'], 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'id': 689, 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'id': 690, 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'id': 691, 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'id': 692, 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'id': 693, 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'id': 694, 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'id': 695, 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'r', 'id': 696, 'synset': 'melon.n.01', 'synonyms': ['melon'], 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'id': 697, 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'id': 698, 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'id': 699, 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'def': 'kitchen appliance that cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'id': 700, 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'c', 'id': 701, 'synset': 'milk.n.01', 'synonyms': ['milk'], 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'f', 'id': 702, 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'id': 703, 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'id': 704, 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'id': 705, 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'id': 706, 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'def': 'a kitchen utensil that is used for mixing 
foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'id': 707, 'synset': 'money.n.03', 'synonyms': ['money'], 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'id': 708, 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'id': 709, 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'id': 710, 'synset': 'motor.n.01', 'synonyms': ['motor'], 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'id': 711, 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'id': 712, 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'r', 'id': 713, 'synset': 'motorboat.n.01', 'synonyms': ['motorboat', 'powerboat'], 'def': 'a boat propelled by an internal-combustion engine', 'name': 'motorboat'}, {'frequency': 'f', 'id': 714, 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'id': 715, 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'r', 'id': 716, 'synset': 'mouse.n.01', 'synonyms': ['mouse_(animal_rodent)'], 'def': 'a small rodent with pointed snouts and small ears on elongated bodies with slender usually hairless tails', 'name': 'mouse_(animal_rodent)'}, {'frequency': 'f', 'id': 717, 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'def': 'a computer input device that controls an on-screen pointer', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'id': 718, 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 'mousepad'}, {'frequency': 'c', 'id': 719, 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'id': 720, 'synset': 'mug.n.04', 'synonyms': ['mug'], 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'id': 721, 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'id': 722, 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'r', 'id': 723, 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'id': 724, 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'r', 'id': 725, 'synset': 'nameplate.n.01', 'synonyms': ['nameplate'], 'def': 'a plate bearing a name', 'name': 'nameplate'}, 
{'frequency': 'f', 'id': 726, 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'id': 727, 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'id': 728, 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'id': 729, 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'r', 'id': 730, 'synset': 'needle.n.03', 'synonyms': ['needle'], 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'id': 731, 'synset': 'nest.n.01', 'synonyms': ['nest'], 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'r', 'id': 732, 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'id': 733, 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'id': 734, 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'r', 'id': 735, 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'id': 736, 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'id': 737, 'synset': 'notepad.n.01', 'synonyms': ['notepad'], 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'c', 'id': 738, 'synset': 'nut.n.03', 'synonyms': ['nut'], 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'id': 739, 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'c', 'id': 740, 'synset': 'oar.n.01', 'synonyms': ['oar'], 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'id': 741, 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'id': 742, 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'id': 743, 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'id': 744, 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'def': 'oil from olives', 'name': 'olive_oil'}, 
{'frequency': 'r', 'id': 745, 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'id': 746, 'synset': 'onion.n.01', 'synonyms': ['onion'], 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'id': 747, 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'id': 748, 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'r', 'id': 749, 'synset': 'oregano.n.01', 'synonyms': ['oregano', 'marjoram'], 'def': 'aromatic Eurasian perennial herb used in cooking and baking', 'name': 'oregano'}, {'frequency': 'c', 'id': 750, 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'c', 'id': 751, 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'def': 'thick cushion used as a seat', 'name': 'ottoman'}, {'frequency': 'c', 'id': 752, 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'id': 753, 'synset': 'owl.n.01', 'synonyms': ['owl'], 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'id': 754, 'synset': 'packet.n.03', 'synonyms': ['packet'], 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'id': 755, 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'id': 756, 'synset': 'pad.n.04', 'synonyms': ['pad'], 'def': 'a flat mass of soft material used for protection, stuffing, or comfort', 'name': 'pad'}, {'frequency': 'c', 'id': 757, 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'def': 'a short light oar used without an oarlock to propel a canoe or small boat', 'name': 'paddle'}, {'frequency': 'c', 'id': 758, 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'r', 'id': 759, 'synset': 'paintbox.n.01', 'synonyms': ['paintbox'], 'def': "a box containing a collection of cubes or tubes of artists' paint", 'name': 'paintbox'}, {'frequency': 'c', 'id': 760, 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'id': 761, 'synset': 'painting.n.01', 'synonyms': ['painting'], 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'c', 'id': 762, 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'id': 763, 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'id': 764, 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'def': 'cooking utensil consisting of a wide 
metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'id': 765, 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'id': 766, 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'id': 767, 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'id': 768, 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'r', 'id': 769, 'synset': 'paper_clip.n.01', 'synonyms': ['paperclip'], 'def': 'a wire or plastic clip for holding sheets of paper together', 'name': 'paperclip'}, {'frequency': 'f', 'id': 770, 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'id': 771, 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'id': 772, 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'id': 773, 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'id': 774, 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'r', 'id': 775, 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'id': 776, 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'r', 'id': 777, 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'r', 'id': 778, 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'id': 779, 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'id': 780, 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'id': 781, 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'id': 782, 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'r', 'id': 783, 'synset': 'passport.n.02', 'synonyms': ['passport'], 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home 
country', 'name': 'passport'}, {'frequency': 'f', 'id': 784, 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'id': 785, 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'id': 786, 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'id': 787, 'synset': 'peach.n.03', 'synonyms': ['peach'], 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'id': 788, 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'c', 'id': 789, 'synset': 'pear.n.01', 'synonyms': ['pear'], 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'r', 'id': 790, 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'id': 791, 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'id': 792, 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'id': 793, 'synset': 'pen.n.01', 'synonyms': ['pen'], 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'c', 'id': 794, 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'id': 795, 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'id': 796, 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'id': 797, 'synset': 'pendulum.n.01', 'synonyms': ['pendulum'], 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'id': 798, 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'id': 799, 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'id': 800, 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'c', 'id': 801, 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'id': 802, 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'id': 803, 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'def': 'a toiletry that emits and diffuses a fragrant odor', 
'name': 'perfume'}, {'frequency': 'r', 'id': 804, 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'id': 805, 'synset': 'person.n.01', 'synonyms': ['baby', 'child', 'boy', 'girl', 'man', 'woman', 'person', 'human'], 'def': 'a human being', 'name': 'baby'}, {'frequency': 'r', 'id': 806, 'synset': 'pet.n.01', 'synonyms': ['pet'], 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'r', 'id': 807, 'synset': 'petfood.n.01', 'synonyms': ['petfood', 'pet-food'], 'def': 'food prepared for animal pets', 'name': 'petfood'}, {'frequency': 'r', 'id': 808, 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'id': 809, 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'id': 810, 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'c', 'id': 811, 'synset': 'piano.n.01', 'synonyms': ['piano'], 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'id': 812, 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'id': 813, 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'id': 814, 'synset': 'pie.n.01', 'synonyms': ['pie'], 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'id': 815, 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'id': 816, 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'def': "a child's coin bank (often shaped like a pig)", 'name': 'piggy_bank'}, {'frequency': 'f', 'id': 817, 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'id': 818, 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'id': 819, 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'id': 820, 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'id': 821, 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'id': 822, 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a 
stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'id': 823, 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'id': 824, 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'id': 825, 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'r', 'id': 826, 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'id': 827, 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'id': 828, 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'id': 829, 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'id': 830, 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'id': 831, 'synset': 'plate.n.04', 'synonyms': ['plate'], 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'id': 832, 'synset': 'platter.n.01', 'synonyms': ['platter'], 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'id': 833, 'synset': 'playing_card.n.01', 'synonyms': ['playing_card'], 'def': 'one of a pack of cards that are used to play card games', 'name': 'playing_card'}, {'frequency': 'r', 'id': 834, 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'id': 835, 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'id': 836, 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'id': 837, 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'id': 838, 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'id': 839, 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'id': 840, 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'r', 'id': 841, 'synset': 
'police_van.n.01', 'synonyms': ['police_van', 'police_wagon', 'paddy_wagon', 'patrol_wagon'], 'def': 'van used by police to transport prisoners', 'name': 'police_van'}, {'frequency': 'f', 'id': 842, 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'id': 843, 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'id': 844, 'synset': 'pony.n.05', 'synonyms': ['pony'], 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'id': 845, 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'id': 846, 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'r', 'id': 847, 'synset': 'portrait.n.02', 'synonyms': ['portrait', 'portrayal'], 'def': 'any likeness of a person, in any medium', 'name': 'portrait'}, {'frequency': 'c', 'id': 848, 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'id': 849, 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'id': 850, 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'id': 851, 'synset': 'pot.n.01', 'synonyms': ['pot'], 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'id': 852, 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'id': 853, 'synset': 'potato.n.01', 'synonyms': ['potato'], 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'id': 854, 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'id': 855, 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'id': 856, 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'r', 'id': 857, 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'id': 858, 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'f', 'id': 859, 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'id': 860, 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'def': 'a weapon that is forcibly thrown or projected at a targets', 'name': 
'projectile_(weapon)'}, {'frequency': 'c', 'id': 861, 'synset': 'projector.n.02', 'synonyms': ['projector'], 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'id': 862, 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'id': 863, 'synset': 'prune.n.01', 'synonyms': ['prune'], 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'id': 864, 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'id': 865, 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'id': 866, 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'id': 867, 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'id': 868, 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'id': 869, 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'def': 'a tool for making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'id': 870, 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'r', 'id': 871, 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'id': 872, 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'id': 873, 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'id': 874, 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'id': 875, 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'id': 876, 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'id': 877, 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'id': 878, 'synset': 'radar.n.01', 'synonyms': ['radar'], 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'c', 'id': 879, 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, 
{'frequency': 'c', 'id': 880, 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'id': 881, 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'id': 882, 'synset': 'raft.n.01', 'synonyms': ['raft'], 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'id': 883, 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'id': 884, 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'id': 885, 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'id': 886, 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'id': 887, 'synset': 'rat.n.01', 'synonyms': ['rat'], 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'id': 888, 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'def': 'a blade that has very sharp edge', 'name': 'razorblade'}, {'frequency': 'c', 'id': 889, 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'id': 890, 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'def': 'car mirror that reflects the view out of the rear window', 'name': 'rearview_mirror'}, {'frequency': 'c', 'id': 891, 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'id': 892, 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'r', 'id': 893, 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'r', 'id': 894, 'synset': 'red_cabbage.n.02', 'synonyms': ['red_cabbage'], 'def': 'compact head of purplish-red leaves', 'name': 'red_cabbage'}, {'frequency': 'f', 'id': 895, 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'id': 896, 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'id': 897, 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the 
snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'id': 898, 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'r', 'id': 899, 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'id': 900, 'synset': 'ring.n.08', 'synonyms': ['ring'], 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'id': 901, 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'id': 902, 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'id': 903, 'synset': 'robe.n.01', 'synonyms': ['robe'], 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'id': 904, 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'id': 905, 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'id': 906, 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'id': 907, 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'def': 'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'id': 908, 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'id': 909, 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'id': 910, 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'id': 911, 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'id': 912, 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'id': 913, 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'id': 914, 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'id': 915, 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'id': 916, 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'c', 'id': 917, 'synset': 'sail.n.01', 'synonyms': ['sail'], 'def': 'a large piece of fabric by means of 
which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'c', 'id': 918, 'synset': 'salad.n.01', 'synonyms': ['salad'], 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'id': 919, 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'r', 'id': 920, 'synset': 'salami.n.01', 'synonyms': ['salami'], 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'r', 'id': 921, 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'id': 922, 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'r', 'id': 923, 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'id': 924, 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'id': 925, 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, {'frequency': 'f', 'id': 926, 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'id': 927, 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'id': 928, 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'id': 929, 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'id': 930, 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'id': 931, 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'id': 932, 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'id': 933, 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'id': 934, 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'id': 935, 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'id': 936, 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, 
{'frequency': 'f', 'id': 937, 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'c', 'id': 938, 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'c', 'id': 939, 'synset': 'scrambled_eggs.n.01', 'synonyms': ['scrambled_eggs'], 'def': 'eggs beaten and cooked to a soft firm consistency while stirring', 'name': 'scrambled_eggs'}, {'frequency': 'r', 'id': 940, 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'r', 'id': 941, 'synset': 'scratcher.n.03', 'synonyms': ['scratcher'], 'def': 'a device used for scratching', 'name': 'scratcher'}, {'frequency': 'c', 'id': 942, 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'c', 'id': 943, 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'id': 944, 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'r', 'id': 945, 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'r', 'id': 946, 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'id': 947, 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'def': 'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'id': 948, 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'r', 'id': 949, 'synset': 'seedling.n.01', 'synonyms': ['seedling'], 'def': 'young plant or tree grown from a seed', 'name': 'seedling'}, {'frequency': 'c', 'id': 950, 'synset': 'serving_dish.n.01', 'synonyms': ['serving_dish'], 'def': 'a dish used for serving food', 'name': 'serving_dish'}, {'frequency': 'r', 'id': 951, 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'r', 'id': 952, 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'id': 953, 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'r', 'id': 954, 'synset': 'shark.n.01', 'synonyms': ['shark'], 'def': 'typically large carnivorous fishes with sharpe teeth', 'name': 'shark'}, {'frequency': 'r', 'id': 955, 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'id': 956, 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'id': 957, 'synset': 
'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'id': 958, 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'def': 'toiletry consisting that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'id': 959, 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'id': 960, 'synset': 'shears.n.01', 'synonyms': ['shears'], 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'id': 961, 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'id': 962, 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'id': 963, 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'r', 'id': 964, 'synset': 'shield.n.02', 'synonyms': ['shield'], 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'id': 965, 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'id': 966, 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'c', 'id': 967, 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'id': 968, 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'id': 969, 'synset': 'short_pants.n.01', 'synonyms': ['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'id': 970, 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'c', 'id': 971, 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'id': 972, 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'id': 973, 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'f', 'id': 974, 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'id': 975, 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'r', 'id': 976, 'synset': 'sieve.n.01', 'synonyms': ['sieve', 
'screen_(sieve)'], 'def': 'a strainer for separating lumps from powdered material or grading particles', 'name': 'sieve'}, {'frequency': 'f', 'id': 977, 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'id': 978, 'synset': 'silo.n.01', 'synonyms': ['silo'], 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'id': 979, 'synset': 'sink.n.01', 'synonyms': ['sink'], 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'id': 980, 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'id': 981, 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'id': 982, 'synset': 'ski.n.01', 'synonyms': ['ski'], 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'id': 983, 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'id': 984, 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'id': 985, 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'id': 986, 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'c', 'id': 987, 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'id': 988, 'synset': 'sleeping_bag.n.01', 'synonyms': ['sleeping_bag'], 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'id': 989, 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'id': 990, 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'id': 991, 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'id': 992, 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'id': 993, 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'id': 994, 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'id': 995, 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'def': 'tracked vehicle for 
travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'id': 996, 'synset': 'soap.n.01', 'synonyms': ['soap'], 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'id': 997, 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'id': 998, 'synset': 'sock.n.01', 'synonyms': ['sock'], 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'r', 'id': 999, 'synset': 'soda_fountain.n.02', 'synonyms': ['soda_fountain'], 'def': 'an apparatus for dispensing soda water', 'name': 'soda_fountain'}, {'frequency': 'r', 'id': 1000, 'synset': 'soda_water.n.01', 'synonyms': ['carbonated_water', 'club_soda', 'seltzer', 'sparkling_water'], 'def': 'effervescent beverage artificially charged with carbon dioxide', 'name': 'carbonated_water'}, {'frequency': 'f', 'id': 1001, 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'id': 1002, 'synset': 'softball.n.01', 'synonyms': ['softball'], 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'id': 1003, 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'id': 1004, 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'c', 'id': 1005, 'synset': 'soup.n.01', 'synonyms': ['soup'], 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'id': 1006, 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'id': 1007, 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'id': 1008, 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'id': 1009, 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 'soybean_milk', 'soymilk'], 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'id': 1010, 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'id': 1011, 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'id': 1012, 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'id': 1013, 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'id': 1014, 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 
'eyeglasses', 'glasses'], 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'id': 1015, 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'r', 'id': 1016, 'synset': 'spider.n.01', 'synonyms': ['spider'], 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'c', 'id': 1017, 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'id': 1018, 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'id': 1019, 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'id': 1020, 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'id': 1021, 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'c', 'id': 1022, 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'r', 'id': 1023, 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'def': 'echinoderms characterized by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'id': 1024, 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'id': 1025, 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'id': 1026, 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'r', 'id': 1027, 'synset': 'steamer.n.02', 'synonyms': ['steamer_(kitchen_appliance)'], 'def': 'a cooking utensil that can be used to cook food by steaming it', 'name': 'steamer_(kitchen_appliance)'}, {'frequency': 'f', 'id': 1028, 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'id': 1029, 'synset': 'stencil.n.01', 'synonyms': ['stencil'], 'def': 'a sheet of material (metal, plastic, etc.) 
that has been perforated with a pattern; ink or paint can pass through the perforations to create the printed pattern on the surface below', 'name': 'stencil'}, {'frequency': 'r', 'id': 1030, 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'id': 1031, 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'id': 1032, 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'id': 1033, 'synset': 'stew.n.02', 'synonyms': ['stew'], 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'id': 1034, 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'id': 1035, 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'c', 'id': 1036, 'synset': 'stocking.n.01', 'synonyms': ['stockings_(leg_wear)'], 'def': 'close-fitting hosiery to cover the foot and leg; come in matched pairs', 'name': 'stockings_(leg_wear)'}, {'frequency': 'f', 'id': 1037, 'synset': 'stool.n.01', 'synonyms': ['stool'], 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'id': 1038, 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'id': 1039, 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'id': 1040, 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'id': 1041, 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'id': 1042, 'synset': 'strap.n.01', 'synonyms': ['strap'], 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'id': 1043, 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'id': 1044, 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'id': 1045, 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'id': 1046, 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'def': 'a lamp supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'id': 1047, 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'id': 1048, 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'def': 'a pointed tool for writing or drawing or engraving', 'name': 
'stylus'}, {'frequency': 'r', 'id': 1049, 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'id': 1050, 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'id': 1051, 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'c', 'id': 1052, 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'id': 1053, 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'id': 1054, 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'id': 1055, 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'r', 'id': 1056, 'synset': 'sunscreen.n.01', 'synonyms': ['sunscreen', 'sunblock'], 'def': 'a cream spread on the skin; contains a chemical to filter out ultraviolet light and so protect from sunburn', 'name': 'sunscreen'}, {'frequency': 'f', 'id': 1057, 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'id': 1058, 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'id': 1059, 'synset': 'swab.n.02', 'synonyms': ['mop'], 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'id': 1060, 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'id': 1061, 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'id': 1062, 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'id': 1063, 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'id': 1064, 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'id': 1065, 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 'c', 'id': 1066, 'synset': 'sword.n.01', 'synonyms': ['sword'], 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'id': 1067, 
'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'id': 1068, 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'id': 1069, 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'id': 1070, 'synset': 'table.n.02', 'synonyms': ['table'], 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'id': 1071, 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'id': 1072, 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'id': 1073, 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'id': 1074, 'synset': 'taco.n.02', 'synonyms': ['taco'], 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'id': 1075, 'synset': 'tag.n.02', 'synonyms': ['tag'], 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'id': 1076, 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'id': 1077, 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'id': 1078, 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'c', 'id': 1079, 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'id': 1080, 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'c', 'id': 1081, 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'id': 1082, 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'id': 1083, 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'id': 1084, 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'id': 1085, 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'def': 'a cloth having a 
crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'id': 1086, 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'r', 'id': 1087, 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'id': 1088, 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'id': 1089, 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'c', 'id': 1090, 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'id': 1091, 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'id': 1092, 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'def': 'electronic device for communicating by voice over long distances', 'name': 'telephone'}, {'frequency': 'c', 'id': 1093, 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'id': 1094, 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'id': 1095, 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'id': 1096, 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'id': 1097, 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'id': 1098, 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'id': 1099, 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'id': 1100, 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'id': 1101, 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'id': 1102, 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'c', 'id': 1103, 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'def': 'a regulator for automatically regulating temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'id': 1104, 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'def': 'a small metal cap to protect the finger while sewing; 
can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'id': 1105, 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'id': 1106, 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'id': 1107, 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'id': 1108, 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'id': 1109, 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'id': 1110, 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'id': 1111, 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'r', 'id': 1112, 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'id': 1113, 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'id': 1114, 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'id': 1115, 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'c', 'id': 1116, 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'id': 1117, 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'id': 1118, 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'id': 1119, 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'c', 'id': 1120, 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'id': 1121, 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'id': 1122, 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'def': 'small brush; has long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'id': 1123, 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, 
{'frequency': 'c', 'id': 1124, 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'c', 'id': 1125, 'synset': 'top.n.09', 'synonyms': ['cover'], 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'id': 1126, 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'id': 1127, 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'id': 1128, 'synset': 'towel.n.01', 'synonyms': ['towel'], 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'id': 1129, 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'id': 1130, 'synset': 'toy.n.03', 'synonyms': ['toy'], 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'id': 1131, 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'id': 1132, 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'r', 'id': 1133, 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'c', 'id': 1134, 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'id': 1135, 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'id': 1136, 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'id': 1137, 'synset': 'tray.n.01', 'synonyms': ['tray'], 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'id': 1138, 'synset': 'tree_house.n.01', 'synonyms': ['tree_house'], 'def': '(NOT A TREE) a PLAYHOUSE built in the branches of a tree', 'name': 'tree_house'}, {'frequency': 'r', 'id': 1139, 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'id': 1140, 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'r', 'id': 1141, 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'def': 'a vehicle with three 
wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'c', 'id': 1142, 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'id': 1143, 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'id': 1144, 'synset': 'truck.n.01', 'synonyms': ['truck'], 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'id': 1145, 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'id': 1146, 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'id': 1147, 'synset': 'tub.n.02', 'synonyms': ['vat'], 'def': 'a large open vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'id': 1148, 'synset': 'turban.n.01', 'synonyms': ['turban'], 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'r', 'id': 1149, 'synset': 'turkey.n.01', 'synonyms': ['turkey_(bird)'], 'def': 'large gallinaceous bird with fan-shaped tail; widely domesticated for food', 'name': 'turkey_(bird)'}, {'frequency': 'c', 'id': 1150, 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'id': 1151, 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'id': 1152, 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'r', 'id': 1153, 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'r', 'id': 1154, 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'id': 1155, 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'c', 'id': 1156, 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'id': 1157, 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'c', 'id': 1158, 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'r', 'id': 1159, 'synset': 'urn.n.01', 'synonyms': ['urn'], 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'id': 1160, 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'c', 'id': 1161, 'synset': 
'valve.n.03', 'synonyms': ['valve'], 'def': 'control consisting of a mechanical device for controlling the flow of a fluid', 'name': 'valve'}, {'frequency': 'f', 'id': 1162, 'synset': 'vase.n.01', 'synonyms': ['vase'], 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'id': 1163, 'synset': 'vending_machine.n.01', 'synonyms': ['vending_machine'], 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'id': 1164, 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'c', 'id': 1165, 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'id': 1166, 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'id': 1167, 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'id': 1168, 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'r', 'id': 1169, 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'id': 1170, 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'id': 1171, 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'id': 1172, 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'id': 1173, 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'id': 1174, 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'id': 1175, 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'id': 1176, 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'id': 1177, 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'c', 'id': 1178, 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'id': 1179, 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'id': 1180, 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'def': 'a tall piece of furniture that provides 
storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'id': 1181, 'synset': 'wasabi.n.02', 'synonyms': ['wasabi'], 'def': 'the thick green root of the wasabi plant that the Japanese use in cooking and that tastes like strong horseradish', 'name': 'wasabi'}, {'frequency': 'c', 'id': 1182, 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'id': 1183, 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'id': 1184, 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'id': 1185, 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'id': 1186, 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'id': 1187, 'synset': 'water_filter.n.01', 'synonyms': ['water_filter'], 'def': 'a filter to remove impurities from the water supply', 'name': 'water_filter'}, {'frequency': 'r', 'id': 1188, 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'r', 'id': 1189, 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'id': 1190, 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'id': 1191, 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'id': 1192, 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'id': 1193, 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'id': 1194, 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'c', 'id': 1195, 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'id': 1196, 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'id': 1197, 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'id': 1198, 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'def': 'a rich cake with two or more 
tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'id': 1199, 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'def': 'a ring given to the bride and/or groom at the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'id': 1200, 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'id': 1201, 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'id': 1202, 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'id': 1203, 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'r', 'id': 1204, 'synset': 'whiskey.n.01', 'synonyms': ['whiskey'], 'def': 'a liquor made from fermented mash of grain', 'name': 'whiskey'}, {'frequency': 'r', 'id': 1205, 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'r', 'id': 1206, 'synset': 'wick.n.02', 'synonyms': ['wick'], 'def': 'a loosely woven cord in a candle or oil lamp that is lit on fire', 'name': 'wick'}, {'frequency': 'c', 'id': 1207, 'synset': 'wig.n.01', 'synonyms': ['wig'], 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'id': 1208, 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'id': 1209, 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'def': 'a mill that is powered by the wind', 'name': 'windmill'}, {'frequency': 'c', 'id': 1210, 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'id': 1211, 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'id': 1212, 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'id': 1213, 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'r', 'id': 1214, 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'id': 1215, 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'r', 'id': 1216, 'synset': 'wing_chair.n.01', 'synonyms': ['wing_chair'], 'def': 'easy chair having wings on each side of a high back', 'name': 'wing_chair'}, {'frequency': 'c', 'id': 1217, 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 
'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'id': 1218, 'synset': 'wok.n.01', 'synonyms': ['wok'], 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'id': 1219, 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'id': 1220, 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'id': 1221, 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 'name': 'wreath'}, {'frequency': 'c', 'id': 1222, 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'c', 'id': 1223, 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'id': 1224, 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'r', 'id': 1225, 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'r', 'id': 1226, 'synset': 'yak.n.02', 'synonyms': ['yak'], 'def': 'large long-haired wild ox of Tibet often domesticated', 'name': 'yak'}, {'frequency': 'c', 'id': 1227, 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'r', 'id': 1228, 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'id': 1229, 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'id': 1230, 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa +# fmt: on diff --git a/detectron2/data/datasets/pascal_voc.py b/detectron2/data/datasets/pascal_voc.py new file mode 100644 index 0000000000..8053af6653 --- /dev/null +++ b/detectron2/data/datasets/pascal_voc.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from fvcore.common.file_io import PathManager +import os +import numpy as np +import xml.etree.ElementTree as ET + +from detectron2.structures import BoxMode +from detectron2.data import DatasetCatalog, MetadataCatalog + + +__all__ = ["register_pascal_voc"] + + +# fmt: off +CLASS_NAMES = [ + "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", + "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", + "pottedplant", "sheep", "sofa", "train", "tvmonitor", +] +# fmt: on + + +def load_voc_instances(dirname: str, split: str): + """ + Load Pascal VOC detection annotations to Detectron2 format. 
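+
+    A minimal usage sketch (the directory below is only a placeholder for wherever
+    the VOC data actually lives; it is not a path shipped with this file):
+
+        dicts = load_voc_instances("datasets/VOC2007", "trainval")
+        # each dict carries "file_name", "image_id", "height", "width", "annotations"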
+ + Args: + dirname: Contain "Annotations", "ImageSets", "JPEGImages" + split (str): one of "train", "test", "val", "trainval" + """ + with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f: + fileids = np.loadtxt(f, dtype=np.str) + + dicts = [] + for fileid in fileids: + anno_file = os.path.join(dirname, "Annotations", fileid + ".xml") + jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg") + + tree = ET.parse(anno_file) + + r = { + "file_name": jpeg_file, + "image_id": fileid, + "height": int(tree.findall("./size/height")[0].text), + "width": int(tree.findall("./size/width")[0].text), + } + instances = [] + + for obj in tree.findall("object"): + cls = obj.find("name").text + # We include "difficult" samples in training. + # Based on limited experiments, they don't hurt accuracy. + # difficult = int(obj.find("difficult").text) + # if difficult == 1: + # continue + bbox = obj.find("bndbox") + bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]] + # Original annotations are integers in the range [1, W or H] + # Assuming they mean 1-based pixel indices (inclusive), + # a box with annotation (xmin=1, xmax=W) covers the whole image. + # In coordinate space this is represented by (xmin=0, xmax=W) + bbox[0] -= 1.0 + bbox[1] -= 1.0 + instances.append( + {"category_id": CLASS_NAMES.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS} + ) + r["annotations"] = instances + dicts.append(r) + return dicts + + +def register_pascal_voc(name, dirname, split, year): + DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split)) + MetadataCatalog.get(name).set( + thing_classes=CLASS_NAMES, dirname=dirname, year=year, split=split + ) diff --git a/detectron2/data/datasets/register_coco.py b/detectron2/data/datasets/register_coco.py new file mode 100644 index 0000000000..4ac0fa3a03 --- /dev/null +++ b/detectron2/data/datasets/register_coco.py @@ -0,0 +1,124 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy + +from detectron2.data import DatasetCatalog, MetadataCatalog +from .coco import load_coco_json, load_sem_seg + +""" +This file contains functions to register a COCO-format dataset to the DatasetCatalog. +""" + +__all__ = ["register_coco_instances", "register_coco_panoptic_separated"] + + +def register_coco_instances(name, metadata, json_file, image_root): + """ + Register a dataset in COCO's json annotation format for + instance detection, instance segmentation and keypoint detection. + (i.e., Type 1 and 2 in http://cocodataset.org/#format-data. + `instances*.json` and `person_keypoints*.json` in the dataset). + + This is an example of how to register a new dataset. + You can do something similar to this function, to register new datasets. + + Args: + name (str): the name that identifies a dataset, e.g. "coco_2014_train". + metadata (dict): extra metadata associated with this dataset. You can + leave it as an empty dict. + json_file (str): path to the json instance annotation file. + image_root (str): directory which contains all the images. + """ + # 1. register a function which returns dicts + DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name)) + + # 2. 
Optionally, add metadata about this dataset, + # since they might be useful in evaluation, visualization or logging + MetadataCatalog.get(name).set( + json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata + ) + + +def register_coco_panoptic_separated( + name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json +): + """ + Register a COCO panoptic segmentation dataset named `name`. + The annotations in this registered dataset will contain both instance annotations and + semantic annotations, each with its own contiguous ids. Hence it's called "separated". + + It follows the setting used by the PanopticFPN paper: + + 1. The instance annotations directly come from polygons in the COCO + instances annotation task, rather than from the masks in the COCO panoptic annotations. + + The two format have small differences: + Polygons in the instance annotations may have overlaps. + The mask annotations are produced by labeling the overlapped polygons + with depth ordering. + + 2. The semantic annotations are converted from panoptic annotations, where + all "things" are assigned a semantic id of 0. + All semantic categories will therefore have ids in contiguous + range [1, #stuff_categories]. + + This function will also register a pure semantic segmentation dataset + named `name + '_stuffonly'`. + + Args: + name (str): the name that identifies a dataset, + e.g. "coco_2017_train_panoptic" + metadata (str): extra metadata associated with this dataset. + image_root (str): directory which contains all the images + panoptic_root (str): directory which contains panoptic annotation images + panoptic_json (str): path to the json panoptic annotation file + sem_seg_root (str): directory which contains all the ground truth segmentation annotations. + instances_json (str): path to the json instance annotation file + """ + panoptic_name = name + "_separated" + DatasetCatalog.register( + panoptic_name, + lambda: merge_to_panoptic( + load_coco_json(instances_json, image_root, panoptic_name), + load_sem_seg(sem_seg_root, image_root), + ), + ) + MetadataCatalog.get(panoptic_name).set( + panoptic_root=panoptic_root, + image_root=image_root, + panoptic_json=panoptic_json, + sem_seg_root=sem_seg_root, + json_file=instances_json, # TODO rename + evaluator_type="coco_panoptic_seg", + **metadata + ) + + semantic_name = name + "_stuffonly" + DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root)) + MetadataCatalog.get(semantic_name).set( + sem_seg_root=sem_seg_root, image_root=image_root, evaluator_type="sem_seg", **metadata + ) + + +def merge_to_panoptic(detection_dicts, sem_seg_dicts): + """ + Create dataset dicts for panoptic segmentation, by + merging two dicts using "file_name" field to match their entries. + + Args: + detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation. + sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation. + + Returns: + list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in + both detection_dicts and sem_seg_dicts that correspond to the same image. + The function assumes that the same key in different dicts has the same value. 
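+
+    Illustrative sketch (file names are made up; the exact keys in each dict come
+    from load_coco_json and load_sem_seg, so treat the ones below as examples):
+
+        det = [{"file_name": "0001.jpg", "annotations": []}]
+        sem = [{"file_name": "0001.jpg", "sem_seg_file_name": "0001_sem.png"}]
+        merged = merge_to_panoptic(det, sem)
+        # merged[0] now holds both "annotations" and "sem_seg_file_name"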
+ """ + results = [] + sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts} + assert len(sem_seg_file_to_entry) > 0 + + for det_dict in detection_dicts: + dic = copy.copy(det_dict) + dic.update(sem_seg_file_to_entry[dic["file_name"]]) + results.append(dic) + return results diff --git a/detectron2/data/detection_utils.py b/detectron2/data/detection_utils.py new file mode 100644 index 0000000000..2e5e70f2d5 --- /dev/null +++ b/detectron2/data/detection_utils.py @@ -0,0 +1,397 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +""" +Common data processing utilities that are used in a +typical object detection data pipeline. +""" +import logging +import numpy as np +import torch +from fvcore.common.file_io import PathManager +from PIL import Image + +from detectron2.structures import ( + BitMasks, + Boxes, + BoxMode, + Instances, + Keypoints, + PolygonMasks, + RotatedBoxes, +) + +from . import transforms as T +from .catalog import MetadataCatalog + + +class SizeMismatchError(ValueError): + """ + When loaded image has difference width/height compared with annoation. + """ + + +def read_image(file_name, format=None): + """ + Read an image into the given format. + + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. + format (dict): one of the supported image modes in PIL, or "BGR" + + Returns: + image (np.ndarray): an HWC image + """ + with PathManager.open(file_name, "rb") as f: + image = Image.open(f) + + if format is not None: + # PIL only supports RGB, so convert to RGB and flip channels over below + conversion_format = format + if format == "BGR": + conversion_format = "RGB" + image = image.convert(conversion_format) + image = np.asarray(image) + if format == "BGR": + # flip channels if needed + image = image[:, :, ::-1] + # PIL squeezes out the channel dimension for "L", so make it HWC + if format == "L": + image = np.expand_dims(image, -1) + return image + + +def check_image_size(dataset_dict, image): + """ + Raise an error if the image does not match the size specified in the dict. + """ + if "width" in dataset_dict or "height" in dataset_dict: + image_wh = (image.shape[1], image.shape[0]) + expected_wh = (dataset_dict["width"], dataset_dict["height"]) + if not image_wh == expected_wh: + raise SizeMismatchError( + "mismatch (W,H), got {}, expect {}".format(image_wh, expected_wh) + ) + + +def transform_proposals(dataset_dict, image_shape, transforms, min_box_side_len, proposal_topk): + """ + Apply transformations to the proposals in dataset_dict, if any. + + Args: + dataset_dict (dict): a dict read from the dataset, possibly + contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode" + image_shape (tuple): height, width + transforms (TransformList): + min_box_side_len (int): keep proposals with at least this size + proposal_topk (int): only keep top-K scoring proposals + + The input dict is modified in-place, with abovementioned keys removed. A new + key "proposals" will be added. Its value is an `Instances` + object which contains the transformed proposals in its field + "proposal_boxes" and "objectness_logits". 
+ """ + if "proposal_boxes" in dataset_dict: + # Tranform proposal boxes + boxes = transforms.apply_box( + BoxMode.convert( + dataset_dict.pop("proposal_boxes"), + dataset_dict.pop("proposal_bbox_mode"), + BoxMode.XYXY_ABS, + ) + ) + boxes = Boxes(boxes) + objectness_logits = torch.as_tensor( + dataset_dict.pop("proposal_objectness_logits").astype("float32") + ) + + boxes.clip(image_shape) + keep = boxes.nonempty(threshold=min_box_side_len) + boxes = boxes[keep] + objectness_logits = objectness_logits[keep] + + proposals = Instances(image_shape) + proposals.proposal_boxes = boxes[:proposal_topk] + proposals.objectness_logits = objectness_logits[:proposal_topk] + dataset_dict["proposals"] = proposals + + +def transform_instance_annotations( + annotation, transforms, image_size, *, keypoint_hflip_indices=None +): + """ + Apply transforms to box, segmentation and keypoints of annotations of a single instance. + + It will use `transforms.apply_box` for the box, and + `transforms.apply_coords` for segmentation polygons & keypoints. + If you need anything more specially designed for each data structure, + you'll need to implement your own version of this function or the transforms. + + Args: + annotation (dict): dict of instance annotations for a single instance. + transforms (TransformList): + image_size (tuple): the height, width of the transformed image + keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. + + Returns: + dict: + the same input dict with fields "bbox", "segmentation", "keypoints" + transformed according to `transforms`. + The "bbox_mode" field will be set to XYXY_ABS. + """ + bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS) + # Note that bbox is 1d (per-instance bounding box) + annotation["bbox"] = transforms.apply_box([bbox])[0] + annotation["bbox_mode"] = BoxMode.XYXY_ABS + + if "segmentation" in annotation: + # each instance contains 1 or more polygons + polygons = [np.asarray(p).reshape(-1, 2) for p in annotation["segmentation"]] + annotation["segmentation"] = [p.reshape(-1) for p in transforms.apply_polygons(polygons)] + + if "keypoints" in annotation: + keypoints = transform_keypoint_annotations( + annotation["keypoints"], transforms, image_size, keypoint_hflip_indices + ) + annotation["keypoints"] = keypoints + + return annotation + + +def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None): + """ + Transform keypoint annotations of an image. + + Args: + keypoints (list[float]): Nx3 float in Detectron2 Dataset format. + transforms (TransformList): + image_size (tuple): the height, width of the transformed image + keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. + """ + # (N*3,) -> (N, 3) + keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3) + keypoints[:, :2] = transforms.apply_coords(keypoints[:, :2]) + + # This assumes that HorizFlipTransform is the only one that does flip + do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1 + + # Alternative way: check if probe points was horizontally flipped. 
+ # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]]) + # probe_aug = transforms.apply_coords(probe.copy()) + # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa + + # If flipped, swap each keypoint with its opposite-handed equivalent + if do_hflip: + assert keypoint_hflip_indices is not None + keypoints = keypoints[keypoint_hflip_indices, :] + + # Maintain COCO convention that if visibility == 0, then x, y = 0 + # TODO may need to reset visibility for cropped keypoints, + # but it does not matter for our existing algorithms + keypoints[keypoints[:, 2] == 0] = 0 + return keypoints + + +def annotations_to_instances(annos, image_size, mask_format="polygon"): + """ + Create an :class:`Instances` object used by the models, + from instance annotations in the dataset dict. + + Args: + annos (list[dict]): a list of instance annotations in one image, each + element for one instance. + image_size (tuple): height, width + + Returns: + Instances: + It will contain fields "gt_boxes", "gt_classes", + "gt_masks", "gt_keypoints", if they can be obtained from `annos`. + This is the format that builtin models expect. + """ + boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos] + target = Instances(image_size) + boxes = target.gt_boxes = Boxes(boxes) + boxes.clip(image_size) + + classes = [obj["category_id"] for obj in annos] + classes = torch.tensor(classes, dtype=torch.int64) + target.gt_classes = classes + + if len(annos) and "segmentation" in annos[0]: + polygons = [obj["segmentation"] for obj in annos] + if mask_format == "polygon": + masks = PolygonMasks(polygons) + else: + assert mask_format == "bitmask", mask_format + masks = BitMasks.from_polygon_masks(polygons, *image_size) + target.gt_masks = masks + + if len(annos) and "keypoints" in annos[0]: + kpts = [obj.get("keypoints", []) for obj in annos] + target.gt_keypoints = Keypoints(kpts) + + return target + + +def annotations_to_instances_rotated(annos, image_size): + """ + Create an :class:`Instances` object used by the models, + from instance annotations in the dataset dict. + Compared to `annotations_to_instances`, this function is for rotated boxes only + + Args: + annos (list[dict]): a list of instance annotations in one image, each + element for one instance. + image_size (tuple): height, width + + Returns: + Instances: + Containing fields "gt_boxes", "gt_classes", + if they can be obtained from `annos`. + This is the format that builtin models expect. + """ + boxes = [obj["bbox"] for obj in annos] + target = Instances(image_size) + boxes = target.gt_boxes = RotatedBoxes(boxes) + boxes.clip(image_size) + + classes = [obj["category_id"] for obj in annos] + classes = torch.tensor(classes, dtype=torch.int64) + target.gt_classes = classes + + return target + + +def filter_empty_instances(instances, by_box=True, by_mask=True): + """ + Filter out empty instances in an `Instances` object. + + Args: + instances (Instances): + by_box (bool): whether to filter out instances with empty boxes + by_mask (bool): whether to filter out instances with empty masks + + Returns: + Instances: the filtered instances. 
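+
+    For example (a sketch; `instances` is assumed to already hold gt_boxes and,
+    when masks are used, gt_masks):
+
+        instances = filter_empty_instances(instances, by_box=True, by_mask=True)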
+ """ + assert by_box or by_mask + r = [] + if by_box: + r.append(instances.gt_boxes.nonempty()) + if instances.has("gt_masks") and by_mask: + r.append(instances.gt_masks.nonempty()) + + # TODO: can also filter visible keypoints + + if not r: + return instances + m = r[0] + for x in r[1:]: + m = m & x + return instances[m] + + +def create_keypoint_hflip_indices(dataset_names): + """ + Args: + dataset_names (list[str]): list of dataset names + Returns: + ndarray[int]: a vector of size=#keypoints, storing the + horizontally-flipped keypoint indices. + """ + + check_metadata_consistency("keypoint_names", dataset_names) + check_metadata_consistency("keypoint_flip_map", dataset_names) + + meta = MetadataCatalog.get(dataset_names[0]) + names = meta.keypoint_names + # TODO flip -> hflip + flip_map = dict(meta.keypoint_flip_map) + flip_map.update({v: k for k, v in flip_map.items()}) + flipped_names = [i if i not in flip_map else flip_map[i] for i in names] + flip_indices = [names.index(i) for i in flipped_names] + return np.asarray(flip_indices) + + +def gen_crop_transform_with_instance(crop_size, image_size, instance): + """ + Generate a CropTransform so that the cropping region contains + the center of the given instance. + + Args: + crop_size (tuple): h, w in pixels + image_size (tuple): h, w + instance (dict): an annotation dict of one instance, in Detectron2's + dataset format. + """ + crop_size = np.asarray(crop_size, dtype=np.int32) + bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS) + center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5 + + min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0) + max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0) + max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32)) + + y0 = np.random.randint(min_yx[0], max_yx[0] + 1) + x0 = np.random.randint(min_yx[1], max_yx[1] + 1) + return T.CropTransform(x0, y0, crop_size[1], crop_size[0]) + + +def check_metadata_consistency(key, dataset_names): + """ + Check that the datasets have consistent metadata. + + Args: + key (str): a metadata key + dataset_names (list[str]): a list of dataset names + + Raises: + AttributeError: if the key does not exist in the metadata + ValueError: if the given datasets do not have the same metadata values defined by key + """ + if len(dataset_names) == 0: + return + logger = logging.getLogger(__name__) + entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names] + for idx, entry in enumerate(entries_per_dataset): + if entry != entries_per_dataset[0]: + logger.error( + "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry)) + ) + logger.error( + "Metadata '{}' for dataset '{}' is '{}'".format( + key, dataset_names[0], str(entries_per_dataset[0]) + ) + ) + raise ValueError("Datasets have different metadata '{}'!".format(key)) + + +def build_transform_gen(cfg, is_train): + """ + Create a list of :class:`TransformGen` from config. + Now it includes resizing and flipping. 
+ + Returns: + list[TransformGen] + """ + if is_train: + min_size = cfg.INPUT.MIN_SIZE_TRAIN + max_size = cfg.INPUT.MAX_SIZE_TRAIN + sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING + else: + min_size = cfg.INPUT.MIN_SIZE_TEST + max_size = cfg.INPUT.MAX_SIZE_TEST + sample_style = "choice" + if sample_style == "range": + assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format( + len(min_size) + ) + + logger = logging.getLogger(__name__) + tfm_gens = [] + if not min_size == 0: # set to zero to disable resize + tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style)) + if is_train: + tfm_gens.append(T.RandomFlip()) + logger.info("TransformGens used in training: " + str(tfm_gens)) + return tfm_gens diff --git a/detectron2/data/samplers/__init__.py b/detectron2/data/samplers/__init__.py new file mode 100644 index 0000000000..9cfa8a6525 --- /dev/null +++ b/detectron2/data/samplers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .distributed_sampler import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler +from .grouped_batch_sampler import GroupedBatchSampler + +__all__ = [ + "GroupedBatchSampler", + "TrainingSampler", + "InferenceSampler", + "RepeatFactorTrainingSampler", +] diff --git a/detectron2/data/samplers/distributed_sampler.py b/detectron2/data/samplers/distributed_sampler.py new file mode 100644 index 0000000000..43b7ea2858 --- /dev/null +++ b/detectron2/data/samplers/distributed_sampler.py @@ -0,0 +1,199 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import itertools +import math +from collections import defaultdict +from typing import Optional +import torch +from torch.utils.data.sampler import Sampler + +from detectron2.utils import comm + + +class TrainingSampler(Sampler): + """ + In training, we only care about the "infinite stream" of training data. + So this sampler produces an infinite stream of indices and + all workers cooperate to correctly shuffle the indices and sample different indices. + + The samplers in each worker effectively produces `indices[worker_id::num_workers]` + where `indices` is an infinite stream of indices consisting of + `shuffle(range(size)) + suhffle(range(size)) + ...` (if shuffle is True) + or `range(size) + range(size) + ...` (if shuffle is False) + """ + + def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None): + """ + Args: + size (int): the total number of data of the underlying dataset to sample from + shuffle (bool): whether to shuffle the indices or not + seed (int): the initial seed of the shuffle. Must be the same + across all workers. If None, will use a random seed shared + among workers (require synchronization among all workers). + """ + self._size = size + assert size > 0 + self._shuffle = shuffle + if seed is None: + seed = comm.shared_random_seed() + self._seed = int(seed) + + self._rank = comm.get_rank() + self._world_size = comm.get_world_size() + + def __iter__(self): + start = self._rank + yield from itertools.islice(self._infinite_indices(), start, None, self._world_size) + + def _infinite_indices(self): + g = torch.Generator() + g.manual_seed(self._seed) + while True: + if self._shuffle: + yield from torch.randperm(self._size, generator=g) + else: + yield from torch.arange(self._size) + + +class RepeatFactorTrainingSampler(Sampler): + """ + Similar to TrainingSampler, but suitable for training on class imbalanced datasets + like LVIS. 
In each epoch, an image may appear multiple times based on its "repeat + factor". The repeat factor for an image is a function of the frequency the rarest + category labeled in that image. The "frequency of category c" in [0, 1] is defined + as the fraction of images in the training set (without repeats) in which category c + appears. + + See https://arxiv.org/abs/1908.03195 (>= v2) Appendix B.2. + """ + + def __init__(self, dataset_dicts, repeat_thresh, shuffle=True, seed=None): + """ + Args: + dataset_dicts (list[dict]): annotations in Detectron2 dataset format. + repeat_thresh (float): frequency threshold below which data is repeated. + shuffle (bool): whether to shuffle the indices or not + seed (int): the initial seed of the shuffle. Must be the same + across all workers. If None, will use a random seed shared + among workers (require synchronization among all workers). + """ + self._shuffle = shuffle + if seed is None: + seed = comm.shared_random_seed() + self._seed = int(seed) + + self._rank = comm.get_rank() + self._world_size = comm.get_world_size() + + # Get fractional repeat factors and split into whole number (_int_part) + # and fractional (_frac_part) parts. + rep_factors = self._get_repeat_factors(dataset_dicts, repeat_thresh) + self._int_part = torch.trunc(rep_factors) + self._frac_part = rep_factors - self._int_part + + def _get_repeat_factors(self, dataset_dicts, repeat_thresh): + """ + Compute (fractional) per-image repeat factors. + + Args: + See __init__. + + Returns: + torch.Tensor: the i-th element is the repeat factor for the dataset image + at index i. + """ + # 1. For each category c, compute the fraction of images that contain it: f(c) + category_freq = defaultdict(int) + for dataset_dict in dataset_dicts: # For each image (without repeats) + cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]} + for cat_id in cat_ids: + category_freq[cat_id] += 1 + num_images = len(dataset_dicts) + for k, v in category_freq.items(): + category_freq[k] = v / num_images + + # 2. For each category c, compute the category-level repeat factor: + # r(c) = max(1, sqrt(t / f(c))) + category_rep = { + cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq)) + for cat_id, cat_freq in category_freq.items() + } + + # 3. For each image I, compute the image-level repeat factor: + # r(I) = max_{c in I} r(c) + rep_factors = [] + for dataset_dict in dataset_dicts: + cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]} + rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}) + rep_factors.append(rep_factor) + + return torch.tensor(rep_factors, dtype=torch.float32) + + def _get_epoch_indices(self, generator): + """ + Create a list of dataset indices (with repeats) to use for one epoch. + + Args: + generator (torch.Generator): pseudo random number generator used for + stochastic rounding. + + Returns: + torch.Tensor: list of dataset indicies to use in one epoch. Each index + is repeated based on its calculated repeat factor. 
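+
+            For example (illustrative numbers): with repeat_thresh t = 0.001 and a
+            rarest-category frequency f(c) = 0.0001, the repeat factor is
+            r = sqrt(t / f(c)) ~= 3.16, so that image index appears 3 times in an
+            epoch, plus a 4th time with probability ~0.16 via stochastic rounding.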
+ """ + # Since repeat factors are fractional, we use stochastic rounding so + # that the target repeat factor is achieved in expectation over the + # course of training + rands = torch.rand(len(self._frac_part), generator=generator) + rep_factors = self._int_part + (rands < self._frac_part).float() + # Construct a list of indices in which we repeat images as specified + indices = [] + for dataset_index, rep_factor in enumerate(rep_factors): + indices.extend([dataset_index] * int(rep_factor.item())) + return torch.tensor(indices, dtype=torch.int64) + + def __iter__(self): + start = self._rank + yield from itertools.islice(self._infinite_indices(), start, None, self._world_size) + + def _infinite_indices(self): + g = torch.Generator() + g.manual_seed(self._seed) + while True: + # Sample indices with repeats determined by stochastic rounding; each + # "epoch" may have a slightly different size due to the rounding. + indices = self._get_epoch_indices(g) + if self._shuffle: + randperm = torch.randperm(len(indices), generator=g) + yield from indices[randperm] + else: + yield from indices + + +class InferenceSampler(Sampler): + """ + Produce indices for inference. + Inference needs to run on the __exact__ set of samples, + therefore when the total number of samples is not divisible by the number of workers, + this sampler produces different number of samples on different workers. + """ + + def __init__(self, size: int): + """ + Args: + size (int): the total number of data of the underlying dataset to sample from + """ + self._size = size + assert size > 0 + self._rank = comm.get_rank() + self._world_size = comm.get_world_size() + + shard_size = (self._size - 1) // self._world_size + 1 + begin = shard_size * self._rank + end = min(shard_size * (self._rank + 1), self._size) + self._local_indices = range(begin, end) + + def __iter__(self): + yield from self._local_indices + + def __len__(self): + return len(self._local_indices) diff --git a/detectron2/data/samplers/grouped_batch_sampler.py b/detectron2/data/samplers/grouped_batch_sampler.py new file mode 100644 index 0000000000..5f16172567 --- /dev/null +++ b/detectron2/data/samplers/grouped_batch_sampler.py @@ -0,0 +1,46 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch +from torch.utils.data.sampler import BatchSampler, Sampler + + +class GroupedBatchSampler(BatchSampler): + """ + Wraps another sampler to yield a mini-batch of indices. + It enforces that the batch only contain elements from the same group. + It also tries to provide mini-batches which follows an ordering which is + as close as possible to the ordering from the original sampler. + + Arguments: + sampler (Sampler): Base sampler. + group_ids (list[int]): If the sampler produces indices in range [0, N), + `group_ids` must be a list of `N` ints which contains the group id of each sample. + The group ids must be a continuous set of integers starting from + 0, i.e. they must be in the range [0, num_groups). + batch_size (int): Size of mini-batch. 
+ """ + + def __init__(self, sampler, group_ids, batch_size): + if not isinstance(sampler, Sampler): + raise ValueError( + "sampler should be an instance of " + "torch.utils.data.Sampler, but got sampler={}".format(sampler) + ) + self.sampler = sampler + self.group_ids = torch.as_tensor(group_ids) + assert self.group_ids.dim() == 1 + self.batch_size = batch_size + self.groups = torch.unique(self.group_ids).sort(0)[0] + # group ids must range in [0, #group) + assert self.groups[0].item() == 0 and self.groups[-1].item() == len(self.groups) - 1 + + # buffer the indices of each group until batch size is reached + self.buffer_per_group = [[] for k in self.groups] + + def __iter__(self): + for idx in self.sampler: + group_id = self.group_ids[idx] + group_buffer = self.buffer_per_group[group_id] + group_buffer.append(idx) + if len(group_buffer) == self.batch_size: + yield group_buffer[:] # yield a copy of the list + del group_buffer[:] diff --git a/detectron2/data/transforms/__init__.py b/detectron2/data/transforms/__init__.py new file mode 100644 index 0000000000..f7638bb580 --- /dev/null +++ b/detectron2/data/transforms/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .transform import * +from fvcore.transforms.transform import * +from .transform_gen import * + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/detectron2/data/transforms/transform.py b/detectron2/data/transforms/transform.py new file mode 100644 index 0000000000..524c673e58 --- /dev/null +++ b/detectron2/data/transforms/transform.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# File: transform.py + +import numpy as np +from fvcore.transforms.transform import HFlipTransform, NoOpTransform, Transform +from PIL import Image + +__all__ = ["ExtentTransform", "ResizeTransform"] + + +class ExtentTransform(Transform): + """ + Extracts a subregion from the source image and scales it to the output size. + + The fill color is used to map pixels from the source rect that fall outside + the source image. + + See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform + """ + + def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0): + """ + Args: + src_rect (x0, y0, x1, y1): src coordinates + output_size (h, w): dst image size + interp: PIL interpolation methods + fill: Fill color used when src_rect extends outside image + """ + super().__init__() + self._set_attributes(locals()) + + def apply_image(self, img, interp=None): + h, w = self.output_size + ret = Image.fromarray(img).transform( + size=(w, h), + method=Image.EXTENT, + data=self.src_rect, + resample=interp if interp else self.interp, + fill=self.fill, + ) + return np.asarray(ret) + + def apply_coords(self, coords): + # Transform image center from source coordinates into output coordinates + # and then map the new origin to the corner of the output image. 
+ h, w = self.output_size + x0, y0, x1, y1 = self.src_rect + new_coords = coords.astype(np.float32) + new_coords[:, 0] -= 0.5 * (x0 + x1) + new_coords[:, 1] -= 0.5 * (y0 + y1) + new_coords[:, 0] *= w / (x1 - x0) + new_coords[:, 1] *= h / (y1 - y0) + new_coords[:, 0] += 0.5 * w + new_coords[:, 1] += 0.5 * h + return new_coords + + def apply_segmentation(self, segmentation): + segmentation = self.apply_image(segmentation, interp=Image.NEAREST) + return segmentation + + +class ResizeTransform(Transform): + """ + Resize the image to a target size. + """ + + def __init__(self, h, w, new_h, new_w, interp): + """ + Args: + h, w (int): original image size + new_h, new_w (int): new image size + interp: PIL interpolation methods + """ + # TODO decide on PIL vs opencv + super().__init__() + self._set_attributes(locals()) + + def apply_image(self, img, interp=None): + assert img.shape[:2] == (self.h, self.w) + pil_image = Image.fromarray(img) + interp_method = interp if interp is not None else self.interp + pil_image = pil_image.resize((self.new_w, self.new_h), interp_method) + ret = np.asarray(pil_image) + return ret + + def apply_coords(self, coords): + coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w) + coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h) + return coords + + def apply_segmentation(self, segmentation): + segmentation = self.apply_image(segmentation, interp=Image.NEAREST) + return segmentation + + +def HFlip_rotated_box(transform, rotated_boxes): + """ + Apply the horizontal flip transform on an rotated boxes. + + Args: + rotated_boxes (ndarray): Nx5 floating point array of + (x_center, y_center, width, height, angle_degrees) format + in absolute coordinates. + """ + # Transform x_center + rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0] + # Transform angle + rotated_boxes[:, 4] = -rotated_boxes[:, 4] + return rotated_boxes + + +def Resize_rotated_box(transform, rotated_boxes): + # Note: when scale_factor_x != scale_factor_y, + # the rotated box does not preserve the rectangular shape when the angle + # is not a multiple of 90 degrees under resize transformation. 
+ # Instead, the shape is a parallelogram (that has skew) + # Here we make an approximation by fitting a rotated rectangle to the + # parallelogram that shares the same midpoints on the left and right edge + scale_factor_x = transform.new_w * 1.0 / transform.w + scale_factor_y = transform.new_h * 1.0 / transform.h + rotated_boxes[:, 0] *= scale_factor_x + rotated_boxes[:, 1] *= scale_factor_y + theta = rotated_boxes[:, 4] * np.pi / 180.0 + c = np.cos(theta) + s = np.sin(theta) + + # In image space, y is top->down and x is left->right + # Consider the local coordintate system for the rotated box, + # where the box center is located at (0, 0), and the four vertices ABCD are + # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2) + # the midpoint of the left edge AD of the rotated box E is: + # E = (A+D)/2 = (-w / 2, 0) + # the midpoint of the top edge AB of the rotated box F is: + # F(0, -h / 2) + # To get the old coordinates in the global system, apply the rotation transformation + # (Note: the right-handed coordinate system for image space is yOx): + # (old_x, old_y) = (s * y + c * x, c * y - s * x) + # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2) + # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2) + # After applying the scaling factor (sfx, sfy): + # E(new) = (-sfx * c * w / 2, sfy * s * w / 2) + # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2) + # The new width after scaling tranformation becomes: + + # w(new) = |E(new) - O| * 2 + # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2 + # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w + # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2] + # + # For example, + # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x; + # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y + rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s)) + + # h(new) = |F(new) - O| * 2 + # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2 + # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h + # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2] + # + # For example, + # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y; + # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x + rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c)) + + # The angle is the rotation angle from y-axis in image space to the height + # vector (top->down in the box's local coordinate system) of the box in CCW. + # + # angle(new) = angle_yOx(O - F(new)) + # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) ) + # = atan2(sfx * s * h / 2, sfy * c * h / 2) + # = atan2(sfx * s, sfy * c) + # + # For example, + # when sfx == sfy, angle(new) == atan2(s, c) == angle(old) + rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi + + return rotated_boxes + + +HFlipTransform.register_type("rotated_box", HFlip_rotated_box) +NoOpTransform.register_type("rotated_box", lambda t, x: x) +ResizeTransform.register_type("rotated_box", Resize_rotated_box) diff --git a/detectron2/data/transforms/transform_gen.py b/detectron2/data/transforms/transform_gen.py new file mode 100644 index 0000000000..5db34945e7 --- /dev/null +++ b/detectron2/data/transforms/transform_gen.py @@ -0,0 +1,447 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +# File: transformer.py + +import inspect +import numpy as np +import pprint +import sys +from abc import ABCMeta, abstractmethod +from fvcore.transforms.transform import ( + BlendTransform, + CropTransform, + HFlipTransform, + NoOpTransform, + Transform, + TransformList, +) +from PIL import Image + +from .transform import ExtentTransform, ResizeTransform + +__all__ = [ + "RandomBrightness", + "RandomContrast", + "RandomCrop", + "RandomExtent", + "RandomFlip", + "RandomSaturation", + "RandomLighting", + "Resize", + "ResizeShortestEdge", + "TransformGen", + "apply_transform_gens", +] + + +def check_dtype(img): + assert isinstance(img, np.ndarray), "[TransformGen] Needs an numpy array, but got a {}!".format( + type(img) + ) + assert not isinstance(img.dtype, np.integer) or ( + img.dtype == np.uint8 + ), "[TransformGen] Got image of type {}, use uint8 or floating points instead!".format( + img.dtype + ) + assert img.ndim in [2, 3], img.ndim + + +class TransformGen(metaclass=ABCMeta): + """ + TransformGen takes an image of type uint8 in range [0, 255], or + floating point in range [0, 1] or [0, 255] as input. + + It creates a :class:`Transform` based on the given image, sometimes with randomness. + The transform can then be used to transform images + or other data (boxes, points, annotations, etc.) associated with it. + + The assumption made in this class + is that the image itself is sufficient to instantiate a transform. + When this assumption is not true, you need to create the transforms by your own. + + A list of `TransformGen` can be applied with :func:`apply_transform_gens`. + """ + + def _init(self, params=None): + if params: + for k, v in params.items(): + if k != "self" and not k.startswith("_"): + setattr(self, k, v) + + @abstractmethod + def get_transform(self, img): + pass + + def _rand_range(self, low=1.0, high=None, size=None): + """ + Uniform float random number between low and high. + """ + if high is None: + low, high = 0, low + if size is None: + size = [] + return np.random.uniform(low, high, size) + + def __repr__(self): + """ + Produce something like: + "MyTransformGen(field1={self.field1}, field2={self.field2})" + """ + try: + argspec = inspect.getargspec(self.__init__) + assert argspec.varargs is None, "The default __repr__ doesn't work for varargs!" + assert argspec.keywords is None, "The default __repr__ doesn't work for kwargs!" + fields = argspec.args[1:] + index_field_has_default = len(fields) - ( + 0 if argspec.defaults is None else len(argspec.defaults) + ) + + classname = type(self).__name__ + argstr = [] + for idx, f in enumerate(fields): + assert hasattr(self, f), ( + "Attribute {} not found! " + "Default __repr__ only works if attributes match the constructor.".format(f) + ) + attr = getattr(self, f) + if idx >= index_field_has_default: + if attr is argspec.defaults[idx - index_field_has_default]: + continue + argstr.append("{}={}".format(f, pprint.pformat(attr))) + return "{}({})".format(classname, ", ".join(argstr)) + except AssertionError: + return super().__repr__() + + __str__ = __repr__ + + +class RandomFlip(TransformGen): + """ + Flip the image horizontally with the given probability. + + TODO Vertical flip to be implemented. + """ + + def __init__(self, prob=0.5): + """ + Args: + prob (float): probability of flip. + """ + horiz, vert = True, False + # TODO implement vertical flip when we need it + super().__init__() + + if horiz and vert: + raise ValueError("Cannot do both horiz and vert. 
Please use two Flip instead.") + if not horiz and not vert: + raise ValueError("At least one of horiz or vert has to be True!") + self._init(locals()) + + def get_transform(self, img): + _, w = img.shape[:2] + do = self._rand_range() < self.prob + if do: + return HFlipTransform(w) + else: + return NoOpTransform() + + +class Resize(TransformGen): + """ Resize image to a target size""" + + def __init__(self, shape, interp=Image.BILINEAR): + """ + Args: + shape: (h, w) tuple or a int + interp: PIL interpolation method + """ + if isinstance(shape, int): + shape = (shape, shape) + shape = tuple(shape) + self._init(locals()) + + def get_transform(self, img): + return ResizeTransform( + img.shape[0], img.shape[1], self.shape[0], self.shape[1], self.interp + ) + + +class ResizeShortestEdge(TransformGen): + """ + Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge. + If `max_size` is reached, then downscale so that the longer edge does not exceed max_size. + """ + + def __init__( + self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR + ): + """ + Args: + short_edge_length (list[int]): If ``sample_style=="range"``, + a [min, max] interval from which to sample the shortest edge length. + If ``sample_style=="choice"``, a list of shortest edge lengths to sample from. + max_size (int): maximum allowed longest edge length. + sample_style (str): either "range" or "choice". + """ + super().__init__() + assert sample_style in ["range", "choice"], sample_style + + self.is_range = sample_style == "range" + if isinstance(short_edge_length, int): + short_edge_length = (short_edge_length, short_edge_length) + self._init(locals()) + + def get_transform(self, img): + h, w = img.shape[:2] + + if self.is_range: + size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1) + else: + size = np.random.choice(self.short_edge_length) + + scale = size * 1.0 / min(h, w) + if h < w: + newh, neww = size, scale * w + else: + newh, neww = scale * h, size + if max(newh, neww) > self.max_size: + scale = self.max_size * 1.0 / max(newh, neww) + newh = newh * scale + neww = neww * scale + neww = int(neww + 0.5) + newh = int(newh + 0.5) + return ResizeTransform(h, w, newh, neww, self.interp) + + +class RandomCrop(TransformGen): + """ + Randomly crop a subimage out of an image. + """ + + def __init__(self, crop_type: str, crop_size): + """ + Args: + crop_type (str): one of "relative_range", "relative", "absolute". + See `config/defaults.py` for explanation. 
+ crop_size (tuple[float]): the relative ratio or absolute pixels of + height and width + """ + super().__init__() + assert crop_type in ["relative_range", "relative", "absolute"] + self._init(locals()) + + def get_transform(self, img): + h, w = img.shape[:2] + croph, cropw = self.get_crop_size((h, w)) + assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self) + h0 = np.random.randint(h - croph + 1) + w0 = np.random.randint(w - cropw + 1) + return CropTransform(w0, h0, cropw, croph) + + def get_crop_size(self, image_size): + """ + Args: + image_size (tuple): height, width + + Returns: + crop_size (tuple): height, width in absolute pixels + """ + h, w = image_size + if self.crop_type == "relative": + ch, cw = self.crop_size + return int(h * ch + 0.5), int(w * cw + 0.5) + elif self.crop_type == "relative_range": + crop_size = np.asarray(self.crop_size, dtype=np.float32) + ch, cw = crop_size + np.random.rand(2) * (1 - crop_size) + return int(h * ch + 0.5), int(w * cw + 0.5) + elif self.crop_type == "absolute": + return self.crop_size + else: + NotImplementedError("Unknown crop type {}".format(self.crop_type)) + + +class RandomExtent(TransformGen): + """ + Outputs an image by cropping a random "subrect" of the source image. + + The subrect can be parameterized to include pixels outside the source image, + in which case they will be set to zeros (i.e. black). The size of the output + image will vary with the size of the random subrect. + """ + + def __init__(self, scale_range, shift_range): + """ + Args: + output_size (h, w): Dimensions of output image + scale_range (l, h): Range of input-to-output size scaling factor + shift_range (x, y): Range of shifts of the cropped subrect. The rect + is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)], + where (w, h) is the (width, height) of the input image. Set each + component to zero to crop at the image's center. + """ + super().__init__() + self._init(locals()) + + def get_transform(self, img): + img_h, img_w = img.shape[:2] + + # Initialize src_rect to fit the input image. + src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h]) + + # Apply a random scaling to the src_rect. + src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1]) + + # Apply a random shift to the coordinates origin. + src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5) + src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5) + + # Map src_rect coordinates into image coordinates (center at corner). + src_rect[0::2] += 0.5 * img_w + src_rect[1::2] += 0.5 * img_h + + return ExtentTransform( + src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]), + output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])), + ) + + +class RandomContrast(TransformGen): + """ + Randomly transforms image contrast. + + Contrast intensity is uniformly sampled in (intensity_min, intensity_max). 
+ - intensity < 1 will reduce contrast + - intensity = 1 will preserve the input image + - intensity > 1 will increase contrast + + See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html + """ + + def __init__(self, intensity_min, intensity_max): + """ + Args: + intensity_min (float): Minimum augmentation + intensity_max (float): Maximum augmentation + """ + super().__init__() + self._init(locals()) + + def get_transform(self, img): + w = np.random.uniform(self.intensity_min, self.intensity_max) + return BlendTransform(src_image=img.mean(), src_weight=1 - w, dst_weight=w) + + +class RandomBrightness(TransformGen): + """ + Randomly transforms image brightness. + + Brightness intensity is uniformly sampled in (intensity_min, intensity_max). + - intensity < 1 will reduce brightness + - intensity = 1 will preserve the input image + - intensity > 1 will increase brightness + + See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html + """ + + def __init__(self, intensity_min, intensity_max): + """ + Args: + intensity_min (float): Minimum augmentation + intensity_max (float): Maximum augmentation + """ + super().__init__() + self._init(locals()) + + def get_transform(self, img): + w = np.random.uniform(self.intensity_min, self.intensity_max) + return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w) + + +class RandomSaturation(TransformGen): + """ + Randomly transforms image saturation. + + Saturation intensity is uniformly sampled in (intensity_min, intensity_max). + - intensity < 1 will reduce saturation (make the image more grayscale) + - intensity = 1 will preserve the input image + - intensity > 1 will increase saturation + + See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html + """ + + def __init__(self, intensity_min, intensity_max): + """ + Args: + intensity_min (float): Minimum augmentation (1 preserves input). + intensity_max (float): Maximum augmentation (1 preserves input). + """ + super().__init__() + self._init(locals()) + + def get_transform(self, img): + assert img.shape[-1] == 3, "Saturation only works on RGB images" + w = np.random.uniform(self.intensity_min, self.intensity_max) + grayscale = img.dot([0.299, 0.587, 0.114])[:, :, np.newaxis] + return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w) + + +class RandomLighting(TransformGen): + """ + Randomly transforms image color using fixed PCA over ImageNet. + + The degree of color jittering is randomly sampled via a normal distribution, + with standard deviation given by the scale parameter. + """ + + def __init__(self, scale): + """ + Args: + scale (float): Standard deviation of principal component weighting. + """ + super().__init__() + self._init(locals()) + self.eigen_vecs = np.array( + [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]] + ) + self.eigen_vals = np.array([0.2175, 0.0188, 0.0045]) + + def get_transform(self, img): + assert img.shape[-1] == 3, "Saturation only works on RGB images" + weights = np.random.normal(scale=self.scale, size=3) + return BlendTransform( + src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0 + ) + + +def apply_transform_gens(transform_gens, img): + """ + Apply a list of :class:`TransformGen` on the input image, and + returns the transformed image and a list of transforms. + + We cannot simply create and return all transforms without + applying it to the image, because a subsequent transform may + need the output of the previous one. 
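+    For example (an illustrative sketch), one might build
+    ``tfm_gens = [ResizeShortestEdge(800, max_size=1333), RandomFlip()]`` and call
+    ``img, tfms = apply_transform_gens(tfm_gens, img)``; the returned ``tfms``
+    can then be applied to the boxes, points, etc. associated with the image.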
+ + Args: + transform_gens (list): list of :class:`TransformGen` instance to + be applied. + img (ndarray): uint8 or floating point images with 1 or 3 channels. + + Returns: + ndarray: the transformed image + TransformList: contain the transforms that's used. + """ + for g in transform_gens: + assert isinstance(g, TransformGen), g + + check_dtype(img) + + tfms = [] + for g in transform_gens: + tfm = g.get_transform(img) + assert isinstance( + tfm, Transform + ), "TransformGen {} must return an instance of Transform! Got {} instead".format(g, tfm) + img = tfm.apply_image(img) + tfms.append(tfm) + return img, TransformList(tfms) diff --git a/detectron2/engine/__init__.py b/detectron2/engine/__init__.py new file mode 100644 index 0000000000..6a4538da3e --- /dev/null +++ b/detectron2/engine/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +from .launch import * +from .train_loop import * + +__all__ = [k for k in globals().keys() if not k.startswith("_")] + + +# prefer to let hooks and defaults live in separate namespaces (therefore not in __all__) +# but still make them available here +from .hooks import * +from .defaults import * diff --git a/detectron2/engine/defaults.py b/detectron2/engine/defaults.py new file mode 100644 index 0000000000..c1f231309a --- /dev/null +++ b/detectron2/engine/defaults.py @@ -0,0 +1,425 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +""" +This file contains components with some default boilerplate logic user may need +in training / testing. They will not work for everyeone, but many users may find them useful. + +The behavior of functions/classes in this file is subject to change, +since they are meant to represent the "common default behavior" people need in their projects. +""" + +import argparse +import logging +import os +from collections import OrderedDict +import torch +from fvcore.common.file_io import PathManager +from fvcore.nn.precise_bn import get_bn_modules +from torch.nn.parallel import DistributedDataParallel + +import detectron2.data.transforms as T +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.data import ( + MetadataCatalog, + build_detection_test_loader, + build_detection_train_loader, +) +from detectron2.evaluation import ( + DatasetEvaluator, + inference_on_dataset, + print_csv_format, + verify_results, +) +from detectron2.modeling import build_model +from detectron2.solver import build_lr_scheduler, build_optimizer +from detectron2.utils import comm +from detectron2.utils.collect_env import collect_env_info +from detectron2.utils.env import seed_all_rng +from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter +from detectron2.utils.logger import setup_logger + +from . import hooks +from .train_loop import SimpleTrainer + +__all__ = ["default_argument_parser", "default_setup", "DefaultPredictor", "DefaultTrainer"] + + +def default_argument_parser(): + """ + Create a parser with some common arguments used by detectron2 users. 
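+    A typical entry point may then run ``args = default_argument_parser().parse_args()``
+    and forward ``args`` to :func:`launch` (illustrative; the exact wiring is up to the caller).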
+ + Returns: + argparse.ArgumentParser: + """ + parser = argparse.ArgumentParser(description="Detectron2 Training") + parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") + parser.add_argument( + "--resume", + action="store_true", + help="whether to attempt to resume from the checkpoint directory", + ) + parser.add_argument("--eval-only", action="store_true", help="perform evaluation only") + parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*") + parser.add_argument("--num-machines", type=int, default=1) + parser.add_argument( + "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)" + ) + + # PyTorch still may leave orphan processes in multi-gpu training. + # Therefore we use a deterministic way to obtain port, + # so that users are aware of orphan processes by seeing the port occupied. + port = 2 ** 15 + 2 ** 14 + hash(os.getuid()) % 2 ** 14 + parser.add_argument("--dist-url", default="tcp://127.0.0.1:{}".format(port)) + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + return parser + + +def default_setup(cfg, args): + """ + Perform some basic common setups at the beginning of a job, including: + + 1. Set up the detectron2 logger + 2. Log basic information about environment, cmdline arguments, and config + 3. Backup the config to the output directory + + Args: + cfg (CfgNode): the full config to be used + args (argparse.NameSpace): the command line arguments to be logged + """ + output_dir = cfg.OUTPUT_DIR + if comm.is_main_process() and output_dir: + PathManager.mkdirs(output_dir) + + rank = comm.get_rank() + setup_logger(output_dir, distributed_rank=rank, name="fvcore") + logger = setup_logger(output_dir, distributed_rank=rank) + + logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size())) + logger.info("Environment info:\n" + collect_env_info()) + + logger.info("Command line arguments: " + str(args)) + if hasattr(args, "config_file"): + logger.info( + "Contents of args.config_file={}:\n{}".format( + args.config_file, PathManager.open(args.config_file, "r").read() + ) + ) + + logger.info("Running with full config:\n{}".format(cfg)) + if comm.is_main_process() and output_dir: + # Note: some of our scripts may expect the existence of + # config.yaml in output directory + path = os.path.join(output_dir, "config.yaml") + with PathManager.open(path, "w") as f: + f.write(cfg.dump()) + logger.info("Full config saved to {}".format(os.path.abspath(path))) + + # make sure each worker has a different, yet deterministic seed if specified + seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank) + + # cudnn benchmark has large overhead. It shouldn't be used considering the small size of + # typical validation set. + if not (hasattr(args, "eval_only") and args.eval_only): + torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK + + +class DefaultPredictor: + """ + Create a simple end-to-end predictor with the given config. + The predictor takes an BGR image and produce a dict of predictions. + + Attributes: + metadata (Metadata): the metadata of the underlying dataset, obtained from + cfg.DATASETS.TEST. 
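+
+    Examples (a minimal, illustrative sketch; the image path is arbitrary and
+    ``cv2`` is only used here to load a BGR image):
+
+    .. code-block:: python
+
+        pred = DefaultPredictor(cfg)
+        inputs = cv2.imread("input.jpg")  # BGR, as this predictor expects
+        outputs = pred(inputs)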
+ """ + + def __init__(self, cfg): + self.cfg = cfg.clone() # cfg can be modified by model + self.model = build_model(self.cfg) + self.model.eval() + self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0]) + + checkpointer = DetectionCheckpointer(self.model) + checkpointer.load(cfg.MODEL.WEIGHTS) + + self.transform_gen = T.ResizeShortestEdge( + [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST + ) + + self.input_format = cfg.INPUT.FORMAT + assert self.input_format in ["RGB", "BGR"], self.input_format + + @torch.no_grad() + def __call__(self, original_image): + """ + Args: + original_image (np.ndarray): an image of shape (H, W, C) (in BGR order). + + Returns: + predictions (dict): the output of the model + """ + # Apply pre-processing to image. + if self.input_format == "RGB": + # whether the model expects BGR inputs or RGB + original_image = original_image[:, :, ::-1] + height, width = original_image.shape[:2] + image = self.transform_gen.get_transform(original_image).apply_image(original_image) + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) + + inputs = {"image": image, "height": height, "width": width} + predictions = self.model([inputs])[0] + return predictions + + +class DefaultTrainer(SimpleTrainer): + """ + A trainer with default training logic. Compared to `SimpleTrainer`, it + contains the following logic in addition: + + 1. Create model, optimizer, scheduler, dataloader from the given config. + 2. Load a checkpoint or `cfg.MODEL.WEIGHTS`, if exists. + 3. Register a few common hooks. + + It is created to simplify the **standard model training workflow** and reduce code boilerplate + for users who only need the standard training workflow, with standard features. + It means this class makes *many assumptions* about your training logic that + may easily become invalid in a new research. In fact, any assumptions beyond those made in the + :class:`SimpleTrainer` are too much for research. + + The code of this class has been annotated about restrictive assumptions it mades. + When they do not work for you, you're encouraged to write your own training logic. + + Also note that the behavior of this class, like other functions/classes in + this file, is not stable, since it is meant to represent the "common default behavior". + It is only guaranteed to work well with the standard models and training workflow in detectron2. + To obtain more stable behavior, write your own training logic with other public APIs. + + Attributes: + scheduler: + checkpointer (DetectionCheckpointer): + cfg (CfgNode): + """ + + def __init__(self, cfg): + """ + Args: + cfg (CfgNode): + """ + # Assume these objects must be constructed in this order. + model = self.build_model(cfg) + optimizer = self.build_optimizer(cfg, model) + data_loader = self.build_train_loader(cfg) + + # For training, wrap with DDP. But don't need this for inference. + if comm.get_world_size() > 1: + model = DistributedDataParallel( + model, device_ids=[comm.get_local_rank()], broadcast_buffers=False + ) + super().__init__(model, data_loader, optimizer) + + self.scheduler = self.build_lr_scheduler(cfg, optimizer) + # Assume no other objects need to be checkpointed. 
+ # We can later make it checkpoint the stateful hooks + self.checkpointer = DetectionCheckpointer( + # Assume you want to save checkpoits together with logs/statistics + model, + cfg.OUTPUT_DIR, + optimizer=optimizer, + scheduler=self.scheduler, + ) + self.start_iter = 0 + self.max_iter = cfg.SOLVER.MAX_ITER + self.cfg = cfg + + self.register_hooks(self.build_hooks()) + + def resume_or_load(self, resume=True): + """ + If `resume==True`, and last checkpoint exists, resume from it. + + Otherwise, load a model specified by the config. + + Args: + resume (bool): whether to do resume or not + """ + # The checkpoint stores the training iteration that just finished, thus we start + # at the next iteration (or iter zero if there's no checkpoint). + self.start_iter = ( + self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume).get( + "iteration", -1 + ) + + 1 + ) + + def build_hooks(self): + """ + Build a list of default hooks. + + Returns: + list[HookBase]: + """ + cfg = self.cfg.clone() + cfg.defrost() + cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN + + ret = [ + hooks.IterationTimer(), + hooks.LRScheduler(self.optimizer, self.scheduler), + hooks.PreciseBN( + # Run at the same freq as (but before) evaluation. + cfg.TEST.EVAL_PERIOD, + self.model, + # Build a new data loader to not affect training + self.build_train_loader(cfg), + cfg.TEST.PRECISE_BN.NUM_ITER, + ) + if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) + else None, + ] + + # Do PreciseBN before checkpointer, because it updates the model and need to + # be saved by checkpointer. + # This is not always the best: if checkpointing has a different frequency, + # some checkpoints may have more precise statistics than others. + if comm.is_main_process(): + ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD)) + + def test_and_save_results(): + self._last_eval_results = self.test(self.cfg, self.model) + return self._last_eval_results + + # Do evaluation after checkpointer, because then if it fails, + # we can use the saved checkpoint to debug. + ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) + + if comm.is_main_process(): + # run writers in the end, so that evaluation metrics are written + ret.append(hooks.PeriodicWriter(self.build_writers())) + return ret + + def build_writers(self): + """ + Build a list of default writers, that write metrics to the screen, + a json file, and a tensorboard event file respectively. + + Returns: + list[Writer]: a list of objects that have a ``.write`` method. + """ + # Assume the default print/log frequency. + return [ + # It may not always print what you want to see, since it prints "common" metrics only. + CommonMetricPrinter(self.max_iter), + JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, "metrics.json")), + TensorboardXWriter(self.cfg.OUTPUT_DIR), + ] + + def train(self): + """ + Run training. + + Returns: + OrderedDict of results, if evaluation is enabled. Otherwise None. 
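+
+        Example (illustrative)::
+
+            trainer = DefaultTrainer(cfg)
+            trainer.resume_or_load(resume=False)
+            trainer.train()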
+ """ + super().train(self.start_iter, self.max_iter) + if hasattr(self, "_last_eval_results") and comm.is_main_process(): + verify_results(self.cfg, self._last_eval_results) + return self._last_eval_results + + @classmethod + def build_model(cls, cfg): + """ + Returns: + torch.nn.Module: + """ + model = build_model(cfg) + logger = logging.getLogger(__name__) + logger.info("Model:\n{}".format(model)) + return model + + @classmethod + def build_optimizer(cls, cfg, model): + """ + Returns: + torch.optim.Optimizer: + """ + return build_optimizer(cfg, model) + + @classmethod + def build_lr_scheduler(cls, cfg, optimizer): + return build_lr_scheduler(cfg, optimizer) + + @classmethod + def build_train_loader(cls, cfg): + """ + Returns: + iterable + """ + return build_detection_train_loader(cfg) + + @classmethod + def build_test_loader(cls, cfg, dataset_name): + """ + Returns: + iterable + """ + return build_detection_test_loader(cfg, dataset_name) + + @classmethod + def build_evaluator(cls, cfg, dataset_name): + """ + Returns: + DatasetEvaluator + """ + raise NotImplementedError + + @classmethod + def test(cls, cfg, model, evaluators=None): + """ + Args: + cfg (CfgNode): + model (nn.Module): + evaluators (list[DatasetEvaluator] or None): if None, will call + :meth:`build_evaluator`. Otherwise, must have the same length as + `cfg.DATASETS.TEST`. + + Returns: + dict: a dict of result metrics + """ + logger = logging.getLogger(__name__) + if isinstance(evaluators, DatasetEvaluator): + evaluators = [evaluators] + if evaluators is not None: + assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( + len(cfg.DATASETS.TEST), len(evaluators) + ) + + results = OrderedDict() + for idx, dataset_name in enumerate(cfg.DATASETS.TEST): + data_loader = cls.build_test_loader(cfg, dataset_name) + # When evaluators are passed in as arguments, + # implicitly assume that evaluators can be created before data_loader. + evaluator = ( + evaluators[idx] + if evaluators is not None + else cls.build_evaluator(cfg, dataset_name) + ) + results_i = inference_on_dataset(model, data_loader, evaluator) + results[dataset_name] = results_i + if comm.is_main_process(): + assert isinstance( + results_i, dict + ), "Evaluator must return a dict on the main process. Got {} instead.".format( + results_i + ) + logger.info("Evaluation results for {} in csv format:".format(dataset_name)) + print_csv_format(results_i) + + if len(results) == 1: + results = list(results.values())[0] + return results diff --git a/detectron2/engine/hooks.py b/detectron2/engine/hooks.py new file mode 100644 index 0000000000..c73779764f --- /dev/null +++ b/detectron2/engine/hooks.py @@ -0,0 +1,404 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import datetime +import logging +import os +import tempfile +import time +from collections import Counter +import torch +from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer +from fvcore.common.file_io import PathManager +from fvcore.common.timer import Timer +from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats + +import detectron2.utils.comm as comm +from detectron2.evaluation.testing import flatten_results_dict +from detectron2.utils.events import EventStorage + +from .train_loop import HookBase + +__all__ = [ + "CallbackHook", + "IterationTimer", + "PeriodicWriter", + "PeriodicCheckpointer", + "LRScheduler", + "AutogradProfiler", + "EvalHook", + "PreciseBN", +] + + +""" +Implement some common hooks. 
+""" + + +class CallbackHook(HookBase): + """ + Create a hook using callback functions provided by the user. + """ + + def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None): + """ + Each argument is a function that takes one argument: the trainer. + """ + self._before_train = before_train + self._before_step = before_step + self._after_step = after_step + self._after_train = after_train + + def before_train(self): + if self._before_train: + self._before_train(self.trainer) + + def after_train(self): + if self._after_train: + self._after_train(self.trainer) + # The functions may be closures that hold reference to the trainer + # Therefore, delete them to avoid circular reference. + del self._before_train, self._after_train + del self._before_step, self._after_step + + def before_step(self): + if self._before_step: + self._before_step(self.trainer) + + def after_step(self): + if self._after_step: + self._after_step(self.trainer) + + +class IterationTimer(HookBase): + """ + Track the time spent for each iteration (each run_step call in the trainer). + Print a summary in the end of training. + + This hook uses the time between the call to its :meth:`before_step` + and :meth:`after_step` methods. + Under the convention that :meth:`before_step` of all hooks should only + take negligible amount of time, the :class:`IterationTimer` hook should be + placed at the beginning of the list of hooks to obtain accurate timing. + """ + + def __init__(self, warmup_iter=3): + """ + Args: + warmup_iter (int): the number of iterations at the beginning to exclude + from timing. + """ + self._warmup_iter = warmup_iter + self._step_timer = Timer() + + def before_train(self): + self._start_time = time.perf_counter() + self._total_timer = Timer() + self._total_timer.pause() + + def after_train(self): + logger = logging.getLogger(__name__) + total_time = time.perf_counter() - self._start_time + total_time_minus_hooks = self._total_timer.seconds() + hook_time = total_time - total_time_minus_hooks + + num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter + + if num_iter > 0 and total_time_minus_hooks > 0: + # Speed is meaningful only after warmup + # NOTE this format is parsed by grep in some scripts + logger.info( + "Overall training speed: {} iterations in {} ({:.4f} s / it)".format( + num_iter, + str(datetime.timedelta(seconds=int(total_time_minus_hooks))), + total_time_minus_hooks / num_iter, + ) + ) + + logger.info( + "Total training time: {} ({} on hooks)".format( + str(datetime.timedelta(seconds=int(total_time))), + str(datetime.timedelta(seconds=int(hook_time))), + ) + ) + + def before_step(self): + self._step_timer.reset() + self._total_timer.resume() + + def after_step(self): + # +1 because we're in after_step + iter_done = self.trainer.iter - self.trainer.start_iter + 1 + if iter_done >= self._warmup_iter: + sec = self._step_timer.seconds() + self.trainer.storage.put_scalars(time=sec) + else: + self._start_time = time.perf_counter() + self._total_timer.reset() + + self._total_timer.pause() + + +class PeriodicWriter(HookBase): + """ + Write events to EventStorage periodically. + + It is executed every ``period`` iterations and after the last iteration. + """ + + def __init__(self, writers, period=20): + """ + Args: + writers (list): a list of objects with a "write" method. 
+ period (int): + """ + self._writers = writers + self._period = period + + def after_step(self): + if (self.trainer.iter + 1) % self._period == 0 or ( + self.trainer.iter == self.trainer.max_iter - 1 + ): + for writer in self._writers: + writer.write() + + +class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase): + """ + Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook. + + Note that when used as a hook, + it is unable to save additional data other than what's defined + by the given `checkpointer`. + + It is executed every ``period`` iterations and after the last iteration. + """ + + def before_train(self): + self.max_iter = self.trainer.max_iter + + def after_step(self): + # No way to use **kwargs + self.step(self.trainer.iter) + + +class LRScheduler(HookBase): + """ + A hook which executes a torch builtin LR scheduler and summarizes the LR. + It is executed after every iteration. + """ + + def __init__(self, optimizer, scheduler): + """ + Args: + optimizer (torch.optim.Optimizer): + scheduler (torch.optim._LRScheduler) + """ + self._optimizer = optimizer + self._scheduler = scheduler + + # NOTE: some heuristics on what LR to summarize + # summarize the param group with most parameters + largest_group = max(len(g["params"]) for g in optimizer.param_groups) + + if largest_group == 1: + # If all groups have one parameter, + # then find the most common initial LR, and use it for summary + lr_count = Counter([g["lr"] for g in optimizer.param_groups]) + lr = lr_count.most_common()[0][0] + for i, g in enumerate(optimizer.param_groups): + if g["lr"] == lr: + self._best_param_group_id = i + break + else: + for i, g in enumerate(optimizer.param_groups): + if len(g["params"]) == largest_group: + self._best_param_group_id = i + break + + def after_step(self): + lr = self._optimizer.param_groups[self._best_param_group_id]["lr"] + self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False) + self._scheduler.step() + + +class AutogradProfiler(HookBase): + """ + A hook which runs `torch.autograd.profiler.profile`. + + Note: + When used together with NCCL on older version of GPUs, + autograd profiler may cause deadlock because it unnecessarily allocates + memory on every device it sees. The memory management calls, if + interleaved with NCCL calls, lead to deadlock on GPUs that do not + support `cudaLaunchCooperativeKernelMultiDevice`. + """ + + def __init__(self, enable_predicate, output_dir, *, use_cuda=True): + """ + Args: + enable_predicate (callable[trainer -> bool]): a function which takes a trainer, + and returns whether to enable the profiler. + It will be called once every step, and can be used to select which steps to profile. + output_dir (str): the output directory to dump tracing files. + use_cuda (bool): same as in `torch.autograd.profiler.profile`. 
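+
+        Example (illustrative; the iteration range is just an example)::
+
+            hooks.AutogradProfiler(
+                lambda trainer: 10 < trainer.iter < 20, cfg.OUTPUT_DIR
+            )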
+ """ + self._enable_predicate = enable_predicate + self._use_cuda = use_cuda + self._output_dir = output_dir + + def before_step(self): + if self._enable_predicate(self.trainer): + self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda) + self._profiler.__enter__() + else: + self._profiler = None + + def after_step(self): + if self._profiler is None: + return + self._profiler.__exit__(None, None, None) + out_file = os.path.join( + self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter) + ) + if "://" not in out_file: + self._profiler.export_chrome_trace(out_file) + else: + # Support non-posix filesystems + with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d: + tmp_file = os.path.join(d, "tmp.json") + self._profiler.export_chrome_trace(tmp_file) + with open(tmp_file) as f: + content = f.read() + with PathManager.open(out_file, "w") as f: + f.write(content) + + +class EvalHook(HookBase): + """ + Run an evaluation function periodically, and at the end of training. + + It is executed every ``eval_period`` iterations and after the last iteration. + """ + + def __init__(self, eval_period, eval_function): + """ + Args: + eval_period (int): the period to run `eval_function`. + eval_function (callable): a function which takes no arguments, and + returns a nested dict of evaluation metrics. + + Note: + This hook must be enabled in all or none workers. + If you would like only certain workers to perform evaluation, + give other workers a no-op function (`eval_function=lambda: None`). + """ + self._period = eval_period + self._func = eval_function + + def after_step(self): + next_iter = self.trainer.iter + 1 + is_final = next_iter == self.trainer.max_iter + if is_final or (self._period > 0 and next_iter % self._period == 0): + results = self._func() + + if results: + assert isinstance( + results, dict + ), "Eval function must return a dict. Got {} instead.".format(results) + + flattened_results = flatten_results_dict(results) + for k, v in flattened_results.items(): + try: + v = float(v) + except Exception: + raise ValueError( + "[EvalHook] eval_function should return a nested dict of float. " + "Got '{}: {}' instead.".format(k, v) + ) + self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False) + + # Evaluation may take different time among workers. + # A barrier make them start the next iteration together. + comm.synchronize() + + def after_train(self): + # func is likely a closure that holds reference to the trainer + # therefore we clean it to avoid circular reference in the end + del self._func + + +class PreciseBN(HookBase): + """ + The standard implementation of BatchNorm uses EMA in inference, which is + sometimes suboptimal. + This class computes the true average of statistics rather than the moving average, + and put true averages to every BN layer in the given model. + + It is executed every ``period`` iterations and after the last iteration. + """ + + def __init__(self, period, model, data_loader, num_iter): + """ + Args: + period (int): the period this hook is run, or 0 to not run during training. + The hook will always run in the end of training. + model (nn.Module): a module whose all BN layers in training mode will be + updated by precise BN. + Note that user is responsible for ensuring the BN layers to be + updated are in training mode when this hook is triggered. + data_loader (iterable): it will produce data to be run by `model(data)`. + num_iter (int): number of iterations used to compute the precise + statistics. 
+ """ + self._logger = logging.getLogger(__name__) + if len(get_bn_modules(model)) == 0: + self._logger.info( + "PreciseBN is disabled because model does not contain BN layers in training mode." + ) + self._disabled = True + return + + self._model = model + self._data_loader = data_loader + self._num_iter = num_iter + self._period = period + self._disabled = False + + self._data_iter = None + + def after_step(self): + next_iter = self.trainer.iter + 1 + is_final = next_iter == self.trainer.max_iter + if is_final or (self._period > 0 and next_iter % self._period == 0): + self.update_stats() + + def update_stats(self): + """ + Update the model with precise statistics. Users can manually call this method. + """ + if self._disabled: + return + + if self._data_iter is None: + self._data_iter = iter(self._data_loader) + + num_iter = 0 + + def data_loader(): + nonlocal num_iter + num_iter += 1 + if num_iter % 100 == 0: + self._logger.info( + "Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter) + ) + # This way we can reuse the same iterator + yield next(self._data_iter) + + with EventStorage(): # capture events in a new storage to discard them + self._logger.info( + "Running precise-BN for {} iterations... ".format(self._num_iter) + + "Note that this could produce different statistics every time." + ) + update_bn_stats(self._model, data_loader(), self._num_iter) diff --git a/detectron2/engine/launch.py b/detectron2/engine/launch.py new file mode 100644 index 0000000000..cf1cadacbb --- /dev/null +++ b/detectron2/engine/launch.py @@ -0,0 +1,83 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import torch +import torch.distributed as dist +import torch.multiprocessing as mp + +from detectron2.utils import comm + +__all__ = ["launch"] + + +def _find_free_port(): + import socket + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + # Binding to port 0 will cause the OS to find an available port for us + sock.bind(("", 0)) + port = sock.getsockname()[1] + sock.close() + # NOTE: there is still a chance the port could be taken by other processes. + return port + + +def launch(main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, dist_url=None, args=()): + """ + Args: + main_func: a function that will be called by `main_func(*args)` + num_machines (int): the total number of machines + machine_rank (int): the rank of this machine (one per machine) + dist_url (str): url to connect to for distributed training, including protocal + e.g. "tcp://127.0.0.1:8686". + Can be set to auto to automatically select a free port on localhost + args (tuple): arguments passed to main_func + """ + world_size = num_machines * num_gpus_per_machine + if world_size > 1: + # https://github.com/pytorch/pytorch/pull/14391 + # TODO prctl in spawned processes + + if dist_url == "auto": + assert num_machines == 1, "dist_url=auto cannot work with distributed training." 
+ port = _find_free_port() + dist_url = f"tcp://127.0.0.1:{port}" + + mp.spawn( + _distributed_worker, + nprocs=num_gpus_per_machine, + args=(main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args), + daemon=False, + ) + else: + main_func(*args) + + +def _distributed_worker( + local_rank, main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args +): + global_rank = machine_rank * num_gpus_per_machine + local_rank + try: + dist.init_process_group( + backend="NCCL", init_method=dist_url, world_size=world_size, rank=global_rank + ) + except Exception as e: + logger = logging.getLogger(__name__) + logger.error("Process group URL: {}".format(dist_url)) + raise e + # synchronize is needed here to prevent a possible timeout after calling init_process_group + # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 + comm.synchronize() + + assert num_gpus_per_machine <= torch.cuda.device_count() + torch.cuda.set_device(local_rank) + + # Setup the local process group (which contains ranks within the same machine) + assert comm._LOCAL_PROCESS_GROUP is None + num_machines = world_size // num_gpus_per_machine + for i in range(num_machines): + ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine)) + pg = dist.new_group(ranks_on_i) + if i == machine_rank: + comm._LOCAL_PROCESS_GROUP = pg + + main_func(*args) diff --git a/detectron2/engine/train_loop.py b/detectron2/engine/train_loop.py new file mode 100644 index 0000000000..b510526dd6 --- /dev/null +++ b/detectron2/engine/train_loop.py @@ -0,0 +1,255 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import logging +import numpy as np +import time +import weakref +import torch + +import detectron2.utils.comm as comm +from detectron2.utils.events import EventStorage + +__all__ = ["HookBase", "TrainerBase", "SimpleTrainer"] + + +class HookBase: + """ + Base class for hooks that can be registered with :class:`TrainerBase`. + + Each hook can implement 4 methods. The way they are called is demonstrated + in the following snippet: + + .. code-block:: python + + hook.before_train() + for iter in range(start_iter, max_iter): + hook.before_step() + trainer.run_step() + hook.after_step() + hook.after_train() + + Notes: + 1. In the hook method, users can access `self.trainer` to access more + properties about the context (e.g., current iteration). + + 2. A hook that does something in :meth:`before_step` can often be + implemented equivalently in :meth:`after_step`. + If the hook takes non-trivial time, it is strongly recommended to + implement the hook in :meth:`after_step` instead of :meth:`before_step`. + The convention is that :meth:`before_step` should only take negligible time. + + Following this convention will allow hooks that do care about the difference + between :meth:`before_step` and :meth:`after_step` (e.g., timer) to + function properly. + + Attributes: + trainer: A weak reference to the trainer object. Set by the trainer when the hook is + registered. + """ + + def before_train(self): + """ + Called before the first iteration. + """ + pass + + def after_train(self): + """ + Called after the last iteration. + """ + pass + + def before_step(self): + """ + Called before each iteration. + """ + pass + + def after_step(self): + """ + Called after each iteration. + """ + pass + + +class TrainerBase: + """ + Base class for iterative trainer with hooks. + + The only assumption we made here is: the training runs in a loop. 
+    A subclass can implement what the loop is.
+    We made no assumptions about the existence of dataloader, optimizer, model, etc.
+
+    Attributes:
+        iter(int): the current iteration.
+
+        start_iter(int): The iteration to start with.
+            By convention the minimum possible value is 0.
+
+        max_iter(int): The iteration to end training.
+
+        storage(EventStorage): An EventStorage that's opened during the course of training.
+    """
+
+    def __init__(self):
+        self._hooks = []
+
+    def register_hooks(self, hooks):
+        """
+        Register hooks to the trainer. The hooks are executed in the order
+        they are registered.
+
+        Args:
+            hooks (list[Optional[HookBase]]): list of hooks
+        """
+        hooks = [h for h in hooks if h is not None]
+        for h in hooks:
+            assert isinstance(h, HookBase)
+            # To avoid circular reference, hooks and trainer cannot own each other.
+            # This normally does not matter, but will cause a memory leak if the
+            # involved objects contain __del__:
+            # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
+            h.trainer = weakref.proxy(self)
+        self._hooks.extend(hooks)
+
+    def train(self, start_iter: int, max_iter: int):
+        """
+        Args:
+            start_iter, max_iter (int): See docs above
+        """
+        logger = logging.getLogger(__name__)
+        logger.info("Starting training from iteration {}".format(start_iter))
+
+        self.iter = self.start_iter = start_iter
+        self.max_iter = max_iter
+
+        with EventStorage(start_iter) as self.storage:
+            try:
+                self.before_train()
+                for self.iter in range(start_iter, max_iter):
+                    self.before_step()
+                    self.run_step()
+                    self.after_step()
+            finally:
+                self.after_train()
+
+    def before_train(self):
+        for h in self._hooks:
+            h.before_train()
+
+    def after_train(self):
+        for h in self._hooks:
+            h.after_train()
+
+    def before_step(self):
+        for h in self._hooks:
+            h.before_step()
+
+    def after_step(self):
+        for h in self._hooks:
+            h.after_step()
+        # this guarantees that in each hook's after_step, storage.iter == trainer.iter
+        self.storage.step()
+
+    def run_step(self):
+        raise NotImplementedError
+
+
+class SimpleTrainer(TrainerBase):
+    """
+    A simple trainer for the most common type of task:
+    single-cost single-optimizer single-data-source iterative optimization.
+    It assumes that every step, you:
+
+    1. Compute the loss with data from the data_loader.
+    2. Compute the gradients with the above loss.
+    3. Update the model with the optimizer.
+
+    If you want to do anything fancier than this,
+    either subclass TrainerBase and implement your own `run_step`,
+    or write your own training loop.
+    """
+
+    def __init__(self, model, data_loader, optimizer):
+        """
+        Args:
+            model: a torch Module. Takes data from data_loader and returns a
+                dict of losses.
+            data_loader: an iterable. Contains data to be used to call model.
+            optimizer: a torch optimizer.
+        """
+        super().__init__()
+
+        """
+        We set the model to training mode in the trainer.
+        However, it's valid to train a model that's in eval mode.
+        If you want your model (or a submodule of it) to behave
+        like evaluation during training, you can overwrite its train() method.
+        """
+        model.train()
+
+        self.model = model
+        self.data_loader = data_loader
+        self._data_loader_iter = iter(data_loader)
+        self.optimizer = optimizer
+
+    def run_step(self):
+        """
+        Implement the standard training logic described above.
+        """
+        assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
+        start = time.perf_counter()
+        """
+        If you want to do something with the data, you can wrap the dataloader.
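+
+        For example (an illustrative sketch; `non_empty` and the surrounding
+        objects are hypothetical), a wrapper that skips empty batches:
+
+            def non_empty(loader):
+                for batch in loader:
+                    if len(batch) > 0:
+                        yield batch
+
+            trainer = SimpleTrainer(model, non_empty(data_loader), optimizer)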
+ """ + data = next(self._data_loader_iter) + data_time = time.perf_counter() - start + + """ + If your want to do something with the losses, you can wrap the model. + """ + loss_dict = self.model(data) + losses = sum(loss for loss in loss_dict.values()) + if not torch.isfinite(losses).all(): + raise FloatingPointError( + "Loss became infinite or NaN at iteration={}!\nloss_dict = {}".format( + self.iter, loss_dict + ) + ) + + # gather metrics among all workers for logging + metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()} + metrics_dict["data_time"] = data_time + # This assumes we do DDP-style training, which is currently the only + # supported method in detectron2. + all_metrics_dict = comm.gather(metrics_dict) + self._write_metrics(all_metrics_dict) + + self.optimizer.zero_grad() + losses.backward() + + """ + If you need gradient clipping/scaling or other processing, you can + wrap the optimizer with your custom `step()` method. + """ + self.optimizer.step() + + def _write_metrics(self, all_metrics_dict): + """ + Args: + all_metrics_dict (list[dict]): list of metrics dict from all workers + """ + if comm.is_main_process(): + # data_time among workers can have high variance. The actual latency + # caused by data_time is the maximum among workers. + data_time = np.max([x.pop("data_time") for x in all_metrics_dict]) + + metrics_dict = { + k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys() + } + total_losses_reduced = sum(loss for loss in metrics_dict.values()) + + self.storage.put_scalars(data_time=data_time, total_loss=total_losses_reduced) + if len(metrics_dict) > 1: + self.storage.put_scalars(**metrics_dict) diff --git a/detectron2/evaluation/__init__.py b/detectron2/evaluation/__init__.py new file mode 100644 index 0000000000..975d8f7dcf --- /dev/null +++ b/detectron2/evaluation/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .cityscapes_evaluation import CityscapesEvaluator +from .coco_evaluation import COCOEvaluator +from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset +from .lvis_evaluation import LVISEvaluator +from .panoptic_evaluation import COCOPanopticEvaluator +from .pascal_voc_evaluation import PascalVOCDetectionEvaluator +from .sem_seg_evaluation import SemSegEvaluator +from .testing import print_csv_format, verify_results + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/detectron2/evaluation/cityscapes_evaluation.py b/detectron2/evaluation/cityscapes_evaluation.py new file mode 100644 index 0000000000..8ba4169b32 --- /dev/null +++ b/detectron2/evaluation/cityscapes_evaluation.py @@ -0,0 +1,114 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import glob +import logging +import os +import tempfile +from collections import OrderedDict +import torch +from PIL import Image + +from detectron2.data import MetadataCatalog +from detectron2.utils import comm + +from .evaluator import DatasetEvaluator + + +class CityscapesEvaluator(DatasetEvaluator): + """ + Evaluate instance segmentation results using cityscapes API. + + Note: + * It does not work in multi-machine distributed training. + * It contains a synchronization, therefore has to be used on all ranks. + """ + + def __init__(self, dataset_name): + """ + Args: + dataset_name (str): the name of the dataset. + It must have the following metadata associated with it: + "thing_classes", "gt_dir". 
+ """ + self._metadata = MetadataCatalog.get(dataset_name) + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + def reset(self): + self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_") + self._temp_dir = self._working_dir.name + # All workers will write to the same results directory + # TODO this does not work in distributed training + self._temp_dir = comm.all_gather(self._temp_dir)[0] + if self._temp_dir != self._working_dir.name: + self._working_dir.cleanup() + self._logger.info( + "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir) + ) + + def process(self, inputs, outputs): + from cityscapesscripts.helpers.labels import name2label + + for input, output in zip(inputs, outputs): + file_name = input["file_name"] + basename = os.path.splitext(os.path.basename(file_name))[0] + pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt") + + output = output["instances"].to(self._cpu_device) + num_instances = len(output) + with open(pred_txt, "w") as fout: + for i in range(num_instances): + pred_class = output.pred_classes[i] + classes = self._metadata.thing_classes[pred_class] + class_id = name2label[classes].id + score = output.scores[i] + mask = output.pred_masks[i].numpy().astype("uint8") + png_filename = os.path.join( + self._temp_dir, basename + "_{}_{}.png".format(i, classes) + ) + + Image.fromarray(mask * 255).save(png_filename) + fout.write("{} {} {}\n".format(os.path.basename(png_filename), class_id, score)) + + def evaluate(self): + """ + Returns: + dict: has a key "segm", whose value is a dict of "AP" and "AP50". + """ + comm.synchronize() + if comm.get_rank() > 0: + return + os.environ["CITYSCAPES_DATASET"] = os.path.abspath( + os.path.join(self._metadata.gt_dir, "..", "..") + ) + # Load the Cityscapes eval script *after* setting the required env var, + # since the script reads CITYSCAPES_DATASET into global variables at load time. + import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval + + self._logger.info("Evaluating results under {} ...".format(self._temp_dir)) + + # set some global states in cityscapes evaluation API, before evaluting + cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir) + cityscapes_eval.args.predictionWalk = None + cityscapes_eval.args.JSONOutput = False + cityscapes_eval.args.colorized = False + cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json") + + # These lines are adopted from + # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa + groundTruthImgList = glob.glob(cityscapes_eval.args.groundTruthSearch) + assert len( + groundTruthImgList + ), "Cannot find any ground truth images to use for evaluation. 
Searched for: {}".format( + cityscapes_eval.args.groundTruthSearch + ) + predictionImgList = [] + for gt in groundTruthImgList: + predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args)) + results = cityscapes_eval.evaluateImgLists( + predictionImgList, groundTruthImgList, cityscapes_eval.args + )["averages"] + + ret = OrderedDict() + ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100} + self._working_dir.cleanup() + return ret diff --git a/detectron2/evaluation/coco_evaluation.py b/detectron2/evaluation/coco_evaluation.py new file mode 100644 index 0000000000..d4bc2a2e58 --- /dev/null +++ b/detectron2/evaluation/coco_evaluation.py @@ -0,0 +1,465 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import contextlib +import copy +import io +import itertools +import json +import logging +import numpy as np +import os +import pickle +from collections import OrderedDict +import pycocotools.mask as mask_util +import torch +from fvcore.common.file_io import PathManager +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval +from tabulate import tabulate + +import detectron2.utils.comm as comm +from detectron2.data import MetadataCatalog +from detectron2.structures import Boxes, BoxMode, pairwise_iou +from detectron2.utils.logger import create_small_table + +from .evaluator import DatasetEvaluator + + +class COCOEvaluator(DatasetEvaluator): + """ + Evaluate object proposal, instance detection/segmentation, keypoint detection + outputs using COCO's metrics and APIs. + """ + + def __init__(self, dataset_name, cfg, distributed, output_dir=None): + """ + Args: + dataset_name (str): name of the dataset to be evaluated. + It must have the following corresponding metadata: + "json_file": the path to the COCO format annotation + cfg (CfgNode): config instance + distributed (True): if True, will collect results from all ranks for evaluation. + Otherwise, will evaluate the results in the current process. + output_dir (str): optional, an output directory to dump results. + """ + self._tasks = self._tasks_from_config(cfg) + self._distributed = distributed + self._output_dir = output_dir + + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + self._metadata = MetadataCatalog.get(dataset_name) + json_file = PathManager.get_local_path(self._metadata.json_file) + with contextlib.redirect_stdout(io.StringIO()): + self._coco_api = COCO(json_file) + + self._kpt_oks_sigmas = cfg.TEST.KEYPOINT_OKS_SIGMAS + # Test set json files do not contain annotations (evaluation must be + # performed using the COCO evaluation server). + self._do_evaluation = len(self._coco_api.getAnnIds()) > 0 + + def reset(self): + self._predictions = [] + self._coco_results = [] + + def _tasks_from_config(self, cfg): + """ + Returns: + tuple[str]: tasks that can be evaluated under the given configuration. + """ + tasks = ("bbox",) + if cfg.MODEL.MASK_ON: + tasks = tasks + ("segm",) + if cfg.MODEL.KEYPOINT_ON: + tasks = tasks + ("keypoints",) + return tasks + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. 
+ """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + # TODO this is ugly + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + + if instances.has("pred_masks"): + # use RLE to encode the masks, because they are too large and takes memory + # since this evaluator stores outputs of the entire dataset + # Our model may predict bool array, but cocoapi expects uint8 + rles = [ + mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] + for mask in instances.pred_masks + ] + for rle in rles: + # "counts" is an array encoded by mask_util as a byte-stream. Python3's + # json writer which always produces strings cannot serialize a bytestream + # unless you decode it. Thankfully, utf-8 works out (which is also what + # the pycocotools/_mask.pyx does). + rle["counts"] = rle["counts"].decode("utf-8") + instances.pred_masks_rle = rles + instances.remove("pred_masks") + + prediction["instances"] = instances_to_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + self._predictions.append(prediction) + + def evaluate(self): + if self._distributed: + comm.synchronize() + self._predictions = comm.gather(self._predictions, dst=0) + self._predictions = list(itertools.chain(*self._predictions)) + + if not comm.is_main_process(): + return {} + + if len(self._predictions) == 0: + self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") + return {} + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "instances_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(self._predictions, f) + + self._results = OrderedDict() + if "proposals" in self._predictions[0]: + self._eval_box_proposals() + if "instances" in self._predictions[0]: + self._eval_predictions(set(self._tasks)) + # Copy so the caller can do whatever with results + return copy.deepcopy(self._results) + + def _eval_predictions(self, tasks): + """ + Evaluate self._predictions on the given tasks. + Fill self._results with the metrics of the tasks. 
+ """ + self._logger.info("Preparing results for COCO format ...") + self._coco_results = list(itertools.chain(*[x["instances"] for x in self._predictions])) + + # unmap the category ids for COCO + if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): + reverse_id_mapping = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + for result in self._coco_results: + result["category_id"] = reverse_id_mapping[result["category_id"]] + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(self._coco_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating predictions ...") + for task in sorted(tasks): + coco_eval = ( + _evaluate_predictions_on_coco( + self._coco_api, self._coco_results, task, kpt_oks_sigmas=self._kpt_oks_sigmas + ) + if len(self._coco_results) > 0 + else None # cocoapi does not handle empty results very well + ) + + res = self._derive_coco_results( + coco_eval, task, class_names=self._metadata.get("thing_classes") + ) + self._results[task] = res + + def _eval_box_proposals(self): + """ + Evaluate the box proposals in self._predictions. + Fill self._results with the metrics for "box_proposals" task. + """ + if self._output_dir: + # Saving generated box proposals to file. + # Predicted box_proposals are in XYXY_ABS mode. + bbox_mode = BoxMode.XYXY_ABS.value + ids, boxes, objectness_logits = [], [], [] + for prediction in self._predictions: + ids.append(prediction["image_id"]) + boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) + objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) + + proposal_data = { + "boxes": boxes, + "objectness_logits": objectness_logits, + "ids": ids, + "bbox_mode": bbox_mode, + } + with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: + pickle.dump(proposal_data, f) + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating bbox proposals ...") + res = {} + areas = {"all": "", "small": "s", "medium": "m", "large": "l"} + for limit in [100, 1000]: + for area, suffix in areas.items(): + stats = _evaluate_box_proposals( + self._predictions, self._coco_api, area=area, limit=limit + ) + key = "AR{}@{:d}".format(suffix, limit) + res[key] = float(stats["ar"].item() * 100) + self._logger.info("Proposal metrics: \n" + create_small_table(res)) + self._results["box_proposals"] = res + + def _derive_coco_results(self, coco_eval, iou_type, class_names=None): + """ + Derive the desired score numbers from summerized COCOeval. + + Args: + coco_eval (None or COCOEval): None represents no predictions from model. + iou_type (str): + class_names (None or list[str]): if provided, will use it to predict + per-category AP. + + Returns: + a dict of {metric name: score} + """ + + metrics = { + "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], + "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], + "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], + }[iou_type] + + if coco_eval is None: + self._logger.warn("No predictions from the model! 
Set scores to -1") + return {metric: -1 for metric in metrics} + + # the standard metrics + results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)} + self._logger.info( + "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) + ) + + if class_names is None or len(class_names) <= 1: + return results + # Compute per-category AP + # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa + precisions = coco_eval.eval["precision"] + # precision has dims (iou, recall, cls, area range, max dets) + assert len(class_names) == precisions.shape[2] + + results_per_category = [] + for idx, name in enumerate(class_names): + # area range index 0: all area ranges + # max dets index -1: typically 100 per image + precision = precisions[:, :, idx, 0, -1] + precision = precision[precision > -1] + ap = np.mean(precision) if precision.size else float("nan") + results_per_category.append(("{}".format(name), float(ap * 100))) + + # tabulate it + N_COLS = min(6, len(results_per_category) * 2) + results_flatten = list(itertools.chain(*results_per_category)) + results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) + table = tabulate( + results_2d, + tablefmt="pipe", + floatfmt=".3f", + headers=["category", "AP"] * (N_COLS // 2), + numalign="left", + ) + self._logger.info("Per-category {} AP: \n".format(iou_type) + table) + + results.update({"AP-" + name: ap for name, ap in results_per_category}) + return results + + +def instances_to_json(instances, img_id): + num_instance = len(instances) + if num_instance == 0: + return [] + + boxes = instances.pred_boxes.tensor.numpy() + boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + boxes = boxes.tolist() + scores = instances.scores.tolist() + classes = instances.pred_classes.tolist() + + has_mask = instances.has("pred_masks_rle") + if has_mask: + rles = instances.pred_masks_rle + + has_keypoints = instances.has("pred_keypoints") + if has_keypoints: + keypoints = instances.pred_keypoints + + results = [] + for k in range(num_instance): + result = { + "image_id": img_id, + "category_id": classes[k], + "bbox": boxes[k], + "score": scores[k], + } + if has_mask: + result["segmentation"] = rles[k] + if has_keypoints: + # In COCO annotations, + # keypoints coordinates are pixel indices. + # However our predictions are floating point coordinates. + # Therefore we subtract 0.5 to be consistent with the annotation format. + # This is the inverse of data loading logic in `datasets/coco.py`. + keypoints[k][:, :2] -= 0.5 + result["keypoints"] = keypoints[k].flatten().tolist() + results.append(result) + return results + + +# inspired from Detectron: +# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa +def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): + """ + Evaluate detection proposal recall metrics. This function is a much + faster alternative to the official COCO API recall evaluation code. However, + it produces slightly different results. 
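+
+    Returns:
+        dict: with keys "ar", "recalls", "thresholds", "gt_overlaps" and "num_pos".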
+ """ + # Record max overlap value for each gt box + # Return vector of overlap values + areas = { + "all": 0, + "small": 1, + "medium": 2, + "large": 3, + "96-128": 4, + "128-256": 5, + "256-512": 6, + "512-inf": 7, + } + area_ranges = [ + [0 ** 2, 1e5 ** 2], # all + [0 ** 2, 32 ** 2], # small + [32 ** 2, 96 ** 2], # medium + [96 ** 2, 1e5 ** 2], # large + [96 ** 2, 128 ** 2], # 96-128 + [128 ** 2, 256 ** 2], # 128-256 + [256 ** 2, 512 ** 2], # 256-512 + [512 ** 2, 1e5 ** 2], + ] # 512-inf + assert area in areas, "Unknown area range: {}".format(area) + area_range = area_ranges[areas[area]] + gt_overlaps = [] + num_pos = 0 + + for prediction_dict in dataset_predictions: + predictions = prediction_dict["proposals"] + + # sort predictions in descending order + # TODO maybe remove this and make it explicit in the documentation + inds = predictions.objectness_logits.sort(descending=True)[1] + predictions = predictions[inds] + + ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) + anno = coco_api.loadAnns(ann_ids) + gt_boxes = [ + BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + for obj in anno + if obj["iscrowd"] == 0 + ] + gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes + gt_boxes = Boxes(gt_boxes) + gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) + + if len(gt_boxes) == 0 or len(predictions) == 0: + continue + + valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) + gt_boxes = gt_boxes[valid_gt_inds] + + num_pos += len(gt_boxes) + + if len(gt_boxes) == 0: + continue + + if limit is not None and len(predictions) > limit: + predictions = predictions[:limit] + + overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) + + _gt_overlaps = torch.zeros(len(gt_boxes)) + for j in range(min(len(predictions), len(gt_boxes))): + # find which proposal box maximally covers each gt box + # and get the iou amount of coverage for each gt box + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + # find which gt box is 'best' covered (i.e. 'best' = most iou) + gt_ovr, gt_ind = max_overlaps.max(dim=0) + assert gt_ovr >= 0 + # find the proposal box that covers the best covered gt box + box_ind = argmax_overlaps[gt_ind] + # record the iou coverage of this gt box + _gt_overlaps[j] = overlaps[box_ind, gt_ind] + assert _gt_overlaps[j] == gt_ovr + # mark the proposal box and the gt box as used + overlaps[box_ind, :] = -1 + overlaps[:, gt_ind] = -1 + + # append recorded iou coverage level + gt_overlaps.append(_gt_overlaps) + gt_overlaps = torch.cat(gt_overlaps, dim=0) + gt_overlaps, _ = torch.sort(gt_overlaps) + + if thresholds is None: + step = 0.05 + thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) + recalls = torch.zeros_like(thresholds) + # compute recall for each iou threshold + for i, t in enumerate(thresholds): + recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) + # ar = 2 * np.trapz(recalls, thresholds) + ar = recalls.mean() + return { + "ar": ar, + "recalls": recalls, + "thresholds": thresholds, + "gt_overlaps": gt_overlaps, + "num_pos": num_pos, + } + + +def _evaluate_predictions_on_coco(coco_gt, coco_results, iou_type, kpt_oks_sigmas=None): + """ + Evaluate the coco results using COCOEval API. + """ + assert len(coco_results) > 0 + + if iou_type == "segm": + coco_results = copy.deepcopy(coco_results) + # When evaluating mask AP, if the results contain bbox, cocoapi will + # use the box area as the area of the instance, instead of the mask area. 
+        # This leads to a different definition of small/medium/large.
+        # We remove the bbox field to let mask AP use mask area.
+        for c in coco_results:
+            c.pop("bbox", None)
+
+    coco_dt = coco_gt.loadRes(coco_results)
+    coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
+    # Use the COCO default keypoint OKS sigmas unless overrides are specified
+    if kpt_oks_sigmas:
+        coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
+    coco_eval.evaluate()
+    coco_eval.accumulate()
+    coco_eval.summarize()
+
+    return coco_eval
diff --git a/detectron2/evaluation/evaluator.py b/detectron2/evaluation/evaluator.py
new file mode 100644
index 0000000000..465615dbbc
--- /dev/null
+++ b/detectron2/evaluation/evaluator.py
@@ -0,0 +1,160 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import datetime
+import logging
+import time
+from collections import OrderedDict
+from contextlib import contextmanager
+import torch
+
+from detectron2.utils.comm import is_main_process
+
+
+class DatasetEvaluator:
+    """
+    Base class for a dataset evaluator.
+
+    The function :func:`inference_on_dataset` runs the model over
+    all samples in the dataset, and has a DatasetEvaluator process the inputs/outputs.
+
+    This class will accumulate information of the inputs/outputs (by :meth:`process`),
+    and produce evaluation results at the end (by :meth:`evaluate`).
+    """
+
+    def reset(self):
+        """
+        Preparation for a new round of evaluation.
+        Should be called before starting a round of evaluation.
+        """
+        pass
+
+    def process(self, input, output):
+        """
+        Process an input/output pair.
+
+        Args:
+            input: the input that's used to call the model.
+            output: the return value of `model(input)`
+        """
+        pass
+
+    def evaluate(self):
+        """
+        Evaluate/summarize the performance, after processing all input/output pairs.
+
+        Returns:
+            dict:
+                A new evaluator class can return a dict of arbitrary format
+                as long as the user can process the results.
+                In our train_net.py, we expect the following format:
+
+                * key: the name of the task (e.g., bbox)
+                * value: a dict of {metric name: score}, e.g.: {"AP50": 80}
+        """
+        pass
+
+
+class DatasetEvaluators(DatasetEvaluator):
+    def __init__(self, evaluators):
+        assert len(evaluators)
+        super().__init__()
+        self._evaluators = evaluators
+
+    def reset(self):
+        for evaluator in self._evaluators:
+            evaluator.reset()
+
+    def process(self, input, output):
+        for evaluator in self._evaluators:
+            evaluator.process(input, output)
+
+    def evaluate(self):
+        results = OrderedDict()
+        for evaluator in self._evaluators:
+            result = evaluator.evaluate()
+            if is_main_process():
+                for k, v in result.items():
+                    assert (
+                        k not in results
+                    ), "Different evaluators produce results with the same key {}".format(k)
+                    results[k] = v
+        return results
+
+
+def inference_on_dataset(model, data_loader, evaluator):
+    """
+    Run model (in eval mode) on the data_loader and evaluate the metrics with evaluator.
+
+    Args:
+        model (nn.Module): a module which accepts an object from
+            `data_loader` and returns some outputs. It will be temporarily set to `eval` mode.
+
+            If you wish to evaluate a model in `training` mode instead, you can
+            wrap the given model and override its behavior of `.eval()` and `.train()`.
+        data_loader: an iterable object with a length.
+            The elements it generates will be the inputs to the model.
+        evaluator (DatasetEvaluator): the evaluator to run
+
+    Returns:
+        The return value of `evaluator.evaluate()`
+    """
+    num_devices = torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
+    logger = logging.getLogger(__name__)
+    logger.info("Start inference on {} images".format(len(data_loader)))
+
+    total = len(data_loader)  # inference data loader must have a fixed length
+    evaluator.reset()
+
+    logging_interval = 50
+    num_warmup = min(5, logging_interval - 1, total - 1)
+    start_time = time.time()
+    with inference_context(model), torch.no_grad():
+        for idx, inputs in enumerate(data_loader):
+            if idx == num_warmup:
+                start_time = time.time()
+
+            outputs = model(inputs)
+            evaluator.process(inputs, outputs)
+
+            if (idx + 1) % logging_interval == 0:
+                duration = time.time() - start_time
+                seconds_per_img = duration / (idx + 1 - num_warmup)
+                eta = datetime.timedelta(
+                    seconds=int(seconds_per_img * (total - num_warmup) - duration)
+                )
+                logger.info(
+                    "Inference done {}/{}. {:.4f} s / img. ETA={}".format(
+                        idx + 1, total, seconds_per_img, str(eta)
+                    )
+                )
+
+    # Measure the time only for this worker (before the synchronization barrier)
+    total_time = int(time.time() - start_time)
+    total_time_str = str(datetime.timedelta(seconds=total_time))
+    # NOTE this format is parsed by grep
+    logger.info(
+        "Total inference time: {} ({:.6f} s / img per device, on {} devices)".format(
+            total_time_str, total_time / (total - num_warmup), num_devices
+        )
+    )
+
+    results = evaluator.evaluate()
+    # An evaluator may return None when not in main process.
+    # Replace it by an empty dict instead to make it easier for downstream code to handle
+    if results is None:
+        results = {}
+    return results
+
+
+@contextmanager
+def inference_context(model):
+    """
+    A context where the model is temporarily changed to eval mode,
+    and restored to its previous mode afterwards.
+
+    Args:
+        model: a torch Module
+    """
+    training_mode = model.training
+    model.eval()
+    yield
+    model.train(training_mode)
diff --git a/detectron2/evaluation/lvis_evaluation.py b/detectron2/evaluation/lvis_evaluation.py
new file mode 100644
index 0000000000..1befa28ba7
--- /dev/null
+++ b/detectron2/evaluation/lvis_evaluation.py
@@ -0,0 +1,360 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import copy
+import itertools
+import json
+import logging
+import numpy as np
+import os
+import pickle
+from collections import OrderedDict
+import pycocotools.mask as mask_util
+import torch
+from fvcore.common.file_io import PathManager
+
+import detectron2.utils.comm as comm
+from detectron2.data import MetadataCatalog
+from detectron2.structures import Boxes, BoxMode, pairwise_iou
+from detectron2.utils.logger import create_small_table
+
+from .coco_evaluation import instances_to_json
+from .evaluator import DatasetEvaluator
+
+
+class LVISEvaluator(DatasetEvaluator):
+    """
+    Evaluate object proposal and instance detection/segmentation outputs using
+    LVIS's metrics and evaluation API.
+    """
+
+    def __init__(self, dataset_name, cfg, distributed, output_dir=None):
+        """
+        Args:
+            dataset_name (str): name of the dataset to be evaluated.
+                It must have the following corresponding metadata:
+                "json_file": the path to the LVIS format annotation
+            cfg (CfgNode): config instance
+            distributed (True): if True, will collect results from all ranks for evaluation.
+                Otherwise, will evaluate the results in the current process.
+            output_dir (str): optional, an output directory to dump results.
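+
+        Example (illustrative; assumes ``cfg`` exists and the LVIS v0.5
+        validation set is registered under the builtin name used below)::
+
+            evaluator = LVISEvaluator("lvis_v0.5_val", cfg, distributed=True, output_dir="./output")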
+ """ + from lvis import LVIS + + self._tasks = self._tasks_from_config(cfg) + self._distributed = distributed + self._output_dir = output_dir + + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + self._metadata = MetadataCatalog.get(dataset_name) + json_file = PathManager.get_local_path(self._metadata.json_file) + self._lvis_api = LVIS(json_file) + # Test set json files do not contain annotations (evaluation must be + # performed using the LVIS evaluation server). + self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0 + + def reset(self): + self._predictions = [] + self._lvis_results = [] + + def _tasks_from_config(self, cfg): + """ + Returns: + tuple[str]: tasks that can be evaluated under the given configuration. + """ + tasks = ("bbox",) + if cfg.MODEL.MASK_ON: + tasks = tasks + ("segm",) + return tasks + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a LVIS model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a LVIS model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + """ + for input, output in zip(inputs, outputs): + prediction = {"image_id": input["image_id"]} + + # TODO this is ugly + if "instances" in output: + instances = output["instances"].to(self._cpu_device) + + if instances.has("pred_masks"): + # use RLE to encode the masks, because they are too large and takes memory + # since this evaluator stores outputs of the entire dataset + rles = [ + mask_util.encode(np.array(mask[:, :, None], order="F"))[0] + for mask in instances.pred_masks + ] + for rle in rles: + # "counts" is an array encoded by mask_util as a byte-stream. Python3's + # json writer which always produces strings cannot serialize a bytestream + # unless you decode it. Thankfully, utf-8 works out (which is also what + # the pycocotools/_mask.pyx does). + rle["counts"] = rle["counts"].decode("utf-8") + instances.pred_masks_rle = rles + instances.remove("pred_masks") + + prediction["instances"] = instances_to_json(instances, input["image_id"]) + if "proposals" in output: + prediction["proposals"] = output["proposals"].to(self._cpu_device) + self._predictions.append(prediction) + + def evaluate(self): + if self._distributed: + comm.synchronize() + self._predictions = comm.gather(self._predictions, dst=0) + self._predictions = list(itertools.chain(*self._predictions)) + + if not comm.is_main_process(): + return + + if len(self._predictions) == 0: + self._logger.warning("[LVISEvaluator] Did not receive valid predictions.") + return {} + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "instances_predictions.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(self._predictions, f) + + self._results = OrderedDict() + if "proposals" in self._predictions[0]: + self._eval_box_proposals() + if "instances" in self._predictions[0]: + self._eval_predictions(set(self._tasks)) + # Copy so the caller can do whatever with results + return copy.deepcopy(self._results) + + def _eval_predictions(self, tasks): + """ + Evaluate self._predictions on the given tasks. + Fill self._results with the metrics of the tasks. 
+ """ + self._logger.info("Preparing results in the LVIS format ...") + self._lvis_results = list(itertools.chain(*[x["instances"] for x in self._predictions])) + + # unmap the category ids for LVIS (from 0-indexed to 1-indexed) + for result in self._lvis_results: + result["category_id"] += 1 + + if self._output_dir: + file_path = os.path.join(self._output_dir, "lvis_instances_results.json") + self._logger.info("Saving results to {}".format(file_path)) + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(self._lvis_results)) + f.flush() + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating predictions ...") + for task in sorted(tasks): + res = _evaluate_predictions_on_lvis( + self._lvis_api, + self._lvis_results, + task, + class_names=self._metadata.get("thing_classes"), + ) + self._results[task] = res + + def _eval_box_proposals(self): + """ + Evaluate the box proposals in self._predictions. + Fill self._results with the metrics for "box_proposals" task. + """ + if self._output_dir: + # Saving generated box proposals to file. + # Predicted box_proposals are in XYXY_ABS mode. + bbox_mode = BoxMode.XYXY_ABS.value + ids, boxes, objectness_logits = [], [], [] + for prediction in self._predictions: + ids.append(prediction["image_id"]) + boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) + objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) + + proposal_data = { + "boxes": boxes, + "objectness_logits": objectness_logits, + "ids": ids, + "bbox_mode": bbox_mode, + } + with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: + pickle.dump(proposal_data, f) + + if not self._do_evaluation: + self._logger.info("Annotations are not available for evaluation.") + return + + self._logger.info("Evaluating bbox proposals ...") + res = {} + areas = {"all": "", "small": "s", "medium": "m", "large": "l"} + for limit in [100, 1000]: + for area, suffix in areas.items(): + stats = _evaluate_box_proposals( + self._predictions, self._lvis_api, area=area, limit=limit + ) + key = "AR{}@{:d}".format(suffix, limit) + res[key] = float(stats["ar"].item() * 100) + self._logger.info("Proposal metrics: \n" + create_small_table(res)) + self._results["box_proposals"] = res + + +# inspired from Detectron: +# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa +def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None): + """ + Evaluate detection proposal recall metrics. This function is a much + faster alternative to the official LVIS API recall evaluation code. However, + it produces slightly different results. 
+ """ + # Record max overlap value for each gt box + # Return vector of overlap values + areas = { + "all": 0, + "small": 1, + "medium": 2, + "large": 3, + "96-128": 4, + "128-256": 5, + "256-512": 6, + "512-inf": 7, + } + area_ranges = [ + [0 ** 2, 1e5 ** 2], # all + [0 ** 2, 32 ** 2], # small + [32 ** 2, 96 ** 2], # medium + [96 ** 2, 1e5 ** 2], # large + [96 ** 2, 128 ** 2], # 96-128 + [128 ** 2, 256 ** 2], # 128-256 + [256 ** 2, 512 ** 2], # 256-512 + [512 ** 2, 1e5 ** 2], + ] # 512-inf + assert area in areas, "Unknown area range: {}".format(area) + area_range = area_ranges[areas[area]] + gt_overlaps = [] + num_pos = 0 + + for prediction_dict in dataset_predictions: + predictions = prediction_dict["proposals"] + + # sort predictions in descending order + # TODO maybe remove this and make it explicit in the documentation + inds = predictions.objectness_logits.sort(descending=True)[1] + predictions = predictions[inds] + + ann_ids = lvis_api.get_ann_ids(img_ids=[prediction_dict["image_id"]]) + anno = lvis_api.load_anns(ann_ids) + gt_boxes = [ + BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno + ] + gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes + gt_boxes = Boxes(gt_boxes) + gt_areas = torch.as_tensor([obj["area"] for obj in anno]) + + if len(gt_boxes) == 0 or len(predictions) == 0: + continue + + valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) + gt_boxes = gt_boxes[valid_gt_inds] + + num_pos += len(gt_boxes) + + if len(gt_boxes) == 0: + continue + + if limit is not None and len(predictions) > limit: + predictions = predictions[:limit] + + overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) + + _gt_overlaps = torch.zeros(len(gt_boxes)) + for j in range(min(len(predictions), len(gt_boxes))): + # find which proposal box maximally covers each gt box + # and get the iou amount of coverage for each gt box + max_overlaps, argmax_overlaps = overlaps.max(dim=0) + + # find which gt box is 'best' covered (i.e. 'best' = most iou) + gt_ovr, gt_ind = max_overlaps.max(dim=0) + assert gt_ovr >= 0 + # find the proposal box that covers the best covered gt box + box_ind = argmax_overlaps[gt_ind] + # record the iou coverage of this gt box + _gt_overlaps[j] = overlaps[box_ind, gt_ind] + assert _gt_overlaps[j] == gt_ovr + # mark the proposal box and the gt box as used + overlaps[box_ind, :] = -1 + overlaps[:, gt_ind] = -1 + + # append recorded iou coverage level + gt_overlaps.append(_gt_overlaps) + gt_overlaps = torch.cat(gt_overlaps, dim=0) + gt_overlaps, _ = torch.sort(gt_overlaps) + + if thresholds is None: + step = 0.05 + thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) + recalls = torch.zeros_like(thresholds) + # compute recall for each iou threshold + for i, t in enumerate(thresholds): + recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) + # ar = 2 * np.trapz(recalls, thresholds) + ar = recalls.mean() + return { + "ar": ar, + "recalls": recalls, + "thresholds": thresholds, + "gt_overlaps": gt_overlaps, + "num_pos": num_pos, + } + + +def _evaluate_predictions_on_lvis(lvis_gt, lvis_results, iou_type, class_names=None): + """ + Args: + iou_type (str): + kpt_oks_sigmas (list[float]): + class_names (None or list[str]): if provided, will use it to predict + per-category AP. 
+ + Returns: + a dict of {metric name: score} + """ + metrics = { + "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], + "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"], + }[iou_type] + + logger = logging.getLogger(__name__) + + if len(lvis_results) == 0: # TODO: check if needed + logger.warn("No predictions from the model! Set scores to -1") + return {metric: -1 for metric in metrics} + + if iou_type == "segm": + lvis_results = copy.deepcopy(lvis_results) + # When evaluating mask AP, if the results contain bbox, LVIS API will + # use the box area as the area of the instance, instead of the mask area. + # This leads to a different definition of small/medium/large. + # We remove the bbox field to let mask AP use mask area. + for c in lvis_results: + c.pop("bbox", None) + + from lvis import LVISEval, LVISResults + + lvis_results = LVISResults(lvis_gt, lvis_results) + lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type) + lvis_eval.run() + lvis_eval.print_results() + + # Pull the standard metrics from the LVIS results + results = lvis_eval.get_results() + results = {metric: float(results[metric] * 100) for metric in metrics} + logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results)) + return results diff --git a/detectron2/evaluation/panoptic_evaluation.py b/detectron2/evaluation/panoptic_evaluation.py new file mode 100644 index 0000000000..f4054261d2 --- /dev/null +++ b/detectron2/evaluation/panoptic_evaluation.py @@ -0,0 +1,171 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import contextlib +import io +import itertools +import json +import logging +import os +import tempfile +from collections import OrderedDict +from fvcore.common.file_io import PathManager +from PIL import Image +from tabulate import tabulate + +from detectron2.data import MetadataCatalog +from detectron2.utils import comm + +from .evaluator import DatasetEvaluator + +logger = logging.getLogger(__name__) + + +class COCOPanopticEvaluator(DatasetEvaluator): + """ + Evaluate Panoptic Quality metrics on COCO using PanopticAPI. + It saves panoptic segmentation prediction in `output_dir` + + It contains a synchronize call and has to be called from all workers. + """ + + def __init__(self, dataset_name, output_dir): + """ + Args: + dataset_name (str): name of the dataset + output_dir (str): output directory to save results for evaluation + """ + self._metadata = MetadataCatalog.get(dataset_name) + self._thing_contiguous_id_to_dataset_id = { + v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items() + } + self._stuff_contiguous_id_to_dataset_id = { + v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items() + } + + self._predictions_json = os.path.join(output_dir, "predictions.json") + self._predictions_dir = os.path.join(output_dir, "predictions") + + def reset(self): + self._predictions = [] + + def _convert_category_id(self, segment_info): + isthing = segment_info.pop("isthing", None) + if isthing is None: + # the model produces panoptic category id directly. 
No more conversion needed + return segment_info + if isthing is True: + segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[ + segment_info["category_id"] + ] + else: + segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[ + segment_info["category_id"] + ] + return segment_info + + def process(self, inputs, outputs): + from panopticapi.utils import id2rgb + + for input, output in zip(inputs, outputs): + panoptic_img, segments_info = output["panoptic_seg"] + panoptic_img = panoptic_img.cpu().numpy() + + file_name = os.path.basename(input["file_name"]) + file_name_png = os.path.splitext(file_name)[0] + ".png" + with io.BytesIO() as out: + Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG") + segments_info = [self._convert_category_id(x) for x in segments_info] + self._predictions.append( + { + "image_id": input["image_id"], + "file_name": file_name_png, + "png_string": out.getvalue(), + "segments_info": segments_info, + } + ) + + def evaluate(self): + comm.synchronize() + + self._predictions = comm.gather(self._predictions) + self._predictions = list(itertools.chain(*self._predictions)) + if not comm.is_main_process(): + return + + gt_json = PathManager.get_local_path(self._metadata.panoptic_json) + gt_folder = self._metadata.panoptic_root + + with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir: + if "://" not in self._predictions_dir: + pred_dir = self._predictions_dir + os.makedirs(pred_dir, exist_ok=True) + + logger.info("Writing all panoptic predictions to {} ...".format(pred_dir)) + for p in self._predictions: + with open(os.path.join(pred_dir, p["file_name"]), "wb") as f: + f.write(p.pop("png_string")) + + with open(gt_json, "r") as f: + json_data = json.load(f) + json_data["annotations"] = self._predictions + with PathManager.open(self._predictions_json, "w") as f: + f.write(json.dumps(json_data)) + + from panopticapi.evaluation import pq_compute + + with contextlib.redirect_stdout(io.StringIO()): + pq_res = pq_compute( + gt_json, + PathManager.get_local_path(self._predictions_json), + gt_folder=gt_folder, + pred_folder=pred_dir, + ) + + res = {} + res["PQ"] = 100 * pq_res["All"]["pq"] + res["SQ"] = 100 * pq_res["All"]["sq"] + res["RQ"] = 100 * pq_res["All"]["rq"] + res["PQ_th"] = 100 * pq_res["Things"]["pq"] + res["SQ_th"] = 100 * pq_res["Things"]["sq"] + res["RQ_th"] = 100 * pq_res["Things"]["rq"] + res["PQ_st"] = 100 * pq_res["Stuff"]["pq"] + res["SQ_st"] = 100 * pq_res["Stuff"]["sq"] + res["RQ_st"] = 100 * pq_res["Stuff"]["rq"] + + results = OrderedDict({"panoptic_seg": res}) + _print_panoptic_results(pq_res) + + return results + + +def _print_panoptic_results(pq_res): + headers = ["", "PQ", "SQ", "RQ", "#categories"] + data = [] + for name in ["All", "Things", "Stuff"]: + row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]] + data.append(row) + table = tabulate( + data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center" + ) + logger.info("Panoptic Evaluation Results:\n" + table) + + +if __name__ == "__main__": + from detectron2.utils.logger import setup_logger + + logger = setup_logger() + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("--gt-json") + parser.add_argument("--gt-dir") + parser.add_argument("--pred-json") + parser.add_argument("--pred-dir") + args = parser.parse_args() + + from panopticapi.evaluation import pq_compute + + with contextlib.redirect_stdout(io.StringIO()): + pq_res = pq_compute( + 
            args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir
+        )
+    _print_panoptic_results(pq_res)
diff --git a/detectron2/evaluation/pascal_voc_evaluation.py b/detectron2/evaluation/pascal_voc_evaluation.py
new file mode 100644
index 0000000000..d555447bf6
--- /dev/null
+++ b/detectron2/evaluation/pascal_voc_evaluation.py
@@ -0,0 +1,292 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import logging
+import numpy as np
+import os
+import tempfile
+import xml.etree.ElementTree as ET
+from collections import OrderedDict, defaultdict
+from functools import lru_cache
+import torch
+
+from detectron2.data import MetadataCatalog
+from detectron2.utils import comm
+
+from .evaluator import DatasetEvaluator
+
+
+class PascalVOCDetectionEvaluator(DatasetEvaluator):
+    """
+    Evaluate Pascal VOC AP.
+    It contains a synchronization, therefore has to be called from all ranks.
+
+    Note that this is a rewrite of the official Matlab API.
+    The results should be similar, but not identical, to those produced by
+    the official API.
+    """
+
+    def __init__(self, dataset_name):
+        """
+        Args:
+            dataset_name (str): name of the dataset, e.g., "voc_2007_test"
+        """
+        self._dataset_name = dataset_name
+        meta = MetadataCatalog.get(dataset_name)
+        self._anno_file_template = os.path.join(meta.dirname, "Annotations", "{}.xml")
+        self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
+        self._class_names = meta.thing_classes
+        assert meta.year in [2007, 2012], meta.year
+        self._is_2007 = meta.year == 2007
+        self._cpu_device = torch.device("cpu")
+        self._logger = logging.getLogger(__name__)
+
+    def reset(self):
+        self._predictions = defaultdict(list)  # class name -> list of prediction strings
+
+    def process(self, inputs, outputs):
+        for input, output in zip(inputs, outputs):
+            image_id = input["image_id"]
+            instances = output["instances"].to(self._cpu_device)
+            boxes = instances.pred_boxes.tensor.numpy()
+            scores = instances.scores.tolist()
+            classes = instances.pred_classes.tolist()
+            for box, score, cls in zip(boxes, scores, classes):
+                xmin, ymin, xmax, ymax = box
+                # The inverse of data loading logic in `datasets/pascal_voc.py`
+                xmin += 1
+                ymin += 1
+                self._predictions[cls].append(
+                    f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
+                )
+
+    def evaluate(self):
+        """
+        Returns:
+            dict: has a key "bbox", whose value is a dict of "AP", "AP50", and "AP75".
+        """
+        all_predictions = comm.gather(self._predictions, dst=0)
+        if not comm.is_main_process():
+            return
+        predictions = defaultdict(list)
+        for predictions_per_rank in all_predictions:
+            for clsid, lines in predictions_per_rank.items():
+                predictions[clsid].extend(lines)
+        del all_predictions
+
+        self._logger.info(
+            "Evaluating {} using {} metric.
" + "Note that results do not use the official Matlab API.".format( + self._dataset_name, 2007 if self._is_2007 else 2012 + ) + ) + + with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname: + res_file_template = os.path.join(dirname, "{}.txt") + + aps = defaultdict(list) # iou -> ap per class + for cls_id, cls_name in enumerate(self._class_names): + lines = predictions.get(cls_id, [""]) + + with open(res_file_template.format(cls_name), "w") as f: + f.write("\n".join(lines)) + + for thresh in range(50, 100, 5): + rec, prec, ap = voc_eval( + res_file_template, + self._anno_file_template, + self._image_set_path, + cls_name, + ovthresh=thresh / 100.0, + use_07_metric=self._is_2007, + ) + aps[thresh].append(ap * 100) + + ret = OrderedDict() + mAP = {iou: np.mean(x) for iou, x in aps.items()} + ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]} + return ret + + +############################################################################## +# +# Below code is modified from +# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py +# -------------------------------------------------------- +# Fast/er R-CNN +# Licensed under The MIT License [see LICENSE for details] +# Written by Bharath Hariharan +# -------------------------------------------------------- + +"""Python implementation of the PASCAL VOC devkit's AP evaluation code.""" + + +@lru_cache(maxsize=None) +def parse_rec(filename): + """Parse a PASCAL VOC xml file.""" + tree = ET.parse(filename) + objects = [] + for obj in tree.findall("object"): + obj_struct = {} + obj_struct["name"] = obj.find("name").text + obj_struct["pose"] = obj.find("pose").text + obj_struct["truncated"] = int(obj.find("truncated").text) + obj_struct["difficult"] = int(obj.find("difficult").text) + bbox = obj.find("bndbox") + obj_struct["bbox"] = [ + int(bbox.find("xmin").text), + int(bbox.find("ymin").text), + int(bbox.find("xmax").text), + int(bbox.find("ymax").text), + ] + objects.append(obj_struct) + + return objects + + +def voc_ap(rec, prec, use_07_metric=False): + """Compute VOC AP given precision and recall. If use_07_metric is true, uses + the VOC 07 11-point method (default:False). + """ + if use_07_metric: + # 11 point metric + ap = 0.0 + for t in np.arange(0.0, 1.1, 0.1): + if np.sum(rec >= t) == 0: + p = 0 + else: + p = np.max(prec[rec >= t]) + ap = ap + p / 11.0 + else: + # correct AP calculation + # first append sentinel values at the end + mrec = np.concatenate(([0.0], rec, [1.0])) + mpre = np.concatenate(([0.0], prec, [0.0])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False): + """rec, prec, ap = voc_eval(detpath, + annopath, + imagesetfile, + classname, + [ovthresh], + [use_07_metric]) + + Top level function that does the PASCAL VOC evaluation. + + detpath: Path to detections + detpath.format(classname) should produce the detection results file. + annopath: Path to annotations + annopath.format(imagename) should be the xml annotations file. + imagesetfile: Text file containing the list of images, one image per line. 
+ classname: Category name (duh) + [ovthresh]: Overlap threshold (default = 0.5) + [use_07_metric]: Whether to use VOC07's 11 point AP computation + (default False) + """ + # assumes detections are in detpath.format(classname) + # assumes annotations are in annopath.format(imagename) + # assumes imagesetfile is a text file with each line an image name + + # first load gt + # read list of images + with open(imagesetfile, "r") as f: + lines = f.readlines() + imagenames = [x.strip() for x in lines] + + # load annots + recs = {} + for imagename in imagenames: + recs[imagename] = parse_rec(annopath.format(imagename)) + + # extract gt objects for this class + class_recs = {} + npos = 0 + for imagename in imagenames: + R = [obj for obj in recs[imagename] if obj["name"] == classname] + bbox = np.array([x["bbox"] for x in R]) + difficult = np.array([x["difficult"] for x in R]).astype(np.bool) + # difficult = np.array([False for x in R]).astype(np.bool) # treat all "difficult" as GT + det = [False] * len(R) + npos = npos + sum(~difficult) + class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det} + + # read dets + detfile = detpath.format(classname) + with open(detfile, "r") as f: + lines = f.readlines() + + splitlines = [x.strip().split(" ") for x in lines] + image_ids = [x[0] for x in splitlines] + confidence = np.array([float(x[1]) for x in splitlines]) + BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4) + + # sort by confidence + sorted_ind = np.argsort(-confidence) + BB = BB[sorted_ind, :] + image_ids = [image_ids[x] for x in sorted_ind] + + # go down dets and mark TPs and FPs + nd = len(image_ids) + tp = np.zeros(nd) + fp = np.zeros(nd) + for d in range(nd): + R = class_recs[image_ids[d]] + bb = BB[d, :].astype(float) + ovmax = -np.inf + BBGT = R["bbox"].astype(float) + + if BBGT.size > 0: + # compute overlaps + # intersection + ixmin = np.maximum(BBGT[:, 0], bb[0]) + iymin = np.maximum(BBGT[:, 1], bb[1]) + ixmax = np.minimum(BBGT[:, 2], bb[2]) + iymax = np.minimum(BBGT[:, 3], bb[3]) + iw = np.maximum(ixmax - ixmin + 1.0, 0.0) + ih = np.maximum(iymax - iymin + 1.0, 0.0) + inters = iw * ih + + # union + uni = ( + (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0) + + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0) + - inters + ) + + overlaps = inters / uni + ovmax = np.max(overlaps) + jmax = np.argmax(overlaps) + + if ovmax > ovthresh: + if not R["difficult"][jmax]: + if not R["det"][jmax]: + tp[d] = 1.0 + R["det"][jmax] = 1 + else: + fp[d] = 1.0 + else: + fp[d] = 1.0 + + # compute precision recall + fp = np.cumsum(fp) + tp = np.cumsum(tp) + rec = tp / float(npos) + # avoid divide by zero in case the first detection matches a difficult + # ground truth + prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + ap = voc_ap(rec, prec, use_07_metric) + + return rec, prec, ap diff --git a/detectron2/evaluation/sem_seg_evaluation.py b/detectron2/evaluation/sem_seg_evaluation.py new file mode 100644 index 0000000000..7de27e0a8f --- /dev/null +++ b/detectron2/evaluation/sem_seg_evaluation.py @@ -0,0 +1,162 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
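
The matching loop in `voc_eval` walks detections in descending confidence and computes IoU against the ground-truth boxes using the VOC "+1" convention (coordinates are inclusive pixel indices). A small numpy sketch of that overlap computation on toy boxes:

```python
# Toy boxes in the VOC convention (inclusive pixel coordinates); values illustrative only.
import numpy as np

bb = np.array([10.0, 10.0, 50.0, 50.0])            # one detection: x1, y1, x2, y2
BBGT = np.array([[12.0, 8.0, 48.0, 52.0],          # ground-truth boxes in the same image
                 [100.0, 100.0, 120.0, 130.0]])

ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)          # "+1" because boxes are inclusive
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
uni = ((bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
       + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
       - inters)
overlaps = inters / uni
jmax = int(np.argmax(overlaps))
# The detection is a true positive if overlaps[jmax] > ovthresh and GT jmax is
# neither "difficult" nor already matched; otherwise it counts as a false positive.
print(overlaps, jmax)
```
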
All Rights Reserved +import itertools +import json +import logging +import numpy as np +import os +from collections import OrderedDict +import PIL.Image as Image +import pycocotools.mask as mask_util +import torch +from fvcore.common.file_io import PathManager + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.utils.comm import all_gather, is_main_process, synchronize + +from .evaluator import DatasetEvaluator + + +class SemSegEvaluator(DatasetEvaluator): + """ + Evaluate semantic segmentation + """ + + def __init__(self, dataset_name, distributed, num_classes, ignore_label=255, output_dir=None): + """ + Args: + dataset_name (str): name of the dataset to be evaluated. + distributed (True): if True, will collect results from all ranks for evaluation. + Otherwise, will evaluate the results in the current process. + num_classes (int): number of classes + ignore_label (int): value in semantic segmentation ground truth. Predictions for the + corresponding pixels should be ignored. + output_dir (str): an output directory to dump results. + """ + self._dataset_name = dataset_name + self._distributed = distributed + self._output_dir = output_dir + self._num_classes = num_classes + self._ignore_label = ignore_label + self._N = num_classes + 1 + + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + self.image_id_to_gt_file = { + dataset_record["image_id"]: dataset_record["sem_seg_file_name"] + for dataset_record in DatasetCatalog.get(dataset_name) + } + + meta = MetadataCatalog.get(dataset_name) + # Dict that maps contiguous training ids to COCO category ids + try: + c2d = meta.stuff_dataset_id_to_contiguous_id + self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()} + except AttributeError: + self._contiguous_id_to_dataset_id = None + + def reset(self): + self._conf_matrix = np.zeros((self._N, self._N), dtype=np.int64) + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a model. + It is a list of dicts. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a model. It is either list of semantic segmentation predictions + (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic + segmentation prediction in the same format. 
+ """ + for input, output in zip(inputs, outputs): + output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) + pred = np.array(output, dtype=np.int) + with PathManager.open(self.image_id_to_gt_file[input["image_id"]], "rb") as f: + gt = np.array(Image.open(f), dtype=np.int) + + gt[gt == self._ignore_label] = self._num_classes + + self._conf_matrix += np.bincount( + self._N * pred.reshape(-1) + gt.reshape(-1), minlength=self._N ** 2 + ).reshape(self._N, self._N) + + self._predictions.extend(self.encode_json_sem_seg(pred, input["image_id"])) + + def evaluate(self): + """ + Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): + Mean intersection-over-union averaged across classes (mIoU) + Frequency Weighted IoU (fwIoU) + Mean pixel accuracy averaged across classes (mACC) + Pixel Accuracy (pACC) + """ + if self._distributed: + synchronize() + conf_matrix_list = all_gather(self._conf_matrix) + self._predictions = all_gather(self._predictions) + self._predictions = list(itertools.chain(*self._predictions)) + if not is_main_process(): + return + + self._conf_matrix = np.zeros_like(self._conf_matrix) + for conf_matrix in conf_matrix_list: + self._conf_matrix += conf_matrix + + if self._output_dir: + PathManager.mkdirs(self._output_dir) + file_path = os.path.join(self._output_dir, "sem_seg_predictions.json") + with PathManager.open(file_path, "w") as f: + f.write(json.dumps(self._predictions)) + + acc = np.zeros(self._num_classes, dtype=np.float) + iou = np.zeros(self._num_classes, dtype=np.float) + tp = self._conf_matrix.diagonal()[:-1].astype(np.float) + pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float) + class_weights = pos_gt / np.sum(pos_gt) + pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float) + acc_valid = pos_gt > 0 + acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid] + iou_valid = (pos_gt + pos_pred) > 0 + union = pos_gt + pos_pred - tp + iou[acc_valid] = tp[acc_valid] / union[acc_valid] + macc = np.sum(acc) / np.sum(acc_valid) + miou = np.sum(iou) / np.sum(iou_valid) + fiou = np.sum(iou * class_weights) + pacc = np.sum(tp) / np.sum(pos_gt) + + res = {} + res["mIoU"] = 100 * miou + res["fwIoU"] = 100 * fiou + res["mACC"] = 100 * macc + res["pACC"] = 100 * pacc + + if self._output_dir: + file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth") + with PathManager.open(file_path, "wb") as f: + torch.save(res, f) + results = OrderedDict({"sem_seg": res}) + self._logger.info(results) + return results + + def encode_json_sem_seg(self, sem_seg, image_id): + """ + Convert semenatic segmentation to COCO stuff format with segments encoded as RLEs. 
+ See http://cocodataset.org/#format-results + """ + json_list = [] + for label in np.unique(sem_seg): + if self._contiguous_id_to_dataset_id is not None: + assert ( + label in self._contiguous_id_to_dataset_id + ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name) + dataset_id = self._contiguous_id_to_dataset_id[label] + else: + dataset_id = int(label) + mask = (sem_seg == label).astype(np.uint8) + mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0] + mask_rle["counts"] = mask_rle["counts"].decode("utf-8") + json_list.append( + {"image_id": image_id, "category_id": dataset_id, "segmentation": mask_rle} + ) + return json_list diff --git a/detectron2/evaluation/testing.py b/detectron2/evaluation/testing.py new file mode 100644 index 0000000000..ae4310f9f4 --- /dev/null +++ b/detectron2/evaluation/testing.py @@ -0,0 +1,77 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import numpy as np +import pprint +import sys +from collections import Mapping, OrderedDict + + +def print_csv_format(results): + """ + Print main metrics in a format similar to Detectron, + so that they are easy to copypaste into a spreadsheet. + + Args: + results (OrderedDict[dict]): task_name -> {metric -> score} + """ + assert isinstance(results, OrderedDict), results # unordered results cannot be properly printed + logger = logging.getLogger(__name__) + for task, res in results.items(): + # Don't print "AP-category" metrics since they are usually not tracked. + important_res = [(k, v) for k, v in res.items() if "-" not in k] + logger.info("copypaste: Task: {}".format(task)) + logger.info("copypaste: " + ",".join([k[0] for k in important_res])) + logger.info("copypaste: " + ",".join(["{0:.4f}".format(k[1]) for k in important_res])) + + +def verify_results(cfg, results): + """ + Args: + results (OrderedDict[dict]): task_name -> {metric -> score} + + Returns: + bool: whether the verification succeeds or not + """ + expected_results = cfg.TEST.EXPECTED_RESULTS + if not len(expected_results): + return True + + ok = True + for task, metric, expected, tolerance in expected_results: + actual = results[task][metric] + if not np.isfinite(actual): + ok = False + diff = abs(actual - expected) + if diff > tolerance: + ok = False + + logger = logging.getLogger(__name__) + if not ok: + logger.error("Result verification failed!") + logger.error("Expected Results: " + str(expected_results)) + logger.error("Actual Results: " + pprint.pformat(results)) + + sys.exit(1) + else: + logger.info("Results verification passed.") + return ok + + +def flatten_results_dict(results): + """ + Expand a hierarchical dict of scalars into a flat dict of scalars. + If results[k1][k2][k3] = v, the returned dict will have the entry + {"k1/k2/k3": v}. + + Args: + results (dict): + """ + r = {} + for k, v in results.items(): + if isinstance(v, Mapping): + v = flatten_results_dict(v) + for kk, vv in v.items(): + r[k + "/" + kk] = vv + else: + r[k] = v + return r diff --git a/detectron2/layers/__init__.py b/detectron2/layers/__init__.py new file mode 100644 index 0000000000..d31d393ca5 --- /dev/null +++ b/detectron2/layers/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
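
`verify_results` above reads `cfg.TEST.EXPECTED_RESULTS` as `(task, metric, expected, tolerance)` entries and calls `sys.exit(1)` when a metric drifts outside the tolerance, while `flatten_results_dict` turns nested result dicts into `"task/metric"` keys. A short sketch with made-up numbers (the flattening helper below mirrors the function above):

```python
# Made-up results, for illustration of the two helpers above.
from collections.abc import Mapping

def flatten(results):
    # Same expansion as flatten_results_dict: {"k1": {"k2": v}} -> {"k1/k2": v}.
    flat = {}
    for k, v in results.items():
        if isinstance(v, Mapping):
            for kk, vv in flatten(v).items():
                flat[k + "/" + kk] = vv
        else:
            flat[k] = v
    return flat

results = {"bbox": {"AP": 38.2, "AP50": 58.9}, "segm": {"AP": 34.7}}
print(flatten(results))   # {'bbox/AP': 38.2, 'bbox/AP50': 58.9, 'segm/AP': 34.7}

# cfg.TEST.EXPECTED_RESULTS entries have the shape consumed by verify_results:
# (task, metric, expected value, absolute tolerance).
expected_results = [("bbox", "AP", 38.0, 0.5)]
for task, metric, expected, tolerance in expected_results:
    assert abs(results[task][metric] - expected) <= tolerance
```
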
All Rights Reserved +from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm +from .deform_conv import DeformConv, ModulatedDeformConv +from .mask_ops import paste_masks_in_image +from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated +from .roi_align import ROIAlign, roi_align +from .roi_align_rotated import ROIAlignRotated, roi_align_rotated +from .shape_spec import ShapeSpec +from .wrappers import BatchNorm2d, Conv2d, ConvTranspose2d, cat, interpolate + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/detectron2/layers/batch_norm.py b/detectron2/layers/batch_norm.py new file mode 100644 index 0000000000..a3f384f92a --- /dev/null +++ b/detectron2/layers/batch_norm.py @@ -0,0 +1,180 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import torch +import torch.distributed as dist +from torch import nn +from torch.autograd.function import Function + +from detectron2.utils import comm + +from .wrappers import BatchNorm2d + + +class FrozenBatchNorm2d(nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + It contains non-trainable buffers called + "weight" and "bias", "running_mean", "running_var", + initialized to perform identity transformation. + + The pre-trained backbone models from Caffe2 only contain "weight" and "bias", + which are computed from the original four parameters of BN. + The affine transform `x * weight + bias` will perform the equivalent + computation of `(x - running_mean) / sqrt(running_var) * weight + bias`. + When loading a backbone model from Caffe2, "running_mean" and "running_var" + will be left unchanged as identity transformation. + + Other pre-trained backbone models may contain all 4 parameters. + + The forward is implemented by `F.batch_norm(..., training=False)`. + """ + + _version = 3 + + def __init__(self, num_features, eps=1e-5): + super().__init__() + self.num_features = num_features + self.eps = eps + self.register_buffer("weight", torch.ones(num_features)) + self.register_buffer("bias", torch.zeros(num_features)) + self.register_buffer("running_mean", torch.zeros(num_features)) + self.register_buffer("running_var", torch.ones(num_features) - eps) + + def forward(self, x): + scale = self.weight * (self.running_var + self.eps).rsqrt() + bias = self.bias - self.running_mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + return x * scale + bias + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + version = local_metadata.get("version", None) + + if version is None or version < 2: + # No running_mean/var in early versions + # This will silent the warnings + if prefix + "running_mean" not in state_dict: + state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean) + if prefix + "running_var" not in state_dict: + state_dict[prefix + "running_var"] = torch.ones_like(self.running_var) + + if version is not None and version < 3: + logger = logging.getLogger(__name__) + logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip("."))) + # In version < 3, running_var are used without +eps. 
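
`FrozenBatchNorm2d.forward` above folds the four buffers into one per-channel affine transform, so its output should match eval-mode batch norm up to floating-point error. A quick torch sketch of that equivalence (random statistics, illustrative only):

```python
# Sketch: the frozen-BN affine form equals eval-mode batch norm; statistics are random here.
import torch
import torch.nn.functional as F

num_features, eps = 8, 1e-5
weight = torch.randn(num_features)
bias = torch.randn(num_features)
running_mean = torch.randn(num_features)
running_var = torch.rand(num_features) + 0.5
x = torch.randn(2, num_features, 4, 4)

# The affine form used by FrozenBatchNorm2d.forward above.
scale = weight * (running_var + eps).rsqrt()
shift = bias - running_mean * scale
y_frozen = x * scale.reshape(1, -1, 1, 1) + shift.reshape(1, -1, 1, 1)

# Reference: standard batch norm evaluated with fixed statistics.
y_ref = F.batch_norm(x, running_mean, running_var, weight, bias, training=False, eps=eps)
print(torch.allclose(y_frozen, y_ref, atol=1e-5))   # True
```
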
+ state_dict[prefix + "running_var"] -= self.eps + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ) + + def __repr__(self): + return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps) + + @classmethod + def convert_frozen_batchnorm(cls, module): + """ + Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm. + + Args: + module (torch.nn.Module): + + Returns: + If module is BatchNorm/SyncBatchNorm, returns a new module. + Otherwise, in-place convert module and return it. + + Similar to convert_sync_batchnorm in + https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py + """ + bn_module = nn.modules.batchnorm + bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm) + res = module + if isinstance(module, bn_module): + res = cls(module.num_features) + if module.affine: + res.weight.data = module.weight.data.clone().detach() + res.bias.data = module.bias.data.clone().detach() + res.running_mean.data = module.running_mean.data + res.running_var.data = module.running_var.data + module.eps + else: + for name, child in module.named_children(): + new_child = cls.convert_frozen_batchnorm(child) + if new_child is not child: + res.add_module(name, new_child) + return res + + +def get_norm(norm, out_channels): + """ + Args: + norm (str or callable): + + Returns: + nn.Module or None: the normalization layer + """ + if isinstance(norm, str): + if len(norm) == 0: + return None + norm = { + "BN": BatchNorm2d, + "SyncBN": NaiveSyncBatchNorm, + "FrozenBN": FrozenBatchNorm2d, + "GN": lambda channels: nn.GroupNorm(32, channels), + "nnSyncBN": nn.SyncBatchNorm, # keep for debugging + }[norm] + return norm(out_channels) + + +class AllReduce(Function): + @staticmethod + def forward(ctx, input): + input_list = [torch.zeros_like(input) for k in range(dist.get_world_size())] + # Use allgather instead of allreduce since I don't trust in-place operations .. + dist.all_gather(input_list, input, async_op=False) + inputs = torch.stack(input_list, dim=0) + return torch.sum(inputs, dim=0) + + @staticmethod + def backward(ctx, grad_output): + dist.all_reduce(grad_output, async_op=False) + return grad_output + + +class NaiveSyncBatchNorm(BatchNorm2d): + """ + `torch.nn.SyncBatchNorm` has known unknown bugs. + + It produces significantly worse AP (and sometimes goes NaN) + when the batch size on each worker is quite different + (e.g., when scale augmentation is used, or when it is applied to mask head). + + Use this implementation before it is fixed. + It is slower than `torch.nn.SyncBatchNorm`. 
+ """ + + def forward(self, input): + if comm.get_world_size() == 1 or not self.training: + return super().forward(input) + + assert input.shape[0] > 0, "SyncBatchNorm does not support empty inputs" + C = input.shape[1] + mean = torch.mean(input, dim=[0, 2, 3]) + meansqr = torch.mean(input * input, dim=[0, 2, 3]) + + vec = torch.cat([mean, meansqr], dim=0) + vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size()) + + mean, meansqr = torch.split(vec, C) + var = meansqr - mean * mean + self.running_mean += self.momentum * (mean.detach() - self.running_mean) + self.running_var += self.momentum * (var.detach() - self.running_var) + + invstd = torch.rsqrt(var + self.eps) + scale = self.weight * invstd + bias = self.bias - mean * scale + scale = scale.reshape(1, -1, 1, 1) + bias = bias.reshape(1, -1, 1, 1) + return input * scale + bias diff --git a/detectron2/layers/csrc/README.md b/detectron2/layers/csrc/README.md new file mode 100644 index 0000000000..778ed3da0b --- /dev/null +++ b/detectron2/layers/csrc/README.md @@ -0,0 +1,7 @@ + + +To add a new Op: + +1. Create a new directory +2. Implement new ops there +3. Delcare its Python interface in `vision.cpp`. diff --git a/detectron2/layers/csrc/ROIAlign/ROIAlign.h b/detectron2/layers/csrc/ROIAlign/ROIAlign.h new file mode 100644 index 0000000000..01da8e0517 --- /dev/null +++ b/detectron2/layers/csrc/ROIAlign/ROIAlign.h @@ -0,0 +1,126 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#pragma once +#include + +at::Tensor ROIAlign_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + bool aligned); + +at::Tensor ROIAlign_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio, + bool aligned); + +#ifdef WITH_CUDA +at::Tensor ROIAlign_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + bool aligned); + +at::Tensor ROIAlign_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio, + bool aligned); +#endif + +// Interface for Python +inline at::Tensor ROIAlign_forward( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + bool aligned) { + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return ROIAlign_forward_cuda( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + aligned); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + return ROIAlign_forward_cpu( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + aligned); +} + +inline at::Tensor ROIAlign_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio, + bool aligned) { + if (grad.type().is_cuda()) { +#ifdef WITH_CUDA + return 
ROIAlign_backward_cuda( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio, + aligned); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + return ROIAlign_backward_cpu( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio, + aligned); +} diff --git a/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp b/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp new file mode 100644 index 0000000000..0a6490a56a --- /dev/null +++ b/detectron2/layers/csrc/ROIAlign/ROIAlign_cpu.cpp @@ -0,0 +1,495 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include +#include "ROIAlign.h" + +// implementation taken from Caffe2 +template +struct PreCalc { + int pos1; + int pos2; + int pos3; + int pos4; + T w1; + T w2; + T w3; + T w4; +}; + +template +void pre_calc_for_bilinear_interpolate( + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int iy_upper, + const int ix_upper, + T roi_start_h, + T roi_start_w, + T bin_size_h, + T bin_size_w, + int roi_bin_grid_h, + int roi_bin_grid_w, + std::vector>& pre_calc) { + int pre_calc_index = 0; + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + for (int iy = 0; iy < iy_upper; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < ix_upper; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + T x = xx; + T y = yy; + // deal with: inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + PreCalc pc; + pc.pos1 = 0; + pc.pos2 = 0; + pc.pos3 = 0; + pc.pos4 = 0; + pc.w1 = 0; + pc.w2 = 0; + pc.w3 = 0; + pc.w4 = 0; + pre_calc[pre_calc_index] = pc; + pre_calc_index += 1; + continue; + } + + if (y <= 0) { + y = 0; + } + if (x <= 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + // save weights and indeces + PreCalc pc; + pc.pos1 = y_low * width + x_low; + pc.pos2 = y_low * width + x_high; + pc.pos3 = y_high * width + x_low; + pc.pos4 = y_high * width + x_high; + pc.w1 = w1; + pc.w2 = w2; + pc.w3 = w3; + pc.w4 = w4; + pre_calc[pre_calc_index] = pc; + + pre_calc_index += 1; + } + } + } + } +} + +template +void ROIAlignForward( + const int nthreads, + const T* input, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* output, + bool aligned) { + int n_rois = nthreads / channels / pooled_width / pooled_height; + // (n, c, ph, pw) is an element in the pooled output + // can be parallelized using omp + // #pragma omp parallel for num_threads(32) + for (int n = 0; n < n_rois; n++) { + int index_n = n * channels * pooled_width * pooled_height; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not use rounding; this implementation detail is critical + T offset = aligned ? (T)0.5 : (T)0.0; + T roi_start_w = offset_rois[1] * spatial_scale - offset; + T roi_start_h = offset_rois[2] * spatial_scale - offset; + T roi_end_w = offset_rois[3] * spatial_scale - offset; + T roi_end_h = offset_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (aligned) { + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlign cannot have non-negative size!"); + } else { // for backward-compatibility only + roi_width = std::max(roi_width, (T)1.); + roi_height = std::max(roi_height, (T)1.); + } + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + // When the grid is empty, output zeros. + const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
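
With `aligned=true` the ROI coordinates are shifted by half a pixel before pooling (and, unlike the legacy path, width/height are not clamped to 1); each output bin is then averaged over a `roi_bin_grid_h × roi_bin_grid_w` grid of bilinear samples. A Python sketch of how those sample coordinates are laid out for one toy ROI (all numbers illustrative):

```python
# Toy layout of ROIAlign sample points for a single ROI; all numbers are illustrative.
import math

spatial_scale, aligned = 0.25, True
pooled_h = pooled_w = 2
sampling_ratio = 2
x1, y1, x2, y2 = 8.0, 8.0, 40.0, 24.0              # ROI in input-image coordinates

offset = 0.5 if aligned else 0.0                   # half-pixel shift when aligned
roi_start_w = x1 * spatial_scale - offset
roi_start_h = y1 * spatial_scale - offset
roi_w = x2 * spatial_scale - offset - roi_start_w
roi_h = y2 * spatial_scale - offset - roi_start_h
bin_h, bin_w = roi_h / pooled_h, roi_w / pooled_w

grid_h = sampling_ratio if sampling_ratio > 0 else math.ceil(roi_h / pooled_h)
grid_w = sampling_ratio if sampling_ratio > 0 else math.ceil(roi_w / pooled_w)

for ph in range(pooled_h):
    for pw in range(pooled_w):
        samples = [
            (roi_start_h + ph * bin_h + (iy + 0.5) * bin_h / grid_h,
             roi_start_w + pw * bin_w + (ix + 0.5) * bin_w / grid_w)
            for iy in range(grid_h) for ix in range(grid_w)
        ]
        # each (y, x) is bilinearly interpolated in the feature map, then averaged
        print((ph, pw), samples)
```
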
= 4 + + // we want to precalculate indices and weights shared by all channels, + // this is the key point of optimization + std::vector> pre_calc( + roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height); + pre_calc_for_bilinear_interpolate( + height, + width, + pooled_height, + pooled_width, + roi_bin_grid_h, + roi_bin_grid_w, + roi_start_h, + roi_start_w, + bin_size_h, + bin_size_w, + roi_bin_grid_h, + roi_bin_grid_w, + pre_calc); + + for (int c = 0; c < channels; c++) { + int index_n_c = index_n + c * pooled_width * pooled_height; + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + int pre_calc_index = 0; + + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + int index = index_n_c + ph * pooled_width + pw; + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + PreCalc pc = pre_calc[pre_calc_index]; + output_val += pc.w1 * offset_input[pc.pos1] + + pc.w2 * offset_input[pc.pos2] + + pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4]; + + pre_calc_index += 1; + } + } + output_val /= count; + + output[index] = output_val; + } // for pw + } // for ph + } // for c + } // for n +} + +template +void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high, + const int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +inline void add(T* address, const T& val) { + *address += val; +} + +template +void ROIAlignBackward( + const int nthreads, + const T* grad_output, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* grad_input, + const T* rois, + const int n_stride, + const int c_stride, + const int h_stride, + const int w_stride, + bool aligned) { + for (int index = 0; index < nthreads; index++) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not use rounding; this implementation detail is critical + T offset = aligned ? 
(T)0.5 : (T)0.0; + T roi_start_w = offset_rois[1] * spatial_scale - offset; + T roi_start_h = offset_rois[2] * spatial_scale - offset; + T roi_end_w = offset_rois[3] * spatial_scale - offset; + T roi_end_h = offset_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (aligned) { + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlign do not have non-negative size!"); + } else { // for backward-compatibility only + roi_width = std::max(roi_width, (T)1.); + roi_height = std::max(roi_height, (T)1.); + } + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + int output_offset = n * n_stride + c * c_stride; + const T* offset_grad_output = grad_output + output_offset; + const T grad_output_this_bin = + offset_grad_output[ph * h_stride + pw * w_stride]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, + width, + y, + x, + w1, + w2, + w3, + w4, + x_low, + x_high, + y_low, + y_high, + index); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + // atomic add is not needed for now since it is single threaded + add(offset_grad_input + y_low * width + x_low, static_cast(g1)); + add(offset_grad_input + y_low * width + x_high, static_cast(g2)); + add(offset_grad_input + y_high * width + x_low, static_cast(g3)); + add(offset_grad_input + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // for +} // ROIAlignBackward + +at::Tensor ROIAlign_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + bool aligned) { + AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_forward_cpu"; + at::checkAllSameType(c, {input_t, rois_t}); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + + auto output_size = num_rois * pooled_height * pooled_width * channels; + + if (output.numel() == 0) + return output; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ROIAlign_forward", 
[&] { + ROIAlignForward( + output_size, + input.contiguous().data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois.contiguous().data_ptr(), + output.data_ptr(), + aligned); + }); + return output; +} + +at::Tensor ROIAlign_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio, + bool aligned) { + AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_backward_cpu"; + at::checkAllSameType(c, {grad_t, rois_t}); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return grad_input; + } + + // get stride values to ensure indexing into gradients is correct. + int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "ROIAlign_forward", [&] { + ROIAlignBackward( + grad.numel(), + grad.contiguous().data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois.contiguous().data_ptr(), + n_stride, + c_stride, + h_stride, + w_stride, + aligned); + }); + return grad_input; +} diff --git a/detectron2/layers/csrc/ROIAlign/ROIAlign_cuda.cu b/detectron2/layers/csrc/ROIAlign/ROIAlign_cuda.cu new file mode 100644 index 0000000000..44c29667f1 --- /dev/null +++ b/detectron2/layers/csrc/ROIAlign/ROIAlign_cuda.cu @@ -0,0 +1,418 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include +#include +#include +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +template +__device__ T bilinear_interpolate( + const T* bottom_data, + const int height, + const int width, + T y, + T x, + const int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + return 0; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + // do bilinear interpolation + T v1 = bottom_data[y_low * width + x_low]; + T v2 = bottom_data[y_low * width + x_high]; + T v3 = bottom_data[y_high * width + x_low]; + T v4 = bottom_data[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template +__global__ void RoIAlignForward( + const int nthreads, + const T* bottom_data, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* bottom_rois, + T* top_data, + bool aligned) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + + // Do not use rounding; this implementation detail is critical + T offset = aligned ? (T)0.5 : (T)0.0; + T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset; + T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset; + T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset; + T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (!aligned) { // for backward-compatibility only + roi_width = max(roi_width, (T)1.); + roi_height = max(roi_height, (T)1.); + } + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + const T* offset_bottom_data = + bottom_data + (roi_batch_ind * channels + c) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + // When the grid is empty, output zeros. + const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + T val = bilinear_interpolate( + offset_bottom_data, height, width, y, x, index); + output_val += val; + } + } + output_val /= count; + + top_data[index] = output_val; + } +} + +template +__device__ void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high, + const int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = bottom_data[y_low * width + x_low]; + // T v2 = bottom_data[y_low * width + x_high]; + // T v3 = bottom_data[y_high * width + x_low]; + // T v4 = bottom_data[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +__global__ void RoIAlignBackwardFeature( + const int nthreads, + const T* top_diff, + const int num_rois, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* bottom_diff, + const T* bottom_rois, + bool aligned) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_bottom_rois = bottom_rois + n * 5; + int roi_batch_ind = offset_bottom_rois[0]; + + // Do not use rounding; this implementation detail is critical + T offset = aligned ? 
(T)0.5 : (T)0.0; + T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset; + T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset; + T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset; + T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (!aligned) { // for backward-compatibility only + roi_width = max(roi_width, (T)1.); + roi_height = max(roi_height, (T)1.); + } + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_bottom_diff = + bottom_diff + (roi_batch_ind * channels + c) * height * width; + + int top_offset = (n * channels + c) * pooled_height * pooled_width; + const T* offset_top_diff = top_diff + top_offset; + const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T y = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, + width, + y, + x, + w1, + w2, + w3, + w4, + x_low, + x_high, + y_low, + y_high, + index); + + T g1 = top_diff_this_bin * w1 / count; + T g2 = top_diff_this_bin * w2 / count; + T g3 = top_diff_this_bin * w3 / count; + T g4 = top_diff_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd( + offset_bottom_diff + y_low * width + x_low, static_cast(g1)); + atomicAdd( + offset_bottom_diff + y_low * width + x_high, static_cast(g2)); + atomicAdd( + offset_bottom_diff + y_high * width + x_low, static_cast(g3)); + atomicAdd( + offset_bottom_diff + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // CUDA_1D_KERNEL_LOOP +} // RoIAlignBackward + +at::Tensor ROIAlign_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + bool aligned) { + AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_forward_cuda"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + auto output = at::empty( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + auto output_size = num_rois * pooled_height * pooled_width * channels; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L)); 
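
The CUDA launchers use 512-thread blocks with the grid capped at 4096 blocks; because the kernels iterate with `CUDA_1D_KERNEL_LOOP` (a grid-stride loop), every element is still processed when the problem size exceeds `grid × block`. A toy sketch of that arithmetic:

```python
# Toy launch-configuration arithmetic mirroring the dims chosen above.
output_size = 1000 * 256 * 7 * 7                 # num_rois * channels * pooled_h * pooled_w
threads_per_block = 512
num_blocks = min((output_size + threads_per_block - 1) // threads_per_block, 4096)

total_threads = num_blocks * threads_per_block
print(num_blocks, total_threads < output_size)   # 4096 True
# When total_threads < output_size, each thread processes several elements because the
# kernel body runs inside the CUDA_1D_KERNEL_LOOP grid-stride loop.
```
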
+ dim3 block(512); + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return output; + } + + AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] { + RoIAlignForward<<>>( + output_size, + input.contiguous().data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois.contiguous().data_ptr(), + output.data_ptr(), + aligned); + }); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + return output; +} + +// TODO remove the dependency on input and use instead its sizes -> save memory +at::Tensor ROIAlign_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio, + bool aligned) { + AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; + at::CheckedFrom c = "ROIAlign_backward_cuda"; + at::checkAllSameGPU(c, {grad_t, rois_t}); + at::checkAllSameType(c, {grad_t, rois_t}); + at::cuda::CUDAGuard device_guard(grad.device()); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L)); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] { + RoIAlignBackwardFeature<<>>( + grad.numel(), + grad.contiguous().data_ptr(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois.contiguous().data_ptr(), + aligned); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} diff --git a/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h new file mode 100644 index 0000000000..15601d8abe --- /dev/null +++ b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h @@ -0,0 +1,115 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +#pragma once +#include + +namespace detectron2 { + +at::Tensor ROIAlignRotated_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio); + +at::Tensor ROIAlignRotated_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio); + +#ifdef WITH_CUDA +at::Tensor ROIAlignRotated_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio); + +at::Tensor ROIAlignRotated_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio); +#endif + +// Interface for Python +inline at::Tensor ROIAlignRotated_forward( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return ROIAlignRotated_forward_cuda( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + return ROIAlignRotated_forward_cpu( + input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); +} + +inline at::Tensor ROIAlignRotated_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + if (grad.type().is_cuda()) { +#ifdef WITH_CUDA + return ROIAlignRotated_backward_cuda( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + return ROIAlignRotated_backward_cpu( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio); +} + +} // namespace detectron2 diff --git a/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp new file mode 100644 index 0000000000..ec71c530eb --- /dev/null +++ b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp @@ -0,0 +1,519 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include +#include "ROIAlignRotated.h" + +// Note: this implementation originates from the Caffe2 ROIAlignRotated Op +// and PyTorch ROIAlign (non-rotated) Op implementations. +// The key difference between this implementation and those ones is +// we don't do "legacy offset" in this version, as there aren't many previous +// works, if any, using the "legacy" ROIAlignRotated Op. +// This would make the interface a bit cleaner. 
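
Unlike plain ROIAlign, the rotated variant consumes 6-element ROI rows `(batch_index, center_x, center_y, width, height, angle)` with the angle in degrees, rotating counterclockwise, and it always uses the half-pixel aligned convention. A hedged usage sketch through the `detectron2.layers.ROIAlignRotated` wrapper exported earlier (constructor argument names are assumed and should be checked against `roi_align_rotated.py`):

```python
# Hedged usage sketch; constructor argument names should be checked against roi_align_rotated.py.
import torch
from detectron2.layers import ROIAlignRotated

features = torch.randn(1, 256, 56, 56)
# (batch index, center_x, center_y, width, height, angle in degrees, counterclockwise)
rois = torch.tensor([[0.0, 112.0, 112.0, 96.0, 48.0, 30.0]])

pooler = ROIAlignRotated(output_size=(7, 7), spatial_scale=0.25, sampling_ratio=2)
out = pooler(features, rois)
print(out.shape)   # torch.Size([1, 256, 7, 7])
```
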
+ +namespace detectron2 { + +namespace { +template +struct PreCalc { + int pos1; + int pos2; + int pos3; + int pos4; + T w1; + T w2; + T w3; + T w4; +}; + +template +void pre_calc_for_bilinear_interpolate( + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int iy_upper, + const int ix_upper, + T roi_start_h, + T roi_start_w, + T bin_size_h, + T bin_size_w, + int roi_bin_grid_h, + int roi_bin_grid_w, + T roi_center_h, + T roi_center_w, + T cos_theta, + T sin_theta, + std::vector>& pre_calc) { + int pre_calc_index = 0; + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + for (int iy = 0; iy < iy_upper; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < ix_upper; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + // In image space, (y, x) is the order for Right Handed System, + // and this is essentially multiplying the point by a rotation matrix + // to rotate it counterclockwise through angle theta. + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + // deal with: inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + PreCalc pc; + pc.pos1 = 0; + pc.pos2 = 0; + pc.pos3 = 0; + pc.pos4 = 0; + pc.w1 = 0; + pc.w2 = 0; + pc.w3 = 0; + pc.w4 = 0; + pre_calc[pre_calc_index] = pc; + pre_calc_index += 1; + continue; + } + + if (y < 0) { + y = 0; + } + if (x < 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + // save weights and indeces + PreCalc pc; + pc.pos1 = y_low * width + x_low; + pc.pos2 = y_low * width + x_high; + pc.pos3 = y_high * width + x_low; + pc.pos4 = y_high * width + x_high; + pc.w1 = w1; + pc.w2 = w2; + pc.w3 = w3; + pc.w4 = w4; + pre_calc[pre_calc_index] = pc; + + pre_calc_index += 1; + } + } + } + } +} + +template +void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +template +inline void add(T* address, const T& val) { + *address += val; +} + +} // namespace + +template +void ROIAlignRotatedForward( + const int nthreads, + const T* input, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* output) { + int n_rois = nthreads / channels / pooled_width / pooled_height; + // (n, c, ph, pw) is an element in the pooled output + // can be parallelized using omp + // #pragma omp parallel for num_threads(32) + for (int n = 0; n < n_rois; n++) { + int index_n = n * channels * pooled_width * pooled_height; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlignRotated do not have non-negative size!"); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4 + + // we want to precalculate indices and weights shared by all chanels, + // this is the key point of optimization + std::vector> pre_calc( + roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. 
+ T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + pre_calc_for_bilinear_interpolate( + height, + width, + pooled_height, + pooled_width, + roi_bin_grid_h, + roi_bin_grid_w, + roi_start_h, + roi_start_w, + bin_size_h, + bin_size_w, + roi_bin_grid_h, + roi_bin_grid_w, + roi_center_h, + roi_center_w, + cos_theta, + sin_theta, + pre_calc); + + for (int c = 0; c < channels; c++) { + int index_n_c = index_n + c * pooled_width * pooled_height; + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + int pre_calc_index = 0; + + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + int index = index_n_c + ph * pooled_width + pw; + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + PreCalc pc = pre_calc[pre_calc_index]; + output_val += pc.w1 * offset_input[pc.pos1] + + pc.w2 * offset_input[pc.pos2] + + pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4]; + + pre_calc_index += 1; + } + } + output_val /= count; + + output[index] = output_val; + } // for pw + } // for ph + } // for c + } // for n +} + +template +void ROIAlignRotatedBackward( + const int nthreads, + const T* grad_output, + const T& spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* grad_input, + const T* rois, + const int n_stride, + const int c_stride, + const int h_stride, + const int w_stride) { + for (int index = 0; index < nthreads; index++) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + AT_ASSERTM( + roi_width >= 0 && roi_height >= 0, + "ROIs in ROIAlignRotated do not have non-negative size!"); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + int output_offset = n * n_stride + c * c_stride; + const T* offset_grad_output = grad_output + output_offset; + const T grad_output_this_bin = + offset_grad_output[ph * h_stride + pw * w_stride]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. 
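
Each sample point is first placed in a ROI-centered, axis-aligned frame (`roi_start_h = -roi_height / 2`, likewise for the width), then rotated by theta and translated to the ROI center before bilinear interpolation. A numpy sketch of that transform for a single point, mirroring the loop above (toy values):

```python
# Toy rotation of one sample point, mirroring the transform in the rotated forward pass.
import numpy as np

roi_center_w, roi_center_h = 27.5, 27.5      # already scaled by spatial_scale, minus 0.5
roi_width, roi_height = 24.0, 12.0
theta = np.deg2rad(30.0)                     # current_roi[5] * pi / 180
cos_t, sin_t = np.cos(theta), np.sin(theta)

# A sample location in the ROI-centered, axis-aligned frame (before rotation).
yy = -roi_height / 2.0 + 0.25 * roi_height
xx = -roi_width / 2.0 + 0.75 * roi_width

# Rotate counterclockwise by theta, then translate to the ROI center.
y = yy * cos_t - xx * sin_t + roi_center_h
x = yy * sin_t + xx * cos_t + roi_center_w
print(x, y)   # feature-map coordinates that are bilinearly interpolated
```
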
+ T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + // atomic add is not needed for now since it is single threaded + add(offset_grad_input + y_low * width + x_low, static_cast(g1)); + add(offset_grad_input + y_low * width + x_high, static_cast(g2)); + add(offset_grad_input + y_high * width + x_low, static_cast(g3)); + add(offset_grad_input + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // for +} // ROIAlignRotatedBackward + +at::Tensor ROIAlignRotated_forward_cpu( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlign_forward_cpu"; + at::checkAllSameType(c, {input_t, rois_t}); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + + auto output_size = num_rois * pooled_height * pooled_width * channels; + + if (output.numel() == 0) { + return output; + } + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.type(), "ROIAlignRotated_forward", [&] { + ROIAlignRotatedForward( + output_size, + input.contiguous().data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois.contiguous().data_ptr(), + output.data_ptr()); + }); + return output; +} + +at::Tensor ROIAlignRotated_backward_cpu( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor"); + AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlignRotated_backward_cpu"; + at::checkAllSameType(c, {grad_t, rois_t}); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return grad_input; + } + + // get stride values to ensure 
indexing into gradients is correct. + int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.type(), "ROIAlignRotated_forward", [&] { + ROIAlignRotatedBackward( + grad.numel(), + grad.contiguous().data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois.contiguous().data_ptr(), + n_stride, + c_stride, + h_stride, + w_stride); + }); + return grad_input; +} + +} // namespace detectron2 diff --git a/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu new file mode 100644 index 0000000000..e27c8670d4 --- /dev/null +++ b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu @@ -0,0 +1,435 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include +#include +#include +#include + +// TODO make it in a common file +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ + i += blockDim.x * gridDim.x) + +// Note: this implementation originates from the Caffe2 ROIAlignRotated Op +// and PyTorch ROIAlign (non-rotated) Op implementations. +// The key difference between this implementation and those ones is +// we don't do "legacy offset" in this version, as there aren't many previous +// works, if any, using the "legacy" ROIAlignRotated Op. +// This would make the interface a bit cleaner. + +namespace detectron2 { + +namespace { + +template +__device__ T bilinear_interpolate( + const T* input, + const int height, + const int width, + T y, + T x) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + return 0; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + // do bilinear interpolation + T v1 = input[y_low * width + x_low]; + T v2 = input[y_low * width + x_high]; + T v3 = input[y_high * width + x_low]; + T v4 = input[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template +__device__ void bilinear_interpolate_gradient( + const int height, + const int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y < 0) { + y = 0; + } + + if (x < 0) { + x = 0; + } + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + return; +} + +} // namespace + +template +__global__ void RoIAlignRotatedForward( + const int nthreads, + const T* input, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + const T* rois, + T* top_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (inte gral) pooling inside a bin + const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T val = bilinear_interpolate(offset_input, height, width, y, x); + output_val += val; + } + } + output_val /= count; + + top_data[index] = output_val; + } +} + +template +__global__ void RoIAlignRotatedBackwardFeature( + const int nthreads, + const T* top_diff, + const int num_rois, + const T spatial_scale, + const int channels, + const int height, + const int width, + const int pooled_height, + const int pooled_width, + const int sampling_ratio, + T* bottom_diff, + const T* rois) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* current_roi = rois + n * 6; + int roi_batch_ind = current_roi[0]; + + // Do not use rounding; this implementation detail is critical + // ROIAlignRotated supports align == true, i.e., continuous coordinate + // by default, thus the 0.5 offset + T offset = (T)0.5; + T roi_center_w = current_roi[1] * spatial_scale - offset; + T roi_center_h = current_roi[2] * spatial_scale - offset; + T roi_width = current_roi[3] * spatial_scale; + T roi_height = current_roi[4] * spatial_scale; + T theta = current_roi[5] * M_PI / 180.0; + T cos_theta = cos(theta); + T sin_theta = sin(theta); + + T bin_size_h = static_cast(roi_height) / static_cast(pooled_height); + T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); + + T* offset_bottom_diff = + bottom_diff + (roi_batch_ind * channels + c) * height * width; + + int top_offset = (n * channels + c) * pooled_height * pooled_width; + const T* offset_top_diff = top_diff + top_offset; + const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). + // Appropriate translation needs to be applied after. + T roi_start_h = -roi_height / 2.0; + T roi_start_w = -roi_width / 2.0; + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T yy = roi_start_h + ph * bin_size_h + + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); + + // Rotate by theta around the center and translate + T y = yy * cos_theta - xx * sin_theta + roi_center_h; + T x = yy * sin_theta + xx * cos_theta + roi_center_w; + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); + + T g1 = top_diff_this_bin * w1 / count; + T g2 = top_diff_this_bin * w2 / count; + T g3 = top_diff_this_bin * w3 / count; + T g4 = top_diff_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd( + offset_bottom_diff + y_low * width + x_low, static_cast(g1)); + atomicAdd( + offset_bottom_diff + y_low * width + x_high, static_cast(g2)); + atomicAdd( + offset_bottom_diff + y_high * width + x_low, static_cast(g3)); + atomicAdd( + offset_bottom_diff + y_high * width + x_high, static_cast(g4)); + } // if + } // ix + } // iy + } // CUDA_1D_KERNEL_LOOP +} // RoIAlignRotatedBackward + +at::Tensor ROIAlignRotated_forward_cuda( + const at::Tensor& input, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int sampling_ratio) { + AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ROIAlignRotated_forward_cuda"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + auto output = at::empty( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + auto output_size = num_rois * pooled_height * pooled_width * channels; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L)); + dim3 block(512); + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return output; + } + + AT_DISPATCH_FLOATING_TYPES( + input.scalar_type(), "ROIAlignRotated_forward", [&] { + RoIAlignRotatedForward<<>>( + output_size, + input.contiguous().data_ptr(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois.contiguous().data_ptr(), + output.data_ptr()); + }); + cudaDeviceSynchronize(); + AT_CUDA_CHECK(cudaGetLastError()); + return output; +} + +// TODO remove the dependency on input and use instead its sizes -> save memory +at::Tensor ROIAlignRotated_backward_cuda( + const at::Tensor& grad, + const at::Tensor& rois, + const float spatial_scale, + const int pooled_height, + const int pooled_width, + const int batch_size, + const int channels, + const int height, + const int width, + const int sampling_ratio) { + AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor"); + AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; + at::CheckedFrom c = "ROIAlign_backward_cuda"; + 
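// Note on the launch configuration used by these wrappers: the grid is
// ceil(N / 512) blocks of 512 threads, capped at 4096 blocks. The cap is safe
// because CUDA_1D_KERNEL_LOOP is a grid-stride loop, so each thread simply
// processes more than one element when N exceeds 4096 * 512. For example, with
// N = 3,000,000 output elements, ceil(N / 512) = 5860 > 4096, so 4096 blocks
// are launched and most threads handle one or two elements each.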
at::checkAllSameGPU(c, {grad_t, rois_t}); + at::checkAllSameType(c, {grad_t, rois_t}); + at::cuda::CUDAGuard device_guard(grad.device()); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L)); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + AT_DISPATCH_FLOATING_TYPES( + grad.scalar_type(), "ROIAlignRotated_backward", [&] { + RoIAlignRotatedBackwardFeature<<>>( + grad.numel(), + grad.contiguous().data_ptr(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + grad_input.data_ptr(), + rois.contiguous().data_ptr()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} + +} // namespace detectron2 diff --git a/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h new file mode 100644 index 0000000000..937f212a6e --- /dev/null +++ b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h @@ -0,0 +1,35 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#pragma once +#include + +namespace detectron2 { + +at::Tensor box_iou_rotated_cpu( + const at::Tensor& boxes1, + const at::Tensor& boxes2); + +#ifdef WITH_CUDA +at::Tensor box_iou_rotated_cuda( + const at::Tensor& boxes1, + const at::Tensor& boxes2); +#endif + +// Interface for Python +// inline is needed to prevent multiple function definitions when this header is +// included by different cpps +inline at::Tensor box_iou_rotated( + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); + if (boxes1.device().is_cuda()) { +#ifdef WITH_CUDA + return box_iou_rotated_cuda(boxes1, boxes2); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + + return box_iou_rotated_cpu(boxes1, boxes2); +} + +} // namespace detectron2 diff --git a/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp new file mode 100644 index 0000000000..a6aaa810c5 --- /dev/null +++ b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp @@ -0,0 +1,46 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +#include "box_iou_rotated.h" +#include "box_iou_rotated_utils.h" + +namespace detectron2 { + +template +void box_iou_rotated_cpu_kernel( + const at::Tensor& boxes1, + const at::Tensor& boxes2, + at::Tensor& ious) { + auto widths1 = boxes1.select(1, 2).contiguous(); + auto heights1 = boxes1.select(1, 3).contiguous(); + auto widths2 = boxes2.select(1, 2).contiguous(); + auto heights2 = boxes2.select(1, 3).contiguous(); + + at::Tensor areas1 = widths1 * heights1; + at::Tensor areas2 = widths2 * heights2; + + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + + for (int i = 0; i < num_boxes1; i++) { + for (int j = 0; j < num_boxes2; j++) { + ious[i * num_boxes2 + j] = single_box_iou_rotated( + boxes1[i].data_ptr(), boxes2[j].data_ptr()); + } + } +} + +at::Tensor box_iou_rotated_cpu( + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + auto num_boxes1 = boxes1.size(0); + auto num_boxes2 = boxes2.size(0); + at::Tensor ious = + at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); + + box_iou_rotated_cpu_kernel(boxes1, boxes2, ious); + + // reshape from 1d array to 2d array + auto shape = std::vector{num_boxes1, num_boxes2}; + return ious.reshape(shape); +} + +} // namespace detectron2 diff --git a/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu new file mode 100644 index 0000000000..79448e33b4 --- /dev/null +++ b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu @@ -0,0 +1,103 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include +#include +#include +#include +#include "box_iou_rotated_utils.h" + +namespace detectron2 { + +// 2D block with 32 * 16 = 512 threads per block +const int BLOCK_DIM_X = 32; +const int BLOCK_DIM_Y = 16; + +template +__global__ void box_iou_rotated_cuda_kernel( + const int n_boxes1, + const int n_boxes2, + const T* dev_boxes1, + const T* dev_boxes2, + T* dev_ious) { + const int row_start = blockIdx.x * blockDim.x; + const int col_start = blockIdx.y * blockDim.y; + + const int row_size = min(n_boxes1 - row_start, blockDim.x); + const int col_size = min(n_boxes2 - col_start, blockDim.y); + + __shared__ float block_boxes1[BLOCK_DIM_X * 5]; + __shared__ float block_boxes2[BLOCK_DIM_Y * 5]; + + // It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y + if (threadIdx.x < row_size && threadIdx.y == 0) { + block_boxes1[threadIdx.x * 5 + 0] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 0]; + block_boxes1[threadIdx.x * 5 + 1] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 1]; + block_boxes1[threadIdx.x * 5 + 2] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 2]; + block_boxes1[threadIdx.x * 5 + 3] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 3]; + block_boxes1[threadIdx.x * 5 + 4] = + dev_boxes1[(row_start + threadIdx.x) * 5 + 4]; + } + + if (threadIdx.x < col_size && threadIdx.y == 0) { + block_boxes2[threadIdx.x * 5 + 0] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 0]; + block_boxes2[threadIdx.x * 5 + 1] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 1]; + block_boxes2[threadIdx.x * 5 + 2] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 2]; + block_boxes2[threadIdx.x * 5 + 3] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 3]; + block_boxes2[threadIdx.x * 5 + 4] = + dev_boxes2[(col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size && threadIdx.y < col_size) { + int offset = (row_start + threadIdx.x) * n_boxes2 + col_start + threadIdx.y; + 
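// Note on the indexing above: each thread block covers a BLOCK_DIM_X x
// BLOCK_DIM_Y (32 x 16) tile of the n_boxes1 x n_boxes2 IoU matrix, with the
// tile's row boxes and column boxes staged in shared memory. "offset" is the
// row-major position of the pair (row_start + threadIdx.x, col_start + threadIdx.y)
// in the flattened output, which is reshaped to {num_boxes1, num_boxes2} later.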
dev_ious[offset] = single_box_iou_rotated( + block_boxes1 + threadIdx.x * 5, block_boxes2 + threadIdx.y * 5); + } +} + +at::Tensor box_iou_rotated_cuda( + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + using scalar_t = float; + AT_ASSERTM(boxes1.type().is_cuda(), "boxes1 must be a CUDA tensor"); + AT_ASSERTM(boxes2.type().is_cuda(), "boxes2 must be a CUDA tensor"); + at::cuda::CUDAGuard device_guard(boxes1.device()); + + int num_boxes1 = boxes1.size(0); + int num_boxes2 = boxes2.size(0); + + at::Tensor ious = + at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); + + if (num_boxes1 > 0 && num_boxes2 > 0) { + const int blocks_x = at::cuda::ATenCeilDiv(num_boxes1, BLOCK_DIM_X); + const int blocks_y = at::cuda::ATenCeilDiv(num_boxes2, BLOCK_DIM_Y); + + dim3 blocks(blocks_x, blocks_y); + dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + box_iou_rotated_cuda_kernel<<>>( + num_boxes1, + num_boxes2, + boxes1.data_ptr(), + boxes2.data_ptr(), + (scalar_t*)ious.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); + } + + // reshape from 1d array to 2d array + auto shape = std::vector{num_boxes1, num_boxes2}; + return ious.reshape(shape); +} + +} // namespace detectron2 diff --git a/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h new file mode 100644 index 0000000000..6448f97ff9 --- /dev/null +++ b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h @@ -0,0 +1,342 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#pragma once + +#include +#include + +#ifdef __CUDACC__ +// Designates functions callable from the host (CPU) and the device (GPU) +#define HOST_DEVICE __host__ __device__ +#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__ +#else +#include +#define HOST_DEVICE +#define HOST_DEVICE_INLINE HOST_DEVICE inline +#endif + +namespace detectron2 { + +namespace { + +template +struct RotatedBox { + T x_ctr, y_ctr, w, h, a; +}; + +template +struct Point { + T x, y; + HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {} + HOST_DEVICE_INLINE Point operator+(const Point& p) const { + return Point(x + p.x, y + p.y); + } + HOST_DEVICE_INLINE Point& operator+=(const Point& p) { + x += p.x; + y += p.y; + return *this; + } + HOST_DEVICE_INLINE Point operator-(const Point& p) const { + return Point(x - p.x, y - p.y); + } + HOST_DEVICE_INLINE Point operator*(const T coeff) const { + return Point(x * coeff, y * coeff); + } +}; + +template +HOST_DEVICE_INLINE T dot_2d(const Point& A, const Point& B) { + return A.x * B.x + A.y * B.y; +} + +template +HOST_DEVICE_INLINE T cross_2d(const Point& A, const Point& B) { + return A.x * B.y - B.x * A.y; +} + +template +HOST_DEVICE_INLINE void get_rotated_vertices( + const RotatedBox& box, + Point (&pts)[4]) { + // M_PI / 180. 
== 0.01745329251 + double theta = box.a * 0.01745329251; + T cosTheta2 = (T)cos(theta) * 0.5f; + T sinTheta2 = (T)sin(theta) * 0.5f; + + // y: top --> down; x: left --> right + pts[0].x = box.x_ctr - sinTheta2 * box.h - cosTheta2 * box.w; + pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w; + pts[1].x = box.x_ctr + sinTheta2 * box.h - cosTheta2 * box.w; + pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w; + pts[2].x = 2 * box.x_ctr - pts[0].x; + pts[2].y = 2 * box.y_ctr - pts[0].y; + pts[3].x = 2 * box.x_ctr - pts[1].x; + pts[3].y = 2 * box.y_ctr - pts[1].y; +} + +template +HOST_DEVICE_INLINE int get_intersection_points( + const Point (&pts1)[4], + const Point (&pts2)[4], + Point (&intersections)[24]) { + // Line vector + // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1] + Point vec1[4], vec2[4]; + for (int i = 0; i < 4; i++) { + vec1[i] = pts1[(i + 1) % 4] - pts1[i]; + vec2[i] = pts2[(i + 1) % 4] - pts2[i]; + } + + // Line test - test all line combos for intersection + int num = 0; // number of intersections + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + // Solve for 2x2 Ax=b + T det = cross_2d(vec2[j], vec1[i]); + + // This takes care of parallel lines + if (fabs(det) <= 1e-14) { + continue; + } + + auto vec12 = pts2[j] - pts1[i]; + + T t1 = cross_2d(vec2[j], vec12) / det; + T t2 = cross_2d(vec1[i], vec12) / det; + + if (t1 >= 0.0f && t1 <= 1.0f && t2 >= 0.0f && t2 <= 1.0f) { + intersections[num++] = pts1[i] + vec1[i] * t1; + } + } + } + + // Check for vertices of rect1 inside rect2 + { + const auto& AB = vec2[0]; + const auto& DA = vec2[3]; + auto ABdotAB = dot_2d(AB, AB); + auto ADdotAD = dot_2d(DA, DA); + for (int i = 0; i < 4; i++) { + // assume ABCD is the rectangle, and P is the point to be judged + // P is inside ABCD iff. P's projection on AB lies within AB + // and P's projection on AD lies within AD + + auto AP = pts1[i] - pts2[0]; + + auto APdotAB = dot_2d(AP, AB); + auto APdotAD = -dot_2d(AP, DA); + + if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) && + (APdotAD <= ADdotAD)) { + intersections[num++] = pts1[i]; + } + } + } + + // Reverse the check - check for vertices of rect2 inside rect1 + { + const auto& AB = vec1[0]; + const auto& DA = vec1[3]; + auto ABdotAB = dot_2d(AB, AB); + auto ADdotAD = dot_2d(DA, DA); + for (int i = 0; i < 4; i++) { + auto AP = pts2[i] - pts1[0]; + + auto APdotAB = dot_2d(AP, AB); + auto APdotAD = -dot_2d(AP, DA); + + if ((APdotAB >= 0) && (APdotAD >= 0) && (APdotAB <= ABdotAB) && + (APdotAD <= ADdotAD)) { + intersections[num++] = pts2[i]; + } + } + } + + return num; +} + +template +HOST_DEVICE_INLINE int convex_hull_graham( + const Point (&p)[24], + const int& num_in, + Point (&q)[24], + bool shift_to_zero = false) { + assert(num_in >= 2); + + // Step 1: + // Find point with minimum y + // if more than 1 points have the same minimum y, + // pick the one with the minimum x. 
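// A small worked example of the cross_2d orientation test used in the later
// steps: for A = (1, 0) and B = (0, 1), cross_2d(A, B) = 1 * 1 - 0 * 0 = 1 > 0,
// meaning B lies counter-clockwise of A; a negative value means clockwise, and
// a value near zero means the two vectors are (nearly) collinear.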
+ int t = 0; + for (int i = 1; i < num_in; i++) { + if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) { + t = i; + } + } + auto& start = p[t]; // starting point + + // Step 2: + // Subtract starting point from every points (for sorting in the next step) + for (int i = 0; i < num_in; i++) { + q[i] = p[i] - start; + } + + // Swap the starting point to position 0 + auto tmp = q[0]; + q[0] = q[t]; + q[t] = tmp; + + // Step 3: + // Sort point 1 ~ num_in according to their relative cross-product values + // (essentially sorting according to angles) + // If the angles are the same, sort according to their distance to origin + T dist[24]; + for (int i = 0; i < num_in; i++) { + dist[i] = dot_2d(q[i], q[i]); + } + +#ifdef __CUDACC__ + // CUDA version + // In the future, we can potentially use thrust + // for sorting here to improve speed (though not guaranteed) + for (int i = 1; i < num_in - 1; i++) { + for (int j = i + 1; j < num_in; j++) { + T crossProduct = cross_2d(q[i], q[j]); + if ((crossProduct < -1e-6) || + (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) { + auto q_tmp = q[i]; + q[i] = q[j]; + q[j] = q_tmp; + auto dist_tmp = dist[i]; + dist[i] = dist[j]; + dist[j] = dist_tmp; + } + } + } +#else + // CPU version + std::sort( + q + 1, q + num_in, [](const Point& A, const Point& B) -> bool { + T temp = cross_2d(A, B); + if (fabs(temp) < 1e-6) { + return dot_2d(A, A) < dot_2d(B, B); + } else { + return temp > 0; + } + }); +#endif + + // Step 4: + // Make sure there are at least 2 points (that don't overlap with each other) + // in the stack + int k; // index of the non-overlapped second point + for (k = 1; k < num_in; k++) { + if (dist[k] > 1e-8) { + break; + } + } + if (k == num_in) { + // We reach the end, which means the convex hull is just one point + q[0] = p[t]; + return 1; + } + q[1] = q[k]; + int m = 2; // 2 points in the stack + // Step 5: + // Finally we can start the scanning process. + // When a non-convex relationship between the 3 points is found + // (either concave shape or duplicated points), + // we pop the previous point from the stack + // until the 3-point relationship is convex again, or + // until the stack only contains two points + for (int i = k + 1; i < num_in; i++) { + while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2]) >= 0) { + m--; + } + q[m++] = q[i]; + } + + // Step 6 (Optional): + // In general sense we need the original coordinates, so we + // need to shift the points back (reverting Step 2) + // But if we're only interested in getting the area/perimeter of the shape + // We can simply return. + if (!shift_to_zero) { + for (int i = 0; i < m; i++) { + q[i] += start; + } + } + + return m; +} + +template +HOST_DEVICE_INLINE T polygon_area(const Point (&q)[24], const int& m) { + if (m <= 2) { + return 0; + } + + T area = 0; + for (int i = 1; i < m - 1; i++) { + area += fabs(cross_2d(q[i] - q[0], q[i + 1] - q[0])); + } + + return area / 2.0; +} + +template +HOST_DEVICE_INLINE T rotated_boxes_intersection( + const RotatedBox& box1, + const RotatedBox& box2) { + // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned + // from rotated_rect_intersection_pts + Point intersectPts[24], orderedPts[24]; + + Point pts1[4]; + Point pts2[4]; + get_rotated_vertices(box1, pts1); + get_rotated_vertices(box2, pts2); + + int num = get_intersection_points(pts1, pts2, intersectPts); + + if (num <= 2) { + return 0.0; + } + + // Convex Hull to order the intersection points in clockwise order and find + // the countour area. 
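// The "true" argument below is shift_to_zero: the hull vertices are left
// relative to the starting point, which is fine here because only the polygon
// area is needed, not the original coordinates. As a sanity check of the
// overall IoU: for two identical boxes the intersection polygon is the box
// itself, so intersection == area1 == area2 and the IoU computed below is 1.0;
// for two boxes that do not touch, fewer than three intersection points are
// found and the intersection (hence the IoU) is 0.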
+ int num_convex = convex_hull_graham(intersectPts, num, orderedPts, true); + return polygon_area(orderedPts, num_convex); +} + +template +HOST_DEVICE_INLINE T +single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) { + // shift center to the middle point to achieve higher precision in result + RotatedBox box1, box2; + auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0; + auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0; + box1.x_ctr = box1_raw[0] - center_shift_x; + box1.y_ctr = box1_raw[1] - center_shift_y; + box1.w = box1_raw[2]; + box1.h = box1_raw[3]; + box1.a = box1_raw[4]; + box2.x_ctr = box2_raw[0] - center_shift_x; + box2.y_ctr = box2_raw[1] - center_shift_y; + box2.w = box2_raw[2]; + box2.h = box2_raw[3]; + box2.a = box2_raw[4]; + + const T area1 = box1.w * box1.h; + const T area2 = box2.w * box2.h; + if (area1 < 1e-14 || area2 < 1e-14) { + return 0.f; + } + + const T intersection = rotated_boxes_intersection(box1, box2); + const T iou = intersection / (area1 + area2 - intersection); + return iou; +} + +} // namespace + +} // namespace detectron2 diff --git a/detectron2/layers/csrc/deformable/deform_conv.h b/detectron2/layers/csrc/deformable/deform_conv.h new file mode 100644 index 0000000000..43928b7805 --- /dev/null +++ b/detectron2/layers/csrc/deformable/deform_conv.h @@ -0,0 +1,373 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#pragma once +#include + +#ifdef WITH_CUDA +int deform_conv_forward_cuda( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step); + +int deform_conv_backward_input_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step); + +int deform_conv_backward_parameters_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step); + +void modulated_deform_conv_cuda_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias); + +void modulated_deform_conv_cuda_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias); + +#endif + +inline int deform_conv_forward( + at::Tensor input, + at::Tensor weight, 
+ at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + AT_CHECK(weight.type().is_cuda(), "weight tensor is not on GPU!"); + AT_CHECK(offset.type().is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_forward_cuda( + input, + weight, + offset, + output, + columns, + ones, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +inline int deform_conv_backward_input( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + if (gradOutput.type().is_cuda()) { +#ifdef WITH_CUDA + AT_CHECK(input.type().is_cuda(), "input tensor is not on GPU!"); + AT_CHECK(weight.type().is_cuda(), "weight tensor is not on GPU!"); + AT_CHECK(offset.type().is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_backward_input_cuda( + input, + offset, + gradOutput, + gradInput, + gradOffset, + weight, + columns, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +inline int deform_conv_backward_filter( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) { + if (gradOutput.type().is_cuda()) { +#ifdef WITH_CUDA + AT_CHECK(input.type().is_cuda(), "input tensor is not on GPU!"); + AT_CHECK(offset.type().is_cuda(), "offset tensor is not on GPU!"); + return deform_conv_backward_parameters_cuda( + input, + offset, + gradOutput, + gradWeight, + columns, + ones, + kW, + kH, + dW, + dH, + padW, + padH, + dilationW, + dilationH, + group, + deformable_group, + scale, + im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +inline void modulated_deform_conv_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias) { + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + AT_CHECK(weight.type().is_cuda(), "weight tensor is not on GPU!"); + AT_CHECK(bias.type().is_cuda(), "bias tensor is not on GPU!"); + AT_CHECK(offset.type().is_cuda(), "offset tensor is not on GPU!"); + return modulated_deform_conv_cuda_forward( + input, + weight, + bias, + ones, + offset, + mask, + output, + columns, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group, + 
with_bias); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +inline void modulated_deform_conv_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) { + if (grad_output.type().is_cuda()) { +#ifdef WITH_CUDA + AT_CHECK(input.type().is_cuda(), "input tensor is not on GPU!"); + AT_CHECK(weight.type().is_cuda(), "weight tensor is not on GPU!"); + AT_CHECK(bias.type().is_cuda(), "bias tensor is not on GPU!"); + AT_CHECK(offset.type().is_cuda(), "offset tensor is not on GPU!"); + return modulated_deform_conv_cuda_backward( + input, + weight, + bias, + ones, + offset, + mask, + columns, + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + kernel_h, + kernel_w, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + group, + deformable_group, + with_bias); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} diff --git a/detectron2/layers/csrc/deformable/deform_conv_cuda.cu b/detectron2/layers/csrc/deformable/deform_conv_cuda.cu new file mode 100644 index 0000000000..0f4e637c1a --- /dev/null +++ b/detectron2/layers/csrc/deformable/deform_conv_cuda.cu @@ -0,0 +1,1126 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp +// Original license: Apache 2.0 + +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda.c +// Original license: Apache 2.0 + +#include + +#include "deform_conv.h" + +#include +#include + +void deformable_im2col( + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor data_col); + +void deformable_col2im( + const at::Tensor data_col, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_im); + +void deformable_col2im_coord( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_offset); + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int 
channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor data_col); + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_im); + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, + at::Tensor grad_mask); + +void shape_check( + at::Tensor input, + at::Tensor offset, + at::Tensor* gradOutput, + at::Tensor weight, + int kH, + int kW, + int dH, + int dW, + int padH, + int padW, + int dilationH, + int dilationW, + int group, + int deformable_group) { + AT_CHECK( + weight.ndimension() == 4, + "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, " + "but got: %s", + weight.ndimension()); + + AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + AT_CHECK( + kW > 0 && kH > 0, + "kernel size should be greater than zero, but got kH: %d kW: %d", + kH, + kW); + + AT_CHECK( + (weight.size(2) == kH && weight.size(3) == kW), + "kernel size should be consistent with weight, ", + "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", + kH, + kW, + weight.size(2), + weight.size(3)); + + AT_CHECK( + dW > 0 && dH > 0, + "stride should be greater than zero, but got dH: %d dW: %d", + dH, + dW); + + AT_CHECK( + dilationW > 0 && dilationH > 0, + "dilation should be greater than 0, but got dilationH: %d dilationW: %d", + dilationH, + dilationW); + + int ndim = input.ndimension(); + int dimf = 0; + int dimh = 1; + int dimw = 2; + + if (ndim == 4) { + dimf++; + dimh++; + dimw++; + } + + AT_CHECK( + ndim == 3 || ndim == 4, + "3D or 4D input tensor expected but got: %s", + ndim); + + long nInputPlane = weight.size(1) * group; + long inputHeight = input.size(dimh); + long inputWidth = input.size(dimw); + long nOutputPlane = weight.size(0); + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + + AT_CHECK( + nInputPlane % deformable_group == 0, + "input channels must divide deformable group size"); + + if (outputWidth < 1 || outputHeight < 1) + AT_ERROR( + "Given input size: (%ld x %ld x %ld). " + "Calculated output size: (%ld x %ld x %ld). 
Output size is too small", + nInputPlane, + inputHeight, + inputWidth, + nOutputPlane, + outputHeight, + outputWidth); + + AT_CHECK( + input.size(1) == nInputPlane, + "invalid number of input planes, expected: %d, but got: %d", + nInputPlane, + input.size(1)); + + AT_CHECK( + (inputHeight >= kH && inputWidth >= kW), + "input image is smaller than kernel"); + + AT_CHECK( + (offset.size(2) == outputHeight && offset.size(3) == outputWidth), + "invalid spatial size of offset, expected height: %d width: %d, but " + "got height: %d width: %d", + outputHeight, + outputWidth, + offset.size(2), + offset.size(3)); + + AT_CHECK( + (offset.size(1) == deformable_group * 2 * kH * kW), + "invalid number of channels of offset"); + + if (gradOutput != NULL) { + AT_CHECK( + gradOutput->size(dimf) == nOutputPlane, + "invalid number of gradOutput planes, expected: %d, but got: %d", + nOutputPlane, + gradOutput->size(dimf)); + + AT_CHECK( + (gradOutput->size(dimh) == outputHeight && + gradOutput->size(dimw) == outputWidth), + "invalid size of gradOutput, expected height: %d width: %d , but " + "got height: %d width: %d", + outputHeight, + outputWidth, + gradOutput->size(dimh), + gradOutput->size(dimw)); + } +} + +int deform_conv_forward_cuda( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor output, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + // todo: resize columns to include im2col: done + // todo: add im2col_step as input + // todo: add new output buffer and transpose it to output (or directly + // transpose output) todo: possibly change data indexing because of + // parallel_imgs + + shape_check( + input, + offset, + NULL, + weight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input.unsqueeze_(0); + offset.unsqueeze_(0); + } + + // todo: assert batchsize dividable by im2col_step + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + AT_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + output = output.view({batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < outputHeight * outputWidth) { + ones = at::ones({outputHeight, outputWidth}, input.options()); + } + + input = input.view({batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + offset = offset.view({batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + at::Tensor output_buffer = at::zeros( + {batchSize / im2col_step, + nOutputPlane, + im2col_step * outputHeight, + outputWidth}, + output.options()); + + output_buffer = output_buffer.view({output_buffer.size(0), + group, + output_buffer.size(1) / group, + 
output_buffer.size(2), + output_buffer.size(3)}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col( + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + + for (int g = 0; g < group; g++) { + output_buffer[elt][g] = output_buffer[elt][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output_buffer[elt][g]); + } + } + + output_buffer = + output_buffer.view({output_buffer.size(0), + output_buffer.size(1) * output_buffer.size(2), + output_buffer.size(3), + output_buffer.size(4)}); + + output_buffer = output_buffer.view({batchSize / im2col_step, + nOutputPlane, + im2col_step, + outputHeight, + outputWidth}); + output_buffer.transpose_(1, 2); + output.copy_(output_buffer); + output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + output = output.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_input_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradInput, + at::Tensor gradOffset, + at::Tensor weight, + at::Tensor columns, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + int im2col_step) { + shape_check( + input, + offset, + &gradOutput, + weight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + weight = weight.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view({1, input.size(0), input.size(1), input.size(2)}); + offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)}); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = weight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + AT_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset"); + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + // change order of grad output + gradOutput = gradOutput.view({batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + gradOutput.transpose_(1, 2); + + gradInput = gradInput.view({batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + input = input.view({batchSize / im2col_step, + im2col_step, + nInputPlane, + 
inputHeight, + inputWidth}); + gradOffset = gradOffset.view({batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + offset = offset.view({batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + // divide into groups + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + gradOutput = gradOutput.view({gradOutput.size(0), + group, + gradOutput.size(1) / group, + gradOutput.size(2), + gradOutput.size(3), + gradOutput.size(4)}); + + for (int g = 0; g < group; g++) { + columns[g] = columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), + gradOutput[elt][g].flatten(1), + 0.0f, + 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradOutput = gradOutput.view({gradOutput.size(0), + gradOutput.size(1) * gradOutput.size(2), + gradOutput.size(3), + gradOutput.size(4), + gradOutput.size(5)}); + + deformable_col2im_coord( + columns, + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + gradOffset[elt]); + + deformable_col2im( + columns, + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + gradInput[elt]); + } + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth}); + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + gradOffset = gradOffset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth}); + offset = offset.view({offset.size(1), offset.size(2), offset.size(3)}); + gradOffset = + gradOffset.view({offset.size(1), offset.size(2), offset.size(3)}); + } + + return 1; +} + +int deform_conv_backward_parameters_cuda( + at::Tensor input, + at::Tensor offset, + at::Tensor gradOutput, + at::Tensor gradWeight, // at::Tensor gradBias, + at::Tensor columns, + at::Tensor ones, + int kW, + int kH, + int dW, + int dH, + int padW, + int padH, + int dilationW, + int dilationH, + int group, + int deformable_group, + float scale, + int im2col_step) { + // todo: transpose and reshape outGrad + // todo: reshape columns + // todo: add im2col_step as input + + shape_check( + input, + offset, + &gradOutput, + gradWeight, + kH, + kW, + dH, + dW, + padH, + padW, + dilationH, + dilationW, + group, + deformable_group); + + input = input.contiguous(); + offset = offset.contiguous(); + gradOutput = gradOutput.contiguous(); + + int batch = 1; + + if (input.ndimension() == 3) { + // Force batch + batch = 0; + input = input.view( + at::IntList({1, input.size(0), input.size(1), input.size(2)})); + gradOutput = gradOutput.view( + {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)}); + } + + long batchSize = input.size(0); + long 
nInputPlane = input.size(1); + long inputHeight = input.size(2); + long inputWidth = input.size(3); + + long nOutputPlane = gradWeight.size(0); + + long outputWidth = + (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1; + long outputHeight = + (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1; + + AT_CHECK((offset.size(0) == batchSize), "invalid batch size of offset"); + + columns = at::zeros( + {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth}, + input.options()); + + gradOutput = gradOutput.view({batchSize / im2col_step, + im2col_step, + nOutputPlane, + outputHeight, + outputWidth}); + gradOutput.transpose_(1, 2); + + at::Tensor gradOutputBuffer = at::zeros_like(gradOutput); + gradOutputBuffer = gradOutputBuffer.view({batchSize / im2col_step, + nOutputPlane, + im2col_step, + outputHeight, + outputWidth}); + gradOutputBuffer.copy_(gradOutput); + gradOutputBuffer = gradOutputBuffer.view({batchSize / im2col_step, + nOutputPlane, + im2col_step * outputHeight, + outputWidth}); + + gradOutput.transpose_(1, 2); + gradOutput = + gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth}); + + input = input.view({batchSize / im2col_step, + im2col_step, + nInputPlane, + inputHeight, + inputWidth}); + offset = offset.view({batchSize / im2col_step, + im2col_step, + deformable_group * 2 * kH * kW, + outputHeight, + outputWidth}); + + for (int elt = 0; elt < batchSize / im2col_step; elt++) { + deformable_im2col( + input[elt], + offset[elt], + nInputPlane, + inputHeight, + inputWidth, + kH, + kW, + padH, + padW, + dH, + dW, + dilationH, + dilationW, + im2col_step, + deformable_group, + columns); + + // divide into group + gradOutputBuffer = gradOutputBuffer.view({gradOutputBuffer.size(0), + group, + gradOutputBuffer.size(1) / group, + gradOutputBuffer.size(2), + gradOutputBuffer.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + gradWeight = gradWeight.view({group, + gradWeight.size(0) / group, + gradWeight.size(1), + gradWeight.size(2), + gradWeight.size(3)}); + + for (int g = 0; g < group; g++) { + gradWeight[g] = gradWeight[g] + .flatten(1) + .addmm_( + gradOutputBuffer[elt][g].flatten(1), + columns[g].transpose(1, 0), + 1.0, + scale) + .view_as(gradWeight[g]); + } + gradOutputBuffer = gradOutputBuffer.view( + {gradOutputBuffer.size(0), + gradOutputBuffer.size(1) * gradOutputBuffer.size(2), + gradOutputBuffer.size(3), + gradOutputBuffer.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1), + gradWeight.size(2), + gradWeight.size(3), + gradWeight.size(4)}); + } + + input = input.view({batchSize, nInputPlane, inputHeight, inputWidth}); + offset = offset.view( + {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth}); + + if (batch == 0) { + gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth}); + input = input.view({nInputPlane, inputHeight, inputWidth}); + } + + return 1; +} + +void modulated_deform_conv_cuda_forward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor output, + at::Tensor columns, + int kernel_h, + int kernel_w, + const int stride_h, + const int stride_w, + const int pad_h, + const int pad_w, + const int dilation_h, + const int dilation_w, + const int group, + const int deformable_group, + const bool with_bias) { + AT_CHECK(input.is_contiguous(), "input tensor has to be 
contiguous"); + AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_out = weight.size(0); + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + + if (kernel_h_ != kernel_h || kernel_w_ != kernel_w) + AT_ERROR( + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, + kernel_w, + kernel_h_, + kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR( + "Input shape and kernel channels wont match: (%d vs %d).", + channels, + channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + // resize output + output = output.view({batch, channels_out, height_out, width_out}).zero_(); + // resize temporary columns + columns = at::zeros( + {channels * kernel_h * kernel_w, 1 * height_out * width_out}, + input.options()); + + output = output.view({output.size(0), + group, + output.size(1) / group, + output.size(2), + output.size(3)}); + + for (int b = 0; b < batch; b++) { + modulated_deformable_im2col_cuda( + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + columns); + + // divide into group + weight = weight.view({group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + + for (int g = 0; g < group; g++) { + output[b][g] = output[b][g] + .flatten(1) + .addmm_(weight[g].flatten(1), columns[g]) + .view_as(output[b][g]); + } + + weight = weight.view({weight.size(0) * weight.size(1), + weight.size(2), + weight.size(3), + weight.size(4)}); + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + } + + output = output.view({output.size(0), + output.size(1) * output.size(2), + output.size(3), + output.size(4)}); + + if (with_bias) { + output += bias.view({1, bias.size(0), 1, 1}); + } +} + +void modulated_deform_conv_cuda_backward( + at::Tensor input, + at::Tensor weight, + at::Tensor bias, + at::Tensor ones, + at::Tensor offset, + at::Tensor mask, + at::Tensor columns, + at::Tensor grad_input, + at::Tensor grad_weight, + at::Tensor grad_bias, + at::Tensor grad_offset, + at::Tensor grad_mask, + at::Tensor grad_output, + int kernel_h, + int kernel_w, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int group, + int deformable_group, + const bool with_bias) { + AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); + AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); + + const int batch = input.size(0); + const int channels = input.size(1); + const int height = input.size(2); + const int width = input.size(3); + + const int channels_kernel = weight.size(1); + const int kernel_h_ = weight.size(2); + const int kernel_w_ = weight.size(3); + if (kernel_h_ != kernel_h || kernel_w_ != 
kernel_w) + AT_ERROR( + "Input shape and kernel shape wont match: (%d x %d vs %d x %d).", + kernel_h_, + kernel_w, + kernel_h_, + kernel_w_); + if (channels != channels_kernel * group) + AT_ERROR( + "Input shape and kernel channels wont match: (%d vs %d).", + channels, + channels_kernel * group); + + const int height_out = + (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; + const int width_out = + (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; + + if (ones.ndimension() != 2 || + ones.size(0) * ones.size(1) < height_out * width_out) { + // Resize plane and fill with ones... + ones = at::ones({height_out, width_out}, input.options()); + } + + grad_input = grad_input.view({batch, channels, height, width}); + columns = at::zeros( + {channels * kernel_h * kernel_w, height_out * width_out}, + input.options()); + + grad_output = grad_output.view({grad_output.size(0), + group, + grad_output.size(1) / group, + grad_output.size(2), + grad_output.size(3)}); + + for (int b = 0; b < batch; b++) { + // divide int group + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + weight = weight.view({group, + weight.size(0) / group, + weight.size(1), + weight.size(2), + weight.size(3)}); + + for (int g = 0; g < group; g++) { + columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), + grad_output[b][g].flatten(1), + 0.0f, + 1.0f); + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + weight = weight.view({weight.size(0) * weight.size(1), + weight.size(2), + weight.size(3), + weight.size(4)}); + + // gradient w.r.t. input coordinate data + modulated_deformable_col2im_coord_cuda( + columns, + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + grad_offset[b], + grad_mask[b]); + // gradient w.r.t. input data + modulated_deformable_col2im_cuda( + columns, + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + grad_input[b]); + + // gradient w.r.t. 
weight, dWeight should accumulate across the batch and + // group + modulated_deformable_im2col_cuda( + input[b], + offset[b], + mask[b], + 1, + channels, + height, + width, + height_out, + width_out, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + deformable_group, + columns); + + columns = columns.view({group, columns.size(0) / group, columns.size(1)}); + grad_weight = grad_weight.view({group, + grad_weight.size(0) / group, + grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3)}); + if (with_bias) + grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); + + for (int g = 0; g < group; g++) { + grad_weight[g] = + grad_weight[g] + .flatten(1) + .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) + .view_as(grad_weight[g]); + if (with_bias) { + grad_bias[g] = + grad_bias[g] + .view({-1, 1}) + .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) + .view(-1); + } + } + + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3), + grad_weight.size(4)}); + if (with_bias) + grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); + } + grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), + grad_output.size(2), + grad_output.size(3), + grad_output.size(4)}); +} diff --git a/detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu b/detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu new file mode 100644 index 0000000000..ff436bee6a --- /dev/null +++ b/detectron2/layers/csrc/deformable/deform_conv_cuda_kernel.cu @@ -0,0 +1,1268 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu +// Original license: Apache 2.0 +// clang-format off + +// modify from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer ********************* + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. + * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +#include +#include +#include +#include +#include +#include + +using namespace at; + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +const int kMaxGridNum = 65535; + +inline int GET_BLOCKS(const int N) { + return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); +} + +template +__device__ scalar_t deformable_im2col_bilinear( + const scalar_t* bottom_data, + const int data_width, + const int height, + const int width, + scalar_t h, + scalar_t w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t get_gradient_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight 
= (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t get_coordinate_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int height, + const int width, + const scalar_t* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void deformable_im2col_gpu_kernel( + const int n, + const scalar_t* data_im, + const scalar_t* data_offset, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int num_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* data_col) { + CUDA_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + scalar_t* data_col_ptr = data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * + // height + h_in) * width + w_in; + const scalar_t* data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b_col * 
deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + j * dilation_w + offset_w; + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { + // const scalar_t map_h = i * dilation_h + offset_h; + // const scalar_t map_w = j * dilation_w + offset_w; + // const int cur_height = height - h_in; + // const int cur_width = width - w_in; + // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, + // cur_width, map_h, map_w); + val = deformable_im2col_bilinear( + data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val; + data_col_ptr += batch_size * height_col * width_col; + } + } + } +} + +void deformable_im2col( + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor data_col) { + // num_axes should be smaller than block size + // todo: check parallel_imgs is correctly passed in + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = channels * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + at::cuda::CUDAGuard device_guard(data_im.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.type(), "deformable_im2col_gpu", ([&] { + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* data_col_ = data_col.data_ptr(); + + deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_im_, + data_offset_, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + channels, + deformable_group, + height_col, + width_col, + data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); + } +} + +template +__global__ void deformable_col2im_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_im) { + CUDA_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % 
kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +void deformable_col2im( + const at::Tensor data_col, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_im) { + // todo: make sure parallel_imgs is passed in correctly + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = + channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; + int channel_per_deformable_group = channels / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "deformable_col2im_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* grad_im_ = grad_im.data_ptr(); + + deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_offset_, + channels, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + deformable_group, + height_col, + width_col, + grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); + } +} + +template +__global__ 
void deformable_col2im_coord_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_im, + const scalar_t* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int offset_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_offset) { + CUDA_KERNEL_LOOP(index, n) { + scalar_t val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * batch_size * + width_col * height_col; + const scalar_t* data_im_ptr = data_im + + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -2; + } + const scalar_t weight = get_coordinate_weight( + inv_h, + inv_w, + height, + width, + data_im_ptr + cnt * height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + + grad_offset[index] = val; + } +} + +void deformable_col2im_coord( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const int channels, + const int height, + const int width, + const int ksize_h, + const int ksize_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int parallel_imgs, + const int deformable_group, + at::Tensor grad_offset) { + int height_col = + (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; + int width_col = + (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; + int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * 
+ deformable_group * parallel_imgs; + int channel_per_deformable_group = + channels * ksize_h * ksize_w / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "deformable_col2im_coord_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + scalar_t* grad_offset_ = grad_offset.data_ptr(); + + deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_im_, + data_offset_, + channels, + height, + width, + ksize_h, + ksize_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + parallel_imgs, + 2 * ksize_h * ksize_w * deformable_group, + deformable_group, + height_col, + width_col, + grad_offset_); + })); +} + +template +__device__ scalar_t dmcn_im2col_bilinear( + const scalar_t* bottom_data, + const int data_width, + const int height, + const int width, + scalar_t h, + scalar_t w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = bottom_data[h_low * data_width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = bottom_data[h_low * data_width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = bottom_data[h_high * data_width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = bottom_data[h_high * data_width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ scalar_t dmcn_get_gradient_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + if (h == argmax_h_low && w == argmax_w_low) + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + if (h == argmax_h_low && w == argmax_w_high) + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + if (h == argmax_h_high && w == argmax_w_low) + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + if (h == argmax_h_high && w == argmax_w_high) + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + return weight; +} + +template +__device__ scalar_t dmcn_get_coordinate_weight( + scalar_t argmax_h, + scalar_t argmax_w, + const int height, + const int width, + const scalar_t* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || + argmax_w >= width) { + // empty + return 0; + } + + int argmax_h_low = floor(argmax_h); + int argmax_w_low = floor(argmax_w); + int argmax_h_high = argmax_h_low + 1; + int argmax_w_high = argmax_w_low + 1; + + scalar_t weight = 0; + + if (bp_dir == 0) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && 
argmax_w_high <= width - 1) + weight += -1 * (argmax_w - argmax_w_low) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + if (argmax_h_low >= 0 && argmax_w_low >= 0) + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + if (argmax_h_low >= 0 && argmax_w_high <= width - 1) + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + if (argmax_h_high <= height - 1 && argmax_w_low >= 0) + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +template +__global__ void modulated_deformable_im2col_gpu_kernel( + const int n, + const scalar_t* data_im, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int num_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* data_col) { + CUDA_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int b_col = (index / width_col / height_col) % batch_size; + const int c_im = (index / width_col / height_col) / batch_size; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + + scalar_t* data_col_ptr = data_col + + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; + // const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * + // height + h_in) * width + w_in; + const scalar_t* data_im_ptr = + data_im + (b_col * num_channels + c_im) * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + + const scalar_t* data_mask_ptr = data_mask + + (b_col * deformable_group + deformable_group_index) * kernel_h * + kernel_w * height_col * width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t val = static_cast(0); + const scalar_t h_im = h_in + i * dilation_h + offset_h; + const scalar_t w_im = w_in + 
j * dilation_w + offset_w; + // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { + // const float map_h = i * dilation_h + offset_h; + // const float map_w = j * dilation_w + offset_w; + // const int cur_height = height - h_in; + // const int cur_width = width - w_in; + // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, + // cur_width, map_h, map_w); + val = dmcn_im2col_bilinear( + data_im_ptr, width, height, width, h_im, w_im); + } + *data_col_ptr = val * mask; + data_col_ptr += batch_size * height_col * width_col; + // data_col_ptr += height_col * width_col; + } + } + } +} + +template +__global__ void modulated_deformable_col2im_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_im) { + CUDA_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col / batch_size) % kernel_w; + const int i = + (index / width_col / height_col / batch_size / kernel_w) % kernel_h; + const int c = + index / width_col / height_col / batch_size / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int b = (index / width_col / height_col) % batch_size; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const scalar_t* data_mask_ptr = data_mask + + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * + height_col * width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const int data_mask_hw_ptr = + ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; + const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const scalar_t cur_top_grad = data_col[index] * mask; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && + abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; + scalar_t weight = dmcn_get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +template +__global__ void 
modulated_deformable_col2im_coord_gpu_kernel( + const int n, + const scalar_t* data_col, + const scalar_t* data_im, + const scalar_t* data_offset, + const scalar_t* data_mask, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int batch_size, + const int offset_channels, + const int deformable_group, + const int height_col, + const int width_col, + scalar_t* grad_offset, + scalar_t* grad_mask) { + CUDA_KERNEL_LOOP(index, n) { + scalar_t val = 0, mval = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = (index / width_col / height_col) % offset_channels; + int b = (index / width_col / height_col) / offset_channels; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const scalar_t* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * batch_size * + width_col * height_col; + const scalar_t* data_im_ptr = data_im + + (b * deformable_group + deformable_group_index) * + channel_per_deformable_group / kernel_h / kernel_w * height * width; + const scalar_t* data_offset_ptr = data_offset + + (b * deformable_group + deformable_group_index) * 2 * kernel_h * + kernel_w * height_col * width_col; + const scalar_t* data_mask_ptr = data_mask + + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * + height_col * width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = + (((col_c * batch_size + b) * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col / batch_size) % kernel_w; + int i = + (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const int data_mask_hw_ptr = + (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); + const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; + const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; + const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; + scalar_t inv_h = h_in + i * dilation_h + offset_h; + scalar_t inv_w = w_in + j * dilation_w + offset_w; + if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -2; + } else { + mval += data_col_ptr[col_pos] * + dmcn_im2col_bilinear( + data_im_ptr + cnt * height * width, + width, + height, + width, + inv_h, + inv_w); + } + const scalar_t weight = dmcn_get_coordinate_weight( + inv_h, + inv_w, + height, + width, + data_im_ptr + cnt * height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos] * mask; + cnt += 1; + } + // KERNEL_ASSIGN(grad_offset[index], offset_req, val); + grad_offset[index] = val; + if (offset_c % 2 == 0) + // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + + // deformable_group_index) * 
kernel_h * kernel_w + offset_c / 2) * + // height_col + h) * width_col + w], mask_req, mval); + grad_mask + [(((b * deformable_group + deformable_group_index) * kernel_h * + kernel_w + + offset_c / 2) * + height_col + + h) * + width_col + + w] = mval; + } +} + +void modulated_deformable_im2col_cuda( + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kenerl_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor data_col) { + // num_axes should be smaller than block size + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = channels * batch_size * height_col * width_col; + + at::cuda::CUDAGuard device_guard(data_im.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_im.type(), "modulated_deformable_im2col_gpu", ([&] { + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* data_col_ = data_col.data_ptr(); + + modulated_deformable_im2col_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_im_, + data_offset_, + data_mask_, + height_im, + width_im, + kernel_h, + kenerl_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + channels, + deformable_group, + height_col, + width_col, + data_col_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_im2col_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_cuda( + const at::Tensor data_col, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_im) { + const int channel_per_deformable_group = channels / deformable_group; + const int num_kernels = + channels * kernel_h * kernel_w * batch_size * height_col * width_col; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "modulated_deformable_col2im_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* grad_im_ = grad_im.data_ptr(); + + modulated_deformable_col2im_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_offset_, + data_mask_, + channels, + height_im, + width_im, + kernel_h, + kernel_w, + pad_h, + pad_h, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + deformable_group, + height_col, + width_col, + grad_im_); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in 
modulated_deformable_col2im_cuda: %s\n", + cudaGetErrorString(err)); + } +} + +void modulated_deformable_col2im_coord_cuda( + const at::Tensor data_col, + const at::Tensor data_im, + const at::Tensor data_offset, + const at::Tensor data_mask, + const int batch_size, + const int channels, + const int height_im, + const int width_im, + const int height_col, + const int width_col, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int deformable_group, + at::Tensor grad_offset, + at::Tensor grad_mask) { + const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * + kernel_w * deformable_group; + const int channel_per_deformable_group = + channels * kernel_h * kernel_w / deformable_group; + + at::cuda::CUDAGuard device_guard(data_col.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] { + const scalar_t* data_col_ = data_col.data_ptr(); + const scalar_t* data_im_ = data_im.data_ptr(); + const scalar_t* data_offset_ = data_offset.data_ptr(); + const scalar_t* data_mask_ = data_mask.data_ptr(); + scalar_t* grad_offset_ = grad_offset.data_ptr(); + scalar_t* grad_mask_ = grad_mask.data_ptr(); + + modulated_deformable_col2im_coord_gpu_kernel<<< + GET_BLOCKS(num_kernels), + CUDA_NUM_THREADS, + 0, + stream>>>( + num_kernels, + data_col_, + data_im_, + data_offset_, + data_mask_, + channels, + height_im, + width_im, + kernel_h, + kernel_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + channel_per_deformable_group, + batch_size, + 2 * kernel_h * kernel_w * deformable_group, + deformable_group, + height_col, + width_col, + grad_offset_, + grad_mask_); + })); + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in modulated_deformable_col2im_coord_cuda: %s\n", + cudaGetErrorString(err)); + } +} diff --git a/detectron2/layers/csrc/nms_rotated/nms_rotated.h b/detectron2/layers/csrc/nms_rotated/nms_rotated.h new file mode 100644 index 0000000000..ebdfed5133 --- /dev/null +++ b/detectron2/layers/csrc/nms_rotated/nms_rotated.h @@ -0,0 +1,38 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#pragma once +#include + +namespace detectron2 { + +at::Tensor nms_rotated_cpu( + const at::Tensor& dets, + const at::Tensor& scores, + const float iou_threshold); + +#ifdef WITH_CUDA +at::Tensor nms_rotated_cuda( + const at::Tensor& dets, + const at::Tensor& scores, + const float iou_threshold); +#endif + +// Interface for Python +// inline is needed to prevent multiple function definitions when this header is +// included by different cpps +inline at::Tensor nms_rotated( + const at::Tensor& dets, + const at::Tensor& scores, + const float iou_threshold) { + assert(dets.device().is_cuda() == scores.device().is_cuda()); + if (dets.device().is_cuda()) { +#ifdef WITH_CUDA + return nms_rotated_cuda(dets, scores, iou_threshold); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + + return nms_rotated_cpu(dets, scores, iou_threshold); +} + +} // namespace detectron2 diff --git a/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp b/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp new file mode 100644 index 0000000000..2850d4ad76 --- /dev/null +++ b/detectron2/layers/csrc/nms_rotated/nms_rotated_cpu.cpp @@ -0,0 +1,73 @@ +// Copyright (c) Facebook, Inc. 
and its affiliates. All Rights Reserved +#include "../box_iou_rotated/box_iou_rotated_utils.h" +#include "nms_rotated.h" + +namespace detectron2 { + +template +at::Tensor nms_rotated_cpu_kernel( + const at::Tensor& dets, + const at::Tensor& scores, + const float iou_threshold) { + // nms_rotated_cpu_kernel is modified from torchvision's nms_cpu_kernel, + // however, the code in this function is much shorter because + // we delegate the IoU computation for rotated boxes to + // the single_box_iou_rotated function in box_iou_rotated_utils.h + AT_ASSERTM(!dets.type().is_cuda(), "dets must be a CPU tensor"); + AT_ASSERTM(!scores.type().is_cuda(), "scores must be a CPU tensor"); + AT_ASSERTM( + dets.type() == scores.type(), "dets should have the same type as scores"); + + if (dets.numel() == 0) { + return at::empty({0}, dets.options().dtype(at::kLong)); + } + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + + auto ndets = dets.size(0); + at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte)); + at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong)); + + auto suppressed = suppressed_t.data_ptr(); + auto keep = keep_t.data_ptr(); + auto order = order_t.data_ptr(); + + int64_t num_to_keep = 0; + + for (int64_t _i = 0; _i < ndets; _i++) { + auto i = order[_i]; + if (suppressed[i] == 1) { + continue; + } + + keep[num_to_keep++] = i; + + for (int64_t _j = _i + 1; _j < ndets; _j++) { + auto j = order[_j]; + if (suppressed[j] == 1) { + continue; + } + + auto ovr = single_box_iou_rotated( + dets[i].data_ptr(), dets[j].data_ptr()); + if (ovr >= iou_threshold) { + suppressed[j] = 1; + } + } + } + return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep); +} + +at::Tensor nms_rotated_cpu( + const at::Tensor& dets, + const at::Tensor& scores, + const float iou_threshold) { + auto result = at::empty({0}, dets.options()); + + AT_DISPATCH_FLOATING_TYPES(dets.type(), "nms_rotated", [&] { + result = nms_rotated_cpu_kernel(dets, scores, iou_threshold); + }); + return result; +} + +} // namespace detectron2 diff --git a/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu b/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu new file mode 100644 index 0000000000..9f0d85c72a --- /dev/null +++ b/detectron2/layers/csrc/nms_rotated/nms_rotated_cuda.cu @@ -0,0 +1,132 @@ +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +#include +#include +#include +#include +#include "../box_iou_rotated/box_iou_rotated_utils.h" + +namespace detectron2 { + +int const threadsPerBlock = sizeof(unsigned long long) * 8; + +template +__global__ void nms_rotated_cuda_kernel( + const int n_boxes, + const float iou_threshold, + const T* dev_boxes, + unsigned long long* dev_mask) { + // nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + // Compared to nms_cuda_kernel, where each box is represented with 4 values + // (x1, y1, x2, y2), each rotated box is represented with 5 values + // (x_center, y_center, width, height, angle_degrees) here. 
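+  // The column tile of boxes is staged in shared memory (5 values per rotated
+  // box: center x/y, width, height, angle) so each thread in this block can
+  // test its row box against every column box without re-reading global memory.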
+ __shared__ T block_boxes[threadsPerBlock * 5]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const T* cur_box = dev_boxes + cur_box_idx * 5; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + // Instead of devIoU used by original horizontal nms, here + // we use the single_box_iou_rotated function from box_iou_rotated_utils.h + if (single_box_iou_rotated(cur_box, block_boxes + i * 5) > + iou_threshold) { + t |= 1ULL << i; + } + } + const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +at::Tensor nms_rotated_cuda( + const at::Tensor& dets, + const at::Tensor& scores, + float iou_threshold) { + // using scalar_t = float; + AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor"); + AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor"); + at::cuda::CUDAGuard device_guard(dets.device()); + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + auto dets_sorted = dets.index_select(0, order_t); + + int dets_num = dets.size(0); + + const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock); + + at::Tensor mask = + at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); + + dim3 blocks(col_blocks, col_blocks); + dim3 threads(threadsPerBlock); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + dets_sorted.type(), "nms_rotated_kernel_cuda", [&] { + nms_rotated_cuda_kernel<<>>( + dets_num, + iou_threshold, + dets_sorted.data(), + (unsigned long long*)mask.data()); + }); + + at::Tensor mask_cpu = mask.to(at::kCPU); + unsigned long long* mask_host = (unsigned long long*)mask_cpu.data(); + + std::vector remv(col_blocks); + memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); + + at::Tensor keep = + at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); + int64_t* keep_out = keep.data(); + + int num_to_keep = 0; + for (int i = 0; i < dets_num; i++) { + int nblock = i / threadsPerBlock; + int inblock = i % threadsPerBlock; + + if (!(remv[nblock] & (1ULL << inblock))) { + keep_out[num_to_keep++] = i; + unsigned long long* p = mask_host + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv[j] |= p[j]; + } + } + } + + AT_CUDA_CHECK(cudaGetLastError()); + return order_t.index( + {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) + .to(order_t.device(), keep.scalar_type())}); +} + +} // namespace detectron2 diff --git a/detectron2/layers/csrc/vision.cpp b/detectron2/layers/csrc/vision.cpp new file mode 100644 index 0000000000..79b474a5ef --- /dev/null +++ b/detectron2/layers/csrc/vision.cpp @@ -0,0 +1,71 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +#include "ROIAlign/ROIAlign.h" +#include "ROIAlignRotated/ROIAlignRotated.h" +#include "box_iou_rotated/box_iou_rotated.h" +#include "deformable/deform_conv.h" +#include "nms_rotated/nms_rotated.h" + +namespace detectron2 { + +// similar to +// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp +std::string get_compiler_version() { + std::ostringstream ss; +#if defined(__GNUC__) +#ifndef __clang__ + { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; } +#endif +#endif + +#if defined(__clang_major__) + { + ss << "clang " << __clang_major__ << "." << __clang_minor__ << "." + << __clang_patchlevel__; + } +#endif + +#if defined(_MSC_VER) + { ss << "MSVC " << _MSC_FULL_VER; } +#endif + return ss.str(); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("get_compiler_version", &get_compiler_version, "get_compiler_version"); + + m.def("box_iou_rotated", &box_iou_rotated, "IoU for rotated boxes"); + + m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward"); + m.def( + "deform_conv_backward_input", + &deform_conv_backward_input, + "deform_conv_backward_input"); + m.def( + "deform_conv_backward_filter", + &deform_conv_backward_filter, + "deform_conv_backward_filter"); + m.def( + "modulated_deform_conv_forward", + &modulated_deform_conv_forward, + "modulated_deform_conv_forward"); + m.def( + "modulated_deform_conv_backward", + &modulated_deform_conv_backward, + "modulated_deform_conv_backward"); + + m.def("nms_rotated", &nms_rotated, "NMS for rotated boxes"); + + m.def("roi_align_forward", &ROIAlign_forward, "ROIAlign_forward"); + m.def("roi_align_backward", &ROIAlign_backward, "ROIAlign_backward"); + + m.def( + "roi_align_rotated_forward", + &ROIAlignRotated_forward, + "Forward pass for Rotated ROI-Align Operator"); + m.def( + "roi_align_rotated_backward", + &ROIAlignRotated_backward, + "Backward pass for Rotated ROI-Align Operator"); +} + +} // namespace detectron2 diff --git a/detectron2/layers/deform_conv.py b/detectron2/layers/deform_conv.py new file mode 100644 index 0000000000..2a8f26cb28 --- /dev/null +++ b/detectron2/layers/deform_conv.py @@ -0,0 +1,494 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import math +from functools import lru_cache +import torch +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from detectron2 import _C + +from .wrappers import _NewEmptyTensorOp + + +class _DeformConv(Function): + @staticmethod + def forward( + ctx, + input, + offset, + weight, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + im2col_step=64, + ): + if input is not None and input.dim() != 4: + raise ValueError( + "Expected 4D tensor as input, got {}D tensor instead.".format(input.dim()) + ) + ctx.stride = _pair(stride) + ctx.padding = _pair(padding) + ctx.dilation = _pair(dilation) + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.im2col_step = im2col_step + + ctx.save_for_backward(input, offset, weight) + + output = input.new_empty( + _DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride) + ) + + ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones + + if not input.is_cuda: + raise NotImplementedError + else: + cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) + assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" + + _C.deform_conv_forward( + input, + weight, + offset, + output, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step, + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + input, offset, weight = ctx.saved_tensors + + grad_input = grad_offset = grad_weight = None + + if not grad_output.is_cuda: + raise NotImplementedError + else: + cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) + assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" + + if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + _C.deform_conv_backward_input( + input, + offset, + grad_output, + grad_input, + grad_offset, + weight, + ctx.bufs_[0], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + cur_im2col_step, + ) + + if ctx.needs_input_grad[2]: + grad_weight = torch.zeros_like(weight) + _C.deform_conv_backward_filter( + input, + offset, + grad_output, + grad_weight, + ctx.bufs_[0], + ctx.bufs_[1], + weight.size(3), + weight.size(2), + ctx.stride[1], + ctx.stride[0], + ctx.padding[1], + ctx.padding[0], + ctx.dilation[1], + ctx.dilation[0], + ctx.groups, + ctx.deformable_groups, + 1, + cur_im2col_step, + ) + + return grad_input, grad_offset, grad_weight, None, None, None, None, None, None + + @staticmethod + def _output_size(input, weight, padding, dilation, stride): + channels = weight.size(0) + output_size = (input.size(0), channels) + for d in range(input.dim() - 2): + in_size = input.size(d + 2) + pad = padding[d] + kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 + stride_ = stride[d] + output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,) + if not all(map(lambda s: s > 0, output_size)): + raise ValueError( + "convolution input is too small (output would be {})".format( + "x".join(map(str, output_size)) + ) + ) + return 
output_size + + @staticmethod + @lru_cache(maxsize=128) + def _cal_im2col_step(input_size, default_size): + """ + Calculate proper im2col step size, which should be divisible by input_size and not larger + than prefer_size. Meanwhile the step size should be as large as possible to be more + efficient. So we choose the largest one among all divisors of input_size which are smaller + than prefer_size. + :param input_size: input batch size . + :param default_size: default preferred im2col step size. + :return: the largest proper step size. + """ + if input_size <= default_size: + return input_size + best_step = 1 + for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)): + if input_size % step == 0: + if input_size // step <= default_size: + return input_size // step + best_step = step + + return best_step + + +class _ModulatedDeformConv(Function): + @staticmethod + def forward( + ctx, + input, + offset, + mask, + weight, + bias=None, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + ): + ctx.stride = stride + ctx.padding = padding + ctx.dilation = dilation + ctx.groups = groups + ctx.deformable_groups = deformable_groups + ctx.with_bias = bias is not None + if not ctx.with_bias: + bias = input.new_empty(1) # fake tensor + if not input.is_cuda: + raise NotImplementedError + if ( + weight.requires_grad + or mask.requires_grad + or offset.requires_grad + or input.requires_grad + ): + ctx.save_for_backward(input, offset, mask, weight, bias) + output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight)) + ctx._bufs = [input.new_empty(0), input.new_empty(0)] + _C.modulated_deform_conv_forward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + output, + ctx._bufs[1], + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias, + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + if not grad_output.is_cuda: + raise NotImplementedError + input, offset, mask, weight, bias = ctx.saved_tensors + grad_input = torch.zeros_like(input) + grad_offset = torch.zeros_like(offset) + grad_mask = torch.zeros_like(mask) + grad_weight = torch.zeros_like(weight) + grad_bias = torch.zeros_like(bias) + _C.modulated_deform_conv_backward( + input, + weight, + bias, + ctx._bufs[0], + offset, + mask, + ctx._bufs[1], + grad_input, + grad_weight, + grad_bias, + grad_offset, + grad_mask, + grad_output, + weight.shape[2], + weight.shape[3], + ctx.stride, + ctx.stride, + ctx.padding, + ctx.padding, + ctx.dilation, + ctx.dilation, + ctx.groups, + ctx.deformable_groups, + ctx.with_bias, + ) + if not ctx.with_bias: + grad_bias = None + + return ( + grad_input, + grad_offset, + grad_mask, + grad_weight, + grad_bias, + None, + None, + None, + None, + None, + ) + + @staticmethod + def _infer_shape(ctx, input, weight): + n = input.size(0) + channels_out = weight.size(0) + height, width = input.shape[2:4] + kernel_h, kernel_w = weight.shape[2:4] + height_out = ( + height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1) + ) // ctx.stride + 1 + width_out = ( + width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1) + ) // ctx.stride + 1 + return n, channels_out, height_out, width_out + + +deform_conv = _DeformConv.apply +modulated_deform_conv = _ModulatedDeformConv.apply + + +class DeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + 
padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=False, + norm=None, + activation=None, + ): + """ + Deformable convolution. + + Args: + similar to `Conv2D`. + + deformable_groups (int): number of groups used in deformable convolution. + norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + """ + super(DeformConv, self).__init__() + + assert not bias + assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format( + in_channels, groups + ) + assert ( + out_channels % groups == 0 + ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups) + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + self.deformable_groups = deformable_groups + self.norm = norm + self.activation = activation + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size) + ) + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + + def forward(self, x, offset): + if x.numel() == 0: + # When input is empty, we want to return a empty tensor with "correct" shape, + # So that the following operations will not panic + # if they check for the shape of the tensor. + # This computes the height and width of the output tensor + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) + + x = deform_conv( + x, + offset, + self.weight, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", padding=" + str(self.padding) + tmpstr += ", dilation=" + str(self.dilation) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", deformable_groups=" + str(self.deformable_groups) + tmpstr += ", bias=False" + return tmpstr + + +class ModulatedDeformConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + deformable_groups=1, + bias=True, + norm=None, + activation=None, + ): + """ + Modulated deformable convolution. + + Args: + similar to `Conv2D`. + + deformable_groups (int): number of groups used in deformable convolution. 
+ norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + """ + super(ModulatedDeformConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.deformable_groups = deformable_groups + self.with_bias = bias + self.norm = norm + self.activation = activation + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) + ) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, x, offset, mask): + if x.numel() == 0: + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + return _NewEmptyTensorOp.apply(x, output_shape) + + x = modulated_deform_conv( + x, + offset, + mask, + self.weight, + self.bias, + self.stride, + self.padding, + self.dilation, + self.groups, + self.deformable_groups, + ) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + def extra_repr(self): + tmpstr = "in_channels=" + str(self.in_channels) + tmpstr += ", out_channels=" + str(self.out_channels) + tmpstr += ", kernel_size=" + str(self.kernel_size) + tmpstr += ", stride=" + str(self.stride) + tmpstr += ", padding=" + str(self.padding) + tmpstr += ", dilation=" + str(self.dilation) + tmpstr += ", groups=" + str(self.groups) + tmpstr += ", deformable_groups=" + str(self.deformable_groups) + tmpstr += ", bias=" + str(self.with_bias) + return tmpstr diff --git a/detectron2/layers/mask_ops.py b/detectron2/layers/mask_ops.py new file mode 100644 index 0000000000..dfbb09e005 --- /dev/null +++ b/detectron2/layers/mask_ops.py @@ -0,0 +1,241 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +import torch +from PIL import Image +from torch.nn import functional as F + +__all__ = ["paste_masks_in_image"] + + +BYTES_PER_FLOAT = 4 +# TODO: This memory limit may be too much or too little. It would be better to +# determine it based on available resources. +GPU_MEM_LIMIT = 1024 ** 3 # 1 GB memory limit + + +def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True): + """ + Args: + masks: N, 1, H, W + boxes: N, 4 + img_h, img_w (int): + skip_empty (bool): only paste masks within the region that + tightly bound all boxes, and returns the results this region only. + An important optimization for CPU. + + Returns: + if skip_empty == False, a mask of shape (N, img_h, img_w) + if skip_empty == True, a mask of shape (N, h', w'), and the slice + object for the corresponding region. + """ + # On GPU, paste all masks together (up to chunk size) + # by using the entire image to sample the masks + # Compared to pasting them one by one, + # this has more operations but is faster on COCO-scale dataset. 
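# (Explanatory note, not part of the patch.) The code below builds one sampling grid per
# box over the paste region and rescales the pixel-center coordinates into grid_sample's
# [-1, 1] range, so every mask in the chunk is resized and pasted by a single
# F.grid_sample call instead of one per-box resize.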
+ device = masks.device + if skip_empty: + x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to( + dtype=torch.int32 + ) + x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32) + y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32) + else: + x0_int, y0_int = 0, 0 + x1_int, y1_int = img_w, img_h + x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1 + + N = masks.shape[0] + + img_y = torch.arange(y0_int, y1_int, device=device, dtype=torch.float32) + 0.5 + img_x = torch.arange(x0_int, x1_int, device=device, dtype=torch.float32) + 0.5 + img_y = (img_y - y0) / (y1 - y0) * 2 - 1 + img_x = (img_x - x0) / (x1 - x0) * 2 - 1 + # img_x, img_y have shapes (N, w), (N, h) + + gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1)) + gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1)) + grid = torch.stack([gx, gy], dim=3) + + img_masks = F.grid_sample(masks.to(dtype=torch.float32), grid, align_corners=False) + + if skip_empty: + return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int)) + else: + return img_masks[:, 0], () + + +def paste_masks_in_image(masks, boxes, image_shape, threshold=0.5): + """ + Paste a set of masks that are of a fixed resolution (e.g., 28 x 28) into an image. + The location, height, and width for pasting each mask is determined by their + corresponding bounding boxes in boxes. + + Args: + masks (tensor): Tensor of shape (Bimg, Hmask, Wmask), where Bimg is the number of + detected object instances in the image and Hmask, Wmask are the mask width and mask + height of the predicted mask (e.g., Hmask = Wmask = 28). Values are in [0, 1]. + boxes (Boxes): A Boxes of length Bimg. boxes.tensor[i] and masks[i] correspond + to the same object instance. + image_shape (tuple): height, width + threshold (float): A threshold in [0, 1] for converting the (soft) masks to + binary masks. + + Returns: + img_masks (Tensor): A tensor of shape (Bimg, Himage, Wimage), where Bimg is the + number of detected object instances and Himage, Wimage are the image width + and height. img_masks[i] is a binary mask for object instance i. + """ + assert masks.shape[-1] == masks.shape[-2], "Only square mask predictions are supported" + N = len(masks) + if N == 0: + return masks.new_empty((0,) + image_shape, dtype=torch.uint8) + + boxes = boxes.tensor + device = boxes.device + assert len(boxes) == N, boxes.shape + + img_h, img_w = image_shape + + # The actual implementation split the input into chunks, + # and paste them chunk by chunk. + if device.type == "cpu": + # CPU is most efficient when they are pasted one by one with skip_empty=True + # so that it performs minimal number of operatins. 
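# Rough arithmetic sketch (not part of the patch) for the GPU branch below: pasting N
# float32 masks onto an img_h x img_w canvas needs about N * img_h * img_w * 4 bytes, so
# with the 1 GB GPU_MEM_LIMIT above, e.g. 500 instances on a 1024 x 1024 image need
# 500 * 1024 * 1024 * 4 B, about 1.95x the limit, and are therefore pasted in 2 chunks.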
+ num_chunks = N + else: + # GPU benefits from parallelism for larger chunks, but may have memory issue + num_chunks = int(np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT)) + assert ( + num_chunks <= N + ), "Default GPU_MEM_LIMIT in mask_ops.py is too small; try increasing it" + chunks = torch.chunk(torch.arange(N, device=device), num_chunks) + + img_masks = torch.zeros( + N, img_h, img_w, device=device, dtype=torch.bool if threshold >= 0 else torch.uint8 + ) + for inds in chunks: + masks_chunk, spatial_inds = _do_paste_mask( + masks[inds, None, :, :], boxes[inds], img_h, img_w, skip_empty=device.type == "cpu" + ) + + if threshold >= 0: + masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool) + else: + # for visualization and debugging + masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8) + + img_masks[(inds,) + spatial_inds] = masks_chunk + return img_masks + + +# The below are the original paste function (from Detectron1) which has +# larger quantization error. +# It is faster on CPU, while the aligned one is faster on GPU thanks to grid_sample. + + +def paste_mask_in_image_old(mask, box, img_h, img_w, threshold): + """ + Paste a single mask in an image. + This is a per-box implementation of :func:`paste_masks_in_image`. + This function has larger quantization error due to incorrect pixel + modeling and is not used any more. + + Args: + mask (Tensor): A tensor of shape (Hmask, Wmask) storing the mask of a single + object instance. Values are in [0, 1]. + box (Tensor): A tensor of shape (4, ) storing the x0, y0, x1, y1 box corners + of the object instance. + img_h, img_w (int): Image height and width. + threshold (float): Mask binarization threshold in [0, 1]. + + Returns: + im_mask (Tensor): + The resized and binarized object mask pasted into the original + image plane (a tensor of shape (img_h, img_w)). + """ + # Conversion from continuous box coordinates to discrete pixel coordinates + # via truncation (cast to int32). This determines which pixels to paste the + # mask onto. + box = box.to(dtype=torch.int32) # Continuous to discrete coordinate conversion + # An example (1D) box with continuous coordinates (x0=0.7, x1=4.3) will map to + # a discrete coordinates (x0=0, x1=4). Note that box is mapped to 5 = x1 - x0 + 1 + # pixels (not x1 - x0 pixels). + samples_w = box[2] - box[0] + 1 # Number of pixel samples, *not* geometric width + samples_h = box[3] - box[1] + 1 # Number of pixel samples, *not* geometric height + + # Resample the mask from it's original grid to the new samples_w x samples_h grid + mask = Image.fromarray(mask.cpu().numpy()) + mask = mask.resize((samples_w, samples_h), resample=Image.BILINEAR) + mask = np.array(mask, copy=False) + + if threshold >= 0: + mask = np.array(mask > threshold, dtype=np.uint8) + mask = torch.from_numpy(mask) + else: + # for visualization and debugging, we also + # allow it to return an unmodified mask + mask = torch.from_numpy(mask * 255).to(torch.uint8) + + im_mask = torch.zeros((img_h, img_w), dtype=torch.uint8) + x_0 = max(box[0], 0) + x_1 = min(box[2] + 1, img_w) + y_0 = max(box[1], 0) + y_1 = min(box[3] + 1, img_h) + + im_mask[y_0:y_1, x_0:x_1] = mask[ + (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0]) + ] + return im_mask + + +# Our pixel modeling requires extrapolation for any continuous +# coordinate < 0.5 or > length - 0.5. 
When sampling pixels on the masks, +# we would like this extrapolation to be an interpolation between boundary values and zero, +# instead of using absolute zero or boundary values. +# Therefore `paste_mask_in_image_old` is often used with zero padding around the masks like this: +# masks, scale = pad_masks(masks[:, 0, :, :], 1) +# boxes = scale_boxes(boxes.tensor, scale) + + +def pad_masks(masks, padding): + """ + Args: + masks (tensor): A tensor of shape (B, M, M) representing B masks. + padding (int): Number of cells to pad on all sides. + + Returns: + The padded masks and the scale factor of the padding size / original size. + """ + B = masks.shape[0] + M = masks.shape[-1] + pad2 = 2 * padding + scale = float(M + pad2) / M + padded_masks = masks.new_zeros((B, M + pad2, M + pad2)) + padded_masks[:, padding:-padding, padding:-padding] = masks + return padded_masks, scale + + +def scale_boxes(boxes, scale): + """ + Args: + boxes (tensor): A tensor of shape (B, 4) representing B boxes with 4 + coords representing the corners x0, y0, x1, y1, + scale (float): The box scaling factor. + + Returns: + Scaled boxes. + """ + w_half = (boxes[:, 2] - boxes[:, 0]) * 0.5 + h_half = (boxes[:, 3] - boxes[:, 1]) * 0.5 + x_c = (boxes[:, 2] + boxes[:, 0]) * 0.5 + y_c = (boxes[:, 3] + boxes[:, 1]) * 0.5 + + w_half *= scale + h_half *= scale + + scaled_boxes = torch.zeros_like(boxes) + scaled_boxes[:, 0] = x_c - w_half + scaled_boxes[:, 2] = x_c + w_half + scaled_boxes[:, 1] = y_c - h_half + scaled_boxes[:, 3] = y_c + h_half + return scaled_boxes diff --git a/detectron2/layers/nms.py b/detectron2/layers/nms.py new file mode 100644 index 0000000000..e29435e77b --- /dev/null +++ b/detectron2/layers/nms.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import torch +from torchvision.ops import boxes as box_ops +from torchvision.ops import nms # BC-compat + + +def batched_nms(boxes, scores, idxs, iou_threshold): + """ + Same as torchvision.ops.boxes.batched_nms, but safer. + """ + assert boxes.shape[-1] == 4 + # TODO may need better strategy. + # Investigate after having a fully-cuda NMS op. + if len(boxes) < 40000: + return box_ops.batched_nms(boxes, scores, idxs, iou_threshold) + + result_mask = scores.new_zeros(scores.size(), dtype=torch.bool) + for id in torch.unique(idxs).cpu().tolist(): + mask = (idxs == id).nonzero().view(-1) + keep = nms(boxes[mask], scores[mask], iou_threshold) + result_mask[mask[keep]] = True + keep = result_mask.nonzero().view(-1) + keep = keep[scores[keep].argsort(descending=True)] + return keep + + +# Note: this function (nms_rotated) might be moved into +# torchvision/ops/boxes.py in the future +def nms_rotated(boxes, scores, iou_threshold): + """ + Performs non-maximum suppression (NMS) on the rotated boxes according + to their intersection-over-union (IoU). + + Rotated NMS iteratively removes lower scoring rotated boxes which have an + IoU greater than iou_threshold with another (higher scoring) rotated box. + + Note that RotatedBox (5, 3, 4, 2, -90) covers exactly the same region as + RotatedBox (5, 3, 4, 2, 90) does, and their IoU will be 1. However, they + can be representing completely different objects in certain tasks, e.g., OCR. + + As for the question of whether rotated-NMS should treat them as faraway boxes + even though their IOU is 1, it depends on the application and/or ground truth annotation. + + As an extreme example, consider a single character v and the square box around it. 
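# Quick sketch (not part of the patch) of the IoU statement above, using the
# pairwise_iou_rotated wrapper added later in this patch (requires the compiled _C ops):
#   import torch
#   from detectron2.layers.rotated_boxes import pairwise_iou_rotated
#   b = torch.tensor([[5.0, 3.0, 4.0, 2.0, -90.0]])    # (x_ctr, y_ctr, w, h, angle)
#   b180 = torch.tensor([[5.0, 3.0, 4.0, 2.0, 90.0]])  # same rectangle, rotated 180 deg
#   pairwise_iou_rotated(b, b180)                       # -> tensor([[1.]])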
+ + If the angle is 0 degree, the object (text) would be read as 'v'; + + If the angle is 90 degrees, the object (text) would become '>'; + + If the angle is 180 degrees, the object (text) would become '^'; + + If the angle is 270/-90 degrees, the object (text) would become '<' + + All of these cases have IoU of 1 to each other, and rotated NMS that only + uses IoU as criterion would only keep one of them with the highest score - + which, practically, still makes sense in most cases because typically + only one of theses orientations is the correct one. Also, it does not matter + as much if the box is only used to classify the object (instead of transcribing + them with a sequential OCR recognition model) later. + + On the other hand, when we use IoU to filter proposals that are close to the + ground truth during training, we should definitely take the angle into account if + we know the ground truth is labeled with the strictly correct orientation (as in, + upside-down words are annotated with -180 degrees even though they can be covered + with a 0/90/-90 degree box, etc.) + + The way the original dataset is annotated also matters. For example, if the dataset + is a 4-point polygon dataset that does not enforce ordering of vertices/orientation, + we can estimate a minimum rotated bounding box to this polygon, but there's no way + we can tell the correct angle with 100% confidence (as shown above, there could be 4 different + rotated boxes, with angles differed by 90 degrees to each other, covering the exactly + same region). In that case we have to just use IoU to determine the box + proximity (as many detection benchmarks (even for text) do) unless there're other + assumptions we can make (like width is always larger than height, or the object is not + rotated by more than 90 degrees CCW/CW, etc.) + + In summary, not considering angles in rotated NMS seems to be a good option for now, + but we should be aware of its implications. + + Args: + boxes (Tensor[N, 5]): Rotated boxes to perform NMS on. They are expected to be in + (x_center, y_center, width, height, angle_degrees) format. + scores (Tensor[N]): Scores for each one of the rotated boxes + iou_threshold (float): Discards all overlapping rotated boxes with IoU < iou_threshold + + Returns: + keep (Tensor): int64 tensor with the indices of the elements that have been kept + by Rotated NMS, sorted in decreasing order of scores + """ + from detectron2 import _C + + return _C.nms_rotated(boxes, scores, iou_threshold) + + +# Note: this function (batched_nms_rotated) might be moved into +# torchvision/ops/boxes.py in the future +def batched_nms_rotated(boxes, scores, idxs, iou_threshold): + """ + Performs non-maximum suppression in a batched fashion. + + Each index value correspond to a category, and NMS + will not be applied between elements of different categories. + + Args: + boxes (Tensor[N, 5]): + boxes where NMS will be performed. They + are expected to be in (x_ctr, y_ctr, width, height, angle_degrees) format + scores (Tensor[N]): + scores for each one of the boxes + idxs (Tensor[N]): + indices of the categories for each one of the boxes. 
+ iou_threshold (float): + discards all overlapping boxes + with IoU < iou_threshold + + Returns: + Tensor: + int64 tensor with the indices of the elements that have been kept + by NMS, sorted in decreasing order of scores + """ + assert boxes.shape[-1] == 5 + + if boxes.numel() == 0: + return torch.empty((0,), dtype=torch.int64, device=boxes.device) + # Strategy: in order to perform NMS independently per class, + # we add an offset to all the boxes. The offset is dependent + # only on the class idx, and is large enough so that boxes + # from different classes do not overlap + + # Note that batched_nms in torchvision/ops/boxes.py only uses max_coordinate, + # which won't handle negative coordinates correctly. + # Here by using min_coordinate we can make sure the negative coordinates are + # correctly handled. + max_coordinate = ( + torch.max(boxes[:, 0], boxes[:, 1]) + torch.max(boxes[:, 2], boxes[:, 3]) / 2 + ).max() + min_coordinate = ( + torch.min(boxes[:, 0], boxes[:, 1]) - torch.min(boxes[:, 2], boxes[:, 3]) / 2 + ).min() + offsets = idxs.to(boxes) * (max_coordinate - min_coordinate + 1) + boxes_for_nms = boxes.clone() # avoid modifying the original values in boxes + boxes_for_nms[:, :2] += offsets[:, None] + keep = nms_rotated(boxes_for_nms, scores, iou_threshold) + return keep diff --git a/detectron2/layers/roi_align.py b/detectron2/layers/roi_align.py new file mode 100644 index 0000000000..328bbab2f7 --- /dev/null +++ b/detectron2/layers/roi_align.py @@ -0,0 +1,105 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from detectron2 import _C + + +class _ROIAlign(Function): + @staticmethod + def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio, aligned): + ctx.save_for_backward(roi) + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.sampling_ratio = sampling_ratio + ctx.input_shape = input.size() + ctx.aligned = aligned + output = _C.roi_align_forward( + input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio, aligned + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + rois, = ctx.saved_tensors + output_size = ctx.output_size + spatial_scale = ctx.spatial_scale + sampling_ratio = ctx.sampling_ratio + bs, ch, h, w = ctx.input_shape + grad_input = _C.roi_align_backward( + grad_output, + rois, + spatial_scale, + output_size[0], + output_size[1], + bs, + ch, + h, + w, + sampling_ratio, + ctx.aligned, + ) + return grad_input, None, None, None, None, None + + +roi_align = _ROIAlign.apply + + +class ROIAlign(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True): + """ + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each output + sample. 0 to take samples densely. + aligned (bool): if False, use the legacy implementation in + Detectron. If True, align the results more perfectly. + + Note: + The meaning of aligned=True: + + Given a continuous coordinate c, its two neighboring pixel indices (in our + pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, + c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled + from the underlying signal at continuous coordinates 0.5 and 1.5). 
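# Tiny sketch (not part of the patch) of the pixel model described above.
import math

def neighboring_pixel_indices(c):
    # e.g. neighboring_pixel_indices(1.3) == (0, 1); those pixels' centers sit at
    # continuous coordinates 0.5 and 1.5, matching the example in the docstring.
    return math.floor(c - 0.5), math.ceil(c - 0.5)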
But the original + roi_align (aligned=False) does not subtract the 0.5 when computing neighboring + pixel indices and therefore it uses pixels with a slightly incorrect alignment + (relative to our pixel model) when performing bilinear interpolation. + + With `aligned=True`, + we first appropriately scale the ROI and then shift it by -0.5 + prior to calling roi_align. This produces the correct neighbors; see + detectron2/tests/test_roi_align.py for verification. + + The difference does not make a difference to the model's performance if + ROIAlign is used together with conv layers. + """ + super(ROIAlign, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + self.aligned = aligned + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. + """ + assert rois.dim() == 2 and rois.size(1) == 5 + return roi_align( + input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned + ) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ", aligned=" + str(self.aligned) + tmpstr += ")" + return tmpstr diff --git a/detectron2/layers/roi_align_rotated.py b/detectron2/layers/roi_align_rotated.py new file mode 100644 index 0000000000..57381a95c1 --- /dev/null +++ b/detectron2/layers/roi_align_rotated.py @@ -0,0 +1,88 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from torch import nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.nn.modules.utils import _pair + +from detectron2 import _C + + +class _ROIAlignRotated(Function): + @staticmethod + def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio): + ctx.save_for_backward(roi) + ctx.output_size = _pair(output_size) + ctx.spatial_scale = spatial_scale + ctx.sampling_ratio = sampling_ratio + ctx.input_shape = input.size() + output = _C.roi_align_rotated_forward( + input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio + ) + return output + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + rois, = ctx.saved_tensors + output_size = ctx.output_size + spatial_scale = ctx.spatial_scale + sampling_ratio = ctx.sampling_ratio + bs, ch, h, w = ctx.input_shape + grad_input = _C.roi_align_rotated_backward( + grad_output, + rois, + spatial_scale, + output_size[0], + output_size[1], + bs, + ch, + h, + w, + sampling_ratio, + ) + return grad_input, None, None, None, None, None + + +roi_align_rotated = _ROIAlignRotated.apply + + +class ROIAlignRotated(nn.Module): + def __init__(self, output_size, spatial_scale, sampling_ratio): + """ + Args: + output_size (tuple): h, w + spatial_scale (float): scale the input boxes by this number + sampling_ratio (int): number of inputs samples to take for each output + sample. 0 to take samples densely. + + Note: + ROIAlignRotated supports continuous coordinate by default: + Given a continuous coordinate c, its two neighboring pixel indices (in our + pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, + c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled + from the underlying signal at continuous coordinates 0.5 and 1.5). 
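# Hypothetical usage sketch (not part of the patch) for the ROIAlign module defined
# above; it needs the compiled _C extension. Box coordinates are in image space and
# spatial_scale maps them onto a stride-16 feature map:
#   import torch
#   pooler = ROIAlign(output_size=(7, 7), spatial_scale=1.0 / 16, sampling_ratio=0, aligned=True)
#   features = torch.randn(2, 256, 50, 76)             # NCHW feature map
#   rois = torch.tensor([[0., 10., 10., 200., 180.],   # (batch index, x0, y0, x1, y1)
#                        [1., 30., 40., 120., 160.]])
#   pooled = pooler(features, rois)                     # -> shape (2, 256, 7, 7)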
+ """ + super(ROIAlignRotated, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + + def forward(self, input, rois): + """ + Args: + input: NCHW images + rois: Bx6 boxes. First column is the index into N. + The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees). + """ + assert rois.dim() == 2 and rois.size(1) == 6 + return roi_align_rotated( + input, rois, self.output_size, self.spatial_scale, self.sampling_ratio + ) + + def __repr__(self): + tmpstr = self.__class__.__name__ + "(" + tmpstr += "output_size=" + str(self.output_size) + tmpstr += ", spatial_scale=" + str(self.spatial_scale) + tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) + tmpstr += ")" + return tmpstr diff --git a/detectron2/layers/rotated_boxes.py b/detectron2/layers/rotated_boxes.py new file mode 100644 index 0000000000..2b212c9664 --- /dev/null +++ b/detectron2/layers/rotated_boxes.py @@ -0,0 +1,24 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from __future__ import absolute_import, division, print_function, unicode_literals + +# import torch +from detectron2 import _C + + +def pairwise_iou_rotated(boxes1, boxes2): + """ + Return intersection-over-union (Jaccard index) of boxes. + + Both sets of boxes are expected to be in + (x_center, y_center, width, height, angle) format. + + Arguments: + boxes1 (Tensor[N, 5]) + boxes2 (Tensor[M, 5]) + + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + return _C.box_iou_rotated(boxes1, boxes2) diff --git a/detectron2/layers/shape_spec.py b/detectron2/layers/shape_spec.py new file mode 100644 index 0000000000..ed7f0d0826 --- /dev/null +++ b/detectron2/layers/shape_spec.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from collections import namedtuple + + +class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): + """ + A simple structure that contains basic shape specification about a tensor. + It is often used as the auxiliary inputs/outputs of models, + to obtain the shape inference ability among pytorch modules. + + Attributes: + channels: + height: + width: + stride: + """ + + def __new__(cls, *, channels=None, height=None, width=None, stride=None): + return super().__new__(cls, channels, height, width, stride) diff --git a/detectron2/layers/wrappers.py b/detectron2/layers/wrappers.py new file mode 100644 index 0000000000..bc208fdff0 --- /dev/null +++ b/detectron2/layers/wrappers.py @@ -0,0 +1,154 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Wrappers around on some nn functions, mainly to support empty tensors. + +Ideally, add support directly in PyTorch to empty tensors in those functions. 
+ +These can be removed once https://github.com/pytorch/pytorch/issues/12013 +is implemented +""" + +import math +import torch +from torch.nn.modules.utils import _ntuple + + +def cat(tensors, dim=0): + """ + Efficient version of torch.cat that avoids a copy if there is only a single element in a list + """ + assert isinstance(tensors, (list, tuple)) + if len(tensors) == 1: + return tensors[0] + return torch.cat(tensors, dim) + + +class _NewEmptyTensorOp(torch.autograd.Function): + @staticmethod + def forward(ctx, x, new_shape): + ctx.shape = x.shape + return x.new_empty(new_shape) + + @staticmethod + def backward(ctx, grad): + shape = ctx.shape + return _NewEmptyTensorOp.apply(grad, shape), None + + +class Conv2d(torch.nn.Conv2d): + def __init__(self, *args, **kwargs): + """ + Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: + + norm (nn.Module, optional): a normalization layer + activation (callable(Tensor) -> Tensor): a callable activation function + + It assumes that norm layer is used before activation. + """ + norm = kwargs.pop("norm", None) + activation = kwargs.pop("activation", None) + super().__init__(*args, **kwargs) + + self.norm = norm + self.activation = activation + + def forward(self, x): + if x.numel() == 0: + # When input is empty, we want to return a empty tensor with "correct" shape, + # So that the following operations will not panic + # if they check for the shape of the tensor. + # This computes the height and width of the output tensor + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [x.shape[0], self.weight.shape[0]] + output_shape + empty = _NewEmptyTensorOp.apply(x, output_shape) + if self.training: + # https://github.com/pytorch/pytorch/issues/12013 + assert not isinstance( + self.norm, torch.nn.SyncBatchNorm + ), "SyncBatchNorm does not support empty inputs!" + + # This is to make DDP happy. + # DDP expects all workers to have gradient w.r.t the same set of parameters. + _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return empty + _dummy + else: + return empty + + x = super().forward(x) + if self.norm is not None: + x = self.norm(x) + if self.activation is not None: + x = self.activation(x) + return x + + +class ConvTranspose2d(torch.nn.ConvTranspose2d): + def forward(self, x): + if x.numel() > 0: + return super(ConvTranspose2d, self).forward(x) + # get output shape + + output_shape = [ + (i - 1) * d - 2 * p + (di * (k - 1) + 1) + op + for i, p, di, k, d, op in zip( + x.shape[-2:], + self.padding, + self.dilation, + self.kernel_size, + self.stride, + self.output_padding, + ) + ] + output_shape = [x.shape[0], self.bias.shape[0]] + output_shape + # This is to make DDP happy. + # DDP expects all workers to have gradient w.r.t the same set of parameters. 
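# (Explanatory note, not part of the patch.) The zero-scaled parameter sum below makes
# the empty output depend on every parameter of the module, so autograd still produces
# a (zero) gradient for each of them; DistributedDataParallel expects identical gradient
# sets on all workers, and a worker that skipped these parameters would stall the
# all-reduce.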
+ _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 + return _NewEmptyTensorOp.apply(x, output_shape) + _dummy + + +class BatchNorm2d(torch.nn.BatchNorm2d): + def forward(self, x): + if x.numel() > 0: + return super(BatchNorm2d, self).forward(x) + # get output shape + output_shape = x.shape + return _NewEmptyTensorOp.apply(x, output_shape) + + +def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): + if input.numel() > 0: + return torch.nn.functional.interpolate( + input, size, scale_factor, mode, align_corners=align_corners + ) + + def _check_size_scale_factor(dim): + if size is None and scale_factor is None: + raise ValueError("either size or scale_factor should be defined") + if size is not None and scale_factor is not None: + raise ValueError("only one of size or scale_factor should be defined") + if ( + scale_factor is not None + and isinstance(scale_factor, tuple) + and len(scale_factor) != dim + ): + raise ValueError( + "scale_factor shape must match input shape. " + "Input is {}D, scale_factor size is {}".format(dim, len(scale_factor)) + ) + + def _output_size(dim): + _check_size_scale_factor(dim) + if size is not None: + return size + scale_factors = _ntuple(dim)(scale_factor) + # math.floor might return float in py2.7 + return [int(math.floor(input.size(i + 2) * scale_factors[i])) for i in range(dim)] + + output_shape = tuple(_output_size(2)) + output_shape = input.shape[:-2] + output_shape + return _NewEmptyTensorOp.apply(input, output_shape) diff --git a/detectron2/modeling/__init__.py b/detectron2/modeling/__init__.py new file mode 100644 index 0000000000..4202d6a950 --- /dev/null +++ b/detectron2/modeling/__init__.py @@ -0,0 +1,54 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch + +from detectron2.layers import ShapeSpec + +from .anchor_generator import build_anchor_generator, ANCHOR_GENERATOR_REGISTRY +from .backbone import ( + BACKBONE_REGISTRY, + FPN, + Backbone, + ResNet, + ResNetBlockBase, + build_backbone, + build_resnet_backbone, + make_stage, +) +from .meta_arch import ( + META_ARCH_REGISTRY, + SEM_SEG_HEADS_REGISTRY, + GeneralizedRCNN, + PanopticFPN, + ProposalNetwork, + RetinaNet, + SemanticSegmentor, + build_model, + build_sem_seg_head, +) +from .postprocessing import detector_postprocess +from .proposal_generator import ( + PROPOSAL_GENERATOR_REGISTRY, + build_proposal_generator, + RPN_HEAD_REGISTRY, + build_rpn_head, +) +from .roi_heads import ( + ROI_BOX_HEAD_REGISTRY, + ROI_HEADS_REGISTRY, + ROI_KEYPOINT_HEAD_REGISTRY, + ROI_MASK_HEAD_REGISTRY, + ROIHeads, + StandardROIHeads, + build_box_head, + build_keypoint_head, + build_mask_head, + build_roi_heads, +) +from .test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA + +_EXCLUDE = {"torch", "ShapeSpec"} +__all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")] + +assert ( + torch.Tensor([1]) == torch.Tensor([2]) +).dtype == torch.bool, "Your Pytorch is too old. Please update to contain https://github.com/pytorch/pytorch/pull/21113" diff --git a/detectron2/modeling/anchor_generator.py b/detectron2/modeling/anchor_generator.py new file mode 100644 index 0000000000..b6fc49b1ad --- /dev/null +++ b/detectron2/modeling/anchor_generator.py @@ -0,0 +1,352 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import copy +import math +from typing import List +import torch +from torch import nn + +from detectron2.layers import ShapeSpec +from detectron2.structures import Boxes, RotatedBoxes +from detectron2.utils.registry import Registry + +ANCHOR_GENERATOR_REGISTRY = Registry("ANCHOR_GENERATOR") +""" +Registry for modules that creates object detection anchors for feature maps. +""" + + +class BufferList(nn.Module): + """ + Similar to nn.ParameterList, but for buffers + """ + + def __init__(self, buffers=None): + super(BufferList, self).__init__() + if buffers is not None: + self.extend(buffers) + + def extend(self, buffers): + offset = len(self) + for i, buffer in enumerate(buffers): + self.register_buffer(str(offset + i), buffer) + return self + + def __len__(self): + return len(self._buffers) + + def __iter__(self): + return iter(self._buffers.values()) + + +def _create_grid_offsets(size, stride, device): + grid_height, grid_width = size + shifts_x = torch.arange(0, grid_width * stride, step=stride, dtype=torch.float32, device=device) + shifts_y = torch.arange( + 0, grid_height * stride, step=stride, dtype=torch.float32, device=device + ) + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + return shift_x, shift_y + + +@ANCHOR_GENERATOR_REGISTRY.register() +class DefaultAnchorGenerator(nn.Module): + """ + For a set of image sizes and feature maps, computes a set of anchors. + """ + + def __init__(self, cfg, input_shape: List[ShapeSpec]): + super().__init__() + # fmt: off + sizes = cfg.MODEL.ANCHOR_GENERATOR.SIZES + aspect_ratios = cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS + self.strides = [x.stride for x in input_shape] + # fmt: on + """ + sizes (list[list[int]]): sizes[i] is the list of anchor sizes to use + for the i-th feature map. If len(sizes) == 1, then the same list of + anchor sizes, given by sizes[0], is used for all feature maps. Anchor + sizes are given in absolute lengths in units of the input image; + they do not dynamically scale if the input image size changes. + aspect_ratios (list[list[float]]): aspect_ratios[i] is the list of + anchor aspect ratios to use for the i-th feature map. If + len(aspect_ratios) == 1, then the same list of anchor aspect ratios, + given by aspect_ratios[0], is used for all feature maps. + strides (list[int]): stride of each input feature. + """ + + self.num_features = len(self.strides) + self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios) + + def _calculate_anchors(self, sizes, aspect_ratios): + # If one size (or aspect ratio) is specified and there are multiple feature + # maps, then we "broadcast" anchors of that single size (or aspect ratio) + # over all feature maps. + if len(sizes) == 1: + sizes *= self.num_features + if len(aspect_ratios) == 1: + aspect_ratios *= self.num_features + assert self.num_features == len(sizes) + assert self.num_features == len(aspect_ratios) + + cell_anchors = [ + self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios) + ] + + return BufferList(cell_anchors) + + @property + def box_dim(self): + """ + Returns: + int: the dimension of each anchor box. + """ + return 4 + + @property + def num_cell_anchors(self): + """ + Returns: + list[int]: Each int is the number of anchors at every pixel + location, on that feature map. + For example, if at every pixel we use anchors of 3 aspect + ratios and 5 sizes, the number of anchors is 15. 
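# Worked example (not part of the patch): with sizes = (32, 64, 128, 256, 512) and
# aspect_ratios = (0.5, 1, 2) (the defaults of generate_cell_anchors below), each
# location gets 5 * 3 = 15 cell anchors, and grid_anchors tiles them over every
# location, so e.g. a 100 x 152 feature map alone contributes 100 * 152 * 15 = 228,000 anchors.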
+ (See also ANCHOR_GENERATOR.SIZES and ANCHOR_GENERATOR.ASPECT_RATIOS in config) + + In standard RPN models, `num_cell_anchors` on every feature map is the same. + """ + return [len(cell_anchors) for cell_anchors in self.cell_anchors] + + def grid_anchors(self, grid_sizes): + anchors = [] + for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): + shift_x, shift_y = _create_grid_offsets(size, stride, base_anchors.device) + shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) + + anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) + + return anchors + + def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): + """ + Generate a tensor storing anchor boxes, which are continuous geometric rectangles + centered on one feature map point sample. We can later build the set of anchors + for the entire feature map by tiling these tensors; see `meth:grid_anchors`. + + Args: + sizes (tuple[float]): Absolute size of the anchors in the units of the input + image (the input received by the network, after ungoing necessary scaling). + The absolute size is given as the side length of a box. + aspect_ratios (tuple[float]]): Aspect ratios of the boxes computed as box + height / width. + + Returns: + Tensor of shape (len(sizes) * len(aspect_ratios), 4) storing anchor boxes + in XYXY format. + """ + + # This is different from the anchor generator defined in the original Faster R-CNN + # code or Detectron. They yield the same AP, however the old version defines cell + # anchors in a less natural way with a shift relative to the feature grid and + # quantization that results in slightly different sizes for different aspect ratios. + # See also https://github.com/facebookresearch/Detectron/issues/227 + + anchors = [] + for size in sizes: + area = size ** 2.0 + for aspect_ratio in aspect_ratios: + # s * s = w * h + # a = h / w + # ... some algebra ... + # w = sqrt(s * s / a) + # h = a * w + w = math.sqrt(area / aspect_ratio) + h = aspect_ratio * w + x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 + anchors.append([x0, y0, x1, y1]) + return torch.tensor(anchors) + + def forward(self, features): + """ + Args: + features (list[Tensor]): list of backbone feature maps on which to generate anchors. + + Returns: + list[list[Boxes]]: a list of #image elements. Each is a list of #feature level Boxes. + The Boxes contains anchors of this image on the specific feature level. + """ + num_images = len(features[0]) + grid_sizes = [feature_map.shape[-2:] for feature_map in features] + anchors_over_all_feature_maps = self.grid_anchors(grid_sizes) + + anchors_in_image = [] + for anchors_per_feature_map in anchors_over_all_feature_maps: + boxes = Boxes(anchors_per_feature_map) + anchors_in_image.append(boxes) + + anchors = [copy.deepcopy(anchors_in_image) for _ in range(num_images)] + return anchors + + +@ANCHOR_GENERATOR_REGISTRY.register() +class RotatedAnchorGenerator(nn.Module): + """ + The anchor generator used by Rotated RPN (RRPN). 
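# Worked example (not part of the patch) of the cell-anchor algebra in
# generate_cell_anchors above: for size = 64 and aspect_ratio = 2.0 (h / w),
#   area = 64 ** 2 = 4096, w = sqrt(4096 / 2.0) ~= 45.25, h = 2.0 * w ~= 90.51,
# so the area (w * h = 4096) and the ratio (h / w = 2) are both preserved, and the
# anchor is centered at the origin: (x0, y0, x1, y1) = (-w/2, -h/2, w/2, h/2).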
+ """ + + def __init__(self, cfg, input_shape: List[ShapeSpec]): + super().__init__() + # fmt: off + sizes = cfg.MODEL.ANCHOR_GENERATOR.SIZES + aspect_ratios = cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS + angles = cfg.MODEL.ANCHOR_GENERATOR.ANGLES + self.strides = [x.stride for x in input_shape] + # fmt: on + + self.num_features = len(self.strides) + self.cell_anchors = self._calculate_anchors(sizes, aspect_ratios, angles, self.strides) + + def _calculate_anchors(self, sizes, aspect_ratios, angles, feature_strides): + """ + Args: + sizes (list[list[int]]): sizes[i] is the list of anchor sizes to use + for the i-th feature map. If len(sizes) == 1, then the same list of + anchor sizes, given by sizes[0], is used for all feature maps. Anchor + sizes are given in absolute lengths in units of the input image; + they do not dynamically scale if the input image size changes. + aspect_ratios (list[list[float]]): aspect_ratios[i] is the list of + anchor aspect ratios to use for the i-th feature map. If + len(aspect_ratios) == 1, then the same list of anchor aspect ratios, + given by aspect_ratios[0], is used for all feature maps. + angles (list[list[float]]): angles[i] is the list of + anchor angles to use for the i-th feature map. If + len(angles) == 1, then the same list of anchor angles, + given by angles[0], is used for all feature maps. + feature_strides (list[number]): list of feature map strides (with respect + to the input image) for each input feature map. + """ + + # If one size (or aspect ratio) is specified and there are multiple feature + # maps, then we "broadcast" anchors of that single size + # (or aspect ratio/angle) over all feature maps. + + if len(sizes) == 1: + sizes *= self.num_features + if len(aspect_ratios) == 1: + aspect_ratios *= self.num_features + if len(angles) == 1: + angles *= self.num_features + assert self.num_features == len(sizes) + assert self.num_features == len(aspect_ratios) + assert self.num_features == len(angles) + + cell_anchors = [ + self.generate_cell_anchors(size, aspect_ratio, angle).float() + for size, aspect_ratio, angle in zip(sizes, aspect_ratios, angles) + ] + + return BufferList(cell_anchors) + + @property + def box_dim(self): + """ + Returns: + int: the dimension of each anchor box. + """ + return 5 + + @property + def num_cell_anchors(self): + """ + Returns: + list[int]: Each int is the number of anchors at every pixel + location, on that feature map. + For example, if at every pixel we use anchors of 3 aspect + ratios, 2 sizes and 5 angles, the number of anchors is 30. + (See also ANCHOR_GENERATOR.SIZES, ANCHOR_GENERATOR.ASPECT_RATIOS + and ANCHOR_GENERATOR.ANGLES in config) + + In standard RRPN models, `num_cell_anchors` on every feature map is the same. + """ + return [len(cell_anchors) for cell_anchors in self.cell_anchors] + + def grid_anchors(self, grid_sizes): + anchors = [] + for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): + shift_x, shift_y = _create_grid_offsets(size, stride, base_anchors.device) + zeros = torch.zeros_like(shift_x) + shifts = torch.stack((shift_x, shift_y, zeros, zeros, zeros), dim=1) + + anchors.append((shifts.view(-1, 1, 5) + base_anchors.view(1, -1, 5)).reshape(-1, 5)) + + return anchors + + def generate_cell_anchors( + self, + sizes=(32, 64, 128, 256, 512), + aspect_ratios=(0.5, 1, 2), + angles=(-90, -60, -30, 0, 30, 60, 90), + ): + """ + Generate a tensor storing anchor boxes, which are continuous geometric rectangles + centered on one feature map point sample. 
We can later build the set of anchors + for the entire feature map by tiling these tensors; see `meth:grid_anchors`. + + Args: + sizes (tuple[float]): Absolute size of the anchors in the units of the input + image (the input received by the network, after ungoing necessary scaling). + The absolute size is given as the side length of a box. + aspect_ratios (tuple[float]]): Aspect ratios of the boxes computed as box + height / width. + angles (tuple[float]]): Angles of boxes indicating how many degrees + the boxes are rotated counter-clockwise. + + Returns: + Tensor of shape (len(sizes) * len(aspect_ratios) * len(angles), 5) + storing anchor boxes in (x_ctr, y_ctr, w, h, angle) format. + """ + anchors = [] + for size in sizes: + area = size ** 2.0 + for aspect_ratio in aspect_ratios: + # s * s = w * h + # a = h / w + # ... some algebra ... + # w = sqrt(s * s / a) + # h = a * w + w = math.sqrt(area / aspect_ratio) + h = aspect_ratio * w + anchors.extend([0, 0, w, h, a] for a in angles) + + return torch.tensor(anchors) + + def forward(self, features): + """ + Args: + features (list[Tensor]): list of backbone feature maps on which to generate anchors. + + Returns: + list[list[RotatedBoxes]]: + a list of #image elements. Each is a list of #feature level RotatedBoxes. + The RotatedBoxes contains anchors of this image on the specific feature level. + """ + num_images = len(features[0]) + grid_sizes = [feature_map.shape[-2:] for feature_map in features] + anchors_over_all_feature_maps = self.grid_anchors(grid_sizes) + + anchors_in_image = [] + for anchors_per_feature_map in anchors_over_all_feature_maps: + boxes = RotatedBoxes(anchors_per_feature_map) + anchors_in_image.append(boxes) + + anchors = [copy.deepcopy(anchors_in_image) for _ in range(num_images)] + return anchors + + +def build_anchor_generator(cfg, input_shape): + """ + Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`. + """ + anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME + return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape) diff --git a/detectron2/modeling/backbone/__init__.py b/detectron2/modeling/backbone/__init__.py new file mode 100644 index 0000000000..bd737d9977 --- /dev/null +++ b/detectron2/modeling/backbone/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip + +from .backbone import Backbone +from .fpn import FPN +from .resnet import ResNet, ResNetBlockBase, build_resnet_backbone, make_stage + +# TODO can expose more resnet blocks after careful consideration diff --git a/detectron2/modeling/backbone/backbone.py b/detectron2/modeling/backbone/backbone.py new file mode 100644 index 0000000000..a530efe48f --- /dev/null +++ b/detectron2/modeling/backbone/backbone.py @@ -0,0 +1,70 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from abc import ABCMeta, abstractmethod +import torch.nn as nn + +from detectron2.layers import ShapeSpec + +__all__ = ["Backbone"] + + +class Backbone(nn.Module, metaclass=ABCMeta): + """ + Abstract base class for network backbones. + """ + + def __init__(self): + """ + The `__init__` method of any subclass can specify its own set of arguments. + """ + super().__init__() + + @abstractmethod + def forward(self): + """ + Subclasses must override this method, but adhere to the same return type. 
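# Minimal sketch (not part of the patch; names are hypothetical) of the contract this
# abstract class defines: forward returns named feature maps, and the inherited
# output_shape() works once _out_features / _out_feature_channels / _out_feature_strides
# are filled in.
import torch.nn as nn

class ToyBackbone(Backbone):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, kernel_size=3, stride=4, padding=1)
        self._out_features = ["toy"]
        self._out_feature_channels = {"toy": 8}
        self._out_feature_strides = {"toy": 4}

    def forward(self, x):
        # a 3 x H x W image maps to an 8 x H/4 x W/4 feature named "toy"
        return {"toy": self.conv(x)}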
+ + Returns: + dict[str: Tensor]: mapping from feature name (e.g., "res2") to tensor + """ + pass + + @property + def size_divisibility(self): + """ + Some backbones require the input height and width to be divisible by a + specific integer. This is typically true for encoder / decoder type networks + with lateral connection (e.g., FPN) for which feature maps need to match + dimension in the "bottom up" and "top down" paths. Set to 0 if no specific + input size divisibility is required. + """ + return 0 + + def output_shape(self): + """ + Returns: + dict[str->ShapeSpec] + """ + # this is a backward-compatible default + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + # the properties below are not used any more + + @property + def out_features(self): + """deprecated""" + return self._out_features + + @property + def out_feature_strides(self): + """deprecated""" + return {f: self._out_feature_strides[f] for f in self._out_features} + + @property + def out_feature_channels(self): + """deprecated""" + return {f: self._out_feature_channels[f] for f in self._out_features} diff --git a/detectron2/modeling/backbone/build.py b/detectron2/modeling/backbone/build.py new file mode 100644 index 0000000000..8a1bbc950b --- /dev/null +++ b/detectron2/modeling/backbone/build.py @@ -0,0 +1,26 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from detectron2.layers import ShapeSpec +from detectron2.utils.registry import Registry + +from .backbone import Backbone + +BACKBONE_REGISTRY = Registry("BACKBONE") +""" +Registry for backbones, which extract feature maps from images. +""" + + +def build_backbone(cfg, input_shape=None): + """ + Build a backbone from `cfg.MODEL.BACKBONE.NAME`. + + Returns: + an instance of :class:`Backbone` + """ + if input_shape is None: + input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) + + backbone_name = cfg.MODEL.BACKBONE.NAME + backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape) + assert isinstance(backbone, Backbone) + return backbone diff --git a/detectron2/modeling/backbone/fpn.py b/detectron2/modeling/backbone/fpn.py new file mode 100644 index 0000000000..657d81cd12 --- /dev/null +++ b/detectron2/modeling/backbone/fpn.py @@ -0,0 +1,244 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +import fvcore.nn.weight_init as weight_init +import torch.nn.functional as F +from torch import nn + +from detectron2.layers import Conv2d, ShapeSpec, get_norm + +from .backbone import Backbone +from .build import BACKBONE_REGISTRY +from .resnet import build_resnet_backbone + +__all__ = ["build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"] + + +class FPN(Backbone): + """ + This module implements Feature Pyramid Network. + It creates pyramid features built on top of some input feature maps. + """ + + def __init__( + self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum" + ): + """ + Args: + bottom_up (Backbone): module representing the bottom up subnetwork. + Must be a subclass of :class:`Backbone`. The multi-scale feature + maps generated by the bottom up network, and listed in `in_features`, + are used to generate FPN levels. + in_features (list[str]): names of the input feature maps coming + from the backbone to which FPN is attached. 
For example, if the + backbone produces ["res2", "res3", "res4"], any *contiguous* sublist + of these may be used; order must be from high to low resolution. + out_channels (int): number of channels in the output feature maps. + norm (str): the normalization to use. + top_block (nn.Module or None): if provided, an extra operation will + be performed on the output of the last (smallest resolution) + FPN output, and the result will extend the result list. The top_block + further downsamples the feature map. It must have an attribute + "num_levels", meaning the number of extra FPN levels added by + this block, and "in_feature", which is a string representing + its input feature (e.g., p5). + fuse_type (str): types for fusing the top down features and the lateral + ones. It can be "sum" (default), which sums up element-wise; or "avg", + which takes the element-wise mean of the two. + """ + super(FPN, self).__init__() + assert isinstance(bottom_up, Backbone) + + # Feature map strides and channels from the bottom up network (e.g. ResNet) + in_strides = [bottom_up.out_feature_strides[f] for f in in_features] + in_channels = [bottom_up.out_feature_channels[f] for f in in_features] + + _assert_strides_are_log2_contiguous(in_strides) + lateral_convs = [] + output_convs = [] + + use_bias = norm == "" + for idx, in_channels in enumerate(in_channels): + lateral_norm = get_norm(norm, out_channels) + output_norm = get_norm(norm, out_channels) + + lateral_conv = Conv2d( + in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm + ) + output_conv = Conv2d( + out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=use_bias, + norm=output_norm, + ) + weight_init.c2_xavier_fill(lateral_conv) + weight_init.c2_xavier_fill(output_conv) + stage = int(math.log2(in_strides[idx])) + self.add_module("fpn_lateral{}".format(stage), lateral_conv) + self.add_module("fpn_output{}".format(stage), output_conv) + + lateral_convs.append(lateral_conv) + output_convs.append(output_conv) + # Place convs into top-down order (from low to high resolution) + # to make the top-down computation in forward clearer. + self.lateral_convs = lateral_convs[::-1] + self.output_convs = output_convs[::-1] + self.top_block = top_block + self.in_features = in_features + self.bottom_up = bottom_up + # Return feature names are "p", like ["p2", "p3", ..., "p6"] + self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in in_strides} + # top block output feature maps. + if self.top_block is not None: + for s in range(stage, stage + self.top_block.num_levels): + self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) + + self._out_features = list(self._out_feature_strides.keys()) + self._out_feature_channels = {k: out_channels for k in self._out_features} + self._size_divisibility = in_strides[-1] + assert fuse_type in {"avg", "sum"} + self._fuse_type = fuse_type + + @property + def size_divisibility(self): + return self._size_divisibility + + def forward(self, x): + """ + Args: + input (dict[str: Tensor]): mapping feature map name (e.g., "res5") to + feature map tensor for each feature level in high to low resolution order. + + Returns: + dict[str: Tensor]: + mapping from feature map name to FPN feature map tensor + in high to low resolution order. Returned feature names follow the FPN + paper convention: "p", where stage has stride = 2 ** stage e.g., + ["p2", "p3", ..., "p6"]. 
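+
+        As an illustrative sketch only (assuming in_features=["res2", "res3", "res4",
+        "res5"], out_channels=256, a LastLevelMaxPool top block and a 224x224 input;
+        none of these values are required by this method), the returned dict would
+        contain tensors with shapes:
+
+            "p2": (N, 256, 56, 56)   # stride 4
+            "p3": (N, 256, 28, 28)   # stride 8
+            "p4": (N, 256, 14, 14)   # stride 16
+            "p5": (N, 256,  7,  7)   # stride 32
+            "p6": (N, 256,  4,  4)   # stride 64, added by the top block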
+ """ + # Reverse feature maps into top-down order (from low to high resolution) + bottom_up_features = self.bottom_up(x) + x = [bottom_up_features[f] for f in self.in_features[::-1]] + results = [] + prev_features = self.lateral_convs[0](x[0]) + results.append(self.output_convs[0](prev_features)) + for features, lateral_conv, output_conv in zip( + x[1:], self.lateral_convs[1:], self.output_convs[1:] + ): + top_down_features = F.interpolate(prev_features, scale_factor=2, mode="nearest") + lateral_features = lateral_conv(features) + prev_features = lateral_features + top_down_features + if self._fuse_type == "avg": + prev_features /= 2 + results.insert(0, output_conv(prev_features)) + + if self.top_block is not None: + top_block_in_feature = bottom_up_features.get(self.top_block.in_feature, None) + if top_block_in_feature is None: + top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)] + results.extend(self.top_block(top_block_in_feature)) + assert len(self._out_features) == len(results) + return dict(zip(self._out_features, results)) + + def output_shape(self): + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + +def _assert_strides_are_log2_contiguous(strides): + """ + Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2". + """ + for i, stride in enumerate(strides[1:], 1): + assert stride == 2 * strides[i - 1], "Stides {} {} are not log2 contiguous".format( + stride, strides[i - 1] + ) + + +class LastLevelMaxPool(nn.Module): + """ + This module is used in the original FPN to generate a downsampled + P6 feature from P5. + """ + + def __init__(self): + super().__init__() + self.num_levels = 1 + self.in_feature = "p5" + + def forward(self, x): + return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)] + + +class LastLevelP6P7(nn.Module): + """ + This module is used in RetinaNet to generate extra layers, P6 and P7 from + C5 feature. + """ + + def __init__(self, in_channels, out_channels): + super().__init__() + self.num_levels = 2 + self.in_feature = "res5" + self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) + self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) + for module in [self.p6, self.p7]: + weight_init.c2_xavier_fill(module) + + def forward(self, c5): + p6 = self.p6(c5) + p7 = self.p7(F.relu(p6)) + return [p6, p7] + + +@BACKBONE_REGISTRY.register() +def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): + """ + Args: + cfg: a detectron2 CfgNode + + Returns: + backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. + """ + bottom_up = build_resnet_backbone(cfg, input_shape) + in_features = cfg.MODEL.FPN.IN_FEATURES + out_channels = cfg.MODEL.FPN.OUT_CHANNELS + backbone = FPN( + bottom_up=bottom_up, + in_features=in_features, + out_channels=out_channels, + norm=cfg.MODEL.FPN.NORM, + top_block=LastLevelMaxPool(), + fuse_type=cfg.MODEL.FPN.FUSE_TYPE, + ) + return backbone + + +@BACKBONE_REGISTRY.register() +def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec): + """ + Args: + cfg: a detectron2 CfgNode + + Returns: + backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. 
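+
+        Example (a hedged sketch of the first block of "res2" in a ResNet-50; the
+        channel numbers are illustrative and not enforced by this class):
+
+            block = BottleneckBlock(
+                in_channels=64,
+                out_channels=256,
+                bottleneck_channels=64,
+                stride=1,
+                num_groups=1,
+                norm="BN",
+                stride_in_1x1=True,
+            )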
+ """ + bottom_up = build_resnet_backbone(cfg, input_shape) + in_features = cfg.MODEL.FPN.IN_FEATURES + out_channels = cfg.MODEL.FPN.OUT_CHANNELS + in_channels_p6p7 = bottom_up.out_feature_channels["res5"] + backbone = FPN( + bottom_up=bottom_up, + in_features=in_features, + out_channels=out_channels, + norm=cfg.MODEL.FPN.NORM, + top_block=LastLevelP6P7(in_channels_p6p7, out_channels), + fuse_type=cfg.MODEL.FPN.FUSE_TYPE, + ) + return backbone diff --git a/detectron2/modeling/backbone/resnet.py b/detectron2/modeling/backbone/resnet.py new file mode 100644 index 0000000000..af6c6ea5e6 --- /dev/null +++ b/detectron2/modeling/backbone/resnet.py @@ -0,0 +1,479 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn.functional as F +from torch import nn + +from detectron2.layers import ( + Conv2d, + DeformConv, + FrozenBatchNorm2d, + ModulatedDeformConv, + ShapeSpec, + get_norm, +) + +from .backbone import Backbone +from .build import BACKBONE_REGISTRY + +__all__ = [ + "ResNetBlockBase", + "BottleneckBlock", + "DeformBottleneckBlock", + "BasicStem", + "ResNet", + "make_stage", + "build_resnet_backbone", +] + + +class ResNetBlockBase(nn.Module): + def __init__(self, in_channels, out_channels, stride): + """ + The `__init__` method of any subclass should also contain these arguments. + + Args: + in_channels (int): + out_channels (int): + stride (int): + """ + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.stride = stride + + def freeze(self): + for p in self.parameters(): + p.requires_grad = False + FrozenBatchNorm2d.convert_frozen_batchnorm(self) + return self + + +class BottleneckBlock(ResNetBlockBase): + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + dilation=1, + ): + """ + Args: + norm (str or callable): a callable that takes the number of + channels and return a `nn.Module`, or a pre-defined string + (one of {"FrozenBN", "BN", "GN"}). + stride_in_1x1 (bool): when stride==2, whether to put stride in the + first 1x1 convolution or the bottleneck 3x3 convolution. 
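+
+    This builder is identical to :func:`build_resnet_fpn_backbone` except that the FPN
+    top block is a :class:`LastLevelP6P7` fed from the bottom-up "res5" feature, which
+    adds the two extra pyramid levels (p6, p7) used by RetinaNet.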
+ """ + super().__init__(in_channels, out_channels, stride) + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + # The original MSRA ResNet models have stride in the first 1x1 conv + # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have + # stride in the 3x3 conv + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv2 = Conv2d( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + bias=False, + groups=num_groups, + dilation=dilation, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + # Zero-initialize the last normalization in each residual branch, + # so that at the beginning, the residual branch starts with zeros, + # and each residual block behaves like an identity. + # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": + # "For BN layers, the learnable scaling coefficient γ is initialized + # to be 1, except for each residual block's last BN + # where γ is initialized to be 0." + + # nn.init.constant_(self.conv3.norm.weight, 0) + # TODO this somehow hurts performance when training GN models from scratch. + # Add it as an option when we need to use this code to train a backbone. + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + + out = self.conv2(out) + out = F.relu_(out) + + out = self.conv3(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +class DeformBottleneckBlock(ResNetBlockBase): + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + dilation=1, + deform_modulated=False, + deform_num_groups=1, + ): + """ + Similar to :class:`BottleneckBlock`, but with deformable conv in the 3x3 convolution. 
+ """ + super().__init__(in_channels, out_channels, stride) + self.deform_modulated = deform_modulated + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + if deform_modulated: + deform_conv_op = ModulatedDeformConv + # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size + offset_channels = 27 + else: + deform_conv_op = DeformConv + offset_channels = 18 + + self.conv2_offset = Conv2d( + bottleneck_channels, + offset_channels * deform_num_groups, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + dilation=dilation, + ) + self.conv2 = deform_conv_op( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + padding=1 * dilation, + bias=False, + groups=num_groups, + dilation=dilation, + deformable_groups=deform_num_groups, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + nn.init.constant_(self.conv2_offset.weight, 0) + nn.init.constant_(self.conv2_offset.bias, 0) + + def forward(self, x): + out = self.conv1(x) + out = F.relu_(out) + + if self.deform_modulated: + offset_mask = self.conv2_offset(out) + offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1) + offset = torch.cat((offset_x, offset_y), dim=1) + mask = mask.sigmoid() + out = self.conv2(out, offset, mask) + else: + offset = self.conv2_offset(out) + out = self.conv2(out, offset) + out = F.relu_(out) + + out = self.conv3(out) + + if self.shortcut is not None: + shortcut = self.shortcut(x) + else: + shortcut = x + + out += shortcut + out = F.relu_(out) + return out + + +def make_stage(block_class, num_blocks, first_stride, **kwargs): + """ + Create a resnet stage by creating many blocks. + Args: + block_class (class): a subclass of ResNetBlockBase + num_blocks (int): + first_stride (int): the stride of the first block. The other blocks will have stride=1. + A `stride` argument will be passed to the block constructor. + kwargs: other arguments passed to the block constructor. + + Returns: + list[nn.Module]: a list of block module. + """ + blocks = [] + for i in range(num_blocks): + blocks.append(block_class(stride=first_stride if i == 0 else 1, **kwargs)) + kwargs["in_channels"] = kwargs["out_channels"] + return blocks + + +class BasicStem(nn.Module): + def __init__(self, in_channels=3, out_channels=64, norm="BN"): + """ + Args: + norm (str or callable): a callable that takes the number of + channels and return a `nn.Module`, or a pre-defined string + (one of {"FrozenBN", "BN", "GN"}). 
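+
+        For example (illustrative shapes, with the default out_channels=64): an input
+        of shape (N, 3, 224, 224) becomes (N, 64, 112, 112) after the stride-2 7x7 conv
+        and (N, 64, 56, 56) after the stride-2 max pool, i.e. an overall stride of 4.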
+ """ + super().__init__() + self.conv1 = Conv2d( + in_channels, + out_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False, + norm=get_norm(norm, out_channels), + ) + weight_init.c2_msra_fill(self.conv1) + + def forward(self, x): + x = self.conv1(x) + x = F.relu_(x) + x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) + return x + + @property + def out_channels(self): + return self.conv1.out_channels + + @property + def stride(self): + return 4 # = stride 2 conv -> stride 2 max pool + + +class ResNet(Backbone): + def __init__(self, stem, stages, num_classes=None, out_features=None): + """ + Args: + stem (nn.Module): a stem module + stages (list[list[ResNetBlock]]): several (typically 4) stages, + each contains multiple :class:`ResNetBlockBase`. + num_classes (None or int): if None, will not perform classification. + out_features (list[str]): name of the layers whose outputs should + be returned in forward. Can be anything in "stem", "linear", or "res2" ... + If None, will return the output of the last layer. + """ + super(ResNet, self).__init__() + self.stem = stem + self.num_classes = num_classes + + current_stride = self.stem.stride + self._out_feature_strides = {"stem": current_stride} + self._out_feature_channels = {"stem": self.stem.out_channels} + + self.stages_and_names = [] + for i, blocks in enumerate(stages): + for block in blocks: + assert isinstance(block, ResNetBlockBase), block + curr_channels = block.out_channels + stage = nn.Sequential(*blocks) + name = "res" + str(i + 2) + self.add_module(name, stage) + self.stages_and_names.append((stage, name)) + self._out_feature_strides[name] = current_stride = int( + current_stride * np.prod([k.stride for k in blocks]) + ) + self._out_feature_channels[name] = blocks[-1].out_channels + + if num_classes is not None: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.linear = nn.Linear(curr_channels, num_classes) + + # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": + # "The 1000-way fully-connected layer is initialized by + # drawing weights from a zero-mean Gaussian with standard deviation of 0.01." + nn.init.normal_(self.linear.weight, stddev=0.01) + name = "linear" + + if out_features is None: + out_features = [name] + self._out_features = out_features + assert len(self._out_features) + children = [x[0] for x in self.named_children()] + for out_feature in self._out_features: + assert out_feature in children, "Available children: {}".format(", ".join(children)) + + def forward(self, x): + outputs = {} + x = self.stem(x) + if "stem" in self._out_features: + outputs["stem"] = x + for stage, name in self.stages_and_names: + x = stage(x) + if name in self._out_features: + outputs[name] = x + if self.num_classes is not None: + x = self.avgpool(x) + x = self.linear(x) + if "linear" in self._out_features: + outputs["linear"] = x + return outputs + + def output_shape(self): + return { + name: ShapeSpec( + channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] + ) + for name in self._out_features + } + + +@BACKBONE_REGISTRY.register() +def build_resnet_backbone(cfg, input_shape): + """ + Create a ResNet instance from config. + + Returns: + ResNet: a :class:`ResNet` instance. + """ + # need registration of new blocks/stems? 
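+    # Rough outline of what follows (a descriptive summary of the code below,
+    # not additional behavior):
+    #   1. build the stem from MODEL.RESNETS.STEM_OUT_CHANNELS and freeze it if
+    #      MODEL.BACKBONE.FREEZE_AT >= 1;
+    #   2. read the stage configuration (depth, groups, dilation, deformable conv flags);
+    #   3. create stages "res2" up to the deepest feature requested in
+    #      MODEL.RESNETS.OUT_FEATURES, doubling the channel width per stage and halving
+    #      the spatial resolution in every stage except "res2" (and a dilated "res5"),
+    #      freezing each stage whose index (2 for "res2", ..., 5 for "res5") is
+    #      <= MODEL.BACKBONE.FREEZE_AT;
+    #   4. assemble everything into a :class:`ResNet`.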
+ norm = cfg.MODEL.RESNETS.NORM + stem = BasicStem( + in_channels=input_shape.channels, + out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, + norm=norm, + ) + freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT + + if freeze_at >= 1: + for p in stem.parameters(): + p.requires_grad = False + stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem) + + # fmt: off + out_features = cfg.MODEL.RESNETS.OUT_FEATURES + depth = cfg.MODEL.RESNETS.DEPTH + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group + in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION + deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE + deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED + deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS + # fmt: on + assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) + + num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] + + stages = [] + + # Avoid creating variables without gradients + # It consumes extra memory and may cause allreduce to fail + out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] + max_stage_idx = max(out_stage_idx) + for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): + dilation = res5_dilation if stage_idx == 5 else 1 + first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 + stage_kargs = { + "num_blocks": num_blocks_per_stage[idx], + "first_stride": first_stride, + "in_channels": in_channels, + "bottleneck_channels": bottleneck_channels, + "out_channels": out_channels, + "num_groups": num_groups, + "norm": norm, + "stride_in_1x1": stride_in_1x1, + "dilation": dilation, + } + if deform_on_per_stage[idx]: + stage_kargs["block_class"] = DeformBottleneckBlock + stage_kargs["deform_modulated"] = deform_modulated + stage_kargs["deform_num_groups"] = deform_num_groups + else: + stage_kargs["block_class"] = BottleneckBlock + blocks = make_stage(**stage_kargs) + in_channels = out_channels + out_channels *= 2 + bottleneck_channels *= 2 + + if freeze_at >= stage_idx: + for block in blocks: + block.freeze() + stages.append(blocks) + return ResNet(stem, stages, out_features=out_features) diff --git a/detectron2/modeling/box_regression.py b/detectron2/modeling/box_regression.py new file mode 100644 index 0000000000..32f2d944ea --- /dev/null +++ b/detectron2/modeling/box_regression.py @@ -0,0 +1,214 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +import torch + +# Value for clamping large dw and dh predictions. The heuristic is that we clamp +# such that dw and dh are no larger than what would transform a 16px box into a +# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px). +_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16) + + +class Box2BoxTransform(object): + """ + The box-to-box transform defined in R-CNN. The transformation is parameterized + by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height + by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height). + """ + + def __init__(self, weights, scale_clamp=_DEFAULT_SCALE_CLAMP): + """ + Args: + weights (4-element tuple): Scaling factors that are applied to the + (dx, dy, dw, dh) deltas. 
In Fast R-CNN, these were originally set + such that the deltas have unit variance; now they are treated as + hyperparameters of the system. + scale_clamp (float): When predicting deltas, the predicted box scaling + factors (dw and dh) are clamped such that they are <= scale_clamp. + """ + self.weights = weights + self.scale_clamp = scale_clamp + + def get_deltas(self, src_boxes, target_boxes): + """ + Get box regression transformation deltas (dx, dy, dw, dh) that can be used + to transform the `src_boxes` into the `target_boxes`. That is, the relation + `target_boxes == self.apply_deltas(deltas, src_boxes)` is true (unless + any delta is too large and is clamped). + + Args: + src_boxes (Tensor): source boxes, e.g., object proposals + target_boxes (Tensor): target of the transformation, e.g., ground-truth + boxes. + """ + assert isinstance(src_boxes, torch.Tensor), type(src_boxes) + assert isinstance(target_boxes, torch.Tensor), type(target_boxes) + + src_widths = src_boxes[:, 2] - src_boxes[:, 0] + src_heights = src_boxes[:, 3] - src_boxes[:, 1] + src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths + src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights + + target_widths = target_boxes[:, 2] - target_boxes[:, 0] + target_heights = target_boxes[:, 3] - target_boxes[:, 1] + target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths + target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights + + wx, wy, ww, wh = self.weights + dx = wx * (target_ctr_x - src_ctr_x) / src_widths + dy = wy * (target_ctr_y - src_ctr_y) / src_heights + dw = ww * torch.log(target_widths / src_widths) + dh = wh * torch.log(target_heights / src_heights) + + deltas = torch.stack((dx, dy, dw, dh), dim=1) + assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!" + return deltas + + def apply_deltas(self, deltas, boxes): + """ + Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. + + Args: + deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. + deltas[i] represents k potentially different class-specific + box transformations for the single box boxes[i]. + boxes (Tensor): boxes to transform, of shape (N, 4) + """ + assert torch.isfinite(deltas).all().item() + boxes = boxes.to(deltas.dtype) + + widths = boxes[:, 2] - boxes[:, 0] + heights = boxes[:, 3] - boxes[:, 1] + ctr_x = boxes[:, 0] + 0.5 * widths + ctr_y = boxes[:, 1] + 0.5 * heights + + wx, wy, ww, wh = self.weights + dx = deltas[:, 0::4] / wx + dy = deltas[:, 1::4] / wy + dw = deltas[:, 2::4] / ww + dh = deltas[:, 3::4] / wh + + # Prevent sending too large values into torch.exp() + dw = torch.clamp(dw, max=self.scale_clamp) + dh = torch.clamp(dh, max=self.scale_clamp) + + pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] + pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] + pred_w = torch.exp(dw) * widths[:, None] + pred_h = torch.exp(dh) * heights[:, None] + + pred_boxes = torch.zeros_like(deltas) + pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1 + pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1 + pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2 + pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2 + return pred_boxes + + +class Box2BoxTransformRotated(object): + """ + The box-to-box transform defined in Rotated R-CNN. The transformation is parameterized + by 5 deltas: (dx, dy, dw, dh, da). The transformation scales the box's width and height + by exp(dw), exp(dh), shifts a box's center by the offset (dx * width, dy * height), + and rotate a box's angle by da (radians). 
+ Note: angles of deltas are in radians while angles of boxes are in degrees. + """ + + def __init__(self, weights, scale_clamp=_DEFAULT_SCALE_CLAMP): + """ + Args: + weights (5-element tuple): Scaling factors that are applied to the + (dx, dy, dw, dh, da) deltas. These are treated as + hyperparameters of the system. + scale_clamp (float): When predicting deltas, the predicted box scaling + factors (dw and dh) are clamped such that they are <= scale_clamp. + """ + self.weights = weights + self.scale_clamp = scale_clamp + + def get_deltas(self, src_boxes, target_boxes): + """ + Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used + to transform the `src_boxes` into the `target_boxes`. That is, the relation + `target_boxes == self.apply_deltas(deltas, src_boxes)` is true (unless + any delta is too large and is clamped). + + Args: + src_boxes (Tensor): Nx5 source boxes, e.g., object proposals + target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth + boxes. + """ + assert isinstance(src_boxes, torch.Tensor), type(src_boxes) + assert isinstance(target_boxes, torch.Tensor), type(target_boxes) + + src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1) + + target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind( + target_boxes, dim=1 + ) + + wx, wy, ww, wh, wa = self.weights + dx = wx * (target_ctr_x - src_ctr_x) / src_widths + dy = wy * (target_ctr_y - src_ctr_y) / src_heights + dw = ww * torch.log(target_widths / src_widths) + dh = wh * torch.log(target_heights / src_heights) + # Angles of deltas are in radians while angles of boxes are in degrees. + # the conversion to radians serve as a way to normalize the values + da = target_angles - src_angles + while len(torch.where(da < -180.0)[0]) > 0: + da[torch.where(da < -180.0)] += 360.0 + while len(torch.where(da > 180.0)[0]) > 0: + da[torch.where(da > 180.0)] -= 360.0 + da *= wa * math.pi / 180.0 + + deltas = torch.stack((dx, dy, dw, dh, da), dim=1) + assert ( + (src_widths > 0).all().item() + ), "Input boxes to Box2BoxTransformRotated are not valid!" + return deltas + + def apply_deltas(self, deltas, boxes): + """ + Apply transformation `deltas` (dx, dy, dw, dh, da) to `boxes`. + + Args: + deltas (Tensor): transformation deltas of shape (N, 5). + deltas[i] represents box transformation for the single box boxes[i]. + boxes (Tensor): boxes to transform, of shape (N, 5) + """ + assert deltas.shape[1] == 5 and boxes.shape[1] == 5 + assert torch.isfinite(deltas).all().item() + + boxes = boxes.to(deltas.dtype) + + ctr_x, ctr_y, widths, heights, angles = torch.unbind(boxes, dim=1) + wx, wy, ww, wh, wa = self.weights + dx, dy, dw, dh, da = torch.unbind(deltas, dim=1) + + dx.div_(wx) + dy.div_(wy) + dw.div_(ww) + dh.div_(wh) + da.div_(wa) + + # Prevent sending too large values into torch.exp() + dw = torch.clamp(dw, max=self.scale_clamp) + dh = torch.clamp(dh, max=self.scale_clamp) + + pred_boxes = torch.zeros_like(deltas) + pred_boxes[:, 0] = dx * widths + ctr_x # x_ctr + pred_boxes[:, 1] = dy * heights + ctr_y # y_ctr + pred_boxes[:, 2] = torch.exp(dw) * widths # width + pred_boxes[:, 3] = torch.exp(dh) * heights # height + + # Following original RRPN implementation, + # angles of deltas are in radians while angles of boxes are in degrees. 
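+        # Illustrative example (made-up numbers): with wa = 1, a predicted da of
+        # pi/6 rad corresponds to 30 degrees; applied to an anchor at -170 degrees
+        # this gives -140 degrees, and the loops below fold any value falling
+        # outside [-180, 180] back into that range.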
+ pred_angle = da * 180.0 / math.pi + angles + + while len(torch.where(pred_angle < -180.0)[0]) > 0: + pred_angle[torch.where(pred_angle < -180.0)] += 360.0 + while len(torch.where(pred_angle > 180.0)[0]) > 0: + pred_angle[torch.where(pred_angle > 180.0)] -= 360.0 + + pred_boxes[:, 4] = pred_angle + + return pred_boxes diff --git a/detectron2/modeling/matcher.py b/detectron2/modeling/matcher.py new file mode 100644 index 0000000000..9fa1209aab --- /dev/null +++ b/detectron2/modeling/matcher.py @@ -0,0 +1,128 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch + + +class Matcher(object): + """ + This class assigns to each predicted "element" (e.g., a box) a ground-truth + element. Each predicted element will have exactly zero or one matches; each + ground-truth element may be matched to zero or more predicted elements. + + The matching is determined by the MxN match_quality_matrix, that characterizes + how well each (ground-truth, prediction)-pair match each other. For example, + if the elements are boxes, this matrix may contain box intersection-over-union + overlap values. + + The matcher returns (a) a vector of length N containing the index of the + ground-truth element m in [0, M) that matches to prediction n in [0, N). + (b) a vector of length N containing the labels for each prediction. + """ + + def __init__(self, thresholds, labels, allow_low_quality_matches=False): + """ + Args: + thresholds (list): a list of thresholds used to stratify predictions + into levels. + labels (list): a list of values to label predictions belonging at + each level. A label can be one of {-1, 0, 1} signifying + {ignore, negative class, positive class}, respectively. + allow_low_quality_matches (bool): if True, produce additional matches + for predictions with maximum match quality lower than high_threshold. + See set_low_quality_matches_ for more details. + + For example, + thresholds = [0.3, 0.5] + labels = [0, -1, 1] + All predictions with iou < 0.3 will be marked with 0 and + thus will be considered as false positives while training. + All predictions with 0.3 <= iou < 0.5 will be marked with -1 and + thus will be ignored. + All predictions with 0.5 <= iou will be marked with 1 and + thus will be considered as true positives. + """ + # Add -inf and +inf to first and last position in thresholds + thresholds = thresholds[:] + thresholds.insert(0, -float("inf")) + thresholds.append(float("inf")) + assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])) + assert all(l in [-1, 0, 1] for l in labels) + assert len(labels) == len(thresholds) - 1 + self.thresholds = thresholds + self.labels = labels + self.allow_low_quality_matches = allow_low_quality_matches + + def __call__(self, match_quality_matrix): + """ + Args: + match_quality_matrix (Tensor[float]): an MxN tensor, containing the + pairwise quality between M ground-truth elements and N predicted + elements. All elements must be >= 0 (due to the us of `torch.nonzero` + for selecting indices in :meth:`set_low_quality_matches_`). 
+ + Returns: + matches (Tensor[int64]): a vector of length N, where matches[i] is a matched + ground-truth index in [0, M) + match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates + whether a prediction is a true or false positive or ignored + """ + assert match_quality_matrix.dim() == 2 + if match_quality_matrix.numel() == 0: + return ( + match_quality_matrix.new_full( + (match_quality_matrix.size(1),), 0, dtype=torch.int64 + ), + match_quality_matrix.new_full( + (match_quality_matrix.size(1),), -1, dtype=torch.int8 + ), + ) + assert torch.all(match_quality_matrix >= 0) + + # match_quality_matrix is M (gt) x N (predicted) + # Max over gt elements (dim 0) to find best gt candidate for each prediction + matched_vals, matches = match_quality_matrix.max(dim=0) + + match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) + + for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): + low_high = (matched_vals >= low) & (matched_vals < high) + match_labels[low_high] = l + + if self.allow_low_quality_matches: + self.set_low_quality_matches_(match_labels, match_quality_matrix) + + return matches, match_labels + + def set_low_quality_matches_(self, match_labels, match_quality_matrix): + """ + Produce additional matches for predictions that have only low-quality matches. + Specifically, for each ground-truth G find the set of predictions that have + maximum overlap with it (including ties); for each prediction in that set, if + it is unmatched, then match it to the ground-truth G. + + This function implements the RPN assignment case (i) in Sec. 3.1.2 of the + Faster R-CNN paper: https://arxiv.org/pdf/1506.01497v3.pdf. + """ + # For each gt, find the prediction with which it has highest quality + highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) + # Find the highest quality match available, even if it is low, including ties. + # Note that the matches qualities must be positive due to the use of + # `torch.nonzero`. + gt_pred_pairs_of_highest_quality = torch.nonzero( + match_quality_matrix == highest_quality_foreach_gt[:, None] + ) + # Example gt_pred_pairs_of_highest_quality: + # tensor([[ 0, 39796], + # [ 1, 32055], + # [ 1, 32070], + # [ 2, 39190], + # [ 2, 40255], + # [ 3, 40390], + # [ 3, 41455], + # [ 4, 45470], + # [ 5, 45325], + # [ 5, 46390]]) + # Each row is a (gt index, prediction index) + # Note how gt items 1, 2, 3, and 5 each have two ties + + pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1] + match_labels[pred_inds_to_update] = 1 diff --git a/detectron2/modeling/meta_arch/__init__.py b/detectron2/modeling/meta_arch/__init__.py new file mode 100644 index 0000000000..96ef9b582c --- /dev/null +++ b/detectron2/modeling/meta_arch/__init__.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from .build import META_ARCH_REGISTRY, build_model # isort:skip + +from .panoptic_fpn import PanopticFPN + +# import all the meta_arch, so they will be registered +from .rcnn import GeneralizedRCNN, ProposalNetwork +from .retinanet import RetinaNet +from .semantic_seg import SEM_SEG_HEADS_REGISTRY, SemanticSegmentor, build_sem_seg_head diff --git a/detectron2/modeling/meta_arch/build.py b/detectron2/modeling/meta_arch/build.py new file mode 100644 index 0000000000..e7980b6efc --- /dev/null +++ b/detectron2/modeling/meta_arch/build.py @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from detectron2.utils.registry import Registry + +META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip +""" +Registry for meta-architectures, i.e. the whole model. +""" + + +def build_model(cfg): + """ + Built the whole model, defined by `cfg.MODEL.META_ARCHITECTURE`. + """ + meta_arch = cfg.MODEL.META_ARCHITECTURE + return META_ARCH_REGISTRY.get(meta_arch)(cfg) diff --git a/detectron2/modeling/meta_arch/panoptic_fpn.py b/detectron2/modeling/meta_arch/panoptic_fpn.py new file mode 100644 index 0000000000..fc47904ad7 --- /dev/null +++ b/detectron2/modeling/meta_arch/panoptic_fpn.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import torch +from torch import nn + +from detectron2.structures import ImageList + +from ..backbone import build_backbone +from ..postprocessing import detector_postprocess, sem_seg_postprocess +from ..proposal_generator import build_proposal_generator +from ..roi_heads import build_roi_heads +from .build import META_ARCH_REGISTRY +from .semantic_seg import build_sem_seg_head + +__all__ = ["PanopticFPN"] + + +@META_ARCH_REGISTRY.register() +class PanopticFPN(nn.Module): + """ + Main class for Panoptic FPN architectures (see https://arxiv.org/abd/1901.02446). + """ + + def __init__(self, cfg): + super().__init__() + + self.device = torch.device(cfg.MODEL.DEVICE) + + self.instance_loss_weight = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT + + # options when combining instance & semantic outputs + self.combine_on = cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED + self.combine_overlap_threshold = cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH + self.combine_stuff_area_limit = cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT + self.combine_instances_confidence_threshold = ( + cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH + ) + + self.backbone = build_backbone(cfg) + self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape()) + self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape()) + self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape()) + + pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1) + pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1) + self.normalizer = lambda x: (x - pixel_mean) / pixel_std + self.to(self.device) + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper`. + Each item in the list contains the inputs for one image. + + For now, each item in the list is a dict that contains: + image: Tensor, image in (C, H, W) format. + instances: Instances + sem_seg: semantic segmentation ground truth. + Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + list[dict]: each dict is the results for one image. The dict + contains the following keys: + "instances": see :meth:`GeneralizedRCNN.forward` for its format. + "sem_seg": see :meth:`SemanticSegmentor.forward` for its format. + "panoptic_seg": available when `PANOPTIC_FPN.COMBINE.ENABLED`. + See the return value of + :func:`combine_semantic_and_instance_outputs` for its format. 
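+
+        In training, a dict[str -> Tensor] of losses is returned instead: the
+        semantic segmentation losses, the detector losses scaled by
+        MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT, and the proposal losses.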
+ """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [self.normalizer(x) for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + features = self.backbone(images.tensor) + + if "proposals" in batched_inputs[0]: + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + proposal_losses = {} + + if "sem_seg" in batched_inputs[0]: + gt_sem_seg = [x["sem_seg"].to(self.device) for x in batched_inputs] + gt_sem_seg = ImageList.from_tensors( + gt_sem_seg, self.backbone.size_divisibility, self.sem_seg_head.ignore_value + ).tensor + else: + gt_sem_seg = None + sem_seg_results, sem_seg_losses = self.sem_seg_head(features, gt_sem_seg) + + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + if self.proposal_generator: + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + detector_results, detector_losses = self.roi_heads( + images, features, proposals, gt_instances + ) + + if self.training: + losses = {} + losses.update(sem_seg_losses) + losses.update({k: v * self.instance_loss_weight for k, v in detector_losses.items()}) + losses.update(proposal_losses) + return losses + + processed_results = [] + for sem_seg_result, detector_result, input_per_image, image_size in zip( + sem_seg_results, detector_results, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height") + width = input_per_image.get("width") + sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width) + detector_r = detector_postprocess(detector_result, height, width) + + processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r}) + + if self.combine_on: + panoptic_r = combine_semantic_and_instance_outputs( + detector_r, + sem_seg_r.argmax(dim=0), + self.combine_overlap_threshold, + self.combine_stuff_area_limit, + self.combine_instances_confidence_threshold, + ) + processed_results[-1]["panoptic_seg"] = panoptic_r + return processed_results + + +def combine_semantic_and_instance_outputs( + instance_results, + semantic_results, + overlap_threshold, + stuff_area_limit, + instances_confidence_threshold, +): + """ + Implement a simple combining logic following + "combine_semantic_and_instance_predictions.py" in panopticapi + to produce panoptic segmentation outputs. + + Args: + instance_results: output of :func:`detector_postprocess`. + semantic_results: an (H, W) tensor, each is the contiguous semantic + category id + + Returns: + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. + segments_info (list[dict]): Describe each segment in `panoptic_seg`. + Each dict contains keys "id", "category_id", "isthing". 
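+
+    A hedged sketch of `segments_info` for an image with one detected instance and
+    one stuff region (all ids, categories and numbers below are illustrative):
+
+        [
+            {"id": 1, "isthing": True, "score": 0.98, "category_id": 0, "instance_id": 7},
+            {"id": 2, "isthing": False, "category_id": 21, "area": 10450},
+        ]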
+ """ + panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32) + + # sort instance outputs by scores + sorted_inds = torch.argsort(-instance_results.scores) + + current_segment_id = 0 + segments_info = [] + + instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device) + + # Add instances one-by-one, check for overlaps with existing ones + for inst_id in sorted_inds: + score = instance_results.scores[inst_id].item() + if score < instances_confidence_threshold: + break + mask = instance_masks[inst_id] # H,W + mask_area = mask.sum().item() + + if mask_area == 0: + continue + + intersect = (mask > 0) & (panoptic_seg > 0) + intersect_area = intersect.sum().item() + + if intersect_area * 1.0 / mask_area > overlap_threshold: + continue + + if intersect_area > 0: + mask = mask & (panoptic_seg == 0) + + current_segment_id += 1 + panoptic_seg[mask] = current_segment_id + segments_info.append( + { + "id": current_segment_id, + "isthing": True, + "score": score, + "category_id": instance_results.pred_classes[inst_id].item(), + "instance_id": inst_id.item(), + } + ) + + # Add semantic results to remaining empty areas + semantic_labels = torch.unique(semantic_results).cpu().tolist() + for semantic_label in semantic_labels: + if semantic_label == 0: # 0 is a special "thing" class + continue + mask = (semantic_results == semantic_label) & (panoptic_seg == 0) + mask_area = mask.sum().item() + if mask_area < stuff_area_limit: + continue + + current_segment_id += 1 + panoptic_seg[mask] = current_segment_id + segments_info.append( + { + "id": current_segment_id, + "isthing": False, + "category_id": semantic_label, + "area": mask_area, + } + ) + + return panoptic_seg, segments_info diff --git a/detectron2/modeling/meta_arch/rcnn.py b/detectron2/modeling/meta_arch/rcnn.py new file mode 100644 index 0000000000..5fefe64654 --- /dev/null +++ b/detectron2/modeling/meta_arch/rcnn.py @@ -0,0 +1,204 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import torch +from torch import nn + +from detectron2.structures import ImageList +from detectron2.utils.logger import log_first_n + +from ..backbone import build_backbone +from ..postprocessing import detector_postprocess +from ..proposal_generator import build_proposal_generator +from ..roi_heads import build_roi_heads +from .build import META_ARCH_REGISTRY + +__all__ = ["GeneralizedRCNN", "ProposalNetwork"] + + +@META_ARCH_REGISTRY.register() +class GeneralizedRCNN(nn.Module): + """ + Generalized R-CNN. Any models that contains the following three components: + 1. Per-image feature extraction (aka backbone) + 2. Region proposal generation + 3. Per-region feature extraction and prediction + """ + + def __init__(self, cfg): + super().__init__() + + self.device = torch.device(cfg.MODEL.DEVICE) + self.backbone = build_backbone(cfg) + self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape()) + self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape()) + + assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD) + num_channels = len(cfg.MODEL.PIXEL_MEAN) + pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(num_channels, 1, 1) + pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(num_channels, 1, 1) + self.normalizer = lambda x: (x - pixel_mean) / pixel_std + self.to(self.device) + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper` . 
+ Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + + * image: Tensor, image in (C, H, W) format. + * instances (optional): groundtruth :class:`Instances` + * proposals (optional): :class:`Instances`, precomputed proposals. + + Other information that's included in the original dicts, such as: + + * "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + list[dict]: + Each dict is the output for one input image. + The dict contains one key "instances" whose value is a :class:`Instances`. + The :class:`Instances` object has the following keys: + "pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints" + """ + if not self.training: + return self.inference(batched_inputs) + + images = self.preprocess_image(batched_inputs) + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + elif "targets" in batched_inputs[0]: + log_first_n( + logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 + ) + gt_instances = [x["targets"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + + features = self.backbone(images.tensor) + + if self.proposal_generator: + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + else: + assert "proposals" in batched_inputs[0] + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + proposal_losses = {} + + _, detector_losses = self.roi_heads(images, features, proposals, gt_instances) + + losses = {} + losses.update(detector_losses) + losses.update(proposal_losses) + return losses + + def inference(self, batched_inputs, detected_instances=None, do_postprocess=True): + """ + Run inference on the given inputs. + + Args: + batched_inputs (list[dict]): same as in :meth:`forward` + detected_instances (None or list[Instances]): if not None, it + contains an `Instances` object per image. The `Instances` + object contains "pred_boxes" and "pred_classes" which are + known boxes in the image. + The inference will then skip the detection of bounding boxes, + and only predict other per-ROI outputs. + do_postprocess (bool): whether to apply post-processing on the outputs. + + Returns: + same as in :meth:`forward`. + """ + assert not self.training + + images = self.preprocess_image(batched_inputs) + features = self.backbone(images.tensor) + + if detected_instances is None: + if self.proposal_generator: + proposals, _ = self.proposal_generator(images, features, None) + else: + assert "proposals" in batched_inputs[0] + proposals = [x["proposals"].to(self.device) for x in batched_inputs] + + results, _ = self.roi_heads(images, features, proposals, None) + else: + detected_instances = [x.to(self.device) for x in detected_instances] + results = self.roi_heads.forward_with_given_boxes(features, detected_instances) + + if do_postprocess: + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + results, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"instances": r}) + return processed_results + else: + return results + + def preprocess_image(self, batched_inputs): + """ + Normalize, pad and batch the input images. 
+ """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [self.normalizer(x) for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + return images + + +@META_ARCH_REGISTRY.register() +class ProposalNetwork(nn.Module): + def __init__(self, cfg): + super().__init__() + self.device = torch.device(cfg.MODEL.DEVICE) + + self.backbone = build_backbone(cfg) + self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape()) + + pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(-1, 1, 1) + pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(-1, 1, 1) + self.normalizer = lambda x: (x - pixel_mean) / pixel_std + self.to(self.device) + + def forward(self, batched_inputs): + """ + Args: + Same as in :class:`GeneralizedRCNN.forward` + + Returns: + list[dict]: Each dict is the output for one input image. + The dict contains one key "proposals" whose value is a + :class:`Instances` with keys "proposal_boxes" and "objectness_logits". + """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [self.normalizer(x) for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + features = self.backbone(images.tensor) + + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + elif "targets" in batched_inputs[0]: + log_first_n( + logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 + ) + gt_instances = [x["targets"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) + # In training, the proposals are not useful at all but we generate them anyway. + # This makes RPN-only models about 5% slower. + if self.training: + return proposal_losses + + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + proposals, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"proposals": r}) + return processed_results diff --git a/detectron2/modeling/meta_arch/retinanet.py b/detectron2/modeling/meta_arch/retinanet.py new file mode 100644 index 0000000000..2e9a1590ea --- /dev/null +++ b/detectron2/modeling/meta_arch/retinanet.py @@ -0,0 +1,430 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import math +from typing import List +import torch +from fvcore.nn import sigmoid_focal_loss_jit, smooth_l1_loss +from torch import nn + +from detectron2.layers import ShapeSpec, batched_nms, cat +from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou +from detectron2.utils.logger import log_first_n + +from ..anchor_generator import build_anchor_generator +from ..backbone import build_backbone +from ..box_regression import Box2BoxTransform +from ..matcher import Matcher +from ..postprocessing import detector_postprocess +from .build import META_ARCH_REGISTRY + +__all__ = ["RetinaNet"] + + +def permute_to_N_HWA_K(tensor, K): + """ + Transpose/reshape a tensor from (N, (A x K), H, W) to (N, (HxWxA), K) + """ + assert tensor.dim() == 4, tensor.shape + N, _, H, W = tensor.shape + tensor = tensor.view(N, -1, K, H, W) + tensor = tensor.permute(0, 3, 4, 1, 2) + tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K) + return tensor + + +def permute_all_cls_and_box_to_N_HWA_K_and_concat(box_cls, box_delta, num_classes=80): + """ + Rearrange the tensor layout from the network output, i.e.: + list[Tensor]: #lvl tensors of shape (N, A x K, Hi, Wi) + to per-image predictions, i.e.: + Tensor: of shape (N x sum(Hi x Wi x A), K) + """ + # for each feature level, permute the outputs to make them be in the + # same format as the labels. Note that the labels are computed for + # all feature levels concatenated, so we keep the same representation + # for the objectness and the box_delta + box_cls_flattened = [permute_to_N_HWA_K(x, num_classes) for x in box_cls] + box_delta_flattened = [permute_to_N_HWA_K(x, 4) for x in box_delta] + # concatenate on the first dimension (representing the feature levels), to + # take into account the way the labels were generated (with all feature maps + # being concatenated as well) + box_cls = cat(box_cls_flattened, dim=1).view(-1, num_classes) + box_delta = cat(box_delta_flattened, dim=1).view(-1, 4) + return box_cls, box_delta + + +@META_ARCH_REGISTRY.register() +class RetinaNet(nn.Module): + """ + Implement RetinaNet (https://arxiv.org/abs/1708.02002). 
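+
+    At a high level (summarizing the code below): the backbone features listed in
+    MODEL.RETINANET.IN_FEATURES are fed to a :class:`RetinaNetHead` which predicts,
+    per feature level, class logits of shape (N, A*K, Hi, Wi) and box deltas of shape
+    (N, A*4, Hi, Wi), where A is the number of anchors per location and K the number
+    of classes. During training the anchors are matched to ground truth and scored
+    with a sigmoid focal loss plus a smooth L1 regression loss; during inference the
+    deltas are decoded, thresholded and merged with NMS.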
+ """ + + def __init__(self, cfg): + super().__init__() + + self.device = torch.device(cfg.MODEL.DEVICE) + + # fmt: off + self.num_classes = cfg.MODEL.RETINANET.NUM_CLASSES + self.in_features = cfg.MODEL.RETINANET.IN_FEATURES + # Loss parameters: + self.focal_loss_alpha = cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA + self.focal_loss_gamma = cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA + self.smooth_l1_loss_beta = cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA + # Inference parameters: + self.score_threshold = cfg.MODEL.RETINANET.SCORE_THRESH_TEST + self.topk_candidates = cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST + self.nms_threshold = cfg.MODEL.RETINANET.NMS_THRESH_TEST + self.max_detections_per_image = cfg.TEST.DETECTIONS_PER_IMAGE + # fmt: on + + self.backbone = build_backbone(cfg) + + backbone_shape = self.backbone.output_shape() + feature_shapes = [backbone_shape[f] for f in self.in_features] + self.head = RetinaNetHead(cfg, feature_shapes) + self.anchor_generator = build_anchor_generator(cfg, feature_shapes) + + # Matching and loss + self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS) + self.matcher = Matcher( + cfg.MODEL.RETINANET.IOU_THRESHOLDS, + cfg.MODEL.RETINANET.IOU_LABELS, + allow_low_quality_matches=True, + ) + + pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1) + pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1) + self.normalizer = lambda x: (x - pixel_mean) / pixel_std + self.to(self.device) + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper` . + Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + + * image: Tensor, image in (C, H, W) format. + * instances: Instances + + Other information that's included in the original dicts, such as: + + * "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + Returns: + dict[str: Tensor]: + mapping from a named loss to a tensor storing the loss. Used during training only. + """ + images = self.preprocess_image(batched_inputs) + if "instances" in batched_inputs[0]: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + elif "targets" in batched_inputs[0]: + log_first_n( + logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10 + ) + gt_instances = [x["targets"].to(self.device) for x in batched_inputs] + else: + gt_instances = None + + features = self.backbone(images.tensor) + features = [features[f] for f in self.in_features] + box_cls, box_delta = self.head(features) + anchors = self.anchor_generator(features) + + if self.training: + gt_classes, gt_anchors_reg_deltas = self.get_ground_truth(anchors, gt_instances) + return self.losses(gt_classes, gt_anchors_reg_deltas, box_cls, box_delta) + else: + results = self.inference(box_cls, box_delta, anchors, images) + processed_results = [] + for results_per_image, input_per_image, image_size in zip( + results, batched_inputs, images.image_sizes + ): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"instances": r}) + return processed_results + + def losses(self, gt_classes, gt_anchors_deltas, pred_class_logits, pred_anchor_deltas): + """ + Args: + For `gt_classes` and `gt_anchors_deltas` parameters, see + :meth:`RetinaNet.get_ground_truth`. 
+ Their shapes are (N, R) and (N, R, 4), respectively, where R is + the total number of anchors across levels, i.e. sum(Hi x Wi x A) + For `pred_class_logits` and `pred_anchor_deltas`, see + :meth:`RetinaNetHead.forward`. + + Returns: + dict[str: Tensor]: + mapping from a named loss to a scalar tensor + storing the loss. Used during training only. The dict keys are: + "loss_cls" and "loss_box_reg" + """ + pred_class_logits, pred_anchor_deltas = permute_all_cls_and_box_to_N_HWA_K_and_concat( + pred_class_logits, pred_anchor_deltas, self.num_classes + ) # Shapes: (N x R, K) and (N x R, 4), respectively. + + gt_classes = gt_classes.flatten() + gt_anchors_deltas = gt_anchors_deltas.view(-1, 4) + + valid_idxs = gt_classes >= 0 + foreground_idxs = (gt_classes >= 0) & (gt_classes != self.num_classes) + num_foreground = foreground_idxs.sum() + + gt_classes_target = torch.zeros_like(pred_class_logits) + gt_classes_target[foreground_idxs, gt_classes[foreground_idxs]] = 1 + + # logits loss + loss_cls = sigmoid_focal_loss_jit( + pred_class_logits[valid_idxs], + gt_classes_target[valid_idxs], + alpha=self.focal_loss_alpha, + gamma=self.focal_loss_gamma, + reduction="sum", + ) / max(1, num_foreground) + + # regression loss + loss_box_reg = smooth_l1_loss( + pred_anchor_deltas[foreground_idxs], + gt_anchors_deltas[foreground_idxs], + beta=self.smooth_l1_loss_beta, + reduction="sum", + ) / max(1, num_foreground) + + return {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg} + + @torch.no_grad() + def get_ground_truth(self, anchors, targets): + """ + Args: + anchors (list[list[Boxes]]): a list of N=#image elements. Each is a + list of #feature level Boxes. The Boxes contains anchors of + this image on the specific feature level. + targets (list[Instances]): a list of N `Instances`s. The i-th + `Instances` contains the ground-truth per-instance annotations + for the i-th input image. Specify `targets` during training only. + + Returns: + gt_classes (Tensor): + An integer tensor of shape (N, R) storing ground-truth + labels for each anchor. + R is the total number of anchors, i.e. the sum of Hi x Wi x A for all levels. + Anchors with an IoU with some target higher than the foreground threshold + are assigned their corresponding label in the [0, K-1] range. + Anchors whose IoU are below the background threshold are assigned + the label "K". Anchors whose IoU are between the foreground and background + thresholds are assigned a label "-1", i.e. ignore. + gt_anchors_deltas (Tensor): + Shape (N, R, 4). + The last dimension represents ground-truth box2box transform + targets (dx, dy, dw, dh) that map each anchor to its matched ground-truth box. + The values in the tensor are meaningful only when the corresponding + anchor is labeled as foreground. 
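+
+        For example (illustrative values, with num_classes = 80): an entry of 17 in
+        `gt_classes` means the anchor was matched to a ground-truth box of class 17,
+        an entry of 80 means background, and an entry of -1 means the anchor is
+        ignored when computing the loss.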
+ """ + gt_classes = [] + gt_anchors_deltas = [] + anchors = [Boxes.cat(anchors_i) for anchors_i in anchors] + # list[Tensor(R, 4)], one for each image + + for anchors_per_image, targets_per_image in zip(anchors, targets): + match_quality_matrix = pairwise_iou(targets_per_image.gt_boxes, anchors_per_image) + gt_matched_idxs, anchor_labels = self.matcher(match_quality_matrix) + + # ground truth box regression + matched_gt_boxes = targets_per_image[gt_matched_idxs].gt_boxes + gt_anchors_reg_deltas_i = self.box2box_transform.get_deltas( + anchors_per_image.tensor, matched_gt_boxes.tensor + ) + + # ground truth classes + has_gt = len(targets_per_image) > 0 + if has_gt: + gt_classes_i = targets_per_image.gt_classes[gt_matched_idxs] + # Anchors with label 0 are treated as background. + gt_classes_i[anchor_labels == 0] = self.num_classes + # Anchors with label -1 are ignored. + gt_classes_i[anchor_labels == -1] = -1 + else: + gt_classes_i = torch.zeros_like(gt_matched_idxs) + self.num_classes + + gt_classes.append(gt_classes_i) + gt_anchors_deltas.append(gt_anchors_reg_deltas_i) + + return torch.stack(gt_classes), torch.stack(gt_anchors_deltas) + + def inference(self, box_cls, box_delta, anchors, images): + """ + Arguments: + box_cls, box_delta: Same as the output of :meth:`RetinaNetHead.forward` + anchors (list[list[Boxes]]): a list of #images elements. Each is a + list of #feature level Boxes. The Boxes contain anchors of this + image on the specific feature level. + images (ImageList): the input images + + Returns: + results (List[Instances]): a list of #images elements. + """ + assert len(anchors) == len(images) + results = [] + + box_cls = [permute_to_N_HWA_K(x, self.num_classes) for x in box_cls] + box_delta = [permute_to_N_HWA_K(x, 4) for x in box_delta] + # list[Tensor], one per level, each has shape (N, Hi x Wi x A, K or 4) + + for img_idx, anchors_per_image in enumerate(anchors): + image_size = images.image_sizes[img_idx] + box_cls_per_image = [box_cls_per_level[img_idx] for box_cls_per_level in box_cls] + box_reg_per_image = [box_reg_per_level[img_idx] for box_reg_per_level in box_delta] + results_per_image = self.inference_single_image( + box_cls_per_image, box_reg_per_image, anchors_per_image, tuple(image_size) + ) + results.append(results_per_image) + return results + + def inference_single_image(self, box_cls, box_delta, anchors, image_size): + """ + Single-image inference. Return bounding-box detection results by thresholding + on scores and applying non-maximum suppression (NMS). + + Arguments: + box_cls (list[Tensor]): list of #feature levels. Each entry contains + tensor of size (H x W x A, K) + box_delta (list[Tensor]): Same shape as 'box_cls' except that K becomes 4. + anchors (list[Boxes]): list of #feature levels. Each entry contains + a Boxes object, which contains all the anchors for that + image in that feature level. + image_size (tuple(H, W)): a tuple of the image height and width. + + Returns: + Same as `inference`, but for only one image. + """ + boxes_all = [] + scores_all = [] + class_idxs_all = [] + + # Iterate over every feature level + for box_cls_i, box_reg_i, anchors_i in zip(box_cls, box_delta, anchors): + # (HxWxAxK,) + box_cls_i = box_cls_i.flatten().sigmoid_() + + # Keep top k top scoring indices only. 
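In the per-level inference step above, the (Hi x Wi x A, K) score map is flattened before the top candidates are selected, so each flat index encodes both an anchor and a class; the next hunk recovers them with a floor division and a modulo. A small sketch of that decoding with made-up sizes:

import torch

num_anchors, num_classes, topk = 5, 3, 4
scores = torch.rand(num_anchors, num_classes)   # stand-in for box_cls_i.sigmoid()

flat = scores.flatten()                         # shape (num_anchors * num_classes,)
vals, idxs = flat.sort(descending=True)
vals, idxs = vals[:topk], idxs[:topk]

anchor_idxs = idxs // num_classes               # which anchor each candidate came from
class_idxs = idxs % num_classes                 # which class it was scored for

# sanity check against the unflattened tensor
assert torch.all(scores[anchor_idxs, class_idxs] == vals)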
+ num_topk = min(self.topk_candidates, box_reg_i.size(0)) + # torch.sort is actually faster than .topk (at least on GPUs) + predicted_prob, topk_idxs = box_cls_i.sort(descending=True) + predicted_prob = predicted_prob[:num_topk] + topk_idxs = topk_idxs[:num_topk] + + # filter out the proposals with low confidence score + keep_idxs = predicted_prob > self.score_threshold + predicted_prob = predicted_prob[keep_idxs] + topk_idxs = topk_idxs[keep_idxs] + + anchor_idxs = topk_idxs // self.num_classes + classes_idxs = topk_idxs % self.num_classes + + box_reg_i = box_reg_i[anchor_idxs] + anchors_i = anchors_i[anchor_idxs] + # predict boxes + predicted_boxes = self.box2box_transform.apply_deltas(box_reg_i, anchors_i.tensor) + + boxes_all.append(predicted_boxes) + scores_all.append(predicted_prob) + class_idxs_all.append(classes_idxs) + + boxes_all, scores_all, class_idxs_all = [ + cat(x) for x in [boxes_all, scores_all, class_idxs_all] + ] + keep = batched_nms(boxes_all, scores_all, class_idxs_all, self.nms_threshold) + keep = keep[: self.max_detections_per_image] + + result = Instances(image_size) + result.pred_boxes = Boxes(boxes_all[keep]) + result.scores = scores_all[keep] + result.pred_classes = class_idxs_all[keep] + return result + + def preprocess_image(self, batched_inputs): + """ + Normalize, pad and batch the input images. + """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [self.normalizer(x) for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + return images + + +class RetinaNetHead(nn.Module): + """ + The head used in RetinaNet for object classification and box regression. + It has two subnets for the two tasks, with a common structure but separate parameters. + """ + + def __init__(self, cfg, input_shape: List[ShapeSpec]): + super().__init__() + # fmt: off + in_channels = input_shape[0].channels + num_classes = cfg.MODEL.RETINANET.NUM_CLASSES + num_convs = cfg.MODEL.RETINANET.NUM_CONVS + prior_prob = cfg.MODEL.RETINANET.PRIOR_PROB + num_anchors = build_anchor_generator(cfg, input_shape).num_cell_anchors + # fmt: on + assert ( + len(set(num_anchors)) == 1 + ), "Using different number of anchors between levels is not currently supported!" + num_anchors = num_anchors[0] + + cls_subnet = [] + bbox_subnet = [] + for _ in range(num_convs): + cls_subnet.append( + nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + ) + cls_subnet.append(nn.ReLU()) + bbox_subnet.append( + nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + ) + bbox_subnet.append(nn.ReLU()) + + self.cls_subnet = nn.Sequential(*cls_subnet) + self.bbox_subnet = nn.Sequential(*bbox_subnet) + self.cls_score = nn.Conv2d( + in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1 + ) + self.bbox_pred = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1) + + # Initialization + for modules in [self.cls_subnet, self.bbox_subnet, self.cls_score, self.bbox_pred]: + for layer in modules.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, mean=0, std=0.01) + torch.nn.init.constant_(layer.bias, 0) + + # Use prior in model initialization to improve stability + bias_value = -math.log((1 - prior_prob) / prior_prob) + torch.nn.init.constant_(self.cls_score.bias, bias_value) + + def forward(self, features): + """ + Arguments: + features (list[Tensor]): FPN feature map tensors in high to low resolution. 
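The classification bias in the head above is initialized from `PRIOR_PROB` so that, at the start of training, every anchor predicts a small foreground probability and the focal loss is not dominated by the huge number of background anchors (the trick from the RetinaNet paper). A quick numeric check of the formula, assuming the common prior of 0.01:

import math
import torch

prior_prob = 0.01
bias = -math.log((1 - prior_prob) / prior_prob)           # ~ -4.595

# a zero-weight conv with this bias outputs sigmoid(bias) ~ prior_prob everywhere
print(bias, torch.sigmoid(torch.tensor(bias)).item())     # -4.5951..., ~0.01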
+ Each tensor in the list correspond to different feature levels. + + Returns: + logits (list[Tensor]): #lvl tensors, each has shape (N, AxK, Hi, Wi). + The tensor predicts the classification probability + at each spatial position for each of the A anchors and K object + classes. + bbox_reg (list[Tensor]): #lvl tensors, each has shape (N, Ax4, Hi, Wi). + The tensor predicts 4-vector (dx,dy,dw,dh) box + regression values for every anchor. These values are the + relative offset between the anchor and the ground truth box. + """ + logits = [] + bbox_reg = [] + for feature in features: + logits.append(self.cls_score(self.cls_subnet(feature))) + bbox_reg.append(self.bbox_pred(self.bbox_subnet(feature))) + return logits, bbox_reg diff --git a/detectron2/modeling/meta_arch/semantic_seg.py b/detectron2/modeling/meta_arch/semantic_seg.py new file mode 100644 index 0000000000..722a18f550 --- /dev/null +++ b/detectron2/modeling/meta_arch/semantic_seg.py @@ -0,0 +1,170 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +from typing import Dict +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import Conv2d, ShapeSpec +from detectron2.structures import ImageList +from detectron2.utils.registry import Registry + +from ..backbone import build_backbone +from ..postprocessing import sem_seg_postprocess +from .build import META_ARCH_REGISTRY + +__all__ = ["SemanticSegmentor", "SEM_SEG_HEADS_REGISTRY", "SemSegFPNHead", "build_sem_seg_head"] + + +SEM_SEG_HEADS_REGISTRY = Registry("SEM_SEG_HEADS") +""" +Registry for semantic segmentation heads, which make semantic segmentation predictions +from feature maps. +""" + + +@META_ARCH_REGISTRY.register() +class SemanticSegmentor(nn.Module): + """ + Main class for semantic segmentation architectures. + """ + + def __init__(self, cfg): + super().__init__() + + self.device = torch.device(cfg.MODEL.DEVICE) + + self.backbone = build_backbone(cfg) + self.sem_seg_head = build_sem_seg_head(cfg, self.backbone.output_shape()) + + pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(-1, 1, 1) + pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(-1, 1, 1) + self.normalizer = lambda x: (x - pixel_mean) / pixel_std + + self.to(self.device) + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper` . + Each item in the list contains the inputs for one image. + + For now, each item in the list is a dict that contains: + image: Tensor, image in (C, H, W) format. + sem_seg: semantic segmentation ground truth + Other information that's included in the original dicts, such as: + "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + + Returns: + list[dict]: Each dict is the output for one input image. + The dict contains one key "sem_seg" whose value is a + Tensor of the output resolution that represents the + per-pixel segmentation prediction. 
+ """ + images = [x["image"].to(self.device) for x in batched_inputs] + images = [self.normalizer(x) for x in images] + images = ImageList.from_tensors(images, self.backbone.size_divisibility) + + features = self.backbone(images.tensor) + + if "sem_seg" in batched_inputs[0]: + targets = [x["sem_seg"].to(self.device) for x in batched_inputs] + targets = ImageList.from_tensors( + targets, self.backbone.size_divisibility, self.sem_seg_head.ignore_value + ).tensor + else: + targets = None + results, losses = self.sem_seg_head(features, targets) + + if self.training: + return losses + + processed_results = [] + for result, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): + height = input_per_image.get("height") + width = input_per_image.get("width") + r = sem_seg_postprocess(result, image_size, height, width) + processed_results.append({"sem_seg": r}) + return processed_results + + +def build_sem_seg_head(cfg, input_shape): + """ + Build a semantic segmentation head from `cfg.MODEL.SEM_SEG_HEAD.NAME`. + """ + name = cfg.MODEL.SEM_SEG_HEAD.NAME + return SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) + + +@SEM_SEG_HEADS_REGISTRY.register() +class SemSegFPNHead(nn.Module): + """ + A semantic segmentation head described in detail in the Panoptic Feature Pyramid Networks paper + (https://arxiv.org/abs/1901.02446). It takes FPN features as input and merges information from + all levels of the FPN into single output. + """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__() + + # fmt: off + self.in_features = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES + feature_strides = {k: v.stride for k, v in input_shape.items()} + feature_channels = {k: v.channels for k, v in input_shape.items()} + self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE + num_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES + conv_dims = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM + self.common_stride = cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE + norm = cfg.MODEL.SEM_SEG_HEAD.NORM + self.loss_weight = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT + # fmt: on + + self.scale_heads = [] + for in_feature in self.in_features: + head_ops = [] + head_length = max( + 1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride)) + ) + for k in range(head_length): + norm_module = nn.GroupNorm(32, conv_dims) if norm == "GN" else None + conv = Conv2d( + feature_channels[in_feature] if k == 0 else conv_dims, + conv_dims, + kernel_size=3, + stride=1, + padding=1, + bias=not norm, + norm=norm_module, + activation=F.relu, + ) + weight_init.c2_msra_fill(conv) + head_ops.append(conv) + if feature_strides[in_feature] != self.common_stride: + head_ops.append( + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False) + ) + self.scale_heads.append(nn.Sequential(*head_ops)) + self.add_module(in_feature, self.scale_heads[-1]) + self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0) + weight_init.c2_msra_fill(self.predictor) + + def forward(self, features, targets=None): + for i, f in enumerate(self.in_features): + if i == 0: + x = self.scale_heads[i](features[f]) + else: + x = x + self.scale_heads[i](features[f]) + x = self.predictor(x) + x = F.interpolate(x, scale_factor=self.common_stride, mode="bilinear", align_corners=False) + + if self.training: + losses = {} + losses["loss_sem_seg"] = ( + F.cross_entropy(x, targets, reduction="mean", ignore_index=self.ignore_value) + * self.loss_weight + ) + return [], losses + else: + return x, {} diff --git 
a/detectron2/modeling/poolers.py b/detectron2/modeling/poolers.py
new file mode 100644
index 0000000000..7978511e20
--- /dev/null
+++ b/detectron2/modeling/poolers.py
@@ -0,0 +1,194 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
+import math
+import sys
+import torch
+from torch import nn
+from torchvision.ops import RoIPool
+
+from detectron2.layers import ROIAlign, ROIAlignRotated, cat
+
+
+def assign_boxes_to_levels(box_lists, min_level, max_level, canonical_box_size, canonical_level):
+    """
+    Map each box in `box_lists` to a feature map level index and return the assignment
+    vector.
+
+    Args:
+        box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes,
+            where N is the number of images in the batch.
+        min_level (int): Smallest feature map level index. The input is considered index 0,
+            the output of stage 1 is index 1, and so on.
+        max_level (int): Largest feature map level index.
+        canonical_box_size (int): A canonical box size in pixels (sqrt(box area)).
+        canonical_level (int): The feature map level index on which a canonically-sized box
+            should be placed.
+
+    Returns:
+        A tensor of length M, where M is the total number of boxes aggregated over all
+            N batch images. The memory layout corresponds to the concatenation of boxes
+            from all images. Each element is the feature map index, as an offset from
+            `self.min_level`, for the corresponding box (so value i means the box is at
+            `self.min_level + i`).
+    """
+    eps = sys.float_info.epsilon
+    box_sizes = torch.sqrt(cat([boxes.area() for boxes in box_lists]))
+    # Eqn.(1) in FPN paper
+    level_assignments = torch.floor(
+        canonical_level + torch.log2(box_sizes / canonical_box_size + eps)
+    )
+    level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level)
+    return level_assignments.to(torch.int64) - min_level
+
+
+def convert_boxes_to_pooler_format(box_lists):
+    """
+    Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops
+    (see description under Returns).
+
+    Args:
+        box_lists (list[Boxes] | list[RotatedBoxes]):
+            A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.
+
+    Returns:
+        When input is list[Boxes]:
+            A tensor of shape (M, 5), where M is the total number of boxes aggregated over all
+            N batch images.
+            The 5 columns are (batch index, x0, y0, x1, y1), where batch index
+            is the index in [0, N) identifying which batch image the box with corners at
+            (x0, y0, x1, y1) comes from.
+        When input is list[RotatedBoxes]:
+            A tensor of shape (M, 6), where M is the total number of boxes aggregated over all
+            N batch images.
+            The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees),
+            where batch index is the index in [0, N) identifying which batch image the
+            rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from.
+    """
+
+    def fmt_box_list(box_tensor, batch_index):
+        repeated_index = torch.full(
+            (len(box_tensor), 1), batch_index, dtype=box_tensor.dtype, device=box_tensor.device
+        )
+        return cat((repeated_index, box_tensor), dim=1)
+
+    pooler_fmt_boxes = cat(
+        [fmt_box_list(box_list.tensor, i) for i, box_list in enumerate(box_lists)], dim=0
+    )
+
+    return pooler_fmt_boxes
+
+
+class ROIPooler(nn.Module):
+    """
+    Region of interest feature map pooler that supports pooling from one or more
+    feature maps.
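`assign_boxes_to_levels` above implements Eqn. (1) of the FPN paper: level = floor(canonical_level + log2(sqrt(box area) / canonical_box_size)), clamped to [min_level, max_level] and returned as an offset from min_level. A worked example, assuming levels 2..5 and the 224-pixel canonical box used by the pooler defaults below:

import math

def fpn_level(box_size, min_level=2, max_level=5, canonical_size=224, canonical_level=4):
    level = math.floor(canonical_level + math.log2(box_size / canonical_size + 1e-12))
    return min(max(level, min_level), max_level) - min_level   # offset from min_level, as above

print(fpn_level(224))   # 2 -> absolute level 4 (a canonical-size box stays on the canonical level)
print(fpn_level(112))   # 1 -> absolute level 3 (half the size drops one level)
print(fpn_level(448))   # 3 -> absolute level 5 (double the size goes up one level)
print(fpn_level(20))    # 0 -> clamped to the lowest level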
+ """ + + def __init__( + self, + output_size, + scales, + sampling_ratio, + pooler_type, + canonical_box_size=224, + canonical_level=4, + ): + """ + Args: + output_size (int, tuple[int] or list[int]): output size of the pooled region, + e.g., 14 x 14. If tuple or list is given, the length must be 2. + scales (list[float]): The scale for each low-level pooling op relative to + the input image. For a feature map with stride s relative to the input + image, scale is defined as a 1 / s. + sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. + pooler_type (string): Name of the type of pooling operation that should be applied. + For instance, "ROIPool" or "ROIAlignV2". + canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default + is heuristically defined as 224 pixels in the FPN paper (based on ImageNet + pre-training). + canonical_level (int): The feature map level index on which a canonically-sized box + should be placed. The default is defined as level 4 in the FPN paper. + """ + super(ROIPooler, self).__init__() + + if isinstance(output_size, int): + output_size = (output_size, output_size) + assert len(output_size) == 2 + assert isinstance(output_size[0], int) and isinstance(output_size[1], int) + self.output_size = output_size + + if pooler_type == "ROIAlign": + self.level_poolers = nn.ModuleList( + ROIAlign( + output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False + ) + for scale in scales + ) + elif pooler_type == "ROIAlignV2": + self.level_poolers = nn.ModuleList( + ROIAlign( + output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True + ) + for scale in scales + ) + elif pooler_type == "ROIPool": + self.level_poolers = nn.ModuleList( + RoIPool(output_size, spatial_scale=scale) for scale in scales + ) + elif pooler_type == "ROIAlignRotated": + self.level_poolers = nn.ModuleList( + ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) + for scale in scales + ) + else: + raise ValueError("Unknown pooler type: {}".format(pooler_type)) + + # Map scale (defined as 1 / stride) to its feature map level under the + # assumption that stride is a power of 2. + min_level = -math.log2(scales[0]) + max_level = -math.log2(scales[-1]) + assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)) + self.min_level = int(min_level) + self.max_level = int(max_level) + assert 0 < self.min_level and self.min_level <= self.max_level + assert self.min_level <= canonical_level and canonical_level <= self.max_level + self.canonical_level = canonical_level + assert canonical_box_size > 0 + self.canonical_box_size = canonical_box_size + + def forward(self, x, box_lists): + """ + Args: + x (list[Tensor]): A list of feature maps with scales matching thosed used to + construct this module. + box_lists (list[Boxes] | list[RotatedBoxes]): + A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. + + Returns: + A tensor of shape (M, C, output_size, output_size) where M is the total number of + boxes aggregated over all N batch images and C is the number of channels in `x`. 
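A hedged usage sketch of the `ROIPooler` defined above; the feature shapes, scales, and box values are assumptions (an FPN-style setup with strides 4 to 32), and it requires detectron2 to be installed and built:

import torch
from detectron2.modeling.poolers import ROIPooler
from detectron2.structures import Boxes

pooler = ROIPooler(
    output_size=7,
    scales=(1 / 4, 1 / 8, 1 / 16, 1 / 32),   # p2..p5 -> min_level=2, max_level=5
    sampling_ratio=0,
    pooler_type="ROIAlignV2",
)

# four feature maps for a single 256x256 image, with a made-up channel dim of 16
feats = [torch.randn(1, 16, 256 // s, 256 // s) for s in (4, 8, 16, 32)]
boxes = [Boxes(torch.tensor([[10.0, 10.0, 80.0, 60.0], [0.0, 0.0, 200.0, 200.0]]))]
out = pooler(feats, boxes)
print(out.shape)   # torch.Size([2, 16, 7, 7]): one 7x7 crop per box, pooled from its assigned level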
+        """
+        num_level_assignments = len(self.level_poolers)
+        pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)
+
+        if num_level_assignments == 1:
+            return self.level_poolers[0](x[0], pooler_fmt_boxes)
+
+        level_assignments = assign_boxes_to_levels(
+            box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level
+        )
+
+        num_boxes = len(pooler_fmt_boxes)
+        num_channels = x[0].shape[1]
+        output_size = self.output_size[0]
+
+        dtype, device = x[0].dtype, x[0].device
+        output = torch.zeros(
+            (num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device
+        )
+
+        for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)):
+            inds = torch.nonzero(level_assignments == level).squeeze(1)
+            pooler_fmt_boxes_level = pooler_fmt_boxes[inds]
+            output[inds] = pooler(x_level, pooler_fmt_boxes_level)
+
+        return output
diff --git a/detectron2/modeling/postprocessing.py b/detectron2/modeling/postprocessing.py
new file mode 100644
index 0000000000..78477bf63c
--- /dev/null
+++ b/detectron2/modeling/postprocessing.py
@@ -0,0 +1,79 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+from torch.nn import functional as F
+
+from detectron2.layers import paste_masks_in_image
+from detectron2.structures import Instances
+
+
+def detector_postprocess(results, output_height, output_width, mask_threshold=0.5):
+    """
+    Resize the output instances.
+    The input images are often resized when entering an object detector.
+    As a result, we often need the outputs of the detector in a different
+    resolution from its inputs.
+
+    This function will resize the raw outputs of an R-CNN detector
+    to produce outputs according to the desired output resolution.
+
+    Args:
+        results (Instances): the raw outputs from the detector.
+            `results.image_size` contains the input image resolution the detector sees.
+            This object might be modified in-place.
+        output_height, output_width: the desired output resolution.
+
+    Returns:
+        Instances: the resized output from the model, based on the output resolution
+    """
+    scale_x, scale_y = (output_width / results.image_size[1], output_height / results.image_size[0])
+    results = Instances((output_height, output_width), **results.get_fields())
+
+    if results.has("pred_boxes"):
+        output_boxes = results.pred_boxes
+    elif results.has("proposal_boxes"):
+        output_boxes = results.proposal_boxes
+
+    output_boxes.tensor[:, 0::2] *= scale_x
+    output_boxes.tensor[:, 1::2] *= scale_y
+    output_boxes.clip(results.image_size)
+
+    results = results[output_boxes.nonempty()]
+
+    if results.has("pred_masks"):
+        results.pred_masks = paste_masks_in_image(
+            results.pred_masks[:, 0, :, :],  # N, 1, M, M
+            results.pred_boxes,
+            results.image_size,
+            threshold=mask_threshold,
+        )
+
+    if results.has("pred_keypoints"):
+        results.pred_keypoints[:, :, 0] *= scale_x
+        results.pred_keypoints[:, :, 1] *= scale_y
+
+    return results
+
+
+def sem_seg_postprocess(result, img_size, output_height, output_width):
+    """
+    Return semantic segmentation predictions in the original resolution.
+
+    The input images are often resized when entering the semantic segmentor. Moreover, in some
+    cases, they are also padded inside the segmentor to be divisible by the maximum network stride.
+    As a result, we often need the predictions of the segmentor in a different
+    resolution from its inputs.
+
+    Args:
+        result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
+            where C is the number of classes, and H, W are the height and width of the prediction.
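`detector_postprocess` above rescales box (and keypoint) coordinates from the resolution the model actually saw to the requested output resolution: the x coordinates (columns 0 and 2 of an XYXY box) are scaled by `scale_x` and the y coordinates by `scale_y`. A tiny standalone illustration with made-up numbers:

import torch

# the model ran at 800x1216 (h, w); the original image was 600x912
in_h, in_w = 800, 1216
out_h, out_w = 600, 912
scale_x, scale_y = out_w / in_w, out_h / in_h

boxes = torch.tensor([[100.0, 200.0, 500.0, 640.0]])   # XYXY in the 800x1216 space
boxes[:, 0::2] *= scale_x                              # x0, x1
boxes[:, 1::2] *= scale_y                              # y0, y1
print(boxes)                                           # tensor([[ 75., 150., 375., 480.]])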
+        img_size (tuple): image size that segmentor is taking as input.
+        output_height, output_width: the desired output resolution.
+
+    Returns:
+        semantic segmentation prediction (Tensor): A tensor of shape
+            (C, output_height, output_width) that contains per-pixel soft predictions.
+    """
+    result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
+    result = F.interpolate(
+        result, size=(output_height, output_width), mode="bilinear", align_corners=False
+    )[0]
+    return result
diff --git a/detectron2/modeling/proposal_generator/__init__.py b/detectron2/modeling/proposal_generator/__init__.py
new file mode 100644
index 0000000000..9eeeb3cb15
--- /dev/null
+++ b/detectron2/modeling/proposal_generator/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator
+from .rpn import RPN_HEAD_REGISTRY, build_rpn_head
diff --git a/detectron2/modeling/proposal_generator/build.py b/detectron2/modeling/proposal_generator/build.py
new file mode 100644
index 0000000000..6d24206c4f
--- /dev/null
+++ b/detectron2/modeling/proposal_generator/build.py
@@ -0,0 +1,21 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+from detectron2.utils.registry import Registry

+PROPOSAL_GENERATOR_REGISTRY = Registry("PROPOSAL_GENERATOR")
+"""
+Registry for proposal generators, which produce object proposals from feature maps.
+"""
+
+from . import rpn, rrpn  # noqa F401 isort:skip
+
+
+def build_proposal_generator(cfg, input_shape):
+    """
+    Build a proposal generator from `cfg.MODEL.PROPOSAL_GENERATOR.NAME`.
+    The name can be "PrecomputedProposals" to use no proposal generator.
+    """
+    name = cfg.MODEL.PROPOSAL_GENERATOR.NAME
+    if name == "PrecomputedProposals":
+        return None
+
+    return PROPOSAL_GENERATOR_REGISTRY.get(name)(cfg, input_shape)
diff --git a/detectron2/modeling/proposal_generator/proposal_utils.py b/detectron2/modeling/proposal_generator/proposal_utils.py
new file mode 100644
index 0000000000..89e585f1e8
--- /dev/null
+++ b/detectron2/modeling/proposal_generator/proposal_utils.py
@@ -0,0 +1,57 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import math
+import torch
+
+from detectron2.structures import Instances
+
+
+def add_ground_truth_to_proposals(gt_boxes, proposals):
+    """
+    Call `add_ground_truth_to_proposals_single_image` for all images.
+
+    Args:
+        gt_boxes (list[Boxes]): list of N elements. Element i is a Boxes
+            representing the ground-truth for image i.
+        proposals (list[Instances]): list of N elements. Element i is an Instances
+            representing the proposals for image i.
+
+    Returns:
+        list[Instances]: list of N Instances. Each is the proposals for the image,
+            with fields "proposal_boxes" and "objectness_logits".
+    """
+    assert gt_boxes is not None
+
+    assert len(proposals) == len(gt_boxes)
+    if len(proposals) == 0:
+        return proposals
+
+    return [
+        add_ground_truth_to_proposals_single_image(gt_boxes_i, proposals_i)
+        for gt_boxes_i, proposals_i in zip(gt_boxes, proposals)
+    ]
+
+
+def add_ground_truth_to_proposals_single_image(gt_boxes, proposals):
+    """
+    Augment `proposals` with ground-truth boxes from `gt_boxes`.
+
+    Args:
+        Same as `add_ground_truth_to_proposals`, but with gt_boxes and proposals
+        per image.
+
+    Returns:
+        Same as `add_ground_truth_to_proposals`, but for only one image.
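`build_proposal_generator` above is a thin lookup into `PROPOSAL_GENERATOR_REGISTRY`, so a config string selects the class to instantiate. A standalone sketch of the same registry pattern, using a fresh registry and a made-up class so it does not touch the real one:

from detectron2.utils.registry import Registry

# illustration only: a separate registry with the same usage pattern
DEMO_REGISTRY = Registry("DEMO_PROPOSAL_GENERATOR")

@DEMO_REGISTRY.register()
class MyProposalGenerator:
    def __init__(self, cfg, input_shape):
        self.cfg = cfg
        self.input_shape = input_shape

def build_demo_proposal_generator(name, cfg, input_shape):
    # mirrors build_proposal_generator: look the class up by name, then instantiate it
    return DEMO_REGISTRY.get(name)(cfg, input_shape)

gen = build_demo_proposal_generator("MyProposalGenerator", cfg=None, input_shape=None)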
+ """ + device = proposals.objectness_logits.device + # Concating gt_boxes with proposals requires them to have the same fields + # Assign all ground-truth boxes an objectness logit corresponding to P(object) \approx 1. + gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10))) + + gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device) + gt_proposal = Instances(proposals.image_size) + + gt_proposal.proposal_boxes = gt_boxes + gt_proposal.objectness_logits = gt_logits + new_proposals = Instances.cat([proposals, gt_proposal]) + + return new_proposals diff --git a/detectron2/modeling/proposal_generator/rpn.py b/detectron2/modeling/proposal_generator/rpn.py new file mode 100644 index 0000000000..bd8df0f2da --- /dev/null +++ b/detectron2/modeling/proposal_generator/rpn.py @@ -0,0 +1,188 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from typing import Dict, List +import torch +import torch.nn.functional as F +from torch import nn + +from detectron2.layers import ShapeSpec +from detectron2.utils.registry import Registry + +from ..anchor_generator import build_anchor_generator +from ..box_regression import Box2BoxTransform +from ..matcher import Matcher +from .build import PROPOSAL_GENERATOR_REGISTRY +from .rpn_outputs import RPNOutputs, find_top_rpn_proposals + +RPN_HEAD_REGISTRY = Registry("RPN_HEAD") +""" +Registry for RPN heads, which take feature maps and perform +objectness classification and bounding box regression for anchors. +""" + + +def build_rpn_head(cfg, input_shape): + """ + Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`. + """ + name = cfg.MODEL.RPN.HEAD_NAME + return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape) + + +@RPN_HEAD_REGISTRY.register() +class StandardRPNHead(nn.Module): + """ + RPN classification and regression heads. Uses a 3x3 conv to produce a shared + hidden state from which one 1x1 conv predicts objectness logits for each anchor + and a second 1x1 conv predicts bounding-box deltas specifying how to deform + each anchor into an object proposal. + """ + + def __init__(self, cfg, input_shape: List[ShapeSpec]): + super().__init__() + + # Standard RPN is shared across levels: + in_channels = [s.channels for s in input_shape] + assert len(set(in_channels)) == 1, "Each level must have the same channel!" + in_channels = in_channels[0] + + # RPNHead should take the same input as anchor generator + # NOTE: it assumes that creating an anchor generator does not have unwanted side effect. 
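The ground-truth proposals added above receive a fixed objectness logit chosen so that its sigmoid is numerically indistinguishable from 1, which effectively ranks gt boxes ahead of ordinary proposals when sorting by objectness. A quick check of that constant:

import math

eps = 1e-10
gt_logit_value = math.log((1.0 - eps) / (1 - (1.0 - eps)))
print(gt_logit_value)                        # ~23.026, i.e. log((1 - 1e-10) / 1e-10)
print(1 / (1 + math.exp(-gt_logit_value)))   # ~1.0, so P(object) is essentially 1 for gt boxes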
+ anchor_generator = build_anchor_generator(cfg, input_shape) + num_cell_anchors = anchor_generator.num_cell_anchors + box_dim = anchor_generator.box_dim + assert ( + len(set(num_cell_anchors)) == 1 + ), "Each level must have the same number of cell anchors" + num_cell_anchors = num_cell_anchors[0] + + # 3x3 conv for the hidden representation + self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) + # 1x1 conv for predicting objectness logits + self.objectness_logits = nn.Conv2d(in_channels, num_cell_anchors, kernel_size=1, stride=1) + # 1x1 conv for predicting box2box transform deltas + self.anchor_deltas = nn.Conv2d( + in_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1 + ) + + for l in [self.conv, self.objectness_logits, self.anchor_deltas]: + nn.init.normal_(l.weight, std=0.01) + nn.init.constant_(l.bias, 0) + + def forward(self, features): + """ + Args: + features (list[Tensor]): list of feature maps + """ + pred_objectness_logits = [] + pred_anchor_deltas = [] + for x in features: + t = F.relu(self.conv(x)) + pred_objectness_logits.append(self.objectness_logits(t)) + pred_anchor_deltas.append(self.anchor_deltas(t)) + return pred_objectness_logits, pred_anchor_deltas + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class RPN(nn.Module): + """ + Region Proposal Network, introduced by the Faster R-CNN paper. + """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__() + + # fmt: off + self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE + self.in_features = cfg.MODEL.RPN.IN_FEATURES + self.nms_thresh = cfg.MODEL.RPN.NMS_THRESH + self.batch_size_per_image = cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE + self.positive_fraction = cfg.MODEL.RPN.POSITIVE_FRACTION + self.smooth_l1_beta = cfg.MODEL.RPN.SMOOTH_L1_BETA + self.loss_weight = cfg.MODEL.RPN.LOSS_WEIGHT + # fmt: on + + # Map from self.training state to train/test settings + self.pre_nms_topk = { + True: cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN, + False: cfg.MODEL.RPN.PRE_NMS_TOPK_TEST, + } + self.post_nms_topk = { + True: cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN, + False: cfg.MODEL.RPN.POST_NMS_TOPK_TEST, + } + self.boundary_threshold = cfg.MODEL.RPN.BOUNDARY_THRESH + + self.anchor_generator = build_anchor_generator( + cfg, [input_shape[f] for f in self.in_features] + ) + self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS) + self.anchor_matcher = Matcher( + cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True + ) + self.rpn_head = build_rpn_head(cfg, [input_shape[f] for f in self.in_features]) + + def forward(self, images, features, gt_instances=None): + """ + Args: + images (ImageList): input images of length `N` + features (dict[str: Tensor]): input data as a mapping from feature + map name to tensor. Axis 0 represents the number of images `N` in + the input data; axes 1-3 are channels, height, and width, which may + vary between feature maps (e.g., if a feature pyramid is used). + gt_instances (list[Instances], optional): a length `N` list of `Instances`s. + Each `Instances` stores ground-truth instances for the corresponding image. 
+ + Returns: + proposals: list[Instances] or None + loss: dict[Tensor] + """ + gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None + del gt_instances + features = [features[f] for f in self.in_features] + pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features) + anchors = self.anchor_generator(features) + # TODO: The anchors only depend on the feature map shape; there's probably + # an opportunity for some optimizations (e.g., caching anchors). + outputs = RPNOutputs( + self.box2box_transform, + self.anchor_matcher, + self.batch_size_per_image, + self.positive_fraction, + images, + pred_objectness_logits, + pred_anchor_deltas, + anchors, + self.boundary_threshold, + gt_boxes, + self.smooth_l1_beta, + ) + + if self.training: + losses = {k: v * self.loss_weight for k, v in outputs.losses().items()} + else: + losses = {} + + with torch.no_grad(): + # Find the top proposals by applying NMS and removing boxes that + # are too small. The proposals are treated as fixed for approximate + # joint training with roi heads. This approach ignores the derivative + # w.r.t. the proposal boxes’ coordinates that are also network + # responses, so is approximate. + proposals = find_top_rpn_proposals( + outputs.predict_proposals(), + outputs.predict_objectness_logits(), + images, + self.nms_thresh, + self.pre_nms_topk[self.training], + self.post_nms_topk[self.training], + self.min_box_side_len, + self.training, + ) + # For RPN-only models, the proposals are the final output and we return them in + # high-to-low confidence order. + # For end-to-end models, the RPN proposals are an intermediate state + # and this sorting is actually not needed. But the cost is negligible. + inds = [p.objectness_logits.sort(descending=True)[1] for p in proposals] + proposals = [p[ind] for p, ind in zip(proposals, inds)] + + return proposals, losses diff --git a/detectron2/modeling/proposal_generator/rpn_outputs.py b/detectron2/modeling/proposal_generator/rpn_outputs.py new file mode 100644 index 0000000000..2d128177ab --- /dev/null +++ b/detectron2/modeling/proposal_generator/rpn_outputs.py @@ -0,0 +1,436 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import itertools +import logging +import numpy as np +import torch +import torch.nn.functional as F +from fvcore.nn import smooth_l1_loss + +from detectron2.layers import batched_nms, cat +from detectron2.structures import Boxes, Instances, pairwise_iou +from detectron2.utils.events import get_event_storage + +from ..sampling import subsample_labels + +logger = logging.getLogger(__name__) + +# TODO: comments for future refactoring of this module +# +# From @rbg: +# This code involves a significant amount of tensor reshaping and permuting. Look for +# ways to simplify this. + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + L: number of feature maps per image on which RPN is run + A: number of cell anchors (must be the same for all feature maps) + Hi, Wi: height and width of the i-th feature map + 4: size of the box parameterization + +Naming convention: + + objectness: refers to the binary classification of an anchor as object vs. not + object. + + deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box + transform (see :class:`box_regression.Box2BoxTransform`). + + pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use + sigmoid(pred_objectness_logits) to estimate P(object). 
+ + gt_objectness_logits: ground-truth binary classification labels for objectness + + pred_anchor_deltas: predicted box2box transform deltas + + gt_anchor_deltas: ground-truth box2box transform deltas +""" + + +def find_top_rpn_proposals( + proposals, + pred_objectness_logits, + images, + nms_thresh, + pre_nms_topk, + post_nms_topk, + min_box_side_len, + training, +): + """ + For each feature map, select the `pre_nms_topk` highest scoring proposals, + apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` + highest scoring proposals among all the feature maps if `training` is True, + otherwise, returns the highest `post_nms_topk` scoring proposals for each + feature map. + + Args: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4). + All proposal predictions on the feature maps. + pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A). + images (ImageList): Input images as an :class:`ImageList`. + nms_thresh (float): IoU threshold to use for NMS + pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. + When RPN is run on multiple feature maps (as in FPN) this number is per + feature map. + post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. + When RPN is run on multiple feature maps (as in FPN) this number is total, + over all feature maps. + min_box_side_len (float): minimum proposal box side length in pixels (absolute units + wrt input images). + training (bool): True if proposals are to be used in training, otherwise False. + This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." + comment. + + Returns: + proposals (list[Instances]): list of N Instances. The i-th Instances + stores post_nms_topk object proposals for image i. + """ + image_sizes = images.image_sizes # in (h, w) order + num_images = len(image_sizes) + device = proposals[0].device + + # 1. Select top-k anchor for every level and every image + topk_scores = [] # #lvl Tensor, each of shape N x topk + topk_proposals = [] + level_ids = [] # #lvl Tensor, each of shape (topk,) + batch_idx = torch.arange(num_images, device=device) + for level_id, proposals_i, logits_i in zip( + itertools.count(), proposals, pred_objectness_logits + ): + Hi_Wi_A = logits_i.shape[1] + num_proposals_i = min(pre_nms_topk, Hi_Wi_A) + + # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812) + # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) + logits_i, idx = logits_i.sort(descending=True, dim=1) + topk_scores_i = logits_i[batch_idx, :num_proposals_i] + topk_idx = idx[batch_idx, :num_proposals_i] + + # each is N x topk + topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 + + topk_proposals.append(topk_proposals_i) + topk_scores.append(topk_scores_i) + level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) + + # 2. Concat all levels together + topk_scores = cat(topk_scores, dim=1) + topk_proposals = cat(topk_proposals, dim=1) + level_ids = cat(level_ids, dim=0) + + # 3. For each image, run a per-level NMS, and choose topk results. 
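Step 3 (continued in the next hunk) runs NMS per image but passes the level ids in as categories, so proposals from different feature maps never suppress each other. One common way to implement such category-aware NMS (shown here as a sketch, not necessarily the exact implementation of the `batched_nms` used in this patch) is to offset each category's boxes so boxes from different categories can never overlap:

import torch
from torchvision.ops import nms

def batched_nms_sketch(boxes, scores, idxs, iou_threshold):
    # Offset every box by a per-category constant (here the category is the
    # feature level), then run plain NMS on the shifted boxes.
    if boxes.numel() == 0:
        return torch.empty((0,), dtype=torch.int64)
    max_coord = boxes.max()
    offsets = idxs.to(boxes) * (max_coord + 1)
    return nms(boxes + offsets[:, None], scores, iou_threshold)

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]])
scores = torch.tensor([0.9, 0.8])
print(batched_nms_sketch(boxes, scores, torch.tensor([0, 0]), 0.5))  # tensor([0]): overlapping, same level
print(batched_nms_sketch(boxes, scores, torch.tensor([0, 1]), 0.5))  # tensor([0, 1]): different levels, both kept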
+ results = [] + for n, image_size in enumerate(image_sizes): + boxes = Boxes(topk_proposals[n]) + scores_per_img = topk_scores[n] + boxes.clip(image_size) + + # filter empty boxes + keep = boxes.nonempty(threshold=min_box_side_len) + lvl = level_ids + if keep.sum().item() != len(boxes): + boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], level_ids[keep] + + keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh) + # In Detectron1, there was different behavior during training vs. testing. + # (https://github.com/facebookresearch/Detectron/issues/459) + # During training, topk is over the proposals from *all* images in the training batch. + # During testing, it is over the proposals for each image separately. + # As a result, the training behavior becomes batch-dependent, + # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size. + # This bug is addressed in Detectron2 to make the behavior independent of batch size. + keep = keep[:post_nms_topk] + + res = Instances(image_size) + res.proposal_boxes = boxes[keep] + res.objectness_logits = scores_per_img[keep] + results.append(res) + return results + + +def rpn_losses( + gt_objectness_logits, + gt_anchor_deltas, + pred_objectness_logits, + pred_anchor_deltas, + smooth_l1_beta, +): + """ + Args: + gt_objectness_logits (Tensor): shape (N,), each element in {-1, 0, 1} representing + ground-truth objectness labels with: -1 = ignore; 0 = not object; 1 = object. + gt_anchor_deltas (Tensor): shape (N, box_dim), row i represents ground-truth + box2box transform targets (dx, dy, dw, dh) or (dx, dy, dw, dh, da) that map anchor i to + its matched ground-truth box. + pred_objectness_logits (Tensor): shape (N,), each element is a predicted objectness + logit. + pred_anchor_deltas (Tensor): shape (N, box_dim), each row is a predicted box2box + transform (dx, dy, dw, dh) or (dx, dy, dw, dh, da) + smooth_l1_beta (float): The transition point between L1 and L2 loss in + the smooth L1 loss function. When set to 0, the loss becomes L1. When + set to +inf, the loss becomes constant 0. + + Returns: + objectness_loss, localization_loss, both unnormalized (summed over samples). + """ + pos_masks = gt_objectness_logits == 1 + localization_loss = smooth_l1_loss( + pred_anchor_deltas[pos_masks], gt_anchor_deltas[pos_masks], smooth_l1_beta, reduction="sum" + ) + + valid_masks = gt_objectness_logits >= 0 + objectness_loss = F.binary_cross_entropy_with_logits( + pred_objectness_logits[valid_masks], + gt_objectness_logits[valid_masks].to(torch.float32), + reduction="sum", + ) + return objectness_loss, localization_loss + + +class RPNOutputs(object): + def __init__( + self, + box2box_transform, + anchor_matcher, + batch_size_per_image, + positive_fraction, + images, + pred_objectness_logits, + pred_anchor_deltas, + anchors, + boundary_threshold=0, + gt_boxes=None, + smooth_l1_beta=0.0, + ): + """ + Args: + box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for + anchor-proposal tranformations. + anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to + ground-truth boxes; used to determine training labels. + batch_size_per_image (int): number of proposals to sample when training + positive_fraction (float): target fraction of sampled proposals that should be positive + images (ImageList): :class:`ImageList` instance representing N input images + pred_objectness_logits (list[Tensor]): A list of L elements. 
+ Element i is a tensor of shape (N, A, Hi, Wi) representing + the predicted objectness logits for anchors. + pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape + (N, A*4, Hi, Wi) representing the predicted "deltas" used to transform anchors + to proposals. + anchors (list[list[Boxes]]): A list of N elements. Each element is a list of L + Boxes. The Boxes at (n, l) stores the entire anchor array for feature map l in image + n (i.e. the cell anchors repeated over all locations in feature map (n, l)). + boundary_threshold (int): if >= 0, then anchors that extend beyond the image + boundary by more than boundary_thresh are not used in training. Set to a very large + number or < 0 to disable this behavior. Only needed in training. + gt_boxes (list[Boxes], optional): A list of N elements. Element i a Boxes storing + the ground-truth ("gt") boxes for image i. + smooth_l1_beta (float): The transition point between L1 and L2 loss in + the smooth L1 loss function. When set to 0, the loss becomes L1. When + set to +inf, the loss becomes constant 0. + """ + self.box2box_transform = box2box_transform + self.anchor_matcher = anchor_matcher + self.batch_size_per_image = batch_size_per_image + self.positive_fraction = positive_fraction + self.pred_objectness_logits = pred_objectness_logits + self.pred_anchor_deltas = pred_anchor_deltas + + self.anchors = anchors + self.gt_boxes = gt_boxes + self.num_feature_maps = len(pred_objectness_logits) + self.num_images = len(images) + self.image_sizes = images.image_sizes + self.boundary_threshold = boundary_threshold + self.smooth_l1_beta = smooth_l1_beta + + def _get_ground_truth(self): + """ + Returns: + gt_objectness_logits: list of N tensors. Tensor i is a vector whose length is the + total number of anchors in image i (i.e., len(anchors[i])). Label values are + in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative class; 1 = positive class. + gt_anchor_deltas: list of N tensors. Tensor i has shape (len(anchors[i]), 4). 
+ """ + gt_objectness_logits = [] + gt_anchor_deltas = [] + # Concatenate anchors from all feature maps into a single Boxes per image + anchors = [Boxes.cat(anchors_i) for anchors_i in self.anchors] + for image_size_i, anchors_i, gt_boxes_i in zip(self.image_sizes, anchors, self.gt_boxes): + """ + image_size_i: (h, w) for the i-th image + anchors_i: anchors for i-th image + gt_boxes_i: ground-truth boxes for i-th image + """ + match_quality_matrix = pairwise_iou(gt_boxes_i, anchors_i) + matched_idxs, gt_objectness_logits_i = self.anchor_matcher(match_quality_matrix) + + if self.boundary_threshold >= 0: + # Discard anchors that go out of the boundaries of the image + # NOTE: This is legacy functionality that is turned off by default in Detectron2 + anchors_inside_image = anchors_i.inside_box(image_size_i, self.boundary_threshold) + gt_objectness_logits_i[~anchors_inside_image] = -1 + + if len(gt_boxes_i) == 0: + # These values won't be used anyway since the anchor is labeled as background + gt_anchor_deltas_i = torch.zeros_like(anchors_i.tensor) + else: + # TODO wasted computation for ignored boxes + matched_gt_boxes = gt_boxes_i[matched_idxs] + gt_anchor_deltas_i = self.box2box_transform.get_deltas( + anchors_i.tensor, matched_gt_boxes.tensor + ) + + gt_objectness_logits.append(gt_objectness_logits_i) + gt_anchor_deltas.append(gt_anchor_deltas_i) + + return gt_objectness_logits, gt_anchor_deltas + + def losses(self): + """ + Return the losses from a set of RPN predictions and their associated ground-truth. + + Returns: + dict[loss name -> loss value]: A dict mapping from loss name to loss value. + Loss names are: `loss_rpn_cls` for objectness classification and + `loss_rpn_loc` for proposal localization. + """ + + def resample(label): + """ + Randomly sample a subset of positive and negative examples by overwritting + the label vector to the ignore value (-1) for all elements that are not + included in the sample. + """ + pos_idx, neg_idx = subsample_labels( + label, self.batch_size_per_image, self.positive_fraction, 0 + ) + # Fill with the ignore label (-1), then set positive and negative labels + label.fill_(-1) + label.scatter_(0, pos_idx, 1) + label.scatter_(0, neg_idx, 0) + return label + + gt_objectness_logits, gt_anchor_deltas = self._get_ground_truth() + """ + gt_objectness_logits: list of N tensors. Tensor i is a vector whose length is the + total number of anchors in image i (i.e., len(anchors[i])) + gt_anchor_deltas: list of N tensors. Tensor i has shape (len(anchors[i]), B), + where B is the box dimension + """ + # Collect all objectness labels and delta targets over feature maps and images + # The final ordering is L, N, H, W, A from slowest to fastest axis. 
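The `resample` helper above relies on `subsample_labels` to keep at most `batch_size_per_image` anchors per image, with roughly `positive_fraction` of them positive and everything not sampled reset to the ignore label. A standalone sketch of that sampling logic, under the assumption that it behaves as described here:

import torch

def subsample_labels_sketch(labels, num_samples=256, positive_fraction=0.5):
    # labels: 1 = positive, 0 = negative, -1 = ignore (same convention as above)
    positive = torch.nonzero(labels == 1).squeeze(1)
    negative = torch.nonzero(labels == 0).squeeze(1)

    num_pos = min(positive.numel(), int(num_samples * positive_fraction))
    num_neg = min(negative.numel(), num_samples - num_pos)

    pos_idx = positive[torch.randperm(positive.numel())[:num_pos]]
    neg_idx = negative[torch.randperm(negative.numel())[:num_neg]]

    out = torch.full_like(labels, -1)   # everything not sampled is ignored
    out[pos_idx] = 1
    out[neg_idx] = 0
    return out

labels = torch.tensor([1] * 10 + [0] * 1000 + [-1] * 5)
sampled = subsample_labels_sketch(labels)
print((sampled == 1).sum().item(), (sampled == 0).sum().item())   # 10 positives, 246 negatives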
+ num_anchors_per_map = [np.prod(x.shape[1:]) for x in self.pred_objectness_logits] + num_anchors_per_image = sum(num_anchors_per_map) + + # Stack to: (N, num_anchors_per_image) + gt_objectness_logits = torch.stack( + [resample(label) for label in gt_objectness_logits], dim=0 + ) + + # Log the number of positive/negative anchors per-image that's used in training + num_pos_anchors = (gt_objectness_logits == 1).sum().item() + num_neg_anchors = (gt_objectness_logits == 0).sum().item() + storage = get_event_storage() + storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / self.num_images) + storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / self.num_images) + + assert gt_objectness_logits.shape[1] == num_anchors_per_image + # Split to tuple of L tensors, each with shape (N, num_anchors_per_map) + gt_objectness_logits = torch.split(gt_objectness_logits, num_anchors_per_map, dim=1) + # Concat from all feature maps + gt_objectness_logits = cat([x.flatten() for x in gt_objectness_logits], dim=0) + + # Stack to: (N, num_anchors_per_image, B) + gt_anchor_deltas = torch.stack(gt_anchor_deltas, dim=0) + assert gt_anchor_deltas.shape[1] == num_anchors_per_image + B = gt_anchor_deltas.shape[2] # box dimension (4 or 5) + + # Split to tuple of L tensors, each with shape (N, num_anchors_per_image) + gt_anchor_deltas = torch.split(gt_anchor_deltas, num_anchors_per_map, dim=1) + # Concat from all feature maps + gt_anchor_deltas = cat([x.reshape(-1, B) for x in gt_anchor_deltas], dim=0) + + # Collect all objectness logits and delta predictions over feature maps + # and images to arrive at the same shape as the labels and targets + # The final ordering is L, N, H, W, A from slowest to fastest axis. + pred_objectness_logits = cat( + [ + # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N*Hi*Wi*A, ) + x.permute(0, 2, 3, 1).flatten() + for x in self.pred_objectness_logits + ], + dim=0, + ) + pred_anchor_deltas = cat( + [ + # Reshape: (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) + # -> (N*Hi*Wi*A, B) + x.view(x.shape[0], -1, B, x.shape[-2], x.shape[-1]) + .permute(0, 3, 4, 1, 2) + .reshape(-1, B) + for x in self.pred_anchor_deltas + ], + dim=0, + ) + + objectness_loss, localization_loss = rpn_losses( + gt_objectness_logits, + gt_anchor_deltas, + pred_objectness_logits, + pred_anchor_deltas, + self.smooth_l1_beta, + ) + normalizer = 1.0 / (self.batch_size_per_image * self.num_images) + loss_cls = objectness_loss * normalizer # cls: classification loss + loss_loc = localization_loss * normalizer # loc: localization loss + losses = {"loss_rpn_cls": loss_cls, "loss_rpn_loc": loss_loc} + + return losses + + def predict_proposals(self): + """ + Transform anchors into proposals by applying the predicted anchor deltas. + + Returns: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape + (N, Hi*Wi*A, B), where B is box dimension (4 or 5). 
+ """ + proposals = [] + # Transpose anchors from images-by-feature-maps (N, L) to feature-maps-by-images (L, N) + anchors = list(zip(*self.anchors)) + # For each feature map + for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas): + B = anchors_i[0].tensor.size(1) + N, _, Hi, Wi = pred_anchor_deltas_i.shape + # Reshape: (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N*Hi*Wi*A, B) + pred_anchor_deltas_i = ( + pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B) + ) + # Concatenate all anchors to shape (N*Hi*Wi*A, B) + # type(anchors_i[0]) is Boxes (B = 4) or RotatedBoxes (B = 5) + anchors_i = type(anchors_i[0]).cat(anchors_i) + proposals_i = self.box2box_transform.apply_deltas( + pred_anchor_deltas_i, anchors_i.tensor + ) + # Append feature map proposals with shape (N, Hi*Wi*A, B) + proposals.append(proposals_i.view(N, -1, B)) + return proposals + + def predict_objectness_logits(self): + """ + Return objectness logits in the same format as the proposals returned by + :meth:`predict_proposals`. + + Returns: + pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape + (N, Hi*Wi*A). + """ + pred_objectness_logits = [ + # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) + score.permute(0, 2, 3, 1).reshape(self.num_images, -1) + for score in self.pred_objectness_logits + ] + return pred_objectness_logits diff --git a/detectron2/modeling/proposal_generator/rrpn.py b/detectron2/modeling/proposal_generator/rrpn.py new file mode 100644 index 0000000000..effac74119 --- /dev/null +++ b/detectron2/modeling/proposal_generator/rrpn.py @@ -0,0 +1,87 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +from typing import Dict +import torch + +from detectron2.layers import ShapeSpec + +from ..box_regression import Box2BoxTransformRotated +from .build import PROPOSAL_GENERATOR_REGISTRY +from .rpn import RPN +from .rrpn_outputs import RRPNOutputs, find_top_rrpn_proposals + +logger = logging.getLogger(__name__) + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class RRPN(RPN): + """ + Rotated RPN subnetwork. + Please refer to https://arxiv.org/pdf/1703.01086.pdf for the original RRPN paper: + Ma, J., Shao, W., Ye, H., Wang, L., Wang, H., Zheng, Y., & Xue, X. (2018). + Arbitrary-oriented scene text detection via rotation proposals. + IEEE Transactions on Multimedia, 20(11), 3111-3122. + """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super().__init__(cfg, input_shape) + self.box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS) + + def forward(self, images, features, gt_instances=None): + """ + Args: + images (ImageList): input images of length `N` + features (dict[str: Tensor]): input data as a mapping from feature + map name to tensor. Axis 0 represents the number of images `N` in + the input data; axes 1-3 are channels, height, and width, which may + vary between feature maps (e.g., if a feature pyramid is used). + gt_instances (list[Instances], optional): a length `N` list of `Instances`s. + Each `Instances` stores ground-truth instances for the corresponding image. 
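`predict_proposals` above decodes the predicted deltas against the concatenated anchors with `Box2BoxTransform.apply_deltas`. A sketch of the standard Faster R-CNN parameterization that this transform follows (unit weights assumed, with the usual clamp on dw/dh to keep exp() finite):

import math
import torch

def apply_deltas_sketch(deltas, anchors, scale_clamp=math.log(1000.0 / 16)):
    # anchors, deltas: (R, 4); anchors in XYXY, deltas as (dx, dy, dw, dh)
    widths = anchors[:, 2] - anchors[:, 0]
    heights = anchors[:, 3] - anchors[:, 1]
    ctr_x = anchors[:, 0] + 0.5 * widths
    ctr_y = anchors[:, 1] + 0.5 * heights

    dx, dy, dw, dh = deltas.unbind(dim=1)
    dw = dw.clamp(max=scale_clamp)      # avoid exp() overflow for very large predictions
    dh = dh.clamp(max=scale_clamp)

    pred_ctr_x = dx * widths + ctr_x
    pred_ctr_y = dy * heights + ctr_y
    pred_w = torch.exp(dw) * widths
    pred_h = torch.exp(dh) * heights

    return torch.stack(
        [pred_ctr_x - 0.5 * pred_w, pred_ctr_y - 0.5 * pred_h,
         pred_ctr_x + 0.5 * pred_w, pred_ctr_y + 0.5 * pred_h], dim=1
    )

anchors = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
deltas = torch.tensor([[0.1, 0.0, math.log(2.0), 0.0]])   # shift right by 1px, double the width
print(apply_deltas_sketch(deltas, anchors))               # tensor([[-4.,  0., 16., 10.]])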
+ + Returns: + proposals: list[Instances] or None + loss: dict[Tensor] + """ + gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None + del gt_instances + features = [features[f] for f in self.in_features] + pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features) + anchors = self.anchor_generator(features) + + outputs = RRPNOutputs( + self.box2box_transform, + self.anchor_matcher, + self.batch_size_per_image, + self.positive_fraction, + images, + pred_objectness_logits, + pred_anchor_deltas, + anchors, + self.boundary_threshold, + gt_boxes, + self.smooth_l1_beta, + ) + + if self.training: + losses = outputs.losses() + else: + losses = {} + + with torch.no_grad(): + # Find the top proposals by applying NMS and removing boxes that + # are too small. The proposals are treated as fixed for approximate + # joint training with roi heads. This approach ignores the derivative + # w.r.t. the proposal boxes’ coordinates that are also network + # responses, so is approximate. + proposals = find_top_rrpn_proposals( + outputs.predict_proposals(), + outputs.predict_objectness_logits(), + images, + self.nms_thresh, + self.pre_nms_topk[self.training], + self.post_nms_topk[self.training], + self.min_box_side_len, + self.training, + ) + + return proposals, losses diff --git a/detectron2/modeling/proposal_generator/rrpn_outputs.py b/detectron2/modeling/proposal_generator/rrpn_outputs.py new file mode 100644 index 0000000000..4bbbd2b90f --- /dev/null +++ b/detectron2/modeling/proposal_generator/rrpn_outputs.py @@ -0,0 +1,240 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import itertools +import logging +import torch + +from detectron2.layers import batched_nms_rotated, cat +from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated + +from .rpn_outputs import RPNOutputs + +logger = logging.getLogger(__name__) + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + L: number of feature maps per image on which RRPN is run + A: number of cell anchors (must be the same for all feature maps) + Hi, Wi: height and width of the i-th feature map + 5: size of the box parameterization + +Naming convention: + + objectness: refers to the binary classification of an anchor as object vs. not + object. + + deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the rotated box2box + transform (see :class:`box_regression.Box2BoxTransformRotated`). + + pred_objectness_logits: predicted objectness scores in [-inf, +inf]; use + sigmoid(pred_objectness_logits) to estimate P(object). + + gt_objectness_logits: ground-truth binary classification labels for objectness + + pred_anchor_deltas: predicted rotated box2box transform deltas + + gt_anchor_deltas: ground-truth rotated box2box transform deltas +""" + + +def find_top_rrpn_proposals( + proposals, + pred_objectness_logits, + images, + nms_thresh, + pre_nms_topk, + post_nms_topk, + min_box_side_len, + training, +): + """ + For each feature map, select the `pre_nms_topk` highest scoring proposals, + apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk` + highest scoring proposals among all the feature maps if `training` is True, + otherwise, returns the highest `post_nms_topk` scoring proposals for each + feature map. + + Args: + proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5). + All proposal predictions on the feature maps. + pred_objectness_logits (list[Tensor]): A list of L tensors. 
Tensor i has shape (N, Hi*Wi*A). + images (ImageList): Input images as an :class:`ImageList`. + nms_thresh (float): IoU threshold to use for NMS + pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS. + When RRPN is run on multiple feature maps (as in FPN) this number is per + feature map. + post_nms_topk (int): number of top k scoring proposals to keep after applying NMS. + When RRPN is run on multiple feature maps (as in FPN) this number is total, + over all feature maps. + min_box_side_len (float): minimum proposal box side length in pixels (absolute units + wrt input images). + training (bool): True if proposals are to be used in training, otherwise False. + This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..." + comment. + + Returns: + proposals (list[Instances]): list of N Instances. The i-th Instances + stores post_nms_topk object proposals for image i. + """ + image_sizes = images.image_sizes # in (h, w) order + num_images = len(image_sizes) + device = proposals[0].device + + # 1. Select top-k anchor for every level and every image + topk_scores = [] # #lvl Tensor, each of shape N x topk + topk_proposals = [] + level_ids = [] # #lvl Tensor, each of shape (topk,) + batch_idx = torch.arange(num_images, device=device) + for level_id, proposals_i, logits_i in zip( + itertools.count(), proposals, pred_objectness_logits + ): + Hi_Wi_A = logits_i.shape[1] + num_proposals_i = min(pre_nms_topk, Hi_Wi_A) + + # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812) + # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) + logits_i, idx = logits_i.sort(descending=True, dim=1) + topk_scores_i = logits_i[batch_idx, :num_proposals_i] + topk_idx = idx[batch_idx, :num_proposals_i] + + # each is N x topk + topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 5 + + topk_proposals.append(topk_proposals_i) + topk_scores.append(topk_scores_i) + level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) + + # 2. Concat all levels together + topk_scores = cat(topk_scores, dim=1) + topk_proposals = cat(topk_proposals, dim=1) + level_ids = cat(level_ids, dim=0) + + # 3. For each image, run a per-level NMS, and choose topk results. + results = [] + for n, image_size in enumerate(image_sizes): + boxes = RotatedBoxes(topk_proposals[n]) + scores_per_img = topk_scores[n] + boxes.clip(image_size) + + # filter empty boxes + keep = boxes.nonempty(threshold=min_box_side_len) + lvl = level_ids + if keep.sum().item() != len(boxes): + boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], level_ids[keep]) + + keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh) + # In Detectron1, there was different behavior during training vs. testing. + # (https://github.com/facebookresearch/Detectron/issues/459) + # During training, topk is over the proposals from *all* images in the training batch. + # During testing, it is over the proposals for each image separately. + # As a result, the training behavior becomes batch-dependent, + # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size. + # This bug is addressed in Detectron2 to make the behavior independent of batch size. 
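+        # batched_nms_rotated returns the kept indices sorted by decreasing score, so the
+        # slicing below keeps the post_nms_topk highest-scoring surviving proposals for this image.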
+ keep = keep[:post_nms_topk] + + res = Instances(image_size) + res.proposal_boxes = boxes[keep] + res.objectness_logits = scores_per_img[keep] + results.append(res) + return results + + +class RRPNOutputs(RPNOutputs): + def __init__( + self, + box2box_transform, + anchor_matcher, + batch_size_per_image, + positive_fraction, + images, + pred_objectness_logits, + pred_anchor_deltas, + anchors, + boundary_threshold=0, + gt_boxes=None, + smooth_l1_beta=0.0, + ): + """ + Args: + box2box_transform (Box2BoxTransformRotated): :class:`Box2BoxTransformRotated` + instance for anchor-proposal tranformations. + anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to + ground-truth boxes; used to determine training labels. + batch_size_per_image (int): number of proposals to sample when training + positive_fraction (float): target fraction of sampled proposals that should be positive + images (ImageList): :class:`ImageList` instance representing N input images + pred_objectness_logits (list[Tensor]): A list of L elements. + Element i is a tensor of shape (N, A, Hi, Wi) representing + the predicted objectness logits for anchors. + pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape + (N, A*5, Hi, Wi) representing the predicted "deltas" used to transform anchors + to proposals. + anchors (list[list[RotatedBoxes]]): A list of N elements. Each element is a list of L + RotatedBoxes. The RotatedBoxes at (n, l) stores the entire anchor array for + feature map l in image n (i.e. the cell anchors repeated over all locations in + feature map (n, l)). + boundary_threshold (int): if >= 0, then anchors that extend beyond the image + boundary by more than boundary_thresh are not used in training. Set to a very large + number or < 0 to disable this behavior. Only needed in training. + gt_boxes (list[RotatedBoxes], optional): A list of N elements. Element i a RotatedBoxes + storing the ground-truth ("gt") rotated boxes for image i. + smooth_l1_beta (float): The transition point between L1 and L2 loss in + the smooth L1 loss function. When set to 0, the loss becomes L1. When + set to +inf, the loss becomes constant 0. + """ + super(RRPNOutputs, self).__init__( + box2box_transform, + anchor_matcher, + batch_size_per_image, + positive_fraction, + images, + pred_objectness_logits, + pred_anchor_deltas, + anchors, + boundary_threshold, + gt_boxes, + smooth_l1_beta, + ) + + def _get_ground_truth(self): + """ + Returns: + gt_objectness_logits: list of N tensors. Tensor i is a vector whose length is the + total number of anchors in image i (i.e., len(anchors[i])). Label values are + in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative class; 1 = positive class. + gt_anchor_deltas: list of N tensors. Tensor i has shape (len(anchors[i]), 5). 
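+            The deltas follow the (dx, dy, dw, dh, da) parameterization of
+            :class:`Box2BoxTransformRotated`.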
+ """ + gt_objectness_logits = [] + gt_anchor_deltas = [] + # Concatenate anchors from all feature maps into a single RotatedBoxes per image + anchors = [RotatedBoxes.cat(anchors_i) for anchors_i in self.anchors] + for image_size_i, anchors_i, gt_boxes_i in zip(self.image_sizes, anchors, self.gt_boxes): + """ + image_size_i: (h, w) for the i-th image + anchors_i: anchors for i-th image + gt_boxes_i: ground-truth boxes for i-th image + """ + match_quality_matrix = pairwise_iou_rotated(gt_boxes_i, anchors_i) + matched_idxs, gt_objectness_logits_i = self.anchor_matcher(match_quality_matrix) + + if self.boundary_threshold >= 0: + # Discard anchors that go out of the boundaries of the image + # NOTE: This is legacy functionality that is turned off by default in Detectron2 + anchors_inside_image = anchors_i.inside_box(image_size_i, self.boundary_threshold) + gt_objectness_logits_i[~anchors_inside_image] = -1 + + if len(gt_boxes_i) == 0: + # These values won't be used anyway since the anchor is labeled as background + gt_anchor_deltas_i = torch.zeros_like(anchors_i.tensor) + else: + # TODO wasted computation for ignored boxes + matched_gt_boxes = gt_boxes_i[matched_idxs] + gt_anchor_deltas_i = self.box2box_transform.get_deltas( + anchors_i.tensor, matched_gt_boxes.tensor + ) + + gt_objectness_logits.append(gt_objectness_logits_i) + gt_anchor_deltas.append(gt_anchor_deltas_i) + + return gt_objectness_logits, gt_anchor_deltas diff --git a/detectron2/modeling/roi_heads/__init__.py b/detectron2/modeling/roi_heads/__init__.py new file mode 100644 index 0000000000..390bd17f0e --- /dev/null +++ b/detectron2/modeling/roi_heads/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .box_head import ROI_BOX_HEAD_REGISTRY, build_box_head +from .keypoint_head import ROI_KEYPOINT_HEAD_REGISTRY, build_keypoint_head +from .mask_head import ROI_MASK_HEAD_REGISTRY, build_mask_head +from .roi_heads import ROI_HEADS_REGISTRY, ROIHeads, StandardROIHeads, build_roi_heads + +from . import cascade_rcnn # isort:skip diff --git a/detectron2/modeling/roi_heads/box_head.py b/detectron2/modeling/roi_heads/box_head.py new file mode 100644 index 0000000000..d2fc0323ba --- /dev/null +++ b/detectron2/modeling/roi_heads/box_head.py @@ -0,0 +1,91 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import Conv2d, ShapeSpec, get_norm +from detectron2.utils.registry import Registry + +ROI_BOX_HEAD_REGISTRY = Registry("ROI_BOX_HEAD") +""" +Registry for box heads, which make box predictions from per-region features. +""" + + +@ROI_BOX_HEAD_REGISTRY.register() +class FastRCNNConvFCHead(nn.Module): + """ + A head with several 3x3 conv layers (each followed by norm & relu) and + several fc layers (each followed by relu). 
+ """ + + def __init__(self, cfg, input_shape: ShapeSpec): + """ + The following attributes are parsed from config: + num_conv, num_fc: the number of conv/fc layers + conv_dim/fc_dim: the dimension of the conv/fc layers + norm: normalization for the conv layers + """ + super().__init__() + + # fmt: off + num_conv = cfg.MODEL.ROI_BOX_HEAD.NUM_CONV + conv_dim = cfg.MODEL.ROI_BOX_HEAD.CONV_DIM + num_fc = cfg.MODEL.ROI_BOX_HEAD.NUM_FC + fc_dim = cfg.MODEL.ROI_BOX_HEAD.FC_DIM + norm = cfg.MODEL.ROI_BOX_HEAD.NORM + # fmt: on + assert num_conv + num_fc > 0 + + self._output_size = (input_shape.channels, input_shape.height, input_shape.width) + + self.conv_norm_relus = [] + for k in range(num_conv): + conv = Conv2d( + self._output_size[0], + conv_dim, + kernel_size=3, + padding=1, + bias=not norm, + norm=get_norm(norm, conv_dim), + activation=F.relu, + ) + self.add_module("conv{}".format(k + 1), conv) + self.conv_norm_relus.append(conv) + self._output_size = (conv_dim, self._output_size[1], self._output_size[2]) + + self.fcs = [] + for k in range(num_fc): + fc = nn.Linear(np.prod(self._output_size), fc_dim) + self.add_module("fc{}".format(k + 1), fc) + self.fcs.append(fc) + self._output_size = fc_dim + + for layer in self.conv_norm_relus: + weight_init.c2_msra_fill(layer) + for layer in self.fcs: + weight_init.c2_xavier_fill(layer) + + def forward(self, x): + for layer in self.conv_norm_relus: + x = layer(x) + if len(self.fcs): + if x.dim() > 2: + x = torch.flatten(x, start_dim=1) + for layer in self.fcs: + x = F.relu(layer(x)) + return x + + @property + def output_size(self): + return self._output_size + + +def build_box_head(cfg, input_shape): + """ + Build a box head defined by `cfg.MODEL.ROI_BOX_HEAD.NAME`. + """ + name = cfg.MODEL.ROI_BOX_HEAD.NAME + return ROI_BOX_HEAD_REGISTRY.get(name)(cfg, input_shape) diff --git a/detectron2/modeling/roi_heads/cascade_rcnn.py b/detectron2/modeling/roi_heads/cascade_rcnn.py new file mode 100644 index 0000000000..2b3259c72b --- /dev/null +++ b/detectron2/modeling/roi_heads/cascade_rcnn.py @@ -0,0 +1,243 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved
+import torch
+from torch import nn
+from torch.autograd.function import Function
+
+from detectron2.layers import ShapeSpec
+from detectron2.structures import Boxes, Instances, pairwise_iou
+from detectron2.utils.events import get_event_storage
+
+from ..box_regression import Box2BoxTransform
+from ..matcher import Matcher
+from ..poolers import ROIPooler
+from .box_head import build_box_head
+from .fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs, fast_rcnn_inference
+from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
+
+
+class _ScaleGradient(Function):
+    @staticmethod
+    def forward(ctx, input, scale):
+        ctx.scale = scale
+        return input
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        return grad_output * ctx.scale, None
+
+
+@ROI_HEADS_REGISTRY.register()
+class CascadeROIHeads(StandardROIHeads):
+    def _init_box_head(self, cfg):
+        # fmt: off
+        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
+        pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features)
+        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
+        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
+        cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
+        cascade_ious = cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS
+        self.num_cascade_stages = len(cascade_ious)
+        assert len(cascade_bbox_reg_weights) == self.num_cascade_stages
+        assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG, \
+            "CascadeROIHeads only supports class-agnostic regression now!"
+        assert cascade_ious[0] == cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS[0]
+        # fmt: on
+
+        in_channels = [self.feature_channels[f] for f in self.in_features]
+        # Check all channel counts are equal
+        assert len(set(in_channels)) == 1, in_channels
+        in_channels = in_channels[0]
+
+        self.box_pooler = ROIPooler(
+            output_size=pooler_resolution,
+            scales=pooler_scales,
+            sampling_ratio=sampling_ratio,
+            pooler_type=pooler_type,
+        )
+        pooled_shape = ShapeSpec(
+            channels=in_channels, width=pooler_resolution, height=pooler_resolution
+        )
+
+        self.box_head = nn.ModuleList()
+        self.box_predictor = nn.ModuleList()
+        self.box2box_transform = []
+        self.proposal_matchers = []
+        for k in range(self.num_cascade_stages):
+            box_head = build_box_head(cfg, pooled_shape)
+            self.box_head.append(box_head)
+            self.box_predictor.append(
+                FastRCNNOutputLayers(
+                    box_head.output_size, self.num_classes, cls_agnostic_bbox_reg=True
+                )
+            )
+            self.box2box_transform.append(Box2BoxTransform(weights=cascade_bbox_reg_weights[k]))
+
+            if k == 0:
+                # The first matching is done by the matcher of ROIHeads (self.proposal_matcher).
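+                # Subsequent stages build a new Matcher below, using that stage's IoU threshold (cascade_ious[k]).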
+ self.proposal_matchers.append(None) + else: + self.proposal_matchers.append( + Matcher([cascade_ious[k]], [0, 1], allow_low_quality_matches=False) + ) + + def forward(self, images, features, proposals, targets=None): + del images + if self.training: + proposals = self.label_and_sample_proposals(proposals, targets) + + features_list = [features[f] for f in self.in_features] + + if self.training: + # Need targets to box head + losses = self._forward_box(features_list, proposals, targets) + losses.update(self._forward_mask(features_list, proposals)) + losses.update(self._forward_keypoint(features_list, proposals)) + return proposals, losses + else: + pred_instances = self._forward_box(features_list, proposals) + pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def _forward_box(self, features, proposals, targets=None): + head_outputs = [] + image_sizes = [x.image_size for x in proposals] + for k in range(self.num_cascade_stages): + if k > 0: + # The output boxes of the previous stage are the input proposals of the next stage + proposals = self._create_proposals_from_boxes( + head_outputs[-1].predict_boxes(), image_sizes + ) + if self.training: + proposals = self._match_and_label_boxes(proposals, k, targets) + head_outputs.append(self._run_stage(features, proposals, k)) + + if self.training: + losses = {} + storage = get_event_storage() + for stage, output in enumerate(head_outputs): + with storage.name_scope("stage{}".format(stage)): + stage_losses = output.losses() + losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()}) + return losses + else: + # Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1) + scores_per_stage = [h.predict_probs() for h in head_outputs] + + # Average the scores across heads + scores = [ + sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages) + for scores_per_image in zip(*scores_per_stage) + ] + # Use the boxes of the last head + boxes = head_outputs[-1].predict_boxes() + pred_instances, _ = fast_rcnn_inference( + boxes, + scores, + image_sizes, + self.test_score_thresh, + self.test_nms_thresh, + self.test_detections_per_img, + ) + return pred_instances + + @torch.no_grad() + def _match_and_label_boxes(self, proposals, stage, targets): + """ + Match proposals with groundtruth using the matcher at the given stage. + Label the proposals as foreground or background based on the match. + + Args: + proposals (list[Instances]): One Instances for each image, with + the field "proposal_boxes". 
+ stage (int): the current stage + targets (list[Instances]): the ground truth instances + + Returns: + list[Instances]: the same proposals, but with fields "gt_classes" and "gt_boxes" + """ + num_fg_samples, num_bg_samples = [], [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + match_quality_matrix = pairwise_iou( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + # proposal_labels are 0 or 1 + matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix) + if len(targets_per_image) > 0: + gt_classes = targets_per_image.gt_classes[matched_idxs] + # Label unmatched proposals (0 label from matcher) as background (label=num_classes) + gt_classes[proposal_labels == 0] = self.num_classes + gt_boxes = targets_per_image.gt_boxes[matched_idxs] + else: + gt_classes = torch.zeros_like(matched_idxs) + self.num_classes + gt_boxes = Boxes( + targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4)) + ) + proposals_per_image.gt_classes = gt_classes + proposals_per_image.gt_boxes = gt_boxes + + num_fg_samples.append((proposal_labels == 1).sum().item()) + num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1]) + + # Log the number of fg/bg samples in each stage + storage = get_event_storage() + storage.put_scalar( + "stage{}/roi_head/num_fg_samples".format(stage), + sum(num_fg_samples) / len(num_fg_samples), + ) + storage.put_scalar( + "stage{}/roi_head/num_bg_samples".format(stage), + sum(num_bg_samples) / len(num_bg_samples), + ) + return proposals + + def _run_stage(self, features, proposals, stage): + """ + Args: + features (list[Tensor]): #lvl input features to ROIHeads + proposals (list[Instances]): #image Instances, with the field "proposal_boxes" + stage (int): the current stage + + Returns: + FastRCNNOutputs: the output of this stage + """ + box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) + # The original implementation averages the losses among heads, + # but scale up the parameter gradients of the heads. + # This is equivalent to adding the losses among heads, + # but scale down the gradients on features. + box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages) + box_features = self.box_head[stage](box_features) + pred_class_logits, pred_proposal_deltas = self.box_predictor[stage](box_features) + del box_features + + outputs = FastRCNNOutputs( + self.box2box_transform[stage], + pred_class_logits, + pred_proposal_deltas, + proposals, + self.smooth_l1_beta, + ) + return outputs + + def _create_proposals_from_boxes(self, boxes, image_sizes): + """ + Args: + boxes (list[Tensor]): per-image predicted boxes, each of shape Ri x 4 + image_sizes (list[tuple]): list of image shapes in (h, w) + + Returns: + list[Instances]: per-image proposals with the given boxes. 
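+            The boxes are detached from the computation graph, so no gradients flow back
+            through the proposal coordinates.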
+ """ + # Just like RPN, the proposals should not have gradients + boxes = [Boxes(b.detach()) for b in boxes] + proposals = [] + for boxes_per_image, image_size in zip(boxes, image_sizes): + boxes_per_image.clip(image_size) + if self.training: + # do not filter empty boxes at inference time, + # because the scores from each stage need to be aligned and added later + boxes_per_image = boxes_per_image[boxes_per_image.nonempty()] + prop = Instances(image_size) + prop.proposal_boxes = boxes_per_image + proposals.append(prop) + return proposals diff --git a/detectron2/modeling/roi_heads/fast_rcnn.py b/detectron2/modeling/roi_heads/fast_rcnn.py new file mode 100644 index 0000000000..67dc4fba65 --- /dev/null +++ b/detectron2/modeling/roi_heads/fast_rcnn.py @@ -0,0 +1,371 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import numpy as np +import torch +from fvcore.nn import smooth_l1_loss +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import batched_nms, cat +from detectron2.structures import Boxes, Instances +from detectron2.utils.events import get_event_storage + +logger = logging.getLogger(__name__) + +""" +Shape shorthand in this module: + + N: number of images in the minibatch + R: number of ROIs, combined over all images, in the minibatch + Ri: number of ROIs in image i + K: number of foreground classes. E.g.,there are 80 foreground classes in COCO. + +Naming convention: + + deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box + transform (see :class:`box_regression.Box2BoxTransform`). + + pred_class_logits: predicted class scores in [-inf, +inf]; use + softmax(pred_class_logits) to estimate P(class). + + gt_classes: ground-truth classification labels in [0, K], where [0, K) represent + foreground object classes and K represents the background class. + + pred_proposal_deltas: predicted box2box transform deltas for transforming proposals + to detection box predictions. + + gt_proposal_deltas: ground-truth box2box transform deltas +""" + + +def fast_rcnn_losses( + gt_classes, gt_proposal_deltas, pred_class_logits, pred_proposal_deltas, smooth_l1_beta +): + """ + When box dimension is 4: + Computes the classification and box delta losses defined in the Fast R-CNN paper. + When box dimension is 5: + Computes the same losses for Fast R-CNN with rotated boxes. + + Args: + gt_classes (Tensor): A tensor of shape (R,) storing ground-truth classification + labels in [0, K], including K fg class and 1 bg class. + gt_proposal_deltas (Tensor): + Shape (R, box_dim), row i represents ground-truth box2box transform targets + (dx, dy, dw, dh) or (dx, dy, dw, dh, da) that map object instance i to + its matched ground-truth box. + pred_class_logits (Tensor): A tensor for shape (R, K + 1) storing predicted classification + logits for the K+1-way classification problem. Each row corresponds to a predicted + object instance. + pred_proposal_deltas (Tensor): shape depends on whether we are doing + cls-agnoistic or cls-specific regression, and the box dimensions. + When box_dim is 4: + 1. cls-specific: Shape (R, 4 * K), each row stores a list of class-specific + predicted box2box transform [dx_0, dy_0, dw_0, dh_0, ..., dx_k, dy_k, dw_k, dh_k, ...] + for each class k in [0, K). (No predictions for the background class.) + 2. cls-agnostic: Shape (R, 4), the second row stores the class-agnostic (foreground) + predicted box2box transform. + When box_dim is 5: + 1. 
cls-specific: Shape (R, 5 * K), each row stores a list of class-specific + predicted rotated box2box transform + [dx_0, dy_0, dw_0, dh_0, da_0, ..., dx_k, dy_k, dw_k, dh_k, da_k, ...] + for each class k in [0, K). (No predictions for the background class.) + 2. cls-agnostic: Shape (R, 5), the second row stores the class-agnostic (foreground) + predicted rotated box2box transform. + smooth_l1_beta (float): The transition point between L1 and L2 loss in + the smooth L1 loss function. When set to 0, the loss becomes L1. When + set to +inf, the loss becomes constant 0. + + Returns: + loss_cls, loss_box_reg (Tensor): Scalar loss values. + """ + box_dim = gt_proposal_deltas.size(1) + cls_agnostic_bbox_reg = pred_proposal_deltas.size(1) == box_dim + device = pred_class_logits.device + + loss_cls = F.cross_entropy(pred_class_logits, gt_classes, reduction="mean") + + bg_class_ind = pred_class_logits.shape[1] - 1 + + # Box delta loss is only computed between the prediction for the gt class k + # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions + # for non-gt classes and background. + # Empty fg_inds produces a valid loss of zero as long as the size_average + # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally + # and would produce a nan loss). + fg_inds = torch.nonzero((gt_classes >= 0) & (gt_classes < bg_class_ind)).squeeze(1) + if cls_agnostic_bbox_reg: + # pred_proposal_deltas only corresponds to foreground class for agnostic + gt_class_cols = torch.arange(box_dim, device=device) + else: + fg_gt_classes = gt_classes[fg_inds] + # pred_proposal_deltas for class k are located in columns [b * k : b * k + b], + # where b is the dimension of box representation (4 or 5) + # Note that compared to Detectron1, + # we do not perform bounding box regression for background classes. + gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(box_dim, device=device) + + loss_box_reg = smooth_l1_loss( + pred_proposal_deltas[fg_inds[:, None], gt_class_cols], + gt_proposal_deltas[fg_inds], + smooth_l1_beta, + reduction="sum", + ) + # The loss is normalized using the total number of regions (R), not the number + # of foreground regions even though the box regression loss is only defined on + # foreground regions. Why? Because doing so gives equal training influence to + # each foreground example. To see how, consider two different minibatches: + # (1) Contains a single foreground region + # (2) Contains 100 foreground regions + # If we normalize by the number of foreground regions, the single example in + # minibatch (1) will be given 100 times as much influence as each foreground + # example in minibatch (2). Normalizing by the total number of regions, R, + # means that the single example in minibatch (1) and each of the 100 examples + # in minibatch (2) are given equal influence. + loss_box_reg = loss_box_reg / gt_classes.numel() + + return loss_cls, loss_box_reg + + +def fast_rcnn_inference(boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image): + """ + Call `fast_rcnn_inference_single_image` for all images. + + Args: + boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic + boxes for each image. Element i has shape (Ri, K * 4) if doing + class-specific regression, or (Ri, 4) if doing class-agnostic + regression, where Ri is the number of predicted objects for image i. + This is compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`. 
+ scores (list[Tensor]): A list of Tensors of predicted class scores for each image. + Element i has shape (Ri, K + 1), where Ri is the number of predicted objects + for image i. Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`. + image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch. + score_thresh (float): Only return detections with a confidence score exceeding this + threshold. + nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1]. + topk_per_image (int): The number of top scoring detections to return. Set < 0 to return + all detections. + + Returns: + instances: (list[Instances]): A list of N instances, one for each image in the batch, + that stores the topk most confidence detections. + kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates + the corresponding boxes/scores index in [0, Ri) from the input, for image i. + """ + result_per_image = [ + fast_rcnn_inference_single_image( + boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image + ) + for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes) + ] + return tuple(list(x) for x in zip(*result_per_image)) + + +def fast_rcnn_inference_single_image( + boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image +): + """ + Single-image inference. Return bounding-box detection results by thresholding + on scores and applying non-maximum suppression (NMS). + + Args: + Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes + per image. + + Returns: + Same as `fast_rcnn_inference`, but for only one image. + """ + scores = scores[:, :-1] + num_bbox_reg_classes = boxes.shape[1] // 4 + # Convert to Boxes to use the `clip` function ... + boxes = Boxes(boxes.reshape(-1, 4)) + boxes.clip(image_shape) + boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4 + + # Filter results based on detection scores + filter_mask = scores > score_thresh # R x K + # R' x 2. First column contains indices of the R predictions; + # Second column contains indices of classes. + filter_inds = filter_mask.nonzero() + if num_bbox_reg_classes == 1: + boxes = boxes[filter_inds[:, 0], 0] + else: + boxes = boxes[filter_mask] + scores = scores[filter_mask] + + # Apply per-class NMS + keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh) + keep = keep[:topk_per_image] + boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] + + result = Instances(image_shape) + result.pred_boxes = Boxes(boxes) + result.scores = scores + result.pred_classes = filter_inds[:, 1] + return result, filter_inds[:, 0] + + +class FastRCNNOutputs(object): + """ + A class that stores information about outputs of a Fast R-CNN head. + """ + + def __init__( + self, box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, smooth_l1_beta + ): + """ + Args: + box2box_transform (Box2BoxTransform/Box2BoxTransformRotated): + box2box transform instance for proposal-to-detection tranformations. + pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class + logits for all R predicted object instances. 
+ pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for + class-specific or class-agnostic storing the predicted deltas that + transform proposals into final box detections, where B is the box dimension (4 or 5) + proposals (list[Instances]): A list of N Instancess, where Instances i stores the + proposals for image i, in the field "proposal_boxes". + When training, each Instances must have ground-truth labels + stored in the field "gt_classes" and "gt_boxes". + smooth_l1_beta (float): The transition point between L1 and L2 loss in + the smooth L1 loss function. When set to 0, the loss becomes L1. When + set to +inf, the loss becomes constant 0. + """ + self.box2box_transform = box2box_transform + self.num_preds_per_image = [len(p) for p in proposals] + self.pred_class_logits = pred_class_logits + self.pred_proposal_deltas = pred_proposal_deltas + self.smooth_l1_beta = smooth_l1_beta + + box_type = type(proposals[0].proposal_boxes) + # cat(..., dim=0) concatenates over all images in the batch + self.proposals = box_type.cat([p.proposal_boxes for p in proposals]) + assert not self.proposals.tensor.requires_grad, "Proposals should not require gradients!" + self.image_shapes = [x.image_size for x in proposals] + + # The following fields should exist only when training. + if proposals[0].has("gt_boxes"): + self.gt_boxes = box_type.cat([p.gt_boxes for p in proposals]) + assert proposals[0].has("gt_classes") + self.gt_classes = cat([p.gt_classes for p in proposals], dim=0) + + def _log_accuracy(self): + """ + Log the accuracy metrics to EventStorage. + """ + num_instances = self.gt_classes.numel() + pred_classes = self.pred_class_logits.argmax(dim=1) + bg_class_ind = self.pred_class_logits.shape[1] - 1 + + fg_inds = (self.gt_classes >= 0) & (self.gt_classes < bg_class_ind) + num_fg = fg_inds.nonzero().numel() + fg_gt_classes = self.gt_classes[fg_inds] + fg_pred_classes = pred_classes[fg_inds] + + num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel() + num_accurate = (pred_classes == self.gt_classes).nonzero().numel() + fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel() + + storage = get_event_storage() + storage.put_scalar("fast_rcnn/cls_accuracy", num_accurate / num_instances) + if num_fg > 0: + storage.put_scalar("fast_rcnn/fg_cls_accuracy", fg_num_accurate / num_fg) + storage.put_scalar("fast_rcnn/false_negative", num_false_negative / num_fg) + + def losses(self): + """ + Returns: + A dict of losses (scalar tensors) containing keys "loss_cls" and "loss_box_reg". + """ + self._log_accuracy() + gt_proposal_deltas = self.box2box_transform.get_deltas( + self.proposals.tensor, self.gt_boxes.tensor + ) + loss_cls, loss_box_reg = fast_rcnn_losses( + self.gt_classes, + gt_proposal_deltas, + self.pred_class_logits, + self.pred_proposal_deltas, + self.smooth_l1_beta, + ) + return {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg} + + def predict_boxes(self): + """ + Returns: + list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes + for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is + the number of predicted objects for image i and B is the box dimension (4 or 5) + """ + boxes = self.box2box_transform.apply_deltas( + self.pred_proposal_deltas, self.proposals.tensor + ) + return boxes.split(self.num_preds_per_image, dim=0) + + def predict_probs(self): + """ + Returns: + list[Tensor]: A list of Tensors of predicted class probabilities for each image. 
+ Element i has shape (Ri, K + 1), where Ri is the number of predicted objects + for image i. + """ + probs = F.softmax(self.pred_class_logits, dim=-1) + return probs.split(self.num_preds_per_image, dim=0) + + def inference(self, score_thresh, nms_thresh, topk_per_image): + """ + Args: + score_thresh (float): same as fast_rcnn_inference. + nms_thresh (float): same as fast_rcnn_inference. + topk_per_image (int): same as fast_rcnn_inference. + Returns: + list[Instances]: same as fast_rcnn_inference. + list[Tensor]: same as fast_rcnn_inference. + """ + boxes = self.predict_boxes() + scores = self.predict_probs() + image_shapes = self.image_shapes + + return fast_rcnn_inference( + boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image + ) + + +class FastRCNNOutputLayers(nn.Module): + """ + Two linear layers for predicting Fast R-CNN outputs: + (1) proposal-to-detection box regression deltas + (2) classification scores + """ + + def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4): + """ + Args: + input_size (int): channels, or (channels, height, width) + num_classes (int): number of foreground classes + cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression + box_dim (int): the dimension of bounding boxes. + Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes + """ + super(FastRCNNOutputLayers, self).__init__() + + if not isinstance(input_size, int): + input_size = np.prod(input_size) + + # The prediction layer for num_classes foreground classes and one background class + # (hence + 1) + self.cls_score = nn.Linear(input_size, num_classes + 1) + num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes + self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) + + nn.init.normal_(self.cls_score.weight, std=0.01) + nn.init.normal_(self.bbox_pred.weight, std=0.001) + for l in [self.cls_score, self.bbox_pred]: + nn.init.constant_(l.bias, 0) + + def forward(self, x): + if x.dim() > 2: + x = torch.flatten(x, start_dim=1) + scores = self.cls_score(x) + proposal_deltas = self.bbox_pred(x) + return scores, proposal_deltas diff --git a/detectron2/modeling/roi_heads/keypoint_head.py b/detectron2/modeling/roi_heads/keypoint_head.py new file mode 100644 index 0000000000..3242b381d0 --- /dev/null +++ b/detectron2/modeling/roi_heads/keypoint_head.py @@ -0,0 +1,166 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, interpolate +from detectron2.structures import heatmaps_to_keypoints +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +_TOTAL_SKIPPED = 0 + +ROI_KEYPOINT_HEAD_REGISTRY = Registry("ROI_KEYPOINT_HEAD") +""" +Registry for keypoint heads, which make keypoint predictions from per-region features. +""" + + +def build_keypoint_head(cfg, input_shape): + """ + Build a keypoint head from `cfg.MODEL.ROI_KEYPOINT_HEAD.NAME`. + """ + name = cfg.MODEL.ROI_KEYPOINT_HEAD.NAME + return ROI_KEYPOINT_HEAD_REGISTRY.get(name)(cfg, input_shape) + + +def keypoint_rcnn_loss(pred_keypoint_logits, instances, normalizer): + """ + Arguments: + pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number + of instances in the batch, K is the number of keypoints, and S is the side length + of the keypoint heatmap. The values are spatial logits. 
+ instances (list[Instances]): A list of M Instances, where M is the batch size. + These instances are predictions from the model + that are in 1:1 correspondence with pred_keypoint_logits. + Each Instances should contain a `gt_keypoints` field containing a `structures.Keypoint` + instance. + normalizer (float): Normalize the loss by this amount. + If not specified, we normalize by the number of visible keypoints in the minibatch. + + Returns a scalar tensor containing the loss. + """ + heatmaps = [] + valid = [] + + keypoint_side_len = pred_keypoint_logits.shape[2] + for instances_per_image in instances: + if len(instances_per_image) == 0: + continue + keypoints = instances_per_image.gt_keypoints + heatmaps_per_image, valid_per_image = keypoints.to_heatmap( + instances_per_image.proposal_boxes.tensor, keypoint_side_len + ) + heatmaps.append(heatmaps_per_image.view(-1)) + valid.append(valid_per_image.view(-1)) + + if len(heatmaps): + keypoint_targets = cat(heatmaps, dim=0) + valid = cat(valid, dim=0).to(dtype=torch.uint8) + valid = torch.nonzero(valid).squeeze(1) + + # torch.mean (in binary_cross_entropy_with_logits) doesn't + # accept empty tensors, so handle it separately + if len(heatmaps) == 0 or valid.numel() == 0: + global _TOTAL_SKIPPED + _TOTAL_SKIPPED += 1 + storage = get_event_storage() + storage.put_scalar("kpts_num_skipped_batches", _TOTAL_SKIPPED, smoothing_hint=False) + return pred_keypoint_logits.sum() * 0 + + N, K, H, W = pred_keypoint_logits.shape + pred_keypoint_logits = pred_keypoint_logits.view(N * K, H * W) + + keypoint_loss = F.cross_entropy( + pred_keypoint_logits[valid], keypoint_targets[valid], reduction="sum" + ) + + # If a normalizer isn't specified, normalize by the number of visible keypoints in the minibatch + if normalizer is None: + normalizer = valid.numel() + keypoint_loss /= normalizer + + return keypoint_loss + + +def keypoint_rcnn_inference(pred_keypoint_logits, pred_instances): + """ + Post process each predicted keypoint heatmap in `pred_keypoint_logits` into (x, y, score, prob) + and add it to the `pred_instances` as a `pred_keypoints` field. + + Args: + pred_keypoint_logits (Tensor): A tensor of shape (N, K, S, S) where N is the total number + of instances in the batch, K is the number of keypoints, and S is the side length of + the keypoint heatmap. The values are spatial logits. + pred_instances (list[Instances]): A list of M Instances, where M is the batch size. + + Returns: + None. boxes will contain an extra "pred_keypoints" field. + The field is a tensor of shape (#instance, K, 3) where the last + dimension corresponds to (x, y, probability). 
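+        The heatmap score (index 2 of the raw keypoint results) is discarded; only
+        (x, y, probability) is kept.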
+ """ + # flatten all bboxes from all images together (list[Boxes] -> Nx4 tensor) + bboxes_flat = cat([b.pred_boxes.tensor for b in pred_instances], dim=0) + + keypoint_results = heatmaps_to_keypoints(pred_keypoint_logits.detach(), bboxes_flat.detach()) + num_instances_per_image = [len(i) for i in pred_instances] + keypoint_results = keypoint_results.split(num_instances_per_image, dim=0) + + for keypoint_results_per_image, instances_per_image in zip(keypoint_results, pred_instances): + # keypoint_results_per_image is (num instances)x(num keypoints)x(x, y, score, prob) + keypoint_xyp = keypoint_results_per_image[:, :, [0, 1, 3]] + instances_per_image.pred_keypoints = keypoint_xyp + + +@ROI_KEYPOINT_HEAD_REGISTRY.register() +class KRCNNConvDeconvUpsampleHead(nn.Module): + """ + A standard keypoint head containing a series of 3x3 convs, followed by + a transpose convolution and bilinear interpolation for upsampling. + """ + + def __init__(self, cfg, input_shape: ShapeSpec): + """ + The following attributes are parsed from config: + conv_dims: an iterable of output channel counts for each conv in the head + e.g. (512, 512, 512) for three convs outputting 512 channels. + num_keypoints: number of keypoint heatmaps to predicts, determines the number of + channels in the final output. + """ + super(KRCNNConvDeconvUpsampleHead, self).__init__() + + # fmt: off + # default up_scale to 2 (this can eventually be moved to config) + up_scale = 2 + conv_dims = cfg.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS + num_keypoints = cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS + in_channels = input_shape.channels + # fmt: on + + self.blocks = [] + for idx, layer_channels in enumerate(conv_dims, 1): + module = Conv2d(in_channels, layer_channels, 3, stride=1, padding=1) + self.add_module("conv_fcn{}".format(idx), module) + self.blocks.append(module) + in_channels = layer_channels + + deconv_kernel = 4 + self.score_lowres = ConvTranspose2d( + in_channels, num_keypoints, deconv_kernel, stride=2, padding=deconv_kernel // 2 - 1 + ) + self.up_scale = up_scale + + for name, param in self.named_parameters(): + if "bias" in name: + nn.init.constant_(param, 0) + elif "weight" in name: + # Caffe2 implementation uses MSRAFill, which in fact + # corresponds to kaiming_normal_ in PyTorch + nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") + + def forward(self, x): + for layer in self.blocks: + x = F.relu(layer(x)) + x = self.score_lowres(x) + x = interpolate(x, scale_factor=self.up_scale, mode="bilinear", align_corners=False) + return x diff --git a/detectron2/modeling/roi_heads/mask_head.py b/detectron2/modeling/roi_heads/mask_head.py new file mode 100644 index 0000000000..5fa41e04cd --- /dev/null +++ b/detectron2/modeling/roi_heads/mask_head.py @@ -0,0 +1,204 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import fvcore.nn.weight_init as weight_init +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import Conv2d, ConvTranspose2d, ShapeSpec, cat, get_norm +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +ROI_MASK_HEAD_REGISTRY = Registry("ROI_MASK_HEAD") +""" +Registry for mask heads, which predicts instance masks given +per-region features. +""" + + +def mask_rcnn_loss(pred_mask_logits, instances): + """ + Compute the mask prediction loss defined in the Mask R-CNN paper. 
+ + Args: + pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) + for class-specific or class-agnostic, where B is the total number of predicted masks + in all images, C is the number of foreground classes, and Hmask, Wmask are the height + and width of the mask predictions. The values are logits. + instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. These instances are in 1:1 + correspondence with the pred_mask_logits. The ground-truth labels (class, box, mask, + ...) associated with each instance are stored in fields. + + Returns: + mask_loss (Tensor): A scalar tensor containing the loss. + """ + cls_agnostic_mask = pred_mask_logits.size(1) == 1 + total_num_masks = pred_mask_logits.size(0) + mask_side_len = pred_mask_logits.size(2) + assert pred_mask_logits.size(2) == pred_mask_logits.size(3), "Mask prediction must be square!" + + gt_classes = [] + gt_masks = [] + for instances_per_image in instances: + if len(instances_per_image) == 0: + continue + if not cls_agnostic_mask: + gt_classes_per_image = instances_per_image.gt_classes.to(dtype=torch.int64) + gt_classes.append(gt_classes_per_image) + + gt_masks_per_image = instances_per_image.gt_masks.crop_and_resize( + instances_per_image.proposal_boxes.tensor, mask_side_len + ).to(device=pred_mask_logits.device) + # A tensor of shape (N, M, M), N=#instances in the image; M=mask_side_len + gt_masks.append(gt_masks_per_image) + + if len(gt_masks) == 0: + return pred_mask_logits.sum() * 0 + + gt_masks = cat(gt_masks, dim=0) + + if cls_agnostic_mask: + pred_mask_logits = pred_mask_logits[:, 0] + else: + indices = torch.arange(total_num_masks) + gt_classes = cat(gt_classes, dim=0) + pred_mask_logits = pred_mask_logits[indices, gt_classes] + + if gt_masks.dtype == torch.bool: + gt_masks_bool = gt_masks + else: + # Here we allow gt_masks to be float as well (depend on the implementation of rasterize()) + gt_masks_bool = gt_masks > 0.5 + + # Log the training accuracy (using gt classes and 0.5 threshold) + mask_incorrect = (pred_mask_logits > 0.0) != gt_masks_bool + mask_accuracy = 1 - (mask_incorrect.sum().item() / max(mask_incorrect.numel(), 1.0)) + num_positive = gt_masks_bool.sum().item() + false_positive = (mask_incorrect & ~gt_masks_bool).sum().item() / max( + gt_masks_bool.numel() - num_positive, 1.0 + ) + false_negative = (mask_incorrect & gt_masks_bool).sum().item() / max(num_positive, 1.0) + + storage = get_event_storage() + storage.put_scalar("mask_rcnn/accuracy", mask_accuracy) + storage.put_scalar("mask_rcnn/false_positive", false_positive) + storage.put_scalar("mask_rcnn/false_negative", false_negative) + + mask_loss = F.binary_cross_entropy_with_logits( + pred_mask_logits, gt_masks.to(dtype=torch.float32), reduction="mean" + ) + return mask_loss + + +def mask_rcnn_inference(pred_mask_logits, pred_instances): + """ + Convert pred_mask_logits to estimated foreground probability masks while also + extracting only the masks for the predicted classes in pred_instances. For each + predicted box, the mask of the same class is attached to the instance by adding a + new "pred_masks" field to pred_instances. + + Args: + pred_mask_logits (Tensor): A tensor of shape (B, C, Hmask, Wmask) or (B, 1, Hmask, Wmask) + for class-specific or class-agnostic, where B is the total number of predicted masks + in all images, C is the number of foreground classes, and Hmask, Wmask are the height + and width of the mask predictions. The values are logits. 
+ pred_instances (list[Instances]): A list of N Instances, where N is the number of images + in the batch. Each Instances must have field "pred_classes". + + Returns: + None. pred_instances will contain an extra "pred_masks" field storing a mask of size (Hmask, + Wmask) for predicted class. Note that the masks are returned as a soft (non-quantized) + masks the resolution predicted by the network; post-processing steps, such as resizing + the predicted masks to the original image resolution and/or binarizing them, is left + to the caller. + """ + cls_agnostic_mask = pred_mask_logits.size(1) == 1 + + if cls_agnostic_mask: + mask_probs_pred = pred_mask_logits.sigmoid() + else: + # Select masks corresponding to the predicted classes + num_masks = pred_mask_logits.shape[0] + class_pred = cat([i.pred_classes for i in pred_instances]) + indices = torch.arange(num_masks, device=class_pred.device) + mask_probs_pred = pred_mask_logits[indices, class_pred][:, None].sigmoid() + # mask_probs_pred.shape: (B, 1, Hmask, Wmask) + + num_boxes_per_image = [len(i) for i in pred_instances] + mask_probs_pred = mask_probs_pred.split(num_boxes_per_image, dim=0) + + for prob, instances in zip(mask_probs_pred, pred_instances): + instances.pred_masks = prob # (1, Hmask, Wmask) + + +@ROI_MASK_HEAD_REGISTRY.register() +class MaskRCNNConvUpsampleHead(nn.Module): + """ + A mask head with several conv layers, plus an upsample layer (with `ConvTranspose2d`). + """ + + def __init__(self, cfg, input_shape: ShapeSpec): + """ + The following attributes are parsed from config: + num_conv: the number of conv layers + conv_dim: the dimension of the conv layers + norm: normalization for the conv layers + """ + super(MaskRCNNConvUpsampleHead, self).__init__() + + # fmt: off + num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES + conv_dims = cfg.MODEL.ROI_MASK_HEAD.CONV_DIM + self.norm = cfg.MODEL.ROI_MASK_HEAD.NORM + num_conv = cfg.MODEL.ROI_MASK_HEAD.NUM_CONV + input_channels = input_shape.channels + cls_agnostic_mask = cfg.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK + # fmt: on + + self.conv_norm_relus = [] + + for k in range(num_conv): + conv = Conv2d( + input_channels if k == 0 else conv_dims, + conv_dims, + kernel_size=3, + stride=1, + padding=1, + bias=not self.norm, + norm=get_norm(self.norm, conv_dims), + activation=F.relu, + ) + self.add_module("mask_fcn{}".format(k + 1), conv) + self.conv_norm_relus.append(conv) + + self.deconv = ConvTranspose2d( + conv_dims if num_conv > 0 else input_channels, + conv_dims, + kernel_size=2, + stride=2, + padding=0, + ) + + num_mask_classes = 1 if cls_agnostic_mask else num_classes + self.predictor = Conv2d(conv_dims, num_mask_classes, kernel_size=1, stride=1, padding=0) + + for layer in self.conv_norm_relus + [self.deconv]: + weight_init.c2_msra_fill(layer) + # use normal distribution initialization for mask prediction layer + nn.init.normal_(self.predictor.weight, std=0.001) + if self.predictor.bias is not None: + nn.init.constant_(self.predictor.bias, 0) + + def forward(self, x): + for layer in self.conv_norm_relus: + x = layer(x) + x = F.relu(self.deconv(x)) + return self.predictor(x) + + +def build_mask_head(cfg, input_shape): + """ + Build a mask head defined by `cfg.MODEL.ROI_MASK_HEAD.NAME`. 
+ """ + name = cfg.MODEL.ROI_MASK_HEAD.NAME + return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape) diff --git a/detectron2/modeling/roi_heads/roi_heads.py b/detectron2/modeling/roi_heads/roi_heads.py new file mode 100644 index 0000000000..07438ddcaf --- /dev/null +++ b/detectron2/modeling/roi_heads/roi_heads.py @@ -0,0 +1,816 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import numpy as np +from typing import Dict +import torch +from torch import nn + +from detectron2.layers import ShapeSpec +from detectron2.structures import Boxes, Instances, RotatedBoxes, pairwise_iou, pairwise_iou_rotated +from detectron2.utils.events import get_event_storage +from detectron2.utils.registry import Registry + +from ..backbone.resnet import BottleneckBlock, make_stage +from ..box_regression import Box2BoxTransform, Box2BoxTransformRotated +from ..matcher import Matcher +from ..poolers import ROIPooler +from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals +from ..sampling import subsample_labels +from .box_head import build_box_head +from .fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs +from .keypoint_head import build_keypoint_head, keypoint_rcnn_inference, keypoint_rcnn_loss +from .mask_head import build_mask_head, mask_rcnn_inference, mask_rcnn_loss + +ROI_HEADS_REGISTRY = Registry("ROI_HEADS") +""" +Registry for ROI heads in a generalized R-CNN model. +ROIHeads take feature maps and region proposals, and +perform per-region computation. +""" + +logger = logging.getLogger(__name__) + + +def build_roi_heads(cfg, input_shape): + """ + Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`. + """ + name = cfg.MODEL.ROI_HEADS.NAME + return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape) + + +def select_foreground_proposals(proposals, bg_label): + """ + Given a list of N Instances (for N images), each containing a `gt_classes` field, + return a list of Instances that contain only instances with `gt_classes != -1 && + gt_classes != bg_label`. + + Args: + proposals (list[Instances]): A list of N Instances, where N is the number of + images in the batch. + bg_label: label index of background class. + + Returns: + list[Instances]: N Instances, each contains only the selected foreground instances. + list[Tensor]: N boolean vector, correspond to the selection mask of + each instance. True for selected instances. + """ + assert isinstance(proposals, (list, tuple)) + assert isinstance(proposals[0], Instances) + assert proposals[0].has("gt_classes") + fg_proposals = [] + fg_selection_masks = [] + for proposals_per_image in proposals: + gt_classes = proposals_per_image.gt_classes + fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label) + fg_inds = fg_selection_mask.nonzero().squeeze(1) + fg_proposals.append(proposals_per_image[fg_inds]) + fg_selection_masks.append(fg_selection_mask) + return fg_proposals, fg_selection_masks + + +def select_proposals_with_visible_keypoints(proposals): + """ + Args: + proposals (list[Instances]): a list of N Instances, where N is the + number of images. + + Returns: + proposals: only contains proposals with at least one visible keypoint. + + Note that this is still slightly different from Detectron. + In Detectron, proposals for training keypoint head are re-sampled from + all the proposals with IOU>threshold & >=1 visible keypoint. + + Here, the proposals are first sampled from all proposals with + IOU>threshold, then proposals with no visible keypoint are filtered out. 
+    This strategy seems to make no difference on Detectron and is easier to implement.
+    """
+    ret = []
+    all_num_fg = []
+    for proposals_per_image in proposals:
+        gt_keypoints = proposals_per_image.gt_keypoints.tensor
+        # #fg x K x 3
+        vis_mask = gt_keypoints[:, :, 2] >= 1
+        xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1]
+        proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1)  # #fg x 1 x 4
+        kp_in_box = (
+            (xs >= proposal_boxes[:, :, 0])
+            & (xs <= proposal_boxes[:, :, 2])
+            & (ys >= proposal_boxes[:, :, 1])
+            & (ys <= proposal_boxes[:, :, 3])
+        )
+        selection = (kp_in_box & vis_mask).any(dim=1)
+        selection_idxs = torch.nonzero(selection).squeeze(1)
+        all_num_fg.append(selection_idxs.numel())
+        ret.append(proposals_per_image[selection_idxs])
+
+    storage = get_event_storage()
+    storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg))
+    return ret
+
+
+class ROIHeads(torch.nn.Module):
+    """
+    ROIHeads perform all per-region computation in an R-CNN.
+
+    It contains the logic of cropping the regions, extracting per-region features,
+    and making per-region predictions.
+
+    It can have many variants, implemented as subclasses of this class.
+    """
+
+    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
+        super(ROIHeads, self).__init__()
+
+        # fmt: off
+        self.batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE
+        self.positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
+        self.test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
+        self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
+        self.test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE
+        self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
+        self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
+        self.proposal_append_gt = cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT
+        self.feature_strides = {k: v.stride for k, v in input_shape.items()}
+        self.feature_channels = {k: v.channels for k, v in input_shape.items()}
+        self.cls_agnostic_bbox_reg = cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
+        self.smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
+        # fmt: on
+
+        # Matcher to assign box proposals to gt boxes
+        self.proposal_matcher = Matcher(
+            cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
+            cfg.MODEL.ROI_HEADS.IOU_LABELS,
+            allow_low_quality_matches=False,
+        )
+
+        # Box2BoxTransform for bounding box regression
+        self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
+
+    @torch.no_grad()
+    def label_and_sample_proposals(self, proposals, targets):
+        """
+        Prepare some proposals to be used to train the ROI heads.
+        It performs box matching between `proposals` and `targets`, and assigns
+        training labels to the proposals.
+        It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,
+        with a fraction of positives that is no larger than `self.positive_sample_fraction`.
+
+        Args:
+            See :meth:`ROIHeads.forward`
+
+        Returns:
+            list[Instances]:
+                length `N` list of `Instances`s containing the proposals
+                sampled for training. Each `Instances` has the following fields:
+                - proposal_boxes: the proposal boxes
+                - gt_boxes: the ground-truth box that the proposal is assigned to
+                  (this is only meaningful if the proposal has a label > 0; if label = 0
+                  then the ground-truth box is random)
+                Other fields such as "gt_classes" and "gt_masks" that are included in `targets`.
+        """
+        gt_boxes = [x.gt_boxes for x in targets]
+        # Augment proposals with ground-truth boxes.
+ # In the case of learned proposals (e.g., RPN), when training starts + # the proposals will be low quality due to random initialization. + # It's possible that none of these initial + # proposals have high enough overlap with the gt objects to be used + # as positive examples for the second stage components (box head, + # cls head, mask head). Adding the gt boxes to the set of proposals + # ensures that the second stage components will have some positive + # examples from the start of training. For RPN, this augmentation improves + # convergence and empirically improves box AP on COCO by about 0.5 + # points (under one tested configuration). + if self.proposal_append_gt: + proposals = add_ground_truth_to_proposals(gt_boxes, proposals) + + proposals_with_gt = [] + + num_fg_samples = [] + num_bg_samples = [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + has_gt = len(targets_per_image) > 0 + match_quality_matrix = pairwise_iou( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + matched_idxs, proposals_labels = self.proposal_matcher(match_quality_matrix) + + # Get the corresponding GT for each proposal + if has_gt: + gt_classes = targets_per_image.gt_classes[matched_idxs] + # Label unmatched proposals (0 label from matcher) as background (label=num_classes) + gt_classes[proposals_labels == 0] = self.num_classes + # Label ignore proposals (-1 label) + gt_classes[proposals_labels == -1] = -1 + else: + gt_classes = torch.zeros_like(matched_idxs) + self.num_classes + + sampled_fg_inds, sampled_bg_inds = subsample_labels( + gt_classes, + self.batch_size_per_image, + self.positive_sample_fraction, + self.num_classes, + ) + + sampled_inds = torch.cat([sampled_fg_inds, sampled_bg_inds], dim=0) + + proposals_per_image = proposals_per_image[sampled_inds] + proposals_per_image.gt_classes = gt_classes[sampled_inds] + + # We index all the attributes of targets that start with "gt_" + # and have not been added to proposals yet (="gt_classes"). + if has_gt: + sampled_targets = matched_idxs[sampled_inds] + # NOTE: here the indexing waste some compute, because heads + # like masks, keypoints, etc, will filter the proposals again, + # (by foreground/background, or number of keypoints in the image, etc) + # so we essentially index the data twice. + for (trg_name, trg_value) in targets_per_image.get_fields().items(): + if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name): + proposals_per_image.set(trg_name, trg_value[sampled_targets]) + else: + gt_boxes = Boxes( + targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_inds), 4)) + ) + proposals_per_image.gt_boxes = gt_boxes + + num_fg_samples.append(sampled_fg_inds.numel()) + num_bg_samples.append(sampled_bg_inds.numel()) + proposals_with_gt.append(proposals_per_image) + + # Log the number of fg/bg samples that are selected for training ROI heads + storage = get_event_storage() + storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) + storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) + + return proposals_with_gt + + def forward(self, images, features, proposals, targets=None): + """ + Args: + images (ImageList): + features (dict[str: Tensor]): input data as a mapping from feature + map name to tensor. Axis 0 represents the number of images `N` in + the input data; axes 1-3 are channels, height, and width, which may + vary between feature maps (e.g., if a feature pyramid is used). + proposals (list[Instances]): length `N` list of `Instances`s. 
The i-th + `Instances` contains object proposals for the i-th input image, + with fields "proposal_boxes" and "objectness_logits". + targets (list[Instances], optional): length `N` list of `Instances`s. The i-th + `Instances` contains the ground-truth per-instance annotations + for the i-th input image. Specify `targets` during training only. + It may have the following fields: + - gt_boxes: the bounding box of each instance. + - gt_classes: the label for each instance with a category ranging in [0, #class]. + - gt_masks: the ground-truth mask of the instance. + + Returns: + results (list[Instances]): length `N` list of `Instances`s containing the + detected instances. Returned during inference only; may be [] + during training. + losses (dict[str: Tensor]): mapping from a named loss to a tensor + storing the loss. Used during training only. + """ + raise NotImplementedError() + + +@ROI_HEADS_REGISTRY.register() +class Res5ROIHeads(ROIHeads): + """ + The ROIHeads in a typical "C4" R-CNN model, where + the box and mask head share the cropping and + the per-region feature computation by a Res5 block. + """ + + def __init__(self, cfg, input_shape): + super().__init__(cfg, input_shape) + + assert len(self.in_features) == 1 + + # fmt: off + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + pooler_scales = (1.0 / self.feature_strides[self.in_features[0]], ) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + self.mask_on = cfg.MODEL.MASK_ON + # fmt: on + assert not cfg.MODEL.KEYPOINT_ON + + self.pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + + self.res5, out_channels = self._build_res5_block(cfg) + self.box_predictor = FastRCNNOutputLayers( + out_channels, self.num_classes, self.cls_agnostic_bbox_reg + ) + + if self.mask_on: + self.mask_head = build_mask_head( + cfg, + ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution), + ) + + def _build_res5_block(self, cfg): + # fmt: off + stage_channel_factor = 2 ** 3 # res5 is 8x res2 + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group * stage_channel_factor + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + norm = cfg.MODEL.RESNETS.NORM + assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \ + "Deformable conv is not yet supported in res5 head." + # fmt: on + + blocks = make_stage( + BottleneckBlock, + 3, + first_stride=2, + in_channels=out_channels // 2, + bottleneck_channels=bottleneck_channels, + out_channels=out_channels, + num_groups=num_groups, + norm=norm, + stride_in_1x1=stride_in_1x1, + ) + return nn.Sequential(*blocks), out_channels + + def _shared_roi_transform(self, features, boxes): + x = self.pooler(features, boxes) + return self.res5(x) + + def forward(self, images, features, proposals, targets=None): + """ + See :class:`ROIHeads.forward`. 
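# Plugging typical ResNet-50 values (assumed here; they come from the config, not
# from this file) into the channel arithmetic of _build_res5_block above.
num_groups = 1           # cfg.MODEL.RESNETS.NUM_GROUPS (assumed default)
width_per_group = 64     # cfg.MODEL.RESNETS.WIDTH_PER_GROUP (assumed default)
res2_out_channels = 256  # cfg.MODEL.RESNETS.RES2_OUT_CHANNELS (assumed default)

stage_channel_factor = 2 ** 3  # res5 is 8x res2
bottleneck_channels = num_groups * width_per_group * stage_channel_factor
out_channels = res2_out_channels * stage_channel_factor
print(bottleneck_channels, out_channels)  # 512 2048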
+ """ + del images + + if self.training: + proposals = self.label_and_sample_proposals(proposals, targets) + del targets + + proposal_boxes = [x.proposal_boxes for x in proposals] + box_features = self._shared_roi_transform( + [features[f] for f in self.in_features], proposal_boxes + ) + feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1 + pred_class_logits, pred_proposal_deltas = self.box_predictor(feature_pooled) + del feature_pooled + + outputs = FastRCNNOutputs( + self.box2box_transform, + pred_class_logits, + pred_proposal_deltas, + proposals, + self.smooth_l1_beta, + ) + + if self.training: + del features + losses = outputs.losses() + if self.mask_on: + proposals, fg_selection_masks = select_foreground_proposals( + proposals, self.num_classes + ) + # Since the ROI feature transform is shared between boxes and masks, + # we don't need to recompute features. The mask loss is only defined + # on foreground proposals, so we need to select out the foreground + # features. + mask_features = box_features[torch.cat(fg_selection_masks, dim=0)] + del box_features + mask_logits = self.mask_head(mask_features) + losses["loss_mask"] = mask_rcnn_loss(mask_logits, proposals) + return [], losses + else: + pred_instances, _ = outputs.inference( + self.test_score_thresh, self.test_nms_thresh, self.test_detections_per_img + ) + pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def forward_with_given_boxes(self, features, instances): + """ + Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. + + Args: + features: same as in `forward()` + instances (list[Instances]): instances to predict other outputs. Expect the keys + "pred_boxes" and "pred_classes" to exist. + + Returns: + instances (Instances): + the same `Instances` object, with extra + fields such as `pred_masks` or `pred_keypoints`. + """ + assert not self.training + assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") + + if self.mask_on: + features = [features[f] for f in self.in_features] + x = self._shared_roi_transform(features, [x.pred_boxes for x in instances]) + mask_logits = self.mask_head(x) + mask_rcnn_inference(mask_logits, instances) + return instances + + +@ROI_HEADS_REGISTRY.register() +class StandardROIHeads(ROIHeads): + """ + It's "standard" in a sense that there is no ROI transform sharing + or feature sharing between tasks. + The cropped rois go to separate branches (boxes and masks) directly. + This way, it is easier to make separate abstractions for different branches. + + This class is used by most models, such as FPN and C5. + To implement more models, you can subclass it and implement a different + :meth:`forward()` or a head. 
+ """ + + def __init__(self, cfg, input_shape): + super(StandardROIHeads, self).__init__(cfg, input_shape) + self._init_box_head(cfg) + self._init_mask_head(cfg) + self._init_keypoint_head(cfg) + + def _init_box_head(self, cfg): + # fmt: off + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + # fmt: on + + # If StandardROIHeads is applied on multiple feature maps (as in FPN), + # then we share the same predictors and therefore the channel counts must be the same + in_channels = [self.feature_channels[f] for f in self.in_features] + # Check all channel counts are equal + assert len(set(in_channels)) == 1, in_channels + in_channels = in_channels[0] + + self.box_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + # Here we split "box head" and "box predictor", which is mainly due to historical reasons. + # They are used together so the "box predictor" layers should be part of the "box head". + # New subclasses of ROIHeads do not need "box predictor"s. + self.box_head = build_box_head( + cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) + ) + self.box_predictor = FastRCNNOutputLayers( + self.box_head.output_size, self.num_classes, self.cls_agnostic_bbox_reg + ) + + def _init_mask_head(self, cfg): + # fmt: off + self.mask_on = cfg.MODEL.MASK_ON + if not self.mask_on: + return + pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features) + sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE + # fmt: on + + in_channels = [self.feature_channels[f] for f in self.in_features][0] + + self.mask_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + self.mask_head = build_mask_head( + cfg, ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution) + ) + + def _init_keypoint_head(self, cfg): + # fmt: off + self.keypoint_on = cfg.MODEL.KEYPOINT_ON + if not self.keypoint_on: + return + pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features) # noqa + sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE + self.normalize_loss_by_visible_keypoints = cfg.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS # noqa + self.keypoint_loss_weight = cfg.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT + # fmt: on + + in_channels = [self.feature_channels[f] for f in self.in_features][0] + + self.keypoint_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + self.keypoint_head = build_keypoint_head( + cfg, ShapeSpec(channels=in_channels, width=pooler_resolution, height=pooler_resolution) + ) + + def forward(self, images, features, proposals, targets=None): + """ + See :class:`ROIHeads.forward`. 
+ """ + del images + if self.training: + proposals = self.label_and_sample_proposals(proposals, targets) + del targets + + features_list = [features[f] for f in self.in_features] + + if self.training: + losses = self._forward_box(features_list, proposals) + # During training the proposals used by the box head are + # used by the mask, keypoint (and densepose) heads. + losses.update(self._forward_mask(features_list, proposals)) + losses.update(self._forward_keypoint(features_list, proposals)) + return proposals, losses + else: + pred_instances = self._forward_box(features_list, proposals) + # During inference cascaded prediction is used: the mask and keypoints heads are only + # applied to the top scoring box detections. + pred_instances = self.forward_with_given_boxes(features, pred_instances) + return pred_instances, {} + + def forward_with_given_boxes(self, features, instances): + """ + Use the given boxes in `instances` to produce other (non-box) per-ROI outputs. + + This is useful for downstream tasks where a box is known, but need to obtain + other attributes (outputs of other heads). + Test-time augmentation also uses this. + + Args: + features: same as in `forward()` + instances (list[Instances]): instances to predict other outputs. Expect the keys + "pred_boxes" and "pred_classes" to exist. + + Returns: + instances (Instances): + the same `Instances` object, with extra + fields such as `pred_masks` or `pred_keypoints`. + """ + assert not self.training + assert instances[0].has("pred_boxes") and instances[0].has("pred_classes") + features = [features[f] for f in self.in_features] + + instances = self._forward_mask(features, instances) + instances = self._forward_keypoint(features, instances) + return instances + + def _forward_box(self, features, proposals): + """ + Forward logic of the box prediction branch. + + Args: + features (list[Tensor]): #level input features for box prediction + proposals (list[Instances]): the per-image object proposals with + their matching ground truth. + Each has fields "proposal_boxes", and "objectness_logits", + "gt_classes", "gt_boxes". + + Returns: + In training, a dict of losses. + In inference, a list of `Instances`, the predicted instances. + """ + box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals]) + box_features = self.box_head(box_features) + pred_class_logits, pred_proposal_deltas = self.box_predictor(box_features) + del box_features + + outputs = FastRCNNOutputs( + self.box2box_transform, + pred_class_logits, + pred_proposal_deltas, + proposals, + self.smooth_l1_beta, + ) + if self.training: + return outputs.losses() + else: + pred_instances, _ = outputs.inference( + self.test_score_thresh, self.test_nms_thresh, self.test_detections_per_img + ) + return pred_instances + + def _forward_mask(self, features, instances): + """ + Forward logic of the mask prediction branch. + + Args: + features (list[Tensor]): #level input features for mask prediction + instances (list[Instances]): the per-image instances to train/predict masks. + In training, they can be the proposals. + In inference, they can be the predicted boxes. + + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "pred_masks" and return it. + """ + if not self.mask_on: + return {} if self.training else instances + + if self.training: + # The loss is only defined on positive proposals. 
+ proposals, _ = select_foreground_proposals(instances, self.num_classes) + proposal_boxes = [x.proposal_boxes for x in proposals] + mask_features = self.mask_pooler(features, proposal_boxes) + mask_logits = self.mask_head(mask_features) + return {"loss_mask": mask_rcnn_loss(mask_logits, proposals)} + else: + pred_boxes = [x.pred_boxes for x in instances] + mask_features = self.mask_pooler(features, pred_boxes) + mask_logits = self.mask_head(mask_features) + mask_rcnn_inference(mask_logits, instances) + return instances + + def _forward_keypoint(self, features, instances): + """ + Forward logic of the keypoint prediction branch. + + Args: + features (list[Tensor]): #level input features for keypoint prediction + instances (list[Instances]): the per-image instances to train/predict keypoints. + In training, they can be the proposals. + In inference, they can be the predicted boxes. + + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "pred_keypoints" and return it. + """ + if not self.keypoint_on: + return {} if self.training else instances + + num_images = len(instances) + + if self.training: + # The loss is defined on positive proposals with at >=1 visible keypoints. + proposals, _ = select_foreground_proposals(instances, self.num_classes) + proposals = select_proposals_with_visible_keypoints(proposals) + proposal_boxes = [x.proposal_boxes for x in proposals] + + keypoint_features = self.keypoint_pooler(features, proposal_boxes) + keypoint_logits = self.keypoint_head(keypoint_features) + + normalizer = ( + num_images + * self.batch_size_per_image + * self.positive_sample_fraction + * keypoint_logits.shape[1] + ) + loss = keypoint_rcnn_loss( + keypoint_logits, + proposals, + normalizer=None if self.normalize_loss_by_visible_keypoints else normalizer, + ) + return {"loss_keypoint": loss * self.keypoint_loss_weight} + else: + pred_boxes = [x.pred_boxes for x in instances] + keypoint_features = self.keypoint_pooler(features, pred_boxes) + keypoint_logits = self.keypoint_head(keypoint_features) + keypoint_rcnn_inference(keypoint_logits, instances) + return instances + + +@ROI_HEADS_REGISTRY.register() +class RROIHeads(StandardROIHeads): + """ + This class is used by Rotated RPN (RRPN). + For now, it just supports box_head but not mask or keypoints. 
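# A worked example (hypothetical config values) of the keypoint loss normalizer
# computed in _forward_keypoint above when NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is
# False: it is the total number of sampled keypoint "slots" in the batch, regardless
# of how many keypoints are actually visible.
num_images = 2
batch_size_per_image = 512       # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE (assumed)
positive_sample_fraction = 0.25  # cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION (assumed)
num_keypoints = 17               # keypoint_logits.shape[1], e.g. COCO person keypoints

normalizer = num_images * batch_size_per_image * positive_sample_fraction * num_keypoints
print(normalizer)  # 4352.0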
+ """ + + def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): + super(RROIHeads, self).__init__(cfg, input_shape) + self.box2box_transform = Box2BoxTransformRotated( + weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS + ) + + def _init_box_head(self, cfg): + # fmt: off + pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION + pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features) + sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO + pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE + # fmt: on + + # If StandardROIHeads is applied on multiple feature maps (as in FPN), + # then we share the same predictors and therefore the channel counts must be the same + in_channels = [self.feature_channels[f] for f in self.in_features] + # Check all channel counts are equal + assert len(set(in_channels)) == 1, in_channels + in_channels = in_channels[0] + + self.box_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type=pooler_type, + ) + self.box_head = build_box_head( + cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution) + ) + + self.box_predictor = FastRCNNOutputLayers( + input_size=self.box_head.output_size, + num_classes=self.num_classes, + cls_agnostic_bbox_reg=self.cls_agnostic_bbox_reg, + box_dim=5, + ) + + @torch.no_grad() + def label_and_sample_proposals(self, proposals, targets): + """ + Prepare some proposals to be used to train the RROI heads. + It performs box matching between `proposals` and `targets`, and assign + training labels to the lproposals. + It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes, + with a fraction of positives that is no larger than `self.positive_sample_fraction. + + Args: + See :meth:`StandardROIHeads.forward` + + Returns: + list[Instances]: length `N` list of `Instances`s containing the proposals + sampled for training. Each `Instances` has the following fields: + - proposal_boxes: the proposal rotated boxes + - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to + (this is only meaningful if the proposal has a label > 0; if label = 0 + then the ground-truth box is random) + - other fields such as "gt_classes" and "gt_masks" that are included in `targets`. + """ + gt_boxes = [x.gt_boxes for x in targets] + # Augment proposals with ground-truth boxes. + # In the case of learned proposals (e.g., RPN), in the beginning of training + # the proposals are of low quality due to random initialization. + # It's possible that none of these initial + # proposals have high enough overlap with the gt objects to be used + # as positive examples for the second stage components (box head, + # cls head, mask head). Adding the gt boxes to the set of proposals + # ensures that the second stage components will have some positive + # examples from the start of training. For RPN, this augmentation improves + # convergence and empirically improves box AP on COCO by about 0.5 + # points (under one tested configuration). 
+ proposals = add_ground_truth_to_proposals(gt_boxes, proposals) + + proposals_with_gt = [] + + num_fg_samples = [] + num_bg_samples = [] + for proposals_per_image, targets_per_image in zip(proposals, targets): + has_gt = len(targets_per_image) > 0 + match_quality_matrix = pairwise_iou_rotated( + targets_per_image.gt_boxes, proposals_per_image.proposal_boxes + ) + matched_idxs, proposals_labels = self.proposal_matcher(match_quality_matrix) + + # Get the corresponding GT for each proposal + if has_gt: + gt_classes = targets_per_image.gt_classes[matched_idxs] + # Label unmatched proposals (0 label from matcher) as background (label=num_classes) + gt_classes[proposals_labels == 0] = self.num_classes + # Label ignore proposals (-1 label) + gt_classes[proposals_labels == -1] = -1 + else: + gt_classes = torch.zeros_like(matched_idxs) + self.num_classes + + sampled_fg_inds, sampled_bg_inds = subsample_labels( + gt_classes, + self.batch_size_per_image, + self.positive_sample_fraction, + self.num_classes, + ) + + sampled_inds = torch.cat([sampled_fg_inds, sampled_bg_inds], dim=0) + + proposals_per_image = proposals_per_image[sampled_inds] + proposals_per_image.gt_classes = gt_classes[sampled_inds] + + if has_gt: + sampled_targets = matched_idxs[sampled_inds] + proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets] + else: + gt_boxes = RotatedBoxes( + targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_inds), 5)) + ) + proposals_per_image.gt_boxes = gt_boxes + + num_fg_samples.append(sampled_fg_inds.numel()) + num_bg_samples.append(sampled_bg_inds.numel()) + proposals_with_gt.append(proposals_per_image) + + # Log the number of fg/bg samples that are selected for training ROI heads + storage = get_event_storage() + storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) + storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) + + return proposals_with_gt diff --git a/detectron2/modeling/sampling.py b/detectron2/modeling/sampling.py new file mode 100644 index 0000000000..47ab8f4e3f --- /dev/null +++ b/detectron2/modeling/sampling.py @@ -0,0 +1,45 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch + + +def subsample_labels(labels, num_samples, positive_fraction, bg_label): + """ + Return `num_samples` random samples from `labels`, with a fraction of + positives no larger than `positive_fraction`. + + Args: + labels (Tensor): (N, ) label vector with values: + -1: ignore + bg_label: background ("negative") class + otherwise: one or more foreground ("positive") classes + num_samples (int): The total number of labels with value >= 0 to return. + Values that are not sampled will be filled with -1 (ignore). + positive_fraction (float): The number of subsampled labels with values > 0 + is `min(num_positives, int(positive_fraction * num_samples))`. The number + of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`. + In order words, if there are not enough positives, the sample is filled with + negatives. If there are also not enough negatives, then as many elements are + sampled as is possible. + bg_label (int): label index of background ("negative") class. + + Returns: + pos_idx, neg_idx (Tensor): 1D indices. The total number of indices is `num_samples` + if possible. The fraction of positive indices is `positive_fraction` if possible. 
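# A usage sketch (hypothetical labels) of subsample_labels, whose implementation
# follows below: with num_samples=8 and positive_fraction=0.25, at most 2 positives
# are drawn and the remaining slots are filled with background indices.
import torch
from detectron2.modeling.sampling import subsample_labels

bg = 80  # background label, e.g. num_classes
labels = torch.tensor([-1, 3, 7, bg, bg, bg, bg, bg, bg, 12, bg, bg])
pos_idx, neg_idx = subsample_labels(labels, num_samples=8, positive_fraction=0.25, bg_label=bg)
print(pos_idx.numel(), neg_idx.numel())  # 2 6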
+ """ + positive = torch.nonzero((labels != -1) & (labels != bg_label)).squeeze(1) + negative = torch.nonzero(labels == bg_label).squeeze(1) + + num_pos = int(num_samples * positive_fraction) + # protect against not enough positive examples + num_pos = min(positive.numel(), num_pos) + num_neg = num_samples - num_pos + # protect against not enough negative examples + num_neg = min(negative.numel(), num_neg) + + # randomly select positive and negative examples + perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] + perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] + + pos_idx = positive[perm1] + neg_idx = negative[perm2] + return pos_idx, neg_idx diff --git a/detectron2/modeling/test_time_augmentation.py b/detectron2/modeling/test_time_augmentation.py new file mode 100644 index 0000000000..6ba205591e --- /dev/null +++ b/detectron2/modeling/test_time_augmentation.py @@ -0,0 +1,244 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import numpy as np +from contextlib import contextmanager +from itertools import count +import torch +from torch import nn + +from detectron2.data.detection_utils import read_image +from detectron2.data.transforms import ResizeShortestEdge +from detectron2.structures import Instances + +from .meta_arch import GeneralizedRCNN +from .postprocessing import detector_postprocess +from .roi_heads.fast_rcnn import fast_rcnn_inference_single_image + +__all__ = ["DatasetMapperTTA", "GeneralizedRCNNWithTTA"] + + +class DatasetMapperTTA: + """ + Implement test-time augmentation for detection data. + It is a callable which takes a dataset dict from a detection dataset, + and returns a list of dataset dicts where the images + are augmented from the input image by the transformations defined in the config. + This is used for test-time augmentation. + """ + + def __init__(self, cfg): + self.min_sizes = cfg.TEST.AUG.MIN_SIZES + self.max_size = cfg.TEST.AUG.MAX_SIZE + self.flip = cfg.TEST.AUG.FLIP + self.image_format = cfg.INPUT.FORMAT + + def __call__(self, dataset_dict): + """ + Args: + dict: a detection dataset dict + + Returns: + list[dict]: + a list of dataset dicts, which contain augmented version of the input image. + The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``. + """ + ret = [] + if "image" not in dataset_dict: + numpy_image = read_image(dataset_dict["file_name"], self.image_format) + else: + numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy().astype("uint8") + for min_size in self.min_sizes: + image = np.copy(numpy_image) + tfm = ResizeShortestEdge(min_size, self.max_size).get_transform(image) + resized = tfm.apply_image(image) + resized = torch.as_tensor(resized.transpose(2, 0, 1).astype("float32")) + + dic = copy.deepcopy(dataset_dict) + dic["horiz_flip"] = False + dic["image"] = resized + ret.append(dic) + + if self.flip: + dic = copy.deepcopy(dataset_dict) + dic["horiz_flip"] = True + dic["image"] = torch.flip(resized, dims=[2]) + ret.append(dic) + return ret + + +class GeneralizedRCNNWithTTA(nn.Module): + """ + A GeneralizedRCNN with test-time augmentation enabled. + Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`. + """ + + def __init__(self, cfg, model, tta_mapper=None, batch_size=3): + """ + Args: + cfg (CfgNode): + model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on. + tta_mapper (callable): takes a dataset dict and returns a list of + augmented versions of the dataset dict. 
Defaults to + `DatasetMapperTTA(cfg)`. + batch_size (int): batch the augmented images into this batch size for inference. + """ + super().__init__() + assert isinstance( + model, GeneralizedRCNN + ), "TTA is only supported on GeneralizedRCNN. Got a model of type {}".format(type(model)) + self.cfg = cfg.clone() + assert not self.cfg.MODEL.KEYPOINT_ON, "TTA for keypoint is not supported yet" + assert ( + not self.cfg.MODEL.LOAD_PROPOSALS + ), "TTA for pre-computed proposals is not supported yet" + + self.model = model + + if tta_mapper is None: + tta_mapper = DatasetMapperTTA(cfg) + self.tta_mapper = tta_mapper + self.batch_size = batch_size + + @contextmanager + def _turn_off_roi_head(self, attr): + """ + Open a context where one head in `model.roi_heads` is temporarily turned off. + Args: + attr (str): the attribute in `model.roi_heads` which can be used + to turn off a specific head, e.g., "mask_on", "keypoint_on". + """ + roi_heads = self.model.roi_heads + try: + old = getattr(roi_heads, attr) + except AttributeError: + # The head may not be implemented in certain ROIHeads + old = None + + if old is None: + yield + else: + setattr(roi_heads, attr, False) + yield + setattr(roi_heads, attr, old) + + def _batch_inference(self, batched_inputs, detected_instances=None, do_postprocess=True): + """ + Execute inference on a list of inputs, + using batch size = self.batch_size, instead of the length of the list. + + Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference` + """ + if detected_instances is None: + detected_instances = [None] * len(batched_inputs) + + outputs = [] + inputs, instances = [], [] + for idx, input, instance in zip(count(), batched_inputs, detected_instances): + inputs.append(input) + instances.append(instance) + if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1: + outputs.extend( + self.model.inference( + inputs, + instances if instances[0] is not None else None, + do_postprocess=do_postprocess, + ) + ) + inputs, instances = [], [] + return outputs + + def __call__(self, batched_inputs): + """ + Same input/output format as :meth:`GeneralizedRCNN.forward` + """ + return [self._inference_one_image(x) for x in batched_inputs] + + def _inference_one_image(self, input): + """ + Args: + input (dict): one dataset dict + + Returns: + dict: one output dict + """ + augmented_inputs = self.tta_mapper(input) + + do_hflip = [k.pop("horiz_flip", False) for k in augmented_inputs] + heights = [k["height"] for k in augmented_inputs] + widths = [k["width"] for k in augmented_inputs] + assert ( + len(set(heights)) == 1 and len(set(widths)) == 1 + ), "Augmented version of the inputs should have the same original resolution!" + height = heights[0] + width = widths[0] + + # 1. 
Detect boxes from all augmented versions + # 1.1: forward with all augmented images + with self._turn_off_roi_head("mask_on"), self._turn_off_roi_head("keypoint_on"): + # temporarily disable mask/keypoint head + outputs = self._batch_inference(augmented_inputs, do_postprocess=False) + # 1.2: union the results + all_boxes = [] + all_scores = [] + all_classes = [] + for idx, output in enumerate(outputs): + rescaled_output = detector_postprocess(output, height, width) + pred_boxes = rescaled_output.pred_boxes.tensor + if do_hflip[idx]: + pred_boxes[:, [0, 2]] = width - pred_boxes[:, [2, 0]] + all_boxes.append(pred_boxes) + all_scores.extend(rescaled_output.scores) + all_classes.extend(rescaled_output.pred_classes) + all_boxes = torch.cat(all_boxes, dim=0).cpu() + num_boxes = len(all_boxes) + + # 1.3: select from the union of all results + num_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES + all_scores_2d = torch.zeros(num_boxes, num_classes, device=all_boxes.device) + for idx, cls, score in zip(count(), all_classes, all_scores): + all_scores_2d[idx, cls] = score + + merged_instances, _ = fast_rcnn_inference_single_image( + all_boxes, + all_scores_2d, + (height, width), + 1e-8, + self.cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST, + self.cfg.TEST.DETECTIONS_PER_IMAGE, + ) + + if not self.cfg.MODEL.MASK_ON: + return {"instances": merged_instances} + + # 2. Use the detected boxes to obtain masks + # 2.1: rescale the detected boxes + augmented_instances = [] + for idx, input in enumerate(augmented_inputs): + actual_height, actual_width = input["image"].shape[1:3] + scale_x = actual_width * 1.0 / width + scale_y = actual_height * 1.0 / height + pred_boxes = merged_instances.pred_boxes.clone() + pred_boxes.tensor[:, 0::2] *= scale_x + pred_boxes.tensor[:, 1::2] *= scale_y + if do_hflip[idx]: + pred_boxes.tensor[:, [0, 2]] = actual_width - pred_boxes.tensor[:, [2, 0]] + + aug_instances = Instances( + image_size=(actual_height, actual_width), + pred_boxes=pred_boxes, + pred_classes=merged_instances.pred_classes, + scores=merged_instances.scores, + ) + augmented_instances.append(aug_instances) + # 2.2: run forward on the detected boxes + outputs = self._batch_inference(augmented_inputs, augmented_instances, do_postprocess=False) + for idx, output in enumerate(outputs): + if do_hflip[idx]: + output.pred_masks = output.pred_masks.flip(dims=[3]) + # 2.3: average the predictions + all_pred_masks = torch.stack([o.pred_masks for o in outputs], dim=0) + avg_pred_masks = torch.mean(all_pred_masks, dim=0) + output = outputs[0] + output.pred_masks = avg_pred_masks + output = detector_postprocess(output, height, width) + return {"instances": output} diff --git a/detectron2/solver/__init__.py b/detectron2/solver/__init__.py new file mode 100644 index 0000000000..10f84e12d0 --- /dev/null +++ b/detectron2/solver/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .build import build_lr_scheduler, build_optimizer +from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/detectron2/solver/build.py b/detectron2/solver/build.py new file mode 100644 index 0000000000..af685c93ce --- /dev/null +++ b/detectron2/solver/build.py @@ -0,0 +1,60 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from typing import Any, Dict, List +import torch + +from detectron2.config import CfgNode + +from .lr_scheduler import WarmupCosineLR, WarmupMultiStepLR + + +def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer: + """ + Build an optimizer from config. + """ + params: List[Dict[str, Any]] = [] + for key, value in model.named_parameters(): + if not value.requires_grad: + continue + lr = cfg.SOLVER.BASE_LR + weight_decay = cfg.SOLVER.WEIGHT_DECAY + if key.endswith("norm.weight") or key.endswith("norm.bias"): + weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM + elif key.endswith(".bias"): + # NOTE: unlike Detectron v1, we now default BIAS_LR_FACTOR to 1.0 + # and WEIGHT_DECAY_BIAS to WEIGHT_DECAY so that bias optimizer + # hyperparameters are by default exactly the same as for regular + # weights. + lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR + weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS + params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] + + optimizer = torch.optim.SGD(params, lr, momentum=cfg.SOLVER.MOMENTUM) + return optimizer + + +def build_lr_scheduler( + cfg: CfgNode, optimizer: torch.optim.Optimizer +) -> torch.optim.lr_scheduler._LRScheduler: + """ + Build a LR scheduler from config. + """ + name = cfg.SOLVER.LR_SCHEDULER_NAME + if name == "WarmupMultiStepLR": + return WarmupMultiStepLR( + optimizer, + cfg.SOLVER.STEPS, + cfg.SOLVER.GAMMA, + warmup_factor=cfg.SOLVER.WARMUP_FACTOR, + warmup_iters=cfg.SOLVER.WARMUP_ITERS, + warmup_method=cfg.SOLVER.WARMUP_METHOD, + ) + elif name == "WarmupCosineLR": + return WarmupCosineLR( + optimizer, + cfg.SOLVER.MAX_ITER, + warmup_factor=cfg.SOLVER.WARMUP_FACTOR, + warmup_iters=cfg.SOLVER.WARMUP_ITERS, + warmup_method=cfg.SOLVER.WARMUP_METHOD, + ) + else: + raise ValueError("Unknown LR scheduler: {}".format(name)) diff --git a/detectron2/solver/lr_scheduler.py b/detectron2/solver/lr_scheduler.py new file mode 100644 index 0000000000..7c88689002 --- /dev/null +++ b/detectron2/solver/lr_scheduler.py @@ -0,0 +1,116 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import math +from bisect import bisect_right +from typing import List +import torch + +# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes +# only on epoch boundaries. We typically use iteration based schedules instead. +# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean +# "iteration" instead. + +# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating +# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it. + + +class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): + def __init__( + self, + optimizer: torch.optim.Optimizer, + milestones: List[int], + gamma: float = 0.1, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + ): + if not list(milestones) == sorted(milestones): + raise ValueError( + "Milestones should be a list of" " increasing integers. 
Got {}", milestones + ) + self.milestones = milestones + self.gamma = gamma + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + return [ + base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() + + +class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler): + def __init__( + self, + optimizer: torch.optim.Optimizer, + max_iters: int, + warmup_factor: float = 0.001, + warmup_iters: int = 1000, + warmup_method: str = "linear", + last_epoch: int = -1, + ): + self.max_iters = max_iters + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + super().__init__(optimizer, last_epoch) + + def get_lr(self) -> List[float]: + warmup_factor = _get_warmup_factor_at_iter( + self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor + ) + # Different definitions of half-cosine with warmup are possible. For + # simplicity we multiply the standard half-cosine schedule by the warmup + # factor. An alternative is to start the period of the cosine at warmup_iters + # instead of at 0. In the case that warmup_iters << max_iters the two are + # very close to each other. + return [ + base_lr + * warmup_factor + * 0.5 + * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters)) + for base_lr in self.base_lrs + ] + + def _compute_values(self) -> List[float]: + # The new interface + return self.get_lr() + + +def _get_warmup_factor_at_iter( + method: str, iter: int, warmup_iters: int, warmup_factor: float +) -> float: + """ + Return the learning rate warmup factor at a specific iteration. + See https://arxiv.org/abs/1706.02677 for more details. + + Args: + method (str): warmup method; either "constant" or "linear". + iter (int): iteration at which to calculate the warmup factor. + warmup_iters (int): the number of warmup iterations. + warmup_factor (float): the base warmup factor (the meaning changes according + to the method used). + + Returns: + float: the effective warmup factor at the given iteration. + """ + if iter >= warmup_iters: + return 1.0 + + if method == "constant": + return warmup_factor + elif method == "linear": + alpha = iter / warmup_iters + return warmup_factor * (1 - alpha) + alpha + else: + raise ValueError("Unknown warmup method: {}".format(method)) diff --git a/detectron2/structures/__init__.py b/detectron2/structures/__init__.py new file mode 100644 index 0000000000..9237e1427e --- /dev/null +++ b/detectron2/structures/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from .boxes import Boxes, BoxMode, pairwise_iou +from .image_list import ImageList +from .instances import Instances +from .keypoints import Keypoints, heatmaps_to_keypoints +from .masks import BitMasks, PolygonMasks, rasterize_polygons_within_box +from .rotated_boxes import RotatedBoxes +from .rotated_boxes import pairwise_iou as pairwise_iou_rotated + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/detectron2/structures/boxes.py b/detectron2/structures/boxes.py new file mode 100644 index 0000000000..d1f1887feb --- /dev/null +++ b/detectron2/structures/boxes.py @@ -0,0 +1,291 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +from enum import Enum, unique +from typing import Iterator, List, Tuple, Union +import torch + +from detectron2.layers import cat + +_RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray] + + +@unique +class BoxMode(Enum): + """ + Enum of different ways to represent a box. + + Attributes: + + XYXY_ABS: (x0, y0, x1, y1) in absolute floating points coordinates. + The coordinates in range [0, width or height]. + XYWH_ABS: (x0, y0, w, h) in absolute floating points coordinates. + XYXY_REL: (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image. + XYWH_REL: (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image. + """ + + XYXY_ABS = 0 + XYWH_ABS = 1 + XYXY_REL = 2 + XYWH_REL = 3 + + @staticmethod + def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType: + """ + Args: + box: can be a 4-tuple, 4-list or a Nx4 array/tensor. + from_mode, to_mode (BoxMode) + + Returns: + The converted box of the same type. + """ + if from_mode == to_mode: + return box + + original_type = type(box) + single_box = isinstance(box, (list, tuple)) + if single_box: + arr = np.array(box) + assert arr.shape == ( + 4, + ), "BoxMode.convert takes either a 4-tuple/list or a Nx4 array/tensor" + else: + arr = box + + assert to_mode.value < 2 and from_mode.value < 2, "Relative mode not yet supported!" + + original_shape = arr.shape + arr = arr.reshape(-1, 4) + if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS: + arr[:, 2] += arr[:, 0] + arr[:, 3] += arr[:, 1] + elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS: + arr[:, 2] -= arr[:, 0] + arr[:, 3] -= arr[:, 1] + else: + raise RuntimeError("Cannot be here!") + if single_box: + return original_type(arr.flatten()) + return arr.reshape(*original_shape) + + +class Boxes: + """ + This structure stores a list of boxes as a Nx4 torch.Tensor. + It supports some common methods about boxes + (`area`, `clip`, `nonempty`, etc), + and also behaves like a Tensor + (support indexing, `to(device)`, `.device`, and iteration over all boxes) + + Attributes: + tensor: float matrix of Nx4. + """ + + BoxSizeType = Union[List[int], Tuple[int, int]] + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2). + """ + device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + tensor = torch.zeros(0, 4, dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size() + + self.tensor = tensor + + def clone(self) -> "Boxes": + """ + Clone the Boxes. 
+ + Returns: + Boxes + """ + return Boxes(self.tensor.clone()) + + def to(self, device: str) -> "Boxes": + return Boxes(self.tensor.to(device)) + + def area(self) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + box = self.tensor + area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) + return area + + def clip(self, box_size: BoxSizeType) -> None: + """ + Clip (in place) the boxes by limiting x coordinates to the range [0, width] + and y coordinates to the range [0, height]. + + Args: + box_size (height, width): The clipping box's size. + """ + assert torch.isfinite(self.tensor).all() + h, w = box_size + self.tensor[:, 0].clamp_(min=0, max=w) + self.tensor[:, 1].clamp_(min=0, max=h) + self.tensor[:, 2].clamp_(min=0, max=w) + self.tensor[:, 3].clamp_(min=0, max=h) + + def nonempty(self, threshold: int = 0) -> torch.Tensor: + """ + Find boxes that are non-empty. + A box is considered empty, if either of its side is no larger than threshold. + + Returns: + Tensor: + a binary vector which represents whether each box is empty + (False) or non-empty (True). + """ + box = self.tensor + widths = box[:, 2] - box[:, 0] + heights = box[:, 3] - box[:, 1] + keep = (widths > threshold) & (heights > threshold) + return keep + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Boxes": + """ + Returns: + Boxes: Create a new :class:`Boxes` by indexing. + + The following usage are allowed: + 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box. + 2. `new_boxes = boxes[2:10]`: return a slice of boxes. + 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor + with `length = len(boxes)`. Nonzero elements in the vector will be selected. + + Note that the returned Boxes might share storage with this Boxes, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return Boxes(self.tensor[item].view(1, -1)) + b = self.tensor[item] + assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item) + return Boxes(b) + + def __len__(self) -> int: + return self.tensor.shape[0] + + def __repr__(self) -> str: + return "Boxes(" + str(self.tensor) + ")" + + def inside_box(self, box_size: BoxSizeType, boundary_threshold: int = 0) -> torch.Tensor: + """ + Args: + box_size (height, width): Size of the reference box. + boundary_threshold (int): Boxes that extend beyond the reference box + boundary by more than boundary_threshold are considered "outside". + + Returns: + a binary vector, indicating whether each box is inside the reference box. + """ + height, width = box_size + inds_inside = ( + (self.tensor[..., 0] >= -boundary_threshold) + & (self.tensor[..., 1] >= -boundary_threshold) + & (self.tensor[..., 2] < width + boundary_threshold) + & (self.tensor[..., 3] < height + boundary_threshold) + ) + return inds_inside + + def get_centers(self) -> torch.Tensor: + """ + Returns: + The box centers in a Nx2 array of (x, y). 
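# A short usage sketch (toy coordinates) of the BoxMode and Boxes structures defined
# above; both are exported from detectron2.structures in this patch.
import torch
from detectron2.structures import Boxes, BoxMode

xywh = [10.0, 10.0, 30.0, 20.0]  # (x0, y0, w, h)
xyxy = BoxMode.convert(xywh, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
print(xyxy)  # [10.0, 10.0, 40.0, 30.0]

boxes = Boxes(torch.tensor([[10.0, 10.0, 40.0, 30.0],
                            [-5.0, -5.0, 120.0, 90.0]]))
print(boxes.area())     # tensor([  600., 11875.])
boxes.clip((80, 100))   # clip to an image of (height, width) = (80, 100)
print(boxes.tensor[1])  # tensor([  0.,   0., 100.,  80.])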
+ """ + return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2 + + @staticmethod + def cat(boxes_list: List["Boxes"]) -> "Boxes": + """ + Concatenates a list of Boxes into a single Boxes + + Arguments: + boxes_list (list[Boxes]) + + Returns: + Boxes: the concatenated Boxes + """ + assert isinstance(boxes_list, (list, tuple)) + assert len(boxes_list) > 0 + assert all(isinstance(box, Boxes) for box in boxes_list) + + cat_boxes = type(boxes_list[0])(cat([b.tensor for b in boxes_list], dim=0)) + return cat_boxes + + @property + def device(self) -> str: + return self.tensor.device + + def __iter__(self) -> Iterator[torch.Tensor]: + """ + Yield a box as a Tensor of shape (4,) at a time. + """ + yield from self.tensor + + +# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py +# with slight modifications +def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Given two lists of boxes of size N and M, + compute the IoU (intersection over union) + between __all__ N x M pairs of boxes. + The box order must be (xmin, ymin, xmax, ymax). + + Args: + boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. + + Returns: + Tensor: IoU, sized [N,M]. + """ + area1 = boxes1.area() + area2 = boxes2.area() + + boxes1, boxes2 = boxes1.tensor, boxes2.tensor + + lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + wh = (rb - lt).clamp(min=0) # [N,M,2] + inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] + + # handle empty boxes + iou = torch.where( + inter > 0, + inter / (area1[:, None] + area2 - inter), + torch.zeros(1, dtype=inter.dtype, device=inter.device), + ) + return iou + + +def matched_boxlist_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: + """ + Compute pairwise intersection over union (IOU) of two sets of matched + boxes. The box order must be (xmin, ymin, xmax, ymax). + Similar to boxlist_iou, but computes only diagonal elements of the matrix + Arguments: + boxes1: (Boxes) bounding boxes, sized [N,4]. + boxes2: (Boxes) bounding boxes, sized [N,4]. + Returns: + (tensor) iou, sized [N]. + """ + assert len(boxes1) == len(boxes2), ( + "boxlists should have the same" + "number of entries, got {}, {}".format(len(boxes1), len(boxes2)) + ) + area1 = boxes1.area() # [N] + area2 = boxes2.area() # [N] + box1, box2 = boxes1.tensor, boxes2.tensor + lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2] + rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2] + wh = (rb - lt).clamp(min=0) # [N,2] + inter = wh[:, 0] * wh[:, 1] # [N] + iou = inter / (area1 + area2 - inter) # [N] + return iou diff --git a/detectron2/structures/image_list.py b/detectron2/structures/image_list.py new file mode 100644 index 0000000000..de2603b26d --- /dev/null +++ b/detectron2/structures/image_list.py @@ -0,0 +1,96 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +from __future__ import division +from typing import Any, List, Sequence, Tuple, Union +import torch +from torch.nn import functional as F + + +class ImageList(object): + """ + Structure that holds a list of images (of possibly + varying sizes) as a single tensor. 
+ This works by padding the images to the same size, + and storing in a field the original sizes of each image + + Attributes: + image_sizes (list[tuple[int, int]]): each tuple is (h, w) + """ + + def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]): + """ + Arguments: + tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1 + image_sizes (list[tuple[int, int]]): Each tuple is (h, w). + """ + self.tensor = tensor + self.image_sizes = image_sizes + + def __len__(self) -> int: + return len(self.image_sizes) + + def __getitem__(self, idx: Union[int, slice]) -> torch.Tensor: + """ + Access the individual image in its original size. + + Returns: + Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1 + """ + size = self.image_sizes[idx] + return self.tensor[idx, ..., : size[0], : size[1]] # type: ignore + + def to(self, *args: Any, **kwargs: Any) -> "ImageList": + cast_tensor = self.tensor.to(*args, **kwargs) + return ImageList(cast_tensor, self.image_sizes) + + @staticmethod + def from_tensors( + tensors: Sequence[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0 + ) -> "ImageList": + """ + Args: + tensors: a tuple or list of `torch.Tensors`, each of shape (Hi, Wi) or + (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded with `pad_value` + so that they will have the same shape. + size_divisibility (int): If `size_divisibility > 0`, also adds padding to ensure + the common height and width is divisible by `size_divisibility` + pad_value (float): value to pad + + Returns: + an `ImageList`. + """ + assert len(tensors) > 0 + assert isinstance(tensors, (tuple, list)) + for t in tensors: + assert isinstance(t, torch.Tensor), type(t) + assert t.shape[1:-2] == tensors[0].shape[1:-2], t.shape + # per dimension maximum (H, W) or (C_1, ..., C_K, H, W) where K >= 1 among all tensors + max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors])) + + if size_divisibility > 0: + import math + + stride = size_divisibility + max_size = list(max_size) # type: ignore + max_size[-2] = int(math.ceil(max_size[-2] / stride) * stride) # type: ignore + max_size[-1] = int(math.ceil(max_size[-1] / stride) * stride) # type: ignore + max_size = tuple(max_size) + + image_sizes = [im.shape[-2:] for im in tensors] + + if len(tensors) == 1: + # This seems slightly (2%) faster. + # TODO: check whether it's faster for multiple images as well + image_size = image_sizes[0] + padded = F.pad( + tensors[0], + [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]], + value=pad_value, + ) + batched_imgs = padded.unsqueeze_(0) + else: + batch_shape = (len(tensors),) + max_size + batched_imgs = tensors[0].new_full(batch_shape, pad_value) + for img, pad_img in zip(tensors, batched_imgs): + pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img) + + return ImageList(batched_imgs.contiguous(), image_sizes) diff --git a/detectron2/structures/instances.py b/detectron2/structures/instances.py new file mode 100644 index 0000000000..a1c939f12f --- /dev/null +++ b/detectron2/structures/instances.py @@ -0,0 +1,184 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import itertools +from typing import Any, Dict, List, Tuple, Union +import torch + +from detectron2.layers import cat + + +class Instances: + """ + This class represents a list of instances in an image. + It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields". 
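# A sketch of ImageList.from_tensors (defined above): two images of different sizes
# are padded into one batch tensor, with the padded shape rounded up to a multiple of
# size_divisibility; the original sizes are kept for later un-padding. Toy shapes.
import torch
from detectron2.structures import ImageList

images = [torch.rand(3, 480, 640), torch.rand(3, 500, 700)]
image_list = ImageList.from_tensors(images, size_divisibility=32)
print(image_list.tensor.shape)  # torch.Size([2, 3, 512, 704])
print(image_list.image_sizes)   # [torch.Size([480, 640]), torch.Size([500, 700])]
print(image_list[0].shape)      # torch.Size([3, 480, 640]): original, un-padded view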
+ All fields must have the same `__len__` which is the number of instances. + + All other (non-field) attributes of this class are considered private: + they must start with '_' and are not modifiable by a user. + + Some basic usage: + + 1. Set/Get a field: + instances.gt_boxes = Boxes(...) + print(instances.pred_masks) + print('gt_masks' in instances) + 2. `len(instances)` returns the number of instances + 3. Indexing: `instances[indices]` will apply the indexing on all the fields + and returns a new `Instances`. + Typically, `indices` is a binary vector of length num_instances, + or a vector of integer indices. + """ + + def __init__(self, image_size: Tuple[int, int], **kwargs: Any): + """ + Args: + image_size (height, width): the spatial size of the image. + kwargs: fields to add to this `Instances`. + """ + self._image_size = image_size + self._fields: Dict[str, Any] = {} + for k, v in kwargs.items(): + self.set(k, v) + + @property + def image_size(self) -> Tuple[int, int]: + """ + Returns: + tuple: height, width + """ + return self._image_size + + def __setattr__(self, name: str, val: Any) -> None: + if name.startswith("_"): + super().__setattr__(name, val) + else: + self.set(name, val) + + def __getattr__(self, name: str) -> Any: + if name == "_fields" or name not in self._fields: + raise AttributeError("Cannot find field '{}' in the given Instances!".format(name)) + return self._fields[name] + + def set(self, name: str, value: Any) -> None: + """ + Set the field named `name` to `value`. + The length of `value` must be the number of instances, + and must agree with other existing fields in this object. + """ + data_len = len(value) + if len(self._fields): + assert ( + len(self) == data_len + ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self)) + self._fields[name] = value + + def has(self, name: str) -> bool: + """ + Returns: + bool: whether the field called `name` exists. + """ + return name in self._fields + + def remove(self, name: str) -> None: + """ + Remove the field called `name`. + """ + del self._fields[name] + + def get(self, name: str) -> Any: + """ + Returns the field called `name`. + """ + return self._fields[name] + + def get_fields(self) -> Dict[str, Any]: + """ + Returns: + dict: a dict which maps names (str) to data of the fields + + Modifying the returned dict will modify this instance. + """ + return self._fields + + # Tensor-like methods + def to(self, device: str) -> "Instances": + """ + Returns: + Instances: all fields are called with a `to(device)`, if + the field has this method. + """ + ret = Instances(self._image_size) + for k, v in self._fields.items(): + if hasattr(v, "to"): + v = v.to(device) + ret.set(k, v) + return ret + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances": + """ + Args: + item: an index-like object and will be used to index all the fields. + + Returns: + If `item` is a string, return the data in the corresponding field. + Otherwise, returns an `Instances` where all fields are indexed by `item`. 
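# A short usage sketch (toy values) of the Instances structure defined above: fields
# are stored per instance, and indexing with a boolean mask filters every field
# consistently.
import torch
from detectron2.structures import Instances

inst = Instances((480, 640))  # (height, width) of the image
inst.scores = torch.tensor([0.9, 0.2, 0.75])
inst.pred_classes = torch.tensor([0, 5, 17])
print(len(inst), inst.has("scores"))  # 3 True

keep = inst.scores > 0.5
filtered = inst[keep]
print(len(filtered), filtered.pred_classes)  # 2 tensor([ 0, 17])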
+ """ + ret = Instances(self._image_size) + for k, v in self._fields.items(): + ret.set(k, v[item]) + return ret + + def __len__(self) -> int: + for v in self._fields.values(): + return len(v) + raise NotImplementedError("Empty Instances does not support __len__!") + + @staticmethod + def cat(instance_lists: List["Instances"]) -> "Instances": + """ + Args: + instance_lists (list[Instances]) + + Returns: + Instances + """ + assert all(isinstance(i, Instances) for i in instance_lists) + assert len(instance_lists) > 0 + if len(instance_lists) == 1: + return instance_lists[0] + + image_size = instance_lists[0].image_size + for i in instance_lists[1:]: + assert i.image_size == image_size + ret = Instances(image_size) + for k in instance_lists[0]._fields.keys(): + values = [i.get(k) for i in instance_lists] + v0 = values[0] + if isinstance(v0, torch.Tensor): + values = cat(values, dim=0) + elif isinstance(v0, list): + values = list(itertools.chain(*values)) + elif hasattr(type(v0), "cat"): + values = type(v0).cat(values) + else: + raise ValueError("Unsupported type {} for concatenation".format(type(v0))) + ret.set(k, values) + return ret + + def __str__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={}, ".format(len(self)) + s += "image_height={}, ".format(self._image_size[0]) + s += "image_width={}, ".format(self._image_size[1]) + s += "fields=[{}])".format(", ".join(self._fields.keys())) + return s + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={}, ".format(len(self)) + s += "image_height={}, ".format(self._image_size[0]) + s += "image_width={}, ".format(self._image_size[1]) + s += "fields=[" + for k, v in self._fields.items(): + s += "{} = {}, ".format(k, v) + s += "])" + return s diff --git a/detectron2/structures/keypoints.py b/detectron2/structures/keypoints.py new file mode 100644 index 0000000000..90a62ca6c3 --- /dev/null +++ b/detectron2/structures/keypoints.py @@ -0,0 +1,200 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +from typing import Any, List, Tuple, Union +import torch + +from detectron2.layers import interpolate + + +class Keypoints: + """ + Stores keypoint annotation data. GT Instances have a `gt_keypoints` property + containing the x,y location and visibility flag of each keypoint. This tensor has shape + (N, K, 3) where N is the number of instances and K is the number of keypoints per instance. + + The visiblity flag follows the COCO format and must be one of three integers: + * v=0: not labeled (in which case x=y=0) + * v=1: labeled but not visible + * v=2: labeled and visible + """ + + def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]): + """ + Arguments: + keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint. + The shape should be (N, K, 3) where N is the number of + instances, and K is the number of keypoints per instance. 
+ """ + device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu") + keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device) + assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape + self.tensor = keypoints + + def __len__(self) -> int: + return self.tensor.size(0) + + def to(self, *args: Any, **kwargs: Any) -> "Keypoints": + return type(self)(self.tensor.to(*args, **kwargs)) + + def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor: + """ + Arguments: + boxes: Nx4 tensor, the boxes to draw the keypoints to + + Returns: + heatmaps: A tensor of shape (N, K) containing an integer spatial label + in the range [0, heatmap_size**2 - 1] for each keypoint in the input. + valid: A tensor of shape (N, K) containing whether each keypoint is in + the roi or not. + """ + return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size) + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints": + """ + Create a new `Keypoints` by indexing on this `Keypoints`. + + The following usage are allowed: + 1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance. + 2. `new_kpts = kpts[2:10]`: return a slice of key points. + 3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor + with `length = len(kpts)`. Nonzero elements in the vector will be selected. + + Note that the returned Keypoints might share storage with this Keypoints, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return Keypoints([self.tensor[item]]) + return Keypoints(self.tensor[item]) + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + +# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop) +def _keypoints_to_heatmap( + keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space. + + Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the + closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the + continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"): + d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. + + Arguments: + keypoints: tensor of keypoint locations in of shape (N, K, 3). + rois: Nx4 tensor of rois in xyxy format + heatmap_size: integer side length of square heatmap. + + Returns: + heatmaps: A tensor of shape (N, K) containing an integer spatial label + in the range [0, heatmap_size**2 - 1] for each keypoint in the input. + valid: A tensor of shape (N, K) containing whether each keypoint is in + the roi or not. 
+ """ + + if rois.numel() == 0: + return rois.new().long(), rois.new().long() + offset_x = rois[:, 0] + offset_y = rois[:, 1] + scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) + scale_y = heatmap_size / (rois[:, 3] - rois[:, 1]) + + offset_x = offset_x[:, None] + offset_y = offset_y[:, None] + scale_x = scale_x[:, None] + scale_y = scale_y[:, None] + + x = keypoints[..., 0] + y = keypoints[..., 1] + + x_boundary_inds = x == rois[:, 2][:, None] + y_boundary_inds = y == rois[:, 3][:, None] + + x = (x - offset_x) * scale_x + x = x.floor().long() + y = (y - offset_y) * scale_y + y = y.floor().long() + + x[x_boundary_inds] = heatmap_size - 1 + y[y_boundary_inds] = heatmap_size - 1 + + valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size) + vis = keypoints[..., 2] > 0 + valid = (valid_loc & vis).long() + + lin_ind = y * heatmap_size + x + heatmaps = lin_ind * valid + + return heatmaps, valid + + +@torch.no_grad() +def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor: + """ + Args: + maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W) + rois (Tensor): (#ROIs, 4) + + Extract predicted keypoint locations from heatmaps. Output has shape + (#rois, #keypoints, 4) with the last dimension corresponding to (x, y, logit, prob) + for each keypoint. + + Converts a discrete image coordinate in an NxN image to a continuous keypoint coordinate. We + maintain consistency with keypoints_to_heatmap by using the conversion from Heckbert 1990: + c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. + """ + offset_x = rois[:, 0] + offset_y = rois[:, 1] + + widths = (rois[:, 2] - rois[:, 0]).clamp(min=1) + heights = (rois[:, 3] - rois[:, 1]).clamp(min=1) + widths_ceil = widths.ceil() + heights_ceil = heights.ceil() + + num_rois, num_keypoints = maps.shape[:2] + xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4) + + width_corrections = widths / widths_ceil + height_corrections = heights / heights_ceil + + keypoints_idx = torch.arange(num_keypoints, device=maps.device) + + for i in range(num_rois): + outsize = (int(heights_ceil[i]), int(widths_ceil[i])) + roi_map = interpolate(maps[[i]], size=outsize, mode="bicubic", align_corners=False).squeeze( + 0 + ) # #keypoints x H x W + + # softmax over the spatial region + max_score, _ = roi_map.view(num_keypoints, -1).max(1) + max_score = max_score.view(num_keypoints, 1, 1) + tmp_full_resoltuion = (roi_map - max_score).exp_() + tmp_pool_resoltuion = (maps[i] - max_score).exp_() + # Produce scores over the region H x W, but normalize with POOL_H x POOL_W + # So that the scores of objects of different absolute sizes will be more comparable + roi_map_probs = tmp_full_resoltuion / tmp_pool_resoltuion.sum((1, 2), keepdim=True) + + w = roi_map.shape[2] + pos = roi_map.view(num_keypoints, -1).argmax(1) + + x_int = pos % w + y_int = (pos - x_int) // w + + assert ( + roi_map_probs[keypoints_idx, y_int, x_int] + == roi_map_probs.view(num_keypoints, -1).max(1)[0] + ).all() + + x = (x_int.float() + 0.5) * width_corrections[i] + y = (y_int.float() + 0.5) * height_corrections[i] + + xy_preds[i, :, 0] = x + offset_x[i] + xy_preds[i, :, 1] = y + offset_y[i] + xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int] + xy_preds[i, :, 3] = roi_map_probs[keypoints_idx, y_int, x_int] + + return xy_preds diff --git a/detectron2/structures/masks.py b/detectron2/structures/masks.py new file mode 100644 index 0000000000..747df9954e --- /dev/null +++ b/detectron2/structures/masks.py @@ -0,0 +1,346 @@ +# Copyright (c) 
Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import numpy as np +from typing import Any, Iterator, List, Union +import pycocotools.mask as mask_utils +import torch + +from detectron2.layers.roi_align import ROIAlign + +from .boxes import Boxes + + +def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: + """ + Args: + polygons (list[ndarray]): each array has shape (Nx2,) + height, width (int) + + Returns: + ndarray: a bool mask of shape (height, width) + """ + assert len(polygons) > 0, "COCOAPI does not support empty polygons" + rles = mask_utils.frPyObjects(polygons, height, width) + rle = mask_utils.merge(rles) + return mask_utils.decode(rle).astype(np.bool) + + +def rasterize_polygons_within_box( + polygons: List[np.ndarray], box: np.ndarray, mask_size: int +) -> torch.Tensor: + """ + Rasterize the polygons into a mask image and + crop the mask content in the given box. + The cropped mask is resized to (mask_size, mask_size). + + This function is used when generating training targets for mask head in Mask R-CNN. + Given original ground-truth masks for an image, new ground-truth mask + training targets in the size of `mask_size x mask_size` + must be provided for each predicted box. This function will be called to + produce such targets. + + Args: + polygons (list[ndarray[float]]): a list of polygons, which represents an instance. + box: 4-element numpy array + mask_size (int): + + Returns: + Tensor: BoolTensor of shape (mask_size, mask_size) + """ + # 1. Shift the polygons w.r.t the boxes + w, h = box[2] - box[0], box[3] - box[1] + + polygons = copy.deepcopy(polygons) + for p in polygons: + p[0::2] = p[0::2] - box[0] + p[1::2] = p[1::2] - box[1] + + # 2. Rescale the polygons to the new box size + ratio_h = mask_size / max(h, 0.1) + ratio_w = mask_size / max(w, 0.1) + + if ratio_h == ratio_w: + for p in polygons: + p *= ratio_h + else: + for p in polygons: + p[0::2] *= ratio_w + p[1::2] *= ratio_h + + # 3. Rasterize the polygons with coco api + mask = polygons_to_bitmask(polygons, mask_size, mask_size) + mask = torch.from_numpy(mask) + return mask + + +class BitMasks: + """ + This class stores the segmentation masks for all objects in one image, in + the form of bitmaps. + + Attributes: + tensor: bool Tensor of N,H,W, representing N instances in the image. + """ + + def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): + """ + Args: + tensor: bool Tensor of N,H,W, representing N instances in the image. + """ + device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") + tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) + assert tensor.dim() == 3, tensor.size() + self.image_size = tensor.shape[1:] + self.tensor = tensor + + def to(self, device: str) -> "BitMasks": + return BitMasks(self.tensor.to(device)) + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": + """ + Returns: + BitMasks: Create a new :class:`BitMasks` by indexing. + + The following usage are allowed: + + 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. + 2. `new_masks = masks[2:10]`: return a slice of masks. + 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor + with `length = len(masks)`. Nonzero elements in the vector will be selected. + + Note that the returned object might share storage with this object, + subject to Pytorch's indexing semantics. 
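+
+        Example (illustrative only; the masks below are empty placeholders):
+
+        .. code-block:: python
+
+            masks = BitMasks(torch.zeros(5, 32, 32, dtype=torch.bool))
+            subset = masks[2:4]                                    # a BitMasks of length 2
+            kept = masks[torch.tensor([True, False, True, False, True])]
+            assert len(kept) == 3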
+ """ + if isinstance(item, int): + return BitMasks(self.tensor[item].view(1, -1)) + m = self.tensor[item] + assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( + item, m.shape + ) + return BitMasks(m) + + def __iter__(self) -> torch.Tensor: + yield from self.tensor + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.tensor)) + return s + + def __len__(self) -> int: + return self.tensor.shape[0] + + def nonempty(self) -> torch.Tensor: + """ + Find masks that are non-empty. + + Returns: + Tensor: a BoolTensor which represents + whether each mask is empty (False) or non-empty (True). + """ + return self.tensor.flatten(1).any(dim=1) + + @staticmethod + def from_polygon_masks( + polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int + ) -> "BitMasks": + """ + Args: + polygon_masks (list[list[ndarray]] or PolygonMasks) + height, width (int) + """ + if isinstance(polygon_masks, PolygonMasks): + polygon_masks = polygon_masks.polygons + masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] + return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) + + def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: + """ + Crop each bitmask by the given box, and resize results to (mask_size, mask_size). + This can be used to prepare training targets for Mask R-CNN. + It has less reconstruction error compared to rasterization with polygons. + However we observe no difference in accuracy, + but BitMasks requires more memory to store all the masks. + + Args: + boxes (Tensor): Nx4 tensor storing the boxes for each mask + mask_size (int): the size of the rasterized mask. + + Returns: + Tensor: + A bool tensor of shape (N, mask_size, mask_size), where + N is the number of predicted boxes for this image. + """ + assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) + device = self.tensor.device + + batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] + rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 + + bit_masks = self.tensor.to(dtype=torch.float32) + rois = rois.to(device=device) + output = ( + ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) + .forward(bit_masks[:, None, :, :], rois) + .squeeze(1) + ) + output = output >= 0.5 + return output + + def get_bounding_boxes(self) -> None: + # not needed now + raise NotImplementedError + + +class PolygonMasks: + """ + This class stores the segmentation masks for all objects in one image, in the form of polygons. + + Attributes: + polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. + """ + + def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): + """ + Arguments: + polygons (list[list[Tensor[float]]]): The first + level of the list correspond to individual instances, + the second level to all the polygons that compose the + instance, and the third level to the polygon coordinates. + The third level Tensor should have the format of + torch.Tensor([x0, y0, x1, y1, ..., xn, yn]) (n >= 3). + """ + assert isinstance(polygons, list) + + def _make_array(t: Union[torch.Tensor, np.ndarray]) -> torch.Tensor: + # Use float64 for higher precision, because why not? + # Always put polygons on CPU (self.to is a no-op) since they + # are supposed to be small tensors. 
+ # May need to change this assumption if GPU placement becomes useful + if isinstance(t, torch.Tensor): + t = t.cpu().numpy() + return np.asarray(t).astype("float64") + + def process_polygons( + polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] + ) -> List[torch.Tensor]: + assert isinstance(polygons_per_instance, list), type(polygons_per_instance) + # transform the polygon to a tensor + polygons_per_instance = [_make_array(p) for p in polygons_per_instance] + for polygon in polygons_per_instance: + assert len(polygon) % 2 == 0 and len(polygon) >= 6 + return polygons_per_instance + + self.polygons: List[List[torch.Tensor]] = [ + process_polygons(polygons_per_instance) for polygons_per_instance in polygons + ] + + def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": + return self + + def get_bounding_boxes(self) -> Boxes: + """ + Returns: + Boxes: tight bounding boxes around polygon masks. + """ + boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) + for idx, polygons_per_instance in enumerate(self.polygons): + minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) + maxxy = torch.zeros(2, dtype=torch.float32) + for polygon in polygons_per_instance: + coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) + minxy = torch.min(minxy, torch.min(coords, dim=0).values) + maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) + boxes[idx, :2] = minxy + boxes[idx, 2:] = maxxy + return Boxes(boxes) + + def nonempty(self) -> torch.Tensor: + """ + Find masks that are non-empty. + + Returns: + Tensor: a BoolTensor which represents + whether each mask is empty (False) or non-empty (True). + """ + keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] + return torch.as_tensor(keep, dtype=torch.bool) + + def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": + """ + Support indexing over the instances and return a `PolygonMasks` object. + `item` can be: + + 1. An integer. It will return an object with only one instance. + 2. A slice. It will return an object with the selected instances. + 3. A list[int]. It will return an object with the selected instances, + correpsonding to the indices in the list. + 4. A vector mask of type BoolTensor, whose length is num_instances. + It will return an object with the instances whose mask is nonzero. + """ + if isinstance(item, int): + selected_polygons = [self.polygons[item]] + elif isinstance(item, slice): + selected_polygons = self.polygons[item] + elif isinstance(item, list): + selected_polygons = [self.polygons[i] for i in item] + elif isinstance(item, torch.Tensor): + # Polygons is a list, so we have to move the indices back to CPU. + if item.dtype == torch.bool: + assert item.dim() == 1, item.shape + item = item.nonzero().squeeze(1).cpu().numpy().tolist() + elif item.dtype in [torch.int32, torch.int64]: + item = item.cpu().numpy().tolist() + else: + raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) + selected_polygons = [self.polygons[i] for i in item] + return PolygonMasks(selected_polygons) + + def __iter__(self) -> Iterator[List[torch.Tensor]]: + """ + Yields: + list[ndarray]: the polygons for one instance. Each Tensor is a + float64 vector representing a polygon. 
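+
+        Example (illustrative only; a single hypothetical triangle):
+
+        .. code-block:: python
+
+            masks = PolygonMasks([[np.array([0.0, 0.0, 10.0, 0.0, 10.0, 10.0])]])
+            for polygons_per_instance in masks:
+                assert len(polygons_per_instance) == 1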
+ """ + return iter(self.polygons) + + def __repr__(self) -> str: + s = self.__class__.__name__ + "(" + s += "num_instances={})".format(len(self.polygons)) + return s + + def __len__(self) -> int: + return len(self.polygons) + + def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: + """ + Crop each mask by the given box, and resize results to (mask_size, mask_size). + This can be used to prepare training targets for Mask R-CNN. + + Args: + boxes (Tensor): Nx4 tensor storing the boxes for each mask + mask_size (int): the size of the rasterized mask. + + Returns: + Tensor: + A bool tensor of shape (N, mask_size, mask_size), where + N is the number of predicted boxes for this image. + """ + assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) + + device = boxes.device + # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise + # (several small tensors for representing a single instance mask) + boxes = boxes.to(torch.device("cpu")) + + results = [ + rasterize_polygons_within_box(poly, box.numpy(), mask_size) + for poly, box in zip(self.polygons, boxes) + ] + """ + poly: list[list[float]], the polygons for one instance + box: a tensor of shape (4,) + """ + if len(results) == 0: + return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) + return torch.stack(results, dim=0).to(device=device) diff --git a/detectron2/structures/rotated_boxes.py b/detectron2/structures/rotated_boxes.py new file mode 100644 index 0000000000..de71d3cff9 --- /dev/null +++ b/detectron2/structures/rotated_boxes.py @@ -0,0 +1,404 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from typing import Iterator, List, Union +import torch + +from detectron2.layers import cat +from detectron2.layers.rotated_boxes import pairwise_iou_rotated + +from .boxes import Boxes + + +class RotatedBoxes(Boxes): + """ + This structure stores a list of rotated boxes as a Nx5 torch.Tensor. + It supports some common methods about boxes + (`area`, `clip`, `nonempty`, etc), + and also behaves like a Tensor + (support indexing, `to(device)`, `.device`, and iteration over all boxes) + """ + + def __init__(self, tensor: torch.Tensor): + """ + Args: + tensor (Tensor[float]): a Nx5 matrix. Each row is + (x_center, y_center, width, height, angle), + in which angle is represented in degrees. + While there's no strict range restriction for it, + the recommended principal range is between (-180, 180] degrees. + + Assume we have a horizontal box B = (x_center, y_center, width, height), + where width is along the x-axis and height is along the y-axis. + The rotated box B_rot (x_center, y_center, width, height, angle) + can be seen as: + + 1. When angle == 0: + B_rot == B + 2. When angle > 0: + B_rot is obtained by rotating B w.r.t its center by |angle| degrees CCW; + 3. When angle < 0: + B_rot is obtained by rotating B w.r.t its center by |angle| degrees CW. 
+ + Mathematically, since the right-handed coordinate system for image space + is (y, x), where y is top->down and x is left->right, the 4 vertices of the + rotated rectangle (yr_i, xr_i) (i = 1, 2, 3, 4) can be obtained from + the vertices of the horizontal rectangle (y_i, x_i) (i = 1, 2, 3, 4) + in the following way (theta = angle*pi/180 is the angle in radians, + (y_c, x_c) is the center of the rectangle): + yr_i = cos(theta) * (y_i - y_c) - sin(theta) * (x_i - x_c) + y_c, + xr_i = sin(theta) * (y_i - y_c) + cos(theta) * (x_i - x_c) + x_c, + which is the standard rigid-body rotation transformation. + + Intuitively, the angle is + (1) the rotation angle from y-axis in image space + to the height vector (top->down in the box's local coordinate system) + of the box in CCW, and + (2) the rotation angle from x-axis in image space + to the width vector (left->right in the box's local coordinate system) + of the box in CCW. + + More intuitively, consider the following horizontal box ABCD represented + in (x1, y1, x2, y2): + (3, 2, 7, 4), + covering the [3, 7] x [2, 4] region of the continuous coordinate system + which looks like this: + + O--------> x + | + | A---B + | | | + | D---C + | + v y + + Note that each capital letter represents one 0-dimensional geometric point + instead of a 'square pixel' here. + + In the example above, using (x, y) to represent a point we have: + O = (0, 0), + A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4) + + We name vector AB = vector DC as the width vector in box's local coordinate system, and + vector AD = vector BC as the height vector in box's local coordinate system. Initially, + when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis + in the image space, respectively. + + For better illustration, we denote the center of the box as E, + + O--------> x + | + | A---B + | | E | + | D---C + | + v y + + where the center E = ((3+7)/2, (2+4)/2) = (5, 3). + + Also, + width = |AB| = |CD| = 7 - 3 = 4, + height = |AD| = |BC| = 4 - 2 = 2. + + Therefore, the corresponding representation for the same shape in rotated box in + (x_center, y_center, width, height, angle) format is: + + (5, 3, 4, 2, 0), + + Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees + CCW (counter-clockwise) by definition. It looks like this: + + O--------> x + | B-C + | | | + | |E| + | | | + | A-D + v y + + The center E is still located at the same point (5, 3), while the vertices + ABCD are rotated by 90 degrees CCW with regard to E: + A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5) + + Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to + vector AD or vector BC (the top->down height vector in box's local coordinate system), + or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right + width vector in box's local coordinate system). + + width = |AB| = |CD| = 5 - 1 = 4, + height = |AD| = |BC| = 6 - 4 = 2. + + Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise) + by definition? It looks like this: + + O--------> x + | D-A + | | | + | |E| + | | | + | C-B + v y + + The center E is still located at the same point (5, 3), while the vertices + ABCD are rotated by 90 degrees CW with regard to E: + A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1) + + width = |AB| = |CD| = 5 - 1 = 4, + height = |AD| = |BC| = 6 - 4 = 2. + + This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU + will be 1. 
However, these two will generate different RoI Pooling results and + should not be treated as an identical box. + + On the other hand, it's easy to see that (X, Y, W, H, A) is identical to + (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be + identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is + equivalent to rotating the same shape 90 degrees CW. + + We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180): + + O--------> x + | + | C---D + | | E | + | B---A + | + v y + + A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2), + + width = |AB| = |CD| = 7 - 3 = 4, + height = |AD| = |BC| = 4 - 2 = 2. + + Finally, this is a very inaccurate (heavily quantized) illustration of + how (5, 3, 4, 2, 60) looks like in case anyone wonders: + + O--------> x + | B\ + | / C + | /E / + | A / + | `D + v y + + It's still a rectangle with center of (5, 3), width of 4 and height of 2, + but its angle (and thus orientation) is somewhere between + (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90). + """ + device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + tensor = torch.zeros(0, 5, dtype=torch.float32, device=device) + assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size() + + self.tensor = tensor + + def clone(self) -> "RotatedBoxes": + """ + Clone the RotatedBoxes. + + Returns: + RotatedBoxes + """ + return RotatedBoxes(self.tensor.clone()) + + def to(self, device: str) -> "RotatedBoxes": + return RotatedBoxes(self.tensor.to(device)) + + def area(self) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + box = self.tensor + area = box[:, 2] * box[:, 3] + return area + + def normalize_angles(self) -> None: + """ + Restrict angles to the range of (-180, 180] degrees + """ + self.tensor[:, 4] = self.tensor[:, 4] % 360 + self.tensor[:, 4][torch.where(self.tensor[:, 4] > 180)] -= 360 + + def clip(self, box_size: Boxes.BoxSizeType, clip_angle_threshold: float = 1.0) -> None: + """ + Clip (in place) the boxes by limiting x coordinates to the range [0, width] + and y coordinates to the range [0, height]. + + For RRPN: + Only clip boxes that are almost horizontal with a tolerance of + clip_angle_threshold to maintain backward compatibility. + + Rotated boxes beyond this threshold are not clipped for two reasons: + (1) There are potentially multiple ways to clip a rotated box to make it + fit within the image. + (2) It's tricky to make the entire rectangular box fit within the image + and still be able to not leave out pixels of interest. + Therefore we rely on ops like RoIAlignRotated to safely handle this. + + Args: + box_size (height, width): The clipping box's size. + clip_angle_threshold: + Iff. abs(normalized(angle)) <= clip_angle_threshold (in degrees), + we do the clipping as horizontal boxes. 
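+
+        Example (illustrative only; a hypothetical, nearly horizontal box):
+
+        .. code-block:: python
+
+            # the right half of this box extends past the right edge of an 80x100 image
+            boxes = RotatedBoxes(torch.tensor([[95.0, 40.0, 20.0, 10.0, 0.5]]))
+            boxes.clip((80, 100))   # box_size is (height, width)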
+ """ + h, w = box_size + + # normalize angles to be within (-180, 180] degrees + self.normalize_angles() + + idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0] + + # convert to (x1, y1, x2, y2) + x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0 + y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0 + x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0 + y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0 + + # clip + x1.clamp_(min=0, max=w) + y1.clamp_(min=0, max=h) + x2.clamp_(min=0, max=w) + y2.clamp_(min=0, max=h) + + # convert back to (xc, yc, w, h) + self.tensor[idx, 0] = (x1 + x2) / 2.0 + self.tensor[idx, 1] = (y1 + y2) / 2.0 + # make sure widths and heights do not increase due to numerical errors + self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1) + self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1) + + def nonempty(self, threshold: int = 0) -> torch.Tensor: + """ + Find boxes that are non-empty. + A box is considered empty, if either of its side is no larger than threshold. + + Returns: + Tensor: a binary vector which represents + whether each box is empty (False) or non-empty (True). + """ + box = self.tensor + widths = box[:, 2] + heights = box[:, 3] + keep = (widths > threshold) & (heights > threshold) + return keep + + def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "RotatedBoxes": + """ + Returns: + RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing. + + The following usage are allowed: + + 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box. + 2. `new_boxes = boxes[2:10]`: return a slice of boxes. + 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor + with `length = len(boxes)`. Nonzero elements in the vector will be selected. + + Note that the returned RotatedBoxes might share storage with this RotatedBoxes, + subject to Pytorch's indexing semantics. + """ + if isinstance(item, int): + return RotatedBoxes(self.tensor[item].view(1, -1)) + b = self.tensor[item] + assert b.dim() == 2, "Indexing on RotatedBoxes with {} failed to return a matrix!".format( + item + ) + return RotatedBoxes(b) + + def __len__(self) -> int: + return self.tensor.shape[0] + + def __repr__(self) -> str: + return "RotatedBoxes(" + str(self.tensor) + ")" + + def inside_box(self, box_size: Boxes.BoxSizeType, boundary_threshold: int = 0) -> torch.Tensor: + """ + Args: + box_size (height, width): Size of the reference box covering + [0, width] x [0, height] + boundary_threshold (int): Boxes that extend beyond the reference box + boundary by more than boundary_threshold are considered "outside". + + For RRPN, it might not be necessary to call this function since it's common + for rotated box to extend to outside of the image boundaries + (the clip function only clips the near-horizontal boxes) + + Returns: + a binary vector, indicating whether each box is inside the reference box. 
+ """ + height, width = box_size + + cnt_x = self.tensor[..., 0] + cnt_y = self.tensor[..., 1] + half_w = self.tensor[..., 2] / 2.0 + half_h = self.tensor[..., 3] / 2.0 + a = self.tensor[..., 4] + c = torch.abs(torch.cos(a * torch.pi / 180.0)) + s = torch.abs(torch.sin(a * torch.pi / 180.0)) + # This basically computes the horizontal bounding rectangle of the rotated box + max_rect_dx = c * half_w + s * half_h + max_rect_dy = c * half_h + s * half_w + + inds_inside = ( + (cnt_x - max_rect_dx >= -boundary_threshold) + & (cnt_y - max_rect_dy >= -boundary_threshold) + & (cnt_x + max_rect_dx < width + boundary_threshold) + & (cnt_y + max_rect_dy < height + boundary_threshold) + ) + + return inds_inside + + def get_centers(self) -> torch.Tensor: + """ + Returns: + The box centers in a Nx2 array of (x, y). + """ + return self.tensor[:, :2] + + @staticmethod + def cat(boxes_list: List["RotatedBoxes"]) -> "RotatedBoxes": # type: ignore + """ + Concatenates a list of RotatedBoxes into a single RotatedBoxes + + Arguments: + boxes_list (list[RotatedBoxes]) + + Returns: + RotatedBoxes: the concatenated RotatedBoxes + """ + assert isinstance(boxes_list, (list, tuple)) + assert len(boxes_list) > 0 + assert all(isinstance(box, RotatedBoxes) for box in boxes_list) + + cat_boxes = type(boxes_list[0])(cat([b.tensor for b in boxes_list], dim=0)) + return cat_boxes + + @property + def device(self) -> str: + return self.tensor.device + + def __iter__(self) -> Iterator[torch.Tensor]: + """ + Yield a box as a Tensor of shape (5,) at a time. + """ + yield from self.tensor + + +def pairwise_iou(boxes1: RotatedBoxes, boxes2: RotatedBoxes) -> None: + """ + Given two lists of rotated boxes of size N and M, + compute the IoU (intersection over union) + between __all__ N x M pairs of boxes. + The box order must be (x_center, y_center, width, height, angle). + + Args: + boxes1, boxes2 (RotatedBoxes): + two `RotatedBoxes`. Contains N & M rotated boxes, respectively. + + Returns: + Tensor: IoU, sized [N,M]. + """ + + return pairwise_iou_rotated(boxes1.tensor, boxes2.tensor) diff --git a/detectron2/utils/README.md b/detectron2/utils/README.md new file mode 100644 index 0000000000..9765b24a73 --- /dev/null +++ b/detectron2/utils/README.md @@ -0,0 +1,5 @@ +# Utility functions + +This folder contain utility functions that are not used in the +core library, but are useful for building models or training +code using the config system. diff --git a/detectron2/utils/__init__.py b/detectron2/utils/__init__.py new file mode 100644 index 0000000000..168f9979a4 --- /dev/null +++ b/detectron2/utils/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/detectron2/utils/collect_env.py b/detectron2/utils/collect_env.py new file mode 100644 index 0000000000..57b7c18c58 --- /dev/null +++ b/detectron2/utils/collect_env.py @@ -0,0 +1,65 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import os +import sys +from collections import defaultdict +import PIL +import torch +from tabulate import tabulate + +__all__ = ["collect_env_info"] + + +def collect_torch_env(): + try: + import torch.__config__ + + return torch.__config__.show() + except ImportError: + # compatible with older versions of pytorch + from torch.utils.collect_env import get_pretty_env_info + + return get_pretty_env_info() + + +def get_env_module(): + var_name = "DETECTRON2_ENV_MODULE" + return var_name, os.environ.get(var_name, "") + + +def collect_env_info(): + data = [] + data.append(("Python", sys.version.replace("\n", ""))) + try: + from detectron2 import _C + except ImportError: + pass + else: + data.append(("Detectron2 Compiler", _C.get_compiler_version())) + + data.append(get_env_module()) + data.append(("PyTorch", torch.__version__)) + data.append(("PyTorch Debug Build", torch.version.debug)) + + has_cuda = torch.cuda.is_available() + data.append(("CUDA available", has_cuda)) + if has_cuda: + devices = defaultdict(list) + for k in range(torch.cuda.device_count()): + devices[torch.cuda.get_device_name(k)].append(str(k)) + for name, devids in devices.items(): + data.append(("GPU " + ",".join(devids), name)) + data.append(("Pillow", PIL.__version__)) + + try: + import cv2 + + data.append(("cv2", cv2.__version__)) + except ImportError: + pass + env_str = tabulate(data) + "\n" + env_str += collect_torch_env() + return env_str + + +if __name__ == "__main__": + print(collect_env_info()) diff --git a/detectron2/utils/colormap.py b/detectron2/utils/colormap.py new file mode 100644 index 0000000000..1bf1455e4c --- /dev/null +++ b/detectron2/utils/colormap.py @@ -0,0 +1,140 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. + +""" +An awesome colormap for really neat visualizations. +Copied from Detectron, and removed gray colors. 
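+
+Example (illustrative only):
+
+.. code-block:: python
+
+    from detectron2.utils.colormap import colormap, random_color
+
+    colors = colormap(rgb=True, maximum=1)     # Nx3 float32 array of colors in [0, 1]
+    one = random_color(rgb=True, maximum=255)  # a single RGB color in [0, 255]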
+""" + +import numpy as np + +__all__ = ["colormap", "random_color"] + +# fmt: off +# RGB: +_COLORS = np.array( + [ + 0.000, 0.447, 0.741, + 0.850, 0.325, 0.098, + 0.929, 0.694, 0.125, + 0.494, 0.184, 0.556, + 0.466, 0.674, 0.188, + 0.301, 0.745, 0.933, + 0.635, 0.078, 0.184, + 0.300, 0.300, 0.300, + 0.600, 0.600, 0.600, + 1.000, 0.000, 0.000, + 1.000, 0.500, 0.000, + 0.749, 0.749, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 1.000, + 0.667, 0.000, 1.000, + 0.333, 0.333, 0.000, + 0.333, 0.667, 0.000, + 0.333, 1.000, 0.000, + 0.667, 0.333, 0.000, + 0.667, 0.667, 0.000, + 0.667, 1.000, 0.000, + 1.000, 0.333, 0.000, + 1.000, 0.667, 0.000, + 1.000, 1.000, 0.000, + 0.000, 0.333, 0.500, + 0.000, 0.667, 0.500, + 0.000, 1.000, 0.500, + 0.333, 0.000, 0.500, + 0.333, 0.333, 0.500, + 0.333, 0.667, 0.500, + 0.333, 1.000, 0.500, + 0.667, 0.000, 0.500, + 0.667, 0.333, 0.500, + 0.667, 0.667, 0.500, + 0.667, 1.000, 0.500, + 1.000, 0.000, 0.500, + 1.000, 0.333, 0.500, + 1.000, 0.667, 0.500, + 1.000, 1.000, 0.500, + 0.000, 0.333, 1.000, + 0.000, 0.667, 1.000, + 0.000, 1.000, 1.000, + 0.333, 0.000, 1.000, + 0.333, 0.333, 1.000, + 0.333, 0.667, 1.000, + 0.333, 1.000, 1.000, + 0.667, 0.000, 1.000, + 0.667, 0.333, 1.000, + 0.667, 0.667, 1.000, + 0.667, 1.000, 1.000, + 1.000, 0.000, 1.000, + 1.000, 0.333, 1.000, + 1.000, 0.667, 1.000, + 0.333, 0.000, 0.000, + 0.500, 0.000, 0.000, + 0.667, 0.000, 0.000, + 0.833, 0.000, 0.000, + 1.000, 0.000, 0.000, + 0.000, 0.167, 0.000, + 0.000, 0.333, 0.000, + 0.000, 0.500, 0.000, + 0.000, 0.667, 0.000, + 0.000, 0.833, 0.000, + 0.000, 1.000, 0.000, + 0.000, 0.000, 0.167, + 0.000, 0.000, 0.333, + 0.000, 0.000, 0.500, + 0.000, 0.000, 0.667, + 0.000, 0.000, 0.833, + 0.000, 0.000, 1.000, + 0.000, 0.000, 0.000, + 0.143, 0.143, 0.143, + 0.857, 0.857, 0.857, + 1.000, 1.000, 1.000 + ] +).astype(np.float32).reshape(-1, 3) +# fmt: on + + +def colormap(rgb=False, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. + maximum (int): either 255 or 1 + + Returns: + ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1] + """ + assert maximum in [255, 1], maximum + c = _COLORS * maximum + if not rgb: + c = c[:, ::-1] + return c + + +def random_color(rgb=False, maximum=255): + """ + Args: + rgb (bool): whether to return RGB colors or BGR colors. + maximum (int): either 255 or 1 + + Returns: + ndarray: a vector of 3 numbers + """ + idx = np.random.randint(0, len(_COLORS)) + ret = _COLORS[idx] * maximum + if not rgb: + ret = ret[::-1] + return ret + + +if __name__ == "__main__": + import cv2 + + size = 100 + H, W = 10, 10 + canvas = np.random.rand(H * size, W * size, 3).astype("float32") + for h in range(H): + for w in range(W): + idx = h * W + w + if idx >= len(_COLORS): + break + canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx] + cv2.imshow("a", canvas) + cv2.waitKey(0) diff --git a/detectron2/utils/comm.py b/detectron2/utils/comm.py new file mode 100644 index 0000000000..8cc7b3dac5 --- /dev/null +++ b/detectron2/utils/comm.py @@ -0,0 +1,263 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +This file contains primitives for multi-gpu communication. +This is useful when doing distributed training. +""" + +import functools +import logging +import numpy as np +import pickle +import torch +import torch.distributed as dist + +_LOCAL_PROCESS_GROUP = None +""" +A torch process group which only includes processes that on the same machine as the current process. 
+This variable is set when processes are spawned by `launch()` in "engine/launch.py". +""" + + +def get_world_size() -> int: + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank() -> int: + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +def get_local_rank() -> int: + """ + Returns: + The rank of the current process within the local (per-machine) process group. + """ + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + assert _LOCAL_PROCESS_GROUP is not None + return dist.get_rank(group=_LOCAL_PROCESS_GROUP) + + +def get_local_size() -> int: + """ + Returns: + The size of the per-machine process group, + i.e. the number of processes per machine. + """ + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) + + +def is_main_process() -> bool: + return get_rank() == 0 + + +def synchronize(): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + dist.barrier() + + +@functools.lru_cache() +def _get_global_gloo_group(): + """ + Return a process group based on gloo backend, containing all the ranks + The result is cached. + """ + if dist.get_backend() == "nccl": + return dist.new_group(backend="gloo") + else: + return dist.group.WORLD + + +def _serialize_to_tensor(data, group): + backend = dist.get_backend(group) + assert backend in ["gloo", "nccl"] + device = torch.device("cpu" if backend == "gloo" else "cuda") + + buffer = pickle.dumps(data) + if len(buffer) > 1024 ** 3: + logger = logging.getLogger(__name__) + logger.warning( + "Rank {} trying to all-gather {:.2f} GB of data on device {}".format( + get_rank(), len(buffer) / (1024 ** 3), device + ) + ) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to(device=device) + return tensor + + +def _pad_to_largest_tensor(tensor, group): + """ + Returns: + list[int]: size of the tensor, on each rank + Tensor: padded tensor that has the max size + """ + world_size = dist.get_world_size(group=group) + assert ( + world_size >= 1 + ), "comm.gather/all_gather must be called from ranks within the given group!" + local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device) + size_list = [ + torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size) + ] + dist.all_gather(size_list, local_size, group=group) + size_list = [int(size.item()) for size in size_list] + + max_size = max(size_list) + + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + if local_size != max_size: + padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device) + tensor = torch.cat((tensor, padding), dim=0) + return size_list, tensor + + +def all_gather(data, group=None): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. 
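+
+    Example (illustrative only; assumes the default process group has been initialized):
+
+    .. code-block:: python
+
+        # every rank contributes a picklable object and receives one entry per rank
+        stats = {"rank": get_rank(), "num_samples": 128}
+        all_stats = all_gather(stats)   # list with world_size entries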
+ + Returns: + list[data]: list of data gathered from each rank + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group) == 1: + return [data] + + tensor = _serialize_to_tensor(data, group) + + size_list, tensor = _pad_to_largest_tensor(tensor, group) + max_size = max(size_list) + + # receiving Tensor from all ranks + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list + ] + dist.all_gather(tensor_list, tensor, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def gather(data, dst=0, group=None): + """ + Run gather on arbitrary picklable data (not necessarily tensors). + + Args: + data: any picklable object + dst (int): destination rank + group: a torch process group. By default, will use a group which + contains all ranks on gloo backend. + + Returns: + list[data]: on dst, a list of data gathered from each rank. Otherwise, + an empty list. + """ + if get_world_size() == 1: + return [data] + if group is None: + group = _get_global_gloo_group() + if dist.get_world_size(group=group) == 1: + return [data] + rank = dist.get_rank(group=group) + + tensor = _serialize_to_tensor(data, group) + size_list, tensor = _pad_to_largest_tensor(tensor, group) + + # receiving Tensor from all ranks + if rank == dst: + max_size = max(size_list) + tensor_list = [ + torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list + ] + dist.gather(tensor, tensor_list, dst=dst, group=group) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + return data_list + else: + dist.gather(tensor, [], dst=dst, group=group) + return [] + + +def shared_random_seed(): + """ + Returns: + int: a random number that is the same across all workers. + If workers need a shared RNG, they can use this shared seed to + create one. + + All workers must call this function, otherwise it will deadlock. + """ + ints = np.random.randint(2 ** 31) + all_ints = all_gather(ints) + return all_ints[0] + + +def reduce_dict(input_dict, average=True): + """ + Reduce the values in the dictionary from all processes so that process with rank + 0 has the reduced results. + + Args: + input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor. + average (bool): whether to do average or sum + + Returns: + a dict with the same keys as input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.reduce(values, dst=0) + if dist.get_rank() == 0 and average: + # only main process gets accumulated, so only divide by + # world_size in this case + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict diff --git a/detectron2/utils/env.py b/detectron2/utils/env.py new file mode 100644 index 0000000000..a05057fca3 --- /dev/null +++ b/detectron2/utils/env.py @@ -0,0 +1,105 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import importlib +import importlib.util +import logging +import numpy as np +import os +import random +import sys +from datetime import datetime +import torch + +__all__ = ["seed_all_rng"] + + +def seed_all_rng(seed=None): + """ + Set the random seed for the RNG in torch, numpy and python. + + Args: + seed (int): if None, will use a strong random seed. + """ + if seed is None: + seed = ( + os.getpid() + + int(datetime.now().strftime("%S%f")) + + int.from_bytes(os.urandom(2), "big") + ) + logger = logging.getLogger(__name__) + logger.info("Using a generated random seed {}".format(seed)) + np.random.seed(seed) + torch.set_rng_state(torch.manual_seed(seed).get_state()) + random.seed(seed) + + +# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path +def _import_file(module_name, file_path, make_importable=False): + spec = importlib.util.spec_from_file_location(module_name, file_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + if make_importable: + sys.modules[module_name] = module + return module + + +def _configure_libraries(): + """ + Configurations for some libraries. + """ + # An environment option to disable `import cv2` globally, + # in case it leads to negative performance impact + disable_cv2 = int(os.environ.get("DETECTRON2_DISABLE_CV2", False)) + if disable_cv2: + sys.modules["cv2"] = None + else: + # Disable opencl in opencv since its interaction with cuda often has negative effects + # This envvar is supported after OpenCV 3.4.0 + os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled" + try: + import cv2 + + if int(cv2.__version__.split(".")[0]) >= 3: + cv2.ocl.setUseOpenCL(False) + except ImportError: + pass + + +_ENV_SETUP_DONE = False + + +def setup_environment(): + """Perform environment setup work. The default setup is a no-op, but this + function allows the user to specify a Python source file or a module in + the $DETECTRON2_ENV_MODULE environment variable, that performs + custom setup work that may be necessary to their computing environment. + """ + global _ENV_SETUP_DONE + if _ENV_SETUP_DONE: + return + _ENV_SETUP_DONE = True + + _configure_libraries() + + custom_module_path = os.environ.get("DETECTRON2_ENV_MODULE") + + if custom_module_path: + setup_custom_environment(custom_module_path) + else: + # The default setup is a no-op + pass + + +def setup_custom_environment(custom_module): + """ + Load custom environment setup by importing a Python source file or a + module, and run the setup function. + """ + if custom_module.endswith(".py"): + module = _import_file("detectron2.utils.env.custom_module", custom_module) + else: + module = importlib.import_module(custom_module) + assert hasattr(module, "setup_environment") and callable(module.setup_environment), ( + "Custom environment module defined in {} does not have the " + "required callable attribute 'setup_environment'." + ).format(custom_module) + module.setup_environment() diff --git a/detectron2/utils/events.py b/detectron2/utils/events.py new file mode 100644 index 0000000000..b31e4b48d5 --- /dev/null +++ b/detectron2/utils/events.py @@ -0,0 +1,325 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import datetime +import json +import logging +import os +from collections import defaultdict +from contextlib import contextmanager +import torch +from fvcore.common.file_io import PathManager +from fvcore.common.history_buffer import HistoryBuffer + +_CURRENT_STORAGE_STACK = [] + + +def get_event_storage(): + assert len( + _CURRENT_STORAGE_STACK + ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!" + return _CURRENT_STORAGE_STACK[-1] + + +class JSONWriter: + """ + Write scalars to a json file. + + It saves scalars as one json per line (instead of a big json) for easy parsing. + + Examples parsing such a json file: + + .. code-block:: none + + $ cat metrics.json | jq -s '.[0:2]' + [ + { + "data_time": 0.008433341979980469, + "iteration": 20, + "loss": 1.9228371381759644, + "loss_box_reg": 0.050025828182697296, + "loss_classifier": 0.5316952466964722, + "loss_mask": 0.7236229181289673, + "loss_rpn_box": 0.0856662318110466, + "loss_rpn_cls": 0.48198649287223816, + "lr": 0.007173333333333333, + "time": 0.25401854515075684 + }, + { + "data_time": 0.007216215133666992, + "iteration": 40, + "loss": 1.282649278640747, + "loss_box_reg": 0.06222952902317047, + "loss_classifier": 0.30682939291000366, + "loss_mask": 0.6970193982124329, + "loss_rpn_box": 0.038663312792778015, + "loss_rpn_cls": 0.1471673548221588, + "lr": 0.007706666666666667, + "time": 0.2490077018737793 + } + ] + + $ cat metrics.json | jq '.loss_mask' + 0.7126231789588928 + 0.689423680305481 + 0.6776131987571716 + ... + + """ + + def __init__(self, json_file, window_size=20): + """ + Args: + json_file (str): path to the json file. New data will be appended if the file exists. + window_size (int): the window size of median smoothing for the scalars whose + `smoothing_hint` are True. + """ + self._file_handle = PathManager.open(json_file, "a") + self._window_size = window_size + + def write(self): + storage = get_event_storage() + to_save = {"iteration": storage.iter} + to_save.update(storage.latest_with_smoothing_hint(self._window_size)) + self._file_handle.write(json.dumps(to_save, sort_keys=True) + "\n") + self._file_handle.flush() + try: + os.fsync(self._file_handle.fileno()) + except AttributeError: + pass + + def __del__(self): + # not guaranteed to be called at exit, but probably fine + self._file_handle.close() + + +class TensorboardXWriter: + """ + Write all scalars to a tensorboard file. + """ + + def __init__(self, log_dir: str, window_size: int = 20, **kwargs): + """ + Args: + log_dir (str): The directory to save the output events + window_size (int): the scalars will be median-smoothed by this window size + kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)` + """ + self._window_size = window_size + from torch.utils.tensorboard import SummaryWriter + + self._writer = SummaryWriter(log_dir, **kwargs) + + def write(self): + storage = get_event_storage() + for k, v in storage.latest_with_smoothing_hint(self._window_size).items(): + self._writer.add_scalar(k, v, storage.iter) + + def __del__(self): + if hasattr(self, "_writer"): # doesn't exist when the code fails at import + self._writer.close() + + +class CommonMetricPrinter: + """ + Print __common__ metrics to the terminal, including + iteration time, ETA, memory, all losses, and the learning rate. + + To print something different, please implement a similar printer by yourself. + """ + + def __init__(self, max_iter): + """ + Args: + max_iter (int): the maximum number of iterations to train. 
+ Used to compute ETA. + """ + self.logger = logging.getLogger(__name__) + self._max_iter = max_iter + + def write(self): + storage = get_event_storage() + iteration = storage.iter + + data_time, time = None, None + eta_string = "N/A" + try: + data_time = storage.history("data_time").avg(20) + time = storage.history("time").global_avg() + eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + except KeyError: # they may not exist in the first few iterations (due to warmup) + pass + + try: + lr = "{:.6f}".format(storage.history("lr").latest()) + except KeyError: + lr = "N/A" + + if torch.cuda.is_available(): + max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 + else: + max_mem_mb = None + + # NOTE: max_mem is parsed by grep in "dev/parse_results.sh" + self.logger.info( + """\ +eta: {eta} iter: {iter} {losses} \ +{time} {data_time} \ +lr: {lr} {memory}\ +""".format( + eta=eta_string, + iter=iteration, + losses=" ".join( + [ + "{}: {:.3f}".format(k, v.median(20)) + for k, v in storage.histories().items() + if "loss" in k + ] + ), + time="time: {:.4f}".format(time) if time is not None else "", + data_time="data_time: {:.4f}".format(data_time) if data_time is not None else "", + lr=lr, + memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "", + ) + ) + + +class EventStorage: + """ + The user-facing class that provides metric storage functionalities. + + In the future we may add support for storing / logging other types of data if needed. + """ + + def __init__(self, start_iter=0): + """ + Args: + start_iter (int): the iteration number to start with + """ + self._history = defaultdict(HistoryBuffer) + self._smoothing_hints = {} + self._latest_scalars = {} + self._iter = start_iter + self._current_prefix = "" + + def put_scalar(self, name, value, smoothing_hint=True): + """ + Add a scalar `value` to the `HistoryBuffer` associated with `name`. + + Args: + smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be + smoothed when logged. The hint will be accessible through + :meth:`EventStorage.smoothing_hints`. A writer may ignore the hint + and apply custom smoothing rule. + + It defaults to True because most scalars we save need to be smoothed to + provide any useful signal. + """ + name = self._current_prefix + name + history = self._history[name] + value = float(value) + history.update(value, self._iter) + self._latest_scalars[name] = value + + existing_hint = self._smoothing_hints.get(name) + if existing_hint is not None: + assert ( + existing_hint == smoothing_hint + ), "Scalar {} was put with a different smoothing_hint!".format(name) + else: + self._smoothing_hints[name] = smoothing_hint + + def put_scalars(self, *, smoothing_hint=True, **kwargs): + """ + Put multiple scalars from keyword arguments. 
+ + Examples: + + storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True) + """ + for k, v in kwargs.items(): + self.put_scalar(k, v, smoothing_hint=smoothing_hint) + + def history(self, name): + """ + Returns: + HistoryBuffer: the scalar history for name + """ + ret = self._history.get(name, None) + if ret is None: + raise KeyError("No history metric available for {}!".format(name)) + return ret + + def histories(self): + """ + Returns: + dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars + """ + return self._history + + def latest(self): + """ + Returns: + dict[name -> number]: the scalars that's added in the current iteration. + """ + return self._latest_scalars + + def latest_with_smoothing_hint(self, window_size=20): + """ + Similar to :meth:`latest`, but the returned values + are either the un-smoothed original latest value, + or a median of the given window_size, + depend on whether the smoothing_hint is True. + + This provides a default behavior that other writers can use. + """ + result = {} + for k, v in self._latest_scalars.items(): + result[k] = self._history[k].median(window_size) if self._smoothing_hints[k] else v + return result + + def smoothing_hints(self): + """ + Returns: + dict[name -> bool]: the user-provided hint on whether the scalar + is noisy and needs smoothing. + """ + return self._smoothing_hints + + def step(self): + """ + User should call this function at the beginning of each iteration, to + notify the storage of the start of a new iteration. + The storage will then be able to associate the new data with the + correct iteration number. + """ + self._iter += 1 + self._latest_scalars = {} + + @property + def iter(self): + return self._iter + + @property + def iteration(self): + # for backward compatibility + return self._iter + + def __enter__(self): + _CURRENT_STORAGE_STACK.append(self) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + assert _CURRENT_STORAGE_STACK[-1] == self + _CURRENT_STORAGE_STACK.pop() + + @contextmanager + def name_scope(self, name): + """ + Yields: + A context within which all the events added to this storage + will be prefixed by the name scope. + """ + old_prefix = self._current_prefix + self._current_prefix = name.rstrip("/") + "/" + yield + self._current_prefix = old_prefix diff --git a/detectron2/utils/logger.py b/detectron2/utils/logger.py new file mode 100644 index 0000000000..a167d7de9d --- /dev/null +++ b/detectron2/utils/logger.py @@ -0,0 +1,196 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import functools +import logging +import os +import sys +from collections import Counter +from fvcore.common.file_io import PathManager +from tabulate import tabulate +from termcolor import colored + + +class _ColorfulFormatter(logging.Formatter): + def __init__(self, *args, **kwargs): + self._root_name = kwargs.pop("root_name") + "." + self._abbrev_name = kwargs.pop("abbrev_name", "") + if len(self._abbrev_name): + self._abbrev_name = self._abbrev_name + "." 
+ super(_ColorfulFormatter, self).__init__(*args, **kwargs) + + def formatMessage(self, record): + record.name = record.name.replace(self._root_name, self._abbrev_name) + log = super(_ColorfulFormatter, self).formatMessage(record) + if record.levelno == logging.WARNING: + prefix = colored("WARNING", "red", attrs=["blink"]) + elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: + prefix = colored("ERROR", "red", attrs=["blink", "underline"]) + else: + return log + return prefix + " " + log + + +@functools.lru_cache() # so that calling setup_logger multiple times won't add many handlers +def setup_logger( + output=None, distributed_rank=0, *, color=True, name="detectron2", abbrev_name=None +): + """ + Args: + output (str): a file name or a directory to save log. If None, will not save log file. + If ends with ".txt" or ".log", assumed to be a file name. + Otherwise, logs will be saved to `output/log.txt`. + name (str): the root module name of this logger + abbrev_name (str): an abbreviation of the module, to avoid long names in logs. + Set to "" to not log the root module in logs. + By default, will abbreviate "detectron2" to "d2" and leave other + modules unchanged. + """ + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + logger.propagate = False + + if abbrev_name is None: + abbrev_name = "d2" if name == "detectron2" else name + + plain_formatter = logging.Formatter( + "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" + ) + # stdout logging: master only + if distributed_rank == 0: + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + if color: + formatter = _ColorfulFormatter( + colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", + datefmt="%m/%d %H:%M:%S", + root_name=name, + abbrev_name=str(abbrev_name), + ) + else: + formatter = plain_formatter + ch.setFormatter(formatter) + logger.addHandler(ch) + + # file logging: all workers + if output is not None: + if output.endswith(".txt") or output.endswith(".log"): + filename = output + else: + filename = os.path.join(output, "log.txt") + if distributed_rank > 0: + filename = filename + ".rank{}".format(distributed_rank) + PathManager.mkdirs(os.path.dirname(filename)) + + fh = logging.StreamHandler(_cached_log_stream(filename)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(plain_formatter) + logger.addHandler(fh) + + return logger + + +# cache the opened file object, so that different calls to `setup_logger` +# with the same file name can safely write to the same file. +@functools.lru_cache(maxsize=None) +def _cached_log_stream(filename): + return PathManager.open(filename, "a") + + +""" +Below are some other convenient logging methods. +They are mainly adopted from +https://github.com/abseil/abseil-py/blob/master/absl/logging/__init__.py +""" + + +def _find_caller(): + """ + Returns: + str: module name of the caller + tuple: a hashable key to be used to identify different callers + """ + frame = sys._getframe(2) + while frame: + code = frame.f_code + if os.path.join("utils", "logger.") not in code.co_filename: + mod_name = frame.f_globals["__name__"] + if mod_name == "__main__": + mod_name = "detectron2" + return mod_name, (code.co_filename, frame.f_lineno, code.co_name) + frame = frame.f_back + + +_LOG_COUNTER = Counter() + + +def log_first_n(lvl, msg, n=1, *, name=None, key="caller"): + """ + Log only for the first n times. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. 
Will use the caller's module by default. + key (str or tuple[str]): the string(s) can be one of "caller" or + "message", which defines how to identify duplicated logs. + For example, if called with `n=1, key="caller"`, this function + will only log the first call from the same caller, regardless of + the message content. + If called with `n=1, key="message"`, this function will log the + same content only once, even if they are called from different places. + If called with `n=1, key=("caller", "message")`, this function + will not log only if the same caller has logged the same message before. + """ + if isinstance(key, str): + key = (key,) + assert len(key) > 0 + + caller_module, caller_key = _find_caller() + hash_key = () + if "caller" in key: + hash_key = hash_key + caller_key + if "message" in key: + hash_key = hash_key + (msg,) + + _LOG_COUNTER[hash_key] += 1 + if _LOG_COUNTER[hash_key] <= n: + logging.getLogger(name or caller_module).log(lvl, msg) + + +def log_every_n(lvl, msg, n=1, *, name=None): + """ + Log once per n times. + + Args: + lvl (int): the logging level + msg (str): + n (int): + name (str): name of the logger to use. Will use the caller's module by default. + """ + caller_module, key = _find_caller() + _LOG_COUNTER[key] += 1 + if n == 1 or _LOG_COUNTER[key] % n == 1: + logging.getLogger(name or caller_module).log(lvl, msg) + + +def create_small_table(small_dict): + """ + Create a small table using the keys of small_dict as headers. This is only + suitable for small dictionaries. + + Args: + small_dict (dict): a result dictionary of only a few items. + + Returns: + str: the table as a string. + """ + keys, values = tuple(zip(*small_dict.items())) + table = tabulate( + [values], + headers=keys, + tablefmt="pipe", + floatfmt=".3f", + stralign="center", + numalign="center", + ) + return table diff --git a/detectron2/utils/registry.py b/detectron2/utils/registry.py new file mode 100644 index 0000000000..1826420420 --- /dev/null +++ b/detectron2/utils/registry.py @@ -0,0 +1,62 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +class Registry(object): + """ + The registry that provides name -> object mapping, to support third-party users' custom modules. + + To create a registry (inside detectron2): + + .. code-block:: python + + BACKBONE_REGISTRY = Registry('BACKBONE') + + To register an object: + + .. code-block:: python + + @BACKBONE_REGISTRY.register() + class MyBackbone(): + ... + + Or: + + BACKBONE_REGISTRY.register(obj=MyBackbone) + """ + + def __init__(self, name): + """ + Args: + name (str): the name of this registry + """ + self._name = name + + self._obj_map = {} + + def _do_register(self, name, obj): + assert ( + name not in self._obj_map + ), "An object named '{}' was already registered in '{}' registry!".format(name, self._name) + self._obj_map[name] = obj + + def register(self, obj=None): + """ + Register the given object under the the name `obj.__name__`. + Can be used as either a decorator or not. See docstring of this class for usage. 
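+
+        Example (an illustrative sketch; the registry and names are hypothetical):
+
+            @BACKBONE_REGISTRY.register()
+            def build_my_backbone(cfg, input_shape):
+                ...
+
+            # later, look up the registered object by its name
+            builder = BACKBONE_REGISTRY.get("build_my_backbone")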
+ """ + if obj is None: + # used as a decorator + def deco(func_or_class): + name = func_or_class.__name__ + self._do_register(name, func_or_class) + return func_or_class + + return deco + + # used as a function call + name = obj.__name__ + self._do_register(name, obj) + + def get(self, name): + ret = self._obj_map.get(name) + if ret is None: + raise KeyError("No object named '{}' found in '{}' registry!".format(name, self._name)) + return ret diff --git a/detectron2/utils/serialize.py b/detectron2/utils/serialize.py new file mode 100644 index 0000000000..cb09c517f9 --- /dev/null +++ b/detectron2/utils/serialize.py @@ -0,0 +1,29 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import cloudpickle + + +class PicklableWrapper(object): + """ + Wrap an object to make it more picklable, note that it uses + heavy weight serialization libraries that are slower than pickle. + It's best to use it only on closures (which are usually not picklable). + + This is a simplified version of + https://github.com/joblib/joblib/blob/master/joblib/externals/loky/cloudpickle_wrapper.py + """ + + def __init__(self, obj): + self._obj = obj + + def __reduce__(self): + s = cloudpickle.dumps(self._obj) + return cloudpickle.loads, (s,) + + def __call__(self, *args, **kwargs): + return self._obj(*args, **kwargs) + + def __getattr__(self, attr): + # Ensure that the wrapped object can be used seemlessly as the previous object. + if attr not in ["_obj"]: + return getattr(self._obj, attr) + return getattr(self, attr) diff --git a/detectron2/utils/video_visualizer.py b/detectron2/utils/video_visualizer.py new file mode 100644 index 0000000000..fb73adcbe1 --- /dev/null +++ b/detectron2/utils/video_visualizer.py @@ -0,0 +1,235 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np +import pycocotools.mask as mask_util + +from detectron2.utils.visualizer import ( + ColorMode, + Visualizer, + _create_text_labels, + _PanopticPrediction, +) + +from .colormap import random_color + + +class _DetectedInstance: + """ + Used to store data about detected objects in video frame, + in order to transfer color to objects in the future frames. + + Attributes: + label (int): + bbox (tuple[float]): + mask_rle (dict): + color (tuple[float]): RGB colors in range (0, 1) + ttl (int): time-to-live for the instance. For example, if ttl=2, + the instance color can be transferred to objects in the next two frames. + """ + + __slots__ = ["label", "bbox", "mask_rle", "color", "ttl"] + + def __init__(self, label, bbox, mask_rle, color, ttl): + self.label = label + self.bbox = bbox + self.mask_rle = mask_rle + self.color = color + self.ttl = ttl + + +class VideoVisualizer: + def __init__(self, metadata, instance_mode=ColorMode.IMAGE): + """ + Args: + metadata (MetadataCatalog): image metadata. + """ + self.metadata = metadata + self._old_instances = [] + assert instance_mode in [ + ColorMode.IMAGE, + ColorMode.IMAGE_BW, + ], "Other mode not supported yet." + self._instance_mode = instance_mode + + def draw_instance_predictions(self, frame, predictions): + """ + Draw instance-level prediction results on an image. + + Args: + frame (ndarray): an RGB image of shape (H, W, C), in the range [0, 255]. + predictions (Instances): the output of an instance detection/segmentation + model. Following fields will be used to draw: + "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). + + Returns: + output (VisImage): image object with visualizations. 
+ """ + frame_visualizer = Visualizer(frame, self.metadata) + num_instances = len(predictions) + if num_instances == 0: + return frame_visualizer.output + + boxes = predictions.pred_boxes.tensor.numpy() if predictions.has("pred_boxes") else None + scores = predictions.scores if predictions.has("scores") else None + classes = predictions.pred_classes.numpy() if predictions.has("pred_classes") else None + keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None + + if predictions.has("pred_masks"): + masks = predictions.pred_masks + # mask IOU is not yet enabled + # masks_rles = mask_util.encode(np.asarray(masks.permute(1, 2, 0), order="F")) + # assert len(masks_rles) == num_instances + else: + masks = None + + detected = [ + _DetectedInstance(classes[i], boxes[i], mask_rle=None, color=None, ttl=8) + for i in range(num_instances) + ] + colors = self._assign_colors(detected) + + labels = _create_text_labels(classes, scores, self.metadata.thing_classes) + + if self._instance_mode == ColorMode.IMAGE_BW: + # any() returns uint8 tensor + frame_visualizer.output.img = frame_visualizer._create_grayscale_image( + (masks.any(dim=0) > 0).numpy() if masks is not None else None + ) + alpha = 0.3 + else: + alpha = 0.5 + + frame_visualizer.overlay_instances( + boxes=None if masks is not None else boxes, # boxes are a bit distracting + masks=masks, + labels=labels, + keypoints=keypoints, + assigned_colors=colors, + alpha=alpha, + ) + + return frame_visualizer.output + + def draw_sem_seg(self, frame, sem_seg, area_threshold=None): + """ + Args: + sem_seg (ndarray or Tensor): semantic segmentation of shape (H, W), + each value is the integer label. + area_threhold (Optional[int]): only draw segmentations larger than the threshold + """ + # don't need to do anything special + frame_visualizer = Visualizer(frame, self.metadata) + frame_visualizer.draw_sem_seg(sem_seg, area_threshold=None) + return frame_visualizer.output + + def draw_panoptic_seg_predictions( + self, frame, panoptic_seg, segments_info, area_threshold=None, alpha=0.5 + ): + frame_visualizer = Visualizer(frame, self.metadata) + pred = _PanopticPrediction(panoptic_seg, segments_info) + + if self._instance_mode == ColorMode.IMAGE_BW: + frame_visualizer.output.img = frame_visualizer._create_grayscale_image( + pred.non_empty_mask() + ) + + # draw mask for all semantic segments first i.e. 
"stuff" + for mask, sinfo in pred.semantic_masks(): + category_idx = sinfo["category_id"] + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] + except AttributeError: + mask_color = None + + frame_visualizer.draw_binary_mask( + mask, + color=mask_color, + text=self.metadata.stuff_classes[category_idx], + alpha=alpha, + area_threshold=area_threshold, + ) + + all_instances = list(pred.instance_masks()) + if len(all_instances) == 0: + return frame_visualizer.output + # draw mask for all instances second + masks, sinfo = list(zip(*all_instances)) + num_instances = len(masks) + masks_rles = mask_util.encode( + np.asarray(np.asarray(masks).transpose(1, 2, 0), dtype=np.uint8, order="F") + ) + assert len(masks_rles) == num_instances + + category_ids = [x["category_id"] for x in sinfo] + detected = [ + _DetectedInstance(category_ids[i], bbox=None, mask_rle=masks_rles[i], color=None, ttl=8) + for i in range(num_instances) + ] + colors = self._assign_colors(detected) + labels = [self.metadata.thing_classes[k] for k in category_ids] + + frame_visualizer.overlay_instances( + boxes=None, + masks=masks, + labels=labels, + keypoints=None, + assigned_colors=colors, + alpha=alpha, + ) + return frame_visualizer.output + + def _assign_colors(self, instances): + """ + Naive tracking heuristics to assign same color to the same instance, + will update the internal state of tracked instances. + + Returns: + list[tuple[float]]: list of colors. + """ + + # Compute iou with either boxes or masks: + is_crowd = np.zeros((len(instances),), dtype=np.bool) + if instances[0].bbox is None: + assert instances[0].mask_rle is not None + # use mask iou only when box iou is None + # because box seems good enough + rles_old = [x.mask_rle for x in self._old_instances] + rles_new = [x.mask_rle for x in instances] + ious = mask_util.iou(rles_old, rles_new, is_crowd) + threshold = 0.5 + else: + boxes_old = [x.bbox for x in self._old_instances] + boxes_new = [x.bbox for x in instances] + ious = mask_util.iou(boxes_old, boxes_new, is_crowd) + threshold = 0.6 + if len(ious) == 0: + ious = np.zeros((len(self._old_instances), len(instances)), dtype="float32") + + # Only allow matching instances of the same label: + for old_idx, old in enumerate(self._old_instances): + for new_idx, new in enumerate(instances): + if old.label != new.label: + ious[old_idx, new_idx] = 0 + + matched_new_per_old = np.asarray(ious).argmax(axis=1) + max_iou_per_old = np.asarray(ious).max(axis=1) + + # Try to find match for each old instance: + extra_instances = [] + for idx, inst in enumerate(self._old_instances): + if max_iou_per_old[idx] > threshold: + newidx = matched_new_per_old[idx] + if instances[newidx].color is None: + instances[newidx].color = inst.color + continue + # If an old instance does not match any new instances, + # keep it for the next frame in case it is just missed by the detector + inst.ttl -= 1 + if inst.ttl > 0: + extra_instances.append(inst) + + # Assign random color to newly-detected instances: + for inst in instances: + if inst.color is None: + inst.color = random_color(rgb=True, maximum=1) + self._old_instances = instances[:] + extra_instances + return [d.color for d in instances] diff --git a/detectron2/utils/visualizer.py b/detectron2/utils/visualizer.py new file mode 100644 index 0000000000..cd1db2a492 --- /dev/null +++ b/detectron2/utils/visualizer.py @@ -0,0 +1,974 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import colorsys +import numpy as np +from enum import Enum, unique +import cv2 +import matplotlib as mpl +import matplotlib.colors as mplc +import matplotlib.figure as mplfigure +import pycocotools.mask as mask_util +import torch +from matplotlib.backends.backend_agg import FigureCanvasAgg + +from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks + +from .colormap import random_color + +__all__ = ["ColorMode", "VisImage", "Visualizer"] + + +_SMALL_OBJECT_AREA_THRESH = 1000 +_LARGE_MASK_AREA_THRESH = 120000 +_OFF_WHITE = (1.0, 1.0, 240.0 / 255) +_BLACK = (0, 0, 0) +_RED = (1.0, 0, 0) + +_KEYPOINT_THRESHOLD = 0.05 + + +@unique +class ColorMode(Enum): + """ + Enum of different color modes to use for instance visualizations. + + Attributes: + IMAGE: Picks a random color for every instance and overlay segmentations with low opacity. + SEGMENTATION: Let instances of the same category have similar colors, and overlay them with + high opacity. This provides more attention on the quality of segmentation. + IMAGE_BW: same as IMAGE, but convert all areas without masks to gray-scale. + """ + + IMAGE = 0 + SEGMENTATION = 1 + IMAGE_BW = 2 + + +class GenericMask: + """ + Attribute: + polygons (list[ndarray]): list[ndarray]: polygons for this mask. + Each ndarray has format [x, y, x, y, ...] + mask (ndarray): a binary mask + """ + + def __init__(self, mask_or_polygons, height, width): + self._mask = self._polygons = self._has_holes = None + self.height = height + self.width = width + + m = mask_or_polygons + if isinstance(m, dict): + # RLEs + assert "counts" in m and "size" in m + if isinstance(m["counts"], list): # uncompressed RLEs + h, w = m["size"] + assert h == height and w == width + m = mask_util.frPyObjects(m, h, w) + self._mask = mask_util.decode(m)[:, :] + return + + if isinstance(m, list): # list[ndarray] + self._polygons = [np.asarray(x).reshape(-1) for x in m] + return + + if isinstance(m, np.ndarray): # assumed to be a binary mask + assert m.shape[1] != 2, m.shape + assert m.shape == (height, width), m.shape + self._mask = m.astype("uint8") + return + + raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) + + @property + def mask(self): + if self._mask is None: + self._mask = self.polygons_to_mask(self._polygons) + return self._mask + + @property + def polygons(self): + if self._polygons is None: + self._polygons, self._has_holes = self.mask_to_polygons(self._mask) + return self._polygons + + @property + def has_holes(self): + if self._has_holes is None: + if self._mask is not None: + self._polygons, self._has_holes = self.mask_to_polygons(self._mask) + else: + self._has_holes = False # if original format is polygon, does not have holes + return self._has_holes + + def mask_to_polygons(self, mask): + # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level + # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. + # Internal contours (holes) are placed in hierarchy-2. + # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from countours. 
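+        # Note: cv2.findContours returns (contours, hierarchy) in OpenCV 4.x but
+        # (image, contours, hierarchy) in OpenCV 3.x; indexing the results from the
+        # end (res[-2], res[-1]) below keeps this code compatible with both.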
+ res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) + hierarchy = res[-1] + has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 + res = res[-2] + res = [x.flatten() for x in res] + res = [x for x in res if len(x) >= 6] + return res, has_holes + + def polygons_to_mask(self, polygons): + rle = mask_util.frPyObjects(polygons, self.height, self.width) + rle = mask_util.merge(rle) + return mask_util.decode(rle)[:, :] + + def area(self): + return self.mask.sum() + + def bbox(self): + p = mask_util.frPyObjects(self.polygons, self.height, self.width) + p = mask_util.merge(p) + bbox = mask_util.toBbox(p) + bbox[2] += bbox[0] + bbox[3] += bbox[1] + return bbox + + +class _PanopticPrediction: + def __init__(self, panoptic_seg, segments_info): + self._seg = panoptic_seg + + self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info + segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) + areas = areas.numpy() + sorted_idxs = np.argsort(-areas) + self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] + self._seg_ids = self._seg_ids.tolist() + for sid, area in zip(self._seg_ids, self._seg_areas): + if sid in self._sinfo: + self._sinfo[sid]["area"] = float(area) + + def non_empty_mask(self): + """ + Returns: + (H, W) array, a mask for all pixels that have a prediction + """ + empty_ids = [] + for id in self._seg_ids: + if id not in self._sinfo: + empty_ids.append(id) + if len(empty_ids) == 0: + return np.zeros(self._seg.shape, dtype=np.uint8) + assert ( + len(empty_ids) == 1 + ), ">1 ids corresponds to no labels. This is currently not supported" + return (self._seg != empty_ids[0]).numpy().astype(np.bool) + + def semantic_masks(self): + for sid in self._seg_ids: + sinfo = self._sinfo.get(sid) + if sinfo is None or sinfo["isthing"]: + # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. + continue + yield (self._seg == sid).numpy().astype(np.bool), sinfo + + def instance_masks(self): + for sid in self._seg_ids: + sinfo = self._sinfo.get(sid) + if sinfo is None or not sinfo["isthing"]: + continue + mask = (self._seg == sid).numpy().astype(np.bool) + if mask.sum() > 0: + yield mask, sinfo + + +def _create_text_labels(classes, scores, class_names): + """ + Args: + classes (list[int] or None): + scores (list[float] or None): + class_names (list[str] or None): + + Returns: + list[str] or None + """ + labels = None + if class_names is not None and len(class_names) > 1: + labels = [class_names[i] for i in classes] + if scores is not None: + if labels is None: + labels = ["{:.0f}%".format(s * 100) for s in scores] + else: + labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] + return labels + + +class VisImage: + def __init__(self, img, scale=1.0): + """ + Args: + img (ndarray): an RGB image of shape (H, W, 3). + scale (float): scale the input image + """ + self.img = img + self.scale = scale + self.width, self.height = img.shape[1], img.shape[0] + self._setup_figure(img) + + def _setup_figure(self, img): + """ + Args: + Same as in :meth:`__init__()`. + + Returns: + fig (matplotlib.pyplot.figure): top level container for all the image plot elements. + ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. 
+ """ + fig = mplfigure.Figure(frameon=False) + self.dpi = fig.get_dpi() + # add a small 1e-2 to avoid precision lost due to matplotlib's truncation + # (https://github.com/matplotlib/matplotlib/issues/15363) + fig.set_size_inches( + (self.width * self.scale + 1e-2) / self.dpi, + (self.height * self.scale + 1e-2) / self.dpi, + ) + self.canvas = FigureCanvasAgg(fig) + # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) + ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) + ax.axis("off") + ax.set_xlim(0.0, self.width) + ax.set_ylim(self.height) + + self.fig = fig + self.ax = ax + + def save(self, filepath): + """ + Args: + filepath (str): a string that contains the absolute path, including the file name, where + the visualized image will be saved. + """ + cv2.imwrite(filepath, self.get_image()[:, :, ::-1]) + + def get_image(self): + """ + Returns: + ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type. + The shape is scaled w.r.t the input image using the given `scale` argument. + """ + canvas = self.canvas + s, (width, height) = canvas.print_to_buffer() + if (self.width, self.height) != (width, height): + img = cv2.resize(self.img, (width, height)) + else: + img = self.img + + # buf = io.BytesIO() # works for cairo backend + # canvas.print_rgba(buf) + # width, height = self.width, self.height + # s = buf.getvalue() + + buffer = np.frombuffer(s, dtype="uint8") + + # imshow is slow. blend manually (still quite slow) + img_rgba = buffer.reshape(height, width, 4) + rgb, alpha = np.split(img_rgba, [3], axis=2) + + try: + import numexpr as ne # fuse them with numexpr + + visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)") + except ImportError: + alpha = alpha.astype("float32") / 255.0 + visualized_image = img * (1 - alpha) + rgb * alpha + + visualized_image = visualized_image.astype("uint8") + + return visualized_image + + +class Visualizer: + def __init__(self, img_rgb, metadata, scale=1.0, instance_mode=ColorMode.IMAGE): + """ + Args: + img_rgb: a numpy array of shape (H, W, C), where H and W correspond to + the height and width of the image respectively. C is the number of + color channels. The image is required to be in RGB format since that + is a requirement of the Matplotlib library. The image is also expected + to be in the range [0, 255]. + metadata (MetadataCatalog): image metadata. + """ + self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) + self.metadata = metadata + self.output = VisImage(self.img, scale=scale) + self.cpu_device = torch.device("cpu") + + # too small texts are useless, therefore clamp to 9 + self._default_font_size = max( + np.sqrt(self.output.height * self.output.width) // 90, 10 // scale + ) + self._instance_mode = instance_mode + + def draw_instance_predictions(self, predictions): + """ + Draw instance-level prediction results on an image. + + Args: + predictions (Instances): the output of an instance detection/segmentation + model. Following fields will be used to draw: + "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). + + Returns: + output (VisImage): image object with visualizations. 
+ """ + boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None + scores = predictions.scores if predictions.has("scores") else None + classes = predictions.pred_classes if predictions.has("pred_classes") else None + labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) + keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None + + if predictions.has("pred_masks"): + masks = predictions.pred_masks.numpy() + masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] + else: + masks = None + + if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): + colors = [ + self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes + ] + alpha = 0.8 + else: + colors = None + alpha = 0.5 + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.img = self._create_grayscale_image( + (predictions.pred_masks.any(dim=0) > 0).numpy() + ) + alpha = 0.3 + + self.overlay_instances( + masks=masks, + boxes=boxes, + labels=labels, + keypoints=keypoints, + assigned_colors=colors, + alpha=alpha, + ) + return self.output + + def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): + """ + Draw semantic segmentation predictions/labels. + + Args: + sem_seg (Tensor or ndarray): the segmentation of shape (H, W). + area_threshold (int): segments with less than `area_threshold` are not drawn. + alpha (float): the larger it is, the more opaque the segmentations are. + + Returns: + output (VisImage): image object with visualizations. + """ + if isinstance(sem_seg, torch.Tensor): + sem_seg = sem_seg.numpy() + labels, areas = np.unique(sem_seg, return_counts=True) + sorted_idxs = np.argsort(-areas).tolist() + labels = labels[sorted_idxs] + for label in labels: + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] + except (AttributeError, IndexError): + mask_color = None + + binary_mask = (sem_seg == label).astype(np.uint8) + text = self.metadata.stuff_classes[label] + self.draw_binary_mask( + binary_mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + ) + return self.output + + def draw_panoptic_seg_predictions( + self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7 + ): + """ + Draw panoptic prediction results on an image. + + Args: + panoptic_seg (Tensor): of shape (height, width) where the values are ids for each + segment. + segments_info (list[dict]): Describe each segment in `panoptic_seg`. + Each dict contains keys "id", "category_id", "isthing". + area_threshold (int): stuff segments with less than `area_threshold` are not drawn. + + Returns: + output (VisImage): image object with visualizations. + """ + pred = _PanopticPrediction(panoptic_seg, segments_info) + + if self._instance_mode == ColorMode.IMAGE_BW: + self.output.img = self._create_grayscale_image(pred.non_empty_mask()) + + # draw mask for all semantic segments first i.e. 
"stuff" + for mask, sinfo in pred.semantic_masks(): + category_idx = sinfo["category_id"] + try: + mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] + except AttributeError: + mask_color = None + + text = self.metadata.stuff_classes[category_idx] + self.draw_binary_mask( + mask, + color=mask_color, + edge_color=_OFF_WHITE, + text=text, + alpha=alpha, + area_threshold=area_threshold, + ) + + # draw mask for all instances second + all_instances = list(pred.instance_masks()) + if len(all_instances) == 0: + return self.output + masks, sinfo = list(zip(*all_instances)) + category_ids = [x["category_id"] for x in sinfo] + + try: + scores = [x["score"] for x in sinfo] + except KeyError: + scores = None + labels = _create_text_labels(category_ids, scores, self.metadata.thing_classes) + + try: + colors = [random_color(rgb=True, maximum=1) for k in category_ids] + except AttributeError: + colors = None + self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors) + + return self.output + + def draw_dataset_dict(self, dic): + annos = dic.get("annotations", None) + if annos: + if "segmentation" in annos[0]: + masks = [x["segmentation"] for x in annos] + else: + masks = None + if "keypoints" in annos[0]: + keypts = [x["keypoints"] for x in annos] + keypts = np.array(keypts).reshape(len(annos), -1, 3) + else: + keypts = None + + boxes = [BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) for x in annos] + + labels = [x["category_id"] for x in annos] + names = self.metadata.get("thing_classes", None) + if names: + labels = [names[i] for i in labels] + labels = [i + ("|crowd" if a.get("iscrowd", 0) else "") for i, a in zip(labels, annos)] + self.overlay_instances(labels=labels, boxes=boxes, masks=masks, keypoints=keypts) + + sem_seg = dic.get("sem_seg", None) + if sem_seg is None and "sem_seg_file_name" in dic: + sem_seg = cv2.imread(dic["sem_seg_file_name"], cv2.IMREAD_GRAYSCALE) + if sem_seg is not None: + self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) + return self.output + + def overlay_instances( + self, + *, + boxes=None, + labels=None, + masks=None, + keypoints=None, + assigned_colors=None, + alpha=0.5 + ): + """ + Args: + boxes (Boxes or ndarray): either a :class:`Boxes` or a Nx4 numpy array + of XYXY_ABS format for the N objects in a single image. + labels (list[str]): the text to be displayed for each instance. + masks (masks-like object): Supported types are: + + * `structures.masks.PolygonMasks`, `structures.masks.BitMasks`. + * list[list[ndarray]]: contains the segmentation masks for all objects in one image. + The first level of the list corresponds to individual instances. The second + level to all the polygon that compose the instance, and the third level + to the polygon coordinates. The third level should have the format of + [x0, y0, x1, y1, ..., xn, yn] (n >= 3). + * list[ndarray]: each ndarray is a binary mask of shape (H, W). + * list[dict]: each dict is a COCO-style RLE. + keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), + where the N is the number of instances and K is the number of keypoints. + The last dimension corresponds to (x, y, visibility or score). + assigned_colors (list[matplotlib.colors]): a list of colors, where each color + corresponds to each mask or box in the image. Refer to 'matplotlib.colors' + for full list of formats that the colors are accepted in. + + Returns: + output (VisImage): image object with visualizations. 
+ """ + num_instances = None + if boxes is not None: + boxes = self._convert_boxes(boxes) + num_instances = len(boxes) + if masks is not None: + masks = self._convert_masks(masks) + if num_instances: + assert len(masks) == num_instances + else: + num_instances = len(masks) + if keypoints is not None: + if num_instances: + assert len(keypoints) == num_instances + else: + num_instances = len(keypoints) + keypoints = self._convert_keypoints(keypoints) + if labels is not None: + assert len(labels) == num_instances + if assigned_colors is None: + assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] + if num_instances == 0: + return self.output + + # Display in largest to smallest order to reduce occlusion. + areas = None + if boxes is not None: + areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) + elif masks is not None: + areas = np.asarray([x.area() for x in masks]) + + if areas is not None: + sorted_idxs = np.argsort(-areas).tolist() + # Re-order overlapped instances in descending order. + boxes = boxes[sorted_idxs] if boxes is not None else None + labels = [labels[k] for k in sorted_idxs] if labels is not None else None + masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None + assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] + keypoints = keypoints[sorted_idxs] if keypoints is not None else None + + for i in range(num_instances): + color = assigned_colors[i] + if boxes is not None: + self.draw_box(boxes[i], edge_color=color) + + if masks is not None: + for segment in masks[i].polygons: + self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) + + if labels is not None: + # first get a box + if boxes is not None: + x0, y0, x1, y1 = boxes[i] + text_pos = (x0, y0) # if drawing boxes, put text on the box corner. + horiz_align = "left" + elif masks is not None: + x0, y0, x1, y1 = masks[i].bbox() + + # draw text in the center (defined by median) when box is not drawn + # median is less sensitive to outliers. + text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] + horiz_align = "center" + else: + continue # drawing the box confidence for keypoints isn't very useful. + # for small objects, draw text at the side to avoid occulusion + instance_area = (y1 - y0) * (x1 - x0) + if ( + instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale + or y1 - y0 < 40 * self.output.scale + ): + if y1 >= self.output.height - 5: + text_pos = (x1, y0) + else: + text_pos = (x0, y1) + + height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) + lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + font_size = ( + np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) + * 0.5 + * self._default_font_size + ) + self.draw_text( + labels[i], + text_pos, + color=lighter_color, + horizontal_alignment=horiz_align, + font_size=font_size, + ) + + # draw keypoints + if keypoints is not None: + for keypoints_per_instance in keypoints: + self.draw_and_connect_keypoints(keypoints_per_instance) + + return self.output + + def draw_and_connect_keypoints(self, keypoints): + """ + Draws keypoints of an instance and follows the rules for keypoint connections + to draw lines between appropriate keypoints. This follows color heuristics for + line color. + + Args: + keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints + and the last dimension corresponds to (x, y, probability). + + Returns: + output (VisImage): image object with visualizations. 
+ """ + visible = {} + for idx, keypoint in enumerate(keypoints): + # draw keypoint + x, y, prob = keypoint + if prob > _KEYPOINT_THRESHOLD: + color = tuple(x / 255 for x in _BLACK) + self.draw_circle((x, y), color=color) + keypoint_name = self.metadata.keypoint_names[idx] + visible[keypoint_name] = (x, y) + + for kp0, kp1, color in self.metadata.keypoint_connection_rules: + if kp0 in visible and kp1 in visible: + x0, y0 = visible[kp0] + x1, y1 = visible[kp1] + color = tuple(x / 255.0 for x in color) + self.draw_line([x0, x1], [y0, y1], color=color) + + # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip + # Note that this strategy is specific to person keypoints. + # For other keypoints, it should just do nothing + try: + ls_x, ls_y = visible["left_shoulder"] + rs_x, rs_y = visible["right_shoulder"] + mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 + except KeyError: + pass + else: + # draw line from nose to mid-shoulder + nose_x, nose_y = visible.get("nose", (None, None)) + if nose_x is not None: + self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) + + try: + # draw line from mid-shoulder to mid-hip + lh_x, lh_y = visible["left_hip"] + rh_x, rh_y = visible["right_hip"] + except KeyError: + pass + else: + mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 + self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) + return self.output + + """ + Primitive drawing functions: + """ + + def draw_text( + self, text, position, *, font_size=None, color="g", horizontal_alignment="center" + ): + """ + Args: + text (str): class label + position (tuple): a tuple of the x and y coordinates to place text on image. + font_size (int, optional): font of the text. If not provided, a font size + proportional to the image width is calculated and used. + color: color of the text. Refer to `matplotlib.colors` for full list + of formats that are accepted. + horizontal_alignment (str): see `matplotlib.text.Text` + + Returns: + output (VisImage): image object with text drawn. + """ + if not font_size: + font_size = self._default_font_size + + # since the text background is dark, we don't want the text to be dark + color = np.maximum(list(mplc.to_rgb(color)), 0.2) + color[np.argmax(color)] = max(0.8, np.max(color)) + + x, y = position + self.output.ax.text( + x, + y, + text, + size=font_size * self.output.scale, + family="sans-serif", + bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, + verticalalignment="top", + horizontalalignment=horizontal_alignment, + color=color, + zorder=10, + ) + return self.output + + def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): + """ + Args: + box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 + are the coordinates of the image's top left corner. x1 and y1 are the + coordinates of the image's bottom right corner. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + edge_color: color of the outline of the box. Refer to `matplotlib.colors` + for full list of formats that are accepted. + line_style (string): the string to use to create the outline of the boxes. + + Returns: + output (VisImage): image object with box drawn. 
+ """ + x0, y0, x1, y1 = box_coord + width = x1 - x0 + height = y1 - y0 + + linewidth = max(self._default_font_size / 4, 1) + + self.output.ax.add_patch( + mpl.patches.Rectangle( + (x0, y0), + width, + height, + fill=False, + edgecolor=edge_color, + linewidth=linewidth * self.output.scale, + alpha=alpha, + linestyle=line_style, + ) + ) + return self.output + + def draw_circle(self, circle_coord, color, radius=5): + """ + Args: + circle_coord (list(int) or tuple(int)): contains the x and y coordinates + of the center of the circle. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + radius (int): radius of the circle. + + Returns: + output (VisImage): image object with box drawn. + """ + x, y = circle_coord + self.output.ax.add_patch(mpl.patches.Circle(circle_coord, radius=radius, color=color)) + return self.output + + def draw_line(self, x_data, y_data, color): + """ + Args: + x_data (list[int]): a list containing x values of all the points being drawn. + Length of list should match the length of y_data. + y_data (list[int]): a list containing y values of all the points being drawn. + Length of list should match the length of x_data. + color: color of the line. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + + Returns: + output (VisImage): image object with line drawn. + """ + linewidth = max(self._default_font_size / 3, 1) + self.output.ax.add_line( + mpl.lines.Line2D(x_data, y_data, linewidth=linewidth * self.output.scale, color=color) + ) + return self.output + + def draw_binary_mask( + self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=4096 + ): + """ + Args: + binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and + W is the image width. Each value in the array is either a 0 or 1 value of uint8 + type. + color: color of the mask. Refer to `matplotlib.colors` for a full list of + formats that are accepted. If None, will pick a random color. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. + text (str): if None, will be drawn in the object's center of mass. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + area_threshold (float): a connected component small than this will not be shown. + + Returns: + output (VisImage): image object with mask drawn. + """ + if color is None: + color = random_color(rgb=True, maximum=1) + if area_threshold is None: + area_threshold = 4096 + + has_valid_segment = False + binary_mask = binary_mask.astype("uint8") # opencv needs uint8 + mask = GenericMask(binary_mask, self.output.height, self.output.width) + shape2d = (binary_mask.shape[0], binary_mask.shape[1]) + + if not mask.has_holes: + # draw polygons for regular masks + for segment in mask.polygons: + area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) + if area < area_threshold: + continue + has_valid_segment = True + segment = segment.reshape(-1, 2) + self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) + else: + rgba = np.zeros(shape2d + (4,), dtype="float32") + rgba[:, :, :3] = color + rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha + has_valid_segment = True + self.output.ax.imshow(rgba) + + if text is not None and has_valid_segment: + # TODO sometimes drawn on wrong objects. the heuristics here can improve. 
+ lighter_color = self._change_color_brightness(color, brightness_factor=0.7) + _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) + largest_component_id = np.argmax(stats[1:, -1]) + 1 + + # draw text on the largest component, as well as other very large components. + for cid in range(1, _num_cc): + if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: + # median is more stable than centroid + # center = centroids[largest_component_id] + center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] + self.draw_text(text, center, color=lighter_color) + return self.output + + def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): + """ + Args: + segment: numpy array of shape Nx2, containing all the points in the polygon. + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a + full list of formats that are accepted. If not provided, a darker shade + of the polygon color will be used instead. + alpha (float): blending efficient. Smaller values lead to more transparent masks. + + Returns: + output (VisImage): image object with polygon drawn. + """ + if edge_color is None: + # make edge color darker than the polygon color + if alpha > 0.8: + edge_color = self._change_color_brightness(color, brightness_factor=-0.7) + else: + edge_color = color + edge_color = mplc.to_rgb(edge_color) + (1,) + + polygon = mpl.patches.Polygon( + segment, + fill=True, + facecolor=mplc.to_rgb(color) + (alpha,), + edgecolor=edge_color, + linewidth=max(self._default_font_size // 15 * self.output.scale, 1), + ) + self.output.ax.add_patch(polygon) + return self.output + + """ + Internal methods: + """ + + def _jitter(self, color): + """ + Randomly modifies given color to produce a slightly different color than the color given. + + Args: + color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color + picked. The values in the list are in the [0.0, 1.0] range. + + Returns: + jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the + color after being jittered. The values in the list are in the [0.0, 1.0] range. + """ + color = mplc.to_rgb(color) + vec = np.random.rand(3) + # better to do it in another color space + vec = vec / np.linalg.norm(vec) * 0.5 + res = np.clip(vec + color, 0, 1) + return tuple(res) + + def _create_grayscale_image(self, mask=None): + """ + Create a grayscale version of the original image. + The colors in masked area, if given, will be kept. + """ + img_bw = self.img.astype("f4").mean(axis=2) + img_bw = np.stack([img_bw] * 3, axis=2) + if mask is not None: + img_bw[mask] = self.img[mask] + return img_bw + + def _change_color_brightness(self, color, brightness_factor): + """ + Depending on the brightness_factor, gives a lighter or darker color i.e. a color with + less or more saturation than the original color. + + Args: + color: color of the polygon. Refer to `matplotlib.colors` for a full list of + formats that are accepted. + brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of + 0 will correspond to no change, a factor in [-1.0, 0) range will result in + a darker color and a factor in (0, 1.0] range will result in a ligher color. + + Returns: + modified_color (tuple[double]): a tuple containing the RGB values of the + modified color. Each value in the tuple is in the [0.0, 1.0] range. 
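+
+        Example (a sketch with arbitrary values):
+
+            lighter = self._change_color_brightness("blue", brightness_factor=0.7)
+            darker = self._change_color_brightness("blue", brightness_factor=-0.7)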
+ """ + assert brightness_factor >= -1.0 and brightness_factor <= 1.0 + color = mplc.to_rgb(color) + polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) + modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) + modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness + modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness + modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) + return modified_color + + def _convert_boxes(self, boxes): + """ + Convert different format of boxes to a Nx4 array. + """ + if isinstance(boxes, Boxes): + return boxes.tensor.numpy() + else: + return np.asarray(boxes) + + def _convert_masks(self, masks_or_polygons): + """ + Convert different format of masks or polygons to a tuple of masks and polygons. + + Returns: + list[GenericMask]: + """ + + m = masks_or_polygons + if isinstance(m, PolygonMasks): + m = m.polygons + if isinstance(m, BitMasks): + m = m.tensor.numpy() + if isinstance(m, torch.Tensor): + m = m.numpy() + ret = [] + for x in m: + if isinstance(x, GenericMask): + ret.append(x) + else: + ret.append(GenericMask(x, self.output.height, self.output.width)) + return ret + + def _convert_keypoints(self, keypoints): + if isinstance(keypoints, Keypoints): + keypoints = keypoints.tensor + keypoints = np.asarray(keypoints) + return keypoints + + def get_output(self): + """ + Returns: + output (VisImage): the image output containing the visualizations added + to the image. + """ + return self.output diff --git a/dev/README.md b/dev/README.md new file mode 100644 index 0000000000..287f91f930 --- /dev/null +++ b/dev/README.md @@ -0,0 +1,6 @@ + +## Some scripts for developers to use, include: + +- `linter.sh`: lint the codebase before commit +- `run_{inference,instant}_tests.sh`: run inference/training for a few iterations. +- `parse_results.sh`: parse results from log file. diff --git a/dev/linter.sh b/dev/linter.sh new file mode 100755 index 0000000000..3fdb2d6665 --- /dev/null +++ b/dev/linter.sh @@ -0,0 +1,26 @@ +#!/bin/bash -ev +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# Run this script at project root by "./dev/linter.sh" before you commit + +echo "Running isort ..." +isort -y --multi-line 3 --trailing-comma -sp . --skip datasets --skip docs --skip-glob '*/__init__.py' --atomic + +echo "Running black ..." +black -l 100 . + +echo "Running flake8 ..." +if [ -x "$(command -v flake8-3)" ]; then + flake8-3 . +else + python3 -m flake8 . +fi + +# echo "Running mypy ..." +# Pytorch does not have enough type annotations +# mypy detectron2/solver detectron2/structures detectron2/config + +echo "Running clang-format ..." +find . -regex ".*\.\(cpp\|c\|cc\|cu\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 clang-format -i + +command -v arc > /dev/null && arc lint diff --git a/dev/parse_results.sh b/dev/parse_results.sh new file mode 100755 index 0000000000..358cac8c42 --- /dev/null +++ b/dev/parse_results.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# A shell script that parses metrics from the log file. +# Make it easier for developers to track performance of models. 
+ +LOG="$1" + +if [[ -z "$LOG" ]]; then + echo "Usage: $0 /path/to/log/file" + exit 1 +fi + +# [12/15 11:47:32] trainer INFO: Total training time: 12:15:04.446477 (0.4900 s / it) +# [12/15 11:49:03] inference INFO: Total inference time: 0:01:25.326167 (0.13652186737060548 s / img per device, on 8 devices) + +# training time +trainspeed=$(grep -o 'Overall training.*' "$LOG" | grep -Eo '\(.*\)' | grep -o '[0-9\.]*') +echo "Training speed: $trainspeed s/it" + +# inference time: there could be multiple inference during training +inferencespeed=$(grep -o 'Total inference.*' "$LOG" | tail -n1 | grep -Eo '\(.*\)' | grep -o '[0-9\.]*' | head -n1) +echo "Inference speed: $inferencespeed s/it" + +# [12/15 11:47:18] trainer INFO: eta: 0:00:00 iter: 90000 loss: 0.5407 (0.7256) loss_classifier: 0.1744 (0.2446) loss_box_reg: 0.0838 (0.1160) loss_mask: 0.2159 (0.2722) loss_objectness: 0.0244 (0.0429) loss_rpn_box_reg: 0.0279 (0.0500) time: 0.4487 (0.4899) data: 0.0076 (0.0975) lr: 0.000200 max mem: 4161 +memory=$(grep -o 'max[_ ]mem: [0-9]*' "$LOG" | tail -n1 | grep -o '[0-9]*') +echo "Training memory: $memory MB" + +echo "Easy to copypaste:" +echo "$trainspeed","$inferencespeed","$memory" + +echo "------------------------------" + +# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: bbox +# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl +# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0017,0.0024,0.0017,0.0005,0.0019,0.0011 +# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: segm +# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl +# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0014,0.0021,0.0016,0.0005,0.0016,0.0011 + +echo "COCO Results:" +num_tasks=$(grep -o 'copypaste:.*Task.*' "$LOG" | sort -u | wc -l) +# each task has 3 lines +grep -o 'copypaste:.*' "$LOG" | cut -d ' ' -f 2- | tail -n $((num_tasks * 3)) diff --git a/dev/run_inference_tests.sh b/dev/run_inference_tests.sh new file mode 100755 index 0000000000..17e422d576 --- /dev/null +++ b/dev/run_inference_tests.sh @@ -0,0 +1,44 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +BIN="python tools/train_net.py" +OUTPUT="inference_test_output" +NUM_GPUS=2 + +CFG_LIST=( "${@:1}" ) + +if [ ${#CFG_LIST[@]} -eq 0 ]; then + CFG_LIST=( ./configs/quick_schedules/*inference_acc_test.yaml ) +fi + +echo "========================================================================" +echo "Configs to run:" +echo "${CFG_LIST[@]}" +echo "========================================================================" + + +for cfg in "${CFG_LIST[@]}"; do + echo "========================================================================" + echo "Running $cfg ..." + echo "========================================================================" + $BIN \ + --eval-only \ + --num-gpus $NUM_GPUS \ + --config-file "$cfg" \ + OUTPUT_DIR $OUTPUT + rm -rf $OUTPUT +done + + +echo "========================================================================" +echo "Running demo.py ..." 
+echo "========================================================================" +DEMO_BIN="python demo/demo.py" +COCO_DIR=datasets/coco/val2014 +mkdir -pv $OUTPUT + +set -v + +$DEMO_BIN --config-file ./configs/quick_schedules/panoptic_fpn_R_50_inference_acc_test.yaml \ + --input $COCO_DIR/COCO_val2014_0000001933* --output $OUTPUT +rm -rf $OUTPUT diff --git a/dev/run_instant_tests.sh b/dev/run_instant_tests.sh new file mode 100755 index 0000000000..2c51de6492 --- /dev/null +++ b/dev/run_instant_tests.sh @@ -0,0 +1,27 @@ +#!/bin/bash -e +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +BIN="python tools/train_net.py" +OUTPUT="instant_test_output" +NUM_GPUS=2 + +CFG_LIST=( "${@:1}" ) +if [ ${#CFG_LIST[@]} -eq 0 ]; then + CFG_LIST=( ./configs/quick_schedules/*instant_test.yaml ) +fi + +echo "========================================================================" +echo "Configs to run:" +echo "${CFG_LIST[@]}" +echo "========================================================================" + +for cfg in "${CFG_LIST[@]}"; do + echo "========================================================================" + echo "Running $cfg ..." + echo "========================================================================" + $BIN --num-gpus $NUM_GPUS --config-file "$cfg" \ + SOLVER.IMS_PER_BATCH $(($NUM_GPUS * 2)) \ + OUTPUT_DIR "$OUTPUT" + rm -rf "$OUTPUT" +done + diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 0000000000..e35d8850c9 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1 @@ +_build diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000000..d537643dd4 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,19 @@ +# Minimal makefile for Sphinx documentation +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000000..12366165e8 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,5 @@ + +# Build the docs: + +1. Install dependencies in `requirements.txt` +2. `make html` diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000000..6dacdb6c27 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,238 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +import mock + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+# +import sphinx_rtd_theme + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# to support markdown +from recommonmark.parser import CommonMarkParser + +import detectron2 + +sys.path.insert(0, os.path.abspath("../")) + +DEPLOY = False + + +# -- Project information ----------------------------------------------------- + +try: + import torch # noqa +except ImportError: + for m in [ + "torch", + "torchvision", + "torch.nn", + "torch.distributed", + "torch.autograd", + "torch.autograd.function", + "torch.nn.modules", + "torch.nn.modules.utils", + ]: + sys.modules[m] = mock.Mock(name=m) + +for m in ["cv2", "scipy", "portalocker"]: + sys.modules[m] = mock.Mock(name=m) +sys.modules["cv2"].__version__ = "3.4" + + +project = "detectron2" +copyright = "2019, detectron2 contributors" +author = "detectron2 contributors" + +# The short X.Y version +version = detectron2.__version__ +# The full version, including alpha/beta/rc tags +release = version + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +needs_sphinx = "1.7" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", + "sphinx.ext.viewcode", + "sphinx.ext.githubpages", +] + +# -- Configurations for plugins ------------ +napoleon_google_docstring = True +napoleon_include_init_with_doc = True +napoleon_include_special_with_doc = True +napoleon_numpy_docstring = False +napoleon_use_rtype = False + +if DEPLOY: + intersphinx_timeout = 10 +else: + # skip this when building locally + intersphinx_timeout = 0.1 +intersphinx_mapping = { + "python": ("https://docs.python.org/3.6", None), + "numpy": ("https://docs.scipy.org/doc/numpy/", None), +} +# ------------------------- + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +source_parsers = {".md": CommonMarkParser} + +source_suffix = [".rst", ".md"] + +# The master toctree document. +master_doc = "index" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md"] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + + +# -- Options for HTML output ------------------------------------------------- + +html_theme = "sphinx_rtd_theme" +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. 
They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = "detectron2doc" + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, "detectron2.tex", "detectron2 Documentation", "detectron2 contributors", "manual") +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, "detectron2", "detectron2 Documentation", [author], 1)] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "detectron2", + "detectron2 Documentation", + author, + "detectron2", + "One line description of project.", + "Miscellaneous", + ) +] + + +# -- Options for todo extension ---------------------------------------------- + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +def setup(app): + from recommonmark.transform import AutoStructify + + # app.connect('autodoc-skip-member', autodoc_skip_member) + app.add_config_value( + "recommonmark_config", + { # 'url_resolver': url_resolver, + "auto_toc_tree_section": "Contents", + "enable_math": True, + "enable_inline_math": True, + "enable_eval_rst": True, + }, + True, + ) + app.add_transform(AutoStructify) diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000000..3672b1f4c1 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,16 @@ +.. detectron2 documentation master file, created by + sphinx-quickstart on Sat Sep 21 13:46:45 2019. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to detectron2's documentation! +====================================== + +Some nice intro. + +.. toctree:: + :maxdepth: 2 + + tutorials/index + notes/index + modules/index diff --git a/docs/modules/checkpoint.rst b/docs/modules/checkpoint.rst new file mode 100644 index 0000000000..616cb186c4 --- /dev/null +++ b/docs/modules/checkpoint.rst @@ -0,0 +1,7 @@ +detectron2.checkpoint package +============================= + +.. 
automodule:: detectron2.checkpoint + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/config.rst b/docs/modules/config.rst new file mode 100644 index 0000000000..ae96a6bcbe --- /dev/null +++ b/docs/modules/config.rst @@ -0,0 +1,7 @@ +detectron2.config package +========================= + +.. automodule:: detectron2.config + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/data.datasets.rst b/docs/modules/data.datasets.rst new file mode 100644 index 0000000000..e3ffd2f4b3 --- /dev/null +++ b/docs/modules/data.datasets.rst @@ -0,0 +1,7 @@ +detectron2.data.datasets package +================================ + +.. automodule:: detectron2.data.datasets + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/data.rst b/docs/modules/data.rst new file mode 100644 index 0000000000..cb129b1e2d --- /dev/null +++ b/docs/modules/data.rst @@ -0,0 +1,26 @@ +detectron2.data package +======================= + +.. automodule:: detectron2.data + :members: + :undoc-members: + :show-inheritance: + +detectron2.data.detection\_utils module +--------------------------------------- + +.. automodule:: detectron2.data.detection_utils + :members: + :undoc-members: + :show-inheritance: + +Subpackages +----------- + +.. toctree:: + + data.datasets + data.samplers + data.transforms + + diff --git a/docs/modules/data.samplers.rst b/docs/modules/data.samplers.rst new file mode 100644 index 0000000000..e2040902eb --- /dev/null +++ b/docs/modules/data.samplers.rst @@ -0,0 +1,7 @@ +detectron2.data.samplers package +================================ + +.. automodule:: detectron2.data.samplers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/data.transforms.rst b/docs/modules/data.transforms.rst new file mode 100644 index 0000000000..97d9170814 --- /dev/null +++ b/docs/modules/data.transforms.rst @@ -0,0 +1,7 @@ +detectron2.data.transforms package +================================== + +.. automodule:: detectron2.data.transforms + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/engine.rst b/docs/modules/engine.rst new file mode 100644 index 0000000000..bb8b533aee --- /dev/null +++ b/docs/modules/engine.rst @@ -0,0 +1,25 @@ +detectron2.engine package +========================= + + +.. automodule:: detectron2.engine + :members: + :undoc-members: + :show-inheritance: + + +detectron2.engine.defaults module +--------------------------------- + +.. automodule:: detectron2.engine.defaults + :members: + :undoc-members: + :show-inheritance: + +detectron2.engine.hooks module +--------------------------------- + +.. automodule:: detectron2.engine.hooks + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/evaluation.rst b/docs/modules/evaluation.rst new file mode 100644 index 0000000000..d9d34ff1a2 --- /dev/null +++ b/docs/modules/evaluation.rst @@ -0,0 +1,7 @@ +detectron2.evaluation package +============================= + +.. automodule:: detectron2.evaluation + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/index.rst b/docs/modules/index.rst new file mode 100644 index 0000000000..61c20838e4 --- /dev/null +++ b/docs/modules/index.rst @@ -0,0 +1,15 @@ +API Documentation +================== + +.. 
toctree:: + + checkpoint + config + data + engine + evaluation + layers + modeling + solver + structures + utils diff --git a/docs/modules/layers.rst b/docs/modules/layers.rst new file mode 100644 index 0000000000..6aeb5213a4 --- /dev/null +++ b/docs/modules/layers.rst @@ -0,0 +1,7 @@ +detectron2.layers package +========================= + +.. automodule:: detectron2.layers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/modeling.rst b/docs/modules/modeling.rst new file mode 100644 index 0000000000..c71ab2c7fb --- /dev/null +++ b/docs/modules/modeling.rst @@ -0,0 +1,7 @@ +detectron2.modeling package +=========================== + +.. automodule:: detectron2.modeling + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/solver.rst b/docs/modules/solver.rst new file mode 100644 index 0000000000..7f4a49f2eb --- /dev/null +++ b/docs/modules/solver.rst @@ -0,0 +1,7 @@ +detectron2.solver package +========================= + +.. automodule:: detectron2.solver + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/structures.rst b/docs/modules/structures.rst new file mode 100644 index 0000000000..5701c61abf --- /dev/null +++ b/docs/modules/structures.rst @@ -0,0 +1,7 @@ +detectron2.structures package +============================= + +.. automodule:: detectron2.structures + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/modules/utils.rst b/docs/modules/utils.rst new file mode 100644 index 0000000000..7693b3c570 --- /dev/null +++ b/docs/modules/utils.rst @@ -0,0 +1,63 @@ +detectron2.utils package +======================== + +detectron2.utils.colormap module +-------------------------------- + +.. automodule:: detectron2.utils.colormap + :members: + :undoc-members: + :show-inheritance: + +detectron2.utils.comm module +---------------------------- + +.. automodule:: detectron2.utils.comm + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.events module +------------------------------ + +.. automodule:: detectron2.utils.events + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.logger module +------------------------------ + +.. automodule:: detectron2.utils.logger + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.registry module +-------------------------------- + +.. automodule:: detectron2.utils.registry + :members: + :undoc-members: + :show-inheritance: + + +detectron2.utils.video\_visualizer module +----------------------------------------- + +.. automodule:: detectron2.utils.video_visualizer + :members: + :undoc-members: + :show-inheritance: + +detectron2.utils.visualizer module +---------------------------------- + +.. automodule:: detectron2.utils.visualizer + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/notes/benchmarks.md b/docs/notes/benchmarks.md new file mode 100644 index 0000000000..8d3c3013a5 --- /dev/null +++ b/docs/notes/benchmarks.md @@ -0,0 +1,202 @@ + +# Benchmarks + +Here we benchmark the training speed of a Mask R-CNN in detectron2, +with some other popular open source Mask R-CNN implementations. + + +### Settings + +* Hardware: 8 NVIDIA V100s. +* Software: CUDA 10.0, cuDNN 7.6.4, PyTorch 1.3.0.dev20190920 (nightly build), TensorFlow 1.5.0rc2, Keras 2.2.5. +* Model: an end-to-end R-50-FPN Mask-RCNN model, using the same hyperparameter as the + [Detectron baseline config](https://github.com/facebookresearch/Detectron/blob/master/configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml). 
+* Metrics: We use the average throughput in iterations 100-500 to skip GPU warmup time. + Note that for R-CNN-style models, the throughput of a model typically changes during training, because + it depends on the predictions of the model. Therefore this metric is not directly comparable with + "train speed" in model zoo, which is the average speed of the entire training run. + + +### Main Results + +```eval_rst ++-----------------------------+--------------------+ +| Implementation | Throughput (img/s) | ++=============================+====================+ +| Detectron2 | 60 | ++-----------------------------+--------------------+ +| maskrcnn-benchmark_ | 51 | ++-----------------------------+--------------------+ +| tensorpack_ | 50 | ++-----------------------------+--------------------+ +| mmdetection_ | 41 | ++-----------------------------+--------------------+ +| Detectron_ | 19 | ++-----------------------------+--------------------+ +| `matterport/Mask_RCNN`__ | 14 | ++-----------------------------+--------------------+ + +.. _maskrcnn-benchmark: https://github.com/facebookresearch/maskrcnn-benchmark/ +.. _tensorpack: https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN +.. _mmdetection: https://github.com/open-mmlab/mmdetection/ +.. _Detectron: https://github.com/facebookresearch/Detectron +__ https://github.com/matterport/Mask_RCNN/ +``` + + +Details for each implementation: + +* __Detectron2__: + ``` + python tools/train_net.py --config-file configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml --num-gpus 8 + ``` + +* __maskrcnn-benchmark__: use commit `0ce8f6f` with `sed -i ‘s/torch.uint8/torch.bool/g’ **/*.py` to make it compatible with latest PyTorch. + Then, run training with + ``` + python -m torch.distributed.launch --nproc_per_node=8 tools/train_net.py --config-file configs/e2e_mask_rcnn_R_50_FPN_1x.yaml + ``` + The speed we observed is faster than its model zoo, likely due to different software versions. + +* __tensorpack__: at commit `caafda`, `export TF_CUDNN_USE_AUTOTUNE=0`, then run + ``` + mpirun -np 8 ./train.py --config DATA.BASEDIR=/data/coco TRAINER=horovod BACKBONE.STRIDE_1X1=True TRAIN.STEPS_PER_EPOCH=50 --load ImageNet-R50-AlignPadding.npz + ``` + +* __mmdetection__: at commit `4d9a5f`, apply the following diff, then run + ``` + ./tools/dist_train.sh configs/mask_rcnn_r50_fpn_1x.py 8 + ``` + + The speed we observed is faster than its model zoo, likely due to different software versions. + +
+ + (diff to make it use the same architecture - click to expand) + + ```diff + diff --git i/configs/mask_rcnn_r50_fpn_1x.py w/configs/mask_rcnn_r50_fpn_1x.py + index 04f6d22..ed721f2 100644 + --- i/configs/mask_rcnn_r50_fpn_1x.py + +++ w/configs/mask_rcnn_r50_fpn_1x.py + @@ -1,14 +1,15 @@ + # model settings + model = dict( + type='MaskRCNN', + - pretrained='torchvision://resnet50', + + pretrained='open-mmlab://resnet50_caffe', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + - style='pytorch'), + + norm_cfg=dict(type="BN", requires_grad=False), + + style='caffe'), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + @@ -115,7 +116,7 @@ test_cfg = dict( + dataset_type = 'CocoDataset' + data_root = 'data/coco/' + img_norm_cfg = dict( + - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + + mean=[123.675, 116.28, 103.53], std=[1.0, 1.0, 1.0], to_rgb=False) + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + ``` +
+ +* __Detectron__: run + ``` + python tools/train_net.py --cfg configs/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml + ``` + +* __matterport/Mask_RCNN__: at commit `3deaec`, apply the following diff, `export TF_CUDNN_USE_AUTOTUNE=0`, then run + ``` + python coco.py train --dataset=/data/coco/ --model=imagenet + ``` + +
+ + (diff to make it use the same hyperparameters - click to expand) + + ```diff + diff --git i/mrcnn/model.py w/mrcnn/model.py + index 62cb2b0..61d7779 100644 + --- i/mrcnn/model.py + +++ w/mrcnn/model.py + @@ -2367,8 +2367,8 @@ class MaskRCNN(): + epochs=epochs, + steps_per_epoch=self.config.STEPS_PER_EPOCH, + callbacks=callbacks, + - validation_data=val_generator, + - validation_steps=self.config.VALIDATION_STEPS, + + #validation_data=val_generator, + + #validation_steps=self.config.VALIDATION_STEPS, + max_queue_size=100, + workers=workers, + use_multiprocessing=True, + diff --git i/mrcnn/parallel_model.py w/mrcnn/parallel_model.py + index d2bf53b..060172a 100644 + --- i/mrcnn/parallel_model.py + +++ w/mrcnn/parallel_model.py + @@ -32,6 +32,7 @@ class ParallelModel(KM.Model): + keras_model: The Keras model to parallelize + gpu_count: Number of GPUs. Must be > 1 + """ + + super().__init__() + self.inner_model = keras_model + self.gpu_count = gpu_count + merged_outputs = self.make_parallel() + diff --git i/samples/coco/coco.py w/samples/coco/coco.py + index 5d172b5..239ed75 100644 + --- i/samples/coco/coco.py + +++ w/samples/coco/coco.py + @@ -81,7 +81,10 @@ class CocoConfig(Config): + IMAGES_PER_GPU = 2 + + # Uncomment to train on 8 GPUs (default is 1) + - # GPU_COUNT = 8 + + GPU_COUNT = 8 + + BACKBONE = "resnet50" + + STEPS_PER_EPOCH = 50 + + TRAIN_ROIS_PER_IMAGE = 512 + + # Number of classes (including background) + NUM_CLASSES = 1 + 80 # COCO has 80 classes + @@ -496,29 +499,10 @@ if __name__ == '__main__': + # *** This training schedule is an example. Update to your needs *** + + # Training - Stage 1 + - print("Training network heads") + model.train(dataset_train, dataset_val, + learning_rate=config.LEARNING_RATE, + epochs=40, + - layers='heads', + - augmentation=augmentation) + - + - # Training - Stage 2 + - # Finetune layers from ResNet stage 4 and up + - print("Fine tune Resnet stage 4 and up") + - model.train(dataset_train, dataset_val, + - learning_rate=config.LEARNING_RATE, + - epochs=120, + - layers='4+', + - augmentation=augmentation) + - + - # Training - Stage 3 + - # Fine tune all layers + - print("Fine tune all layers") + - model.train(dataset_train, dataset_val, + - learning_rate=config.LEARNING_RATE / 10, + - epochs=160, + - layers='all', + + layers='3+', + augmentation=augmentation) + + elif args.command == "evaluate": + ``` +
diff --git a/docs/notes/changelog.md b/docs/notes/changelog.md new file mode 100644 index 0000000000..afb43fd721 --- /dev/null +++ b/docs/notes/changelog.md @@ -0,0 +1,12 @@ +# Change Log + + +### Major Changes in Code + +* 2019-10-10: initial release. + + +### Config Version Change Log + +* v1: Rename `RPN_HEAD.NAME` to `RPN.HEAD_NAME`. +* v2: A batch of rename of many configurations before release. diff --git a/docs/notes/compatibility.md b/docs/notes/compatibility.md new file mode 100644 index 0000000000..efd1afa122 --- /dev/null +++ b/docs/notes/compatibility.md @@ -0,0 +1,75 @@ +# Compatibility with Other Libraries + +## Compatibility with Detectron + +Detectron2 addresses some legacy issues left in Detectron, as a result, their models +are not compatible: +running inference with the same model weights will produce different results in the two code bases. + +The major differences regarding inference are: + +- The height and width of a box with corners (x1, y1) and (x2, y2) is now computed more naturally as + width = x2 - x1 and height = y2 - y1; + In Detectron, a "+ 1" was added both height and width. + + Note that the relevant ops in Caffe2 have [adopted this change of convention](https://github.com/pytorch/pytorch/pull/20550) + with an extra option. + So it is still possible to run inference with a Detectron2-trained model in Caffe2. + + The change in height/width calculations most notably changes: + - encoding/decoding in bounding box regression. + - non-maximum suppression. The effect here is very negligible, though. + +- RPN now uses simpler anchors with fewer quantization artifacts. + + In Detectron, the anchors were quantized and + [do not have accurate areas](https://github.com/facebookresearch/Detectron/issues/227). + In Detectron2, the anchors are center-aligned to feature grid points and not quantized. + +- Classification layers have a different ordering of class labels. + + This involves any trainable parameter with shape (..., num_categories + 1, ...). + In Detectron2, integer labels [0, K-1] correspond to the K = num_categories object categories + and the label "K" corresponds to the special "background" category. + In Detectron, label "0" means background, and labels [1, K] correspond to the K categories. + +- ROIAlign is implemented differently. The new implementation is [available in Caffe2](https://github.com/pytorch/pytorch/pull/23706). + + 1. All the ROIs are shifted by half a pixel compared to Detectron in order to create better image-feature-map alignment. + See `layers/roi_align.py` for details. + To enable the old behavior, use `ROIAlign(aligned=False)`, or `POOLER_TYPE=ROIAlign` instead of + `ROIAlignV2` (the default). + + 1. The ROIs are not required to have a minimum size of 1. + This will lead to tiny differences in the output, but should be negligible. + +- Mask inference function is different. + + In Detectron2, the "paste_mask" function is different and should be more accurate than in Detectron. This change + can improve mask AP on COCO by ~0.5% absolute. + +There are some other differences in training as well, but they won't affect +model-level compatibility. The majors ones are: + +- We fixed a [bug](https://github.com/facebookresearch/Detectron/issues/459) in + Detectron, by making `RPN.POST_NMS_TOPK_TRAIN` per-image, rather than per-batch. + The fix may lead to a small accuracy drop for a few models (e.g. keypoint + detection) and will require some parameter tuning to match the Detectron results. 
+- For simplicity, we change the default loss in bounding box regression to L1 loss, instead of smooth L1 loss. + We have observed that this tends to slightly decrease box AP50 while improving box AP for higher + overlap thresholds (and leading to a slight overall improvement in box AP). +- We interpret the coordinates in COCO bounding box and segmentation annotations + as coordinates in range `[0, width]` or `[0, height]`, and the coordinates in + COCO keypoint annotations are pixel indices in range `[0, width - 1]` or `[0, height - 1]`. + + +We will later share more details and rationale behind the above mentioned issues +about pixels, coordinates, and "+1"s. + + +## Compatibility with Caffe2 + +As mentioned above, despite the incompatibilities with Detectron, the relevant +ops have been implemented in Caffe2, in [PR1](https://github.com/pytorch/pytorch/pull/20550) +and [PR2](https://github.com/pytorch/pytorch/pull/23706). +Therefore, models trained with detectron2 can be used in Caffe2. diff --git a/docs/notes/index.rst b/docs/notes/index.rst new file mode 100644 index 0000000000..0c1b65d281 --- /dev/null +++ b/docs/notes/index.rst @@ -0,0 +1,9 @@ +Notes +====================================== + +.. toctree:: + :maxdepth: 2 + + benchmarks + compatibility + changelog diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000000..0a23f0b94d --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,13 @@ +termcolor +numpy +tqdm +docutils>=0.14 +Sphinx>=1.7 +recommonmark==0.4.0 +sphinx_rtd_theme +mock +matplotlib +termcolor +yacs +tabulate +git+git://github.com/facebookresearch/fvcore.git diff --git a/docs/tutorials/configs.md b/docs/tutorials/configs.md new file mode 100644 index 0000000000..33928f37ea --- /dev/null +++ b/docs/tutorials/configs.md @@ -0,0 +1,26 @@ +# Using Configs + +Detectron2's config system uses yaml and [yacs](https://github.com/rbgirshick/yacs). +In addition to the basic operations that access and update a config, we provide +the following extra functionalities: + +1. The config can have `_BASE_: base.yaml` field, which will load a base config first. + Values in the base config will be overwritten in sub-configs, if there are any conflicts. + We provided several base configs for standard model architectures. +2. We provide config versioning, for backward compatibility. + If your config file is versioned with a config line like `VERSION: 2`, + detectron2 will still recognize it even if we make rename to some keys in the future. + +### Best Practice with Configs + +1. Treat the configs you write as "code": avoid copying them or duplicating them; use "_BASE_" + instead to share common parts between configs. + +2. Keep the configs you write simple: don't include keys that do not affect the experimental setting. + +3. Keep a version number in your configs (or the base config), e.g., `VERSION: 2`, + for backward compatibility. + +4. Save a full config together with a trained model, and use it to run inference. + This is more robust to changes that may happen to the config definition + (e.g., if a default value changed). diff --git a/docs/tutorials/data_loading.md b/docs/tutorials/data_loading.md new file mode 100644 index 0000000000..3767cbe462 --- /dev/null +++ b/docs/tutorials/data_loading.md @@ -0,0 +1,81 @@ + +# Using Custom Data Loaders + +Detectron2 contains a builtin data loading pipeline. +It's good to understand how it works, in case you need to write a custom one. 
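+
+Before the step-by-step description that follows, here is a minimal usage sketch of how the builtin loaders are typically created from a config. The config path and dataset name are only examples, and the snippet assumes the corresponding datasets have been registered and are available on disk:
+
+```python
+from detectron2.config import get_cfg
+from detectron2.data import build_detection_train_loader, build_detection_test_loader
+
+cfg = get_cfg()
+cfg.merge_from_file("configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")  # example config
+
+# The training loader yields an endless stream of batches; each batch is a
+# list[dict] with one dict per image, produced by the "mapper" described below.
+train_loader = build_detection_train_loader(cfg)
+batch = next(iter(train_loader))
+
+# The test loader iterates over one named dataset exactly once.
+test_loader = build_detection_test_loader(cfg, "coco_2017_val")
+```
+
+Both loaders produce the `list[dict]` batches that the builtin models consume, as detailed in the rest of this page.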
+ +Detectron2 provides two functions +`build_detection_{train,test}_loader` that create a data loader from a given config. +Here is how `build_detection_{train,test}_loader` work: + +1. It takes the name of the dataset (e.g., "coco_2017_train") and loads a `list[dict]` representing the dataset items + in a lightweight, canonical format. These dataset items are not yet ready to be used by the model (e.g., images are + not loaded into memory, random augmentations have not been applied, etc.). + Details about the dataset format and dataset registration can be found in [datasets](datasets). +2. Each dict in this list is mapped by a function ("mapper"): + * Users can customize this mapping function by specifying the "mapper" argument in + `build_detection_{train,test}_loader`. The default mapper is [DatasetMapper]( ../modules/data.html#detectron2.data.DatasetMapper) + * There is no constraints on the output format, as long as it is accepted by the consumer of this data loader (usually the model). + * The role of the mapper is to transform the lightweight, canonical representation of a dataset item into a format + that is ready for the model to consume (including, e.g., reading images and performing random data augmentation). + The output format of the default mapper is explained below. +3. The outputs of the mapper are batched (simply into a list). +4. This batched data is the output of the data loader. Typically, it's also the input of + `model.forward()`. + + +If you want to do something different (e.g., use different sampling or batching logic), +you can write your own data loader. The only requirement is that it produces the format your model accepts. +Next, we explain the input format used by the builtin models in detectron2. + + +### Model Input Format + +The output of the default [DatasetMapper]( ../modules/data.html#detectron2.data.DatasetMapper) is a dict. +After the data loader performs batching, it becomes `list[dict]`, with one dict per image. +This will be the input format of all the builtin models. + +The dict may contain the following keys: + +* "image": `Tensor` in (C, H, W) format. +* "instances": an `Instances` object, with the following fields: + + "gt_boxes": `Boxes` object storing N boxes, one for each instance. + + "gt_classes": `Tensor`, a vector of N labels, in range [0, num_categories). + + "gt_masks": a `PolygonMasks` object storing N masks, one for each instance. + + "gt_keypoints": a `Keypoints` object storing N keypoint sets, one for each instance. +* "proposals": an `Instances` object used in Fast R-CNN style models, with the following fields: + + "proposal_boxes": `Boxes` object storing P proposal boxes. + + "objectness_logits": `Tensor`, a vector of P scores, one for each proposal. +* "height", "width": the *desired* output height and width of the image, not necessarily the same + as the height or width of the `image` when input into the model, which might be after resizing. + For example, it can be the *original* image height and width before resizing. + + If provided, the model will produce output in this resolution, + rather than in the resolution of the `image` as input into the model. This is more efficient and accurate. +* "sem_seg": `Tensor[int]` in (H, W) format. The semantic segmentation ground truth. + + +### Model Output Format + +The standard models outputs a `list[dict]`, one dict for each image. 
Each dict may contain: + +* "instances": [Instances](../modules/structures.html#detectron2.structures.Instances) + object with the following fields: + * "pred_boxes": [Boxes](../modules/structures.html#detectron2.structures.Boxes) object storing N boxes, one for each detected instance. + * "scores": `Tensor`, a vector of N scores. + * "pred_classes": `Tensor`, a vector of N labels in range [0, num_categories). + + "pred_masks": a `Tensor` of shape (N, H, W), masks for each detected instance. + + "pred_keypoints": a `Tensor` of shape (N, num_keypoint, 3). + Each row in the last dimension is (x, y, score). +* "sem_seg": `Tensor` of (num_categories, H, W), the semantic segmentation prediction. +* "proposals": [Instances](../modules/structures.html#detectron2.structures.Instances) + object with the following fields: + * "proposal_boxes": [Boxes](../modules/structures.html#detectron2.structures.Boxes) + object storing N boxes. + * "objectness_logits": a torch vector of N scores. +* "panoptic_seg": A tuple of (Tensor, list[dict]). The tensor has shape (H, W), where each element + represent the segment id of the pixel. Each dict describes one segment id and has the following fields: + * "id": the segment id + * "isthing": whether the segment is a thing or stuff + * "category_id": the category id of this segment. It represents the thing + class id when `isthing==True`, and the stuff class id otherwise. diff --git a/docs/tutorials/datasets.md b/docs/tutorials/datasets.md new file mode 100644 index 0000000000..abf44d5c03 --- /dev/null +++ b/docs/tutorials/datasets.md @@ -0,0 +1,152 @@ +# Using Custom Datasets + +If you want to use a custom dataset while also reusing detectron2's data loaders, +you will need to + +1. Register your dataset (i.e., tell detectron2 how to obtain your dataset). +2. Optionally, register metadata for your dataset. + +Next, we explain the above two concepts in details. + + +### Registering a Dataset + +To let detectron2 know how to obtain a dataset named "my_dataset", you will implement +a function that returns the items in your dataset and then tell detectron2 about this +function: +```python +def get_dicts(): + ... + return list[dict] in the following format + +from detectron2.data import DatasetCatalog +DatasetCatalog.register("my_dataset", get_dicts) +``` + +Here, the snippet associates a dataset "my_dataset" with a function that returns the data. +If you do not modify downstream code (i.e., you use the standard data loader and data mapper), +then the function has to return a list of dicts in detectron2's standard dataset format, described +next. + +For standard tasks +(instance detection, instance/semantic/panoptic segmentation, keypoint detection), +we use a format similar to COCO's json annotations +as the basic dataset representation. + +The format uses one dict to represent the annotations of +one image. The dict may have the following fields. +The fields are often optional, and some functions may be able to +infer certain fields from others if needed, e.g., the data loader +can load an image from "file_name" if the "image" field is not available. + ++ `file_name`: the full path to the image file. ++ `sem_seg_file_name`: the full path to the ground truth semantic segmentation file. ++ `image`: the image as a numpy array. ++ `sem_seg`: semantic segmentation ground truth in a 2D numpy array. Values in the array represent + category labels. ++ `height`, `width`: integer. The shape of image. ++ `image_id` (str): a string to identify this image. 
Mainly used during evaluation to identify the
+  image. Each dataset may use it for different purposes.
++ `annotations` (list[dict]): the per-instance annotations of every
+  instance in this image. Each annotation dict may contain:
+  + `bbox` (list[float]): list of 4 numbers representing the bounding box of the instance.
+  + `bbox_mode` (int): the format of bbox.
+    It must be a member of [detectron2.structures.BoxMode](detectron2/structures/boxes.py).
+    Currently supports: `BoxMode.XYXY_ABS`, `BoxMode.XYWH_ABS`.
+  + `category_id` (int): an integer in the range [0, num_categories) representing the category label.
+    The value num_categories is reserved to represent the "background" category, if applicable.
+  + `segmentation` (list[list[float]] or dict):
+    + If `list[list[float]]`, it represents a list of polygons, one for each connected component
+      of the object. Each `list[float]` is one simple polygon in the format of `[x1, y1, ..., xn, yn]`.
+      The Xs and Ys are either relative coordinates in [0, 1], or absolute coordinates,
+      depending on whether "bbox_mode" is relative.
+    + If `dict`, it represents the per-pixel segmentation mask in COCO's RLE format.
+  + `keypoints` (list[float]): in the format of [x1, y1, v1,..., xn, yn, vn].
+    v[i] means the visibility of this keypoint.
+    `n` must be equal to the number of keypoint categories.
+    The Xs and Ys are either relative coordinates in [0, 1], or absolute coordinates,
+    depending on whether "bbox_mode" is relative.
+
+    Note that the coordinate annotations in COCO format are integers in range [0, H-1 or W-1].
+    By default, detectron2 adds 0.5 to absolute keypoint coordinates to convert them from discrete
+    pixel indices to floating point coordinates.
+  + `iscrowd`: 0 or 1. Whether this instance is labeled as COCO's "crowd region".
++ `proposal_boxes` (array): 2D numpy array with shape (K, 4) representing K precomputed proposal boxes for this image.
++ `proposal_objectness_logits` (array): numpy array with shape (K, ), which corresponds to the objectness
+  logits of proposals in 'proposal_boxes'.
++ `proposal_bbox_mode` (int): the format of the precomputed proposal bbox.
+  It must be a member of [detectron2.structures.BoxMode](detectron2/structures/boxes.py).
+  Default format is `BoxMode.XYXY_ABS`.
+
+
+If your dataset is already in the COCO format, you can simply register it by
+```python
+from detectron2.data.datasets import register_coco_instances
+register_coco_instances("my_dataset", {}, "json_annotation.json", "path/to/image/dir")
+```
+which will take care of everything (including metadata) for you.
+
+
+### "Metadata" for Datasets
+
+Each dataset is associated with some metadata, accessible through
+`MetadataCatalog.get(dataset_name).some_metadata`.
+Metadata is a key-value mapping that contains primitive information that helps interpret what's in the dataset, e.g.,
+names of classes, colors of classes, root of files, etc.
+This information will be useful for augmentation, evaluation, visualization, logging, etc.
+The structure of metadata depends on what is needed from the corresponding downstream code.
+
+
+If you register a new dataset through `DatasetCatalog.register`,
+you may also want to add its corresponding metadata through
+`MetadataCatalog.get(dataset_name).set(name, value)`, to enable any features that need metadata.
+You can do it like this (using the metadata field "thing_classes" as an example): + +```python +from detectron2.data import MetadataCatalog +MetadataCatalog.get("my_dataset").thing_classes = ["person", "dog"] +``` + +Here is a list of metadata keys that are used by builtin features in detectron2. +If you add your own dataset without these metadata, some features may be +unavailable to you: + +* `thing_classes` (list[str]): Used by all instance detection/segmentation tasks. + A list of names for each instance/thing category. + If you load a COCO format dataset, it will be automatically set by the function `load_coco_json`. + +* `stuff_classes` (list[str]): Used by semantic and panoptic segmentation tasks. + A list of names for each stuff category. + +* `stuff_colors` (list[tuple(r, g, b)]): Pre-defined color (in [0, 255]) for each stuff category. + Used for visualization. If not given, random colors are used. + +* `keypoint_names` (list[str]): Used by keypoint localization. A list of names for each keypoint. + +* `keypoint_flip_map` (list[tuple[str]]): Used by the keypoint localization task. A list of pairs of names, + where each pair are the two keypoints that should be flipped if the image is + flipped during augmentation. +* `keypoint_connection_rules`: list[tuple(str, str, (r, g, b))]. Each tuple specifies a pair of keypoints + that are connected and the color to use for the line between them when visualized. + +Some additional metadata that are specific to the evaluation of certain datasets (e.g. COCO): + +* `thing_dataset_id_to_contiguous_id` (dict[int->int]): Used by all instance detection/segmentation tasks in the COCO format. + A mapping from instance class ids in the dataset to contiguous ids in range [0, #class). + Will be automatically set by the function `load_coco_json`. + +* `stuff_dataset_id_to_contiguous_id` (dict[int->int]): Used when generating prediction json files for + semantic/panoptic segmentation. + A mapping from semantic segmentation class ids in the dataset + to contiguous ids in [0, num_categories). It is useful for evaluation only. + +* `json_file`: The COCO annotation json file. Used by COCO evaluation for COCO-format datasets. +* `panoptic_root`, `panoptic_json`: Used by panoptic evaluation. +* `evaluator_type`: Used by the builtin main training script to select + evaluator. No need to use it if you write your own main script. + +NOTE: For background on the difference between "thing" and "stuff" categories, see +[On Seeing Stuff: The Perception of Materials by Humans and Machines](http://persci.mit.edu/pub_pdfs/adelson_spie_01.pdf). +In detectron2, the term "thing" is used for instance-level tasks, +and "stuff" is used for semantic segmentation tasks. +Both are used in panoptic segmentation. diff --git a/docs/tutorials/extend.md b/docs/tutorials/extend.md new file mode 100644 index 0000000000..5e63a5f27f --- /dev/null +++ b/docs/tutorials/extend.md @@ -0,0 +1,45 @@ +# Extend Detectron2's Defaults + +__Research is about doing things in new ways__. +This brings a tension in how to create abstractions in code, +which is a challenge for any research engineering project of a significant size: + +1. On one hand, it needs to have very thin abstractions to allow for the possibility of doing + everything in new ways. It should be reasonably easy to break existing + abstractions and replace them with new ones. + +2. 
On the other hand, such a project also needs reasonably high-level + abstractions, so that users can easily do things in standard ways, + without worrying too much about the details that only certain researchers care about. + +In detectron2, there are two types of interfaces that address this tension together: + +1. Functions and classes that take only a "config" argument (optionally with a minimal + set of extra arguments in cases of mature interfaces). + + Such functions and classes implement + the "standard default" behavior: it will read what it needs from the + config and do the "standard" thing. + Users only need to load a standard config and pass it around, without having to worry about + which arguments are used and what they all mean. + +2. Functions and classes that have well-defined explicit arguments. + + Each of these is a small building block of the entire system. + They require users' effort to stitch together, but can be stitched together in more flexible ways. + When you need to implement something different from the "standard defaults" + included in detectron2, these well-defined components can be reused. + + +If you only need the standard behavior, the [Beginner's Tutorial](getting_started) +should suffice. If you need to extend detectron2 to your own needs, +see the following tutorials for more details: + +* Detectron2 includes a few standard datasets, but you can use custom ones. See + [Use Custom Datasets](datasets). +* Detectron2 contains the standard logic that creates a data loader from a + dataset, but you can write your own as well. See [Use Custom Data Loaders](data_loading). +* Detectron2 implements many standard detection models, and provide ways for you + to overwrite its behaviors. See [Writing Models](models). +* Detectron2 provides a default training loop that is good for common training tasks. + You can customize it with hooks, or write your own loop instead. See [training](training). diff --git a/docs/tutorials/getting_started.md b/docs/tutorials/getting_started.md new file mode 120000 index 0000000000..e90bde77a3 --- /dev/null +++ b/docs/tutorials/getting_started.md @@ -0,0 +1 @@ +../../GETTING_STARTED.md \ No newline at end of file diff --git a/docs/tutorials/index.rst b/docs/tutorials/index.rst new file mode 100644 index 0000000000..bd3e4e108e --- /dev/null +++ b/docs/tutorials/index.rst @@ -0,0 +1,14 @@ +Tutorials +====================================== + +.. toctree:: + :maxdepth: 2 + + install + getting_started + extend + datasets + data_loading + models + training + configs diff --git a/docs/tutorials/install.md b/docs/tutorials/install.md new file mode 120000 index 0000000000..5f52b2be3c --- /dev/null +++ b/docs/tutorials/install.md @@ -0,0 +1 @@ +../../INSTALL.md \ No newline at end of file diff --git a/docs/tutorials/models.md b/docs/tutorials/models.md new file mode 100644 index 0000000000..9489da2cfb --- /dev/null +++ b/docs/tutorials/models.md @@ -0,0 +1,32 @@ +# Using and Writing Models + +Models (and their sub-models) in detectron2 are built by +functions such as `build_model`, `build_backbone`, `build_roi_heads`: +```python +from detectron2.modeling import build_model +model = build_model(cfg) # returns a torch.nn.Module +``` + +In some cases, e.g. if you are trying to do something completely new, you may wish to implement +a model entirely from scratch within detectron2. However, in many situations you may +be interested in modifying or extending some components of an existing model. 
+Therefore, we also provide a registration mechanism that lets you override the
+behavior of certain internal components of standard models.
+
+For example, to add a new backbone, add this code (and make sure it is imported):
+```python
+from detectron2.modeling import BACKBONE_REGISTRY, Backbone
+@BACKBONE_REGISTRY.register()
+class NewBackBone(Backbone):
+  def __init__(self, cfg, input_shape):
+    # create your own backbone
+```
+which will allow you to use `cfg.MODEL.BACKBONE.NAME = 'NewBackBone'` in your config file.
+
+As another example, to add new abilities to the ROI heads in the Generalized R-CNN meta-architecture,
+you can implement a new
+[ROIHeads](../modules/modeling.html#detectron2.modeling.ROIHeads) subclass and put it in the `ROI_HEADS_REGISTRY`.
+See [densepose in detectron2](https://github.com/facebookresearch/detectron2/tree/master/projects/DensePose)
+for an example.
+
+Other registries can be found in [API documentation](../modules/modeling.html).
diff --git a/docs/tutorials/training.md b/docs/tutorials/training.md
new file mode 100644
index 0000000000..615966f47c
--- /dev/null
+++ b/docs/tutorials/training.md
@@ -0,0 +1,17 @@
+# Training
+
+From the previous tutorials, you may now have a custom model and data loader.
+
+You are free to create your own optimizer, and write the training logic: it's
+usually easy with PyTorch, and allows researchers to see the entire training
+logic more clearly.
+
+We also provide a standardized "trainer" abstraction with a
+[minimal hook system](../modules/engine.html#detectron2.engine.HookBase)
+that helps simplify the standard types of training.
+
+You can use
+[SimpleTrainer().train()](../modules/engine.html#detectron2.engine.SimpleTrainer)
+which does single-cost single-optimizer single-data-source training.
+Or use [DefaultTrainer().train()](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer)
+which includes more standard behavior that one might want to opt into.
diff --git a/projects/DensePose/README.md b/projects/DensePose/README.md
new file mode 100644
index 0000000000..faf21786a4
--- /dev/null
+++ b/projects/DensePose/README.md
@@ -0,0 +1,64 @@
+
+# DensePose in Detectron2
+**Dense Human Pose Estimation In The Wild**
+
+_Rıza Alp Güler, Natalia Neverova, Iasonas Kokkinos_
+
+[[`densepose.org`](https://densepose.org)] [[`arXiv`](https://arxiv.org/abs/1802.00434)] [[`BibTeX`](#CitingDensePose)]
+
+Dense human pose estimation aims at mapping all human pixels of an RGB image to the 3D surface of the human body.
+
+ +
+ +In this repository, we provide the code to train and evaluate DensePose-RCNN. We also provide tools to visualize +DensePose annotation and results. + +## Training + +To train a model one can call +```bash +python /path/to/detectron2/projects/DensePose/train_net.py --config-file +``` + +For example, to launch end-to-end DensePose-RCNN training with ResNet-50 FPN backbone on a single GPU, +one should execute: +```bash +python /path/to/detectron2/projects/DensePose/train_net.py --config-file /path/to/detectron2/projects/DensePose/configs/densepose_R_50_FPN_s1x.yaml +``` + +## Testing + +Model testing can be done in the same way as training, except for an additional flag `--eval-only` and +model location specification through `MODEL.WEIGHT model.pth` in the command line +```bash +python /path/to/detectron2/projects/DensePose/train_net.py --config-file /path/to/detectron2/projects/DensePose/configs/densepose_R_50_FPN_s1x.yaml --eval-only MODEL.WEIGHT model.pth +``` + +## Tools + +We provide tools which allow one to: + - easily view DensePose annotated data in a dataset; + - perform DensePose inference on a set of images; + - visualize DensePose model results; + +`query_db` is a tool to print or visualize DensePose data in a dataset. +Details on this tool can be found in [`TOOL_QUERY_DB.md`](doc/TOOL_QUERY_DB.md) + +`apply_net` is a tool to print or visualize DensePose results. +Details on this tool can be found in [`TOOL_APPLY_NET.md`](doc/TOOL_APPLY_NET.md) + +## Citing DensePose + +If you use DensePose, please use the following BibTeX entry. + +``` +@InProceedings{Guler2018DensePose, + title={DensePose: Dense Human Pose Estimation In The Wild}, + author={R\{i}za Alp G\"uler, Natalia Neverova, Iasonas Kokkinos}, + journal={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2018} +} +``` + diff --git a/projects/DensePose/apply_net.py b/projects/DensePose/apply_net.py new file mode 100644 index 0000000000..6469bb6c89 --- /dev/null +++ b/projects/DensePose/apply_net.py @@ -0,0 +1,299 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import argparse +import glob +import logging +import os +import pickle +import sys +from typing import Any, ClassVar, Dict, List +import torch + +from detectron2.config import get_cfg +from detectron2.data.detection_utils import read_image +from detectron2.engine.defaults import DefaultPredictor +from detectron2.structures.instances import Instances +from detectron2.utils.logger import setup_logger + +from densepose import add_densepose_config +from densepose.utils.logger import verbosity_to_level +from densepose.vis.base import CompoundVisualizer +from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer +from densepose.vis.densepose import ( + DensePoseResultsContourVisualizer, + DensePoseResultsFineSegmentationVisualizer, + DensePoseResultsUVisualizer, + DensePoseResultsVVisualizer, +) +from densepose.vis.extractor import CompoundExtractor, create_extractor + +DOC = """Apply Net - a tool to print / visualize DensePose results +""" + +LOGGER_NAME = "apply_net" +logger = logging.getLogger(LOGGER_NAME) + +_ACTION_REGISTRY: Dict[str, "Action"] = {} + + +class Action(object): + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + parser.add_argument( + "-v", + "--verbosity", + action="count", + help="Verbose mode. 
Multiple -v options increase the verbosity.", + ) + + +def register_action(cls: type): + """ + Decorator for action classes to automate action registration + """ + global _ACTION_REGISTRY + _ACTION_REGISTRY[cls.COMMAND] = cls + return cls + + +class InferenceAction(Action): + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(InferenceAction, cls).add_arguments(parser) + parser.add_argument("cfg", metavar="", help="Config file") + parser.add_argument("model", metavar="", help="Model file") + parser.add_argument("input", metavar="", help="Input data") + + @classmethod + def execute(cls: type, args: argparse.Namespace): + logger.info(f"Loading config from {args.cfg}") + opts = [] + cfg = cls.setup_config(args.cfg, args.model, args, opts) + logger.info(f"Loading model from {args.model}") + predictor = DefaultPredictor(cfg) + logger.info(f"Loading data from {args.input}") + file_list = cls._get_input_file_list(args.input) + if len(file_list) == 0: + logger.warning(f"No input images for {args.input}") + return + context = cls.create_context(args) + for file_name in file_list: + img = read_image(file_name, format="BGR") # predictor expects BGR image. + with torch.no_grad(): + outputs = predictor(img)["instances"] + cls.execute_on_outputs(context, {"file_name": file_name, "image": img}, outputs) + cls.postexecute(context) + + @classmethod + def setup_config( + cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str] + ): + cfg = get_cfg() + add_densepose_config(cfg) + cfg.merge_from_file(config_fpath) + if opts: + cfg.merge_from_list(opts) + cfg.MODEL.WEIGHTS = model_fpath + cfg.freeze() + return cfg + + @classmethod + def _get_input_file_list(cls: type, input_spec: str): + if os.path.isdir(input_spec): + file_list = [ + fname + for fname in os.listdir(input_spec) + if os.path.isfile(os.path.join(input_spec, fname)) + ] + elif os.path.isfile(input_spec): + file_list = [input_spec] + else: + file_list = glob.glob(input_spec) + return file_list + + +@register_action +class DumpAction(InferenceAction): + """ + Dump action that outputs results to a pickle file + """ + + COMMAND: ClassVar[str] = "dump" + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Dump model outputs to a file.") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(DumpAction, cls).add_arguments(parser) + parser.add_argument( + "--output", + metavar="", + default="results.pkl", + help="File name to save dump to", + ) + + @classmethod + def execute_on_outputs( + cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances + ): + image_fpath = entry["file_name"] + logger.info(f"Processing {image_fpath}") + entry["instances"] = outputs + context["results"].append(entry) + + @classmethod + def create_context(cls: type, args: argparse.Namespace): + context = {"results": [], "out_fname": args.output} + return context + + @classmethod + def postexecute(cls: type, context: Dict[str, Any]): + out_fname = context["out_fname"] + out_dir = os.path.dirname(out_fname) + if len(out_dir) > 0 and not os.path.exists(out_dir): + os.makedirs(out_dir) + with open(out_fname, "wb") as hFile: + pickle.dump(context["results"], hFile) + logger.info(f"Output saved to {out_fname}") + + +@register_action +class ShowAction(InferenceAction): + """ + Show action that visualizes selected 
entries on an image + """ + + COMMAND: ClassVar[str] = "show" + VISUALIZERS: ClassVar[Dict[str, object]] = { + "dp_contour": DensePoseResultsContourVisualizer, + "dp_segm": DensePoseResultsFineSegmentationVisualizer, + "dp_u": DensePoseResultsUVisualizer, + "dp_v": DensePoseResultsVVisualizer, + "bbox": ScoredBoundingBoxVisualizer, + } + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(ShowAction, cls).add_arguments(parser) + parser.add_argument( + "visualizations", + metavar="", + help="Comma separated list of visualizations, possible values: " + "[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))), + ) + parser.add_argument( + "--min_score", + metavar="", + default=0.8, + type=float, + help="Minimum detection score to visualize", + ) + parser.add_argument( + "--nms_thresh", metavar="", default=None, type=float, help="NMS threshold" + ) + parser.add_argument( + "--output", + metavar="", + default="outputres.png", + help="File name to save output to", + ) + + @classmethod + def setup_config( + cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str] + ): + opts.append("MODEL.ROI_HEADS.SCORE_THRESH_TEST") + opts.append(str(args.min_score)) + if args.nms_thresh is not None: + opts.append("MODEL.ROI_HEADS.NMS_THRESH_TEST") + opts.append(str(args.nms_thresh)) + cfg = super(ShowAction, cls).setup_config(config_fpath, model_fpath, args, opts) + return cfg + + @classmethod + def execute_on_outputs( + cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances + ): + import cv2 + import numpy as np + + visualizer = context["visualizer"] + extractor = context["extractor"] + image_fpath = entry["file_name"] + logger.info(f"Processing {image_fpath}") + image = cv2.cvtColor(entry["image"], cv2.COLOR_BGR2GRAY) + image = np.tile(image[:, :, np.newaxis], [1, 1, 3]) + data = extractor(outputs) + image_vis = visualizer.visualize(image, data) + entry_idx = context["entry_idx"] + 1 + out_fname = cls._get_out_fname(entry_idx, context["out_fname"]) + out_dir = os.path.dirname(out_fname) + if len(out_dir) > 0 and not os.path.exists(out_dir): + os.makedirs(out_dir) + cv2.imwrite(out_fname, image_vis) + logger.info(f"Output saved to {out_fname}") + context["entry_idx"] += 1 + + @classmethod + def postexecute(cls: type, context: Dict[str, Any]): + pass + + @classmethod + def _get_out_fname(cls: type, entry_idx: int, fname_base: str): + base, ext = os.path.splitext(fname_base) + return base + ".{0:04d}".format(entry_idx) + ext + + @classmethod + def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]: + vis_specs = args.visualizations.split(",") + visualizers = [] + extractors = [] + for vis_spec in vis_specs: + vis = cls.VISUALIZERS[vis_spec]() + visualizers.append(vis) + extractor = create_extractor(vis) + extractors.append(extractor) + visualizer = CompoundVisualizer(visualizers) + extractor = CompoundExtractor(extractors) + context = { + "extractor": extractor, + "visualizer": visualizer, + "out_fname": args.output, + "entry_idx": 0, + } + return context + + +def create_argument_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + description=DOC, + formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120), + ) + 
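+    # With no subcommand given, fall back to printing the help message.
+    # Each Action registered via @register_action adds its own subparser below
+    # and overrides `func` with its `execute` classmethod.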
parser.set_defaults(func=lambda _: parser.print_help(sys.stdout)) + subparsers = parser.add_subparsers(title="Actions") + for _, action in _ACTION_REGISTRY.items(): + action.add_parser(subparsers) + return parser + + +def main(): + parser = create_argument_parser() + args = parser.parse_args() + verbosity = args.verbosity if hasattr(args, "verbosity") else None + global logger + logger = setup_logger(name=LOGGER_NAME) + logger.setLevel(verbosity_to_level(verbosity)) + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/projects/DensePose/configs/Base-DensePose-RCNN.yaml b/projects/DensePose/configs/Base-DensePose-RCNN.yaml new file mode 100644 index 0000000000..7909af33ab --- /dev/null +++ b/projects/DensePose/configs/Base-DensePose-RCNN.yaml @@ -0,0 +1,43 @@ +MODEL: + META_ARCHITECTURE: "GeneralizedRCNN" + BACKBONE: + NAME: "build_resnet_fpn_backbone" + RESNETS: + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + FPN: + IN_FEATURES: ["res2", "res3", "res4", "res5"] + ANCHOR_GENERATOR: + SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map + ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps) + RPN: + IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"] + PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level + PRE_NMS_TOPK_TEST: 1000 # Per FPN level + # Detectron1 uses 2000 proposals per-batch, + # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue) + # which is approximately 1000 proposals per-image since the default batch size for FPN is 2. + POST_NMS_TOPK_TRAIN: 1000 + POST_NMS_TOPK_TEST: 1000 + + DENSEPOSE_ON: True + ROI_HEADS: + NAME: "DensePoseROIHeads" + IN_FEATURES: ["p2", "p3", "p4", "p5"] + NUM_CLASSES: 1 + ROI_BOX_HEAD: + NAME: "FastRCNNConvFCHead" + NUM_FC: 2 + POOLER_RESOLUTION: 7 + POOLER_SAMPLING_RATIO: 2 + POOLER_TYPE: "ROIAlign" + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" + POOLER_TYPE: "ROIAlign" +DATASETS: + TRAIN: ("densepose_coco_2014_train", "densepose_coco_2014_valminusminival") + TEST: ("densepose_coco_2014_minival",) +SOLVER: + IMS_PER_BATCH: 16 + BASE_LR: 0.02 + STEPS: (60000, 80000) + MAX_ITER: 90000 diff --git a/projects/DensePose/configs/densepose_R_50_FPN_s1x.yaml b/projects/DensePose/configs/densepose_R_50_FPN_s1x.yaml new file mode 100644 index 0000000000..44035d7fba --- /dev/null +++ b/projects/DensePose/configs/densepose_R_50_FPN_s1x.yaml @@ -0,0 +1,12 @@ +_BASE_: "Base-DensePose-RCNN.yaml" +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +MODEL: + WEIGHTS: "catalog://ImageNetPretrained/MSRA/R-50" +SOLVER: + MAX_ITER: 130000 + STEPS: (100000, 120000) + BASE_LR: 0.002 + IMS_PER_BATCH: 24 + WARMUP_FACTOR: 0.1 + diff --git a/projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_inference_acc_test.yaml b/projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_inference_acc_test.yaml new file mode 100644 index 0000000000..cd277bee8e --- /dev/null +++ b/projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_inference_acc_test.yaml @@ -0,0 +1,8 @@ +_BASE_: "../densepose_R_50_FPN_s1x.yaml" +MODEL: + WEIGHTS: "detectron2://e2e_densepose_R_50_FPN_s1x/124238535/model_final_5f3d7f9875229310fdfe6649459c0157.pkl" +DATASETS: + TRAIN: () + TEST: ("densepose_coco_2014_minival_100",) +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 56.05, 0.025], ["densepose", "AP", 46.54, 0.02]] diff --git a/projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_instant_test.yaml b/projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_instant_test.yaml new file mode 100644 
index 0000000000..dec46d5224 --- /dev/null +++ b/projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_instant_test.yaml @@ -0,0 +1,14 @@ +_BASE_: "../Base-DensePose-RCNN.yaml" +MODEL: + WEIGHTS: "catalog://ImageNetPretrained/MSRA/R-50" + DENSEPOSE_ON: True + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" +DATASETS: + TRAIN: ("densepose_coco_2014_minival_100",) + TEST: ("densepose_coco_2014_minival_100",) +SOLVER: + MAX_ITER: 40 + STEPS: (30,) + BASE_LR: 0.002 + IMS_PER_BATCH: 24 diff --git a/projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_training_acc_test.yaml b/projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_training_acc_test.yaml new file mode 100644 index 0000000000..d780840fe7 --- /dev/null +++ b/projects/DensePose/configs/quick_schedules/densepose_R_50_FPN_training_acc_test.yaml @@ -0,0 +1,25 @@ +_BASE_: "../Base-DensePose-RCNN.yaml" +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) +MODEL: + WEIGHTS: "catalog://ImageNetPretrained/MSRA/R-50" + DENSEPOSE_ON: True + ROI_HEADS: + NUM_CLASSES: 1 + ROI_BOX_HEAD: + POOLER_RESOLUTION: 7 + POOLER_SAMPLING_RATIO: 2 + ROI_DENSEPOSE_HEAD: + NAME: "DensePoseV1ConvXHead" +DATASETS: + TRAIN: ("densepose_coco_2014_minival",) + TEST: ("densepose_coco_2014_minival",) +SOLVER: + MAX_ITER: 6000 + STEPS: (5500, 5800) + BASE_LR: 0.002 + IMS_PER_BATCH: 24 + WARMUP_FACTOR: 0.1 +TEST: + EXPECTED_RESULTS: [["bbox", "AP", 58.27, 1.0], ["densepose", "AP", 42.47, 1.5]] + diff --git a/projects/DensePose/densepose/__init__.py b/projects/DensePose/densepose/__init__.py new file mode 100644 index 0000000000..48896fc7b8 --- /dev/null +++ b/projects/DensePose/densepose/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from . import dataset # just to register data +from .config import add_densepose_config +from .dataset_mapper import DatasetMapper +from .densepose_head import ROI_DENSEPOSE_HEAD_REGISTRY +from .evaluator import DensePoseCOCOEvaluator +from .roi_head import DensePoseROIHeads +from .structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData diff --git a/projects/DensePose/densepose/config.py b/projects/DensePose/densepose/config.py new file mode 100644 index 0000000000..c23aa15066 --- /dev/null +++ b/projects/DensePose/densepose/config.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +from detectron2.config import CfgNode as CN + + +def add_densepose_config(cfg): + """ + Add config for densepose head. + """ + _C = cfg + + _C.MODEL.DENSEPOSE_ON = True + + _C.MODEL.ROI_DENSEPOSE_HEAD = CN() + _C.MODEL.ROI_DENSEPOSE_HEAD.NAME = "" + _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS = 8 + # Number of parts used for point labels + _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES = 24 + _C.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL = 4 + _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM = 512 + _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL = 3 + _C.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE = 2 + _C.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE = 56 + _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE = "ROIAlignV2" + _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION = 14 + _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO = 2 + # Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD) + _C.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD = 0.7 + # Loss weights for annotation masks.(14 Parts) + _C.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS = 2.0 + # Loss weights for surface parts. 
(24 Parts) + _C.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS = 0.3 + # Loss weights for UV regression. + _C.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS = 0.1 diff --git a/projects/DensePose/densepose/dataset.py b/projects/DensePose/densepose/dataset.py new file mode 100644 index 0000000000..4100533874 --- /dev/null +++ b/projects/DensePose/densepose/dataset.py @@ -0,0 +1,37 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import os + +from detectron2.data.datasets import register_coco_instances + + +def get_densepose_metadata(): + meta = { + "thing_classes": ["person"], + "densepose_transform_src": "detectron2://densepose/UV_symmetry_transforms.mat", + "densepose_smpl_subdiv": "detectron2://densepose/SMPL_subdiv.mat", + "densepose_smpl_subdiv_transform": "detectron2://densepose/SMPL_SUBDIV_TRANSFORM.mat", + } + return meta + + +SPLITS = { + "densepose_coco_2014_train": ("coco/train2014", "coco/annotations/densepose_train2014.json"), + "densepose_coco_2014_minival": ("coco/val2014", "coco/annotations/densepose_minival2014.json"), + "densepose_coco_2014_minival_100": ( + "coco/val2014", + "coco/annotations/densepose_minival2014_100.json", + ), + "densepose_coco_2014_valminusminival": ( + "coco/val2014", + "coco/annotations/densepose_valminusminival2014.json", + ), +} + +for key, (image_root, json_file) in SPLITS.items(): + # Assume pre-defined datasets live in `./datasets`. + register_coco_instances( + key, + get_densepose_metadata(), + os.path.join("datasets", json_file), + os.path.join("datasets", image_root), + ) diff --git a/projects/DensePose/densepose/dataset_mapper.py b/projects/DensePose/densepose/dataset_mapper.py new file mode 100644 index 0000000000..b3d37c794a --- /dev/null +++ b/projects/DensePose/densepose/dataset_mapper.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import copy +import torch +from fvcore.common.file_io import PathManager + +from detectron2.data import MetadataCatalog +from detectron2.data import detection_utils as utils +from detectron2.data import transforms as T + +from .structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData + + +class DatasetMapper: + """ + A customized version of `detectron2.data.DatasetMapperper` + """ + + def __init__(self, cfg, is_train=True): + self.tfm_gens = utils.build_transform_gen(cfg, is_train) + + # fmt: off + self.img_format = cfg.INPUT.FORMAT + self.mask_on = cfg.MODEL.MASK_ON + self.keypoint_on = cfg.MODEL.KEYPOINT_ON + self.densepose_on = cfg.MODEL.DENSEPOSE_ON + assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet" + # fmt: on + if self.keypoint_on and is_train: + # Flip only makes sense in training + self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) + else: + self.keypoint_hflip_indices = None + + if self.densepose_on: + densepose_transform_srcs = [ + MetadataCatalog.get(ds).densepose_transform_src + for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST + ] + assert len(densepose_transform_srcs) > 0 + # TODO: check that DensePose transformation data is the same for + # all the datasets. Otherwise one would have to pass DB ID with + # each entry to select proper transformation data. For now, since + # all DensePose annotated data uses the same data semantics, we + # omit this check. 
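For context, the densepose_transform_src lookup above only works because dataset.py attaches DensePose metadata to every split it registers. A minimal sketch of inspecting that metadata, assuming the DensePose project directory is on PYTHONPATH so that importing the densepose package registers the datasets as a side effect:

    from detectron2.data import MetadataCatalog

    import densepose  # noqa: F401 -- importing the package registers the DensePose COCO splits

    meta = MetadataCatalog.get("densepose_coco_2014_train")
    print(meta.densepose_transform_src)
    # expected to print "detectron2://densepose/UV_symmetry_transforms.mat",
    # which PathManager.get_local_path() below resolves to a locally cached file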
+            densepose_transform_data_fpath = PathManager.get_local_path(densepose_transform_srcs[0])
+            self.densepose_transform_data = DensePoseTransformData.load(
+                densepose_transform_data_fpath
+            )
+
+        self.is_train = is_train
+
+    def __call__(self, dataset_dict):
+        """
+        Args:
+            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
+
+        Returns:
+            dict: a format that builtin models in detectron2 accept
+        """
+        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
+        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
+        utils.check_image_size(dataset_dict, image)
+
+        image, transforms = T.apply_transform_gens(self.tfm_gens, image)
+        image_shape = image.shape[:2]  # h, w
+        dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
+
+        if not self.is_train:
+            dataset_dict.pop("annotations", None)
+            return dataset_dict
+
+        for anno in dataset_dict["annotations"]:
+            if not self.mask_on:
+                anno.pop("segmentation", None)
+            if not self.keypoint_on:
+                anno.pop("keypoints", None)
+
+        # USER: Implement additional transformations if you have other types of data
+        # USER: Don't call transpose_densepose if you don't need
+        annos = [
+            self._transform_densepose(
+                utils.transform_instance_annotations(
+                    obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
+                ),
+                transforms,
+            )
+            for obj in dataset_dict.pop("annotations")
+            if obj.get("iscrowd", 0) == 0
+        ]
+        instances = utils.annotations_to_instances(annos, image_shape)
+
+        if len(annos) and "densepose" in annos[0]:
+            gt_densepose = [obj["densepose"] for obj in annos]
+            instances.gt_densepose = DensePoseList(gt_densepose, instances.gt_boxes, image_shape)
+
+        dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
+        return dataset_dict
+
+    def _transform_densepose(self, annotation, transforms):
+        if not self.densepose_on:
+            return annotation
+
+        # Handle densepose annotations
+        is_valid, reason_not_valid = DensePoseDataRelative.validate_annotation(annotation)
+        if is_valid:
+            densepose_data = DensePoseDataRelative(annotation, cleanup=True)
+            densepose_data.apply_transform(transforms, self.densepose_transform_data)
+            annotation["densepose"] = densepose_data
+        else:
+            # logger = logging.getLogger(__name__)
+            # logger.debug("Could not load DensePose annotation: {}".format(reason_not_valid))
+            DensePoseDataRelative.cleanup_annotation(annotation)
+            # NOTE: annotations for certain instances may be unavailable.
+            # 'None' is accepted by the DensePoseList data structure.
+            annotation["densepose"] = None
+        return annotation
diff --git a/projects/DensePose/densepose/densepose_coco_evaluation.py b/projects/DensePose/densepose/densepose_coco_evaluation.py
new file mode 100644
index 0000000000..3a882f8c11
--- /dev/null
+++ b/projects/DensePose/densepose/densepose_coco_evaluation.py
@@ -0,0 +1,953 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# This is a modified version of cocoeval.py where we also have the densepose evaluation.
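Before moving on to the evaluation code, here is a rough sketch of how the pieces introduced so far (add_densepose_config, the dataset registrations, and the custom DatasetMapper) are typically wired together into a training data loader. It assumes the repository root as the working directory and the DensePose COCO data laid out under ./datasets as expected by dataset.py; the choice of config file is only an example:

    from detectron2.config import get_cfg
    from detectron2.data import build_detection_train_loader

    from densepose import DatasetMapper, add_densepose_config

    cfg = get_cfg()
    add_densepose_config(cfg)
    cfg.merge_from_file("projects/DensePose/configs/densepose_R_50_FPN_s1x.yaml")

    # _BASE_ resolution pulls in Base-DensePose-RCNN.yaml first, so for example:
    assert cfg.MODEL.ROI_HEADS.NAME == "DensePoseROIHeads"  # set by the base config
    assert cfg.SOLVER.MAX_ITER == 130000                    # overridden by the child config

    train_loader = build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, is_train=True))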
+ +__author__ = "tsungyi" + +import copy +import datetime +import itertools +import logging +import numpy as np +import pickle +import time +from collections import defaultdict +import scipy.spatial.distance as ssd +from fvcore.common.file_io import PathManager +from pycocotools import mask as maskUtils +from scipy.io import loadmat + +from .structures import DensePoseResult + +logger = logging.getLogger(__name__) + + +class DensePoseCocoEval(object): + # Interface for evaluating detection on the Microsoft COCO dataset. + # + # The usage for CocoEval is as follows: + # cocoGt=..., cocoDt=... # load dataset and results + # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object + # E.params.recThrs = ...; # set parameters as desired + # E.evaluate(); # run per image evaluation + # E.accumulate(); # accumulate per image results + # E.summarize(); # display summary metrics of results + # For example usage see evalDemo.m and http://mscoco.org/. + # + # The evaluation parameters are as follows (defaults in brackets): + # imgIds - [all] N img ids to use for evaluation + # catIds - [all] K cat ids to use for evaluation + # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation + # recThrs - [0:.01:1] R=101 recall thresholds for evaluation + # areaRng - [...] A=4 object area ranges for evaluation + # maxDets - [1 10 100] M=3 thresholds on max detections per image + # iouType - ['segm'] set iouType to 'segm', 'bbox', 'keypoints' or 'densepose' + # iouType replaced the now DEPRECATED useSegm parameter. + # useCats - [1] if true use category labels for evaluation + # Note: if useCats=0 category labels are ignored as in proposal scoring. + # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified. + # + # evaluate(): evaluates detections on every image and every category and + # concats the results into the "evalImgs" with fields: + # dtIds - [1xD] id for each of the D detections (dt) + # gtIds - [1xG] id for each of the G ground truths (gt) + # dtMatches - [TxD] matching gt id at each IoU or 0 + # gtMatches - [TxG] matching dt id at each IoU or 0 + # dtScores - [1xD] confidence of each dt + # gtIgnore - [1xG] ignore flag for each gt + # dtIgnore - [TxD] ignore flag for each dt at each IoU + # + # accumulate(): accumulates the per-image, per-category evaluation + # results in "evalImgs" into the dictionary "eval" with fields: + # params - parameters used for evaluation + # date - date evaluation was performed + # counts - [T,R,K,A,M] parameter dimensions (see above) + # precision - [TxRxKxAxM] precision for every evaluation setting + # recall - [TxKxAxM] max recall for every evaluation setting + # Note: precision and recall==-1 for settings with no gt objects. + # + # See also coco, mask, pycocoDemo, pycocoEvalDemo + # + # Microsoft COCO Toolbox. version 2.0 + # Data, paper, and tutorials available at: http://mscoco.org/ + # Code written by Piotr Dollar and Tsung-Yi Lin, 2015. 
+ # Licensed under the Simplified BSD License [see coco/license.txt] + def __init__(self, cocoGt=None, cocoDt=None, iouType="densepose"): + """ + Initialize CocoEval using coco APIs for gt and dt + :param cocoGt: coco object with ground truth annotations + :param cocoDt: coco object with detection results + :return: None + """ + self.cocoGt = cocoGt # ground truth COCO API + self.cocoDt = cocoDt # detections COCO API + self.params = {} # evaluation parameters + self.evalImgs = defaultdict(list) # per-image per-category eval results [KxAxI] + self.eval = {} # accumulated evaluation results + self._gts = defaultdict(list) # gt for evaluation + self._dts = defaultdict(list) # dt for evaluation + self.params = Params(iouType=iouType) # parameters + self._paramsEval = {} # parameters for evaluation + self.stats = [] # result summarization + self.ious = {} # ious between all gts and dts + if cocoGt is not None: + self.params.imgIds = sorted(cocoGt.getImgIds()) + self.params.catIds = sorted(cocoGt.getCatIds()) + self.ignoreThrBB = 0.7 + self.ignoreThrUV = 0.9 + + def _loadGEval(self): + smpl_subdiv_fpath = PathManager.get_local_path("detectron2://densepose/SMPL_subdiv.mat") + pdist_transform_fpath = PathManager.get_local_path( + "detectron2://densepose/SMPL_SUBDIV_TRANSFORM.mat" + ) + pdist_matrix_fpath = PathManager.get_local_path("detectron2://densepose/Pdist_matrix.pkl") + SMPL_subdiv = loadmat(smpl_subdiv_fpath) + self.PDIST_transform = loadmat(pdist_transform_fpath) + self.PDIST_transform = self.PDIST_transform["index"].squeeze() + UV = np.array([SMPL_subdiv["U_subdiv"], SMPL_subdiv["V_subdiv"]]).squeeze() + ClosestVertInds = np.arange(UV.shape[1]) + 1 + self.Part_UVs = [] + self.Part_ClosestVertInds = [] + for i in np.arange(24): + self.Part_UVs.append(UV[:, SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)]) + self.Part_ClosestVertInds.append( + ClosestVertInds[SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)] + ) + + with open(pdist_matrix_fpath, "rb") as hFile: + arrays = pickle.load(hFile, encoding="latin1") + self.Pdist_matrix = arrays["Pdist_matrix"] + self.Part_ids = np.array(SMPL_subdiv["Part_ID_subdiv"].squeeze()) + # Mean geodesic distances for parts. + self.Mean_Distances = np.array([0, 0.351, 0.107, 0.126, 0.237, 0.173, 0.142, 0.128, 0.150]) + # Coarse Part labels. 
+ self.CoarseParts = np.array( + [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8] + ) + + def _prepare(self): + """ + Prepare ._gts and ._dts for evaluation based on params + :return: None + """ + + def _toMask(anns, coco): + # modify ann['segmentation'] by reference + for ann in anns: + rle = coco.annToRLE(ann) + ann["segmentation"] = rle + + def _getIgnoreRegion(iid, coco): + img = coco.imgs[iid] + + if "ignore_regions_x" not in img.keys(): + return None + + if len(img["ignore_regions_x"]) == 0: + return None + + rgns_merged = [] + for region_x, region_y in zip(img["ignore_regions_x"], img["ignore_regions_y"]): + rgns = [iter(region_x), iter(region_y)] + rgns_merged.append([next(it) for it in itertools.cycle(rgns)]) + rles = maskUtils.frPyObjects(rgns_merged, img["height"], img["width"]) + rle = maskUtils.merge(rles) + return maskUtils.decode(rle) + + def _checkIgnore(dt, iregion): + if iregion is None: + return True + + bb = np.array(dt["bbox"]).astype(np.int) + x1, y1, x2, y2 = bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3] + x2 = min([x2, iregion.shape[1]]) + y2 = min([y2, iregion.shape[0]]) + + if bb[2] * bb[3] == 0: + return False + + crop_iregion = iregion[y1:y2, x1:x2] + + if crop_iregion.sum() == 0: + return True + + if "densepose" not in dt.keys(): # filtering boxes + return crop_iregion.sum() / bb[2] / bb[3] < self.ignoreThrBB + + # filtering UVs + ignoremask = np.require(crop_iregion, requirements=["F"]) + uvmask = np.require( + np.asarray(dt["densepose"][0] > 0), dtype=np.uint8, requirements=["F"] + ) + uvmask_ = maskUtils.encode(uvmask) + ignoremask_ = maskUtils.encode(ignoremask) + uviou = maskUtils.iou([uvmask_], [ignoremask_], [1])[0] + return uviou < self.ignoreThrUV + + p = self.params + + if p.useCats: + gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) + dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)) + else: + gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds)) + dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds)) + + # if iouType == 'uv', add point gt annotations + if p.iouType == "densepose": + self._loadGEval() + + # convert ground truth to mask if iouType == 'segm' + if p.iouType == "segm": + _toMask(gts, self.cocoGt) + _toMask(dts, self.cocoDt) + + # set ignore flag + for gt in gts: + gt["ignore"] = gt["ignore"] if "ignore" in gt else 0 + gt["ignore"] = "iscrowd" in gt and gt["iscrowd"] + if p.iouType == "keypoints": + gt["ignore"] = (gt["num_keypoints"] == 0) or gt["ignore"] + if p.iouType == "densepose": + gt["ignore"] = ("dp_x" in gt) == 0 + + self._gts = defaultdict(list) # gt for evaluation + self._dts = defaultdict(list) # dt for evaluation + self._igrgns = defaultdict(list) + + for gt in gts: + iid = gt["image_id"] + if iid not in self._igrgns.keys(): + self._igrgns[iid] = _getIgnoreRegion(iid, self.cocoGt) + if _checkIgnore(gt, self._igrgns[iid]): + self._gts[iid, gt["category_id"]].append(gt) + for dt in dts: + if _checkIgnore(dt, self._igrgns[dt["image_id"]]): + self._dts[dt["image_id"], dt["category_id"]].append(dt) + + self.evalImgs = defaultdict(list) # per-image per-category evaluation results + self.eval = {} # accumulated evaluation results + + def evaluate(self): + """ + Run per image evaluation on given images and store results (a list of dict) in self.evalImgs + :return: None + """ + tic = time.time() + logger.info("Running per image DensePose evaluation... 
{}".format(self.params.iouType)) + p = self.params + # add backward compatibility if useSegm is specified in params + if p.useSegm is not None: + p.iouType = "segm" if p.useSegm == 1 else "bbox" + logger.info("useSegm (deprecated) is not None. Running DensePose evaluation") + p.imgIds = list(np.unique(p.imgIds)) + if p.useCats: + p.catIds = list(np.unique(p.catIds)) + p.maxDets = sorted(p.maxDets) + self.params = p + + self._prepare() + # loop through images, area range, max detection number + catIds = p.catIds if p.useCats else [-1] + + if p.iouType in ["segm", "bbox"]: + computeIoU = self.computeIoU + elif p.iouType == "keypoints": + computeIoU = self.computeOks + elif p.iouType == "densepose": + computeIoU = self.computeOgps + + self.ious = { + (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds + } + + evaluateImg = self.evaluateImg + maxDet = p.maxDets[-1] + self.evalImgs = [ + evaluateImg(imgId, catId, areaRng, maxDet) + for catId in catIds + for areaRng in p.areaRng + for imgId in p.imgIds + ] + self._paramsEval = copy.deepcopy(self.params) + toc = time.time() + logger.info("DensePose evaluation DONE (t={:0.2f}s).".format(toc - tic)) + + def computeIoU(self, imgId, catId): + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return [] + inds = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in inds] + if len(dt) > p.maxDets[-1]: + dt = dt[0 : p.maxDets[-1]] + + if p.iouType == "segm": + g = [g["segmentation"] for g in gt] + d = [d["segmentation"] for d in dt] + elif p.iouType == "bbox": + g = [g["bbox"] for g in gt] + d = [d["bbox"] for d in dt] + else: + raise Exception("unknown iouType for iou computation") + + # compute iou between each dt and gt region + iscrowd = [int(o["iscrowd"]) for o in gt] + ious = maskUtils.iou(d, g, iscrowd) + return ious + + def computeOks(self, imgId, catId): + p = self.params + # dimention here should be Nxm + gts = self._gts[imgId, catId] + dts = self._dts[imgId, catId] + inds = np.argsort([-d["score"] for d in dts], kind="mergesort") + dts = [dts[i] for i in inds] + if len(dts) > p.maxDets[-1]: + dts = dts[0 : p.maxDets[-1]] + # if len(gts) == 0 and len(dts) == 0: + if len(gts) == 0 or len(dts) == 0: + return [] + ious = np.zeros((len(dts), len(gts))) + sigmas = ( + np.array( + [ + 0.26, + 0.25, + 0.25, + 0.35, + 0.35, + 0.79, + 0.79, + 0.72, + 0.72, + 0.62, + 0.62, + 1.07, + 1.07, + 0.87, + 0.87, + 0.89, + 0.89, + ] + ) + / 10.0 + ) + vars = (sigmas * 2) ** 2 + k = len(sigmas) + # compute oks between each detection and ground truth object + for j, gt in enumerate(gts): + # create bounds for ignore regions(double the gt bbox) + g = np.array(gt["keypoints"]) + xg = g[0::3] + yg = g[1::3] + vg = g[2::3] + k1 = np.count_nonzero(vg > 0) + bb = gt["bbox"] + x0 = bb[0] - bb[2] + x1 = bb[0] + bb[2] * 2 + y0 = bb[1] - bb[3] + y1 = bb[1] + bb[3] * 2 + for i, dt in enumerate(dts): + d = np.array(dt["keypoints"]) + xd = d[0::3] + yd = d[1::3] + if k1 > 0: + # measure the per-keypoint distance if keypoints visible + dx = xd - xg + dy = yd - yg + else: + # measure minimum distance to keypoints in (x0,y0) & (x1,y1) + z = np.zeros((k)) + dx = np.max((z, x0 - xd), axis=0) + np.max((z, xd - x1), axis=0) + dy = np.max((z, y0 - yd), axis=0) + np.max((z, yd - y1), axis=0) + e = (dx ** 2 + dy ** 2) / 
vars / (gt["area"] + np.spacing(1)) / 2 + if k1 > 0: + e = e[vg > 0] + ious[i, j] = np.sum(np.exp(-e)) / e.shape[0] + return ious + + def computeOgps(self, imgId, catId): + p = self.params + # dimention here should be Nxm + g = self._gts[imgId, catId] + d = self._dts[imgId, catId] + inds = np.argsort([-d_["score"] for d_ in d], kind="mergesort") + d = [d[i] for i in inds] + if len(d) > p.maxDets[-1]: + d = d[0 : p.maxDets[-1]] + # if len(gts) == 0 and len(dts) == 0: + if len(g) == 0 or len(d) == 0: + return [] + ious = np.zeros((len(d), len(g))) + # compute opgs between each detection and ground truth object + # sigma = self.sigma #0.255 # dist = 0.3m corresponds to ogps = 0.5 + # 1 # dist = 0.3m corresponds to ogps = 0.96 + # 1.45 # dist = 1.7m (person height) corresponds to ogps = 0.5) + for j, gt in enumerate(g): + if not gt["ignore"]: + g_ = gt["bbox"] + for i, dt in enumerate(d): + # + dy = int(dt["bbox"][3]) + dx = int(dt["bbox"][2]) + dp_x = np.array(gt["dp_x"]) * g_[2] / 255.0 + dp_y = np.array(gt["dp_y"]) * g_[3] / 255.0 + py = (dp_y + g_[1] - dt["bbox"][1]).astype(np.int) + px = (dp_x + g_[0] - dt["bbox"][0]).astype(np.int) + # + pts = np.zeros(len(px)) + pts[px >= dx] = -1 + pts[py >= dy] = -1 + pts[px < 0] = -1 + pts[py < 0] = -1 + if len(pts) < 1: + ogps = 0.0 + elif np.max(pts) == -1: + ogps = 0.0 + else: + px[pts == -1] = 0 + py[pts == -1] = 0 + (densepose_shape, densepose_data_encoded), densepose_bbox_xywh = dt[ + "densepose" + ] + densepose_data = DensePoseResult.decode_png_data( + densepose_shape, densepose_data_encoded + ) + assert densepose_data.shape[2] == dx, ( + "DensePoseData width {} should be equal to " + "detection bounding box width {}".format(densepose_data.shape[2], dx) + ) + assert densepose_data.shape[1] == dy, ( + "DensePoseData height {} should be equal to " + "detection bounding box height {}".format(densepose_data.shape[1], dy) + ) + ipoints = densepose_data[0, py, px] + upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255. + vpoints = densepose_data[2, py, px] / 255.0 + ipoints[pts == -1] = 0 + # Find closest vertices in subsampled mesh. + cVerts, cVertsGT = self.findAllClosestVerts(gt, upoints, vpoints, ipoints) + # Get pairwise geodesic distances between gt and estimated mesh points. + dist = self.getDistances(cVertsGT, cVerts) + # Compute the Ogps measure. + # Find the mean geodesic normalization distance for + # each GT point, based on which part it is on. 
+ Current_Mean_Distances = self.Mean_Distances[ + self.CoarseParts[self.Part_ids[cVertsGT[cVertsGT > 0].astype(int) - 1]] + ] + # Compute gps + ogps_values = np.exp(-(dist ** 2) / (2 * (Current_Mean_Distances ** 2))) + # + if len(dist) > 0: + ogps = np.sum(ogps_values) / len(dist) + ious[i, j] = ogps + + gbb = [gt["bbox"] for gt in g] + dbb = [dt["bbox"] for dt in d] + + # compute iou between each dt and gt region + iscrowd = [int(o["iscrowd"]) for o in g] + ious_bb = maskUtils.iou(dbb, gbb, iscrowd) + return ious, ious_bb + + def evaluateImg(self, imgId, catId, aRng, maxDet): + """ + perform evaluation for single category and image + :return: dict (single image results) + """ + + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return None + + for g in gt: + # g['_ignore'] = g['ignore'] + if g["ignore"] or (g["area"] < aRng[0] or g["area"] > aRng[1]): + g["_ignore"] = True + else: + g["_ignore"] = False + + # sort dt highest score first, sort gt ignore last + gtind = np.argsort([g["_ignore"] for g in gt], kind="mergesort") + gt = [gt[i] for i in gtind] + dtind = np.argsort([-d["score"] for d in dt], kind="mergesort") + dt = [dt[i] for i in dtind[0:maxDet]] + iscrowd = [int(o["iscrowd"]) for o in gt] + # load computed ious + if p.iouType == "densepose": + # print('Checking the length', len(self.ious[imgId, catId])) + # if len(self.ious[imgId, catId]) == 0: + # print(self.ious[imgId, catId]) + ious = ( + self.ious[imgId, catId][0][:, gtind] + if len(self.ious[imgId, catId]) > 0 + else self.ious[imgId, catId] + ) + ioubs = ( + self.ious[imgId, catId][1][:, gtind] + if len(self.ious[imgId, catId]) > 0 + else self.ious[imgId, catId] + ) + else: + ious = ( + self.ious[imgId, catId][:, gtind] + if len(self.ious[imgId, catId]) > 0 + else self.ious[imgId, catId] + ) + + T = len(p.iouThrs) + G = len(gt) + D = len(dt) + gtm = np.zeros((T, G)) + dtm = np.zeros((T, D)) + gtIg = np.array([g["_ignore"] for g in gt]) + dtIg = np.zeros((T, D)) + if np.all(gtIg) and p.iouType == "densepose": + dtIg = np.logical_or(dtIg, True) + + if len(ious) > 0: # and not p.iouType == 'densepose': + for tind, t in enumerate(p.iouThrs): + for dind, d in enumerate(dt): + # information about best match so far (m=-1 -> unmatched) + iou = min([t, 1 - 1e-10]) + m = -1 + for gind, _g in enumerate(gt): + # if this gt already matched, and not a crowd, continue + if gtm[tind, gind] > 0 and not iscrowd[gind]: + continue + # if dt matched to reg gt, and on ignore gt, stop + if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1: + break + # continue to next gt unless better match made + if ious[dind, gind] < iou: + continue + if ious[dind, gind] == 0.0: + continue + # if match successful and best so far, store appropriately + iou = ious[dind, gind] + m = gind + # if match made store id of match for both dt and gt + if m == -1: + continue + dtIg[tind, dind] = gtIg[m] + dtm[tind, dind] = gt[m]["id"] + gtm[tind, m] = d["id"] + + if p.iouType == "densepose": + if not len(ioubs) == 0: + for dind, d in enumerate(dt): + # information about best match so far (m=-1 -> unmatched) + if dtm[tind, dind] == 0: + ioub = 0.8 + m = -1 + for gind, _g in enumerate(gt): + # if this gt already matched, and not a crowd, continue + if gtm[tind, gind] > 0 and not iscrowd[gind]: + continue + # continue to next gt unless better match made + if ioubs[dind, 
gind] < ioub: + continue + # if match successful and best so far, store appropriately + ioub = ioubs[dind, gind] + m = gind + # if match made store id of match for both dt and gt + if m > -1: + dtIg[:, dind] = gtIg[m] + if gtIg[m]: + dtm[tind, dind] = gt[m]["id"] + gtm[tind, m] = d["id"] + # set unmatched detections outside of area range to ignore + a = np.array([d["area"] < aRng[0] or d["area"] > aRng[1] for d in dt]).reshape((1, len(dt))) + dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0))) + # store results for given image and category + # print('Done with the function', len(self.ious[imgId, catId])) + return { + "image_id": imgId, + "category_id": catId, + "aRng": aRng, + "maxDet": maxDet, + "dtIds": [d["id"] for d in dt], + "gtIds": [g["id"] for g in gt], + "dtMatches": dtm, + "gtMatches": gtm, + "dtScores": [d["score"] for d in dt], + "gtIgnore": gtIg, + "dtIgnore": dtIg, + } + + def accumulate(self, p=None): + """ + Accumulate per image evaluation results and store the result in self.eval + :param p: input params for evaluation + :return: None + """ + logger.info("Accumulating evaluation results...") + tic = time.time() + if not self.evalImgs: + logger.info("Please run evaluate() first") + # allows input customized parameters + if p is None: + p = self.params + p.catIds = p.catIds if p.useCats == 1 else [-1] + T = len(p.iouThrs) + R = len(p.recThrs) + K = len(p.catIds) if p.useCats else 1 + A = len(p.areaRng) + M = len(p.maxDets) + precision = -np.ones((T, R, K, A, M)) # -1 for the precision of absent categories + recall = -np.ones((T, K, A, M)) + + # create dictionary for future indexing + logger.info("Categories: {}".format(p.catIds)) + _pe = self._paramsEval + catIds = _pe.catIds if _pe.useCats else [-1] + setK = set(catIds) + setA = set(map(tuple, _pe.areaRng)) + setM = set(_pe.maxDets) + setI = set(_pe.imgIds) + # get inds to evaluate + k_list = [n for n, k in enumerate(p.catIds) if k in setK] + m_list = [m for n, m in enumerate(p.maxDets) if m in setM] + a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA] + i_list = [n for n, i in enumerate(p.imgIds) if i in setI] + I0 = len(_pe.imgIds) + A0 = len(_pe.areaRng) + # retrieve E at each category, area range, and max number of detections + for k, k0 in enumerate(k_list): + Nk = k0 * A0 * I0 + for a, a0 in enumerate(a_list): + Na = a0 * I0 + for m, maxDet in enumerate(m_list): + E = [self.evalImgs[Nk + Na + i] for i in i_list] + E = [e for e in E if e is not None] + if len(E) == 0: + continue + dtScores = np.concatenate([e["dtScores"][0:maxDet] for e in E]) + + # different sorting method generates slightly different results. + # mergesort is used to be consistent as Matlab implementation. 
+ inds = np.argsort(-dtScores, kind="mergesort") + + dtm = np.concatenate([e["dtMatches"][:, 0:maxDet] for e in E], axis=1)[:, inds] + dtIg = np.concatenate([e["dtIgnore"][:, 0:maxDet] for e in E], axis=1)[:, inds] + gtIg = np.concatenate([e["gtIgnore"] for e in E]) + npig = np.count_nonzero(gtIg == 0) + if npig == 0: + continue + tps = np.logical_and(dtm, np.logical_not(dtIg)) + fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg)) + tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float) + fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float) + for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): + tp = np.array(tp) + fp = np.array(fp) + nd = len(tp) + rc = tp / npig + pr = tp / (fp + tp + np.spacing(1)) + q = np.zeros((R,)) + + if nd: + recall[t, k, a, m] = rc[-1] + else: + recall[t, k, a, m] = 0 + + # numpy is slow without cython optimization for accessing elements + # use python array gets significant speed improvement + pr = pr.tolist() + q = q.tolist() + + for i in range(nd - 1, 0, -1): + if pr[i] > pr[i - 1]: + pr[i - 1] = pr[i] + + inds = np.searchsorted(rc, p.recThrs, side="left") + try: + for ri, pi in enumerate(inds): + q[ri] = pr[pi] + except Exception: + pass + precision[t, :, k, a, m] = np.array(q) + logger.info( + "Final: max precision {}, min precision {}".format(np.max(precision), np.min(precision)) + ) + self.eval = { + "params": p, + "counts": [T, R, K, A, M], + "date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), + "precision": precision, + "recall": recall, + } + toc = time.time() + logger.info("DONE (t={:0.2f}s).".format(toc - tic)) + + def summarize(self): + """ + Compute and display summary metrics for evaluation results. + Note this functin can *only* be applied on the default parameter setting + """ + + def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): + p = self.params + iStr = " {:<18} {} @[ {}={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" + titleStr = "Average Precision" if ap == 1 else "Average Recall" + typeStr = "(AP)" if ap == 1 else "(AR)" + measure = "IoU" + if self.params.iouType == "keypoints": + measure = "OKS" + elif self.params.iouType == "densepose": + measure = "OGPS" + iouStr = ( + "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) + if iouThr is None + else "{:0.2f}".format(iouThr) + ) + + aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] + mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] + if ap == 1: + # dimension of precision: [TxRxKxAxM] + s = self.eval["precision"] + # IoU + if iouThr is not None: + t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0] + s = s[t] + s = s[:, :, :, aind, mind] + else: + # dimension of recall: [TxKxAxM] + s = self.eval["recall"] + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, aind, mind] + if len(s[s > -1]) == 0: + mean_s = -1 + else: + mean_s = np.mean(s[s > -1]) + logger.info(iStr.format(titleStr, typeStr, measure, iouStr, areaRng, maxDets, mean_s)) + return mean_s + + def _summarizeDets(): + stats = np.zeros((12,)) + stats[0] = _summarize(1) + stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) + stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) + stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) + stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) + stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[7] = _summarize(0, 
maxDets=self.params.maxDets[1]) + stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) + stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) + stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) + stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) + return stats + + def _summarizeKps(): + stats = np.zeros((10,)) + stats[0] = _summarize(1, maxDets=20) + stats[1] = _summarize(1, maxDets=20, iouThr=0.5) + stats[2] = _summarize(1, maxDets=20, iouThr=0.75) + stats[3] = _summarize(1, maxDets=20, areaRng="medium") + stats[4] = _summarize(1, maxDets=20, areaRng="large") + stats[5] = _summarize(0, maxDets=20) + stats[6] = _summarize(0, maxDets=20, iouThr=0.5) + stats[7] = _summarize(0, maxDets=20, iouThr=0.75) + stats[8] = _summarize(0, maxDets=20, areaRng="medium") + stats[9] = _summarize(0, maxDets=20, areaRng="large") + return stats + + def _summarizeUvs(): + stats = np.zeros((10,)) + stats[0] = _summarize(1, maxDets=self.params.maxDets[0]) + stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5) + stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75) + stats[3] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium") + stats[4] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="large") + stats[5] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[6] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5) + stats[7] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75) + stats[8] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium") + stats[9] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="large") + return stats + + def _summarizeUvsOld(): + stats = np.zeros((18,)) + stats[0] = _summarize(1, maxDets=self.params.maxDets[0]) + stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5) + stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.55) + stats[3] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.60) + stats[4] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.65) + stats[5] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.70) + stats[6] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75) + stats[7] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.80) + stats[8] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.85) + stats[9] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.90) + stats[10] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.95) + stats[11] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium") + stats[12] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="large") + stats[13] = _summarize(0, maxDets=self.params.maxDets[0]) + stats[14] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5) + stats[15] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75) + stats[16] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium") + stats[17] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="large") + return stats + + if not self.eval: + raise Exception("Please run accumulate() first") + iouType = self.params.iouType + if iouType in ["segm", "bbox"]: + summarize = _summarizeDets + elif iouType in ["keypoints"]: + summarize = _summarizeKps + elif iouType in ["densepose"]: + summarize = _summarizeUvs + self.stats = summarize() + + def __str__(self): + self.summarize() + + # ================ functions for dense pose ============================== + def findAllClosestVerts(self, gt, U_points, V_points, 
Index_points): + # + I_gt = np.array(gt["dp_I"]) + U_gt = np.array(gt["dp_U"]) + V_gt = np.array(gt["dp_V"]) + # + # print(I_gt) + # + ClosestVerts = np.ones(Index_points.shape) * -1 + for i in np.arange(24): + # + if sum(Index_points == (i + 1)) > 0: + UVs = np.array( + [U_points[Index_points == (i + 1)], V_points[Index_points == (i + 1)]] + ) + Current_Part_UVs = self.Part_UVs[i] + Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i] + D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze() + ClosestVerts[Index_points == (i + 1)] = Current_Part_ClosestVertInds[ + np.argmin(D, axis=0) + ] + # + ClosestVertsGT = np.ones(Index_points.shape) * -1 + for i in np.arange(24): + if sum(I_gt == (i + 1)) > 0: + UVs = np.array([U_gt[I_gt == (i + 1)], V_gt[I_gt == (i + 1)]]) + Current_Part_UVs = self.Part_UVs[i] + Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i] + D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze() + ClosestVertsGT[I_gt == (i + 1)] = Current_Part_ClosestVertInds[np.argmin(D, axis=0)] + # + return ClosestVerts, ClosestVertsGT + + def getDistances(self, cVertsGT, cVerts): + + ClosestVertsTransformed = self.PDIST_transform[cVerts.astype(int) - 1] + ClosestVertsGTTransformed = self.PDIST_transform[cVertsGT.astype(int) - 1] + # + ClosestVertsTransformed[cVerts < 0] = 0 + ClosestVertsGTTransformed[cVertsGT < 0] = 0 + # + cVertsGT = ClosestVertsGTTransformed + cVerts = ClosestVertsTransformed + # + n = 27554 + dists = [] + for d in range(len(cVertsGT)): + if cVertsGT[d] > 0: + if cVerts[d] > 0: + i = cVertsGT[d] - 1 + j = cVerts[d] - 1 + if j == i: + dists.append(0) + elif j > i: + ccc = i + i = j + j = ccc + i = n - i - 1 + j = n - j - 1 + k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1 + k = (n * n - n) / 2 - k - 1 + dists.append(self.Pdist_matrix[int(k)][0]) + else: + i = n - i - 1 + j = n - j - 1 + k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1 + k = (n * n - n) / 2 - k - 1 + dists.append(self.Pdist_matrix[int(k)][0]) + else: + dists.append(np.inf) + return np.array(dists).squeeze() + + +class Params: + """ + Params for coco evaluation api + """ + + def setDetParams(self): + self.imgIds = [] + self.catIds = [] + # np.arange causes trouble. the data point on arange is slightly larger than the true value + self.iouThrs = np.linspace(0.5, 0.95, np.round((0.95 - 0.5) / 0.05) + 1, endpoint=True) + self.recThrs = np.linspace(0.0, 1.00, np.round((1.00 - 0.0) / 0.01) + 1, endpoint=True) + self.maxDets = [1, 10, 100] + self.areaRng = [ + [0 ** 2, 1e5 ** 2], + [0 ** 2, 32 ** 2], + [32 ** 2, 96 ** 2], + [96 ** 2, 1e5 ** 2], + ] + self.areaRngLbl = ["all", "small", "medium", "large"] + self.useCats = 1 + + def setKpParams(self): + self.imgIds = [] + self.catIds = [] + # np.arange causes trouble. 
the data point on arange is slightly larger than the true value + self.iouThrs = np.linspace(0.5, 0.95, np.round((0.95 - 0.5) / 0.05) + 1, endpoint=True) + self.recThrs = np.linspace(0.0, 1.00, np.round((1.00 - 0.0) / 0.01) + 1, endpoint=True) + self.maxDets = [20] + self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]] + self.areaRngLbl = ["all", "medium", "large"] + self.useCats = 1 + + def setUvParams(self): + self.imgIds = [] + self.catIds = [] + self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True) + self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True) + self.maxDets = [20] + self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]] + self.areaRngLbl = ["all", "medium", "large"] + self.useCats = 1 + + def __init__(self, iouType="segm"): + if iouType == "segm" or iouType == "bbox": + self.setDetParams() + elif iouType == "keypoints": + self.setKpParams() + elif iouType == "densepose": + self.setUvParams() + else: + raise Exception("iouType not supported") + self.iouType = iouType + # useSegm is deprecated + self.useSegm = None diff --git a/projects/DensePose/densepose/densepose_head.py b/projects/DensePose/densepose/densepose_head.py new file mode 100644 index 0000000000..8cb5bf6f12 --- /dev/null +++ b/projects/DensePose/densepose/densepose_head.py @@ -0,0 +1,626 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch +from torch import nn +from torch.nn import functional as F + +from detectron2.layers import Conv2d, ConvTranspose2d, interpolate +from detectron2.structures.boxes import matched_boxlist_iou +from detectron2.utils.registry import Registry + +from .structures import DensePoseOutput + +ROI_DENSEPOSE_HEAD_REGISTRY = Registry("ROI_DENSEPOSE_HEAD") + + +def initialize_module_params(module): + for name, param in module.named_parameters(): + if "bias" in name: + nn.init.constant_(param, 0) + elif "weight" in name: + nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu") + + +@ROI_DENSEPOSE_HEAD_REGISTRY.register() +class DensePoseV1ConvXHead(nn.Module): + def __init__(self, cfg, input_channels): + super(DensePoseV1ConvXHead, self).__init__() + # fmt: off + hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL + self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS + # fmt: on + pad_size = kernel_size // 2 + n_channels = input_channels + for i in range(self.n_stacked_convs): + layer = Conv2d(n_channels, hidden_dim, kernel_size, stride=1, padding=pad_size) + layer_name = self._get_layer_name(i) + self.add_module(layer_name, layer) + n_channels = hidden_dim + self.n_out_channels = n_channels + initialize_module_params(self) + + def forward(self, features): + x = features + output = x + for i in range(self.n_stacked_convs): + layer_name = self._get_layer_name(i) + x = getattr(self, layer_name)(x) + x = F.relu(x) + output = x + return output + + def _get_layer_name(self, i): + layer_name = "body_conv_fcn{}".format(i + 1) + return layer_name + + +class DensePosePredictor(nn.Module): + + NUM_ANN_INDICES = 15 + + def __init__(self, cfg, input_channels): + super(DensePosePredictor, self).__init__() + dim_in = input_channels + dim_out_ann_index = self.NUM_ANN_INDICES + dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1 + kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL + self.ann_index_lowres = ConvTranspose2d( + dim_in, 
dim_out_ann_index, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.index_uv_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.u_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.v_lowres = ConvTranspose2d( + dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1) + ) + self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE + initialize_module_params(self) + + def forward(self, head_outputs): + ann_index_lowres = self.ann_index_lowres(head_outputs) + index_uv_lowres = self.index_uv_lowres(head_outputs) + u_lowres = self.u_lowres(head_outputs) + v_lowres = self.v_lowres(head_outputs) + + def interp2d(input): + return interpolate( + input, scale_factor=self.scale_factor, mode="bilinear", align_corners=False + ) + + ann_index = interp2d(ann_index_lowres) + index_uv = interp2d(index_uv_lowres) + u = interp2d(u_lowres) + v = interp2d(v_lowres) + return (ann_index, index_uv, u, v), (ann_index_lowres, index_uv_lowres, u_lowres, v_lowres) + + +class DensePoseDataFilter(object): + def __init__(self, cfg): + self.iou_threshold = cfg.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD + + @torch.no_grad() + def __call__(self, proposals_with_targets): + """ + Filters proposals with targets to keep only the ones relevant for + DensePose training + proposals: list(Instances), each element of the list corresponds to + various instances (proposals, GT for boxes and densepose) for one + image + """ + proposals_filtered = [] + for proposals_per_image in proposals_with_targets: + if not hasattr(proposals_per_image, "gt_densepose"): + continue + assert hasattr(proposals_per_image, "gt_boxes") + assert hasattr(proposals_per_image, "proposal_boxes") + gt_boxes = proposals_per_image.gt_boxes + est_boxes = proposals_per_image.proposal_boxes + # apply match threshold for densepose head + iou = matched_boxlist_iou(gt_boxes, est_boxes) + iou_select = iou > self.iou_threshold + proposals_per_image = proposals_per_image[iou_select] + assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.proposal_boxes) + # filter out any target without densepose annotation + gt_densepose = proposals_per_image.gt_densepose + assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.gt_densepose) + selected_indices = [ + i for i, dp_target in enumerate(gt_densepose) if dp_target is not None + ] + if len(selected_indices) != len(gt_densepose): + proposals_per_image = proposals_per_image[selected_indices] + assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.proposal_boxes) + assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.gt_densepose) + proposals_filtered.append(proposals_per_image) + return proposals_filtered + + +def build_densepose_head(cfg, input_channels): + head_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.NAME + return ROI_DENSEPOSE_HEAD_REGISTRY.get(head_name)(cfg, input_channels) + + +def build_densepose_predictor(cfg, input_channels): + predictor = DensePosePredictor(cfg, input_channels) + return predictor + + +def build_densepose_data_filter(cfg): + dp_filter = DensePoseDataFilter(cfg) + return dp_filter + + +def densepose_inference(densepose_outputs, detections): + """ + Infer dense pose estimate based on outputs from the DensePose head + and detections. The estimate for each detection instance is stored in its + "pred_densepose" attribute. 
+ + Args: + densepose_outputs (tuple(`torch.Tensor`)): iterable containing 4 elements: + - s (:obj: `torch.Tensor`): segmentation tensor of size (N, A, H, W), + - i (:obj: `torch.Tensor`): classification tensor of size (N, C, H, W), + - u (:obj: `torch.Tensor`): U coordinates for each class of size (N, C, H, W), + - v (:obj: `torch.Tensor`): V coordinates for each class of size (N, C, H, W), + where N is the total number of detections in a batch, + A is the number of segmentations classes (e.g. 15 for coarse body parts), + C is the number of labels (e.g. 25 for fine body parts), + W is the resolution along the X axis + H is the resolution along the Y axis + detections (list[Instances]): A list of N Instances, where N is the number of images + in the batch. Instances are modified by this method: "pred_densepose" attribute + is added to each instance, the attribute contains the corresponding + DensePoseOutput object. + """ + + # DensePose outputs: segmentation, body part indices, U, V + s, index_uv, u, v = densepose_outputs + k = 0 + for detection in detections: + n_i = len(detection) + s_i = s[k : k + n_i] + index_uv_i = index_uv[k : k + n_i] + u_i = u[k : k + n_i] + v_i = v[k : k + n_i] + densepose_output_i = DensePoseOutput(s_i, index_uv_i, u_i, v_i) + detection.pred_densepose = densepose_output_i + k += n_i + + +def _linear_interpolation_utilities(v_norm, v0_src, size_src, v0_dst, size_dst, size_z): + """ + Computes utility values for linear interpolation at points v. + The points are given as normalized offsets in the source interval + (v0_src, v0_src + size_src), more precisely: + v = v0_src + v_norm * size_src / 256.0 + The computed utilities include lower points v_lo, upper points v_hi, + interpolation weights v_w and flags j_valid indicating whether the + points falls into the destination interval (v0_dst, v0_dst + size_dst). + + Args: + v_norm (:obj: `torch.Tensor`): tensor of size N containing + normalized point offsets + v0_src (:obj: `torch.Tensor`): tensor of size N containing + left bounds of source intervals for normalized points + size_src (:obj: `torch.Tensor`): tensor of size N containing + source interval sizes for normalized points + v0_dst (:obj: `torch.Tensor`): tensor of size N containing + left bounds of destination intervals + size_dst (:obj: `torch.Tensor`): tensor of size N containing + destination interval sizes + size_z (int): interval size for data to be interpolated + + Returns: + v_lo (:obj: `torch.Tensor`): int tensor of size N containing + indices of lower values used for interpolation, all values are + integers from [0, size_z - 1] + v_hi (:obj: `torch.Tensor`): int tensor of size N containing + indices of upper values used for interpolation, all values are + integers from [0, size_z - 1] + v_w (:obj: `torch.Tensor`): float tensor of size N containing + interpolation weights + j_valid (:obj: `torch.Tensor`): uint8 tensor of size N containing + 0 for points outside the estimation interval + (v0_est, v0_est + size_est) and 1 otherwise + """ + v = v0_src + v_norm * size_src / 256.0 + j_valid = (v - v0_dst >= 0) * (v - v0_dst < size_dst) + v_grid = (v - v0_dst) * size_z / size_dst + v_lo = v_grid.floor().long().clamp(min=0, max=size_z - 1) + v_hi = (v_lo + 1).clamp(max=size_z - 1) + v_grid = torch.min(v_hi.float(), v_grid) + v_w = v_grid - v_lo.float() + return v_lo, v_hi, v_w, j_valid + + +def _grid_sampling_utilities( + zh, zw, bbox_xywh_est, bbox_xywh_gt, index_gt, x_norm, y_norm, index_bbox +): + """ + Prepare tensors used in grid sampling. 
+ + Args: + z_est (:obj: `torch.Tensor`): tensor of size (N,C,H,W) with estimated + values of Z to be extracted for the points X, Y and channel + indices I + bbox_xywh_est (:obj: `torch.Tensor`): tensor of size (N, 4) containing + estimated bounding boxes in format XYWH + bbox_xywh_gt (:obj: `torch.Tensor`): tensor of size (N, 4) containing + matched ground truth bounding boxes in format XYWH + index_gt (:obj: `torch.Tensor`): tensor of size K with point labels for + ground truth points + x_norm (:obj: `torch.Tensor`): tensor of size K with X normalized + coordinates of ground truth points. Image X coordinates can be + obtained as X = Xbbox + x_norm * Wbbox / 255 + y_norm (:obj: `torch.Tensor`): tensor of size K with Y normalized + coordinates of ground truth points. Image Y coordinates can be + obtained as Y = Ybbox + y_norm * Hbbox / 255 + index_bbox (:obj: `torch.Tensor`): tensor of size K with bounding box + indices for each ground truth point. The values are thus in + [0, N-1] + + Returns: + j_valid (:obj: `torch.Tensor`): uint8 tensor of size M containing + 0 for points to be discarded and 1 for points to be selected + y_lo (:obj: `torch.Tensor`): int tensor of indices of upper values + in z_est for each point + y_hi (:obj: `torch.Tensor`): int tensor of indices of lower values + in z_est for each point + x_lo (:obj: `torch.Tensor`): int tensor of indices of left values + in z_est for each point + x_hi (:obj: `torch.Tensor`): int tensor of indices of right values + in z_est for each point + w_ylo_xlo (:obj: `torch.Tensor`): float tensor of size M; + contains upper-left value weight for each point + w_ylo_xhi (:obj: `torch.Tensor`): float tensor of size M; + contains upper-right value weight for each point + w_yhi_xlo (:obj: `torch.Tensor`): float tensor of size M; + contains lower-left value weight for each point + w_yhi_xhi (:obj: `torch.Tensor`): float tensor of size M; + contains lower-right value weight for each point + """ + + x0_gt, y0_gt, w_gt, h_gt = bbox_xywh_gt[index_bbox].unbind(dim=1) + x0_est, y0_est, w_est, h_est = bbox_xywh_est[index_bbox].unbind(dim=1) + x_lo, x_hi, x_w, jx_valid = _linear_interpolation_utilities( + x_norm, x0_gt, w_gt, x0_est, w_est, zw + ) + y_lo, y_hi, y_w, jy_valid = _linear_interpolation_utilities( + y_norm, y0_gt, h_gt, y0_est, h_est, zh + ) + j_valid = jx_valid * jy_valid + + w_ylo_xlo = (1.0 - x_w) * (1.0 - y_w) + w_ylo_xhi = x_w * (1.0 - y_w) + w_yhi_xlo = (1.0 - x_w) * y_w + w_yhi_xhi = x_w * y_w + + return j_valid, y_lo, y_hi, x_lo, x_hi, w_ylo_xlo, w_ylo_xhi, w_yhi_xlo, w_yhi_xhi + + +def _extract_at_points_packed( + z_est, + index_bbox_valid, + slice_index_uv, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, + w_ylo_xhi, + w_yhi_xlo, + w_yhi_xhi, +): + """ + Extract ground truth values z_gt for valid point indices and estimated + values z_est using bilinear interpolation over top-left (y_lo, x_lo), + top-right (y_lo, x_hi), bottom-left (y_hi, x_lo) and bottom-right + (y_hi, x_hi) values in z_est with corresponding weights: + w_ylo_xlo, w_ylo_xhi, w_yhi_xlo and w_yhi_xhi. 
+ Use slice_index_uv to slice dim=1 in z_est + """ + z_est_sampled = ( + z_est[index_bbox_valid, slice_index_uv, y_lo, x_lo] * w_ylo_xlo + + z_est[index_bbox_valid, slice_index_uv, y_lo, x_hi] * w_ylo_xhi + + z_est[index_bbox_valid, slice_index_uv, y_hi, x_lo] * w_yhi_xlo + + z_est[index_bbox_valid, slice_index_uv, y_hi, x_hi] * w_yhi_xhi + ) + return z_est_sampled + + +def _resample_data( + z, bbox_xywh_src, bbox_xywh_dst, wout, hout, mode="nearest", padding_mode="zeros" +): + """ + Args: + z (:obj: `torch.Tensor`): tensor of size (N,C,H,W) with data to be + resampled + bbox_xywh_src (:obj: `torch.Tensor`): tensor of size (N,4) containing + source bounding boxes in format XYWH + bbox_xywh_dst (:obj: `torch.Tensor`): tensor of size (N,4) containing + destination bounding boxes in format XYWH + Return: + zresampled (:obj: `torch.Tensor`): tensor of size (N, C, Hout, Wout) + with resampled values of z, where D is the discretization size + """ + n = bbox_xywh_src.size(0) + assert n == bbox_xywh_dst.size(0), ( + "The number of " + "source ROIs for resampling ({}) should be equal to the number " + "of destination ROIs ({})".format(bbox_xywh_src.size(0), bbox_xywh_dst.size(0)) + ) + x0src, y0src, wsrc, hsrc = bbox_xywh_src.unbind(dim=1) + x0dst, y0dst, wdst, hdst = bbox_xywh_dst.unbind(dim=1) + x0dst_norm = 2 * (x0dst - x0src) / wsrc - 1 + y0dst_norm = 2 * (y0dst - y0src) / hsrc - 1 + x1dst_norm = 2 * (x0dst + wdst - x0src) / wsrc - 1 + y1dst_norm = 2 * (y0dst + hdst - y0src) / hsrc - 1 + grid_w = torch.arange(wout, device=z.device, dtype=torch.float) / wout + grid_h = torch.arange(hout, device=z.device, dtype=torch.float) / hout + grid_w_expanded = grid_w[None, None, :].expand(n, hout, wout) + grid_h_expanded = grid_h[None, :, None].expand(n, hout, wout) + dx_expanded = (x1dst_norm - x0dst_norm)[:, None, None].expand(n, hout, wout) + dy_expanded = (y1dst_norm - y0dst_norm)[:, None, None].expand(n, hout, wout) + x0_expanded = x0dst_norm[:, None, None].expand(n, hout, wout) + y0_expanded = y0dst_norm[:, None, None].expand(n, hout, wout) + grid_x = grid_w_expanded * dx_expanded + x0_expanded + grid_y = grid_h_expanded * dy_expanded + y0_expanded + grid = torch.stack((grid_x, grid_y), dim=3) + # resample Z from (N, C, H, W) into (N, C, Hout, Wout) + zresampled = F.grid_sample(z, grid, mode=mode, padding_mode=padding_mode, align_corners=True) + return zresampled + + +def _extract_single_tensors_from_matches_one_image( + proposals_targets, bbox_with_dp_offset, bbox_global_offset +): + i_gt_all = [] + x_norm_all = [] + y_norm_all = [] + u_gt_all = [] + v_gt_all = [] + s_gt_all = [] + bbox_xywh_gt_all = [] + bbox_xywh_est_all = [] + # Ibbox_all == k should be true for all data that corresponds + # to bbox_xywh_gt[k] and bbox_xywh_est[k] + # index k here is global wrt images + i_bbox_all = [] + # at offset k (k is global) contains index of bounding box data + # within densepose output tensor + i_with_dp = [] + + boxes_xywh_est = proposals_targets.proposal_boxes.clone() + boxes_xywh_gt = proposals_targets.gt_boxes.clone() + n_i = len(boxes_xywh_est) + assert n_i == len(boxes_xywh_gt) + + if n_i: + boxes_xywh_est.tensor[:, 2] -= boxes_xywh_est.tensor[:, 0] + boxes_xywh_est.tensor[:, 3] -= boxes_xywh_est.tensor[:, 1] + boxes_xywh_gt.tensor[:, 2] -= boxes_xywh_gt.tensor[:, 0] + boxes_xywh_gt.tensor[:, 3] -= boxes_xywh_gt.tensor[:, 1] + if hasattr(proposals_targets, "gt_densepose"): + densepose_gt = proposals_targets.gt_densepose + for k, box_xywh_est, box_xywh_gt, dp_gt in zip( + range(n_i), 
boxes_xywh_est.tensor, boxes_xywh_gt.tensor, densepose_gt + ): + if (dp_gt is not None) and (len(dp_gt.x) > 0): + i_gt_all.append(dp_gt.i) + x_norm_all.append(dp_gt.x) + y_norm_all.append(dp_gt.y) + u_gt_all.append(dp_gt.u) + v_gt_all.append(dp_gt.v) + s_gt_all.append(dp_gt.segm.unsqueeze(0)) + bbox_xywh_gt_all.append(box_xywh_gt.view(-1, 4)) + bbox_xywh_est_all.append(box_xywh_est.view(-1, 4)) + i_bbox_k = torch.full_like(dp_gt.i, bbox_with_dp_offset + len(i_with_dp)) + i_bbox_all.append(i_bbox_k) + i_with_dp.append(bbox_global_offset + k) + return ( + i_gt_all, + x_norm_all, + y_norm_all, + u_gt_all, + v_gt_all, + s_gt_all, + bbox_xywh_gt_all, + bbox_xywh_est_all, + i_bbox_all, + i_with_dp, + ) + + +def _extract_single_tensors_from_matches(proposals_with_targets): + i_img = [] + i_gt_all = [] + x_norm_all = [] + y_norm_all = [] + u_gt_all = [] + v_gt_all = [] + s_gt_all = [] + bbox_xywh_gt_all = [] + bbox_xywh_est_all = [] + i_bbox_all = [] + i_with_dp_all = [] + n = 0 + for i, proposals_targets_per_image in enumerate(proposals_with_targets): + n_i = proposals_targets_per_image.proposal_boxes.tensor.size(0) + if not n_i: + continue + i_gt_img, x_norm_img, y_norm_img, u_gt_img, v_gt_img, s_gt_img, bbox_xywh_gt_img, bbox_xywh_est_img, i_bbox_img, i_with_dp_img = _extract_single_tensors_from_matches_one_image( # noqa + proposals_targets_per_image, len(i_with_dp_all), n + ) + i_gt_all.extend(i_gt_img) + x_norm_all.extend(x_norm_img) + y_norm_all.extend(y_norm_img) + u_gt_all.extend(u_gt_img) + v_gt_all.extend(v_gt_img) + s_gt_all.extend(s_gt_img) + bbox_xywh_gt_all.extend(bbox_xywh_gt_img) + bbox_xywh_est_all.extend(bbox_xywh_est_img) + i_bbox_all.extend(i_bbox_img) + i_with_dp_all.extend(i_with_dp_img) + i_img.extend([i] * len(i_with_dp_img)) + n += n_i + # concatenate all data into a single tensor + if (n > 0) and (len(i_with_dp_all) > 0): + i_gt = torch.cat(i_gt_all, 0).long() + x_norm = torch.cat(x_norm_all, 0) + y_norm = torch.cat(y_norm_all, 0) + u_gt = torch.cat(u_gt_all, 0) + v_gt = torch.cat(v_gt_all, 0) + s_gt = torch.cat(s_gt_all, 0) + bbox_xywh_gt = torch.cat(bbox_xywh_gt_all, 0) + bbox_xywh_est = torch.cat(bbox_xywh_est_all, 0) + i_bbox = torch.cat(i_bbox_all, 0).long() + else: + i_gt = None + x_norm = None + y_norm = None + u_gt = None + v_gt = None + s_gt = None + bbox_xywh_gt = None + bbox_xywh_est = None + i_bbox = None + return ( + i_img, + i_with_dp_all, + bbox_xywh_est, + bbox_xywh_gt, + i_gt, + x_norm, + y_norm, + u_gt, + v_gt, + s_gt, + i_bbox, + ) + + +class DensePoseLosses(object): + def __init__(self, cfg): + # fmt: off + self.heatmap_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE + self.w_points = cfg.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS + self.w_part = cfg.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS + self.w_segm = cfg.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS + # fmt: on + + def __call__(self, proposals_with_gt, densepose_outputs): + losses = {} + # densepose outputs are computed for all images and all bounding boxes; + # i.e. 
if a batch has 4 images with (3, 1, 2, 1) proposals respectively, + # the outputs will have size(0) == 3+1+2+1 == 7 + s, index_uv, u, v = densepose_outputs + assert u.size(2) == v.size(2) + assert u.size(3) == v.size(3) + assert u.size(2) == index_uv.size(2) + assert u.size(3) == index_uv.size(3) + + with torch.no_grad(): + index_uv_img, i_with_dp, bbox_xywh_est, bbox_xywh_gt, index_gt_all, x_norm, y_norm, u_gt_all, v_gt_all, s_gt, index_bbox = _extract_single_tensors_from_matches( # noqa + proposals_with_gt + ) + n_batch = len(i_with_dp) + + # NOTE: we need to keep the same computation graph on all the GPUs to + # perform reduction properly. Hence even if we have no data on one + # of the GPUs, we still need to generate the computation graph. + # Add fake (zero) loss in the form Tensor.sum() * 0 + if not n_batch: + losses["loss_densepose_U"] = u.sum() * 0 + losses["loss_densepose_V"] = v.sum() * 0 + losses["loss_densepose_I"] = index_uv.sum() * 0 + losses["loss_densepose_S"] = s.sum() * 0 + return losses + + zh = u.size(2) + zw = u.size(3) + + j_valid, y_lo, y_hi, x_lo, x_hi, w_ylo_xlo, w_ylo_xhi, w_yhi_xlo, w_yhi_xhi = _grid_sampling_utilities( # noqa + zh, zw, bbox_xywh_est, bbox_xywh_gt, index_gt_all, x_norm, y_norm, index_bbox + ) + + j_valid_fg = j_valid * (index_gt_all > 0) + + u_gt = u_gt_all[j_valid_fg] + u_est_all = _extract_at_points_packed( + u[i_with_dp], + index_bbox, + index_gt_all, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, + w_ylo_xhi, + w_yhi_xlo, + w_yhi_xhi, + ) + u_est = u_est_all[j_valid_fg] + + v_gt = v_gt_all[j_valid_fg] + v_est_all = _extract_at_points_packed( + v[i_with_dp], + index_bbox, + index_gt_all, + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo, + w_ylo_xhi, + w_yhi_xlo, + w_yhi_xhi, + ) + v_est = v_est_all[j_valid_fg] + + index_uv_gt = index_gt_all[j_valid] + index_uv_est_all = _extract_at_points_packed( + index_uv[i_with_dp], + index_bbox, + slice(None), + y_lo, + y_hi, + x_lo, + x_hi, + w_ylo_xlo[:, None], + w_ylo_xhi[:, None], + w_yhi_xlo[:, None], + w_yhi_xhi[:, None], + ) + index_uv_est = index_uv_est_all[j_valid, :] + + # Resample everything to the estimated data size, no need to resample + # S_est then: + s_est = s[i_with_dp] + with torch.no_grad(): + s_gt = _resample_data( + s_gt.unsqueeze(1), + bbox_xywh_gt, + bbox_xywh_est, + self.heatmap_size, + self.heatmap_size, + mode="nearest", + padding_mode="zeros", + ).squeeze(1) + + # add point-based losses: + u_loss = F.smooth_l1_loss(u_est, u_gt, reduction="sum") * self.w_points + losses["loss_densepose_U"] = u_loss + v_loss = F.smooth_l1_loss(v_est, v_gt, reduction="sum") * self.w_points + losses["loss_densepose_V"] = v_loss + index_uv_loss = F.cross_entropy(index_uv_est, index_uv_gt.long()) * self.w_part + losses["loss_densepose_I"] = index_uv_loss + s_loss = F.cross_entropy(s_est, s_gt.long()) * self.w_segm + losses["loss_densepose_S"] = s_loss + return losses + + +def build_densepose_losses(cfg): + losses = DensePoseLosses(cfg) + return losses diff --git a/projects/DensePose/densepose/evaluator.py b/projects/DensePose/densepose/evaluator.py new file mode 100644 index 0000000000..725f8f98a5 --- /dev/null +++ b/projects/DensePose/densepose/evaluator.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +# File: + +import contextlib +import copy +import io +import itertools +import json +import logging +import os +from collections import OrderedDict +import torch +from pycocotools import coco +from pycocotools.coco import COCO + +from detectron2.data import MetadataCatalog +from detectron2.evaluation import DatasetEvaluator +from detectron2.structures import BoxMode +from detectron2.utils.comm import all_gather, is_main_process, synchronize +from detectron2.utils.logger import create_small_table + +from .densepose_coco_evaluation import DensePoseCocoEval + +coco.unicode = str + + +class DensePoseCOCOEvaluator(DatasetEvaluator): + def __init__(self, dataset_name, distributed, output_dir=None): + self._distributed = distributed + self._output_dir = output_dir + + self._cpu_device = torch.device("cpu") + self._logger = logging.getLogger(__name__) + + self._metadata = MetadataCatalog.get(dataset_name) + with contextlib.redirect_stdout(io.StringIO()): + self._coco_api = COCO(self._metadata.json_file) + + def reset(self): + self._predictions = [] + + def process(self, inputs, outputs): + """ + Args: + inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). + It is a list of dict. Each dict corresponds to an image and + contains keys like "height", "width", "file_name", "image_id". + outputs: the outputs of a COCO model. It is a list of dicts with key + "instances" that contains :class:`Instances`. + The :class:`Instances` object needs to have `densepose` field. + """ + for input, output in zip(inputs, outputs): + instances = output["instances"].to(self._cpu_device) + + boxes = instances.pred_boxes.tensor.clone() + boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + instances.pred_densepose = instances.pred_densepose.to_result(boxes) + + json_results = prediction_to_json(instances, input["image_id"]) + self._predictions.extend(json_results) + + def evaluate(self): + if self._distributed: + synchronize() + self._predictions = all_gather(self._predictions) + self._predictions = list(itertools.chain(*self._predictions)) + if not is_main_process(): + return + + return copy.deepcopy(self._eval_predictions()) + + def _eval_predictions(self): + """ + Evaluate self._predictions on densepose. + Return results with the metrics of the tasks. 
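+ The returned value is an OrderedDict of the form + {"densepose": {"AP": ..., "AP50": ..., "AP75": ..., "APm": ..., "APl": ...}}, + with metric values reported in percent (see _evaluate_predictions_on_coco below).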
+ """ + self._logger.info("Preparing results for COCO format ...") + + if self._output_dir: + file_path = os.path.join(self._output_dir, "coco_densepose_results.json") + with open(file_path, "w") as f: + json.dump(self._predictions, f) + f.flush() + os.fsync(f.fileno()) + + self._logger.info("Evaluating predictions ...") + res = OrderedDict() + res["densepose"] = _evaluate_predictions_on_coco(self._coco_api, self._predictions) + return res + + +def prediction_to_json(instances, img_id): + """ + Args: + instances (Instances): the output of the model + img_id (str): the image id in COCO + + Returns: + list[dict]: the results in densepose evaluation format + """ + scores = instances.scores.tolist() + + results = [] + for k in range(len(instances)): + densepose = instances.pred_densepose[k] + result = { + "image_id": img_id, + "category_id": 1, # densepose only has one class + "bbox": densepose[1], + "score": scores[k], + "densepose": densepose, + } + results.append(result) + return results + + +def _evaluate_predictions_on_coco(coco_gt, coco_results): + metrics = ["AP", "AP50", "AP75", "APm", "APl"] + + logger = logging.getLogger(__name__) + + if len(coco_results) == 0: # cocoapi does not handle empty results very well + logger.warn("No predictions from the model! Set scores to -1") + return {metric: -1 for metric in metrics} + + coco_dt = coco_gt.loadRes(coco_results) + coco_eval = DensePoseCocoEval(coco_gt, coco_dt, "densepose") + coco_eval.evaluate() + coco_eval.accumulate() + coco_eval.summarize() + + # the standard metrics + results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)} + logger.info("Evaluation results for densepose: \n" + create_small_table(results)) + return results diff --git a/projects/DensePose/densepose/roi_head.py b/projects/DensePose/densepose/roi_head.py new file mode 100644 index 0000000000..56a92c7c0c --- /dev/null +++ b/projects/DensePose/densepose/roi_head.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# File: + +import torch + +from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads +from detectron2.modeling.poolers import ROIPooler + +# Is this function good to expose as an API? +from detectron2.modeling.roi_heads.roi_heads import select_foreground_proposals + +from .densepose_head import ( + build_densepose_data_filter, + build_densepose_head, + build_densepose_losses, + build_densepose_predictor, + densepose_inference, +) + + +@ROI_HEADS_REGISTRY.register() +class DensePoseROIHeads(StandardROIHeads): + """ + A Standard ROIHeads which contains an addition of DensePose head. 
+ """ + + def __init__(self, cfg, input_shape): + super().__init__(cfg, input_shape) + self._init_densepose_head(cfg) + + def _init_densepose_head(self, cfg): + # fmt: off + self.densepose_on = cfg.MODEL.DENSEPOSE_ON + if not self.densepose_on: + return + self.densepose_data_filter = build_densepose_data_filter(cfg) + dp_pooler_resolution = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION + dp_pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features) + dp_pooler_sampling_ratio = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO + dp_pooler_type = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE + # fmt: on + in_channels = [self.feature_channels[f] for f in self.in_features][0] + self.densepose_pooler = ROIPooler( + output_size=dp_pooler_resolution, + scales=dp_pooler_scales, + sampling_ratio=dp_pooler_sampling_ratio, + pooler_type=dp_pooler_type, + ) + self.densepose_head = build_densepose_head(cfg, in_channels) + self.densepose_predictor = build_densepose_predictor( + cfg, self.densepose_head.n_out_channels + ) + self.densepose_losses = build_densepose_losses(cfg) + + def _forward_densepose(self, features, instances): + """ + Forward logic of the densepose prediction branch. + + Args: + features (list[Tensor]): #level input features for densepose prediction + instances (list[Instances]): the per-image instances to train/predict densepose. + In training, they can be the proposals. + In inference, they can be the predicted boxes. + + Returns: + In training, a dict of losses. + In inference, update `instances` with new fields "densepose" and return it. + """ + if not self.densepose_on: + return {} if self.training else instances + + if self.training: + proposals, _ = select_foreground_proposals(instances, self.num_classes) + proposals_dp = self.densepose_data_filter(proposals) + if len(proposals_dp) > 0: + proposal_boxes = [x.proposal_boxes for x in proposals_dp] + features_dp = self.densepose_pooler(features, proposal_boxes) + densepose_head_outputs = self.densepose_head(features_dp) + densepose_outputs, _ = self.densepose_predictor(densepose_head_outputs) + densepose_loss_dict = self.densepose_losses(proposals_dp, densepose_outputs) + return densepose_loss_dict + else: + pred_boxes = [x.pred_boxes for x in instances] + features_dp = self.densepose_pooler(features, pred_boxes) + if len(features_dp) > 0: + densepose_head_outputs = self.densepose_head(features_dp) + densepose_outputs, _ = self.densepose_predictor(densepose_head_outputs) + else: + # If no detection occured instances + # set densepose_outputs to empty tensors + empty_tensor = torch.zeros(size=(0, 0, 0, 0), device=features_dp.device) + densepose_outputs = tuple([empty_tensor] * 4) + + densepose_inference(densepose_outputs, instances) + return instances + + def forward(self, images, features, proposals, targets=None): + features_list = [features[f] for f in self.in_features] + + instances, losses = super().forward(images, features, proposals, targets) + del targets, images + + if self.training: + losses.update(self._forward_densepose(features_list, instances)) + else: + instances = self._forward_densepose(features_list, instances) + return instances, losses diff --git a/projects/DensePose/densepose/structures.py b/projects/DensePose/densepose/structures.py new file mode 100644 index 0000000000..9d3465ba1f --- /dev/null +++ b/projects/DensePose/densepose/structures.py @@ -0,0 +1,519 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import base64 +import numpy as np +from io import BytesIO +import torch +from PIL import Image +from torch.nn import functional as F + + +class DensePoseTransformData(object): + + # Horizontal symmetry label transforms used for horizontal flip + MASK_LABEL_SYMMETRIES = [0, 1, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 14] + # fmt: off + POINT_LABEL_SYMMETRIES = [ 0, 1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17, 20, 19, 22, 21, 24, 23] # noqa + # fmt: on + + def __init__(self, uv_symmetries): + self.mask_label_symmetries = DensePoseTransformData.MASK_LABEL_SYMMETRIES + self.point_label_symmetries = DensePoseTransformData.POINT_LABEL_SYMMETRIES + self.uv_symmetries = uv_symmetries + + @staticmethod + def load(fpath): + import scipy.io + + uv_symmetry_map = scipy.io.loadmat(fpath) + uv_symmetry_map_torch = {} + for key in ["U_transforms", "V_transforms"]: + map_src = uv_symmetry_map[key] + uv_symmetry_map_torch[key] = [] + for i in range(uv_symmetry_map[key].shape[1]): + uv_symmetry_map_torch[key].append( + torch.from_numpy(map_src[0, i]).to(dtype=torch.float) + ) + transform_data = DensePoseTransformData(uv_symmetry_map_torch) + return transform_data + + +class DensePoseDataRelative(object): + """ + Dense pose relative annotations that can be applied to any bounding box: + x - normalized X coordinates [0, 255] of annotated points + y - normalized Y coordinates [0, 255] of annotated points + i - body part labels 0,...,24 for annotated points + u - body part U coordinates [0, 1] for annotated points + v - body part V coordinates [0, 1] for annotated points + segm - 256x256 segmentation mask with values 0,...,14 + To obtain absolute x and y data wrt some bounding box one needs to first + divide the data by 256, multiply by the respective bounding box size + and add bounding box offset: + x_img = x0 + x_norm * w / 256.0 + y_img = y0 + y_norm * h / 256.0 + Segmentation masks are typically sampled to get image-based masks. 
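+ + For example (illustrative numbers), a point with x = 128, y = 64 inside a bounding box + with offset (x0, y0) = (10, 20) and size (w, h) = (100, 200) corresponds to image + coordinates x_img = 10 + 128 * 100 / 256.0 = 60.0 and y_img = 20 + 64 * 200 / 256.0 = 70.0.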
+ """ + + # Key for normalized X coordinates in annotation dict + X_KEY = "dp_x" + # Key for normalized Y coordinates in annotation dict + Y_KEY = "dp_y" + # Key for U part coordinates in annotation dict + U_KEY = "dp_U" + # Key for V part coordinates in annotation dict + V_KEY = "dp_V" + # Key for I point labels in annotation dict + I_KEY = "dp_I" + # Key for segmentation mask in annotation dict + S_KEY = "dp_masks" + # Number of body parts in segmentation masks + N_BODY_PARTS = 14 + # Number of parts in point labels + N_PART_LABELS = 24 + MASK_SIZE = 256 + + def __init__(self, annotation, cleanup=False): + is_valid, reason_not_valid = DensePoseDataRelative.validate_annotation(annotation) + assert is_valid, "Invalid DensePose annotations: {}".format(reason_not_valid) + self.x = torch.as_tensor(annotation[DensePoseDataRelative.X_KEY]) + self.y = torch.as_tensor(annotation[DensePoseDataRelative.Y_KEY]) + self.i = torch.as_tensor(annotation[DensePoseDataRelative.I_KEY]) + self.u = torch.as_tensor(annotation[DensePoseDataRelative.U_KEY]) + self.v = torch.as_tensor(annotation[DensePoseDataRelative.V_KEY]) + self.segm = DensePoseDataRelative.extract_segmentation_mask(annotation) + self.device = torch.device("cpu") + if cleanup: + DensePoseDataRelative.cleanup_annotation(annotation) + + def to(self, device): + if self.device == device: + return self + new_data = DensePoseDataRelative.__new__(DensePoseDataRelative) + new_data.x = self.x + new_data.x = self.x.to(device) + new_data.y = self.y.to(device) + new_data.i = self.i.to(device) + new_data.u = self.u.to(device) + new_data.v = self.v.to(device) + new_data.segm = self.segm.to(device) + new_data.device = device + return new_data + + @staticmethod + def extract_segmentation_mask(annotation): + import pycocotools.mask as mask_utils + + poly_specs = annotation[DensePoseDataRelative.S_KEY] + segm = torch.zeros((DensePoseDataRelative.MASK_SIZE,) * 2, dtype=torch.float32) + for i in range(DensePoseDataRelative.N_BODY_PARTS): + poly_i = poly_specs[i] + if poly_i: + mask_i = mask_utils.decode(poly_i) + segm[mask_i > 0] = i + 1 + return segm + + @staticmethod + def validate_annotation(annotation): + for key in [ + DensePoseDataRelative.X_KEY, + DensePoseDataRelative.Y_KEY, + DensePoseDataRelative.I_KEY, + DensePoseDataRelative.U_KEY, + DensePoseDataRelative.V_KEY, + DensePoseDataRelative.S_KEY, + ]: + if key not in annotation: + return False, "no {key} data in the annotation".format(key=key) + return True, None + + @staticmethod + def cleanup_annotation(annotation): + for key in [ + DensePoseDataRelative.X_KEY, + DensePoseDataRelative.Y_KEY, + DensePoseDataRelative.I_KEY, + DensePoseDataRelative.U_KEY, + DensePoseDataRelative.V_KEY, + DensePoseDataRelative.S_KEY, + ]: + if key in annotation: + del annotation[key] + + def apply_transform(self, transforms, densepose_transform_data): + self._transform_pts(transforms, densepose_transform_data) + self._transform_segm(transforms, densepose_transform_data) + + def _transform_pts(self, transforms, dp_transform_data): + import detectron2.data.transforms as T + + # NOTE: This assumes that HorizFlipTransform is the only one that does flip + do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1 + if do_hflip: + self.x = self.segm.size(1) - self.x + self._flip_iuv_semantics(dp_transform_data) + + def _flip_iuv_semantics(self, dp_transform_data): + i_old = self.i.clone() + uv_symmetries = dp_transform_data.uv_symmetries + pt_label_symmetries = dp_transform_data.point_label_symmetries 
+ for i in range(self.N_PART_LABELS): + if pt_label_symmetries[i + 1] != i + 1: + annot_indices_i = i_old == i + 1 + self.i[annot_indices_i] = pt_label_symmetries[i + 1] + u_loc = (self.u[annot_indices_i] * 255).long() + v_loc = (self.v[annot_indices_i] * 255).long() + self.u[annot_indices_i] = uv_symmetries["U_transforms"][i][v_loc, u_loc] + self.v[annot_indices_i] = uv_symmetries["V_transforms"][i][v_loc, u_loc] + + def _transform_segm(self, transforms, dp_transform_data): + import detectron2.data.transforms as T + + # NOTE: This assumes that HorizFlipTransform is the only one that does flip + do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1 + if do_hflip: + self.segm = torch.flip(self.segm, [1]) + self._flip_segm_semantics(dp_transform_data) + + def _flip_segm_semantics(self, dp_transform_data): + old_segm = self.segm.clone() + mask_label_symmetries = dp_transform_data.mask_label_symmetries + for i in range(self.N_BODY_PARTS): + if mask_label_symmetries[i + 1] != i + 1: + self.segm[old_segm == i + 1] = mask_label_symmetries[i + 1] + + +def normalized_coords_transform(x0, y0, w, h): + """ + Coordinates transform that maps top left corner to (-1, -1) and bottom + right corner to (1, 1). Used for torch.grid_sample to initialize the + grid + """ + + def f(p): + return (2 * (p[0] - x0) / w - 1, 2 * (p[1] - y0) / h - 1) + + return f + + +class DensePoseOutput(object): + def __init__(self, S, I, U, V): + self.S = S + self.I = I # noqa: E741 + self.U = U + self.V = V + self._check_output_dims(S, I, U, V) + + def _check_output_dims(self, S, I, U, V): + assert ( + len(S.size()) == 4 + ), "Segmentation output should have 4 " "dimensions (NCHW), but has size {}".format( + S.size() + ) + assert ( + len(I.size()) == 4 + ), "Segmentation output should have 4 " "dimensions (NCHW), but has size {}".format( + S.size() + ) + assert ( + len(U.size()) == 4 + ), "Segmentation output should have 4 " "dimensions (NCHW), but has size {}".format( + S.size() + ) + assert ( + len(V.size()) == 4 + ), "Segmentation output should have 4 " "dimensions (NCHW), but has size {}".format( + S.size() + ) + assert len(S) == len(I), ( + "Number of output segmentation planes {} " + "should be equal to the number of output part index " + "planes {}".format(len(S), len(I)) + ) + assert S.size()[2:] == I.size()[2:], ( + "Output segmentation plane size {} " + "should be equal to the output part index " + "plane size {}".format(S.size()[2:], I.size()[2:]) + ) + assert I.size() == U.size(), ( + "Part index output shape {} " + "should be the same as U coordinates output shape {}".format(I.size(), U.size()) + ) + assert I.size() == V.size(), ( + "Part index output shape {} " + "should be the same as V coordinates output shape {}".format(I.size(), V.size()) + ) + + def resize(self, image_size_hw): + # do nothing - outputs are invariant to resize + pass + + def _crop(self, S, I, U, V, bbox_old_xywh, bbox_new_xywh): + """ + Resample S, I, U, V from bbox_old to the cropped bbox_new + """ + x0old, y0old, wold, hold = bbox_old_xywh + x0new, y0new, wnew, hnew = bbox_new_xywh + tr_coords = normalized_coords_transform(x0old, y0old, wold, hold) + topleft = (x0new, y0new) + bottomright = (x0new + wnew, y0new + hnew) + topleft_norm = tr_coords(topleft) + bottomright_norm = tr_coords(bottomright) + hsize = S.size(1) + wsize = S.size(2) + grid = torch.meshgrid( + torch.arange( + topleft_norm[1], + bottomright_norm[1], + (bottomright_norm[1] - topleft_norm[1]) / hsize, + )[:hsize], + torch.arange( + 
topleft_norm[0], + bottomright_norm[0], + (bottomright_norm[0] - topleft_norm[0]) / wsize, + )[:wsize], + ) + grid = torch.stack(grid, dim=2).to(S.device) + assert ( + grid.size(0) == hsize + ), "Resampled grid expected " "height={}, actual height={}".format(hsize, grid.size(0)) + assert grid.size(1) == wsize, "Resampled grid expected " "width={}, actual width={}".format( + wsize, grid.size(1) + ) + S_new = F.grid_sample( + S.unsqueeze(0), + torch.unsqueeze(grid, 0), + mode="bilinear", + padding_mode="border", + align_corners=True, + ).squeeze(0) + I_new = F.grid_sample( + I.unsqueeze(0), + torch.unsqueeze(grid, 0), + mode="bilinear", + padding_mode="border", + align_corners=True, + ).squeeze(0) + U_new = F.grid_sample( + U.unsqueeze(0), + torch.unsqueeze(grid, 0), + mode="bilinear", + padding_mode="border", + align_corners=True, + ).squeeze(0) + V_new = F.grid_sample( + V.unsqueeze(0), + torch.unsqueeze(grid, 0), + mode="bilinear", + padding_mode="border", + align_corners=True, + ).squeeze(0) + return S_new, I_new, U_new, V_new + + def crop(self, indices_cropped, bboxes_old, bboxes_new): + """ + Crop outputs for selected bounding boxes to the new bounding boxes. + """ + # VK: cropping is ignored for now + # for i, ic in enumerate(indices_cropped): + # self.S[ic], self.I[ic], self.U[ic], self.V[ic] = \ + # self._crop(self.S[ic], self.I[ic], self.U[ic], self.V[ic], + # bboxes_old[i], bboxes_new[i]) + pass + + def to_result(self, boxes_xywh): + """ + Convert DensePose outputs to results format. Results are more compact, + but cannot be resampled any more + """ + result = DensePoseResult(boxes_xywh, self.S, self.I, self.U, self.V) + return result + + def __getitem__(self, item): + if isinstance(item, int): + S_selected = self.S[item].unsqueeze(0) + I_selected = self.I[item].unsqueeze(0) + U_selected = self.U[item].unsqueeze(0) + V_selected = self.V[item].unsqueeze(0) + else: + S_selected = self.S[item] + I_selected = self.I[item] + U_selected = self.U[item] + V_selected = self.V[item] + return DensePoseOutput(S_selected, I_selected, U_selected, V_selected) + + def __str__(self): + s = "DensePoseOutput S {}, I {}, U {}, V {}".format( + list(self.S.size()), list(self.I.size()), list(self.U.size()), list(self.V.size()) + ) + return s + + def __len__(self): + return self.S.size(0) + + +class DensePoseResult(object): + def __init__(self, boxes_xywh, S, I, U, V): + self.results = [] + self.boxes_xywh = boxes_xywh.cpu().tolist() + assert len(boxes_xywh.size()) == 2 + assert boxes_xywh.size(1) == 4 + for i, box_xywh in enumerate(boxes_xywh): + result_i = self._output_to_result(box_xywh, S[[i]], I[[i]], U[[i]], V[[i]]) + result_numpy_i = result_i.cpu().numpy() + result_encoded_i = DensePoseResult.encode_png_data(result_numpy_i) + result_encoded_with_shape_i = (result_numpy_i.shape, result_encoded_i) + self.results.append(result_encoded_with_shape_i) + + def __str__(self): + s = "DensePoseResult: N={} [{}]".format( + len(self.results), ", ".join([str(list(r[0])) for r in self.results]) + ) + return s + + def _output_to_result(self, box_xywh, S, I, U, V): + x, y, w, h = box_xywh + w = max(int(w), 1) + h = max(int(h), 1) + result = torch.zeros([3, h, w], dtype=torch.uint8, device=U.device) + assert ( + len(S.size()) == 4 + ), "AnnIndex tensor size should have {} " "dimensions but has {}".format(4, len(S.size())) + s_bbox = F.interpolate(S, (h, w), mode="bilinear", align_corners=False).argmax(dim=1) + assert ( + len(I.size()) == 4 + ), "IndexUV tensor size should have {} " "dimensions but has 
{}".format(4, len(S.size())) + i_bbox = ( + F.interpolate(I, (h, w), mode="bilinear", align_corners=False).argmax(dim=1) + * (s_bbox > 0).long() + ).squeeze(0) + assert len(U.size()) == 4, "U tensor size should have {} " "dimensions but has {}".format( + 4, len(U.size()) + ) + u_bbox = F.interpolate(U, (h, w), mode="bilinear", align_corners=False) + assert len(V.size()) == 4, "V tensor size should have {} " "dimensions but has {}".format( + 4, len(V.size()) + ) + v_bbox = F.interpolate(V, (h, w), mode="bilinear", align_corners=False) + result[0] = i_bbox + for part_id in range(1, u_bbox.size(1)): + result[1][i_bbox == part_id] = ( + (u_bbox[0, part_id][i_bbox == part_id] * 255).clamp(0, 255).to(torch.uint8) + ) + result[2][i_bbox == part_id] = ( + (v_bbox[0, part_id][i_bbox == part_id] * 255).clamp(0, 255).to(torch.uint8) + ) + assert ( + result.size(1) == h + ), "Results height {} should be equal" "to bounding box height {}".format(result.size(1), h) + assert ( + result.size(2) == w + ), "Results width {} should be equal" "to bounding box width {}".format(result.size(2), w) + return result + + @staticmethod + def encode_png_data(arr): + """ + Encode array data as a PNG image using the highest compression rate + @param arr [in] Data stored in an array of size (3, M, N) of type uint8 + @return Base64-encoded string containing PNG-compressed data + """ + assert len(arr.shape) == 3, "Expected a 3D array as an input," " got a {0}D array".format( + len(arr.shape) + ) + assert arr.shape[0] == 3, "Expected first array dimension of size 3," " got {0}".format( + arr.shape[0] + ) + assert arr.dtype == np.uint8, "Expected an array of type np.uint8, " " got {0}".format( + arr.dtype + ) + data = np.moveaxis(arr, 0, -1) + im = Image.fromarray(data) + fstream = BytesIO() + im.save(fstream, format="png", optimize=True) + s = base64.encodebytes(fstream.getvalue()).decode() + return s + + @staticmethod + def decode_png_data(shape, s): + """ + Decode array data from a string that contains PNG-compressed data + @param Base64-encoded string containing PNG-compressed data + @return Data stored in an array of size (3, M, N) of type uint8 + """ + fstream = BytesIO(base64.decodebytes(s.encode())) + im = Image.open(fstream) + data = np.moveaxis(np.array(im.getdata(), dtype=np.uint8), -1, 0) + return data.reshape(shape) + + def __len__(self): + return len(self.results) + + def __getitem__(self, item): + result_encoded = self.results[item] + bbox_xywh = self.boxes_xywh[item] + return result_encoded, bbox_xywh + + +class DensePoseList(object): + + _TORCH_DEVICE_CPU = torch.device("cpu") + + def __init__(self, densepose_datas, boxes_xyxy_abs, image_size_hw, device=_TORCH_DEVICE_CPU): + assert len(densepose_datas) == len(boxes_xyxy_abs), ( + "Attempt to initialize DensePoseList with {} DensePose datas " + "and {} boxes".format(len(densepose_datas), len(boxes_xyxy_abs)) + ) + self.densepose_datas = [] + for densepose_data in densepose_datas: + assert isinstance(densepose_data, DensePoseDataRelative) or densepose_data is None, ( + "Attempt to initialize DensePoseList with DensePose datas " + "of type {}, expected DensePoseDataRelative".format(type(densepose_data)) + ) + densepose_data_ondevice = ( + densepose_data.to(device) if densepose_data is not None else None + ) + self.densepose_datas.append(densepose_data_ondevice) + self.boxes_xyxy_abs = boxes_xyxy_abs.to(device) + self.image_size_hw = image_size_hw + self.device = device + + def to(self, device): + if self.device == device: + return self + return 
DensePoseList(self.densepose_datas, self.boxes_xyxy_abs, self.image_size_hw, device) + + def __iter__(self): + return iter(self.densepose_datas) + + def __len__(self): + return len(self.densepose_datas) + + def __repr__(self): + s = self.__class__.__name__ + "(" + s += "num_instances={}, ".format(len(self.densepose_datas)) + s += "image_width={}, ".format(self.image_size_hw[1]) + s += "image_height={})".format(self.image_size_hw[0]) + return s + + def __getitem__(self, item): + if isinstance(item, int): + densepose_data_rel = self.densepose_datas[item] + return densepose_data_rel + elif isinstance(item, slice): + densepose_datas_rel = self.densepose_datas[item] + boxes_xyxy_abs = self.boxes_xyxy_abs[item] + return DensePoseList( + densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device + ) + elif isinstance(item, torch.Tensor) and (item.dtype == torch.bool): + densepose_datas_rel = [self.densepose_datas[i] for i, x in enumerate(item) if x > 0] + boxes_xyxy_abs = self.boxes_xyxy_abs[item] + return DensePoseList( + densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device + ) + else: + densepose_datas_rel = [self.densepose_datas[i] for i in item] + boxes_xyxy_abs = self.boxes_xyxy_abs[item] + return DensePoseList( + densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device + ) diff --git a/projects/DensePose/densepose/utils/dbhelper.py b/projects/DensePose/densepose/utils/dbhelper.py new file mode 100644 index 0000000000..ef7e316cf3 --- /dev/null +++ b/projects/DensePose/densepose/utils/dbhelper.py @@ -0,0 +1,145 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from typing import Any, Dict, List, Optional, Tuple + + +class EntrySelector(object): + """ + Base class for entry selectors + """ + + @staticmethod + def from_string(spec: str) -> "EntrySelector": + if spec == "*": + return AllEntrySelector() + return FieldEntrySelector(spec) + + +class AllEntrySelector(EntrySelector): + """ + Selector that accepts all entries + """ + + SPECIFIER = "*" + + def __call__(self, entry): + return True + + +class FieldEntrySelector(EntrySelector): + """ + Selector that accepts only entries that match provided field + specifier(s). Only a limited set of specifiers is supported for now: + <specifiers>::=<specifier>[<comma><specifiers>] + <specifier>::=<field_name>[<type_delim><type>]<equal><value_or_range> + <field_name> is a valid identifier + <type>::= "int" | "str" + <equal>::= "=" + <comma>::= "," + <type_delim>::= ":" + <value_or_range>::= <value> | <range> + <range>::=<value><range_delim><value> + <range_delim>::= "-" + <value> is a string without spaces and special symbols + (e.g. <comma>, <equal>, <type_delim>, <range_delim>) + """ + + _SPEC_DELIM = "," + _TYPE_DELIM = ":" + _RANGE_DELIM = "-" + _EQUAL = "=" + _ERROR_PREFIX = "Invalid field selector specifier" + + class _FieldEntryValuePredicate(object): + """ + Predicate that checks strict equality for the specified entry field + """ + + def __init__(self, name: str, typespec: str, value: str): + import builtins + + self.name = name + self.type = getattr(builtins, typespec) if typespec is not None else str + self.value = value + + def __call__(self, entry): + return entry[self.name] == self.type(self.value) + + class _FieldEntryRangePredicate(object): + """ + Predicate that checks whether an entry field falls into the specified range + """ + + def __init__(self, name: str, typespec: str, vmin: str, vmax: str): + import builtins + + self.name = name + self.type = getattr(builtins, typespec) if typespec is not None else str + self.vmin = vmin + self.vmax = vmax + + def __call__(self, entry): + return (entry[self.name] >= self.type(self.vmin)) and ( + entry[self.name] <= self.type(self.vmax) + ) + + def __init__(self, spec: str): + self._predicates = self._parse_specifier_into_predicates(spec) + + def __call__(self, entry: Dict[str, Any]): + for predicate in self._predicates: + if not predicate(entry): + return False + return True + + def _parse_specifier_into_predicates(self, spec: str) -> List["_FieldEntryPredicate"]: + predicates = [] + specs = spec.split(self._SPEC_DELIM) + for subspec in specs: + eq_idx = subspec.find(self._EQUAL) + if eq_idx > 0: + field_name_with_type = subspec[:eq_idx] + field_name, field_type = self._parse_field_name_type(field_name_with_type) + field_value_or_range = subspec[eq_idx + 1 :] + if self._is_range_spec(field_value_or_range): + vmin, vmax = self._get_range_spec(field_value_or_range) + predicate = FieldEntrySelector._FieldEntryRangePredicate( + field_name, field_type, vmin, vmax + ) + else: + predicate = FieldEntrySelector._FieldEntryValuePredicate( + field_name, field_type, field_value_or_range + ) + predicates.append(predicate) + elif eq_idx == 0: + self._parse_error(f'"{subspec}", field name is empty!') + else: + self._parse_error(f'"{subspec}", should have format ' "<field_name>=<value_or_range>!") + return predicates + + def _parse_field_name_type(self, field_name_with_type: str) -> Tuple[str, Optional[str]]: + type_delim_idx = field_name_with_type.find(self._TYPE_DELIM) + if type_delim_idx > 0: + field_name = field_name_with_type[:type_delim_idx] + field_type = field_name_with_type[type_delim_idx + 1 :] + elif type_delim_idx == 0: + self._parse_error(f'"{field_name_with_type}", field name is empty!') + else: + field_name = field_name_with_type + field_type = None + return field_name, field_type + + def _is_range_spec(self, field_value_or_range): + delim_idx = field_value_or_range.find(self._RANGE_DELIM) + return delim_idx > 0 + + def _get_range_spec(self, field_value_or_range): + if self._is_range_spec(field_value_or_range): + delim_idx = field_value_or_range.find(self._RANGE_DELIM) + vmin = field_value_or_range[:delim_idx] + vmax = field_value_or_range[delim_idx + 1 :] + return vmin, vmax + else: + self._parse_error('"field_value_or_range", range of values expected!') + + def _parse_error(self, msg): + raise ValueError(f"{self._ERROR_PREFIX}: {msg}") diff --git a/projects/DensePose/densepose/utils/logger.py b/projects/DensePose/densepose/utils/logger.py new file mode 100644 index 0000000000..e3fa45e0c0 --- /dev/null +++ b/projects/DensePose/densepose/utils/logger.py @@ -0,0 +1,13 @@ +# Copyright (c) Facebook, Inc. and its affiliates.
All Rights Reserved +import logging + + +def verbosity_to_level(verbosity): + if verbosity is not None: + if verbosity == 0: + return logging.WARNING + elif verbosity == 1: + return logging.INFO + elif verbosity >= 2: + return logging.DEBUG + return logging.WARNING diff --git a/projects/DensePose/densepose/vis/base.py b/projects/DensePose/densepose/vis/base.py new file mode 100644 index 0000000000..b3f1ae5aff --- /dev/null +++ b/projects/DensePose/densepose/vis/base.py @@ -0,0 +1,190 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import numpy as np +import cv2 +import torch + +Image = np.ndarray +Boxes = torch.Tensor + + +class MatrixVisualizer(object): + """ + Base visualizer for matrix data + """ + + def __init__( + self, + inplace=True, + cmap=cv2.COLORMAP_PARULA, + val_scale=1.0, + alpha=0.7, + interp_method_matrix=cv2.INTER_LINEAR, + interp_method_mask=cv2.INTER_NEAREST, + ): + self.inplace = inplace + self.cmap = cmap + self.val_scale = val_scale + self.alpha = alpha + self.interp_method_matrix = interp_method_matrix + self.interp_method_mask = interp_method_mask + + def visualize(self, image_bgr, mask, matrix, bbox_xywh): + self._check_image(image_bgr) + self._check_mask_matrix(mask, matrix) + if self.inplace: + image_target_bgr = image_bgr + else: + image_target_bgr = image_bgr * 0 + x, y, w, h = [int(v) for v in bbox_xywh] + if w <= 0 or h <= 0: + return image_bgr + mask, matrix = self._resize(mask, matrix, w, h) + mask_bg = np.tile((mask == 0)[:, :, np.newaxis], [1, 1, 3]) + matrix_scaled = matrix.astype(np.float32) * self.val_scale + _EPSILON = 1e-6 + if np.any(matrix_scaled > 255 + _EPSILON): + logger = logging.getLogger(__name__) + logger.warning( + f"Matrix has values > {255 + _EPSILON} after " f"scaling, clipping to [0..255]" + ) + matrix_scaled_8u = matrix_scaled.clip(0, 255).astype(np.uint8) + matrix_vis = cv2.applyColorMap(matrix_scaled_8u, self.cmap) + matrix_vis[mask_bg] = image_target_bgr[y : y + h, x : x + w, :][mask_bg] + image_target_bgr[y : y + h, x : x + w, :] = ( + image_target_bgr[y : y + h, x : x + w, :] * (1.0 - self.alpha) + matrix_vis * self.alpha + ) + return image_target_bgr.astype(np.uint8) + + def _resize(self, mask, matrix, w, h): + if (w != mask.shape[1]) or (h != mask.shape[0]): + mask = cv2.resize(mask, (w, h), self.interp_method_mask) + if (w != matrix.shape[1]) or (h != matrix.shape[0]): + matrix = cv2.resize(matrix, (w, h), self.interp_method_matrix) + return mask, matrix + + def _check_image(self, image_rgb): + assert len(image_rgb.shape) == 3 + assert image_rgb.shape[2] == 3 + assert image_rgb.dtype == np.uint8 + + def _check_mask_matrix(self, mask, matrix): + assert len(matrix.shape) == 2 + assert len(mask.shape) == 2 + assert mask.dtype == np.uint8 + + +class RectangleVisualizer(object): + + _COLOR_GREEN = (18, 127, 15) + + def __init__(self, color=_COLOR_GREEN, thickness=1): + self.color = color + self.thickness = thickness + + def visualize(self, image_bgr, bbox_xywh, color=None, thickness=None): + x, y, w, h = bbox_xywh + color = color or self.color + thickness = thickness or self.thickness + cv2.rectangle(image_bgr, (int(x), int(y)), (int(x + w), int(y + h)), color, thickness) + return image_bgr + + +class PointsVisualizer(object): + + _COLOR_GREEN = (18, 127, 15) + + def __init__(self, color_bgr=_COLOR_GREEN, r=5): + self.color_bgr = color_bgr + self.r = r + + def visualize(self, image_bgr, pts_xy, colors_bgr=None, rs=None): + for j, pt_xy in enumerate(pts_xy): + x, y = pt_xy + color_bgr = 
colors_bgr[j] if colors_bgr is not None else self.color_bgr + r = rs[j] if rs is not None else self.r + cv2.circle(image_bgr, (x, y), r, color_bgr, -1) + return image_bgr + + +class TextVisualizer(object): + + _COLOR_GRAY = (218, 227, 218) + _COLOR_WHITE = (255, 255, 255) + + def __init__( + self, + font_face=cv2.FONT_HERSHEY_SIMPLEX, + font_color_bgr=_COLOR_GRAY, + font_scale=0.35, + font_line_type=cv2.LINE_AA, + font_line_thickness=1, + fill_color_bgr=_COLOR_WHITE, + fill_color_transparency=1.0, + frame_color_bgr=_COLOR_WHITE, + frame_color_transparency=1.0, + frame_thickness=1, + ): + self.font_face = font_face + self.font_color_bgr = font_color_bgr + self.font_scale = font_scale + self.font_line_type = font_line_type + self.font_line_thickness = font_line_thickness + self.fill_color_bgr = fill_color_bgr + self.fill_color_transparency = fill_color_transparency + self.frame_color_bgr = frame_color_bgr + self.frame_color_transparency = frame_color_transparency + self.frame_thickness = frame_thickness + + def visualize(self, image_bgr, txt, topleft_xy): + txt_w, txt_h = self.get_text_size_wh(txt) + topleft_xy = tuple(map(int, topleft_xy)) + x, y = topleft_xy + if self.frame_color_transparency < 1.0: + t = self.frame_thickness + image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] = ( + image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] + * self.frame_color_transparency + + np.array(self.frame_color_bgr) * (1.0 - self.frame_color_transparency) + ).astype(np.float) + if self.fill_color_transparency < 1.0: + image_bgr[y : y + txt_h, x : x + txt_w, :] = ( + image_bgr[y : y + txt_h, x : x + txt_w, :] * self.fill_color_transparency + + np.array(self.fill_color_bgr) * (1.0 - self.fill_color_transparency) + ).astype(np.float) + cv2.putText( + image_bgr, + txt, + topleft_xy, + self.font_face, + self.font_scale, + self.font_color_bgr, + self.font_line_thickness, + self.font_line_type, + ) + return image_bgr + + def get_text_size_wh(self, txt): + ((txt_w, txt_h), _) = cv2.getTextSize( + txt, self.font_face, self.font_scale, self.font_line_thickness + ) + return txt_w, txt_h + + +class CompoundVisualizer(object): + def __init__(self, visualizers): + self.visualizers = visualizers + + def visualize(self, image_bgr, data): + assert len(data) == len(self.visualizers), ( + "The number of datas {} should match the number of visualizers" + " {}".format(len(data), len(self.visualizers)) + ) + image = image_bgr + for i, visualizer in enumerate(self.visualizers): + image = visualizer.visualize(image, data[i]) + return image + + def __str__(self): + visualizer_str = ", ".join([str(v) for v in self.visualizers]) + return "Compound Visualizer [{}]".format(visualizer_str) diff --git a/projects/DensePose/densepose/vis/bounding_box.py b/projects/DensePose/densepose/vis/bounding_box.py new file mode 100644 index 0000000000..7fc6efd119 --- /dev/null +++ b/projects/DensePose/densepose/vis/bounding_box.py @@ -0,0 +1,36 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from .base import RectangleVisualizer, TextVisualizer + + +class BoundingBoxVisualizer(object): + def __init__(self): + self.rectangle_visualizer = RectangleVisualizer() + + def visualize(self, image_bgr, boxes_xywh): + for bbox_xywh in boxes_xywh: + image_bgr = self.rectangle_visualizer.visualize(image_bgr, bbox_xywh) + return image_bgr + + +class ScoredBoundingBoxVisualizer(object): + def __init__(self, bbox_visualizer_params=None, score_visualizer_params=None): + if bbox_visualizer_params is None: + bbox_visualizer_params = {} + if score_visualizer_params is None: + score_visualizer_params = {} + self.visualizer_bbox = RectangleVisualizer(**bbox_visualizer_params) + self.visualizer_score = TextVisualizer(**score_visualizer_params) + + def visualize(self, image_bgr, scored_bboxes): + boxes_xywh, box_scores = scored_bboxes + assert len(boxes_xywh) == len(box_scores), ( + "Number of bounding boxes {} should be equal to the number of " + "scores".format(len(boxes_xywh), len(box_scores)) + ) + for i, box_xywh in enumerate(boxes_xywh): + score_i = box_scores[i] + image_bgr = self.visualizer_bbox.visualize(image_bgr, box_xywh) + score_txt = "{0:6.4f}".format(score_i) + topleft_xy = box_xywh[0], box_xywh[1] + image_bgr = self.visualizer_score.visualize(image_bgr, score_txt, topleft_xy) + return image_bgr diff --git a/projects/DensePose/densepose/vis/densepose.py b/projects/DensePose/densepose/vis/densepose.py new file mode 100644 index 0000000000..ba561cac55 --- /dev/null +++ b/projects/DensePose/densepose/vis/densepose.py @@ -0,0 +1,581 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import numpy as np +from typing import Iterable, Optional, Tuple +import cv2 + +from ..structures import DensePoseDataRelative, DensePoseOutput, DensePoseResult +from .base import Boxes, Image, MatrixVisualizer, PointsVisualizer + + +class DensePoseResultsVisualizer(object): + def visualize(self, image_bgr: Image, densepose_result: Optional[DensePoseResult]) -> Image: + if densepose_result is None: + return image_bgr + context = self.create_visualization_context(image_bgr) + for i, result_encoded_w_shape in enumerate(densepose_result.results): + iuv_arr = DensePoseResult.decode_png_data(*result_encoded_w_shape) + bbox_xywh = densepose_result.boxes_xywh[i] + self.visualize_iuv_arr(context, iuv_arr, bbox_xywh) + image_bgr = self.context_to_image_bgr(context) + return image_bgr + + +class DensePoseMaskedColormapResultsVisualizer(DensePoseResultsVisualizer): + def __init__( + self, + data_extractor, + segm_extractor, + inplace=True, + cmap=cv2.COLORMAP_PARULA, + alpha=0.7, + val_scale=1.0, + ): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha + ) + self.data_extractor = data_extractor + self.segm_extractor = segm_extractor + + def create_visualization_context(self, image_bgr: Image): + return image_bgr + + def context_to_image_bgr(self, context): + return context + + def get_image_bgr_from_context(self, context): + return context + + def visualize_iuv_arr(self, context, iuv_arr, bbox_xywh): + image_bgr = self.get_image_bgr_from_context(context) + matrix = self.data_extractor(iuv_arr) + segm = self.segm_extractor(iuv_arr) + mask = np.zeros(matrix.shape, dtype=np.uint8) + mask[segm > 0] = 1 + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh) + return image_bgr + + +def _extract_i_from_iuvarr(iuv_arr): + return iuv_arr[0, :, :] + + +def _extract_u_from_iuvarr(iuv_arr): + 
return iuv_arr[1, :, :] + + +def _extract_v_from_iuvarr(iuv_arr): + return iuv_arr[2, :, :] + + +class DensePoseResultsMplContourVisualizer(DensePoseResultsVisualizer): + def __init__(self, levels=10, **kwargs): + self.levels = levels + self.plot_args = kwargs + + def create_visualization_context(self, image_bgr: Image): + import matplotlib.pyplot as plt + from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas + + context = {} + context["image_bgr"] = image_bgr + dpi = 100 + height_inches = float(image_bgr.shape[0]) / dpi + width_inches = float(image_bgr.shape[1]) / dpi + fig = plt.figure(figsize=(width_inches, height_inches), dpi=dpi) + plt.axes([0, 0, 1, 1]) + plt.axis("off") + context["fig"] = fig + canvas = FigureCanvas(fig) + context["canvas"] = canvas + extent = (0, image_bgr.shape[1], image_bgr.shape[0], 0) + plt.imshow(image_bgr[:, :, ::-1], extent=extent) + return context + + def context_to_image_bgr(self, context): + fig = context["fig"] + w, h = map(int, fig.get_size_inches() * fig.get_dpi()) + canvas = context["canvas"] + canvas.draw() + image_1d = np.fromstring(canvas.tostring_rgb(), dtype="uint8") + image_rgb = image_1d.reshape(h, w, 3) + image_bgr = image_rgb[:, :, ::-1].copy() + return image_bgr + + def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> Image: + import matplotlib.pyplot as plt + + u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0 + v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0 + extent = ( + bbox_xywh[0], + bbox_xywh[0] + bbox_xywh[2], + bbox_xywh[1], + bbox_xywh[1] + bbox_xywh[3], + ) + plt.contour(u, self.levels, extent=extent, **self.plot_args) + plt.contour(v, self.levels, extent=extent, **self.plot_args) + + +class DensePoseResultsCustomContourVisualizer(DensePoseResultsVisualizer): + """ + Contour visualization using marching squares + """ + + def __init__(self, levels=10, **kwargs): + # TODO: colormap is hardcoded + cmap = cv2.COLORMAP_PARULA + if isinstance(levels, int): + self.levels = np.linspace(0, 1, levels) + else: + self.levels = levels + if "linewidths" in kwargs: + self.linewidths = kwargs["linewidths"] + else: + self.linewidths = [1] * len(self.levels) + self.plot_args = kwargs + img_colors_bgr = cv2.applyColorMap((self.levels * 255).astype(np.uint8), cmap) + self.level_colors_bgr = [ + [int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr + ] + + def create_visualization_context(self, image_bgr: Image): + return image_bgr + + def context_to_image_bgr(self, context): + return context + + def get_image_bgr_from_context(self, context): + return context + + def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> Image: + image_bgr = self.get_image_bgr_from_context(context) + segm = _extract_i_from_iuvarr(iuv_arr) + u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0 + v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0 + self._contours(image_bgr, u, segm, bbox_xywh) + self._contours(image_bgr, v, segm, bbox_xywh) + + def _contours(self, image_bgr, arr, segm, bbox_xywh): + for part_idx in range(1, DensePoseDataRelative.N_PART_LABELS + 1): + mask = segm == part_idx + if not np.any(mask): + continue + arr_min = np.amin(arr[mask]) + arr_max = np.amax(arr[mask]) + I, J = np.nonzero(mask) + i0 = np.amin(I) + i1 = np.amax(I) + 1 + j0 = np.amin(J) + j1 = np.amax(J) + 1 + if (j1 == j0 + 1) or (i1 == i0 + 1): + continue + Nw = arr.shape[1] - 1 + Nh = arr.shape[0] - 1 + for level_idx, level in enumerate(self.levels): + if (level < 
arr_min) or (level > arr_max): + continue + vp = arr[i0:i1, j0:j1] >= level + bin_codes = vp[:-1, :-1] + vp[1:, :-1] * 2 + vp[1:, 1:] * 4 + vp[:-1, 1:] * 8 + mp = mask[i0:i1, j0:j1] + bin_mask_codes = mp[:-1, :-1] + mp[1:, :-1] * 2 + mp[1:, 1:] * 4 + mp[:-1, 1:] * 8 + it = np.nditer(bin_codes, flags=["multi_index"]) + color_bgr = self.level_colors_bgr[level_idx] + linewidth = self.linewidths[level_idx] + while not it.finished: + if (it[0] != 0) and (it[0] != 15): + i, j = it.multi_index + if bin_mask_codes[i, j] != 0: + self._draw_line( + image_bgr, + arr, + mask, + level, + color_bgr, + linewidth, + it[0], + it.multi_index, + bbox_xywh, + Nw, + Nh, + (i0, j0), + ) + it.iternext() + + def _draw_line( + self, + image_bgr, + arr, + mask, + v, + color_bgr, + linewidth, + bin_code, + multi_idx, + bbox_xywh, + Nw, + Nh, + offset, + ): + lines = self._bin_code_2_lines(arr, v, bin_code, multi_idx, Nw, Nh, offset) + x0, y0, w, h = bbox_xywh + x1 = x0 + w + y1 = y0 + h + for line in lines: + x0r, y0r = line[0] + x1r, y1r = line[1] + pt0 = (int(x0 + x0r * (x1 - x0)), int(y0 + y0r * (y1 - y0))) + pt1 = (int(x0 + x1r * (x1 - x0)), int(y0 + y1r * (y1 - y0))) + cv2.line(image_bgr, pt0, pt1, color_bgr, linewidth) + + def _bin_code_2_lines(self, arr, v, bin_code, multi_idx, Nw, Nh, offset): + i0, j0 = offset + i, j = multi_idx + i += i0 + j += j0 + v0, v1, v2, v3 = arr[i, j], arr[i + 1, j], arr[i + 1, j + 1], arr[i, j + 1] + x0i = float(j) / Nw + y0j = float(i) / Nh + He = 1.0 / Nh + We = 1.0 / Nw + if (bin_code == 1) or (bin_code == 14): + a = (v - v0) / (v1 - v0) + b = (v - v0) / (v3 - v0) + pt1 = (x0i, y0j + a * He) + pt2 = (x0i + b * We, y0j) + return [(pt1, pt2)] + elif (bin_code == 2) or (bin_code == 13): + a = (v - v0) / (v1 - v0) + b = (v - v1) / (v2 - v1) + pt1 = (x0i, y0j + a * He) + pt2 = (x0i + b * We, y0j + He) + return [(pt1, pt2)] + elif (bin_code == 3) or (bin_code == 12): + a = (v - v0) / (v3 - v0) + b = (v - v1) / (v2 - v1) + pt1 = (x0i + a * We, y0j) + pt2 = (x0i + b * We, y0j + He) + return [(pt1, pt2)] + elif (bin_code == 4) or (bin_code == 11): + a = (v - v1) / (v2 - v1) + b = (v - v3) / (v2 - v3) + pt1 = (x0i + a * We, y0j + He) + pt2 = (x0i + We, y0j + b * He) + return [(pt1, pt2)] + elif (bin_code == 6) or (bin_code == 9): + a = (v - v0) / (v1 - v0) + b = (v - v3) / (v2 - v3) + pt1 = (x0i, y0j + a * He) + pt2 = (x0i + We, y0j + b * He) + return [(pt1, pt2)] + elif (bin_code == 7) or (bin_code == 8): + a = (v - v0) / (v3 - v0) + b = (v - v3) / (v2 - v3) + pt1 = (x0i + a * We, y0j) + pt2 = (x0i + We, y0j + b * He) + return [(pt1, pt2)] + elif bin_code == 5: + a1 = (v - v0) / (v1 - v0) + b1 = (v - v1) / (v2 - v1) + pt11 = (x0i, y0j + a1 * He) + pt12 = (x0i + b1 * We, y0j + He) + a2 = (v - v0) / (v3 - v0) + b2 = (v - v3) / (v2 - v3) + pt21 = (x0i + a2 * We, y0j) + pt22 = (x0i + We, y0j + b2 * He) + return [(pt11, pt12), (pt21, pt22)] + elif bin_code == 10: + a1 = (v - v0) / (v3 - v0) + b1 = (v - v0) / (v1 - v0) + pt11 = (x0i + a1 * We, y0j) + pt12 = (x0i, y0j + b1 * He) + a2 = (v - v1) / (v2 - v1) + b2 = (v - v3) / (v2 - v3) + pt21 = (x0i + a2 * We, y0j + He) + pt22 = (x0i + We, y0j + b2 * He) + return [(pt11, pt12), (pt21, pt22)] + return [] + + +try: + import matplotlib + + matplotlib.use("Agg") + DensePoseResultsContourVisualizer = DensePoseResultsMplContourVisualizer +except ModuleNotFoundError: + logger = logging.getLogger(__name__) + logger.warning("Could not import matplotlib, using custom contour visualizer") + DensePoseResultsContourVisualizer = 
DensePoseResultsCustomContourVisualizer + + +class DensePoseResultsFineSegmentationVisualizer(DensePoseMaskedColormapResultsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + super(DensePoseResultsFineSegmentationVisualizer, self).__init__( + _extract_i_from_iuvarr, + _extract_i_from_iuvarr, + inplace, + cmap, + alpha, + val_scale=255.0 / DensePoseDataRelative.N_PART_LABELS, + ) + + +class DensePoseResultsUVisualizer(DensePoseMaskedColormapResultsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + super(DensePoseResultsUVisualizer, self).__init__( + _extract_u_from_iuvarr, _extract_i_from_iuvarr, inplace, cmap, alpha, val_scale=1.0 + ) + + +class DensePoseResultsVVisualizer(DensePoseMaskedColormapResultsVisualizer): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + super(DensePoseResultsVVisualizer, self).__init__( + _extract_v_from_iuvarr, _extract_i_from_iuvarr, inplace, cmap, alpha, val_scale=1.0 + ) + + +class DensePoseOutputsFineSegmentationVisualizer(object): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, + cmap=cmap, + val_scale=255.0 / DensePoseDataRelative.N_PART_LABELS, + alpha=alpha, + ) + + def visualize( + self, image_bgr: Image, dp_output_with_bboxes: Optional[Tuple[DensePoseOutput, Boxes]] + ) -> Image: + if dp_output_with_bboxes is None: + return image_bgr + densepose_output, bboxes_xywh = dp_output_with_bboxes + S = densepose_output.S + I = densepose_output.I # noqa + U = densepose_output.U + V = densepose_output.V + N = S.size(0) + assert N == I.size(0), ( + "densepose outputs S {} and I {}" + " should have equal first dim size".format(S.size(), I.size()) + ) + assert N == U.size(0), ( + "densepose outputs S {} and U {}" + " should have equal first dim size".format(S.size(), U.size()) + ) + assert N == V.size(0), ( + "densepose outputs S {} and V {}" + " should have equal first dim size".format(S.size(), V.size()) + ) + assert N == len(bboxes_xywh), ( + "number of bounding boxes {}" + " should be equal to first dim size of outputs {}".format(len(bboxes_xywh), N) + ) + for n in range(N): + Sn = S[n].argmax(dim=0) + In = I[n].argmax(dim=0) * (Sn > 0).long() + matrix = In.cpu().numpy().astype(np.uint8) + mask = np.zeros(matrix.shape, dtype=np.uint8) + mask[matrix > 0] = 1 + bbox_xywh = bboxes_xywh[n] + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh) + return image_bgr + + +class DensePoseOutputsUVisualizer(object): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha + ) + + def visualize( + self, image_bgr: Image, dp_output_with_bboxes: Optional[Tuple[DensePoseOutput, Boxes]] + ) -> Image: + if dp_output_with_bboxes is None: + return image_bgr + densepose_output, bboxes_xywh = dp_output_with_bboxes + assert isinstance( + densepose_output, DensePoseOutput + ), "DensePoseOutput expected, {} encountered".format(type(densepose_output)) + S = densepose_output.S + I = densepose_output.I # noqa + U = densepose_output.U + V = densepose_output.V + N = S.size(0) + assert N == I.size(0), ( + "densepose outputs S {} and I {}" + " should have equal first dim size".format(S.size(), I.size()) + ) + assert N == U.size(0), ( + "densepose outputs S {} and U {}" + " should have equal first dim size".format(S.size(), U.size()) + ) + assert N == V.size(0), ( + 
"densepose outputs S {} and V {}" + " should have equal first dim size".format(S.size(), V.size()) + ) + assert N == len(bboxes_xywh), ( + "number of bounding boxes {}" + " should be equal to first dim size of outputs {}".format(len(bboxes_xywh), N) + ) + for n in range(N): + Sn = S[n].argmax(dim=0) + In = I[n].argmax(dim=0) * (Sn > 0).long() + segmentation = In.cpu().numpy().astype(np.uint8) + mask = np.zeros(segmentation.shape, dtype=np.uint8) + mask[segmentation > 0] = 1 + Un = U[n].cpu().numpy().astype(np.float32) + Uvis = np.zeros(segmentation.shape, dtype=np.float32) + for partId in range(Un.shape[0]): + Uvis[segmentation == partId] = Un[partId][segmentation == partId].clip(0, 1) * 255 + bbox_xywh = bboxes_xywh[n] + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, Uvis, bbox_xywh) + return image_bgr + + +class DensePoseOutputsVVisualizer(object): + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha + ) + + def visualize( + self, image_bgr: Image, dp_output_with_bboxes: Optional[Tuple[DensePoseOutput, Boxes]] + ) -> Image: + if dp_output_with_bboxes is None: + return image_bgr + densepose_output, bboxes_xywh = dp_output_with_bboxes + assert isinstance( + densepose_output, DensePoseOutput + ), "DensePoseOutput expected, {} encountered".format(type(densepose_output)) + S = densepose_output.S + I = densepose_output.I # noqa + U = densepose_output.U + V = densepose_output.V + N = S.size(0) + assert N == I.size(0), ( + "densepose outputs S {} and I {}" + " should have equal first dim size".format(S.size(), I.size()) + ) + assert N == U.size(0), ( + "densepose outputs S {} and U {}" + " should have equal first dim size".format(S.size(), U.size()) + ) + assert N == V.size(0), ( + "densepose outputs S {} and V {}" + " should have equal first dim size".format(S.size(), V.size()) + ) + assert N == len(bboxes_xywh), ( + "number of bounding boxes {}" + " should be equal to first dim size of outputs {}".format(len(bboxes_xywh), N) + ) + for n in range(N): + Sn = S[n].argmax(dim=0) + In = I[n].argmax(dim=0) * (Sn > 0).long() + segmentation = In.cpu().numpy().astype(np.uint8) + mask = np.zeros(segmentation.shape, dtype=np.uint8) + mask[segmentation > 0] = 1 + Vn = V[n].cpu().numpy().astype(np.float32) + Vvis = np.zeros(segmentation.shape, dtype=np.float32) + for partId in range(Vn.size(0)): + Vvis[segmentation == partId] = Vn[partId][segmentation == partId].clip(0, 1) * 255 + bbox_xywh = bboxes_xywh[n] + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, Vvis, bbox_xywh) + return image_bgr + + +class DensePoseDataCoarseSegmentationVisualizer(object): + """ + Visualizer for ground truth segmentation + """ + + def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7): + self.mask_visualizer = MatrixVisualizer( + inplace=inplace, + cmap=cmap, + val_scale=255.0 / DensePoseDataRelative.N_BODY_PARTS, + alpha=alpha, + ) + + def visualize( + self, + image_bgr: Image, + bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]], + ) -> Image: + if bbox_densepose_datas is None: + return image_bgr + for bbox_xywh, densepose_data in zip(*bbox_densepose_datas): + matrix = densepose_data.segm.numpy() + mask = np.zeros(matrix.shape, dtype=np.uint8) + mask[matrix > 0] = 1 + image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh.numpy()) + return image_bgr + + +class DensePoseDataPointsVisualizer(object): + def 
+
+
+class DensePoseOutputsVVisualizer(object):
+    def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7):
+        self.mask_visualizer = MatrixVisualizer(
+            inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha
+        )
+
+    def visualize(
+        self, image_bgr: Image, dp_output_with_bboxes: Optional[Tuple[DensePoseOutput, Boxes]]
+    ) -> Image:
+        if dp_output_with_bboxes is None:
+            return image_bgr
+        densepose_output, bboxes_xywh = dp_output_with_bboxes
+        assert isinstance(
+            densepose_output, DensePoseOutput
+        ), "DensePoseOutput expected, {} encountered".format(type(densepose_output))
+        S = densepose_output.S
+        I = densepose_output.I  # noqa
+        U = densepose_output.U
+        V = densepose_output.V
+        N = S.size(0)
+        assert N == I.size(0), (
+            "densepose outputs S {} and I {}"
+            " should have equal first dim size".format(S.size(), I.size())
+        )
+        assert N == U.size(0), (
+            "densepose outputs S {} and U {}"
+            " should have equal first dim size".format(S.size(), U.size())
+        )
+        assert N == V.size(0), (
+            "densepose outputs S {} and V {}"
+            " should have equal first dim size".format(S.size(), V.size())
+        )
+        assert N == len(bboxes_xywh), (
+            "number of bounding boxes {}"
+            " should be equal to first dim size of outputs {}".format(len(bboxes_xywh), N)
+        )
+        for n in range(N):
+            Sn = S[n].argmax(dim=0)
+            In = I[n].argmax(dim=0) * (Sn > 0).long()
+            segmentation = In.cpu().numpy().astype(np.uint8)
+            mask = np.zeros(segmentation.shape, dtype=np.uint8)
+            mask[segmentation > 0] = 1
+            Vn = V[n].cpu().numpy().astype(np.float32)
+            Vvis = np.zeros(segmentation.shape, dtype=np.float32)
+            for partId in range(Vn.shape[0]):
+                Vvis[segmentation == partId] = Vn[partId][segmentation == partId].clip(0, 1) * 255
+            bbox_xywh = bboxes_xywh[n]
+            image_bgr = self.mask_visualizer.visualize(image_bgr, mask, Vvis, bbox_xywh)
+        return image_bgr
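# --- illustrative sketch (not part of the patch) -------------------------------
# The DensePoseOutputs*Visualizer classes above all reduce the raw head outputs
# to a per-pixel part index map: S gates foreground vs. background and I scores
# the body parts. A tiny synthetic example of that idiom for a single instance;
# the channel counts below are made up for illustration only.
import torch

S_fake = torch.randn(2, 4, 4)                # coarse segmentation scores (bg, fg)
I_fake = torch.randn(25, 4, 4)               # fine segmentation scores, one channel per label
Sn = S_fake.argmax(dim=0)                    # (Sn > 0) marks foreground pixels
In = I_fake.argmax(dim=0) * (Sn > 0).long()  # part index, forced to 0 on background
# `In` plays the role of the `segmentation`/`matrix` array handed to MatrixVisualizer.
# --------------------------------------------------------------------------------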
+
+
+class DensePoseDataCoarseSegmentationVisualizer(object):
+    """
+    Visualizer for ground truth segmentation
+    """
+
+    def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7):
+        self.mask_visualizer = MatrixVisualizer(
+            inplace=inplace,
+            cmap=cmap,
+            val_scale=255.0 / DensePoseDataRelative.N_BODY_PARTS,
+            alpha=alpha,
+        )
+
+    def visualize(
+        self,
+        image_bgr: Image,
+        bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]],
+    ) -> Image:
+        if bbox_densepose_datas is None:
+            return image_bgr
+        for bbox_xywh, densepose_data in zip(*bbox_densepose_datas):
+            matrix = densepose_data.segm.numpy()
+            mask = np.zeros(matrix.shape, dtype=np.uint8)
+            mask[matrix > 0] = 1
+            image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh.numpy())
+        return image_bgr
+
+
+class DensePoseDataPointsVisualizer(object):
+    def __init__(self, densepose_data_to_value_fn=None, cmap=cv2.COLORMAP_PARULA):
+        self.points_visualizer = PointsVisualizer()
+        self.densepose_data_to_value_fn = densepose_data_to_value_fn
+        self.cmap = cmap
+
+    def visualize(
+        self,
+        image_bgr: Image,
+        bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]],
+    ) -> Image:
+        if bbox_densepose_datas is None:
+            return image_bgr
+        for bbox_xywh, densepose_data in zip(*bbox_densepose_datas):
+            x0, y0, w, h = bbox_xywh.numpy()
+            x = densepose_data.x.numpy() * w / 255.0 + x0
+            y = densepose_data.y.numpy() * h / 255.0 + y0
+            pts_xy = zip(x, y)
+            if self.densepose_data_to_value_fn is None:
+                image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy)
+            else:
+                v = self.densepose_data_to_value_fn(densepose_data)
+                img_colors_bgr = cv2.applyColorMap(v, self.cmap)
+                colors_bgr = [
+                    [int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr
+                ]
+                image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy, colors_bgr)
+        return image_bgr
+
+
+def _densepose_data_u_for_cmap(densepose_data):
+    u = np.clip(densepose_data.u.numpy(), 0, 1) * 255.0
+    return u.astype(np.uint8)
+
+
+def _densepose_data_v_for_cmap(densepose_data):
+    v = np.clip(densepose_data.v.numpy(), 0, 1) * 255.0
+    return v.astype(np.uint8)
+
+
+def _densepose_data_i_for_cmap(densepose_data):
+    i = (
+        np.clip(densepose_data.i.numpy(), 0.0, DensePoseDataRelative.N_PART_LABELS)
+        * 255.0
+        / DensePoseDataRelative.N_PART_LABELS
+    )
+    return i.astype(np.uint8)
+
+
+class DensePoseDataPointsUVisualizer(DensePoseDataPointsVisualizer):
+    def __init__(self):
+        super(DensePoseDataPointsUVisualizer, self).__init__(
+            densepose_data_to_value_fn=_densepose_data_u_for_cmap
+        )
+
+
+class DensePoseDataPointsVVisualizer(DensePoseDataPointsVisualizer):
+    def __init__(self):
+        super(DensePoseDataPointsVVisualizer, self).__init__(
+            densepose_data_to_value_fn=_densepose_data_v_for_cmap
+        )
+
+
+class DensePoseDataPointsIVisualizer(DensePoseDataPointsVisualizer):
+    def __init__(self):
+        super(DensePoseDataPointsIVisualizer, self).__init__(
+            densepose_data_to_value_fn=_densepose_data_i_for_cmap
+        )
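The `_densepose_data_*_for_cmap` helpers above share one convention: annotated values are clipped and rescaled to `uint8` in `[0, 255]` before `cv2.applyColorMap` turns them into BGR colors. A minimal, self-contained sketch of that mapping (the input values are made up for illustration):

```python
import cv2
import numpy as np

# Stand-in for densepose_data.u: per-point U values in [0, 1].
u_values = np.array([0.0, 0.25, 0.5, 1.0], dtype=np.float32)

# Same scaling as _densepose_data_u_for_cmap: clip to [0, 1], rescale to uint8.
u_uint8 = (np.clip(u_values, 0, 1) * 255.0).astype(np.uint8)

# applyColorMap maps each uint8 value to a BGR triple; this mirrors how
# DensePoseDataPointsVisualizer colors the annotated points.
colors = cv2.applyColorMap(u_uint8, cv2.COLORMAP_PARULA)
colors_bgr = [[int(c) for c in color.ravel()] for color in colors]
print(colors_bgr)
```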
diff --git a/projects/DensePose/densepose/vis/extractor.py b/projects/DensePose/densepose/vis/extractor.py
new file mode 100644
index 0000000000..b715a4451e
--- /dev/null
+++ b/projects/DensePose/densepose/vis/extractor.py
@@ -0,0 +1,152 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import logging
+from typing import Sequence
+import torch
+
+from detectron2.layers.nms import batched_nms
+from detectron2.structures.instances import Instances
+
+from densepose.vis.bounding_box import BoundingBoxVisualizer, ScoredBoundingBoxVisualizer
+from densepose.vis.densepose import DensePoseResultsVisualizer
+
+from .base import CompoundVisualizer
+
+Scores = Sequence[float]
+
+
+def extract_scores_from_instances(instances: Instances, select=None):
+    if instances.has("scores"):
+        return instances.scores if select is None else instances.scores[select]
+    return None
+
+
+def extract_boxes_xywh_from_instances(instances: Instances, select=None):
+    if instances.has("pred_boxes"):
+        boxes_xywh = instances.pred_boxes.tensor.clone()
+        boxes_xywh[:, 2] -= boxes_xywh[:, 0]
+        boxes_xywh[:, 3] -= boxes_xywh[:, 1]
+        return boxes_xywh if select is None else boxes_xywh[select]
+    return None
+
+
+def create_extractor(visualizer: object):
+    """
+    Create an extractor for the provided visualizer
+    """
+    if isinstance(visualizer, CompoundVisualizer):
+        extractors = [create_extractor(v) for v in visualizer.visualizers]
+        return CompoundExtractor(extractors)
+    elif isinstance(visualizer, DensePoseResultsVisualizer):
+        return DensePoseResultExtractor()
+    elif isinstance(visualizer, ScoredBoundingBoxVisualizer):
+        return CompoundExtractor([extract_boxes_xywh_from_instances, extract_scores_from_instances])
+    elif isinstance(visualizer, BoundingBoxVisualizer):
+        return extract_boxes_xywh_from_instances
+    else:
+        logger = logging.getLogger(__name__)
+        logger.error(f"Could not create extractor for {visualizer}")
+        return None
+
+
+class BoundingBoxExtractor(object):
+    """
+    Extracts bounding boxes from instances
+    """
+
+    def __call__(self, instances: Instances):
+        boxes_xywh = extract_boxes_xywh_from_instances(instances)
+        return boxes_xywh
+
+
+class ScoredBoundingBoxExtractor(object):
+    """
+    Extracts bounding boxes from instances
+    """
+
+    def __call__(self, instances: Instances, select=None):
+        scores = extract_scores_from_instances(instances)
+        boxes_xywh = extract_boxes_xywh_from_instances(instances)
+        if (scores is None) or (boxes_xywh is None):
+            return (boxes_xywh, scores)
+        if select is not None:
+            scores = scores[select]
+            boxes_xywh = boxes_xywh[select]
+        return (boxes_xywh, scores)
+
+
+class DensePoseResultExtractor(object):
+    """
+    Extracts DensePose result from instances
+    """
+
+    def __call__(self, instances: Instances, select=None):
+        boxes_xywh = extract_boxes_xywh_from_instances(instances)
+        if instances.has("pred_densepose") and (boxes_xywh is not None):
+            dpout = instances.pred_densepose
+            if select is not None:
+                dpout = dpout[select]
+                boxes_xywh = boxes_xywh[select]
+            return dpout.to_result(boxes_xywh)
+        else:
+            return None
+
+
+class CompoundExtractor(object):
+    """
+    Extracts data for CompoundVisualizer
+    """
+
+    def __init__(self, extractors):
+        self.extractors = extractors
+
+    def __call__(self, instances: Instances, select=None):
+        datas = []
+        for extractor in self.extractors:
+            data = extractor(instances, select)
+            datas.append(data)
+        return datas
+
+
+class NmsFilteredExtractor(object):
+    """
+    Extracts data in the format accepted by NmsFilteredVisualizer
+    """
+
+    def __init__(self, extractor, iou_threshold):
+        self.extractor = extractor
+        self.iou_threshold = iou_threshold
+
+    def __call__(self, instances: Instances, select=None):
+        scores = extract_scores_from_instances(instances)
+        boxes_xywh = extract_boxes_xywh_from_instances(instances)
+        if boxes_xywh is None:
+            return None
+        select_local_idx = batched_nms(
+            boxes_xywh,
+            scores,
+            torch.zeros(len(scores), dtype=torch.int32),
+            iou_threshold=self.iou_threshold,
+        ).squeeze()
+        select_local = torch.zeros(len(boxes_xywh), dtype=torch.bool, device=boxes_xywh.device)
+        select_local[select_local_idx] = True
+        select = select_local if select is None else (select & select_local)
+        return self.extractor(instances, select=select)
+
+
+class ScoreThresholdedExtractor(object):
+    """
+    Extracts data in the format accepted by ScoreThresholdedVisualizer
+    """
+
+    def __init__(self, extractor, min_score):
+        self.extractor = extractor
+        self.min_score = min_score
+
+    def __call__(self, instances: Instances, select=None):
+        scores = extract_scores_from_instances(instances)
+        if scores is None:
+            return None
+        select_local = scores > self.min_score
+        select = select_local if select is None else (select & select_local)
+        data = self.extractor(instances, select=select)
+        return data
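The extractors above are designed to be paired with the visualizers from `densepose.vis`: `create_extractor` picks, for a given visualizer, an extractor that produces exactly the data that visualizer consumes. The sketch below shows one plausible wiring; the `CompoundVisualizer`/`ScoredBoundingBoxVisualizer` constructor signatures and the `predictor` argument (e.g. a detectron2 `DefaultPredictor`) are assumptions here, not something this file defines.

```python
import cv2

from densepose.vis.base import CompoundVisualizer
from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
from densepose.vis.densepose import DensePoseResultsFineSegmentationVisualizer
from densepose.vis.extractor import create_extractor


def visualize_image(predictor, image_path="image.jpg", out_path="output.png"):
    # Assumed: CompoundVisualizer wraps a list of visualizers (it exposes a
    # `visualizers` attribute, which create_extractor iterates over).
    visualizer = CompoundVisualizer(
        [ScoredBoundingBoxVisualizer(), DensePoseResultsFineSegmentationVisualizer()]
    )
    extractor = create_extractor(visualizer)  # -> CompoundExtractor for the two visualizers

    image_bgr = cv2.imread(image_path)
    instances = predictor(image_bgr)["instances"]  # assumed predictor output format
    data = extractor(instances)                    # one data item per wrapped visualizer
    return cv2.imwrite(out_path, visualizer.visualize(image_bgr, data))
```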
diff --git a/projects/DensePose/doc/TOOL_APPLY_NET.md b/projects/DensePose/doc/TOOL_APPLY_NET.md
new file mode 100644
index 0000000000..5c38c7c3fc
--- /dev/null
+++ b/projects/DensePose/doc/TOOL_APPLY_NET.md
@@ -0,0 +1,93 @@
+
+# Apply Net
+
+`apply_net` is a tool to print or visualize DensePose results on a set of images.
+It has two modes: `dump` to save DensePose model results to a pickle file
+and `show` to visualize them on images.
+
+## Dump Mode
+
+The general command form is:
+```bash
+python apply_net.py dump [-h] [-v] [--output <dump_file>] <config> <model> <input>
+```
+
+There are three mandatory arguments:
+ - `<config>`, configuration file for a given model;
+ - `<model>`, model file with trained parameters;
+ - `<input>`, input image file name, pattern or folder.
+
+One can additionally provide the `--output` argument to define the output file name,
+which defaults to `output.pkl`.
+
+
+Examples:
+
+1. Dump results of a DensePose model with ResNet-50 FPN backbone for images
+   in a folder `images` to file `dump.pkl`:
+```bash
+python apply_net.py dump configs/densepose_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl images --output dump.pkl -v
+```
+
+2. Dump results of a DensePose model with ResNet-50 FPN backbone for images
+   with file names matching the pattern `image*.jpg` to file `results.pkl`:
+```bash
+python apply_net.py dump configs/densepose_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl "image*.jpg" --output results.pkl -v
+```
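The pickle file written by `dump` can be inspected directly from Python. Its exact record layout is defined by `apply_net.py` rather than by this document, so the sketch below only loads the file and reports what it finds (`dump.pkl` matches example 1 above):

```python
import pickle

with open("dump.pkl", "rb") as f:
    data = pickle.load(f)

# Report the container type and, if possible, the first record,
# without assuming a particular schema.
print(type(data))
if isinstance(data, (list, tuple)) and data:
    first = data[0]
    print(type(first), sorted(first.keys()) if isinstance(first, dict) else first)
```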
+
+
+## Visualization Mode
+
+The general command form is:
+```bash
+python apply_net.py show [-h] [-v] [--min_score <score>] [--nms_thresh <threshold>] [--output <image_file>] <config> <model> <input> <visualizations>
+```
+
+There are four mandatory arguments:
+ - `<config>`, configuration file for a given model;
+ - `<model>`, model file with trained parameters;
+ - `<input>`, input image file name, pattern or folder;
+ - `<visualizations>`, visualizations specifier; currently available visualizations are:
+   * `bbox` - bounding boxes of detected persons;
+   * `dp_segm` - segmentation masks for detected persons;
+   * `dp_u` - each body part is colored according to the estimated values of the
+     U coordinate in part parametrization;
+   * `dp_v` - each body part is colored according to the estimated values of the
+     V coordinate in part parametrization;
+   * `dp_contour` - plots contours with color-coded U and V coordinates.
+
+
+One can additionally provide the following optional arguments:
+ - `--min_score` to only show detections with scores not lower than the provided value;
+ - `--nms_thresh` to additionally apply non-maximum suppression to detections at the given threshold;
+ - `--output` to define the visualization file name template, which defaults to `output.png`.
+   To distinguish output file names for different images, the tool appends a 1-based entry index,
+   e.g. output.0001.png, output.0002.png, etc.
+
+
+The following examples show how to output results of a DensePose model
+with ResNet-50 FPN backbone using different visualizations for image `image.jpg`:
+
+1. Show bounding box and segmentation:
+```bash
+python apply_net.py show configs/densepose_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl image.jpg bbox,dp_segm -v
+```
+![Bounding Box + Segmentation Visualization](images/res_bbox_dp_segm.png)
+
+2. Show bounding box and estimated U coordinates for body parts:
+```bash
+python apply_net.py show configs/densepose_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl image.jpg bbox,dp_u -v
+```
+![Bounding Box + U Coordinate Visualization](images/res_bbox_dp_u.png)
+
+3. Show bounding box and estimated V coordinates for body parts:
+```bash
+python apply_net.py show configs/densepose_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl image.jpg bbox,dp_v -v
+```
+![Bounding Box + V Coordinate Visualization](images/res_bbox_dp_v.png)
+
+4. Show bounding box and estimated U and V coordinates via contour plots:
+```bash
+python apply_net.py show configs/densepose_R_50_FPN_s1x.yaml DensePose_ResNet50_FPN_s1x-e2e.pkl image.jpg dp_contour,bbox -v
+```
+![Bounding Box + Contour Visualization](images/res_bbox_dp_contour.png)
diff --git a/projects/DensePose/doc/TOOL_QUERY_DB.md b/projects/DensePose/doc/TOOL_QUERY_DB.md
new file mode 100644
index 0000000000..0003c3b4bf
--- /dev/null
+++ b/projects/DensePose/doc/TOOL_QUERY_DB.md
@@ -0,0 +1,105 @@
+
+# Query Dataset
+
+`query_db` is a tool to print or visualize DensePose data from a dataset.
+It has two modes: `print` and `show` to output dataset entries to standard
+output or to visualize them on images.
+
+## Print Mode
+
+The general command form is:
+```bash
+python query_db.py print [-h] [-v] [--max-entries N] <dataset> <selector>
+```
+
+There are two mandatory arguments:
+ - `<dataset>`, DensePose dataset specification, from which to select
+   the entries (e.g. `densepose_coco_2014_train`);
+ - `<selector>`, dataset entry selector, which can be a single specification
+   or a comma-separated list of specifications of the form
+   `field[:type]=value` for an exact match with the value
+   or `field[:type]=min-max` for a range of values (see the sketch below).
+
+One can additionally limit the maximum number of entries to output
+by providing the `--max-entries` argument.
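The selector grammar (`field[:type]=value` for exact matches, `field[:type]=min-max` for ranges) is implemented inside `query_db.py`; the sketch below is only a simplified re-implementation to make the syntax concrete, and the function name is illustrative rather than the tool's actual API:

```python
def parse_selector(spec: str):
    """Turn 'field[:type]=value' or 'field[:type]=min-max' into a predicate over a dataset entry."""
    field_part, _, value_part = spec.partition("=")
    field, _, type_name = field_part.partition(":")
    cast = {"int": int, "float": float}.get(type_name, str)
    if type_name in ("int", "float") and "-" in value_part:
        lo, hi = (cast(v) for v in value_part.split("-", 1))  # simplified: no negative bounds
        return lambda entry: lo <= cast(entry[field]) <= hi
    return lambda entry: cast(entry[field]) == cast(value_part)


# image_id:int=36-156 keeps entries whose image_id lies in [36, 156]:
keep = parse_selector("image_id:int=36-156")
print(keep({"image_id": 100}), keep({"image_id": 200}))  # True False
```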
+
+Examples:
+
+1. Output at most 10 first entries from the `densepose_coco_2014_train` dataset:
+```bash
+python query_db.py print densepose_coco_2014_train \* --max-entries 10 -v
+```
+
+2. Output all entries with `file_name` equal to `COCO_train2014_000000000036.jpg`:
+```bash
+python query_db.py print densepose_coco_2014_train file_name=COCO_train2014_000000000036.jpg -v
+```
+
+3. Output all entries with `image_id` between 36 and 156:
+```bash
+python query_db.py print densepose_coco_2014_train image_id:int=36-156 -v
+```
+
+## Visualization Mode
+
+The general command form is:
+```bash
+python query_db.py show [-h] [-v] [--max-entries N] [--output <image_file>] <dataset> <selector> <visualizations>
+```
+
+There are three mandatory arguments:
+ - `<dataset>`, DensePose dataset specification, from which to select
+   the entries (e.g. `densepose_coco_2014_train`);
+ - `<selector>`, dataset entry selector, which can be a single specification
+   or a comma-separated list of specifications of the form
+   `field[:type]=value` for an exact match with the value
+   or `field[:type]=min-max` for a range of values;
+ - `<visualizations>`, visualizations specifier; currently available visualizations are:
+   * `bbox` - bounding boxes of annotated persons;
+   * `dp_i` - annotated points colored according to the containing part;
+   * `dp_pts` - annotated points in green color;
+   * `dp_segm` - segmentation masks for annotated persons;
+   * `dp_u` - annotated points colored according to their U coordinate in part parametrization;
+   * `dp_v` - annotated points colored according to their V coordinate in part parametrization.
+
+One can additionally provide the following optional arguments:
+ - `--max-entries` to limit the maximum number of entries to visualize;
+ - `--output` to provide the visualization file name template, which defaults
+   to `output.png`. To distinguish file names for different dataset
+   entries, the tool appends a 1-based entry index to the output file name,
+   e.g. output.0001.png, output.0002.png, etc.
+
+The following examples show how to output different visualizations for the image with `id = 322`
+from the `densepose_coco_2014_train` dataset:
+
+1. Show bounding box and segmentation:
+```bash
+python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_segm -v
+```
+![Bounding Box + Segmentation Visualization](images/vis_bbox_dp_segm.png)
+
+2. Show bounding box and points colored according to the containing part:
+```bash
+python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_i -v
+```
+![Bounding Box + Point Label Visualization](images/vis_bbox_dp_i.png)
+
+3. Show bounding box and annotated points in green color:
+```bash
+python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_pts -v
+```
+![Bounding Box + Point Visualization](images/vis_bbox_dp_pts.png)
+
+4. Show bounding box and annotated points colored according to their U coordinate in part parametrization:
+```bash
+python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_u -v
+```
+![Bounding Box + Point U Visualization](images/vis_bbox_dp_u.png)
+
+5. Show bounding box and annotated points colored according to their V coordinate in part parametrization:
+```bash
+python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_v -v
+```
+![Bounding Box + Point V Visualization](images/vis_bbox_dp_v.png)
+
+
diff --git a/projects/DensePose/doc/images/res_bbox_dp_contour.jpg b/projects/DensePose/doc/images/res_bbox_dp_contour.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8f0c195c237d8ca70e16f5827b8a3b6e456844a4
GIT binary patch
literal 91492
[base85-encoded binary image data omitted]
zob!uB=aZcFWKWC|C_ySiB1>rFFgW12Tx+Bd^rbEjD0FPvk(anx6BUoK+Q< zc88Wi5iwP$t({g6`wCyj zs`YDUn~ZKyeM#iVOebN*&xGSzQ4MXEWfe}&P;>%ZQM0rd4~5uDM11>5j5uMWpiA z#UF3BJfHsnqRUxz%5)}~&p#9C%wF!my;0erF zQSEG*j3~Y)sd}c?hL;KHDD$Lk$ErQq*K;&1rupu4^c)2eJQ$032Jc5)8BXgI5vDD; z{kEN>ZOs4wxH`+OxVmUdA1K`2-9m5(?(Xgo+}$-;;qDH>-6gm~a7fVLuE8a^y2B=ASkeT^o7x3 zFoV+t(J|A>Vb6dw*s$MP!cEH8ElA>y8V7B>O8JoqGwBQEry+j}G#$ti5WCo0k{%X} z09DBQ+^}q7e{vPEgw4*N`$WB|iA*U=eJQm^Er)0yKXhUR7!_bNnS6790L=h!G}$fG zb`+~uaCu__@4;`Q0_}rNU7PrO71p2LymyQ8^*DTE$w)bv8B#9?MHD>|aD$10L2^xj zg?|EdTlkJn^xg_IB<08}j*6!lu)o&8Aq5VgzqvjtRsP~B`brq7+ashX*pjCIR70-( z@U_I^@Z#9%1KZPSR)#5{A(o9J8!m21hmFgM<9D40XVH$_uXYB1FqfI-AJ5`m*Yj6P4{lSZpPw z+DD9;z9>emYHpqSFXRZz3rqADAsJTbwF=C){hILa47;|P;dS&Csa-Rvd%ZrVHobXY zv-s3|7eA>p=A|9lhx4gd>dl8fR~a(TkiRD&WUH!@nu@13H)OWgLFx8iBq2(W{!Rj6 zc5KYP=p2UDGMzT3b0eT3IZZ1MJ^$OrCf(;@%kKF>N-=&(&yK?xWU&{wz-Qj5C?>~eC>EMiV6 z{N?46O|Hz!m+-C%RWzwf>?11z>kzW5raL%Yds->rhXne>B4usl(*rP@U^Y_+ot5f) zs~-o@yj*jP<~1D6jt)&6kX{uid?MP!LWs2G_gz*; zS!9wf^!0p$glRu#PE|#XqlT8zXpBB;omm)tbVP_avcw)!^#tCqBtMTM5w;YW9Hz^1 z4?I7U$z<|srSCXmQUmL#cUSGW7eUGUXo0xh%br^F6{a1a6>`@8Hi8(U6rJw^2&2sd z{)+*=8;!49bW8{kWs`Gvpb*FQ*u?8#Z!+#hw?K$f2?TNZ?e0`zGtzwa;%wrNwkIwy z7V5pRW!C7qthGgey}7$5alQa!>K?})T?<;a6aP^q_Pv%_yU|9a^o~XY*9Nqae?-6T zJ{QYTazs<3Q=OR^oZ-pAI~T3vCbH61sd(VAB@8E_hU00XD^>T}&+fp`#vy`0gDvXb z^HQk>Zb3|1AjHs~k@Sdq8}K@2;3DH+mj`nefhB~+Q%_>5q@@8K!9TAkXo@|?k=jum z<@2c8!v)6ECTKJTk<+sR$-4`a39K8Nju#1mw52F(2mBx3-X9hWd@oLgI>)HgW2mL+ z#VEk2VCA=OCjF6#Dn6&((#Ps^xR4}C<{21no_CIrHe<4W!*QlR^DGwLDt@@GdfZSn zwo&Qg?p>J=QDzkvaI^gy(2M%ds05QIfW%bHQ1a1y{a2+Me8xw<4|E-?&*4vuN$ElY zL(-^M-5-x*a7mar(00*5)Lz1ct1xb83{=vWA)}IR9XLF#D6DtuI5wZm>3vF7>oIbk z31Z0=s2&~%;1_CS|CIfHY)w8f+Ug)s^=JWCi6d~CKr+T+Z0uFS@Fn^UCf*`-7U*qHP_W?fT^c5$ z?nzvuR?o9oxyK*U9(qqdTuYUW(FF91H~ct~QpzwA^^~(p8*-6l|N3X995K_* z7cw~HqUrEWvN=1cja7|*xqH{pbs1AlXgg|U4RqAoQ;=!rGl%VFX-+GK*PmWDk(=$H zvKQ0ugO;;~6GUw=Mj=6K5LYmBnN@VFk5&}utUr@{{$T)##b{QJk*fMalDOgIDpnyK ztm;P-A0^1Pe-S#_@EPlIwGhT{}?PNFw19KgwCp;EHH)DIRU zBJ&|eyKFdy{*{2fuz-?n0i_(~-iQKlGsZyNDhB91VULw==*BLh4MSaw>@i6F*^=%OcAUvvzGQPxPgAZenogjPfbOg&uQ&|xCY@tA zu*Yi|KE?I_C|DVQx}t>nEQREe!N4$6^kUW@fkjW`2)QP3Qz!5eUxg(PSkVm;!E3)J zxO}uJEEYxGe&g|njAt;N5?|t5Fh{p^jHlq=U>OxBevFms&_-;$?jd*0xoF3v$dwo? 
zVkl8j4I|rvR0YY$O`zzM5TL$SeNqo?&wOm7vBV{ouw9#gHyd^lYSi|vD6SMJ{OZ=C zguZ~Qo0rg5CurwB6@>n_gM4U86K1c1cZZfFLmsq%g3_U*s};(VQ6TKXshOZ!T`M0< zR#WIab#@hDwx(oZ>o`%@L!;Wgvjb}x)+;+MOXH}C(2xOP%+CFS6UrYySuAtctCNWT z)E&`I{9+dQsl_Rt^e=zfw`~Y>AcONLg6{m%0G9j%mv0djgHxRTTTa8&|8^*oLK?hC zd9axw_y))J4+$tA1?3Bp=?5+%GK;{+Ps>Fcc z_Um;tr+k{79z3tIMIyy6*1p7dTQCC%tuTFk=#DEXeWi-1capMCKnF zj<%%26V+vZXr3Ea=^Jl2gRTjB7Ks(J11!eF?yh?g0~_l$Te+?nbl>Z)&3++P|9(sJ z-00P$;Z=F%iS4D;8lxBMe7)qXQ%HkuuK(pXZ~rCFi4-q$zXW<0SPa2P0&J_&0DemL zT7aSPQ)2s-|FRFGW!j0^P;#ps&85mOnVgUOry5a@(|K)4Zq-&&8sd_(fiY+ZZ6Gb* z;e;lo886YwlA-N`s=CnT&wl_>kD&7cFGrixF*Xygv8Dd(N;u^y9 z;_M1)Z^t@ryd(t!7PfSTV0welM+rFvGE2A~9)I`mw4)uYpfGNwqP9TE?>G z0|Mk7)c=EcAZ)4ZCEbig68-uNu9sagCfTD#;{@Nz5& zX$Y+376Rcz=5dFENV5DVfN7wl@1<1oI9yX1SEQbVY{Mq>-vRED69cvFAb)88o~dFl z52Up$A)auM@rE5l7K*wbAh(8aZc!5TE_i-AC9m7OsK}g=niooaAHO^LEk?7?Y(np) z*~YwU82*Kx0{2f1S>*CJc&Bg2!@no|6@PgZI}|8JQhpdNzK9XV_38UxM(Y^3iomqQ zDtwAbQsZPlD%de@vu-wq>SML$r_-r8f~>yFNQ(^GRbwPTht!c8mtmw*W@$(Q>c$G_tpB6sy@8`Wdy7eqJ>l}}Aes#3gC6EYZZ53&&)fSf@o&T9vR(MOIQAy)FEoAFMQ6{9?u`79N}ZER zo8g|4sQ9d*KCJuM^fP78-Z|ioDGQPFBF@g3*learpI_5h@ zAY6WzVQIq>$Dx_&$K1_7bdMHwDSmbK=Km@_vzWmykVrO>VF7^d##JOn6=o!USt}cb zkCu{7-^END`Wne3S!YfHSuT%K&S!Zm4i(E3wI8RjnkncMmqIRK=d@*E`Zi|6BK`t4iaU%1}?;+iFv*^*3i+U1N6+$U%JQ)kPlq_U!KhqL1nq>1dm|2 z=Kf(3H88>_X&p+O*#!$9PY2;Q>;$)ND3Xak6N|XrfUr?e23%v51w!R{ z41sz+j`s0}+WPNCtKi`h7E>#vYA7=4r$poW?5G0JHNy` z(_Ds~f;=Dt9hQx&(!B(tKPPkT} zkG%q{%=_y`NSP87M>Bp>l}$)#?&;TRLI0sA)4uNj??C|UxY_Tnxez2fRY-(eB|LgN zkxBm(obfRA7sdt(--4}%_eHkfP62v8WW3t&Q){WHpDB9CFAA1 z6{_`efUhv5sG*ejb|2s>`!mn8lJ)bCe~Fd$3mF@4_w<}y#yHA;AS^k4|Dv<5q17So zB4a8s-~r~vHJ(BdW1DK`FE5X|k4V(2T12paG!s4>F@YLic^Hx^I|*WjLrpI-QK|D{=r}wp(|iQ)5Ig$t7!tZRzd`zV6iTyZ5&N z+KL-7TCZI?sIB^2Z{KtxZyyJym&)3RdQ@stu&1U9jO&Tm0;h(=7Q;!=(=mkiXHl>D z%&$T@nz{UUQf+v6{I%kM@5v)=P`o!nX~^+Qw^lRBT!wgY9UrzY!+FDD zWk-vr7slWZO&te*cj7GxHc6tX1uyw#g2L`IreANFTV&tk2NJE!R5 zC(rn&*tV+s8JnI>pamwdBCnW0KS*oU{{t9DEM}Cgv2abj68AZmWuA95hV@Fr;804_ z;D-xPT2d;Ce@lboJ{XtcO%5ih)iSl+-N06UNU!szmhi_)f$4(FB4=oR>>6+4Unyx zN9^7AP}<)HM7|B+MsPW*Hz#R%eAXv^T^2U3XQiQ>%QgoxR)Z<3>KQK{e&|GtPSRlg z6jQ~sEDmgaB%t{us*^Q;cBB0guKn@-J3tF$o!hFHfqc*Q&SsNbMJ>cVWQ&plKi;l9 z?;2)c-V@!vO1CgM%)-03FBh=-qn3x?CE@WT8ImD3GpbY&yMrmW)L4DSOSxP?s3Tze z#Co_|xj&mtoU-Iy=neg!P$IpUWP78CSGYRJGxLEi&y25sBj|}!vQ;C z=E#D(pKKYJS^G?AIpqSKJuI``9H?=gZ4HkkyR|*c4JJ4sNnY~Iw;iMmRlN!dHhpsg zU-Shy7S0nBZWw5|w-@tNc!2LyQYj^nUN1(lCq1#3Z751?OWZDK0t&I9O|}WEobtu| zoqQZSR2wUuT^k&zb8OTRRYXynB9DPPA8mlV_1@b#mc>}%t%|eXylyGQS0IMg zP`WzNoZC9^tM97`vSoSFR`D-+Pg0pgmFLkkQk~Ap?``H4hd|~ zLkSxTVHVnA5CjG5+#&Z=T$TR--6me5R`s^?zOZW{5d(i^B)jRSDrGL*ve$;>;!O=I zD)l^U+Hr>7^Brf1wxJjMYq4+v!p?MvmJs_~@$Bp-HqTk=eY6h*qAAq^FNU)d`Ls_bJ;y-qT0B<7b!A@_*~(?ReZ<`&o?}> z#Nnc66rL=j-Kg-}SKw;PsIFhd|1*rGbSJg>Abx*(%q!`&=0r`ix63#I5^caU&A5r0biQKpOm$->VH>a}o;sc$p#5%r%oJ^RnM)X0fimqL82(=o82Od|YgrAzu ztb4XE!tmI%^PDD_?YQ)a0l{AGk+_lt{*$p3rX%FEfK{{YUulF+Jy zI(6mnBmhm~w-=CVieGr8LoSRGF}=2fwU5nYaDiP z{ig0rWyNts_8H_5@un+h;(fjO-%l<&oUzywr8z&6=mG-Y1|9wdwLXyP9`q`(EK;98 zNYZbu$&Gz-ZOL6)4m)t!oZHR)6IFMW+O!T0XV%Z-q31v1IJ9|-&g+E#`gbOt1 ze+s8IBoA6!yzt`VWP9r>zFGWYkZ8f?9jHL?EAyXQrp@U*c#4<7*LU`I zKgjS5KS5Esn@Xym)$xML!CbTsO?WIEQY%`5ig!22}lp}I#il}iD-xBum#lzs1q8eM$Q+_{s_c=ZGb8qiG|4^y9he*AbCHZIX)+H8nGoDPto!_T} zEhT80vGfwM0Xzc-?rpnThA)IL9OdRk2im&(o1i1e8uLFO_3FhvY+oZ2T0RM`4#^u< z0oPvG0ubKE^KoCo%8RCxrVQqESQQ?9crlaREyA_*^9)kA3@=Yl4azar5Vdkk*yH}I z$Kke?E0@Zq=mi{14F0_C<~v@Q6%a#HiDwXNjw8av4J_y78U$x8+L{R2XXgD;mYknzI*S{<%orP*<~auA8sw*ogwEwaa$MkwA=Is5Z3_M$(oyq~wxCm+ z`-LmXZK?tI^&rYfs^A1~ z+Pw}sZ7kVFuS~ZBHX3LhOUk-|w~34;Ufe4?Fy^t{(}E3HlgGMd0}+OO&Xpfu-N)#3 
z1wvGH*TtCde))H!_WL-Vg&yGsU(rzhG)k)WsG|0S&wYG(iPM7wdSc5FDKzz&2kRYP zT#cQ02B0vd5p2pqeYe8Qv?>kw{h9TAX)%foVe1A~LIPM}%LR2>FG-A{(TQvY;gWi> zzaz$b`CCaTbC>@LH6imY^Rv`Z+J2o1F6(8wGM}*fod8DvNC{#Ui<+iB zyu@cF30q~luh23|C(fWLFhN|;0o2FWna&Eekb_=D(GeaBtLdGbZVG7jWj_nI#+^pb zLhTFN`3m@c+vasNw{m4FmWq(3sI^uz{EK;F?s$Xw6GdG7+<8c+8@KHhcPD3S^sx;n zH5C;)P0S0n?ByyyM|Z#zL$oSqkNlx!Qle*SFbkh3Khj-{-Q`QIhZX@Pb=D?;u2Awo zz`;n*kep`qOxQUO1wpdN_AdZBnhlk({Kj$tEfF*>_P-x+2L*0o#e>0Ec#?8Z*^6A& zD;wYPp7%y>qNlB3lEGS`trKP6Qr3BN%y{D(zqk6HXx;@a4GZ&aHcMb7 zoSfP>UM}MKOw(j&)T^3o>&F{VD{&`Z&=&~bmjkD$LTBQ0 zmjh~Ej{$uyJ!+N-k1-|Gajr^q6%KgfJ*}VoPXp@oa6b-@R36*=K37tAh~6~QGu$`f zA=Pia6(}09tJ5>dSGBeYtlsEAuDsInwF!lVv#M->L^IQi28X4 z>oXR@@IbHKx{_^g7P*mwY3a9UZOy`NdKBp-UaH>C$HAkM-yEI(+W|05R>*Y8tkDKR zSU1+oaZp((Rg}^w@w=b*svo${+CFKdv$a493P4(J=j2qO6 zR}|%}5b%Spiwz(YSdln|HMnm{YaT0h5M15Sqt7qeKqK!QFD(8%fG6uVMOB+PGyO+D z+#RCtp_X56PDDdD;o-n&3C29sMG*V!d80Hn+mX{1+OOa9HjC9oWe9MnZmi>yV7>(E zLU}-z&+&^Bgl4jwn#;lxCd%82Ux=YeO%^W)(n;_H0~&u?Q1gpQYEW(tQ2RkgAAcJu znULlDn*}Y!l9)5b3&qqiZD;Kmf7v2Mz^D3UXP+f1yfkKW>wFd> z>)u2s<7UIS;uLk7_kuzG!Bu7kXP9EI)H(bRqz(iYL4F@vgFr9!Bg@xX)p>Acn@-18;-4hfk@7Z#U5XNMhWC(x7L429*zuQ4mfJQWviesGaQX&F!n*IZXSM+VPW}!1Y zVYxk${XV}YDH9~{wTvEW$Fn(71RIS(EUS;9%9JegY^Bi|Q#o*_&wWgR9ROpRXqg!7 zgf_Z=s3v^jwwuuP7nD`SpHNg&)V9clc37+#$3d0Hre~2kcEA7#)_jXDM~V)N=k)8v ztXL}W=yM4|3Y;SY&beK}9+=blz81*P`|?A{0@z~3G~eTrSZn_YrRZj0K8OSk?u5xH zulJ8U3+4F_5Dzy}9GYMhsM2o)^t*_i$IfD-EiwOMoX`&mAeU!dZnzs?Jq$o2;w9{U zr=4|!t&fFSzQGW3(bfsalS!3Jw3M*yZes!&=d2|JF(DCBFiki_SH*&V4tPUHg49qu zH|Pg`U~~R!iOxoWOy;Y$54UR%cU-@LfHGeIOBftQEoOuV_W(cb_w7Y>!0o2z)FMU5 za2Be{ORG2fw)+O8(+GD39ou`^wivx>`z&&)VQXp-Q;t8DXSS;#TP>mU4Y#D|pbee) z9l|ot|J(gJQpRHqrF_h5YM1UAUagzh_0$!k1AgaC;vwU!3$Iro?LNi6tr&W=O$tNWlU=Vh5Kh%fA}|3r3uxRF>KW zRxj-y1&N{Mvr^T&LOyt?se!sI+t=pg0o-iQr7{K^y`9a1adREkH1#^pB(_MVVb1Zg zIj4|}del=qug1Qouh&(X@g6kd|SOM_NtXtdI@tWlB+1TlzmPi?8`;WFKR`BQR{eJ z8-6J}jkaRnjf?(lvPghU(sVqZC(%GE_6s^d;}d;Q#nYax;RhA|#kY+8G*(lt#@mX) zV;hFd;W@Z9Q6g0ESH3jX5rI;r;}&c@&w_X_Jm7-JTcQ}Iz%Z)I*vLOrA`5iQavRnp zc=rI*UnGbbC+Ip#xC=4n!kJWcifH2dqh3wpMnmSY{)%lT*Dr}XtPtN4ynAn$I_xXZ z?6cR!hO5%Y*I3@%1kuwtj_>2ca4Q~J$YT(xZ{Sz+oZ5f|ALt$kpi$z5wQ*z;I0|*q ze7Sw^aHQVZeBG+DZL~@?u`YY}FGP{+go~7iqOPxk?bwd;VWQIr{T3}K+LFRP{RfnJ zo_g>r!TvQp_+EijXb zTh=O_UFTpDEg4G_DR!=OrENV0syb2#LBXY}y$$od4^)ps*Hip|v#Ko180`cjcKkwr z16~d-*;ag6{{eOi<0z7#Qn!dF$EZHGogHJ(H3bsX8H0CR=0Eqb4GtWso3=c)KalFc1BD^?N}l**FuB&u+< z!6+Ea4sN9eT-}0UCXIx5!~JX{x}=I|M;>?or|PD$_qYWPDHk0Gp?B6ZAhfCf(xy-gO{`4?Ml6T>Hpmy~XzTY))( zirqYIFSO*exU95TZBa%!u3xC1mPYY^Sz1mU6Cq|(=c#!ly@c5EN5;k?1u5MWEtpsX z^jNt3KcQRj>LLob8zHTFAYBA=PN_K5FI1xlbfY`ul_mWGp$Q5Egqm5W>4aH~xzrGx z!W*Yb{e2{*jpfoUyzHKKH2B#NQuR2J*RhW`eTae;WvR-s=X6mj(R`(i{>f6~`5OsP ze3F7g30)iKH%J>^XyZD*h;z{!;!RNY*qO{+Eg09e^gC~yVj2DjLM>z*nlNdL@T?+G zk5KCr$w?4QfE7fEU^`Xgf_EGPC#|=w3*l*eb&@49L9W#)YJOk;VHbvNu4<|ucvLfP z1WOj7T9%MTG+1+^#YMhXPW)}}5jo)k_jIMvQy9syed}RutnS&2+MrVdOXLDU6fko@ zle<6IUIuh38Acd5QBoNx7I)v!nME98HL7x^piZboLg^B&Rxw> zSeAYpcOw-^IDqKa*GDW2FBci{Yy{>qGnu+Vp$V1LsE=OAxEs#E^zKsGMGIKt=#i?| z02E#C41p1$z{Xer(@t$!T&z|predM%$a1fiNw$quqj(+t>~7}JU^Eb_>GMeq|M+*U zGu`0939^0MucZXRTeyqA(Fb@ExZC>>S~I-q4>cm-K}c zp&F=$k(%ui4|Vs7q|q%k`6Q@vkoZ6iH zt1DX>w~l#)Nap8yavUqYJDR;3=EjCLEhukyPNtcAZ~;b`;f*smzMi3Qk-Y{h8M=4g z-i}b`r$Xvyn;D45-u$g;it?Kgs=DM=7x%XH=W zPYbLJ)TTs6W0@e#np?R`rh3@*#J?wYj^GN6Em1KeY=+#52^8GD8X;#%(8LmmvMSKi zJG;ulPDo>Nkz<(9JbmEeJ!oe5eLaL z?XmX*n2O9TFtk&4Dde<$;&k^@@V}BI2Uc>>#b5D;Q(7$AXMQ%3gIV? 
z@!mByziT`}zyOJ=X8rDQ#DJQVUEHf2rOZp@pE-b*J!nr%tkqrj#CzbH$G>RX)xF#M z-u}++^ClK&gQ^}Z{`NRV>2ufUswl;fg!-aW4&Zwj<|Hd{>CQ8}L%3WtIW*9!hK~Czp^+0QG=zz6SY?B^Y-h?ZsjiK zr*kWs&9vvrZ))o>6XiyF8b*1FEaS&3kSxx}QvuQ`zdQwhTGN3xKqZ>v@mRg4^RdF; ztK{4cgFFaga#e4(_X1|igpXfLJ)#dNwX!;4<+>LIJN^;BdkyYh<*fj?a>XnYWFHkL zU;;fdIe`gQLo^&M)_w*A!9+nN70aTR_A4Bkp<}V?o_(swNTq$N#{4N*o(Bbj4^t0E7azK8Ln$qj9dnjcYvxP}r8A#5PZ(kkcww^}H zQYf0FsR93WD2V-0=;{Pa-4&fBMFG^Bik}hRI=tivnrITLB{SZj48W?Lqp!cbn(Pql zV{?YVGqni*a>Ix8cv@PM{_TjoX1iYbK(nt<=Uc+rd4$-aO^xP!)-ggxtyp^_%DZc`D?h>*0 zJbj2n(jdF^w8S-3MkyQ&9LI@yroH(u7Ur`!_GZn{>t`Sh`q$PEts^!$3?45^IHiF7 zwD_g$JBLVn(Dun+7w9B$ix|Pl0T?)$_|__VLi0B0jsqfrC3>24E8AowX`E7raS*7H z=xjVW$UGv4qfC-cTGD6bGv`m8D?eQn?z<5y>}h9?>H*?j>Vf`liRFA3XbsC%Q`F7s zUyQc#oS`0zf#37lbn?GcwL(|Q=gB+UH3l^Pos9c#pKDo_mk=^#5-a$+wNiT{MHi*R zV&cn$cJ3@S(TGkh<$eanzcjVQVkOJdpHD;eQwLO*gG>#+Yq`ZxWA#3^*6IHYwg?$J z56U1mF=jB5im!_@(_P1GGVxW@>iA&g{|TZc$w3J&xm*mlS~U!}#p4E^f<&00@;Tza z6q&!$Tp_6YG&7+qO$^QiWGJx`ea~ZRw0FN{$n|(_<`+ph?!|Tjc4l zXdH2yp_g3J;+cn4<5pS~Xa#i=JAx5g+{C#u(=Y@jhM2)p_(!nVf9X+gblujA#TYJ} z4vmKjDQODCB#G~YkFk^UI98*-P?X39?<<-5HP*(VM^uNW-V122;9wP5xsqk6;%Rkj z!Js4bSDg8y0HX?A90y0yw2SU2rWTIj&|Pvyf;3M769Q1-Kd@w*cx5-u8|Ont??MZ4 zKS%|aZwE8@P%gG)Nc@-_Le7RnLCdi}?q5t|ktXZhN_%J7Q>MP9N|bXSPy%32-*=z2>wgx1!s43O8Cg=l zdEwzk3*^lR?xk4nGZsOa;l(6ML7c|i4MjarMtO_~(IK=REF<#q`W7PeB@-KOL;VJv zZmQ+!Qzw>`G`jEpbd~OtSC`JfFede-UmNsNyn~X9)6@M=Gm{*Se?M~P@ajQr zW%No;+`Ui7fYtZ%V8N~{#fl!rf!>D)@&(LHJ(!>f&&8&3l9vq7&_IyLz3;xVo~3__ z`w?7mv%aEop)6I#=>wF~wGHHy#>}b7B+#dS?X|+{7L7am-&EQF4R*YARw7&aXr;_4|m8{4TA+k+It%qQo>Cq;BjL#c6=AFsXi~j){#>Rs3G`>;Vno{v$ z;WFM$Uit{~1zQ4!WcINmpTcnr(kn9q)L3akAPF!z8gkU&zLn-SGsUhM+A}FVx*N=j z0d(Kv@mVhSV=z>~m%|)_BQQ%xNxqQ4XPGX$+f`?0HWd(d{#2Wq&X+n1H8 z|MO}?8_^`pB-%3#()IHxcRjtW@HZ(U-1|Yg=`p&{%@Z$G@jQNUj22js|H=LS?5;Y; z{LvEcdJuUOvMOxuyeN_Ov%tSoHn`|`cXb=^lW9EeRO2VNwXEPn|DaLqH%*b2M8o0w z6dOyCAN|>)J27+#RDx@9(|j(3&A;a+=#i)`q3Xp;--eNovD2p*ENOqsQ5VL0E_GWu zjzvA6E9L&ET+RTv(^JGgdCd)-Cak{|Jv#Z<<~#_m95hEtG0FS4<8F}DDu*VN5xyl6IsL~{k<*)bEG$Ow{AO&f$98vR!JV+vnPc+PWehqaKrHy zTRZx5&qPrhkz?dMLW#P??AFG`_iEYq!47YIPvwRy;$~*!Nj3bRRdIly=AxHg8|0KB zlA|aKO4#yIFJUI&fwfACh?S>??QKAt+a^7e^LGM49+NIKJViJ~zSqm%MTTxkNvck4 z&mzNA5-co?_PXztkj>hDZlG1bL{{#V4a@3@!l>KSfVx$THI+?{tUwH@bCC%xq`e)+t(Jzp+k_jw6i&eB{ z%^5M$Va}CuJ=y@0z?Fe{a^uxcs(9G=4q=dY>$ULry(!&@|R z6=8mTxEN*=WmY{1cW8tAA5h?a62eQmUwrro{X3hpEvHvBk%8wHZ@J)8ARJ>IVlo*QOUq)L znCUNaC6RgnCfuS6g)?4C4+_wFPwZF+`Rep~nhg!LqA@1s=5~?W|8&Hasx$7f0${lb zR*G4H_l!*92qc-P&F7CSXuFCvL&^FkiGLzS+%J2bl>#lK>v;Vbs)5>q2vDY0ttS+j4H9En19-`c)AG`_f2%>CuM5zQgN#}0@l!0> zgj8`Qp&-03NZgd|ymuhY37MD)Y6a_a-`L%}kZYd2j>#=919zWFL)#c2E>lS=km%-P z`pad5EI{bGQ$V^Z5ZU~Ip0w??U}hbazk``21GVv}?MEYlfl&IDoZA3h=5t?$VDB~P zge*cjUR#O*TR(~Caos%>L_WEjc6yy473BS5)@=7h&ibbxETx!$>&Yl3&RUHYm|^5p z_V7)wzFa zATOJ+1fV$HgeWj7^kfM!Z9prXKqQDS(CXA?rNZ|fMFI9G6PszF!*XWEQh!c~=RU5c zpz?7-Xc!7wqeYWd!b5ZW1W$V#02f;WiUNe;Ip$kkijCmKwoO8z!Eu95GWJ|fl?Tjp05)a=x`0&d7tdIJk zEJs{Sn<^GVvvL2gNIBNj?|})?Q>kLaZ5`;3MVC{n#X{*T+_gtaQCb~&ot2cpz7+N)7s3&JA2_-X~et-93 zl0ux>4TA)g1@(0s;v!`&J2bk4rS+coEhApQoGA9@72y9~&1Ol{+1gx+J^16T_q~y6 z#s@4pIhJ#br2ElJEMJ&YFF}O$6(|XT+pS66A@Cc)6#KH0ULm}`?wY(&N^7_s! 
zFr3w(wmo)R;OXRj3z-CgWlwZhlo`8K58aL6S5cr*_D-aLEE3$A{{F+pJsC!o`Kb3Ae})@akD*jgguBBs4VK zzm1DgZWg)OUq+Tf%#+l|5v&OD}kHoui-2$8E3Ml)_T1Gj)F(2&Z!bg)d!m~SCk&gK{&+i2;UcC@hqtj_UT(}Sr6s~;KbQfQ3gJ&@7}Z>yLU2zgd6BnD#M!o{fM%1o_i@=!xs2ccy*$P z@bCWwqaR%0q3AFGGZ4;*g``6?bqN<>_-q0O=fZCNu{m_^Us#bEL_+I-x)rqmKh4x)4!BSO4I*OnKX=dZYW6Q(!@e$>K<-y6kQ#7(hVr?X$y#y0NiQP#4es}>1S;GRL z=s;8(pqqM|zc0DFtaC9hd*3S+GkDynQC<_`Cb{4bLI}IV>LSbL4pqRCyFSiIx`^80t+cEpaz&%2f{~@_ul|kP+Br&(n7e6RU9cN zSBwhbYj3^@f@4II3oACn4=_%neq3RRH9-McK{ql&8<@#%= zcq1}~S|OdZm5lJfYz?U!@<`hPkO3HVmMNrIq>n-?6`IL)+*OOSzV=1{s%9k3FLn^k zB97%EWTi0^K>Cd~C;ee~gYAPG zGUoxw2>$?W-UbA@s_+0wy01T&6S+9#8FFPUGBSoTcZ}9oRX2d|xFA=zK4%C0l!&HQ z4I?A#V18v{O0!=(UmlnMIdzqUxk+-+<^=kLF6s(`N!6tC7+qN2z&ngnxP~wu11cCa z%cXbp01u%j>%XQLl6bPiyp9xOrDQ5?PN016%fmsrqj*rw6r)Ow0ni{;V{#^=^81Vd zP_krQGnsjTCP3iFbXB<|Z9{%M=+~Xz88Y!@q(lNyDAgEX1dzVk_uz(Ih2NOx70f?% z3d+iX~V z+4_LP?l7S5Qb5dgt43sN*?|L(uIF#A0DMl95U6{@0b~a0LVtMs*_p2GON`={&m7+^OB%UgV{-4(k%2o2rj;Pie*?wSaVo&5g*aFHSjm+;TU@aQ9 z0MR>pTYa~`ELEY6JGrvxRg+0oUO@zcJhmdY9&L44sbMOlibzyoR92yT0e8;ZAI}t; zR#K8Hy7+I;QBc$e(|&)Z0FfL~Ox^7v8`%M@9!CEDT;Njv(Eipj9D`LtfCaBVr5-DR zj(3tNE8wI5t(BoRe4AjTfI%LbKqkEF07_2bWy>f9+TCo}y){Jn z@q1znjZwU+#a@^T9wF`O$NlkHVo+O5wpEl-@pwqvbLD@g7nWB;qfjxjm7<;-BYO1O ztLcCXB=Lxsx{$YarIg(tT|WC_jW=)^*GmVmN1&~U`d{9LE2-3I%zo2rHDQ5aSM{;a z2hS9g$s3!D{K0%D!ffylr^~PMH? zZ|?A7>gDB%O^t%ANg#TQ=K+tsiH~^2h*~5cnLMxDdimfAB%wpfPP8D>l0{c1`|{hI zR##~xcGRpv002$d8-FAD;__+km%A5Dy)g~Ruzj$*!wk|Xh#wHTW4I%V^}rfAS5mS* zl>vDK8)iEXpxpjgE?guoxt58M0nvP#=jw1??7*kGA(BPWVa>7hKKP8*LNuTe3j(2wa+SvZ ze*XYL>x(Np$EL=jG>TD0d9Q7W9=u=$mRAlPrEl4I7gG=rjjL>1T+)Bub89M!`MKKc z^Nuvowvw?3*>7({&)=Rn!6J>-K~X@41fBrXKv%xz{Ec7=iv+teD7_2`AZh1=d)}^n zuoZ?E(&QCYEFDjZh};V1sEz&b(23qF($BaDNACe=e^ZV;F{Y_9=Ri^j)IMOt=BlgQ z@5cB70znEakP}o}#Gy!FH>&dI^Y+6Zum!}D4Iq-p2sYK`DytRC zCPW}1CXf?#_OtiE1&=SS7In<5jEqBSeI%gYVd_czu@RiXEYYhN&<0baX;4alNc7wN zxHu|;(@IrClc!4R0>L$;eoOk`vSoy>d$O`={iLk)0Rok7{5xIOnIixxBmtAWVyhV- z5H`}wR%@Nf7`ZPC_p>1l89nN2Bn~%k;;&J@7=}q=c-*2is>%kS1&|3F0phYe{e7`~ z(Uy3rRZK|5qeWerE{z7VC{f$j00Sp0JVg@F9V7sTwSMi9`^U@ZJq`<(LL(Be9rY?J z5Z6Jpl=l7O>(8|c31VrPP?I4yU~(&K9#y@4Fy>+bA9j1RMa(LxElq0V)z_i$(cZVf z2}7BkQb+HQG#cx}t4&th`_kC;!=o`6iIO=OP*XB7JFpxtksF`W0%)U_M++K22k$A?HPQ*LzoV>(#m zB*o&3PkQUCj|dhN{{Zc;s^A9mF_=JRNZJKp956HhZN>0zh~v)zm$)nVldMXn$pc6r zj}G2IaqKo5;@#$E7{3PdoiGv>Bd!ovQnzZrF+S1_eckO z_2cu!a~UU^Qv`D}kgA5uTbI4L-m8u%VdH!s;UQu9$r|id8U#>Fm^5+5KoK72sOUjl5GC~@qw;rtOqZcllHN=3kfz; z#sL@Q$>Y#^91k zpF9d&{{Z17T;i|da*^gvT+0`QNut{^+?yBl7sxbZE9LpBFXg# zXN^{P02K=Ol3hEEwjOK>fHqtJ_fUdto=8rTmMf~5#o|yv05Ru!BzY0hc_bgTI#Wm}t8L=HsTcLS$1>T7DHV(&P zH#>rN8)D*o{{V8i*qB6MAxl^cP!P4Gc?+xeQ{{ocGX2^FhDVIMAYYR~Ac4R8bFtrX zfG00DVc}5n(oP3_R)Xmcq@yKny8}aRM%cDrGTruONTM-46^&bQ8-_buaYt>>gn{#K zio_vFU8a&KVI+?1k~d^>7!l>isWg?trL(C(0EQ_YbqBbMY>al0n0**lA!AQ}>$evnQwcQQC z6|wa4-wb6ZXuI+IG6{@ESIQDeYBn^D#e>hH-~)GgS!Pk0%oxf=s$zsl1gpK@@G|NI z@C_VtBR)X1#$Zwm!=Yc@8&d=58o?xa16&Me2Ta3iLjBizc0MB!Kv=H+WK(VBgT&G6 zGtH&Ph$8CH3Y8Q`q2PXdUO(-1)Hbz+fvjT@+4L#Gf$-!*p4= z`rogXE1E`(>Y0&zO$sKpFQ&+*AnDWO8;S~i#s{37>+ThFu|OQEo7hp`nyssUTmWW` zQcmpVRz`&)3|JjK=u_{q?r59fdg8g(i~#QH71o&=f8HDJI2-wW4+jY<=Mi$_B#peF zpAzaU0jfW7vGu__*31WnDK$qsQ2^@GD%F}hjVfDyD{3`>Cp(*z++dbiq-i467!Kz4 zHa=5epH4@Zz~-NI&ZEAUmC8NrUaxwu7dKq_V0nP@6pANs>blqh39v=|tP_i5QC>Fh zjRB%nkv9qm*jJwH0Rw-V05O`6FO+RFDmQl;2!c6NKx`M!hFkUqx5B@+NP|j2F&^-O zP5bTk7-92HWHC<0PU}*vZb<>_U@MLE{jmtrPSH7(0-rXZJOEfbfCk`>dGo*vW#XPp z$qt4>BT_)wt!~YV{JpSOk`;rx%Y*=neN4m(77Cl@^;!CR;<07OhAHBW)zpF12Vw7BD7p0yJfm1o03Be;3RK)w%rr zumIOeBll5{l~96q8(m+x9Q)#=Ng|H$7Dp-qEPxfQs<;+v$oIr*jVQ8i01~tc%s^B#cZ;XZN#J{dM}K@;F_(dxTM2E(g2uM< 
zeBGWgWXuh3BD%9m4yyoLlT<}|?SKg+asCw}F6YF| z#ZQW_mK;z&p+2J*5X@yUzOGf24ZK`m=l8@0)0XdvEYez2ZA4v${w1U5=UCyDd&4Y@ zv`G}ewGdCrPaM{sDA$$%yZ6znJuyCOTM{-RxfDqspQykiS0KM|5k`$n6g7|={jcdp zuxSi4K=a8o%Oq=KK(a|7?tKQ|Qn-xDX5pVinR3qP7zF`Rh%6mI=Iyxi+W;)r8yL~j zjCb=82tO#lua^~y&3iSniUoidOa{lwR`6o2{t+U#8sP#73dmFfPKW zhUb0u^z!w^B9KPlk^&X-+yP#`V{w2LIf+E0=0Hh6rECM?(|=mq{&5lKSTGKPH&O_8 z00Y1GwlpVMY#7hh#rT1{d};*AfFz!K!0RL;)uFCeZOB(g>rq?Ss6o%kd$J;k;T`) zmgC0-g$*Do%IZKBUW0!@`rru5cX~?5;Mvnc!L=Z4 zc>e(8VCGq}`Jf94G^j7QWdIXLZ>ihqiYvyyvoV)D5(gK@9_EfLRy4T+YTL}+pY?lS z15KC{#QBDsNfb1lM%(zd^FDZ5Fq#6AQCzZ%ZVe6oyYt2@(Tfuqh%#71CgO+9_w>Oc zjz8G45=$d)4w`E|L;WZDdYk~hZrA`6IXXdC^CGovz!&;r8JoT^l_VuA)z{^Bg5}|P z95kh3RMkF~^7>aK`eMq_u4+e)G9!_Q7a<&uZEK7H5lE*@%5=PO77Vt$517%fFV_sf zJH0aAw2a(f5U;)LFtSMpbRbyB(%PNP{?SEP9^0O9#FE4!(2`L)yR{;;066~uoB?gJ z*>Dk$?}I|LklPPW{&8p|%pvadD~QN>OPwaHX#|fVOMaLvU-!(qmrD0+Lvv<`y5CQ( z83`t)k!1v%ZAW3GwUN%&);Hq-L@*=HsIn%8V+kdVNnT|BT-VAy@VH_iBlm$is1FaB zKSF<&2{dl937$3~#(=B))U;31ukLVKjTTvpy96My+hTrVJ!*)rL4J4uV<|j3GZ93r z&cLej5~%JD_s0Bwcsvv3`iy9E4p3+zitMD>8v*D@ybrx}gS?0^c{H_#6jp&Fjg5aV zJRUe6Rs2Gw*a~R@IvR-{cgH6HFB;5#K_WC-r6|YcP&Xi5`4e9Fb6#JjLJ=WXX#pMj z?WGrxfd2rhiEc>PQ6~b;D23viDxjmT;GM5mnCFG(gLouwkdg-uv9Oh`gMgz|e&F&V z?)U%;^uP%mmboWkIV#9p7aTC#VZpO}SDx6R%F61{J?M^5y060ahxbO+U@06?ygQi9 zsy=11>Oe*Pk`bl3*zNNX%^oXkDXJ-Boza35A=V2K;0Oy3VPNlo0Zj9(#_&Bq+ZDS;Y_Np&Lb4N{$hY-`fbwg&PnB3N`fw5zEGkPf9gHIFU7 z`z^)|EY2ooM`C3H;|xGP5=xCfx(PM>umdq<+)QSKq-kv=YK%4nvao944UKV%W*_l| z1T7x2wnZBx9uKG-ZRdWY3;rfdpLkWY%7?_Ld`NG__N)8Xt}QH)&6#nRcLntTngUx9 zWGLJZOIjXy03c5$Z&9flXjusr;B*>}%jIY7y%eB5WqC9K z4m_)nJqh-}1CDu1vqL4+%8P7DhrM5WI=6Cg$?%JtCN5RC(JSusMhR4 zXj=?lZ~;Dh5UqR+8kXu}KwHw9t4^_N#~}ED^WS>s6wD;Kgc)G7_))Y*Hd+F%y*w;W znWf(V%*&Tp<#SQb%Rez?a5#@L7fC`V9 z&E?i4C%c9F$s!&fn!Hwv>cbFmu{7$!Z)alI>SO51PcTVnBkA^>B_&#Fl# z1rn$R00F<5pu78G%Ms2(^F|xKm}+8lhAb_ZSFH*le-HYO0FLOzmy;;PR$b#_t>HTh zQ)k3UZ#{;uU9nE~C5@O%tvjH)f|>*{4Y%=KU#;zj^HF8{uJc$V#557Vj4=l9_b6B+ zja&KNd%$2#8MynoB69P2GouR@KYGpnes(YiCynxi#3Eo6*#OW+t^q$)z|=2#4Do`= zC=y=FAv})=``Y3z{cW8w1tAa@s;5MH1ReXltYTJ4 z6&Uwr4Td#Ke!_*00VcV_B+1@sNK7YFN&<}}0tW3>O5TIh+~JOD*k z0PJe4lUvvEZ;E%iGT(|>jp;Fj=7y5Td)~#5U~8NJv?&g1BtX#(+R5=L@c#As8sGl_ zj7-^0Y|^09u#Jy*WtT}Bl#PfdWAr|w7INZa8=@(88U~=mkOL83R<+NV9)}l&inn&H zrCUU8N6z{3`P3kJj4x^Eea?t^N9!nkxJbUR@`5& z18woAmU!fu;>>1$XsQs|2F{Rdd(qQhQ|2(7@|h9*NY1Ex#W$rq{9dM-{5yGhU<6)P zm11dR3lT9YjW)YgX&jz-p3?Jle^s60LV`I0Mo;lq@*+~2Nqc#Ix zt)A=veRwAQurQKj*ud=VPewh4;?!C5HL8yzlYkv`%O2uXnnsdRQbk|_2Vz0ee8$w* z@273>u5w3)ApP5{frDryfu!zAjtAQp){;o8A`L36iuXpY^!@BtmityBT)8AEWR=6# zzDAo04fFLMOaO$F_lp|BNz_Z0Rwr{n9sY#ZuhSJ*J0n860Re>&HsD#~Z!xv{P{x$` zC~O&G&=`QeF|YuJANsFweMR2*Tq?4(lAvV&0JO5+`f9BCRscp?#ij7)J9%z6M zvbFE#4k%?<&`l5_qaoQqCtwLXdF`j15ulBZp<3>WlX|X+{$I}(T)Bl7<88GwD>&QF zPq;Nz&0qmfB7|4WBL^k)5k+j9BzspFls z^Ra%o;6#y{HdI$uwIom<*AW(9UA$3>8h|tjKsej`=i2}t4A50rkU0$7FxUZC&egqh z>4|wYyLgtWjn!Y%kz3K@3JKpXSfK~OMup$89)Bs^V#hHPu@1(_)T9dl`&R>e0P|;^ z$oxT3Ohb@IpFf}Jcnh(SWRp;lC2tgjsKe&4Pao<__gI>=9VzL3W2Zaq&s z@x}}#Mj$%T-9dA*{{RY~^!ngXNUBRP*&kT|^X>Xz1|iIdwJv2<<8z>TfO-8tTxVyY yB2NA=2Hob7#-0H_=hFq6R#`jNAgLRV-UHVHl}?&T1E}Lrq0c09UcSCq1OM4{bmVvd literal 0 HcmV?d00001 diff --git a/projects/DensePose/doc/images/res_bbox_dp_segm.jpg b/projects/DensePose/doc/images/res_bbox_dp_segm.jpg new file mode 100644 index 0000000000000000000000000000000000000000..855fb7fe49956528eb3649379e4ca74210e8ee61 GIT binary patch literal 156777 zcmb5Ug;N{M7d0H*-Q8US!QFxvcX@yy!3i!c#oZE|0>!1cYg=546!#X_K%qzrl)n9a z^Syt-dv<1LXYJhGnLBg$?ESm;_d9@C3#HVzIhJ`p}H9w8nM4gm=PArUb#2{A4{DH$m-*}pdN|29GS-%2!e 
z?0*Z1@o@0|_5MHN?*M=V8;u#A8x4gOfJ%abMuPHp1i%0QprZY=_Mg4~KhV%oP%!|Q zSlBrKhOLPKs3>Tt7^vv~xc?c9LW6>e20$kvWx*f=DjBfaW0H%&*pwsTScat(ErVaN z*+m_8e^9FUW-eZHfTE(ysl*_D{xOKQb|Xiptcq4n=RW*~?;gNs4)Bj`r|JeU8 zHvoW!j)94Vih})bRG9PB@BwOn>7eakCiKw|1+>QNJ4YoL_WD9SDz&G)*p{o>7O0<)`Bg!ZOF z0}u1k)Yw^_;m z5(1BEb7K~Iy%LL@4 zrH;wp-hJ%oIB0C?s&52yGbP7u&OyPJ#(r?-Gobmidc3v3Ai({$W*KQoMP}`DPDU=$ z+S-Y^nGuTAvD#J%qBq3E9SI%22rRi-Wc%RCg<}5I;jkNJ%p4ywD&y0v890jD?$lvN z8*#!}aGaS2@l1=*7oaEQOBk`dO*OxfH6q=hubc{J?L7YT#bmUzmSEwFA=A-~>$m*) zgLPuSewTyHJb|EnX9t8aqz6sod0=^a*)hV#lDa3k=w*{O`(4~1?K^XwR+6)$P}qIR zR#s1no}`@;E|DEKs2X1H5na8R9GogCPL5T@?-cqoxaB(h59>ZH#8D&fCR4dTwgaG_S8UH?Fr zSTVFo!ll2nL-6 z)Ccl*ay1kyxB?Fydjp=)IlYB%C~P}U@6|d!lW;Hh!QME(LNjr=Gr+~?O{{=eMQ87x z-iXe3;2Ae<7`7LSMLowOHG@2F>QC{q!wn(hgu1@~g-}Dj%>>?r{ zH3m!XF;X#VklV?B?K{4aC;7Y|-sdWV?w_hfEE$zkpK{odo1m;ry$b#*ZL-u}hsdrt z4TmKCt%-_ty>z#_42a3GU0PImytSXBxOe@aufIH|rASz$ScQ8ZN9)AZbGgRv%x3XkvMAK6>$JL81kcz12I2dh}%w9(7v#8J{qa+Dk8~a`m_k9530tvRJHYDX|3YU=V9BSQ zkUi43+kfn}w4eHQbb zPB?z0O}CjqV=KnaJ+9lPL}br4f{M9J7a6&Vex(MzEL&NOC!mdr9Qy_O3&7dSpdMM( zKs~38I167IeFndPbExJCL1@pPZn4+5f84jDYHLc3{}f)M#pGy%$qK9nk+*M6+vpRk z_8(3D1q8n1^%AHuw$J+uApU-7R<(0Wvz9pBI$`!!<66IrBTIB5AT7(Z<2E17dkvRq zx=YL!e7dGjsyFr*;LWIb5`JeKrLWQX#xC&1K!-YRtJC?l3%aL{YAHEQ)`(TD%?%V| zsHY)dGTXmOGkqh!BiQ=@S_y|}+%LS9s$%I3I1&cH&m=GSL=_WO=GC`p7V^c514$ zi3lomDSWC?Taq9;@`XylKSIcz>J*=cB-1d}W3VvcQ}lhsZdg?h$v^*{ohhWnN^MnA zsHM{ScjZq;UpIOy3TTW;DwrTzZZT&T5BPGbv=Rh;Y0U6f^%rSWi=WJPBBz5bHSk3^ zhd&pH_k2fT&Tynyq;&P78*PB}^+X}3cuWJ>{Q*Wiz4>>$F5fflIWS8w+%n%7#8d!^ebxRA@ zj^)_o2US4|PLFNStX(6h8OIyYg2c^42x!LQhu(WEidb&Sq7_N$%GiZ4oV}OxLr>YD zh?8Ua&}N;Jew0^sfgVwuMYMVCYG%$&53-Yuui1g;Xj&@9jwIJD>A6`nnqoqo)<|lt z6u#+ul@eCz8@yBzG%G+9-QOmRmWrF@U_-WjH5dGjn&fh^KX(*|Q~Bn6i8fBEUKk+S zNpxGGR~BA1;MEX~=!=q_zx+in5TEn|%!ZM~vK?>&j=<7MM9PI;3+ohD z91?i%)w&>8%qoTSW~Tt>5CDZ|=?kPYiu` zqbZS|_EF3%;zt(EZZ6io4Njjn$tY#4997=>kql?r(}_s?kj2^3jr;S*Yw;}Jk}42e>24_MK2uwL9U5!?Yz?;eVVE6WkkX9wSGN*H-p;OArSa9U z7EobMu9we!XuSUS0`x|Bvg=^&kUt2|apODB+HKH^W9J#-mid;xFJ>^O`936Z`IZ&z`$n^-nD7CGHy--xA4e zZX1hrEZ^5unPGX^eQOE4(686n`OVi~00RnBuuP?>w($wnf`g`9E75OL1<;go_W+s$rk`Sa=G20N zN>Kv!C2wSVYfi3Rc!850mtmte@S9uRX1mj8EU8mPsRF6Ab{&#GZZ;wCMCHevngw0d zT3||1&3wmL8LrSLUDTbAmKG9nXLQ`Z*uwLl7OxYGkDPC7?;w6^;yG*vq(Gu55pGfC zR9ZCel{CRhV-j~s@{vNIMZi!1$zH@gg4cV=TS;+B&XeWCB%428tYyFJ7nGp6jaq&u zD-Ed^2dI?sck%k@PrO5rROVHd`Ck7g+rU zFy(I*#TYCGFSyy=5~$b38|x^K)T>nDE?_o_DQ0-oN7|&VF7_RaeDCuLD@@PwZRVcZ z+dKD!fw!59qqHoOEg>4xhqvR=seSVS)A|1?QYXp!Uzu8rR>bKDqZ9PezM~6a&ycwQ ztdZ?h9j!RCwyBtV16n zR?2beG2tL`GpLeU$0;0HWML6NI?Ppxrr!^ot~G`i(k}wXF|}tT9{W|8AJ~3}zud<7 zXh8VW#3%?}SHbS_6{Ho3a|Iex7#XgI_v z?kgze8`fLuT)cZCkc-#);8ZtFbl2IxfNj+@y4A0gDkBo!9TFsw2XHe2y2#G3`;yr? zhu`2qr(yg9hlq^!AYz$KcopSj?1YP!svMmi&V}kf9uhhP5L~&vEiAvWx$wUG0(=5#H#fF7NMilE%*(0$(AFxYU$-{$ z(ly!D8R2C-{edRZWB*hh3WNS0L?cy&XbhgcIoms6$M6ALDKdYL2%Q%dBI<6*8;9`+ z54HAY1fX@JeVf7m()$H#G2JxmO0{E;@&>#m|041gITe1g-F1DGC`5>mik>+0L32l| zDXGvOiEI%iX&Hw-RIjX%B4KQ)XbVZzeoES6-HC@0a? 
z3v*(vqJsYN7D~t@f{)KIGEuOvF*5B>ZOIs)1?fQ|sL(E8RIt{QXXyeUiK1?;uaW z!3Hov&CIgO)*rczJEI95OylZt&bz%qDbce>U?eX zUj@4JjT8qeCXR!xGAv5$>Z zf-=#C24$TIK(s(Z;C%_X-w@$^H(S+s11&d4cWlKNYH>`^mF}4OfiV{|L$9|bvCS~W z#>Qz%qcq7*G>7N0iD3PP9kW?yNJ|i&7=k302o;rS9m~SNNWej0^WjHpbKO6dsl=z^yjo8Pg;u?`}^|80mEYej&`zrCbSnq41i|Hl2snZ4|Ifm$mEBM~PhM zsdTCVDKf9+Z8xOc@DPE#Z)&Le`ct{uPWip51Mg?uIm65a1&gW8V<7K4Jg*tCJlddX z>bS`#%y$)HRwy@%s7;0OXms;x9liCW@f{*Ue+=Z+y?1(N*H`1(c!lQ=S0}!)n~r8R zk9#u#@oKXDzwfm>HtY1~oNSmo4SXYKbaq$rqYuJIenO_Ke8!JuAn1o$XgpeV=34}Qoo1wFxfGb|)f2fiPcNpA^K5TLaP^bG9uk zSa>lwr1Y>EEH?kCb0fE!MsF)7exz=OEV=CD=4@DT&Xl*-6*ccP?L9-e(z#&T)e<+rZx@%eGd4QM=KGTQ&OOT2)|kFqSBAb- z7bm ze;FUG?#RkxVdatLJcxWi>RQ1za|pzGMUNMzjMyHkygZSStIo<(A5G$vero-gQJy-( zqObqFLO9Gm&q&wjUW+l3^M#b<+{Q-NB&Drwd(#TD6BdOsE%ZrxMfpE#3_U4qQ zc6rp>Omg#bPm3Ir00PADa}9Z#)Pe;68!h49AcYRgjr*f9H5N8o`>!RxW%?jCZ9Rg@$Dlk!EjXk}T@? z(;$qER;l9^8B0_|WXd*a@WvYNOZdJ8{fS>CwLjjK*}pU5ka91)@~uk+C9Qu+FlqF| zk&Zln3NA^^ky=kCZWLeQ2|g|mQA#u20{AnWgT-6;3JE+{JwL$b0zgq>3xsc|e{f^r zBYHdSeN6glDQ0{}Z)Ly!NfT8qCS;-S|LQ{+T6?N$G;l0HMHjEh_TwmKA8U)Sou-Lq zI(M3x=3Jvt`NS?EgRqL`rTDJPlZepF$>P9CH|wAXf>g?knwE4=BM3zh`(A5QT7<8c zSnfPsi^eTTNS%CI9@^#1(KIEk`)2l#GH=UMG~$K}T+e8fsPdHb|0Ya%8OZE!vc=?v zBR(m4R0qmOTUZ}+i;q-on)&VGsUo?e*Z>>)RXnxQdZ*LMV}(pNm|dgF1>H*!FW%QfWd$Pvm;t+eDwbx@HErNyN9@Ztsm&Ln-Dj99~x9CzhlBpmdC5TmT($&C?EYu-E2 zyyh0h^3icpoHf7sYU$YbQHw*oSnWMI(^fv0=k1$Ia`09dTyZu!$Mztil+P_QuAo0Mv5S>1V0wtZ#q&-s8$N zoQYCoyvduKrW{w9Wi86jgtUhm5^YK9vsjH0jnd86DOc;-FubG zw@YrmGAxJ#1y6neWonHH=9e_57D*GZHK^*;tYa;R?^Kd1eSO(WP<48PizywgsG;G^ zi|L*M$*(RYXU~P78T5RRn---U`=g7pJLt`~^5fPP^yksNT(F`0YZ`7lFQzT->_!m3Kv;n@a~T zdq)H2Cw=ssoKKmg(`mU05C4X&HB4Nh<9VUZ1?;n{Hh zMoX?(P&PFP(|Hj0{S;2AUyp9iBQK>>(6h$Fi(5Xk^DtP9C=^E%%^H)$4e-aLY>``b zx7(BgAqAhnjjSokG7@6r_qEy{JGoli^lGO=$Tg(ga7;UKny~44e%`m}w>0M{d|elZ z8@(jI)c+74>h`*EF1s_b`^=LyM0EPFxK*v2uX`CO2MjeoY|6&$1IdJ*o(iTBVI_T! 
z_jf2kiIXPoAS=Ya?KW9E-Ck*41lN1Y!GcIq_i8L+-};95}bC+?fX8jMf5crQe`3f8*J_W}(fMgzd*KXC!Ne!jisou)}#XVuM(5{5i!JnYiA z$0ks@_bhC^J^{kh;4s5nJo2Bu`97!zOso<2rHbALuWRkz4%NA_6f5~c?&22A2I@!x z>^(Lx4IK!>_N$iN*}O&K%$v=myl!dRHaD#{x`0Ef0Eai5l_3wmap)B9G^EM)ih68d ztrP2Gzd278gH8^9s;?IYatv>Nw3(>I+eIjySQY_$v`RNIC z-sd5|+%@r8CFX$~#cr&l{J>&t)*10ph=!^}m`mIE!IAcrle#hojxj1Dq-?$2<|%7%dDuUs&PDD;gJ0OS zIYd>l>|U?9&%hWfwPT@`x-!wHo8(qK1;L(_JW9l*je0wknY=b@hNEXmlkBBk|bVefQzy7kR+?p+vGqs=7 zXXm2!x@GY8QRN5S)HD=GkCdPN?kNTl?xy{{)ciT6IOH=uBZJm5`Gk#LFR#u6Sm|0tmywIe}rp|$vcKmIadvIx*ri-Cp#CAft9ueNYtjOMp zMj6zISG{1gml6)nX|;rQr#$8kjOy0sVJ9=~x(yStM&7peb#)~C3@BmwA=<;hXUkW+ z!y=b8hvtZ7_(tfV!c+23KdW&sKczUT*v}3q&2HgX*=PvYa=nXliKQ)#|AKIu-Gb(s zQ>KsKBDoiR?n;;Ya5VSVAn62(;%kD_x;NH_->6u{0(?Va3gJo` zhBArlDU99%JvvxwSBiV|K#LgMGj=TM`>Gx)l98WQRyW9_X9?JCTR^Ez!qj+}H*4SI z8AARHyu=vjg~v|BCfG0dOWw6E%odu2=Ht6>6b6A=x&}Y2l1=**DCzh4v*SBtHt|Bl+_|LVVc^!@NasaAJWUEAcmFot(yUr+03&oB3oDORA22x= ziVK4VJEo`6&(zTl{L>mBu}#eGlMsjLQ`qaG7c5b zFx{2!-MNRh2GC0obb30b6q70a2=G3fOU-tP=&jd|jV8tR;YB*F;6|FZ>6W3nwWK@u zt()$v&{OZ%X6dOA3}uCC>QRriNh$)fkL`K!6Gmz8k_hhMB>|dcX1>)bHC1fz{SFJ~ zTc<9g)hmnnYrd*LI1Qd8mKn@6&AgE-#HD))7A1Eg=CI#Sxbl5;G}zHv3yZN)>ogy$ zO?-)J_Z#ZBs9-Mo;x*w}E{{`rLJaQt%}D?!>43}WxrOMDlmGBsc_&65e=fguBY3xi zG7@ZA*AAP(yKGYwXL~x~qxj-v@s@zdFl<>;b}KAi@55wOHe@}32+ZxuoOYz%n`7%A zQ^a$d4nmjcEVEUyn)rGwU10}yP2&qo@LX9^G!M6{uEg=!3$fUlSFd?v_!khz6PvR# zOwvVUc>ELp>XIA7b6hABU}0sg{x!##y=$W>WVj)?UBD0WQZgkn58PC^+%IVi3%*=- zNiv&Y(%N?`+q7y$k+*m7p}zeWq{{?21$g7}sn?W_DgIR5{n5(kK9Quc$)M(~#xVNM zk5e^AY}@>{((5AFdbSIi=$)Cf+RU{P=|}H7nO_0VxSw4w8})?N&onEq7#lvXCXDL*Js^ooxX0|>I~qYkvXEbpx< zsGaVmc3Liet&&(!AQ_Ecf8Q^CdNS?bQi-#*nAmb4A-_()&-ncp5GHrRmiVKOEB(nb zTtpBD05Zrd3^}#H8KYt)q;TN{i!VK8Ua54p7$}Iw+cbB1^i8~M*EI$|uRNA?wd2Ro zOQ)_S5m?a?viLq$?dOUQT$R3Ga4i)g+P&EeaneqCp+8yUS~G*&3XNR}V-_+r%YRtt z-0cUSafIARsna^0Kaev{%CqbiYKad2dREu~G9dIGPtpsE>gH^JXeZEW3hZWB32Hci zVVh#YKIbOy@_(Mw1$o>yzy-FK>nzQ&kJ0T%R5R3zPGq-ZGOY^n;!=PU+&+D8#ndTV zWwohLnN!{y!#$cF7+e#{ROonE%-LyfKdnC*8tK_Pk4XtSY3dzp>Y{$N=%m>-*k_3K z+r)7Yo}|>&oza^cq*+O@2}=)skHp?E#64N+qebUvgLn+_hsPZ^k+X<;%U zcH$5wlSpaKLELjI8d4v2&eh%>T=@;luqy*froG_Li~K0Uo^{4e$I_>7l||1@w@9)@ zK1)@JF<$F;wrZU&i$%s0{1=e7Bz9uMPH>vLCypR>)=e12FjPF&V&DxLFR(lp8SoDv zND_Ij%ed~$K7CvkY&b~3fhRI@i)s{bKBlR=MGKZAe4sa@$(AM6(G9ZFb+N;9A?PH&hdNHZ z+@&?%*PMf|^`V}woX=68r=Qb6dXDPsmW-jWfA%$W9~_O%KI9^ zW#Hh}@8HL=85$Kqw zw?1a+8As>bYkjz(D#xVJUM*vL081wqIc-LwATdXyW}wu-sY<-Lb^=2y==KkEi*Zkz zv!{T=s1qR9W~yutA&&S|bMxO_37EN=Dd(L9p-AAVCCLxRY^rN3&2Ma#&iN*eM^IPl zLX7JAMyEtn{&%&raY#iMgp3a}%`L;b9?)pzg&`Tk67T3Rs?z;NRA&QdU~1qep@_7% z=8O|Z_5nP8TZ8X&w37Sev8uMP1qrd>zOSphNrvJnt2ivO+V9mhUVL9bUjdM|dETmx z;($8YDnFW^IXwvy=)r7+oM=Ktc-O6gfiSbI5fPZF7xo(hm}!-DG2(w;aw=JYn@O`5^Sn**FOBBUEI_mG`NhurX;E}Nxh40GChGu~u=sv8dI|{W zje`y!6!*}K?TD(h74YTZUw83r&$4<=Z&FD3DB4lRP^O6inzy30H!Y?dEvAX#smwK# z_KTG$R>rx!C&t=nh0&d08I#Jr2XQ|xvfpoDKe5EZ2tKrJXnLqvq}h*EnMtz0Ic4Mu zk#_6q?|K%K&c^)qdtO0o<|Fb;gWc4?ZjT*uAvS1)LjuR6`P8tRJxH=PK3d5erBCw* zG6gI?re^qqql+LU=!h(0i3El!h8QP?_@vjFwUk-?XRYMt&66G3AxT4x<>751UTCnN zfaAxK?yLUMZPRvhdw@M%ug-xs8paWwFwO@YL2^|z?o0mE4>ma%?O;vFieQqQ+i7L( z4K6l~N!{cwxpkXw+sgoP7yhO-n-dE|Q|#Nc8bFd{u;2sVSIc!(t;l2%efh7}58gdq zP>-J-+e(RIWl;?U^gtLLT~-0n2;b)#(*KadG&8#m5sFkOu-P3O5l!nhAG$Ae8@?7}vTY4oH z$*y*y6xtm=^#^;xsQ6>>o<}?jWH+teHYoSD4*uK<1ogCvtA&s_o1Bez%bgreg>70K zpih@zdN96s!{@W=o5op-op4ab+C(i*PE|{Gbs0|sV!kYeyEav#U>e#dsxKiGYU0gSfF7=+;X zFRa5ToS;K30hggk(|x-}ueXB4kM+|rFy9K7%y;j;n#$1nTDfjLNiFv#J!z<~t1kv* z+m+(6GxQPNTCF>Eu&)P|uRG$Hab#XHo|H;dBNd>!+zG!ERG5tnV{RIDfTa(kU#&`g z*xA$@G0UH1XMLPXslEOSpmiAA+0ajP&>2(N)DWwqi#?*$DNTnCMM7->?$@>XRxqI 
z&5q@DXqSwH_IT{`rKYM3@~Sja550frUA8MYq?Yn1OzqQmRgz|NPaRX@og#$J=OA>s zPhvU`Isp~5re?VI=u(q>>cOQJKhK$KWL!|Am)Bu>@fTFtZ$!7IuoeMJIDbxKe^oJ~ z`t0C&dtLaL16>q++^6~L=$-6T@zqy^HOhayP>o!nxckGaxSZz|OYXL5|iYQvJbk6rg#Te$;7?4kN0cess=ql}<` z(%|U#U-1#)Lc~dDhvWphwq>0T<&EbFh;iG zKOi-1zWEg4lpb_-`@F@kWq$cs;{%gqGje4wCBO7p|0lt45zbe7bKpi?)O{N@o_2+N z0CgQb-BncYVKc0dHECd>bT~&UkOxOV%N~1=qLqT%pI)~cRENJ?&fUX17}E8`<1fmC zJoC+3TjL=pxAW;4=9CIdx(lzf!5N}pB1rXB%TKgT)&HHazn_2cAQ1ymWeL@+3*SJqs28#OOaQP^K7z=iuBCXm8>684rU1W$> zVxE0z7{2tTg`(WPUuiCjy4;OOVti+Z&-BvSQU5}&Y+#K!LNYyS*ay_dd9g`gjv?}^Z! zY`BCo;nNV-^gkbn6aucZmzN_u_EuVKN8x4r(l&r6Qw-!dQLl-)W8RrU5iH$fwLNnkG5tr*{9Z`;2cc*{~Adm!2&1c|ac(?65H` z>~WB`qtk4DoM-lg$5Bu$IB42%NoT2iD~ ze53tdu?ufCqr4@o#gDa#v~GIaq5p-mx?-;8G(!rCa9UzN5&*K{NGjlEsx%7J)#-=R9NGFj$FtQw~v` zJe0hcrXdk1X&2@a`pPuJu@Kd@)=MNHpGqHj6wSV8ayNcSaXY*bnUhJ7M5SSTVP_M!NgI~?qxLoJ$dLk6yb zOTs9twJh)_qxd>F)LSv7MTo-&Q_q)*0vj<>$DdGvmcgewc0D@j)AwYEWRlWMDAJO^ z8pYC;^@W3Se%VIT>4RzR{PDUeP_nAxbF1Al`iF2N$L*E)=*EzuGElF9uX_k%QsOzT z-BSQ5@FRXS9V#+!+|Bh!(UI~yn+SyQcL|t5UwWz}=S;+n%qR|?XgaGT=bBDx8YyZj1`HL}w|xmdp0ZbA`kb_kOjQtQ%6 z(g_VjYx?S^PgU2!an_LY!TOpU`#qIL1%y%RqB_QgwK?;4;PbBqd8 zkrUpZmpO~CqZ6^p*Y?^3IXGDa>PJ-q0DHMPVnCiK2Z-qw?WsM9tP-KEs6&L(U(p%jSz&#VpbMN1d6J;`y!+tu|3nCmi$~)wevI0-)a+~_8O5^ORdUy$TBYB!w5NZO$QCM zNDm;LOBe*w>g2wx?`u4h8v4{l_B0-&z{^124)BH#-6@Ei62VzAY96`D@ZmkHi{W)D zeR<8(N}-*GIrB-_uQGn%msYYYAXtMDDr}$Q?z@p33@E&-%07%+;jyYu+fDD#-IA|_ zKR%4#Ru_y~&XQKN9%LdFFZ@(llH@Q_p!E&KNAaNM*#!*1WNMiIT%n15H!wT$`xik_ zT`+5uhZk$^Q9~YY&ceW7Kmbaxdy&Y`XegHXW(h@|G<3Ifl~|Z?H@bxGScX=4hsqW0 zr>5ek5~5|Ki9rAvUJosp{RCSNvJ-KCVW02hNI(U~`qscTLJ9Nr3icn*nn?xx0o{gj zezMYBRa6y9xfn@F`b^&0-quPbeoejg#!Xrx3GK&hNQ{l1M?K2?LZKb)e@Q>ad(6`I z6s*Ho=$_{Jf89u+SZUG>QgK zZY4;*qgEd4g{62vISQCZMeF^?hGG_}dN39ZB&)a*lA{7#v6dC*aErq2A^G_RX*oAU zJIT)0Q!P#@zXeXQPUqf&vU|@8Q+15aAjR3Ik~*C&u(hl9=BPc9l#z>%m~SgloyG0u z)nOlWr>>ro>amYZVi)<+_1WxPr;8a8OnDv2ONh)`jWw4J>@A=lF>{=JY3k3T$YdVN zkn*TmCl!y8uIf#!@VnCRl~QVTZ2t3>F&qlGBjc2ce8ym5gu?7QXwwng8$ZN0`Fj%D z+E{B%0+fU=bW8$mBe%bc(u_X=51+JpBYpDFGQY$yZCJ!P+x=9Kq*%{@5uSu<{-3Xx zsVfV5Q}si2gJ;8KV`K3y&SIQmJer@m$o4R?6G>+Pv*-O_wLtKaxPu`{+bt&{N=fK> z?B=ZL9u0+7&%`W|9?0BOkZ?4rV+1jNPTRTq7m#hpr*TzJ0v;*3t3DLVmhBQxmL-$Q zCO;+o2ql`eW%KFcUkieLU|D$I)dB8Cs68&Z@{X6v@nKG>c4a$C38qnp%(;cza?J?& zr}QSGRjVlO|2QDcJ`(IFouI1D)EDryQW3JR5f74IDhxtI+-zy-#Fuk!v~6UEU0Pu{ z7-$X&24P08_^h$J+I!R|+=MQrq*9+gEm~`_Yg8qgq0CjL9Z64rU#)Y_)b@F_&pRb} zjw^h{F?{*^-a8ZzjHB$A$c;`@m1sBpLj5>-ZqX1lB5Bn$JvGGqz5B)P)$Lv7EwsVg zXCUQ5jo-@-T5MHFzM}jR7bWDma&)^bSYa_%^11@0GY!Zjmn^&(UrQ(cp)-W|S zG}el`Bv#Gl z>Z{k|(hG0sc_2~Ie4`E7!tM83JcMGmx8UEj7A6S0lO+ru7iUJh9gTDfCgBPqFDgoQ zULO0-78Eq$h4_xYW~}1pd`HCYsuG>>N!pf{21gpMR_I`~qRM#0;N#HK?x^RXibqA} z7(3a0!^C~cSK2Ze8?Qj`JG?<O)4MD~qn+3D3Box{0=-4X1eY;+}I$2Ws>hc#%inUX_!p^U;@h zybcsep6Fno<)k9<-goQN6$`$+Re?O zx0HVY1TSR25||R?nR=r(WSVWHd{JWvenu`{gfCyW3~{PEVoG?}oioQV@+7NVGYtpU zZ^>V32RluOpRK23)#^*@Tbl3hwrLLSeoE71%cymfL^qL4Z71uu#~~#_VD5bQc|OEJ zC9=Ii1&>@YFTN}F9uaz<0A6j0FnhiKsLvHL#E_8;|1!9qWtYAJkB`p;c4@z+cf_P& z4{9CI2%A*yO19@*&Tu7RVHP2 z)3f%{;XUZgDLjEZ3#4-ak?^-jai`R##a!s3-k7r^`pxjBvtPZza(JsX+ut6?L#L{) zaf?v%81&7|nWw__TdcGUrVRwP{2u_AKxe;IO+b^14?MA>J_Kb+7$1~Y^`{SHNLoY^ zdkxS309{JSJ-0l;G6q59W{TWvmjQyz7~>lpP;WtL%S^#67NC=kZUFQxl%t41E=@Kube!gkdmegEG zkT6wE(1E}2MmOT}+yUnST<0c(HX8`!B?!UkO~nncjF~nmupVoRE1tqpwg;LjM3Q5e zgHhY{psaT>C=nP{o2fc`8Z2IwuFvs!=49Ac}a87ps?^0P@ZyrGL zNBLV>GaTq+a(1QVvADDiEOKRAU_RYwGcGOJi!=bCBPC5B(YgW{Z>J>t*0saPM>Hwa zzzR8X_LMOrx( zA6A@$>G#bThqt*+00q;Np4(6}BuIg7OnZGF`(5bV9^~8I1a@VPPJnha6u6RZ7b-Rj zEQUlQx!Qwga|tSEK%||BJ?I>~;#_x!+)D(ULf{bD3JCQNHLt@a;+L9YmhB6aX+l17 
zz-~o#y}EJxb7QD2^ds;a{{XrV<@@H8v5xuz+|toKyJ?Y|>OuDLLA7ytaF+O*(Jq4k z4=d`7e!eR-S;2Fu_;u<^PgKw1sqx~pZY*7uC5q>r%uQz=G1S+b;_xgnMKoZe7y6fK z3F4EOSVwVe@XXzHj-K_FW%Zd+4y731wQC+feQSEr+(xs&dcz>x4{r6(!!7TlH+&Vj zk+$+f{{UF0?c#$ekgg}WN3ltCp_5@kx0w_}Q+vn#^? z0JLS#F^-7G^8wWU)mv^KbA2RM@yJ=`VXOVs&gb^0c>TjQ+;*_I5Luvw4`Rb&XfctL z;epEoojxeM!eT01vjMlY0HKa%jet_S6H;c3v6XRvdILd8HIxvk*zP{{eQP8UyMMG9 zT+=rkHtI7Tl-0~#Nv43-^0SL*VP+@^Yz~Cb4N^rI5@ad<`l~wzFxHXC3NGtinfl(rDVA-D|kkD&Jb z=sF9C;)_EaK$DdobG36|9YJtguCM7tTg62UZ)qCma&mK;s+5;i#lz~(#CV|6A-Km6 z<^?}JWTRLA06dSPkJEEY5R+TQa_kF%pS?#5;*Du}6#FQQ;~&hYVo&K$aeD}DE*#pB z)|V$;vksbk3JgV%iPll+2e+DwY1w9BjAZny7m6oX{=8LVM~>-=jaVALN(U}y{2&Bd zjp&wer1s4-$jQEONEse!)ub#CoB%2NX_+SZPJy~+fwj2|k;fyFsk)QjYR_xg;xdgs zmN?BhC7R6UCY%DhuKnqMmFL_WV&H@sY)~H7Eoi936h;^x^n2SBeRyR7H2%4uMkBU5 zH7PjiO%WtB55!yr2i2XAiUL7(3~9>bqcQ>mb|m$rfRS>M4NNx!!S7UHqX^1EGZVL( zn3dp_WK)na>g}2a;+Ahtv??*FzVy%+cSNv}N}cBQ z4TKro&`{gWEAq7sZR&4eDX4jtkT*VH6r3)9HFM7xh}=Gf&cT%DZuGp)jRPcrOvpoq zC!jqj8MlHf*jh3Q%M)wU=QKN*ZYEa{lBYheznZjH3mJ+;aAZG}e|0-?a%Hty;)@a) z2zB?z_fRixE)v=SZlRd^K|MkCr{=e~l0gAQG9u-VbU*RySys{BLPiFdT;v|v@@mHZ z?kj;5rwYL8V1Ht00d%(WOmM_9 z(wBuIl4ld*MBjhq#RIl{B1^L+#7(3bS%Qx1ib?G*t!!exXDbZtqd&^tKWf~GTGNDE zNV9}{hB^p4X~*~1n!CJ~+9P7J^PrYacdK9yF_A$WJB|FKOB|>i=hG*H;)lkku(+9C z%G^SXpvc}^ALp7{$^>~#MTR8=@6dbHOn`#ubt?Ko{{Tt_(1_(%XVst0;8Qn}hmn6U z{%T*Bmr=052M47d=2)au<)H*@K<_~!RD#`=RJnc0AInTV%(2I36s$yivw`7rxTq6| zVo1^eU5*ZVQTUu|GQ!cQFQ+xt;t<02v0_BQ)#P`faarxAlb>q{jJn{T_q)=T z33DBw0D80O2m7e91~Cq7ZLUm$F|waD2}^id9$zyJ7xk?;IKm)8;L^8FV39<9mtMbav}^t$ z%(etuyBSk1q->|9BW-sRA&%rXs1h^V6brtoV|R`=MPsWzNTQ_6EvSlEbrs7NZ+a@; zml%fx42%wg9%|7MeH#;?9*m&)pdFtQy|IJkZDwUMw{Fzg6S}<7yB0&fdJ##i8cdS@ zNYr2r$T_AXySb7fX$bV_$?|ugm`;Rq%fym6G5ZeZ_oby(l}vCuDyc1j{N2y*OhIN2M>fjtjO9MQn=o>KlCat6TfwF3I)6?=3LkOJe< zI?+~bD+4A5MqOLJ^a{s&BvTm&rY2Gpd()QJR@WtME&$Tx7AAtxM|BGaTr!_X6p7)I zPbRSJsgMcx6du48vW{D83D{{-x#$PAWL|LTrqw&asYvR7DW)x6}ug+#}kv8r31-;s=Wnc$tAqwOUA{N zY;mxyJ|BC;#PPojn6pLd5ny_;L5}=7;ya?&F6|@^OO)!Q8YHmXaTCvHJpAVesXq0x zf@c!ln>K>xSu)9vyYEMq*N4vuV8$z?8nC?>&|+P1yQ^ai@<6X4T|0hSh)GxifE2Oy zVz#X~ieFl-wZsh!=-!Y=)-m3#ckt{izcX(xlq;O}`_N^_raAeGIgO*}-hHdk+a3?b z;pDp_-eWk~-kQr9WYlQ$1x_T4#r8t}MyL@KGOE^gC7q9AQHt%>m_m9lB zf*u%Uj5rc!OA>vvS04;9u}_`j7L6eKv79OWs|STc#qS}|miH4zqXF=7iq(&TTpa4+ zR+g-KtIBix8VtTI@UI<-{%Y0M_5snN?O90dvbxT?f`FtyR5k8xI3?US5y`{2<*Au> zD6;2Q>57kvcuyO;al|M1eh{&!4<){$KkYY)4-qusU=ie%CmK&|cdEo;oBdrzGuE7& zlgi1`$0rT%;*oHXpp_7YKbsp+RqbOe(Fq+G3UA0PnhuKE&0m=yVmcI~ayVQdy_`zSf19AGxVVaNV~5kE4Mg?MDKRFM zC=PMH-RKNNAS&|ajGpU_H7{*>5$?nkbUy7{enR z5;8?nD|^Vnl&F$1u`9S0Ep0GmVgA~8h8 z8fK7oE7F)-t0vNqXwBS_)`I5%MjogT2my+r;)}IpaqE(}j;+r0kD8l~r znROhoO2TN^YitkYGzUwVrcz~7ud2v-i9sh`B{(D-P9e01`8yX=S{?n zfPEr>SGP|YZBd3Pw%#dAo+EE?31Ou`-D^_ggvcafnPnrWtm(KDnGEQ;az2qkjJB&Y znIm0TrR0T_lEWU=wJ!_1fwXBVPi3GRJLsD)Y|fnv0npH9xMp1^ZKzl*08Q{KW>iTA zd3rz?$9m6r*(5xen;kk(0S-3qE2*g9RbRwkQTDE&<~Hi;bP@^l)~6YaUD?NX5dq@? 
zPuh^TwkbS!7}XeYw}De=8QxTPWsS0MO#(2O#F&xsjp_dYpOV{ARE}ue0CIPv<+d@% zV`%V5BoZmBTWJ&cOPt4^Ko7wacqKHRC!%WiCfYgI&ZgJ zSDm#)k&iSMRo%ebH2l}iI!A!qxY#IM^q|&~;#9an4cf2aMhct^ZM`MMWVN1iZ#p0dDAW0g-+XqU(mYnf zi{?HtY^dO2ZXy^t`wgjcz@5|!dx-8K)svu&c+T4j#z4i;D-oRMJv!5xWQ>=FW>y1k zTWLSifbv<~aJltLtg*8Yg~kv609!zJwH(C+yn(Qw_NVOrBDq*arx1ZJBFDbYOq;JBgn7?V0sqVRi;a8X*qEYdq^-2dKzt2jAGs=0_R3_j)eH2X*5G&h~yJb zZVsPURq9P)&v4R6#wRhZ+0)lO>63JkNfRtZC5#Z`$j;)2b#A&+;RYj+mdNdnpwM0% zLn3lznN+SxInqr9<#{eOKXPdH(u=v)Hrh29361{rDNy{;jxsbMB>A8VB#nTv2B!k8$lj>(d1zqW>Ub0=m?-!E3Yax-)aFMmd8q9j48N91UA$J00P=| z20MUv27=gG3L`d^2d{De01EUR4pDmyzYpCwSq@s&$=P4Vd|(>ITXO6SOPsC7T-Rj>}jZ43yAr}w6g8igK&zgEJxJY1P`}bLQoMwO+!}r z%?7TnN#^~2DPefgaR!qZ5*&V%s?R#cpq+uH8%4y(v7tHG@6v-#?$#C|)$@!KO03hs zoVk?f1hV-w_=HC7Ybg`eVgNzb7Y5xGqfoHfxCGH|+jmF;91~o_- zt>rGRqagO9aVuESR@}|#9;GDal2~U}(Hmwa8=o{9HdkQVU0bpd&rA)w&>&!RqLWjzfVUSvD#>pYT9#07Tc&%_B9OE}WMxuv z2=*W4S@JNGsU#~FL7m3*$T^3b66CQ9lfFFAJ!qDYpT&?dkYPwYG2VrHbWPZeS(OoV zf;*bZy-5;4%8sOM^=Ko}|xhl@vD3pS@@qJK)fNwcqxQ?IYJS0>F$q^s*SRCL0>(ogV{DU; zq<(5GtYkY$Yc7%(O9t)ssp5|2;$N3Vbr0z(H^oKL!wgX>5_yTNVBn9r??G8m+({zF z-mG_^v{G`60RYbWk8D-d=Rl8u0{HPo5u*ZK&rori1uf!uwOsQ#WDOhl?X?|Q;)N3> zOovb2nTj~uTtx|18)vuLt8=VF3`I7B)u`{Cs4H-b9qxt8AnJdXuX2oTL~6NI9cxO> z?f9P$0%cg;n@QUT6t&z+w{Fo$8F8HF#R3srv<=PDTNzwph_f9H1Dpr-gZ*gs<`+!3 zToOR0R@%v%5=aOGU^>ueClR=EBZgHN+kW)KcQS^NCWi%v6!y(9wp=(dkyH$gzV*(~ zh?+7EkUICx0ApTUg=NlqovE99kdBXT{HD|~Ct>%kP8&77)B|0xG8MsN;;|)LbXE%4 zT}LB5H}OH};l4NbamOvf8BtiAW7V}3*-Z*O+5nCFHiiTH9xeGILFe)Ycx z6T7{&W@isJ0uh`6k8kmy4=yVXo57vc-G;)Ed_hX#q&5R$`#%PoH`emgeHrfRTjpwyb}B z3f2a`h3;B$_-ryeu&c##$vZKKh}2-7ziI_ds44i7X;s)~+KUyoxV1$tp7`@j-CtYG zf+k$J-y>~}20LYy;aKD#Hp2%Ppc>jli6_h>GPvDWwPsJiWt9cps|{*!p^s7TTX$20 zWwuo+SL$qTXeKL?=2)15ex0^Ir)ms)IPPs_B49Q6Z}6=zoQ7ykv5!Wq5*yRK0^zP) zg}JseGiNTIKf<5kye4~@1gUNF2%$ozcL&Kd6u039R{sE3f=NyR8xo`St&3lTZZ2g8 zW{w~aK-oXFdL*}HSYVHYT~9io1~@}T2aJ#~cc$&QXBC}@ypB5{t%4bWVg57#;hzIq z2z5<#(I_}A$_2kc;C|* z1U+$p3EsVT!hCyxaG6BhOXr>_*-=(2hB-b?GB!Sa_n`8U{{RjXf!yfhd_wp zw=$PG-vk`v{43vlH}Ea3ydq0~g%$|d7kjQRJ46(8!mENao};!!ZTxZZ`|+QLUxn~X zds_(|NVK2j(piQxmDW8=sGhyO>+^>i_=nDRS>UgK{s4rgX*B#t*K@*|XGKK;#W{0sbI_^05D+{45!?yP0rNIC4R z7jA_2+Jn^mL2dA*x}H}tMxe9Wu`yjhM&6YAh^64-KNRpj7bhF|JB-1_EaAzKE|o-) zDC#sE>)SccO2qyr{{R+j{{V=hAU+?1gGPlwno7 z{`5RUsY7G{cO7=1J-miPsL736nj=WjhGaPejDGZsXjADY8~w3Xf_G@%1_bv*;(_$L zVIdKm;OBo8W}S?TBO?#KGm1hf28MZo7*IB&$8iHk%T*(=bK-+a8B~S#a+6^J$QjGA z>V1txl3GtHnH)q0`=0dUy^^`gw1jRBcA!xso<$-xa$_2@sj)Rw98O}a*6IUo>TFSD zyPcab$Rh;q2THBu+!ahSHleu(7(OT%j%&*(%Lo`L*yVQpJkai@S)y0A(xrde25s@* z6bnm>`$r87`iS~D-lhGG7Wzv|$c&3_V!O^H$I{+#J5X3#K$gaJnOT1-jAt~Gb4E`v z##>{yYL@KK>sXbTHpCH(eW-v-DwCj;J7c&s4F!#(UU@QS)sVGYyLja)*4GYKPNE<3 z;;9S&o%&ZnYfwW6Gd9U3WR}P-jVV8GTxDjQ;?g0?nav%@`v+aYIvR z@(>s)J-k)!VUflcDn9wF$t@*WNeoyXyJCYy?4VS}ph53lSqk?EIY2%M+>>2FctohL zaHN=2>?r&`-aFfkAmdW8#R(^p9v&bHaU_AirC)`}7mC8naFTu*$Grz#Tp3?7Mm+~W z$F{<$4b;jAV}&H`LY2f9a>0fHU61vn0}EQai7>?H6ba6E)OQ4Cs&y=iq^QSwv{21A zGC0Pd26Iu)M*`kx)CA~72B!?M3lzNBbIb=A^Ffv?i)jo>brMy-YPSl#PMvCW5Y!wq*G+fXe`^j62!i8?t@|kfqhLy^`(~drS;;hr zAaK{#%uL#WN7xO3++(Ko;lN_*uplkdN$H7(RfGg{b&|`A8O!7 z7ZA77gn}9~t27&q4=t>Gv?gYh>|;9tRhCOT*_CIvJn+OeKQXF*Xx>JGc6r+csWj>Olw~8&S z(aRyYx3vsa@KL%UC#?dPCAe=A)XD0MX+8+)R7Q$xiKat=r#Ss-+pAfxi`)HjDJ+b8 z`CrnqL~|i!`WziD548tK_;o_sfH~f@rHOEwhE%WZNh?J>MP^UL#uU@mcM*tW6O5S` zNT5d2IhEN$mJCi7uD0vWDavF$YIRJP3p(N~ZV$JL+Oy(z_eqmo_10_7Nc_Wdbjn&sD+htc$bP4MX2_DQ&8j=aeinyh;eHXqi5JC`ej zmPXLZcRoR?-rGowvc>8~>FrIJbq-?%)f*MZe6Lz!2%}}yGMzGRKgNR~#mkIqY@Fv5 z?X{Q)(uXUnb*f!_zs$ESz>YqE_uiS3`Wu*#21E(@e9&4BCh#oja@v{3ocm&wPJ6Q4 
zM)}eRVV>0M+egHQG6g`&H&yDwownipJ|@9tLnJQRPeG(~{b(>Qq-)IQAVx3+Qrhk9 zqr)j>#>2%wc{E>uVzW}pA){UQqyGSB%9f2Q$jZzz zpyX9yu!dmKEc#tnSzkNWpBLhdda(z!#4*HCw(mig0!uQ2gmV>5F>i3#Yf-Ws5_~uQ zbghYBosUcG%oKYAL_*QE>_v5w`ycxx&_5(^c^f&G4GuvAy&)9$Q6j*rsR`>;w!gTA z`MsO9F%uZ2mS})&BPb{>6p)e6c%LY6%%=o<)|I5te9n=R45~oR>!=7iLbB7@YF1pfe9%e`B7vNtd`iIdX9-@P#` zqlIzP&2#7_NmIDh+N}QoEAw*OylUoBUrsj!?~_2U39zzlJIs`onMk+S7_6F{U_b3vvxY^bTU!?c>e zH4KH$kTTtkT{9Hw?YJWp2AVxEWMt_e)vgGemna+Q3;NN4Q>k&+f4vV3RH%I+dl~>6 zX&p4@(i^2ccV}=SvoXe5L}U%U=yuRZkhSE|H!L6ixYwh#nk&u}*YV4uRFzOc;c>a3 z@sE)Bldf@1S<4N*&obgZV8#@E>r;sE9ps%3Sgx<>Shj%2aJ=Y?0CbV}pkhcNk)*f- zuHk={ivpOqWRg1&@{ES>_Nv@N%aTYDAJp2k9)MR*Kh4aM^7O|N;{%|ev3Zi*%&`J` z5$)!*tYv|%rnP0#T&w3E1!XHc$@p^myZZ{!;L?}4Xk}&Ph@9p6YVANNY_yU?>Ibz_ z803wbGIXk7;CIbJyte{KBUv?qc3y^w<26^glASS)~^O@D_#|QeS0tZ!vMDMH*=Ab`x*dQTsq1cL7fAj@k18~VUj*+5-Vr)tt*R2 zt+^%`f*HLh*w#dr(lO^p7+f|Fy#(QsAlm&T?M1f*XATKrioK1Az2Zw|2vlX%Sos~P zt~0~W3b;rhl$~pmPv!QY9D+#4HG$IS(kPG#?jzEmX+D#h0T(h5)5r)!7z@x;uNB`( zh#?=-+JTF>IHi*FA)iVSj8d_}=z~xtkEpj=b#CChwRq&4REhMfbj2RtNIqI}2ANTM ze=_5G44FM!qcnKylh%vFt|qsbD>+`I9QHM9gn``H> z&200++{Kq^SZr6l0cN^!&6UIJ9+f?45Vg!;rLD^>V`&r3RVI~H*!nTBK4>%|yPC<0 z>Clr97-Ka0ISaZ5wF-ow|w{ zxs^4^3Z@l7*kFT4wc?gbhqTU7eMYSe7PnEf(W#Nx`he>|2&cQfA>)f%owX5wDdgc7 z8CJXE#@;dYft~%uKMlLRv~fzt4M%M~Yhdv12@VbQ%Ss(^cj-ZuZ*lP73ymUJ=ev?z zjakk=r4rI~o@P8C$!#Fm1jq!BHLq)V!8m@iX~Ldm3Pw&bO>mwo!7n6R5o;ZjJ01E1 znhfvl?~AwsE!FoG!3SdqBLm-k>AOFUxEC6c#~(|Xq?ghRFQb3d8rR~S5J5C{^F)49 zV}^C#BC>A&2e+}5TyXv)2g_XjF%0Sr@3*xA{9lHAJvR^*yf(&Iw0fhGX#|`3*i(Ev z;V+CW=NDW9h{Z8c__p$lhtEs`d{s2B42VY$44O%^>ybdLyW8)Kt!6hiODY!GLXTD` zJxm-o<1}fcjv*XO2BNa6gmL=x&T;myG4T%w_=AjIHP;ihH&+P?M5Y!Rkymg?$tSOB zZ{dzKW0pkY{7z{dveIPj9(wAn`>)~`!+aV`d3ZMhwSwiyLB%bH;%(W<>ORB02hQz& z7~|YQ%1gp8?h<(lv2RY@br97~A^bA1EW-w)k9dGwgbe%{}D_J0w78U8H4xVqu| zLh9mT>;9{K!^mFacGc`V3PXf{5I#Ayv6lCRd`s~X%v(uQDrslH*#3Zi)E_mx;nBb+ zw3mfltfhtwh*y8P6nlOOytFqKZxR8!YZdms9r4}w5wdu{i~j%)ClKMTG4qMMtDm{q zy6=wF;XW|nAHy`h7hef~5aqkFbr`jj{xnQSLxt0TdSLdT@~^>Sx3ZCJ3r>nmjHR=V z^oI-Md^ws(?yZPsKVP3}^cFljgrtC3{7TVGxz@5DOD}_g_oO)Y!$vSTowbo$R&rWs zN8vZvb)W+t6?U%owgLoG{?Rd-Lx}KrxP8CP+FUG-2Ak;j5wVG}v zxS5hb!AaX4C>phbE8jxk2)#t-uX^ZhiJhgl-3~?#D7N#YjVue02IiSJlEnrRGpOuZ zfG-P{G0xj$;0n1ULKZ~A1AuB)@H&)e@JIlYMNCWSU^!Xo??KkmOB=2LXwQ>Jks**` zR4DyH9Vk*+#U>F>gW8N9Sdd$=<|^+%Arulypo8h#IjaP}x18u)#%Ze=EdY5ZOdg80 z%=aNR9mi@2A@l)&Lw#Oprb5u9k-j4OoOcGY<+_#`1fXTJL;lx!2#K<}Y6c`X5b9k# zigG#IN zV-mulTds!ge@eb&z1iU9zA`*eGt7+a@@eoYQ0fdYq>hAmq@=dEPexEKKED3+dpkLF zg+ZtRocA;xEDIvCmC``YYpJ1Z*Cc^*QHN%(p!0LyMA5|_8}OIZee+#wBv!^bSg|86 zxEt+5zqg7z!jPXq8&H(X%K?a20I1J=&}#8#wvUJjS&E5aQ-jwfX&56UG_2{%Qgf5P zwKscqB@v>;ywMthTi8-?UGia)4qCS`d zLh@?K+^FacOGM9b95BXnL2wwxmm)#vIAtAiUWzi)@@9k$L3MyTW6fegZwJgRypC5O z*43zKV$5fj>a7^}9Vj1WE5v|k^)1v$z%OP4CYiZrnkfXHkO3t30+49d;@BA#)|BGCwln zi!;F1>1%*_tGV%0BAzG0ey_)}aUMLT!JXaILmLRKwKAynPcM{@El1l=B(oAylujTd?n*6nk zK#feDfjiST^Ck7fkz2u{K4UOH&G+$3t*ZvpuPQ%!3S!l0g3eQdI-Cqvf6G7-m~I01>smYmOnftn&bt8-L;9@-eZ)4`QKj5Be72E`2;X6= zJ*r^J!dC79K3IZAGOT4ZuPKJ5E!C(o+7hQ+Y+W6_`r#EK_~31r+g21GnQdtfD> zp&Jq6w5={7Ygw$P0mQ>9d(zW$;f$G^D7~ta&u1N)+{Awwx(^fz&j7KP5ua0iKEkhO zstb}1SUNF3d{WZhytAvg1c8H1S;|>~DmFO%4Fb06(m8|>S&vTwoRTEBM3rRrBfT+Y z5}S*mZmJ(W{1Z$?Xl+tonRLNig=4-bGnx!q#$!1u+MC{{#{IZ#{M1bS{CB0}d33yK zk%Qi>ZRJp8kY@m#P;Ky*i;LU?t3$KnAD6D#J#$WcMQHpy$1F@_kP?hLj0(r#H+Ggb zm)gnsgQ21QGEGZytNX_0T|<$jcmDty1+1^;j(1l<&6CrP{b`;qi)xJthdQ@DL8R~8 z^UR3D$j4fwoJMERb7TzRQC6!-Ad{N{iro9 z?qXz8`c#3`^rx4BU0*<#`Z-ZMX;*EPkF{aih@CMhI#)R#-A}(0z5dLLhWyX^x;c>m z>KhaqQrt>7eb)+^m1MLyj~x&E=Cds$;t|_iNdh}(W;phwCb*JmNs>dB@8-AoZy16u 
z5Re8O(Qu`*I<5@_!N#rrMFfo8;j^!L%H|iG%NPn-_6^q=rvymy2YG5T;02*m`oy!;;Y z+1;a<*LE1(8a3><7g0!Xp%c@n8qSx9UQZx0GPa$!>S_KLa2h#m=1gij^`3yxW=hel zvG0<-0P)_Kg^(%2dInB=(^FeR6!~{n&rLLF?JX8AaD7fXEeCLki#XLL6B|=^mLcIQ zAWy{gB!KQKPSVQWJ6P_cF^3~`IPqEU!}x{N(q2UfV5ItW98f74yt}tsd8bWkx^FUtpa3=S&XRIlR;g&@7z#%a$liq zi?HBe44>aMd0mD}L`F~4eE6h`O45ELdNI*Qapr;eAchklwRV#`9e!vZ+2={rcOwM0 z)MvGLAz2N`4JWA-c=xNps#lH`Y`H$RJr~}ATWd)om2M=DRAXX2sFtxwCB)Ia#>O^r zr?DLcKdpJfGIgNXk#CxGMNT?^u<4B(o2+x<%=e0xGBWGi7~4MelGkl3iEfXoS0FBX zR?iNO{b%Ob#;&7$F75kJdP`4*E&)r6#*v;s%A4lBCl2^8iE$y!{5x34$Yv)e_wTh| z;QP)jE|GYJjIORf`<`$s=+<^OlNMNbWz$Kd02(p|HywuC{{YS?F!*2a$Aeu3GRJj1 z+DRuzrHJe?iu4>m_zCdW9gs)N{>*t7M0yXT$&6&`{{Va3cl~#*{tNh@j1-MdF_f zd@;n|T$eXICTSZM*O3y|u5qg=_wernz^fR@BDLHx#$k{%Vqt)k?; z)RyGr$ruE$v8^k&_@jX_hU(@!gAm$i$mPf}4fgYX&plEpS5T4zA?mQw_Y6ga=oIRN|Ft;*JJ(^5Zp>GA29j5DNx`Mk<`}b1LL>c z+3vm>;-VR*QnOi0f*gAS5B!Spvb>X9q;kuaOtJ&N>6#LUjiQkxWM=geht)v+{I@W> z+uW;0?E0mMje~nt%uqd#GAVg%qj5@JUQ2WSvmw{M0X=_O8mp9JoB}-1H4$ZTA(#!e zP7<9mFBYp+f4|p=vkIePfvPf33iQv>DGYK z5J*M@YM_Obj4w4cjrTO_T{~NXago&aq|KrSV=HbzJ5hzLoVNN!(Nmt37Fc6>1f}zx z_US{qTS%8&plS!L6x&4%Nf4Oltt)nAR#ixZiYPB{Y_XkDv0zk_iZ9FCuu&Kq`>XfO z2HD!?V5ATNk+oXx*rb_?ake(j^bJFzH6e%u9-XO}3(DXwFujL*%Yj}}*(Cn}!k)3Z zk>ya4V2#du&>NKO86}7vN2h#J?~7vP9FRJXYJILn)zN8j5MG$*X-kV}u33smyQw=h z0U;>%?2<$>*xdgBN*m703ASY}I*O~>#DQ5$1B_%Hs?nSuNYrRD?q!xakl^GDW7@53 z@icic0_D1QqTkB$qa#D1heiUCMiB!cBw%g#pwSKpUQ?>LKh}uxtj>+iaUciOKf1E{ z%G++It#aN?Yd9)(9371YlSm<$4p2BBn4RgUuNq7uqejDRyjCOeJFAz~B8A6FHJ$9q zBfR7istCt}L9EhADQMkt7xQ!`x|4gjpO!?VAE@KKbpjXC#*zzrh=(#WjY?=L7FmwP zurQl#nyvmMyLm7|$a?0O;W6c(RLT)XcOB?2a|A(Rl^njMY@B{zI#yd2k+ibmtI+-t zf=JK-p@(jO^);0xv}NSw%c(l*psXA^;(JN&VNb)0(;@B#dj9}gC(A=_R$GQF8Fo~k07G#xWV-}q(#4h`1?@+6FqflJ!l5tL~X`xe?hBVv^Z%wH~!=;T^ z)E61Yy#{iyN!{@qq_~}BeN4;(kKTkV5+Oj@Y2wx`|61l`MMs%>$xYIUpcbXBmA%BD&2M;iP>42;&(8U`-&8FU(v3QJft|KfP(hR&E?qm8hL1f8E}IH#k|IIpGe4iBc$O z3qh&v&!PURes+KrctLEaQV(NGU0YnNB2*zk{GiZQ)>mTCM#>TY0GN!>XNEy1FJ>dA z4Kr(ElEyAhm6NFy6M6EInG6cVbR6_KU&d~hQ{#%mx@}|h#Ti7 zmb<)`8)TXuT7rR_)6}q~EhI_IQKin7lx4QYTNDS9H@deF&bV{+usv%F@bJ(QSm)AB zX^V#}VVE*WojQ+ARgo$frDfHqgVutq3S|$~=9;!+8B&jc-E&K0R!QrMBrlX8j+7dy zEVh$;#3T^I3^CT1y1JI?NhWDEGo6>Fnlx%AvkVDVQ%SN0k?=Ggy1dXd@GZE~?itVW z(fX)})Wb{aw7ZuEpM$taL6 zY3&lqk}9S#z6}E8sC>GD3I3F5p-CLY2=!wl9fdC_Z9$lA(-o)i`$26qx3?owgen$* zwj4(1hb*=?P=|;k1?+N0+ty+`Na}J>mikG^AEjtrB$paiKtncNe#50CV|iuN5CR4| z_X2>DT+a;B#|6Rl4^>1iup>(|wx{}c%>kcIjR*^|-`=jvCX*bI>LBgyK@ojK06@-i ze>EN@5-Hd!w^|j&!Yq0t8DKX)D3(${@CaCv-E%L?iC?|X4GTOkJ#mn8TDG{jt1;yBV$Zg8>u_h zngZ7GDZy0wPAirbZYT2>As8^<)70%is|ecMN*@f3jmAO$0F5Vnc`c?D*CT*NPfu#m zXH#ldLacnlzB}h?&A*LSeQ=#fLgj(z-RK0fxJfz5q@wN)_&(LvOCsC+*^?mkPn8|n-!7BXWx2|WjWs)IpMDrErNf_F5w z(ftet_&MC??@Z1lj@)zN$vp@(4BOtLMs<2Z?g%@RR*|0JTbMHBV?y>{u0Gt|kgsp@ZXq-YuP*iHcCm9=2C640e z;yKN>mdB{682iyJENn%=`MEWj6tVWj1N=f{y;WFTU9hg(xVtv)?k>UI8+QrrE(ws} z8mzGZK^k{=g1fuBYk&|Wcy_O|&dtB~eLu6OjhdsXzWUzVa6=8EiNp-%g@rnT!d`K@ zoVJ2N0yapBi!Ij(h12xSo`s6gkWIvp-u3f?QR?Zr=msptC+vfIk5odao{5|-CIGv}KcoA@qAc?%{(i+6j z!~DBv-{)@E*rO7Yz|Z*!_yi&a{H43Hf9(nGW@>HAoj(0NhK)m?GgGCM5El->w+sAI zCh^5Eh?pHA+(aYnCV@1_jZKA4{y>tg9SKQuxDzwyYtOTyc*l8+9c%L=RIwlb z=t>0Y$3|Yos7z*YP! 
zb}BygYW{)pC6F?zZPAagK^TmKG|yYd_=@ofXW(*^FL_fqMVLfsy^dWdPM__wg49B{ z{KyZ#;}FLU&$27L&%aX0UL zLyb`U=z>^RufjJM+WcFCPUC2^89@R5VKD_LqUI~>%M{`=2O~@6FD+j>tQPg9%;B_e zG~8kXz_ z9aI!yu~Q)SnBcBF53o@#cPU>oo&#m0niGGaDpYX;MjgY*EU<>pOU)TwfhW|$*xRj4qs1Zpd7N+j{2 z&3z*Nk6?KyR_?KxU@^_@cH8LsL9!q~Xu3Z*)9H(q6die_V;)zOs{w6|f+J3rLkV-r z0^MSNQY541h{v^5K&Am4Ex8>ryi2UfWC6Y0^8?-6=}o!iTy0=dGc5kXnd%9U{FI_4 zdQ(Pt$ZCfu)0i}-Q(FF`@Q{9lZz?~5=LOKh7ly2_!*q_cY44%PRH;9U!AoNQDF z4&dS)MfN!ysV><^GBQ0pc(P6gcd1J`!hQyZ4}$(_0dsK8%UVO#PJ`%x;D{ywz#apLkgPbtm~l<9})I7{x}a-(0ra5 zC0R7twn*bwJ7Hycf@H-(7R6-=^rr$9@?Rdc!jsVg&R;dU;6i62jNJHt=REJD$5Afo z45p)qe6zR7u*eyGa}Z*P1@fzv(IkZXlv4a5*Hm#l4|fUMq*x@u9_Xh*`h67Ky(%<6 z)RJ%VTcwSiLXm_uRh~tUI#TzSV5Nz=32oe03gTi3d~lLbMEMc7W`z0k$n`VG0*z5| zGLe;|b^zXwf*CE>Cp)%{4fXo)dbvyNd2O>7DutOH%as$E_aLJx5M590mw=Z2@u)sZ znN{tXyrOpG545FRZG!QZx?NZ0rN;VvoR=PX}r~Zg|#G zrwhXS@Rqo$X^M=LhJxk|BE`7qPBHVc*P^ z#5My<)HbDlQnonNI?EN@I&&<4$~fc}FMi-dH9+%i>j1CHL{Nb^*sPa{a*MGl^|LzX zxbzdQ7~&7_VJ?po`=7MhoYK(iLy6wbQ4Ji^-UP$EJRi1@`%L#sVD`6`U_h@CPd-2zMr*0}&?W0A1W2CQ3c?p_* z55KAgR?W} zd?K#n_zHJl)vGN-6M1&SKaD-!*1KlDaa6WX;p9RJ9d45&Fc33`3=nzsoj@ z(}>a5<8W%mY9ebB_|g33A{e$~AHDH11Q8lTq%hp4@gs+Go2&bUEPlqm|dZ=>xMAzJjn=QfizF)?1Rp^^$ zl6(NKO~#e!S%f?4LxR}0xctXR@>pgHqj;rv$XbO|Hg+&-(FY3II>3xlnuW=@m~qxO zFx?<39{cuC%0 z#__1%j1BJ6wV#vnq2o&%qa9p2`a*%4t2 z8BRyo?*5h`7o1U?HR_g$vmTLRl@@{gZe?g!v?P?Y!u(lZsXJ|vYscfG=9{7&7*bgk zXA(N5_rvA|`(IHa!9LV~(YYMsf3tM%wYkhERTyJ+-=8!${8T8~eOd7B!M+hEFa7R0 z=!CA@Vc`ogdkNn$2qPHBRKD->1sBSKfaSh3Jwo9w_!AF zj`lnF)FI%%o$_%)Wm%@-p6ARY!-cCZhVH9<&Z>nj3KUB|O_8i?mR3cjrQ#a?W`@;t zNyeFni}Aq%eUK$09#vJ%;R;%_rbQfRQTi7JJ=<^^;ewoNgBp&SORyT@Lc9k4p~+gg z9$9^%=-VktTWeG5C~V|6#e0gZ=&JTlx(x(_oy|!W5vNY-T%jnE6AG+I)|B7Lne-k% zXP88A(cosBWxHgdM>e)MgJs65Vfq75DXYz7!9K_{CCw*X`xtCh(^(muN9)NxT4>%& z?R;os?kgrGw5Zr4jMwrpg>V%lgv}uiNPujE#jusB(nI3_8P2__PBw&oe zeA+j;5Z-~{D}IU}6!0`tN{>)a(tnu3;k3%6b^82={%$wtb(X{Bv1+!fMfHsMbPKBM z?0r;D|6Kdz=$!)-!iGWdmGjTcpT8V5jFp^=tO4PHf;WtK4PRONzf$af7tl=9o%DdM zzLq++t=qG|=~QEAFoyR=;$vvuTWAlgkhl_dQjle6`s7fjQ>HN70j zE}c44RJJ1B1;gR1zKc=*a52(&XX{n;UyX6a%+n(pYm6Z`+u_q1C&MBje*7rfACZ+? 
[base85-encoded binary patch data omitted]
zl#e6&2T_>HOt~5*!_en_#oCE*6aMmrceY^^kNlXzjLxBJiP3o)5s-aN3V}6rXMV5Dz;J7Zf3RVt}PO3Ou#UGE+c2);|gdW07NF#M?>S zSDHtqUvmNJ?Q~r6_-dN@-TOR^a#LF`4BLOCtd&-fCjU7h$@{DO17WAF>2|t}QJFj9 zvZ*6{q}Y%#iaZwtDSoe~r^;c?u4R2tHk=3M9^yt)?4JOqgAI{fJc2|hQ_8G~1dUiZ4xX$AUWq9}&bgUg6$l;=T|P(F}73G1t-0@+xYu1Q1S^5#yh5V|$Hx#~74{>;(?r znxw$FQeNG85F{}vo2@${##IX~eSFX5)j<3r)hA|3&()@Jot-m-B`yl_SBdY+Q{Kl{ z)ITTYOis2{6tK(t7>qH>^j42n>o#hpzBbI7B4wEUI@$S-CG3z0+49D$nz#O1lPmdq zh4`vt7@J=8ydCklVA&SZVsieAXi&5O_5RzyQaw&^430$|h&NcbVIGp|dh&9k^alH( zJ#q|P+*jE zhAnIV=W~(q1;P7NrZbRQYFa(95rvnP-=L>t^gF!q7^`#q)db2pJT;2kZ)8H^2%qyW z^J{Qlt-e;=bZVwt=d@aPhm41{X2xUXmMkkl+d$W?cs$}*3Q-t(u_Y>n+9C{rF<{tWUq09mwA zOwtSUH zz3@1KJHaWVukmYJh0k?i#*QLn$g2`}acX|UOrt!vSdg@5^;{u=k#6Zb0PmeTeeFvm zW0yREN09rZR`JalS-ioo-?nsN8Qi-@+Jc(&Vu@;~04$Sf=$MpWxjs^jJf`vxy)HVFdFP!0VeyNNt%!Y+ zx)>>N1s41N2k}4-zfN*Idm07hv?D{3;s$a;@79_Y@k403$P>!sMuhADJDQCWTq=!O zjTozBcke*OtntTkvKg?hx&}`}=7hpBi_F|YbFZN%+JiN!^P-7Rf>`7B=}{bVe`-e= zI>`4X2?sbn zaYFRkBHI4|vwUhUeOia>T4Kz`v8;Q<7=fQ4Qg;v;?&6+(mHjFYiUg#A3WkvC1aIob z51My`LcEFPm5#K-XQ5yROi-t^8GC6PLm%l;Z!D^=h@?rReAjhk7Gh*Zv%Y<8eanIe*SV?;#*A%;jNt|$bv&2J>} zs`KFyfw=9Spw%t)&8JWMD4PJe+pw z8GM$pBkOoFN452*gs1)2$M;4w(*+MI{O zZ?Cy0#R}Wyns{35?5&yf^kI)`IDeP~7=Y%nS8_%a4fFRjyQ=Fl!5QT%ZJ+U=t@%ms zHE`&{N3Su)d#~Q4v)bM@yF{5()%9$4%~lx}MtLL}S`{jo{pjyzvO1YxSdmFBox$uV z9pNUy+$*c0V00EIWA>rk+Db1S-e1Fq1)Wa&_NyQ&Op-(5;LDPf7!3;?+a0WJ8jr ze=pvgcz$X%jxy^hUSLz$j{g8^1ybpbBb}9WoaAqxY*4NzWmJA&rDWWlx}B)^7K+x( ze5w|Bj)|A9pifXgN)(EpDS*u2ux$*49gf+}1k8^nWB{FCOZh#)u6dx66pB#GAZ;wV zjxqPBhV69BlSJX$PJ$1lHg^rA359K2ttpKNn+2mD$w5qUR_~?9ns?kRp zTErxpHhpFANX`N2N?s)K%2AM@2k`OGig8jm2`nNZDvwsR1Y&?+GY>vHs3T_8tr-IB zw4AdqJ#z*$H{T%ElWKzPZf)TLOkjX>8PB&$6q1l+S(Y@pm5)W+9^NPeEt+a&Wn`4^ z313nX?s^XNIj$rzHk=1jjA_UsizliChAWbT^8yau*`e7BkHtu1Neq6Vry0kJ22rAI zP?Rkl%ngD+B%KET0KF}FZ1Kwvsa=9$z3?fOl_qs$jiQXO4!GS zok<_zKw~RhNTT6)8wLjeAM&Lof@dw1B2_tM4YryzPNA9^_cDwF{{THH*pwt*uT;zF zcK%`K{3tW*QbRiG2k^2$Cm{XJKJi>hCq8CagrsR8bjNC+mvI7{Rmoy?{i?!DSp2Jk z1qep4MaT$w zaW*vf6af~-MslbM4_90o4vA-k2uWmC!B#jc_VZ5LNU}_>o02dTRk^VVvphyIf}p7C zPkIcOM`+e2iIPQa!!8b<#8v)UNZ5$w)_?+IC#`ARNN(-J_cQ^3sFBkeHppCKAFWa})*P!80re4%zSP2jB*4mqXgW^a>eDoE-JlDrK01E1 z1!yAW#&D#SBw@Sz&^ctyBN5d=2e|XKTM_6D7Q7}y=gB=qS>%l;5=p2R0dg~rn@|YC z&=k<+lw^_yPui*j6z7NZ=X4{WqevB-@aEAbakk?XQrcsYO76W_cc6tXidB>t)ncKwKR8@kE~A2#oLP&I@}`Q1e+Lp^u~FT?@P)T zo+D-sNbBIxM>LiMX#kUqp1x`lb~4ET&!~Dy6)~%o%EGb)W0FYPszF(iwTw1Txb~od zDb)~W2TA?KbpVtzw2*_PTfd6x2HFXoKLSmL{{H}KBvwW==732Xd)GxW1Xf*~?Vs_j z13DXU@Zp${r1lgEMy#pL)N`te(&X|3GLyRFy;T+0B39UvYm}=nba9Y!+#Oe%4 z)7%d=V{J0r2*Cgi@@Ooep&?E(d)1|l5gnN7)_N^a0$GfTWKQ}STMWzMN}VSntIGIZWR0P>ml?*KJ98LcKtT~rX0`@gqf5LGvE!R3*y#o`YTiYn+3?EKMdJ`5%#9_AT%DqiR#H=O> zl`-QWRqfTMnNE(R24UDxbZs9dUt6l|G_l(?MAH?#mSo|kYz7@hD6+h8%_X|&nM-f& zy>;EAYE*`7yRV_Yly$`gWHMV@fofho@hJzd{3*9d%ut!+0Z*XJ1NFRD$%Jj$uT&$`c6C0E?mmaLy;*c zNgZ_d{e@`HBy!lYMRqa(E)V9Q&bURqSndLyGN8uWnsR&e`D-|ZvIpVg4&-{Ad!N>? 
z6J2YDS5Lx{FoA!R^~j~3Jy1yToVp%chU?~luMLcA%y-c=gVBzoG^CNRon{3UHXf0= zqseN;*5YSh!^Z92s*yZ00?DYp>L>(S>RWLV1?9Ajk_Pm_;cE6gxE&bhQ?cqZPTN}i zt-D+@1}<0u;L=F1J2xRnkmF&~ZTFxA)-qZEv*tvJaKv>LdkCU?RYZ)-t-xU2z3A6- zNi5PU97}TAN)DK-a?0MAq9!pS>2)0yyqW;X4Ay1Z+D>w}q~oHGf3+6nWmqE!O9TLq zeD$kM6iIM`IY^J8b?8rVO-B{OMqzk0LmAE&-0eVug6PsRn3;|fuo&-6#){9OoEZY( zu;_j0w~)nljU+1V2tbmK@sZxMj8a1!jT0QkniV^p=o>SaxG_nkBexmLesB5B4Pu3r z*vLzRjb6w6DHtJ|XVq~Oj^u6OY1CvBj7X(YG8vbquEK$7q%ujNP)jO(Ob0>R6|KZ9 zp3)0>Z0@=7a)gqnb!Hon(Dba^In^#CDs;z#lpW1Y)yQcSn}W{kh6~w1*ct=a+?1Lb z6qyu8G0fbLb3(Ybo&-EV%6Z86R?h4ymRi+0lVGSFPv#wft4S1@TZx^>m2wpQIL~SV z2MDoC8tNRj)!{~W6h(oer{IH&6%gpBh zo}bHu-i(kt-@4k}v&oG*&qd#*0@qQr(>a?-D1_t!32wmEY^~Dbe6B$%bDqcEk218^ zD+9AE&FPT#)&BsMH65_G=Ub$KZ%2kVzXE2+pP0 zRU?LJZb^x?bkB?rKYBD*3XgRZk&_N9lV!c;$CSp(UH4+%3xhp(~ zOKw2)-*S!d!YI$G&4#7IH(;10A5%{g>M0ijre zf;$pA^rhY>o;0|(b}1T{kPew6x7w9Nd2D8A(cURe zbsstUs3y+U`N<}d(yMGdZPJT1%#LCTxaECFdS^XN4&LS2%qC!(*OkrwR?c%netpf% zlU%q{kg8Ydaf$<4StOB6k=zBk<|!+Kq^ZtG`%@Q_7+(t^1|JH@u0p;W^a z>E~+e%hgm-F0!)?Ay*g-JkVMjV>Iz_`47v-KjuG4@4YneLXRg8g_+4bf(h6Tx~X;x zJ(Z%zAd***#N+uu8#M~%c_C(jWnFN~gD>+Re`*U)VQ#7N_O8S>#rlvaUn?9Ik=E>I1_H+Ahy z#sY!OyyV8AaLd)GR<{ydUjTD96qz}nlo7v5i>rv5(V&9MQC1`|?6ev@LiOETBrMU# z9e$yga>fmBgiX3OYCbRM!og7^s-;`sTO<{4m5YBzEsoCAws|Rk(?XiLlD2TKi_OUg@nz zEIC6U(p%rnKf|wW(nzmlU&Fy!Iequ)b3k~V9^N~Se(oEtbCueOHyAuc*|?7;GLfWu zU{a;A^pob6it5~zvvgq;u3a0r-j{_XaU*Hs*akq1&f=3kg<7xzfZ>F1=0LY6k9}E@NBAI?E7`Tx5gzXTbjY zS*+xKVl;szU62A4BHnMGAh z1!6}+Dy=6psEWs147{#!f;(cFk?h1e5InLY5aF?fh#)GO@As1?@nJ8Df=Wwv?;q{h$~)@M)_;{HJS!afZCI(DS>r?bPJ8~fFL3$|wcHQ*m^d9( zhvpvHprnLCz%QhR-^c0iP2DWIVz_pUJFXLL{{Z&A>X!>+c?v}v>6S1EbI`Y%1Pb!8 zc^g}x%bvv0SX)|MTZbSl(Y~|nG0@R@C|peLsIm=M17*h8psy`fNOOWlU3-z{o3&(U zXH(14V6*L5Y~$pNmz9A9dkPhz&eF{iAt6@m{PzhsD z<ueRus|{TbY|@{N1b(Ba<13l~M`$B}j9{y3KJ?UaB+4P3 z*`hu+#Y0Mu5hRST-xX;A=7_LUQ0l!XAUcE0Q}}TmE1#&E=Vf5V>GBTX8WqBcnRn#` z+fnG$H^VRD9b4f@>5)KIi<@thB!<{2{G-UIMP)7%AZW@T<{6^d$Rmint~d1Kz!gC{ zmnBXSPo)0eJ8aamiTbTNGdU&^rzyd)}fG%y}jrqB7RXsU|s(Jt#wsg zEUO#`C#i12x`DEYutp_SSv#E5X`rqZ(*S2<-lq*BaxRfjfuD0jjyPggOzGdH0l957 zdeMjeU22~%shO1FfCLJoF^>W!I6bM@8Z7~xH|R+;1Gcwffw`D|^?CPGGAPo6Q6iy8 zS;=2gp4r7&wCQz(f*H3$dr(niXw{ND;gtTT#Y)=9VRkY7E2|XBrLyTVNI;EC5C%y! 
z8DzRd=9dGWaJcdBKs30CRaP}2h}`?toQV#HY{~6P{vtU-nFGM@RQ%3V@4g{;!b4u_M6kMB$M*42p*=KDP2` z$r36B2vi=2%}m_RTE`rNbM)gM)_^PcXOLA(L~v%$Q9_OX01`#jh5B_GD%+Vsib4?M z8P%QYCwpR42cZP(YpOz^Gt_Jq*;2YI5<1w&2*9kUS<`Fk5N0C z3JYdd<|__ce|=KoE0kkza&;Xgl&_A}(>W5R2c(SP8gRNx8Wbc*JtfaW?LpF8LvzEe z(oIE>Wa6SbISeN%0o9ZvpfpQK#1b{QQqiAFI}OzRs<9a(xMfGx3|^9?{Ztf4u3BTM z0b(@j9Ta)1z_%*ME#+NI*vlWI*!}6NStNp37)OYteJ8#__o~Tb3{p*VBQwjo2-M&V z^gB=hTPzo|b4bLh6u5wpxpB;r9-EyPmN4%qFC#WC!R9qmI@@RD76@wfeyLPUM%0yQZ zl~f>|Io$Z(f=kFk++fC%VM+G$L6INl4A5Mb<+cGIo8F5YEhW&5z7bO-XCraihQ%!- zmhu+X(f$-b{%m*0*w9v6nIO4gC# z&Q7W=xqd>&B$+tCY<^!98e(Xni7j3-WFPFCAJoz3k!{@+Tof`U9C;*ltowOq5{G5g zoE&2(Y9YC0xED_mZlXZLe0Bg(U2frtqqbRCL=WPDk5ThhxO;Wx-NGCO8*NAl&CSHA zZ2(Bd`cG9JDq@lZxL9Ro3nJwQY|t7+oJ?LKSxRdMrbPq<3O8`&Mrb zx;7UETr-4X0uiy>%|HF4Bych;5tW_2VBnBG{iq@-Bw36{CM=A$HUrwG#Uznso)j33 z$R6V~NTq^)NVeEM@)r6)2eI!}vADN0$!nvS0!YeLv6JtL25y>7cJm_;&a5yxF4U6A zeGaIn1vPyK9Z!GhL%g`2OL*ndV|W-6orv?Az2uJ9a9pbvS>qD0C(d`Ev536-;%yPB z$p@(w93t#2aza2Lk{2hSr5MUNDBI!n22;^-L5+a6Lz1i|0Jb`f=pE(!zM17KfahgD zSkCyOPkwoCp#mrePW#}|{#;&g#JhpjMY~{Y*s0HdV~R>ExFlm8#9_$~k`4i&YS!K# z?DV|2mo71(e1qPKqTbV+%0Vg>)?5>jzAHK@pqe*bY@*#4NXB!%DDbH>81;zOL>LX+ z=b+D84K^k)jtWgNOd;|2rET6xV4N#vY4_3G0kI#gR`TL&cullGFpTSvbZuVU$lik- z>dvTz#Hu-f>A=9Ctam>xaU6)GAq5KKq3KkZwY|s8ZqX7;3e|!E@}g#om}Cyf#ADS+ z%nihGAx>K$9MXDbfk=zY8aPmunSQ4HwA%7SMmV(wXiRRvXD2?9ro$xpI zqVk@88g!~3mG!cQ!(;pV&}ZFCtSn^dMIfmoZNHnv5M3yZ;^m{GCab06j zRSUkOq!CI?;wao02y}G{!=?e=fY$-i*v=J}GlX1yL=M>TQ4Q2lL#ilwaN`9t-`b@T zVKIBBPF%5;i`ATxJX9hg7-W{;n2Q)5gPga0s0Az{A`8eBLUTeM=QyI-`m`)t#Ud6u z#sE3(LcNt`kX%YtdESHoo7A5qUt)vsa~ zGoeQtg)BT~gA=vYlGslo#g{Lv_unF_E<)Rw{{ZVzv8%f%I~oUJW5TsdDyx8SKi$uI zxZ(2g2@R!#1}*rdTdb$2@BUOToU@go62SU1mSL93`*@(<%_7~eDV=lDRpk zPc$5v3%-crIzs;dFWQAQ%Z^HD{{UyOn;(?#)1_YO-dALu!Yh^3cl5Wv-iZ~p)Hgy2 zF{zOD%HYsuA_%bI!*g7YU_E!~OgggK&u1FO<<3JhW0${L745Z{xs4)?qD3c|70UvD z!lVmkiDQk7Wh6O}oPs=11oB~uDCUn`h`x?lB}o2iHNE}EHcP9PDbCm^{Nj%A!#oU{ zz|;P~^gnOflom6FiU*%ax=nFA3<17BN(U|PmenJ;hcZYGg3LBjPTBXXvqTHa#3Kd? 
zLD4FbTVMWloFcormO*gJq!3Ov2ODOYv_ZrqTPft#r0R?>f!2U+ZrrU;aI9410(W3~ zX0&3BCbe5r5LpS;tP@Mma|Cxr28SAURy$E+GsQe8vi%1x*yu4puM*)laja| zsZl5`^%8oI=}X5v_5qSYk}RNxQrYX@+ZE9uZ6%e-QGnSyk7^5qYp5YSl;>%|-v>2Y zbybyarn+Vj`h<8JpXK{eoONZJMGSw2WRGx37$?uY276qFCE6W} zWq;jX_^ALaZSxVImSdRD=2PeQtC$*TbZpWleIviMHnDuYj_@0e&oF1`2gjNOiW_$V zcjf{zuDu97tFBB15-4(*8#nVBECp605R@1sY=TDn9-h=&oZsst=R8NpLPyel-Q zB)Nl_kJP8s4~$ns00!VpjHtexw!;Kd{3}h3l+H_Pf25I}{i#HikzPrnFAzEiJw|uU z11RD)dpM*VkbsZ(w#`ajB=K@mM$AV#JW_&QLk?CLo{D>BuME79(!>{<`h#RpH+D_H zS=g$^Jv()%(Qe9BhBTZZADH;8>q&#Digr3nlbU`>V`z%UAJHcNiw<&ZNKw=ov0(_qkBli ztiM^*NymBw_C=+55s8o}kpTL!QyivJLJz_OsVA|c%ObJ)X-LRWrN+d8(B^?@WP6z6 zk>p_NGU0{@_BCiN*l!#rNZ90xERn?&Z5ud|oMhwj{do z!8Diu0A{DL@BL^7IU*xj5y&=S*wx!aMVjC1agYhc3>e%2Dv(D|Pjl};dr4KT842|V zCx5*M<`tSuZWV|C`Jfhcx-1W+KmkG6k2#{oA#0~|saF7lr($R>0eKcTDy5DA9x6~g zQOgvkI5=0J&YBughB1MiW2m8rxMeXh463@bp0pO!L1?6GfHfz*L{f-xB%>=K0FUMK zLT4=`a%kNF+dI_A!d)dgUd&m#sjv#}lk<8-BDsa%fP*0KjFj?Lm1OD4kR(!Pt2e6X`m{63cXL7A&PLR%W!gYy#-V> zju&LdnqdTlWT-f$6`ah`%!IZ^7*mrKjI01SW>L5_15(V{C=~tbA5Mo<$d^5PQ9`Pt z`i4<^j`c+{N~-M`SkKku&@}{Kgr-Ie%)v?Q1zz$Pxj>_wM#Exy{irj|9BmTBg<;4o z??Jk{Tbb9AtigTyP&H&AlIm9_k$HRS^NLdEUAhACh7u^xnrh~^VH@(2#PrTKrDlL*lcbOl7=-UbRpX~-!)}`ENbwsq>ppwGz+&b5gIzXGHyujX+$Dy2a7>=++v<(xw?#| z6XoB)rm-Dd!_>C@idwRww#U(_{jM;W4NwQm1_Q+OPebmrzEqBP{H2ccsva zY2G;#GX*09$mu~d%X1_O6z3mL;H?kKR_7PwoRrccDq6?Vqm=opG7 z5s=4O#&gkV9d3BD1zkc#CqvXyQW=Df@Xmc?6Q201$sG4bn?K>%>eQu93lDlEj|)zV zB8AerP%Z|?y#*t6=FoGyPb_xn)ew=#ng zftAT8y#n`*sziHoaTwCa`GBaqvUr3SrHL0~2_KI6rj{@eAp=l9VS`-I$j|drOd301 z!brz*Jbv^GBD<-wXh{zGwRqaP&y>vej(uS1N~mkgYp!Fv{wDix+ zS_xroacCq`4Ci8l=^tT1n2wEd`haN^g#`2?6f4_k(r1m102vtbPfC$V56cT}GDHb7 zDFTw7Vqy~V3o^*1j-kFgPz#X)Au!0;)0W@8OG^-eVu?TnavQ^U6v=_^BvAs#7c>?s z>zw1YL^g1{nB=!)PNO7!edrOlp5`T23LZsrrAYj~Ye$vx?G@9vGGZ_hcU?XR=uT*I zNAmK#ij9zgj;@C%7^29c(kS9nmv#dHZV$Bpxn0LCFKk*Z~*lC{p)6KAq4WlcM`LzCY4fsC-kaB@U6_UNg8UB zGRJQJ0OvvH;kCYs-hC%qNZ~L}*y)^ht3_g3HI<{~<0#ADZL^OR=;g4qn8xwTrC$a{ zUAlJnGzj=>O?frU(vDItkTdEcgAUTgC7VJ>yvI6EjU%N!6cU-t5=5+2=~)itN3}O+ z!XSm@o+S0Z`!IR|=78eq! zX?x|ljRQDGjA0iYSPu1jgKNXC72>!T0$A5L2h+Dh`_S&?9K`2bDUDP<5Mv(R^Z|}9 znU+xDV{IYOY1^$i!yty@<)ey1moj=vw!yz%l(qHGFXgV{WYYUGfxa=*?Mg{`J4kOW zeL+T{jf(#Oodx7)x=8IKawQXBY4n0KzE6sJ;tOl5al@@sSb{fpIl~jw@7z*SNf@+v zoq26MXGE*{0qgy0vb)&NXDE|N00QIbW!qvs=mj03Yl)KN5u;>dk_a6=sDqn5utWwq zS+W6LBR%m}mxnyuW=p-R5ib*_gaQ>Lb(uaRyYm~;sReU|?N>AEwYPJnq>Skxj=y@W;Vo|Yj~Qi^br3w! 
z?4Y!jB)FGRj!~6g27=PGvRk|*5V1IHFVnZ>{{V$Wz3iHx)SxtN%Vlxc_sQ#5xJhH4 zH5$>v{us{E4D_YuZgeX%VCii~BYXqB12XXYc<(gJ9`BbUupLEaJ&l;+3?foiOfn&F zPOkpd=xuLorJgNGnB_uvJegv53nWYyQU={NKeZAL z7$KTT`OLqEq-i)%2<|=T5ndY7NZ&Hrx!GlG=LfG^sx8DCnMh%cEC|kN%M^`SlQKMb zA$REA>y|{bERv>VP%ugBK$qdc99d994^|iY)|{|UZFvesByM44Z0rFYcd9c-8eBmX zIVL?zo}+(yyl`ZPa9&JlWK*~PbQ3Sj3#nT}YnK2O9-n%#C2fmF(X@zkx*mah8c<0n zh{tans^GU)$OF9|7;L8v3(cd_7_iuqH{ZG@dYVn3o?>n=&lHY!U5PoMhyMUumomf>DMBk1JAyh@=7#A2 zB2v)6*a*K~Jo&|Sqs0uY0x=tnBFso#~ZZ=^i#nm@*B{q0_|#bJSfVkv5tAmue(Z z-Q>q3a?YaLZVvf5qDdPpOLrVft^w4fjQ!~-uUNB0K`Ssg$6SvlfbVCQa-b?ds~+dL zu86EqJjNB6LUvYRl6q&}xZJhGKu9oR0WzZ-bgMiOfLWP9Rlps|^FTB=5=!bH2**gs z*i#a`O>BV)X<16Tfwp@ZL;Y+fo?&E-bMAihSj(7sfyzE~f)82+Wrjt7T%+WSjQx({ zug+z*UTT$R83)Lt86rs522$ozohonv9q~baWYQQwERm_fKbL9>Lo&9Q6>u}Cx89va zXr*3DhB2I+^rWDHk^v$&Gd_dAVO295b<$7O)B)CmM)E@pn$odfq;glU&-|ay8qczD^ zk4PYHeYy%Hk}QKNpgemGBX8P*fz~DF2Gk<@yXvltmNH^Z5zN^D{#v|C3P3?mQjbvo z04lzVd4OF$tZolNXcU%2EE-aExFJgP6mZRU!Y7D+65yZa6%8Agg^I=_R?@>8HuVf0 z6A2(CT(&*4(tvGlqTWdnw1C(Hsm5q{D<7_3Mm-yEMvSL9-avH9HcajfQ()$crK@+150$HF#2Z(KL|CcELM}w2J|5dXg2kIP*kMlB&i-2^V~>*r2GNn7CMh zlmY=BJLZ;WT(W{mP&XcX(b8z4k>v`}?}ky-&}U*}9E3&ISoH1wbPYi)Q(CH+fXT*5 z10uM^Bq5^*2z!cFJq-wF8zO~V(Guw-Mvs_EsnfrT0iDoty(pTeL_YW#>yx<>m~+LtDptO;+Kfq%7P|BtQM}p;SMECsKFAtE9*$0sha(-u043%$ zZ7_VQ4f8@mFs=bnk5Lrdr6nyL%wjO30~J}~h>+2~6sQ`!;(xyF5=|O0Ps*wFPbriXbKm(&I zL>oFY(-p+LtXWVlcPczk5%jAvp$DdM&>Z!qS>onsmUECc3((LO8>tFQ7z(eDGur~D z-OaR2#xTW(7gYl$ad7Gxw5)5Z2fp+K<;5VHEURawUN%NeF29HetL;i6N}8sZJf%L7 z-h)c&d9I;~0iYEnkC946B(WDMGiyCqbgCPNmNL*bNdD>!20$7)b~)Ah8V25ednl4J z&PF%wM&!E28bU)l?A{J&F{?``j#3d+Y6rQa$tt{3bEMFU*^GIhth3$l`Cc(8XGPR8 zL9?0}9w{absh?9h`_$aC%{Z0V?72STx;7%gNL|^>6W{|t<-|7YEVuDp2W;ulp45?o zNn}@vLh3tbu4%Z^G*nW+>%)WYYVyR$?RPSik~PwbIuri@CV_b)cqe%#U&f8Ih3r4d zttG5cBvK@Sgo<6j?Y%{};BuNsgp9`%IVXQ=IWT8jjT~B!r2+nw3y6w@xh`5JY$^_- zsXM*6<`yp#mH-c^A9~l8`pqPcSP`RV9fzlFjUjXRw?SuuFeLRW5_iY>&@XCa)B@WX zWN;3V+Mv;s3AoQ&VFzbt`<=~F;z1zR-o9DUk@=57`%pX2jT&2J zCqt-F=Z`5%o1O!Jxdlyo%h~>Ultn7VJjlziJm1c+VKSYk3TT zCDNqy=r$AsmQf%w#X99ik{3RcMv6f#+B^qIRMJ#+IUQ+P5Jftha1i7(r@bd3kh_@C zfhQkudWsEs?FGtZQ&R$cMv{!AWLo9r+_@w2w|Xt75xa`gOE!a87$|qj`Kn1QrMW~Y zGU0=cxbZ+oypgStyhB_k0T1Q(G@!Y7(SpW(YHpVIqp+G`5ai83)tbP@-8|sJqb0$P8{&4%q8aZ3`(` z#C+J<+e>&Vcc351BRJ;Cq`&}V{{Sk{uvsR#9E`^jHc37)Q@z}RQkv(G=#CZY=iB$K z2ejfmC8UP>)nevFrXYTx`P_cM&?9`xwo*F9nQ%_un0wI242h%>$AjqROW{s7pxX0Z zIcHI%svK+Wy*DMyH`irjm7`~iOoeg2-KZ5Z7P(DC!i$iD$sPXyTD98BrKgp(7G3oI zUwWik?Bi>Ob0xN|5vz^E=NbEEt#@;(CQ&3}cK|l~8VQcvUBHPw#-S`~R1Ue%N<5ZG zZeW%<%1DDqW6;U}0F^0cBf)7UvND!WPNwOR;;z?-_UO{1YHjd(sGz*JCf-hAoV<+S z$OlY~vsxTN=GNxe+(e~f!#|h#R*qX*Dk^%E9sA=wD6vgwW^=183^~p^wrCJskTU2M6|_6}&-^GE zgNWKk0>oGG1A`oc&U^l~bViM2yU8r1K1dGVbazNqbIDxwlfA6K?yo=`J*1{FJKArmG zeABn@8sc@E)V(C{_a4*$nU*Dz2Z3E=I2iNhgKsQTG|w}IxKV(=?qhLNB*}Shh=`W~ z7ZMVEInQz{F8X)2o+&M5G5`#6X#lDd&}M^9!rA0^FKXD6ZeH57cRy@Zc-6FUMq1j` zasud%Md}FZ4NcAElol50%92URCFEZ052RLeT}>;=b1bGZ=S#=fW5oe2MU1>Lp}A!T z7+pP5sb(RKqJ^E7CLdCD9>$^pXEcTbIZaMOw%7+0Mv);$h{L8x)MnQjULJoah$H&zQe6W)$+`S=#Y~PCI_5sX?bp@;+FAB>80Re zfOg#VGzNr^osoo!Tm}rssTkY#q_}!UM9O7lcKReeKf4^JmOo+OfW5n2d>Ar z+PX;=TeMDB<0}>=f0sYyRErNK<-mPGa7K^IjP;h4#Xt6w6 zT|$C{cF5fRmBp>^8WcASqZ_~J-hpWkHdd1AViceVM?f*YXiS!tiO!Z;BuN;rF-F)O zF+kPDz0I_4u(JHRzU8~0BCCCQb#*gI#IIH6arkAkg%n7yEnd~MnLpLnSEzrq9Wg+- ziqd$Zmxkg^LrN zbyYq*?MkB-FDwdlzClyicA&M4#@cmRREAwayc%djV|dPP;~@?LYUl_#s*jfWIM}F@ zmd6^73Hw%c?6OVxXi6YDlpVM6L7{VTZ+Cq({Dor>{{Uy$uYp##F{A2+;6oezU;OH_ z<(K<5(p-TfO21I;MT+IFti(o4Fj8=vmR|G*x_f(h+ZT89Jtf#0DVXKsL}`d z2Dykc!mp^7{JT;sETNdlqm;@vrmPHiFwCswFuzIYMra)2klWltaVwuxV_KEy-qb0s 
zzEU?xrNzMXfSoj+xc8#O*ESf7Aslr*oK!54-c0gcfCy6AUup#7fS}gw${6Gp>S`_{ z!y=(_Bqx8hGxKIkav~5iY6BsOBxj`oj=Onc3doVZas0c`1tAZ5A6BM27Ciu`rsi5Z z$38Mw2p6jlYFbNTs}zdFtjE*rXz)tf(mSj?Yxr#^bvcAr* zyi~}(pwTLGGlHk{Rbm<;lUP>YefJ*5g0RA_3rlboMfytgss8|2WOx=}!y`I+oc{oo zQB@_jGD35M`8Oh=46!kjm2&aiv*JVGL)h;9q zer^%?i@zxJDrz2R?&2}U7*!3ZgVj%p0-K#YxQn-=>c>%6jp9=CJLx?#z5VDAPZ)|q z$r(|%UFy*nl|+U;aRvae?LjfCvxp>*VoRa!d)F*21nsyCcmDu(i_BQ0RGvX20NNaW zKzdLMek(o8G|@7JG7iRphK0c9IMUM$D|Xf6_MqHHCoPo*Sx%k1ds6_*=(Y=i`HBAe z1h!Y9B)x!@h&HSr%+w3VZ*b;2ba48ZXC05V7fQx-O1M`#b$29eMP5nTR1#a;h*+?8 zJ~2kITYHO+O5CU{j9j0~wFY#_7tC_9;bZk`Jx8@(7BM`t#`=r&^HR}U$0I@}C)99H z=BY$6TmhWniN}fs*+eW|W7JOiz3THw%+91^IAzoJslHxjW{0Ys)KFe0kaHg@G5~Jo zg0FDRr6!DOkbt@CwOTt-JipkD49n9h)- zC^>&>To5KGU~Lkdqjo;kwr?B@i!kCbB9Br0+w`OrW{xF@sL+k{o~D5fS5US>dP(|f z?=om+4h!TFj-r{l3SKf;qaM^5qfTLsjzcNfP!=hf0fpV2+$js*qU9yhT4&Ih0YUHf zph~mCfUfVPE`Qzs0LrTmLX0($oC83^M+TIQK~<1o=goBiBreFZFRM8f)EnWoF#)oz zxgX``m=grmfu) z-#V9?Nhb%TD6zRB<#ge&22MWpW`UMQY-3Q!3UVkWS6K+3PFwRFbv|fu$QgC0DnQ>o z>Q*@x(`;(bdI)AGmT|iFplP7_nDnax>%MbJX_zuBpIVYXOw$NfH|4|_NZW5$kP&ew zHAvE%eepmzlMdRB3JihgHE?2$9$1D}Ppoypp%+PX4Z#C*=B6Qtr3k}4l+Y5|ShcFQ zMxmXBS|x{>A1Y>3(egSB^`)(m*q>U17o>wtG(uHkiRDE=8G2% zF{)h4CoPYS^wxBeJQq1k=WR>x+Miq3kTgw$%m6k|N&-9EsF*duF2e|*3?F>bHf|m; zB{?X2^fY^$X19SyGBUll8Ksa$>g*KhQIU!RKtoNII1+lZ-G9!PoS5b;lJgJ)s1Zzr zjik4hMj=MW9w}>`VI0P)1808tpsl%642Co)Sm%AJ=={9PA}f~L8R=C?TwC)bAOZo* z88hZEJsv8sOOjQN6ob`h8&*ZZx@KPLz;>y$iauDnx{x-O^q}8AdZ?VqY>_mIzyP^q z?N^pGc111_I%HsX%>>KNNdhq*y@uOwP@Kihq~Hf2kVi~?=&(m94a^+HMjd+5VOVaj z5>y3LDfG9s0V71^z_TC}1?Nx?pZBk#C zVqZuJsCUl&Xf!TT&8{9hc$7u20rj6*$Bxtp!xYmJsAO$U+>2#^vVDK)@QGJ-b%F4!4k)t{OLyBY#@s8QY}? zaPg6$$WX*%b6SwdoDsfh<4czrL>Tm=cO-w$N(#p<#|O5S=2Q}2L@&n9@XZw zNiCwIO0T8!=|5M#Yi3E-^?A1r{LE^V@^@q39mlmTVZ#bY(5B$2I>GJ#0A_)8f-AXV z5*CN@G4uAJ0&Q*H-r<%;=LX37f`hi-Y9x{W0B5Ss#pfK%V1u{Xi#3z;=4}8Fs#Iro z3^I21pb2$*xgG^~jK$X&?fq(&cW}cjWTpZ}0g#{Yr%q;(E+BM_!hs6y*kI;?e+*X; z=GHtB>LVRR1)#QR&pOqXT}!2ga@xM$E3Hv&?m2Qs;mF&zeXFf)r;TBf(DGjzS%KUC z06Jn==bG78*Hn)LWs8t=oc5qKl+x*5K#Z7!qXznyQL0>7i+)rQJ2Ek6muvx^^=R#k z0Nf`0#SZ)LkUUlHRz=Gkx11h=gQA!s$0dUiCA5K-Lcf=9>sI0)5Iz0v&nhy+ zOl2(0Ny*!6)^UpF8PyA)mn4!GsoSM1XvW_4?X8g!mjL(80`hf`v6(|066EcJ+xpTd zTtGJUPrga~-RL&%Bof1Nt17}^k58*@XWPlF$fT4&g-V7eB$n8}YMaBR*}h~JmEp)e z0-W{tG^|s{8B&0>iZy}xazCX5RcE4yow zL%Zai_VHeRDFY)xH_)xDGPaVZtyw1Kw1~|lj6n6OoZxhzcZ=7yOAM=W4!>FOKx?M+8IM+*GN4(@#jy+HQ@fDz9v zui`*T78wht>OKgiZZ4$0O;I3@W;rqc05cAivvH$T(cQw!JV#RL9akNzIz+ci!OBug z;gOGQb)e6RacwAVQWqp^1Y{35r*B)Abcc3Q6~M<_(4x1u=aAFng2l<{y&edpmStDV zvSg^oRPjL;(_K$LG`XB+A*lckHAd0|WPXJ}rhvqg8o$^5MSyHpKjL}W$59!+*XJBZ< znR{+yf>&lz85ePZ-y`oq5t(9D)Umh@*e4k)Ky`JzmN1;jW*OGHa6lC=aSKVCbvomG z^vEKI8%Y!k!~}vc-h#+7$Fb@6t6RkO?n^NFZA62kEW_Kq0~Z`Z;T^S@xmOZk=pdEb zr?m~$B$_t_jQuJ`>VL+Cc?#T=c>e$nH#o*O{{XanRorr~W0FY~+^?js)0zdYB+}Yg zuzcj7BgSa!AV8sjk6sU_aC~N@Af2SxGIe9!X>M|_;SgCDKYVteJ=EVvJ`Bo@sU%V9 z1Ge2NXS_>-(q>1RQKgRH549&0%Ml#-+?L%`k^=+V*p9T-x%oD@MO0wPu`CXi&rfqe z5MSk`x4OZSA*$a`=3gIt_NH$O!82M)AD7Dq1a1i(X-g?B%l8?0>ffYY@W0ct{~A}N3hkcf#^XP-^E44EvbSefEbr{WN)l_ z9x0HMO=iYu5-}*~F|hvtp482!1H8WBFLi40xv=~xb;_v8-yu%yJ;ek601l&|Z76Ziz0TaU*2-~m+HAv?XqdzS* z1YjE@xS+e;CZWt`Xwd+|_G8wnW-TMe24hvmtZzt0`zcjoQ*-Wo(O%*}0<)kpb!F+F z8+4$wQ@cAi(BomAqu#35K}sxdvxA+#H5ZsBi;~C^L|leHoc{ogDrEJWkrL&QgfP!{Up zm1D~O8*PwxZ}On5?`Il0khZ4K$4>tMdbD!eXo=+x6$Swy;F{=pb17DBvz_P-6y=J5 z1{t^eQ&u-ITgxi5k{BOL3|4#uTSX+(Db%N9+Y}d>^%q4BODW0giVX#lB@##+9h>jJ zY*mcxw&^G%IzZ9}#*%=UFi}-^V$4ldMaxL+r_r2yP(2*)Cg)HxDBr-Q?j)LPa|yZA zgR{tQmCTmf zo>7$^eAVxz7diwLA7CVa{z69Ft&)GBDnepEM8hq?%(5xYfBQeW@*R#_2hHH*rL?c4ERQ86;Kp 
zQcKB^-Zk9zCV|6o&L&YH5;s5wF-E>-7Rh$3TV4Zxl!2Iu(b*J^;0@|p_2#xHQeIR+5><&UI)eReI@?+Q)`^&bX-6}+Gc6|xU+<@c*iuUskgAQU@&sEI7yP5@E4prA<>V=Ikp_0piOA@a%ciu<`Z1|Y5GAGc(tr#smjWEX9M-cUS@%$F-pF;Hq+*c zjI!|Zs1+Gsns7G*r2vXFc6b4m%93?%Gwdi5kt}gZ8PwxuUdEZaNv45XIR<4p-=G~U ztt}uoYZT{}sxr(vP!9ekFhUn0bCb9D)#8VlZ6gbE+D0>|3bozK%^%A^?5K~0T=n0r z4m>1{6s|&J)P3kC&dzIy&IT0*ah|m*vRuw43#B3Su<=w|+0kA&gDe3?26sErqL|y4 zjwEd|ZdJOP1{s6I3=G-{+}9RE63h+?5;NH6%~kIpQ4BGrom-Xhy+lr{XDSC>B0;(D zngiQf#c^>OHV7oKWI8~rOB8Zk>x~Y$it6|Z+&}O%i)uSPQ z8dHz3po^Dj~V(L`mejDdk(nsd^L5(buDL+WF;rPwdET`$0jWVB#oeTeV( zq@j^SbH<8dnO6!|Wpm`vb>pxQh)~F^`aolx8j0>D5-WuN09gYWJ(iP>X(Hs?%cvHR zIc)UBIdE;vjd{!?1yDEhW`jK~qLy|`i-tsY)JlVr#Oyt{r-m5M6yU~f)Bvh?WzN_% zt;Dli>6U2IIXEghj@hf)&n%Z>>PJvY{U_;a25hh6NLtyVXAdS*d}D6)-#0uGteT34 zT`U3UDK;orMylC&%OL0vwOQjfs;zqL{Na!Z-h)~@h6uTzOm(U>>>dVyt3?^crszpO;YFVHCXtxhExc$qbm&i-24j{P-HT?RB(^W;eI>BI`8_L2*u@l4+uRK=jNtSu?Mp4AOLH{yv2nK~ z_A~y@v9TJ&7Ao@)`Y1E=`X(p-q`kx#5d=SBwXYfWKBT+E79eJR1j zrI;kQD{jl`#Q{!5h`&dDT^B zi0PHJ4+>YMSiRka-!@$)S(6%@sZu(A)B%kr`7mT)BO{oqcgr5M2fY#jJOsrl=v#am zw9`ni%K}Etl>leQfkKXJURnXJHI+1tcsd*Opx3bC#^&`J?TnJD?%2QvmA`gT9LYMe zqmb&VbAo9oUQiN5e3;w`7qflou`2w-2$z!^V759FLD%-YwplE8Spu;Pay{v&1?8pC zZCT8D1b86yszw&(8D*J|gmx-{%Djq^-gwT;v|kyWmV*n#OF_Qfm>yfHs64J^KvBl818NMx{ht_cR-Z134y zwFRG;mSN=qQCz4+7|UmW+MQLB;@%mD)gRK?^r**NRo2|j1SVt;E~ZdH@t^BOw-B2t zY$a9$mZXq=htBjFl70a^)^=SOVV%hP_n^T9_Vy9ohFN2dO~y9|aw|&t&k-=&4H{8- zaWaf*9w~2^lGW$olam|By1o9?1mUThOQ8y8Nqu2YWgk@yK3?k7+^j`b5)tvSZo?Ef zZe>~dR=GM9#-&fyp0!JBnC)bi8Bvg~`cIzZ&{FSQv&Po$uN-^nVf20Ih^Zni)uSN? zcSc-{c%_!?o4M1JyoqB4nSU|O5;m4QGLRyHr&$B)pcRy27_OukNke5upK8+K@u9tn zX;rjHgfLx+KGmP|6MX6Q z`7nJz`KZ61=2^qp7K?|WJh+Q`S^oer$L(4C!Ce-E%<>VOt{{kcA3HlBaXMUPz#t);oPASo)3* zJw2+NJi)0Vky2#I3J>JRQ&hMi?$shp4s#%3)a^hxVKL?qSZF$qqn%4vNhNktCJwp! z2-_cO6v}NHOQ|k|rWrJ7{WF>{QEa@>7fz2y>p*3DcD5je$ZQ1zVZQjOlIjPvSkVp< zj<0G2L_{JQRP!Hm<9eHkW3rwl1zbBev>p6V9Z^t5`m}-rW81cU>3hPDa3o|Z(k{PA z=-w&0<%Z(Tgbbz5&Hmu5;WRHV%h;oiTj^rwcR1=X{xk|nZp(VXoCjsbbC3svRll;h zc@hu;kTtG8jlHVyTSDAGK(aA%-mu@_N0wkC4J3rKlB?hVaY3AnlHwb*0}cxSI$-q_ z{BNFYmhR#=B{Y+`QPVYBt5mjg&5xOsQkd#;I?$n-ZfRjMfEPn!JNp_2E+ck|-Z%n; z5rE2mrT5KB-Lgh)K-Q?iMKo<@B%XSfhYgOzcKxU^Rb+<_ z@tq}#I*zDC6Xd~iw#b~O`hgqMjXYpUGq5b@3=cuzie3mLNZw%OosL%>F+i2tIA$B3 zTWViwUNw~q#&;^f4Eu`Eo?T$btTh$E9!(BG6#7CR#OgqxMj;`U<76P9>N*_JS#^Xe zx*8m0{{S{=q=HzZc--nIaptJ7mJuYA2lX9FI*$T?SBV_f+Fu|?zL82>h-OBRyDWIl z{(M% zt&H)M{Lq4*Bx500?rCQ*uE1>ECZawIR>7~=kh{nJUz5LJ`A9K)exqzdk-|N?d*x^QWWRCdtQGkN06aEuM%;F{e=Wh#EDWf%O_9`-ma5nnmJnuKT>}! 
z0`bBP!CglxKB{IkNp)N>Af^{IvX)73A7a=i+Kkh>;v^$Wb)XwRG?-dUE^|>G!$ToN zCm6$Lu05zz!w_#z)Hf!qmkT;7n7v?}N!t_xkgLVWOax}gW83dtT`WrJr&ePp)<=5k G5C7SH4MURv literal 0 HcmV?d00001 diff --git a/projects/DensePose/doc/images/res_bbox_dp_u.jpg b/projects/DensePose/doc/images/res_bbox_dp_u.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd4e77bb3e775f018ff590ec78c18e4e3b4bf82e GIT binary patch literal 158784 zcmb5Ug;QM36E3{CySwb-65JsyzPJYu?(P!Y-Q5<4;0^&2++~Bi1`nbrly z?NfDX&Y9|Yx~6Js`gywlTm5erfU776kpsZN!2uNBAHaVb0BHaM{QvBKZ-jS4L`M9d zp&}zAA)}$9qobjsp`l~qV4`DSW1yj7;bCFp;Ns%pqGRF{;Nud!k8%IE2;BcX5fD+| z9dR+xFy8jMxK!k%w0wANH zqP z5D<}&QQ+ZF-)AN9-~jJf2qxfPebq$pnRv0@!^R8j!F*d*L~n){9>( z=U6TcA+2n&%Bh+SGp+Ya2N`%fW_Oe*Ecr!)!%0_RZdxMZx4l)9PrSgav3kLu5%`2# zC0@l(=3c?%&7z2w+jY$>F(G5R+;Lxzb4UyC;*iZeu_i~e=3N6%t|f0h;HL_I6tb|M znF&ZT%bF0|RzS=^A?}Mf!{&IP>ft5^5cNTYlWRVN-j6d2j4y&Ex%?R`45Nm!Jb@@* z1K>03AoX}mKw5&nE?;dw0-x=XbBu_2*skI9!Li~~oa~|S1Idm4@K*PmUena3=h${p zXGdpmW)|-`kS&=IXJp9fxE;vBDmW}E_I17t6!H-uiG82&MIV7OskU(Guu(UR?bj;paDC~ zu0dJvNI*V8orMygEdtMBp5J2pMtRB>td#jC>ooU9T3g>ygEc>`OBpENT@_?clVN+% zA50Ze}o(h{5k>n*3RvL?DOhquj@>KXusT)YYAMLzCXACceA*M z`3+g(LLcG775&IZrxl9hl3|UCvR!5FXz~_}f*9ZS^Sg*)ljNuCNi zFg8IIAT3N+mE(^1!UAN9KhU~U0e62QFRyu?`ol<_I6&HmIR(j@RDQ0CU)qf?$50b= zM&X=+&DWv=zfSDfywrJHN&NG;UH_Qo zpGz*RO1q)YSa5naiG0zaIO33<%K58F-}v?)z(A92Z8tiLgyK?`o9euVpyUV$C>J#P zTg|(SCe2nk1~4zQtqjjG{>zg%me+fV8Ocd>-^}E^n2@d((#pbaNZ~nxkc^o!LRj)A z71B~nJz7gg*>2*EdcrE?H6|i${W49$B)w}@CLwwEDQzXIRM0r1kmLBDC!t;u?+jK= zOyxy1uNtp6bXdDVJJFcSe}OyZ;cLy%erPDQ8_fsRvIpOKpbcUH`_?aczWLidH6T@R zBB-TqIf^ULrE|T_S6W}i2GM;JbLin*2>9>h3S8i#$27Ic*p_T5!9v`p&`7L8k+&oT zl+z&f%8WsLEFac=LSr0)dA=TntiVqvIRH?}Hnv-~$7scytxeaj?QaZ>WEvQDu#QD- z?9CXu;;bNAraXKJM`4`L1=*Ow6VIuMOjk;8HLfjnfr6j=h+jB7db9qCOa8Xg?duB7 zJkXR%@Z8{|VhEo=#fw@ZNQ^cjNSC#3C|zEN#v)II8-1>NRwE13fJ9)O+DdGO=(gd_9Pvpx|IDsvpM9ksrI~;zQWBIc2PlE7N;4_jlc~rmG`Y zJ!$@kq$)yt(8@YSRw|l@nNns7=$%*#@e#rw|H&_^xS{bvXSv;=iwcEOs z+g7An$ZSB~j&h>bG8Bdo6DO!5t!?oXsP$f+P)eZgk3)xLh8X}Zvq-AwJQ~6-UN@mD zw=!k>=p8eqp&%tPG6PA0#}R)E3nwp3caYVxWcP*_w7=IqHIfIOGK^%Pc98SHJP)k& zL+RD0)-8$pM)AsqMudMJ`!)Qryd>?k0nu5qe|D?{>bn6Vcv-aj{8Vi3E&542TL}r# z4rO(|JLXLPMQ{;Z=DN{Uibl9sm^%34Gx-V=c0zz4NYguV|H$L^+l8Y|u_PLn%i8PJ zT^H@i`2b?B(1)eM(5=j)V^zpk6+oO4P#95yO72#9daO4BaM$$csNO%LY5f+Qpo}0Y7neyp;UupY zg}>`}r=#Qq%xOlWV|bmZN$vGC#4TAfrV25(v&QLPVE6%34*LIe(WVXLARmjxYYex6 zHn%Nh&`)glA2dbDfpY_t9+g%Ma$GavKjI9I!wpR2teXp>tw=3a?2&v$x5ShPA*_7i zir+=zK(Ss!${uZ)PHyMPH2E`(nLjf?CAo~4I_LlFNs(Hw}$^Ix+ z2d!Hmt_kOY7FV>{jrqpB{R_;D$sQX>m0nz&@i>KnGu0q8c?RBWYD4iI7pf=mJ`B6eli zZ7fEqkrXZw;*U7{RW^}cEkJ{yL|}~u+k}-D3%qVJI`|@7D$J{xlSn;#6``d--D$+d zinZ4%dDt;smY8_H*Np_m-E_-_ZFsB35|m|}>oc~Uo8(Yc$cLqn0~h++A13Hw;D(ss83wqITCe4wHUCpO-UCO=RDtmLrmIm$h)vB! 
zExqUhQ3BP|ChMh2`^R1=wogjVV6FlZ71#c?kDlS4%{N570s{A&NaZhQvqk*{C#f|>$S0hGJdUF$aL;DtgME(Pa z+$qm^OeEJqr>x>2FBgPS^m-LyLpJtz>rhh*d%$g>SRL3 z*5mB7J?_F#s{X1VhyfR4SK?<8#ahcNWj=u#=NiHmG=j7H-?luNj`atJU}yfK@}<3F zh|nSH36*Wy^$iy`g)r0Ve*kpxMwHe(@SWBKd}HWxn95$tTd%XbXrs|$C)$4YFxmz6 zp8Wi@AkARg?vK{JxGAh&;m?(FqN$H1*dIX+x)d-RO;-VEFAdrk`RZWr0WUsF#}fJS zd7u9Pm|x_F2EOe}IVk@%h~kekH(~IvrM%gmj`S#Rl;gX?e7;J!-sCu!9j3P)8skC$ z6pfMbkz$B_NidjjPyKbyT`l~NkFbS#ilYiQAM}?U<*q(CPAD2~p9IUx#)=`ui#i1r zsb{4i>Uv{3O`ciVbMy}(@jmXBp|$?hXhKj0b@)r3#+ByhfqiiK=#ARWg@Ml|#9(C4 zjvG|8hVoWRlV>&9HHtipc@%2Llv%MrH#>~!L;WDRu9%f=>SJJK3TkOk;+|Ol`glR+ zGB6>|jKU@>>Y#8ftJKgn{PlaqSFv%z^gb~vxU_Dl0}oD)0PoGRuEUMe_`(Dv|br=HSSF@}CwlF(1c)M^E7Q9cgCK%rXo z4+Tt}Cs}LKzD3~=P?acP=|IJl^UtdeLqZe`=kITrd(BI`CDn~C0i_*sw`$}(iFl;0 zP|KJ)kdM+b1)e0Bj}Dlgu@#yB(k_gkmTsCZZ%ZM~G|uL&1QbE$>KQUUc~7}z$NBYWQA>qtVX zUCLe$`57X8m);nUUS`?ski9uuV{R7<(Kjet_kVz>%&bj0WZv>lH9}txtoTXrieROf z?}@F*;dCPuQ=VGHhu=)7&4ZB?9%Xe!Z(|A+x?`o$#MEbSwrtH%q|-0VmXv{4Dy(PR zKVX#nWYb6g4R-1&(yp6?zXcrslyZqmg@lsJusIOcpnVMIuZ~K!%}6$ZoQMiOjqJF@ z!atfdF;@*S`3Mo8$0x(IN*yhQ2-onI7j#)sC)luZhl9%m z86O3_T*Jf@^iThJA~+AKDBYh{q|JI8^$*Gb$%jI0AAJZ$-)dtflACY9PJ8$*J%jPhX(Czb_M0n(jm;4o z!pRuLE?Ta0RkzqRpmwMtGdZ>o$E5=&6FGqPq5#G{r(s7a%34q;eO588M)qmK^))b% zWy*vGY4V(1G4UElF?WnMCu^-@?q|6tw7tIHmIr3XO(i_*p`COldn8Xgo^qjO$0{pQ zej>xkr&K&Cw$yl05vGARKV3(qM^$J`Y{R@+2ng-scTn&v`6Ik6IA&j#HtSjQfa!H= z(2MmEF3l1TSyVyZY&o1*VY*arGb2+_=dV35_kr4#Y*cbYbVQT5?;~il|Est~AA57I zpV2%IGWIGTm9tgbp^0ILWo$c__zbTD-@MvnCciWn$A19NKPAHi@2pi7NG^w?b#+w7 ziRem1o6r#=Z$yqlR)h96#6;G1RT zX4PeM^V~zJ(olJl>Fu`;fRkyUR9LDsK0>86ut78~(8-K*1-}bbocXH{A7QIuOp%^k zqDd)ahOF)YjGH*eEmtUWA=%cd;+Qa1h>gK4-VE;ik;FcIQ*bI8@L1{PUT>MS8_QSV zv_fqQ$MH*~Fa!m_d>)6swxiZ#Eb+uT?NY?dG4n9+FFT4CVibpS+^-#=GCK+WE-+Xpr*%4 z1FyjsR=v;Bn`7?vG(d7a0SIGe;z3su83}EO`l4(Dnm+w>-Yq3{2s+XS96pj|6RT18 zuKS`4X2h&kD5k4i*6XLw<*5n_QV?}y+k`2Gk|M^%9mvLyJk&f1?cb>rX;n+wy#E2( zBK4-6RNC`?Ol>->@3|jz36GGo*@oi^T8Rc$Bc@9imbl6rr?2p;{VzmBu%t7(6uj{R9G?MaUop@5L>15rNR)kB=O zQ3twY!VJ+77;+I4SvCFymbhr z_sy|-zm>Qw^~^C~AkjYD5c~NRMh*i9&<~zKyB9**HHI>Y4X0f71Ux#Goxovk(Z2BL)7S&hPR7@TsFwdM$r70+^p_*G!D0`R0jKFs1BB zFWFR4sVcnEi0rUFxLGj|XEihHh~!XOrh=8?6`nJcDuI;H(~0$X2l4lOqtjL~3g(u+ zpG!9#Q%@jIo6ti5yGT_Yv3yg@?np8S`&Aetv52v?E!VpCmuK`Q7N^5QpLc!8&GVCnUz`t>bcH0ky1&wo>|REIq=Omml7z+v2ld%(UKAx@XSuHvvq2%Pjj7F%2M6Eg5>F6UmSTKFj8E~;Lh zb;hDJiI+lf>T+EH-IpUe%56$4;HsGXvqfH3qk zZy*DM+KaN%Ag{~ zLyzSYoFLsA3lA0lY~%Y}xf8W7airBmA)pC%RpFQ+z0h z9bxO@9D>}2w&kK{CW{oa%HFk zGGD;^X<|$AAT#@A44f9APEFC{P=hp`^&6l2+*scGYjd8p!<2*T@;oLkwtnEvatr>8 zs5BFHwo#5qe8U<_g7>!;3z%~6>9 z6y5B9fOCrOv)w{Fs+m!g63C zm$AqGSST#E;(hy8Phb1ROhSyL zZS2)*v772fC}F{dqcD?cfJR#zv^=Feq@b{qrXJZS+np@dda=I8Ns}7h?{RJN0l5v- z8Yhcgq7^6_r(buFWI!E5Z?llK%m+jJBJT=i|R z+y)vol_`5QS#&>l*b;UAcmtn00o`$AYmUq3dt>2r#HE8Zz_yh>cnum!(~)j{+;&41 ze?mGvxDiSl6eJdj1!_d@bhaL;v+ktD(D{RGrbO+@Mq2>&p@Yvk1MMNCZ+WfP5@PFy za!nfmMzVio(5d9GD9sD!*reQI9ZhEykdc);1xKNkNqn}mg^W}^&cSf8WRA=KCrwiF|sWb9Ty@5t!d9zOdd)_8xTi@ zYHsg4=(?w+7mdc>C4y{3xmTACJF!@dX~Q?2bmODP(X3dvI-&Y0V^HA3SKIlE8ONix zwCS&V*MY5?ZxZ6d7Au``m{*D^U%dx}zuwW1T7(11Ph>_s}DZEDN9_aV84P-vek8YM#ioer~Hp^gw%#cy)BH?PLjuiFtkedUt^p^avu%s zJ%3Jh$95--mx3Rm(d}&_%1V66E9fN=i1f4lB{3#>#o?Ka z5skg|8nA2cCi)}z=Nl3hI8GStMd4JE&CSz^G2^=?&nLAoJBy04dAYw1s@q-;Y$x+v z{Ece!Eubu^pAd9ASQj6X^#E$|H?)$w1eNR;F>UtAne$}CfA+bwJ`ZyIIR0o51S2-o zcIJ;dwr%~?-iJDomZ9vLiL~|>E$CK;`{{9Bt%(wJ#1$s%4)BLCs|$35ixAH|;2-9G=yQ)GVlH*o(f-FuKU`!Y>lX z)Jxf*C$JBB83La|SR?`f?hN{~LyRR{B9~4y5%hE+tMBv?%YcBFWrAr09%P3v^)uS~ zXSv4j`UE!iPR+{u|FayX2(Ca0xE?doeFr~9r zO0F`7C?vaZVY>hFdMX{P5pzU)V2^J{)+aX6=Bd^cU1T35lZ*hM(iNx^-CCh5yoAif 
zZ5HkGV!96Q74Z*;KSlaAn3pUp8E5nOZFg`i3vFeX;=?82Of$pr&kUX=tr;*Ee|;tz zJ(J}g0aeKr9JNu;rKD~NiR4(7iPu2pS&j~Qc9F&A|vryhxv!*a_G16(O zsTjVXH`8m2%mY&$FGzjTpz#68Uee_$^5b1yM>re7%kbS_nvg=CgplAF(j!f%n?^S_>U_}O!{W@AycQcw<`po0Mp z@hC+q*0QKw?%q2i4hEZ3M0GwlUV1V-#oGw~+M)@h-Jhc@Dmq}|-^f2=wDlA&L|5l{ zETW+yVC}py({JzAv3DJ^hcEUeZ?_$OOSxW(5a<{!6@|M{Gy;Q2oSc8+JN*R(-BfW@ z5zuA=Vg^xR0k98DIC0oahznR6C(9=yT^oFy4ED>r3gu#gm`nuWgt&(Fkx4^N3r-F$*V30R!g&YHiSbTdf(F1*%5Q+1*N+dOUi5{TD-t5 zn)L~7K?n={@Ko+3^tYE@`x`s406fEQ?|K9=t(--@M$k)p!tX^4r6JC|Pk7Vh5OL)e zXKa7-#G$m8@H(qTm!Ss$0v<{7G zIGI#z3v1F;&CeDEs#D6g~qs7{?C3?otHhq5!93g`Bt-(i;uZM6}0L}-F00Z+&+ z{i*LbnRs*@mfoR5fEmIWwyGEQexi*RYQD;}3dqDRz_21$2QyMZ5^yIMA|y6|zl&M$ z1g#>9jv8Ox`@P;=2e-mUx);zzeyg|I@w4F^$;WiN+N?VhqnQz^K50uZ^L15Rj8tT1IF#q&dcZ&M4S%ZPZ=9i4s@|+jPc<5X2oh7Fl`fCGl=n=ug5uu;^-G^K9E9p4GAJ3M;vv@EVM zVXiU!nYxcS>K)|s=Mkqsgqe!^!*QF-`i(y@=`jQzL^deCrO7j4GVZ?P;OER@JeJAO z6v-Le&ni~r_0yyo_|kSadY>41ZXpy&()Swcp*vR_Q@Qj#y?v<%vTtJyweBqJoRs_- z=Hif=IU~XEY-6L%iF7mz3FaM+mhn-w+v}=>%8^2+UClH=W{y$2q^@6 zQDUw~K5>unefTy9SWWIUz#s7-`=mA`y|NuW$at_R;pFYaS4%xrZN)7OLGu8GD1gZxD~0MW={gNH;LJ8zZlnwqiZrimf#WZS=P%|t`cCY8 zqnqMN)Cs&lx}aE{l;5Set(@Fwj`Yd~2+UKG!xj>&7e?3e-o(BY zUqbI21WM|AF+dkMNXf4$7KBxb_#$GtS06zZwV4hk6qK_P1wj3AyoX=hq&4lC~ z0%B5!cY;*i?I?jrf0>@Oq)cdr4j05)Dn^5`JspV~=?gw5nwVk<-1LYRme+OIAK5O7+nLxOi4!c+&a^ZBskG%GnYTKUM9L?46X7^A#0-Ii z8L%H*ilcapalbuLKQP6}%oj8yT3ARXbT~R~q%*c@B^r-dOGH{wFt#8|e=14*eF@Sx z&5$~V%^Wpf)fLU}?PnaLux^8)Dj)PtbT*hsD|I2NrJ?P}ql?6cXz-!R@~&D!1{et~ z$4k=stmRb1m3oe)#}ufUtVP`dL1s4>tRfw8+JRerZa7E7Y(bM-lxb)fA$o>gQ; zeWo8mX<=b3xd;L3ZHaf=VDfb)m}e?=1LF@%NSEa*#T|lX_KF6Nv;ZL;t4p1VqdiSo zn2KNPaO8An@6oR?cwW5x7zZo2F0m^BZh|78{cg{3s99MvERhRfuucmm$cyFl}%T;aDKRz=2@@A#UOunYy0V`rH zj41fEeiQBh~#?q`?19cxWMtY3uRL`p5m8q&J_bA1?GOsH6Odu2H_PypF z3>zVn=hVZYZ#5K3xlWeWQM-x3nlQ^T?($C}hVEi{a82{03b0IZxZieDnwKSV{BX4# z9f8#f_%aL=TKhGmo?djf<@@29cnrg491w=pV}uH94l_<#k76~o<-~zYP>Vxusg9^A z!Un&kSS2?qMpO*`yFvudjTHLbK=YT-gwUzl*g%@j=V1}mKU8c&lhtoCSU`f1(xR5` zG$Pa%|BPd4b+TVco2bZvDAt!M+;#zk5o2EX+^+p8fgR9_S`{vyCb6M}r2P4GeTao0 z$n0Ml>dpW(f8tn1iAx_3m-E2g&Os+d@*Lz;6s~$yxKD!PeeT0&Midpcx|;fZaR<(j zDNQLmTgf}IClATd%rMu_UD(Kv;wb;XwX4Gw*3 z;_jaQ_zSjc*{m(eJNvrO0d8Qr2Yw|Wu4hMNiy|8rZpb6lneRj?{BvKd^4_^foD2yR z-D{OZ)hJI;Mz3WaO!#eACSe&{nUJa4qj{Fb8nZ^B$|EjR&D)ZQc)T9EFuCu|!=8p6 zSnDsAGkg5(q&Pay{+V61D}S-7i(h~M^ojCG*={X5^9r0wsXE`ko#hC9oKr+`66(9c zmP-0MZrHe87Dp%uu3te=ZSh#n=*|(w+hkFFMgNVKty{X4O^7T98AL!^6wO2L>P(>m zBK0ZSL-Kf0xtqv#>`hxX$LVC%zl!UhOA4Dotgur%5(pBUiu-KWq;Z?P%!SJjQ6(zI z>?3|jPYAd=O%9jsCxv9jfMO0ZtA&yVZZrBKz&hmXUF+B#xBVvk;j$_Bn>{@=s81`H>TR&WG121VmKNifgK?+>`+}6U)dY8;OVX*v`ZYwkJnbHd2QHir=p+jBbBc1(d+&4I&)FyTf!L;x zYd3t=w@DY)!K`v=N90?dj!F826LX3$Fb;gx1H~&^ zQuXhFrxgn}cv*+qIpg>MpY3l#-6EHZl*rSj&cO!e0oZ-#;mo>`vpZVT4Q8ixfaGu*Y4WNW zNKTqYwWHcf2nA6aAMM);OpazP_k%g9B}|VRoip`+A9m2yjd2}lu@&G;+$R(C=JFeE zgP-laSUGndpg{ro4jZ{91DT-%FzhYnPhX_8T(fGiGg+d5UkbEG2B4iG+wyMFj%LWK z<6n(WI`$el-v(XD!#Z(+`s{A-&UkmVt3RC@8|b<_Gt13xs}O*Ii^Oxrxi~R5H~~wR zZs2yiQ=`-6F8)~6qN{X(x-%%njr7o3sIdG#bJK>}9tWc*r=xmN3YEMy;qN&$MOEyf zt-@AI9L`Zag#-&+BtbDyaqXz#89CIaqa#c%W*j#P(dKk+J0EWo8f5-gY0;Vcs1}sv zgKg2JCY=~_k#wK=cB^NIO52Ao^Bdr?hr@B$qwC__P;i?F}?VhA--Fl*J)#NhIE6 zcis!HsUh$n?AR{TPsVT2F-HG|F^oM{SP?2=8adJiL( zJAB?}7u8X$+{p;))yik%{0{&UN+}&f zZ@7LYQYH%PW_hC2vNYF8ZHYvte!i{D!(!an{UWKjVrAJT?0a(FSRFG6&a1FvN)?bl z#%Xycp}uE)*Qn_O?Izp;Q4i8gdoLCWzOkEN4HdTMyBI%HFeI#JmH0DBmucV%j1n^x z=zF~$O2Qt2@jC2NHq+kus*O?0P9r{Mp+o_+)E>aLHmM5;-%fHo?rYDzoxkn39ykR@ zJNWnwU6MwQn@gc=z!$|Qj{TcqKih2@GW7?)F5#8ur&W-EMf5sG?5&$|C-~#1$P)9l z zmrCgc-xiI3F`YO)vwIo(8u$?3&TZJPVS64RynO*dW 
zDBEYS@=w>L(>OglA=%qtxYR*J4=Wfbmu=+*A^jIUVrIOtHAw!!a8y8Iun*Bl=7u8N z$Ca3^EBb(X_*Gpf8t+eTxm*Z6@`l1oHX(*SQVvc+NS>OIoz8V<>=NM%-1mIIT|w~2aXxC*!PLX-vqL>N*ZEp zc#1;1QuDpDt1nIV*Dkk+tB5Y3Gg80gTTCv!IdbwJb#`PBs=};Cb0vrF3HM94;}DR7 zJeD?-ZUr9+`1EbbH1Cj9R6qgfyhzTa8nZdRX-UNnS0-ry;VUb}-^g-=Q^?iVV~M~$ z2GvvTN7~F)hm}>jGK(7*6FdhE_>ABNf}1J#+H7cg#Q^e~2e`FpX)$iwl+?)eXO zHSh_;_!i}t0U#}6$`7&I@XI&j3k0opwCVvCV?p%oiB{kDbq6;~*vMp1UUbKho};7O z7;%HWI3}Fh$MdxQ12hn&UjGLe^1+J?VsgW-Gp)Nsxkl~;J-eUj-3&IcFD0uQO$Mhx z&#$5zyyBD4*`w04B7ePj95`oOWDl*pNwWl?V5O2;NKK{ToQS-B%G-bZ4}g1(Gx8(q z=#bE~S0gnfXv@y_JE$iOb)>#O6!#3_Q#HZLdxcgJize}-b#^a76g8a_>EGME6k=x+^1&m}<#pIFutHqKrq zh-Rf7`&~j(bOL42iWUe27b4(YU!d#Ws#jxA_PyyScVvl_DFwN7ZKoB-Kz;O7o4CB4 z^r31a)Li7u-|LeW$uUN%`v2ILDLV^n*Zsi#%!uz`t8e*xh-S~kdw74*36AQrq=-)52yH`qpLW#i|h z*pR6|xPsn4Atb_hTK!MiPzZ*QN=U#bA*GqTjpEKD$#RT&r^;YPGgU zleJecd^@%xlg>zM;u*3W0-18Gr1zWh;;=b}AYAsbyIjGomqF|wU0BZ_Ba{Z#d zmr>S|Ir;RegE_$O4$(Uy4We;DPiYS0E!fY;nd;~995A^rwsWm4bS z`;11`@B(2mcCGk8z>DiO?`wy5y#Eo6B-TK-p?(W2o?Af%S%~g8UG^W z)Wt-J^z^*a4T|{kcv_niE1?JPdcy!o^}eN^I)60JF@IeXD*pCC_q`yKb}I>9NqvG? zUh%^7UxRRnm#Ed<`@9>jjcIy|vMf_n{C$BIR8wOfEUB&`BH>9}4l{0~<%ZEw5*DKy z%yWu}R*`;iFv}-e^KN{-OzTWIxE3T^_iZ6RcI1DHJ@T_!le6Wdv_*?ld^h7V^M-r= zm`-2-304AJ7ZF@WWpsAQi@j7V7i3gaT_N$nyD+y+(9|xbNE@HaCjI(81e?1L1BsNx zag@8A?~axS!xS%SreF0OLaT_3D71> z{U6%fg7K1B?^A`DIbD($%%HmJ$6Dk(GN4z=-sq5c^^>5%Px>OU;7o*+o#2)k@dFfW zaq2OmTsIcmh^z%7tVIN+De^W&(}J9(PJ>e@o*0+{a1{r~y~K07Li+MXZZ^+UsM5n~ zQD#o{u`@eG@moSD=!a*w>FgK!>NG z=J-1*)HpZ_^JgvH2BLrRcU$hk<-3+r&yxVHPgz$EJ{-OEXUUdSao&8bn-Joh4WUsq zsF`!#rB`gfItlGTAL}^(dE#HcF-1OXi(Hu)bk^gK^|~NG(q2cvrg_*k=r_672^{I1 zzxC`sZC)W7s}D-xSx4)}#xTr?j(vgikileD6fTOJu8&FX zDT17G4|KEpIZ5khLP*0`Wd-Y_V!kCaE_|0yM*EiFQz-v>u8mSOIRi^9Ujhs8JF_=| zw1a1w$P21d((9s!jf_%kwq7vgk!U_AQ+S%UV`$()atwDL;^PRc$3oMEb9D0TmF zGOlTiXIxc%gpAe?IuNB_ACJtj3;$(-^>)aCuzp$Ib;rJa6R0`CWE?57Ojox@duEgt zSWT5=8^J9PZB8W0yxXqfOI4jm0!$Nw5?QzR!yKK>Q;ZF{bT}$P z|79TUwNUZgN!y|rT&(KWot8yyY)bWuMq!7z6#TZs@HH;SnYNbv4iyn>*FoB3=lo@3 z4v~y_?K|%;9Wyze z7-JspE}*I1R1$R>ftv$K4Fi*AYSZj$2}KsWIVoLowZ)e|Q1eOMBHP{8+)sx&S(E>w zuM9{o?C~O8!)CitNI|-??~SxrqbUQ0A%q*U!F=()=Hn+_C$eyL8de&(^Ypq83C>9H zx0`yKEwt>Y_doxL^+@xf>&lU(^VNQaxF+11#oQ;T;}1SdySGf>G6a4cEo$%R==t_7 zAc9Gi44%@9Kb&Ec8(D*lNfUX;k=%JDQQ8_rNAXe0bXWbO*I@lIn;YiW3Zq{(C{=mv z^qko1_wB=JRdx0opZ~Hp8^{L^{gfMqJ~UeB^GFf()xX(oU zz?D+5yavcP#DZuv6B+dGnJP@z6tZsXq8WY;;ry@h|o8rg%qU zf@X=aFi~x2sEax&f(8C&ot|{7(O?*hmU3^o+7X~R7dYj_uE-*y0okM>q;8CsykO_Z zOr=&cNKGN2lK4~Uo1u_0g=>WJLU}b+H;d)pVaMLdV;7}6Pw}K8RG<{S6AG4$DBpW!}+IJr_j&3 z$bWoz0nVL*m8VLqv;0U}As-cIzxgDYriar~z*o7Mgs@JL6H=#zlU|s3Kkl(RV>m4R zkOr3xnuP^CG$AWn+^}X%UoyJB@>r9=_yo{>iCRXv_`d+L9A^T}V?kOJ(SQE6a&UjNR}!cz_mhn2qEa zYR9lYPe#3MOO~6;e$j`B$^qTa&`yG3&<8QCz2|59yQ4gTVi997b+dF71>~aKR{P5& zmO_F=$$qorhmoz#)E&k7E|GnPZ*hKIofvU;Dc>!@OTl!|CP4wWR`$5MAF3oZH+h{n zU0v?fFDo)FXQ*pu_NMz>pX&tn`K%*|X%ok1JbS@GW}1Pl<+u^a<&RL!TcvoAvn^s2 zh=_Ej&vM%}qOaMR+w!;mYSg@2`QKW<3Icl)B%_4TOBO@Mj+QP53LjXfQl>Oa@P&}t zfhUxLj{%l!;=hPZMY(6msZP1;xn7jEzrC$0JI#~YNFGXBMb4&AlqCRS0a#x0Fmso- zIoP$8Sqcn^E|u%=IpL3lsTn=ggR#0Y@jn_iJn+^k2@2TGlGww$RzC-pVbtvLD$D&0 zkK{k|^}%^APL|W>xOzEVN7+PBYM@2(yoLP z;VT+^O8qVsFiJ-;dXJr8+noomqYQ3ROLkDweoY(_IDYvwbC99Jt4vhx&Lizp$*}iC zQc#82oEDUa^X-lM)%#g|Nbifar#U_Nw4iKY2nQlUKd8{TeeOU^PRHI!_1GN7ylQ9m*ZIYVl@>r2gvRE z^mF7;0L27lq!L{}ea)~FG*4j(gFV7BwDW5dNrW<|RZklSkY%O1=%K_V_{*D=Kwk`e z&oLp|wrjNK2<*u0d&O$*=%@PfVemrdk$p0wPF`%u0-jm;V(voK%Mh-FSS)9zy3n|VZ6EDQp@ zn2ZKTt&pf-*plV`CibNIO12=0vmj7|IaoFGII`}^VKzONmX>NgFFO3={{f~zS-;ZK zAV=8ewFjpkA1snAd_D+cj9@C12XD1{3lE5`xQrsk<;9!IrAIV#VvwI5lD=@C4qll9 
zSTt{^e@5Tdt3A&VvyH8cQpbbpiesrs^R)(-5r+4N@d+WhNSP7xx(Fu(>_ZCci`#b6 z;%0d<2Mvb8 z{{RotvhR3vMKJ~(kEA#nN9*RDS#3qciEh9uZiEg00F4;mi@R_KoCR~7nhe-%5<+7m z1Z_=41+a{n4(ZT&sVuH`z?5x)=8GIr$$=tax_-12j^ZT(BMP&1CmoF%I8-=L+Eg4L zy)Mzcq>KpFjluiTqnQjy#AJiGpowGVhB9ENI~-DG&drmc4dfICwH*3wMJIvCvaG`!X~7Mi4vOsi}M+pPv= z#k)H*LzE9L?LeFoNgK3z8XK{qB(EtD z+^$zU8dg~{F`rPY!**P9*?M=N6yi8;7?sGz*`cf8V0Ssq9z_mWnu5#p^u-0_*Q76` zk6~R@kgy|`bG3B_oy)Di5=4RUw!0Sg>zWB%Ry)C|cI zMg_}9x6%Iq#ompk&gy`p>QclDkhJnk@ zCC7Mu#IQ-kE(RMRMmF-L%NZ>OuDLLA7ytaF+O<(Jq4k7{&F*KVKD^ws2hPejR#}=d6GGRX#jc zmBov&Nn*O^>rH1JagMd-_`C}YQA~iN7u}a?3F5ODSVwVe@XXzHj-LBgUpMg#$b_a=+4puS>LEtd5I~>R@$_PhNC4Q{eWXu$)R!rX0Oz)mK||j$fEbIof|MLqyk6 z5C(9>1K5h2aw0sbKMy~c&=6cgKM9kPVHgAX)$Oihx{xC`tH1bC*Rk^y%oMWcR!>t( zp<;E$H||%lpw!|q1eH^nl`2l%jw+(euM0dzOof)qWOhQ!cgBPqiJmIpZf+9xrLMOpPQ8y^%BhB=xx0ZR3$ zGe%pQq+oUw6qA{wlAlT3ed^lQNF#Uu0C>4v(^nie=?r*XT*0X^Kxigs7SKY>P!iZ3 z37`_BiZRT{Q~hck*gq%}_oiau*QOIH#9363#(+7x`E3@TbJl{ANMS=7l39i}qFYK? z)g2cOFiD}f$kGao^d^mEErc-0ZUa1{=smxU2SIThQD|eRWcpXmcCKtMs3r?_)%>W7 zc&MSRts_kCPEK=GR!MbSTt2MqM}tA6LvfBD%nE*b$wsgKi62D+?YX6hj%#?XWMEtj z{pvVh6l+_^r#MxN;1AwYu_yGWxV?lnv%kv_)|V$;vksbj0YQecFr8%{jP~wfN9As7H2Uu;1%6>+cdeh=g;!R!3Z+g zpgpuf(NTyfj4(au*S0A7z-0k6{<)yWvD+OQl#mZoL`fQ9_=|vm`m?d|Ku9j3jX7MD zVt{~q5@{eLT%=fL8-d{Wst|(+%0z6$?dQ!*YVb_5Db7DPY|u9pvYk4VsQR1lO=9lo zmJ$M}a~L!z?r>e6J#hnvE4~FAHpLR*B~?JcC zNeiFNT+_xPHxHq6uw^~F((^hr42}fMgg9b)1JZ$;cw)VQqaduYHn6^PL%D|HWpN2A zbL#u~t3`4nkYJX>72Hzrq;gE_G3F-!0H!D%v*8lm^DZK7Aj-@X zcU)3$X>)6174tb*XF4gJt+)CD*BHBfrQSKP%Anm6g*1Xl- zX%-|*9bQLz7Zsk?X$01=^7XC>{{V}%c&bFq^<7aW#wF}4M35e*q~kY zOB+0KvMU`~9mt}j!dp=ku<9%7EPd#r^tk*TBn*rWgC1(pk$oEZkYU{k~b10=17QG$?yE=9A03wM=heY%0DUI5k98+kwI7^j#7c@z>@a9blDwukkl^mewFay*#X@gGI z`jq!QRMQsF%?xp)T!}&wSm%6WVN-Et+^f7?K}SIAurZSH*8f zYjMQinWaO>099PzRy>kh&H#9rvW#aPD^G{s@i8=S!zRl@H3-NatWaaW4!BFAY@OOj z9G5B8NI&wTOAW^nJoa&}K)XIk|~Bjicz^eXG#h9uLOh;G73Z1nfl^UQa7E z)up}$+M^xLdr)xTx0lI1tN_T!b)Y@FR(jdqB(kif(nWAd9R>w!N5*dWWz?6}7?BU9 z)%M5QmWz%tx|8NwFkJe9>61W`Ni1@6i40dGQL3*Ib&#>2AmdWgTteF2#(-k(nFv+TAvYO#lK?|qp zC+$jcN8@(*&5V|Ck=c6x02&Ru-VZ(Fb8TRUhM8jy1LsQ;eY00zh8e6==XlMbkaamY zQ~WCjgvG_~;n1*LO&W{`-x#e}_$|Q)R}!_gW7S?$ll*8iIJd&Qb~gFDM_bqjMvFUt z8p%gtGpy^VC<;UMLtf(3f?dON8}RNoYGz+ng_k~8i`Q|X6W6hA?;%> z(Fq+G3-L~M_bx?9^*Zn9G#wSSnz(rb5zwU@ zk;35x?BQ2)``rah_YlseI6XQ*)K6T}VnV7=9-HmnfW$%qsg7vL?zq!aCAH*8_YwXy zOSx|z)M>5(v5ZDC=i8+$t#x%7g_(q8dXH?-9>VEjBVs=>qc;?m?l-l%cBQiz_%93|l4tM1?o0y8X?=aX81MgQ!BjvTdvNAIq)(=0wuOY}m$pgF_QWTnnfQ?SUfD3?o8VNWhsrIUT*Lp4KR11!wvn(*_J=F{^&} zK|uRllchvKmB#F9i!H20(&hl;qT-Z`lM@tq+CkW_N?~rSn@n$L&D@dJg6u#M%Nj_G zy#S$nGOZ&YOoQ2huxG3ij#HwxkSGY>yPB#}T)<1h4@> z-D^_ggvhcnOtO*GbgbF<6PZltxpFb4gBfjBXEH{aph*iWF(V$;wI>U@fwXC5C$iH2 z03~}9686mT_h38FXE?2*6_!X_8E1dVlBSwhUDxnl}J$cckUEF-T)*$Ses2j@_xNTX_;Seon18#%LX% zkS(DSL?a9}02`)hWwC-+bc*d`(sH`7LAsJjxrS0Q&C?wRw~7p(iKo;xb~Wey{z+_dE^WSs511;8;V<7-LxqoF-al;9E;e2 z`LX?L6Nzxy?BrN(oO1@lLuFWb$l8F@Eu`9|hC-JJn+NawXdF4@jm_K+V|D7vcK#jc zGb;fb7gNggf=1`>=9!6>-YH^ zj^-^{ItVX}?Xaw52U-PUGo4xKPU(^|S{a!b07osf{{Tt@NoR4w=hZ5*#>_$&7(e}O z0p8Se6ft=NVL|Or+5AOtu!~+H0$xR&vakX{_H(^8FAU)y5NVf$U3{$NG_+DC06+K6 z1qKbvNo*8b%o#p_O;Xi>&pB}zIxR#B*D$Re~jX9-(tDLA(j zV8YX6fk&y2P{-SD^cUwR!}bQ@eOEW50XxQrzwkAeBHkGD$^tn6(Wvgn%`bC(HTBEh z-0V-K0O$v5ED<}P4>KI}%j~1tg1_D8nxxGVG1>DbYOq;JBgn7?U`|0h3be^>Z7(h% z&uIn$&qGZjv5)1UkPDp|(mFBmK-y@A!%T7sXSWAWtE%-ihdske>|t{nZJj-H-kCQ` z6p^yTQdq$ZI}x454({D_rNRtHAuW;H9YLVHAz^goOERfklG)NuC=V;iKBw+YF>NTj znPY5-)LrreOd^|Ui8DTjx#AB zhUf^OQ!B43Hs5LiAhTmQ%`iavZ7U%}X40VJJ^ujdUW0+k zFJYJA`=<*b%UZeUuj4*24Pq_1b_OAwt_UX;%4LC+7b_q>pCk34B!r0GC5#CJ>C|Xh 
zXe4LVsle0s&1=bR72gaTLJ1XQfrFz?Tu0Al(#t`(MGGuV>uiDt+pQrm2%x5=)xI-9svnYhx!3yAqVb~QEg={ZALUA< z^Q>wZ`a#7q`Yt6!jxuwx-=zkf-K->sSI#=pt2FRO%biYuOD~g8h;%zz%NrJm1|%NX zphykX(xc!UV3E)-{U{caZKg44je`uXK&DXJHOk5p z3ZAkv`KSv$&&skv99ou8a9gH((ISwvKuF4@3Csd2qBgJaWBE5~7AZjyK@BP%jc@(Z9x{7my=nKhZjqMJ4qWKs5UW=wkdmyP-950plDtZrI6<$ zgK@;%UcJ;2Xl-GJk$`-I%6~$V`b9gp*7N@rN9L4M_LQX zG_0HHM_pJ-15!hNzubB!F`euV-_lwx@xW%26 zoZ=lFpT~1dzZJ8FGk45ajDSe@uNfd5z@T7NqflKVc5Ni?C_N;1wvMx3DaSJ*MPph| zFEyCzB}U(RrNTuU0|bO1-$^}97l($J&$g4J7(aRqC~o;{H#e&sE_~vw(_5G3IR{GP z_|i8LEH_XiW<`&7BcZ0Dp4wY^=Z@6+7kDUxF$727tySP`_VO;(=o)FDS&4M?v0;YZ($Wma^#~bg*vUYMv=08RhwO zS5W6kQG<$$rHUA$R3!5kSi!*`b3sW^+({sfx5Wvflaycz26xnZVy+rw2>L(^;88?q zz?Wmx9A|0;Euwg}Tk&g2FKB&Q zk9rE+AddG!lE^xr-K*TI8-W_Hr3YHkvU`3f!_-Wx8?$LUVB(j%iB|2*FR5{y=fwgM zT(m9B(pwo^Vu-UH1_w9~>j(MK?b;VizMLG2VQs9K;z<;MHUq5&dT|Ndh~b$=HpiMg zcQS^NCWm$yQaWatTP_+2kyH$gzV*)Vh?+79RULcgfHAKwLbB)2N@fd?j*?rkiM0$# z*jAqnn%?RG%(hb@Tr(dPiz?e7V6BzZcF#@xPdRxUAX!Ws5T}EFazX z9%5Aa`XQHyKTpI6?m>$CM;vbYA{bWW=G@#dMjzPFk&6E0t_M%x+`cFQZmvB-T}3>;#BOKB1$pD>Kd z<8@xunLh!RR2NpPHL1deJx9H5-A)yj*;J`tsr#SvpqQ>%%&`&)`gX`ar)ms)IPPs_ zVK5r}H~3bU&OKU&we_)2@3ft90-XP|BC(IvYDiU_zx)b|R27~u^VA94l_H*doD zw50sxQQ2X-GXlf)pbrZ89?O%id$xoFja-me{ei8!PuY(W;zeWND;>ig^ZEht=M*UT z9i*|w(s7kxp7((;JOE94&xLsB!~80BpNV|)#F8pv8AFBbmgW{U*E< z_J4Nb4~aPU39yt52jTL^F(<)QAP?u;-m|#x?30I{9Nb5Od^rcima+QeXs#J1)0|`? z1D@M$*1s?D$Crn*vox|t8B6iEt>qe0HG zw&e81V*b=WYgS*hRfQMf=r~Q}XPEcVY1b$pm5P9WjePprYny9(h;8_V!X*2!S%}sD z03$)@c>e&z+#lkY}Vp24y zAqeb9uDw$hjVY`FmqFEkS^&!?mkP|ugWjzz+)$Yr4o7CC^9>T)@(1KJY1dyyi?psww@=Vf)k~C;v01$do zhnW;u)*K!BW~MnBCK{Nq{nFb|9TqKakAkW_CmG(47bQACat3njdY@{ol3GtP>v0ep z?s}Sa-pO2o8bZ5+ov0H?r;$jFT$skJs%%X{jwdlv*6IP<)Yzg)cRL)zD2x-h9V#zg z;HqJnwGUAT7&Ht=E$x(Lgbk$Za=U(>Xm?X2@hjVDQorvN+xX)&3rm~(R}BruqCSx~ zsegf@-$`qE5s_`oS7QP3^tae{ps=-pEt++mS$e9Dxuj#}u1_$=PRDH3AY%@p60-*C z7-Ju5C=$w~=p|0x;(?&Cn3JAbO!~4GsdsN2g%Ht$I<;UKs8qXXi+>^fG_@)Z4 zE-ZSmG_jys3%Kn{tdX%(pHS{;iEf~kLp*Fe5mu5YvCD`8z+_}(3Z<>DHeej>+-87} zEXx|TthobxH9}l9!)jt9kX7ZjiaA+bh#!{*l$Hg6^Ce=<`E<}aZOpK_S1ecaP#2pV zVP-l(Ju%j*Czc{n6qR+{=e=>AB+O<&!Eua_@t|3>E=xuT&sq|jN05NQN$o_rh1+Gy zkG^X%OG%be!LQ$^r0B`8Cu8hnR@>3Q2`d!i~f2qrAA# z4mBw_p(OG{!^8o0c>{kczYCBr6@}RPB>Xdvb3xZv239R3W6*SM?Xaptbuxk1H5}w@TTL2( zod}@R;g&jLmzyqmfa4x$GR1LiA&GrNm2crz;a8?7FteQv*!0D0OAWQpq9dY0R4=}0 zD|r?AMHd%DSGUrr%cpVYKYGuRqPo)?0Ew-cGBcnNu=`i5;ZrB~9nGD~j$kG|`kunF zZTMJ%(nzi#QK>@X?a|ZMqmQ2NARJ;A!k$!a@Yj;qRA9jIA(n+y{J(uJ?u)6 z>{&qmG#Z>n!Yc<5yBQ0_uc=N5u8TMDrK~qdgE$94&U~?^c+nDZDC0K#*_Jhr2cQGJ zEpvFVz}6PpA&hM+FXwOapw}E&aCk%$@fE)+Kxo(!s=nE$%+_*GA`5p0`q{r5P$=U7 zYzE^UH?Ix@7gd22w@oLYKGo=c61#~O=F#;tavh@Zjyiuj1)qo7x`^W97Wz<-Lq>IG zgK@#-wTqU7%w;WpbV2Fcot>3ZA{}%uWV9@&vsjg-#S4IJ*n5W zkBJR%Dh5%KtJQ@&ZNvC{O@hTtkh^I;0a4TSpuoD3uL;x$5w60iZFu(4;ncF@Vd9^> zmM_3DTj5WnkwUz*h^oa5yYwU4fDbLSk*vt1G;6-}e~^_e8ds5(m}NoQsTG7XD@3z} zbzNnA?^>K+iZ$xQ9@h}Z5mx)qWx%scprIVaQ%qaj95txf4hcRR{q>xaLlI!d)Aet(0tC3lMJdr&gEK?wY|6bDW-UUlQ|1a?^c#E@&3J;`+4DkbN0CEyB>_LqvhP*~vNtd`iI>vD-@P#`zujDP^IZA~ zQdI6WcB?Dt+$yFI{ z#)z4UbavbkgFtDMOc@*CgI1i0xpINNkUv^5Ds?V8@AskM*-D4f2WkKtSsgUwX$?-@ zS=3*P^wWE6x$EdI6e=?mljGkC&!6mlz!d1@+99=4FHy8xh|gYf8pQ zTH0$?T`c8aIPgE`SxV0G4qU#W*VtB%2b8_TLn|*YlIJheS84%EWu%fHP&;C&401-z z8T6`P;CIbJyte{KBUv?qex8Pp%#Jt`8P(%u88ig8o+yNozC)`LJ;h!|wZDrn5|0_* z{_QG1-Rgz4BXnwF-XE3$Cy=iDpK1zTu(q|geR{d7$M_l?ay`M+ozVXPAxU3Its?xi zrJ~(YPrq6%ypJ4_#>k|N^$G_dzmp3KAPu<9Rqkavl2S?$fsZt$}*Zv5f*EOMqNdZk=mN$JUjcU z;UI!iORh;jmudmXB#d)ddR+QN5(S>(Ju1GF={cZc<;Z#D1R@Lt=qs0sZ=;kDf&4wF z8M=pxSuZjfw5b@yD;zGV>IAX%7N=J33%gk(l5J8U^r`8JHN21;X~-I7Md|y?jl~8` 
zo~`3%j~#M)(YSTY*7E?)LEM9$#;tIWJE8_CNDfDl*A-GNWpElYG5qK@BaUabdEsti z$}uYqiua%__f9#oxRY4)sTN7NIWOhEG-hdHLcY4~=EpB$yMgXUigj{7> z?~0fsA6ks>?@z;S?`)Au#tfab^{sQmxGX>$>!z1j$6Vv@L6vWD@aGGOVp!+9l3a~h z&OenF(sZ6?JS4regJlb_&%JBf-tbNzsM>I*m2synjWo{+;=C5^(!^RvX3WfTHUyds z^T&QJ;0P{nxTfI$0PKMqd+$xy{Byu~)UW>lXk)pZrS$5Bv>jgLgIc^-f_%J@K{P~K z&N5hc9@Uq3@H`e1EZjec!SeTCOhY<@d+k8C7vaAT%ftn*53-grzNn;?!|(bEYlr+N z@qN6XJHWVj zJ;dgb&ROzyk3Mr-w_mgmhj=u$^Kf1ZZw1p~2Nkv$kYo3WJwC&^p!w~u!(4Lxip|0= z?h<(lv2RY_yh9475dE5bQ)720JAhogL9iJJZ|Pps;xF4r#h3G4aQ-24b1{)~Ykk9* zE;s7hzQ>^NNN|7J2gf#6GG1`6iGCtUwPQ({G?&MJ(u3x=d^$Kp){^kMwUwA)$c2BI z6nHo)@{rtFyhsM^%oX;V501Ep60nj#+V8_rU-0K$0`WjHC-FGWGupf_#ry~MYPZ7c z;V;?@w^mLDn^{{kM8ph(g#!TO40aSAS@2@j>0-vW^1-whbC#OQZI+)HsYB!PmHwmMKZZ3I`o zfq)|QG4oLswr6Q=_d}6`N~OH%BTE3t#`MX&k|=PI9Y?EB1>thWIooWU0arN?g>=$` zU}{$IGL&fWNC28DVqZ&Rm7nK9*3wBEs8AX+gN!Kmu4yhpYHppVAgk!ckV6kw zkxja4g(Qvf5%oCk4P?o6EK&(T%V!l&?`M#R+bT(*Vn(=!TIuH}<;K{lSngUd@k&eM zBzP1xn8;)aCP!N6xPl;J&!@I%3kBQZp5QcqZGdr64;3)8VNM%-9>SoaNSx3$e}}*8 zLVIas{7B>U^vAsei+&#{GluGXQNy>&%2;VtJ7euxOwWfT6&S@kY#&ejKjA?p_Ec;^ z5hHApDzeXWq{}^`PTXhHNUG?L8X=XF^OSNt^1AK)w3n$&j&5> zk>Y`_WMtw=H24+Db!r$=M?>vN$!$6f89=!D`}@)C?B&uG2A~FW+|Wd@i32GbNEzu} zO)axrk_E{|9h$m>&QEs>BvJam34LDYHP+HaZOf6Fi5YdrwGRH?DDMhDa;!GZ2pM@` zHWk1sGeN7xp4vVjva@Lt!lwtWO42Y!NNHKqmZaw=e`;>_>`JkqiFu+hGT&-W3%*P; zLCaPW9GvzedIQ58@G8uVnpm8S_Qht%tR3{I&S$7OK)CtcU`{>*ad?02*dHg}C9eOQfjAmd{;X z#RddCMp*6^X^-GWG?03-`_jJhQ?#JH0rlE9!elN`LO`F(|ExkZbXBU2|}PX7REX8vTpxRNV) zG)K&42lv0qms?g1r(RHZ??60T$E94zuH!5K`cJiO{4F;o`tIRZRB&0!a7R;sMIrLg z1ZD>yFMhS7zzWC1IIQYAL{R>#L7hA{*HNY1H+;5sBqQuKXSG@aQWCdtiTy_;bqCE} zJ9|kudrZs+Cr%f*&stGX2#ZG}GBNAofa2OILr)Cv>FSIIMI|h60~T_@bCAGonz{y! zi%J<*O|m=Dp62G&rHuf?jNqC3_3IkoCXSYf6}yqPS?_NBIU<`s!v_Epv^azhE|bBsxy$IwmQ}B9@_mAh-uTS z{q8zy??bzVVV$ly#Z@}vxjku{c{p-FQ6dMNFiCBJpcGcmFET|`5y$kC=9XZ*<<@Xd z(~g;?+B=vfXc}x1`dbGe`_!m-F5^jw(b&*2a=~L`AXdQ`-l-~}Rk*hrP62RxQqs;P zlS||vC-pt4t9B0=)zqE+$7%syUJIy{FaeZjG_oNv^9dv`sN#(*;iPDp6a?C(H8c#- z5LLYly};{0?VM8>K4BR`9k=scXE1`{!;q!FHh29fF!2)v4QT1;)#)^mV(iT^A+T}w zpxnK;ww@^XrL33?Oj1Xk!*tK<)}~Mv0UVQONZU_pTGfq}qb!T7oB=+=%^KU9v6Rxh zrW%y7-Ka6z60NQ_XH4~?M-;+E0P5G!c&23eNTw3Dr7nF-*i+V$T1hia4*a38pv)ZT zG|c(tFHG%&S|^K1Evd7-{8T=rZt7@KN^YW+%wr(dae0t^9d@AG;ZVAR&y2KkW0#S* zG!8EuPdSJwiPxXrNs7eS*^rP_|4FGpjP7DY-Pk})Zb2efmgH47bG08baGF= zDQT}-S=HS^AYjvWa+YA)dK`X~3R|l@h8190k52-ek`%W@m1I5$?@U=lhI^rIpgw!} z&uU^TLu``#%ceBvOC9k+oYwfYkCt*(wKu&@jr(xf`KX!u`0q=_B)T3n$idq+V{a;h zB!f5^#RlIAXn4KAIneC*!Rgm*nseeSN8#ou$c#EmX{{UI6%SiZycGnU>j@kN-J*f$<9%+er4qNkit^OItA&-P43d5W8 z3ST3t;O2v^_}#g&K`S>oc6IMre8TgYW5ViufZcl2qSh`aEVpS7E;R&_JK~j|+9Z)@ z3|cd#R1ZDqG~PgC4RY+yHWiA2vWn}dcxtjm!*w)jx@6WpS)^sNxqHKHZb*$(Hrs*v z(^h;UFji@9V@VIxq4SypTYNtq!RFb>5u9TZYzg+I@A!M#m4@y!7ze4_SK76$uQ(08 z!G?ROU1IeRtGDq6i7n^D7o`>Nt^tJPW;}u4u4oXj;t=qtB~`%^W3N=JM`0HcV7Aws z$y{sF*v?OC)PIJaXiWAFw=rZh6~Q1LnF74O5m{lpXM_|n9XJOBVuD)Ueh+$Vu93`3 zy9{m(8unX@sH8YhiRsi1^_?dWyq-W^m4fZL$u$1}3!E6`vzavNI`y7_&}K@=vd6wk z^aICwVkSTi5QG^y@0x0BXkwg~ky|}6MUK+ZW9J9Uj*I4kctpjV2a`(R;+wp&4+&W` zW=~Q`4&(eQPSVQWJ6P_cF^3~`HJXt;@JC!LYe7=}K=kP9RemNPSQ#@;Jt z^MspgnI|NW!_%k1puq*Uf|2@|{aK(=8^%U5=Om4Q{{R|fFgCF^uw+yCh<~jb+lD02 zT+E{qM!$x>PHR2wu7#UVrw`M9^c9KLR}#d^TxYLLW}mY%OO%M@HpNe$YRCk|q!FaI z)$LkVMWU5uiGkC;C^jD4+aOm%839ShLMfYE&v>b0BQCwMj{fzK*KI3_ZjM?d4&X=!@?s5o`Y$VOB&WGEhJF?d((!-HIm%$GCF6z)`MCmuy^@c#hV zH^Y2wgnZ+}rE!D{^q2R^&U0J#-vfM2#)3@GJ-lkT{4~$lZ~TAjTXui3sb@iRAH)}; za^92*Kx~16H{9nG9s`Vi$-ilzv%p2lN5yztSOPFh79Iu%_}9)a{yX?h__4J-4qgp( zxlMNDBIVHvSYJ5#k>d{9_|r9s$G_<|qavYhWCKk8#qk!9G3s--j6HyP76KaAI%( 
zaqqQThI|)k@VJq9j~JO)j2KZtP(7PIs?d*#{0nC6W?PBnC_!#s#C@|s*Tqu&0r6~B z{9*6cg>fJPTv|RBIeTSKQ&?;8zX!XWR?6;2iyC>-fE5q6D`wA){>$%OA~y3|+$r>l zW5WA-iK1~&hJG62P0g0(=Y>k-{JrE^YgrFqSdgRsbQnBG_IaWIfJ$;3VSL){sjb z)VN~Ag~sdciVsa~ZSck8@~yrky0Vf(mP?%?H$D$s54B|cVZbf;RlGub7lKx7$8#F~ z45K>$$EiDi8pd1uYo-&({_P+3T}){U|xOj9flp z^L8^Kz#}86^ILomj^A@%^%kN|Y|&05M?BA=SXuwAG%A+Uvs zhU94>hy+r%mM>*;E%||f8OoZ8@6_bFuq5V!*9J!BE~EFLGsVp-&1N1n(2CB5nPl{I z_oh&nXxJW|Xa-CYsKA2M5VDc!=BBZ}=A28XYj937I+N0rYed$~DFBcS7+TrOZ=_un zJ}4}(#?B=zobR_v70TMgy5$2P=OT>O(L)kMCOPYxR_w~G5hCUa3)|Z)eyG^6Ad`wO zA_fX017CG+`Jmf7T*?%J08%!qUE4HrejMWi2N~XhsC34pF#`vuYGwwKR~m7J>^s(6 z3i6i8E&l*2ddBKTnL{dkGPe!05B^uq5NXAY#{*))3mJ@8izmIB%wTTs7O9PB&H8D1lbgrT(GcIM8 zIagzlFh{jo+2UyP$_0yVJJE0Dc~Oy}(!-+xNh1h>kdiPq`_O0y1TQJoSRdy^k~N*t zxvnGw>L1dw`O4TBDtgx~?6$LwCs6?GXf&EZ48!n)V02?l#dy+T6&g0_kyx+9?yes) zDy}+WjcIu@NbflZKm=lgS>%#dXx(xXt9LcjlU+poyh=g(jdcPyvc{5&dx(a(&NV5a zE}3RK7Qn)7wrZF7lIlq?Ky%kL#|e)t`le8dGrw93+`$@%rAIBPTRVPX+OpWRjir}K zuR?i72_sAm3=d9#^);0xv}NQG%c&0fs4E8!xSrB`*i-Q1^vHXGp0q)21-V&n7_ep8 zLs9H6--qjUZE(uGw}}2>*#7_;T1axEB$F8GXe)45;`dxCQv(>z^`UKk$r@WozMy`g zN!qDtOQso#1A3c*q>U#hJJ5PNTjhc#k{LvTHd4Th<0pNqLOcEwd2;r#+$#LQ=vPQq z?_N&B2qxt@Slv#u*93IO>sDKcrLvc7uPyZt#Pgh*3g24WqX<(69;9)r_;{dB zYbLe1k+0o<35kgm=TPlTTG~Uz+>r6BDwe}wY&XXA#}%}ON#neVRZe<>rG|9i0Bb*y z1a42vhh`Ycfevz5wP1<_;Ji&^QjQ%94PR2}ImIBnxQF5+F(Z5m(0En&RmIdIPGm#^ z3Y-y+rh_3;69Y)O8CAgTwE`|=Y-5a%#2%rIsk?0-DuVJrib-OsH|s`@`+{4(sBCAL z;~6plKbW9G-Qhy(co?DEZTIpiiELqi$GE>vTMnvnNc(o7aT%nMgEUdX!EqX>00sA? zV}-z7Awgk`5uDI##d&XUJP>f$zurip;ELH|Pi{)mWPE@O7 zNTJ+_K}pfFT8V=58<0C z#Gs&N^z|$$OGy%bR7r9br5SCpmc;?2iSDh$GpJWTSqHD_SYL;Rw#YRYw)23V=yjWzq0R)s(q^=GvOt}Vn0L}w??Di-29bRpfFQj89mqH9Ni zrZSVB*rG)YcRaB0>I8j{N&?-jwa*TY>2!%m+yjcrMQj`>+oyUp)w;$+V^XL+GfS~( zh|*4z{HQ3vgPd-9`&Ap&u{okKT<2F>7DAHL2DAa4mpVurd(%cVFebJILEmomp9``| zB-5IEOtQx!qIBOB3$alrtwBTuV85J0C`PHrBEt23NUbyc;FirCTlTMi<8)S8^K+(v~OLXb)RkAWI z)}WbUatF0&84~5WQvnN{jfYV{#|L}ciCWzVIE=pj0L?KM6}XNxme>$F9~7)w3$z3eWkQX#B$0Gts{@TIVV?GuPQC@M7&PT3n#C60TU zFv@eO0;#(HIf+*BrBxpji`Nn8JWpV!i?VIqQ!w@{21K{~4mNFIMl}e!* z@>lQn&ISb~!2bZZWW7~bTWz#18ryA#|g?(SNgV!^#o+@-i{f#6aI6f5rT zr9k;l*0avd-upJm^JUJ@#~81KjfCj?wmg5`+5JH*hj9;Tw#-;aFAfCScYmJ>`S|oT zRDyP2UySSOcZ=ks1qFiZoqr|bC%~JQtEZ=Xfp`|gR5fWgH5{fpE*Y}+d6m|U zis%p8em2RISM5}!8JwZTj_sfz@bcQwRQ2iKP0ncc_YvXIRJmGy4I&R~;is1`ZbcPg z;=B8O8W1#Ly+OP#Vow#7QYD|1c4KqkC<$Nv{0DKP2!<5zt*DFb_M@HSln>8g;ELHc zaD$%0`Gtn_P%@RuL z8H!+vB{k&%@?dU+i6(BxF;Nn05ZvRFCPm+wDWh-LBt4uO8Bi)5IDW$jPa$4cls#$= z=)~>+=H_CJ7?k`X>)8b4^+E_=&x0!6BgB)`h!0gMg!2EYvD-i00;{Nezr0~OczuL0+Fwa46!4YGYP%^bVMZ|~!{nw-yBaI5m9+4c_{m3a zcDPx0BM2OYRgPn(LhYdbXuc?`t5&1X$l=o9ib+v zku@2LRlS#Togzmvgp3qcMW@FRSn;}NN?u0pphx%K<_9d(_a30$RN1aBI?mzeQRraj zijO(Ol?8t}Qa%GDWQ*nLsYLQfD=}qbY128b+TazSySkA<_cfOarL3KbSv^#<;S8+* z0}x^Inlu@_5Hw0wz?E6uCJDCx+wWkhjj!hSJn#_~N8&JJ#Oyv~7*rxOJaouQ9mrbB zetjzTRLax2d8qV|zLY2a!Nepc6f{Bd%a}orj0{C`vA$0wkm2Wt!$Zm9VlTNa&Nh|j zr&(kZRZDJO3zqH2+k{iNETAuYJ{}&mvjYu1$4Nxajolo46_ZB8b3y9goR+M zG@ArHd}XkThX+rZf^BR-W*&Ch>E^Xc_fp*w^zx}#Xi5psh(Fa!i9W;twRCYD6YYd2 zUo>sn=n;E*#gS&860QPKbLlj4q%H=e%Bt!vT;|Agg{MN^}KYeOaV8_smYm;#0J|1Y5I{-TYY5dr_PI{=`O`)vF?VnNl{x0qP%jKfqQuU` zs2H&IMe#MY?^HoyJd`HcZN_S*J(gg3bL|#Nou&*gd!T=FItMEHv`TwnCUmp^b-?TD zz#g6PWUxOq1Dl!U8`>n$=~8lwduqtscsl=O?oG^`e2f#azTz_rw&EoDYU$T1H~t|7 zIsl2#pl+f_tTB!AC`U&cON#k%KP-DTpWpVA8;5FBonrC~VPlr($6xI(=PbNvn*JRZl9Ak8LJ3@jaS_@lv%fz`K{ zk$$wYcO*jOp@eF1t>Yhd&w6(v{&CE?Fu4W zbFVujA~K7#sJH|+(T$Xz)smV^^{)L+PRtl&J&TmBZUmFYq!e|3`fTL79AExfdG1W{ z#BqzpmbRb;UfOK^Q*S~34}+fqY$vr=M%GK;gIDn^Bd z#c7=|mBWWdqnqL8ss?9|WKynD=XjXuDm$`aoU1Tu>Q!H$58rqTP5IZfKRkSfPg^ny3`>| 
zMnEv<69Q2Rhby7e=of}F&b@E@Twjz~1k`+wJJ{>FF2R{A_oX<}aJkp{m}N;+B3qTr zENk$My)|ZXTz-E9WGTTJdr<~oNV0y5I+=nd9P*;00w74p3#ldPpn9=Cm5uPTwIGdS z1`sD^Q0m~KLv?EPGCjPbpFOh@V(H@;HGi}^&0Zu$X99vEEaRcQE)SRN9cA2MN`0J1 zI6ay{FM3~C$F?6Q=Kp~Do*ui$wdU8mE9HP6h{R+`CXCaYyHmxy@6C*j7 zyF8i1lSWEJW2!s<0;BVK&H-s^ksFyJ{A2KrVue5TzC0cvs`q}B82pGK@l1YhBWT4DNY zr7Bw&xOS=;n6lq#jn-A{Oa1m2=p7$mGN<=O)bHr)`{8>=fo~9^PjFwpJC3sKa**~P z0Bse7&P;rlO>e<%GK)`aBn5D^gDEOiF0O9N{G9%)0Y}YtXVe1OtRgQH2WCk7f-jFg zDW~PBAm8T4M#ESs%Gr;s%tKU;HY|@;nkH+xDJy%QgyBlKxxLog2SinjV&PR z0aS}vfO^DfI(7=f%9g@rpUAf@mVM32y1ddrLNmzRsDUBXB<}zU4i_9QUMAsHQ`3Uf z(wl(=5S>|>O;F-}c`D@F7B8YVcSBlzG*hPJ)zj8H` zOLvy?`1a$=xj`DG6JC+J^PD9e7DxSDexAYIrLt&^_HO|4!$r`Ed+_8m*KsLtS!Z)9 zV6Mn}D|BFAvH~WRKYZ;%FTdApYREXm^f4dQGx2ZJ;9Ewtm|V0)(i&qgO%R31&3u?JcQeU|GJ9!YMXXN49q)a+iIzm3fa>}FYjvf&!0x=L>8UpTZ zEemMMkH?S;Kq_btv3#K@WBi5fMw%$0E0a`gIyEyM^waxA@o zxFlkof$gBPsTdzI8&+D%lzw0MgbEW?Dy=_GZlrqwQ$NO)9tpPc5?0o!WRxCx9uewX zC7p_Q??eSg%~Pu=Xl{y)0&U5rFTPlX!0<^=#9;eWM_M9`C9KVb^46|IzEo7#ILP*o zZw~3shH11+hrDWe6YQ(j74Y|t1T7^$xQ0?@pK6t^85T)%TQVZW@=LP!$fSui6#P)c zou2NCm9zP6^JRbg5h5YhimDTr3b|XaMI zm4~jQhCoAORRngu^Sa%MWzQw8npFslrnE;P9fx_l{^e3c-fa+5WihixsD9KBD5OtT zrfavMukSsuZ+F<9Pjp5>Si~*(T-sWpSh%&?$;mKc-wvL>QC@Z zQ9Ku=@Adv#GGGx~60>L@AK7Fwv)015!683i7#Rylk<9XS z{=ttI^XV#4eejivJ$S6Eob4xSfLcu0@9Lcn8x|P0;X8#v-|CgQgtF&XC- zM}RQr@)wl0@w>#JPw&5O3-;4~s1&BWN3{;qJnOFaAB_)$#sB}0h4deQM74?pU;5lL z9L4q&bU_B!LXM1J@t1HU#As~uz{tY|ZaZ=|v^$~dKMrHQBO4`X-(mH_oe81Uoj zTs7|2q>Qq4O?DVmja2d($4=#XgD0q_J$N=H@AX=~c<- zjY;}5f2b4;3c>h1k|2*Bckv9gaZ^NXdoQR}&`f-8wwiVxHq!hL(9AJ-_kXtp#+?a& za3ODTI_KI+M{aXOEGG%Nr)6|t6278SRc!nk(9%28c?f(O0sVUxS;-^g|a^a=&QoMeHbu8Qe z*|A1iV;rtoS}!a86`5!x61qqSr%~#wA@XBxapsz6h{yi{4v7l~|BC)28ImB~<@|9W z51W2(RE;ZJJQ4vRD7QZ0ze%XPmz<_;jqK?8YOBGlYfBvguOH%O@8?gTI%Y2d>$??Z zg||3!X&yUMm<|;|J5;CD&m4}c$z($JQ9x*n6w?`}* z-sVRGqNOf${?kx3{0Vis`&l)jbNkE4e}I>jH%a(6JnqqLR1Xpn0MBmI!^S)La&zvw zhj)cmoz;+i_U&y0znWy83xxr@D*H!}(`|fOBBVSiDE#}c!S$jihXBkD8iF^xn|XxR z_5cwASro($izRkGVk;|J{Cb`Njo1yRx1rBmdq25p7=IwJE}s6Ib-)h0sfiskNN6W< zT_V@4dgEQ5eQ{c3{hw(_{Aqg9*Q(XpLD1!iuy7d$>f?;n>7wrsPA0jf+Iu@}f1qSQ zD}Cb@R<9;+5#57{j~LJQ@0+@<`I0{Ku|3jkc7*-alUTkM6u*Bn_xs;fVlKTrgg{DF ztJ@P<{`g&HkYt-sY?=$}+x83bJZ$8{KXdHOtf`t~r2Gf)g3U}Fr$1k*|GqP=(9fnM zge5YqJ&2fz*u6MD>T^HazEq#yh7sZ8Jei~Oz;tN;=lA+B*@Ob=ls)5{NK$(@97ovv zQm@-`2|j-QYM3G@!TSCYDDucxn+{&UUT(K0$}owyT{O}yY9~M6o1<&IttdhP;QR;B zfw|cqf)vL#b!nH}hauziule&~?_>XGOB~bpgq_m6Sw5%RM3lS?$Ck0m^DjhIAICbk zvJ$4F;1x9AV_N?MOqF_ECw7&Y#RwMA4D3g(r5w>!{%@D4*DHRdA^Z=3-zV{S+4yek z{VbO|=~fJSF1QuU<@^MajnLS9vmBdg5ncBP{m?v!(#HHBpdC~JTm6`bZ9D5ZHSdT2 z^9TP|bA&e=Eaja{CoRgU(&Fq~Pb&`ViZMSkn1vgvB}Dh+J~Ix&WCma4tKQ9s!xG&M zR6+((V6*R)`hU9r|6F+HGJG&KIQ5w+_?7Z)B2U8#8xwQ%AXrXQ=708#D+!jp+ZN`j z8|~G}dBm3g_d$qlL~}F9PFJ$+%snT^+}_ME{fY>0zd9SHvsmwuu%tms^az!P04P!@ zPWN}Iws5g)Tm2i~3rg9O+p_D99V)xdyYl#-o`{+o$l6E4mz5q^g@z;sd^%2 zD5TL!gYQ^hKPz)APNyosSC2n@cf{CO`>it1mZdjD@l-}(5UYl^+zN%lW@?}1XQMUQ zu=_86mtvoOxOz|^d~soq2L{661x9$rb|~n)q@aUV&iLOWjTws_Hy6bCRkg_%|pqR$( zA;1-eQf;2|H}xwfD(Q+?Gn*RlkQZ<#Oq*cNcUL~Y)bG{IkDhJHISFy?+UBRGq5Y;z zZCye4srXOvuMQJEg=}I2z4QE&Z*reQ002^4BwAc0-`XshAzW;BDGE>c2}S^yG+A*W z?{Cu_+hHr^5>IS$K@4iLP|GjIm|_+cLvP~B1qh4n!t(?zf=w76KctiGLIR4B63k{D zA`gpbUB*jamz-@qn)@QTEK`=#bYsufpZwYk7**^MTx)OImrL7H*zYz^xR4dZtim-% znOf|6X-wy>iu5)RCh+v`#`qwNHOoK$&!!2Dr2+PkUING&)hG^rskWv`8F1 zOKMk2?c2Y>EUu1NBDNFWGFz&Dr{(R8-HN)F8qfFgVObH-x%K$fpk9!rx*fjz*ZAq6 z7VCz@-fGO1OE>tgM706F{Jtg*)EwO1GV2aS^}#~)l_NBZp0?~&V3DbdAngKYm{W%0 ztf4Ywr}_k@tp6slxVO#2ld3={QL$;?U_WM557&UAOU0)QA@co}k(tjJHwRi< zCM6I(b3I$zZP>H)hiH$-ugUV{d-NC?@qo#MSE22*ZNvcyn;}0>rUaa*kD{)*VU&a| 
zoEro5&uFjo@32LTed8RscSzA&vvn5ou%d5cfh`s&S&I7~z-9ZA;0`>z`+OX`vDyJa z?PAOW*oy}S3d624ej0`Y|;135?aJp{>FS}oL zTDmAdJ{8_(S9`I`T~j*4=7a=EGE~OOL(N(=RaA~vOspb3^ew!~NEt#;J|c}4 zbDVe0?Oc`zjA@@48s?RVGS5|*L!x|r7KtfK$N&gY%BJiSGH1eC;lVIa2afcIp5#?i z(v}^2C+)z+RatM!d>aepSx!gq(Ll z)sP{MDottGltM&NQ!Ivk ziRCGciu8}KUBCsQNoQ8l_{0*euG0e0!Jagc)ol4yu0bk5r_Pmj{ZWPv!a+fNG9+RugP z6`W127t(`P#1hIsVpQ?!4e2aU>d+DPEZ@zLsGKcoU~fH88f9h5m1KRXT1{?8uVY;- zr?)vE@9)EZDLs!~E_hgvBAA$t2={Eod#h$L@##!w^5@`N?+M@2C;!Bd93=oHPY0{> zwFR*GnLds2H<4aOH7~ZL${dfO-!Wq}Amb*r1~30kr$;B@!b5W(bVBDXuuaw(;~Qz@ zycmXJ@D*4jN2c>pqd05854TMCJ& zq0M#fX%&roj7r{i`i|R|el)yj5^{$+k{oU=T#4AJ>CP^V2c|dI5q{_I`#DLJ`E)f( z(2KLZ(SXqBPG^D&IyDGyde@8X)SkF%!Plb*YXZWG%jE4O_@gBc{e1wI*J^<;E-_ak z?$K1Mf3KxW96gdD948#Su^qx8uFkgOzHa+ps;a{t&H4kM78Xet>O_46`iB{^&B?}9 zd1yTjs9`rGC=u3slK;{^`t!HwAn5za#}9wwq|^0f!MVE3gq_i-Ri;@q#@KCzV-Kl& zc+>`@+YMMp(iIDUVCXbp+iZ0+*@ixcg;|)CFq5R!Saj+-cM`(deL>fktju|U4sx=U zgtTaRh&1|^K70zo3Vi;8(IW%j@jXcK$XtM@-eGdXb&NsTAD?S1zc79F2cOgC6$=U0 za^8)m6aD}K7cjZ~BQ*h)DS3@cQtkC(q+AlE`=);3TRi8CN^)Rw6bD6x4W*5R`=%b$ zIR|7$3v${RrtG9)qLMwEWZ1z|9>hr}LrvYsNeZkL(a+G2To;mb`ZdZCOYvK4lJ^{( zq6KdLQ#|^W>li^pGDhY?D)i)%1W2o(2fCC#wEk=d1exS8aM+O2K3C&2@z4%2xBE3E z=<}@9oT9k~o7z&#~-R zD&m7UbdG2txUY9Z2wuK`5KE0h@kVeE+ zqW$$i{X3Sh&fW#@r3evU#rhGK530!!4Fv0ty-HD7MkJjyeI& z^%l(m7$E|tRf1I?NB@d%#dwdm1Sk$hzcQ}WjHtg!ifEGFd`3+(R=u5csZWgf1}VLd zC$ki6z=Ip)p>p1s9ul@`v{RB%`N9xTPc44)MxEI)sPMC+=-A|*`Zj_z1AOBNdP(az zRWoHDPYsF!_t*Gs*UX{-8VX-^h||wVuj*9n5nU@AFu`OW6uhOsi^i84N&U_VQY+zr ze?rddOjiV#*`BY*aI*}fFThJz?|_A=91{@}7x0q%3Ks90g-N&D@sSM+`0PJ+R zjkfO4D4+i+&)y`ln1`->!HrhJcR+5CWe6ZDqErw%oDzgUi2|&>OK>zET63@};vXI= z^&&HO;&m4js;NbMWoFP3;?3z>PF@{DU&GkfxGappn-ro|=`qA4xR&m| zEem_I9G6KhP&t=;ZZLOkot^f*G%wnh`*`b5DMvbw-!pj+``oc4wUtRC{|nADzrSMS z0KwhUKf@a`>#0cHGv+Y|nHyRjB=V&K35veF3BD|C>H$=n8j*Iewj;;~a69T_V61?1qhX%=$F zP`}8cQ=U=q@60NF%Sa#T;hA4*m#ZQzdBAY!oe;H<%fxH8`swsi&E)=pvLIryuss3A zi%d$JS2Jm`b)&sLrlBHj-@1$02{kDd!W*iM_5M)KBELk&iz+O9cJ}mj*{MzC4aU5b0ax_)xaU(B z1E}{RUsBJZkM#Tcug@ zOgG>*rk4Lxh=?0eWt*k!(yY<-uQ@_J_V0ulU7Vp*1}_p{dty$6gW1qe#aoS?D6&1H zK||+v-iwi(>yD2mjhXVB37_Idycv5)(Rl3H#B_2#adn(}lAq>nzMzB!agcCntr+^q z{M`ATaTGyXXq65eW7c)77`isb-OGS_oh(vK~qb4OQ%~o!ZLSa*fNv9(EfY5wt zUjvHqJyAq#UDctH?D_vos8q7t`m%ad%eOskJ*oR_yg%eIcT^Rz2W6zDTK6bjCj8eS0g%p$n)6y`eFRkTP|L6*(hBl43Wy1{%-VX%IwL}`ut*Ojk?OVHTtrT)Me{&9lvJG zk=<!~4;`!Ft-CE*sRl*MUQj9jeUP6HVP%=xyYmK5Ao+$M(@PxeaC4 zpAXWP4tEUG;Z}K4#?veg%8`Ek_IPZ~%c@+KQ!-cNj1+vo`BX-Zk27SR@2K#iqy<9;wuD)z+Tg@)7Z!RZmc1~wEqEqq}FTIIwm zRvb^_3ZqY9^I~aEF$2*lFV?7Qg#)bo^*`xg9$%Z%hF3!sj>5UqM|70KaS}eV&bUpo zGCTnE>^ezBX%>ZsN;iHx$YuGlOZCwyn~u&X5&rHg{oN*7h}Ap{d1&t&RsSIVi7Bx@ zYo?4=P_hsXX@>GgnLcR8W{HOTZO#NUY9T>`g`fZ;H$m@|GN!<2h%ahCh72Apmr6MC zNz-BYXiLXl5K9gbs*srXS$i z8HDNATr`$o7VG=*YKdE(5L`Y@2lIVhiO?ab7JM6STJLFZ{17b#u0Es5eVp}0w*5me zjanw6?WO2cF}wa&lJ*s!2LWRxvm==$k6NdIl3ZY{jq45px)X6J#ZJ-vLFai`T4d;2 zWBXRw2Cp11OG;=y-`X-L_(bgH{c-?pRuzhK;_~<4RQ-^jd@(GIP^|k8OE&R_?OCGR649!EjH>wrwbO2|rVAQh+;mL70vLw=58dRkk){SaqQW0^477rtwQSw9nm z4e9vI)EU6gksw~leXRA0KBC1hnFx&K;E__o<%l>)@$As=7XlcVV~m%3^u**xDr&ti zAuk^_FtdK7^IiLc+>e8h%b7x*{n3~iM^p1UtX;ZaHmr>5;rYh^a|w03WDjYMU?*&Q z+}sE>P_r{_ddZon$mizR+7l7NM8*kv&=>OCG~@ah@Naxt)+2U~jwfz-`;DFW9nR63 zF@?v@*1bVf?*5IQZ0I1b2n@*)@ z>R{D&?@I-fnuJ~0$j8dmm%Ha@v0<=PDxXX7+A#Day)FxIzbLR~X`flUD^Z=O#eOvR zMTbMS@B+-QgE5>q=G&24# zMDjn7H7cSjApb31$-X<_La$2|n%6O!JJRO>aq@6ZIl?7lV7Co2s#=ji|3~8joyBm2 z!KZGM?P7W$SXUEG4yqYcP=^S4b(c2GOB)j2>K7V$ihWUamRrm)pG$?9SYS{AUu_cZ zL6F4>LhRUnNrY?(Do9rtVb>OgYOXC?6;!5)8SrPlyp?cv%4&cd(m!!Et0k|aNX4x2 z=kzOg15*X9jlJ9BnG2US9=p=eaie0#e}H{+{bg~(mCpySkN*M8Zp|at=?t>L)#2XB 
ze>dFs|2cQw=xOJ-}nK|Ao)O1cK@VsRvJ9|GG4Hj*$D|gEt`1 zf&3b_6!d3BqSz1<*JA`0uQ`!boK)snRfBsIJuiDV6+ONaAfNjHBI&YA(CO#H>TpBc*IYQ8r7A+wX~dsAOmse& z%UcSmx!gEygdI{=(q#L4s~M9jh@EXIn-07rjoPDT>2)y zu}mygSzgO_MLR+@NZOH2Nb~qxPAp%4pOCoONTJAA9e0TJ8$!jN>oXuJ$sHrdk0|7+68waPxqwz>X6~d2>9fx~k0EB`7+nBQ_5xV7BWU@H zJB6wrfynK($tO~!!KdkDrf+>J=l=mZ+AZhiNaURKQjh5yWWT(T1Iva#q5Td1Tpu8M znhq>YsP%3}X|9}z1$x-3b17Ez>bcSP`cUSo5`K zk`z+Q=9aw?bz1ADD`MYkQLyq{-Sr*u63$RV^uLqDTkQ!U0S+g^aHv^$sHnw=%!cPe z5*!WbSVNkk3GD4o=J8xOB(;an80Z^9IdA{O#E!*a6q36JO+ZuPwBfO9aee3ExP`* z;*d*x;dI?C-WmR=mZBhiE@C+kZ(GC}0fZM&5UUYg`wHhHS&k0(TU+^`M1M@rwzE4N z;|QpAJJR7mFoLU7erysS`4}r>`R&fj>cN5pVgH>XTb5>Z8g#Gydq5KR&3^z>ie|Q` zH)r(i_*u*f#w3aQmp;8l=JZp2shGBGGYKd~+V+|xb}X?EK`QU~c)Q%MjxseN133O- zjBk%AM0x3qyj%r7PsPbp-k$6dn2~sqnyx3}YQ|hvlr@HRlv#v***8r%vuwjnBeNgw zN%T8=NS)R zjm3YMZXA*2XoREEcNdrQ@pES>Y!NKHTgwz;-iq>t4XzFrIdw$xgG2`Gw}}K_^VMS6pzN8aKbREB)O%pA3t?s^SF zWZx4$BZCgL3tI&w!i)w7J|<||o}fPMl^D8$bA^@KO{Ailj=!BvhPf=g3u-y@ht6xM zo`DM~jKZ3)&SeNCSpwMQ)DMgb7RqYX+tiO`&OfRg^aDLUgyw#$Wx*LG#wbl6p>$HB z(KN=mIVsZ-fF|^Wee_mUy4{E(s?U!tJClI6E%uGZ+i-$UD6=0%d#n~o_~Q%Cex;B8 ztz}v9$5fanuJU%x!Rtf#tE;=`oo07 z*rP~Z6RUp?m9WGRW&(Nv)tE*lGXUggg5p}2mgl9Y@fg%t>{r~9ZKub1I>~PEsIYI+ z=$((ujeyW^Mn-O7BSKc}f)OHC?<*)c2FK+wV)T$)3_4nxbu)Tc<`+OZ%v3sTmhY72 z8T}s48p5-Bfn?yp5}nRFUR<0N-E1Ut zDuEM3$5Gzq1yS%Pt4=;>lEYm8qF%f7$#ja0X)5N#Q_7(n4WWZqLmpjNBZ6mGn>S>k zVXf4bq6cPw< zjwmrG_*gL| z3k930=){}z;bm?I_sWx$FWTM&bL68-7#92?ESyLM-8P?ef|v7&49LrAV`v+=e||Gu zOZ|R$E#F(nh3Fb%l?bjz!1Ne9VJ*bUIBZ^(ssdAQpn1lqpQc zDPmD%;P>aCB-!%5`W_SmKms$}gfl`zk#NT@>WtTRC~2#BuR6Ls9{Oc0!vnwhhl)e2 zsHS0&F@!j^7L~<~#$>ybE(n(P*>xfQ)QBn!Q2gP)W zh+c$DV(mMlt2YZI56ydNW6Z(C6Ieqo@B8S2%_2Zq#^-#}>%yX4&LNIK-oX(J+UF9aEw_Xst{Ya& z9I>0cT<;JLZ5c&fRTw`Lm%f$?{)|J}K`t%Zmf#hHRkz_u z@%kyt!KDJ*nA*^l^xB;B*{47#+sh^^;?Ze>_FE?0KBhT_LFS1?jH=+lN|4cre`4@j zH+$sNe)ol>@zOqKXThjtUl4#QR^D#IaH||22*;pB`7*~>M0TElzHYQ>Hd_GzTTa-! 
zg=0T957Xtd)5KA)1@6E0^Piq=UxIvSToyg$>5ZE6Jt?tyN2%u99Xz<`HYLUr%?w3j zzr=npU`;Ff+DRp89Qz+Y0eE^F6QU_2?iA+3)3sDFh}GHbr>hRE(#F;}91&x!2(w?%fO0FHX zxB9v#28)sYGXVwMQZ+xdI3l28w)|z+?F%pgiBpG9RM4Z$Wtkxqr{r^~kbrJpG-o-L z+&;gM{&XM$!veXCwe+@t+QGiUKP9Fv6*3ZvRL3>sa?w(Xh%-X85roZdJvdL8PiGzx zDa540VS|PElBgxw?wH}JoSF>g#^^4shb=ukx5c*efvp@t7N1DFBnY|>B{K=aa70$m z{0GLPTjCn5>>&n_SP3js+k0VP|Eor0&{nBhPG#{#{!PxeMBb;vGrAO2o>h5qp>Lp@ z?#dH&?$!%}Uu30I>^Y2itI2@=ebq8xZ1=z7%U?}i`Z8ZBLX|IBNz2#t=b!Vk2hG~^ z-bs#!f@KFY&!i&@LG{5#D<&RGgXpe`usm(j5Ho7O>@3A5H-A$Wu=xrlo382)+Kq~;I*Z7#N)(7-C|0(*C=Zp zYm#hXouYA&j{N)Ie*mp*qLb33R%^0)XYJc8j7T^rU+3McRi|sBm{YN%-L`&^ebwEV zXVM0LLZzMA$cM@99xGdXIgEPG*sfmvz8d#O z0nAm~QJdeZkl9xCDWhNWg9mnyE-iNP83aoKy`wK3OXZHkv}vnjsM4Azs>XzN(Y(-T zsn9INNz6-bIw@3wg<1M^-*hi_mB}TWkf>om*1kMNU&0~ z=7|H9eLl;tn-<)cW|{7sd;x}Fye{T`RdwET^xYibdpo&JD&;;YWV|2Qg+zPZtSlQ& z`rbGPu{`Fk5-qmeasVe`qOSvto2S?Mt<9Xw+ntR9Bi;zFUrW&YtT^7rR|fm_3VXGF ztw3qi-qZLG%hPG;rEm5o_Z5=$W-ZAwsjvhT^X7-IlA({O_E~L<`_JT!B~x)}qZZH8 ziiTw}D3A(_{AX+??D9MH1kh8pK5Va@k!_ML8!uzSov7+IX=;${cDOjGD{IPrJAY%b znbV!TZc5aw=FZ(Pxud=s@>cQv`qVy=jA$@Cizl@soh$p=w8^Yr4FOkc7oVYIcKh*)tOPPX zS)15{KXSVEI+o<`?o1r%yH3~*w>S7n@Krykb``V*<2Uy6{BwqPE}_9o#n76zu2VF9 zZ^Ls9#9Jc3VvN8TG&?PcVM|r5_!U9`{@1tYIpWOUM1(~4bbnS+1>Bz_e zAXZO@_{7rgh|i@BzaFYpWqs!FyuE0<9`Q%QjV4=47qKvMjUke@KZA{3PE$TmaH(D_nkb#uyC08^ zxFzq>kl4D%p8WHVW|3{6QF>%~gSXrnqyOpMJlB@)hm%El`cIrq88Zz-fjKjErsp-U$wI&#u1Y6^&)es@GRjv~LjbgJkQX4Tuk&W+I_nGVi* zDi7FNLq@cs^ zoo_u069L~5m570Y&x#Py7)+XrD+ep7m zSun&rg`GR$*y|{?zo#yz{7f;kmbZIjNc0SC{myov!q(5TZZ)(l8*hsjQ(jdwDp>kz z+7DtKFnW`p`Lzgh3Aq_m3WSS+o6X5iwgBPHKK+G4$LUQ~Yq!H$UZCl(4i0f1Rt_%b zPum-!z0HS=Ncd!{Ks34--^o|3J~!hXQ|2TsloWN;SN;TRsEC}2RQIp9Nu<%nzMd{8 zn+>$X0?}yAnsvw@ll_{CCeGani?H2cRWvi?285aC?B~p%&7r&3>kZfSR`*-mfKV~} zKLD+aTaSPa~m%Ng4;uPlMTO$li%>Dj@{7;>eHH@AD&F%^a0F z@1k~@0UUg%HXIVHaK_x`n@zY|^%mZ76t_xS%GNnwSz~eVGLfpPvjG85x;UNU_j(D# z=Lrylj-C9kiHb&q0VB!L&n0G%$fJC8d3lAM$nIYRnk`zI+AkT)`rsUpmEm$UHEdhD3U6`I9D*=i#GoSGB#@YbiRpG0 zc-|_ncb``qYo~9LJ`@+m&$Vs3$!!GZ_=hRo6UR$fXB6>M9zsbwue1%GTTd3BI4Qmo z^@OPP7%p9+5^784m-SP}_=VwAS~l-UEcYOU>|ORVccn99{WqaAbiht*dj?BHF#^Te z%1IDVbzaNGKC2bd0~hIRMTx%vp_xLSn4Fjkts)%uD{$RMzhZ^QDW;#s3kOiBJ6}q` zeawA}y>ss@Z5!Kjm1c`fE!_0COzz|HBSWmM*UPlxb?zawR!rs<173)yLXHE=LG?S^ zzjnod^VI(I;5@rTstW>1uAb{hvX0hGE7thqD*J;@`oEac z%y9q8dV`z@e+e$F5$*4l>WJN-KYp8P64Ku#o)h?Ko(m;PnS1nuWoBEuv&b*U=@7*Q z=eCtD3M(I!HxN1*4fO_h;p)RGplKh1;;`TEyGu z5$u}XF*>9Yi|$kl-@z^S!twU^i}PKqS=Uf|f-4Z1z4z;xuiLuBNNCeC>D%R$Zjeu` z>s;9Q19<3GV)Qfe82uW|K)_5mTiFlcML{GL*s`b;eH0RLFXzI$($ojLcDr7TV1g(0 zzxEL8fiSxQmkNcT9V{GGH4&)JIqsa*hV#si;};s(LQ>TCJ~0Tfv&EKHKAi{2^nF04 zsbOXd__TXltDSqz>2kAN$)+G)!(^IvPRiHh`W;S)L{zY>;F{uJ=u0zedGH*@@U^Sg z)`W!-Ff~t%0`km!88H3HuPzf1gobK5O+HO=ftFYO90N0n$rqE|;k;O`4)LxF7#}XL?3r0c5rx3aG?EKN zK?TOV?G2uq6dgJn-DH#TBJU2SIL#N(H@v37N>Vh$@|5iAI&q*^ zWa09boDGNFxb_m1(;R?&>tL?tUH$5E-i}*>#Do3m-%9P3vGf#>F8Q#yc;jUg?%-PB zbLDfvs>H2v%Yh{rRS>e$aSQEC>1RFEpo_15KlG9hdrnd@_V_1A*Y*HZ{2BDWqfcy% zS=gia?owzkB|82?A|b0~TbL`~m2crON2s65<+9$R7bX_s0DB_O1U!S;y)2^ZkF^R# zAPQMmV{%TeuZ4KtWjP0n=}cSX4`uc4dl~UEIA2|kQmmm-rc$ZVp@?W@e-_a^SYf$U zL)LMeg;=tT)%iJiGPuTco1-&>$vL+gU(gAaJa`rBZIoXW=y2B(|ry5*nwN)6Y)v<%VTZtgvDk%i=*S+b)2+gIDh0^KVaQ6_m zfBObIoomqVxvc5A&qddn?B;kXVg4iP{nwpW2Vb>YIr1EmlO0+7-oOlY7pWpr({s!Q=|R_+Dyy#bCxU5K<4xf!yofp_~^&c z@cMJ<)5r!X)UoG&#ptWBcMfA6<<`WXFj&Lf*@)1PVWkhrFf%uA6~{WADQFEh=}k&L z<45<}`SCP$nEUZFn=hlz&_V4WEDKL%d3V<)6Qj@Uksg9)#zcy4ws>+LB)>IH1_x_< zst7msO}?kgE8R-92DtVmRf*q9DuJpr4Ri!XyIvjN*ezXa-YZdY>5}8QY^Qd~8Bu|S(viz$Jdfrlg zqVEXr@4%5GeW(M!aMH@|<4kE2d)yVVuTiZ=@YX9W9_+{&${WZ8T$!|&8JX0AOLgNY 
z>%&)#|JZj149X88Bv4=w?nZAgz(RbP#0~#x^4b&g9O%59YSaxB;NI=*(Ap;@|JYD3jMlr+ z!_nGLD9Dwv+*Sf_;&Zc?O_pbbJkds=RaxpLvz=Raeqsvfg2{c-rB3nQXyX7j8shWX zM5Pad#j;%%Ak8t_-LW1B*%fGRG}Q zXwN&C5s^7vcz*d{LDjW~;ORFCW7t>|~K$lK1W`kra0Lw_0 zjb_h8WLx3wxC_ncic%6Gs<}=Oxm{HIc%PgS@r~C1eDN}coRM^ip4rbJ3OR(_U&%^8 zq<4CAMzHWn8|^-q%?mVn6CCLey>IANqYFm?mhX2%$n8pXgdc;NBqw8NbAiX_E4yQS zX$n=~{#5@J$)+|126>^+Us{(_%hsb)rQO?LH08)HAfwiFzTmntj`G7)o2N6=109#N zl92{g-!YF+=aRZdM+npc*To|%11oTAZ2IZQ+7e8Ch>wssR7zfK723zxRoQQ5nPBca zZ{mSvs)?(jXgudUmL>(d6hGq@Q}2w2osIK;!+jFO`?H?IB`tgI+$NSa zjubwT;wfKRDb5 zG>Xt?%KZjL9&W30C5d-lthfKO%eX~c`&MmxmZH33**02|0}7~c19B_T$*X1wP2MZ_ zy5VUztsPewDsj_|Wb<65DkS#)oLIsk-Sv{=kTSx_xmKJYu;*mE9P<+Ky^@mmD>O|j z0yh2_w9&JH&lG<@eai`?IdEkY#gQbUmsFw|L~6)GWM^nI;`Ve?G=E(4*z7tsh8TQe zf!ExonIHVp+Uv~5Q)^|$KG26isDzVihDH?>8ETCD=+EaUBicOSA^-PQi#&gQgQqy{ zwTdkQ#>_!LnBBM$#uL<@721ZXzPiAm7pnlkm}DfWQ!yu?wm2POwBXl7x?RYlMS;J% zG4AD!GrKjZy1H^LI07mni(RD$qo;Lk(y5^VaB3Q5J=?I|; z`yVi_gJtdQ9ok!yI;Le7JvR22h<+3;HyMpKxfoj;b>00}RfiHx4=>z-$@cCDWZeCI z{PC}oMG;~4TyGL2eFx-S4VxlHf`-c<*9zJq`94DTMb3`+Kh(I+=)YU|Z@z~qFEl0` zNs~!R`a@aeqmRC-B1T|u%LX74$~g>Zb#&I3#Ez^_UygyqMRcgHTh6>dc05l4mpgqT zD-zdfr<`K)WZKnLEv_kyUD?|1c3ALx;;1wsv>3Bx?0$1g|p$ z|C9o{m~tXZ0T=&6$8!0%x}41m;n5Mgs0`7kBG^BAiT2K3dT}xUq%9YA5&s7;$54`+ z?0P2R5~TgQp!?`F7E8ijwnFThfR5=g=(}kpKiyvD_Bw$vbO&4=b+2BpNPF z(KemRQ;&KuZJ{A+zd3OlyY^2_YhsqWy#zcKt3byyiq}H_m@=Z4JQqc z)Z>uIe!B#%6#~V96XIiE>fftBCJtXVW*c6tyaIaBlIZr`bie3dl0BPLTDPNqI3Dhz zdveAFKug5G4$=2M{hUr0fXd&KzhleqCB;)GC3%_pq4pB|`r$vW`yZgsttH()C**v_ zz|E&#Xlaeg@UrMT9R(bKJg-+(B~-yu_SgjClO;qv=f^i?x?WkUP({(CYt+p4H)DDH zym~^PSlbU;_t+RFJu>xET0X2!Zpu^|KB=tauaP7lNkpW;5AG|u;7Yv*PV>h=1_c@h zt-_a@S%(;@gr=)Jn2|uPkd}7L zp(UMS64NKLptm#)-KwthK~K06Hx$Y8^!1nEx#0&zGvs50iJ} ziX_m&_pXVnk4Lb;_8$C&8j$Sx>ut*vsC2pcKS+cF zNv15NV3K+*JbMo+=iIjOYz?5(7N*3zLs`;DHQ}XHr01lhB)V|NyTPN>O9P5VXa%%* z;zwR@)2JCIR@0F_G~RNc&2($hhSwTfiVO&ZzJn*tB<(E>5K`1m>qgecGALCYYeA3V zBJ)A4!g>2{wIyj~uUWLrfM21f?N8}OT#3$EG9Bi1hWP~6ZMi!}X2$$){1gLoZPdNt z<)i66U2)`fD*sc%>4SMoW7Q6ZPdxWhWDz-;waaC9g?x3Sutf#Hqk^~Yty$?(Vjo#q zoOU~($~RP!%{!5^x~UXrVJ0>+1TGlq7nSJ9aF+NYx4{*;@TpU! 
zI=HzXZKWeyTZYYi;|9x^T%M8zGlKs5+Pw~W#gr9u529#2KwEOW*8E|mt>7CI!d{su zGNeM^0zOJ}c$;(Uoy^Fk=(iaIL7YNKM*SDjf0{I33;lYq&4~9CE)fLjGy=No8DS3T zINs$%!?3Ur`6AbI3Q`n+Xd7Ai>zkeJD)u(+6>73Kk||TNIyp9awqz+g<>rQ$8OLRk zDnW|ioe|eNm(wfq#0}jO;o7gqT{zD6=pddi571x2M7TVi4WF^(xzt2-d&;g;4pm^x zelS)E#%k1e-B&y&h-SF#Yn+_7=-sj32Akid16kXCsshn$t%#)X(yti)(2=YXlWHAe z7l@O$&zkT{?c8-6aum}Mj*dN(;Obk+apXpFK2H2pIB!-=-LP(n z;8(p>bnnW5)}~3!pbQ-eoTOu{_Do_)5rThDwHMdC%t8yrHn@Fqa8I@D?7tn2w30DR zti1R1s2zNi=rSl&WjI9LcY0@`Ue?Y$l<6*r!K=3ZmM;P9C-1bKs2#PLZ}bHu^dyYW z&X|iTmh?mOGs$m#J+7-+?(tbV-_br8O)0bTdxMnR&=;k_4R$O+{|=(GMrYZwZ&|od zJ*)OK>rT2H38J!oM^!OReDH;8z>V>-7z-mmq^&)IJ>d<883#m}3-cX0Cg06($y4SC(7%sOxt1d z2Np-&*3PaxEl-}iIa7%~(uG1*)`{L`e}rx;aNB6Jg8PCOign7q)K~r=3KC4%M#(aP z1R3Lvi^z3XjZycn4J!UFCyE_fv-3pgo&zD8?0I+&s|Gc;$(-P&Sfn=xF(vK(Z_+F| z;t>r7jOeXlsut;otc4>x>0NvCwUJC51o@y;&o)F%3*n&FHux&@KBvJd_aD{S z=IaDweUc|mXu^<#VlIe=af}6Gwt3$<7w@B{2WbL7@h6s%MGwrC>8LC|{Q7DD0i$SQ zDo+n)CP6(I;isoo#K|jkqM`)wpOu4O&UjT&2I0*cIJ$KfAB=+ZAh6a#F3U9tBAdQH zSmc0i>hA)@UmYKmg+V^gm3k}J!c`unb<7mq80$^DQ1oAxwc>>j19DrN*A<;EP{A%- z&CwKlu2@EccS!1mf7K>#PbA=%s#YF9L|n{b9%N?_jf*LHEm&vklO~JGj{;mY{m{vi zM$_}8&4vp(zt@3p9;OK6sLd{JO%s!fCDYtwD3c2Unj514M3;C_0+NQ(u%psCtlNlp zrpOzn5#ZpcoM2=yCS)Dy;DI((f7Hwkv(?Ds zOFGy{PG~LGejuCqc$#}kJ(D_f|3?hQHfgF6D~&IiSRRheKLMhcS!b8-_3Vqn@Thku zO4iLErF{XLUh^~Q6)Wepzj-0W{rTiKQtD#8<{Qsh-abAPd8v?4lH2wbb24jxF^;r3 zRX3^36VDU^FhnQ?OGSviGI~M=tFq4i&j+KUtN&z6vgvd?G{2=XM}+`7Zz4hW;_Uy8 z`H*M&lYUvlqY!Mtg>UVabymN!se@gqNa}ggXK_o<-K<0cX}b6~?~?S{P;*km7XL{i z=)qJX!PLxS(nVk3~N&G-)%|XJMPh!WV7+cJYwyy zu24?i_|roTDZcY@?$}~aQ38uP;;&9VSuPdUOQfL;k~#_H?gx>(c}kK;4(YP&2sJlU z=rCgFhru4xG3q%*hBA^`^dEq%gX&Oo_jEk;6IVmRsY0E`b=9KKm6X50JPA5~wrnWwC5d1C@xJhNBZYIzb~e$g$S`~EB_a&kh`gG-mKxux8=1QV zV?H3KjL(+KiVKyfKOcglHx|t;ptjMJA3`ht zvp$zfniPfUsthmy^bPcVjq&L55B?ri!K53i@D~7}%V-2-w1q-V6T9K;{g_QW8Wo2L zrcAG2K`w~9vAL(E?tY?$gSI@i&ApK8pQzTLx6;KS``jx~-j;L!ks0Y1@6+3TiXh83 z!9QW|Ir~5HpJ9)$T{%6OetGf6Xsx3c%>BOE!%#t!`@-v31<*)Bdg@ zW1j!#?xOZ1>Tejdp(DlBhd$c{r|QzzZce;pgt*gP`9MmQX)g2-ye*O`AWPypNi?zQ2m4R&av-1EV#a-%0s?*Tw@-|83 zyER}4<@AV`ge zJlvavvB0o^rqAbo4egGx`VS|Kw4mg~{PJ}Zx%q5&zxHV~@Whuj6^0_l*7KCEJ#z7! zapIeyyLH;z(`*olklwQWDMkv?8UW_KT8B49LWP{Z7NyUiHV3r7Pht$KS1DTv4|bS4a21IZWORw69NAy4D>fK*~kd z+;RrV!k~YKI-{}OMSafg^|<12j;z0}X^lUnHtWQ)C%kHEOeRJ3L-WJW%(c$egod6z z5Zj7l9m83C2yd5HD{~GHmYu;ws(J|x^SXad(!J>vXULE$ppFvhUV|&0Cp5t;+y$X= zjJHpm)-ZZWyC}ZM^j-F%thNdIXI)4x1pknb+%(D$&}TLUB*zQ>K0_}h7`av&(!jwJ zDkbvUS{T|XQSbQOo*S>C?l9xk_aBktfV0oYtL3f#u>UTar$9@0M`o;5@k9&>i8=+c zXe8$PCstempHM_LthO-eFt?({<@YiVNbkO>0PmJP@M>^2ptQtVL08hVR2#oaqbx0r zwTwRBWx`99GdOVsM6dquS!wd(k`Z#f%WGh{Z$!Zi;>Uq zZxh%!$%epPr~RB^Bx0dc5xtu|n^%8rOrQ?vp8-!r)pq_l@N90G^gjTJm&BDlrOwA~ z=l`EXTvhx^+p?9H!rvYDihdaZok|tmu;yR=2Usv}D*jHo4g+#;``;i2|8Hw@n} zh&y=b*0TymDtW}a0((U0TwwC(xBh%>)@p6RU~>@YbZO7lRKx%6j6TED9Whzv>Y9BW zNZ;;_-!(zJ`J&0D!6Ta|y(rs-jufI}K=gDwg8<_EMr`*4b+%Ktb?S2ZfSK)vY)>6W z#6b*P^UXW~ZY$q01`R7Z@Vf6Zd0>OuJ>OaDugbaKMpR>Lh@bKC+k3K&c7|Y2{gz=U zPH%=u9gTo#x#6VC{YYm@Ci3T>Y}GH?9lwgM3Ud79re>ZC)?^>M?2N-q77R3zA!ows zlQvUtcdQ$Y6=W~-9U_JVFFmW>XJ3e=R8>ev8P>~UmvW5J(T5iUKSMX4`AM#EN{EWk z{))am1I^9v)%I_+QfP(C{!LNwroYomMe~5$W+oFZ95Y_y#DC)_`A|*%1DyU=8LpZc zjFCw>kV8GVNiVr3fgMro*rSyANBqQk7W45KTmEq2*&d0HGZ2; z3n~^zMe@3j_v-6^D!dH;;!WO4Wl{9*XJr!pbn*wX+KNlcZP zS5jI%(xM6u|s-G;ES{YSK*@XT_6WP&!$h z$fB`BmAQ}GWC|_mba{n0$uw<}v0%=BzMcOE*t&H>Mf~zuu~WWCMogf6WXj>)hG?{x z1{Xe>7d7~Bb==w&v#DIbuOo`|w^@Ffq%@iZRM>_#Eb~cflhRpHl}Cj`%^;^zf1Quy zFe*P4!~a=Q6c40Rp&b#lk_$@6=xBpw9{!=JUYl2z55=CB2DH!TQbqN?hI7rRA$sh` zBfdL@^~{F`Cwk_D3vTl3`;Iu0@S&mM>s`sb!aatucpGVz)obKM31DtQX$smV(H`;? 
ze4EyzZji6=nLZB>h0k2T@;;}DU>OaHW0;k3j{@2Ji&>^_aTofFgAy~lvIfwE zFplHU6^)ca5I1r=JLIHAnjeZ+Iu2-8DWH{e!W0ff{OOZWAyZo`M0U1YGEAKR@W;lJqEC>CJVhq;+R6{4FbT1uGd5F4QRRy%M1D&z9KM**g zhiM{RnU>fr|Gt)Ia%W3AmeNm?9szi_d%WXc^Q08{lLdT=CN=gc~l;L2A#2=mb7*R z?~Cu2)mp%ckHOf*ezHkK?5k|#|GR*`eIyX=81-*x!zUS0RAJFddcsBT<>8q zLGIsU?{-)#rEB3h(q5ak=Gg;Baw0)8wo+0Dy(?oWyfJT3lh@QLYNf=x9tmu}0a znJOW(Oqo!>5CqE}cS|GNSg%$X+HXNtdmDz_?0kjAk*}b7#6l@qUHFW|rB21{_rp{| zy}^>{;ks)ShX_rS)wfx_G$N=~aTQ}py$%61zctP0S2N2`)) zRQNnZ~$7u}YEU0Dez`jntYrrE57D-#P5=bD(Ij ze#1yUXVUF8tLt#5T5U(iH#c15A7oAEL6-nPy80lj5Lj5Cs(bJ1knFWS#hPGtBd@Yad*z*7O)WOQ7FUrtMhj{? zNJ~~KV@@z6he-*8KD7u{WUxKZnozN{qVm+q{5!f+OoF^yux@J+4C7Z47j2f?A;e$L z?G|${cKI~{YIEv50v8_x!?kX5BYn-qA@7Xs+#lUqwn>D97{o3dVg=7-RzMl0@T9Us z)#*|G@S7B1v0K3*Ai20?DnzyrLhe3xk0|;y961dv}2nf9FR{#p%%QH zJg~3L7t&mY`C8xI92LLW9GSRS4ah(F^yQy6tOT0c`XK4PzS__PydVGJv~X8q>DLv$ z6}(!p%id3hl@Icp_TqEiJ8f)u$9DRK<~f?4Ya6XEMQB_z9Rlad9m`^ZMPxTJvB)2O z^r!W|=v;VMK7eFu|4gq~g}%3VE5<^Ne#Szh77n}Ov#uUHcw!)c&Kbm{3OZXU_w6gA z-FKP9Jog8-pgW0JY!J7Fu7!K7mb{g0q3$G)p#t_k-Sk624#4TGgR#h`pzRSSi< z)Vb7i-KMipu}r~?1-AOOc>i$;1L_CVb>#OS02)_mxSQX~aXGG%|WUt=_%_73fHJpD}i zChcGQXnS7lPf=-=Be;s&hDD3JtyqaJpWt4UPrj<>=1JW>dEQMUb=g`)G%A!iPHwqW zdU;^|E@T`FC?XpuO1O1CzZ*39OS+^bp008UI7l75{AOG)m)=a^R88_4Ke$1;m zOG&!LZSF<`jJJVeSEfldA~ngBJCiq0s*PQ4-mx>Kb_t99 z-S#Oh9q-F2UXr{0BD&vL;4n+u*y-oEBO-Pp-3-%xT@3|{f~ymR>Bv{vuNR%*oaO`5 z6z7h*7yV6J&a6AMq*jGI7MdP?3H<1yYTcNb%28PQ6=4gys2hDnKkaGKGrTSZR91)N zl2gN%l23c+_Gl7N=$kI|gxvi<$I9*B?U8_t1hOnf#OFdH9->g4sp%m4)SiR5Xd-zA zXO-|Yl^z$hq18;9S9Bck6SKdXHX?3{hPx9+Ki*hrbMrBfk;{gq=u2zv0p28yt*hv_ z&B+TDTKT=H92W}VqC!sxA3Q6(YANit~aF~+L z=w{`v=jY-8vj#fk-;X%yt-{yCkWR7>E79e36B667%mPVBBvzl#YH}0tZ_{@5!#-GO zDVG^McRSHW__Ld$wio_U->EA$d@-?8aP-ERQ3Horphr@KxVi&h%u(IUt%CE5oWUv? zn9Avzl^nb3Se_*1Sp0(j7|$$7mu12W`JCyN_@znWD!Q6_eM#GSeK;1}Va-&Nwap7WtCZS20m^hcxL zFgNrX39u~JHL-y zdEx0q2oT~{k|AF$oowVd&*Vgkc3SZrha~}UmQv2P?Fkq&znw(k2^=erEgodnHO+?c z$jb=H=Ci&ADXrf~s##;CC|;T2N02L;(2*ZR@064_Rt@u_3o2unvyXCc%1Wq5Q>nq& zcaa97OLOX8dMm}b-l17nZq&i$_`-suF849^`ZhYEj^<524A;1+-ZS_CIf zWQVAc$UlVhD{S6q$nGB#XZ%T#Ifx*huVAC_sQSqGAo!8idN=NYqOWxhEUC|%lPLSZ zkG5fc8=uvmEyF3}xu{U%PC zztD_QDrS4+86qI`ScMY_{B)+BnX*ZEr`DRyC!g5QCILWv>$y|-O`JP9u=pcni zh!rA}v^4Q^6-JGK=SKKXhD;LbjP_*)JN?0n<4#@zO`V_vm2)H)tH`bGzH|Px7MP5k zSdUO(ogtXBJlVV7L8xg{3m)|<(52`|Q-w;=tfnkg4q*s=-8BXyr{xTSly^efZXKah z@7bt19RR?+mFDD>-A+Ikpsj<~! zzMs-WAOWl`yk#Th5HdMbf~0DYUvp$Z>GDj&aPZRd(w!v5307*VL>5RILH0S*F71oe z+raAA(KIW=dQ;2zNR^E=T*SS|Ke2R*! 
ztsP}XV}(`jByMZGbUw~;%cLKbIs)kqVW%Z2xqtBv8v9B=*ee|Kq%Uwf+iI4K<8KGB zn7>ZgB;XfLu8BG_MUp*3oy{~Q9G1pFA8a5^R}Wq)3I&Ga`VJ%Q@rEBNcePWhAzJn7 zD*Xgy#hBVNx?*!N-X%ls>IeITEWczh)6=O*5hfHge)JCb?Bmb5VY^NHc?oq35N%POGTr^@B3$FzFs{l;vsD`*e;uHsAI+g3a+V@T zlR{4o3U>6!GMDOweZ2;`jVT$<=*GS+?egykLC(xxH8X>aNPtVf;`t+Q^>X?NfFkPN zVu#-ej~t2J9r8xLQX0Jxe4H-$S7>{Q60hQ{D>cjH1qV@mux4OAja_~p3kg!;`$$pWB>MQm`y^4iiDygh9Xl#+w<8-l+)z3yb# z^OVKaRNpHO%Wb`Ci}1d0<*~WNAMy^1ioYisZiVKf%WfS)X^79I{{bv_8#^WJsL*=9 zHQojMBx2>nSrFvbQLi^ud6iE+p|fY3J!h??{F(gcIJ)dBRdGE(=68dy@7W*Q1Romv zKaf(|wR7WR(`HTAY88_w_=Z$TQ!(;9tjShFmGBSoN& z+nGD`XZG1U^r;$NWB(OiZe90^D(K4VSpj7ir=kfXdu)cmF*=2zV1ZoZaNuI-?#x$O zaZ$MtZ1^V6bWO`z$s&aW6uQlO5(|zdGg69M!Q)2j-%Ez{wm$ijuN_ecnKoyD5;wa< zb1qlNw!m4!=6N$RW7~f^qncx;iYj0K1FZIh4=;*+&99*VP7K~dG4R#)n=6mJL;~xs zkJbj*cBbU>N{npaHFWI^V`f)7ESBMOyZFh?R;~L-b%}GZ)-128cSa1n3-^b`e*lrb zH;duXYHcJ_=z1CHWhr%3pH9MiVDrO=Y|MWNmD#083(lP&DPO$i7tD--Z#m%q7&w^}bIr)A&G1FFJIE(GI#X6`k>xW^7pBaftFAeP^P*c3R zGch3IM(-@yx9nL5WI59e{?vYVteywNWU1p}nwv=P_HP%T?lE;a2VAVcYl$oD0%+47 zi{92~HG;&XJ+2=~OzaOrxREw*oi~Rsmk!UE><1iDZ0ecB=gVZ8!*YHIS!8U%8OwWq zyDy17k>A&9*ZAhH8&pe1b>zCw5)NLsK)lMH3T-08oj>VNw-(Equ)QaJbz5cQI%F%+=;M>(-9-NVocNyWa34m}cC zXiGmK*P@a_^>U@GCmD;iDdtyAo=*YLOvvdYX&uvTAvNS=1r(JxdBoHMT15J$@!l6={DtmisM8edD+gkOJE3l}%~*JD ztS&{aw3{OIko(bKjur{YLigQJbkBDaD4ue5>EHk-QY{1@QS^6pHj|ahV6nHcbCJA} zWLI(PVe%rm%sqeYVn^fmQ?nRC{s7iO2dV$l&rqG(pB1(f_aKNT5$k|=(&tNsmS_}5 zTov5uh$;c~_dDLCpUHh1g-09`e>BivDx(L`4*V(%x8*4e&{G!7G_05X=k`t*Mdvs9 zHgd=TSUP)nPq!J@tZqA)M$eOQ76R*G;b#$7Mj`fsW?gNj&Ug?~)18_XPcytS$jgHS zS&08Xu9t6nxvKZWYrVqFa$8KwRm*^5*Hh63FTp{&<6bEX^}>WtRbzUuF^e&QE`esBVW64+)?sTcv6)7<(I z^uO%@kOp2>T%V@ z*0HyQEt$ZGY4@5G&(Yj+Ec8x2;YE2b=1-=kmDdL4`qWpahWB9C*aw6_`rgcL2tLJ7 zHIXDIt^NZQJ{uoyQ%}E==ss>~Ud#-dfuQ7s?T!sa?RY0}7TJg#@ z-B#A3etfBqKftCf+?eNGRFwJL0cEr^>K3g*5(Pg%S8Iv2J|l-v)#KBMUMV>{=ah}$DHTn{)` z)0gMmBy}LxOHqu}0qa8@d18pLKbTdl&ncI|N;HebKD+$BsZGn3Bo}#DDtAvA@mEr= zUgPB^66%jdesQtF``0m&77&@0YG<^BI#YA8u?QE=Ho394Xa8Z7Jv!t{d9gq~(loR2 zK)VI?njuBjjHGd8y;Y-mx~6dK-;|K76gjBQ65EnH(f|qetD~@(h+;h&0TOB$m+($x z?~5e{AS8Tak|LpaH=aJtE-7JRsH&@#aE!uG%&oW{Q$6(Tv-UGQRS+gqU@nwM%htEh zBhSA{Oh?U{oRbf&QEu8Lrd-A*Ka}AFmwO4kxUY61{+&3}O5IFbjWn7N)k7sfXv$v@ zunfmQzxY{o$pplx)KAlcX}QqSma!LUuPInZZz>Ka{*5u77%n(XuG{M-Xu86Oo;2sc zQIDTd$vlevTVQff-T5ehe3p1b8fs0#=!Etg zFruNJg(?Hn?zbC6O=3lyG$!kaqhzO-hRAjR@%p$EhtEL^7}a88JO2|mM; zXvbjwY9-IPpCK>BY~86zhOCPTbE~Df^H(=g9;Ez^#IcZ#8Z+U0EdiEh_H* zC==F)}(gvf5zYCzd|MD?IVB#Z8J*0dL!@bYF><5jni;r&Tc z4^~99o|w)BZ0*jWS(lA%+M0V37~xHP%KllaZrP&}j$FEbdSw9R^SKfYkRcr?P)*u+l_zGE~#a%o1= z!h;*V4P?iI5or%J7PUyn#Y-K|@t5p!BXJb`j4q^3!szt29<^wP?j*HgaXWzlE>F!) 
zTIAuy;)=pQYQ_%n>|Lx zUi*F5T650FXth8;oTJB9>!7Al#La>3@|Axe^|l`brurmeJAz8vbYo&fI4)naQGny>YW$0ihcuLj?}8!c`WJo0V%Z&yhoM;b_Jjd0>WoJLr` zM?a$9!L&uxeZRRcPn7)amon`0b)@t+twWpK0prfZ?=$WqRM^=P1Q^Kr z%9C$anWON@n-hDkj>OH`UDf*4(R2|BiF@1Go0CC2*0S}qy%q9Hn3W2B zOm!h@kaz9JZ;Af`w62739~8snm8Rf=tZwBT*4h)(M<7z> z3cpL$?nk22ysZ;QDu&x?{?ZK^D{>f8?(yQ@GJS-Ld8iVS(ENdrPZm*$-LPF^MBg_zNw%GUKKeyL=KLN_K|Z+8V}jedXni=SrE@_C> zE8Fz%Zc2UE^%T^z=eZz*$}9hCO8RglVSWV{jtoh|@Gzj2IA|S~QG6y8MGjOXXJIB_qLfC_m{L- z*Fv*TI|27+AbEMQ=Ma&eBcqh=+XFFCm19|OM>m591$ z{&^a}kd9y$=XauQAM_EK6<&O`8MP9TAs2%;_AiZQDHiwiFIIOaZ-xVHrY4Z$U!q>J z4jZDdbQY3+3Zb|w?Z$Y;LaVD3t6v;qm@kW?oqU~5FdiXE*M~@0tSu!fazU)|zbh`D zuU|3yp~*javXKF)rl$LDna~suwW@|L!?}NNho{gG6IsE#!$&)+*4i(vjlZ^2t$s|- z`TUFi1j%}O`;&)HBDOG)cBuA=XUOltb!7 z!|&M`#aa_wh!4;~m$whe4;q072g=7r`Qn)VUpm=A$?9s9-i}_w!Cz@=c9K z_q)(zVputb)1WVG-m;`BKnv_9eGZ`vq0xOy^H|`VCBC|Kg1xgr;I<8_{h}-n$F`g1 z$cp!d>)e`-=YlBFY<67qTM=@%iwAl)@rKTR_VmAq_e*!z>|16S@2iiV92($Z=06z{ zEd^uA@Fd5QKdg7HP)awd0>6a@DD^&8@~j#D>*3HOh@}g(?{{{W?&(39xT^y>M2@j$ zUC#GEi|#BBec3*eRnQx~uYa`JKjnRd$M#cwIS)qJWJT zPGJr3sao|K);N2pQWNQFAi(e4vOi$=4PK!>W#8Ob@k&9%az<_YeIIH18r z#cr7_JRo((Rdu>ngEeaWLDT;M>~4(5-FnZ3htR$mmjyeZfk)KQ8~_YMAwKg>(mm#U zX49smHbAn(QtWrnS2;Dxpw*^7-er9FFwIP%-J4gnTEc{5?^=G@%_Do2^ZrJR8Uf1mEj0tYul5@ zmvo?+2fO9T=EXPjkhRIn5@49Hm2J9Q-FLRqwUjTL02IT&k^KZkBNa~ZY0-suL|N7U zydU%kD38CG{62qXi#`9PgK_;}qe#rtnU2zgrLu*^J`$%j=RM9hphZ%}AFZTV!8*J4 z>yU(}f=-^%(pm&~W{S_>DLBhK`0=Z1KUgde#u;Us|4B8?3mvtZanjmR#~I9a7d@IJ zXzjTcsWR9XocU5~g8*|Hz{V+ZUwqlOBtN*#C&JhFCez&6WL3G~A8>#p1q_JLOym?K z9FIyE2(n$0wKy2(*y!@tY=1NAs6(LQ<4PY`s^MsI{y3O2NpwV8Nt>|ZXLETjMCqe8 zas=WinkS>j5=LpX{a-e6;Vxn{3qA7={7 zCa6!3B%VnF;#SHLx%Ezo-@p2|@8F|sQWMKdRDLS5PZOT3&O8zwFN>~dIu@yQ%z+gr zg1NO1Q)I^{27#-pAjxg&mA}5bIC~{#`9j1;gv}R)e8?wfB6ohgh432~g$eOvEfsm+ z0Z;1zoP5vfN`=E^29P_GajRiBP)zSgteW{UGVOv*zG_D$O2?aA(;|t>%j>e-#@tCv_$NP!Tu~*kqRQ=sWt^bs(V|ySL$7LZl8nYAwS2 zs2-!zM*ooP!J`0;Mek@D`?g%Pyr<52AwH_O@kzkSV9%2tq z^89tIQ$VdTpLLt9I21X`-q?bOS4(to@W-Cq+1xwtx6ph(h&}U<-HbRTTfR=~E`fR* zH{f8h1BEZSb``lz^e3X7937#yYr8DJ(mZjD)uJpG#jSgoEEXgVE zBdYInyrn}@5=ng0E^4J%2nytS$m&IWka#MOa6v3muY9DQ))2 zpm@AVfnJ0&ujrJXk(E`WwnRZod5Am>Jo_Kidk$WoymU=JJ=`*gfX=tXLIO=7;vHrd z>W9JujWR^LaXlrW*}1u`o_QD-61~Z!)e0sK5;X9P+bmKr_dL@qk{1~aP}h*MIs#TW z0G{rDBN+!uX#?_(QJBJDTxm+S6H*b86HQC!BMzYo90gLB=$pJmU2W=7J?i!$Ca41L zoMLLukF(D@(W>Mge(DI!V8|S^ zY3$Tr)Hy6xJ+rE#@ZqK`6CY*m^h@nHS~MVJi2oFuWt{4ke}r%UbSR)oFnxE7{zTD~ zo$wulcgJ^7&aKhAOzW7mSic{oFcDa-35%9pURS2-Yc!>@6sm9|GKY$@4K&_SvS3@s zBo30gT^8-_IZ-skZLgk#)?wTuBU*b7Rc<7(@-NeQsbv?rB7NaCGVI+tba11_oFh#f z-N+IE-?8Nwb)yPN25)M+HP3DCwmJ_%Z8S6-{}W}Yr>wL=>JgB^9~QBE;H-eEE{a|w zLGrH^N8x1(?yom~#8DI5Hm9q&^k;)1mSbi%N4j}F)IlQ8#c{ZtUXZ$Fz+3qd<*vQ1 zn<3G1dOoMj$#@SWA*=>LwVpS#@iPP+S$B;$X_a7F&aA`!@_SQ~g%HbX>Pmfz7CGPW zYrCu1Y?GFeQ7DI|wIHn1T6FKFgJ&tx#n#Wh8*niMaISoQ#@!*>6L!Ok+TSwBwDj$v zU(@Z#=qJYiSZl06$U`~GXEiT%dwOm3L)Nb_zFyA4R4D<~3ZCwHPY<)NyT*xN08ACL z&Ky5@pl7zwet@^7d%^#}(IPyXmmTy6^_BHw_DksqN;i^ZqyQ_sS4yV+e*pjdan{R| z&>*=Y_ex|eN7EHxjnK@%4hNbzH=h(nZj|k>mTcZ_>!0aNunmRkJIhC=hj{lkD$!=l zm6;v}^?!-uQj+-sN5Up;ltLI{$XQ6HKdrI9M`ROHQ4U%raNA_{@Cu}szRsagkK}hv zCp-sOm=R1R8s`4PVIO#$Z^qkl;z%a?POaC~@%2POM9Wg64EqB3+S|xg(57!y-wL6Z zJ{lAvpRt|$Q^uio>s^*krg23M{HiPBP-Vn{`O8uQS#q!gE4S~JJyu`*0<(qjw?y9K z#-m<(tf|6R8^B#LNX)iF4GprqAu!)=eO3rLm0Jn_6y;t~)Dp%N`E*m8An=)?PMi?C z@SriM?=8pu$2;ZbN683QsfB5lBz9s<7;b`dsm^M(#$51G?X)2B(-jH{h0Hep-eo=$YZzJ$XtP+7YO- zK`1*3-EyAd*AK7!e=21Iq|Xw%N~{+C7{!DXjS`7_2wr=`OzuwoLj&oIK#Pk`X)5ol zcLZ`xE%1}A6>i2&QlGJCiULDATDF6XINR9M##?)^y@3SXQ({OES_jO9 z5dQ-j|ElsIpc*?K6LuZRdfTTqlRli}+e#j$6VW+p)EK?a;%>Z|2ff#PDm1?^ZF6L6 
zYcs8>aG}|c>3_0S>As#nen$ygyNWGI=lQWsL;q!Be#eE$l31a5El{AKS9mRXLy3$> zml^S?a%S`nwjS8Eer(!uDNuXAANCOa573_IwqxdOKdt)ZB0K(pDtK|FOEPE>n#;WX zj$-9p1#C~SU@PnP!WF|Ef)1kuZTi;C;6-u_1Mbdh zq)}bf<>{@>54!J<-Xzobl|%4poxN{fS+zfLuKyu*`6}l14%59EOd3wNU;ypCSgkfz z{R;L{#hrbUoUg*-nTjxxcWP?iB~OSxF1fGN!igPpIf}uL=)*O=c_#b^D3La~);Q`h z+yCqjF%;=>>P7#90yaniv&xasdoNN<3h31prVT+6+z za$;(MCiNR2``BK+cPVOZ`#UT~M=5ykBW%VeUX^*Q$?C;76Re^(a4@X)n3RiAp&3mW_o z&6wtVz*sC@=sRh6?WY8gT5>T>-60?B*!JN~KEx>staX|=5{>DRkiXHlvy~sYT_x4%PA5?DF{`b2ypUE%Vv4?WuTt_q=z&z?N2uMtj@n*uYN zs4A4@Ti6OwIBVMmu1N;FFkH%>}}8mQ@JT3TJ<^;ugr zl`@fpQ^PN|*@Mm@hevEXFtOp4=*26d)$mn?z(ASr@sj z#~k)m-|DJrtJcWG-}VgHRbkRI-A}+b@ruiL_}1N1NujLi%`O6PZq&JiQz-b`C@Yz) z&+dqhHJeB4;*mSAw>juwTrGCctbe-B!z*W@EI0)y-!f?-v3n0rtPH9WFr`rj%53ylpBip;n+&Ah&x9uJnG@x%9cu-6qDDbGynwQL6CLp%yUbS4GuI zx3cbm|4WzdD`=G4hAzY>lR% zbzdU;bOXCzbmdMf*vt3rbY1);_MktC{hcbl9Q{+CXmwuz;`}+?he#l2SI@n@q!MU* zYmx74XoG&uTxU?Q5hKc%f;)jeJ^ukd+{KM|JvxP5p+?r=Z{BBUL}BCGN5o+Z3ezM+ zTwIveI~fr2nIAM)SnM=O9^4l)g(^MIy0*+OSv4mf-LJhTH+%d${_^W`Td?|cqV?!Q z$oouJLu40z#8q$@(9mQMRj@%azuP1X9}RKmhJxY_STp(Rvvm-AcHoOp*Ln70t~Kj-`lMzLc7fNWIDI*7n2%C9EL1A8WMv$SMMva#ZrGb ze>U>Av$U%h@^2Pg>q^%jBjRy=_5A*9r`Yjwci%r*^!RD#Yvw!O4y2pPjG$HHe!tO< zULH>R{S-569Y#7*6B=4`9HY-H*(PW=a{eSDxnm~t_~D+@i2)5fRNb3x-M{L;-?Bh` zX@8!GqR|^{}Nkq^@u$?h0;UB@u?uUTX zSGDJHzg01_<*GOq->UCef8}9+wAo0peh^RdCWWO&%%e+x^!u4OPERh8vf!qV@LAeQ zm-Eub``8=Z^~qj=q>`L-aI7fEFy=O9&TV=BJlA1#TwT z2QD66`lJKK_)oD#r|>5?*HP?l4F`jEitMq;d(RX(z_QWe6(hz0>T#W!!}mpc+t4H@ z2LYlr>#0B*lgoE%yaYOk8EJRk_z)+ZumU{yssG|=wFE~*ONxTu%Yw?}Ao26HomEb6 z_$mIY30uGRNX`;}{N%t4XI_F>Z}lqW$zFOevC&_5{f6r)>s-S4hk@xy_Tl6<1Zdtf z=TQF#dcO;`nw|?KjHWgV_hx5Im@fj$YEZ&F))ZM=P4}tdUx1X^G(J>%2_46zAKH(P zqg7Kv)_fwb;2p;CEpkS_ds2BZ)s-QHlvE!d#HXy_R~^%I681RXfl?A4w<@*I^gW$D z#DO`TG#n`{JUlhq%>b?lvs|xpznWp#PWO)XXg^%aw7XpKXycgh&#Q{7o)HU~w>rDv zYc?8zM<0V`LrA{7SaJ#rKV)CW76F8!VQ|5bAW<2&_r1`X^;ohB$&2f89BI@~)hY8! zHFV-m%t6HP_b*}b*vTX5m2b=y)s9|pu#FnqNC+RkJKszj%fzbnb{LwEM%og3)MrHA zi=27HVL}JM#eVhI2qG66b+g^vbwLvmlFY5+5Y)0!s|3s zItnO8Jo1z5@rcYfY7}@eyO`c-C*R_g+?8=1!lBTU$Dz)sX~nhZ0x)gM>vs=S6i13W z6dDoXjb_T!>Al2gMONAZWfOAPx4juc6&GW1f{7^Z@q$xjtvRU;zLr3fGd0E5~A4fzT79o#IsjuFK zDk6_GVk$?&{h9hS&c(M~K0{2Nb)Z=S9L}#3If}y|h5sTpmrmwIOoT2IFQqegd?(1W z9byIY_@ipOicShS?q1R^Khy-O8>}T)sNjel(+3lABy`kcc|_~Z{5xboh?kbGW@c(h zMd9r52EghyOu*Vf%zs{r^tpVcK0xQY`KG$M{Y6XJY9pAYd3L6BvJ)_XbgLby4)Q-! 
zPvG}j&qsClTi|9b9b3Tbjq|pHnkDK!Ve_Vy2>+MeZO`lzb+bnyX{g z+9_gF{zd{@&3}Ku91Fa)6XMb#Finb0EVVN!}qnYP7i=&fyYc z%0rz|ah&AiuMCBcrmDJ1magi_$6xM^e1LcY5Qc9w`XILd0lp1Tw0Ac-D#0b;NO0x$ z*7SwwK?dq8X&mNH9TLc$la{X-<8=(q`tXq@@2AhQU@gM`n8^J zutPlVW+!3gUoi0}$WE2T<-P?O931H?z)s-qVmYJRr#Tf{gfk*)9d-1GMHO(obSgwX zmtHmRwVoU*yZ?Qk^DF_H+cH+T)0mRxZ@1BNOZZes&*!`f2`1nwn_{NR zvk?nUE{e3gs`Bg${md7h39!;AFU0C?M^K^I`QpvxhPPJxS?a>Dq9~{R$;m4o!6tTw zM|aDSaT`AmXk#LEU$`I7Wvo~YXLW>lThd9@n0Zn1DPtB7G3rG<@ISzd@TjNP%cm`$ zW+B8%N$!|DJA}O=*rEHT=V~Lzj(^OBzIn0G#rGe;x=cABIJUbk$tddY@r38&G}g(T z5DtWtG!C|iP@MTH9jt6RX)QOQr4fBV@2J>VxxkUZlQh9=M9;qS>fpeXds2+%TYE=8mZwj zz?h;2mV zZQq}r+aRz<(s&Op0kRVg9v6J*n~gUhDjG#ReX}3QHdxM=3$TP0nGN@?OhkVRT|Dzv zbL>K5Z4W_9B14S<{x2|lo68VZeN2M2_e^IxeUJq@lyt(A-1}VopiCm)y!+;=t`X!RNg8V^)tt;ZT_$gGW)@3_<)@VGMtRd&tHnTsniN=31HxZKGXIJA0-9c6OyAtlJYNXVhkcNDM zfF4e|UYke?DNYlc`#_8UNR@6{Y;OjVB4=&A2Eq0MSE#_gX@0r>sAHwRGHCFOJkS_hwxwd4BXAUJOehi%5k~~AX66tDcczpRb-Ve1LY?>ZB z#BAzzrZ^%^6?5V&n}GXEkM*!lFu*TIIsd0_&1fv{svQPMpzJ|(mIfX=UB*ITAIlZJ z8A;pRrgMQyPb$*E{&u|xKRlx1S0i(7K2qmB8HmROvQ&@BjT4d|_xI1`^omvyvJSYj zElPj?j#XsaQU^{z7mMaEW;fQ~f|{p{Sx^uK;e=t#rX+tm$=(KNeDA7_dv(1zTuvtC z#|09Uoi*ld45IA~9I*Ze&YuXG2*)#uQ8RtzwU}s{aHMNmO055E=eo0Z$yP2!q9zrM zOYowr=e3jvQ&AZ?3pG9z8Sm)wh0pD3KG-i*&i4HxODLL?rx%y5LXGd3A^%#yOphQw z>2@H!*VrKNYtWF!TU?$f?Y+RQTBpfXlJegbl<}?n>cdZ#Ja_)>_sYp`o(UDF*N2szzpBPawF{Lk8_u5@tqu-kcvLxxw4Yq z4HUtK`@sLB0#X0w{y!{G`0Z;jq5?P8Z;%+}?$Grlv_S4sXHyt&1exy`amP8nb-RXAlYw}SEt#&w~9`Vdh4fk0vB2Y=Bdugxlx=7=}R-xefm#sRddhS zA)ug*Dtj<>*gLa7Ps`4tq+F zS){>WNvZE#;&qK!f7c(iGwicrr3#i$cb}V%y6W;?0vFM8qgmauvn0RjC!wew5M6Do zL{l~!ED&`lSev$@O3t)P0^jmp9fpi+aCb6*$3&!Q;QMiN;&cT=gKuI~mE%&c*_H;@ ztW?E$##f}hM_Mf_p)MHxhU%(3kjFfW!D8=253~zuPcU2d*bqvSVx|lixJ}ICX34ybJd1th$}~^9v(7- zop~lrfUWEpgROWDm(R!A7YgyZ(^H{WpuKYl>dkP@AZzs*ii6MK+OY_~FjOCG)d^km zEmXG8G64nX^!H-jr?$F*mONvzs8e zWF8m~2+s;-ndAL~Y#TrL>6|?V3qtv-gKC^Yh@}&#keyfo%q!(+v$ljL`Iv^4T&`Ly z)O?3Tb{|P`5R-kkS}7fV9OfUX-`q^xGA1P{hfIAa`V*R(YCJM5&dhLMj8Xm^ccJ;4 zn5=mmzKo(v7fGRe?clcfJEFPN#2roqJ-Rq31OVVk;py-xGZO7A=h5%uZQ5}pXKLoa zDTE7^w*15;N=B9KCfhw5{oJNaq$!9(7gi1eLjq4?REwg5?rhP0Y^>rUAt}<*Idhaz zF&%Y8&h0tS2Ehvz`LTajpvTi?MpdfCHfPcrKzf&Q zer_^PUEVt2fOm&K1RYM;60|fREn-5G@W;hUSZr#fxGLHY9fI7arvYSfUm}{r=qHr; zrRYy)Ng5-nZ=t@K=!l0pJv{uw!=0s$-IyMiA8L#zBF*g=bvI zv-9KkTVYuaVTnf6DY4&A)pmle$~7W;*V|bzI4c~&EotByrBc9!3%bLdfYKSgSExQc zmG=RkbEvGCR)(@L`U&<8Nkhp*ar|tU?dGC5g%4Hio(p_ZCJi(hjzgRXmAPDtSS>iU!J0xvV=!=il2y19<}j|6zF>R9O!k! 
zA@KGB2h9o3Qv&^&aVGOd9)H67YX^^5XUQP&ZE)ufKapjdWoA9~nj0+w41yL)o%2MM zbBcpj{Ns=wt&B{-MzLq3xN&+SCyfsORt9))etjWQNA_Qk>>Z9!qN@f8K2#N8rsl_8FW?tT&OdleSf9!KKUQOb^|xSbD)5N7tJc7Rxf!_+pb3O zxumbF!R$gQ)$@vI#Qjlk?y{v%Xie{sIy4A#3wGLEUSWO5pz?E|1ij=25a_u-`qdGg z&YtBAd|VZ*`>uab13pe@$$fy_`RDJxWB>0dIG3~->v%q*=Vx@g@M3W9U6aRaV&U#N z-pJ(9qt~0)%?|sv*wkM=17YLGYvLDhJ!qDpADdR zYpQ|UQL>7pPr!Xkki`hZ)Iv9HVw}o3;3YsDCo#atha&TcKLy=q}^i>sV+=j zpT`UwrfZiYcnn*OG%yDH{{?==o%;ewXV}E_PeF-)s~3Mbi(1?Hfh zDs9ObAVTep;P*QHqWl38ClnK_EFW!+YN^FB9sN}ux+ z(S-z3T~ork|DFj)AUe+KyEe7Y`(HE6)&4GAFj0*7vXQ+;tVk?nAs5fACz;#XxznvC zhwe-oEEJQh>jDeLmRyeOIeYZ!7uuBg8Qm0b!}7&RgO!HDJc&}_5@=Ty80dn|`W*Ha z=ZOw zI|w07a`II0;_J&oGbGV$!B=uBA5Cw%hvv1QLIelJ_RQ0=;P4Mds=h}L5sM) zZ@S4Ty1Bb{lBoF?eh7u2e>5wZ-*@ZqCA~b~%w-JBq6E;q&=-m@g(a(`@s;j)7`~od zym!FBn%kxYIJnmVvV7}f&*;y8?yj0)LsPP!#HeGHEq^DQpRnhvo5JqLJ>t{El0H5N z=%41tmn6EQKKt)b-&Kd&;SpC2Ky&a4QZ2b}g3k0D<8SyRo{*1c*0;RLRQuBa_~7@h z>6!spNaL$GcqoW#JD144nyFdo1D=DqLmvTy-$Y`CJA5R7DgA(#SG)P{>fWq!7{{n5@r3QV%AJ^!(|DP@Bwp2gzWO&Nw1K~_hA+v9oi^!A zVCsu9`%CJe!+N>juP$jsyw2FTHAEhzLotuM>CQ$c;Mf>@4i_+6yD<*7L{^*Egp|wE zTNroC3w+5dze7CG_#I`wM3dz79iC^qNN3b*C9Lo-c^Hzh8hz&uu*}@c)lu0Ou0fhr z#5hd!p)6smn_fT{O1#u&HiYa&4*q?P2e~v6YzqH!$C7Z{XF?YRj`zT(F~B z7$>K-E6L>rX=@wV7dhcL;KYuI`#ETte#S$J+>Ra>w!N{oRCvpYe@-2EkbDd4l9Z_T zav0`Ew!A1ThF@In^6;Cp-|f_sbr8I}$b3>3co$;}`mhsHH~H{uQSI$Sm)7G{BqW~R zMO?)#z*X1m+v9N_Tt=)3eDITQC3j5{IZ%v12Nvx4KOq+%P;GSe&Z_5Yq5U>s5Df-M zZZ!J)tyMlymSwqfPL&Q_zAk@wHcMB|ZiduvZWLZ39KAX)&{H!JpG(`O<6%HKj|S07 zxEx;x4poTKUPPZam3B__LW?w(aVapSeA9{{D9wHv@^yw6+n%ec+*1Dq;P_<^)kVUm zytDEKng0~al$P`k-blh84eY(alzxSzLqFeaXn>CQxXgx9i2kSiboW<=7?gZ8r^UP@!&YCx*}{j`$VMs2bazvKf!9LV=w7^fS7_#P8VT@VYDG z8C4qnq%Q)-{rPE$WeK-H1a6d~5j@~Wbb!^*;C#TZ`xF<7{>r^?ikfu)^yzpUvf)fy)`de(`v@%D%Lse81ZK_K%zD6TEl zIx>aH_vJ()w^{JGI^aaVEe78?5Rdje-Q9Z24{8O|KS&>&z(omt0vJ(Lzhaom@}p$p zuhdhL02Z@VaDY1-zc^^p@%qR6_0Uq{Vc`li5nluk6$H3>4@{|C_=+TVwcx!^b6$K8 zS32GdTr5C~P633tdI>q{Miw)9m*YX1a`Y*IF_i0E6>Xcj-JO6b%zmtd$7-cSg*g_C ze0Qn9Nxyg+VYt@q(q;ri?rH+_145*vXv8S8RL4D0eP=(n4z$8MT$DLJ_0&Dye=2Z2 zY5FO!tdiDl(aZ$t zSo+WL(#_?bTJ`&35j>>mp8GWwmv?wwaR=VBM=MZBHagt#i~v!EWAMli!vZt}Yd1}y z;~@zahKT9K4rwG1VQN-I)3d&tCh%zFKCe_$wAnC;AB%zVVca`UMTw8s8gp|yJgiJ) zjy(=yCs|s}0|jii_%H%Dt@v@J=*btgkr?o?ds{s??HZWJibqw%e#|19VMHo1V z#W<{67BIXo^Y@qmg$edr^5X*qLi|cORE_$+8#LfY4zud%p z4VkX4N-O4-;w^;9!IbIt%PAfu{dk%of4&i}62tCs#ocJ~D9j=crD9{1dB-21l%+@v zpG&g%%|Y3A>lG9q#c{ObX+Tr_#}iMLYB9cE z{garugJTRpJNo=_hZSSC0T*g4E&AiRvGIt>xAR*$5+3GpuqQ(Twu~IL^s`r=Gd@dodFkAk$)DRStupThwwECG$ zNQ9d7`TOmCl__=K7W^(46O9aMrZiB>vw+_1rN@bQ!r<%q1-5bNt#^&f3v+GVb}k_p zP59q$HL}ZlGFz#W_h)=@X6G>E+i!d^<(xytLl7nEEK7Q{i0U_uO0B||V#ZTRJ4rym ze7_{&D`=)1r$3Bj!(ecRTbyS;uv?rbw_4x0I4T}k&pN28b`9IsOMR^o8Ee8A|FxRF zx+#LPrn4AdphVeNi%Cdn2ogN>p?uc8$k_YI|CxI`v&kh`Xh7xU*EFRDW$s=_u-}&m zWxt72@WIo<2jcBA7vuz2YkG@{x}Fzmk)jWe#hFsxgJ~Q9v2e&;4Q=oMLzcIVRfrc> z)Q-52c=zCoU&E7%vta$(kJ-<_K0VVX>!5{BCR2nL$?Xgmes+%I^g1*nZwCr^Eq9m! z4V)0kx3sw1!Wc6)z+gGTm%?NA`K|F4CJM$gE9ngo=-z6l?Ea}utMrelP*}`Q8@~V! zq~r`rk+5~q6||V(Snd&CeO#2BxX|z<;y21|Q@wCAn_W8nm^|ttWdbUE?OEPHdwwvkT=p!dzk3`N zUz->3$e^5Bcf7fgOxX7OGuq@nA(>NuiF%&G5d!IznewCN))~~~Pvyy|MRsmSKs~B~ zs=6JeG|`=8bP5X;3$ibD8dMrHh^&Hg+I8CH=_c7t(!&^EH&FDfrq&bwdUC1}^Ovkc zQK>d_z_=*rHYN&)L^wH9R&Q6n92e-ELk#X+0PPrpfjWz z`{N72E6+^+jOJd;&imD=-~Ggs6~t}vHo+)k>RYY{3>Z1jL}D+XK?feAYo#1cZ$@l*Nvcj?7h|61+-}|sMeG5=6A%vHz!BLXaMT|)d zDWp5-gB+!kTC*@SiejK^|8>lakaLKV!+G!_tJveH3b{N~rd5GF#HZ?Jgp4}5lliS! 
zIGcj`UM(1N;ihFYqu~w@X$=mH*!v_SY($_Asu+-v7GOwB`@hEA)?7vJ8vgTu9 z3`fd@{bmUPO^FXX1A)vh6DKvn1CAq@L?N@CY!s54)BmW@AOG*V*e$Y}gV;KuZ5fi> zRw3$r_BpBjR3AX(7rI_bPcO{T8na5o+dB&U`EfRkP1%wZoj2Wd{$mQSPVD;65kb6= zVM==ftul9z7X^B}=6P#igKD87J#ExmMV#!?=M$-vAzEXkCCUMEPW%u(n)}~>+%Ww7 zs9LJycoF!kvHjenNc4mm0I=P*i0gRSOCbF?fARnw-~xK2&C`^hK1?N)XUL6yj_D1FJ=je5oj||E~KP=2zI7)LI%N~ya5z;L67B~d@b}6NMPelc=?M)#EPAxcL7VO z!voGwjElAKHAzdtltOu(pi!n#9X13% zVBy3N#;u#@SCNR#&|PjkHumXGTr+J8quiC|L^?IT4w-XoE=+IK?|J1v{Ja7=uTd z=E%6}crZV%sYW+#00o6nioVEw#z^Fi4X5zPwWLEXk5!mq>XW>OR0U>o>OwjE9fNji zcq39DRK&D{Pn!i?>Krg9Ji}W!hnc+S2Q%s-tH^s45)nzE9XozW5r7xW>ueHX^bzQp zR$Z+L?$BD0CkrVh)AqQ@&iO;Ul!Cfku5eq6P~ulM2Q_URt!zM;30v`EYx1Jd^Q()e zEB~R1ih|%+Cx`L_=SZ4N0PA@XDJlG^qg=ImhqU^Ox#$i*u1;cEh5`apa$kNZ{u5?BKMa(Crie>-l*?Nk(o5W}@V`@u-$?NS1>u&aF^Fy# zKa_dMDW2s9+^&QPT+CZZzrLE6@KVC?%2? z=JS_mZ4Hxceu8kXxm14G`Uf70 zGh;gJDcMEw5SnOyw8rW9*o9}T(w~Uj*?_A+`^pM#pjxVAR~qgARIEYP-x*2yekY{2 z7@$$L`zu-y4S^xNRB})H&u?-O#$GF2NmsI1v)C)pkQh^Q-<+X5X>{>)34e zDAnfXPP60ntLVOeo9{W7rEKO+2y~EVE^fNMWxVmaREdlY5-Iv{;$mOy;2#pz36|3IC{Fo3;Bl)%8 z1SmS^#JrsmjP@_!0d9M?Pkd_(>Vf_ib2O!8_O{T;9yV$6n*Pu* z;>D0M5H$ooWRuMBTfYv{vZw-02xi8f#VnfH>D3i%w-D(3Y%oQya>yy$p`2YjoAKXp zY1t9tTWe1q0Z9WV`oz9m{RfaFe=XRVT!Er}KzKCV%layScA-ZD;syPf^Z?(*NAEEt z>GgumzP;1$#MND?vK_gFKbj-zig{r`&XM_M8_iX|k{xTVKJc>sKzQ<3KNr!Emz_>gbN9|y*2awuJr!NFsEcLbC@A(< zCuT4}mES}6o84xmWoJ-DoB}epPGnH^}s06zK|l|7>NeXXJQ#OWeyv+rcmidSRA;8d}aV#Lmd4u#9@cHcwrK?`pAJTQLP{);6;b&vwfyK zB^uSxc#wGofzI;Jw220>eScfu<~X)EhnNuSwuBYb1^OC#fL(LyD_hc7h7f0wJREq@ z67J*`7TxOg)heI5NW{DlMFc3k+7a;WdGvfuDKkH8nzzz{Mm`@MT(io?BP8VWc zBY^DG6+WiaeHJ#MxjPpx(`U^37YhQ4;reM$2g_B>A{{i@?|-pM!O_})1u#UKgS|O@ zM@WF`B{dtbPD`ssH{Xm^5u8K-FfmB;Wn}^JuM^}&J16x0*E*||sN_K<`dv2kltuWAY z)BF7goO3(pmvbJ^>v3JzJ;(BLkfyeSGspWaj9D4$&lifh2J-J=KXEPnFXl;PBn zJ?>i$77#ks`%hBKHH3UX3%Zo3*mF@u^x?HR>!}ry1XaG0$}EXn53^~mQ*Thr9_rp; zLp>Kp3~b)V-R#8FMpT?7y;0cci=1mF>2nD}9aAD7e1BF1Z^$%GujIO(-H_gR_y?7l z!)M&Wd_U5L>t-q!*i02?PINdCHbob#;#O{)IP8_ORmE8XJFw+3`ic)v37&N^hn$!T zv?(%E3%%Dkf(=cztrSpX{p;UnwxaVK#$!Vg+FTgqT;>9tlp8WN#3V^0!rk&>Mz@7! z+d(^-4RU@|4xD>&(-D5`zHhI#(vkk6Y5y-Vng)fQ5sy*KUdV*Y zh|&F_O3{V(|C1LbwfaW+ZB&QPTtEfBA72Kv{fWjQa&l?ideg#Ml`%%*1HEn`-}3Om zQGA5m@Y6}5@ut5hg9W=Mucm&Ho$A-)@<<8GO`|YMmEk^-Ra(6=)i|n;avH&QX*>IJ zA+$BZCbRc!@>v}d1z+IySct*`NW$Qxn>nK{bEV1Wg~Yr!L8$CDD0DV@>-_^0t7cJV zqPhEVRB8^H$6Ax(^-$Imjh|Nb&edzU(Z!YI?^3!rX?@Qjqp-&B(7t>HpEVI8V2-W4 zWRr_!eoEQY6dT26uFB>60~kR}hK@DCq3vOEr&c*Lh4M~uk*8~NuKz#KuScT7xgQGgo~O800!NpPq#6m z_v(^67C^CN!lW%84`xVJ;OSW{+x(@@N_}TlXVnhW7_mrqUM~>D#X%~mT8T#&N41Pt zX4oWkPs?5tjr=qBW6LCh$-!dqZMjZ%*)}hrAtQDR9hExhq(CbRXukh~iDR`kJW3k5u1FD;Y3FI!La?Bs9-3S%{uhn(k#juH{hPrzL86?wXi9G@=Df?f zC%>+=eyl(4E_fsA2`&^l0ZXlYO6PWQ6Y{!F4GT}O&FG*458Gqro%%w`_5094Oi*Ob z9|QxPE))Ckz%5_0~8q(R*=^Z{_1FQsbPMWDyFNU$l-VI$KjKcE3;+44i3L!0}pxIQ4%3 za%Bn1ycpZJ$)LokZ8NKCtV2%8A9YcJQYPa%m_e*&D9G-(Bg6TJbnY! 
zMCu6j{hqj_qZ^1W-!IuH$R$;oU}&qy0Y1*IEiP)#-a;{m!3UHiJ%lQME4~CiLF-w* zq0t=*tNo(6dQW6~r7*|4QKL#?hE4Bq+|D_VOR?wIvvx-LLx_>Qy0%sf>F$Ft>B`h?#At# zxVe5k;(WbDSximK*3q*0$EJ^QRKSC6RR+`m>7A*P2W&yudU^k4?%%N|sVRuKXK3a6 zzTDQiEn2H>9X#!GDNBFB*+AFxqcr%q+ZJ^zW9nGXYCCE?@j(cKi_DrhA@L2B6 zn^c$g-66Yg;daf+aQd2XmG$YmSC5*Ee(Y~~l)g>$XY4uIIcs8BhOi1%@H@zQPH2dk zWvHj8m!Nh183jOHI}YCVlU(A+t*){g)!Xg}gJ1BxO-{q*o0Eh-ih*ZT8HwoD-bHfR z2G?EzeRo#7`x?D-FWfJv<&Xaw34sbKadW{sv{!ItzaO~gzDojvWdjg z)XMhj@$7ANl-Whul5#Q>LTkG)`JCl=?BdxeDOGMFw`!rQPHtM)%W3z>aL;d z+d^g}^)N1fI;5(=GvyjPUCdbHyUwv~>6M4Pk@#%=cHyBZ`qy-D{srW6*_XMldYOAvb}84Q1R@C;H`$w5+Mknl*#;yCRNN*OXMFuO~56CRQ3Ooq1Q)j2|IoL9g;SV5xd3K)VMxp`~7%#%#ndT1s zZD%%S9`H9DY><(Xs+hACDqb7;+T_BvtBU$O!7uV{+D%Aq;luL-u1@6kuk&KI7mYiG z!Ccb*%eOpJ^ARhARk1HWwBeQPJ9v}6kIt@Is$O-p8S{1TAo69|RwG9=t5`F zTmOe6FPGnLZqoLABHdz}x6fD=I(<}g(4g7be3qjP5N zXmF^XEd{FtS2I)OafuY%r-2$&7x4P2PEnVdfPaI&_WNl`NKGf!>%AiQ#-t1B!*6#+ z#83ylCuUNhAN@&^c{u5IVMM#@!wh+p6-trnb*<5ZLCUlDyS7!PvkN>=<+$@v6w$g) z%0XCQRddk9tdXHE1OBxVN`Lu*2g8ymJhTO=zq^t-mba*3ulndK4a4WG88kcwp@$yA zIoJT2y%-r_Lv?ogT{=1`p~Z>BCXD#-h)+G!LJ|~LeRf$YeW2J&+Dvje1bTE_!ZjAW z6>5LmT+Od|dPum=Ty@$&xNWGexBWl0m3VK+XtP_TT#rCyAPnz7B1!rF%i{kJ&k@}5 z>rCVl1r12JI#=uV91?RA*-;6J#!NBJV;nq5>d+UIrec63^kLdh&%D?JsaD8=Sqd-i z{Q(EJHv4145DYo%0hr-)I%XVrM!jc!c-)YBV!|izA z@~pa#$g8GdB-tQZgy1Hc31CsK6$qW~;j$l161SBaMGS?N*!;RZqeD^)UY^)G)PDV2fyYQR9#Y$?pWVrfKNq?eJ*>9Frp#3 zt8SH-;*r=#YWAeLlx%hvK*}OYN0?Bcy$b&Mw!<{WaR^12R0XqV2LNE8H^|f}xLu#S zb(%$K#S45N|HyY@_`J5_lnBdP>YRn5o4G{{hp+k0&&P3fde}j*Xr(OkY`9bQu8 z|47J@k`2=BeLBnZBqmMStV)^@?aYO^&I@5y>U;Jb(2GK)Mx-}jKz(AQ2H5Y%=-7~n zIfrjXM_>&gn=lF1uaZsHOuJVYb6^7FF&7chXQt}T#*P;@9=9#E;VqboRa51mYthfW z2#({nDh<`#54l9&nM;z)feOqrBt`h_R>=nZhOevZWuKYoH0#2GHueR|iOpR96k(?& zZRtgP5%CbfE^z?=_G}ZKA>E)5C6G1wu#ll@k1S>#+oW2rP1RU=h;nPn?5xbFwyflb8i&LhU8Qj zYL5lXUNTYHRFC@z646(xg%$A2o(?DT(~WKT7!L*|Bzd>P-uSE?NAr%PRB}a_SJHlL`t(K#K~XWm(dl8B{;xD+jV#?P z)x76Fz(foUwNvW2ReDYc%$dlkrN%wo>v^F`;=HA|MZ|GRkXd|Z=a&V5sN*+j8rrqap}8tRQ3Iuy2Io_+eoc(>NvrN znN_)y{u=J)&-|HY1pQOLpcpj@MB&Q(q0T8C2p@hLjF#6}N6M_+eBN%ogZx@-!;nlm z1I8(g%CuLU@i7Cyh08DmYyIU^HnT`M%fWjuC)W%~NrhOMGPp2AOAWStH;tI-CbKhe zJ`E**{#hwJ>(wDojuz_dBtbBIyVl*&(69_&-n{U2MzN~?vXVCBUBh7hn@htw7LR?8 z0%4qNRNgL<6It?iYUzKJEBil~D~mU&iBVjeE3S5ww*3_vsAxe_8i2}%bp5SN+gek* zh;0H)h4vipTkW=fXZbE@<$ds*#Jr!m!r0po3Anbclbi73*v^Nvp6#yNYCR@)Frgsw zwtL~qj3f9K{|8f6`vf#2W&^FsAK-symj?9D!c-x`MHOmsuk|_PsDIsV`xz?qwRMXY zib-sC&>vD}q~J(G2mLp0?Za-9W<*xOfHNX*WXZOD%3~|u%&G*CZM~UL+OxBB1QAV`<-P~Z%Jw6OTp zv)Ng#5gxvIWB1168~MS4A5Gu2n)s9-cv_~Y!@1&CYPse7Z`qC2KkF-eIV{FE$w?E$-{d5S%We>m2j2Rfu|Gtu}*3h@lJa>S`tO5#!Ru|1hQh32M0ouVKE#zs8$wyi39Ce0SVD@Asj=$C4i8Us+ z!-z+-?Ofl8*E5gSBDu!7%#aUkW7f*SVj*WKx8DKAtd0p}D-=x*AGZxjJ2?y?N3F$Y znq|;g$WM*Ag65YB;;Y!q#gM8BAW0&fUfOM3Ep0)`F@b%Guj>FgP&;6d04-xb~E`G zCD78h3*e{cYObqr-PmC7mN}aLS7<;SA-sEh-LL_6;y8TL>cyQsI3LVjtllZ@SMoQZ6wI`qE9 zPoPhPrTr(oe4ANxq;%om{V&VjJ>PW$vPvE`Gf+Y-#|US}2~eIoZz;fBGM#V%`uFij zhx4^!ebONn6F^|$7BSyU#K;dAA*diJ62d z#9sF>LuMtZ!nx0^+B=vE-C0YAH!x#VNn{@HMYFxw$&fqOsd_nL+kma=H2M1}B4j`_ z4&QD|QjnYHp!7f&VBYXOJQ%Tw4MCxBeIOEiJ}e&W(Ay#!wG6EGhM&5Z^9 z7`ste@#~irSXi3XySrmjN9Y6saF)$Q8`cuva^y-BJx#DpWMNEsTSu~8WaIaR- z5yGr{i)c4cS^>_-_Isx{$ng!z7B|VIdDj$O!~eR}i-LigMZMKkw#U;o^u3}DBv>c* z`XPeYT_amK$5YvhYSxt~?~fKTT#Ed7BpXsoLoYJpIs%AA^52$zUrGL#@%?c=OE+I70C_zXN#iA5DATOG{Dq7ZAif?UO3d~=3O zTvB>@L*afoDW{L2PP3ouB=jS?~` z=bF_R&delyhr$;G!DuB2h$_SV1prNIbdGGbcyyWFU3We3tppVDaCY#B+?O6Y9_3GP z9Xl-*cs=Ejuu!b#buS58?GO^|)^5f!KuThI@Z_%9Nm?n`JlM37W422kwCbUx z&+j>@W!v_zkL>{VwSK6&&bl$9C+4p*Ok=dw`Pf{r1lDt$6l*S3T_x9l034JKhlNmF 
z92uiibtleMd*=>6tuzchf$8H&`WN7oj73Kn4h9=?U8Ey$V(x{6RT+EwQ`K#sCN@3_ z#wXmlhoWUI-j6HG!5lC)p%f)KBQ~jRS2;>1LAi-o=fLQ|sjD(OA{S#cLs=Mg(Wgt| z^gt$?BKHap{$~$ZfX>5sJnjRl!aa{{r&P>A!Mngn+33$bvcfd`6`+R|j1KB=^zZyN ztb|jb?a%1wnU5FLsvhvQ1zBl~;t=(d<$r;*F)JaJ?9ZO_iAV$(`F0XqY|mx1bA@mu z$<~XZRKgNu@G2z_R7_bC@YN|+23^KLj_O1Xvi7=9SGU|&8~(H`mh-Xl3rP*r81NRv zWDHb3Jx+;jx~DJiN28~GFta$tz`ET;$u-JablKxLU1&4HrQJpVE{bMT`~!WeDN#1M zXK1M&w^g{&sLv$LI!n$%C|s}Btw$}Yw7VzIFBt#%VA(h9W!&JHXTLr!&6|bCCj$PC zaFXg|B(+4SV19FS(y^aGXMJb(<%P!~$1Uqg?v2Ei``wFyW@*zWDe!I!s8zi%i)V{WwWY~!63|R=M@ob+zY}z2 zJ42=*)i5lU>q`;^;bg|9$$H$Ve0|DU!HIE-?g0MLkSAc`V zRHV@bX@9Xz)Pr9?V$90kwnbA{#Q=GPD@X)}v<}t6Bb}gvv~2?1L$@#f188{yJ!l3*x*^kXp*-a#?NPzzxQqD5yqt0Nh}*1FOZO|d zVwCaIK`Z)%ddr0Seat_|ogQ5oEkOB zb#1#}KSJu?a53Ov@1bP&J5CTc&^F&8BwC>+Rb%uYAivtkFgY_l|NK+6{QHm1p4$Gh zUoLf-zejuYbv;0$ePO!uTp)^_CiFXG66a8aQ zNc8wuRLsZZio6B*?z@@H1-0&>tA(7~_x}{T+5YhisI%+Gaja=w8QSQ*F?*eA+la^W@n{=?vvGh|xdLi)dp8>C*D4SxA zaK-^KiQ!Xd{8M?;*czIRSgE+DI+8U7>@qEViwg%YkC}fFMF=F*CO?NhwP48_dLCnJ zq7vOstkJ8Cck%O>GziZE{_XaA7c6abB^SN9eTKIB$WlAtKVR#1tbj>6;2S?D7AURC zRH8(B!#NI}BoVvcK4&OYnUKH9+GKUuZq^-@l!`0fn3{JtKTXFm5DEV7YHVJk>qjH-jWB;9vO+{9LjSX2bUlY9fDRK}dd( z7JQ)bd7LBz0#-|Y;>#0=oOZx{s;}~#@UQflr6E6KKoeB@6k+wLDY#nHWRLX%@fo&R z8I#wrClWhBvNW$DBSdMh2s7tvUgz~@MlGJ|(hFL1WfAQ2C$)l5g$ItEgh=sqQH&@} zI~V9w>`A$e<6dL(((p$N(NCiSv53c!#PY@ssIwAA>(^H$Z=Yl&P=>3QpI#;7o3dYz z@=LOHt%`Irt`S|~AWo3Ip z0@EEAjIvAW=z=O%k4V3F(eLsjEy#D65U>NKJ*@6ds!p6Li)Vjrnq=(T8&H=vERR!Y z86oC zlXlR#5))Q0I5`0{=?{wIPx(keP5jsFT;yP>nzO#aIVP0}87hP6L#@}sMn|vbLVk*H zDo|WX?08pBtHOytOUiLo-@td7LVZoQB#j{R^rn8skm$U07K0Rz%4q3&0vY0s8Q$TG4TT zt*wjaaF~OlQH9EZ`a7?T^lqfUTI`_RC?`FH-+V|z6g`?84``ps>C5m7gnKd2Y|FZH z&9Dtn=~OAGOljQ{fFgFKIjcQ;n3pt>gqd*m*CwG$aV;Pk|7yf*O)}xiW=sA?){#Mx z1O^SZrD$pmZkC zbkvoC={h_K_`wins)@`8pl$yBe}qX@3+bgIpE(!`_VfsNU$wmgW9{)Y}m zUhr5f17cCo9XeD1Z1KnZ!0)3WwRf`<@elsTacBN!i~{i8J1p>^O3{82UsQLWIu(IJ zN{+#xs|g#&3(V%THWYKgAd+wjWEm%BfRsm99J$>L&oxa8WgTa;WS+yT#V8(hq@4$_ z14lx80INPu67Xu9!ONoQ$$!AQax~ezE=TyAwo$ECw>$Vy6U&Qds8vV%$zUvajC-p5 zpa2+DaJO*Zivkmf40<$QH)itj0!zm;8$gEIs^NI5rViSvh}5bL$^9M%&vHktz+gC< zjdU}joivEDDFvoaNeQ<@vQpsz9CdDioXUEE)B{D!E0yMR0(pB)Le;)| zb!6K`3v;E+1xrhsk;!bZ6QHqm<3TT`H4t(TWZc9?tITs+H?Bb^TeJNX-0)dUi8>Hp9;2FvI2NLc>n)-;r@Ki=zPp|Af`F#4^= z`}1eUyd7Fh+|vMkU8;)v=6nhKL#D@1C!;&ZmhLytQYF13>^5TJ4v$kF7Z$RN*<)Co zG?4ogJI`-e3%mflU@G18?~R;jCS_B3{>RYvv5;#LFM!p_=nra>pN*u7=DlQDPpv*6 zO8<@H@6TT$b$3re;~KRR(cQ0+h+0E#?DJ51D0pcuQs+NFB=i49$#X~jW=_v%bx`ab ze?i`Fo@<0K4J-@~xA@Qz<>w0Wi&L_fOCD(RYm*uUn8>?b9-E4({s$0Duj&+Q=jBc) z+?@I_$@Iyjl>sAUWBr-hq3B4#U zF`cBTLl;eE8mb~@Y#;C{r6EJm6Y!H+N^6Zj@tUWII*EIN<-{K9_qrzu-e{($4FcBHb^JU9Cs{%{{z~fHumeZy${()^D?Ns#! 
zSsj3F2dTohFZrGfFtltFmM*tmPoCj+(wAxq(sqjKUoQME553q9P(CUcO5u<2jw$ZD zb^dD{-*P1ekx6b${e7a-3h$ZF+xF>v@SR^A2>=ZJ8E|;G$N0FS_4Sg^zXyX!J&NA5 zt%n;F_lqH$)M#qL2p56p_-Y{oON;0&j5Dy5`TRrF&3-OsZ74u zQa8cjwn@Yu9)>e~U`~mAWtu7oH`isHLDPf1o)JOgq7u+9!VdvknBkP*XM71)m|J?=M-1AVx*u?#|8KbphkMG%M*7#R#$VEDJ=fp1Pv3`p zGOXA}lGQ{fFe%?rqkjjt-DwB6(qabG86h|T$*=q`a4xgtb8%oN*)~_7zXIbr52U$L zWM<7E6Wbi#^R~zG_qY%K|gsQdy z%jI6})9haztkz*cJh8KJGX+zrS6XN8-;8r6WvyoLKd^N|RZDUBf|sHvEJ)gWdF5r2 zcCF~^cj&^es)*!u;1{T4hYe~hPzj{FRs)G98mS)nt`vI-FJl9@h#`(MUejWvj8qCE zW)nD-6vtMexiq2b6iwfb_i0n&i2IADVmI zU4~^fl;Y;4k{iKPXgaCvpL4zGt#fIhUJ4}6k^(3Dqq+h0ism>rm20!qSR)jkl2 z$SWZ^#`Y!bsg!>l7#?a4)V4Ak{Wc9?9>gDJGa@L^r3=&O6L0{BpACNEVPPZh?nLaECfE0w5v>$<5eQ}{QdM&;@U_0 zqNP|W7p^D&0X}0(No>Tl>77%R*c&gZDCeX#E}4K*nX&EocCtZf@N`*)wq%~v!7L38 z$e(W3$|iD<7yy?Al>a&>XJjlly!GEpEtRS%*`7y{Wx0~CSD`2cQx9gMMGP2{6Z(TI z4HNA95Oh=6mon}gFaRgD7V5|Z41>k5;Yu$S zO6ne&KW}AkVItI(`R=nk+mx?-I@4_Xo<569G;w$i9jyX`=@z$Fk!Lm*-Ix)kl9co4 z?{hfC_WpG?lm_*rR63d&j3BC5l}KMkW!;m?RWG0JdWLN`JC|XI2*II3p!97X&xf$t zvB9lDZfyMRCsg{QT3P7!pFf5m_#<){$^E<$(c*%`syth2D}gK6`ylbx!ai36+#=|9 z+A({hCtI;Ue@jyI2E`R&Fo(as@5WWuQqn_`Y2HseU%|ei33J(ITK2~YDaD8mZ6EI5 zBA&9z-fdRu~i?u{N4-Zd20o z?w4X4Ziaymii5(<6t1P#jgX>SVf06)H>GLLT9i`0I!{t%^XW$m8osA(5bl+q3&!zj+EsYcvDd-5$%v^|jj7+8NdV}=UGbe^og6%Qe z1ZtgOVFeva7{aW5^UcQf0n&_*(Ze%(!v}n6^-8?PRG~Q-%EZ0o1tI`B^8tEuYDE+r zM!Dheefg#Ad%wNr`6=N2`k!G*R3s@Adtjx1yd-!bi|+0rx+byDZBW^i!DfU}(&M^o zw69seXCP8L^gELS1yKRfPrrW7tyk%r0lGd{wK5)TQYBX3SFzf!5*Hat)k3VP27wyM zA2ie=7D(+Z;md1H_r9>6ry6E6oAOk{BIcl+K=9OrlA3-+m6 zmVbpytlt7fPw(G;6w~sG)j6jr&yDUs{Rr-FDhhCLa z1*!iKM6B_dvTz4u5?IJD@d<>T29s7v+Fy<3~F`%ldNrv{s$}4dgyS5Dr(r2~=E_DA|zYQ_; z{9t(~1~GhTe?9*-z9*!QIFQl2V@72?Q(Xz~=4#6xK*lWTTNP7#vuPfZPcNzgSp^m^ z>*P%@W7gNY)^7r?UfjR862L0k)E?`;NN_0qE?5v(T66*D@GePut7Jp9vwdY=o%(r# zmL8sexVbv}^PBvm>td2MVVv{J2%nOLGxF?o|KyLpX1g+_UdgEl->f9^w45P_S#Cq- zd>-5t)~nP2cz+>dIjFiL`o+hSSk$a!mW0LUz! 
z6C&i9e-uw_RZn7FS|AOUK#Vht#GsSv#_l;VhXteJBYlyD##E(gI6s}R)La2Y-f;NR zy5POb30anlkbBCb~R0UMg><;ui|=NIETx+pn1G zJ?lU+Uc-#Vzph8@Vdu-9X7yZF0Q|m?(0nUR&ZQx9Q(*@jfQCdf!e_J7Qlfz^XD^t_ zq^tm^%~}Jz+PW$!s3|c*UfvTtNQnYcxN(j%u52Ou(}n1>p^5gLjzd2tug;NLN7Rjp zMq`a;Lr)=i3ZcRtp2IYiMSmSw2z$tBO`|)Jj+}czv(qFXd|RZ z_iRQG{q>Xx^P6)i=KK0V)~@eo3MCY(nLTv zODfL`mA!a@WOcM!ICm30E8m+ZU)k(QO`;!dV#&3rK^>qm5%?7CN&%ViqSP%yvt_OH z@Tl3^ih0HmUq0cS$rqzRb^tba6RhHmKWNs7@(Mp^7SD_|qdmpAex34eCfbC9nU~^d zntRQvdM9W=N-oHQ0aC@GU7m7dV;Vz05A3H8%qhCqV?NMPv?|O(?7Ifg-^}ZG&+yUX z-3E5cLJKkrC*2zuZD6!{2hy26A^El%!<2GQn053;)w2*WvZqugL}B>f>c*DX1Zn>zENdJ6aGU+AEFeL_ z$u;L`+4*}=S<8ogs8Z#utWB0r=~Be)f)wiT)>PZX#wh9n=5+sR3tO7G za!((1b4^^^&!$}3Hyg#etAy@Ok(1WmjtTN!xY!#EDrD3(Nt*5Y&Yq;Xb}v*5pcr}o zjavMR;V1{O^sRgLm=;6q{AR1QQhHe+i-6cFWR~4l!ZpR}{tN$@czm?Dy)(3fO;o@k z`Ld@RdCZYXKou+$U$^&iNlZvoxg|r#G_#ZN(&GJu;$~|RY*@`8F5OyH{?9#Ih&;bW z4))8MO$qZ09H028tt^Y6lt`OeZ`Ee&;YEa>2z`fTDSjq+)%(BW32~a6UH%2eRaq|k zbw{>VzCzNSBL*B!4IGIpP&r%Bh6Le@i!v=g^zP1MgJw|C;_Mjpq9T%px)DmDQv&A> zq_B2$CmZs+o7;El`fzV%h?C}vp@y9P9VoZnW?4+zTRbCf{+eZ{p)X)qrJYVltvo z{zm$*BSfC+4@SzHT-*zmwNf%@&YD(avH!kgFxI%R19ww5_u6#X`KBWEr)JT5ww`Qa z-Jh%Y%Y?qR&PL4=8}jmN>+(_!9jec^|26rRAf_IG0K700{j7TL*8?xvc%+ww1sX<} zNd6_FJn`P7^=E&&iW%h3Zq-i@01F^{LEH0)M@184hxr>rI;+9O2Lwz)fvcj+g0yBYNbso zyAr;5(O;Lnqrbd+_p@R5BV_A^7U1xZCL@VkBV^=>3xlXjj~m>=%#_o$YFk4_fN!Uys$U$*|Jc_( zYP>vNJS}_ni)Tn7f9pB@uR8(Jmq(E~4=n!!9EXeY)RmrZj8yICM}`~}8geV>u%@#9 zy}2>3pZB(SI&RY(S)Jr(+1B~adUTv^0_{I%8Kii!PIJRU2BD0qT=SmvaOl`^}k#&p~ ztL2R$s*8;26FX{q#<@dIFtU zQ+UR+yIAwtUsnS!4#_+)qH(#f_|vhQFd$WYHx{K;Me#+2}Uz|j>JWj z6Pv>>nfZCbDP`sH-je}IAEPaoPEJcIH#pX<}vu*aeHXQlaujtNL*`Z$`v*H`?(DSELuLC`8tcg4%yd2KQMHe$ zP2`aXlz+*{C3S2$Nl%>-7CeB6`0`Z;r57~v5x5rJP-zk;7AjYvpou8lm}FHiZo1FD z?BRK5W%3o+Y8wuF1z=zTYP4yMyFTW7{R>BSTH%K8jUYhsX1a;_4Yt1*i8EfV1#mua zEW!?0km?eJR_8TG2V&-|?`f8%owTfeCn0!vgR5wh9yF44*sm*a7z9jhCWHW9l)mqP z@_W%K$AS46G{^5wW?DOS3>I)w7E&xq^P)Lxj~`iioyjtB5M&6qjhivje+v-l2xhQ0 z&{mGKO*FK!;1-eV5|2USccaXakFGWMeeaB`=@SfN+XVj3FANmUU$wqHuHRx|z5EBM z`T01b!PRRUbG!7vN708=QW<`sz2jES>^*a5=Vf%PyG(-?)iishPSR4mB^ECgt8$oj zukXs@%socas@gGxY*hOqVK3f`>?vkL5}c=F0-`g%4ZsH2>_)jxr{9?cjMHh5gsOuQ zqPV;!58ip^EJlMTz2HrM7}-fH#|cg$c;}myQiZv? 
zr|6hyOfwvQ$xD@zkmQmHO%uWU(a62~pS8{vaHc8E+E)!e$QGN7&3En$3tlW{KWJQ%GwA?5v=mFt2B2$-Bd|Bw@z= z?9B`$z_oU74mU6uVl@|ZZ;@(hBtx2okR*>Q)F5H7G}|7v@`E;sqD7K6jDN7ll_aom zubj`f?2p9$1gNIA!PZkai~*-z|D=zmkeYqqaHr2WV~myK1##mg3$*YX+rW{cHwTX^x~Cr81eP+fcoeJUW4= z_eJ;6RbrUvh6pXW3_<2{UUX^(U5JTSr0|uvG-}g5Ha`Fbt%2;lnJ>z7epTn;3!cR1 zqQP9E$iDk>h4j;HfHFZCrQr|VrP@@Ox60M=V}L^%jZs*7t&X^}6RkB+w}XMqxafOn zBY=IdL~bSe2%WiqY*(HZ#k~=N`kr74Q#>ws?IJ#-3^EI|ytnS0%DmD^Ui(PuoR&YD zLYL^nK1p7-4PViRO8;Y90m|BK<>otkW)Z!3Bg~~1m|aZ?;1V=Wofd!oLwW9v+X#Gw z^a@_sz~+6L1VpK@elQCbt~GpbRp?HT2Y?2$P0LPwC8XXF82$s+*omgmRZ05fxCE{t z0DZ@zkL>j>e}U*XB=o`>r{iAPAfnoraJ@DZeQed9o-@BXs(Nv*Igf|?h!q7dWC;fw zI{?IIfFCGM{Y^0QBCc!Tp|LFNruAjMh5h zgr53V+O~5IK%u>};Gr~+k^7jJmUuF$Yje_L8ExJ{Wl){ms)?+slT{K{)h&?LUmq4HVpv_eu%gIQHFa~i+ifnEEFv2Wk513{~IhA zYw)(E=Zs_SdG^=2u@DGIlFe|obM;3C6vHSCXhaN- zX_jA<6K&*B$*V|2OsVvreEH(T=WFU+yv;O3p1$9(UEu=2_bdqcy}b5W`_PEvC6TV2 zvtA+N`>@^7>+n1*=s&atLV&!J&qbhM`fqJC#s6XXqAW=%GsxP*NDWK@gD65kwReCBDD8 zpXaapehK^9$KJWNGY^ie2`1PWt)gTB95D{ueDfw`gZ@SED<3A zH<$a@qGu()&IbznF0~s2j?ZS>pbS6171o-TCU1KO)KE-Z#i79}`9x#=VW|=@C53;k z>q_ogRSXG5fA?Jkco9*WYtf&3ikuI%x#kkNRgrgfSSsSfF}*1tRzUpF9WI#Z(=lhD%5pcK)nteuqiP7PgO z%kv#thQu7nEh!j!ju2Jod}B-0DY?b%@G`fG!22iEkzm%nX(FdQrGP@eC=V{P*}>-& z5o#AlBaa{LY~r`{{-MhlB~gH8mJ^Abjt4^r!6aZqW^flv4Vk^rq&K!ohOF)l14MAd}jO{dj;gMFq_!onQE~2i`O^ zZJi05pEqR^NOz!_m=y84I$1*i0Iy}lF@oqv=%?51&57RkpzCL2D(*CmUy~$lbfC*3 z$mrU2f=C?h9v@9GZ*yTy&FS$c0`_xr?W;J4XBw^RkO5y}^hGX%5YpKUvv;Uz9$>DZOnWZlq_&j~$oh*;1 zJ+Ll6;(e_T4d^R=>`*_j!Z>U8?ks}U+W)L7t~uwgXej^Gb7W*s_~qhNc&+`Ycm0n) zzZ34Nt?uWPA6ff6;Cd_^1@bzxQBc^>dVb*wdp@>C`w*8NXLT5+!A)l#G;3kEGCzp= zDX&AY6p7bp8>`fUcX|&d%HvDm?MU^WCOY_hM)p-gt^jBC7C?pABd zl-iUJ$}*$q9l7K6dhfO%yX4lZIzjiR8RufKUAIU{qP_vKD!Ze|-3U^d63OF_hobTuSNRTr&w>G0(N9aRt(dsF#OLk;m`0r!Q?3kFmV7VC z0}Eop7?{e*^LDEp+caQw>A}|3TE8UjWw0vlT_lq4BFUIn9!pze2BViQCQiR0j_i5Z ze{XqhuXx??-ssKZ%zcP_*}6BGa4G3!3A$(RFg-pCkgp7id!9j4lPU)f!Qw!h$9v+J z(=584bPAaJ-hU_kUydVsyBg#V9Jl>UtBe>r&;zI#m%5?#ee@hHerYt|Ta|Mct%=vfZ^+1-Rb)-~w= ze!F(4rco>_EA(Cwo19)cj_Wc$iTc0~gZ*uCA#@Hy-v>>l)q;7D1YsO+9XRX_Q~TB# zY)U@nn^CW=JHQ}t6QhZuChFh@C$Ru2L)(2XBw7s02VEtu8)vQ@JS)mYI31)xXTRCJ zoJ3l1qan0A(5)Gzx@n-xV&6?sXUOHLM_G%1WoKCCS`2$&o5baJ71kxUBx<)$9-wc% zjK~2qia+0A5>&tpu_F*8Ig+%%?E?Kh!n6|~tAaL~H?k2jKENyoI*Z`>(2FYmk>?zW zeGE%m%dso1Ysh4F0h}!AY76-}38JuDOrl)WD8Y>);yzJB9I)!hjP>Zd1g?s7IVV0! 
zup6dGAO3l3#1ON^;2%OF^6Jc3sIYgr4jsF>aI2uFI@B zeep-Q{#_#S{o`Ho9$1@4e@riNGF7-S_oagaca?lu(yC5k6a4_p9;i{o;L{e97lyIcD|WZ&#V6caS#3W`7Gt18ULQ!@0*hzY@FSmWu*+QU z)r3g)FeQNlQ%)?1OgJ+Hd9_wCrCs%;F-hFfUGJ z;ph;grA5LF>8KUR9=jN$$MnO3*6*tu=`6?2sIEiI7ZK$QrMjSQgAs_zNlIH&4%c?} zia3nc2*G5S?s-TO1S;T8W;zEasbv3+ zx!j=L;943CieFTPJFxKTp7-esGUf?@{8!k8JD64KxzeR4Uoo%Y*&m?YY_A@QIM$+2 zdC+*8*X(ZJxDhm#0%gDx)uaP!9bE9EEt&iqWj9!Hj?O0CSkBwFzAzuih4_PV&(LKm zunQWoEi`4nGW3JAGm$74^^3a!?-@IdlDkwjHF#24*gkZ#NT(3M)YWr993C>f@dgX} zN9RwQ1|Y3mXaAN$nlP%tpvv9 zS!nq#UIAKB`SM%3xmhjUP8xLR5~ougYBS_g>W6v5Gx`EPg}LpFio?1&FAO)h1r$4< z6_j%M}Ln1CuOwA!vjVto?p2%Lw zVj2&8jOlolq}vXkeOn6LBDTtj@CJ@RDP%WLAMr;uSj*XwcpOgBR{&^~J(xA*36x7P z9-3$doHZ6t%EOkj;@s9)dj@~S(6Qjld;3;)30SlmfH-0oS^tb2jS-H?#mfg=q6C@@ zOz@&45T=!?&p-KU|5~f2y@~lG!b){IEKwC-=P3%tNwFWyxm$4jo=No- z{IH~NMR?L+vnt-bYL6aLXGK>zNQ>Sck4i#&nXa}ROE|zITRem+1F%KhJ*6-2b>vf67U`WFl#3yD z1JM>bN?~?d*QEu)(H)ZkVO}Weghti^6K(a;7PO=*5xvyg3Py&XlTmO#)~35}Cb8_Y z5rJf7$|~ZoZz1I4Qi)!*aN#A#{;)4Q98Ij4FJlld24BuI!~f$E$oC)5h%@)rTP>+$ zmK)YhA#Ow(**)`7k9vz*;sK^ADj!x`P^x1oqm#D?sV{$O;w}6XunW6n(rV(02C;}n zQop*I77P}iz=SeV+5o?=+j zu^BILk9K|Z6hX{OPP+R{G<1%dZO6zcDZ!n^+gKo%D?OXiCzLI0XQf*`zkf+<>Z$2`WpuVzU4SGG1*DbyE_6#KYigR#O7dYlx!iC8FOhS2ge0b6Qt(w4)9|p zS!xHi!XBSVH;h!eKZA+=RHUC8-p?{H_K3GkL!2m*?{uA-?)g8GU|{jqo$dY7Qt)DE zwS4%sm%6C8SG$gB3p8}r#-Tni$;m~zb&gZUovtyX>0^eFal-&?x+GGmu5O5_fX#iN zUP^HYJAQxa^m;^H_Vyi}6qa|ps4tmAvi{1EV8bwS?}p;N!0+^tz>;gmYaCwh zrJh;()+7sZa2ee@;Lr`K8UB$^(3KUcnc zM-cp@x8dcbt`(PjZWcMltKCZ@bU%S6KD0hFNU9E(`J=1* zsyU{T3!f=uOJ4DHV}I_;{&cVXzlzq=b^!-&I(ttHX9E8N4B|pm>Ah*I31jDrAf|(U zcfX(4goOOsA8q@uIq%{BW^?l0KICqehd<`$8x9a&hB^jabmY0<{*qCqiL-j8BuqNFEI2Z)gS?_lQR<#O63tQxnni za~E`9#-5-YES~W#UE?L70&i>mA!9G?leuDKPs}BM!KAZJ{iF@A-Wf<6D6?C%DO=}m{TJG+u`pZ=Zz_!`b`IncE(4VQkdE27ZZrGYeSz=;2A~U(!B&+TjG0OAS^EkTK1^ORX!I9~*8?C{ zFsPs<}jne*@p1}ACj8cY} z#LEHIR7maQjfs@TPa6{=dLw_no0PYcY|qmcrKt;#mJo}@+H5ahFvB`X4Rj5SPzg*d z86~#Vbtw39RONN~#6({LOUG*n{n&Ak;1I`QRK?S6tpX+4viJ-MI0wRYv)Xchq0HP) zYnrOEm{u+Ny%mKtU)8}kaMyUn zzbLv{>?vR>fXLP~Z)jT(>!B%#)eZX(AZ!Rf$j_}IQRoOhQ-_JYj8PhW?!lc@FHC?A ze&Wkyb^zY4$ckVi2WVY%bLU9>2BPZ6X0C1JfQ&FI#zYgIgr8Do`^HJ9x^?TZMUD`9(<^ihm1+S;vY#0x4Da!;rt(~04Wkgc0G$>7;RLF*A<(i3@ zo22~2uCV=B>0fs`K48OMI)VtFL+tn4&mPFq+C;?scJd1A^~7;-^d)ou@|;~bn2k5Zz=Y9zIB7Fvmg`iutK zXt578Om0pzNb8bgu*gNb#OglWgrW)<@=aI06;~1&@JPWUmQwX&0Raj{r`A(VO{Hb_ z+(az0ZrsNV_A0eV*zLO;{sR+xB`!ozJxYV>D~3eBJT>WEnd{*1a-hvyCm-_YUi^JG z0>TrYP2nnIQoz{)g7)O|fbT+Od8-lEAR{0?CPSiC?!*kjluC5vT4e#V0dyJCSoLKz z%rpRXB%NcdD@WU)Yzl;L!CNIi?ZT|drR^-1?Zk>Bts z`0bRwUL@18Kll7{z;y!dIJz4oLIGVbai%D$#dRdXFXR1^PX5gr+0a z^v(Nm{vaN)bmQpkThFJl^%032+#!#Z#Gl}%WDT&$gF>rYF3r2GQJVh%TKunXUC0yu zcRjl}YKSs^a9SIlO~RV3F0uVhKcSmh4^l?zg`14290jMElrC&s2Cn$SU*)_6+dPo0 zeYMR(hS!=0aESpILH9LiEoP0@84ZWj;YKy=8t^ZS88rl~@HY#wd2y{uZp0~HyjzGE zmZ0j&S!6e+!aZfi-Mp7*zEmLtEzoiid3laW{Sw(fbSTxt75`diwFQ9Db-_SKfNXnZ zi~iz9pXgKhL(z>P;0zgj*31iE6+JG%3ee+``AQ?RqSxcnQ=^4EUwHk+GwLnbkI3_3 zb9}9Jgbf}@tcsErYdAnP_<12V`!DQM?bR)mB6Tt~hlGA?1`jJ4XyB*J;5;BlR0G&C zI__+bx6_MhcdVDqz;lro@sre6!t!^56oc+%O2*w0Ol#klF^$|KFI5Vbi)`0wDjKF< zXdVy02$@lI4P&t0-_yPmhHqmIRQ}+{%sXX~AiRsdiaPS3zlsJ3HzhcZ6a%CXuUW32 zoITvFx=uvMBvO7n{PB&RDM;BdPdBXCOA2wq4jYceHrL)SEAzfi=;Pd9)4F(eK`b$h z)pAlndQmHUCvp5n7}Pu~4_75G`3*d``Xl(uvaS^TUb(2x=vS#@)lUJDH#swoVM>g* zM+Y2Rs$c}Bs`wNJEyKI6v#ydHlmg+}xPKkVP$u*I3ZV=Wb^o5kS}yLQ^m-_EB6(JZ zRr^9u5@{dwHb;V%uRB)TXd&8FkE}M|dE;iD+XcCN<4REiIr2D{Qu@br zpJclN!+ZSd55zzJ)hyE6=)6+6QufLFgrm3|dHh{DOQ(H8qVUMKVBpTNxt>&_1dI#9 zHy7Nm(^hWKR2xh={s(A1v#nPe4lJO^`S#_eY(`$-mFJAt@PB~G-=^0x$8noFW8&KX zdT$M{7?;|xXO(}Ih3@%$r+b|&rW_3gl&p5`%snM6Tf%xfuYe=i7vv 
zBO1YcZX`lJwk>Pj{R`&etkn$_T23Ho*5QI*3DX!}asEUxetZH&hS{CAA4ZsRkBc*1MQ5tK<+9~!5spRHO*X$N3vP=Ez0mFZR5TGH2O++i zAbsWaQgC=5Zz1|T*`8!q9|HC2IKBA~@DpkIv8Cn$6!~NrN0S|XIJ{w`{^8BZ-ILo2 zV+P_3;Yz!A;d`?kYKgZjiWP&;$^M4FYIp(Q;!8nq5n}7nK?r(D{Vgxk?%V<6Ag5Sk zi(zXL5OA~FLLdE=(Sr|3wzKMC2*ny0nXA9nEg)c`Vd$pM3YEjbrtt*5yfgaTAm-0^ z>9tl(QVCDbKdq|fBT;`3hMD{|!?6Wp6D+qIqU?mV@i}{LVYuKxbMI};{B_8F+ zmMT+k+}G8fA-+J_s-$R^IH{Fl<^~U3YJSZ~fS2`{SFe(z(og{MG(&s95vde!dAiXT z_XIiv|MMF72UWafhFQ4P&1he5#xzU z@!RDW_IA@JY#)8~(8AYVdtCRqA@te>P?b{RXr@FfaT3J?|26d7&if_6H~mlEtW;K) zIqN=G3}~*pep@snDHG{JC60$|J)8;(n5g*+&*b1+dU}46 zd-5RdZUl^gpyVU2u6n}``LxCeW#%>n;|{`$9`oSbg&_6N)P>d0i&nF9g4okEp658f zvVf0ar52)8o(hzy$#0Z;uw`b+RI~m0U2Vx8g0MrTJvMHtmZap(2)&t8d7hs!4k5PX zsge)KQjZU&LN|i4h+nb-_CUW>XvfGKVs%pR8Wf_n*zbtd}twi^et^ZKN}SuU~J zgkP|}*w>g`VO_A{4mf&Z?hj?QfglN*ziGe(pL*~`m?phbltx{yZ>F=izhfCX5ts&K zF=q&Cv$}ijHuBXXh@8PqSN09HngnDBTDQ!RsT`e3{0C*yWZF31WO>L*N=|f<){lQ5 zeD#Jf@>&3K0TO9-EoL=tr4C9%fiX$u02OFZeA!O~P$cNQQ)ti|IJ!$))xqgeKg@`z z^}b0V+?3)HEs4f@=-O!(rZEQ9FmFB}wvOOzF1PJzf1jWBB%XiH8p7n5mI@$7WzbqRCP!1IDsGuqvm`D}RijB7?5_zP@ z&vnN^3I%Dg_No@^W`N~%MgWfncjX`lWWp#F|A+&^A~{z9A?z#9iz1?Bw)DtSCa)ir}lF1dg)`EwbppOXmr#gMyU+TpW0yXASx%W^|Jyz zr9GB~(lM;TdHEefC0rH&cBdpI2V1wv%QGV9aHn;AS8@2 zCNQ1pO-9c%O-jvsOQ;fhPs%j34Ny@|09iov^wqt0! zjos_ZDH*DJMCjrk`Sshlu%-KB*Nzs8q_LUe^j={eW{PIWrx9suGj(L;ZzT`=JZ3-O zFY-Sah;xh!;ba>!iQUa6nVPty9zOE1a(ho1!XX*urn}cm{CbkTr4-Z9x#PrITv9vm z42n^g*YC3JM+Q8U)H5noEvAPFN;$pr)`?EqV)?kdm!MU7AWmf6U9%>BN^DLZ=GtMH z`_@3`UzDP23rg=NPiO=c2*0o5uB4CNb5lrQB)crQgMV$x1LNvP&^i5u6$mCNd|Urq zxZ7giM>TI;apl|WYU$u_n+Gv#KmH&z{5|Psr8QMtOGX=$In82ENFrl=JO7BU%c8qe zyE#KPJkLs&Mtd87=H1N-)DXD@HC>+K3i}={|MU6XTfUm)Kv$f-@%AvcXl3>2ebHss zI>EX$;`0YP${VwX6h(R_9i|uV1s`i1q?2BL5&hMwatd9``CuWslZAchm?D@n&YLy} zH>J$uEyjz^3O6sy!ucEWJP-Qc_sP9@Fl6+jOW4Q|HA$g`+Ptu@R9MY+Fnb8c*n{cZH}6jCv};PQY=%A1*cz zk?@r~8U>F8l@qn@YTjHKGF~}>zA)4JEEJNj!_oh9D}3b%@fM`-@=CEg3Ly&Ya*_J5NcbiAy9lst=4?yzsc ztb3=Vd}MQ|TWMmc5juy*RewF+Q5e1+o&HYseRcUCxz4?czc(*mAUaIt=OfNpM)U^( z2bgP@Hf7)`F{b=)rj}o2D_HRgoyx%Ea?5v;){zjr4+LE_QUD_PQzs$# zmjcrq__SlZ@){fnj_P_}NX&v{Y<7zhnve^j^HgG@*=RG4jiGWsm_X^|Qi9u^kGs7s z`srBa#H!rllD7)2TO`}uqp^PpMZ4!?|Gvd-JT8!YYt+hG!w`$g2^8N+s1Z0I*r(@z<*sJBb= zEE`WrXC3pJvVez~Rw`8qH!Q@;yq&@qB*0v#%ir3Vpc*DTYNsi=It+lNP7H?Cbr8s*{bSRTK zLZ>J}%{M))(2kq65`Yw$sQ{0tYt`405eFfkH!?Xu@T-F}@OV!lLGGD3U0%UFF{b%$ zUl{F9J(%a8b`l>Jyho63c@M5{R+v{5Qtt;dGDRB`1@c-7*x}0M)*lMw&#xW{bk!$I zfI{RqDCssE3J=-!Wm?viB%pGQhLNoJ60tVkMv(?c9f^*hs#lh#u4cWnCuXWW<6b%T z{T%>=*YPkVBG(mRIx*y%PXT710vRhbW4Uj+(;9mBxlg5 zH#V8fiBDxt@$I`C^>}#;TwS`%;zK^_%zEWj{3!CG7=B5z^-c7={jigFJ?Mu}B0OCi z*=x7lqW%IRsUmQ$LL#!!XldK4zFRKi<5@;dN32=js5UwIM8SyIZWYcJfR@QAHh-|`21-@)Z02z)PYl`Iz*O$GQx0k1J-5iN4nlNIuE7`njfN@i39S%Wi?+Tx*8LBWa%kj9E)X zuO8V63QMP)hSlF(p?KCLtwlfS)LGjvG}4nS5)MV{uQETQ=dosH#%%=Iv1>8yAD!8?$JiB?nRhiK+S}T%yj=epN~Us&1eNffR2@>2 zxwXb~JzVd7`n4x<;jha3Z-;d|R*WyMIC^@~4{4euqoYfWl%D9+OyLQUYl?iA2}Q%r zp#!<_nJ=rQ9Y!5eSiwrVv^ThUC7p8SDHrFT`qVa5nh4uDnj_bQMVl^&%X>H3mjY0S z632!cQVK1tr7|3*;d4pm+G3RJxL~fCL2--!mYt%f+*#=mM{+3gPpoC3m(`(zO%l8T zX!$vP$;U=jh*4(gsK#}sa1CxPxtX_ny?{l2o7$OGQ1yVjlA}BU$5J_$HfUV|x)QB7 z7=$07y3Me3#Yk7SPZ$TFBuMF`W8=>*CNOrwvw*Eds`<(+6k5a-i~%u%ke%qIlpdO^ za}!B*nQgoD6Zk}$#TC7hM&!53H#X_VOX;_0c^Ho{7iG|%uZ%E=S8!t)oW<*~leF#! 
z3$5mGM#ezG#%A1jEIODh-hNS@~Y;U??^Z=dqeV^3;|Wwj`+9Q7BocF7JM z!%_L4g&Ts*9-?BS`y%V`>)FIkWydc}MWubK#xVX-Qkf>xE(00^N^X4RfMd0c*ZD;` zT5!Kg?6egjseZx+j&Urw3L$61DR_Va29^Sx)m<|Qe^B*Uz>EDGT53W3tZZf|bpnv( zv>4qXJY}~nzRsjjH4uTt=^T{sUB#BcCQ*hf_rg(ZKJZc&OUqP+)8#{2qTmSX~!3>nlLKQ_s(<5vO~ zgB_;AQrI3*-VCF}#~lsX9xPRJK^uZAq?|3<6dF>a%4G-r#@BDg{yxvLqfy(ZUYJMBCoO;`B@-oOU7p zSr;Ll%X>BL`6DOOt83Vs3-X!t3&$%LmjAN>min?w#Y!}@YUjbhXemD*#rT_Pv98-3 zTE*&^E<<_FrlE6$2{)Z2qwJz#3$qBGW|ps#4;s~FyF_8#sq=lh#+4u!Yh&xs;q86b zDgyfFv;N-f`8zN&jOuogb)LL8>x5=KfdEhWne)n2cYIuZi*Hz^(T5=`v-#L7>TVQ} zt=6Vqh|td4gnfXr+K&lL5)53{NX_SBVKp2CDZ7(>$&90X+9Ke~$dS0sL9PtGu3jb0 zKG@DabTKy^z{Ii9>hGjI=wB{oGymrMC8c*1*!{|+U|}i2D@;YK3-l8zcf-kaft$Hc z^Y?^tCWZCgZ`73SgTu*bYiiPvm;N7w_e?whU0g9FbJW`@caMH>4h7uG44AQA!+o2q z-vpQ@EQCQk2qMH3OD63yJR;T=CIe>lH##)dJo`gJTng&=>q7qn1gU>xQ8(b|PV(GZ zXf14dsS#`>z1G1pJcaAe)cs!K5;^B%p>X!Jw_5;bWoYN?#tKsTkQmuuF!q%`c{r%X z)GV>074>dE+x^!*UV|hYA;Ct)n1S&z?^`C&V%3|Rvxd%n&SBR<;N{5}iOj#*-)}A= zYfk@c_imag8OploAgpqF$q}-%H$ri`vT;y8-SU*ClMt%#r~bSkAxUN6%37>7OPxF z(ff{DRaS!gHp9(XQ>z9SC>ssYh6H9%j&JF;+wEs$mMsL7WA~SlfMHid(*_ttb8fE6 zaj>7hfcQJwDHx~_Xfeg4_Py&u~C$R#NFp0?o1LJe#&s~!x94PL`KE4BGdK9&0Z@9tg6Slt(lfPe^*!6S>bAh)vs#GARUsQ}}FxWdsSd zXosecBH*LoH$a>$m#wR1w`AD2${Ie0zYBd2nNNV5{W9&E7ufr1-R|2uO|M2^&x5B^ ze7-bqX{D7U2Pn=}I)32ixF}hJLPphgI)#X=Q}0d7)v^VMwkoJ0XqB*S>#CeOauaj9 zd4mo|gB*>v50r$A+uUh*a(uSJ+$5u8h4I7Ok0lOkA2)&}Ps|r&@cSMcKDwY_x=u}g z3gXNA=Qr@j)+H~qdo?bbBQsCgz+-Ojl!S8^HB9zMYUd+v%pWb{b7nzArNjUClA1do zhy1(D@*AB7w!6br66}Z{ry!tk5NjWV75Kg85@XGI}Qbe$r^gQW+z0hZ)Bp zGdbW1tg8T+>-5~oCVJw(*|RJCh`h?cHt7udu}0+uC>X7v zr4*0X4whBHGd&>~QItX?WCTWi?RaHx`ngbRhbX7yO&9wJkt4BU$YOt`OG26MQ(}C_2UG51dHflm_f;ogQi|{DVI6 zD2rj!B%lYd%E&G}-a^3G5Og+Nnu)KC*qNi{%Ba(}-6hzycj7IdIYTnEt75W7Z-thV zfJ04wN^(CQ!+B#!cjYt$v~Oae?{~Gm>@q{Ba&a?JLPzGIdG6mNDD+7#XfIgs`xo8U zc*ZX+NgDtL#y2j}?v3RsJKgHFoCze3X;+w~QLec!0H=MUy$o$|7(CJIZ-v4y$AWh~ zk;<7WC6tDe=*Y=V$j`Q+MmD2=Hc?a@Ty+SHr~~)OlMhC`t_x0b=|n%=XiZlWOiq*_ zKY=w+@MYWYU;6K08Z1C3MC_SL%mOoaCD@QR8%k}BrLK)5;fq+_qn9qio3H;%?mvem zkk0kgQywBxL2oY1b+WV1 zR2cf+S(L!kybb5#L;PEm%XBw*pDMWFb)M{Pq6Z5|e~=N*395U|gdt3)Nz?2`{E!Hf zhhE(A_HLOJUe-uJKbQ+wM>aK6u^u77!qotDkUcl~Q`#EqfBBeD8 zdbEBY2@RFCJk?Ue=Z%K*b<+v z1z_&)YG2xi8qZ4*DD*}?I#bcS%uGsl0gX$mr;6e@H%1AYDE=&Z{ZF;`Ibend{8AP7 zbel0Vh;}Acn$5D27xi^qTDaFQgdA3(c%Xb)x7L-JiqQ`vC%$*l<-zZI)m5vRD#{$} zE#iFV8`f(wPWK zql?~*im^yJe%|~i-5xH1v3>fzz6*TokuiLr<+LP2d1FGnr8Z8_P^0C0vqX@=+tvBn(PouV{J)&9>?2bF$Zon4D1pr?JgE}RrU0peUvei{_;bqi4-S-o`rPD1Kgx&^afwXRwL#9V$nE?i)C<-tC%u< zPldN#6+o_1m*gjoYAc-(Y*Cnc#?$j8P=-uox*(Zi@;G7+j*2&-r{dzsQDcK*f#bM> z%^ayN-|L7K0^Te)RX`y2**6t8Z$2?~i1rUimQaSf1X9P(f9;)!SXX4jBlE=*9$g|F$c>`GFt-Vs5*%9_%aw( zgPV2s-<$fSk$LTB>Oz2fbaDRy_GYcpj6~7wVk9P;QBsx-X-~6o!7*O8Rzx0#j3>p( z>m(HI5=mJmLwpYXAe1>~@ssqMRRn47&h2xu%%4xAUjHj+uXWo=CDr1NR~!P~=Cc~( zExL8hY~5#4{crRv8LyT&RvNpJPBQY0bSKw|A>{n&OjguOr&Ae6*4HdaNB$f%uR0`$ z&e?SO<7AE$Uo=^4ex*znBePxlU8X4|+3ra$)uEm>K|s5cJVGK|=!lorzOuZD>f|+d z`z*19xcXf;Ua>Hj#m8x=1X&<)J_6$dt_YS4DVxD>0y8oRa!odqe~c3MUccE*V;wlH zj2G+b-Iq0dH>D~(vBTI3$mq5&#`j@P9}>sL{t3a9zA6)s)=fpQ_O(8&(Qtid9 zhDQI$bMj{c?{SYwYvCTT{Zt(JFSDl;_^M*7D&o)Y>6_4N&)?1iqko*IYaZ;a)D2-~ zJN3vUym1_1MinhL2rdGi1JUdgRLawLNABOp1r3oP596hDIwcXPy2EeEcBcpKh{0> zE%8!4BKmVdcxq~Db7|LG>o?<-?%(-yCqFificF0L3}M&E8q%^3FM`c2Cx9G*gPc%z#8;E$PZ9 zOBOFtmAjg1ypGmR!jD?k>RGv7k3iS$Xuh%|t)z=Il5!Ucy~CGPgr_VDVK)qWBlx>? 
zz5QF+x`XGmyTmZS)Vs2fP6;E5jt4WuB`PhUWOxNiKTYf)mb`#YzUo^&A(mYd3GJ+p zA#Y}EKK>w(c8T;cQkWvR5>VY;z$wvR5BeizX+uD}D}sd1dH!h;7wq{Vwi2HM0@A(3 z$E{6gVKidsyO5SiO;6Aro!;ZcSx-j|`KEf(r0;N+B?bJY+J(i4i627kH7q~+T^4QT zg8(x~C7YmwN2$iJy3y@d;HO zeIAqKAU+HIE$`L43R#y0V}<#S|=JELpbDD zmWq;_I^9}LJ*e45&Lw>G^l48S5k5; zip5<$r#RKQlT*&KueF!Cz;;O)qN1JJ5iNM_$wfLQ+P%ABYUA8|-EqzR5zLZc_&mbxFs2^+%|UB1m7GWWSR0!XuAv%Lwh5)6CyN0DII=h=&@B- za5)1ECu081=a{!qX%n@^Dn*96@+w;C6N%?Ej1D($LJv=a$Rj-l-)U*$+t%?9Vs zHpMyoJ}q6}Ag!!5!g&J^)nXhIzLOxb5h&b>rM6R3{HY{eEv^jC&#FDatGrfdb#d;{ z`l>4G5_zQ8G2p_09G9t}?lI&%BTu|xUOBJ&?G8oqeu@EKo%b6_DNgSmtK|BNb`<O%;Wd)WG}7)X|?)YYF*XW<660&jDssc$10x`HL9NJSQ##PQQ)P@RUy$!cy8U z9XO?xBY88M3$XR$PU05_ro6_Ls>{w3w^mc~m5?yIze;3JCM{{_Y7SpxU>EIKoXd}q z%d?kjU<^##btuzVy<^^jw=fm*L=GE)2IIFi<{Prj9JxspS6JJCrtzJ!ixR)zeuN#M zD^fTj8pFzRtRqiYu?3 zW#^x^b*C*Pp75w9vx46!XjYqC-_KhJ8{n{*`h3j%eN;ejZO9OjUNhL%{8NSINBtbX zFcud{C+e~K{JH|5zSZ*W@~2+--=Oxw$v={2TTO7lSb5FodL%6BwL*}eg-)!sK(?y~ zq;VrRcKj6bxrF1Z)3QB4Uj``mU|8Ez%;lFaZqIzmpcA-FzBK5SBleYZ^!My5CtPk+ z&xJ9pCfd_CaS^A^@*>x*5!Gyx6+1mIF5e z8J@eJHv#uzm+Z&|$bJ^$XRkp@8|;P@dO)nU^}xIOEo8E;N5=z8=NR@=Kc^%#ioZEa zR78r?SYn^vX4cjT7QaV3hHX(NfmVpGz`q@;tRBIXR+?*dhEB5gKdHP`8r&l^f|qi; z_(ic%+9XuRTnkGLEyRRgD2AN7u18qyYkJcGFFt1v=i84zGDdvH2NX|8XlmC&{=$5g zCf2prMbvGBL9CJKGoE~B-ClF$G6m~PM^v~$M4O3D0LTdtf!N5<|H7lHJqNi6^|d^-ah^{6(f42 z18=Raf2pC5{@Era~$&es1Mw^uxsm@0o-$&+R?8WkQzWEttZX@9~cG!U8 zQ^|(^0wD=Y*|rw%mecJ1aja0GWHB4l^8SU!s3AgL8Y)7WP+gc>X!Q?Zm~5T-LF8RC z5;qB$Lg5!O|}16g&Z^kSXD_tU13#v5DDl!^UIu8(95mX7k5}j16NR9NxeKBbj$Lk(ZTU>(P}RH><;p^NpxRy5*LM%KiNve;G*Gacp{11) z2=>~{-Z@2|N=f|8=3F-YedV_;e>XeqKv2@@PklbbUiS~srw>W9Ojbl4J8y}de;l?l z0Y7ffh#D#!a9>P0o#@61^qQTJ%AwRaU{U?C{ga3oz_4@vT(_;qwsC2g1~ld443VE8 z56|3HS2OTB{p?WWOr7j1x4_T9bKdm|)~SnOo9U-htADF<(Ss0wL?paqJ#jt!)LUOW zWV?R#m0#|PK=%w?{~SxfV5C!rd|k-xBD_i$0^x>9`8PuEvb_V%I(sO^i7eCG0_~ErJ|p4CQ$u*mu9o{#Ize z9eG$nT>d0RM@?9mKBAq__IgaadQDgIfm9KmD6nH*3SEmjL$T(Of$YV%XfJZf&XgIQ zn?G=Eu_&I@XCGpg3A{}b`<~AOzl&qY;bFrdR?s&5882#+utm`fPo&UP?wQ5h<|}>r z2HE#%aPI4=8BXx;8Pq)biDsc#4BxE6CTB&C-eE2;F*~j3iKj^~viP$ZSqBGy$7_2{ zysy+b6ug;cEnwmSa%orh%`lN@`smDQVse0r{?>Ls9xm~%JHYh0ZXTZnJ4x}_l!K0r zLgpJmrKk76;Gm3sogEMZ!yc->JBvpo;|~8SWANi#H9hhqfB$e_((RNUCQ(}|c-!4j z!+{YV6cCbZ?R;-OkG5JS(686$n5r6kWh)LP71dPyWd$ipy09zRW}-qxi1R{ zFQpw3#VY!SyI#-|FW89X3swo#kYIeNGSd-wZRkQiud$LhCQeFt{P1kDc zz+^rZtIGWcus{9-&48nu5B}bDqvC2A6S18v#j4B6bON!(Yt=pyIit+Xtq=FPij4?0 zALB3f{l=WU`OV7N>mXSB8Fhv9%j`L!X5OKwZ4G>qU*H02Q9vr|>7kVXHT!TmVliA6 z9=iPa>4pBaVpPHbi5y!Qrl^GIvAXCtLwKj#sw@WOM;`Pn(l_2%-FtAlQ?0W7?D&1N zyF3=1j6g=fsqxC|S^71lLjgiC&mJ)Y0hfx62oi00STMN_$w(7mr)SZH=L6T3f=4ZF zD&%x?lkaeRw2aS4a3MfVDSb?++eqC#n0sbss)&0Su|giS*nNx8*u%fhwPO+DsnE-u zR|E#n-Q&qxuX;u$h1P?Vy$gV!mNY~w;U~S7_Ec818ANw?iMS@<^vF&aN;ktm_r%V~ zQN|j9IleF2Y|9uOA!@}lO1T_siL1G7>G#*fPije5xRbrsAvZ{$6q!@FUiF+B?x8@! 
zT0Myt?<3s!Io7+EIqw{h|1$d@bK?*|FVDNUF&k@#P?E8;zvA|Ia=%X};oeF9k_VU+ z#a0bkfh4Idils~0{vTIo71d_fMeE=WMT={SLveR^cY?dSyB28hq6Hd?yL+LyTOmk+ z;uf?x6!}ihx%>-KvMoxqMh%S;+QEBZx(DHcA{K3(*DyJ<0odKN|r~CiP0%V>Kw0L(c&3o zL9HCuK?}B8qRDwNI~B8Wwieex)pvC|c4P+HxdFG4%y^q5wFip3ti$vN+D8Rukls!I zYMfa0UCR~J0!^A|cdd_bkYjY{f`cGjwAp{Xq>Jg}EE@&zY;@_BAW9en>Cs*5L_c(d z)oE&P`9_9y%OMw-QFk%DD6WitUuxI{U2V@bX95^Yrczlu0aOpQmHU2D#7e@mQspy4 zGgD&~m6E|YrfjF4kDY4UZXLnyqF;*C=$m2d)biH-A6DWQVW;r(X>jN0k9TY-+N%*v zx-?K1MB*5x*7v>iLZ0P%re+B>vjAHcf6HS!-axtJrTy;`mKVc*HYrm>FQ5oO%3jmq z>L!!VkX#r^(+4$^-nyY~U|xVQpz5wr%p+1D06}?Xc^Xqu3*BEk(n?rsuf~|ceH}TJYaQ)yfOL~d7fw+>wu)Dl_8x(Y#I9Cc`LaGlj zOy6;D57@quQeTWXFv$^9*w#olZQ=UtCff!1M}v|2DHQBA;{;zfi{w=A{s_QN!noK} z6;7qc@WpN)>SO6KXBzbg;peHbih(F<^Q+jp5z%di3t zVF&b=DNch>Nd%0(f|Q@=t)F{>q08ZXd!mo7|hZ|fo(2j9+fkls7; zjyR7NE!9q%B4&%VHr`npm+k--I@Sy(-f-kJ?jN?b4LfG5OV>xNWEzpJ zG8{_t!^|yFf(Oc7v0sHta>)1-2?-{bhq-&@)jqO3BH zp_H_KoMRU!o=S3n_GY1Zz<0hG6q3;(Q#F@HA;xyP9d$};N(dNS06toaa_+$MZ2wy$ zsy09(1Z`;{j08|Ai`QIy`y?4xHP}U|`8m7$r*0jc5O=>nJW-SPsbHJ1Lu#pMs(I>n zR*1l(BH3~3&Fa!&S04>?+rdYTryVNZH&LZG$~P>zoOf406RxN3#)ed^eA+V^BfNE~ zd4lW0F5^6B)my##6S1`_Q~2cvD=7egW7MIs>tdtd^0=|OIkdKW&o^&924f#(=t?ruz{!@iRFH844g9+gKF9Z?S-8r7Z>!Y2=xB`SQX^qal4_3h>Rt0HIhlT||S z9>a@3-PgJJKpLucRdXf>; z`c9hm@Tb`*84->Y@2i!1I{l@oPNOyOd>_E0XjKrE9KLTUJj` z9^RNpjcuCn3@sDdQA0rR~3D%Wg^Tq-*l!j4wTj9y(&`NzK*VIbC%Y}y9 z{{Z@3!r=^)Pb!QKpK!;j`3>$7}Bur3KD!&}r+AF|U({>PE-Xe46jaQdi7O+J@y&hrdj_cwaK85w66m zL8_EVEgPoI-k_MW=!2N;c27?Mr0}iiK9)y^;y=`^mBoR$maCo z%no<$RiTa1AF5tn=!A}b0=Acdj!YC>vlD4`a5LY9h`Lv_85r92%DMLhd}phUB#AFx zzilG6u2-6`puVMI!;$ixGdb`BXBpHGxjOs8WQ1`FMd!`zSS!M2_#F?m@Vk+^O4EGM zg+%WvYV}5sv?kkEA<%fD#X9e8H!~nly=N`LhIMYsa$|0e%ww|O}R~o0l^Sndj zVR@ogNB(Sg3bO@ziMTXzeIjKZ`^i(;<4MYYAl7uodCo0VIxuP@k#GyjeGO$?NV1RYBhDxZ_Gj$ zPK$cjtaET)AkiiR22i1(u8gkIy!UE;m2AEEe{)jHu2l`D;OJ7vRSuq0AKRWLFacg7 z=B*I%xTmvRATcw+5r)3NyVY@65B(ao z*5YJN8?|an4Xtec*_ba(&b3X6X1?13#Bdob=}k2m4tgAbDl8Qb-k^8w2#P4DaInxP z56r2k9KK-)#d4Fj&l=ssbJpE_lxn1;auwbsT@Z#ZW_Hm(yr@X6y2PC(!gVe-#aV0E zJ84thT8ss?uP~Ai_JjNhPzU~LK-mm>J9H_kf8;^|bJs-VEAQkP&@Z4Q0C+Gh+DWu? 
zpZmji^&e`}=&)3xhy4ND93&39O?mg2we-Te3!eLfEFIBEw9Is#m7* zGGMbe@(x;6I|Ian=u1E*yrD=)7YxJiSdf>vp=kQ-6y(X?l@)SCC67i;h^U%ycvuwk zaKbG4`x_IRk4pB57I@pg+^RDL>tz5dy)-g}@UYO>rUNWKO&?_M%3HL_j6|Vaik3E< zONwL#T0wD-`+A(gqCnyPy?*6=Mjl655(g53)U2bFyeo{oF)r{L-MzvYFk|_V5o{W4 zz#X%_&~bk~;Mo`n}vFN_xk zi)^jwEKJg8p<1GR9a}u9!YMKmkXDrGX?&#e?=(f>PNERdrRLCT$z-Lt#&qvg?plIzFM5<#;DHz z^{9ym-Aop<)XPDaYT!zCWE^|0xfnU5XsODJD!RY;E08jq*m%FiK861aYZjDW>%;Xu zkCeUFR2nwgg>orV3IUq1+@7V;UESv*d`Uu7Rh|-3M&9$yW>4!ph9XKX>$;R89K!)uKM^L^LiPZ^wQE2l*-IZSDG09f&@g0~n z2Se)h_f{y;M-Hz=(gR-b<|DFLqR);(;8CnzEmTtax*8@@NW5<0YWN*?w=;KU)T>Lv zz^J3BYY0>?J>`xF{>Em~Q2j5g#QGZ##ZWEd!rc9QAjjzhnCnWq2G#DLI+DwKZgYKC znw>J0w4q!s98K|ufl+6E-fxCAMA*4HEEy8+b!IQR%0$6QJutK|JuC`6Z(EzVhf{TB zO!Wk1WX5MLVr1>UAFc{KiYDD_GAHK3sG9CsvmXQ|$SFl`$3TW?%iZK#1_ch{$D)}) zIW7hd;ZUh0?8drPx!S3O$U9#Wz6Js8|Kyg@*)+ZepJX)5W?__4dM))!51lj&W;hKfcQ$1wxXi_=dOy;p1 ze>b0Cc6G5&(MGkV-^zv|+))(UlAQkphCiHG2D06a>Ir#v`Yw>OSw>M?QdwHsspm(Cr`-NY8I>@g(-?q*`_d}@@Xk<#%TQ3Pp4p7GEbSy7@&gY{`R&1@e?asvE7h# z@Jv29ocyX*9bsH)R1sc?L*3KU*4CV){=r0$6m7;<)O54Qd}AYzyFKfb|Ja?jqch%V zKiQaz%&5QyX~gy?M?cHdd~YODL@*xT-|TKE>Tm&`2j71H+(f#=AN77z{e0a%oDIOv z!Y_)hW7_6>qS(5c3})sOma9qnc+?8;Os0S2yg6v}B-L9BAp=eJJpY#0xNZ)&C7lC+ zKN?i%zj0|)MQQ(ULk&?*99PrI{M+Zhhn+iGzyB)c)-^8irF2VEkNq^kV`6WvTO_?8 z*g-(zXtFraJLHFs3I|B{d98hCYNHjaokX{4z8ff;8Bk9DPHr=|#n6jzQEyzSyLNrY-pdU$sTJt_!i%3uG&5|^0!*05s34PYGE5$HHV}DPwvEjsP!qP{ z{EZw~GyRgxjbHZw)Y&IaCrjA}M;Ulu&rTaMRG34F9Gng&$gV{(*-_;Uq?Bd1Hd7}) zWl%FN3^frUc}u_Nb|LaO3sl%>m#U-9L#(48 z7S^BVwQbYMvgVr}Dw&obsfIoZUwC`iKOuWG5aUnGB$u#a&{Ri{zaw6bvB_Af0lX)j zaxz@kDm`~2urspt1Z-q@_Bj)dg-;iyKfPs(=vU9cqk{ubf zG7x}4OS3A88&2W*amXU-@e|UH>U7cWf?@9uzu}yZ5b(o7iS|M-fXQ2xdq^mW<)?fK zuikJ7qwf+SMH2W!b-nMf1RFfD?SwZp8TbkRKz-OktR0jkFB;6-#nfYIY3n;1o;;MH zm&?($aoz~$^>erWN?7HL17KgMq%74Cf9>fVX~bj@b`2t(-RkvctzDfu8~KV~#{IG! 
z0#X#1{%GSqv%XlT`{XbEZ0AVFDDYt;s_`WBKY;A{LN6L#I}0QmciT_2@WtWbO&@Ob z$)ne&D^dgSM0orqJ`S|NwfpKhU8cO0Qt?*uZ$)tKD)^!Ek_sh;{Y}SCC8`?caX@vs2a+VX7qphBDM)+^wC3_OtI@DO?$oF}ZvYWZRIC z7226g7oPKtZ$t9jXeB>VzkSz22NgsH!<|mEEfOC)<>R44o(Puxk?X|kmCvb{<$JB2 zvA(#d`^gUt@2`5qK+b<+YrE_8U}*JJHTn~^z(eC+f$r~BFRf4QVs&E?yhqn#GWMJ{rhdxP-!bd zj^qt#*=BG$(n-H9@H~SrLrI98VrzX}3U!}^A@q#ZAyK8~`!UeL;~M^KT%<`&SJ{3A z&z_XP=byIGQ4beaT|Zrb3@94tmQn-A?-bjD26-36 zVm1sfKi)^ZptinfHie7#YrDMx+trAHtQOys(*6QeSYwL7BW>#0T=4zWI-z3(r$|W_mIuUHPW2jdVX35doh(GhK?75+Vy})!a6>% zNB+mrIp8-kuVEc2N{!S10QqeUZd8lB^f>VHy1xHUbD8I5P7w1TFEm0o^(V3@>~8z2 zhu}GhBy;5cC9*k>@J7KeU`|#RL;S~)`HsT=H~u_Y?Z6f259h$XXLCk)H-(uQd~9QVy1eD5>hlbwjHonbc}dwctB(k@;XY;rt!<+T!%mr!0CF z!0)i*_WKNDt|XT%*$(qMqXJ@^w!BSaGZX%oHmaYqZL~ik%Z4+0x;|0XsdkM|ytU$9 z(OI|GASa!8E3t|k&DiI0xQ%{xqO!vTAY;~S+}N-IQ{(U0Se>^z@5|OyQ_MTjv$|=N zW)RNSU>u((pstI|NRdJ}f8$DexEhqW{#C=OL#}mgo}Xz5V;ObK%ZzOm0(i zf<`I?(Kw{&sbMkaQ`f@NbqJ=4`;*?Bg0ZGy1-uY{sQYYAH;^1NC!&#$_YnaDI#ygp z+qxskOFT13EyZN;DEh2L_qoWw=bahpPU0DgAW$=~yPgSQ56Jl{FB*Y|hsqbdnp>Ev z$bhw$Rj|6=*{rt*QcPW)RDW{ujt8Yhpzk#|6_3 zmVW*u?<0DB=)cD9e+CcA?4&8apQ#m*5?=fr#~(J7Rcs0;-Mfy8qSVy2=44NxjO=SC z1XySxFSc{nt;ywZqLh_iGS*j&AW_oyd(Qtbkdm-BpoH6pVG7*NCR{s)wq4SV(R~Axw)nG-7%)sFD`Lf*)`A zx2srcL6fI6FiNG8HJFCa=|WM!g?QSI0y(M$6Jee0kN2=FT^J``J}{ADj_w{MmFX<(9y-YnPfZDX zzTLVg1aURcUr%h1z>?}$2qvh0%8!3Yk<3D6UrwZafk@j6zoky^?BoR4*|$ugVtcYtSI2xq@7MQ2A$uUTt}g+6yY+fjbhSRl)Tk zc)==1F*42QkWK?{9_djisSrbDWd&1fSM1I_Hl=;+_&~x4z$>MVNaN8fKUo;L5+<#3 z(p_nlayK1`r#zRBsRwolPD3Ca-7T$-0lz^Bii7WJ5R3bg%8f%T4N2~h3M<)pNO#4= z*|*z^2b5hi$pX`<~N87o2>=aRq#~LoBoB6}6WPz|Rx^P)@{_h2Ym*0?GlM&pL zOc!RP^CvEd6VFG~grjW$_sadJ2XXHZXIcEYO=1)-7wIh#x{2qI)8O}>Ug!B<|HbI2xu;eN12juy{ mb zdtBt0>uYqHHsOJ;bU67X;X^E!?@szT91-Dvcl@}55mhIr#=}pD_tN&sa$(gpRFSf{ zu|OE(Kfpzg*Bd`vq81Q^UB!lK@`yV}=!x~dq;d>&vLXpX`+cb@HN9g2df;c>j*m`4 zFKGc33+b`Oa_`xy@MMiMh*U$BroM>sg3qtH`CqHOT zd8Gx+0BjC6)SniPsR1JdyTe%=*kxnU%P7gc|lU+uH?Sh)lMCa)x+0(1Oqvyt{JIz!^9<4>`?4_8cO=7^~Ot2ViGt z000?MWaXVqLm<`9SIk08D6JVT>*c?&(IrZtm-7GwU|}S324E(<=Y`{idj}pMr{pjF zCci@2EA{cCN&)lg_4n~{@H@fPCNgni0}1;702#?F$<9UZK7mKq2Y$7IE}g>h-$>WP z-~YvMEAM;SWQ+K&!Qfq*a1DW_@Jkl^305YnDKjBq_I>>fI&JR$q_wswyZEM?y47oX zghZD<8ZfT?`nLWbpgt>^(z&_PYDm{{YDn6F|3X|MN11X;cOOOZ}FFXetb3SIO%-RntJ@#xsE*l@kd&shclx)<6b#Rm4 zn^+HJ!{evLzm?)~((-aKRaI!BY>|Zb@3BlB!Yswu4Zem8=_U!_yo(pPz(;Ass{CMy%@*exFHEdh%k^?DrQktr7;7p%SD{+Vhy* zCI$z^dF%dX!^_Jt$w9V=;d)Y!T6P9X8Oo{w%Q} z42$5fs?=;5en8TuRP6+OL4FrH+>8xJtFo zn2PN_4LEjDx%@Pmq|#8`GXEtLrBNMO9my7)o>VL7Mh;7}^{+6_($-wb?Dd0^oT1=z^aOdCDvQ6r3=?W})Q&0m!^1Z(CAX;79C+#wpDUagD^9 zTOQS@o-MxmO8~PbSZFqu>}{cIcI#EYh8&}*E|PTg*?>}VX`M zb{dSmD<074tlh-;Fi6*$a(0~Fi0YW*GsroiurSTn1X5}G%GUj0{E*!@zyfhlcAxLg)6R$BmuSVW<}Ogx&`+T)6c)XaH%NWuiN8= zv}l53y9-?xO%t)*O_ffgxxo(=7hjJ!mV;=t)#6Z_KnZjTW=Tq_2vQ1t?f8Ylg;7xj)&61Lc+##@&nzYe2C$lCgESITDH2BZ1p)Y275h^Z-GbG~uVh~ZL9QRj?94hcR`*Mx-A zJ2trw`ubhwN_;?io%9$l3B{cnz21EH{Rc)`#()KRw8R{ET!*o_h|t5#_-+1TN( zkx+LuPLP8UWl(ZK*KZ-jy(vbrK0*dS7^?sul;mEJ=d*1Ww-Rpr_-fJml6|sPt9I3| zzW4|JbQU{%V+%a$eYo(ARw{|Brj`gCPsSJzw*-E39_iXySO+COT`A8<)B!2OMmRgJ z$BTNZkoEu>UP&b#99Rlc>9hWq(joOk4?L1n>c!)8D$0Q}cc-b@3W zWbINxmq~*68Hpe{+&sh32ovloHxqrMeMt={$qx5^ntvU&d$gg#QN>!d-C#1?N*!v5 zr<_%K63sm=HgV5#Zuo8~FV&RCS`71F;4Hs&aVNdPnqm!q9UZ&Wth!7S4h%98j?u~L zOcDDoOojWfO}5AisK+b3PN8L+f`@Q2_v-u~VD-uw6E*y zjubhZAJcz(e%RU-x2{siuPci7uUP>~Ruan!E^5OXl$DTPr*=_N<5A^QH_U}&2>8hM zVhYl50`8?m3BkJMI#Iz(dEms%jy6P=!S7lcwfUt5@PPK>Po0z5G*SJ}kzCX2sGd6s zsISfuE%RX^NnW{;g6sSSenU=Vd{|gS`scDwNOut`KE~Rm^_uxH0=VlP`6%>FqCJ$U zM7FI5-C#dqm;nzDmGW|}`v-qCCh<2YHmOarVe!@ou0bN`xMuCoSQT~x=E=Ye>WRQ1 
zOsR?N_?Cc^8y_KH{9HaYufj+RE#D$t1kbo%9LKDbdl=p)4`H3WBKXw@2~PUbmGzTN z2Of^myCQRk{o>FJiiGO&X73an{UO3VWQ|6%z zgg}SAN_8w)@9uV6gS``3+1{u9k%Pu+apc2?yMn8<63SL2YX8z*l(gyUf&bg`5X`?T zgOIUqm6Z&(_Fx!W`z^5( zc7zt%IjGncx__a-?7^N4DfQob(?`um#|Gj<>%$8z8$f(SgTEgA*m8}@>dOi2;6ntmARq4jas2fwh=_% zeJ@ac{dQa(lTLan{mt=k@E)t)io5h%LB%NBuVefZC5`D5KG_3b$Rf1})=#yJY))O-qQW1>SBNAZEu{o_>P z#}C@2h6FO0&DrA;DCWO}>(ipiNbhPVu%C0+Rb_HQr94DTU%OM zk0xs#uQ>f@B17{Nh7j%iBNCo&v8Uzwgr_+h-M>A5PvhUHJ(sedbNog)bJUlPbt~pr z#V#0-Lr55yNVWPh6TN1G`Q-KX=qx%xE1)ane9CQ}J|*+Q2$;EPLLS6Yd9q;p@xJcD zI8EO>v}~4V|EVY_gu>gw%+HO943E5IvV;sd^Ce*hG31YTxNf2di?~;x#fc!K!go#DN;&%}?JhV|=XAIRnHj-a&JE|9#a|*~1S#t6yhJOB7M@VJU*Ir&dj3 z$7#9Fynb{s>z((%*81P0Gktfx_EH@R(5}UTy($>|W*5?kS@rQFg_6UvovMe8~V~&RNQqcQ_h{I>7*Cvj`)koy+9Um^N z0DS!~*k9bM*YOM45k@_|VvOU#KdK$ z-&~DL&S3fK+CRxo)r^cHy8&vFAGc)voczb<#XRuo_1*FX(U9?1qXl1I^y`>RKgS)- z>;a29{{vWTStz|YKXP~aommk7{Y=l{AmSnC0!wj|RZISxo056 z*}v^K_vpa*)7{%eaLBuXwJqPOQ=ek*SG6#7&iC(WV&V?h%Se%J01wzLQ1sVO?A`F+ zZw-28iWZGvrcT|NxCrN9Z()fb|SpPL2_ z|D1LP1s?~dbev$~w6Pq!>`9(eJhDnK?eK=W{k>~SO)&m3I8;C*v0wDa1(_u0Q3-l$Q!ir@zm z3^B3pa^SqM3qC`CsA$*>FjO|I|NKmI2K$?{3myWm&ceH#QHDS!gS9ziZt~@+Zf-sQ zV%Y4ON1~%FQr`F+>F^Pp)AWe8x2K(^@}sswwMo332+60CdHK|2LXk5)2?8#Nd^t5+h#o z&7)Lv!7>D^(QUb@OzH0*E*!m7&{++snFYdy(?5+;25^IMnS2G|JoP$bl0AXzPAxQ* zmB#~WO>pPMyELIJIHsA+z?JQ@Q%<&m(%Z@ZlD1e#k}1b@!8V$~7+|i^TAcA3q~gDt zrAdU*v4|ndwlVE$`s9kU_+A~sbw4ykCMZLY&E%->jMmj#V$w5{z7w;9{V1t~;0+l9 z^_Hl1o_y4WBe=>rP|AbU>fCqi-1bUcGP3F4hW9+*Jc#Ft*kzpbRFwlDx?F*B8=(1c`J~BsD;fhKIrCKyOP8Cqf?>3sDh7N z=6d-su%zwfd(A1%QfgZOaSFPg4+Hkt9ra!Q4Mu%0E3vIGvL8_(IMnFd!z`#N`Q7NB z*Z%;8fQ-9k#txQf5jg$6Eop4FtSm(GAa+YV~z=H*Ke#Ykkr~s9pYtFe{!h z$d|_TT-D5cBFO8*PbkMpg4YjUf@sHl{%9E0oWlm{h?k9wuEg z(it~${0TN61B0+XFlf>x$e$FZIB}i#NxS+!>qC5SA=(MzaFdCu+py$@@rILT1>BuC zAt-Rj*bj8Y^D-uwODtjDOhc`F4uc&Rhz${hYafSdF_06D#MBehEJO^p>If7WT_CVd zip%-(FB*^Cew$1K$4OQ`RsU{{=8HQdm)(x~=ZMEU#jHy?OnMV_DV;Hpzzb?*UxmJ} z;{|#d%dKeqrb78REEF?AFiWTkfONQOAs+rdw>8dFOP4Ig6mE&hqvRQ)C7o>>08l{S+XrV0bsLd?@(vq?sVNz|b zK*7TxP~JahW8gz1a9+lkm1@?eTUGdFe|D>w!aVqB=BF0ot|8!3SB|9uEz8LDThg&gSAY* zcJwMV3s~}m%+KPL`qdEI-NK35GiW5D&a_cx4IG)TtQa;htTw$2xUfU_6{oSdg(OU< z{|2;YFzw0ehm$1;X^rGb*5DD-=Oda$_0gG}>OzcNpn`XHAs|u0Mxg@Ws$qA$;B-1% zX$tZJ9Clv|1cyY7x9cP)gChX!Hq zY2y;RErWeu)V2yX4+?r-Hm3okrrQ0b+B_S0pzz@Vq6rifwDv||D6SKw7~Ti1YYZsx zbLe0p_=&6ycOgMntNL~lN%Rom6_PBa$FoXj2=!IjYeq`(en7zmmr5kY%2g^A&8;%C zVmRF>mnRquowjn~O5lE|=5s7z*IXBdl&WTU20ZJ&yAEPxvC^dz6>ya2jv*C&EQc=! 
z7}GEC^IFfCIS@iz`Lt_e`tEpCqSOZXx+Q#cUG=|nS95VR-WT__mgr1mlWwV_-qlva zVQIBYpe`orrQ@^VV)U@Y#g&ev8v*=lH>=MY?f&CikOp--PJQEC62LZ93Y=D|b6M29&v}q?m5fU%G4%G%lUq&R(W`?G>aaf?E$WAO3u zsuwnf{wUNc%F3gyo!t$vvzQXvEQk)wK%8S#K9@T9xd6#x5*m?<u7 zorIEZV}?(C!YEz7a;1Qj?uo5OkAG5WN0kpYr?USynZuw3#ZT+%}F?;!2i=$H5Gm?f&<-$ zhgp$y_it%|f&Ks>o@7$`w(4Tjw8ARAP>1U=xQWh5sMj9Nse?C1=~9$W4*BHxbtR;V2oRb2<&|IGj+3V4aAFI zN)XJvO8M<)$8Bl-g=OF$dwph~8U|0i2mJQ+$92~~T*B)3B}q2iD>oE(X7zs6Ra-A7 zDevGscK#NEBj#uPZ+E?N1sW7-dhKOEM8Il9!*%VZ`1>gkuW5Vb?*y|43!ANf2K=oZ zJbY0`)s<7x!AF;G9X0iY2tGg(Ma9JJA13j6pe9XJ%@5s zpI4VTA0_a+GKJrxf95prpkb%KLK(|Au#j1S(6OyX$#KJt*@Tu)e-=LqJtnckRz4~Z zL0%TQ*@GiR5hYJe>SMBH=}3jzBkGWbV2GFySIL&v{?231cpjz7?C-`7*L$eLmxfdb%k{bAi(vVN&4 zNchQ%6bW|-h|}$0v)Ps6#M{PMoaxj8SnE;nD<0xR=kKxcxB;)g8H1))hmEuh$cE)R zaccNrsy3U82$){&z=C%@sytYco%k=B8{CIw4*jYcX%C2&Xz-xVRX{=$O@0s}9eZUl z&GX|zNL*}x_1W^cdpXJZTw(mnwqrm;6sLlEC+7#ry@XxanF8agGbpvn8yy=LU1&AZ z<+@{33>A)L*Wi^z+f8I`mHQfCyEvCnz@EetUTeEuKpE4?|F0RnYmq%HaqHaZXQD+s z=;PidHMpB2uH{)x6pIR}qp|NME=Xpx*)0judm7XN!l?2)7hvwZE31beEgT4421qvmf zNa^44=tvcXGhrV*)Y5CyHK@g-X!nn({`sH7nO{#dBVqwb(t{O6;qM8*c70WiN68G` z$1SM~yr8;zpxItTx9qdmLSUoaxZL93Y7DLIwLZ|>O77pPpmic)Rci zh98wMYCf_PL?82=w~H%Epi|tg^(EU}ZDD3$QRpou{#O4?arj( zIEI@(;wx<4%5)&6PoG5QLxZx@a7h{`1@fNFeCBTNUUNR@V(E~w50=1@X-L_{d0?a~ zm27ydFKjd8ac?^nE*}$wQ+Gc4?7B$!5(a0;4^AONsCY)V%0@L_;TU6S+C9@v9va!S zh`x6A$M!IeCZw))njO%h@P3)Dgw8B1rTmEXfcOl>R8RFkp5lDrXQg2D+-GC;q05~lm!MxI*<@)v zO0kwTNNLh&$N^-;Pv5RmUo*FVWP2n?;*g7~=Gg{S?C3NaQLv@a(uNzeHo8TO#X%3le#zn7$4+o9ous-n+= z2UFNFp25-RzoSTL!O`fnY0|a75y(@x^V13IA3b)hZH$ooTpj8}cv60%E2Zm1DdTMxgAse$YCQuTU-!nyvKpKsvLJE90?a$9C9Lwq}u8u@0n9x>GOo?k6rWcX| zoM8^M8?2hm;7wP)-aU}??qzL4^N<-UG<6k(uuX96mo0NaE^w5(5rPOhAdkgXkgWLj zTWoxgHW3R}HUpD*C1-QEe{Q@Vb?Z_rM&K@)mV9xu-I9u;3wtIj1B$;yA*^QTLoMR3 z9N6#Qnz~XNABJfX%z^<*C4=8n^2}PE7lQLrkR!X+%k;|9*GFQvv~ zX_Zzl{KxXc*W(E(l(wB_rRJQ8baNe)IqPDO;XEtTJPKV}%-C%}uoktuMp|_z&~koN z2|`RCxhSA8lG-NC`N|&_hmiX|G-uXK%f`S6GWQgG3 zm+hjSCDngE8D7gZJ(q=*=w(Fp0us(@p-q{bJ_TA?zekBZ(R7@fxx-f%r3Pz*

)vnNjuTv|$@jG;J!18kV$)3smLkil zWXEBRXS@G#U-%RemIt)-2E|0sXGvz09dpN`@+QoBaM&sJ;^B7m9M!COMSzWqtDL0B zuI#93g}S$srdS=Fet<)s{2Y+AJi;M8pc)h&@;v3qY`o~nt{dv7;O!`;02FS0Pz$9o z*MI)qyU8m|#b%+&$v%(!?oY&~_;_pcWS-iSD+Z0F0Xn*3(9lEP=2_>$~^qa(haVt6x)@OhWvPlMk z1}RrnXX<-*`DNp}RW;sQIVbkART#f+?@d2o{xW)YzlHYZgReZVh0>Sa*jyk`8R01~ zsO?G~i>1`tnW>_^t!ryjdseLt-;F+6<`p1TP9KfF(ssv_9vt6{r$}e6%2Y9aKliHO zbywyM)x$@xvxwK_%E!z>(|NMFSC~XY{@wqbUXJnSWNroG$m=y<;XMNbDBw z2P?{{#{{Zl%>toS2H4(O8hp&@a)i)@vN1*Iuuoaa%a3p!i~!6u^c*d$m&CL2&9eQ? z9{#45A?D{U4QY);>J25?(Haq7zpvL`C_ zX0E*@+L`SFXShiZN=K2$YipeM%&_vzxIkka=Q z00?A)^{!OrSR6$&$X|t8jIf7XVFz2DSCLxS72{=WPA`8x?QSXv@`wo#5NOk@^z-9W z$ecXlp2Nr~w#1>+FhEK8Y!0N>!Yxm~CuS zVrPUcQ>|fqGq9@(gzBYJg(SK)sYX`AcCB3Oo#P4uW*L=6?XpjNyL80f`qu+;7f_6d^nH)e&fC&up(1{a2zCNj|4M%`fG`6BYdlF>%z&Vsx}7^OSRHddZpO`7p;@`|dYk&zKa||8s7wb#-D>)~V9`m; zx*Q?VE|+Hn_sAEW=V%D5n%W5IZ!}dI@3q`33r(d@|5!~ex6I9!mw75p6NvZ}(G_5) z^!u%0&o3h?#9xafvmj=}&&OgS(A96oQ*_{3@cM58)>W!Mt8*;{4?ta&mKcd*CfUjr zRVF%SqcTq3sFS~ad%K%ZM~`}9_$FvrsY5N^+A*$LJgm(%UXFT+1SFgF zPyqt;n=+w6+p=$GI69Ulm#*ZAC)oSj+Ex+Qi&Mu6c_0<%-*1>jDe?JQDW#ar3Vk5X_$R_Aw=3>)z z4|Fb_!>0+^tV;Me==h{nPs3GHJR+Ahvah+74Gd4z1sto!&BadKqp|;PFJj>HOfmJ? zUD8xPn0**Em{T>!Nzel2?BUE>&xUXGtgfagHirNch~o^lszz=`?+xQD88CgMTaZh2 z7r9EFMT`X+hB5~R5O`X%sqs14MjvjVaFAo4om+X>fhOa3)^W*6{xPn=B@E> zC+lp|c&cy(>>(7#%<21vIamaS(|$c*R}UK~|Dt>uYE{v6encwY&O}qmN9j$Ra$&Y@kR8=8ExdN;w1l=JBg>W&$ALal2ZYb84` zZL&^`==GAvSH zpuRuW^X)SrdJ-WI6XjP%Rv)phPkshI@F43R*X#~WUX+1dE!%IvhQM5wijl%g1)1EQ##g)5fc{KPvc8!+2 zJlHkm6N|QW^odQ=TF@4bvm>lZ{Md2It-UyI_biUpswP!9ii-iPrCymaq_SAsZLYD~ zgP_%=g2`oPYs28Qw2IjoJy?SOJVh&ZiO4=Q5E^$HD`{U+fbv-w3fK|q%KBJ8hbr0` zj=ZeEicA0D@t=t3zroIX1{`Mq{KIJWgeoieZ=Bq-a0Hdscj50Qnav4W1~`?9v}(yH zZg8g6-9rzJAB$u9Lq!YTYq3lHl?l#*@ntKceugNUb0NSZrY|fcVWBq#&V*??BtMql_Hesb&;_ytz+1BtywD;+eP~uTOGe1y z6L>786~ZPa3Bxo9qEfGL>E`hjWN4I%f{}}ecT6w2XqHCT_5CYXX7dp_K4x_yVU}kg zpt(kNg|G~LOVQQ!4aN4l9h1i?Gc;2YEYl?3Qg}@iY5Qggl&rxp8U`KGjcYZQEG`Vs zm{hERkwPd}nTTuf-S`zfeZ_)pxT@@`80G#ld%Uv~rtSF6v#=ZK9ouSZkIvbIakdHi z6u+Q90@hKOpT$VG`!DKF zOPs@M^3zzcAZCL&0Ic8*Stdb)WOmx7}JT(9?hYxf7I&s-proEQ2zTPjte|R*7AHOSlpX{GxTdYu4dh&sd z*GgM^VIazNq&}n|8Y!XKC|Mgf@AcpvIHR12&>S}YzJw<-Ys-Gd-sWzEz;W#nw5&>R z2D`)xwEE~j=kX1r4nxT#zFn#O2R3Q9%XZz?`QqYeC)RXE#|Mq*!n8b?RWTV)*0`OV zReIKerZwtB*G91?vH(#>j;C&09kZN91i--^93ntL61gPchT}Xf0Dh7^jB5@lNXH+Di?c(>Q1ls|Ti0vGLcLXsbX*7hT4YYC_LzITY{RU=8+ zQ0B=0(wwCgM&S3RCh9kB$ERVEU*2{_r|E{+4==QVNm*YTeHm&e_Vhz6K5Su%Ukp1P zRle`ELmgVeg)waczGwVO&JM+L4IE`sE}#$CeQ` zeHhQuOH#`>Llv$|StT|h!ULQyrVo%Sgp-;Pgrn55{1a&eP3UkVYtH|=7h80X*b zqcw7}X>8fYmFF^m*2ypx2BI}UP%Qq7Is1hN=qUJxoKY*#s@NkPOK+MPTS6Gy#D$eH zKRbDIMlH!WZKxQ}*CuDDBR5$g9)f6aX^q;Wyz@2?xzgdQP0p*g2h`LXPyYj1#_HOd9I83kO4XAQKRuSLoS+Gi162F-U49KXhnu zxXQ(-9L!RUt`l}P&Tnqwz;)lsSj~kZ=e|GaX(d0kK@6bsYqDqN?^maXUX-C6V-9xz zUa_XEs*KBSu~&>Q>m4u}`IzT45Vf(<12QQfWu!#r?_{hDIS_N?z?9&ee+agUsj5w{ zOrN@mWzG5RN~v(L^Eib-xbmxmimReZlEx3!sce%>2f1&Gj=L!p z)rCgBm><;!6pUa0HlcE|7WHG;;GbbyS=lrTyy95$y^^w5CbN5I8=UbooCZ{@`CehJ zKyP0O!jRBqx3QJB(X+Vy&r_Gfg*~dMA4ALCmul0>z5ik#=^*uOD1s_uf8`l~3W#_o zl*biw>8D!a#;id`F_qx+Azy)OK7+-MQ=U4nW^gBL=_rCC-%Wr}lRVRL#0SaQ{q3vV5p)~a30KE;nx@fjvNy8U4SvGt zULF(J+FtRNPL#)7!*3Ek^jvw^Ge8;f^%`-ox?sp5smEHdGGcsv5@{xDFpy)vqJ z%#5NFrF=So`b)8?V5=fQ?fQh)TVQqp`>>Y_Qw;g1q_~5&A-2j5QG|*l$Hu{46qm;C zCLC{S)XLwoh0zwpp7HK2w!1G}qYFme@x|Q5+`gtj*++p9f_Ay$V%cZ%u<_{GXilQ< zLmIH3tR`w0jLe4YMfvNm(^0g@2ZGq9`dkjFad+lpeGFkqZrHBM{uTC;qiW^2|(kP&!%G%V^^A0i+~u z!P{#k^eH#B(t)HiU;Jk=I!Ex|Pv6gO(hMFq+TS;qja&BKoqX#*`de8NFv9kaoO! 
zfBqI73gGx=f~QZ3W|@=!vu%|`dbd3v5-VO!v8^E9nI*?$jHfv1>PL?L=yj6ezclB3!j=k4G%9P^ef4 zjuK^`T~{9K7O&8i+ke_6SXm0KZVXZ=%)TI}q{;9_yIo!R?0J32I5o4-Xh)TSPy-tH z^1g2eRy!I$9eCHR9tDT>tZj$9N!HG*uZDbz9c|nTf+%I<-ys)YeA2|MD4J6*5e$yR4 z4#*B+6bnL6pTf~wI3XR+Jqi_s%?kMWW-Qfo{1yb;whRL2Fcgdf~7FQU&>pzM+CWi-#^r$VbUe|2SbfvN$0!wfr@VwS04U z2Ag6lA`;`3{wJ{_aS0oJ2>}j-l@H?zD5`8(U|T2GgRdh{V{Z3ow}=sb{ozGUZ;UM} za`J(P_XEYAX!rzMogr+kTaYGJC8kpbcUyP7zE|#XNO7_4f?q~LK#x?6k7g($oiPSdLn3#NELG%W9NbHt&W=Y~&II@*R{ygIrr9rYf3m^AFuU z;ccmY(-QQA z*W^Gbj&IlQx=8C1c^ZcKsO{+;$2Sk7Mbqo;f@5rgQ-)w@wM~4VB#KhpvLnxqVkq?> zb_&Rwu0+)W%_Wb(yB(%P=$$Zg#npkD57=IzEy&r4vBfJ>)Z)(eNdLv7U%lCm zyLVVVW?fCqY*MOpz(-bOHT(7qT>r+)Jvwn^rk1@4WDXEDHiTfcGW4|qX`cpIB3Z8} zoVmizII};x{PgGoHnuwb@Yd_<9>(*PTYAAfOsWJL2t-*I=cseVEZjhvsT}`|0Ys~? z%-$ZHTuoNFL#(f?sS9SYFR<;G61B}`pvtE1+Ig_2KIsclOwj$lkat|C^|{0>IbIg8 zTlggSVGH#>MZA;dk5e7%u?{}R5J`RM@vrLY8gR%((dJIbSnM6Gm}wWzC5NAvp0+xN zPe(-5fP#!djMCzS8LxPeP@7^Z6r`%x=ToZ5{_3v$=y%ucCIB8AwSpU+<}qxB`u*b# z7>-|cBmn#vzHAg;&ld@kV-_;$FDR3m$bQIR5REq%oVuH6gxw9hWq^6(0`__r7GWg? z!PJ{;13-a017%*Qwb1@Gs7bNOwkD1hj2C0(gQNyrTSJ&*V5HTzZSBmo=`b09SU=ZF$33esVnqHgVIt8{^%`? z(p&7RD3;wj>WGsEXg^O+m zG6|@2NcBeZ1>$ThL2sQyJ)QX90%{s}ZFMxG!NvVNyuh@H(tj!427}!rNDfxnW%NB_ zsTF|BD6}bC-~JmXy-_vrc(0j*d83=7S(es1?ppV6d9qgu6}ax+P4av|l-H0F5^Ql9 zCJg+V47>c~_4t9pCSDrrJ9(84SgvA(-RuWOfBZ8*vGVXSf;6@)h8|}t_cGF@$Wez* zPnoSyBl>{8_#MR@;bR6@*M5>)?egJ%Y*|fot>fYT6n74M4NzND`(~GtV}=^g*cf~7 z%Yl!Xs?WTpThqI^Sv)|YWysrf7}M3;PNz)R#vr=38ZPNKTiYglfqCJvoVG zsH^l7J;5_JGgiR{A=WoF#~*CiOje4C=w7j4()Zf8w*K%q^=WPruvBKNk*O7l5~yEDOxRlL>gxFUrIDzh#T#EdPM zGTriW(-n<3Yr7F*@959f(h-Xt3z=kcol{7gCn;0CW-|0C>buiML*lya}6-#cMYuQ`@zMStPs0+=laH=md zDOS9pRtvKzg&+MlweV%DQa+!4T)4ms<%uPhALK_p2E@Dhbt`HnB|B@rSOo=B3rA1K~7iq;j>RM5OVme`|qCn-vDp-3^ z{+$c<;+SB3Z!fc=Cjgpz-95kkeU>__G1aXnfJ$yn|X@U?!8_8YBh-Sn*3>l%;v z+G$v%J<*+XJis+olUq>sa`zdC6X80n){K>4dL&%9cduCtxK>n1Aj}me2zp13J#GSP z$RdYGdy6kj{1k)(A(hMLiMJ;CRrLp~G~enKY2R66Zrt^cb`!que4y%kOHv;tBij8A_h&r**;g^PuFqVX^$Uc z*qL`|%(SOT#!NLwGY3-jgY7zgAdN3G{i>GhzqWw__g|I-1^kF$?Qo5Y_DBw5bc0)~ zp0#jSdO(gb0H%GXa^o6wMs!~o^<4)(CJ@3M|LBjN`6h5|GjJ+TuB@q%vnL#jz0%- zS_TSQ-LKx=tB8G>X7V#N)szVi@eF{b>fl_Yadtgvzx+M;0fw7j*^GQb>UBk6DqbNt zVNY+iw)}FMBaJH=*->)m&%QnsT73pgEq!Pf-(GA-4Qr<)0yKsu2XFqhpjwqURbU>vU=!*3%&%|9KO1EaVR6m;YhP`T(G z{I9d)?J*`yBNz@=0v`oWY&l}s~?)u+3MID6_ z)|x=LZwhXlIuF{Yf=bvMtgm17?I?|?lMB)yD+TwRmibsO#18eDZEDFURwYR@ZtJ_NVws9$1rXtsR^q|J~pxpt*7~{O#-}UNlXwuJ1yIem05|2KM zy{BGMR3ANpv)HK4QI5r1UZkL{ZgSU%sE9Hf=R1CPVv`OTLTBygn9y!k7lP5g5yh7{ zh*x8dRgd<;T%F zbDS%tfQ1M&5^{8z{e0cjF#uO8J!t}~ZCtg?zwsMWm`qMMUd$?1>nkkoewrd&oo5zva z0sWMUnfj%lhJUjLm^fLSVthPh;`DJL5^DYB7Q!lk_gar3&ximqpL#c5l+n!-3^4ue zA}VvVsWDL6JCKvB`!A0`enNWy;GfYiriX9~wgb4ojZQ9xwli5P^g@1QsAL73+;(4! z;FFm&H!|yuz4~-JtUvFp+`Q8DWd~l=u~rV;pvFfu#B^oo{#*yMlooVSFcCr`me3-wnyk0;NQjSvc}U|(cb(Xi&xi~o~<8>$v`)+aJpI_^p$m^C9P=f_Q~EwbP1lEvkc z6yokx>%(atw5V_i|5lgP%i=E7v+;NeHgZI)$%PYv=_=@(7qAF|#^6ZUHu{eA85A>#=ZF*@tMjsxA89i)Q`7efVp+FvX`UA)@dhy<&VV1}NNY>ZbOG zk(C!fQ_bo=dd_&2w*K>zOvT0;nYO}}!L&~{Rz4PN`Ssxyjh`KO37avHOaaSKZGf&zz_np3kJi?!^HXSJ{LhAQfG#_SAn6m#6lP~KB! 
zPCL{QF;Xo5h}zipySeMV8(~$uS?`gn%T&lko3Q8?vqURv+FQol`j#K5)8H{s@K0#$ z_(bmVbEQ|N?UWsSS_r-rX4F%VCre8`LgRtUc4?ey!_k{yKd8QB_Y;lep-+#jgkw+C z%ew1!unBO+W7VF;hPN==SjzMuq?Wy{Zz6MS<)reSPaCzTnq9<>R9Fd~pNEMk$iv zG%{ZN9B+!r3-I6B3(p_91mB)TegB$6#6O`jOU{umC5)L(_*+ud5*Lf@2Rl>T6g`CV z6~h$?vKD%%#!8l?NUM^lYY-@pNVr>zi#J1E7tf3BAYtZ*#oPMew9OEn?|d>}R;p%I zGb?|&x?LcEAk>=JI%#e7TN|yKN}TGZ!VjbTOUTcF(^sEAT1K?oZh}Y2&hD>I1C0%Z z)OzuL^rgwBL^trEl9D7c$`;j`MZb|!3~fP;9$Y2bD4;a=UV{i(JiOG>VNVkW8)3r6 z>g}<3tOQBkA2%%wsDoiI4W!rkm+ZQFswmP^et*HVoWV~4*W!F}2}@`-TVbRrFr7x0 z=t-etvlmG`Ftrtx8bt=d5}S_i{HCj{l~}{=t>o!Vyl%GZ6L;|nZ<&7zc3k_er6Nx^ zL6uW&&gw&}mPa@A-p`5s=g;B$il~_(>_4n7xRQ@K@hzQ;Tfs@mhVl1?(Vhn;Qw%07 zb}4oaf~=eZDGL#%1NHrn5}u$Q^u!P+dBazPTkG$P3H|uaGkxkd{zQ9=z9|*^*aU%v zxap3CC@XcYD@J++)bCuvmpNQX@T}YI;R5mU>&{A{jsEBiwv{`bj8yoQVU#0>27kp9 z1X#F_bu|z@bUpJ0;iLgddY&em7>(-Yj83|ynm09hg2~=^maXVAU`o#UbxZtDdY(=E z6WCRxxU%1fG8m;vQeRJg6rtyA4)sHL!72T|Z@jMFKH6U05H`!6>`sf-1dsFS{S4&ay-QAI&FO4#P~jqq=04A_H|Ii(k-frkB#kXKEZbT7kTbcb@G?@8ac=* zs-s^Nhw~{V+j}~}IlgK_%MKY;@f)Z4xN%+sz#lmpx}F*0eYN`F3N!`Fky0jew-^I9 zwkq4)_zq%tsax&^N=#GI3miM%Cd{icH(vzvpa45pPudDj@hd)#BLM%ZcQGbX@m`c|2nx{zE=%5Q!=^A2#zi*`tl5^A45Gn>ds8S&1;;!$k3)_PJ^RxFM8vS;zB_#D+p z6(@zrLAvDNZ_f2A8am+vQDwc(x%NnZy8F6`Q6yf7__k)=npS97K%wp=g!fBTYL#N% z!xpjwgvdlx>*GJ@-&-a;VWNioSw04>Eu(RcIE*SiyQR-Qq_}M@%Oh3ivsqV6{E9rF z8Dny*pFE&UUJgDkRpUsZ%Z%et;eR|VY_`>TRK5YwR+Nu!VoATGPornQNK0Whgm14z zhN$`IDV575{KjNfUh#;)q9SP0DNf5=YAGt;B;TO9eHu|Ch&}5WrFyJt z7V->)$I>tGCCRe2(6^w=1X-f5R$1x$bM!8*xs-K%Bu#Cj_$2OI*-&Ec+vSQybZfNG zu(#iSX&DnOz;AK?QLJKB#FAj|y_VMg-6_p>)k65AW zp|6frF#7(VUSayJ#-L;zly)CJUS?rvwZuB~p;!Ki6?AsA#TA9_-*kirY~ecUHhZA>uiKIW4%YM8Q5QLmUh1G6CN zV|NLBsCW-VMaV+e4&AJ|^R$;@DJex?HW$1w?p;|0j>}8Fot9?OLz`Hq%R?5Us&jNu zO3IGe-Fsp1kylSW?v&8CqnS%fAQc((U&#ZF2^%k?OIAnyf(k@d8% z^>WQdOl(Za(;a@hGK=@=9|f)~4`Rir#mM3J&So+o0XyYj)Md9nhb_1DEI5)yZUR-?RZPck~QlVaJ?Z))6DOt}x$x&`>$O~uXrWBd zr&!TIU0tO;VRgNevZv^6oH^#4POGZNw^ns%IzOp4JlcgRkPVuOS$ES_*;s7nuvmqA z=3m|141?_J+KHlI?+4glJ04&iHxMCoS7@8p;N+>beEjVMt41tGM-g<}G*0*G$W@Gx z8i*kd%?~+$RM+4*^jps=)k|<{6O|Wr?HiXU=|40+mj{mPwps`*)HIEiqpJGEj9NS2 z3_C9_8_yR3u}Ljaxk3IsgU{mOqvvO$HtvFL4ajK}uMVdRVHpBsQ%0+?45PaKVp1Om z7xx{*bYik$R}4gryhMH?4(&%n9dW5h3|>~8)r*ynF?J{X<^7M#lO#(3T<&72p6eb* zC`aX7Q#&=C!tzz~vRU^MhOerKlP>5hT?Tbv$7(m1M!c8yichf*s8Dj&Wx-{lR9YN| z{d?gg^jru6j_9P3dAv&tiMUACS`mjaM(-g_5cX1#Ao%5i@n>aSY1vFCk%r9Af;knb zzh1fnZlAYn>;6fO)&(}2Ypad#inpNZeCTEQ(3RUI{)7JbEQbWA-eUxTQq5?D9A`Fq z5~_qy19gwF_T5VBM7;ZgcC#!o~y1EdT zQS}_ulv@Przi@E_5t`^$a67HSgqs8pGPkmV-B*V`0<0c1Scq5qaLrGoDXsQ{42_6P z(ImI)H45JZ;(V)&BF0awn1$S$xz{603yp7tpho$-do9To7QdyiyWKQlA70R{q6yZL zeqY;^&#dvgBe)DOs;4`J(azWY>2omh!RFmrgidX#aTpSXPAlC~adA&K zy3&^Lko8^X=&S0Xl*>>pzp9Sk_g{on+U&-TK73BLAFZ(Y@`Zvmc{Ic6*oi}C#-!~j z{FRFJglW+f$DDP*f7>leW=9@H*eP^yMl|A?O_&%!tGM@yJBbSQm?L`)}H zth5*LNFUv6s4iwq@TUs9n5bUEa%l|ECoAS=&<$aluMJ_W#H?hLPoE@wGsfiZ{NjhY z3W3@emP?vuaO|PhS`70X*OZ;W_xB;_p4#H4Th2}G>5wBKL*%|QnzoKoqfX3}(H=;? 
zc0#*MUHr~>9v?&}frE!wQ;nm}q@csB9e3CA@xw_PuIc+_vMbYvyx-h}_f)jlie17B zv!`?dAryMN?1e&f1=F+IAAk`cr2A$nOf-#Oa%QypU~atDi3-w`#6X6a+HjBa`^J2w zThr7e2>H$Xi^Pc^#^cs4x=t?E!Ra1xs9}1!ZVkk36}bKHglPAWOknRnz&P7VN%&rb zDc!;5hs_8R6bpM}Baf!^7W(wnQ(G!MEFa+dY_st{z=U0jqeX35{#i1KBe!jFIhO_^ z1s=mGA64fgk?AHB%jt~^cC^!l<4N9`3JbTo7e12lYj@A- zi_S9UQ@I+3DI8VCrhTd!m5K07MO8H?A7p5z0}lU{Cw4xrmXaNZ0dMo_mUjz^R@5t) z80KkkyGnN_-}cZn4#+TWY4?-W)TYk(hr=^3))h@iR7N)V1P+}TV7%d~9;RXv$E`hJ z8%hft1uC2m6g92H!byVLa@h#NJ^Q2^XHho1XTkrn90rlEl>Oz!8AV3E4T~DR@Fu>k|zrc>H2p`vL`8biopGm$x8y z^%?HAY87jQIi!KdGhPw)4$av3{Hb)}B9vb$HxPAO)z0Il%3VDsT;8AWRvL6t*b3B+ zvA!9b69C6=4$;5vZgQ-*{RwI zN$&jovXfR4@9|Fust$$u^>%i+(AcTd#1^awC%GBtaeBVhra=*QSm|^H=)VWBGD0jZ zp=u@`y2Vu(!-GGs4&KjU(oI9{yLhm#-D)phlQJmi!W$@5MZkK zYUlhPKs#9+x3?D#gd$^D%`F}!7+$J$J@hbqpp!x49yVCY^VeZG7pH&Wm4q!BGg-mX1h;DaIkhD_L5o#j1WOxqprbo|j_~i}O_JfI_otC%?1ec~ zBMx(Bne3LTg}#72z*u^A;y)I0AyD1e><34ftMV#3s|y3>m=t{r3ApO2o@}%#AYjNjg4twD0BH^H^5D1Z7;u4EZ1lw~hE{E6hWF8cCKl|j= z#Y8^5@O&RSScpZWDbQiUZih|6)heTUu13J2al^_XMHpMkm=C95t^6qffSi)@oEroo za|$y|QuS5t(Bo=W{s(Z@G)@d%IO8C`SdAR7%)O8ie5eyvRS?6~1jjRUHTY1Q9o7_~ zWGiuWk1w4J!SN^GN%s^2b(lc52kJ$XNp)pxIO=nbc?Qyz6|HX4Hmul4g;j|l7oV44^>UjA5s=66hZN(sdvAKvJ9%WwJ}HwGBrs1jFjWgt&g?UYPG)fjHD z9(wL|8Y2?D{{fyCNWm9Pr`Sdf@XBMGw!zzwR3KF#*L*se%6IDu84}j)s>0M#fux%3 zCD6Zi7YndbZ5aJ~ERrNh0eYiFHQ650feQ)7G_%c5r;hVg^IfG+F!jAK;^>&yO$B zZtk%dRpqbT(>lTPYT^I8GFj4ZMn)Rv$)Y6m9F?5JVf;#MT_=ZxSzl-ekrHC?OIuS( zD?}falk02|jIT~uYGYnMVQ2Y^7@%6;pU3{^h@>{I{sRy`SIpcBR^hN9BU%9xYn>vV zfVYDTV>FdH9pH$JLJkki-)*dIGWop{X;J!HHO$!X8&<@Aqd4uHV~rIx66uv}&4p}? zL?XsEUP6GHGT-5^eXnz;;VcgiG0YdLIQIaDdHW0?P6}>@{A*lDR-bPR=8TqWBlAnQiL!E@voazJQhO z4S?R{$|X~fu>NC$82HOS{J+~?Twj@ij$@6I?@fytEHqN>GiPtpoo*VkNbbJLB@{Jl zzWFxYs|RjJXl$aXb&jJUdRU5Wy^xFyOc9lX8(}>G6J(PEX90hP4ox3ev~|8VC=IMl zE3DeU4Nm*$_A+j=%OJXO>RR8F z?#Sanx5+w$$6aGFyqKN{3^g4-IPPmw^Q(z-1YN4o>!7Waft&x?`qJaiQ1Ft+3ZtcT zptNs#Be9Ja)YM080dASE)}&i+RwVIDZ=pAbz?$fW(+0oYhNvl3mB0Ol$#J}^sYs+y1jMW#RFT4I|h`# ziXTthxL1$iv909$Y9--dQctbHY@n0Uv~j8eSEC##QV=f|!=t;Ojd|&O%`ya#X4F|aP&s6na5%W*kMEt1seOUi8AXQz-=i&)k0>{mcK7yB z)2R}Qa!bC$L1ziFyQ8^Ygz(qrYr{-85k`!JErGhPn;BGBi?><~F5^%*iI2=)eNKyBPN>#e8QowTp;{{6f;Y1-h$oQ;E|Eh7ruWL)|mR8;PMP%_AKb7 zz*Wc&AKEuV{ZWT`Qq_vX$ri5nX%#?AXnQ{p9|tqnmCGv@kI#^kmMteglN?SF+dTdO zE(|!Q1d?revtT@3vQqlq$cib;iwo>4tKdsdhd7@Gu7J(p-_>OprmX@?v(f7qiGoP@ zdCa-A8wD2|gNx;=FzYjqxQW?U%bAe;mD|&sHuqb;UPTja>*fxw_X~%q zV%J-__@aAKarSm(OBjv*TIV#EGx^L>%P1^8kfTLS`Z-ObNW00=-L>8WL;^lim^O2L7Qtt#On4!)eWk}aOK&|E>K<|P`G-J;9%;>CK#Rac6O4! z0NPiW-I*_Gt*DII4#Qv}fGU$ktpQm@43sJQ;z=hQ(_)2#n9NW%a2{SkfRXRD0?9vG zS8u1}baC*FI9zxL_kA!IoXj|`B0Xm@7PRrm=2>2X*kM*2kd1$KGrzW@eyR)wSVZwy zSXq#<%d+^m_>9i!Cg5l?k-%Cl5gX;_<=XOfO}T<{01k=5jw!j8f=DQfgLzD{0-*+a z8H?By-sG)h#h@6j2|2#`dDp_An|D3ghf&Piw6JKDAP5Hh66UfaR$?OtJi>oyW8WcgtGfaQWR?2H%+#h6|?r_}SSFoODOZ^%w& z9cozl5pw#!@~>t{t13i=^M9+KNR?hy!56~V21_g@4J5VcwvNh}XK34{4>4xSw5fS} zeN6S>fAuV(Q;)-h-)l!J>m}TdC+$a@ea!~C zin^}j8tPKn4HLBT^U^lR_Yfx*5**CG+1LYVu7Y`~U_u*YjuZGgC;sHBZx;z!$bPdy z!^{}+75pc$2FZbeeGEe@34~+#lyd`n8@Qm~g~w5Imn`Z7mb&7X+IsL-w!ao}K6eNm`#7Kjmuck2dnX zQ21br!c88H&EAq%&q4`ANBI;dr9voXQaE_Wr3x$`}ZV8za{NKq;k zVZ+Vywfz;k3PY3QTPHclubiP2q)uREmNhND@V-j8H$}yf9M1(St7+RRPyo5>LBuu! 
zOav8Y$JvQ-z2o2EfFNhSGg&yp^kB(k{4Oiv79x}m0tNE-q#iCIoI zgKsJEI;Nnpvp^d<)6&Hpc%F1orc8%|IbA`Sqqi;QrqM;q3|n7QY2?W`HmY(*F|W7v zqAH`tu4`GUkGpy^C2qx89C(b%Ry_{F$yVo>25#(?%mKUr6)q0i68FhL6wHX z{{eWpG0GJ6_tML)7)@N&eqVU*0zpQp8kfAUAgZ}_+scZiMMu9hNIuEaWglR8r z{Ln$T@PrNpX=~G_K?n59T&1(P46GB)sOs1|*&kCQu7xcC*{ERIA*y!Y$CY$llMgCGOWa zf0d4mu{Xx8qN9E#2biq%+A&4lOPt75KZy^Ju>Z=IdJuZhda%*?53tJ?`@yEAGTtvX za$KcQeu&kuAV-Kvo?TO9nFXVx&4ZE+?VdE#o0 zbB)$C(vIfdYV z_4=K@Di$+(Cy4v$}GoG z(of-rfwgXu!Gdn46Vx?BxZ5PVl~sO~v*35Q`|CSU0deiLj8}31ck6K#f&IR)sc@jJ zdy0V~MM_IGqkMjHf0HM*=j9MnQYuZ9c^*=#pC?juSo66banx&xM+W!WsDG5vEOf zU(wb}c{&LAvnQYidpPX6!_7f81gvXH5-h~N`>Qrav;)t0^G`T`3~E$;+sU-6Q0eP= zl0}lq#`y_)TcV6KFDAEBOl%+L?A=F{s02Y9 zzK38nX}{Q!oLLr9ICltOi0ApROZ3X zau{a?!R@6pA2?&CQ3k4dPOrQlb#!?#kf(u#55UtzHC2iT3gT|4#e9W#PC+N2$tOFM zS8JM05sq|vE2Ho@Y4HK5nL0|J)Vi7z3L*)2ch89b>C;1z@*wguy1%WIj~6=3TXZYP z{Y-(OesDn-9hVoe!Sma8=li!|B=_Hzn3dU%(l!$CL$g4di6LV4j#Vb)3nlX2O;B5R zXPbT|HA^x1iQGJfRcCJL(Bx)m27}$H3a(ZuU+bz%YWKA=U~~=EpDIH-os`p32KRnj z3F~#Az|qlR`A{cE{>VC$e&tu~du%ZKv2YqQ3$fyp{j{t_phL^zyAg!_`Gyo?cZTfo z6zgli&NESJT-PR;u(T$5h3wrvF%ON)@x8+6LMT+3=d(pidLyT@ks|{N;#py-cybRl zFS@@-i>>2x>VD?;0lq=| z03l<_9dsBd3zfHEc|leG0Rk;g6yx+)wMVe`i8KOt0x-g`ziW*#$E$u!kC-=o(M;gV z>gtJ0NDO?sf2)pH@F&YXS>b4a*uI@mkydckwr>jhol>(6Fkz^vB|c>$rs?!YKt%X_ z7{Tzf77bL;+#`zU#oK)z#If_&pW({s?WM!fOM=D3?d=&x3HI2Xpp+j??O^AO}Q_tmR8Yz|xod0f4L&tLhSgZzT|>d!|Ij)NXccAkSJ z>c;B*6t*u^1}jTTRo@;`4zg4lfZAW-X@WLe9CDoXX^^FOIk(3ho>$21lLMkG4VOJf zFw->;FvA?zWDTvKgNL6W@TxVXO^GWDUAJ*$*_uyjx!m<+?$&gR&J{*!Wc%($XLa)y&Q4Ak=htfBCXIWo3^0v>s2sbiLVJIv>A%zmPDJ@tIF z5ep?G2voW^>a10Sxq0mFmH+`5J;BOO^YM2CxZ^fUP@wzavZ^}HXg~~f8sBBB=pT>r z?6!sMoXewu=}U_kx|rsYT+LyaG%O-xlOG~N4lQE<+W{TE_2ZO=IZc=fh>)Al9b%DT z+KOWQ31?4#_~SIsHuBG$wXU(UDl{Xg1XysOPUkGqud zb!X>w{2IvxaLjiU(6XFRo-)^`Q~3u+!Wcg=P6UdzupFF^&V+*h+`tG^Q2htEN`*Zk zPmjd{yKF!L={^(WR+=#A)hRl4w8eaBpP^?R{qlKUq|_gcpLG0(HuU8h1!* zY69#EjxRTj2Z!(yC1Zsi&Gi>)Uff-; zG@n-ShK#vvHzyo$%29QB z774c1^{3?ur{Z3P{^zpC&=ihdTVLH=^Y+2B{F<3AXceFn%c^bwhA^5Ikh)0Lk!m!& z`%JtN?-5`recph>0H!ERrLguH3e4mEKT)n~Ol$Tp!_da;eClQ2mN>CIvdqj>MDf4E z{sTOH`y}oJq&59R+K z0Kxqi+6?scs(g|cT5r>U$NSie>lgk|JR7W^C0k`^1zDvl(*BIF)@WHhv1kwXjCRV| z&3$IpIl(b^2Gj2&MNbOZ7&I7rghi2NevwIog>{9IE;b+*_uB1SE_s zJ9Rf6Gz%fcLS(gHIYC@w{A^e^Q%U`FV?A`pw+gq=+O0Y>N?GfKS?Sg(l5t+Jy|%V( z00CWOaW}cc79_vR2~nu#n+3cCKNU9gmWtwn>QeV^{S&lO85E{G?WX0tdz8F(a0E4| zBI1SVrS{e>zQ;H2E8TAjuRvz&3}#GI7ggS#Ts9z!-o){pq&_H6CK+X*wJzFN*F4`<(@y7 z)X(%86Z)6KJwTpPO1;D(|>@3A;M70ivBbj zWSx#r584#Ptcqx&9LN+?@dhFaR;dXJX0~*(sxR6v{IA%4jj^6=5@0Jg_gLPtCR3irWo6r0>6uudQK}E z;Y{fT;zW;%3eHwQgajN3(pk9wWh37up^g9Ad_d1GRWnWBvWHS(9I72|z}j zXhd&E8ULDPtMVz1fRko&9xYP=;q&y5A!OQG;xN^%gA1>uQU@29Tc>}VVWGmlhX&MI zIjOU(V`jz<>cU>lQl$I7qPg3MQoOmW1p!9Nn92JF*#t+efyN}w-eSmrk1P&(uSw3x z6KII5_ZM^RbehWQrLL#T8n@`1u~yx7=6acx;T??a+YgT-ZQH6k!=6nhSqYd;Fa$_T z=^O1HIxGC@kw>OFzF_s!z44)`bEb4BdQxjEFW93VP&zvTrbPGO46)d8N+&V0@O!QhyqC>+l$x@Z^he{u zKquofw(6~-HXdybO&Qf6I2@`rlqO*|rYjqm zQ$TCKMifr>@ANo|#Rx|`aj`I~>gXp)=fLN6GAhxy2~~#S@I3;m{KvLKUC%38s8kq1 z#l8vz2aP4peS5>q4KLYXUS-ybZHTund~a0bLLO9S%oK>w#@;SUnPGmZ2V;6lHb&|B z+Q|VN&nl*vE01AdV5-!~S(FrYf*Xu&gGQHlF-LA`P+`7!<&sr3%4s7n0nH)bBFfvJ z;bjZv;KI4+o{R?=qnSCY2r0J<7=|MITC-UU#`;$kcUx1~Z| z-}ey8;$OVm>-&w3-DCnB&{mb+r{3~jAnrz?W?}4qPzw#5x zQl>w$_1x;^yIleSRE?XD@F4LQj7Hhpq8FKy%wu25rW9V`rvV$>ChOnL5MGkY(Di~x{? 
z1%1{S^1X)~)Zuv~Z{>@3?}Opp_S;_&I2LWvrv@E&e+>&$g{+q;oHe zRV`<|7*Fdeghf;X#iKOS$-{;Fj~m!zpkS@vVG8dofsgvPodBn3FNGNn_&I)POqm~_ zSFxBSbZFsGjhYtulxWVCCT^kOt6gYHV-DX>IKzhhx$lK`4F7NsZUWeKfWOPaF}S7z zjg3Jni>JtPS|-#7>2rvwl;eb2WxQ+N-9v){=jdsd23a#6wo42^OYS!OJQXe*aOSH4 z#TCD#lgLO-ZLS(HAmfW0olR<8SAF7XmHkg3!1T}i){CiPK2>7QA*UQkrNy__9+-vC z$xQ$!wJmMk;y9GMOK|wgA5=VyH96rS!_0g&M$g}rcgITS=0eH8z^v@frL{&{@63Dx zQr%w4F^|xtLqs1vk7HmL@BV?!c_D|u$14tYbk7o`P$4o6)6ySD!?aR7S{_Rzp0@|6 z?P>sw7%Ce^ZT<>9oAY1a>)e5#R;d_82=>P`cZmc2yI+NSK0aNR8K0V}TV+itU|8Y#jGn2=r)(8 z@U3_AJNOwi#KO?1hhc0KcUY76167%}&UZ`6?SBwK5^#e>F@WfA$;~xAoo%iz73vur zRzbsi(vCo;Af+qt`we28WR5AwUH*5@S+lsPCIAGO*pQQ5Y_bYNEhFZR6(E8DXDf?1 zjV_jZT1j14E_jA1vo6Zny@)n_x&Xw+EqBE=VEm$hzkVIT2+hQgHnQ`S6ba`1KLtl< ztXuo~bM*vNVm>}p)$rLUl%6M=o)2ALc@V&(6hjGi&B+xk+T7Njr_YD+U*g9Q(UH4f zCR5iSL~Z2uu#4qvUSP18SP(m-a(_1(6+6kbQ%E2PK$;WLSo?!UT=2AYsB17USdy|& zu^R?;yWzP9%)ir(DX%Y=XBf*FO4m*k83Gb|D#>v&V0@qGfJ^QsiTj6g18WVhRIYR+ zo918iV-AQ~P#Yc+c%nV$i6*H!gxd4HbWlF}N83-gxCne*&k4b8i%>IAGvGd3htbhM zm%|ztcX05fp;3}Ig8>I01tYlwWstwoGm`QZ0lG4IO6l;e3Q<-%A8Dm8-p0#T*#QP3e6?|XW!1=P z50%7DP-~|Fk9)3smBid}J$xJ|k*mTd@1zT|Cud!;^4cvPol;n+N-JE~!G~HDD#qB^ zK?+<>WwZi?5w{}(vg(v2OyA+me4DaVWpk7)6{M6CMj)BVB9EWYoF3hib$Wf7Ui&2H zmTx!(2v--RRSbl=hFgK zFCXy9_~)`O6!7(BL2mu~X|?%cP@#r-V`uMrrzep`))$#qp=Rt)%A`Q82EEK}K%HzO zE9FCHb^9>R@$TTymXvV&KHgt(n-;Z}s74-R;P2Z(o~{xn#75KBK1KRGa#5=p zWDsoE&pwgyvgIB6g{}Yb8xg~+b>(XlQWe!FqFOv0Tb*{yes_BK20AemzXdaYTDcGu6r=Mu5t7-iF*Auv6Qdpk^ z?zFdX30hGeqGY2C6)okNG5yLJ)$;NQ{8SvRw)gciwJCpfAoRqgD{xPQzB?dX& zo{c7>(G;@F?AW!B(1XygwUX-Uo4+5Vkt2fy`@-Kegze(0gcs8qDC2hNE7NlY`8XoQ zI0(-W0X1U#x1I_E{e72RVztI@FYs@K2RoRiM0n_j%Q`mS$Ml$&rge(2X{|A88I!8Q zB$Iw`a$#h#?GIu82S5TT?Qr5843fTy-rC{sS8a&+!D#Dv$p48d@^q;-7q}z227(FSnXL``Z0; z%T@sahymIe7?!n$Z&;ozVM@4`%Y0kSPv8kXc4HHcJNQZAa=W-1t2nA`8raQA?Yu18Ds@0iqO6PLqxlT4imIrCvV%0~B{jQe~Ob2`9YCu3l zDH9D6d5n!&&veTuwml9MHph_AQKsdmix9U5)1+hGUEF0-yIzbpv=A@kigBvrRV{B7+=VtVsy^M~ z6JcvMmTSuj%3|5^62$SeoPX7J{)BWlG~7~LPt1y61e6{Fyo#D!%ZEcIVbHMmiMMp( z?~grKCTI^is3e_vneO==V-f@vbR3ImGlYnjja{l^E!KR85jV!>5_1WK9T!TbUAYod zfkp?k841mBwVLT;m}x-5Sf~~wlJKv*cuu?Y%t9vllR4^drjbS@9Ihmi@CMNG8B7@$ zNdD1HHA+SR-zOL926B$sChO(Y{GGpWPKEDg$TDbaS>tz;@W8B$uz7Bv%1wWw2$PIy zTX~9OzTkbBIt1U#d%zx)u$mTfqGmmSRiMCDf1c=1xDBbGnk zfc25JLp;X8AO5YD8Ul0}OB)lmntxv8U-lrK@nrQZGZcsst*h43Z+~l@BEp*FBjk{L z;fg@az``EX-e^2&DQiD5B+~iU9ad9QNM}s4oz({sktBRz(aGo9s&wq11!tsEQ3Yn9 zutQ~4i>Ji-_7bv-`yv7i3zarEctFPR1Fg5gVN}C${M38 zca`bxAt%!Sb)gsas%Rn2P}uLl{Mv-DXzmgbX@w9tQH%k!ca-#3Pnk?VfnZaLg{u=B z1C<&KpVm;`L2fx*0-X$k`IfNfg_Zs{N`eDE*b%c*ig9zAMa`+>jt25bt3UbZKH0Tv>$1FPGZw=(@j5|OKa*wg zOKUTbS7gZZF*pV1YK4$|SDx{qU|Wh1MQntTiU?E#k*C1@dC<}!ZM&i?b-NldEsj>2 zLcw0q-^nd-pXbp9r_ObyJmcB#V6bb6Y|7bxmu;+o)JSWkHSX0=>6IX6FVGx0W!> zSwEce8hF^=93C!4EQLd-QFa+dF`Id^kYJ_-#9KS@S;ZEJ&3YKdNSU(yHjJ5~`6}MK z@{;yfWzG$VJrPa_a@~Z80aJ!{n1~F=lmaa&5P$1v%;D{jz<>1s2>Q!>M$S)O1ovBPsR{SENZ4j~ywhw?)R%v?6n#wG^3opU*cfRrK391U?rD<{91#FcL_=Ox6ifFVQM@9$;tc9Hnp zz;i3LLyEj(v_w4I76Z-XhLHC*01`a#MJ9~n7;nK`PVM|=-0L*0(s>ei7cCCo-AtNL z>=Uz&L9RWNMU*DLs#Sf6xS=&g6qei+ua0&cCcMKXI4Kpp6AjR?WUGB1r~ckf$P1-} zJeTs*cnT#Z=4lnI|L0>?FZU#(*IHW_X0WD)sci4g)JXr%ZNX7 zEbbOqlKpcZLQs}Gd|4tz-1DDcJh;H@ygC!I*hLXhyoFNC(ijzBq3%U!gJ-K+Xf^OTdrKg4S0YVCy& zwyf&lgIhwr7GxBj2VrALt^2{^k(%RS>V{Sy;E*fKs>a+oHTAz$U|9o<9>}@8!sBgY zbq3Rh*02up!KU8J)JnDvZixd`NOYB!04Wb#_OAu`4Fo8pja6pp7RA7pd zr8bU+iT;bwx_Only@U6Ho;qeMsJjnRU^5tZq1VzFgI4eRt?W>lwEP-*Vy=%qchu)U z0E`@p1k1OO9=j+9ZfG9h5>#sZg~g*$F>{;}MezHO1-X9Mu|7YAWC!yKtSy1$XXM0A za6i+22*ZKQ{+(W^t>%pO5_KYhjfAHXN2@vn^@TG^BoV`E!&BEq@-gF<^ow;#fbpyE zu@xECgrY|S9)hRc{b+f%ZDnmhJ{Y?yOH-5p;x=fUW$U%@I1fC;8x7j}qS7M4L*ot- 
z6AALY$HvWJEQ6SvG9j}$JZ z08En|z`?nK@p&QDiNyyQF{aIgT2fC~ae6k^(!g8m_{;T=N9_2A*vQL-(z*vBmvCy9 zt$(FueK%rh4mEx6%GdJ*37zscKw0-)JCTN;>(!AK-(LZv0jB84uYKBPKL

kZ%m6@ygR)Fy(t}-sglY3n(DV2;g|NB(a{=X+^fvCt*Mm( z5HE8H$+#aYdt%BH3|=-1khwXSExz$RdmgHfo0^(x7fybH{vn^_oin-?@g)5VNJV<7 znHXfa$;OL6|6mzos65~vto<#);88;@ zt9-kLH`K4EtWMDer|CCpxMQU9BBX;#c;cj&suXz+HGfcz;PjmiTIcY&W%$43)3D-} z?;3opf6;*NkYj^+A3l=~{3bdiFluoPpsQm8gZ!E_?y&ln^dnSlqiJ|qTVC5&W1mMR zcg7<5rDNPdvF4KT&}A996<}oT3meM*!#dBhUTObqcu4bwu8uzf8nN&3w7*5(502j| z%|i<%RjFlaM4u=goBV1?FHD+p<5WDvqTkYK6e_fAiO2hLm1hau+~J)J!bP+sHdr2! z?4=mFjNDJzT)$49cE)y=vKjdPti4ufGoQKP921^uIi+g6B|&|kB_NTC1d*m@g3YeS zBKSsXDX?omAZfOkL z8_RaA0m)gr+xkw|N%Z_lqisy2<)+`deVp7@QD*crbh8uo4I)klT3lE|@S$XDV(S_L z{7Nnb#O>IR^@gf`@LQ(PrMNXNaK=h+>Hk%E!-R8TZ<3K*fj{TcYJi@h4v)8mc0{uR z?O((r91Okch}}I2`*@q(dHmnej9OfT!W>x5Z2f zDrg(_NBa9d`qR_^o^}$Ro%cgdf^5nKH6{d_B`2qpg1vQN)nZ2}>^mZwDD%svMwM2W zdj52~ADLbCM%0y<5GLYh&Tdt2R;cw6RRuWMy_#AXd_)RcV}~&ZIt?j4v&b0CE))yT z+}#f&jvMk3t3Q$-O`v72{H8R5ufUj9SjGfGdE9IULtkH2=Oma6&~!=iFJqTgy|G6t z6CPp2IK?AfCh9-o)K`IKD=*WPRh+Z8b<&DHoXUqfDI8^V2jY*s_!qN_j+58e9?a&l zwPD*;zf9E3QUIH|C@?7{{fiP!algS@w0biY?AM$$J;jp98qxtBI!A%o*Z3{ z3BMC&#_*_WOCvM8gx-K!oB@cgY~vR5ZL|7@TTf%&Iyh1$wY)CH1p;`Wkql(JvS=99W-)uW4o=3ZQc8q9 zN^+qu=A~a>ljcqq>XuV2QLdeJQ`!sX@8h5_b%o=PX~EM7fdj4MAHt=?`G=75@885$ z0Zhcp3#8g$8V;oR&>@^hXsR=x9FDOkxxQET7eU%4iE5+!-{@(!~xN{>Q*c$p;>ROf4p%bM+mB}4Do{?Oc1P- zm)nUvXLDb{(rg9Mr*kfU$nDtCFodl%- zhQG!eAP0Fg{h34v`Vv?%gH`-Ot3Q{ceQxxr{@_M4SSAqPysCjwbDqZvKoRv?(yq4(td>2ke_?iW-KF{>cLQMovt-PL#D zfqX0{$qQC201+#Su8`YR?{5~hwTW!(t#{{vs4uNK>Lbg4jaq_m1!%(>`ru4ZQU<$T z%^`wKacq)z^hFPmwX$_=^|Qy2AjkfFc<5ICjKu^r_h!;>e~w*-=Rio>_U)crg^@Z= z7sdX@3Cd}`2o5|{pHoch7*J}wuKTG!_%q=0)e4s8c1mCJig+F;$4{{r-2iPnO0 z@v(D6Fsmek{mu*Guo;-$ywm~D6V{fh>TtQWg9y3IvGj6KN>0w|F2U4TBj>`{RJ z6aHFhwm5>-lPdNfSLZi}zwWg^d)PEvD3W;1C-)xkUd`8FzctBkviJ23x{O|j#sHp- z1-~n4cMf@~YNRq=AS`-|bcqSWb_u@4rfJ=kMq4eCO8AC!LA~v#prr3BAc#5{7x|nI z^?)8HqehF$rJc*-1tFQuYT>HK_s}>-qug)p_cDAu1dd+Q!tLH`R2STw161*Ja7fH^ z^n(fH*MBBHf92hOw?&}xEBcs;)2qKM!W|TCDlYU@bFZWtt_nCpK4Yhze09kn)*zXtXiZc_GAca?_OwA z__oyc-t?C_;C$6=mDFp3okyAtSN_jo-{%$IPQdp6z7EJGRq89n?dm`t{aJW~d;$hR z*i+SsTYXE>T{~j|ArGxdPO%WX4*5}KdW2ML10fT6*zL5k=sS0ela%$b*@I10wTA_v z0;52kH*4B#Iys7h29Wy5IRnKTVo-w!a%iG;+g$PuJt$>uYlk2|yV37ji&;etXX zzrz0mM4CaqxJAC`O@&bBrYapoMI2@af zVtiwQ7Urjh{ib-sQF}|Gslp?vC$t5V$ypw^7Ojhs?bOOimEl??Zq_*?Hy$Lm|F9o| zvez$aF;-Vvz5meZbEh?t&KylBf!$bU1^7Q)omEs@ZMdz2ySqby;_hz6-6goS6b%$9 zF2$YVPH~D;0u*<530B-GZXpH!lReJW-nY5P&06Do-}TKopBa%cNrTh#D5&ZD3-kC5 zi%lqMF~H`@DBS@qmXmKOD}k12UR6Qq7inNi9grdxku!975|dkF=lPysM9Iw# z{mtoq`hIN(55pq_A1Ym%q(8eSFo?;6XJ%FV?Q_^<%oK%&oLhem^=3tf34at_bZ_ic zDJ}zwH(jh~LJ#c?8Zr(Y)h-FXy1?8_OAHOE$=s;u5gmt;D%%l*tab1Ax57YEh5NAk zu{XvYzOF7_RcZ{@a)OpoN_yN!GKPFUy*C#yYQKEkI%$L%>_-2mmMYhywomTH_nEI3 zf5xuY<0d7C^3_Cm=x}un1=Wr|O&;>~*b?%~Hoo8LGdT~i@#i68A^&WNrLMMnWOD2? 
z14talK2dF*IMi%|*G^WM%;`=G$lsqmSEheY4>u{$w%|I1RPakur!Ik1&yEtB(GN0s zN+>q8?VMk3AT?+HiGRqQGMF~?Rh2QM!0MvfTgHi)f(Pg2z54T&u~mK|q_9Z)79{hO z+7GY&xk_{aO0wKqonIg!9DBWFH}wdkqFLybyVs(nRfkHKwOWR>fqevZ_h_15ntJl6 ztp+w_;-?usZZZ$!9|XUzDBaxAghwr#jjZT;NZ-&pbGLPG?+YPzMjGgQRG%JKWdIW0^FJe* zABum~>=*wmLiMk7t((VW{`2Ox!PHyJpfd%q_9DL*tuT6g%ebv0QC!_Mc>`X=vG}V< z-TdQ_Bu5+wm(sc2oVZy5hX^IgWE{klTI!hE+@$C;GbZo>v1H2XbW7m+FeFY?=Fu1X zhRYYJTGhy)y37`%k-ikgfmy}Yh5T!HfvepMxUe(~D*TB#WyOf|MwP0c|KJwM?wwS4 z-fBEv!2YGoVAhSy?rdpXy$oG^O_$uT6ZGX~`NITKmp6T{u&P)yJ6Q_}1#m5`E-5xwTl)Wxob~8Q;PQw;;#$M6UOa?mc z)07FYkF}|1c=77b0GZM^sr#?@H-n>lr$IY7N?8wHa>?dN@(jUQAAJS55+P zO#YSK=|y0t9$NsDmSE5+!ak05g|%NSzvf+{$#aq+^7kN&QMTnyMej1p8dBY?dN#Lq zeUo$_Rj$o8pvSJ?v1{6-W~G<<0>pV<2oh8j67@%W-+l4pTxD`_sH9O3;mx|9CezaV zvfj#^ihV_O;w5)vcyptJH~Sgonab}ByE~;iLorz$8I6$7(Bt`rd7ecp{P>|@&g)u9 zuYd0i%fiK>o~)mWBsfz)U3TQL#TAp>rr~q6s6vyW%WqiiAHOkQkCMF6^dv#M)eMyl z=}30XrGL0F&>#MC6k8zsm>)6aotmBwl;^B{3yxjuEemjlE&b|NB8;50GW}JG%ar$Z zs=1KteRpkr&al;qA?tuKF{Q`lmEYlFNGde#>w>J?A8FcEc??qau*^eTm>aIt#|5=bTyHHK0#eO7+ZWqX)i<49V89XkYFnOGqqvAlH(pN-%v|z>c@VsBH zTc@^%Sx3R9crbOtjNe>T zC&AHnz*W5G>H?RXJmQ2MH*Q<&NRg!4S7A|nj=?T9++vTVyDz;Ewx(2#A~!njZ&W6U zwQ%STtr)AMDmy@K7eo})yTMj3gKaailPnarNxS;L-AQD(2iSs3!R=i(&w8YAoQCCA z?_pSL3*euKZa9{SD1GK8!T&^yD)H#M$4unb2m0ORCwm_URV?1I{F-#=i=Uu(wBx@6tML8Q6 zrSaAnxZzsO%%_ml?+}Hck0KV?7pc-}5W`URW_$^CnFowU{eAH#|9HCUf%i6qAh!># zLl#3f0O9(n4<|Ay4L$b&USjEzq@?R*a|OYku-}V*re|Pgq%_LomEKjfrXO7}sB!wnC&L5%T+YScpF4 z!<_){)*F=4{wpGJ`@-IO{^xl(U@B)LyZ=6!4H?S~UTJ2=&TF795)iiB8ORkr$`;U+z?_Xb5&Z@LqH+kRR7mQm_5&XXff3*Dt};V$zB zGgKf~&!NZ!07#fyNNv11R^gbwpd{*~-U~Gf0GkcI_*a>R26M+8V(uEOCq|&{q~13{ zSIXwU2voSm);MO7QDV}qG#|d^hr736!2(=+pe~0hwB;Lf_ z1{vZ+o0Zds%Tb&a@fH2W1Eqi3rCw@g^?cJbM5>9DlN*DI1|&uu=C}a4 z&D%j2flS6l)Y}GbPmOAr4;>KX)K@%#8Gan5AxyY;EbSotihg4V;oBzg`PZRB(b}dP z8(4e>DqZ#{f*W z_g_8l!hhwO7s&YA%rNNKN3vxMrIvJ0_eysM-Q?TRAHs<|K{tMP@_ps`RyMC52&bY} z5C@Q!mPuCjXke)|Z>uYbnMIKVleMzfZ`>km6>bVsTqT;{&e78btxf7PaeOHjf5m;s zkw{gLwcinqmW+3i01bQcuz`Ps}`4N2<^2Lk!wxL-2*b3yk}m z-qB~Uqut?LFBn)*l={t?J+auGiQA|cDIzngar_5}Onw-2rfz00mmm50#?Z8}f89qL zA31d;t};t?c>CUHVY}0A89$1(kjeem%pst)GZC~Ws-Z5>&6m@{a=W3al#2ti+wf?@ z<2=Ngn3z%=M`PHwY)3*$nrp!hTak=9;Bjla3iZg@HA75U>MRqG8ib!r?Tm{-H{GL% zoLKro`v&R~H12kD-kZ+J61!FDub!x(R6|h*mwhSjTaXtIw8rbi4~EogiFSG#-c9`Z z;MMD094mv61|9oR8Nm(${5hQ?u@qciZdC}vP@ihFoqL2tw`d25Y zFV6IVbF;~laTT|;V<0^Z`KMS5To93LJe2(p@a04qv}8ATeWu1OX0TgBn^b+tsGK~> zfFw&7kRnJ~qz{i+QBy^gff63^UOK%H0iL(;TN;up}5{gNk7M*triyXLTwqkD4!2U}x1mNHipq6;w2&0-$Ce zTeLy!)bbWTsfx+C+8lDeZ#(QLSm4|&KPLkEBHke0WbL#%?Vg;J?P=1Q=>i!?@%aCB zC^|N7^*K9RxMv3rj#v%R&0U3x9ur7gnB<4X3}5aJ)nCiZwCl*8d7Zcs|U)u!gHp|I=|IPy3&)0ScVky0Yh-Mz;3 zzM@l zs%w6zI@whg=H`!?68-HeZ$6yDwVg4&Bjp_Z7%^#VIJf|=rCw|y=+SlM&i0sIWC9@dAK)Em>z_4dE|O>kjPhdaCLorkuWGm8+}s3^H*Eq2_&9 z;+@h6WV0P-OB^2EMEgR&da>Aek!+S!Vr;TRv&?F%H$wR5Z`x@u=dJX7Db;88{K7FU zf94;%z3ah1GpQHMY=yoNp zdZg=2MWWklL)L67DhPC{5-F-uF(^+EwiTPexj1~$WZ`}?n7On(gJ++qSjD{fv)6Z6 zTN~yIyZNM?H~#osCU(DSJ#@N4@fT1i_>?a^5m zuoOf1twQJByW#O}$dp@9a-i7G79LPt$V!=iv1jV^JI8;|rB}odq8>>Ya_t_G zpOQ3%slsrYPz~I*C?G#gMOYt6VJ3o*|H*T0#=(}j;a&u4_QrJZ_Rji7N>dp#IZCyk zo$Xd{6bnB^bDD3<_CRghzHeO+iBuEeV z3E;e+D^2lfs>;~QG8QG0ip`!i2C+u0%3@ZIOW-CW`OBH>BNl}iv zBc8P}G4FCM`99)=2igN;*ncA|fvc`J;U0xD1XTixR#-~!0l#uq6wQ;SD%Mlr)Y#!L z_qlw$r0WL4>vmBH@=p>2L$MBh6bGD)xI9$AWn~4&X?HOsILSe8@p8L!mb3gXP4mxo(}7U!LlnX8C;W)5n|_fUJh1EEzK4A0=?V)9HfA=h*pD zIVNA>8EypHM-eD>i2Q$mSWImkX!L2rkhzf+?sar^;`iuZM`<|~8G0zOgX84y<(0f! 
znykTz;fG_rGEUP0^ds)&1;(hGIq z{DrrosJvnIKqZpvb@i|+H#5!p4DGmYZ9Y7F2VU_ZTdrf)(v;VdRHth15{Y^Qy7oqm z9&KT(?rhQj4mFN^x6MZPCSDY1_kr^;E?LJ%6xI;mlPsDX`SHedH=nXb3quNXTxB<* z4?$PsmaTQwp=CqlwnRjjg^r^#d&|vd*`9R>=`f8J?dsgyx%kD&w->*znkQx zkQvyAEN;*q#{@By{!-Wvceu0Oehv|-A|(Ve?mzwx&A?DIP;HZ`kXh2 znKq;ogK%Kh=scDAr}fb4)|ZOcaWoC{6IC$W(L*>!6KmMCiBJz{UJ6sQwJu3HLLGr- zGHs_%VSq1O#qqXXFzk@?%K!oAi=@Ou#V%!$a|JGf$ks?LGYo3c8ckpaY$4PYmW>Le zBJ-)3wx#?||2~U+zg!qP4p(vTP2oz3PLa3tj zSwt~e4NE%+y2@l_I^imAA{#Qzz6#z4kdHj#q@N#koX7prsO0L}iZC}UPqDC^&VwKc z;iZ3VhBrEAC++dsZI~kupo@JG-rVm|I4{SyEE=9@HzY6euvMSD=$hMbG(&qa5k->etq;JA?)jS0Vl8Wi@5K0 zuFa>R5@!$Q6}ignHEv#?Rl1W%yV9x|+)hhVqZ#XSKc$gP)Wl3?yMwc1cDQm`OGwod zcFb4LWV8zbn8b0|7lbXiiFH@7ozw%3`DdeJp|I^;n6-a6sj!Uysht-HvWWka0RWDa zbzt&-I1d%{Gc;(PXzH5RuV(TRZn}8$dst#j3Ub8sSG7iFl=el-JUXE6quO4q&%VJ> z<`>IQLFw14^(H$_woxAZQS~FU;J8pYikDU}g($>w4_<;*Q4?NLZAO_baPlofKO@cK>NmlM-{bBMSpv#?M2Az}>&V%5znV(-a0*SV}5)&EB zn9iu|b5nCytUC&~P}yDdpu_m~@s}5G=m>RQ%|vgfiMjFk)#w{Psv0&6B`V$vlo$+VjE*r{5)oGu57W3MqUIdX zQ30+>pqNj)de%QH$;d+oN8U(bGP``BD{_$GZobf2$q)avcd5mZhk>a%WriHs4l$^z zn8ln3HlXCeYi%LwQ)6X;ej$n2cbO~`V`r*HMwH>`$j(8i(C4QcnHP(qJ+c7EiEJPCdiNs z7rozi{08(eaIx7sw%710INR7W7Pk2iqPF(!X1g5@xd)gia!J9p~mHSqlaj^GUs z_ebBxSt`EX^bH;V-ILEOeB?ET+0w;F5EgYj2%%H1Hx2uWaJs-C_1$sj3{<^3wY`BO zH>vEo#d&a3$t+k1#U$+%UsbKr`d>m&T2uwNGHa+%DPc-uKp2pLjS`jnNrpPmIP`kF zOen25g-NhfVY9uFR5}~hhPB`L8jYKYW{g3|Q4^YF|GT>5wyBBqeq?)X!rxUL+$!A8 z2L`H(nXg~Zh9z}-mukP^!t=@iqJ)NT?9j4|lx{=sFJDhA@MBs%O@w>&+8#E|HgJ+i zhGber0}cQ3cdg|rc8CG_+LwyfMhkBOm=zV-!@MwDAU%e?o;VE^TN`%mT|)PTaC6iV zXCiea8>ZaUmQDKl`c-V*gM=%QalvrD-zjHV%O*@DtL3RdVCY2a?2mluP?Th#DU?&N z>fL*_vGu9q-VC6jMB+nRpx;>89<{W}Znav?$IKDV;U!@(iV(!y-yJcIGH$%w&&xnI zg3|{%{!&}FqKG=czYuA_uYgM5c#bo50>R!-t zZV>99Q|(voY8WKZuOpUV9U?Fgr8H&YPXNyr>n5uYzQ~}U#eascW4GV6VouW3iv*-% zu-$Vs`Du=qLk9=(#5NP1ve;%4_nnGVmFK)i#M%K z!CN$v*cq5j4O&7r6YA1;$32XhW<4B28vR4x$8J+LLbaj~6Bfuy18**Zv)I!rhjs09 zn8pWB4YF5x!RjYUa7N(V{xGR-<)+kC+)!z{X)F^9IQxEykQmbx^ANr-)=Sn3xf3}c zK5mU-_m8{}lV8OT2+l@Yf3;<9@f^-J&$A-@EFW<^*{h!am2VG2WmzW61+>4=TQnhV z21QzKHMdXyNVYX(P94wy2$Bq93ZgW_!}ObtpZ7R&boyjl3(|UCWK2o)Hl8f@K+q#j zGyv`^$4elt)n%_g4JQ);9r)WHhYYLq+LKW}u_>3THh4`)Dd|N_0ksG19u_pI=WF5Q z;>39i)4+)d@87B2qo=WAinngXTkD~|OE@z!tR6*hYUjjn|1-iY^S|X$4W-!+WiknL&-70;UYJ};{*Op&%#a<03}4q?3&YxdY384@qFE^A*>qg79F?wemhc9X zK%$rs;v#j!F~gsJ=3ER+2fL^6#Ur~w3Rm(NALa_v+lyzB8P*>ta~=z4jk$1%2i^72 zDZn%vBErd`kz6uM>mMiVaQW$Uld~uHe`nggll~JW3BLdjAEtGmTL0O`~!Ayssn9?&fI>;=D>Q*G4ql@k*e(L^=rX167)@8y<^U|BVv-1 zNHp1S>zT)}kjM(zM0w&>&|h#L?)=IPg~_axc{$OP(Ic9b3#l7}vBaH2wH;{jb_0;> zm&&j7`>QIF*Lff&FQv|mTlY*6-F6^CsixG2`JmeJQhrB97s2MCTH@ZHQILw&an-MC zVwUSvSk~u2pI+`!&=Dy)V3|kaHU@PPcuvjc-xVA`Pzyia}91aqnqI zJlk>6{AWc}H)g#-_4Z~$A>0jqc(@-{U92d=O9Jp^G(5Qok&?%s$?0jPU%H0Hs*vKU z6(vdJ@?`U&U2Zx-tr1bJ(RIkI3dP?0Ws6hce;NJ|_hmZn<95}CWi~>UL)cJM4exY% z3gvl2<$WI#)$@0Yy7M?k;Iq5q*6QCOZzih^0P#8uk>@=Pqxg85FOaS z;}!|WM==L>Hi;iKZBTRfdM6b~*eWmz|2z+dz6;+T<8k|D4LI`j6t)$C;Lr32pF-T) zg+b2tzySAnR#PF?j%3F^z?AHr(-zEDuDL*DYxFpx1Bk`iet#N>5l4{4Mz-+k8i+7t zZV_zJWT6VT-}AwB9@l`2SH3m5Mq!8!&NT|Aj+m-x|wvIXX{&Wi-&cmj7_s zfA3rK_rTx2L)+TjS8myeT7$(>;8Dw=mwJi)q=+lNi+Xl1yz7?-j!=dOD)zM1WKpa& z{{)j3#EGeu2Xh0pDP`a(81S>xamnQTdWg(+aPCBl+Y(07-eztzfwsA<`PfH-@$|*B z>`eQX>h#~kpa=E0D3LKWVdbU7zQ;aG|M+NjFyavMb20=qjIr7$VTLiCXAN8_+S=C( zmjo?NciH(f=AMD-flAOzU`%#=+JWN~1aY)dw;qpDyx!<`?efbjZ11zVpng!g?`>Y9 zBpk7Xjqt(am|GLb&nHgg5&$PqE~2xSr}KzQo5#H>!Gyns6iS-wNKDw9IZ}jqD7(%` z`9^;)QhcznwzaLoSaNRL50#S4sHVd){me8w8G~QLm5&=r+kPjB`9rNHZoOmkBODEI z8&4An!*=Hq{=^#_S!yU`Qb_Vxb0bw7Wl?G;JAtgD9lLd2frX1@|sC(V4J_rn0 
z*f`9r*3}=ZS}$-Yk7PrA?9BghfLlJp_9|I1-h1yASnBpTKWHo2j=Al_As`863NActh+ZuqwNuOo3Djd)-t4 zw>}0DV8&hI*y6tV-89ADW~|Ee*9Yl#$jw#!S>VUL&zB_Y>ujJ3p`vmtJaiQ#fj3K= zH)PJ=U8DQSsUovLS=bRj<49v*t#_+A9IexCGKeO&bl2A2TmJL9B>dm$h3TnYK=-07 zuqgrqG+ZV{>pl~FrOc7RwF|7bPNOfcEQoQloOjm~Y+DwvGW`)xq(S2Jdvo?0X=Y7I zt(U>EY6SG=jNWXZMTn-y^UjhXFI+S_sWv0|G?n_X5x|I#&dDF@87?TEx7*%c?oGlX zR-Cb8fRt0Z)3x7tc(~5ywq=D^(tgs_pkoU@~hYYsgXbc2|=|*9!N)ac| zI8907H)>4(-2-L1tu^tc&U84C4**N1OiG_=A~hf6%u#J$y6VYsh3syxbMxN>K zuI!#YOmruT66hZ+tu3(IBN*2f5o~!^!ahY@SjIDCZZ}vAV-;fLG1EG!*^= zAI%MdCj9Jkk4tZH^5hB$MEK~d2MjC@^MW7Ah{a?4v8uNGCa2%lI1%f63sGxrz1b>( z?k)%?e&ig3k{>Wh{sa7p4GMYV&(pAJ#P(un7q6RX-`*jT@R%hKPYwCIB>n_D6y}u= zm)s~&4~c{RZ^wubf^7x>5UB`p9YN-7j1o65z!;jYZho$-u;t2IBvouVBKY$^rujYF ze&E+%Q?4sdLp9G{B39g$hY^@w*~lpl5VngX!3u~`FVJxVU?d}SM^u@<0ozN!ne2&f zKaF5AM$4=(#@c){Ii+taVjkHCzFY&|eVihQ%(BWdmm%3th*Y#y5n*wITCy&iV{H<; z2e`v_(nNJmA{s>?<_Mn#=zb5Sz_VI;J=o|>LAj)8)%Sh!s-{&0ScJpKq#5_9m>JA;%lOM z95{24mC4L*ZZaZmeR@p*=Z9q`e)R zurv>3B3kRUZV+<@&$Cq^j%HFvEow0}kgKcL&zEkmkI4JY2wa#oJPvgH58zvmS{ym` zvt+1hWz9v~1;IKjcMAOY9{^XB6z9-E8d}$z$M;uT1H@%l)#(I>UuupF5miJJ&WtUSggcss!@Mb>#DYjeQ5_klf;4|q`#8U#i(iD2o27cnHiGx5*^#h+Z&&po1R^e{WQXhND_|AfS4bG*}0LkdLJGcwB~j`bKO#i+x5dMsp8n z8j-YhX?+a8ebddTb@_7s{Mdh5{Dq79EA_yh#uV29B;cx^y@_!|_oZ3&m<+i2@Yv7h zkR}xKZF%@2rS)lX&btpAnTx@+TK7ZN2PG!cHl*%`7{9Mgj+5j^sx=#vuCt^1ajjim z998_VQ?U4u!9j=|W%!Rvh%aedCQ#3kMHzYR zO?V?gXutQjc8NrTqU-kXG%@_0?94Kq?R$*u-#;Rrdun9?aQ+6$M4fU}`(5EySNeNl z!CfO`T25Tq`0nkG7J@5K7UF`Ha2xDV2_-qcj+K3N?+)7R*0Rq63*Mqzi>mE5ySKrR ze>=8Ue@m+6TWdGebmJ!OQXlZhUm32G5c-*GcO`*Q1`rX}#ew&DIaVAT_))uR77;|Q zo$)UMqamuB0i%3_>iqMII)f53jk3|gL)^YBQwF58I>no(tzBInOux>W#N+&8XQ#!h zTQ6PPH#W>2XBj15GV{a{Whb_*B7fUCLjE~do##{*DXW&{>#yy$Wj;S$CA?2t9r^Sp zy=j3@611evuIUCIncSVed}3W7bjPY@_lG|VlK1M-8JMJ`%s$c`9+cTzq_RJzxY21; zgomWoWrJ@LEEkXJQkumIqzHIvmAEK3ScyUZ?p<)%ex+JsJ-s?#obNAMHpe< zOJhweank1c>u&Rg=-S=}+&ZR{yv~Yqf>Mu5E+EbuiTW|J2&Rf7R4OHR9^XreP zDe|Bz?&2b0+sX>zpU_*VWleI-EJlvkFHd$2q_3sop&MtY-BNtg)MZ6c*R8vTskPo; z2q!fSDaRVp98x3V>+{athCcg>TFO*~SCESadZRN(=}PwTe^eRB%a~H!FcfpI&!yH| z3J}qnRF5jIX%q7}l*(EG)^`Apr6m&*yZ-}J)}EF>1mr>!puI7AP;h*>vByNAx9O3B z6q7^tFhdg{;$=&J9hPq<%{S7njf17zPgth^0p+wtfe%x*M@MGR>yWEz)k<3>CiTZC zWuzg1JSraW6PBDoWv zjaW08Je-Ypr*?(yL~iMJR*D%X&6ys<-8xk9ATw<;qtQW8O?}oMy2eC&_NIZQyf$;y z?d_vQHw|=UKd+zIF-m>DGs#b3%q4!2xjX~i)8YFuhE5Jt7G@@=8mnVDOg8eU{Ei|o`em6}OVd9S~9^pxhB?;9U!Bl@fcte9`w{Y{v30A8 z|LuK^Ipp;4EH89w=1;W=q65A$PEyY+s4 zR>0Q2LVU?TOLI1_`iAmukP)gp((CaG1OlKIeg8aT1kJ}7J4$$Jgj_xh>nSI*kH8XL z^BMxxERu}g>_AKK9b+t%Uc|-|eLy?C!)5d-ndt;~B-oKr2LF5gEfGTo;9x@70 zmJaAWtb*n9(%L^H*=P3ZY8Mvc)AL#viY#B4d(X6u?|%yJX=e>x{n~={Nn(!zy}fm8 zVShgqEhQvNo^Rk0;=GfC-nRem^(w}50w)5vz60CAEFVPFdwT`h`rcs`+#uK0!hp@y;3Ig5Z&9#lCsz4cEdqghv02@`nY4fj~X4O;bFN_K|39SLO&SoE`92 zgO4*S5MBb6gsWz1v?6swc84d_xr6myM*)%Ruzi%Pk7BH~6%C*Euj|n{=*@4(1+|U2 z=kz?3C1+d2N(1DG_?*;Z6{xsYFAN3J1a|j4H1A-{RE*WAa0L&N;H4jd3wUC0t7r`` zHb^Jx0;@w@uK~}|tm2zAk8OcULBV^;u3YiF4rDAA4Z+!knhkkY<_J2MfElU{Fej7a zo3pD&*?JbcR=HjidO%lO4l1lI_=(uU@k3qb=&Rdk4CsLT9u6cOmXnLZ`!-j@|4pXWn?_bqMu0x7|&7wgx1I0x9BheW*ozvi_ z*N47r3}Wu`2%?HCh;|YH5c9!=14ro$30p>kWp`+)Z~iBOr zdGU7TeweD~VUf?jKT_8yd2vQbY?{L#{SFH;bVvD-zqllVa?sfaHRX_LVnkmbCEfe? 
[... base85-encoded binary data for the preceding image omitted ...]

literal 0
HcmV?d00001

diff --git a/projects/DensePose/doc/images/vis_bbox_dp_pts.jpg b/projects/DensePose/doc/images/vis_bbox_dp_pts.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1a81dae4a73c88dd20ccda83648ac7048ad36c95
GIT binary patch
literal 83397

[... base85-encoded binary data for vis_bbox_dp_pts.jpg omitted ...]
zu-CJ*FMC}Fh>1!GR7U(CnQt8aQj_P+7@n9zpNW9(R31#&n_v#9oZ9mCg3=(&^uT9X zS!0|)5;VdSRjy+zd--n;O-FCn0L=Ufd7p7)Mz1@eI%yB7#VZ&OTa0iGi9%Vzo-djc z{sE*~va8ayxpi9=Y5ms#0`W@nhwNXDQN^KRn-{|qO{5yq3M@B?EbmBXu_j{cvv)}I z554flVW}erj@Zc_79=_~HTtS>DL7!AWka3(;HEWLyZZv4Xn;Gi6g%KT=FRYP?0n6Z zDS%9fwFJ_;kSR#KaYBD~Y!*LZ@a<`91b|J3)K$xaETXr!tL`xe;GK6qjPxkic(l9i z(tP&Ush6`YpGp2fcW#Wr%)rBo%Cqje-_25lLc9=akiO|F6bIq^XF~NOG`BtUf%Vx~ znFPFb%*7NC-}_k*qe+`NVu-*==P^0f!`rlu-a7B@Vm3-g#HgBv&Qu#YL(d_TA{M}s zSD|dFt-9heq$X7`;UrFa{$v_N{^Pe`aZOH@X&&+w<%*H;)E2-@AFL{14O$+6jMyx= zb8M1pgBeWa)nt4FfV6#KnZu+p7W+jj6+5TOLBfY$geVm$g#|x@N zvGrjL^ef((1v-5Wr%%R^R+N9h-1rdo7$o|F&`fe`Q_n`Di$A@M zJ9sI#e&+iveX2g=00GjA;GwyC|H+WcT?;VTF8J##>9R7aCNC-kaRU+F=JU-v+ZEh{ zYmARfPAWN8g5B77DM$Qxc_E~J5KS0iYovF7@bp`<{=e%q+m#!&N`}6_g^3DHoz%zV z!vSa@R{R|{XC>B$rX;SoeC%)V(QA1dxFboyqig3f!hea*y92^}0mNqih%%mk~WrL5}x=#)dR zsw7Zir)6F9m0c6*a;#y8_pR;}1c3kQF zAHa3T<-Q5!=e@?^tj_+(dp6SIdrufEYyEF%;OX*C06Q?UyB0wg-&67q{`TO<&W23P z4LKHpzduQG?9Y5TYURdS?QZiqGC)l471{BU&|{0?q~Nuj>+B87m*4#2BoDZw<}pFw zkczwt)9rsZtCt8DBMno1yF}4PW-RK=M3a<(V0p$=*cdL|RPeEMe~f8+k5a!#k@uwH zgK&N7y6e$y+ii*M8CR>j`4nc#_tR}g#Frm^aHdH5-qU+Bcu}#=a(tmuec)EvU`hNxj4=LmQ@2~C_8LM_ zeT=w>O$9Ts;_Hz2o6aUzH_B6>5U^U0Lb=YJtch3C)BrzaY#$8zcZEc&qq05tWcs(? zY%S^AGe)4$Xxp0P?;;a3E9K*=~2(26MeIk{wjOJqE zf0L73M0a@z)YgF;r7ZBLFmSS-6mJ(Nce!8~CGOyKi_(vdK_`@^1kssXma1VW-Qt$OP5^3YMoQ@BXWuuXaM zdMT2PC?;rS7dtRv|2x<0?yd)H%>}!ibN06sQ>v>PT4CSj|MSu#1&;odCr3FKwN=Ob ziEyM7i}_g#MYV1zxApNV{)IQn2vmH|>Z0whlU@rvKCLX>or>Bp{9+)^D}A~U8=!X; z)Vf$F4aiszsQHdom&kYz_NI!IQA_gN=mv^yGp%uZ&=f}7LZaW4OR-3!D zdJ`sg!7M~U81m8i!(%@_guP6)H0mCElUlK)p@RjEJsA$})vKfu*BRr8R2DeqwJV>D zlTnxZ`(D^uWPA9s1HF>l5us#RY%oDMx*wZIV8La4N?5?f;IGf_-KW( zvPwMr7OHrzv%U;#@f&=;9>3|(9vrrXD8`%1gLvg1j7uqCqm7_{i3HlFa-FyaNzBX{QP`M=o`hbaDHu(L%Ak-0(-D0YivzODTf}YG z88gXp>9Ef6-V|~X|IQo92~{m6{V6yd80t@@=nk0{H-sF3v#QHyt*y>>$6&=mU3VJ? zd!KEiP$o*ynXGW89s<>4CxAYNrmpZS_vBSGBK3?cOD+ktvRs4Cxk~8(p@iQAL0LbC zx}UpF?7=vi2q}^A(wEvP?}ot2kc)_>1K2HM>j8jLyiP)oa{17HjQr=hW&upIwB^_* z0BYixU8!RQtB?~*Ui^TJqPJHxH>r|+nultI{b(nV<~RHpw2RFK>1U`uj+#+h3)+`Ut>;&-@f(gWsK0ltvOGZWngU_g`dT6w~ph% zdq0%MvNY+aI8Qg=!c%h;EQ6LuofhZgxD9}pkW|^g1t1+oK1gh7-8R%3J`nzfq41!X z$(De%a94(vdO_`l^FuB16I;1_a%@S3Ctzf8XhR#y@+n^Yo71ne-MBu>$qH8~)^N&C7Ft|Eh{M(Y(tOE6P0Q3j9fPuPp!GCr( zaX1ep>?UY6g(9@go`lkkD&vW7p4;RK%#I+>vz8r5S!{RI*dAK8n`L|bmTuDw#dTcH5Vc(afP%*(h%x+%1rvSQ<0G9VQlTko>svi3x-sUhUb;S8SXnWwll^Rv9v~uE zjY!e#KYQI-eh{rD{wIw(C)R6@jn*0mGtHD$zNuEVqQYJhmN@e%=KPngYq}ECI`}ny2+@2I!qfv1gIP!>)35^^3OdL}e^t zaz{d1T2p)%`%}(BOa8hI{AsB?8n);Fl~SFoV~nuebvsPL3C4)ewktur0`Bmry%{m=CIfz{gX3Ua%`^aX5yNhO z5+N8oK8%_ef@Y)D!ovXV%PxOT$Z4QcA^oP9a@4i>boXQo05T3G+W(N->hufnwf}#? 
z|HYm`KFVKqeYuV-rh<_tPUqyCNHFFigXHkVApbbvlVgeyCg7Fw3ciqtPpb%UbosQr3 zs;RLLTS%2SP~($il9g+l;GeG}t&i|=L6)1Xp-Lt!y03hL?HwxiQV2ZaJSgNokx=5W z=gJlj2&gu4;o!*dyRyC0sh_Y=Je}{~K>Cpw3B3!e7D)l|l03Jw+*g+*WhQ?|rrzh8 zZ}@kIs4Mx&4{M5YO+ABO;k^f2~&df3MFktu8nU*1xkHEWjc1cEx)hbzOSx6LVQF$dJI0@I==5 z)BqhS_X_Nl7?i!N74Cd0!q{CoOd{b%6o!4XKDvVeFyo1WA(+kRi#|qk8ty9sM zt&6VRe}Eywbc$8H@rP*h)w>xIFKrqg5H0Kxtplcc?WE0w=#2t;q$ zZih|#@@%M^kQVi0?(YfLcJr-Wd<}m6;5u0Nx&mE~S%(eeEXU%+jA=D>to~@e0>v?Z zNc^vJ3;%nEyhK(6X^kr=IRcfBY(~*16Q<=TR0UTRWdQ?nB5`-t&1`ED4yU>nPWNX0 z#=mFUOE}XCpj?m<@on?h`7$n5wr(uf^|H6yLA?Rr+TKHU(--a@tC?q|U<75M0I$F8 zF61-c8H!;Y-HwoU8}`c*Kdi)&4L_xENXyZ6L2P{>Zy$ukwdQ2I=4cHkI&c|i-4ChP zq;EA)j-qCLyZhYfqo>Szum4B7Kyxn4V%4aCqiY>smV{9!zJ1JM(8gFVS|J8;>OXnI zZKG`D1P|qEo?IAYjpdo6V2%bjHZ^YWUpVh`xdT5Z@f73ozl|y7X*mX60oedJ*b>d# zZ?ZSS0+10ro#jRWyr{JY$Es_~Q4xbFE-BJv00ir77i#g~B2G`!#3{w}Zi=FZx?)r0 z5W@gJ1#uQG9p|%YzhIP4Rglr#LdPbojK_>!nW+FgINP<}w-=BpHD(!2OdE>vq8ka+ z>V(=Ut$CsM?Gn(GH(xT1Eh#=J5B{}sanw?CH*^4Ff1riBH&(trFpYNri+#Rzan-W4 z0w4m1uLfx9|hF~>01OVji-O9lBkF+06#-7UEuj!r|3 zUA>@KkqyhB~cfmg@OG3@61L0ul&UN5}I($WCEhoY;tO`GE7 zf-6uT*|Ma9y;6a9-7;xi!*F9~M}8Qm59(cikFAWB)cjTp-BnsD?a`_sqlstF$$d9~ zrp;~UAD0VG6XoX)6Wx3hlo<~}*W2~82}C0Bx6_44sUKJV>7~d9Iz7$^YL`M`hc~?d z2C&VHPk*3@A*dAxitAOD*g847pC@3TA^xqQ&-O`a_}>{;3Z!SUCt4-Yh{VP9Dl zuN+R)#gKlV>K@m2%qE_JaKx;Ise2k367DQi5@~uMAa4hV>I3jYA_TZnNRG{xr$3)t zg8f#jAC)ow17x#6NR$?*6u7kNm0aYhCQO-*9p(F8e70IR*(S5n)w&~4 z68u4R%5o{%m7gJrIdWHm8iE5uke}}jdVP@Kqy~g86u{dc|7lI>lSZypt_uGLcRvmS zqZS_34wKtKaifHtePBF!ub8?J| zJ4+H!$X3M#bij(;7H(fS+tx@*(m5ZGoj8F0g&PjF+ozUFw3HOh{3C7n6@)(7^1nS^ zi&$OcpzUo9Sh&Q$3Xg#D4>%dG>l;J!og_TvXVliSz`{!#LDE9NnWF~z=VlDPgt3Hm z4^y9^h_eqnKl-Pl{w`>f(zgaT6+Pd)o)U%N>+gd_y7E7y{@ycNy(r)>>d`+m7} zC7Zt&^OMC`vu@=&gK$>G&^On&Q^;zZ7a#-0{0;E?nz@bDtzKQoAccuJJFW(#Ov$6VvBaiC$*}+6kCP5=NT)_*m z4E&i-VJ^F+s9zYiHkv=ujRXmCnDBB5ab;AGu4|?d$&Zftg#YXi`Ie3i_0{`@qjEJe zt~CbXB}&x+vZk`8W7&&8D4obZ1K&j1+OoWO*~d%FWn#z(qI{;=5^aoVJgSez}^B@Elk*d_q~DU3?tr1 zLay&dEnWD9nsvIHKzngY?Tq5NTo;xvSrS?Hd+`0z%PC1}5Jnm5xtrE`TlE@Slv$lv zwxnnKHwxL=ONNb=`Lb^;?8CSL%snEcO0P_e$qXk(zw$0`4`4)w+aN!g))ZulABqoN(dQmr=ftQT@8V zLxv8G=Y+rE1&dq9g)&x}n$jp5D6k5wbqScFYZFP=>rl^QfpV~_POsrQIixec4()%O zSIZKpLSHVde?boSCq_^N>v9d7|Lr?>%{=exg&iWTKGmc6u+u62h_qt?#Vi}0{o0-Q z9M!$H&xlv{2F)QjdHo-y5~XT-c|-9bvh(V)Q37Qacq9PZCR<{2s-Yg!?I~#`f)0@wH{A*W5Lm;2=rKC z_~$3Yb~=F0Leb!&5p1_Wxbln%I?_pDZ4dyU$@7cm}4Xy5I1eTpk`mW3p; zZ;{6s!EX!>qSY%iIV{tkEWb646W!|o9F(c&vk}l0%XLzzS1TRlcmPraG7~koy(r(~ z)=}IEP}eC)NZ-7dsJ`&7!3m;L^=WR!F?aU3hH(-pcQsz|T`^QGXj9Mog(A@D!ZXgw zS4vnPdbYh|0y-VqevT%H!K-v#O~#>B>K~C+{pv;9JhsDdp{O32hK+G6Z>p}CEgS9n zyYa@RnxeUxQt(B8>3%r?I*zI0M9|4@tA%&xUOua4F5BY0Kuk47%;o9eef{GmA9u;b zx47BR<#sr_*YG(K`#W`~TO68T*2{(7M>^c^$Aw_|6$H0Cdizd?E=)=nRg2Y-tTTU~ zm*BeIK^vlP?3%y(34~6Ea#E2TsW~b*;x{+jctw{-MgSbtD|jkI;a$y;rycq5LBkZv zDD~#5T3K^B92z`*ScHVjtdGuEG}u#Z-6}DIuT7cN?#uJ{y(qN|7sRdT;2FKz#rf%h zW%yVseKN-3%CaLv?og3w!X+O!HLpM@^sjpHsGCZ^a^@rkF>&wSc0K2d4nRtc_FMn? zh1vbHF)L|^k5+V29mb@~;n4)93~6PCq)lL7!K8Lh9@6blCNoc!BXNa}DdL)pJ|J=! 
zA^?6Nc^$7hOFxUqK!U=BnuKpDSEzE^8yPsI-P6*YT4Trqh%r0;<;jR?&@0G?BI#R zsO~D_vugm;fc*#51BP7kOD+w?duSb%I-n+&R+BrJr?zG0$~~YN8N$v<<87eDPM+%y z(>?rQ=Ihq=D0laBrK^U0+HuAhz@|3k(j;4yURv+V^6rcVZwfZY`+2@ra~0e^sxRxe zoI3~CYTNZTeLFbvi4zE&=9ecvV9K5B)t;l_q!ai<|g**QHq!zs8K{ z9oU}MI{cN4XE0V^ZH2+(4*=qct@%H8H7B%gb#}u+dSNTOj^|4nXLL5V!M)qSN+_Fk zg~!%iNIHy3>Q8+SJ#r0Y;WKzrA;9HW(GM1X zGqYy2wXENEo?vD@EmTgWIvU_Q=C#c_^qaF-aYHA`-)u1QoqZyiD%il`Ab58-xO20D zaS#kv_N9QC$43L!<4UmqQ)YP#s|IxI8r%((SH|vhLk>UE`2!ci)7LT6{P`;V#$Y|F zr+(;T^wHN;69&J|Qm$MXcvPDKJ_XXgO7*YGgE?2d$$x|${UTF*2p{|5P|=Cp?b#B_ z3nI|6*w?4+75+UrJmkKWw1#yOlF1m8=0bGU?wAgjk~4ula#6I=d7vG)MY~DCPl(GN zENdWFSxS92YOGc`k?wJUy|UP%N0BCE|BoXtT;$2XPTaxR{&^>0wFaMyK~rBVHzUvy z$M+qYa2aBo!YfOU=tGgN?&v&soW9?t(0QOv| z!a>`qfWTIf0_VC;-#CW(gH&;HV`Nz^Cmozh1A-rCR%sD~qg|cCjqUdjsgvcr@Ggg1 z0lrRU-b`}5XEr6+wGemx=MESQ@*fyZBk{-Rk`XQ}d+3^ts1St;iIk&G-U>CU41- z9{kE}bA8VAz~Wo&7ccuACMF6>g(qtEhexi8MJH}<87wgbTZVeRuJz{%@tqoQUx>97 zzKGKWt_QIA`*@ko4I4_Xj`xJ;BKE?Hj*5O~t5x_)#TFQYxb7_BD54V{7$v*x(MZ-( zOy==qdNT);&ljFZV=P{5$pyR2d8i$zxb_>#tci^Dm|P2M05aeecA1ZXI@a`Qe0bSU z?Bpn1wGgPr<*i#ZMwF+4WmFBqbcc_;j3ASzZMKU z1kGk)w@*LXp=+JqEP?`;o_D=W;sr)c#Vg}URLT*da1^Mt&@b-E^P%QFdN;@J!bfSU zqKRHB#=am&QNVV*!j3<4cf-Yjm)w(nL}VgCXHS=7Gd!t@@mw#b%|@)Q?FrqhW)}-T ziMzLW7q?REdi5(;>Vnp{DLdq9@A5ds-dw$^UzTW#Me3#QpBRSE=Dc z);|xCq+8*ULuxX>tm&!q3QU1K!bX{zFYgHEJ> zb{Eer$qpjO!H{JnQczlFEJYH1O8fB!?LPqYTly=yo_Mk}jG?i)OMzc68BZ0f*wenm z7gVW%+dWCA{{R*WHeF_Ki5t~}(4@`@_ok;7mz@n5*5^EFPrI{E%S_OUTvCdq(@K4Q4)U2)1d zhJV_wpk4Fs5nw%~lt3gDq;9NC$yB}Z7YV3zKKtAMlwWqS6t0yP>Fm&K9>KS5qVK^) z7d)q55z}`~0XGd`Ns_%RogiHR*H8x39D-o$>Mh|4du3=nzr`imPlDXHmk}<|mCQ1< zjO8%M@L|S9M)VuMaEIb0dX4p6R1g5e}K5v+q1bDxG5B{+tBi| zU&0d|eUQ2a7K}%Ree>)@)bQ5S)}Z_J#a+bPIG}Zsz%gj%@{n& zg0R6nsrm~)|7fEl$3w?7c1&A`XCZlVM=F=?(mBpr%>`|wox-+A6aV1*ft`y~7U8d{ zD;%)g0C95n>+4?BK`Qv)F7@oP8>NEkx7&kWUZ(GV5(mrY(jJkyyy7UnvlAs)eXl6h z&37hp@koqwio3Qq0-|dGCHLF0p^DI2#1^F9;Fd6b0A2!DL;EmshZLS$JksvXW6gaZ zmH*TVQ)yt*Ci&sr6B?fx#|)hokxLz+59awxoOPu&8i^f7paBfYt~id)D+4WnY%cb( z$I17h6VU60MwQf29FMY{S8yPWSZvp4smN2QXYy1UB|=V4h&?GksWrxu6|$KkUb}JE z$5G#xW_@UnRfr$2OYn;~j3prc2n0}MUjPskk;atTZh*bLeEH29ki@Mugny&*iO4J$ zqboF^;B|MQR3<+*BOULJC3(R$s*%59Q`^Ttq0e!t`;J0@&eg^XoGm6wGJ)q30l-_g z75+tXu--%HaeRiKzu-<4%(;xGT0MSssRz>ha`jgsgyfLO#n;9{SbqR0>+va zdaQVDEjfz@r2T=9F%n0{6OOFNgPiRK1%8r^#Oi8Vj5;#9+u@IJ|CwSg7(*cEhYxs-EB0Dj!p9`^t^yU3PW#fuI~wFkF7wlNJFkx>&vRvZ`j#BdI(KNiW1)B)Y%_|fQSq)_s0D8qFBq}l{VquD1{E& zo(mLkw}h&sYyi62k#?A_xVYt$Q1{`{%IOV>+-0MxT7+SBtu#5*j03F@w--J{Gv-o8 z&o+!Z#}Xt>kl;PFoc1VF7zjFcXdUGVyR zDgrv>V7Sb6%EIiQzI%7}V}CICN2?j<y=m(;v*aZn!g4fs-CorDx`itpVBFk*`*w#My9GOWvgJqXxs)9tT0jUm4Tk0}0 zR37-XeF~AyDbLc7 zhW5~@%Mh!^(B`J~-Pp%$UJ^7=WB(rc{Rdb^(5xpTkyDE0{|~?k2C7jzR~D$PE^7Cr zMO+X5qf6$*i>1mFdu)R_T6no1G7U*-YBiOkd7@xNmsuTsZI*+>mhrR3Xv3n&2?cnD z+z`3^Zf#H~DhwRV#mC>|VV%Zg#T$!}YQ>b1Z{n5{Rm$~9p9pkIRa)da!P$=8_~>*} znSEW$RJ{6WUK0PZjWeEX33+K$SW`XrefCdBY>NWR86&{B30Z^gMsifi z9q&cU_A4YZ-_>3+tRl>34@6gf5PDrb>P)%5 z@5iQ6_42aw(k`jmVtvs0otV|cy+UpE=^MB%jj~%zC)nM}Z{owZoY8r3a2Oqjw#`<=oKOA{m(C2o5;nJ^ev!-i}T7d-B zA~x+rUU3N*0;`~`PvmxyERWlP1)0ounFvb^|*NF%Ev&z(+sdTc)?fx7fh zyW)kEIijLMpF}SlarTMTh2zJRj4j#ofqjl|lE9!TH>yVwzA-9J3Ty0;IMAWuJz1cE zp;0ye7geERer(h(d6+9O54oWT{xWt4ny30w>#IKGcK=VxR+{zc%OjR!wxO?U!k=Lm zT9v6@kSol8x6;6ryA*i7yy&!L5 zi!QGy;kCmx`}u$DAWnWYvV)yJ{DOSvrAh3@O9QXlWbbT+R?6PG+$;;#9kp~dl#?d&>xz<`PcmK&nPcP zIPteeHe3R6Y^YYEQ$84)&`gPv$e0+Q;&>Gv2p*PGtv1oaXi0)V_~fZ1xv9ri^DvYx zTh`6?0NY5(ZDzl-C#&JRc`o%#h5G1sLEhoF0cGzpT4B5|Mfaro5JmL5z5z&^V`Fth zmZ6#tTVH6Fk_KF%OFxoc1^0p$afQ$N{`~>F!-mJ#)KFO3SWXgl(dcYrxAn&X;cOj_ 
zMqCc-Z!}-HjYJMX(B%4epAf$JX_=PWTbdGit|Zlq=azpg*OZx!b8bHz1nz2u&Qkdy z^Ac2r4vElF+@f8X8_=aD_P(1KoT`mVy0&&Al=^sh-wa=r$BG%Ma$X?e${o>9+gaI(Yy?(x8s{ynkc%n+7K%=^_ffxIOy-IoHGo z7%*psAV*e`;d!8UJYCSPIDBe5>0^5bpP>f^uU$!fY+{PvRajc~#x+a{b_!;aP^f~$ zBp&{bH`ovJ9eQS_eRn8pMJHqf*fA}|Zofe{yc$u`M!X!U(T(7*P2A04B5AMBY@F%? zlq~a1|4&C(8PG=4tv9$^ahKpy+}$Zspt!qBOK}ZQ+}&QJSX-_?Ur=s+s?+^ zRQ-{tQ02cOQQgfuFux1JrjSX|$6vs6cZK5A2lHN(8#2{4a|37IlWS8o2wd^% zld6oc{H(nuqZtat8rn8uUfMy}e>{Z!K$?iCTsb7(fLsbwfCHxv6>wTD+{!|jd>;4z<^3SJB#5!b zWtCQ5La+0s{N{0g-J?Qk&*8<@!ikylGrcmx4x5a_ve(=qHE19 zia$)JUp%C!)sH!mBrVZspeVhx?X)>3ZM{EqilDzuCy7{v-aRBp!Pyf6A((=EpqL)uo_5)_2;smj{}k4a9^)-iHHx+=Q@gx{!3K30?^E6oi878+MtHH%`Y)|&vC#Bhx&Zu@$b_t)` zF*e&7$jfoPd?3d@jVouxr7JDnla0?ygRqQNfLae(Tk|8Xx~!FwB`Xac4vNf7#&Vm5y(K3H?^gGx|&pSw4KzGC6=UNn(k( zv>YH3CS@GhYKEnq%f~`0@VoGR&sH2#BCj-4T`_-3Eg9+Vd#Gz3GBhSgZWXxUF>N8 z2TozX^yNodOJ|wpC`IC*4(LL4M(!TZ7{^dT;MkbVSIZ6MSPvlgT1W%1mvD0Kvut9vkh$;z|*9YpL;3N*CX3;#}6{~_7L zUNwt&&o9N|-%c>6u_MI2;U37VTJpR5 zC-C1}?4_5xoXl-)1ZQdr+cdF#j|u(v=YjIa!zNg<2zO}NB)4gQpyy}+z|8p2Kj5Y_ zt`=Gy*Tl}537`HMLR~`=3?G;ml*f@6u%VE+Z?jm|cT?ka|7cnC%^?8qP+f+32JKWF z>Cs=b24MV84&_wr7g;pX9`xo_4}QdQlyWdJcLgItea$uM@gR;6%F;%i#DH2IU=eZp zxlHm>2|+`tl-8Y~VjPgovYV4dAs6kQ^_k>*xa_{{CXr!i zvnl^bb@+LT#I(*!*%1*HoRIk_5HgD4_Q5V4WvD7b0vk*BVI9E80rWGo)T6}!QOK3R z`~d!R=HMvHE-pw|hJLknlhygzKc31qUW`=3$OR3lNvctQS?z^OYGK%1ZM=ToS1-h{ zM4N2Q7nbMN_o@FnGF)3LcB0()P7BfhbKnr|=T`V)R_zYE!?Sfm2x+ zPYkt)|KH}~Vnv}MJFvc(b;{#)n$m#IpV;2AkTCoLGpUC5%I9*WW zUI^VFR#;+%<{+LV2w~#S-3|nk#kx&{oS$B%L=RVLK;+;|XP{>qmuNAYB!xYL<+zlY zge;i?axj*8DTW>XUL>H9nV6cDCyFGBF>7@oY#us5v~f`YRRGIb+CAw**tBp8Ts{19 zUyW)2dX+LodOPKc%@jYHuEp}{An|}NDh&qp=9}D+>p;_-v>mU)JWU+RFk3G>>k>gI>U|5%X!yUh-#=*S2f=E2SXslxi=X z#SfKWZCO_08N6#iX&+VeA*BPJ)!JI$R5BZEL0m>T7IDw#x-mZB1aK^Xg}1$*qU|R5 z3R>1i776{^IeW`XvaJhBcQ7jOBNzrda@K~Mx)%>9>TLp8$jd@nMfzI|d`@aFF8Wdj z*f4YW{dg*)mJMgg#C60OGekjO!Q&aw^l~ZoO zJKI50k;|P9I0PKEa|H8J9hQ-F40BcIb)`?zfO#Us4 zQVGerNZylXtwBJ#sk5^7M^*$&i=3Ul)Vr{WwJ;RE^V6D6($;ock)lp>6v^5TcANrh zFv#!S1u`gx+k{!ee9kxq#h2#u zO2YXGX}t?cYRxm@YtBQdJ7q4m-d!ou{tn=J3UK=X4|-Y)qU)ja2=f=E_PO+)hqt+Dcq~xVdZ> z%D_?7r^~31=TwFj2ztxpk++VT7>wF_hkEpQ?HH#0yTuObr_Wyly+Lw%+4#q)X{nPy z*c52_>*-zBTKcwg^sPOInWzSYVushIt@PD0ABxB|Tu`u>Wk0P%)Eq!~Z0k#pBcQqM&ti+w*-yD?i^UlE&q7Q4q)xpH+-zrqOpz#}AzcnV%JB{A}4sWqKL$-RA5(Bpc&;|Q|#6atGP z3L=nd(6uK$9$5htC`8Q&*OG@$B52UR0km=t=4nZ4rYGuU^^pgO&lL$WJP|cexr(UT<-8BOWQwNdl?5)Y{_-tA!{cAv^N|DEhv|;@jRm#(UHa4Oe%n@@{ z%LZ4=2KW8@;n^w(Xh-xSor~e2J$t_exuHui&8}kepLB_KB6;!&blT8#Ac_acJB_@B z-hP3U^lrlEG2uI8>NagnF{-X-KJ%i&lrxI%y-k^Xfz7wAJBQV0NB?~=wyu+cv8V)CT)vbF0t!o-F?SCdRfQ5dm&WYm!7=l8#b z)o*G;@KZrmEFgRhAA;`olZcH+7 z_wQ6O;DoEEU0BfjGj`t~Ov;b343bh!_yEI5Pmrvs1Z)zuqSoGQ#5h%GpjCC4#odYr z@KfbsjN+82Lk&Z)#X74HmOxV|X0w+4Fq*9moiaNQpI~`g4{q0mJV`Ft^t`6kH2j`H zml@&mml>b-)3WZ#{8e9?I*)z5{0**7BLt;I7meiTL(JOR(Z-)Sw1%XJ0;5Er?SV2B zohki9RR}@A^AGmb_1~fvbe5i#*%s?(n30@?C0pm9)w@0!0jHk{KmM2x zd{pfy$G1IhOommvjuCak*>AHnB=z(?S|c?45?Laj&*5?RF!aBc3Nkngqb7^Y+Rl%c z!kIwR-L{Ib$M3qH6en`j<>r#@*2MKTi@nk&|1(+1yn&I1AJ^fdFut%&hSBS0E}Tuf zwAY%oMp)CrHP&nUwJlQNKA3%x-MAKR5BSfFKmPpSkFU4e-?|c9>hMb)9k*sya9&RCsn3LR_h+MlKxA1X?ti}#s0{V&PCz8GX_mIqqm*c#iA z&{dd^FEg(D5 z`f}_HcEyp`kh-aXzgosSt^5_Q@ofs*Cp|AGXxleI`7dp@N@-@Y$*@xJ2Y9=l&>8ot zTrQ(k`zySt+&1~R;gA<>@tjkqhcgHwKK8A9l9x6rUFZnCYjf^pZqXr4Y?ps}ENc|M~+oEUt8(tuNir$L8_gOST8I=3#`Be{@H( zE4DQ2$nDl&t0Y~+$NN5F&|eirrtmJ&{;;jV$Mx|)A-5j9zvR6;bL!XSwO+tG6Z@uD z;^O1!69yys;*-LzDWP{;BkKq@k+(>JBa6bg6mhxx^W#;dKH4$-jn*^DI5)PA_Ydm_ zphyN6mnS5#Y$0ARhJkQqaZ|Pfb>_HYFy70Cb>!MWD@Q*740juK$9KFovs5TUu<{K% zXEvjfw6#aL4^md;tP5Zn_Jik|D~9&pN#D)1uN^c&aSxi;2Y#?06Je`4w{r4UQo#jb 
zxA`CE=zAUyH(e;6Ji`|Swz$-Zp@WkG7ytevQH+uqB4af$9&mHmx%!J_?)G<855J!s z%3R|VEv~zy#61y6hq}P>5cnlxFIe`0L(mbawsks<#ix zfj^AaYi>JwZ(^addYU5HU1fTdo9!jNK~ePp{JpfT4cYHNCfCfzHg@<3_Y2;4ic=eI zhz4C=cZUqXmprc4F2I+DAZ!aWmokw{`YW?lD{`NVbpMen%PelOt;Sc!6K?CHyKdwx z%+^d44?N3i_pLun9mhz|OtNjq_9$&V0e(2P9>E!H5~^CP!TT|n;+5hvT)mB3%P)d! zu>E3zg<^?0%pCWKZrzL2hoX*lz~HXi}b<`+1>X z4EqbL3#P%kBF8GNEXRQ zE$CedeupY&Kt+__MK#g@(GFy1N6_uha4DtXJPaLEyvhs_VY(uFFzavYLCGrXZ(&h- zIiFRpUkf9`S~CA+ntv3+C91=h27` zpumGyq(K@xS4EaOEHrWOo2rSeMvlQ44E+@=e`1#Vq1S^#(#69>^g?CCrcvc3hfc($ zFXT$tzqzcF($6f4rvcXn&eKvau`E~vDw#6;vX?oEPf>Snq{A-Xxd#z~MYHbbFCd;L>h?75d&A|Z8HofHx zCMsRYG_-;(0_vVLtqbj%82=ozRf=7{^B{3d*$vrfzwhY80f}H>v!~iXp*dc~+H=SI zP5=6I)oe>J+d1J?3w_cw6}S`;30Fg}sL-p^nI)Et8Z^5%@-q{IE6a1;7v6QU3WjC{ z)!n&W5^y1;Wx7pm)4P|k2z|Zo4%gphS*a+7xkm|+s;Yw%G~cHKHndkCs%i=Fi14Tl zNmVDW_Xe-~z{8?uqqq~$QaQA?A+mdqT1%Ir9ss(RW}#B6qbaG`8b`%r822nHPcl>2 zhI7%7mAU1CN#FE`Uhkd%EpiYBK!J*6EjwAGi$e7w_h$~T-NJLj{T9TWGajB+Y2h{o zujUMPAU{*+Km%C{6Sq&{J!Z?vVhf3L^~lJ1&?Iw6I*UY zg`dE`H5wJ{sPY$i5pG*(&=!xQakn5?Z?z?O#yb>3@ZSN{P7Rvjt-sph^UgFT*X*Ja z-}xP;mGZB)VJU*2xZ@U5#0PZIZv8Hk*+M9Dl49drW+9xy{;do#T!w@0rygh1Bj^O< z8xX`SqbdjAoQp(1YeFIyGt5VS&W4)Zt)wZvPc<=6?@1#}xhX}x%r`A$Yb^Uhk5@3LP&gB#suhwkKB9}i)U`-L0_5*Hl)$r5II*D2zaoN#iML-=V*fzZ*~O86T;D^&!(wHvylf>rCHG8?KP#{udMta% zz%S0T{OO?j{x^rIZ^r|A6ou4$7lXJHDC4ip%_HGhX{>0Yxg0u}kbV!cA0G7lEFCZZ z7kzx6TvH)-_;3v5;LEytbG~Ov&OOO3Yh^)>5C2;(QO>PPR)gSc`6tBES`$ma%X$&2 zQakI2hO~Soo zHDQJtIK)VU8(M&-;e}WpXhOE(L&_JGtA%!YDEjxDG6;$_gkevc?`66{72!}FKD+-V z^DweVD-SNcxg!%kq@K`1KcUr7{gA(-118W9cTKA$U{Ha1i#%|Y*n^P9PU`>wygwJv z1pyB;j8rKQyL`%li6Ce@p(a^G##fH}^!-5If}marsopFpT2J;%U?$8yvN(!Jn$-bB zxPJm2!14nBR1ZF*lv}9;svbyRuTJ{Dx81e zsvQ-z?cYs1$bN#3D+_aJTF1%)>1Nw%?xY(doX!Oq46yBfh&&{X|7)0$yFT4thqCQ8 zV{LKPSX=urzZ14IJdhF2)*VC3RsNAv^+aKUv;v-)T^N=UpnQteRs6JDTg3AM+ zmWU*<;=Fg|kD z7g`b&JQn%y4u26QtHWOPxQj66?T*Xw+;l}dC_b`%U<)gk4#VIJ_8<{`1K4Zy$^I-J zEb7ZRM^-D!Go95+3z${44!xk zgjQcEI9oY|X}uW!Q$A|y0eDykFM9b;?{H4m**0}h!LnbO9VphlFeI06UcP)P!7VzT z`wX!3E{NpszV}CN92*IoIxgq0bR#4aIFTMu0bII5qL;Q+?^>%_T=P~1TXz<^WqBO5 z%qn>2{#8BdzKp*2o3V+?%y{QrCnvri>!>dOuKcL;zg3b|o>a*`6AU zW#u9+Y%Z#SZ@@T$QBFE7gMlo)tEGXf<-AJ=QSfM`hz*k^9w)C_p}>Mb2~|WosV0qv zcx04&C}X3%FtxvFi!U< z{vDJxVnpFra%EpLjbXwva)_XbqB;-Z3_*@smE`PwkP3hwcV))wn{*OCE(^h$-S+YQ z4^AjoiLL3FC^u~msPcai{m3SS@ujbvi|n^ehE8tqahHgO$l|n2pq>tqo_T^|EN%hK zUE~E-={+37f9)!DgLgl4k~(dCZY~i{=I8Nq_&78bcYI_TP(&^<=YI@XyziMYi&};X zL9JHlmH?TW5{?j)x2jve_mXzJsvA6Y%10UN>Uk$%RsWmtWcO&mMC=UHS#!ku z$ZB#S-eL!hCB+&$n?l~Boxlte9iZFCw5C-dE6;+4AqM1Ym_=2J`rlBs6HQX|0P_^N zkk}G(-rdNtzyCS1xHHw66^6BjTdm{#YLkoh7R27(K#Zp%*ibd*G-T!@^rnxXvB3=y zH>Q&2sRUC`0(+d`ShtoWvjoOBO!hT-ZZ8KAuTby{k3b3mzMbYegfmQQVxv?rqWB_8Bs;ZwZF@yAek<8zhi>iTPM)S-U(%b(n_*AkUht*dXsSks*yhe{YF)y(ZxE&wy4>N6W2Tgi-d^{@ukAW?#H~(GEQ0?JVJl| zsEv~DCraarDB>Ek=F;}ay;oBsfUSE^U5@vsY$Sq8@oe^TV8rJyB;HGmC&lU`zbXJ9 z1jqVKpXeT;ViY8E%NoU|Po304lnq#~2xdN7$HgJ`Bmf4^ZPFnb9$LqWKkUKyRwWym z`q3r~=b&}D`aSkitx%Ns87BdP(|SZC7h$LcHUz}dp&fz8gr~c4nX1w{xMpv;=CY-Z zw(2fFKl}h6MhTyh+HE4@m&jZ-o3w=#y!!VgP!Em=x4kw!k1@`|9EZl~)UKezy}1K9 zHJw=}npLdqoAaF~ZFSd4(>=c>+p)g%tyLwi*3w{o?Hx89jOd1+!kq4A9Tgm1aae$= zjIb8AxCUrggzJTy>P=^4774<&oIGY<_HZ*=6D{t>6)7q{t&aOy7p;g6n#kO37sTsu z4rJzLE5ydg70mvIpITTmGPVw}Q-N=!nbx|0>%kj5+7W7-TW5a*WtKJ;!+C`xhjK&U?i?$axa*$ z7w_Ole%@9AJm9sg<1Kgag`l^#Uol{83jRnoS)ewihicLGJr~2oYR5@lvBp*Br+5hB zNbv+)Zm~{PID{JB_WPc~)t`8lEd6z9^71I`06yof+H>l4G~0VEByAj2l1Yx0@RSMzvBe+B-D__YOQg7lKWM*SbTQr^?SFXFp3S~u6$RyK*~Rn~q&D=d!6X@Ep2 zCK_XQz{*aIB{ahL0k`Jwv2UU@j$QGig-796nofc=tnQ>9nIDFY(wjdx<39KX@ zQo4z$XyYtBDl9*Avg;R2Ae_7+dVDFIbUlgV%vo`08#CVMj36~~V{VdQhN$Haz)_wX 
zpHGfK*QO+@n5y#LLu4BrztzUwBkr=qnG9exUVEJ>B*?eOCCsjM=J`sqx;nP#p-o>p)t*Uh2bz zrZee%n`Y(~7D6Lz>#q)MY*zbG3WN`d;@vtS-pWrg@_R19>WPlauB3mrMl*kHBK&%f z$qq*+E-v!5T}SRo(lg7j*$nO{I6oo`M;sZIb(kJcu+h8C*nX1rdhM_VDW>(qIv2O7 zb&*-&38anvzV@9Q&L^y@LALPYr();8j0Z~*c9x@n_XSOby~m9k59EBp<<{{H)?eKZ zU455ba&&;Y0X2-|_)*tZ%rH-c*R8-#4DBsk{82g93BW7+*rh5WUuxb(`a%a~L)vbF zOat@&0rP0!rq6mr)EXQu^+CYWC>Z9++sOuEifAyqO!TR7vkUF~I1ftB+lf=5aq{%b zczAGI%cCi+8{WZ;wh5G6pbmZ94AB68OFZuKa?Cc?c~OW5>UPlv*iPXlNr)sxyX9YWrdKpr^(?f7&xwBdR z$YtmwcrGpoSI&NDZ)y}2szelIIu4&2N~fqHpWvnH9aP1!A3`u~t5HfD7or;7eJSIH z9Ii`qaO^DW)($F3xAI6@ZvQosel!TW`pdnNJW?mZ8B2g5;P+)OW@ihn*G>WKMqc6l z>U~yEVW3hm2{=pDu(|lGqq8z;E+xTE;dHJv(0RmZ4nv(v*>;a=%fLKh@&~LzW^K0- z$;G3OKd)Zm(TW?Sq?xR@+nxt}A7EQKZqva22mLl0@i4ptS+c() z`DnTshEy{+&G~rc?CPek@KdpuN5<@Ude1=r9ZaP!*wbypNn$1MUgT@r(&OcM_ZCO; zty?{RjBw0}tQ*mm9*i|4SG=r2dfQemuoJ^?Y{4wh zzFc-c%+tZJ%+naRD6S1tKT)0g?~KB4If>1Dw8d}mO_sv)xlGdiFSsDJ%*Ru5B5fXy z3+P>l(C@=_{l~+#mjdpNNyMz9`1)Cvc8l}+aFPwGZhPZ_d6^#+xW-PY^vNOzBzRu9 zdPlBkKADvup#QE7GWiWmNA77X4v%cL$D2I0?Jbc-*)~F@;X1W9OhP=&p)==BEl8rk6}oZKRz};XXnI zHp?jr{18Mu;)QYviL9fL3J3bk3KeaK5UdE-yq>*~cI@i9=@^5!Z`w0kZn(4iS3J*FB+Ld(=M80^tV%FyPrYcYG zzl{?MhbR$Vmn-<6tz6|ZEDt8HdVso(?0#1(51-OqoYOXpl>~d2rZ)akeAb>c8K;a^ zUI?>1ph)SwTup4Z+Xh1j&7?zDctwhRO-QpDA`7&GI^Y_)KKdueN+`qfmlL*(ay|%O zLP@@=yk3x<8T(uve%^$VYY14gG!}Z6LhK-vdkLG*HTBk;U*B7xo@gbl0Zwv1FmcnM84QWLB90SFvgCY{d>aXLLPyqRO(_}(4MTuG+ovAJ0SeNyx zGID^I%8sTWGF#YC5+G=3g@K#)Vq|z76r)XM9Jsg~X7bR>9Xq_HGmOwMj#$+Odqrv! z9aTvccSW1v#$j3x+pG-wJLz`6Kpq7m-{A;q<~P+=k%>mPb(wkHwn?z<8iM23Q`au( zoDfv9q6`>hD!U#n?X1uIJ8Rjm9+qm9n#Cgje>mV3MHYf*PWF!%+1l*dkA%|M^V zWjbK&1e!N{G!2*#PoF|Rg!#pvZYTY6DmmD>ukhvvTB(Cej??WxR$3Xop(_X)3r(vc z>n*X@Vhu56bAAj;ViPVg@5aw@pKB79%{QJk$hd6ZOO>koP3Z;`x+l(hsto{Kxk9m} z+KkD1S*dOE9(gzFJA*i`r(J_f;i?N|4&$T&sFB1D)Jk7%8zhl$kDyT3&jXxHd)_ z+~@~-1Hy&e+jUw0Qz@q?;Xm^=EB~39e97y`-18)f!iexKWA8)!0MRBtvT2TPVnR~Sdfy5XUQQT_T%wVrr zL{fjxJ(;h~D!TmEVmUNtOmq4v>6-&%7ea!)6aTe$de#b$1UJXR@iTTGqhDQiA&*+q zLO?lKSzi@l@>XBqj`>Q{&+8re^_d}$JdOBkN2fyMN7=Zw$_<**P&k5E_8vcop z>|8JJX)-Ki4k|&eO_j>w<&h{O7(_4e46Z_*9q7OY_nA zEN}T#qM!>-I}W!y+z*X6&ue~bd@tT*N*m*Vw!hcTI(|GqH)U%pk`!=EBw(%)fu&iF zF3-D+r}#?~ixi9t1vp)gK7U3i_y$)|iS=XNwk)Og>SB2SIhu=1a71x*735BeKxMiH z8nJ089A|j4r_M%jt6g_#Z?2FQ=8b)%`@H@eK9R9t%?VIYP$^_|QEv!ez)T4LH^A~g zQOqps04zs2yQNpkjPn6zc=qK+&P3uajq%)H=lf3H7P}T#w_&#ju5xzySe8E$=paJd zRx+$VStVzE+$E^Wy0lMXxW%guzS4-0(p^i8Bd4|L<#T?8zN6-lqPzf;Rwpse!{_OS zC`2n#il1aAl75n1WGoSaiLY`d^V>BXvQs{V*axibYz2=Yd##{L-bJ*|LdRWIc(Nln z7NZcve%J)enISsYROQLwC#lp_D6+`>{B^OG)CD9`)01j=6=|55Q7TmDZulTfh0*_= zTw9npFbweC2q~yYPA2CpC4#9bO;@wYayMw&)j>+58GWl1U^Zee6{vtJ$Y26#A(<6c z;K;21MF$YBIQ(s;g@r^QOCj;)`xM_?B-%Fud!zE3y)YhiuacR(eS*Q5%Ps`{q71F5 zFK}|9vXowW*#0Rj49SS}*&?U>FXuwq>u49yf?O@Bf9MRsFr+P&3q0O{Ba)gY=9L5+ zs10H2`-uTJ>?pfYCf@!^Yyq|r0SG$I+{7-9_Qk^q2IKn~qIR$=@{Yt1Mb8E=A2xoW z4{$`FL|<>Ixku4lE20$5HWIGe`hf{31 zhieub2_w(vpTxd-eq&Zbb}xM;cs@tiA-N{t!C!5w{LNYm(-Lf`#8WV#EvOOOaY@BYH^ zv)5ry*%95cGRQy!G}^O7#ZHUeA^ugNYCiM=?DMQTDfBi=jKH;LgJNZ!G#pwCr$qL( z9b~tns32qTu3_4160k1uox zaUx2UiM;&rN(*>)b5E{Y;dsn^g9rFdRtpe*X0MGuZj_mD=1`GcJWdb1l^ilcBae^J zjQn^2`~`*4Msr{2&iD5V5<3kuL!GBCQUZOqBsd|_cc^f%`}xw-NRtUgrqD&U@~_-Ue*%w3$Dm zerY3TD`3iWU!PS1gdy_rjI>C5_u2UU3Djz?4#4zyVwhN!&Z`S!>EtG?6v6xJb%mOy z=(;YqeDb8#FZrRSUffPKV>@q))=kKWRxl5oc0;bNU)&?9z*f_}`49g#rQ-c~XK>+9 z1SeJ38B_5NL7;YrL8+}47wb4g#&;r_H3$xL(O0k8K32CAa0wwzj)OP%Zx0CIY}X$c zDuNpL+B+YP9V(1GW6^5HWG3#=$0v#*AY=}$hD~IoG@YgOr;ez6yY%R1>C(bqDI_4O%cO)ZBCI+ zA(DqdM$Q4Tzq73q!{&wu_nta22RJ;u$18e5Iim1Av(;&{w}T{m(Yx?egq)7UuDMwO z^c6JtOZe)HJX9=uuzzovaou4EpXX9WZqk+&>~A;Y{ZhRYi@N*#>gqvrwZToFdBpKU 
zu6k)Ibdc!JZ(D%USkAO+0ls6FYeckjRl-5+vF`3?NFXemtwtvx!eMRZBcS+1^JUTG z;yy2Y{6G&OOLWYD^XGzqYulsJ>{;kf1Yb4R(ww7@MjA zm=U`^wXCJ-s`Hl>g#(_E)-l4?li5WLh@ z|HIK*2`SmS{x9k%`ZQbN=%O=w>)86%8geEAx;5ubg21cp&(1KdS>(yg&6zgzp ztddOJr4U!`(w>Z8w6$*1!koNG;m5lat4D{Yq`RWnQTOX`0j2hJ_T)?mG~*7#B0c)Z zZoZ~!hVYP`l}LM!U$V3ZL+VCeq%fPP%Qd3lD-zzXcms6pp&#P@L4cx5q!yPimSi?= zXIT-S(&4m0SZTYeSULV}uX=E$sN%yH%4D85#pPbD-g5&1khKhzuSz9)%G?4gXfzeZ zr>^rqlzV(C%ItWENE?t4&dOA^KDj8gdHS5}=pUgx31XFUQ@l>T!kn%=ra$|5yH|il zUx)j}U%#*JIPq`$`}$w#McTu)Se+qe^9g&3A?6>*Qn3lbEQ@Uznl-^%sYt*|S04ZW(X`O-r-(`mkDVNn{ zKcmZY5*VGUP9(b}Fhg$`I)wgVsdO>t1Y>k0gel%{OPU;}GN2`|$+U@E0s$qBtxH5A z(C1laiks31fh2Wh=v&H5(*|^lmEZINVpByY+q>f~>VN!&K z$NM}{^8A&$0i`Rmo$$I_9nUqI7rfaU3CcVz7%P{XVR5Gs!Qz2>woy9;Pomt`mj`&s zx!G_+lkEOo;JKwvBKF$86$_yOce;A6Pk#S0&zi&LyVemczKtan>s07vpRkO@;49FX z1-M3wYss+=WuS!xp2}Fex5(JEKL^0vI?*ZyTXf>|?%32tV-TzSG+<^^ghb{Va1GK- zHL6?!G5~7bL52?l7te1_XuoX9z8g;ARp5ymy5ICSpe<%m zFnXL2a*`;PEs$MLKRlYVP2WN*CwO=R*S}bHkFPJp#a|-P1v!#1q>BA;a$MP>4HCPX7H8b-pGg5oV|qGsu>F_I_vCR{yi(taz% zH2ywE{(HJ3zTg?r1R%ep3poDPf4dHcwfkUdK%9n-k*qSNqU@G37PsQ#y`EK3-=yz1 z=@9?tEQITK(pj(<6UE>;S)^t6*a+|G zlW(LYn}Qsr`+2@dXJ8(O7JR7cgijz3o`6q#@is#$7%eP-W;A`31bMPNpZ(YiQOe3w z0-aDaFv$v!d$s`n)#*6;9V5hwke|k98P_7@#>H`2!S3?qz{xobt3&0qkk%t=oFjC` z7vQ#9(_UePr#S07NPBEL(G?Y{#O9fp43BwqCt{|QWpZuj;ad8P9ZB&Sw4K=C$eR?h zoXV2sANBINJcCAy3BJ39e*QglV<+TDllQ|nZc;*y2km?o!Lx?pvA4APBlKV}f}(=ypIv=>ugy%w~wXHiv&S!|};*we%4 zS4DJEWL5pOc()TsRSL|CZoC1Ro!1!ecuuQ}f@Z!f9D{^DKK+&0Q+hPlllU#MH(*%< zqP;781B3%JCr179U-xcdmk{0FfU~&s7hNLL)3u2=V1^6!#l`$P|BQ$27cT#1rp}Q! z;DsxGzzjXF^D@XEv2!KkdHfN>Qv{F>@(kMU+~=D1JAy$tVEYKx2-)KGHt3S^g^?8g zsrd~^c?0&aL?|G{Isb-&l3>p=Q_u=e(5^%W>>4}KQ*;ol!6-lPz3EftHPtmatW!pT zr}m)jgli`{#-}F3SI0MCBJ|P>W zVndzV)dw+*Ppxl2Vqgqx0vP|s?>+@Ry+4XK{p=fb2fGQN^m^HOIq>rFTYcy0uUD9J z(&G<`_%6~fL7fV3Kw8clFlGt70c{BnjMuP$0^G2GMm)nnr^O3j%ie&jphIHV1m;p! z2Vucrd@&@M2x`rLa0{A(MH)C&czFXDVJr9XQ69Y;=0Fa819nT_fRb9+GQeO@Ux;2! 
zK0em2P6e$BB|LZNz5&g!dz1WU_*C%&_Q}lVqU)K&4fbo<|DsX(dLi*@A~CUX41-!A zz8W>ZFvDi_zesrWy~@7E3L7P7e!1C~H((_cCRzUmDE_aX zD_A4k|Bv7mdi=*Tvi~*u37Z<+dC=;>8^D`?UCQF~1^dDFHT8cJ2EPHVm-(0GZ@|@W z2}0N$ieZbIKM9)wP& zJ3+9G)`6M3y>^9t0K0^=wi^0e`Dn??{&H*jY87D*H@~Cs3%L< ms!j*Kze<>gQ*#UOhV=~iRDbE*seX~cTM62Ey@z%Bw)lS$^NKkD literal 0 HcmV?d00001 diff --git a/projects/DensePose/doc/images/vis_bbox_dp_segm.jpg b/projects/DensePose/doc/images/vis_bbox_dp_segm.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b17f831724797da574a171b568225194e363fc38 GIT binary patch literal 79599 zcmb5Ug;N~O7cRVuy9al7SsWIZT_k9N1(!tv!QI_GxJwAU5FCQLFTNzW1P!`K2%6;b z`|7)Yz&%qnH8nkb&gq%1K0VL#{9F0A10YgYRZ|53fj|KCPIY5CsJd7Xuv?4F&ao&%6XhBSvQua>pPcB?BqwTBSEF2v2o zx6L5Jd=m1P)N3Yw_t(#HQkc@Fl(#jXkR$LO59Rfj-2Y+_RP42P`$D!$qM$VO5r#NW%rW4teZ-Y?+UK397?D875cUI8NhHx`N*=HfSQ0mS!r5g3% zu&PDO?Uum6UboLa*BC;tx9WG0KCZOsS{E!z3~OP4%n4-Eoux>No;HO?Q_2|{+9=c3 zoCZVoI2Js2R;;Hdg*E9-1 zwszawkQ*Y`Km2N1I>&)MC3Zv6UyuJEz*HbR_LMX#fsgm6GyiUHu-Jf(T_*nlyDuON zXiod9AsZ_xQ}vui=%bUz*8oHkgmQ;J33XS~F{Zi||mYi_5iCGNO)^xbZ*b*_WsQ)U<1_sG2dw_hbsUCD`tt<2N zG*OA08LH~OZ1T!h-|WEuS1Hd?_-pV2?2R_Il%(zY+p`c$OD=YMzCu6`XTkky`$k}| zD_KDwT#4Xa&o^{Z`m)iYapW?2qU1J9&qBFzDaI3V|8bmSxln|-p2+;G<(mNmaIwDF z44!@|KdqoN@#zifL4-Z1#Rq0*v&3fik&B?RX2ymZOceK zg&7I)W*Z9#Yv+6(kSI%E4Pu;7qmSuzBUzxL^o>~UJ9fpsXDFtE+m#5gW{5OwG1ty`_D`mL2H%Cx}SgUDX}+I{+AT3bJgO$AM-$d{DwlT8=4~=uL!JdU5(%qyx&>(O-dKfOES+SsLWSsv};YM^(?uMAUzp zEH{4R6zqOKk~Nx9O5bynr$Josm23XvU;YpJ)mIxCs6Sh(Xn8^Q0@IS641z_L`)l;OIe& z>vsO(!40-y^-#Hy`Hv}^v5mn)5A{+vGv-$q z?z_6qp{2Hbx?s+FRX8SZEIrd*suDsr0qK*@2FZC6_w_&^9oXmg-u$Ifd8teu~F1cQ8K$+q8 zEZk}=rjE)b+yhR3-k&^#%@dPFh0XZd($LWLoNkA~c@b*$k|SEa$%PSRXa0}pMi@>g z4dNjPUUz&;%dxRFDxq=g1+1kB`&HB=LOzXioCvQJPc`xDk`CI!`>? zNYewr`!PMDsa_!e*E$}-{CVGUlIj?AR}YvX(3FUl*JkY z!kd1&VZTmyt}(XYZs2q3WVzXTZgNgCo@8AnH?h%}0a8CU9Y>bVFwy;D;#{y{PUyZJ zhY1VhJaN%4QpL=GWQNc`R9kct(QTA1bIahw(ber|oV)1P8cxAB^Vv1WKj@8?~#G*0>CX zQUvT8-kl-}@dNn*U97W!p4H;BXNuba+d~T>MnttVsoBq7O>pjQgREOMe{zHCpK+Hz zLSN-<%ayO~@}NnmYy}KyEW13**jUAH2*T~e-&W$GKv~@Mdy*{T0%)NVbIjz*_T=c{ zqYpp!a+19$ubY*aHfk@`Ztp|<$kJwj1ZzWCjT`5a1Dw|RBzWmkMTQ|A?VVDQ>G$H^r+4q0Nb4*h<^cWil~o+q4ZLdh)af5`;1r84ZSTa6teQrg zDt^4J7{s00!5A`lzob@MHrGv5H|q5^5pyr5GWG23@VgI2nPZq6ky@oBKI@^yC_lTR zeG{;*zckvK=uNf&yOvorRo~w*8<6_Nmnpc}eY9@_icLP0yR7j}F zNyV=LN86U`CZQHg92E9XN1?JE4t2(9G(rVL{8mZ=sZi;(;mgyco_Z~(V%uqMR)E-3 ztoYc5rM;j6)Q;C0&-P;Qvf%o3P7`PqawFIojPg2!=v{S>(5)v;?v}$;x~+t0TS`HS zBqf@=lTX$sfa5lF=X^ATvZuL4DTj;a^s2UlCZ_wrH4FyC*Bi#Uks0MkA|S25L4)(zmZpk(f!w#Z)wcnb^Cj~OV(2Ve2m$&y;&SM`;r{A+K&Du&$x)Ou`N?_RgdMRJjLwl%W zz5^~Z`x#w=RV;gRK~27$owl}qZx)3%9?qA%?tg&0K8K<$ItX#eobW}KdR{e*;6afd z&1zYB&$>ubgT-5#$4Hy)utNA;Zu(7Uu2Rkh^dm*WQ=bK|;XB@J*&lKLW&LG3db%F| ze4@W^u9=r?MA#ZdA9nHj@BNtLm1-Rs*fLW!9HS8v$q}9@OnXs><+rL$4@_3uqV3CL zRyJ6~OQc*o^F*Aibh8WUr-HmX+>_k(3f@ zs4J#ysTEd)YPHfz#}Ra<8A-Zt7Du8V?a8Xfj9W~I^K#&gCS5KYgsg}H)Y-M{y?AO*QW-y4gzRas7 zZnW1~0c^Mz(P}KBY-Gl2JyOGZ!QJqoyQoQ*mf@{oMxC}*PNovzqub&PN(oSAU)c-% zZTTXGKsfwhncgH@(N0v}b%G&9fVtAy!dQ$~s_ld1`uy${)~W&J&}PiWJjxx*mK%W` zB|LjtGo#cN{SA@xrVDa7Pj)#*l<0RPuLqVQ0@s+JPspgG0}f*rR5np#0a`%(x^sPLc6hM-=j`G}&1Qro&o5su>f< zf{ZJS=7%S3sl^P?s>w#l1Oz9ZgYNqcM>zr+L4am9%yg(xRwS7<9J2MXv+I`8nBqVw zV8}RWT_e}O3Y$^T*Yf)#FMb~;^=VOF3!%tWD>HP&?jzFC1bQ4)5(2u>l{J*%VeHtF z!Z&3QIt{Hyxg26rUdBsM=w?j3kz+s*99;W5-b5GX7E9c+#4si@0z>kVDY;Ylh0_zd z)T&<8>T0_ygcjQpbpeaec~(o(_EJt9xqe=ZhTuPl!lc(9)jVb&ZKi$0Mz;Vwi@(7S zJ2M#XT0QN#KF&cfoX&Yux))xSvl_VC=6=iTTfKJ*_&qF6H#b{fDNHTRPA2p1Qae@q zE_N9JA)1~~J&pY~I} zV=9`f6_MFcB@?gy2LP1Gqkfg%gNT4uX}fwB$Aq~p>|{29{eGA<>0v)B;4oc|RL(p- 
zqY*`y>Tp~CqPpdJ^fluT3-CHe9Mx^Zl-BfjX2^F)zXc2#V%ULCiq^3$a>N=Q-iT_A zf2Q4F%8KY|hG)Yo4h|DIQVatYC*Ot?=sl*l1D)FgTR(;EAEZblKh&pgYy{R`)x8&) zZ+)Mg=`JrRre5M(CETKpsGTE-c!S^Z{{A5>l$U~Ut<(8TLp<1}`xQ-_(N-$$4t6no zIOiiX%(@uF3AT--y9&t`@6nf*Qb-S~a!CCGUMSC@_BrkKVzlTd05uXy{Y>8wyrkoJ zFuEDHhr#Vj(}e+4Ch`|!e2~99v<7?br$=Fu=Xh)EPNt{e_|2Xc_7SK8%Gcw*tuvso zV4sRP3Q-1>5xDndT-tlzhDJPlmZh+KhKZR&iWf<>J~d<5pF}|W?2u;nM%|NOY4pFk z;$R3Rsz};sZnjz0fnpTdWziXlggC_6zA};-J5$i^wIzhg3R1rnX>}Ci*#fIbz|K^T z!R8#(=7M|>yTL>RSsh7h^A(_%DNv#bDYTR|j4)_o{Gy*f=++!2{I)Yio~Wcg3-vwW zG*}M;UqY`N$E(adR4Z3-&`Ax=EuXITnG5)%a-u-GZJ0JsJpsI*ohNG)wYkUIn}|YA zyl5^Y2Hqbwo8^H@kl5?|3`#kR^(TaB9@uwgd;J}@{Q8YghRxk?)k;ClplL{Yf=;Z# zdP?xUMGHG!1t(l7TYzVk1xe~B@^?m&y=U8DA4E`jCFz4XU1wFq0|&6if1B-T4Xge6 z9$mfU-QH#*^ZrqSz4NGu&}Lm=hTKst=4NtkaPB!hf*9t}+nF&pj%Kz7zMjNVK8$IU zn|X%RqcT|LvS;$&{Pr^Ln7j7`h|7Kq@p<_lieo-Y+Qr&zA(9Ry4pV#eAQow}1+23_ zvOXu6bR=#b5&;mAMTthEr6V37Knp8QuDzH9MvndRttfT|4_O@-4U znXaO5ML%u8j!^Se!#C~V?4-=GmVxKsdrv;RtFZ^aCq-y!jw4GX_hLFhqNjb;7ZFVk#9gJyR#7D`?#l03#V!!-)(Xab}e&;xdV13rZU{2MQr+IeU z{iDRbmcx}>$n?$n4S~SC>s1}*YqUI7BhR8jlvQ$X{>}G@o!}J>nN>JieEvk)Me2R{ z9Zpa<4N(Z1zm1i&P0N!V(LaDczwzGXh_L%{)V2aAS=3{gQ0y9A#jWBiX_MtvG7+hX1;&j9l95#upXQj>2`r~rp!}4AW40$N0QEF($9p^J`4h1>wRgze*^7O>0 z)m%veJ99-)W z{P$TeWEzrhI|*UoH)mmu7{xhE)jf{1^T3ie;1m|Bn^)Jg`$)|aIzoJJEv>=958-Fw zZr|%}wbt4oZNF;=0@tX@(nID*8X=jzUtZZSPh0X4bA4G@>W|0P?mvyOg>$U7G{mdu zI}3cKB>1c%B-}+N1r_M^Hvs%8$t8kWWeRO}jn_GNs(IgS)2M^)_SScpC1vCVh+HwbBvijipGWf(KC5v6~uL$t?Y@_Cwo)L>0 zLa6dCnl32W=%w)5i!eeOeDqZY4||2ieEqWNA73+;pvrEwT@Og-5l?a-PRmRS0~sC0 zKIwXCPt$d76HeGns&{TpEBitF3A3D`x8g}F=3>f`pbW>vF&bkrf~QCGdmL@BU}4|e z#ZQ*FksY*Qkba=?mJQSNoK(D#UNo?F{uk+@i!`T?4GzEg ziAG4|efwmK-^E^Du<0HaZ@h%~lsQZv;@8?t1-gvls~4%_>)@J7jrj+_xgLGfs`QSp zY;$bMw@xyB+RjV%omdh}K@Lr`4)f`R*;Duc2alB6mpzAXOXJnJdW6!ur)UeW(tot3 z6}Z$T40;k3cEhuaaB?Rh0HjUB^)Cr(DU!MBXrW%CCd5?_+CXFHcPw4D`6CUlqk1Zb zPeDEXE;~smVYsLPA_1NcJCc5TNTow3QO5g%#N%;f5H%?i3m19kc{S(uhp!hDVQ3hp zljbwnmQ8}Sbi?#v{{W+-{@-cfu)_!}tEu*8Z9nuhrF^s?aMNnc_eq`urAnJ~j9T!y z`I%D2qVTEERC3KN0siLW%WfQyH{JT+Z%r0msk9^SBfeziUzMEWhbqqg^)%?Y$%f_7 zOLH}1-v~HbboTepiip1-n~n_@w!E6c{om(H93^Srv-$M@s-2uzmv4tVIi;UFqD`x; zY(43JW787GHC+w}ud+cG-;{be5fCy@w48qZ5qZ#z%yI64TLtFO<4-nk1R%o%;VLL& z(6kyelSo}LtliqO{t#;T!DhMONk5kL>9YZ6;&sLH95Aniq|ja3*6IYT7REy^xY6$x;IY8`AobI-TP)d6Zb!f(~GVPNNnWa+=a7S zQWe5L{;U)?eB2M`aU_lF^Fgzy^OHqWt>>V_H7@M^`)sW!!Fx{x>QrvGs^pkGzvzTI zz(sy#o&gxr7VViQyX%PR;D_3gqWnsk`Y6)omuzlfVFY+l61_AT;_HM>*vk;Ib%qXF z4I0yztdjWrJ}CmasO}Qz|1zD3dZu^AsyDYwIEwn^ifPSXn5MvtFgFqAI9 zOt(m(nDiMD6iIqWv?3kpS-m=wwLv(G7Unw_79F{TvzBd-a4jWY^#6{ufdlZKKv2fC zPKOg!U3CD;&s4LogP}cD&Ryfhs=2w*r_H7$KA7_%ezTMIIp^m7@@x#<;;>0HV{mF{ z61?prS^H>$Pp{umAV;9ldwl9J9SJF84m2WBStw zO=kPkRqy%5Xfs`X=ZC9pj_lC-pV9OXUM3Om^L-7%VS>UfH67cQ)rnODWj5_?4Vsc1 zrF}w8K*ba^lcz^RFk86Bwmbwac)0Na&tG=stcAXCf45>`dRS|^|`bq%rtJ-RyjC{jN{8g}zHy1d+!bUhG*O^J21 z2pcY)1gZ`u>h|ugJ?ds#&_mGP0riLDU49Ws=6RZW9`p}$=C`k&`fcn_7lvR%GhVEZ8J!azNP62h4w7I@C^LypE3_1mWJfI z5$3Wv1Yr$twh1gUG~?x^teu^m8QW1?&;FD%q!?J6xB=upH|?|i{<_Y4U18V#PU4-B zl#SFIr|X;X@-xE9xk~y8c&Ly4Lbq#kPr$YRNL%V(k<2oBO&kU-H?&ZfC+RiwvCQV) zFK3$V-l*hLN6nMv4y+4z#bTnIebB}Ca|{wQN7t9~e|EvjYF|BfWA4Zm#ESm`%!NJ| zo^OoVw71B7Vz7neOn_sc)^JFVfmmNo`#n`6N5K>%4_CiqBfUKOy#F|C=I=8f3TyD~ zC&ua-;-JldM=QMV9;)>>?9&4uKCHfn59$$8BY??R z3WRTzw8+M~9`sY<`3LF?lX|6@C76A0uvTT0wrFJ!_eiATA(nbmqyjO)Of?xRe$R1R zJaYI&;M|xQXj$BfcQLZiJ5hFu{?8Xt4|vQy@o5z#BB{2*Gl?P$>ts0Sz(9AKz+5;xo z&kW59SF?mUo0{FUJ)AECBiHDX3(o5H4i08ixX^+t`L>h?UWj!s2|Ss6;{@b#ID)=t zOkWg>U}U<&$WFSEqoE|roQEWurTa^A4D;#y-$zS9{U%|i*Vrf|1YM1CfmTsxJtJ7= 
zP$j;90Msqf$qXv(pb^xLdi>iz4|pIFNgCBwbcN%AzF;@XtE8^*jc&AN7B2>F%#za4W(CWFPth)f?zOhC}A-&ByDY5i?w zIP%v}Qfamw1a*51JMSCMXFj}j_0FF=qPAm?&QL-TE63iw`}v;YoOap(VGp{N zV*CdnA!6~S)itNt5-EJgwI&R!gdf_);~}PVaxpdFrO8tS7U_ZSX^~UG6>G-M-!;ec zr^|e0Hj?6$2-;o0Y}W%r7U1w)Z~2q%QtXTA{^HYIL!A_D%V&Au@ zQKJ9F)1)`um+)e|Ns93;H}t&I+V}?mzbS${p%I{8A%~6C?8t}JqdsvnD+0iq7utJa zo$a1wM5Im_!}Tbgem9bFM{c5wXwuXG(y^mu%uM|>evZJ>YHwmBIKWbgUnG-PYpm0t z-**_r5o_^HTcdPJ1+aNpj%FZ z4p4H?`onNqlJUwe%GJo0erlCtum4C|U|1zNHd{fA9RTSy7_Yqf)W#W!XGlo?_qWisz{W$x z!>j538X|{)2tD%m`^e)l%E?25JZu-nJKHFl5+clB0|9tB4w!I=t|ID~mC$jo#dM0Q z@S)^af$Vr}@R<)rKldPMdmukH%M{a6xSB9oDUCINUVfzq$Nbv_dDe9f4TMyMl2%B( zA?oP5H*@>i$#<4D*-$b3a4{9{SI>Wh7dS zkBQNkJXA+kLYSB7U02Lo<~miZ|2F>mVGBu(bNi-G)qm(@eD>Qf6wWGE%d(j{qO(eJ zNY$s77hC`S55SAcNw>Mqe<7NHU3KQQ=M3eAy|%4@ylY$w?|ocus0VcQuU6(t397b( zY1Xf6@17bZTCt4F^KY|Q0_$CEP7|h!Zm8arqIBu-93T6U@V@LwJFvpi`u%X%FK_k$ z+xFs~+gboNmal;C`{7sXhI1H22FhkUwA#Idj8L@Hca%W*46D9;8i@^;18KoVM##He zGiYVB3knfLCzorDodEb%Tg)bUODWO8+uVrf+Z=mTLNU&auhLMmj&)D6_x0 zBgjwhJc$h$@!!J6?yd$a=3p(h{M5Aa+u z_4Mx$1IL{ipAer`#cVc?WNx_QD;SV$+S!iJ8GPz>Kx63EcnK{Y`p&fNSbS+({TU_yOb}gD z?HZM6)aY$2M3BUp=7TTbm$Yt!tYA=yL$D1mQ-<4T0hSE4#cQSwoL?C2zpe-D_q#3F z@E{x|lXrO^GBDa>mjcWJyDSd^xhQ(3U{=}805|#xS`>(d-zWo9<~d-t@y};fo4vla zciyMr{EFaqOW|MjZ!T_uepv^0mjqMoN0_bo}+uRC^0#=^?}jU>B(JG>n&^ z!7~pLUk;6Sjcr~W7!!+C_ITADN)rB)@PvSnK;EaWtNFo^Y@k%Ktg%mnE*88c=3zck zS>FP?5gfuvzCfn#iOA7nI2)S@s%=3RGsIvSOD?0KRcvUThNeILuHVz3e!#e$Xz24D zpO>cjIoB+0mBY7c*0NfEHYDSpb5TK`AMPt77yBbs`5!>JQyX^Z7u_bJsjnYJ7T~ zbFy0a8*(m|K9J2LcL+d{*}eKr`_(qoex;I%Gr0Eb?@ZBewdP}RljLo_WnJIxcNq+l zN5$WkPuN6r+G{fPAbr6L;nfg?*8G(Em$&-pSg9%@Q{L}-bI-%>dsca8wfrrOuMs`Y z0~%<9-+HGdX+1B7=*bj?i2)TCSts|meT#wGD-S*OCS2ubeG89g0_+xT(jvLSx*aQc z(`X-gVE-XA>N&?#iP>G^%s9onLIjSE_TBJh^@!;+g;nZnLVjuANV}&vG_TrUz$OOH zeO>I8LLO~|ZhoHDE84jS5?|llj;rK3zS5b5A+>kisGZ|CD;FKGnV7$SYxwc?M}mU1 zQh`;@GKdikIz>J{H-`-=SSlBP```L}i9iY=Sq%>=*k;f)6zd-#yWcm=d4@lv`*|aH zm+$>Yr>6Z8T+!1|UX*;Ou~#xfa1#5>RsT&Lf&PDxkY8l1hQ3TZ7G@ch)kB#(Voq#Oni7S?rhXi%WF18@3@vV<2l5qG@~TjxsVYeG*!h zIe^^;_<6lemfLkm?YGep{3{AeYq{5Dc`u^MMLFTBvhgd%f2T{9AYsYEMSKjRtIj^| z_S55}9Jne=`=yZd`PVu6wifkw%839xA3GWbMIq3EjYkVw#B1MbQ1U6_*ML7ZNFU9j zYf~O`8JNPsw{u0^a-0guh41T#4x!LTeke9F1O3T9cArXc`t^+=1jvz$r|@3EGw7V{ z1;I(dEtwjnJ?j0=YSCBftj*l_>V7vbl0U9_B%_>7K`F8Qk6XwBGRCH#6J;hx&D3=U zEp&tyrL*q{DindPyY!*oJA9CIQV_a0!6=RrYx?S`Zr1Ldg*R^1U=_sk_>{{IPn+=FJ-B-wi&p2>$fA2~3@Ze}F&4Mub$m32%J0&Lo(A zmvbT?3{Y#}eYTJqqRnD)(MKD78O-lwht|_Plqf1}5o%yA*|s0D|ADV^sA)$mM^UG& zR~%^r(!;FLigy2AMcpY<5XTxY#+IuX?4_uR+7wBfz41vg!CNa&xsyOZ@M`AY5%$O( zqU9e)fz6lP zxae8F^KsysRi^1p_mW0)qtgf;;7kN;hG~u1(Ej#u-Ah@#%+ z%3F~p(GNlOja(c{EZO+FsXg##rn$Gjq|XuV5isGR(o%N2sD#+0F;h{}Npc~TS=cv7 z1P)V}K29R0wUj^dX28Ujh_Yqp6})HLvj{Et2a-;P2aq-2itwW{SL78nYB)`%8qpwv z!#-gVZYSZ7hH3XiE|pR;ayM}m96EKB$=KT_ag>Is=QZmEY-tpw5OI;{e{gJo&e1B$Uz<`;aaP@ zgzjI9qcIeO=41Y=$a5C<8C{kHS=9#Y8Pxc1l;mVs?^jZ##|qQyIyr^;Xud{4#PS(w zD&_c6tLgpftrmHUD+v7-344H-q;tq+K%auGfm)IDN~w5-nfX(asF)ocBh*_{5tWf6 z#Fv5l!OAF5Ns-kAU^Ls-zwB~Vu{z$orNpD!dy@8Ny$Ee#gNx<^fB2V6s)w`P8QH!A zJo|wq&P`WyqtXn{P1qEaAQhu%@Yo`SApvtHGM8w~CONPmPuYn#wv~6tY|c)cf$?Nq_o)w6Uj!iRQ+r zMgNl(_nG($9%L!3QN!*>mn~OEP7XG)NPSWTBv5eAku^;;V?Ir}R_zoz?{}McO&wgj zIWkfwlgzcrXVuQ;jbbzofdJ8-sBZu5^>kY%iX-jTvt#gs!eP08H z93%o`Y80fB&x6f4gidVl^G<^Gs)GaR_YW}TC)=?ISHrZ0A-H%GMMxEDds4;@w6goJ zBN1OL!8cq41CdS=;PaSIDP<}tUfB{Eleo(CrJ-O4pU?IOpQi*I4+>*4Dp~&lD3tDZ z1Z_)%@Lp$OvxE_qbFsy_V%1~Il?}WpHOgdH==&+%{N*ca`qL4<%J%C_lHtPi0}^4~ z0LE9Z9)I|IBep@Is1_DJxU`x4X4fZmNG3Iw4~k*0x6RJn55m~hooDsT zxDzM5n;I+bFG6WahzY$mXRXj$#zbmlRDXd}S@u$94%jDnYj&n1yt7{ieCWVmeoVrl 
zKdyLO4z3i428#CUB55qyhU}h-0>S@5w~!ukLJ{NnW-7LZc*AyI;+U`Uc&mcuvxvsB zs`*xE9-q#hE~o5DWHyJ`lruyc9gxePHBoi!y9;-U=>pbNt$Vy&{v`SN0<@+@0&>H`k8EzR~M3}_KX?ZKEMI`{+h zfyF|6asktmm_FmB)Zr}oC+JDGb}fS0Nc}fjCXwS#TUSpv!}xg_#9O2MwbpNn@u}~T zVZU&5V?}m@zOE(@m8Xdmce@|O*}ovJYu-g6WYh|4r!J2+G<<6xMyB41I)1@SJdq&~ zBbBgwYdWi|4E4^^V8UJ@ep$DMT#?w`c4b+khCUJ&GLIpC*0Q5;NnxLu7s>BP8Vb8j zk5BMVqKN*Gj)k+#>|h*^)p*M|N&bF2%45UOVi^XQt+YOfI%Md#fjEH7QJHOg^ThN; z{0>ZCso+S9u7vO3X^*X>eXUw5NPxr29#kP%(IfP`u=x-c zK)$3yv-g+YZTG-)mA5H%EoHQzi7KYB-o5l60LIJt_DeXDTu5u-_q*mm(PgR2DlIM| z^$Slfe&absly?`B{o1Ce^mBo4&)XrQ3=Y596@87y>i+?nv5`a=-|ASCjDBId`K~oG zxZKFpo~s&r{wZ5AYhiHYh!rR_T8l#hg9G2EL%-5wFeEe!rEVQa8-mh&LKg*Kw)U83 zL#*?q{RU<|^#ixVi?7ci&6BxjK}XFNCNl>lWA@UR-^;m|Y|?xq;>57h9O`4nPzPXv zJnezbUAE0sG5JKw@o0Jy1&a9cVQs#BPuEg50y0Iva|Bk8&whKhx76gQbUcaqvvV5J z#d`RxWT>vw^DxMtmwb>-?{>1vE@_q4GfGoccPa#veNw+`YPGB2>6L93+n!SkZus3t zHZ9+s&El_l%a2QN4B%UYk zyN%8OXFHm)onuA>bzDr;Fg@$Y8p_0*R%~)fj(^i?1%s>8qgnEOmxeKl^&5 zVWT>A%i7*Vb=wQ9AS#Zc#szY9WQXiz4sEF^Xsbok7*8mCZqzH*G zJJ$YyFJD>lf<)B5VAP6F(nsz^j&t}Ouo=7;t;4lKjw<{A!Lv&>QrZ>mknIO+uc#M{ zEs{%T(X*U}$#ycaRiyJgS3O*_;D2?rXY<6Xej`C~F$~h) z8iS=+qGm#S2o=IWs;n^zNyqTRuxCQ2FaiK|-EF3BFg8O(5wH1v6${zm)9#ej~ORp2-d z{T=Jd!{2`ZJcUURPJ*4~KpMfwuL0SIbe!5Io|^Z`zeO8It!I@;Pg8l-Cx2YI7=|mF zMknJ-ifXro`~zs}3zNVZ0-12uTAK-E{sE4|Z2Hw8DcR3Br^?IO+_U-p157w-3)Uf=J&S6|8G%f-Ba;U& zcpaPA!VOAhR1Cg5O_*c-lJEC1lfhs8>p<^+mw(@L)$%XAf9;+jiCT0gdn+e3U_V*n zn>qLe2#~s@NG-JjR;NjwlRk|jkTLof&T0VPZzC1{I;Skt^g3} z0{U14y`>ua>zpnrg>8*1?@3BTYP+Y-@q&nZ5f_&l)Ayv?$_Uk;_zZqBf>f7>9`mzl zIb5XU8*~2vEZ^Efhven(>@`*&lIm?vu5mvKp})DnI_&AaUpdF}Fz%U{7FZbt-F@8Z z_w$Xk=}jKk3A~b=Fz&9K)lJVfigMv%6sCNZUM*{USMLf)vAXuKEuA>m5m!V%5zVz;I9^mv%+E4A>Az% zFPF%OU+=$9$Svc-bKu~Cx#9>mG<^2ep8<>#XbT5M#wK}06gUoFjGF$)xXr;J+lK(n z>iHBq?y7HC7+=9=2K#@F@S6YVP7?_P6N^_lRGWDk+8{zC$JsqhNAlTMnNw8Xlr|;m zNzh;Q0h319cB6+YH2DfSmuqY8wolwJut3$FvX^R@ad#2VwCQgVMUUpdEIb2qV=}<< z1ZsH9^@8@T?Lv(mNd}GKy%#6xPaZ%fOc?LDpy>9LK#B0ikg|@%DJwAodnFQ4%Fx&0 zDN0Ks1!EDSob1I7@b-|$bQ8E-uR_h8mL@n7rdFsOGA47D(;WlC-Xf_K$&A(&Tqfvv zcNSpl0%Y@X(NmA%n3lfloAI+)*YipF0ORQH$d(7<<7norK{ki0yM8cgZvpSrqTgo4 zMUv~Q@1=ia)=@7htzOEgCG|#&vl3T(#b}BQ57sCPQ(B{)w8$|oGxT&@TVLlEA$o<@%H8@7CB zix50?^75De@9Eo|sM20I>@$Q^wUN8Eb_)%2$i;`{Gei0STk#xZ4=z6jJ)?WwB%?h{ zTP4}t+^LW7mOcwjmzeuLi&C`W5>jmRp7F$1g()Z$=4>h2;_n3?XNGfga=Ddf2E+{f z12ohVA;g^WT5(XGuZ&S4f`;;g+)EFrqYnYrS4TuF(oOhn+yuqBnB5%hFLhX^Vv0)` z&J#nhR4#??@DPjn+pP+wT{}tdK}jxd!2aJL<8zASQ)j-o84mpr5b`;j+0oPO;2j@N zWg<_!Hw~Ig*gH{Xn4M!03WW^si(-h}+CXJ%K-w@HYZQujqdFdho-#i7$x;l|`z8PE zxn(??Qv*)p`-QHfH0Xs9`?yUX2|U|)RFSPa3*H^aOT=kjfmD3{VGlGG9a3k@M_Y8C z-)rVxJlutoFD5fCX6=4Bi^<%aM7+wo0TdoucVSzThaOaV3Mq>t;8u z=7aoQ8}&rh`)#%7P_TgnZB!L&x7c3f*j6q^2~Q^IRM>lZ4EF7f0Y9sh0O;Q zd%Y?VJAtzmU)er=c8NnJ&W$iqAQ&vX8j!wuy897SDtYTzk^k0;3RAk=ic4^JE5+U2DZw3zl;G|T-{G9A|J>}G zjAZPw*3MqDlWikD~P8W~lw1}_7qH6isOc5-jE$zKnHGojWwm%14- z0;T2nIE*WUAC|zSk3K3Hf3I5Z`JA#;Vmu1tWg=JGg3idsM3S(@v5*dqWk6MEQpL-l z+zFUVJ=c^SF>utQmJPVlg}T}dr{q`2bP0i0S1YYF(($22*Joi{?!ZlB(mwiJJv!F3 zD%rwfY=zsQf}HEmmnx~rY&xj!`%n+0c9&*=VOHjxpb$Gt7V%mc1xHIlw7@D!qEMKv zN1%ixu%V3%%fRB!fTXhVQY|%+AM@nfk3OCu%4Do5QWDFoCh*v1lf;LhkBM|HAz(ea zAG(*leih5AW2cnMP8C+*k4$^9QVbrn95K}!G!@x?zv@I|MA}aU5cNez9Xb=HpAA}} z^5a9F5e#l9&stx0I0hsU!6`yYAH8B-^Nx}a{op*UJ0bXZ8o@h za|fkIQ^Kp!LL%IHa`M7ng#xT2pOG8aR`V=edtStnr_>pE6w-) z5?%}5Qv4!=QJ-~JA71NE94-PX$oJg$tin{a7v-MyCRY{Z5!?)^5TF5ec|WdgMK!F( zUNw2c*eWKg(nEpFo&LP@Iu$BsorWfy8v*97oH(m^dwc08Bg|=0n3sVSLf7W|6EIC0 zwg!s2G!mo9Q!E>kgbyQ_mN1dkK!r0MslQ^HrSrL2u>M`Q8DTXpIw`&9vclE-wS1~M zxT(8A0e8^2<@%?O?fR+(jOBmHiojbUVvVBHpvaQ}M2eo3*J|4L>UEZjF3N!l-@#+} 
z28Fp+gw4UrtE<@J6lvnl_dN?5`#hBNd6??~XK6jA4Q%zn(mjHUk+-2V5$lCT%z*w% zv0~Hmp!lcjm#$|wJp|Cby%ktfjQbJVtwo}fJ(4Pe%M%bO<#=%=Ur{xTGq!*2jZ!>L z*Is`Wve7%Bf!E9JCF0-|H>o(8km~pCRHD4!UReA=C#X(*xe@`DF5!MO@qn!8e3BIm z!`JYPSl_Q&y6S~Sm}n^%A(`LBtljoGG2vrdkv>mepAvvfkI!I$V^C2aZMych8KZ6W z=Q?>coc2)a5V>)*FyA@u(ItXFB_~xc_)B=bRPHD`4|8*?*Ck^3cni(ne-Htwe@Q!W zX~=B9=DGV{gN^+8^+|5RJTZEtM#`>+%*JcoaXXTAcmbPLFU}ZN3JJ;_xfgZMY575s z=dARJ3O_Ni0{KKl`PnTp_2y(q;5|*fyPhzyls~ViUlIni#;FcZsX8N%H(MFXd2?%r zu0Hv{F3%}U58*0cOO3bCVFt!n76TaHvwb|4kbhT?Iw~K+i+afLzo(XT@Nia=4Te2sg?sa*6m?)a0zr8 zQe6Dw^-TZl&gYhV-K@A6A(9fsj+JcpliZseA4BzK(!SPCY$H4RZHfsqXJ)J?+QqQZ zw(RGH^MBZxnh>&oDz*bu@JihOOK%8#*T%vD&S_I(;|P&D@+k{&e!ccZ{>C)n^=*t+ zpK=W`n|61Hm$5|0wgZ|zWV`H!WEe4)MkWg?5XWUy$1c;hQbR!d@R6EEZT!gbyrS7JWs`5N?!ImrZ5qdsuU8-b|$+ zVX5{YQ{+Z(L9uzU!J@9uqX>xXTQ=gLX{BGp`~<6)`u9 zTvflP%gicCfDTa+!EPN~xa5v}(SQ-VSfEF5%f$||+byP*y3HQc;KuzA5ETR@^x)Q}0N zTRs~7;rU}mKwC@hD5~SGN>zISN92l9jc--I zGYjPJ@#`x!7BO{si)WX%&Jx%gCM<{k2fmB#K|`;WZ$G4^=6ZP1sTM5D62qK)ia8qP z<(4GE=djB|z(f_1d)M-L&26LY?>|lI8glhDCmk zbv_b<>zh@Xq=M@?GYoFyLfwvoAPo<|f+`b$idUGI%3lYK#8Ao3)W#_x49jgV!I2$j zqwb#~s{j6}$|W`ipV2w{u~*x#JQjPw;*3U%8#&p2?4;yQp1!fYi}eCSdH%vC`(6S5 z!lsdb(SHY9vzX)*qMF+IcIuOH$)*$b;F{Hj65Q9 zR$V5zvKfUoqS6wq`I_g~edoq@yD@v`>OU1T{&&MsIu`vWF1}EF@oZAvUxWyA`K_ah zXd>IGhFHoP->$Nlhndue_puRSv$FmmJxQklgp|}=z0|GjGp$UEANr7MhDd*;t(Y=# z{`)ovI>GHE2|Fn(jybgSUmmMqx}`WRlbxxnHEodfeJrz}w&ddE!i(ZNi^zB28&M$SJXQ0}CQ_l39^9(qeb+ zjGO4BDtJ~9ae3dK-suzXlfF-pcqhmE$?LjpV=R`@v8yS$p(j4=`hUkCMFLab5;Z+n zrV9GC%k5ZC)O{zBF{5Z=G$AnDpqv=TkNJw99=swtoy2}l39h^8a$D0^y4^DnBWf_r z|0$7~S2t-0o2Te7X0jV}dt-}sS(?IhvEGCb3v5umUri@g)c3X=My!;H3y%bR2R@|3 zP39(If!@-G%|OR3`CHSy(pUB6aI1zW^|fd!4WbwsSEp8!@;|T z4Y;4j3FAjV$JRf>e<_2(cExW{$C{OlNY5&#FaCh20e#h>JL_gNxs|pIPVdC_pR6Dn zKF0v5X&Anx5>sf28gjH>toM1)Sen;li=_^uFQT@CV_&aw!U&<^^dvbd+$YRIe&q29 zwGjeM+Q4=M+5B1)UcTMursl@2|l0T%{-l&vL(x+zyq2`cJ`Q^BV6CZesJz(Ql z9adbf0}gPp%07KN`eM}pZ?!v+fN!T2&+0e>FiAI7xX0D?3QEFY(<@o=sd?*4xUZ@% zEoIEgRlm(71_AH4?vnH`mB{V;0oybSshpS*AVyQcU%guU+;p_PXEFN6a7dfRb5cp; zaSX=hdeiw&`Y|O0n{$fB=G{lymYxAD?m`=dl^a1VLtTKNV&WXa+g3v&RtY3_n7o$Tw<+O(LL!N5#}%~rPY|YFc0mb+kf~5 zI{}~;-^JxErU%MM!`8*VT#Q>@og@Hv4Hd-6yG|K9iq2>+;HOGub)N>W#>n>}jTHpa z4r0p68JHtkY$UM}XZPs8YlU$=Ul-XDPRgtzVcsQh}TM110q?flGj1DAQ1#FLir zO?BaJV3h@0D1-QbDc2xc9aYKPZ8b>0g+#@r$Cx6%R%=HWq665cMzKw&z?e6$& zb{nhwqQ|qKGKTJBIFcX{6s(`aV1XSZuRI0@SxRc;8smhaC0=P$j|!O`8LWWg>yhRs zm6P)#$H`NN;vVU@73=4KI<7~j%1*K@J+S?kjPc=w8t1JbnIFjzFDi}x&tw#bNI;=JYM zvSmzJ&a!jbsh{16A34blkW?0YY#8mPRzS0MATU=jMAzJCF0UHt0qDC%lcT2V^N>*X zzwsNeV+!+_JF#$NK-e?n3sbRyyc!ZOMqbjj7`n3)Tf_ok>K;5lq?|_ZD4kgx+C2!@ zT)GM2Z6hk?H}v>G=D;p3La_+le;2*D0>x(?9gZgtH&~*jJz&#Nhh@Fd=uVudCcpds zu`HR7bwhHdWjDx#xOQf{6A68=@vC0$p$#56B^JStr#Gi?mfxx@Zyw;ofh*Gk=njjQEu}vSvYjy6>N1u#BjnJ>QIZJl5_X?wuw?8l!dwne?3`#^K()nq-npi^3U&+vjq2h zU8aOy11$g!b*Zf1`=`Ev(gtk{KwG!xa!{tGn^Ph~>+BGS=y%3@1)HMCf)16jnzMM| z%G*(IF_EM)6_2+$&&2ZSwji3+`+fUT6nnI|9=3G_y6TTR`$Nh7$^WcrC!`h8jFxE~ zFf6%?HH39bT`&qUEEa zKG~MR3-fdtSHm4_bN-1xWFt0{bmT=2@YKBQwHWPXTBJtd^Q^lW;3S@7@B7rk5syz= z2zt2XCp6}4gHE|u6C?zGJ`)QdM$V@5?|@Ri||$#w0xb? 
zgL&>Tszq~OePwXe%G@nGlb^2YD6V~L(-^co2xLx6DDc1?T;opY9yI3v(O?;9tB$LJ zr$zc?&d1Gp%x;q^Lkef<&*SYA$HCdVQrZ2Lxpk5yKFJ6Bi21CO{wTRmKPpJPc{7Y2 zflLwn$Q?#x9_&PrOl@9J14ew)L$UScY5UN$RAp}6$Ysry$vQ@C(`%LG=f354J^ysQ z@6~UDxf*`eN7P@v7kk=548O;jG(w+|Ft08M;g)%E2KdrZ1ii7?0{b)c@)HW(K7U&g zvuvC5@O>}Nef}vAqLM6Z0+Yt%jsuBXs|+}^mY0+XN#H-kV855c-I9u$c9K0E?dd>|9rt`)2=5FcaPML|T;S1f-Gk$TY=*bY+HI z|6?J0wJ@DfpbF`W=W@JC3Ut>x!Y@BwX@5tq=Lz;C_nO>QIQgt{<3wjzvW8=|bMm)$ z+;XOo+48CjYDPdKbGWK#MV4X;;+=i)jv3l&6!G|H&9X;pZJ;bYrDNE^&<@tU@xV9e z!TEk1VxiXyr>+iAC$3D8`5g2a82vAFshV8CJ&yHG`V%i)b>!xs*Z%tWq<&%i_o^Qn`yHt`}iMFHx^l22C5?H`)x zPIx}j#RQ0Qu0D%&S6?EX(|R>fpNsQWUSwaIm?sA=Ilh{#NqVX%U78kW#Io5hgSFPT-u-p(LDq_cWx%XR`rue7o zc`SRrBvinaINz+5vucvLO+9Wriu&kja@A1Vq8u>$cbL*Y4-#B@!<SIBHfN^IANH~nY{Md znk5WSbexca_V}rJA?)2fpl6UJvJ5azyY2->mfFt;!VQr2DD#>B zo%m~%P;|ALrTT>!;R$Uv#21dPWeGWJ(3KrJS-m;Wkdfz?e?6Q0!%*3(lBN?C2VUSD z_UfT-&*{fIA5T%XHO1+1=222zbnc}XL%47^{gc0#q~znljHSWi+`i$;%&757x3;}5 z65`90y!TsGX}I#VS{${z0%+R)Z>scqBFHuq>$Fx@cyxu`^Y{61MgdeKANMvW+$vo< z5evhRGzQ$d@aA6G5rE?FB5ty0dsX|t{D0B)^d+v0ynBIbm%VJMAMq=s^~$QG-kVts zoP6F$)`|^Ca5ZOjlmU5Dm`d=9U}!DlD8R+4nDUGrhbpKTlf<(Snf@IW$t6%mtQb#} zdO|3~UKba_lL56(F5-4XnT~9NZ0535r94gkq9M>3@2;zTgBS7FqhlQ4$ZiAwHjA-k zbddKztGL(~~ z#1vY^iE%T;U$$!@YA}=LhjTcmLK*Fd zmLS^0A|qX#{_~;vEC}h~#|ZNJ^BSOk4}m*D@tBs|!Ayn#R#lcag)*KA-JRL~%98Ns zkUpVF6n2&i8p!Z=Jw}tk?c%lovbV1V5 za&j}Jq_SShvzQCY-nU-8={bP9t?AsyGno-B0(<>tK@6kI2&FWxhw0q1!8E{L2YV{_ zq{B%v$3IXxgr51G_{AZu^Zn7y>t3;B|4GQF&r(8G;0E%9cHTmiHOM3B8?hYUN`~&) zZURp7=|2-0_e!lhI%n>)S@LExBgTo*D_2dBd+3r#g@OB5`|*VRt6uZF zJS$P-uB`VG;{^k}}{_;*mV$lj-b__p{%qYpa4^&oPy?Rj-mOUXBxhk7e1n4ncMB*ht zGZ(2(i_iXgNNKEBk~sK;Dld`b+3Fpd%;5Wd0$cnlAbWwddsUCHi=4Ai&XCigp)(>& zrAHG$&r9Ln#7(iWFrAIZtGT`|t#%9B<}%N|d1zH6UDR5hvLrDf zVFC*&fTL$*_vhlLvkkL+QUphdo1r3a4SLHTO25uBYc~0jKgk&?Q8Tev*b&LyxAvBv zGVY>tVORbH1l_*Nwanfbk@H>}eItl*d8PQ?2>FNr)?1vhUV#k*YWXMeYf(S3nreFT z!9H$#jZB|2D*skRe7D~j;-YfZ5B@wl8ivKRtqA!yy#n~QLc+2<;hon%(!j@4sfdq; z!v%7iZB$dp!8{;_wj{k{FyHMOEEWY8VVV8`bGS7=Q;hQ6&P)1XRY7@b<9$ItV2xv7 zSkaH1>Hfv!hcsGOjAfiKLf&6|U64Jt@dbFpGtxe?8+GSokDfg%82)yv%A#MMjwY*bJ$-v7Wq zJL-1tqSOrYPFkP|H&WP$@UK(rjqs1yjmZO5@}A94wPX&x5ZtM}F` zp+U1!j1@#VlXuy!^Aprr%Npc{V;w&w|={`(acarZaMd-gK9pN<=6t*s&5a0$FU-5BQInU`nl z;Wnic6{OSBLid`F=U+se(LoMpZN*MiNby;uGUtaa160lb0cxQq7hpr;36^pAJG&|} zz<+?oq?WX+ksT~6zt~gO+0$%@#mZ_<<*OZ4mhTI%Gx!tLT9v1?57}}EcHj* z3(C;RuCEJZZg~rdGa$R_dnPjAIo7X{vf<2$CbV}@yLdj}@Yu_UeA62x0S5ew;rr$v z^1H7nQ^3RXhmQ)5mc+ezkktyRVnXG%fXyIIV70Dk4_RhSD0u+8SW}oT!;07{)tczk zpF1DRa^+^rRCWU8#9bOsADE^P7v6lL5A4Zv4U;DFT7?mfxRl_#_L)Dq`NQi?)=l)P znEZbLdC?iLeR`rbMNN+N`27)~yzB3+@j-X5A7)w_p7p5;bV>68C9U8lbEj=!0ltg1 zFI3r&DHW)+fa93yv*+7$M(HLBmI!nh|Lf}Yjx+QpmQm**&00d#th6DES~g;lUv~%i z_`sTLQ4N~QUW8giTjxfFHA~~p;{`<1hVjCFG;x(~q%@-?dFgn_dNX~iq+~hqCq~{G z9iUkvJ*4u*M(()3=)Fpy!Af0j;aPHG3*9>F`>x1RE>neI0%04u9CoysNP$*6@@m_R zQnv|hp{4&>BdkRsL|NN$futkytIBU&*0Dww8VmxJ=}3$GW{!T9Z93QLbu3o%X6TqvW?;fUH zt>}f=kHF1>=*qEDkofX))O>q0tQycUCWW72@Df!&hYcwY*%%oOMxlOx(;Dk!>Mz{b zN8}J1MIyev=~;`Vgfqn(9l5hGQu2N0mMaN5J{#O!we+-n4}2EhA;Cs;ux~lKh zM?FFs;+IXa=UI=<{m(_6@e(4RMfIhG-AH7^Fm1n;t-s6T!g~#0xnx)(*_rPZn#e9h zt@mTSIz-ZCuqR0jLZqWEuxMr>+b+4BoHBPv@~{LIA;SgtM-ztbG_h1{OIIkMP+KJ@ z==g9F6i3{#q_rHDfxsX_vFb(ozRQ{>q{{Z?F=O8oMHo*0R**h6IQ{F;D zeX(q5kW`Ip{v8m7z2~NHf&Hf)#z7a9p6Dkp`U^vHR)sx6F!+T~=zRY@*4(%ojL5(y z8fjPrkp^jVuZP|=NR{r@QKbDHOGVv8W-Z~CXN`qWOYTe7H43u7w=`qa`|D? 
zWgt39tKl|AjSJL0@1a-EI5H02KsBxj4EDWsOr-HypU0i)sA>du#=3uNdRxyal@ zBL9&r;XDic@GC3^K!VFf0<|-PtF?PTwmWd{@l21i^>1SEM*O{AFY|9UtqE>VKdL4#u+1tjN zwt_af%=mqlXDdzp#aLIl=^sPfne&8QW*L_{@JgG%cJqfw`E`cD6GV||c5hb@or_LL z$`8dJA;SZ+D&1UO#L$s-Gmv4yve5i|5a2|de2c$+^k6lmY>QYut_o-e3>2QkY{*x; z_xh`jzouKZ2^XeQSOC}HF1Sd9HQEKq<43+axoXpBAHdyiE)<)?O=PLJ2l0wuTz2C@ zz7-^L_5O`Z|7=C7b+$|NtHkXv&|{EVHeMM&Ndu35Nphs)evS}oLC1)WPM&pfCFcLa zkU?sxesCFY?+qAS>L?*mV9Yv7cK!0z5cKiCYCA??P`HS-p&=^ov z6X4HGI|#HjbJszzF#i@#MXn(bt*AZsBGqO1;_UbzfSFrNH&c=XDZOOQB#sd>Z$f%f zSWWAAu^3X+$&3F4ZW}4c6etapbh9C4Pym+9;|D!@KVR;3aMG6iTW|=^zaD8gnGfyF zSok9^)Gv-NL0}Dg+!8*wkzfxKfMVg#v?<^v9Lc43os)U9&cs;IX1IS&OvT(Vi49MG zG&NWeC{rrdq&|CYlE6WdGDr+i<;Y2hUD2ANap;s8TkRG5MU3@hb^X^0u%45Iu`qNO z8ec(2o5_tB#t_m6-wqpt)+tIA$NZE9!b@q%%-rt4bnAAK_!#aj5j^EBF^-7;_l9PO1h_GJ#IIi$&-0U z4+*ZaVi|Ywd89@(9CQ0|!F1C8VKcp+He3P>{=$YV$hNG247a310DTD9*X~r zN!NJe(5ElFT=2vk&5p|4Y`-p}va>}dN3AuJnyv(ciLHR1z;!fuy3ubT7*SZr(!|b!>shrKucP@lgOY@A5?qk)eZ)J97 zAe;pNDqWIfXoi{E$KPg*j2AT-!xMe4AMBOC?{k6p69g<(+ZHzK1)Hra>cWO>y_&Sv zT2#AVzk{0}3GQf7M50fjdG>v*W(JEIUe@Pdq>yxw2b$NGMmZwmB zN2Uf?NYeYdABYq%aO03M5zr{Z|2-gSTR2q?$`}544e$)7jqgi)8r;p;XsjT8)Xn>M zJyK2n{y$|G=BzVYC!!zH@mf*V9FB~t&xnjn&#B8+^=ZhRCuLSOoLRDO({g!Ov99@& zvAZDB1v!vzJ1JIN->~DUsKF*TJojUH-88JA`tYRoHZLG%@y+ATcm=j z5>HxK9TZ6#Ym~W+jj~CJd?2*)HgWoQ$rcpm#!P6%(#VV*%|R+ijOq0!5ldoQEv`%?>dbqY8=e$ILe&uP0%(Y<~lvH+Ab%v zp_J$LXxyQ$*u}?c-k>3+oNMXrFuQ}{Vt0w;IQ!pY(Hi9Ro;6_k-BxP}vaZkA7`~h0 zK$D_0O%=FuPWnXjA+~45C=ZUb!VKx%f8$A)quu-rd(u4}nXZ|snWdEU4R-`{C5)?N zrbAx~632@LO5(Gu&*2jl=n_$=lRP|(qx`eF1Hs?}YmStOi9ARXm`sE_ezzf#49FZs zV_}37L05*jk$N>e)Yo%UDZ;bbyB6)Hwa<~eJ}2^Z0jI%Ww`re<=y&?Am0fIt*p$H9 z8e1yD`=J64=mtC(_(*?E6zaB*OGl6-=<_aWoJIF|OSBOxIS7W(kMr$oTwz|8R<3IMRqTE*W3=fi&SM>+C29b zokxA0BT!v}4t;4j*6bpOt9N}h7T{Hw{=A1@qOy zX0xuv^meL&`WJEs?GbI&A-}oYlPvYol>RD7bV2<8*S3aS3ZxOGn7DT=ji~qkDr>BF zOT*|V9)9*0*&NjWX#E>BeIEICA0OrL&qOPjj&FnzrRZ!Qt=Wl%Yeg=XU|bd>ElLSt zGKkE$YTGjG*CNaznac|09KTZoz{m5ug}_KWn}|94&%O@pAyF4#SSTN}VMzC@xc@K$ zmK{hAoT8U!l=g*1!<8kWj+>wPn&0XFcFaFlTW(v&#g$dMG>-($dv6g=N~{kRa(gXI z_Q2Iu>b-h)N>3$B^gae^@S7ou#16nMRZ+sd=~V{tjB0kJ6q_s2xpyJmfQCARXp32L zR*Rzs+k@X|v|AJ_kpn=xpKzJwKJ(B+eO31Zg)2APF9w}oRg~4ZO=6#mlaWlDpWQ-t z^sk0lj$|W*X2^#y3+2+gb677CIGd9!KN1Vx_c8*q9-h0Lk-aRS1GIZufRYjK=8_mq z_lLY(?8yND$hX;!FP3OPn!^+RK(`bl1BXa~j6XrI7LJFC%qPZT-!ht+BoSA7s?+h; z+0r%gWiQL)Z(J|z2{z~0_F!?!0CFL20faQJ-5jCmH+%)w^w$~VZj}BCnyF5kOPe^6 zD)KY0HL?}Qp(wgE7Aoc6L|#z&1>%~i*q1m2CQiqdR##@)V>k)TUOsDWkL#k^ffG~C zkd(!l%Fs|#H%NhgX6odRRV#$=6tAVzC$Dxygp4-&OO5HW@8rLbTfcw zux~2gADWS`t3%+j*MH$z{uD4z*ZY>>K8TUo;Mu)djVZ-y6&O)DmOcMH+`BM8S$PC3d;G@Y&x&Z zP)GJ*DGs_1nS6M(tf~5*$U#P6!x$t?r|w89fz%+60%SuBxhmtDynSLGJl%A^rA z$ujpyG(Cn)^Y3qu!)KnK^uODFRRDW!cO9DqP;%^dm^Wlnp4LC=;N+nx_!F9rQl215 zp#Q8vqzotMBV|BpWEX8M&>{cbE6YGGK^`LuFFS&WjwK>|!J&~8bqoU zJiH#!I6+~6Mf~6!-ta=P>pCa6K8|+$5w`rhuC7;Bh$6CAvF&?4-x|g5$a?SNsBHJG5@BwCp~1#`g_GP?rgJK_m&(hmUpu zuO$b3;k6B6=P8!%4F7@Xbior^nB828>1!GF!>avX*$p1^z0C%OxU*cs%LS%|CP!Lo zKNZ}+X5hV-87M-SIpw`$wVa?pzOiueaw#C7A?r~8WxMy7FTX}!yBB6M9DNs| zU^+x8=n{xnr>GZ8O?+TD4g7WW&iZ}Kla|i)rO|?CUc!<=97kRo4AOX^UO|j)5prcO zbwrf=7~6%LPc;s=x1Ev7N#-$a4kv>HhWOJ^b71vwchEgEn4}O2s7~XCFiV*!^-WNW*{nc(v zS@`m}L#WAo{PU!4mmA4P0nO*m4_QLlr$Omo6Qwy53u1B!kj|$5bY6t}C!o-{6OjiV z?s(v?>`fT@V6g}-H|)vTke6G)etOa?xZ_DvcU3N;t+sjMaFy{U_m#hQ4TO=VOsxvX z_ScJDBhx9?#6ZMB-?8O+OcI7*VvY6IjO#_LUq!1@-?=?snbDS6EC8y{D@99ace0KO zu3Z}~ww!%Hmi`E&o20`!Di)tc0oKj&S0(l;4rl4I?LZ!hA#aLqJuJ49RMVtY)#2bj zNT)WR*POb0gkb+5BBD+@ips?aQ5YM8;>#3G5EaPT<*n%Oi9?)j(=)e3vX*&PP&%AM zx_s4xZ5<@_?srl{16yi_(5OM~NfRWF=GU5byYP|juWJIwUz4R@&(_7)zLU)jKjcda 
[... base85-encoded GIT binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/projects/DensePose/doc/images/vis_bbox_dp_u.jpg b/projects/DensePose/doc/images/vis_bbox_dp_u.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e21be74331f910cdf2579e2e5cdaaba2f8a595a1
GIT binary patch
literal 87042

[... base85-encoded image data omitted (87042 bytes, projects/DensePose/doc/images/vis_bbox_dp_u.jpg) ...]
zkDXg9`9#sHC93&>MEP+^BqFlJXxyXxw}681G->BF#z9*|`}gN7xi|BnXeYEySS{FI zW}Z$BvB$H3n^^3YbSmGlriu0UqSvpr5No`Ppa&$xs+^WKJQN=EScXm5%KWrr8D>RB z4Z8LK1^P<|eM*W@4We*VHs{nlq!)7(cT?921Fa%=LV~i*?73;r{Hg~om!*ckc3Nfb zPItuI?=>8td2*7!Yvxn1lO1X>H+28_nZ#G)ff+mz^JR^{DG|ZjGC(r1aP5_%OB{5T z9sfwa)Avk2$vVCYh%vzw#tIQ~$0+y{qi?EGMQ<=p(`iiek)`%=uso7toS(;o&;`9# zmLG>Yz!ZAg@OVZ8Yp?&YgF(|~(fUv#O1gss_%I2@X~CtEvo_t2+M8&t=Yd3PWFba$ z60AW)DU6SjIzJAL3O1t5{x0)~+h)lgrsnRU!`z6Y>DEGLiADQFeVmxtpyGBZd37;X z09otjk&(xVy{ac!lPAtw@Ik7<)*r9q*m&MG40byo5pMr#1v@Y2Av5QkWq`kG6qHFT zz>!I&K+vdq(i6w~6eJeCHbPMo`bu??_HEdReuOF4E$>Lz3XUNRON?E~4@7-Itwv;P z8Onc1iu~{H<#ybEryrbdd*(Azt5nVz)~)&`1@PT;HVG&T<)SzAgm(y7%=@W%=&$E& z?r%mz7sxj*ay=9&iQg zJ>Lx-Ifr}M{bPe82P=ga3j$~#!v(%hPR~f6IZ`T@loUMU9H`&O>bSY-L1{#UJJI(O zrq}Z{7S&9RtTd!ZaNP!Z5Q&vPn!#h5bQ_T*%>L8qGR%A>J&wDba{Jv_v6BYpIE0Nw zW@LUE8(;`WaK|tL5cf;Km5wGcLrb@6gs*#RqKBc3+_DLBklf}x63aLaMzl2PlTD}E z+{`fvA;KL>q};>f;Oo}@QxAybCEp4T*l{&@L}>lIb~83rDSGi@=01KtG9>xJ2bIY`c-;%Ii-FB{3&JUc41GTrfg+uGf#z4~i7 z$$PO%FmBOi8*PM3>lA7ggU@Eh5?Nfl8G=UZRlK4E%K(>(>SONPE}*7+j)pYErR^vc zm*6s6fXI#cv@Dw1zq&HQ9phmchYX=b8jt3eYc5(2x$Fhhbfb|plsj5GQeyA-@kHnQ ziPQW0ACZu~gI5#1SQf|CQ*(FMwP~^%CRBaNLe6Bt=YNAF!pzmS7FxdGitlvpye&RK z20zYlCIXk?@!Z6bHJ-Q0Ku!`fFnw>`rwQ1pnzY1esRZ!&(1*B^AF3!NxiVhsyzosc zPVh}>aL)5n?EN4`b~VaoR#8>OiA{K#W?Di-^w!-(?tyPcD}F3hH$qq6?AY4#p+aof z?2tVLp{odYtdmK1;vhs8!Z&DpdXxZ~a!@4k^G;DbX$P!9{F!}rc%{+5+ZeiIXkY0o z2OTgt!z74pic%_>RoD2G;x5T0PE7MPb%6OcC-znIMJO$gY2!6e)PMK?R>^3qh2x2Q zH5Fx-#_Q$OLzYvq91lhlef-zgPwt8y*lmRNho(1hksbN%$p40EVXowJa3wl7&C|U< zYzznGV@0PsEJ7?POvN-&uV`q;Pgl84Ys~V>nyTJ}>6p(cf|V?*jWgmf(c}OI%HsE! zS6Z<8Lqg<3L3=MV|C2LDRZQigDv@=~aqju2q6J!Pm{~tK|2lipRL7bf#a~-PbNUBt z+Z61cXoi9*4c_XKXvEH2%bR}AX44a|^cwA?Y3!KRvs_Ppi+@yTiOVFJwBXT&;eogAigvm=+#pC%s~dq7Ff5v!F9BHHmNU9 zEsoh#Go68rZuxmT;j1*Q2nM8F+x`Qj8LbC2sJ(tW_z$q2OBgv#h*nW=yHxD{xDp^K&}d0 zsj?>t0BS@8>8TGNt1GbA_;1+W>+=lwUJS$**!Lwr6bkVSzoHnw7pab97Z=wTb^69d z`Nt&HOcnzhwOIhj&cp&SaRUXFMd@~tinat5yt_IQC;8kQbC(aq#a4WmP-TUI5Q4PC z&k42qp*AGmE2`|E2);lXUEh>s*(kwjy+^#KMCdG3RF$jFX$JG2QuqfP!W#Xzr zsCeR1JXW-+35$QzYt6dxjrID+v=?po16F&gu=_2_0&Mj)5lNp`zh zr*bO1x$f4K=Pcv?B4v5`WbR46*YrWmNu*pQCM=`e-*|Jt>`W|bZqt-W2F@%xzkm?ye^^|K92ae zTc^GuUPNQx<>V3N@IdZ=fEaeWmB2MW(-HnCl#2R_gIu=Mn8~-X!UHsM@OO zB_n~ND^33aib<~H?uhylmW(HMEaA5aoBherHC(aUl|_y02tj0p#`G=TnL7Mb01(So z!{NGRLt#3^C-Js~3FirK);)0a@S*Wa9c6 z(h}pv97(bGpvOk7K5Aq3qvOX~O8>E?a7XlQT_yq+aSvNWCf6scaMRK)nYf&EuDf~P zgr2^#a?OO)>3Q$}0IE}c#q|dR)*s`p*mXrZr!>*k7P#XPakY2^olR`-fj7~QnQ=aB z^O`(-cy-layekCp6M>M|SyWs^7YAK1y_v( z8N(6{o~rRuL7sx>yWPcUR%86^ViU0Zk*}QNa7xj$Ip;{x^Wm1CVYNd3@3CYp|6I{D z`S>sZr?Dg-K@){61zmH(lWM&PE~dH_U1EN=`LddPA`|2HE#t?Dw8%QJfOua=i)khQ z%tEDFVHor$9?SmW=l9tucpNuJfB7m@EI+n)HrJyl8#>zU({_f}AF@|}1@(CW{~UEm zKamPPt;dEXcth^CCR)X;l4S|4dQ8V{`AyyK79{kh(L+{fkG0V&kHukzcjPp9)Dz-5 z2)y7=+GZR4b#6GCl0JxH+EBH>-g*|7QJkb0BL>8kk%Kp{*FW-?e`|i%n9!G1$Gg8P zOlLB4`9<@!)6%pE3J{YXk_-ed0j1QleQ{1sa9>CX22=`-*XAkbKIXh8q944Y7hP3I zb*Z`WTi;|;sGYtw56dh4MV@t-6$(dN!P1}tXw%y@BVrf z+c@j@CJhUZfuv!ko@$*baYSB5HUir9$yO=CD60aBUu7W(3?}@?N_~a@}uz%d}+AM_~v*wFo!-f*}$Vjd1g zWLDoE8H3@tpQ4}z*nl7@Eg~jaO}XDF)^jtt!!I$hRx!-?Md+bkCkc9n{Q+sE|JekX1^7A|4tc=!g6mqI3 zKk;dwhiKl+eXXL`2PP6;9#SL3=6y&Rr*opro0zwt!8+Cxl~??!@I)iseO=cFKr@L^ zI&#ug+|P1jk>Md-cl`)N-%|etUas zg8$3Ad~h>}$jS4*g?YPfhO+V~&x_>lfUY3xEx^1-=FQ{mGD=?0{niM>#TjN&(=bog zhWqV#u~uRkUpMtpw@KpjyUgbOAchpHP&7#u98WZ0$Mg0>2jJC-DE7s7-#7imXtu9O zAT#hY%||AnwC=T9`t4>Wqx+JNWdBQUho=PL*buPu*q;>&elfFl37xDth|lzJk$PN@ zh0*f)`d=M}($DYfZxs;%h_@dpj-2ZS@@`-2>UUfm&f6uNm2WIj54osjeuM=R&J23D z-t=d-L9o1ccPIXYaRAVQk#A4^F=B8WN{t%~ItIx`zRrjm5!J2dz&TULZioYE9v_3! 
z`Kyx&l|2D_0JXBKEGJdzjQ5Y2Jk#4c}jVZ;AX;D`Q33DAm0}y z?$0}6H2PK#1@yv_ z5AUkr8wbKK)1?VXSGTo=*jUdo2f!l_;;RX2_*VC^{7S)~bH!RdX-G52jB{d|Lwpr! z$Z4=VkMu~RK8zXc+r(9vc+$$`q&3b|&7))09IFfa@t4e~P~u=fmM3 zjy7Y@+(x6wlj#(;IqOpO{<3}g?@x4<%fjrV>z6BildX?}kio;+Y z?r8+A6Xh0%dXii~vk6st zGuaUWg=qI%Fg$at6EVWpZ_Sdvnp6P%w+ChYo~&!Q57lm{FP-Bv1Dx`8uao zcW5D{SO@0Maq!aeh!kwym(r%&NqQn|jDKBcPSB0mHw5+x8%3Tw;5VQLf;g?}X?)<+ z14G=yZFNhOOGPP2fyg}B*Uo4vmX<6`nnlrV`-Q2?5at);x01%q51c?#yUh6?4~7!( zpE{HgMKd7Q?f!gWz#A3P(NK-Wxu#$8G)2C|%U?4x#b=*Nz7(nNuIK-I$6Shks3HCF ze4MQ4^dCSGZFkl_nw{N&rDpr|=A?#b1daGVz~}twdIIBfi0A}lq?wGXF(4zVg-oO* z`J1mDGlQOG_=>`%ycHlP$y41;1qe52x=XK@p!->FaMF#Vzg8w=9Nw_~;P(=XFiltZ zi~+vQRFj&cYXCBBpX0?rxkN0p7(xCMP76q4xFiR5M*KjT+$RHpjOA4EdgYXfU zpyYM#_!*RiVxWpsfqan*$uM~B)fW@-P5bw6rNSkr4%l03+P>$tW31e??ceZ}{LnXU zW9c;bpxD}L95qg3x`}LSr1DlW?Mr5G^gi? zba;s}c~a&MZP!KdG)X#gjNX0m!CIqe09|A909hJ_eH;ye^mvl>?OGM~R#sXkUDpV| z@$%Rnwht#?g>+LQqLY$PYB~P+Z9c;^0rn0P&b^z7__ZjedMJ9a^oaTC(H90b(%~4B z@!e@~B%clCOX2bw)G626;wfb&CLAb9C(w}X|6pLK*f(155%=+=tAV5EZsUz%OtSV-&t=X0Geaam zFTu%W*#nOyrPfLF+6V^exM@M&L4(>EH7zJJ&&+A|=-fyAvQRMI`jf(wwN) z`&;hB$|PO!nPg*4Gl!u80?`P_+Ed*_$Ve4`kcAx-SEvomhc7M99idS~tf_Hf)u{R)B_VW_!aP2u!>>O&EGdBiQd>o1YZ%+KN~qA^ zJ^M8=Rl1@vtz5e2H7)ek(xjlr@Rp{#ATKZR&o8J;4Cmo?ApXW^>E?3Bb?VZZq^6vw zYC4uhooI{2rV=BD#V!U7SEbI)Z(`~?a3{NJHNPWLX<4Nhd9|RId=kwhYW&ar#EMw7 zvDG}JV$;(ZdX}diJwEuYzL{U%%|Ny(|6M>R&cltf)iQl3&WWY}?=#H4$9wu=E_<{q zYR1Pee2%QlGqWZ-a>Po<>t}PDrkPtm-cYSfQR%;sWFRmZy43EQ8^;q5n z^K|%A;(W`l2cYI;EU9He>K1Q_J#zb4tG-&+V1e(Wj;1SMNjR@gc>AB9U)u-Dh5DYn zw*ZRO2_7$pAoHh8A24i_Xn@D48)Rqqy6xQszDIDUqmjzs-S;3w&BCDAOed~g(3G zSsjg6mdhOa#?u56)-$x7=1qL((Xj}uyUVg z4E5d}0~!&`uZ+3Kxp_Z)O5nV_7M?(voMSG&c2j`9`BP9bn)j`N6VJdr0Fl>i2s5@} zx}GBDlo%*Jnz+W`Ova99N9J8$nzOt0@x%hc6QsIEd^rzhC6c-%pqKvDVN6D>kSuzE z2DkETLQN?vcbl@ia1koD`^9jQp@<|GhUJEfXt8h0V^lc(x5rAXKEUlc8mm7%-c^$> z-v25S)NKqPwlwJ`M|=YRVfR^&fBOp0ORaiZ&Qd!iT^H~X383<%-DNP!%vmSRDNx*( zs0)l5e>JI7Tl$rgUdhR*7P^IIymIOPiysz6F>)ZEzfp1TIN(NnRd9FE4rZ)*tEcmZ zf*-D@4v_IM4?V9)>qlMcf%ee}r$4PImCxT7MHp8!cl^E_tj#^CAGGMS=I z?(hZFd(*^6imj4Orr1KC)kX~JI1d5+T`_tBiwo!&yzIjgc8S2;5cLNjxsCJ{okt9{ z)v`Ls&2UdWZmt8~9*_c0J0{3l3^2PM2e~*%3b>r2$g^LuJi_YNVIrytKX2;0f?mZ< zGx~x?xlG_p99GZG+l8d;SucVyf5W~&uPdLfCrCmrtAmBd2_MJRGx^?R9-!koemke}Gt6)Q%pH#ik9TSJHj7&c>xp8}^uZ#K8a`a71Pv z#Ib7&r>nVbBEbO0<+x$34#bZQ9ais8=O@yp1B~xDE|Y16Wb_3ib46hI!TtHkz_Ad! 
zdE{ea&fmjmjPAL+(KC1vO!DJrOLHw96@ecqx!^}Xkq9>yu}|^jTREO}5ZBLbY4e%Aar7=58JIhncOd7QBv-uPil|$^% z1hUAk>uKM9^W?7q(pJtzXzT~S?Cy420ym9YYNv>jVE|~S<(+_}#aVQ>rI-|zQ_{%G z9UV`&eXp@XkPekad$=_!X2Qamm@oqaUOusfS3JGB#muyuaa}{{hHOq*XXX}(ddJMf5wuASBc9^MlhLi;PC zy36vJO5(8eEILb973FpuJ>@Ab z$36jKlO=kgU1i}AkKRI%tLxg^bH%nrm8X!$im$q3#6%*IVkqU^59O$QXY{BHW2XmldEhx zE|}jW35t-K+M*|9m%$X?UYh61p1S{GKIXhERH3L^;V57*{b`T1_YkdI@G?*e+Lge1 z7`w2`R@fbTfNS*+%&I!p`=DA>6t!zr@t;}VoTrxZL?4&+1`TCa@?KyBkv|xZ_5YgS z_r@-&G-0y;YhhK9Swb^a1a?6~|Me+07VV2|(F9Z?zn|O^KL5t^pb-`+80}+PThb79 z5v_LpJEqD={21TOdX8Ravb(erB`9EkBj}U>oR<#XUG)(HqzQc4zhrU>nQs{*}36w_&cN*E@eV2k9 zKHVM$&h=$q&;4?}vPinIedQabE-b4&*fyd0jjT(nVqwKQ2@jLm%$-##&dEHm^Kp5w zEf^K)^5R4|{MYuBv5H!bDik?X8r_Don7RIEb%{6t08`fVwsY{F1gUTqiv_VmHgmZH z@=6YKm3|LXFeA}?q7BGK&=&KmFBX!uMHlbHSw9+Dcm14npM-kPV=q(W)LVSNr;<<{ zdDSEIr_Z8EZ~RckHLXvntJmY>*1uwluMKKLqUY zFTIHBGIJzkjZrOQGew*WKU42azw;lh%>cq9%3EX>57moY`xWU*P?#ff$SsQ zh8!u-)C^Yi_EtGBY!ZD7`bNETJO44mPXFo}c?uUPDUM^4NANo0zwKB;YvW!++6`G0;I$MJde;#4+q&PwPh_MDE%g-T&rL^Sb5tpq$73DJfrtU z)-b(%TU_dwXzl+4G%p+f2M8!1SAf-t2w-1!kWySNNPxuL2mC0AeHwMk=7-mq=iY9Y zWMMBkH!mlc!H3ia3fY3(@L?M1ATa&A{5V%4v6iH0o}*(S#s2}ky7b0|=<1nflSocK z_c^ZNHvNg&PsGz3u?Mrkk)4-?;*@8?j%`G1(>x5QHjt^!jUr4I!uiExB1hidf)(>T z16cpWaJD2;?glspxWK^-@y)pvL=Tpl&Jtgx+IJYJ(^)V#^_Li`8j~sDPB5K02_^-j z6JrAw6IEXJZ_<)f0)|#noI&u_(p#)!k3#}B7~_$T;$H!|RLnaIl=_}ya%f)fJVT?1 z;`$$7boR~2BigRp0XTp?f_|ZN$E0j%at)yF*)-NaY%t$l zt6zs5V(D9S?^?r_o^5b#7f99Fkscd=eKOB9?v&Gig3_0QW09r}h8*-xt3prcE^glk z^(H280%U*e^27czk+OAU|M{Rg;)IW23lg7*f!8ohG}*BHG#@1sW9qo3{O6E%7{k2!w<v!3T>KhSk1GL3tJ zl%cn)n}vPUFLqd7Yb5BpnZZKoLI&twK#~1Y+ae*bc-JvJ`{kPQW@`dE_hDmrEu%pc;uBwQa1I1Bdq5ulU<> zy@WlM;+q&(Fsgtep<*bE8=)r!RA;}vA_ti3TPqVZwibX?vDo}z^ZC%zPu*=LdfUzQ>fI8wO4lYH(WgcBumxu_$ zHWleI-Jzte4mc)J%!+jMS4BXD0I_necOwjweZr zgVUYPM8*&_`n;#aFtP-a1DZ=>f38_wZ5vPpar-DsA|r@cB2VLV_*fyW4X?xWk0Xhc z9JDs7h|Z;{H_Gs;;kR}aMACvL7q5?hxZx+lx6nrmZsU5160Ps&mR}&i4*p8~Ei;~v?PH4I$%^#{srC!UGA2%#@J(>@>Bn37Q7G~ zb;A(J!)}>U--~iaMh&^wkjxYl{chGnw;&ddBwU`1>_dSrsj@fce`~N%_qEXtROPLB zUNHe+qd6u(ovz#wH;qSC7sL8ojucNWJ$anLk#f8Y6SBz%G8hbYFAobW$VZ?uJ1%e^ zP&I)0_|8t@}!YZzN_r#$($JC6QCU_5}7t;Ag?V|r;y83#q@d0nc^r=tpseK($Lz# z01xI;UvVW&&t{PQ8iszh$l0Xq*6PN|zsA`D=a=((0O=i*S)3S$Pua<|f_78Zb#y*C zI=j~&D)YSJ8D>CscqLRfF3Dj+N^NF5CL?+6k-nrF;8qu|@2*}?OrG`Cdl@mF*nO+1 z^4a{RD4nFw+T1o*r1xU$YVJv5Osg7xZ%D#b)36d(p$@rYgmn0fOz*q~eK)VXk^bls zw)NQ761)#^kLz`8{tw{tgWZC>?SuNg&~+CJ;@&8_+#{{Bt)f$0`txR^FeXOO#zIhz zlv|DNvkb>X3G&h;q!MY}E-1_xdN~~#6BgYP;@M>Tnp_&{@W4gEp>V!9=u`=WS-9id9^XZ4wWRNc3vF>W{M`M5?l zR@RnY!`oP9^o=aiN9T6RL(O&M2ki+{$b^&Gp}&6owL+(lC7J(ch2T5I{5*l1dF*Js zuWXC64vVqSh9%|AAL6|m%%pnt<^{+~U3KMMn- z?2EO1x}}M(q(4?i;Lv;3gMFyH37@|Ep>^`G>mD!fJ+7&|Fb+TRgQ)8UxxPT z50sIB#D}GtR(JS=*}tNnp=h-s<{Eo1vF5Q>Df-G_GJ( z;#*^9tz}2|dc*Qsb=D|i{#PwL@nyd`2+!!VCA_4DWoH<%QH_|aK1 z2!!L^(z%EaK}PB)*@8KEyP1W>aVQMt>Cs90_qva6U7@jivw3k4lP$6H+Q)lGv>lVp zZQ^Ew{CVvSL<^xAqLuC>Nx)J9h@!{^HNVPcwK)-mNesMNax4@5QW}c%?k>Q9zGI0* zAr!KXbY4#axRF^0KAvITo3eXp^Yy9w?nQY2FHoi=L5e&Hdk_y{cYo(Vjwh?WDL3@w z?R?fJ4cc%D6SiE)i=RzU+#`5}YvwpUrZsncxr&ZOq#3V`ci6~0rZv48iWm90JEc6< zZ%z_q!ot~WBecpF5a2otqL(1;Y9SY2Q-VpiWuV$Iyw>UZFt0l0Trwc)#3I<{h3GM%ur^jXrUe&Sy%u<$e|elc$*9h3QmNy z_h6v|*z}5!IAd+gpPIit@_MNs{m3;u(%1KYU3YR-@wXA0V1NE$f_$71(42GXT5;S@ zb*GSAOXLq?vBL{D<#j93$7&#hDf`~lr4IEB!Q{AtojgZ{8RhyQKa8<0?7daqCH~g} z@(67IH;8RcFrVem*LI^+SN3ooZKZz#bh?b1z?E(lsFl7y6aI?eNs_RJWX}V4sTW{OGDv*RkugFP^g}1kih6Mcpy@U zrmDvnSMrmW{nM6`&Ld_+kO7Q%#+0|v#k1Krj3CU6&m*`+S$^Sr^XCro@+g$ypnekP zU`F#sz-vE$6?%!la@~y|4`|ELK@lx+XUVH`5Y`eExj#(Vbj|T*=Rx(Eu*It)$M>U_R619u(T!{R 
zSQMEZULXJax?+W7kI^Q=VVJph!aaoCmf6?mIe}myFsJFrGl<`~bSP0M;`kFzeRn|0 z9Hcf~KWb}#9&Jz^R!9W<%TpjT{;DD2DwcrS)0q zZUQD?%^1!3=Z3G`v9s3C5dqFjYu>wg*vM@n}dzJ!{^Y=kGv*F|@WQzX~P5 zxICOUIcOXrf%w7Sn?xg?#YTqVR*;Tp>Fr`k7uWB-bvst#ThbEFpa_fm9Hk~}3<5YF zRDsl$)s<0yG3*lU^-Q<#KiSr28WnqJ_cFq=m1)^HUk}Wq%bZ=;7UNJ8!sJEfTtT1C zfbY0ybVcqVWn*)Ik=PzsL`~P5CzFM8uVd<_#_r9>ySHmJ6!{Z=BLi=w(cR5w z@crUR9^~<_^!=LZX)yO+bC`Li6PgWte0NyY@`n-DG_!b{JXv;00HE*9|Htzd>^|qM z{l~1@Eej`0lIZugXwtvJkJsaMVi*~bH`<_2cD}MZI}IrkCILLp`xt?<<6&_w89H*5 zcHzn65JhY!`2`>UuYJI!Ov`46T0GwX|LZ*%D`EF2T%gwX(MmU`u89|8)D?K_M+l;_ z!WmNf|9d$qSdpDdmcs+kCWbhL&7ao`W_??7%jqVHJwI3o)LA{`=E366#_>$VOVS?o z99T1;#gin-j2%75mb*PIx_WF_eg>Nj>BX2NAxfT2sG;;luB*d-L+h?0trU#-2-I;a z=DV`$QoCi#)-ibNJ~02PiK*367V(1n6~@jiZhjiYmL*C9Yc*3rCzBvL0yKm9gVy?d%L-;nV&1?RLtFvlrtL@r#aEC&1 zFH+py-HL02Qz#S)!L_)%7I#Wjd(1Jeah^Q| z7t%7rqJA}Xm2VA3;Lcw@cw5_aTMLgRXApmX-D+Pm23U^cZ>E=4REpc&Bwxg?Xf0eW zm!EKk(VfuZ5dYQWStkEOrMk_Dtn>S$z^`W9u?hKhN?`r)AQ>t6zN0gl-x0@Yb60YE z4ca*8`$U$k>g9Cg?k@Rfw|qOQG4~tKi4tl4j>XDX#$KvaM-O`WExo+#WARi85oUgv!B5)|>SZSDWw7*7BH`9rY~@|c z(mKJ=kL`y3#nPPJ4XZ2kYUu=R-d`Vs zqGwr>;^E&@mcEiLFjDxruz+Nq-P7WZM_VRI4%KR()*g2qf0%-QEf@x-Y`b1Eb7|gT zt;;hIk&IJVZK?qp>3LBTWfxqMQxQ9e=HE~Dout67_!ujn%WI2urBs&LhY~VC#Q2Bg z*kEB>e8aI)PNxc?0bY)09+)%VsgzWLn-*+RuX;~FV)9+$#^_g%ZlCjBeSbcw4KisR za{r@gt8R~FuXlIxOUd>``fi0&`hP6wPUOU*Bqugi?V7W@%E(2n$@#GF%vlm#*xe*; z_kOKX+pzAo=0A7!B)uzw<(V1lOY^+T4BNgn;%>mLka_{ZfaU~b4oZdf#-c2m@PS~6pDEY2(sflnG zQtIpE{7hZ{sEAvv%-;-2=YMZjYuxe_1bJpWg*VZP5d6xisYIZnH-9z%89uPDE)=ss z$rsf)BGQQ=a;nuk$&qb0lH8B}6+bDR@OPr4(7h2hvcAw}ciP|x2FK1uEI5AP$&iNj zN@b6o!rrU=f^XbEQ#E#LCtno80;_8xbpDtN3K5(ADoQtAW#mZ9iUA@c+^c>HKEX_> zU+_{6&_a)s*QC1taY|4aFHJEkESorsinhH$1W9o2*(Pxta(O4|-4!aIheye24<^bV zgWtM|(iYZhU(nuI!`S|{jd+Wl^1Vpu>s5Vl(bE=#v9X*k+c7_nJ`f1wC%hHf9o4X$+Zpz5BKu3SBpBy-MuUFld5`j%mG~ zwU-jfat4tiqCSetYGgK`EMcaF0}o4Hn7J#n)S5F|gUz$GM+@EAi4Z9mV3QA#s0mNo zIg%27*!a=JNZ}8Cze1y8PO$i(X2V@pcSYrAy*4ZsZUS+1*bI>(L)yaj4W{v$7`A^C zh4CNYuPor@;=7xU?8K2~UES^Nw8)mU!QMqBD`2f79altZ8LzI!Ea@y zi2$C3E!ujcF^l`*b-1wQ?(0xq z4Uu?f({ z5V)EA`?i54rW9t!IPy@+YLl1gGCz%qRf6P9utiDj*3{WVDvg2=U9>NI_<%(l9pNHA zQ%gykGo?%TMfL24N#kx?D+vH2n$VT#sj6Uv-#21Z-xBX><*;5W6CTGcpSuqMPk5~|Nt+2D6x07nt$rOTz&#m2sLN1PDCjITqF$#+iz>%7?rh&sx_9w^tA?gwaqaOC0 zNaJ%$CFPmA1q*3j7?5ewqzt7(y<^%Tl`5BaGges2U-a^p%nk!Z;A9Sn)U|SCog-2u zt>_`8F$*uI9!Ua>J2H&k}vpYC4Qd=%x@VXi}(Gs4bin)O_G=l zDfFmSZM?}omUwEtf|P*2xSNTkWqZKnbFOkeUl1@!Nb{bF6*{Obooi#1S$cB&Hfv17 zEkkJ`(dVUe1I61DvwaO!FYP2lCuF%c@bVgt)9yz|GKhxH|A3QtdT|(j7vvelGkBCM z{O3s5f~VvpJnCQQSWK})Y}7uZLn{)e z!94W{fs&u`gVQmO*z#W{ZjGjd+10#wxLhw;E|c1r!F+L=bkmz+xao1Z)aUPii1sT> zy^TkGt%96Gz%q*~ld@Tx8MAK~LN{$@~xEe7Eh(~$K9TC|0dB|=Y6PMNaI^)!T z0EH(TprqYIgmAEQdz(1t=;yAbJtljl&IF`5onoXPq>PIXPXrg2Fm3^at1I=-^4aw8 z@KO6rayUfxFY;S~X$JR!v9$%sVND>R<2rOgGGR5=xR7%r^-PSEj?V0x%cl%#o9Jho z69~nId3ODF^Q;??{on7whi{kI)iI@Fdpm>HxM9qx8NXN!L~Ys0{R;d{enZ0t<6MIB ze4erIjXYbi`CA)k?I%Cg-9NZo$awhM-SM4zfP1IDK1=miNeJEL<~JM4`=$@CF#vag z8+MUo+4CI5{3JTnKT|$4@L-&%!-VKSzPQwsrBQA!r`%WysFKRfACpvfQ_Kxpvi1Z- zl|8%~#X@aLT;_Uu?)sK_?7H!3rB=o)X66Qg1C*k$mfPxELp)@y^dk#q!;I8^?313` zR4hE=5*dt6+LcJpUed3ES{M{&p56ZcS=GmMi7`?C@UA zFqwOU1-GZslx`AyXAZ>ab^iht+Qz4u_v|5AnW-4pQ^7-XcYXEvGR4x3=rSFfk?OKeRWg>gm?i^eGEj?h6oqkUDbQ}4~R#;tC zK$@`|Im?WuO3D<$x$&c>g$gLXD;l>)>GiE?es_}kOW&NMXYPLhe}Zo;`15~njzn_H zFerazaT;Atd^);TIzK;J6p_w2d0v9I(%DFrZ?4aF*)E6W6U7yN*=o>cminB|C+vGo zY{bpK%=9H%=m(d_If9?}i@@dI#P=q%58NaxLnvq3_?|uVQcA*j10-MTuKj|IV-Bwg%dr5n&=ZLEOoDLO0vLiZ6V@ zHn`gE^O^h)&_^`Vp{`bCR9IH0Iqy(e%M$ykt#kJ~to0rQMeZH%NmMx{Kuowf+968^ zCDp`})Vlk*j%Z)RVzE2LBZWa~?emr_r!C7uDlNbVv*<*nq0Kse+q~~%zE>F&*n6KG 
z`QLardK5SIEq0NeVnq7WS2f&utW-IECC%)h%xwmIj%COoX-!BO#pspa`@7zMwk`qt z@@s|8eqmKOJYwcjJctS_y~Q9UUT~iktYfUE)YNl+A1xQ?t)6Etom9AW*Pb-(lfKY6 zZ#*I)a3Oj46ivA1MwCg_lzHi!%n={%=CGZ|wb=@7I-TG6(AKLC2PL(?9m3AAR``ku zquP&r{gGO2l`G_GPWnd90TZs-54Yq=;-@hcRyhYa0mzz0nmtqPK57yyw(lA+ z)%9J2Dxk;VS7T!~ONn?fV0N1?rj1BWY6n3I0NnnPP8X224^282TFfUG+JoOfR%RSq z(xn80AA4MVn%;EONXx`k^OoH0)lt26rXoA|*m2-7=bjC$G)$_!uV1HMd(0LtLJtQX=hS}|fXG@I6s1M)gN z{@*}YsJ$KX(SLyX>ml;hCO_!Vs(jR8oY_?C5lwvOu9 zk;@~soE`0Lo>r-)hVblOqq)*#(*0%sqwz_bFjQ&&4xyjGM*YnM}+?$ zz-Xvy0tws(3joB`05eG2LfePKiIBjy;2l$fm{y(2#6wUJoh0S})_B?qOp|9fgzU{& zF&}Dh_W6E1!#;yTpMHQ4wQ6kz8+4G`rgtX(zV-6&!yKzM3J0L zYJ7~p0m1)(m5Jdbc)OL2gSwggI45{fAn@En(uZTOCQ4J6D8L&<77l=l`mvL2xn4)l zoH)wiqt!BnmPY5wV%zyVX=~w2iGOJ2aR}Knk02{0fD^Cb(K@WaFz&00W>7cpQNlzI z4B5qhX(NmaGsRLp1$_w#No#v;9oj^-<)?>3YU7eg%~ux}pGRQ&A}N!}mGXv+>jh%o zh-?;(!V5?fL?LcsF|eC-v~;rnCBIw1mD5WLjFRu~Xoa~8K>_qAKccK8K^_g6>^3a$ zcE=-uue{fDqtTqsXg9*W$O}2AE~&4L$R)B}G$=O=_Yc;)zvLXRo)+B4nBpc@0O3W*J>l`aW5qE*&V>)GtD}W5$5rLFBjOvtR9JJxK1s z&;Wye^t$Z$+ocUuig>a6siA#G%iSag)ibWC6^_+^*_?X%EyQU=JP!-22|&eB0mZk7 zcP57HIE3fJ1uvh*0TST;OnmFaCbnL8)!qXy*%CU4M0^#+Mnu{c10{+)BNO;hzJpT~ zt%20oG`pxn7NdHl2&PZN1Vd>8cx7v}Rg;FhG7WLLg%>O#6rNs0zYXdWWLw)hT3gB4 z{Mboab~}1|FfuCuw3M5qda+GC6A=-|Wp^;Q?Pyp+?WEH*b(irS#x#WdsGj(__RUvQ z%g9R%))=69om(Da8u+ac$yn>Pc#|`Zv7o&`Mu6jHqSQs=_7ajAM1RUimNCXX>0Z~M zVSlIi!C(Kk`8K_f7vCi4-=){Xwy?O<=3w!J(Be*V$p7+NQ-&lF_ha2uXv}-)1%~#` zZ@c>X6hUQcg9tP(r)A+rFrnFRvtLGXW#_k>@gwu9C=6y!%H?nwTn!*aS#?zklh@nv zG2}lKHZHOw@XmAk&O@^&<0hFZCcoLPe=MFc-eNgV8@YAzpZ z4!;sPh~ntzEmHhtd#vNvyt9t!9`R$4PnCgUmXsTw}(Dicjpw6a}A{ZM{$o7Ft+XyZGXpdC2WJnGv%Ju?Pd4*6 z(AoEq1f?>ikSbHdtNHfbx0xBp5TsWYM4aAs0-SPz1gj*)>X^a^s$GOX4BJEdA=YViZ$3iR+=jP_JdLW){KEUTDR1?@_^|J543WkHwhk z=bXHbTRt#>zp?g9hH#$F{GXIeO!Ak&_7i57=EC1@%G+kv7F=lN##*G4#Gd1C$=4~$ zQ>4LhXeg1sX6RIF#axb@afM?2a|SfE&H*L81dG9L%S6_u%jb%_$+1n+oOHss&&gNN5#mcqVeBp9oZT z_0xP`?8rBp~SbjTy^*ccy|7pf~+ET*6vA5jbXfa1D=n2Hfh(T5k-; z&DExB&N#ff^8Q!H*wlBiD$KKk2Ol#*ed_;~0Ns=_uSF1LeVf6Q5OIQ5xrEkO!U5Hv zQ6g~t!~Mi50w$ZN%kj`pvTeeLca?7GMDH;W$h`%TDI}@RtbpbgoiaYWBE|@C9tG++esg zz;J(xSTlN|?~OO!4iB)K>moE$ZK8@gUrLcCD* zCC3Icy0rf0<~Q0gu<2t6X&R}>9)PF3E8qfvtAWCA7QX^{5v?v^X%Eeg&+BPDynV7t zQ%MJk(kfEnswMswPO77G`dmr>3wDdR1-vPX`p zrV^rvCav5X|5BD|D&7?>Lo> zbjyZ&d)yT9br`B_oV%>!I^)-nM5LTtD*G{X5+sID%90@prq7K%#8<>p#dV+)|VS@5&YpE0<1_UUyDvdrX6Tk|l- zQfv}w?=VL7rS|ylfl1g4j)X@UKH0H?Yam zGS%NU{$^}$PKfTWlmV}~q7tciDAb*K(YA-^L2@I^3U4DX{4wlU)}<|ZqQv-RTmkM! 
zSTsStNJgwbVVm=$QWs*5?Gr3B{aUQbVOB)892J+OZ!R@jOC`TACE2Iq4};s8U+$y7h`0a3tN`U3OfE zX0`ysgf9CrF;iE90VNfALBzL>qJ7*OzIfg}w-95cEV-T=k*oeU%o)0dRI=RY z^yApLwL8gNXs^8T29<`Ut3FM`l*<#s7R6Hi5^#VVaCQ(UO;}Gvw0hLepO;}D6kXQ% z2@}x#-s=I;ktbui`2%u7>gjFmClt&SC&B%(49eP$HP(?oI4!{HITL*O1>r7du%dj!PU!YDEG+~ON5k&C$nf_n=8ify_lly3*zGlO1YLH z+f%*)(X#;OboX`ZKHI!2wmpyP=(&f#fgG1@^ty9EPKhp(rxer1K1fz&grGcb{7^-k zV54Wk$pqicBP-!Ao1E6wqIX%-ioNZqZX|6)5P~nX=_ztG#_p+HE|1Ga|6pzeEvjnjPak12ANDr!WfympH=KAq%0zj)apMdP z78!LSM&0Swo!~k3;hyA$zQOwo(2@nrUud%ce7dvxO?*m#L_tA;ic8W}AMCsNy*qNp z5lLKX;LU7UZQFF+N;>kd_0#xOPT(tuU4EX;^X34mLR9e zu7;tqDt>i&+-;V}+vm2K5U{*EG!$V6xehO(Ebsf$uv9a5%QqKjQW=+fuz@)`sji_>^ zzs6J0#PQqU0H<5M{nW+h4k?%C%~>jCY}IO^rN<2=imnqR5%8xH7RsuoxJkUj_w2=m z#v)gg|A4QEW=Cm!w5v&nC#gd%>9H&L$C;Nw zUtY2?SX&&*?&r3PA+Wyi2D??26&{zJ9q;61SL~pv*-3#*X&gocN#~(qYBt?pN`H66 z!;BeIWfz(^edD0hp4E3}0r|5d-~I!19GcmEf=&)B^5%G%TezHa{rDB z8^Q}eVD2UV)n0ryx9{h##JS0HrR~)P1FaiP%=Bh(AAh|$Faw%)Y;K;uUkpq=$sFJK zJ}x8buU)+L^7faI0ky4lQtMaq9YRsQ7SSnexYK>dl zJLlciDrDvoF-n=xJgl9IM5S6qj+T@qVxC!NJU-&D#Gb2T|4WL6d!ULZ0M&;mtlUX!&+CUIHtb|Bh_Q z9&0t8zzmti(>n3qRx>Wu-w&5U&(5sF{DjA;z0wjhQGk^U&#krpf%-#j0zZe?QRe$a z^4&c&vrUy0Ih$aD`gTDF3LW=*4;U-i;#v?8$v+`-dH4o>?;h&08(=1_Y+yJ;=&rrk z?#}wo>19FLy7&bFG9M=qyllj$GDnL}zU{|zwbmxsb|yWtCt5+|{e8oa&Frdc9I8V+ zn;u*elL#@1B^{znX=%|zOb~}ayh6eiaTEtxptnABT;qQ?0)UvqR~$rm>6WI}Chz$Y zf5Xh{!c2t47RYE{@92~tTAzo7J_V|odn4wvUp^Yzs^B~GRQA3zmlJ8~ssHJ6Ti?!+ zu)@!a3eybg;pNsyH3NnzonV}Yh`uj?h+u0iQl9>6Pg`_8El|#>YfFY^GN<3R6!9uM z8GrKfeg|@Sd*h0s|0idLA(LszAyE6tCUv&o5rEZG=c3^;Z~zcCBo$so={b*O%=Fmn zLRLoL0j+@a6P4qYv!J4FsMCkBEV=+J@sK-_r;A~nSn5$|$N6`Vwpk#RyRCS*#5Tm1cMDsS$DcXEq!wmCyrDs(&t|c!NfOJ-KZ&$Jx4?yA5f&PS#b{r zR_=>jZidHG#wzI)zRPu7iw3D?%JyuhJeX>MuZAhp8QyTKH9Cd?$WbIQzJRTj*ETPH z6Y8K*#L!?LEnW_K=<_F>k`x7W(zw}w!Ye+vqmY$ClrY?EzX2)2J?%dqsTVc6!^|e&y z+H#3RRg|>X9LNT~k5A`u%tCSLuytWXH;wUaC9aFjkB0nK_Dz#l*7*tZsLv*Xq~vu1TuL2W8gPaF$gb_klCQ60 zj1%1qf3#H32gHTusI#Gd-SzJKduhK$(IO^71!x=8q)fpY#Yq!7ymhh=T__2tahq`# ze!7GLkn}!wu~XgJ!l`kpo5`KrVe=E8Dz`km&5e~oLJSZ5)u`_XvtjD3iFZ=G=&lpY zvrej1JQJq%oI@$i{D+PQGCVwo(Ua@~{I*Enb+8ez9ES9IC1AKiS&_9iqQ9$5{G%ru zY1fzi)hasqO;3;y_bx1jRZkX>6{Bt*dq(uld=b^FI1dk)F^|mm!C(S;i`iV=ukOXhZv&w*~wfo}yk=9*G zup~W+tk}KCyO5{j{~aLH2ix(t{xBQ+c|f^Hi-GhNT^z{@fWD2ll*X*~*-EL-KP8}1 zDOGK^b05Xx5?lm+&xsWn!!Nng8pk?Zp-&*|F%>wkK{5O{DzFZAwXi_TqG_%E}M(XO4jP1=$k zf2XYX*|Ut~r&gOX^539k@4NW&_-wCzFW!9d0aFu%yJ28y>sk_f;k^w|ti2B9;lqEDt zQ8s!RU2xS?AgSdCSs5k+j+gs2+NK>6qGq=3qBU=tLF%V9fr5J;VaYM-Na*$9hs_(lEf&k zcU2cf$KQH4S7Mir0?>Z-mn1Y-rh4{95LRA2%>z=@Xg{iSG&P5o&?BHZNqb`StO(tb zIIswMuuZh?^Tk3<=@VY9`AkP`u_^BYd?>kL8jvQ@G^Xxo$J~nHm_WjeNxD(jDncqE zZT@ZRO5o~G3I^d@_pW1qOGw2a$^lv=uvs&YO&kV+-b3d4qLS^qeU^h;Nw=hOPnz$- zO5^7L3?>uolNW5KW@`Llj*+h;YO>SoKv`eEdfC-|AGEiNnBYB~KH4llzu8`;LYSm{ zzr5Yfk24V?0-`A?h!UU8HkX>KqO4=9o6Fk0U&QS_gz0tXgN~ol9AwbPDC@puf`w{k zqb0W~glO-+JAnEuPM_Okbc5gYF1(f^M)MgifcWK)cRu(vQMnbdhJD0I)h3^)YW|SN z@d~FBUn6}Q`{%8;i%=jT`*eWAC-BdgI?de-X(v6;Vz_8lw@y(#LM7QlQFfy z9{0UT9cHDB&#usSW1~1Yt-0F*&H)DYvz3YDm`gjYJ!f4u)W^!3k3i4Ng>l5Bm%{onf0m~CC2=9e%`xcnhK5nGgI zMzd!UG3gy-%TAZ~Knd&w1{HV>*K<>caCE zhRCJC%uIy!9)-+w<)8R=sMS##4o-ezFycZs-iQp3QZ&6W522ghPZGS@8$E)a9jGLg znyKg5bO{$e3NQY%EE*)9s-s^p3-vEjx5BsQ33P7@fsg$YzZs}g&yva#>8chkM7h8D zoza5izweH3dgRGMIwkP@+vvo_G0cYrz?mXw{hZb|e|-I0kJ}(;OIQa}uN zVEZW_d4tXzOM=eqjsBaGRb#H)@ji2@U_>Vlw?gP%PxVqt{4|NZseyM#kEiS=xX#t6 z*q3@~NH%vks0SZ4?~r?}_i7G?}z z0{uHdkmSwnzwGW1=Cy@LH+K+7Qu0zWM45Z?%+g%?S3e-1fxTTncU$vj-m*|!4yReN zU}kT*J`m2sb@3&h#SgiO34qpmfl%nurbmjM0jAu~Sd@5kQco3N{VU zD-N#~6>jsz3EzCQ+>aBC)bBb!^uM`+2EL->S|!?8m+cUV6rLq 
zjN-)rf`G=u;3NhnOx#&y66I3ilD?q!Sw4B37Qnd6fcJa=uI#(y?nNV%V7l8^S5&=H zK_m2j9Ju4n!`5o6jyYnU{^$*EFAis~9n}m46yc#Yu}$eH#yUQS6lNjpN}Gha8#uu4 z6(Q^Y0eCVwQoek~vO2cHhxxvO;#EmN#p(wVPi(z?ebYD<0>TFJ)t!9$R=g+ywV^rJ zW~bZ7q03yU=ElU%;rpQ!H1Wdn+#Y*$5kW93O=gu9exl>L@0JZTvHht0EJ9AdKk`x8 z2Q+!HAFl=!#}m#ULm@cw9>6cHqxUgAtsO6y%6TV?dq>495Yyz{5Slc>gv_qWMW{u4 zWJ0%NhW8B6*`>Ixob*{wyqT*DYV8u(-NYjucexyeFk&M%ld-tOKYLJ$VR|T|-HI9k zVIpt> z<|i|<{;d_yTZdbarz{%bR(B#tI5rg31F#2n@^d~Uy7L|H$6win1j0;v=3m4SBYE>( zYyE7feibQ2z(4ZrUBwI&t0{rxF8%b>K&N2!4Q#N}rYx6-Q~CYB_HH*jgU|B=_bmi? zJ+8(q&l=a7t487x`tAT2RKwu2-Q1wf+6Bwg26i?ODcu~ny1MEIC$h>t&Jfr`etNaU zYeu!uM@DNUr7yCSRKSv>*<<&9KM0^|k>zm^)vPW4C|m6E0L4@!N#DU1NHp4abhNqX z6?zuV=~5VE`V@RO=8D{opQ@Eli}hX*I~d{4J<+FiF3 z!E!~@@-xEgBk@VbfiAjxnnSNnQY8nclthIf+m=D8Cu9N14)yREXNj(HiVc>8VLsgR z!(VC3YexT4rsX#VUU^i^G-jM|>U-COBP{EhKlW?NHWSS0Dw~I*GzNsf)zp-mRFnbS2G)yg`wGjiZ}?a%o$F}7?L`Uf7}+=t^5s9o)qCm2d^kaTSdY+(ZtIi^cT6Q1$&8IP zm*LKpx@!a;oC%Q~HdO2>i&+ww9tp2_A;dA3eIgpSzT>7+O?_ux8G+?r4Ou+O)IQ|- zj-3xoc|>3GYVxp4yq1g6Og5991F4Bny>*KFyq`{O%5G`2mOOv{MJ0*uu70W{;{suU znUH^Libvq}{6hT?P&wjs$9frEhJP?NaPRh4Jl^;8dPq7*ckFEBH-DJ3c5)fy@5O-f zi8>?Z6~u?0>Og6F$U25F9-Y*b3=moe@zKAvas_PdttvSs-FIL*4Zb2hTDiyktSU{c zeitT2UNgEGBm3bV#`=B$I`eh|pSl9Q|0~hR6R6^5;I=9Bjh#B6KTSMYCbCa5P@8}S z(kWx5=grb|la4c3d@_ymENXyel0he}^v*aQ85lCTZmsRtx&VV+^lhUo_t~}k-}(IV z><+lZ#~?5DW7IX^U`kR-?&3_UpCjq&NI~*S!SG1$+YcIYgWIpFx9Aue+>NuVV)u_E ziP;$@@Z)sh-!36TaUtwJp*G2XWqw;W(?W)qwRl#h<}S1-AifW$g!ZC!w!DSDMxrID z6k<6Y&dh1bQxO8@SmLba-w(aF*5OeHjzXM2tKK(48G>?`onX3w-@LhV#1L|mgI4yh ze`}rV4R)i!cK7jX-9MWVf9gM=iiIkP$` zEM@++jFFFDZ9EZGYT$%Ct$uZZ=AwdSM~>H{Fim5;g^h|fYb1PdrGUm#v&_eZjQrP} zcJ;2al>axc*QxJb_9tJvysr}ilAbx((^(Jy17InXKL6?OBTn>kH2HM@I2h$8Jx2FmAI3PheNP!e?Y zo7r_v8YrLQL^>3}r=y`^WH7nVd=BC|oPT58ZA+eJ#<(2atsT7x?gjWnbs;pNj56Lv zF{HZXefANjZAt5*YOAeTQoq(lN==snF6#!XFqR*hv6?;DNMhKEdlXGz_3*;CaXE$D z&tDQkBDFW+_Za_&z=AD6tan(YNA7+K-3hKIvr5+3?f$J z+dZh#Fhy*vMz(iEhB`;8@-p24Xd%llFL(WzkV(aR{-wMc{KM9^Cph)q$pGiDj>y2E z?yKiWq>@Cpre3uRFIVmcQk4X4-iEHC3#YZfIB+%186eB8GTw0AkM*Hpl4y^U@J3pM z>fsj5ARWAdB1X!vQMm{KN%na}9kH3_d7g_d;C5DmA?2O8_#ZyMsY=orqRz8;Dw^5A z89CM#`v^eh-6(Y$pS9^5c&@;e%+zC0_I+ z@?v1{al^P7l2Lj{lT;f$;R37yY6g_(cA211qqjp*UxnX`9%Rt1q7C14EUpPJZ8h)5 zz42qzPw_Q7@;WYSedR?J%-z#ET1D;SWf##PKRlsB|> zElYZI<_-c5lt5DZw0y9uhY}(t3N-(a6b90@R*XiqARVi(0T#-4bMvnw`T+X>bp!Va~ zLJY^Drak{Nxps!FC2uC19Aq+(td$UD!NuUb3R9$wQ{^_V)NW#AZ_HM_TuLaNmJ6=CR1ns&-8c6L2VE5)cma{kU)nYRrh}S6 zBCoD!PM5K41D2vaOTfgSUeV@=Dct3-y#ueFhG(lp#<%>+3f0Pt6Zlp_Gw?kjh&tS} z@=uohz4^#fP&fb(!MXfXI29-D$IgSrzxGgM{4+07VgJ^@!2vXjdRy{>j_4^gay_@# zGShFVH71%{Wv3n00#VYw`P$1k0bF8&X=cva!_q%BloZ8P7z z;MhMyS~!FjU`BJ0dOBkhUSJKCJ;)1Q4lvUftQRw(snvRdp;Ak-`mkkam_8=rXVa%2 zjJs8$)giJ82FFD5lRxgmO|_>1-04YM$M3t6iMXC@GE*bNEmoT7AAGIOMTF9o-q62= zWAu9t%a3zLW|K;M2lvvpBj1c(p+!s8tGFfJ!CrYiP^t(e%%j!e5R&8$dMz6PlPUD# z?@%bN-0McVy;qw#S9?F|hA#Y<8CMo79jK!5YC=Ym{zwi#A#>c{v>UVcetwXu8#~Q* z+l;wivPGYYcRHXVTc(`k>t}n~;cI)Bb`-~=@&8RTDB~u~*y0ojke~+o;bI%)e5L7# zaTah`+`fWSdCb-Dx&+{!$w`D(?1%mbuFa^o|0Bf@*3}Fn~&SjfXqi$-nO;zn|PA$QZ$DCWd>dX3Zw|&wU zddZU1)tbKfSqe0y}A{k0B+g3TSrf zz&r_n@^W%4L=|!CPI3pVj(4$Ah6%4gWqJk3@;~bB#g|g5*?!t>yEuH#e;PcYgJ~T1hp|Wv)+DTFF2X@D`IVVkP6OHGCwE#Z~ zr>===N*G>FPAveuKL+hD=O?2s;B9&dP|j6KZczw;`* zY~(ZlyWQF?dmMOy^(jSsd`vDsg^IT|w6~9!XU7%eylB(r!5#r!{Mr+D=h0fwQWLG{o0?UULg+vYp{bKA|obh2JDkuMgEKKfuOR1zv| zs7a>HQzsgKYc&3&C?oc6P)}3TvzI%DRK;TCK(!mgUXem#aE|~x%K7KVfdr1x5$Le0 z&!pA7+g!^1t^VdS%b$}no&!l;7k>L;Z>QqlqizfpSMoU1GgN6m(GWb50Y?+1?)0xG``!CMgtUHE{yDMO;# zQF_54J3=yz58~8sqO_Y=Ed;4Ke)<{(K?cBj6{p9ZpWD6ri-O}t@Th_C@xntlMqYmo z9E-%};V 
zkb}((J(y(@fh^6lHUS_B%HqEDqp*zN*U{A3$j7?~lnC=l@D+>RP>B*+$B2z5Ry#z> z!MWBV*rTX-J&~W$tbrB#kzc{c+ncoiJKl9$bB9O%D@~q3JX?H)qCL zdB%D2y@zXg%Jb+3CY$YF#5P!JDC{fwu|>_H45!*MXuet=b=NTKX383iv+hWO-(!MBY%&_%K4}G7<|hm&BC< zqThzD-gIFK;AK&N6R|zTd*;I`o4qTpXHXg{Yo)8e*39jXN|w*#=Nge`DxaYc{|~^h zv5H>%u+~5g3f0B*puxXSyY9StstJw+yyRe4=4Q2bV`kYIKmvSLue;JEdFwiAN9#>% zJxbwuuXy+k$R;zXx}>heo3F0B9gX?GDtN*DbQM!eInVj|c>uFO&%z!fQ-UYqRJFWp zRK$6_5Kq*L3)4%|am3)r?%V6W5!b4SVK@|UpumK8cNmPN;!eg>oPrO5ZLFo#mYqU~ zl5sY+`W|TK_(rZN?io!59rk%Dro3Vs^as)_*VAIKVDfl%EuAej6p@mp?@roOJNqf7 z2X)2Iu3bYuh+b41 zQi}YOPrPhfEU&WyWBB0e@53-vri6L@u294>Lnm)DcRz@u zZE6EsDfV!XxY4nLqh)D;pg(hmAwfmEC1>c&+q{K2AB+KSIth|esA#e(@yO8DGX4b4 z#pM$b;p4p57&8zd9UX={r7IT9Ty=BVhGi9{G<1f8!nRCt+z48pB5%9U0ul!F+tWQo z>n1AzR~L6KiTNj^N^PfOoL%>fW|=&cpGd>{FW5cxR;L(?;!4O;c{+6S9*J392%d26 z9Ys;rvhfe!XZ#Xxu-QY9L4eQZBC%ef*!&&6L8O!dwSy+q6_X%`tk)#2<=)r`WDS=L zW@wYahZ93$oI@))Dxi9V4F+N~4c*EQp7APYMsI?g#b8JYlfc8ttalsPi%D0SLgwt< zStHQ-TMO-n*1fIKn<9Rl8UH7O0f4YAOeKS_KVbl3UYl{*Jzo5EHG^6SMtKUJUVFkh zr_wJSDUb^_iL#}zbnE&2k&f{7ob4E^#RB2w6@$D$ChQJ4z#YDNG6bB!tO760nai*8 zraX3$WUuVxtD}>fAe~-oL)W95J0fmqPh)c1-eIe$2N1!7`C5>XfVNNZz;r?-aT;gd zFEU!+wa398-@Sl|I+k1R9qV&4%yXH;FT&5wd!Cgq`biB-lH-vU{5kVgVs+=VhwBiW z^tYT|+>4b)kKa9syP#boXd(PiVu$7q9ORW8%vV4Ac|5NHxR>!j&|Q zbCywgwhCqCo zM7PGz527gPzxYXG>{z*v*D=7CQ$>L&@dZ8zs+c8l@0pCVplesAa0tTCl> z_>xFlny{dl?lpJa%KET|%QyaO#M#sNP1piHS^M_v8br-$AI>9{A2P*hMe-{{+RYj+ zCFE&3p-`l&`hLG9E5-F+ErwIU5BVpque$TF0ZEZ%(S$`#ax=|KObh(1W9FbfZvWAC@%~(2$7tGOh z?{9^VFSnnn&?k6W0zp`AUY>Oky2F|I_-w?idBEr~0B*iz3Cc?Zp#WJCb(}j5Z{@5L zSPpi8Ekd2luRA=Z1tVD<)PC-8*SEQ?xp%9c-YQFeK$484*b5)9zq3&v-G&}EbrFsU7#1R^DSI53FBL8>aqreU70!#A zbVDV*3b_|U0fu;U{&W6UgZa!1xA*xs{+ITCzsCiR5Hhb5%#{B{DX~t~@4PLC{|8vH zvfjAT(8`~Cuh?rhGAY+0eaMC?x0J}V%}kr5jS;iA{m-Mc!nR(SD{FB}*K#zLTdb%F znaJ~Cj&kR-!x!rvcuZP{Y<}VP4wv&~7;F~x;ov_H^$jxn>Ws!2wh#9}k=2RKtWsjm zh5`p4n%m}vID72gK}zqT7oU73m}?vIflp@_B&T1p{sUN#7osi~{awF$O!Ly-95Pji zt~Du}VfH&9+Lal||Mt@!`cB&zV*d+DB>7J;%kWP$h%RI71|i$TUjO;;h!dyg&`V< zR}!tiGxB%NJR(NRTt==J!t*)U~*Uet1Ui^IeJ^HKGEXV&P8{6BV ziqVW;d4~7#Gapnc@!a|8hM*%o4%OcioCDxr@HXQ5xQdEJ8{A*(yk*81;3y|)EVrn7 zKWbO!3v8iIis`8uJ8z^7eP#)kIH5d9R3wl&xBbbi9^fOXE_(1Gog*V!8QY?pxb~o`nbe1Wk-9cZd5C ztde3-t@?``qJOa>S`?MB}4 z-P$p$^?7%+_@Q0004f#u;-gzr4fwmv*Na{(YA?xm(nZe9t3Gy}KiZqCtFP|< z1>kGsot20Rrhq&$=VP91PO!D*gT+Ql@k{g6wPIDi@&}zRw?yS%4Yukf$Xwh!KHH@2 ztED>1!e^hYCee61#;Ql#9i^^X8GIK>uMj;4DKeg!b2*=WahvsQItBZ{S{+II8M*7* z?X=1)Lwp|)Xu4LPqFBp4vDB6p{sX|EgAIy$l#h}-^MCo$T7v63s{&6Sto~H-^Pa4K zPc}RtXkX|pTpT5QT}Lr4vO|rvG0XlhBrxDR1}}3;rj$SjE1&iExVXGLS(kMPxgc2M z$8I*OCH3}j2*GVV{FSK9kE%bSNo7_eGo<$8t1}k>~0sC-(=mu$F>Zfm6KGI=7RJPcc%dh zTB;NCpUezRzQ%#bbt~*ghlFz~U|Dnz=C>D?j+IQw(jApU0T7grW}s-}EsE1_?Kv#; zc`Ic&Wdv4Wtc0B+G4-tc;zDE?w=7Cp!(1xd)6=2ue9=^T&I57GwpH#tJ@oHl`p9iX zLV8g#CK0!iHM^1nG)QotF)~jWHLULI5{=mUjPVSyeBhC{rTU}l^%UuSve{7}!&jfXt6nV{BN44*I^lX?v1YPb zKE2?p^7TZCi4S5)tUnfo7fpi|57(W6B*k{?Nah{dxG-RIH=OXGq6k^1z5nuOYU+o=7ewHqToshAR>%8t}6S>3)BR$k771>p{A5~CiB<}yVRg1d;Q z$!ubhd@me5s)3QH!6Mc=^E8l;Zx-;UfQUMXLYNOiV}|Ns$Kn(1%MAx*3!28S9_uZqFw+_u^OB3xAEWZ+nrs_NqJG1UAr7PJWw#L zoORG`%mrewl!(lhMGyDIv>-3;lrdtntgHs}jV)hpH-KkMxH-w2A6D)gMdw{Qk1hhIrY8?t@o|s?slYgtPzFa#Vel#?PJ@zHuhydIjd*?cGZ_$wzTY( zj9@f}klS+x1Jp1iLCbBzN_Pz~2BwrxLSp1cPJBho0&Eg+(8ZS(WcjL3!$)(0F>eL* zW4mhP|nWPqXhU8^CUEI?Vp3{cKtHfux z9BJ(DI|DyBuer93IF_Nh_0+mH$yl(Ei@7Ddy=RN1^%5Wdu;E;1Kk?2d?b}u)WlqG* zAc3qNfe-Pt${dd$Jq2v17R1k}Mn%P#*Iz%=4 zchn#a0h7Io4uW`Ailh*>@%%CPj4LIDHlx_nsbF?6$h#E@z}2)N@G%@{3!T}wE=Bub zlH1<^XOoC zIqg8cX&3sgg>}-Zt670Y>yM7uHdeO~NTpq_;URg{)8xpvNSy5KOmft}} 
z-+W?r=SiN)uFP+L#Ux&IaM!NKuE!HOabe_42)VZ!r?X5(eBx00I7>tS&mADCi^gmH z`z6K0##U0s! zRx*+I6?j4^1x_X7nM;Hjok^EnWc-p2RH@@`y%oRTj=q%3cI1ETk4; z-6%WPfXz?j`*0MPzZ*MLKfs!-9pq4^=Jvl((n@VAt_$^)vQmU_iC0ad+~Hd507ZA| zZoJyJ@+b@*0K|C#Q|(_Ye^nH!cQ6W(rwaNfR{OqY|`_l-Tyy;UgW!x9S-ld zR!%S|c<>LLKvW1UG@^;&J!$znDiVp(8OrsoQGl}Ep&Vy~T>&}Q=UqMTbw;^oSi)xU zGvJmRe5G(Xb)v>W2CbB$1Xsv%vV&7DU#*tDmT(i-==LuB1GlDkbCp{S?5Pgz{iXg} z9?qB;r0~9#iv{0#cN>^CG#N~%>8{Ztyjy+q>~F){^see!rcSfNO3=lLAQ%;@$&t?} z(1wFgWhcEn(Z1TVifRXx#~5kxu4&xh(;Wa=;cv?#`2#|&%Tto0sRr;!@^zhE4p2or zHgU`(ujGsEp|On7sr#H?*r|QXT z_;dT3AZ7LhQd^j~_&}iT?tTv9+f|gU9?i+A&q~SXDG=mG6ph@F=q6dBi(@t{P3DDg zVa%DE`gUPwOIZtQc;UX=#>c7i=zIvenYs(Ks*811>B+{z@9Jk|b&gVyaVNm`Zw0#p z-!Mf?8=MqFH2QC0MKeUzX8>{|Qruj68~bHRojJg}ptYR4xI2@DIpJXt$wixD4--EC zU*CTe1%XmLu1s)<0%{R|q_scO!TWqrVVt4keOO_u)8muLbua{%qhWO1ouSSx9?MQ*{+ zsDt58;U%h`9EUaVZ&W_cURxb|0DGbeQ<^wDyj>~3%MV4%%2tOez8{J|QArq+sDouZ zAZYYS5HC~(>oD!`?4CFkTYB4jp2a(Bzk<9SBn7|f;For%t^O`#kUHET1d@ZtlDM%n zri_&fSax^(sDSO!wxMl^!fXbc=nCshVf5_Kh?Say0RN2?z!u!@5e{)n1%zyz$o z>TQ&|-t~)vT1Jj3?1Esa${8NjT(=B4&85&f?CXA0yNcu)(R!n;&u;N3^jg{()8n#p zV7!3gFbn0og9ZjS-ZmDJ8FMeUQO!dL|5A~>a2^OvY@C@IS88F!jLCm4{+-?!X&IBT zGW~^QeF^~Pa$6@C1xS5S`LT$WwO@*fhj!0zDM?WE$)iw}E)~)^FlilRm_FnK%72II zz@ib0Iy)mvg^9nGR)2f&_m;A9L}t;v)#DHvW@FS=>oq<_j@)^E*BcpQ`IbRvk%jsD zl+VWfvs}BCZK-{+87)hbJ^OG-kL<;<)2-)=dbQmPe_8d_uX1q`J!4Z_43~)jvIKr29h3+bDz#pv@&7mQ7ro`-&Qbk(BZdd z;&&v8Lc%JDkldhnYxkl8%HK7=p?7E^!*tp&cTYK%<($laS6m}&qx-a!^@K%+)M+>~BZ^SI4BubmCi^--J{ySz$47$gqFEtoV*B(1MJWIE)EB;hlB5u0K`3swe)5;V>V(z@} z>9iX-eQdF{pVHHnPliHi&H&*w1ltTJ{; zeV@)SFd5EbBJITa$~oN-8{+)|USJ}ACItmBW;Ea~A&@8$MENOrnlI`tEy-EXSbhB3o9ruHp?i;VrY#T7b&p2dl*)=ZZSM6?p4lm6XFBMFY)BLCG@{f1gvU) zynR5Tr*bOdL?cbr-BWhtwSXTc4t@2KgCN(Gi^Q>GxRg#Q?kF@!DIe9ls-X-Tyto6O z{zC3Q%@YtquGXuTL;z4};^xgIa2aDM#NTRxhfI?{q?=|h_`zOYhCG`IKxxt*M)>f> zgdi5(mN~R}FyW>PkO%r(>f0D+cH4`K-cQ9gjY`eH!*jK>$kVsNa&B4G{Ji`{ z#t0R{qM1Xx!-Ayf!XTR;auRac|T^^tUm?Q%_pVbR`MjBRyb0jgup;z*g z2(0*~_%79Dcu5p0y`9GBYeSU9&lL(y`hlDM5Vsm;!^?D%i$nL#veNtyUGw(=!*;C* z1wJ=?<6}}LpcPMUmo4Q?s1wQk8kTO|;M?U`-0XR9s`Sb(a(6hAVK7&a;>)1aUA!q+540*z9L)L z!ctQ7Q@w^&@KcL=QU(>lw+<;Mx3>g!j<%Rz%T;2FIQ42mq{3SU0V)xaDyDHsjOal` zW5H)^pNv?*$v?l(k1*ffg-BAH z8TRyl7aA*x7&KMmuP--hw0i-w&}dFb(hS(JNolAm4C?PskCwAr*ss;niX_+A1VVQ+ z-;_Sf(>|jDLUPTyZ;>Dcmx@($W@|{gR*P!g_cYpk0=bU5o{G_%vLW)g_&yWtmc)M^ z+>+!Z{ZeEqaJ|%ffr|U9D|jYaPj(G%&CwqlN&4Bvg(LBq{{Uv0ghqK>(pQ!HtAIP> z!n0?8UMupiU23kT-F6bxgtLs&o5j%(Zt^48aQ4I30~drBM9SXK%rIMHh3P9E_Jjgs zUgnOCbKkUtl;WxIe3j3$difO`55GazHi2zhlat5EfyzBF5xS2ctE>#zc`1EKBVp;c z=eJ)|x;tqC;9T=~lZx&G1iAa+x_Dp0=Nb^KMKLzfc)P%eM+gKDebZ?ze2cUk-;Y1#(oNbI%4r13b=@Xp*IrZyMdGnSiM~n)hDKOD4NmbqUOi1BzWUm^hM* zv}{2HZXg;$(I*Or*p$mn)1~t?~ECIAT5Zb|ll5ubN*j^FyLDjAf?b<=fX}Gm~hkJy&n>+HYfZiCbr7c)_#*2;e7Li_PDZ&%Hn{8 z{V-xxFj4uTL^#m5x>@l`Do&(<06dub(rVa=gIX6w&;;~Ek(jCNi+=-Gdyt#`m{F(4WV=H*9&-ioX z#(x0wQueO{%>trB9NW9^zpiqz5hR>uuN{7d(k`NvrZTfeUq*+K)a(cs7^c9(R+%9* zKWcLQUG&A1=qY~!(9>Wc47C>X{{Q4q^e=dIeM5cpR-<^V`9)e$^`KJujpT0?CoNQ6d+qfT4LFM z07-^+pMG}xuk$2x?I8;T<83tak&APSg9IUw#**=IMeNZ6zM6A+;d=X_V09XH9qW=R ztah{A%a$5IEbE(;T98sPFlc`831Q5kZ{G3CmfVd?3;4pTa|B~9mPO?(Kw zUXPCIe0u|i9?=;(g+eQA=Nn)i*gr!h}TnY!aYml({17 zA|Wn^7B3$UBGx~j`Yro=orwm>XwGDAF!yE8qoBNO9U5m(sS*8>h@AbU_R11%=RaOK zD%Vh(A9zxdCwM2k-ssHxn_ftgcTKw7p=d@yJ?x3|s-Y9EYB1@$hczNPPkqm6TRzrT zJV6ywDW15XJCwp0T}dNbQ(cRCc6iLmR*xC4m#c4_OI|&Vf zP0?gdAh-3|jmMN&QChI-)Z9dXoqCm~oyjheSXl6ljC+d2W_z(0SaA1bk` z;B(f6Hk5YF-KcU7M&DPS;h$d>o0K2q^QeP9xc>zC5xz(wP%BPa1boc?WS=9G1(2$V zet~z*w19k7)pYh%-T`cc5q=tXn2ZN>arJH<9K{$WP;s>V>|E03QUULoAc{eGpLC*s 
ziXezi<35?4fq^WW#;({p8-#!lzN1ed&@CgCy2(KSk0{$aG86NAh}q2eZEgrBZlb&8O9G_vB0@ zsoI5b$Lf@$`e?|#=?AtVNCO7#sVnau-mhQBaO{9ys;OyhiiBD>!=;Er0^{Ee9lzk< z`m;_Ee54-uXk4(03^_xa>c`msUT%w;eO8=fPaVj~p<{<-aecL1CqPGEZPAtTH(dPZ zJ{i9HYu0o{~Tn2iLYjBFrhM(!M?m95M(^% zDiiF%Y%RvZU^)!iVtJrFemc|Ew`Mo^Ak8EWyqevt-7W0eG&8cB>WRGJu`qY~cmr=m z>xh;9Xus8p5EA#Hm|FdGz#B-%pCZD;rO4eUu=F1Qa3FQv`>ugk6Qsf$?UGgFTgQsL zk2e{_*JXI`#meD%@Z<1IR=%tcBfv`NrSvb!M;Llgh)*|NhH`%A5(a}QtoiEha}=g% zaYs$l{f1&z3y2?iL9=1>C;t1?!JaZXU%52N1ri{@1(rqXk~zEL(EdY@)!4K1`2@(3 zjXn*$b8}0iw$EY_Zp6j}oRE0pz~+d!#SKuAd3*`|9Rq-q4`AmWY2!>X0giFQv*U5JF3gz}=_1qvXC+0j>W4U%jVi?;PBe<7aUyMTVbAY+)_?5D<@~ z-7lfJ=0Tm3hr8q5h*3k8awN8zX0r@EhhmEzzuSoBQZzn3IXU~ei==HdMu&U0oO$AYt5OgU9(Tkt=0Ss$GUdJ7{ ze9;2*iI#GaXbyc%JoS4QxN(&5UB)Txzy+o&^FS5h?_myy) zos}3t`F@S08CBl<9nRK~NskKninK8jAZKS|>FATg5qPvp#E3c`{>%u-+DZ}{<11-4 zgFMw$%jz`xWreXF@75toyVi#eSo|BXv=+CwbMV7@cfj(z07H`GuS*A;7YtAeH80iw|8hj%hO`%{RBz z`?WZgqrr3*BUU!>)BP@t7eRk2y-9!G;=4)EAX$+#H zn8fwRZ5O~6%o17pA%H@XKlYDd+Dx#|?_@c3I{OtY_VnfCooMk8<%NOyFD4r0sYd8C z{aN4m9WW<)7LpdR%3k}Xk{Pfy0*v`?u#jgc$2}LGevQozD8ssF4wZr9#@bTRb3WE{ zANk2$qRnIt&+T_v%p#F8osM0egu7N-!yXYYdP&Ue4hql}!E75z1tAF&#*{l}V7)>D zRQ}?h3y-osW{ik!=RVw{c^@9eYU}fXQbwku?8*F=96RNtNWKv1#FfbzC5TbSUy^J1 zQt0jp)IQOVuB()Z6lv6h^Av1IZTDP1KDS$7?HgI{ur37MqKV8I9+pL}lDN@H^PUtY zd$DN&-gd_#Ef(cZnRzQj{LQM%oOvk;L0MI#{3>FR*Aw<`eR#U-wE9+x z5tYyFz(fB0plZz>H`0HaWBlvbjgBAv^`EVBQoy{cz%u61+tKTO^2-JdGBpunPS&@ML=TA&{qmwMgvX4KlgM#u4?yc%I(9;d)zbWv}wj%Wq%D-y>3*P0VG@(?= z%sZs!C>6#-4kMpLcR6dlKnAZHPnhsZuQBa$2+{2PEL8ZZek>@@wkakVzb_z}atfkd z|8nAx>~dRJNe}E)Y%iLe2tC%F}<`R(PXI$ zy$O;aY^!%pOS_(FpIR#TLk0DKA-2Ps@1neF{SWY1$JMG;KQuIKH96XO5wzKLUAI`$ z+(laUjmbXR>lH7$z3e|xc-d%Ic%quFjbnd?yby*4<}Vn{Q(~Tf5nFImg(|N_m6!Yq zUyMG&)2Dg-!O*;v2a|W8ESVe2FX4Tm3sM=C0GLAf8>Q;&EL1p{Mq znd^TnLui@X%@r0k7?Qh zE>g$qkH#-`*zI@ga3*lwr!3%FWFn{1~{dal+*0|cS7YtN{#cf=qlWha|M^=ot}+OD2e*yEhJ31On)wy}}X z353<@XFbL5P=vPo$80~V5p3QRD?Ymk)rqJjY1+lwQZ6<&kv1r$9)>5*5-@|WRLk1MZys|dE`~vjbwacYs{L?i1*TcDn&OzR#2f4X6?y;IBCAn<`MaX83r!z11^PbZ zN{~5s0ji{lx|M!uz4y`p#$uY$X}oo*(ahKI=8NS&4%nN;~2nn-Fvwd4d$~7wRb` z)COuFuzN!=2Rh}Im6&6p3diGwB6>p^omXR3aD^q8&U0{VKcQW8b=QfL1?I8KiBzzM zLi{o4&j6Hzg(ta4*ON9urh4XK4etUJiPH!(knLKt-MKxIn0_uQ1&WBe(5Pj7^#y!e zL2bDlAM~MwvyqXui116{h9*0fHz8e2xng&FLim50>Yx!ptG6L+*keSK0N$CC9(LbZ z9seyU-aM7uXvVne=05H0SJV128+t!eRyv>53lRiT!|#4_#epDzhsQ35T5Wz_wMDC} zM9tUZtVP6Vi+WCnvkTq_ffj^;lwqz$`oB4sS**QEq4c8g48&Vni*x-<=kqZAxZdSD zna0h{(%XKJc@uU(_ii#8Cn!HEBAOBX>)&l7<@RMWepl~3=s&Rs7+@-8TAljWDJ|9f zd$TRPrM%XxjhOe{Iq9j&At61Yze`XZ1UrOA;VREo=jh8Hi@g`G9BqlTyoz{ow%y&T zn-HbYE(*h=wPDWfxLd_mb&IzrVuxq_404LEGfRR{sBJf&ztnI3VVjCm_M5Xt3}>*w zJ!#LK#afaMqf-kvZBn@~(;NpD(Oo)xpXxAB(WcnRVL^S8;!KB#>n=u~a26gV%s~O& zRe8SVRXT(nZ*AI=(fke8S{Tt`=%q?31^3bHi81OlQmeBG;cv#TAAX5uYdi$mb8!k5 zqbl{bqMse{0kOjlQ0x>V7XP{Sb|@ib*YWlR9G&;%MbG?;AIU#<4qRM_pqYS+u~919 z3`qr(I;7y`B@AsEhdgU~&lUSg%KnSygM~$W=qH7iC$P)V*`3QpKgc3gm`nnjhpk3+ zVg*Gv2d^$gm5?7ja|S|!*DEJftSFJEo-snaGt^ej^9eH|Q^dg%JFc5#-vFd<`Qs zil&jRK9E7_f5UmcQ31&W=TXSs4F=ysRIZLuu>-3e(}ij>0Bc2}uL<=A3UH@<9N<#zg??^(}A zF5C9dgr~_89Lj3>h1N2|OCWuw!2kXz?7gEBy&EEYJS zj*4d<#XEILxJA*uge{am+vzKSXPknZ#ag>8LbvrF`*lJ@@bZP+H1lP&0#sgWIk4m! 
zwKrd(-2s}0W~%CcksIDfbSA)9u~Y4fD%0_u59mUfDA*U>;GMF9@ri#XP8cO=q6eNo zJy;cwgDR}uM=c#nc5Gd4;~W^Bl`w-9{AC_}zq%y0RXTI4@KV2as17R1mI z^I9$wC|)Kdstj^f#3$OM5U8!LQ~hiVYQR##nm{2etG}Fm&a|RPYQ5CMexYVFZ-fe- z%!BQ3$9+fv3D_Gzt5+?&D3n}+H;@acXi=cDE=NLou2J6PZIZ8*)Zt;vWw!<9ACAvg zDf30T8;f(v^8KDZ&mR+UN$8@t0gYi~4C{3#B>TZ)qN>zrRnSblPi(Rmuj~HtORZrR zRFx}CUu+Ukd)c`RA&q@jKbH{>a;xLnq#L(>Uk+C(2+EWF!pl8#u3@`XA=7Uqy+(io zw>%KL>T2jf;8&&W5DIYkDV7{>BL4$$)Bqd~SYtnxIL9a5TAENGobCj)o4fw%DXe$t zlBc(DzbU(D>!q`((ieS_og-8M)1_cZvZiVd?NNGIjNqPAjac2cc^{{5OsvsUqY+Ka zKEMH<{`WRY=M?dKePYp?x3y>E$Qr;fj!$)>MXH+Twy6R|JK%H28{ElC(Tzk)+Kag) z_j+sRHAecxUx%@ybipkzMdNo!J^3K?3cvdTEifHfjed;dkhU{ef1OG-dEPQ#kNlBW z!5W$-)bRPG1woF$px$p28g3x(lqP+QFXl-i|lHq?b8(j%HD-E-v_q&L-G>uL6Cdwc1n z$nbmO?I!6tk2gObcca~aEID~nJTckwr8I2O2E;LID^`_Z0Nn7Lv58ACO9B3ffZy!K zqWQ!ENsOb%JLPU_$u|Z=01Q(B>%BL5{!Jo%#g(twPpM*@?94GHOHMlL=0KKj$w)JvMC*6B4UgE=H!C(+yoE?ekf7Y8ByjO zK9y<9KR)&0I}j)F-%8J`xYB=&xacT8*@1%=#Kl5niEIGL1fG zmtD41k(7p;F{~bAzIrQ-pTrlZi1Z)~8AsReV^8T^c+^K47@jw`-Tf9zGy1daQ`K!t zhn$)9br8K0BJr(k(f zYw6@?Ud0N_!zY@*+e>dixhP@_<0#HQ7z1Npkr<`u4VFH3CtC+jTRnl(QZ6CDv14Xh zS6zoU`4^WLt)0|DuS+P_b4YMqvtiCLb$;7h*^)oadIhjw!QL}g4F-Sp$zs({G}Cp( zg4eGPzYyjD7SSULwm|z7R%G8J!VQ7jGqg*Le1rfS9yKfQ7XH+y0Ty*jKx4snUpMSS9ydqP| z?ZL|3BFGc9Ic=^|!1^a&lWl?5ZJ@`^1wz+FqiF8ZI`o+gDnx`F1#UQ0{wd2^!_xYB zO9C$w8%Ec@JwqBV+*Qjk7yHi+WBpu7#=`hXEVf^8&&W$ijZgZf@#BYN3QyHgw%+5Z z+|PbpSb5nrtYl#aG@{UXvG^R@-%;+@bGfMW2C{ODagy*%Gcnskr#Sy>ia}7xOgqJI zP-&T3@B`XlkfRtP#NE+>T(W+gsP&IJ4*S1p@EB&|=j^M!HvoZo48&kSYVMeU2U9&5 z*cSJe#8YN~56q^Gj_spE^Ij@hLYB&WVC&O1sKwn}(CKqjDRt6m zp7J}xKFwuTq~wtF!)aMk;vHaGja1#w=p%kd>cOo9+P;%hSyUw2)m2URT0&YnJ~$3q zKkBrH{cmouVF?H5uNPsdiT)hZSPsk87!$v)llBM3B7Su8nq+*{XfVQpI9YvWEU8Dt zrnx%tra%TTtZ+Utr8xhJq-d%9{-V~IgAvd_zI`*%U(oc+JRgf=*3az=~>|zqCFwfC>(Lb87BRqs3|0h*Dbb48hKD4RK(ybZ^u%+$5vN95%F$q46DNf48E&FydD*;XuFDFaDJ?rfHoeu?Al|ng{ zVoQ8J`X{qRhsa6ME{6zsMd*B2sOJ9oYBf`#j^RrwHPkz|>!+&gJ12YcS{KZRBP?#b z85tFIYF=K|DwlEzN{RhP3ruFhYm9dC$6`;OE5aHZQpS#n`J_&Js%xP6Mo)oBbzP$9e&CO^`N{Nuy=~MKw0yQ^J z9fBO*2@St87o@#NT2M6mkKqtD43_oo$m6`-%9n+g=ZHb9NFiFONiNri(BwJdkT1bl zKTz7)>BCZ8+&WgLB=n#FCuh%g%1^_zQJ7t{Xn_RYT^Qcp#VU`AO4DL<1VoWJ3DwV; z-y42yed%^?H(%fX68Val>v_tvE}5!J2Y#!!KDFd;wPS_2G`iaNtv7zT-nze8-$i^O z%$457V=La0p?v-TLj#TVHN2AT*-`QqXGN(Z@HLj^^2&}YUUKqw)pmffX75Qu+$Bp= zaTz!wHm7$`j{aHptvsmLSr@?KyixU=kkDZ{d++kI*(IEUU=N zzW$G?tv~^mg|(GF^Q&I3qM^@I`!u^Mm`ZV`e9ga2&r04?I4GA^{#YbIfg)71xqvAG zR3H;tV!zHhT1j>ltztaB@x=B7`CvkQf3yzWExsUPAGEDGM)U2^YzS4Ocm|gC3Bj#A zFYp%zf_bOO=KlWUAo5Ej5h?VP6KH;N&AQ3}58tzNBpHtR5um)w(LQcb7IaS!_jlCY z7dIG+z43r9tlD`*m5N>`JYl0c6xUe>MQeC_izdKyxYID)r9v@?OdMl$! 
zo#1gFMP}gvZ||?3$*XQJZvX_5&#R#aQtS%6%g-+hFX?=q4r$z*;&pq_-C4xAQC%B7 zA0aGjNPG}8w#TPEb0C|^`&#c7YaO` zDPiaBi(URG#@z;$7i@{gH@HNZ*o)(QVTj}l?Lc-OcS_tOUk;vPGw1JS5D}&;AM~xl z*-gHrV<1m#gY&@tVxKhkyG*$qOFxxNi^v6{r3O`yr$5+-ua7avb$kdgFQ~g*rkf?x ztCxOnbm)sv)FWU~Uv9UcO{pw^8{9j|A`f-wS{4VA) zFiqz~Ka5-WIyv%HRwDe2kn(TE~)aoLcHH2Mm(eOIw0F7?Shb zLHz;z;_@}4kmiQBl+O4UM;h>N6030ur*-hsn(}n#IW9nR+|s6WY!neMc{BYaV(&*6 z`n%Y30h!{;vT#b;`TGK?B=j4DkcNEBc85r5oGHFcF*%a?&Iz1r8*Rfn<#7Ig?1ORn zos9ggjR;iekA0@K_l{vR6A^F2WDkvvDz`iGr~E>`r#*K1yW1|v)R*yrSEscZ6G#$) zY3+*6ofyzppS}jCbZ~ZW$_L;BOyfX&*KoH350y~d;MVZMOY#-BNt@Q1>#xMSiGn%N zr(ZSGI zcUk;%<%5Zu2(ol^9tv$}`kLR_UsbL3;YYrT;p@{;hO1a7%+s_cyk}bMH)ExOjU!Dl zf1d@UibX}Ha-t6YdOU_eXXIAHX@Ai8uU2TO79M+I69XLK)|P%p8{f<{xZIyl9v0L{ z|4=it7&e`MR3lKfTxZ*d=gc|$IWttSaXH84wPD-(r2yDec|AQC3O80X zZz@ny2PF62J5%EomXSO2XO>-UKJ1uP1@6QyG)wGm8*mJfeHk2dU)}#)6zeYUR`K8y z4v{O_rcY4s>~-vdn@O;!era!xt>4u>*u*LBPr>btUaB(WTYYpO8IJrQbTXbdQq0Lw z9DW{kR{4B72_ac51VH8{Bev{<9 z2@d{{tqWshekPG^N&e(j0<@e4o8G&)*$(hFjG2~Dfb zuo`#Wss|NVu2#6j7};lpB`oQ3cu1V+t|wP-a58-8H-Dzvck(xb|10vFsj+n5J5{M- z9Ji$(1#xbq=C)v(wk|3x;7)qqeVr~FYZ!C&QB_gv`Lqy)5*p@>HizEpzpY)TW1L5L z-!yHs#We~Y)pyTjR3M-8J^oVf5hoL+RK!}WNdT&@=u+yLYtCsExq1Dmrd6JCUE@^1I_Xlc89 zRtIK9wpqsCosIzu?(mwaTXNSfkB5AS&%kwv*LoFL6}^#sEo?57Xo_h05G11WMo0%G z7K#-5GSm+^yBf)J(?7pYnS2X{oH-R^QDy zv_{bdHhu?FmIYm!NxYTFDLZ02iEyyDFJbdp&*6VYFw$iBDdDFQsfWPmZ=J2M^YfhC zVMRaa@Nz9=?6+{Kx9LfO+jLpJQ@tWRjbG(*^aUDNplb*S!K9OI|Wz%Qa<4sAues)SP zE@0ScUe=~P}zgJ*1*Atsy9+K(^0 zalgfcU<&81ngCp+udTNYQjIvfrdGJBIL zBYa|{4kn3f`$R`}a~OYyrBimCm)T4Yr7njakYGm8s?{NVh#HV?-a-YS4!o4nK zORHejPnWK^L05s%Yn^~5hfQVtc~ag4pcd#uoh|zvWnp_mxv)89lK)KJ?`90{qTSR< z<=ML&`LOFPv;Hf=+8@pM&MStP)OYBH>r*d~5=`r!>hGg`DMN7qMAeR3~;QhQ}7 zLNsB4xjy?`Ggj%dKW3Xp8O+lkUe-z8ne8};90w%r*-YwM4GGOg6+VBg*JADSa11zo zm0IVi2sD>6zcw6EZYJC(zI+so&4yAl?Ve8!N20e~}GOjUfFp{3S z4#_y9=@6Y;4t7gd53G=d0x`Yyr-GT!_zn}0i&%=7L`nzy093K= zu^G+P9sF$v-lcCtEr+p9ouD;1+|vj_K0xoJ(*CK}-f?qyifC2*L&0?C3>~WaN4rBw0Vh zMIAZ@%peSyD@X;U_SkRd&idP9Y5#OBnH)NtrNY6y+epgQW1ts#1!~6(O_wwTdZWjM zq@dKQCCCEqgb9ER{9CYurY(LBnca8?LPs3bYRG&XT=YHU7L;bZw*YY^Je1n8CPUH_ zklALjyIkOn&fyK>vh^4Q!02GTc^bK$8Br$k0)m_c2d!y#kY%`Rj)8U??2t9hgyS>) zfVgdY417QM^AJc(rQFAEjYKr13?BnOZx!rR5N|-(>@(-+|0#fy)?*75*mkMkXz)Iy zG>Zad6#J`)&M96E?kA1p{=e0LRqUT8Y#)~Tj6D!dX(s+o9fbzuy9I7wf0wNu@MHJg zXhg_?1jrPRQx6@~j!cgM&p;1w?-reb5O);Uyk6VVA$selZ;bwOmwF zdKlF726iU%Xy|b6zxFk39+X1>&5e0>Hu@-;=B}D`2x*-T6$6EYr~2uK*l$=`F7vNh zlG!nkyZ6sY0pJ$)C3ZFZHZl;ov_P1EH5)Ac#ZHNgJ?RJk4XH76gkF@YoV$hGK7I(6^E$+dCyIX>VECiPYHn711!QFzpdmwm#;7)J|5D2-v zU)}oyZcokB)SNoq&q()FcR%Of(!UJ=k(#oqG5`n!0-(B0f36~pZs@3eHJtf zwEqMP0|Olc8w&>q8w(p72agaB2bTaB8ylY(pMa2vh?ocmkA#$ji1hiK=szXE|7N11 zVLeYI!o|jYzWVzP ztmjb(@v}|T=SPKwg91eVAFlryMQ6q!5s=gMCM9DLv`(q}jwvLp=M#p=@9iT$r(gxi zZ(i8^TL$2vKI40_$E(SfRVN z=+AI8>`nRC&g#1Jix%6*DThQ1KquPPmMv$|9&E=(zadmcG{fFub^Zf{(AyQ3*b71x zKA(QG^m`uzFBu>C41|GGslTYEsCqrt7P>4hndPr9;!RB@nvc{u=zaDA|B$_{_ADvM zCEbODWl98AluEAc-KECmuPP@puE78wdyez3duR;x43tKVqrqG#Hp$1nGuzkGo767c zR8kOly)qA{l!AHK{vxGI7@G{FBhvCTOS-BB<#ugrNvod5nfbv*oeMAW?c8kj@*lH)#v89!i%C=fIRn9Q&C=rti@)PqrAHTw|c(>>*!E3q$iV+P|U3 zRSNAd<-l(kjy|@f1V7!q&Sd;k9(hpeewrHB0Lp2kp1yLl!UB;0NKy}}Rpb=* z7@4rAOhMj$%P|g2oI-XIpo>9BqFGpq`Ha`paz)ua4xnWZ zG_FsUn;>vaGM7;zB`ICIGM!L5gB3yQ^Rqjg!Ey<8RimRdYXC{-(z}yjuV848(2&jt zFTVGL3(HpcnE@20*mJU`(A_5QqHt!1=EPA3m^IBnRWkDw8G=^YqwxNI>wGK@A-Ev9 zua^+#KGB?{4)n}L7g79fP$Nwt@z!ou>>p@i_@Mej-v5! 
za0072(3&4fcU(ACNQ|5bLqCb?h)gHGcT>>BP7pjQw6CQ&{*_x2vX`6FVKa@J!kQbLkLAav)zAJH`=ak+Hs3LYj~0kL;%RNZ?yu5`jxbf0Kq!zQ z8&F^Ckse5QonYoR%y zx9t^RD|^`fG%6VZN$fbs6xL)VaB3H8GiwXQ+>a_d%*|B=2~x-re~)aetNU~KK~D1} z5o>|lK$x$Mbhn#ihh!7TE@YgtMfG*W+Sb$OT!5TiVP+POh6cNpa&S2I#U*M#o~DX< zeWPE@x8fY3R8dhS3YkTU;tly$2Dc+~5+EA>J3@;NuL0jFbjj~5lu1m+t3?@Z{xp8m z8+dUh`OYXEdzS(vTub4fx{qxWafB1u3^)vqSyDW*lA@yb!>dTtg$%$PctOdtuj z$@YS^c(DiV!Ty~_X+x{iB~c0uGIj$`3A(oDaBfLa+Z?Z1$$tc5~K; zC5=3Ls0W-qDIpub8en-TZ4Bv<`tif3{*tDrlv3ccOkyqiVSXkxT3L41k#a%siZ*KZ zFpci~98D~B4wU&DA16RlBdXRWG^n zCVr2JVpLUgY zY4n^DEp{uGvJI}Ym%rF!9pbu=6jc3MRWX?RLvT*-k;wHTp01G+Q(R`fU?DnPe{kB%wUcWJ~5Ju&;EOsUI z)$|N;P$4%T>obZNVdSN_{m{3Wx>P~cko2{7%_ zzVG^9^$i3na?|>WnE=H5q0!prJYodpXd21c@pon0Hg8u6J`>^rya!2Ab`hj>FkA$J z|CQ+S!^*e@zj!q5)(6XDBV;E-aMZVslNcgeX|IWMk(Klh?{3GGYhaQ6rL)mWwp)B~nn>$1< z)~R&V2TH3)u@O12wv-I$1^4lyLS&}n)a@7Ol1R#&=~@eL`=vgEbFwBaV6&<5kNcl( zu6j8m?l!$VF*K~i5eCJ86@pm3Okx4Wn0B}A7C+j1D^!H1|8-G9Zl+M5TE-Ryz^i5n_GHHh4;p%YXKR^c_ z6m@9X_S+5$qfeXffls_|t6;EE;OD&_>6_mn$hN zPR1s2aX}n%(P!6MSLY}Ty@;abFb&|cqf6x*_ZjMYuzWukWcAsK+^Mqqdt%L(m2ZE8 zJi}^mVeG1|0E<({&Z*w_A;1*9t1+JJmlVyymO_LWQtsPb6nY0gp6_GMZ*iQ~_dFGv zYwCce3`Fd0U!#R6b(|+h?#Ui}(GNCbq5^`5ll3#Gx(Y_NUd06x)w7DXP@=ltZEkDR z?Ck1|;zjG#)V-M6{Ij8_APjoxh%@T7a@g8~LscA+aM6~v_7u<*NFOitBRW=GaNeK? z2b$m8_yNC@fVly>x?@HU7QDXb!9AKWM*ij5j0OMQFutZgBCVp;LCHCo}#f zyJ`ijP0rc_E&rzdW1ALhp5eKMxgBMfbecBzvk~;M33FWEK?hsXxn>GrLdid#jRN5* z#;5tA&YOTy;l4lD%>xCI(+{5+dlxF60LG| z>GvqXB_Sbq-)nC#D=13>Po~jB-yggfziwprKLAIM^mLpF)0o_B`i6riZh#d=pj%x# z7sw5ZFx5DOFigA(Y;7ZNEjS9v?r3*X&b(X4+&rE@x*g$+69v9&TuMZh z2^;@>C>aOv`c$MQ>{8b+-MPabbk#+c_>!V?CCiEeGyT8CO?<;PGneRUA@nYQ4B>i| z%eVmxlq@;f#jG{~1{3Ya+l%R54{asD|Jrg`y5bE} zp>TB8$isC_b@N+ZNAiJ-@=NSeu5RbE@#Q+7OgAvBhj9rm*D zxP`{DvTC>20~`A=SH@VqGxkqwD&jSZh&rdKXe>zH8TVi>Fa;bV0~ig9p(hyC&qJ zPBbQXWO^dyA zhJ-sCEkrNQhe&-hiH_vzxB!o0haOpHi9tddwkoAs1|*ct8K|qX?r)^L&~iBteB8h4 zzO=Vu4QnUmwlp*}C|7Vy82x0~;Fw;S`s*bj;W78Pnh#3KpuZM9c+VoGOhBUjHCg&8 z_oZ^R-fvXbkWMf3mLi;icwG@SR;aH(hH zu&pH#4YY{-d0@Gq=R)dR3Re7?&bn(6QTbb_jyKhY&xt3rxz@7xm(%gZ^-%=P*w zkUwI<;JyI_PpQ}ezupCo#2O{FJv+5ecc@SSZc@}!OX2J=v#~xHp=oWUG$)h3QgaML zRb^4q7LFLBA+S>#?x?X&dkT!~TKry_(u6ekb=M~h zW~}5|m@J=TwD|qz-BxB|SZR}qtgW@p;+(wx^ zJubd`gbUZQX>_K&DzIetgS(G8zBDE+u9=VlWS%%(F(qc4bK%gf(64KMk=kEZipQ@A zeLLV6E${F^;QDP3WfUrq7Qx9!R41Ft250 z%A&Rr-94y5VLYNQ>OSd-slZRE7G3(mfvYV{HZ zILUQI+~4%MzZ?gDog2BnQXn1X?yRq0Uc8(c-BV5nn!22_fiU34+ufqSJCj;VubcPX zT!p7QTs3*W^hk)YX_p{{%I8Z|a_J&UY1|g??qKgVD6-;aUkx<)Rxh{eR+w&^(Kq*? 
zRhLBN%2w!;U8GoDAn@KUKN-=ilU3z#9=t|WMK9+%eOFIt^I!OhMS0U7X^ffS13kq$ zr7O8N57^=^F0KhzDg}(qU^)mm$Xu+tyDJw=4FmWCB2C#0<_C8)lE9<+Bt)`Jog%Zd z0JZejk*`sR5Kt!I`m>Wso(d!yX&S?w6=z6Td=dM`-*PEMo2!@TL@vI_si76;FHe{* zw7$GgtOaHo+ms?-A9a)(gy+mw^|IOq(+lJxc(8bQ3K|O* zria*R*U}Y$>%6YlXzD+Jlh)kkWotW4rLh9IQzlDz=2_q`t%F(U-(xjr%3d`T^K1{=T`H zvWAx;y?Q0z9)fH}IQHWlMJSvKIv1o=8TT7|uZQO7ky{WQC4Kc_j*1zqd#9^$5Ho2o z-bTO^>mnc?=3|e*DBY|x9D3iL4lKK@yI*~*#ZV;niW?wqH;~=Gpz*q}?5(Fw(!sC$ zY{5vp$-^STkqAITYhY$5n6nLzoiX^XPI964B1+Ya3yQww-?eQFzD<0L0bX%ltJ zS4yxuwyje)?7P|aAYvuSXtPcGQuM-}wCD~|r#=Au@roKQudDKxXy z2TjUoqjOyF%G9Eic$!867`w6X*wdo5z3jRZpKD^y3VNDCdh-`tIK8Pc+hr-RgzK?| zmiv|Qeo5o<7NJaOGp}W2!$2d=fWa>t$!N6o&Je&~Ft(}2`qK9{V5nWAl5x_#749Pe zU3}zkkE%dd(^TK%u?!FbihXV;6Oc=Zellk}RYXYpM(~1{>6|5dgesolOAqZ+fVT=^ zfom0;;wC|Gtd4kh^5V7|a8{0u6)ci1j^0Ws@ky)c@{Uo1&7vPksg?5XNw(PlP8bfWWBLKA8G?iMr7Lw9;fHudcNR zi2hRnA1Fn@h2OxGQt4i8mSk>f2aQ+u?Hj+-Hb)zA!+Ue;dGVa%8qCe=%#_#-2N9v~= zo76PJ^Q<$EyhUslXXAUt_wr!LiQ9 zC>uu@8^p&jmOyrAe2v+>w8&Hrd{5SCLSPriwg6sDrjEzp@}lTf6S@R=!h+|GnBme2 znm6}zal_-1-6nHw#R@AeN6Se4V*~wDz}I?e=S>_lh-+Hx9NABme#^)X0UZqm3A6n9 zD5Sf4)PZ9W&UK@cqOT{@JfbIlrRVh%Yy+LmnKk5uIzcl13h2*tR?SUM3~aJG$IQK6~TG#D7zN02G_98aGuPCyC7 zKW4LZyP3*1dlI@0P1bdtXZuerfV9-v*>RE|>d@#G*n#JqwR3Uor%sdx5BD0gv&+GW zBz=Uf&NrnV0$%^tKi9p8TxtVBE))Y2ozVBM#ew3E9>Kz3A=7>#03Ls5nj= z#^c~f=w!N7YzwQaS$=mlO-aeynEuQ^7`)waTLYGT!!>YJA#HA?#^vmY)I~np5q5XU zeC-6Ox+*4EE`x@Zy4@0N9X27nF6XHY0aR&Xu}~XkET`y6`_c1rtVF?r>_dl+Q3`-vj1w5O`|wYEhHoD-2xL4dKYchS={1FIWog!l}TQ0iFuD! z6;09pj@N1~uR*LqZylmCjo}v1w+WXc*P>#J3?btp6 z)cIo?i*Xc>TYqr4bmP!x!tHpCx=J{X&o#4L>$K~;$hN!f(Z~+(-CJ`PmAAG}>EAbx z*yD(WTNQmdTydLuVn|X-lJB#D2X<9O&dc@{0#L2}j(R`l=>E`jdd{SnVad7K>OgEA z{5gDl*d6Zu3eJM=-&|+Z)Bw~LxFSUz#>y`b8Qb01FUcwfaD#MK-SM_?1y|eSl43xS zh?EkW{g9d;zjdiv^z+eMxH@>fDpvBa&RFLSQ{h6Z>C-iV;@MGoyFH*P=XF$8V8 z9F@A5l*>7d-aZNe%Vq%rg3C^kvrh|nhS^j)708AhhN)H|2%?D-IZkaTyEInagQ{{S=i!%+bAV6s#> zRtcvQR*rfYpfl!b)|~P|&7s8jShtv+IpmhD_l0ao^l`!w*M>xmE(cn{<>aXclx62s zDx~&TwlbvyE(*JjvJIbaZPUJ{@Ua`PRef-#alv^Hq4zzeD6GASW{DTs)!c2)W28cF zKKNR~mIQ9N)-wiMRSscvhtx(BY!MmG`|QO~O%8Ob4x}JMR}|pKFAq`2=T6mzcy-_^EyAT6)pM(n?aFZ*PieCMY9- zQFvJ0?)pwjz=eSS>*{cjq~n7J)NseAkJR?FHi~(uvETf6E*zSpkso@BY}#F)Xg+^Z z*m&*&feKUDjRF>|g&mIBV(*Ag=`lxaM=#(Mw&Nzk+%!<1E(P1t7MbIu*YT>_99b{- zOIIzq1dV_21iL&(P;*XUuYO6xm!(Bxdd7t_BYrJXSnPMGL#o;=90+nef}HIvtt{v8 zULmzHVg=Cw))n=&-zzlrI{LEX7rrqb+2~WKX=w2`gB6|-2d2)s4lN9Ekp!)@R)oSm#~#E zu`S(Wk3T#c?PX3=y@AF%s&(l?jm!;P@}(hFkt?A&XyIrDIj+^%3-tp@mW> z8zUAxH*Ck5IWAMmW1ZM1IrC0ckC@nu(NZSGkK1F-GOze^kGq$si^F$}cvajPl3LLaB+8iCd z=1S+L(NcFh9||;TRu%gPIKjjk9&`oaTpJLHe1)6HrA6mU;@3HTYSb6RwlAwtUr)pw>w-hD`Qo2E``% zb?bu0;?k^o5XsHVr`b^pMR8B!q6cMpEToP`O6|;#$!vHAsI!f@36*Th2v>5E2@cHn z4n8Y-^gD*}2Sj63Y#JtMvl-~6v7Rw|*l7{8F=|BvqoW!GXr6eYHG&yM7Bzt3*(4>LWA$hbm{Qf43+Xn}ln|`f#tJFP&s&!{CWXi15ALj{>A3Uo!`req4hBSwkK%+*^hJSKe8Q?O-- z7x*0}M?*;a&ONgHOA@zuLex^SF#Gx<_2YEciK1o71N6oOY}a16A^1$IZal10Js4L1!S!aL$LxMvE>K4|-6Eo1(IQRuhh~FuN*N7f z?0FBho?u4iZ>GD+kF7A@|{aa0*ws>LU+i25{QW}wz>SK(6tZdOBf94rea zOz(y6yj4~TcR-Z{DAd&gavG%rgu^nz*IoqwFeI>%(podlgW2l%$?*K|^nDJ8D^Uz-9A({P~|zj&igM zq77b^n1=@D+vGn<`z`%+@>H)X8@V%B*=VRu_ePX{soX^kPtXA>$OPrviJ8?p_eRO*+RKCpQL-uLy^s1&yXzi`Ox28ErAL)0!mk$k zLnJ9MVTJK;-L<~#tA{q-Sqqr%wXyriOZvo<2D^42hKpxX3Nj0_WLY%JziH0BL$NDk zA<*U>r=EuK@&ubdP8~-%vnHqQPE@};&tlY$YMBiDD7j~goor!j5@m^c(N82aPOcFW z?MiUW-q9{{eLk}daIU2LUZofrdq&B!uW24|7#`&`pws_`dYr)eh_K!LNqjP{%`nO| zgFZV8rRc=lzI$xj6GCop9ycY%QGCklUetVzda!{{Q!b7#fS~s2zayjfEE@}LyWcpU z*K4D@i3V4Li_l@mN1i47?*u;axM{`WqrpHQ1RUz>e()Xo*5nFXsgxIto5_>CYye$6 zAn*ar>fs_c3{*e~F6nV|Zav;>5=?#HTNp#r#nL~3hN&d9B)qz~ARS(Yc=@&(np}0# 
zd3JB*|5aaOi^k=Pd2(~@3yG#r$EA-KPG+$Zp{V#d<@SVH_m?I;PGcXMgXO9R2qC~M zqgR}+n@^J&g$c%RPxU-DV?QPY-);!oKsil$s9$oWn*j!v;LoZuY0yn?g%#0!UPlV` zXYrQrBAD2|6EUVb>LkNrWsA=R?e<16sC*YK@u@0m!e2n;WnpeLy^6t30^G>lEM@HD- zG_XR+3Y`+1+|rg6R<`I4=ps$+RP$x!DE)21SBmL-T)iX@&*SoSOjN7mVXkeZlxbn4 zqejwIjYrwDEE1PdG?m=+kl6Uc4fa&HT05JW9^b5Hi>O0Bat#|9ALH4A1Ji#1X0=~F zrK7X7w>Q3Q`35HCIhAxT22lvH2N$Tg8#`Nh)|ds}Z=gK0TO1Hp2fYlx-iXa!H~xLc zFJ6yjJScLNY-F|)d=PSB@-cfzi32b`)04!llaqPnZ1ZWnvz;!DO7`^0R$bAtexNFb;DsM^9Qnkdg{ET`W}u??{8>=zWFWhs4cqL;%6-O6FO)aBoh#!f5#}8 zRTy?UeV@d=GFwbw6v=Ys>|%vv0eCU&;D@o+lnye4C&gg+N1sUwG2HxFUiLo@<3aPA zjq+ml;;YUNt%33AEA%@?mBS;v>?$dD3d7Pelf%d{QD57=5E%^s&TG(PKOPK;Z?7VDT+_F_ zdE{*6keEM87wP%-jDtb;X=CGjnt+?vCCHhqCr@Hb*zDB!Ky2;q$CqDf8pl!=8vNlH zd20`$tk(C>NqU)2FB3vJHKjoKlWy_Cb2L!HcBXet+b$q(%W9!n@%9WkPMQwjYv3gw zvNP2p9|~|C8DWB_l_$qQo(E*m-87!aK|47@XrEhXTfV*%Lm?SV+81fRPQp2yQ0rt)=S3P zw*CW90f>+Il9tK>RL*Z{i$L$n@-`aGgg`|8V$4^SABw zi$SyK_ApGM;w^4V6%HRTgMw^*(P}m5&Avx% zyZ_~n_9u$n2CB3fRctT({`mEgp&Xt`ZM7Mqa+2JU`e+nQif-;Ej8cp;h-1FIIN7R- zo)Bi1$gX%W`4^UsQ3oaApUandR8u^-=h-4*J5$Wl1jAP6sO` zm1lTr7+c9GL(${Q!tBUrLO3B=88<4#w$;5G=N|yMZq$RHQB=cvLhgM%RXU33NbEP%CQ(PO$GixIUhQjrl^)CeW$keRkN~ABgy!HYxF9nGru1x|;4>n`4h* z7}gV&tI-aq${2n9ZW=!M&kYw6TK^HryA?cQ6Oyo7Y=&roRNO%4ceQQ{#rx`7iB!~~ zv(zMjVXFbl`=@l2WNqQ^5ob+?b!=i%_}{2aT}l-Aeh`ymJ&X*6uO}pXXZon)YBbV~ zhT#~!J{!26)zB0e&*I*3y3867iENQgqKzke{1{4_ntFJNI2|6Ija4O< zV7pOmDvjZmf#c%Okf4QfydNNAfNcTC^gVGb0wKoDZw#6W=}mR@!=44*y>ElL|-`}WF*HOQkde9+3Jvf#Wkj}0hN674cG{4Qii36-rFI8uq{NJ1@{j%l{3Uh+ZVP_ZV{P;452sZp zwyimI!xW+nN@1{52+nB~{aaUNfmN!nk9Rag^h|(OePRFN+t1m(f?;a!0t00l$+xM) zc{rk1tNSUWNMR|()kSpGR`-P#Z@GA)Fu6;qL}Q|`*H{kvQt-dFfFN9gdo9u*TD0yw!yp&4 z-r}Sjw(67UgE1`7p%3n!pC;(o=n%0cPV>%O3exs+W1idsbFQH={G8gyOOq2Qhix4EQjgg zn!qr~C^6+F1CFnHD00v+xNh)LPsCcg^rSYABm>O=Y4e%R?FrR8=CtN!f3}sqNd~^o z>qTI$C*UjfyqNnicV|{Dv{R6YRMt5fY1@A!h_=0;=%EX&Qml7u=YJtUljYZw$+Am_ z7kNGAnSUu`wpm)}goMho72d_%bRWlmU(l4T6uh$1_n{d;c_VuGc#aZh9dgij8h$&u zc~2+RpWsjQ58wzTMkwQxrq#hUt3t!V>JA9>r#EqKB3-ikRCsMOc&fryt!mZmFwZp9 zM0GbGfA>&kI=0I*J&cUDHpbE#-_crPPol=AVS&L|t_LcK7bPH99x`%vF^>bdTN4xQ z55jSUJxn;K-XT7NFNH&La1X=b%fEpefNAau&}sHkzSzj?#YNpH_A(VH1>F@<-U#;M zJC(6@Fb^ToNGydcg-{OP!(p{>z}w%pBMwfBnT0Yu-}K!4ag;OTmuOvKLKUuPv->;w z3Ad(wL%$c+s3MUZKOG8_-v+n(V-|HoUeXc1!k3N2^RAZv(4vrZ5Ly*+U`lmFj*yT@ zA?B((=n3bQOgsnXnpTdY)!kNQ13Z0(E#dfhDVj6|`xAL(VvRMf?^$6N_T>+HhGV+- z&z(;;3yi`nMhz5aU(gh8{qXN7Ztt~*lYanc4?57&>9Ct70d&Pd)4T@9-84ei51xm`R3a>D>G>%nfG}2G;W0nZ-vBH8 z6eU(YHZ?69euJ3%?8(pi4DF{0d^-YJ==gS*le+Zk19Msu9rR9w?kcbsJsZQ5Z^#k)*Dt>%d+ z$iE42b^3D0coiT1$2z0nD@WZ#{V<7nGFa{i^J*xXG_N=}B_KyAdB#*;d+Xpm1P6zc zi#fJT9=d_X5u<8D+s0G(;K zJA{viElB-^q?ofrN%nIri}GBYMNix_eG6lK`P{i!@ya6&zFFAz!~+I#mUHZm5qZ#i z2vf-9mR$8(6J2I~)Co5!pwJ6Sbb}CVZ^J7JGsEn(cA+l>>?V=Fm&7cO_&F3c8J829 z3{Ladmv0yfG=SeoK7ahIlBPTi-^|P-2siT z(QnpR64u}W^KNdgzlO&i7DD=P+8GWO_BB8^ynOzIS^l6z=`Qhzt&kCIN#1MvUlX4B zGYdD*1P+&dwOH!yLI;!?d%!BYj!Nm@Dwr@wghVeWG!QsP_a5iV-cNpYt@qUIodY-+i~s9Ut_4TB0Twz0h-xo`}N zLUy@%#0yy!_!3H==Lxp2z5;i=8B)}eJ}9)sMjBU>S;!mbD3neWz@j~o^4Sc9$n1EE z1_GLuRuY~k=_T^92t5**S?oSeQtIaj$bmZFG1?b+0bixco4e1oqI#MGKlUooqp=%v z938PNN@;=)%D>!#qA{7JS@vmD=~NohB6l_n{kolWvr9;krWvMvCpLNOo;>GU`bN6? 
zBRQWtFFw^u9!h_Hbvbl|vvCJrp})A(?zo1-ahFDP{v+m=nm50riXex}dlV_6q-!GtdQy0fG(~5RJ|O%dtS<&%1{$oJqFaSI!vhA&PcCR>OjJq9V#O$O*h*e3gSp z;X@qjn~JI2;#b*Y?d=`1*?{h8KpH9s>4uy;7V{S3dUWh{Mi+>Wd3Aep)aGWyAM=W# zO+uynJ4mneXU3Yv?H0DNywfL%x3xUY6jwv2=A~&=#FtUZ!_62WYoOK)2c>B_q}t`A z8h$r)U>fmij8_5CjG@P?9eMj=MpX?;6ms!$*xcUoy&9rv(iWt*SJC~FRTma`Z+lte zaTP)Ka&91fdGbpphP;F}_ci+`8K024W$eJg^tGdy3f@eI(6~ltD)$DUm1@0JLx|q$ ziPtkyVU|L{*na(BZYR-DBgsHqJgIo&s_3>4?U}!+NC|i2Kl1Zr9hj8+^}Q|np%Srg z5n<#f>H3oH&f|o-kDv3-D%5~q+8y`X21_dcUqx3L7S$F-FC`$IO6ZUx-J*yNq0~@H z2q^soaY&Jp7~)At4MPh&O1cIFq=yvgl$MZ^Zibk6*T3t>K5OrDzk9xW?%r!n{NZ;S z6Zkx5LDdpBfICnxs(+!Vyh zHwmPJ!&KLDGbysv_D$)UJx|GQ+>L;>dXbx+46dyOael5a}o7RzNUEjcmtfU|74>M-oeXfVZdL_Ml5eu)Le zv#c0$B4eq3K8fVyW9xTTCfRXymLD$wSYOZW{~mbvDx<#$yuqHZJa=bK2`f?R+c&P( zr2i*^P7Cv*o~(_|bDXsVVxK0cb1V>#w*9beD@ibLkunNF6=Zg@2+k>&1#aO`G$;$-QIFC5k~MS$ng!z;lw++Qs`XrT9A$ zFxxjPWkw!ij^dFpZMisIDt^m!PxL{ztQvJ{>D|N-<7)1y);!Cw?*v77*zKIn1xvGD z?m9s>WqwuX%4Y$WpfgoIcfUMJhIhbR`c89~KoopQ`SJa&VoP-`#{h!qpre=*U5BsF z<)yW>N&k~L$)<5pI6TuJF72G##N=sle9>C$kK64eg4B~8X9`COQ@}Niw5{;R{8;3F z=0U2ZvBz`)-p5nyNgs7`E5vd~JuC#+Urc*Y9r(o&<`r1GSFR=BL|#oY#oFD;M*4eW zV*k6DZE7BA&!~Jv^W}`)fUb<24m_490YL#)91@2tlz85ux5*QlVQQtU@gJ+Db*yBw zR})L|sKI=Q@)qKi(goQBDsNrS39WJ~@O#wI z)ktasLSxlkfwt^67CFXchHlm$OpVlLI`6zDLkKU=WToVB1J=Qr8lx6YH53c3PElxBX^9`BW? z>gl@?yXrmMb)o>xM^nGqa?HCP%NCs?k|IV$o?tgsn0&|d360Ocj{Mnc)7Z)1vlp5E z-fgT`&PioCnhqp-g%;&5*^Y42<)B+RtlgQyKl>xv&_f#UkspwzWJd>W4#-p!PTvQ% zyz@pDE)>D7SL;*xF=6$$j1mvUs>@ZxjA@0VN(KAHQO=_nkq)=GW-4x9+#wlwCMClE6{1lSppO zTsGvfnTC%8&z3&yWKP+Q)BDlpgNSETEnfsx+<(DA7q$y{)OfG4dc{b@syEV1y-XP} z#Rb>RX2AH3+D@f@MFrm_(KDV;r~mBVv|vj5KtkA3nDKZQS?b;BGpAM&1plVvp_OFz zI*n`ZV+1^E(GX#oDH$%Z$-)6?M3ngSt`DbUE<6r6TsH(WOW2i(0Vv3*W#0$W< zy#X@*Ol4O_Dp+Rsunx%^$Ja8Y{kjI_a&X=zQ&u#vT~RmQzY(SJRHl=GNssr0|}rohovu2eKxcmH-^xNa65tES+CM%PcC-6p@du!$Aa zWh@NQ6dn^0OJiheiG=Tw+?lmHbS`+1*=1k#i)!ttY=O|VUHEbL;6BNl9GMdzw7a57|lQHth)Y zaxzjiA|2h&DE6GRTEYMt!!Ha1-(|)c)m5x~H6+f+>^xdoeLlySvzbW9KmXRpJVhRt zocYMT@_Ngw*joCuF?rfA=Cfo7SYTkPc8T7*l=rmjP&cXig9e-;Q_f_YHMzNcQev#R z_=;d;6#d~gln&?)FrqG==xDZb-2!hZ+<^Umit!naj7QF)L zkb`N*F|d(Oj3EPc+pzSWpQ!iVgeE)jPbyT#AcsUVJsS_-&l@*5X zblR229bf#e>UahyeIMdR_gCm;<0vYFgq0!|19+ud<^h$7MffSYE1OA6pu0E1J~4C0 z)z8;wDM@Z%qJJSu&I?;xm%_gmqIdIIlUtzp{?etaX4(ku=A%b#dg6yBF$?{qwCDc+mTE z-~AxS+g5m(&xO-Qv-4?PUB^*FB22jU82NNh@rK z6MRSp-iV^uW9)r`oweCuRTK@l+}Ap%okLKSgZ`Pj-!42$l-qmu zwM6@vRLJ`3Tu;vc!H0ByN+o{vkGf$|?b*>`$EhJqq`8l<9{awfX7R$;x^I)0#4J() zpp#r;NIPufSrZma5eaYFJXZpHs0U=e&*YUw)d%jH`r@rPJkhCZ>$%=^bHZEqFgcwp zT8^@cWE|7yzH_AMX~^lif#;_znt8;q8+na|$Wy%Gz7R6%uk{d#ve;iGXJg z5l9pIBV~Fod{Q4@1)?-iB9MbWfDqCiS)JV6@d7?#8E^&oYl?YSU?&AQ*_GX6hbB8f z_(Cu2h`^WSMVv5nl?Ys_|Am?TY96DOT?t{OxE?GBkarW7DRH3AV2yiuJ7zxoLIW=Z za-qUpl^~L!{=7n1DbOgw_3{P?(&`cph`=hel^HTWiRjNFB>ZJFNd#KkLFo<5W{(;H zOIdgfMg`tN_c`~EK)2@gvO~~s7E?4x)?m?Tys`&7r3SmVgPfM2f~a{Oga$|hx~C{^TsA3)e{)uhyQ+p9W#KzR z5B0GPm<_@-xM{WeSD?k}e_5qV&xpXUQc$86JmiIw&`7i1hkETv< OCY)o|2 Dict[str, Any]: + context = {} + return context + + +@register_action +class PrintAction(EntrywiseAction): + """ + Print action that outputs selected entries to stdout + """ + + COMMAND: ClassVar[str] = "print" + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Output selected entries to stdout. 
") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(PrintAction, cls).add_arguments(parser) + + @classmethod + def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]): + import pprint + + printer = pprint.PrettyPrinter(indent=2, width=200, compact=True) + printer.pprint(entry) + + +@register_action +class ShowAction(EntrywiseAction): + """ + Show action that visualizes selected entries on an image + """ + + COMMAND: ClassVar[str] = "show" + VISUALIZERS: ClassVar[Dict[str, object]] = { + "dp_segm": DensePoseDataCoarseSegmentationVisualizer(), + "dp_i": DensePoseDataPointsIVisualizer(), + "dp_u": DensePoseDataPointsUVisualizer(), + "dp_v": DensePoseDataPointsVVisualizer(), + "dp_pts": DensePoseDataPointsVisualizer(), + "bbox": BoundingBoxVisualizer(), + } + + @classmethod + def add_parser(cls: type, subparsers: argparse._SubParsersAction): + parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries") + cls.add_arguments(parser) + parser.set_defaults(func=cls.execute) + + @classmethod + def add_arguments(cls: type, parser: argparse.ArgumentParser): + super(ShowAction, cls).add_arguments(parser) + parser.add_argument( + "visualizations", + metavar="", + help="Comma separated list of visualizations, possible values: " + "[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))), + ) + parser.add_argument( + "--output", + metavar="", + default="output.png", + help="File name to save output to", + ) + + @classmethod + def execute_on_entry(cls: type, entry: Dict[str, Any], context: Dict[str, Any]): + import cv2 + import numpy as np + + image_fpath = entry["file_name"] + image = cv2.imread(image_fpath, cv2.IMREAD_GRAYSCALE) + image = np.tile(image[:, :, np.newaxis], [1, 1, 3]) + datas = cls._extract_data_for_visualizers_from_entry(context["vis_specs"], entry) + visualizer = context["visualizer"] + image_vis = visualizer.visualize(image, datas) + entry_idx = context["entry_idx"] + 1 + out_fname = cls._get_out_fname(entry_idx, context["out_fname"]) + cv2.imwrite(out_fname, image_vis) + logger.info(f"Output saved to {out_fname}") + context["entry_idx"] += 1 + + @classmethod + def _get_out_fname(cls: type, entry_idx: int, fname_base: str): + base, ext = os.path.splitext(fname_base) + return base + ".{0:04d}".format(entry_idx) + ext + + @classmethod + def create_context(cls: type, args: argparse.Namespace) -> Dict[str, Any]: + vis_specs = args.visualizations.split(",") + visualizers = [] + for vis_spec in vis_specs: + vis = cls.VISUALIZERS[vis_spec] + visualizers.append(vis) + context = { + "vis_specs": vis_specs, + "visualizer": CompoundVisualizer(visualizers), + "out_fname": args.output, + "entry_idx": 0, + } + return context + + @classmethod + def _extract_data_for_visualizers_from_entry( + cls: type, vis_specs: List[str], entry: Dict[str, Any] + ): + dp_list = [] + bbox_list = [] + for annotation in entry["annotations"]: + is_valid, _ = DensePoseDataRelative.validate_annotation(annotation) + if not is_valid: + continue + bbox = torch.as_tensor(annotation["bbox"]) + bbox_list.append(bbox) + dp_data = DensePoseDataRelative(annotation) + dp_list.append(dp_data) + datas = [] + for vis_spec in vis_specs: + datas.append(bbox_list if "bbox" == vis_spec else (bbox_list, dp_list)) + return datas + + +def setup_dataset(dataset_name): + logger.info("Loading dataset {}".format(dataset_name)) + start = timer() + dataset = DatasetCatalog.get(dataset_name) + stop = 
+def setup_dataset(dataset_name):
+    logger.info("Loading dataset {}".format(dataset_name))
+    start = timer()
+    dataset = DatasetCatalog.get(dataset_name)
+    stop = timer()
+    logger.info("Loaded dataset {} in {:.3f}s".format(dataset_name, stop - start))
+    return dataset
+
+
+def create_argument_parser() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser(
+        description=DOC,
+        formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120),
+    )
+    parser.set_defaults(func=lambda _: parser.print_help(sys.stdout))
+    subparsers = parser.add_subparsers(title="Actions")
+    for _, action in _ACTION_REGISTRY.items():
+        action.add_parser(subparsers)
+    return parser
+
+
+def main():
+    parser = create_argument_parser()
+    args = parser.parse_args()
+    verbosity = args.verbosity if hasattr(args, "verbosity") else None
+    global logger
+    logger = setup_logger(name=LOGGER_NAME)
+    logger.setLevel(verbosity_to_level(verbosity))
+    args.func(args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/projects/DensePose/train_net.py b/projects/DensePose/train_net.py
new file mode 100644
index 0000000000..8dc9deb874
--- /dev/null
+++ b/projects/DensePose/train_net.py
@@ -0,0 +1,81 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+DensePose Training Script.
+
+This script is similar to the training script in detectron2/tools.
+
+It is an example of how a user might use detectron2 for a new project.
+"""
+
+import os
+
+import detectron2.utils.comm as comm
+from detectron2.checkpoint import DetectionCheckpointer
+from detectron2.config import get_cfg
+from detectron2.data import build_detection_test_loader, build_detection_train_loader
+from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
+from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results
+from detectron2.utils.logger import setup_logger
+
+from densepose import DatasetMapper, DensePoseCOCOEvaluator, add_densepose_config
+
+
+class Trainer(DefaultTrainer):
+    @classmethod
+    def build_evaluator(cls, cfg, dataset_name):
+        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
+        evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
+        if cfg.MODEL.DENSEPOSE_ON:
+            evaluators.append(DensePoseCOCOEvaluator(dataset_name, True, output_folder))
+        return DatasetEvaluators(evaluators)
+
+    @classmethod
+    def build_test_loader(cls, cfg, dataset_name):
+        return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False))
+
+    @classmethod
+    def build_train_loader(cls, cfg):
+        return build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True))
+
+
+def setup(args):
+    cfg = get_cfg()
+    add_densepose_config(cfg)
+    cfg.merge_from_file(args.config_file)
+    cfg.merge_from_list(args.opts)
+    cfg.freeze()
+    default_setup(cfg, args)
+    # Setup logger for "densepose" module
+    setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="densepose")
+    return cfg
+
+
+def main(args):
+    cfg = setup(args)
+
+    if args.eval_only:
+        model = Trainer.build_model(cfg)
+        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
+            cfg.MODEL.WEIGHTS, resume=args.resume
+        )
+        res = Trainer.test(cfg, model)
+        if comm.is_main_process():
+            verify_results(cfg, res)
+        return res
+
+    trainer = Trainer(cfg)
+    trainer.resume_or_load(resume=args.resume)
+    return trainer.train()
+
+
+if __name__ == "__main__":
+    args = default_argument_parser().parse_args()
+    print("Command Line Args:", args)
+    launch(
+        main,
+        args.num_gpus,
+        num_machines=args.num_machines,
+        machine_rank=args.machine_rank,
+        dist_url=args.dist_url,
+        args=(args,),
+    )
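The DensePose training script above reuses detectron2's `default_argument_parser`, so it is driven the same way as the other project scripts in this patch. A minimal launch sketch, with the config path written as a placeholder since the DensePose config files are not part of this excerpt:

```bash
# Train on 8 GPUs; <densepose_config.yaml> is a placeholder for a project config file
python projects/DensePose/train_net.py --config-file <densepose_config.yaml> --num-gpus 8

# Evaluation only, loading a trained checkpoint via the trailing config overrides
python projects/DensePose/train_net.py --config-file <densepose_config.yaml> --eval-only MODEL.WEIGHTS model.pth
```

The `MODEL.WEIGHTS model.pth` pair is consumed by `cfg.merge_from_list(args.opts)` in `setup()`, and `--eval-only` routes `main()` into the `Trainer.test` branch shown above.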
b/projects/README.md
new file mode 100644
index 0000000000..60fae36e89
--- /dev/null
+++ b/projects/README.md
@@ -0,0 +1,9 @@
+
+Here are a few research projects that are built on detectron2.
+They are examples of how to use detectron2 as a library to make your projects more
+maintainable.
+
++ [DensePose: Dense Human Pose Estimation In The Wild](DensePose)
++ [Scale-Aware Trident Networks for Object Detection](TridentNet)
++ TensorMask: A Foundation for Dense Object Segmentation. (Coming Soon)
++ Mesh R-CNN. (Coming Soon)
diff --git a/projects/TridentNet/README.md b/projects/TridentNet/README.md
new file mode 100644
index 0000000000..c0889665b4
--- /dev/null
+++ b/projects/TridentNet/README.md
@@ -0,0 +1,48 @@
+
+# TridentNet in Detectron2
+**Scale-Aware Trident Networks for Object Detection**
+
+Yanghao Li\*, Yuntao Chen\*, Naiyan Wang, Zhaoxiang Zhang
+
+[[`arXiv`](https://arxiv.org/abs/1901.01892)] [[`BibTeX`](#CitingTridentNet)]
+
+

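+As a quick illustration of the multi-branch idea explained below, here is a toy sketch (illustrative
+only, not part of this project's code; `ToySharedDilatedConv` is a made-up name): a single convolution
+weight applied with several dilations yields one scale-specific feature map per branch at no extra
+parameter cost.
+
+```python
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+
+class ToySharedDilatedConv(nn.Module):
+    """Toy illustration: one 3x3 weight tensor reused by every branch."""
+
+    def __init__(self, channels, dilations=(1, 2, 3)):
+        super().__init__()
+        self.dilations = dilations
+        # One weight shared across all branches -> no additional parameters.
+        self.weight = nn.Parameter(torch.randn(channels, channels, 3, 3) * 0.01)
+
+    def forward(self, x):
+        # Same weights, different dilation per branch -> different receptive fields.
+        return [F.conv2d(x, self.weight, padding=d, dilation=d) for d in self.dilations]
+
+
+# Three scale-specific feature maps computed from one set of parameters.
+branch_feats = ToySharedDilatedConv(8)(torch.randn(1, 8, 32, 32))
+print([f.shape for f in branch_feats])  # three tensors of shape (1, 8, 32, 32)
+```
+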
+
+In this repository, we implement TridentNet-Fast in the Detectron2 framework. Trident Network (TridentNet) aims to generate scale-specific feature maps with a uniform representational power. We construct a parallel multi-branch architecture in which each branch shares the same transformation parameters but with different receptive fields. TridentNet-Fast is a fast approximation of TridentNet that achieves significant improvements without any additional parameters or computational cost.
+
+## Training
+
+To train a model, run:
+```bash
+python /path/to/detectron2/projects/TridentNet/train_net.py --config-file 
+```
+
+For example, to launch end-to-end TridentNet training with a ResNet-50 backbone on 8 GPUs,
+run:
+```bash
+python /path/to/detectron2/projects/TridentNet/train_net.py --config-file /path/to/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml --num_gpus 8
+```
+
+## Testing
+
+Model testing is done in the same way as training, except that you add the flag `--eval-only` and
+specify the model location with `MODEL.WEIGHTS model.pth` on the command line:
+```bash
+python /path/to/detectron2/projects/TridentNet/train_net.py --config-file /path/to/detectron2/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml --eval-only MODEL.WEIGHTS model.pth
+```
+
+## Citing TridentNet
+
+If you use TridentNet, please use the following BibTeX entry.
+
+```
+@InProceedings{li2019scale,
+  title={Scale-Aware Trident Networks for Object Detection},
+  author={Li, Yanghao and Chen, Yuntao and Wang, Naiyan and Zhang, Zhaoxiang},
+  booktitle={The International Conference on Computer Vision (ICCV)},
+  year={2019}
+}
+```
+
diff --git a/projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml b/projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml
new file mode 100644
index 0000000000..1dd2a15af8
--- /dev/null
+++ b/projects/TridentNet/configs/Base-TridentNet-Fast-C4.yaml
@@ -0,0 +1,28 @@
+MODEL:
+  META_ARCHITECTURE: "GeneralizedRCNN"
+  BACKBONE:
+    NAME: "build_trident_resnet_backbone"
+  ROI_HEADS:
+    NAME: "TridentRes5ROIHeads"
+    POSITIVE_FRACTION: 0.5
+    BATCH_SIZE_PER_IMAGE: 128
+    PROPOSAL_APPEND_GT: False
+  PROPOSAL_GENERATOR:
+    NAME: "TridentRPN"
+  RPN:
+    POST_NMS_TOPK_TRAIN: 500
+  TRIDENT:
+    NUM_BRANCH: 3
+    BRANCH_DILATIONS: [1, 2, 3]
+    TEST_BRANCH_IDX: 1
+    TRIDENT_STAGE: "res4"
+DATASETS:
+  TRAIN: ("coco_2017_train",)
+  TEST: ("coco_2017_val",)
+SOLVER:
+  IMS_PER_BATCH: 16
+  BASE_LR: 0.02
+  STEPS: (60000, 80000)
+  MAX_ITER: 90000
+INPUT:
+  MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
diff --git a/projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml b/projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml
new file mode 100644
index 0000000000..bc83c2f9e7
--- /dev/null
+++ b/projects/TridentNet/configs/tridentnet_fast_R_101_C4_3x.yaml
@@ -0,0 +1,9 @@
+_BASE_: "Base-TridentNet-Fast-C4.yaml"
+MODEL:
+  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+  MASK_ON: False
+  RESNETS:
+    DEPTH: 101
+SOLVER:
+  STEPS: (210000, 250000)
+  MAX_ITER: 270000
diff --git a/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml b/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml
new file mode 100644
index 0000000000..fda2cb6622
--- /dev/null
+++ b/projects/TridentNet/configs/tridentnet_fast_R_50_C4_1x.yaml
@@ -0,0 +1,6 @@
+_BASE_: "Base-TridentNet-Fast-C4.yaml"
+MODEL:
+  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+  MASK_ON: False
+  RESNETS:
+    DEPTH: 50
diff --git a/projects/TridentNet/train_net.py
b/projects/TridentNet/train_net.py
new file mode 100644
index 0000000000..34cde48971
--- /dev/null
+++ b/projects/TridentNet/train_net.py
@@ -0,0 +1,68 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+TridentNet Training Script.
+
+This script is a simplified version of the training script in detectron2/tools.
+"""
+
+import os
+
+import detectron2.utils.comm as comm
+from detectron2.checkpoint import DetectionCheckpointer
+from detectron2.config import get_cfg
+from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
+from detectron2.evaluation import COCOEvaluator, verify_results
+
+from tridentnet import add_tridentnet_config
+
+
+class Trainer(DefaultTrainer):
+    @classmethod
+    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
+        if output_folder is None:
+            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
+        return COCOEvaluator(dataset_name, cfg, True, output_folder)
+
+
+def setup(args):
+    """
+    Create configs and perform basic setups.
+    """
+    cfg = get_cfg()
+    add_tridentnet_config(cfg)
+    cfg.merge_from_file(args.config_file)
+    cfg.merge_from_list(args.opts)
+    cfg.freeze()
+    default_setup(cfg, args)
+    return cfg
+
+
+def main(args):
+    cfg = setup(args)
+
+    if args.eval_only:
+        model = Trainer.build_model(cfg)
+        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
+            cfg.MODEL.WEIGHTS, resume=args.resume
+        )
+        res = Trainer.test(cfg, model)
+        if comm.is_main_process():
+            verify_results(cfg, res)
+        return res
+
+    trainer = Trainer(cfg)
+    trainer.resume_or_load(resume=args.resume)
+    return trainer.train()
+
+
+if __name__ == "__main__":
+    args = default_argument_parser().parse_args()
+    print("Command Line Args:", args)
+    launch(
+        main,
+        args.num_gpus,
+        num_machines=args.num_machines,
+        machine_rank=args.machine_rank,
+        dist_url=args.dist_url,
+        args=(args,),
+    )
diff --git a/projects/TridentNet/tridentnet/__init__.py b/projects/TridentNet/tridentnet/__init__.py
new file mode 100644
index 0000000000..2fcdeb45a0
--- /dev/null
+++ b/projects/TridentNet/tridentnet/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+from .config import add_tridentnet_config
+from .trident_backbone import (
+    TridentBottleneckBlock,
+    build_trident_resnet_backbone,
+    make_trident_stage,
+)
+from .trident_rpn import TridentRPN
+from .trident_rcnn import TridentRes5ROIHeads, TridentStandardROIHeads
diff --git a/projects/TridentNet/tridentnet/config.py b/projects/TridentNet/tridentnet/config.py
new file mode 100644
index 0000000000..5a484a0013
--- /dev/null
+++ b/projects/TridentNet/tridentnet/config.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from detectron2.config import CfgNode as CN
+
+
+def add_tridentnet_config(cfg):
+    """
+    Add config for TridentNet.
+    """
+    _C = cfg
+
+    _C.MODEL.TRIDENT = CN()
+
+    # Number of branches for TridentNet.
+    _C.MODEL.TRIDENT.NUM_BRANCH = 3
+    # Specify the dilations for each branch.
+    _C.MODEL.TRIDENT.BRANCH_DILATIONS = [1, 2, 3]
+    # Specify the stage for applying trident blocks. Default stage is Res4 according to the
+    # TridentNet paper.
+    _C.MODEL.TRIDENT.TRIDENT_STAGE = "res4"
+    # Specify the test branch index for TridentNet-Fast inference:
+    #   - use -1 to aggregate the results of all branches during inference.
+    #   - otherwise, only the specified branch is used for fast inference. The recommended
+    #     setting is to use the middle branch.
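+    # For example, with BRANCH_DILATIONS = [1, 2, 3], TEST_BRANCH_IDX = 1 selects the middle
+    # (dilation 2) branch at test time.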
+ _C.MODEL.TRIDENT.TEST_BRANCH_IDX = 1 diff --git a/projects/TridentNet/tridentnet/trident_backbone.py b/projects/TridentNet/tridentnet/trident_backbone.py new file mode 100644 index 0000000000..e65d9d3f32 --- /dev/null +++ b/projects/TridentNet/tridentnet/trident_backbone.py @@ -0,0 +1,223 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import fvcore.nn.weight_init as weight_init +import torch +import torch.nn.functional as F + +from detectron2.layers import Conv2d, FrozenBatchNorm2d, get_norm +from detectron2.modeling import BACKBONE_REGISTRY, ResNet, ResNetBlockBase, make_stage +from detectron2.modeling.backbone.resnet import BasicStem, BottleneckBlock, DeformBottleneckBlock + +from .trident_conv import TridentConv + +__all__ = ["TridentBottleneckBlock", "make_trident_stage", "build_trident_resnet_backbone"] + + +class TridentBottleneckBlock(ResNetBlockBase): + def __init__( + self, + in_channels, + out_channels, + *, + bottleneck_channels, + stride=1, + num_groups=1, + norm="BN", + stride_in_1x1=False, + num_branch=3, + dilations=(1, 2, 3), + concat_output=False, + test_branch_idx=-1, + ): + """ + Args: + num_branch (int): the number of branches in TridentNet. + dilations (tuple): the dilations of multple branches in TridentNet. + concat_output (bool): if concatenate outputs of multiple branches in TridentNet. + Use 'True' for the last trident block. + """ + super().__init__(in_channels, out_channels, stride) + + assert num_branch == len(dilations) + + self.num_branch = num_branch + self.concat_output = concat_output + self.test_branch_idx = test_branch_idx + + if in_channels != out_channels: + self.shortcut = Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False, + norm=get_norm(norm, out_channels), + ) + else: + self.shortcut = None + + stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) + + self.conv1 = Conv2d( + in_channels, + bottleneck_channels, + kernel_size=1, + stride=stride_1x1, + bias=False, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv2 = TridentConv( + bottleneck_channels, + bottleneck_channels, + kernel_size=3, + stride=stride_3x3, + paddings=dilations, + bias=False, + groups=num_groups, + dilations=dilations, + num_branch=num_branch, + test_branch_idx=test_branch_idx, + norm=get_norm(norm, bottleneck_channels), + ) + + self.conv3 = Conv2d( + bottleneck_channels, + out_channels, + kernel_size=1, + bias=False, + norm=get_norm(norm, out_channels), + ) + + for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]: + if layer is not None: # shortcut can be None + weight_init.c2_msra_fill(layer) + + def forward(self, x): + num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1 + if not isinstance(x, list): + x = [x] * num_branch + out = [self.conv1(b) for b in x] + out = [F.relu_(b) for b in out] + + out = self.conv2(out) + out = [F.relu_(b) for b in out] + + out = [self.conv3(b) for b in out] + + if self.shortcut is not None: + shortcut = [self.shortcut(b) for b in x] + else: + shortcut = x + + out = [out_b + shortcut_b for out_b, shortcut_b in zip(out, shortcut)] + out = [F.relu_(b) for b in out] + if self.concat_output: + out = torch.cat(out) + return out + + +def make_trident_stage(block_class, num_blocks, first_stride, **kwargs): + """ + Create a resnet stage by creating many blocks for TridentNet. 
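+    The last block is created with concat_output=True so that the outputs of all branches
+    are concatenated along the batch dimension.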
+ """ + blocks = [] + for i in range(num_blocks - 1): + blocks.append(block_class(stride=first_stride if i == 0 else 1, **kwargs)) + kwargs["in_channels"] = kwargs["out_channels"] + blocks.append(block_class(stride=1, concat_output=True, **kwargs)) + return blocks + + +@BACKBONE_REGISTRY.register() +def build_trident_resnet_backbone(cfg, input_shape): + """ + Create a ResNet instance from config for TridentNet. + + Returns: + ResNet: a :class:`ResNet` instance. + """ + # need registration of new blocks/stems? + norm = cfg.MODEL.RESNETS.NORM + stem = BasicStem( + in_channels=input_shape.channels, + out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, + norm=norm, + ) + freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT + + if freeze_at >= 1: + for p in stem.parameters(): + p.requires_grad = False + stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem) + + # fmt: off + out_features = cfg.MODEL.RESNETS.OUT_FEATURES + depth = cfg.MODEL.RESNETS.DEPTH + num_groups = cfg.MODEL.RESNETS.NUM_GROUPS + width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP + bottleneck_channels = num_groups * width_per_group + in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS + out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS + stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1 + res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION + deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE + deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED + deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS + num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + branch_dilations = cfg.MODEL.TRIDENT.BRANCH_DILATIONS + trident_stage = cfg.MODEL.TRIDENT.TRIDENT_STAGE + test_branch_idx = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX + # fmt: on + assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) + + num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] + + stages = [] + + res_stage_idx = {"res2": 2, "res3": 3, "res4": 4, "res5": 5} + out_stage_idx = [res_stage_idx[f] for f in out_features] + trident_stage_idx = res_stage_idx[trident_stage] + max_stage_idx = max(out_stage_idx) + for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): + dilation = res5_dilation if stage_idx == 5 else 1 + first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 + stage_kargs = { + "num_blocks": num_blocks_per_stage[idx], + "first_stride": first_stride, + "in_channels": in_channels, + "bottleneck_channels": bottleneck_channels, + "out_channels": out_channels, + "num_groups": num_groups, + "norm": norm, + "stride_in_1x1": stride_in_1x1, + "dilation": dilation, + } + if stage_idx == trident_stage_idx: + assert not deform_on_per_stage[ + idx + ], "Not support deformable conv in Trident blocks yet." 
+ stage_kargs["block_class"] = TridentBottleneckBlock + stage_kargs["num_branch"] = num_branch + stage_kargs["dilations"] = branch_dilations + stage_kargs["test_branch_idx"] = test_branch_idx + stage_kargs.pop("dilation") + elif deform_on_per_stage[idx]: + stage_kargs["block_class"] = DeformBottleneckBlock + stage_kargs["deform_modulated"] = deform_modulated + stage_kargs["deform_num_groups"] = deform_num_groups + else: + stage_kargs["block_class"] = BottleneckBlock + blocks = ( + make_trident_stage(**stage_kargs) + if stage_idx == trident_stage_idx + else make_stage(**stage_kargs) + ) + in_channels = out_channels + out_channels *= 2 + bottleneck_channels *= 2 + + if freeze_at >= stage_idx: + for block in blocks: + block.freeze() + stages.append(blocks) + return ResNet(stem, stages, out_features=out_features) diff --git a/projects/TridentNet/tridentnet/trident_conv.py b/projects/TridentNet/tridentnet/trident_conv.py new file mode 100644 index 0000000000..7e2d5252bd --- /dev/null +++ b/projects/TridentNet/tridentnet/trident_conv.py @@ -0,0 +1,107 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn.modules.utils import _pair + +from detectron2.layers.wrappers import _NewEmptyTensorOp + + +class TridentConv(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + paddings=0, + dilations=1, + groups=1, + num_branch=1, + test_branch_idx=-1, + bias=False, + norm=None, + activation=None, + ): + super(TridentConv, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.num_branch = num_branch + self.stride = _pair(stride) + self.groups = groups + self.with_bias = bias + if isinstance(paddings, int): + paddings = [paddings] * self.num_branch + if isinstance(dilations, int): + dilations = [dilations] * self.num_branch + self.paddings = [_pair(padding) for padding in paddings] + self.dilations = [_pair(dilation) for dilation in dilations] + self.test_branch_idx = test_branch_idx + self.norm = norm + self.activation = activation + + assert len({self.num_branch, len(self.paddings), len(self.dilations)}) == 1 + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) + ) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") + if self.bias is not None: + nn.init.constant_(self.bias, 0) + + def forward(self, inputs): + num_branch = self.num_branch if self.training or self.test_branch_idx == -1 else 1 + assert len(inputs) == num_branch + + if inputs[0].numel() == 0: + output_shape = [ + (i + 2 * p - (di * (k - 1) + 1)) // s + 1 + for i, p, di, k, s in zip( + inputs[0].shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride + ) + ] + output_shape = [input[0].shape[0], self.weight.shape[0]] + output_shape + return [_NewEmptyTensorOp.apply(input, output_shape) for input in inputs] + + if self.training or self.test_branch_idx == -1: + outputs = [ + F.conv2d(input, self.weight, self.bias, self.stride, padding, dilation, self.groups) + for input, dilation, padding in zip(inputs, self.dilations, self.paddings) + ] + else: + outputs = [ + F.conv2d( + inputs[0], + self.weight, + self.bias, + self.stride, + self.paddings[self.test_branch_idx], + self.dilations[self.test_branch_idx], + self.groups, + ) + ] + + if self.norm is not None: 
+            outputs = [self.norm(x) for x in outputs]
+        if self.activation is not None:
+            outputs = [self.activation(x) for x in outputs]
+        return outputs
+
+    def extra_repr(self):
+        tmpstr = "in_channels=" + str(self.in_channels)
+        tmpstr += ", out_channels=" + str(self.out_channels)
+        tmpstr += ", kernel_size=" + str(self.kernel_size)
+        tmpstr += ", num_branch=" + str(self.num_branch)
+        tmpstr += ", test_branch_idx=" + str(self.test_branch_idx)
+        tmpstr += ", stride=" + str(self.stride)
+        tmpstr += ", paddings=" + str(self.paddings)
+        tmpstr += ", dilations=" + str(self.dilations)
+        tmpstr += ", groups=" + str(self.groups)
+        tmpstr += ", bias=" + str(self.with_bias)
+        return tmpstr
diff --git a/projects/TridentNet/tridentnet/trident_rcnn.py b/projects/TridentNet/tridentnet/trident_rcnn.py
new file mode 100644
index 0000000000..f458b25c44
--- /dev/null
+++ b/projects/TridentNet/tridentnet/trident_rcnn.py
@@ -0,0 +1,110 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+from detectron2.layers import batched_nms
+from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
+from detectron2.modeling.roi_heads.roi_heads import Res5ROIHeads
+from detectron2.structures import Instances
+
+
+def merge_branch_instances(instances, num_branch, nms_thresh, topk_per_image):
+    """
+    Merge detection results from different branches of TridentNet.
+    Return detection results obtained by applying non-maximum suppression (NMS) on bounding boxes
+    and keeping the unsuppressed boxes and other instance fields (e.g. masks), if any.
+
+    Args:
+        instances (list[Instances]): A list of N * num_branch instances that store detection
+            results: N images, each with num_branch instances.
+        num_branch (int): Number of branches used for merging detection results for each image.
+        nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
+        topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
+            all detections.
+
+    Returns:
+        results (list[Instances]): A list of N instances, one for each image in the batch,
+            that stores the top-k most confident detections after merging results from multiple
+            branches.
+    """
+    if num_branch == 1:
+        return instances
+
+    batch_size = len(instances) // num_branch
+    results = []
+    for i in range(batch_size):
+        instance = Instances.cat([instances[i + batch_size * j] for j in range(num_branch)])
+
+        # Apply per-class NMS
+        keep = batched_nms(
+            instance.pred_boxes.tensor, instance.scores, instance.pred_classes, nms_thresh
+        )
+        keep = keep[:topk_per_image]
+        result = instance[keep]
+
+        results.append(result)
+
+    return results
+
+
+@ROI_HEADS_REGISTRY.register()
+class TridentRes5ROIHeads(Res5ROIHeads):
+    """
+    The TridentNet ROIHeads in a typical "C4" R-CNN model.
+    See :class:`Res5ROIHeads`.
+    """
+
+    def __init__(self, cfg, input_shape):
+        super().__init__(cfg, input_shape)
+
+        self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH
+        self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1
+
+    def forward(self, images, features, proposals, targets=None):
+        """
+        See :class:`Res5ROIHeads.forward`.
+ """ + num_branch = self.num_branch if self.training or not self.trident_fast else 1 + all_targets = targets * num_branch if targets is not None else None + pred_instances, losses = super().forward(images, features, proposals, all_targets) + del images, all_targets, targets + + if self.training: + return pred_instances, losses + else: + pred_instances = merge_branch_instances( + pred_instances, num_branch, self.test_nms_thresh, self.test_detections_per_img + ) + + return pred_instances, {} + + +@ROI_HEADS_REGISTRY.register() +class TridentStandardROIHeads(StandardROIHeads): + """ + The `StandardROIHeads` for TridentNet. + See :class:`StandardROIHeads`. + """ + + def __init__(self, cfg, input_shape): + super(TridentStandardROIHeads, self).__init__(cfg, input_shape) + + self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1 + + def forward(self, images, features, proposals, targets=None): + """ + See :class:`Res5ROIHeads.forward`. + """ + # Use 1 branch if using trident_fast during inference. + num_branch = self.num_branch if self.training or not self.trident_fast else 1 + # Duplicate targets for all branches in TridentNet. + all_targets = targets * num_branch if targets is not None else None + pred_instances, losses = super().forward(images, features, proposals, all_targets) + del images, all_targets, targets + + if self.training: + return pred_instances, losses + else: + pred_instances = merge_branch_instances( + pred_instances, num_branch, self.test_nms_thresh, self.test_detections_per_img + ) + + return pred_instances, {} diff --git a/projects/TridentNet/tridentnet/trident_rpn.py b/projects/TridentNet/tridentnet/trident_rpn.py new file mode 100644 index 0000000000..c30137f312 --- /dev/null +++ b/projects/TridentNet/tridentnet/trident_rpn.py @@ -0,0 +1,32 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch + +from detectron2.modeling import PROPOSAL_GENERATOR_REGISTRY +from detectron2.modeling.proposal_generator.rpn import RPN +from detectron2.structures import ImageList + + +@PROPOSAL_GENERATOR_REGISTRY.register() +class TridentRPN(RPN): + """ + Trident RPN subnetwork. + """ + + def __init__(self, cfg, input_shape): + super(TridentRPN, self).__init__(cfg, input_shape) + + self.num_branch = cfg.MODEL.TRIDENT.NUM_BRANCH + self.trident_fast = cfg.MODEL.TRIDENT.TEST_BRANCH_IDX != -1 + + def forward(self, images, features, gt_instances=None): + """ + See :class:`RPN.forward`. + """ + num_branch = self.num_branch if self.training or not self.trident_fast else 1 + # Duplicate images and gt_instances for all branches in TridentNet. 
+ all_images = ImageList( + torch.cat([images.tensor] * num_branch), images.image_sizes * num_branch + ) + all_gt_instances = gt_instances * num_branch if gt_instances is not None else None + + return super(TridentRPN, self).forward(all_images, features, all_gt_instances) diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000..684e1310d0 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,23 @@ +[isort] +line_length=100 +multi_line_output=4 +known_standard_library=numpy,setuptools +known_myself=detectron2 +known_third_party=fvcore,matplotlib,cv2,torch,torchvision,PIL,pycocotools,yacs,termcolor,cityscapesscripts,tabulate,tqdm,scipy,lvis,torchvision +no_lines_before=STDLIB,THIRDPARTY +sections=FUTURE,STDLIB,THIRDPARTY,myself,FIRSTPARTY,LOCALFOLDER +default_section=FIRSTPARTY + +[mypy] +python_version=3.6 +ignore_missing_imports = True +warn_unused_configs = True +disallow_untyped_defs = True +check_untyped_defs = True +warn_unused_ignores = True +warn_redundant_casts = True +show_column_numbers = True +follow_imports = silent +allow_redefinition = True +; Require all functions to be annotated +disallow_incomplete_defs = True diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000..1b313e60a8 --- /dev/null +++ b/setup.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import glob +import os +from setuptools import find_packages, setup +import torch +from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension + + +def get_extensions(): + this_dir = os.path.dirname(os.path.abspath(__file__)) + extensions_dir = os.path.join(this_dir, "detectron2", "layers", "csrc") + + main_source = os.path.join(extensions_dir, "vision.cpp") + sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp")) + source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + + sources = [main_source] + sources + + extension = CppExtension + + extra_compile_args = {"cxx": []} + define_macros = [] + + if torch.cuda.is_available() and CUDA_HOME is not None: + extension = CUDAExtension + sources += source_cuda + define_macros += [("WITH_CUDA", None)] + extra_compile_args["nvcc"] = [ + "-DCUDA_HAS_FP16=1", + "-D__CUDA_NO_HALF_OPERATORS__", + "-D__CUDA_NO_HALF_CONVERSIONS__", + "-D__CUDA_NO_HALF2_OPERATORS__", + ] + + # It's better if pytorch can do this by default .. + CC = os.environ.get("CC", None) + if CC is not None: + extra_compile_args["nvcc"].append("-ccbin={}".format(CC)) + + sources = [os.path.join(extensions_dir, s) for s in sources] + + include_dirs = [extensions_dir] + + ext_modules = [ + extension( + "detectron2._C", + sources, + include_dirs=include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args, + ) + ] + + return ext_modules + + +setup( + name="detectron2", + version="0.1", + author="FAIR", + url="unknown", + description="object detection in pytorch", + packages=find_packages(exclude=("configs", "tests")), + install_requires=[ + "termcolor>=1.1", + "Pillow", + "yacs>=0.1.6", + "tabulate", + "cloudpickle", + "matplotlib", + "tqdm>4.29.0", + "shapely", + ], + ext_modules=get_extensions(), + cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, +) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000000..168f9979a4 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved diff --git a/tests/test_anchor_generator.py b/tests/test_anchor_generator.py new file mode 100644 index 0000000000..e122655239 --- /dev/null +++ b/tests/test_anchor_generator.py @@ -0,0 +1,90 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import unittest +import torch + +from detectron2.config import get_cfg +from detectron2.layers import ShapeSpec +from detectron2.modeling.anchor_generator import DefaultAnchorGenerator, RotatedAnchorGenerator + +logger = logging.getLogger(__name__) + + +class TestAnchorGenerator(unittest.TestCase): + def test_default_anchor_generator(self): + cfg = get_cfg() + cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] + cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]] + + anchor_generator = DefaultAnchorGenerator(cfg, [ShapeSpec(stride=4)]) + + # only the last two dimensions of features matter here + num_images = 2 + features = {"stage3": torch.rand(num_images, 96, 1, 2)} + anchors = anchor_generator([features["stage3"]]) + expected_anchor_tensor = torch.tensor( + [ + [-32.0, -8.0, 32.0, 8.0], + [-16.0, -16.0, 16.0, 16.0], + [-8.0, -32.0, 8.0, 32.0], + [-64.0, -16.0, 64.0, 16.0], + [-32.0, -32.0, 32.0, 32.0], + [-16.0, -64.0, 16.0, 64.0], + [-28.0, -8.0, 36.0, 8.0], # -28.0 == -32.0 + STRIDE (4) + [-12.0, -16.0, 20.0, 16.0], + [-4.0, -32.0, 12.0, 32.0], + [-60.0, -16.0, 68.0, 16.0], + [-28.0, -32.0, 36.0, 32.0], + [-12.0, -64.0, 20.0, 64.0], + ] + ) + + for i in range(num_images): + assert torch.allclose(anchors[i][0].tensor, expected_anchor_tensor) + + def test_rrpn_anchor_generator(self): + cfg = get_cfg() + cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] + cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1, 4]] + cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 45]] + anchor_generator = RotatedAnchorGenerator(cfg, [ShapeSpec(stride=4)]) + + # only the last two dimensions of features matter here + num_images = 2 + features = {"stage3": torch.rand(num_images, 96, 1, 2)} + anchors = anchor_generator([features["stage3"]]) + expected_anchor_tensor = torch.tensor( + [ + [0.0, 0.0, 64.0, 16.0, 0.0], + [0.0, 0.0, 64.0, 16.0, 45.0], + [0.0, 0.0, 32.0, 32.0, 0.0], + [0.0, 0.0, 32.0, 32.0, 45.0], + [0.0, 0.0, 16.0, 64.0, 0.0], + [0.0, 0.0, 16.0, 64.0, 45.0], + [0.0, 0.0, 128.0, 32.0, 0.0], + [0.0, 0.0, 128.0, 32.0, 45.0], + [0.0, 0.0, 64.0, 64.0, 0.0], + [0.0, 0.0, 64.0, 64.0, 45.0], + [0.0, 0.0, 32.0, 128.0, 0.0], + [0.0, 0.0, 32.0, 128.0, 45.0], + [4.0, 0.0, 64.0, 16.0, 0.0], # 4.0 == 0.0 + STRIDE (4) + [4.0, 0.0, 64.0, 16.0, 45.0], + [4.0, 0.0, 32.0, 32.0, 0.0], + [4.0, 0.0, 32.0, 32.0, 45.0], + [4.0, 0.0, 16.0, 64.0, 0.0], + [4.0, 0.0, 16.0, 64.0, 45.0], + [4.0, 0.0, 128.0, 32.0, 0.0], + [4.0, 0.0, 128.0, 32.0, 45.0], + [4.0, 0.0, 64.0, 64.0, 0.0], + [4.0, 0.0, 64.0, 64.0, 45.0], + [4.0, 0.0, 32.0, 128.0, 0.0], + [4.0, 0.0, 32.0, 128.0, 45.0], + ] + ) + + for i in range(num_images): + assert torch.allclose(anchors[i][0].tensor, expected_anchor_tensor) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_box2box_transform.py b/tests/test_box2box_transform.py new file mode 100644 index 0000000000..d9e7aaf6ee --- /dev/null +++ b/tests/test_box2box_transform.py @@ -0,0 +1,58 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import unittest +import torch + +from detectron2.modeling.box_regression import Box2BoxTransform, Box2BoxTransformRotated + +logger = logging.getLogger(__name__) + + +def random_boxes(mean_box, stdev, N): + return torch.rand(N, 4) * stdev + torch.tensor(mean_box, dtype=torch.float) + + +class TestBox2BoxTransform(unittest.TestCase): + def test_reconstruction(self): + weights = (5, 5, 10, 10) + b2b_tfm = Box2BoxTransform(weights=weights) + src_boxes = random_boxes([10, 10, 20, 20], 1, 10) + dst_boxes = random_boxes([10, 10, 20, 20], 1, 10) + + devices = [torch.device("cpu")] + if torch.cuda.is_available(): + devices.append(torch.device("cuda")) + for device in devices: + src_boxes = src_boxes.to(device=device) + dst_boxes = dst_boxes.to(device=device) + deltas = b2b_tfm.get_deltas(src_boxes, dst_boxes) + dst_boxes_reconstructed = b2b_tfm.apply_deltas(deltas, src_boxes) + assert torch.allclose(dst_boxes, dst_boxes_reconstructed) + + +def random_rotated_boxes(mean_box, std_length, std_angle, N): + return torch.cat( + [torch.rand(N, 4) * std_length, torch.rand(N, 1) * std_angle], dim=1 + ) + torch.tensor(mean_box, dtype=torch.float) + + +class TestBox2BoxTransformRotated(unittest.TestCase): + def test_reconstruction(self): + weights = (5, 5, 10, 10, 1) + b2b_transform = Box2BoxTransformRotated(weights=weights) + src_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10) + dst_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10) + + devices = [torch.device("cpu")] + if torch.cuda.is_available(): + devices.append(torch.device("cuda")) + for device in devices: + src_boxes = src_boxes.to(device=device) + dst_boxes = dst_boxes.to(device=device) + deltas = b2b_transform.get_deltas(src_boxes, dst_boxes) + dst_boxes_reconstructed = b2b_transform.apply_deltas(deltas, src_boxes) + assert torch.allclose(dst_boxes, dst_boxes_reconstructed, atol=1e-5) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_boxes.py b/tests/test_boxes.py new file mode 100644 index 0000000000..7694fa1361 --- /dev/null +++ b/tests/test_boxes.py @@ -0,0 +1,63 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import numpy as np +import unittest +import torch + +from detectron2.structures import Boxes, BoxMode, pairwise_iou + + +class TestBoxMode(unittest.TestCase): + def _convert_xy_to_wh(self, x): + return BoxMode.convert(x, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) + + def test_box_convert_list(self): + for tp in [list, tuple]: + box = tp([5, 5, 10, 10]) + output = self._convert_xy_to_wh(box) + self.assertTrue(output == tp([5, 5, 5, 5])) + + with self.assertRaises(Exception): + self._convert_xy_to_wh([box]) + + def test_box_convert_array(self): + box = np.asarray([[5, 5, 10, 10], [1, 1, 2, 3]]) + output = self._convert_xy_to_wh(box) + self.assertTrue((output[0] == [5, 5, 5, 5]).all()) + self.assertTrue((output[1] == [1, 1, 1, 2]).all()) + + def test_box_convert_tensor(self): + box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]) + output = self._convert_xy_to_wh(box).numpy() + self.assertTrue((output[0] == [5, 5, 5, 5]).all()) + self.assertTrue((output[1] == [1, 1, 1, 2]).all()) + + +class TestBoxIOU(unittest.TestCase): + def test_pairwise_iou(self): + boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]]) + + boxes2 = torch.tensor( + [ + [0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.5, 1.0], + [0.0, 0.0, 1.0, 0.5], + [0.0, 0.0, 0.5, 0.5], + [0.5, 0.5, 1.0, 1.0], + [0.5, 0.5, 1.5, 1.5], + ] + ) + + expected_ious = torch.tensor( + [ + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + ] + ) + + ious = pairwise_iou(Boxes(boxes1), Boxes(boxes2)) + + assert torch.allclose(ious, expected_ious) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_checkpoint.py b/tests/test_checkpoint.py new file mode 100644 index 0000000000..725b488fda --- /dev/null +++ b/tests/test_checkpoint.py @@ -0,0 +1,48 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import unittest +from collections import OrderedDict +import torch +from torch import nn + +from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts +from detectron2.utils.logger import setup_logger + + +class TestCheckpointer(unittest.TestCase): + def setUp(self): + setup_logger() + + def create_complex_model(self): + m = nn.Module() + m.block1 = nn.Module() + m.block1.layer1 = nn.Linear(2, 3) + m.layer2 = nn.Linear(3, 2) + m.res = nn.Module() + m.res.layer2 = nn.Linear(3, 2) + + state_dict = OrderedDict() + state_dict["layer1.weight"] = torch.rand(3, 2) + state_dict["layer1.bias"] = torch.rand(3) + state_dict["layer2.weight"] = torch.rand(2, 3) + state_dict["layer2.bias"] = torch.rand(2) + state_dict["res.layer2.weight"] = torch.rand(2, 3) + state_dict["res.layer2.bias"] = torch.rand(2) + return m, state_dict + + def test_complex_model_loaded(self): + for add_data_parallel in [False, True]: + model, state_dict = self.create_complex_model() + if add_data_parallel: + model = nn.DataParallel(model) + model_sd = model.state_dict() + + align_and_update_state_dicts(model_sd, state_dict) + for loaded, stored in zip(model_sd.values(), state_dict.values()): + # different tensor references + self.assertFalse(id(loaded) == id(stored)) + # same content + self.assertTrue(loaded.equal(stored)) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_config.py b/tests/test_config.py new file mode 100644 index 0000000000..d79f07c498 --- /dev/null +++ b/tests/test_config.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + + +import os +import tempfile +import unittest + +from detectron2.config import downgrade_config, get_cfg, upgrade_config + +_V0_CFG = """ +MODEL: + RPN_HEAD: + NAME: "TEST" +VERSION: 0 +""" + +_V1_CFG = """ +MODEL: + WEIGHT: "/path/to/weight" +""" + + +class TestConfigVersioning(unittest.TestCase): + def test_upgrade_downgrade_consistency(self): + cfg = get_cfg() + # check that custom is preserved + cfg.USER_CUSTOM = 1 + + down = downgrade_config(cfg, to_version=0) + up = upgrade_config(down) + self.assertTrue(up == cfg) + + def _merge_cfg_str(self, cfg, merge_str): + f = tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) + try: + f.write(merge_str) + f.close() + cfg.merge_from_file(f.name) + finally: + os.remove(f.name) + return cfg + + def test_auto_upgrade(self): + cfg = get_cfg() + latest_ver = cfg.VERSION + cfg.USER_CUSTOM = 1 + + self._merge_cfg_str(cfg, _V0_CFG) + + self.assertEqual(cfg.MODEL.RPN.HEAD_NAME, "TEST") + self.assertEqual(cfg.VERSION, latest_ver) + + def test_guess_v1(self): + cfg = get_cfg() + latest_ver = cfg.VERSION + self._merge_cfg_str(cfg, _V1_CFG) + self.assertEqual(cfg.VERSION, latest_ver) diff --git a/tests/test_data_transform.py b/tests/test_data_transform.py new file mode 100644 index 0000000000..8fdfe8056e --- /dev/null +++ b/tests/test_data_transform.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import logging +import numpy as np +import unittest + +from detectron2.config import get_cfg +from detectron2.data import detection_utils +from detectron2.data import transforms as T +from detectron2.utils.logger import setup_logger + +logger = logging.getLogger(__name__) + + +class TestTransforms(unittest.TestCase): + def setUp(self): + setup_logger() + + def test_crop_polygons(self): + # Ensure that shapely produce an extra vertex at the end + import shapely.geometry as geometry + + polygon = np.asarray([3, 3.5, 11, 10.0, 38, 98, 15.0, 100.0]).reshape(-1, 2) + g = geometry.Polygon(polygon) + coords = np.asarray(g.exterior.coords) + self.assertEqual(coords[0].tolist(), coords[-1].tolist()) + + def test_apply_rotated_boxes(self): + np.random.seed(125) + cfg = get_cfg() + is_train = True + transform_gen = detection_utils.build_transform_gen(cfg, is_train) + image = np.random.rand(200, 300) + image, transforms = T.apply_transform_gens(transform_gen, image) + image_shape = image.shape[:2] # h, w + assert image_shape == (800, 1200) + annotation = {"bbox": [179, 97, 62, 40, -56]} + + boxes = np.array([annotation["bbox"]], dtype=np.float64) # boxes.shape = (1, 5) + transformed_bbox = transforms.apply_rotated_box(boxes)[0] + + expected_bbox = np.array([484, 388, 248, 160, 56], dtype=np.float64) + err_msg = "transformed_bbox = {}, expected {}".format(transformed_bbox, expected_bbox) + assert np.allclose(transformed_bbox, expected_bbox), err_msg + + def test_apply_rotated_boxes_unequal_scaling_factor(self): + np.random.seed(125) + h, w = 400, 200 + newh, neww = 800, 800 + image = np.random.rand(h, w) + transform_gen = [] + transform_gen.append(T.Resize(shape=(newh, neww))) + image, transforms = T.apply_transform_gens(transform_gen, image) + image_shape = image.shape[:2] # h, w + assert image_shape == (newh, neww) + + boxes = np.array( + [ + [150, 100, 40, 20, 0], + [150, 100, 40, 20, 30], + [150, 100, 40, 20, 90], + [150, 100, 40, 20, -90], + ], + dtype=np.float64, + ) + transformed_boxes = transforms.apply_rotated_box(boxes) + + expected_bboxes = np.array( + [ + 
[600, 200, 160, 40, 0], + [600, 200, 144.22205102, 52.91502622, 49.10660535], + [600, 200, 80, 80, 90], + [600, 200, 80, 80, -90], + ], + dtype=np.float64, + ) + err_msg = "transformed_boxes = {}, expected {}".format(transformed_boxes, expected_bboxes) + assert np.allclose(transformed_boxes, expected_bboxes), err_msg diff --git a/tests/test_fast_rcnn.py b/tests/test_fast_rcnn.py new file mode 100644 index 0000000000..17df9d9560 --- /dev/null +++ b/tests/test_fast_rcnn.py @@ -0,0 +1,102 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import unittest +import torch + +from detectron2.config import get_cfg +from detectron2.modeling.box_regression import Box2BoxTransform, Box2BoxTransformRotated +from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs +from detectron2.structures import Boxes, Instances, RotatedBoxes +from detectron2.utils.events import EventStorage + +logger = logging.getLogger(__name__) + + +class FastRCNNTest(unittest.TestCase): + def test_fast_rcnn(self): + torch.manual_seed(132) + cfg = get_cfg() + cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5) + box2box_transform = Box2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS) + + box_head_output_size = 8 + num_classes = 5 + cls_agnostic_bbox_reg = False + + box_predictor = FastRCNNOutputLayers( + box_head_output_size, num_classes, cls_agnostic_bbox_reg, box_dim=4 + ) + feature_pooled = torch.rand(2, box_head_output_size) + pred_class_logits, pred_proposal_deltas = box_predictor(feature_pooled) + image_shape = (10, 10) + proposal_boxes = torch.tensor([[0.8, 1.1, 3.2, 2.8], [2.3, 2.5, 7, 8]], dtype=torch.float32) + gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) + result = Instances(image_shape) + result.proposal_boxes = Boxes(proposal_boxes) + result.gt_boxes = Boxes(gt_boxes) + result.gt_classes = torch.tensor([1, 2]) + proposals = [] + proposals.append(result) + smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA + + outputs = FastRCNNOutputs( + box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, smooth_l1_beta + ) + with EventStorage(): # capture events in a new storage to discard them + losses = outputs.losses() + + expected_losses = { + "loss_cls": torch.tensor(1.7951188087), + "loss_box_reg": torch.tensor(4.0357131958), + } + for name in expected_losses.keys(): + assert torch.allclose(losses[name], expected_losses[name]) + + def test_fast_rcnn_rotated(self): + torch.manual_seed(132) + cfg = get_cfg() + cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1) + box2box_transform = Box2BoxTransformRotated(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS) + + box_head_output_size = 8 + num_classes = 5 + cls_agnostic_bbox_reg = False + + box_predictor = FastRCNNOutputLayers( + box_head_output_size, num_classes, cls_agnostic_bbox_reg, box_dim=5 + ) + feature_pooled = torch.rand(2, box_head_output_size) + pred_class_logits, pred_proposal_deltas = box_predictor(feature_pooled) + image_shape = (10, 10) + proposal_boxes = torch.tensor( + [[2, 1.95, 2.4, 1.7, 0], [4.65, 5.25, 4.7, 5.5, 0]], dtype=torch.float32 + ) + gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32) + result = Instances(image_shape) + result.proposal_boxes = RotatedBoxes(proposal_boxes) + result.gt_boxes = RotatedBoxes(gt_boxes) + result.gt_classes = torch.tensor([1, 2]) + proposals = [] + proposals.append(result) + smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA + + outputs = 
FastRCNNOutputs( + box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, smooth_l1_beta + ) + with EventStorage(): # capture events in a new storage to discard them + losses = outputs.losses() + + # Note: the expected losses are slightly different even if + # the boxes are essentially the same as in the FastRCNNOutput test, because + # bbox_pred in FastRCNNOutputLayers have different Linear layers/initialization + # between the two cases. + expected_losses = { + "loss_cls": torch.tensor(1.7920907736), + "loss_box_reg": torch.tensor(4.0410838127), + } + for name in expected_losses.keys(): + assert torch.allclose(losses[name], expected_losses[name]) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_mask_ops.py b/tests/test_mask_ops.py new file mode 100644 index 0000000000..cc92e13948 --- /dev/null +++ b/tests/test_mask_ops.py @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import contextlib +import io +import numpy as np +import os +import unittest +from collections import defaultdict +import torch +import tqdm +from fvcore.common.benchmark import benchmark +from pycocotools.coco import COCO +from tabulate import tabulate +from torch.nn import functional as F + +from detectron2.data import MetadataCatalog +from detectron2.layers.mask_ops import ( + pad_masks, + paste_mask_in_image_old, + paste_masks_in_image, + scale_boxes, +) +from detectron2.structures import BitMasks, Boxes, BoxMode, PolygonMasks +from detectron2.structures.masks import polygons_to_bitmask + + +def iou_between_full_image_bit_masks(a, b): + intersect = (a & b).sum() + union = (a | b).sum() + return intersect / union + + +def rasterize_polygons_with_grid_sample(full_image_bit_mask, box, mask_size, threshold=0.5): + x0, y0, x1, y1 = box[0], box[1], box[2], box[3] + + img_h, img_w = full_image_bit_mask.shape + + mask_y = np.arange(0.0, mask_size) + 0.5 # mask y sample coords in [0.5, mask_size - 0.5] + mask_x = np.arange(0.0, mask_size) + 0.5 # mask x sample coords in [0.5, mask_size - 0.5] + mask_y = (mask_y) / (mask_size) * (y1 - y0) + y0 + mask_x = (mask_x) / (mask_size) * (x1 - x0) + x0 + + mask_x = (mask_x - 0.5) / (img_w - 1) * 2 + -1 + mask_y = (mask_y - 0.5) / (img_h - 1) * 2 + -1 + gy, gx = torch.meshgrid(torch.from_numpy(mask_y), torch.from_numpy(mask_x)) + ind = torch.stack([gx, gy], dim=-1).to(dtype=torch.float32) + + full_image_bit_mask = torch.from_numpy(full_image_bit_mask) + mask = F.grid_sample( + full_image_bit_mask[None, None, :, :].to(dtype=torch.float32), + ind[None, :, :, :], + align_corners=True, + ) + + return mask[0, 0] >= threshold + + +class TestMaskCropPaste(unittest.TestCase): + def setUp(self): + json_file = MetadataCatalog.get("coco_2017_val_100").json_file + if not os.path.isfile(json_file): + raise unittest.SkipTest("{} not found".format(json_file)) + with contextlib.redirect_stdout(io.StringIO()): + self.coco = COCO(json_file) + + def test_crop_paste_consistency(self): + """ + rasterize_polygons_within_box (used in training) + and + paste_masks_in_image (used in inference) + should be inverse operations to each other. + + This function runs several implementation of the above two operations and prints + the reconstruction error. 
+ """ + + anns = self.coco.loadAnns(self.coco.getAnnIds(iscrowd=False)) # avoid crowd annotations + + selected_anns = anns[:100] + + ious = [] + for ann in tqdm.tqdm(selected_anns): + results = self.process_annotation(ann) + ious.append([k[2] for k in results]) + + ious = np.array(ious) + mean_ious = ious.mean(axis=0) + table = [] + res_dic = defaultdict(dict) + for row, iou in zip(results, mean_ious): + table.append((row[0], row[1], iou)) + res_dic[row[0]][row[1]] = iou + print(tabulate(table, headers=["rasterize", "paste", "iou"], tablefmt="simple")) + # assert that the reconstruction is good: + self.assertTrue(res_dic["polygon"]["aligned"] > 0.94) + self.assertTrue(res_dic["roialign"]["aligned"] > 0.95) + + def process_annotation(self, ann, mask_side_len=28): + # Parse annotation data + img_info = self.coco.loadImgs(ids=[ann["image_id"]])[0] + height, width = img_info["height"], img_info["width"] + gt_polygons = [np.array(p, dtype=np.float64) for p in ann["segmentation"]] + gt_bbox = BoxMode.convert(np.array(ann["bbox"]), BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + gt_bit_mask = polygons_to_bitmask(gt_polygons, height, width) + + # Run rasterize .. + torch_gt_bbox = torch.from_numpy(gt_bbox[None, :]).to(dtype=torch.float32) + box_bitmasks = { + "polygon": PolygonMasks([gt_polygons]).crop_and_resize(torch_gt_bbox, mask_side_len)[0], + "gridsample": rasterize_polygons_with_grid_sample(gt_bit_mask, gt_bbox, mask_side_len), + "roialign": BitMasks(torch.from_numpy(gt_bit_mask[None, :, :])).crop_and_resize( + torch_gt_bbox, mask_side_len + )[0], + } + + # Run paste .. + results = defaultdict(dict) + for k, box_bitmask in box_bitmasks.items(): + padded_bitmask, scale = pad_masks(box_bitmask[None, :, :], 1) + scaled_boxes = scale_boxes(torch_gt_bbox, scale) + + r = results[k] + r["old"] = paste_mask_in_image_old( + padded_bitmask[0], scaled_boxes[0], height, width, threshold=0.5 + ) + r["aligned"] = paste_masks_in_image( + box_bitmask[None, :, :], Boxes(gt_bbox[None, :]), (height, width) + )[0] + + table = [] + for rasterize_method, r in results.items(): + for paste_method, mask in r.items(): + mask = np.asarray(mask) + iou = iou_between_full_image_bit_masks(gt_bit_mask.astype("uint8"), mask) + table.append((rasterize_method, paste_method, iou)) + return table + + +def benchmark_paste(): + S = 800 + H, W = image_shape = (S, S) + N = 64 + torch.manual_seed(42) + masks = torch.rand(N, 28, 28) + + center = torch.rand(N, 2) * 600 + 100 + wh = torch.clamp(torch.randn(N, 2) * 40 + 200, min=50) + x0y0 = torch.clamp(center - wh * 0.5, min=0.0) + x1y1 = torch.clamp(center + wh * 0.5, max=S) + boxes = Boxes(torch.cat([x0y0, x1y1], axis=1)) + + def func(device, n=3): + m = masks.to(device=device) + b = boxes.to(device=device) + + def bench(): + for _ in range(n): + paste_masks_in_image(m, b, image_shape) + if device.type == "cuda": + torch.cuda.synchronize() + + return bench + + specs = [{"device": torch.device("cpu"), "n": 3}] + if torch.cuda.is_available(): + specs.append({"device": torch.device("cuda"), "n": 3}) + + benchmark(func, "paste_masks", specs, num_iters=10, warmup_iters=2) + + +if __name__ == "__main__": + benchmark_paste() + unittest.main() diff --git a/tests/test_nms_rotated.py b/tests/test_nms_rotated.py new file mode 100644 index 0000000000..d04a99c15d --- /dev/null +++ b/tests/test_nms_rotated.py @@ -0,0 +1,159 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +from __future__ import absolute_import, division, print_function, unicode_literals +import unittest +import torch +from torchvision import ops + +from detectron2.layers import batched_nms, batched_nms_rotated, nms_rotated + + +class TestNMSRotated(unittest.TestCase): + def reference_horizontal_nms(self, boxes, scores, iou_threshold): + """ + Args: + box_scores (N, 5): boxes in corner-form and probabilities. + (Note here 5 == 4 + 1, i.e., 4-dim horizontal box + 1-dim prob) + iou_threshold: intersection over union threshold. + Returns: + picked: a list of indexes of the kept boxes + """ + picked = [] + _, indexes = scores.sort(descending=True) + while len(indexes) > 0: + current = indexes[0] + picked.append(current.item()) + if len(indexes) == 1: + break + current_box = boxes[current, :] + indexes = indexes[1:] + rest_boxes = boxes[indexes, :] + iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1) + indexes = indexes[iou <= iou_threshold] + + return torch.as_tensor(picked) + + def _create_tensors(self, N): + boxes = torch.rand(N, 4) * 100 + # Note: the implementation of this function in torchvision is: + # boxes[:, 2:] += torch.rand(N, 2) * 100 + # but it does not guarantee non-negative widths/heights constraints: + # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]: + boxes[:, 2:] += boxes[:, :2] + scores = torch.rand(N) + return boxes, scores + + def test_bactched_nms_rotated_0_degree_cpu(self): + # torch.manual_seed(0) + N = 2000 + num_classes = 50 + boxes, scores = self._create_tensors(N) + idxs = torch.randint(0, num_classes, (N,)) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}" + for iou in [0.2, 0.5, 0.8]: + backup = boxes.clone() + keep_ref = batched_nms(boxes, scores, idxs, iou) + assert torch.allclose(boxes, backup), "boxes modified by batched_nms" + backup = rotated_boxes.clone() + keep = batched_nms_rotated(rotated_boxes, scores, idxs, iou) + assert torch.allclose( + rotated_boxes, backup + ), "rotated_boxes modified by batched_nms_rotated" + assert torch.equal(keep, keep_ref), err_msg.format(iou) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_bactched_nms_rotated_0_degree_cuda(self): + # torch.manual_seed(0) + N = 2000 + num_classes = 50 + boxes, scores = self._create_tensors(N) + idxs = torch.randint(0, num_classes, (N,)) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}" + for iou in [0.2, 0.5, 0.8]: + backup = boxes.clone() + keep_ref = batched_nms(boxes.cuda(), scores.cuda(), idxs, iou) + assert torch.allclose(boxes, backup), "boxes modified by batched_nms" + backup = rotated_boxes.clone() + keep = batched_nms_rotated(rotated_boxes.cuda(), scores.cuda(), idxs, iou) + assert torch.allclose( + rotated_boxes, backup + ), "rotated_boxes modified by batched_nms_rotated" + assert torch.equal(keep, keep_ref), err_msg.format(iou) + + def test_nms_rotated_0_degree_cpu(self): + N = 1000 + boxes, scores = self._create_tensors(N) + 
rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" + for iou in [0.5]: + keep_ref = self.reference_horizontal_nms(boxes, scores, iou) + keep = nms_rotated(rotated_boxes, scores, iou) + assert torch.equal(keep, keep_ref), err_msg.format(iou) + + def test_nms_rotated_90_degrees_cpu(self): + N = 1000 + boxes, scores = self._create_tensors(N) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + # Note for rotated_boxes[:, 2] and rotated_boxes[:, 3]: + # widths and heights are intentionally swapped here for 90 degrees case + # so that the reference horizontal nms could be used + rotated_boxes[:, 2] = boxes[:, 3] - boxes[:, 1] + rotated_boxes[:, 3] = boxes[:, 2] - boxes[:, 0] + + rotated_boxes[:, 4] = torch.ones(N) * 90 + err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" + for iou in [0.2, 0.5, 0.8]: + keep_ref = self.reference_horizontal_nms(boxes, scores, iou) + keep = nms_rotated(rotated_boxes, scores, iou) + assert torch.equal(keep, keep_ref), err_msg.format(iou) + + def test_nms_rotated_180_degrees_cpu(self): + N = 1000 + boxes, scores = self._create_tensors(N) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + rotated_boxes[:, 4] = torch.ones(N) * 180 + err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" + for iou in [0.2, 0.5, 0.8]: + keep_ref = self.reference_horizontal_nms(boxes, scores, iou) + keep = nms_rotated(rotated_boxes, scores, iou) + assert torch.equal(keep, keep_ref), err_msg.format(iou) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_nms_rotated_0_degree_cuda(self): + N = 1000 + boxes, scores = self._create_tensors(N) + rotated_boxes = torch.zeros(N, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + err_msg = "Rotated NMS incompatible between CPU and CUDA for IoU={}" + + for iou in [0.2, 0.5, 0.8]: + r_cpu = nms_rotated(rotated_boxes, scores, iou) + r_cuda = nms_rotated(rotated_boxes.cuda(), scores.cuda(), iou) + + assert torch.equal(r_cpu, r_cuda.cpu()), err_msg.format(iou) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_roi_align.py b/tests/test_roi_align.py new file mode 100644 index 0000000000..a745678808 --- /dev/null +++ b/tests/test_roi_align.py @@ -0,0 +1,86 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import numpy as np +import unittest +import cv2 +import torch + +from detectron2.layers.roi_align import ROIAlign + + +class ROIAlignTest(unittest.TestCase): + def test_forward_output(self): + input = np.arange(25).reshape(5, 5).astype("float32") + """ + 0 1 2 3 4 + 5 6 7 8 9 + 10 11 12 13 14 + 15 16 17 18 19 + 20 21 22 23 24 + """ + + output = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=False) + output_correct = self._simple_roialign(input, [1, 1, 3, 3], (4, 4), aligned=True) + + # without correction: + old_results = [ + [7.5, 8, 8.5, 9], + [10, 10.5, 11, 11.5], + [12.5, 13, 13.5, 14], + [15, 15.5, 16, 16.5], + ] + + # with 0.5 correction: + correct_results = [ + [4.5, 5.0, 5.5, 6.0], + [7.0, 7.5, 8.0, 8.5], + [9.5, 10.0, 10.5, 11.0], + [12.0, 12.5, 13.0, 13.5], + ] + # This is an upsampled version of [[6, 7], [11, 12]] + + self.assertTrue(np.allclose(output.flatten(), np.asarray(old_results).flatten())) + self.assertTrue( + np.allclose(output_correct.flatten(), np.asarray(correct_results).flatten()) + ) + + # Also see similar issues in tensorflow at + # https://github.com/tensorflow/tensorflow/issues/26278 + + def test_resize(self): + H, W = 30, 30 + input = np.random.rand(H, W).astype("float32") * 100 + box = [10, 10, 20, 20] + output = self._simple_roialign(input, box, (5, 5), aligned=True) + + input2x = cv2.resize(input, (W // 2, H // 2), interpolation=cv2.INTER_LINEAR) + box2x = [x / 2 for x in box] + output2x = self._simple_roialign(input2x, box2x, (5, 5), aligned=True) + diff = np.abs(output2x - output) + self.assertTrue(diff.max() < 1e-4) + + def _simple_roialign(self, img, box, resolution, aligned=True): + """ + RoiAlign with scale 1.0 and 0 sample ratio. + """ + if isinstance(resolution, int): + resolution = (resolution, resolution) + op = ROIAlign(resolution, 1.0, 0, aligned=aligned) + input = torch.from_numpy(img[None, None, :, :].astype("float32")) + + rois = [0] + list(box) + rois = torch.from_numpy(np.asarray(rois)[None, :].astype("float32")) + output = op.forward(input, rois).numpy() + if torch.cuda.is_available(): + output_cuda = op.forward(input.cuda(), rois.cuda()).cpu().numpy() + self.assertTrue(np.allclose(output, output_cuda)) + return output[0, 0] + + def test_empty_box(self): + img = np.random.rand(5, 5) + box = [3, 4, 5, 4] + o = self._simple_roialign(img, box, 7) + self.assertTrue((o == 0).all()) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_roi_align_rotated.py b/tests/test_roi_align_rotated.py new file mode 100644 index 0000000000..d7900678ea --- /dev/null +++ b/tests/test_roi_align_rotated.py @@ -0,0 +1,176 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import unittest +import cv2 +import torch +from torch.autograd import Variable, gradcheck + +from detectron2.layers.roi_align import ROIAlign +from detectron2.layers.roi_align_rotated import ROIAlignRotated + +logger = logging.getLogger(__name__) + + +class ROIAlignRotatedTest(unittest.TestCase): + def _box_to_rotated_box(self, box, angle): + return [ + (box[0] + box[2]) / 2.0, + (box[1] + box[3]) / 2.0, + box[2] - box[0], + box[3] - box[1], + angle, + ] + + def _rot90(self, img, num): + num = num % 4 # note: -1 % 4 == 3 + for _ in range(num): + img = img.transpose(0, 1).flip(0) + return img + + def test_forward_output_0_90_180_270(self): + for i in range(4): + # i = 0, 1, 2, 3 corresponding to 0, 90, 180, 270 degrees + img = torch.arange(25, dtype=torch.float32).reshape(5, 5) + """ + 0 1 2 3 4 + 5 6 7 8 9 + 10 11 12 13 14 + 15 16 17 18 19 + 20 21 22 23 24 + """ + box = [1, 1, 3, 3] + rotated_box = self._box_to_rotated_box(box=box, angle=90 * i) + + result = self._simple_roi_align_rotated(img=img, box=rotated_box, resolution=(4, 4)) + + # Here's an explanation for 0 degree case: + # point 0 in the original input lies at [0.5, 0.5] + # (the center of bin [0, 1] x [0, 1]) + # point 1 in the original input lies at [1.5, 0.5], etc. + # since the resolution is (4, 4) that divides [1, 3] x [1, 3] + # into 4 x 4 equal bins, + # the top-left bin is [1, 1.5] x [1, 1.5], and its center + # (1.25, 1.25) lies at the 3/4 position + # between point 0 and point 1, point 5 and point 6, + # point 0 and point 5, point 1 and point 6, so it can be calculated as + # 0.25*(0*0.25+1*0.75)+(5*0.25+6*0.75)*0.75 = 4.5 + result_expected = torch.tensor( + [ + [4.5, 5.0, 5.5, 6.0], + [7.0, 7.5, 8.0, 8.5], + [9.5, 10.0, 10.5, 11.0], + [12.0, 12.5, 13.0, 13.5], + ] + ) + # This is also an upsampled version of [[6, 7], [11, 12]] + + # When the box is rotated by 90 degrees CCW, + # the result would be rotated by 90 degrees CW, thus it's -i here + result_expected = self._rot90(result_expected, -i) + + assert torch.allclose(result, result_expected) + + def test_resize(self): + H, W = 30, 30 + input = torch.rand(H, W) * 100 + box = [10, 10, 20, 20] + rotated_box = self._box_to_rotated_box(box, angle=0) + output = self._simple_roi_align_rotated(img=input, box=rotated_box, resolution=(5, 5)) + + input2x = cv2.resize(input.numpy(), (W // 2, H // 2), interpolation=cv2.INTER_LINEAR) + input2x = torch.from_numpy(input2x) + box2x = [x / 2 for x in box] + rotated_box2x = self._box_to_rotated_box(box2x, angle=0) + output2x = self._simple_roi_align_rotated(img=input2x, box=rotated_box2x, resolution=(5, 5)) + assert torch.allclose(output2x, output) + + def _simple_roi_align_rotated(self, img, box, resolution): + """ + RoiAlignRotated with scale 1.0 and 0 sample ratio. 
+ """ + op = ROIAlignRotated(output_size=resolution, spatial_scale=1.0, sampling_ratio=0) + input = img[None, None, :, :] + + rois = [0] + list(box) + rois = torch.tensor(rois, dtype=torch.float32)[None, :] + result_cpu = op.forward(input, rois) + if torch.cuda.is_available(): + result_cuda = op.forward(input.cuda(), rois.cuda()) + assert torch.allclose(result_cpu, result_cuda.cpu()) + return result_cpu[0, 0] + + def test_empty_box(self): + img = torch.rand(5, 5) + out = self._simple_roi_align_rotated(img, [2, 3, 0, 0, 0], (7, 7)) + self.assertTrue((out == 0).all()) + + def test_roi_align_rotated_gradcheck_cpu(self): + dtype = torch.float64 + device = torch.device("cpu") + roi_align_rotated_op = ROIAlignRotated( + output_size=(5, 5), spatial_scale=0.5, sampling_ratio=1 + ).to(dtype=dtype, device=device) + x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True) + # roi format is (batch index, x_center, y_center, width, height, angle) + rois = torch.tensor( + [[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]], + dtype=dtype, + device=device, + ) + + def func(input): + return roi_align_rotated_op(input, rois) + + assert gradcheck(func, (x,)), "gradcheck failed for RoIAlignRotated CPU" + assert gradcheck(func, (x.transpose(2, 3),)), "gradcheck failed for RoIAlignRotated CPU" + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_roi_align_rotated_gradient_cuda(self): + """ + Compute gradients for ROIAlignRotated with multiple bounding boxes on the GPU, + and compare the result with ROIAlign + """ + # torch.manual_seed(123) + dtype = torch.float64 + device = torch.device("cuda") + pool_h, pool_w = (5, 5) + + roi_align = ROIAlign(output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2).to( + device=device + ) + + roi_align_rotated = ROIAlignRotated( + output_size=(pool_h, pool_w), spatial_scale=1, sampling_ratio=2 + ).to(device=device) + + x = torch.rand(1, 1, 10, 10, dtype=dtype, device=device, requires_grad=True) + # x_rotated = x.clone() won't work (will lead to grad_fun=CloneBackward)! + x_rotated = Variable(x.data.clone(), requires_grad=True) + + # roi_rotated format is (batch index, x_center, y_center, width, height, angle) + rois_rotated = torch.tensor( + [[0, 4.5, 4.5, 9, 9, 0], [0, 2, 7, 4, 4, 0], [0, 7, 7, 4, 4, 0]], + dtype=dtype, + device=device, + ) + + y_rotated = roi_align_rotated(x_rotated, rois_rotated) + s_rotated = y_rotated.sum() + s_rotated.backward() + + # roi format is (batch index, x1, y1, x2, y2) + rois = torch.tensor( + [[0, 0, 0, 9, 9], [0, 0, 5, 4, 9], [0, 5, 5, 9, 9]], dtype=dtype, device=device + ) + + y = roi_align(x, rois) + s = y.sum() + s.backward() + + assert torch.allclose( + x.grad, x_rotated.grad + ), "gradients for ROIAlign and ROIAlignRotated mismatch on CUDA" + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_roi_heads.py b/tests/test_roi_heads.py new file mode 100644 index 0000000000..70da179c3a --- /dev/null +++ b/tests/test_roi_heads.py @@ -0,0 +1,108 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import unittest +import torch + +from detectron2.config import get_cfg +from detectron2.modeling.backbone import build_backbone +from detectron2.modeling.proposal_generator.build import build_proposal_generator +from detectron2.modeling.roi_heads import build_roi_heads +from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes +from detectron2.utils.events import EventStorage + +logger = logging.getLogger(__name__) + + +class ROIHeadsTest(unittest.TestCase): + def test_roi_heads(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.ROI_HEADS.NAME = "StandardROIHeads" + cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead" + cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2 + cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2" + cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5) + backbone = build_backbone(cfg) + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + + image_shape = (15, 15) + gt_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) + gt_instance0 = Instances(image_shape) + gt_instance0.gt_boxes = Boxes(gt_boxes0) + gt_instance0.gt_classes = torch.tensor([2, 1]) + gt_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32) + gt_instance1 = Instances(image_shape) + gt_instance1.gt_boxes = Boxes(gt_boxes1) + gt_instance1.gt_classes = torch.tensor([1, 2]) + gt_instances = [gt_instance0, gt_instance1] + + proposal_generator = build_proposal_generator(cfg, backbone.output_shape()) + roi_heads = build_roi_heads(cfg, backbone.output_shape()) + + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator(images, features, gt_instances) + _, detector_losses = roi_heads(images, features, proposals, gt_instances) + + expected_losses = { + "loss_cls": torch.tensor(4.4236516953), + "loss_box_reg": torch.tensor(0.0091214813), + } + for name in expected_losses.keys(): + assert torch.allclose(detector_losses[name], expected_losses[name]) + + def test_rroi_heads(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN" + cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator" + cfg.MODEL.ROI_HEADS.NAME = "RROIHeads" + cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead" + cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2 + cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1) + cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead" + cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignRotated" + cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1) + backbone = build_backbone(cfg) + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + + image_shape = (15, 15) + gt_boxes0 = torch.tensor([[2, 2, 2, 2, 30], [4, 4, 4, 4, 0]], dtype=torch.float32) + gt_instance0 = Instances(image_shape) + gt_instance0.gt_boxes = RotatedBoxes(gt_boxes0) + gt_instance0.gt_classes = torch.tensor([2, 1]) + gt_boxes1 = torch.tensor([[1.5, 5.5, 1, 3, 0], [8.5, 4, 3, 2, -50]], dtype=torch.float32) + gt_instance1 = Instances(image_shape) + gt_instance1.gt_boxes = RotatedBoxes(gt_boxes1) + gt_instance1.gt_classes = torch.tensor([1, 2]) + gt_instances = [gt_instance0, gt_instance1] + + proposal_generator = 
build_proposal_generator(cfg, backbone.output_shape()) + roi_heads = build_roi_heads(cfg, backbone.output_shape()) + + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator(images, features, gt_instances) + _, detector_losses = roi_heads(images, features, proposals, gt_instances) + + expected_losses = { + "loss_cls": torch.tensor(4.381443977355957), + "loss_box_reg": torch.tensor(0.0011560433777049184), + } + for name in expected_losses.keys(): + err_msg = "detector_losses[{}] = {}, expected losses = {}".format( + name, detector_losses[name], expected_losses[name] + ) + assert torch.allclose(detector_losses[name], expected_losses[name]), err_msg + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_roi_pooler.py b/tests/test_roi_pooler.py new file mode 100644 index 0000000000..127498f487 --- /dev/null +++ b/tests/test_roi_pooler.py @@ -0,0 +1,85 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import logging +import unittest +import torch + +from detectron2.modeling.poolers import ROIPooler +from detectron2.structures import Boxes, RotatedBoxes + +logger = logging.getLogger(__name__) + + +class TestROIPooler(unittest.TestCase): + def _rand_boxes(self, num_boxes, x_max, y_max): + coords = torch.rand(num_boxes, 4) + coords[:, 0] *= x_max + coords[:, 1] *= y_max + coords[:, 2] *= x_max + coords[:, 3] *= y_max + boxes = torch.zeros(num_boxes, 4) + boxes[:, 0] = torch.min(coords[:, 0], coords[:, 2]) + boxes[:, 1] = torch.min(coords[:, 1], coords[:, 3]) + boxes[:, 2] = torch.max(coords[:, 0], coords[:, 2]) + boxes[:, 3] = torch.max(coords[:, 1], coords[:, 3]) + return boxes + + def _test_roialignv2_roialignrotated_match(self, device): + pooler_resolution = 14 + canonical_level = 4 + canonical_scale_factor = 2 ** canonical_level + pooler_scales = (1.0 / canonical_scale_factor,) + sampling_ratio = 0 + + N, C, H, W = 2, 4, 10, 8 + N_rois = 10 + std = 11 + mean = 0 + feature = (torch.rand(N, C, H, W) - 0.5) * 2 * std + mean + + features = [feature.to(device)] + + rois = [] + rois_rotated = [] + for _ in range(N): + boxes = self._rand_boxes( + num_boxes=N_rois, x_max=W * canonical_scale_factor, y_max=H * canonical_scale_factor + ) + + rotated_boxes = torch.zeros(N_rois, 5) + rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 + rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 + rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] + rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] + rois.append(Boxes(boxes).to(device)) + rois_rotated.append(RotatedBoxes(rotated_boxes).to(device)) + + roialignv2_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type="ROIAlignV2", + ) + + roialignv2_out = roialignv2_pooler(features, rois) + + roialignrotated_pooler = ROIPooler( + output_size=pooler_resolution, + scales=pooler_scales, + sampling_ratio=sampling_ratio, + pooler_type="ROIAlignRotated", + ) + + roialignrotated_out = roialignrotated_pooler(features, rois_rotated) + + assert torch.allclose(roialignv2_out, roialignrotated_out, atol=1e-4) + + def test_roialignv2_roialignrotated_match_cpu(self): + self._test_roialignv2_roialignrotated_match(device="cpu") + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_roialignv2_roialignrotated_match_cuda(self): + self._test_roialignv2_roialignrotated_match(device="cuda") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_rotated_boxes.py 
b/tests/test_rotated_boxes.py new file mode 100644 index 0000000000..052ad8b193 --- /dev/null +++ b/tests/test_rotated_boxes.py @@ -0,0 +1,578 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from __future__ import absolute_import, division, print_function, unicode_literals +import logging +import math +import random +import unittest +import torch +from fvcore.common.benchmark import benchmark + +from detectron2.layers.rotated_boxes import pairwise_iou_rotated +from detectron2.structures.boxes import Boxes +from detectron2.structures.rotated_boxes import RotatedBoxes, pairwise_iou + +logger = logging.getLogger(__name__) + + +class TestRotatedBoxesLayer(unittest.TestCase): + def test_iou_0_dim_cpu(self): + boxes1 = torch.rand(0, 5, dtype=torch.float32) + boxes2 = torch.rand(10, 5, dtype=torch.float32) + expected_ious = torch.zeros(0, 10, dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + assert torch.allclose(ious, expected_ious) + + boxes1 = torch.rand(10, 5, dtype=torch.float32) + boxes2 = torch.rand(0, 5, dtype=torch.float32) + expected_ious = torch.zeros(10, 0, dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_0_dim_cuda(self): + boxes1 = torch.rand(0, 5, dtype=torch.float32) + boxes2 = torch.rand(10, 5, dtype=torch.float32) + expected_ious = torch.zeros(0, 10, dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + assert torch.allclose(ious_cuda.cpu(), expected_ious) + + boxes1 = torch.rand(10, 5, dtype=torch.float32) + boxes2 = torch.rand(0, 5, dtype=torch.float32) + expected_ious = torch.zeros(10, 0, dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + assert torch.allclose(ious_cuda.cpu(), expected_ious) + + def test_iou_half_overlap_cpu(self): + boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32) + boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32) + expected_ious = torch.tensor([[0.5]], dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_half_overlap_cuda(self): + boxes1 = torch.tensor([[0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32) + boxes2 = torch.tensor([[0.25, 0.5, 0.5, 1.0, 0.0]], dtype=torch.float32) + expected_ious = torch.tensor([[0.5]], dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + assert torch.allclose(ious_cuda.cpu(), expected_ious) + + def test_iou_0_degree_cpu(self): + boxes1 = torch.tensor( + [[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32 + ) + boxes2 = torch.tensor( + [ + [0.5, 0.5, 1.0, 1.0, 0.0], + [0.25, 0.5, 0.5, 1.0, 0.0], + [0.5, 0.25, 1.0, 0.5, 0.0], + [0.25, 0.25, 0.5, 0.5, 0.0], + [0.75, 0.75, 0.5, 0.5, 0.0], + [1.0, 1.0, 1.0, 1.0, 0.0], + ], + dtype=torch.float32, + ) + expected_ious = torch.tensor( + [ + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + ], + dtype=torch.float32, + ) + ious = pairwise_iou_rotated(boxes1, boxes2) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_0_degree_cuda(self): + boxes1 = torch.tensor( + [[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]], dtype=torch.float32 + ) + 
boxes2 = torch.tensor( + [ + [0.5, 0.5, 1.0, 1.0, 0.0], + [0.25, 0.5, 0.5, 1.0, 0.0], + [0.5, 0.25, 1.0, 0.5, 0.0], + [0.25, 0.25, 0.5, 0.5, 0.0], + [0.75, 0.75, 0.5, 0.5, 0.0], + [1.0, 1.0, 1.0, 1.0, 0.0], + ], + dtype=torch.float32, + ) + expected_ious = torch.tensor( + [ + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + ], + dtype=torch.float32, + ) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + assert torch.allclose(ious_cuda.cpu(), expected_ious) + + def test_iou_45_degrees_cpu(self): + boxes1 = torch.tensor( + [ + [1, 1, math.sqrt(2), math.sqrt(2), 45], + [1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45], + ], + dtype=torch.float32, + ) + boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32) + expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_45_degrees_cuda(self): + boxes1 = torch.tensor( + [ + [1, 1, math.sqrt(2), math.sqrt(2), 45], + [1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45], + ], + dtype=torch.float32, + ) + boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32) + expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + assert torch.allclose(ious_cuda.cpu(), expected_ious) + + def test_iou_perpendicular_cpu(self): + boxes1 = torch.tensor([[5, 5, 10.0, 6, 55]], dtype=torch.float32) + boxes2 = torch.tensor([[5, 5, 10.0, 6, -35]], dtype=torch.float32) + iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0) + expected_ious = torch.tensor([[iou]], dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_iou_perpendicular_cuda(self): + boxes1 = torch.tensor([[5, 5, 10.0, 6, 55]], dtype=torch.float32) + boxes2 = torch.tensor([[5, 5, 10.0, 6, -35]], dtype=torch.float32) + iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0) + expected_ious = torch.tensor([[iou]], dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + assert torch.allclose(ious_cuda.cpu(), expected_ious) + + def test_iou_large_close_boxes_cpu(self): + boxes1 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]], dtype=torch.float32 + ) + boxes2 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]], dtype=torch.float32 + ) + iou = 364.259155 / 364.259186 + expected_ious = torch.tensor([[iou]], dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_large_close_boxes_cuda(self): + boxes1 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]], dtype=torch.float32 + ) + boxes2 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]], dtype=torch.float32 + ) + iou = 364.259155 / 364.259186 + expected_ious = torch.tensor([[iou]], dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + assert torch.allclose(ious_cuda.cpu(), expected_ious) + + def test_iou_precision_cpu(self): + boxes1 = torch.tensor([[565, 565, 10, 10, 0]], dtype=torch.float32) + boxes2 = torch.tensor([[565, 565, 10, 8.3, 0]], dtype=torch.float32) + iou = 8.3 / 10.0 + 
expected_ious = torch.tensor([[iou]], dtype=torch.float32) + ious = pairwise_iou_rotated(boxes1, boxes2) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_precision_cuda(self): + boxes1 = torch.tensor([[565, 565, 10, 10, 0]], dtype=torch.float32) + boxes2 = torch.tensor([[565, 565, 10, 8.3, 0]], dtype=torch.float32) + iou = 8.3 / 10.0 + expected_ious = torch.tensor([[iou]], dtype=torch.float32) + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + assert torch.allclose(ious_cuda.cpu(), expected_ious) + + def test_iou_many_boxes_cpu(self): + num_boxes1 = 100 + num_boxes2 = 200 + boxes1 = torch.stack( + [ + torch.tensor([5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32) + for i in range(num_boxes1) + ] + ) + boxes2 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], dtype=torch.float32 + ) + for i in range(num_boxes2) + ] + ) + expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32) + for i in range(min(num_boxes1, num_boxes2)): + expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0 + ious = pairwise_iou_rotated(boxes1, boxes2) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_iou_many_boxes_cuda(self): + num_boxes1 = 100 + num_boxes2 = 200 + boxes1 = torch.stack( + [ + torch.tensor([5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32) + for i in range(num_boxes1) + ] + ) + boxes2 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], dtype=torch.float32 + ) + for i in range(num_boxes2) + ] + ) + expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32) + for i in range(min(num_boxes1, num_boxes2)): + expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0 + ious_cuda = pairwise_iou_rotated(boxes1.cuda(), boxes2.cuda()) + assert torch.allclose(ious_cuda.cpu(), expected_ious) + + +class TestRotatedBoxesStructure(unittest.TestCase): + def test_clip_area_0_degree(self): + for _ in range(50): + num_boxes = 100 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) + # Convert from (x_ctr, y_ctr, w, h, 0) to (x1, y1, x2, y2) + boxes_4d = torch.zeros(num_boxes, 4) + boxes_4d[:, 0] = boxes_5d[:, 0] - boxes_5d[:, 2] / 2.0 + boxes_4d[:, 1] = boxes_5d[:, 1] - boxes_5d[:, 3] / 2.0 + boxes_4d[:, 2] = boxes_5d[:, 0] + boxes_5d[:, 2] / 2.0 + boxes_4d[:, 3] = boxes_5d[:, 1] + boxes_5d[:, 3] / 2.0 + + image_size = (500, 600) + test_boxes_4d = Boxes(boxes_4d) + test_boxes_5d = RotatedBoxes(boxes_5d) + # Before clip + areas_4d = test_boxes_4d.area() + areas_5d = test_boxes_5d.area() + assert torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5) + # After clip + test_boxes_4d.clip(image_size) + test_boxes_5d.clip(image_size) + areas_4d = test_boxes_4d.area() + areas_5d = test_boxes_5d.area() + assert torch.allclose(areas_4d, areas_5d, atol=1e-1, rtol=1e-5) + + def test_clip_area_arbitrary_angle(self): + num_boxes = 100 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 
3] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) + clip_angle_threshold = random.uniform(0, 180) + + image_size = (500, 600) + test_boxes_5d = RotatedBoxes(boxes_5d) + # Before clip + areas_before = test_boxes_5d.area() + # After clip + test_boxes_5d.clip(image_size, clip_angle_threshold) + areas_diff = test_boxes_5d.area() - areas_before + + # the areas should only decrease after clipping + assert torch.all(areas_diff <= 0) + # whenever the box is clipped (thus the area shrinks), + # the angle for the box must be within the clip_angle_threshold + # Note that the clip function will normalize the angle range + # to be within (-180, 180] + assert torch.all( + torch.abs(boxes_5d[:, 4][torch.where(areas_diff < 0)]) < clip_angle_threshold + ) + + def test_normalize_angles(self): + # torch.manual_seed(0) + for _ in range(50): + num_boxes = 100 + boxes_5d = torch.zeros(num_boxes, 5) + boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-100, 500) + boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, 500) + boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) + rotated_boxes = RotatedBoxes(boxes_5d) + normalized_boxes = rotated_boxes.clone() + normalized_boxes.normalize_angles() + assert torch.all(normalized_boxes.tensor[:, 4] > -180) + assert torch.all(normalized_boxes.tensor[:, 4] <= 180) + # x, y, w, h should not change + assert torch.allclose(boxes_5d[:, :4], normalized_boxes.tensor[:, :4]) + # the cos/sin values of the angles should stay the same + + assert torch.allclose( + torch.cos(boxes_5d[:, 4] * math.pi / 180), + torch.cos(normalized_boxes.tensor[:, 4] * math.pi / 180), + atol=1e-5, + ) + + assert torch.allclose( + torch.sin(boxes_5d[:, 4] * math.pi / 180), + torch.sin(normalized_boxes.tensor[:, 4] * math.pi / 180), + atol=1e-5, + ) + + def test_pairwise_iou_0_degree_cpu(self): + device = torch.device("cpu") + boxes1 = torch.tensor( + [[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor( + [ + [0.5, 0.5, 1.0, 1.0, 0.0], + [0.25, 0.5, 0.5, 1.0, 0.0], + [0.5, 0.25, 1.0, 0.5, 0.0], + [0.25, 0.25, 0.5, 0.5, 0.0], + [0.75, 0.75, 0.5, 0.5, 0.0], + [1.0, 1.0, 1.0, 1.0, 0.0], + ], + dtype=torch.float32, + device=device, + ) + expected_ious = torch.tensor( + [ + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + ], + dtype=torch.float32, + device=device, + ) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_pairwise_iou_0_degree_cuda(self): + device = torch.device("cuda") + boxes1 = torch.tensor( + [[0.5, 0.5, 1.0, 1.0, 0.0], [0.5, 0.5, 1.0, 1.0, 0.0]], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor( + [ + [0.5, 0.5, 1.0, 1.0, 0.0], + [0.25, 0.5, 0.5, 1.0, 0.0], + [0.5, 0.25, 1.0, 0.5, 0.0], + [0.25, 0.25, 0.5, 0.5, 0.0], + [0.75, 0.75, 0.5, 0.5, 0.0], + [1.0, 1.0, 1.0, 1.0, 0.0], + ], + dtype=torch.float32, + device=device, + ) + expected_ious = torch.tensor( + [ + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + [1.0, 0.5, 0.5, 0.25, 0.25, 0.25 / (2 - 0.25)], + ], + dtype=torch.float32, + device=device, + ) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + assert 
torch.allclose(ious, expected_ious) + + def test_pairwise_iou_45_degrees_cpu(self): + device = torch.device("cpu") + boxes1 = torch.tensor( + [ + [1, 1, math.sqrt(2), math.sqrt(2), 45], + [1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45], + ], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32, device=device) + expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_pairwise_iou_45_degrees_cuda(self): + device = torch.device("cuda") + boxes1 = torch.tensor( + [ + [1, 1, math.sqrt(2), math.sqrt(2), 45], + [1, 1, 2 * math.sqrt(2), 2 * math.sqrt(2), -45], + ], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor([[1, 1, 2, 2, 0]], dtype=torch.float32, device=device) + expected_ious = torch.tensor([[0.5], [0.5]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + assert torch.allclose(ious, expected_ious) + + def test_pairwise_iou_orthogonal_cpu(self): + device = torch.device("cpu") + boxes1 = torch.tensor([[5, 5, 10, 6, 55]], dtype=torch.float32, device=device) + boxes2 = torch.tensor([[5, 5, 10, 6, -35]], dtype=torch.float32, device=device) + iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0) + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_pairwise_iou_orthogonal_cuda(self): + device = torch.device("cuda") + boxes1 = torch.tensor([[5, 5, 10, 6, 55]], dtype=torch.float32, device=device) + boxes2 = torch.tensor([[5, 5, 10, 6, -35]], dtype=torch.float32, device=device) + iou = (6.0 * 6.0) / (6.0 * 6.0 + 4.0 * 6.0 + 4.0 * 6.0) + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + assert torch.allclose(ious, expected_ious) + + def test_pairwise_iou_large_close_boxes_cpu(self): + device = torch.device("cpu") + boxes1 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]], + dtype=torch.float32, + device=device, + ) + iou = 364.259155 / 364.259186 + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_pairwise_iou_large_close_boxes_cuda(self): + device = torch.device("cuda") + boxes1 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259186, 27.1828]], + dtype=torch.float32, + device=device, + ) + boxes2 = torch.tensor( + [[299.500000, 417.370422, 600.000000, 364.259155, 27.1828]], + dtype=torch.float32, + device=device, + ) + iou = 364.259155 / 364.259186 + expected_ious = torch.tensor([[iou]], dtype=torch.float32, device=device) + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + assert torch.allclose(ious, expected_ious) + + def test_pairwise_iou_many_boxes_cpu(self): + device = torch.device("cpu") + num_boxes1 = 100 + num_boxes2 = 200 + boxes1 = 
torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32, device=device + ) + for i in range(num_boxes1) + ] + ) + boxes2 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], + dtype=torch.float32, + device=device, + ) + for i in range(num_boxes2) + ] + ) + expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32, device=device) + for i in range(min(num_boxes1, num_boxes2)): + expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0 + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + assert torch.allclose(ious, expected_ious) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_pairwise_iou_many_boxes_cuda(self): + device = torch.device("cuda") + num_boxes1 = 100 + num_boxes2 = 200 + boxes1 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32, device=device + ) + for i in range(num_boxes1) + ] + ) + boxes2 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], + dtype=torch.float32, + device=device, + ) + for i in range(num_boxes2) + ] + ) + expected_ious = torch.zeros(num_boxes1, num_boxes2, dtype=torch.float32, device=device) + for i in range(min(num_boxes1, num_boxes2)): + expected_ious[i][i] = (1 + 9 * i / num_boxes2) / 10.0 + ious = pairwise_iou(RotatedBoxes(boxes1), RotatedBoxes(boxes2)) + assert torch.allclose(ious, expected_ious) + + +def benchmark_rotated_iou(): + num_boxes1 = 200 + num_boxes2 = 500 + boxes1 = torch.stack( + [ + torch.tensor([5 + 20 * i, 5 + 20 * i, 10, 10, 0], dtype=torch.float32) + for i in range(num_boxes1) + ] + ) + boxes2 = torch.stack( + [ + torch.tensor( + [5 + 20 * i, 5 + 20 * i, 10, 1 + 9 * i / num_boxes2, 0], dtype=torch.float32 + ) + for i in range(num_boxes2) + ] + ) + + def func(dev, n=1): + b1 = boxes1.to(device=dev) + b2 = boxes2.to(device=dev) + + def bench(): + for _ in range(n): + pairwise_iou_rotated(b1, b2) + if dev.type == "cuda": + torch.cuda.synchronize() + + return bench + + # only run it once per timed loop, since it's slow + args = [{"dev": torch.device("cpu"), "n": 1}] + if torch.cuda.is_available(): + args.append({"dev": torch.device("cuda"), "n": 10}) + + benchmark(func, "rotated_iou", args, warmup_iters=3) + + +if __name__ == "__main__": + unittest.main() + benchmark_rotated_iou() diff --git a/tests/test_rpn.py b/tests/test_rpn.py new file mode 100644 index 0000000000..e014100d92 --- /dev/null +++ b/tests/test_rpn.py @@ -0,0 +1,206 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import unittest +import torch + +from detectron2.config import get_cfg +from detectron2.modeling.backbone import build_backbone +from detectron2.modeling.proposal_generator.build import build_proposal_generator +from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes +from detectron2.utils.events import EventStorage + +logger = logging.getLogger(__name__) + + +class RPNTest(unittest.TestCase): + def test_rpn(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RPN" + cfg.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator" + cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1) + backbone = build_backbone(cfg) + proposal_generator = build_proposal_generator(cfg, backbone.output_shape()) + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + image_shape = (15, 15) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) + gt_instances = Instances(image_shape) + gt_instances.gt_boxes = Boxes(gt_boxes) + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator(images, features, gt_instances) + + expected_losses = { + "loss_rpn_cls": torch.tensor(0.0804563984), + "loss_rpn_loc": torch.tensor(0.0990132466), + } + for name in expected_losses.keys(): + assert torch.allclose(proposal_losses[name], expected_losses[name]) + + expected_proposal_boxes = [ + Boxes(torch.tensor([[0, 0, 10, 10], [7.3365392685, 0, 10, 10]])), + Boxes( + torch.tensor( + [ + [0, 0, 30, 20], + [0, 0, 16.7862777710, 13.1362524033], + [0, 0, 30, 13.3173446655], + [0, 0, 10.8602609634, 20], + [7.7165775299, 0, 27.3875980377, 20], + ] + ) + ), + ] + + expected_objectness_logits = [ + torch.tensor([0.1225359365, -0.0133192837]), + torch.tensor([0.1415634006, 0.0989848152, 0.0565387346, -0.0072308783, -0.0428492837]), + ] + + for i in range(len(image_sizes)): + assert len(proposals[i]) == len(expected_proposal_boxes[i]) + assert proposals[i].image_size == (image_sizes[i][0], image_sizes[i][1]) + assert torch.allclose( + proposals[i].proposal_boxes.tensor, expected_proposal_boxes[i].tensor + ) + assert torch.allclose(proposals[i].objectness_logits, expected_objectness_logits[i]) + + def test_rrpn(self): + torch.manual_seed(121) + cfg = get_cfg() + cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN" + cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator" + cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] + cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]] + cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]] + cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1) + cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead" + backbone = build_backbone(cfg) + proposal_generator = build_proposal_generator(cfg, backbone.output_shape()) + num_images = 2 + images_tensor = torch.rand(num_images, 20, 30) + image_sizes = [(10, 10), (20, 30)] + images = ImageList(images_tensor, image_sizes) + image_shape = (15, 15) + num_channels = 1024 + features = {"res4": torch.rand(num_images, num_channels, 1, 2)} + gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32) + gt_instances = Instances(image_shape) + gt_instances.gt_boxes = RotatedBoxes(gt_boxes) + with EventStorage(): # capture events in a new storage to discard them + proposals, proposal_losses = proposal_generator(images, features, 
gt_instances) + + expected_losses = { + "loss_rpn_cls": torch.tensor(0.0432923734), + "loss_rpn_loc": torch.tensor(0.1552739739), + } + for name in expected_losses.keys(): + assert torch.allclose(proposal_losses[name], expected_losses[name]) + + expected_proposal_boxes = [ + RotatedBoxes( + torch.tensor( + [ + [0.60189795, 1.24095452, 61.98131943, 18.03621292, -4.07244873], + [15.64940453, 1.69624567, 59.59749603, 16.34339333, 2.62692475], + [-3.02982378, -2.69752932, 67.90952301, 59.62455750, 59.97010040], + [16.71863365, 1.98309708, 35.61507797, 32.81484985, 62.92267227], + [0.49432933, -7.92979717, 67.77606201, 62.93098450, -1.85656738], + [8.00880814, 1.36017394, 121.81007385, 32.74150467, 50.44297409], + [16.44299889, -4.82221127, 63.39775848, 61.22503662, 54.12270737], + [5.00000000, 5.00000000, 10.00000000, 10.00000000, -0.76943970], + [17.64130402, -0.98095351, 61.40377808, 16.28918839, 55.53118134], + [0.13016054, 4.60568953, 35.80157471, 32.30180359, 62.52872086], + [-4.26460743, 0.39604485, 124.30079651, 31.84611320, -1.58203125], + [7.52815342, -0.91636634, 62.39784622, 15.45565224, 60.79549789], + ] + ) + ), + RotatedBoxes( + torch.tensor( + [ + [0.07734215, 0.81635046, 65.33510590, 17.34688377, -1.51821899], + [-3.41833067, -3.11320257, 64.17595673, 60.55617905, 58.27033234], + [20.67383385, -6.16561556, 63.60531998, 62.52315903, 54.85546494], + [15.00000000, 10.00000000, 30.00000000, 20.00000000, -0.18218994], + [9.22646523, -6.84775209, 62.09895706, 65.46472931, -2.74307251], + [15.00000000, 4.93451595, 30.00000000, 9.86903191, -0.60272217], + [8.88342094, 2.65560246, 120.95362854, 32.45022202, 55.75970078], + [16.39088631, 2.33887148, 34.78761292, 35.61492920, 60.81977463], + [9.78298569, 10.00000000, 19.56597137, 20.00000000, -0.86660767], + [1.28576660, 5.49873352, 34.93610382, 33.22600174, 60.51599884], + [17.58912468, -1.63270092, 62.96052551, 16.45713997, 52.91245270], + [5.64749718, -1.90428460, 62.37649155, 16.19474792, 61.09543991], + [0.82255805, 2.34931135, 118.83985901, 32.83671188, 56.50753784], + [-5.33874989, 1.64404404, 125.28501892, 33.35424042, -2.80731201], + ] + ) + ), + ] + + expected_objectness_logits = [ + torch.tensor( + [ + 0.10111768, + 0.09112845, + 0.08466332, + 0.07589971, + 0.06650183, + 0.06350251, + 0.04299347, + 0.01864817, + 0.00986163, + 0.00078543, + -0.04573630, + -0.04799230, + ] + ), + torch.tensor( + [ + 0.11373727, + 0.09377633, + 0.05281663, + 0.05143715, + 0.04040275, + 0.03250912, + 0.01307789, + 0.01177734, + 0.00038105, + -0.00540255, + -0.01194804, + -0.01461012, + -0.03061717, + -0.03599222, + ] + ), + ] + + torch.set_printoptions(precision=8, sci_mode=False) + + for i in range(len(image_sizes)): + assert len(proposals[i]) == len(expected_proposal_boxes[i]) + assert proposals[i].image_size == (image_sizes[i][0], image_sizes[i][1]) + # It seems that there's some randomness in the result across different machines: + # This test can be run on a local machine for 100 times with exactly the same result, + # However, a different machine might produce slightly different results, + # thus the atol here. 
+ err_msg = "computed proposal boxes = {}, expected {}".format( + proposals[i].proposal_boxes.tensor, expected_proposal_boxes[i].tensor + ) + assert torch.allclose( + proposals[i].proposal_boxes.tensor, expected_proposal_boxes[i].tensor, atol=1e-5 + ), err_msg + err_msg = "computed objectness logits = {}, expected {}".format( + proposals[i].objectness_logits, expected_objectness_logits[i] + ) + assert torch.allclose( + proposals[i].objectness_logits, expected_objectness_logits[i], atol=1e-5 + ), err_msg + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_visualizer.py b/tests/test_visualizer.py new file mode 100644 index 0000000000..0aa35b5628 --- /dev/null +++ b/tests/test_visualizer.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +# File: + +import numpy as np +import unittest +import torch + +from detectron2.data import MetadataCatalog +from detectron2.structures import Instances +from detectron2.utils.visualizer import Visualizer + + +class TestVisualizer(unittest.TestCase): + def _random_data(self): + H, W = 100, 100 + N = 10 + img = np.random.rand(H, W, 3) * 255 + boxxy = np.random.rand(N, 2) * (H // 2) + boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1) + + def _rand_poly(): + return np.random.rand(3, 2).flatten() * H + + polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)] + + mask = np.zeros_like(img[:, :, 0], dtype=np.bool) + mask[:10, 10:20] = 1 + + labels = [str(i) for i in range(N)] + return img, boxes, labels, polygons, [mask] * N + + @property + def metadata(self): + return MetadataCatalog.get("coco_2017_train") + + def test_overlay_instances(self): + img, boxes, labels, polygons, masks = self._random_data() + + v = Visualizer(img, self.metadata) + output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image() + self.assertEqual(output.shape, img.shape) + + # Test 2x scaling + v = Visualizer(img, self.metadata, scale=2.0) + output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image() + self.assertEqual(output.shape[0], img.shape[0] * 2) + + # Test overlay masks + v = Visualizer(img, self.metadata) + output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image() + self.assertEqual(output.shape, img.shape) + + def test_overlay_instances_no_boxes(self): + img, boxes, labels, polygons, _ = self._random_data() + v = Visualizer(img, self.metadata) + v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image() + + def test_draw_instance_predictions(self): + img, boxes, _, _, masks = self._random_data() + num_inst = len(boxes) + inst = Instances((img.shape[0], img.shape[1])) + inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) + inst.scores = torch.rand(num_inst) + inst.pred_boxes = torch.from_numpy(boxes) + inst.pred_masks = torch.from_numpy(np.asarray(masks)) + + v = Visualizer(img, self.metadata) + v.draw_instance_predictions(inst) + + def test_correct_output_shape(self): + img = np.random.rand(928, 928, 3) * 255 + v = Visualizer(img, self.metadata) + out = v.output.get_image() + self.assertEqual(out.shape, img.shape) diff --git a/tools/benchmark.py b/tools/benchmark.py new file mode 100755 index 0000000000..7310833a21 --- /dev/null +++ b/tools/benchmark.py @@ -0,0 +1,138 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +A script to benchmark builtin models. 
+""" + +import itertools +import logging +import torch +import tqdm +from fvcore.common.timer import Timer +from torch.nn.parallel import DistributedDataParallel + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import ( + DatasetFromList, + build_detection_test_loader, + build_detection_train_loader, +) +from detectron2.engine import SimpleTrainer, default_argument_parser, hooks, launch +from detectron2.modeling import build_model +from detectron2.solver import build_optimizer +from detectron2.utils import comm +from detectron2.utils.events import CommonMetricPrinter +from detectron2.utils.logger import setup_logger + +logger = logging.getLogger("detectron2") + + +def setup(args): + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.SOLVER.BASE_LR = 0.001 # Avoid NaNs. Not useful in this script anyway. + cfg.merge_from_list(args.opts) + cfg.freeze() + setup_logger(distributed_rank=comm.get_rank()) + return cfg + + +def benchmark_data(args): + cfg = setup(args) + + dataloader = build_detection_train_loader(cfg) + + itr = iter(dataloader) + for _ in range(10): # warmup + next(itr) + timer = Timer() + max_iter = 1000 + for _ in tqdm.trange(max_iter): + next(itr) + logger.info( + "{} iters ({} images) in {} seconds.".format( + max_iter, max_iter * cfg.SOLVER.IMS_PER_BATCH, timer.seconds() + ) + ) + + +def benchmark_train(args): + cfg = setup(args) + model = build_model(cfg) + logger.info("Model:\n{}".format(model)) + if comm.get_world_size() > 1: + model = DistributedDataParallel( + model, device_ids=[comm.get_local_rank()], broadcast_buffers=False + ) + optimizer = build_optimizer(cfg, model) + checkpointer = DetectionCheckpointer(model, optimizer=optimizer) + checkpointer.load(cfg.MODEL.WEIGHTS) + + cfg.defrost() + cfg.DATALOADER.NUM_WORKERS = 0 + data_loader = build_detection_train_loader(cfg) + dummy_data = list(itertools.islice(data_loader, 100)) + + def f(): + while True: + yield from DatasetFromList(dummy_data, copy=False) + + max_iter = 400 + trainer = SimpleTrainer(model, f(), optimizer) + trainer.register_hooks( + [hooks.IterationTimer(), hooks.PeriodicWriter([CommonMetricPrinter(max_iter)])] + ) + trainer.train(1, max_iter) + + +@torch.no_grad() +def benchmark_eval(args): + cfg = setup(args) + model = build_model(cfg) + model.eval() + logger.info("Model:\n{}".format(model)) + DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) + + cfg.defrost() + cfg.DATALOADER.NUM_WORKERS = 0 + data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0]) + dummy_data = list(itertools.islice(data_loader, 100)) + + def f(): + while True: + yield from DatasetFromList(dummy_data, copy=False) + + for _ in range(5): # warmup + model(dummy_data[0]) + + max_iter = 400 + timer = Timer() + with tqdm.tqdm(total=max_iter) as pbar: + for idx, d in enumerate(f()): + if idx == max_iter: + break + model(d) + pbar.update() + logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds())) + + +if __name__ == "__main__": + parser = default_argument_parser() + parser.add_argument("--task", choices=["train", "eval", "data"], required=True) + args = parser.parse_args() + assert not args.eval_only and not args.resume + + if args.task == "data": + f = benchmark_data + elif args.task == "train": + """ + Note: training speed may not be representative. + The training cost of a R-CNN model varies with the content of the data + and the quality of the model. 
+ """ + f = benchmark_train + elif args.task == "eval": + f = benchmark_eval + # only benchmark single-GPU inference. + assert args.num_gpus == 1 and args.num_machines == 1 + launch(f, args.num_gpus, args.num_machines, args.machine_rank, args.dist_url, args=(args,)) diff --git a/tools/train_net.py b/tools/train_net.py new file mode 100755 index 0000000000..642cbc4888 --- /dev/null +++ b/tools/train_net.py @@ -0,0 +1,155 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Detection Training Script. + +This scripts reads a given config file and runs the training. +It is an entry point that is made to train standard models in detectron2. + +In order to let one script support training of many models, +this script contains logic that are specific to these built-in models and therefore +may not be suitable for your own project. +For example, your research project perhaps only needs a single "evaluator". + +Therefore, we recommend you to use detectron2 as an library and take +this file as an example of how to use the library. +You may want to write your own script with your datasets and other customizations. +""" + +import logging +import os +from collections import OrderedDict +import torch + +import detectron2.utils.comm as comm +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch +from detectron2.evaluation import ( + CityscapesEvaluator, + COCOEvaluator, + COCOPanopticEvaluator, + DatasetEvaluators, + LVISEvaluator, + PascalVOCDetectionEvaluator, + SemSegEvaluator, + verify_results, +) +from detectron2.modeling import GeneralizedRCNNWithTTA + + +class Trainer(DefaultTrainer): + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. + """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + evaluator_list = [] + evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type + if evaluator_type in ["sem_seg", "coco_panoptic_seg"]: + evaluator_list.append( + SemSegEvaluator( + dataset_name, + distributed=True, + num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, + ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, + output_dir=output_folder, + ) + ) + if evaluator_type in ["coco", "coco_panoptic_seg"]: + evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder)) + if evaluator_type == "coco_panoptic_seg": + evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) + if evaluator_type == "cityscapes": + assert ( + torch.cuda.device_count() >= comm.get_rank() + ), "CityscapesEvaluator currently do not work with multiple machines." 
+ return CityscapesEvaluator(dataset_name) + if evaluator_type == "pascal_voc": + return PascalVOCDetectionEvaluator(dataset_name) + if evaluator_type == "lvis": + return LVISEvaluator(dataset_name, cfg, True, output_folder) + if len(evaluator_list) == 0: + raise NotImplementedError( + "no Evaluator for the dataset {} with the type {}".format( + dataset_name, evaluator_type + ) + ) + if len(evaluator_list) == 1: + return evaluator_list[0] + return DatasetEvaluators(evaluator_list) + + @classmethod + def test_with_TTA(cls, cfg, model): + logger = logging.getLogger("detectron2.trainer") + # In the end of training, run an evaluation with TTA + # Only support some R-CNN models. + logger.info("Running inference with test-time augmentation ...") + model = GeneralizedRCNNWithTTA(cfg, model) + evaluators = [ + cls.build_evaluator( + cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") + ) + for name in cfg.DATASETS.TEST + ] + res = cls.test(cfg, model, evaluators) + res = OrderedDict({k + "_TTA": v for k, v in res.items()}) + return res + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume + ) + res = Trainer.test(cfg, model) + if comm.is_main_process(): + verify_results(cfg, res) + if cfg.TEST.AUG.ENABLED: + res.update(Trainer.test_with_TTA(cfg, model)) + return res + + """ + If you'd like to do anything fancier than the standard training logic, + consider writing your own training loop or subclassing the trainer. + """ + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + if cfg.TEST.AUG.ENABLED: + trainer.register_hooks( + [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))] + ) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/tools/visualize_coco_results.py b/tools/visualize_coco_results.py new file mode 100755 index 0000000000..6101081589 --- /dev/null +++ b/tools/visualize_coco_results.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import argparse +import json +import numpy as np +import os +from collections import defaultdict +import cv2 +import tqdm +from fvcore.common.file_io import PathManager + +from detectron2.data import DatasetCatalog, MetadataCatalog +from detectron2.structures import Boxes, BoxMode, Instances +from detectron2.utils.logger import setup_logger +from detectron2.utils.visualizer import Visualizer + + +def create_instances(predictions, image_size): + ret = Instances(image_size) + + score = np.asarray([x["score"] for x in predictions]) + chosen = (score > args.conf_threshold).nonzero()[0] + score = score[chosen] + bbox = np.asarray([predictions[i]["bbox"] for i in chosen]) + bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) + + labels = np.asarray([dataset_id_map(predictions[i]["category_id"]) for i in chosen]) + + ret.scores = score + ret.pred_boxes = Boxes(bbox) + ret.pred_classes = labels + + try: + ret.pred_masks = [predictions[i]["segmentation"] for i in chosen] + except KeyError: + pass + return ret + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input", required=True, help="JSON file produced by the model") + parser.add_argument("--output", required=True, help="output directory") + parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val") + parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold") + args = parser.parse_args() + + logger = setup_logger() + + with PathManager.open(args.input, "r") as f: + predictions = json.load(f) + + pred_by_image = defaultdict(list) + for p in predictions: + pred_by_image[p["image_id"]].append(p) + + dicts = list(DatasetCatalog.get(args.dataset)) + metadata = MetadataCatalog.get(args.dataset) + if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): + + def dataset_id_map(ds_id): + return metadata.thing_dataset_id_to_contiguous_id[ds_id] + + elif "lvis" in args.dataset: + # LVIS results are in the same format as COCO results, but have a different + # mapping from dataset category id to contiguous category id in [0, #categories - 1] + def dataset_id_map(ds_id): + return ds_id - 1 + + else: + raise ValueError("Unsupported dataset: {}".format(args.dataset)) + + os.makedirs(args.output, exist_ok=True) + + for dic in tqdm.tqdm(dicts): + img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1] + basename = os.path.basename(dic["file_name"]) + + predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2]) + vis = Visualizer(img, metadata) + vis_pred = vis.draw_instance_predictions(predictions).get_image() + + vis = Visualizer(img, metadata) + vis_gt = vis.draw_dataset_dict(dic).get_image() + + concat = np.concatenate((vis_pred, vis_gt), axis=1) + cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1]) diff --git a/tools/visualize_data.py b/tools/visualize_data.py new file mode 100755 index 0000000000..8ae8253e23 --- /dev/null +++ b/tools/visualize_data.py @@ -0,0 +1,99 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import argparse +import numpy as np +import os +from itertools import chain +import cv2 +from PIL import Image + +from detectron2.config import get_cfg +from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader +from detectron2.data import detection_utils as utils +from detectron2.data.build import filter_images_with_few_keypoints +from detectron2.utils.logger import setup_logger +from detectron2.utils.visualizer import Visualizer + + +def setup(args): + cfg = get_cfg() + if args.config_file: + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + return cfg + + +def parse_args(in_args=None): + parser = argparse.ArgumentParser(description="Visualizes Ground-truth Dataset") + parser.add_argument( + "--source", + choices=["annotation", "dataloader"], + required=True, + help="visualize the annotations or the data loader (with pre-processing)", + ) + parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") + parser.add_argument("--output-dir", default="./", help="path to output directory") + parser.add_argument("--show", action="store_true", help="show output in a window") + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + return parser.parse_args(in_args) + + +if __name__ == "__main__": + """ + General utility to visualize ground truth dataset. + """ + args = parse_args() + logger = setup_logger() + logger.info("Arguments: " + str(args)) + cfg = setup(args) + + dirname = args.output_dir + os.makedirs(dirname, exist_ok=True) + metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]) + + def output(vis, fname): + if args.show: + print(fname) + cv2.imshow("window", vis.get_image()[:, :, ::-1]) + cv2.waitKey() + else: + filepath = os.path.join(dirname, fname) + print("Saving to {} ...".format(filepath)) + vis.save(filepath) + + scale = 2.0 if args.show else 1.0 + if args.source == "dataloader": + train_data_loader = build_detection_train_loader(cfg) + for batch in train_data_loader: + for per_image in batch: + # Pytorch tensor is in (C, H, W) format + img = per_image["image"].permute(1, 2, 0) + if cfg.INPUT.FORMAT == "BGR": + img = img[:, :, [2, 1, 0]] + else: + img = np.asarray(Image.fromarray(img, mode=cfg.INPUT.FORMAT).convert("RGB")) + + visualizer = Visualizer(img, metadata=metadata, scale=scale) + target_fields = per_image["instances"].get_fields() + labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]] + vis = visualizer.overlay_instances( + labels=labels, + boxes=target_fields.get("gt_boxes", None), + masks=target_fields.get("gt_masks", None), + keypoints=target_fields.get("gt_keypoints", None), + ) + output(vis, str(per_image["image_id"]) + ".jpg") + else: + dicts = list(chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN])) + if cfg.MODEL.KEYPOINT_ON: + dicts = filter_images_with_few_keypoints(dicts, 1) + for dic in dicts: + img = utils.read_image(dic["file_name"], "RGB") + visualizer = Visualizer(img, metadata=metadata, scale=scale) + vis = visualizer.draw_dataset_dict(dic) + output(vis, os.path.basename(dic["file_name"]))
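A minimal sketch, assuming detectron2 and torch are importable, of the half-pixel correction that tests/test_roi_align.py exercises: ROIAlign with aligned=True applies the 0.5 pixel offset, while aligned=False reproduces the old Detectron behaviour. The expected values 4.5 and 7.5 come from that test's correct_results and old_results.

import torch
from detectron2.layers.roi_align import ROIAlign

# 5x5 ramp image with values 0..24, shape (N, C, H, W)
img = torch.arange(25, dtype=torch.float32).reshape(1, 1, 5, 5)
# one RoI in (batch_index, x1, y1, x2, y2) format
rois = torch.tensor([[0.0, 1.0, 1.0, 3.0, 3.0]])

# output size (4, 4), spatial scale 1.0, sampling ratio 0, as in the test
out_aligned = ROIAlign((4, 4), 1.0, 0, aligned=True)(img, rois)
out_legacy = ROIAlign((4, 4), 1.0, 0, aligned=False)(img, rois)
print(out_aligned[0, 0, 0, 0].item())  # expected 4.5 per the test
print(out_legacy[0, 0, 0, 0].item())   # expected 7.5 per the test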
zR*p;=PIu@9b*-cZ>0+GdRT*X!0>6okydiQIJMI27NTKFiNW$EeNn|(`X=08`jd7{ek`SXi5$3HNcJ|1TsV^H1M5fr!-i@L>ET|A` z^Z|{=C?v%U^AL_yxx)lXs)B)S~qF>C`(IkKZr9PkG zIH0t55`xhW_+>!ZdQ~o?w7p(TVU=R)LB{@Tpn2m}nkiYeMh-K$phB)L6I`lenCl_B zP#OsIPnl(2Pbh#u>T_2SOwf|58^ZyDSG_PS%iA3>5<_7^Y(K3U8JV@)$TTpF`uicE zPSKX;+vT~MV`t7=s36fINoQ#+q(7EV)(2d4q-24jVi~iieZNpWY3oiQl_~7bghEj|(&{C{aut202 zY_j$T+N|-LRaU!ZesIVQ)`LqB|iJS_=U ziZhKzp(OtRg%*3&nnq)2up$lJMgcx(8A)QD#170MKnuvel6|O>9vyEiQ({Mzf8i2# z<@O(Z(o(GMvE0U6kl+k|d8Uz~m1MM&)f$|vo+u*T+S2ZJvt4kPeNuvbEI!mRX%nPY zHxlSDu?wD!ld!3Z<*Y8Zh}<(Ass8{C>en$u989u_qi(JIC;tE{1+y}{qr-@^rH9+0 zs!St`Hw3BEfQye&^Sx=Z;0w555(0i(ByZ1B63wTd)#D}Y!Lzuto_u9T79Txm@{%zfw-y0c07QN&2fmMz;> zY87R%e52_th4M;sRmrTXCaUus!xT3eZ>owbd*C(bAVd=c#9i(ZR{bIoJ8 zAawr#8eQ%LcE}H{VsoPd1&wKlQJ3?TO0Vc)xntIYaTGI43~@#gqGAgUfa9eC^6Q=@ zlI_@Qd@2KgyHzAXZ3(u4wVBRy(5*QfQNskEGM_OjZlM03dR4SbbhhlLIM$)PyHFl~ zlD37BVCKjZ225c_M&ljL0>*ok*B!b9SmP`KUsmIKyS>7ytkIn^wxQul^s5)UvAgEW zq{}*EQ+3iuPvmF;8c*_I%GzTfNUP#ViRMKcr(mJPI6h zTJq2hajdCsP;1!nV{>?obmmD_a$6W0yi%9W$|IR3R#b8vDxBb&P8XB~ksl^E z0!8fKdTnJ>aKa?zBVe3#G#!0vV`Y-aVO%Q^$0OdFgXb+SL+j3C$RoiArBX1rFv~2g zG?!TR`{Ipojc;+v*M?ZtVF&e44%oz+pt=%>o+~nhWsDh-10Am7MEBx-{%Djq~P@a`rb+YDH6Y zPRAaS4{TE7<>HO>L8OhCkJJqfA(F-7xFi@R&;HAH#RR`HJ3#V)sIEXn7|UmW0yNr4tJLZEnPsquXxm_4x zoyhz5pur5#SVwdU%N%dG#`)`7H?DYzhS&tcFD@ofeM7}9^6zT%aM{U?-F1J>_Mk5f zT-sf%Ntq?}g*}vgR5bZ}t5&*5%B&KK*Ig?*9O^9LLC2x3$(Y$@C z!pY<2fhp#N_33N|_B5I;yu3yoI{AUlg(qxa)U53B&SWhdtle|47@#$coIF99jNdNS z;IoAX2OW>zlj3mdF-lWX>cWhS9s78uI8!Vw^D|sIS$?#P<0Nb|pK7-i`Doe^5~nBB z7eK+wbN<{f3L;A@6vV;TInYMfpv(=alIBa1A&N}}JM`;C3N6Pl zD(TX6{`3b|w`*bu84ZA-Y&_JkZ%Dk1W7O z3&?djD!u>*6d9<=E+M->FyOL|*gZ#zem2iCOLu(^N@*u>qo!&uty103Ixc2WN@Jq3TEd8N6k02b zqXA5tX-PkxrJ}=Qu^qpa1~ROW;o3h*V;iX0t|*@-3zfD+VnSz>;@hSktwJ#dVhD1A%zzq9}(4Iix%B(dN!5&(NAdYPz z59VVe3Iw9&8D2I_6dgxHnj&jS&j#2N(m*D|>k&VN`%~{U{tV28Wv= z1gOA4RfJrg^MaTI>&PtGTwW zbpkbEQzNd_E@RE8y9Q&^+LzO98|swccgK-H?3}b>vICcXa9R6zZeP{ z%Kr=l4Gk3y69Wqi69W?y3kM$u3mXp`6BCyZ7Z0C+fRF$ShlrSnfcRxh@INP@|E)wu z!FX9nfQ^a$^7a3xJ;MM(4A3@G1Tu&bKq3So6M~*c0a^e+LVn5iCEx$okWoNLr~n!| z2IkAO9sz&^LVC#-2?H7R#Za(8NXRIJ01*Q(DzUUS8l#n0WCl736JJ^D&^d<80x7@F z_Jx48w@({bP}Vn!j5)Ks9g|${WAyNdyls}QO$B`C#VgHAwEv6x|HT9VWE501bR-bQ z%j{dimzc;eVbHLUK&bzh*Z&1YWgz0!Mw7PkA|~O>Xk9>OfYO8k8=u;5ZA2CCWmdI>D`L;q_ARBb!;F26mQII-{Se3pZ<*klWADq+V;g5rpZyxuDnMZurHR<6y)L&%Ttzu zG5w=qE$e~Cc}xpl&h!{BrGouO1G3rw_j&-Rsf8#PsL0Y(C}It7ojhs?R6Hcdhjn7c zY*;1=z7dwe*BB_U;j%(aWBDvpJq-*LYvc(NLZIc~nwnHW>4SjFYqZf%+7Tp8+nP!r z+3mmiwZOIAc!Z&Li89-m@frDk{T+i{m^TwSS;gSA^g^Unp8jXRj22v3?Z@{SUGl%o zLC^5`)N1&A$p~Q1!cC+WnH0NUmr?)T$~`VTgjcjC#cYyswkix}+*b1!Aq-zbU|w!$ zjf?iy*NAN#+-Jvr@S+k3cP0V7v1y811Dy1gwaJ;SEsQWQ6J;B}siw`yM%&dixL#lO zWb!&GALCBa^?UhffvXR5)9W9z9KLr07Jt`GF_&dE&L91l^U-AD%r#ZRwNFAoK3X4( zY{D59<>5z^vK ze%(3RxOJHiI^5w1Y`E-A!muKlfuAsTO?NXi52-w#b-nU8i~wM7r-S^l{;A{ZuVBOS zCAoXR_jqK;a$qzTLWA1PWz-4yHYe)al z)5k1$`+-H7E$uF)VbtOtOIMkVGCu$aJd7Ur!MtlEdsUhb7!C)jobRM(jIT2SdlEhRgSiH8S2=twXgkZNW#e9L5jb+i;+4grp8xUWfC;Y@*;L1aBDgrh zENQeRXS=m*!D+G{W}rDjf12-~U-g+zhoK+F*4CDkD1-UPR~ zyEUl*L1zQJ2eD18Cgq|k>|Gx1)~I8!{{G4PU8@jfkw_5_VyJ@Bd>h3tZWjWHgtuE5 zQGFCu1g(yF#I%)HN$Fv$6cFP>u+0>Q#wk;|l1+MMUA1;- z#6Q2&uzUMd<#usCU26NDc7jO&I1YUVa6lDf&LWB0O6^)jZHm_Wz4`vff|6A&-S6TT z_?IrEaauB4Be*oiw~&0rHhV_lbMHqW$`(HQG>1G~d)>{wT|FlKPR@2UMvR{)_>(95 zDE?-^UAB-yz5AegCUKl}I)vrgj=z1G8arR{M#^evK4wDo$=P#a@*3mzJgN^rDvm&T zk@W9hh|0Ow+Bad4+tz(w%KJ8YD~xT zOSGj#BDzDLjF5#01E;U3#D7fL_V%tHt>HSzHfD~( zBrqy;R9;?98RU3lsz4OLn6jtCByY4w#uj^c=DIUCSgi&s&G7|NZIuUH*K*)K=M*ey zaZB>ln7tSyqNp9Sc+h7cme1l-x^2eH6ib?&rH4htn}AYC&lfU9{O7cRWtx_6}wTajN=8vhzl4{HwIq9qprU7f2tOY3`fTcLqv4 
zs|ptI+QQJ$Qm_?O8QAwoP)y~}^{8}E?eE4W|2d4xuTk&3&nt|G&R?xw0Ga(g)CIp6 z+C9L}lvroSXc(1VgFMF|d~~ci0TrW@7^8ZvSltE|EjCeA&^ijM84g?;2sty@ zv%#(Nt{a+X>qRF#wnNC9DG~{ck*|y$m3wrX*$Cs>h1rR1*neMLZ51oN{dfMR>XS%L z9+{@#kA%l-7&RxSK1e@*KgME@Ecpkb>tR>>{*1D5`!#+uM>^?ieSONjv_tw$m(cCz z(2I{aUsMwJ8ToWu$aVNxX{Up4=C|$huO4eMh0rFCIXyyviz0YvZMHfkXvEFsdhKdl z-!`hr(63Sofr61TNf-U0iHt)iEkTp|0>q;;TsA8*Yq3M`J z&d!+^({Td&s5L5sqj zq~R@0hzX25xFBi$ANdW8l9XGUD}SU_=pgC9>PMRChfSx9_w#{OR3! zfEA@>(FCFX*r%0!aFbg15^djh0+V=^pRk)U_%!k|612qnMqTjF7B=GnrB4l0XU9;s zu`1_BP~DnWLX3{ukWH6%*zzcUISa}eq6S8?&%r;%@8}e3EfJpmBz$kpvdZiyOJO6J z^P|w^(~q$;Bw3O`*TunA@_Ad?sz?ZKMZ|8gStgt0R?@U z;~jddNTTC+D;X;%+Y${6^A0BU;7#=HlR1d%G}a73Fs%@MST(70hC0}}%eCp1%MH3F z1OQDp>G0?H*tMQ^F`dX`h{jty_IrI-Ff6X7Tre*lvg;h7K8g54#Xrm7_FeHPLcp