From 703b3ad1844f2360bcbaab7ed93dac94209d1ba1 Mon Sep 17 00:00:00 2001
From: tuanrpt
Date: Thu, 28 Oct 2021 16:11:59 +1100
Subject: [PATCH] Add LAMDA

---
 README.md                       | 121 ++++++-
 model/dataLoader.py             | 165 +++++++++
 model/generic_utils.py          | 211 +++++++++++
 model/layers.py                 |  67 ++++
 model/model.py                  | 608 ++++++++++++++++++++++++++++++++
 model/run_lamda.py              | 210 +++++++++++
 model/test_da_template_lamda.py | 177 ++++++++++
 tensorbayes.tar                 | Bin 0 -> 136192 bytes
 tf1.9py3.5.yml                  | 109 ++++++
 9 files changed, 1667 insertions(+), 1 deletion(-)
 create mode 100644 model/dataLoader.py
 create mode 100644 model/generic_utils.py
 create mode 100644 model/layers.py
 create mode 100644 model/model.py
 create mode 100644 model/run_lamda.py
 create mode 100644 model/test_da_template_lamda.py
 create mode 100644 tensorbayes.tar
 create mode 100644 tf1.9py3.5.yml

diff --git a/README.md b/README.md
index 27ca0ed..a957d4c 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,120 @@
-# LAMDA
\ No newline at end of file
+# LAMDA: Label Matching Deep Domain Adaptation
+
+This is the implementation of the paper **[LAMDA: Label Matching Deep Domain Adaptation](http://proceedings.mlr.press/v139/le21a/le21a.pdf)**, accepted at ICML 2021.
+
+## A. Setup
+
+### A.1. Install Package Dependencies
+
+**Install manually**
+
+```
+Python Environment: >= 3.5
+TensorFlow: >= 1.9
+```
+
+**Install automatically from YAML file**
+
+```
+pip install --upgrade pip
+conda env create --file tf1.9py3.5.yml
+```
+
+**[UPDATE] Install tensorbayes**
+
+Please note that tensorbayes 0.4.0 is out of date. Please copy a newer version into the *tf1.9py3.5* environment using **tensorbayes.tar**:
+
+```
+source activate tf1.9py3.5
+pip install tensorbayes
+tar -xvf tensorbayes.tar
+cp -rf /tensorbayes/* /opt/conda/envs/tf1.9py3.5/lib/python3.5/site-packages/tensorbayes/
+```
+
+### A.2. Datasets
+
+Please download Office-31 [here](https://drive.google.com/file/d/1dsrHn4S6lCmlTa4Eg4RAE5JRfZUIxR8G/view?usp=sharing) and unzip the extracted features into the *datasets* folder.
+
+## B. Training
+
+We first navigate to the *model* folder and then run *run_lamda.py* as below. The first three positional arguments select the experiment choice, the source domain, and the target domain; all remaining arguments are passed as space-separated key/value pairs.
+
+```bash
+cd model
+```
+
+1. **A** --> **W** task
+
+```bash
+python run_lamda.py 1 amazon webcam format csv num_iters 20000 summary_freq 400 learning_rate 0.0001 inorm True batch_size 310 src_class_trade_off 1.0 domain_trade_off 0.1 src_vat_trade_off 0.1 trg_trade_off 0.1 save_grads False cast_data False cnn_size small update_target_loss False m_on_D_trade_off 1.0 m_plus_1_on_D_trade_off 1.0 m_plus_1_on_G_trade_off 1.0 m_on_G_trade_off 0.1 data_path ""
+```
+
+2. **A** --> **D** task
+
+```bash
+python run_lamda.py 1 amazon dslr format csv num_iters 20000 summary_freq 400 learning_rate 0.0001 inorm True batch_size 310 src_class_trade_off 1.0 domain_trade_off 0.1 src_vat_trade_off 1.0 trg_trade_off 0.1 save_grads False cast_data False cnn_size small update_target_loss False m_on_D_trade_off 1.0 m_plus_1_on_D_trade_off 1.0 m_plus_1_on_G_trade_off 1.0 m_on_G_trade_off 0.05 data_path ""
+```
+
+3. **D** --> **W** task
+
+```bash
+python run_lamda.py 1 dslr webcam format csv num_iters 20000 summary_freq 400 learning_rate 0.0001 inorm True batch_size 155 src_class_trade_off 1.0 domain_trade_off 0.1 src_vat_trade_off 0.1 trg_trade_off 0.1 save_grads False cast_data False cnn_size small update_target_loss False m_on_D_trade_off 1.0 m_plus_1_on_D_trade_off 1.0 m_plus_1_on_G_trade_off 1.0 m_on_G_trade_off 0.1 data_path ""
+```
+
+4. **W** --> **D** task
+
+```bash
+python run_lamda.py 1 webcam dslr format csv num_iters 20000 summary_freq 400 learning_rate 0.0001 inorm True batch_size 310 src_class_trade_off 1.0 domain_trade_off 0.1 src_vat_trade_off 0.1 trg_trade_off 0.1 save_grads False cast_data False cnn_size small update_target_loss False m_on_D_trade_off 1.0 m_plus_1_on_D_trade_off 1.0 m_plus_1_on_G_trade_off 1.0 m_on_G_trade_off 0.1 data_path ""
+```
+
+5. **D** --> **A** task
+
+```bash
+python run_lamda.py 1 dslr amazon format csv num_iters 20000 summary_freq 400 learning_rate 0.0001 inorm True batch_size 155 src_class_trade_off 1.0 domain_trade_off 0.1 src_vat_trade_off 1.0 trg_trade_off 0.1 save_grads False cast_data False cnn_size small update_target_loss False m_on_D_trade_off 1.0 m_plus_1_on_D_trade_off 1.0 m_plus_1_on_G_trade_off 1.0 m_on_G_trade_off 1.0 data_path ""
+```
+
+6. **W** --> **A** task
+
+```bash
+python run_lamda.py 1 webcam amazon format csv num_iters 20000 summary_freq 400 learning_rate 0.0001 inorm True batch_size 310 src_class_trade_off 1.0 domain_trade_off 0.1 src_vat_trade_off 1.0 trg_trade_off 0.1 save_grads False cast_data False cnn_size small update_target_loss False m_on_D_trade_off 1.0 m_plus_1_on_D_trade_off 1.0 m_plus_1_on_G_trade_off 1.0 m_on_G_trade_off 1.0 data_path ""
+```
+
+## C. Results
+
+| Methods       | **A** --> **W** | **A** --> **D** | **D** --> **W** | **W** --> **D** | **D** --> **A** | **W** --> **A** |   Avg    |
+| :-----------: | :-------------: | :-------------: | :-------------: | :-------------: | :-------------: | :-------------: | :------: |
+| ResNet-50 [1] |      70.0       |      65.5       |      96.1       |      99.3       |      62.8       |      60.5       |   75.7   |
+| DeepCORAL [2] |      83.0       |      71.5       |      97.9       |      98.0       |      63.7       |      64.5       |   79.8   |
+| DANN [3]      |      81.5       |      74.3       |      97.1       |      99.6       |      65.5       |      63.2       |   80.2   |
+| ADDA [4]      |      86.2       |      78.8       |      96.8       |      99.1       |      69.5       |      68.5       |   83.2   |
+| CDAN [5]      |      94.1       |      92.9       |      98.6       |    **100.0**    |      71.0       |      69.3       |   87.7   |
+| TPN [6]       |      91.2       |      89.9       |      97.7       |      99.5       |      70.5       |      73.5       |   87.1   |
+| DeepJDOT [7]  |      88.9       |      88.2       |      98.5       |      99.6       |      72.1       |      70.1       |   86.2   |
+| RWOT [8]      |      95.1       |      94.5       |    **99.5**     |    **100.0**    |      77.5       |      77.9       |   90.8   |
+| **LAMDA**     |    **95.2**     |    **96.0**     |      98.5       |      99.8       |    **87.3**     |    **84.4**     | **93.0** |
+
+## D. References
+
+### D.1. Baselines:
+
+[1] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770–778, 2016.
+
+[2] B. Sun and K. Saenko. Deep CORAL: Correlation alignment for deep domain adaptation. In Gang Hua and Hervé Jégou, editors, Computer Vision – ECCV 2016 Workshops, pages 443–450, Cham, 2016. Springer International Publishing.
+
+[3] Y. Ganin, E. Ustinova, H. Ajakan, P. Germain, H. Larochelle, F. Laviolette, M. Marchand, and V. Lempitsky. Domain-adversarial training of neural networks. J. Mach. Learn. Res., 17(1):2096–2030, Jan. 2016.
+
+[4] E. Tzeng, J. Hoffman, K. Saenko, and T. Darrell. Adversarial discriminative domain adaptation. In 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2962–2971, 2017.
+
+[5] M. Long, Z. Cao, J. Wang, and M. I. Jordan. Conditional adversarial domain adaptation. In Advances in Neural Information Processing Systems 31, pages 1640–1650. Curran Associates, Inc., 2018.
+
+[6] Y. Pan, T. Yao, Y. Li, Y. Wang, C. Ngo, and T. Mei. Transferrable prototypical networks for unsupervised domain adaptation. In CVPR, pages 2234–2242, 2019.
+
+[7] B. B. Damodaran, B. Kellenberger, R. Flamary, D. Tuia, and N. Courty. DeepJDOT: Deep joint distribution optimal transport for unsupervised domain adaptation. In Computer Vision – ECCV 2018, pages 467–483. Springer, 2018.
+
+[8] R. Xu, P. Liu, L. Wang, C. Chen, and J. Wang. Reliable weighted optimal transport for unsupervised domain adaptation. In CVPR 2020, June 2020.
+
+### D.2. GitHub repositories:
+
+- Some parts of our code (e.g., VAT, evaluation, …) are rewritten with modifications from [DIRT-T](https://github.com/RuiShu/dirt-t).
\ No newline at end of file
diff --git a/model/dataLoader.py b/model/dataLoader.py
new file mode 100644
index 0000000..8d1b81f
--- /dev/null
+++ b/model/dataLoader.py
@@ -0,0 +1,165 @@
+# Copyright (c) 2021, Tuan Nguyen.
+# All rights reserved.
+
+import os
+
+import numpy as np
+from scipy.io import loadmat
+from scipy import misc
+import time
+import h5py
+from keras.utils.np_utils import to_categorical
+from generic_utils import random_seed
+import csv
+
+
+def load_mat_office_caltech10_decaf(filename):
+    data = loadmat(filename)
+    x = np.reshape(data['feas'], (-1, 8, 8, 64))
+    y = data['labels'][0]
+    # y = y.reshape(-1) - 1
+    return x, y
+
+
+def load_mat_office31_ResNet50(filename):
+    data = loadmat(filename)
+    # x = np.reshape(data['feas'], (-1, 8, 8, 64))
+    x = data['feas']
+    y = data['labels'][0]
+    return x, y
+
+
+def load_mat_office31_AlexNet(filename):
+    data = loadmat(filename)
+    # x = np.reshape(data['feas'], (-1, 8, 8, 64))
+    x = data['feas']
+    y = data['labels'][0]
+    return x, y
+
+
+def load_office31_resnet50_feature(file_path_train):
+    # Each CSV row holds 2048 ResNet-50 features followed by the class label.
+    features_full = []
+    labels_full = []
+    with open(file_path_train, "r") as file:
+        reader = csv.reader(file)
+        for line in reader:
+            feature_i = np.asarray(line[:2048]).astype(np.float32)
+            label_i = int(float(line[2048]))
+            features_full.append(feature_i)
+            labels_full.append(label_i)
+    features_full = np.asarray(features_full)
+    labels_full = np.asarray(labels_full)
+    return features_full, labels_full
+
+
+def load_mat_office_caltech10_ResNet101(filename):
+    data = loadmat(filename)
+    x = data['feas']
+    y = data['labels'][0]
+    return x, y
+
+
+def load_mat_file_single_label(filename):
+    filename_list = ['mnist', 'stl32', 'synsign', 'gtsrb', 'cifar32', 'usps32']
+    data = loadmat(filename)
+    x = data['X']
+    y = data['y']
+    if any(fn in filename for fn in filename_list):
+        if 'mnist32_60_10' not in filename and 'mnistg' not in filename:
+            y = y[0]
+        else:
+            y = np.argmax(y, axis=1)
+    # process one-hot label encoder
+    elif len(y.shape) > 1:
+        y = np.argmax(y, axis=1)
+    return x, y
+
+
+def u2t(x):
+    max_num = 50000
+    if len(x) > max_num:
+        y = np.empty_like(x, dtype='float32')
+        for i in range(len(x) // max_num):
+            y[i*max_num: (i+1)*max_num] = (x[i*max_num: (i+1)*max_num].astype('float32') / 255) * 2 - 1
+        # convert the remainder that did not fill a full block (reuses the last loop index i)
+        y[(i + 1) * max_num:] = (x[(i + 1) * max_num:].astype('float32') / 255) * 2 - 1
+    else:
+        y = (x.astype('float32') / 255) * 2 - 1
+    return y
+
+
+class DataLoader:
+    def __init__(self, src_domain='mnistm', trg_domain='mnist', data_path='./dataset', data_format='mat',
+                 shuffle_data=False, dataset_name='digits', cast_data=True):
+        # src_domain / trg_domain are comma-separated domain-name strings.
+        self.num_src_domain = len(src_domain.split(','))
+        self.src_domain_name = src_domain
+        self.trg_domain_name = trg_domain
+        self.data_path = data_path
+        self.data_format = data_format
+        self.shuffle_data = shuffle_data
+        self.dataset_name = dataset_name
+        self.cast_data = cast_data
+
+        self.src_train = {}  # {idx: [domain_name, x_train, y_train_one_hot]}
+        self.trg_train = {}  # {idx: [domain_name, x_train, y_train_one_hot]}
+        self.src_test = {}
+        self.trg_test = {}
+
+        print("Source domains", self.src_domain_name)
+        print("Target domain", self.trg_domain_name)
+        print("----- Training data -----")
+        self._load_data_train()
+        print("----- Test data -----")
+        self._load_data_test()
+
+        self.data_shape = self.src_train[0][1][0].shape
+        self.num_domain = len(self.src_train.keys())
+        self.num_class = self.src_train[0][2].shape[-1]
+
+    def _load_data_train(self, tail_name="_train"):
+        if not self.src_train:
+            self.src_train = self._load_file(self.src_domain_name, tail_name, self.shuffle_data)
+            self.trg_train = self._load_file(self.trg_domain_name, tail_name, self.shuffle_data)
+
+    def _load_data_test(self, tail_name="_test"):
+        if not self.src_test:
+            self.src_test = self._load_file(self.src_domain_name, tail_name, self.shuffle_data)
+            self.trg_test = self._load_file(self.trg_domain_name, tail_name, self.shuffle_data)
+
+    def _load_file(self, name_file='', tail_name="_train", shuffle_data=False):
+        data_list = {}
+        name_file = name_file.split(',')
+        for idx, s_n in enumerate(name_file):
+            file_path_train = os.path.join(self.data_path, '{}{}.{}'.format(s_n, tail_name, self.data_format))
+            # print(file_path_train)
+            if os.path.isfile(file_path_train):
+                if self.dataset_name == 'digits':
+                    x_train, y_train = load_mat_file_single_label(file_path_train)
+                elif self.dataset_name == 'office_caltech10_DECAF_feat':
+                    x_train, y_train = load_mat_office_caltech10_decaf(file_path_train)
+                elif self.dataset_name == 'office_caltech10_ResNet101_feat':
+                    x_train, y_train = load_mat_office_caltech10_ResNet101(file_path_train)
+                elif self.dataset_name == 'office31_AlexNet_feat':
+                    x_train, y_train = load_mat_office31_AlexNet(file_path_train)
+                elif self.dataset_name == 'office31_resnet50_feature':
+                    x_train, y_train = load_office31_resnet50_feature(file_path_train)
+
+                if shuffle_data:
+                    x_train, y_train = self.shuffle(x_train, y_train)
+
+                if 'mnist32_60_10' not in s_n and self.cast_data:
+                    x_train = u2t(x_train)
+                data_list.update({idx: [s_n, x_train, to_categorical(y_train)]})
+            else:
+                raise FileNotFoundError('File not found: {}'.format(file_path_train))
+
+            print(s_n, x_train.shape[0], x_train.min(), x_train.max(), "Label", y_train.min(), y_train.max(), np.unique(y_train))
+        return data_list
+
+    def shuffle(self, x, y=None):
+        np.random.seed(random_seed())
+        idx_train = np.random.permutation(x.shape[0])
+        x = x[idx_train]
+        if y is not None:
+            y = y[idx_train]
+        return x, y
diff --git a/model/generic_utils.py b/model/generic_utils.py
new file mode 100644
index 0000000..5f26644
--- /dev/null
+++ b/model/generic_utils.py
@@ -0,0 +1,211 @@
+# Copyright (c) 2021, Tuan Nguyen.
+# All rights reserved.
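+
+# Generic helpers shared by the model code: project/data path lookup, a fixed
+# random seed, batching utilities, and a Keras-style console progress bar.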
+
+from __future__ import division
+from __future__ import print_function
+from __future__ import absolute_import
+
+import sys
+import six
+import time
+import copy
+import math
+import warnings
+import numpy as np
+from pathlib import Path
+import os
+
+_RANDOM_SEED = 6789
+
+
+def model_dir():
+    cur_dir = Path(os.path.abspath(__file__))
+    return str(cur_dir.parent.parent)
+
+
+def data_dir():
+    cur_dir = Path(os.path.abspath(__file__))
+    par_dir = cur_dir.parent.parent
+    return str(par_dir / "datasets")
+
+
+def random_seed():
+    return _RANDOM_SEED
+
+
+def tuid():
+    '''
+    Create a string ID based on current time
+    :return: a string formatted using current time
+    '''
+    random_num = np.random.randint(0, 100)
+    return time.strftime('%Y-%m-%d_%H.%M.%S') + str(random_num)
+
+
+def deepcopy(obj):
+    try:
+        return copy.deepcopy(obj)
+    except Exception:
+        warnings.warn("Fail to deepcopy {}".format(obj))
+        return None
+
+
+def make_batches(size, batch_size):
+    '''Returns a list of batch indices (tuples of indices).
+    '''
+    return [(i, min(size, i + batch_size)) for i in range(0, size, batch_size)]
+
+
+def conv_out_size_same(size, stride):
+    return int(math.ceil(float(size) / float(stride)))
+
+
+class Progbar(object):
+    def __init__(self, target, width=30, verbose=1, interval=0.01, show_steps=0):
+        '''Displays a progress bar.
+
+        # Arguments:
+            target: Total number of steps expected.
+            interval: Minimum visual progress update interval (in seconds).
+        '''
+        self.width = width
+        self.target = target
+        self.sum_values = {}
+        self.unique_values = []
+        self.start = time.time()
+        self.last_update = 0
+        self.interval = interval
+        self.total_width = 0
+        self.seen_so_far = 0
+        self.verbose = verbose
+        self.show_steps = show_steps
+        self.unknown = False
+        self.header = ''
+        if self.target <= 0:
+            self.unknown = True
+            self.target = 100
+
+    def update(self, current, values=[], force=False):
+        """
+        Updates the progress bar.
+        # Arguments
+            current: Index of current step.
+            values: List of tuples (name, value_for_last_step).
+                The progress bar will display averages for these values.
+            force: Whether to force visual progress update.
+        """
+        if self.unknown:
+            current = 99
+        for k, v in values:
+            if k not in self.sum_values:
+                self.sum_values[k] = [v * (current - self.seen_so_far),
+                                      current - self.seen_so_far]
+                self.unique_values.append(k)
+            else:
+                self.sum_values[k][0] += v * (current - self.seen_so_far)
+                self.sum_values[k][1] += (current - self.seen_so_far)
+        self.seen_so_far = current
+
+        now = time.time()
+        if self.verbose == 1:
+            if not force and (now - self.last_update) < self.interval:
+                return
+
+            prev_total_width = self.total_width
+            sys.stdout.write('\b' * prev_total_width)
+            # sys.stdout.write('\r')
+
+            numdigits = int(np.floor(np.log10(self.target))) + 1
+            barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
+            if self.show_steps > 0:
+                bar = self.header + '['
+            else:
+                bar = self.header + barstr % (current, self.target)
+            prog = float(current) / self.target
+            prog_width = int(self.width * prog)
+            if prog_width > 0:
+                bar += ('=' * (prog_width - 1))
+                if current < self.target:
+                    bar += '>'
+                else:
+                    bar += '='
+            bar += ('.' * (self.width - prog_width))
+            bar += ']'
+            sys.stdout.write(bar)
+            self.total_width = len(bar)
+
+            if current:
+                time_per_unit = (now - self.start) / current
+            else:
+                time_per_unit = 0
+            eta = time_per_unit * (self.target - current)
+            info = ''
+            if current < self.target:
+                info += ' - ETA: '
+                eta_hours = eta // 3600
+                eta_mins = (eta % 3600) // 60
+                eta_seconds = eta % 60
+                info += ('%dhours ' % eta_hours) if eta_hours > 0 else ''
+                info += ('%dmins ' % eta_mins) if eta_mins > 0 else ''
+                info += ('%ds ' % eta_seconds) if eta_seconds > 0 else ''
+            else:
+                info += ' - %ds' % (now - self.start)
+            for k in self.unique_values:
+                info += ' - %s:' % k
+                if isinstance(self.sum_values[k], list):
+                    avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
+                    if abs(avg) > 1e-3:
+                        info += ' %.4f' % avg
+                    else:
+                        info += ' %.4e' % avg
+                else:
+                    info += ' %s' % self.sum_values[k]
+
+            if prev_total_width > self.total_width + len(info):
+                info += ((prev_total_width - self.total_width - len(info)) * ' ')
+            self.total_width += len(info)
+
+            sys.stdout.write(info)
+            sys.stdout.flush()
+
+            if current >= self.target:
+                sys.stdout.write('\n')
+
+        if self.verbose == 2:
+            if current >= self.target:
+                info = '%ds' % (now - self.start)
+                for k in self.unique_values:
+                    info += ' - %s:' % k
+                    avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
+                    if avg > 1e-3:
+                        info += ' %.4f' % avg
+                    else:
+                        info += ' %.4e' % avg
+                sys.stdout.write(info + "\n")
+
+        self.last_update = now
+
+    def add(self, n, values=[]):
+        self.update(self.seen_so_far + n, values)
+
+
+def get_from_module(identifier, module_params, module_name,
+                    instantiate=False, kwargs=None):
+    if isinstance(identifier, six.string_types):
+        res = module_params.get(identifier)
+        if not res:
+            raise ValueError('Invalid ' + str(module_name) + ': ' + str(identifier))
+        if instantiate and not kwargs:
+            return res()
+        elif instantiate and kwargs:
+            return res(**kwargs)
+        else:
+            return res
+    elif isinstance(identifier, dict):
+        name = identifier.pop('name')
+        res = module_params.get(name)
+        if res:
+            return res(**identifier)
+        else:
+            raise ValueError('Invalid ' + str(module_name) + ': ' + str(identifier))
+    return identifier
diff --git a/model/layers.py b/model/layers.py
new file mode 100644
index 0000000..4bbe159
--- /dev/null
+++ b/model/layers.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2021, Tuan Nguyen.
+# All rights reserved.
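+
+# TF1 layer helpers used across the model: train-time Gaussian noise,
+# leaky ReLU, and per-example accuracy metrics (plain, EMA-evaluated, and
+# teacher/student averaging or entropy-voting variants).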
+ +import tensorflow as tf +from tensorflow.contrib.framework import add_arg_scope +# from tensorbayes.tfutils import softmax_cross_entropy_with_two_logits as softmax_x_entropy_two + +@add_arg_scope +def noise(x, std, phase, scope=None, reuse=None): + with tf.name_scope(scope, 'noise'): + eps = tf.random_normal(tf.shape(x), 0.0, std) + output = tf.where(phase, x + eps, x) + return output + + +@add_arg_scope +def leaky_relu(x, a=0.2, name=None): + with tf.name_scope(name, 'leaky_relu'): + return tf.maximum(x, a * x) + +@add_arg_scope +def basic_accuracy(a, b, scope=None): + with tf.name_scope(scope, 'basic_acc'): + a = tf.argmax(a, 1) + b = tf.argmax(b, 1) + eq = tf.cast(tf.equal(a, b), 'float32') + output = tf.reduce_mean(eq) + return output + +@add_arg_scope +def batch_ema_acc(a, b, scope=None): + with tf.name_scope(scope, 'basic_acc'): + a = tf.argmax(a, 1) + b = tf.argmax(b, 1) + output = tf.cast(tf.equal(a, b), 'float32') + return output + +@add_arg_scope +def batch_teac_stud_avg_acc(y_trg_true, y_trg_logit, y_trg_teacher, scope=None): + with tf.name_scope(scope, 'average_acc'): + y_trg_prob = tf.nn.softmax(y_trg_logit) + y_pred_avg = (y_trg_prob + y_trg_teacher) / 2.0 + + y_trg_true = tf.argmax(y_trg_true, 1) + y_pred_avg = tf.argmax(y_pred_avg, 1) + output = tf.cast(tf.equal(y_trg_true, y_pred_avg), 'float32') + return output + +@add_arg_scope +def batch_teac_stud_ent_acc(y_trg_true, y_trg_logit, y_trg_teacher, scope=None): + with tf.name_scope(scope, 'entropy_acc'): + y_trg_prob = tf.nn.softmax(y_trg_logit) + # compute entropy + y_trg_student_ent = -tf.reduce_sum(y_trg_prob * tf.log(y_trg_prob), axis=-1) + y_trg_teacher_ent = -tf.reduce_sum(y_trg_teacher * tf.log(y_trg_teacher), axis=-1) + min_entropy = tf.argmin(tf.stack([y_trg_student_ent, y_trg_teacher_ent]), axis=0) + + y_trg_pred_sparse = tf.argmax(y_trg_logit, 1, output_type=tf.int32) + y_trg_teacher_sparse = tf.argmax(y_trg_teacher, 1, output_type=tf.int32) + student_teacher_concat = tf.stack([y_trg_pred_sparse, y_trg_teacher_sparse], axis=1) + + y_pred_entropy_voting = tf.reduce_max(student_teacher_concat * tf.one_hot(min_entropy, 2, dtype=tf.int32), + axis=1) + + y_trg_true = tf.argmax(y_trg_true, 1, output_type=tf.int32) + output = tf.cast(tf.equal(y_trg_true, y_pred_entropy_voting), 'float32') + return output \ No newline at end of file diff --git a/model/model.py b/model/model.py new file mode 100644 index 0000000..05aaddb --- /dev/null +++ b/model/model.py @@ -0,0 +1,608 @@ +# Copyright (c) 2021, Tuan Nguyen. +# All rights reserved. 
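+
+# Model overview: a feature generator G maps source/target inputs into a joint
+# space, a classifier C predicts the m source classes, and a domain
+# discriminator D outputs m+1 logits (m class logits plus one extra unit that
+# separates source from target), matching the label-matching losses below.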
+ +from __future__ import division +from __future__ import print_function +from __future__ import absolute_import + +import tensorflow as tf +from tensorflow.contrib.framework import arg_scope +from tensorflow.contrib.framework import add_arg_scope +from tensorbayes.layers import dense, conv2d, batch_norm, instance_norm +from tensorflow.python.ops.nn_impl import sigmoid_cross_entropy_with_logits as sigmoid_x_entropy +from tensorbayes.tfutils import softmax_cross_entropy_with_two_logits as softmax_x_entropy_two + +from generic_utils import random_seed + +from layers import leaky_relu +import os +from generic_utils import model_dir +import numpy as np +import tensorbayes as tb +from layers import batch_ema_acc + + +def build_block(input_layer, layout, info=1): + x = input_layer + for i in range(0, len(layout)): + with tf.variable_scope('l{:d}'.format(i)): + f, f_args, f_kwargs = layout[i] + x = f(x, *f_args, **f_kwargs) + if info > 1: + print(x) + return x + + +@add_arg_scope +def normalize_perturbation(d, scope=None): + with tf.name_scope(scope, 'norm_pert'): + output = tf.nn.l2_normalize(d, axis=np.arange(1, len(d.shape))) + return output + + +def build_encode_template( + input_layer, training_phase, scope, encode_layout, + reuse=None, internal_update=False, getter=None, inorm=True, cnn_size='large'): + with tf.variable_scope(scope, reuse=reuse, custom_getter=getter): + with arg_scope([leaky_relu], a=0.1), \ + arg_scope([conv2d, dense], activation=leaky_relu, bn=True, phase=training_phase), \ + arg_scope([batch_norm], internal_update=internal_update): + + preprocess = instance_norm if inorm else tf.identity + + layout = encode_layout(preprocess=preprocess, training_phase=training_phase, cnn_size=cnn_size) + output_layer = build_block(input_layer, layout) + + return output_layer + + +def build_decode_template( + input_layer, training_phase, scope, decode_layout, + reuse=None, internal_update=False, getter=None, inorm=False, cnn_size='large'): + with tf.variable_scope(scope, reuse=reuse, custom_getter=getter): + with arg_scope([leaky_relu], a=0.1), \ + arg_scope([conv2d, dense], activation=leaky_relu, bn=True, phase=training_phase), \ + arg_scope([batch_norm], internal_update=internal_update): + layout = decode_layout(training_phase=training_phase) + output_layer = build_block(input_layer, layout) + + return output_layer + + +def build_class_discriminator_template( + input_layer, training_phase, scope, num_classes, class_discriminator_layout, + reuse=None, internal_update=False, getter=None, cnn_size='large'): + with tf.variable_scope(scope, reuse=reuse, custom_getter=getter): + with arg_scope([leaky_relu], a=0.1), \ + arg_scope([conv2d, dense], activation=leaky_relu, bn=True, phase=training_phase), \ + arg_scope([batch_norm], internal_update=internal_update): + layout = class_discriminator_layout(num_classes=num_classes, global_pool=True, activation=None, + cnn_size=cnn_size) + output_layer = build_block(input_layer, layout) + + return output_layer + + +def build_domain_discriminator_template(x, domain_layout, c=1, reuse=None): + with tf.variable_scope('domain_disc', reuse=reuse): + with arg_scope([dense], activation=tf.nn.relu): + layout = domain_layout(c=c) + output_layer = build_block(x, layout) + + return output_layer + + +def get_default_config(): + tf_config = tf.ConfigProto() + tf_config.gpu_options.allow_growth = True + tf_config.log_device_placement = False + tf_config.allow_soft_placement = True + return tf_config + + +class LAMDA(): + def __init__(self, + 
model_name="LAMDA-results", + learning_rate=0.001, + batch_size=128, + num_iters=80000, + summary_freq=400, + src_class_trade_off=1.0, + src_vat_trade_off=1.0, + trg_trade_off=1.0, + domain_trade_off=1.0, + adapt_domain_trade_off=False, + encode_layout=None, + decode_layout=None, + classify_layout=None, + domain_layout=None, + freq_calc_metrics=10, + init_calc_metrics=2, + current_time='', + inorm=True, + m_on_D_trade_off=1.0, + m_plus_1_on_D_trade_off=1.0, + m_plus_1_on_G_trade_off=1.0, + m_on_G_trade_off=0.1, + lamda_model_id='', + save_grads=False, + only_save_final_model=True, + cnn_size='large', + update_target_loss=True, + sample_size=50, + src_recons_trade_off=0.1, + **kwargs): + self.model_name = model_name + self.batch_size = batch_size + self.learning_rate = learning_rate + self.num_iters = num_iters + self.summary_freq = summary_freq + self.src_class_trade_off = src_class_trade_off + self.src_vat_trade_off = src_vat_trade_off + self.trg_trade_off = trg_trade_off + self.domain_trade_off = domain_trade_off + self.adapt_domain_trade_off = adapt_domain_trade_off + + self.encode_layout = encode_layout + self.decode_layout = decode_layout + self.classify_layout = classify_layout + self.domain_layout = domain_layout + + self.freq_calc_metrics = freq_calc_metrics + self.init_calc_metrics = init_calc_metrics + + self.current_time = current_time + self.inorm = inorm + + self.m_on_D_trade_off = m_on_D_trade_off + self.m_plus_1_on_D_trade_off = m_plus_1_on_D_trade_off + self.m_plus_1_on_G_trade_off = m_plus_1_on_G_trade_off + self.m_on_G_trade_off = m_on_G_trade_off + + self.lamda_model_id = lamda_model_id + + self.save_grads = save_grads + self.only_save_final_model = only_save_final_model + + self.cnn_size = cnn_size + self.update_target_loss = update_target_loss + + self.sample_size = sample_size + self.src_recons_trade_off = src_recons_trade_off + + + def _init(self, data_loader): + np.random.seed(random_seed()) + tf.set_random_seed(random_seed()) + tf.reset_default_graph() + + self.tf_graph = tf.get_default_graph() + self.tf_config = get_default_config() + self.tf_session = tf.Session(config=self.tf_config, graph=self.tf_graph) + + self.data_loader = data_loader + self.num_classes = self.data_loader.num_class + self.batch_size_src = self.sample_size*self.num_classes + + def _get_variables(self, list_scopes): + variables = [] + for scope_name in list_scopes: + variables.append(tf.get_collection('trainable_variables', scope_name)) + return variables + + def convert_one_hot(self, y): + y_idx = y.reshape(-1).astype(int) if y is not None else None + y = np.eye(self.num_classes)[y_idx] if y is not None else None + return y + + def _get_scope(self, part_name, side_name, same_network=True): + suffix = '' + if not same_network: + suffix = '/' + side_name + return part_name + suffix + + def _get_primary_scopes(self): + return ['generator', 'classifier', 'decode'] + + def _get_secondary_scopes(self): + return ['domain_disc'] + + def _build_source_middle(self, x_src): + scope_name = self._get_scope('generator', 'src') + return build_encode_template(x_src, encode_layout=self.encode_layout, + scope=scope_name, training_phase=self.is_training, inorm=self.inorm, cnn_size=self.cnn_size) + + def _build_middle_source(self, x_src_mid): + scope_name = self._get_scope('decode', 'src') + return build_decode_template( + x_src_mid, decode_layout=self.decode_layout, scope=scope_name, training_phase=self.is_training, inorm=self.inorm, cnn_size=self.cnn_size + ) + + def _build_target_middle(self, x_trg): + 
scope_name = self._get_scope('generator', 'trg') + return build_encode_template( + x_trg, encode_layout=self.encode_layout, + scope=scope_name, training_phase=self.is_training, inorm=self.inorm, + reuse=True, internal_update=True, cnn_size=self.cnn_size + ) # reuse the 'encode_layout' + + def _build_classifier(self, x, num_classes, ema=None, is_teacher=False): + g_teacher_scope = self._get_scope('generator', 'teacher', same_network=False) + g_x = build_encode_template( + x, encode_layout=self.encode_layout, + scope=g_teacher_scope if is_teacher else 'generator', training_phase=False, inorm=self.inorm, + reuse=False if is_teacher else True, getter=None if is_teacher else tb.tfutils.get_getter(ema), + cnn_size=self.cnn_size + ) + + h_teacher_scope = self._get_scope('classifier', 'teacher', same_network=False) + h_g_x = build_class_discriminator_template( + g_x, training_phase=False, scope=h_teacher_scope if is_teacher else 'classifier', num_classes=num_classes, + reuse=False if is_teacher else True, class_discriminator_layout=self.classify_layout, + getter=None if is_teacher else tb.tfutils.get_getter(ema), cnn_size=self.cnn_size + ) + return h_g_x + + def _build_domain_discriminator(self, x_mid, reuse=False): + return build_domain_discriminator_template(x_mid, domain_layout=self.domain_layout, c=self.num_classes+1, reuse=reuse) + + def _build_class_src_discriminator(self, x_src, num_src_classes): + return build_class_discriminator_template( + self.x_src_mid, training_phase=self.is_training, scope='classifier', num_classes=num_src_classes, + class_discriminator_layout=self.classify_layout, cnn_size=self.cnn_size + ) + + def _build_class_trg_discriminator(self, x_trg, num_trg_classes): + return build_class_discriminator_template( + self.x_trg_mid, training_phase=self.is_training, scope='classifier', num_classes=num_trg_classes, + reuse=True, internal_update=True, class_discriminator_layout=self.classify_layout, cnn_size=self.cnn_size + ) + + def perturb_image(self, x, p, num_classes, class_discriminator_layout, encode_layout, + pert='vat', scope=None, radius=3.5, scope_classify=None, scope_encode=None, training_phase=None): + with tf.name_scope(scope, 'perturb_image'): + eps = 1e-6 * normalize_perturbation(tf.random_normal(shape=tf.shape(x))) + + # Predict on randomly perturbed image + x_eps_mid = build_encode_template( + x + eps, encode_layout=encode_layout, scope=scope_encode, training_phase=training_phase, reuse=True, + inorm=self.inorm, cnn_size=self.cnn_size) + x_eps_pred = build_class_discriminator_template( + x_eps_mid, class_discriminator_layout=class_discriminator_layout, + training_phase=training_phase, scope=scope_classify, reuse=True, num_classes=num_classes, + cnn_size=self.cnn_size + ) + # eps_p = classifier(x + eps, phase=True, reuse=True) + loss = softmax_x_entropy_two(labels=p, logits=x_eps_pred) + + # Based on perturbed image, get direction of greatest error + eps_adv = tf.gradients(loss, [eps], aggregation_method=2)[0] + + # Use that direction as adversarial perturbation + eps_adv = normalize_perturbation(eps_adv) + x_adv = tf.stop_gradient(x + radius * eps_adv) + + return x_adv + + def vat_loss(self, x, p, num_classes, class_discriminator_layout, encode_layout, + scope=None, scope_classify=None, scope_encode=None, training_phase=None): + + with tf.name_scope(scope, 'smoothing_loss'): + x_adv = self.perturb_image( + x, p, num_classes, class_discriminator_layout=class_discriminator_layout, encode_layout=encode_layout, + scope_classify=scope_classify, 
scope_encode=scope_encode, training_phase=training_phase) + + x_adv_mid = build_encode_template( + x_adv, encode_layout=encode_layout, scope=scope_encode, training_phase=training_phase, inorm=self.inorm, + reuse=True, cnn_size=self.cnn_size) + x_adv_pred = build_class_discriminator_template( + x_adv_mid, training_phase=training_phase, scope=scope_classify, reuse=True, num_classes=num_classes, + class_discriminator_layout=class_discriminator_layout, cnn_size=self.cnn_size + ) + # p_adv = classifier(x_adv, phase=True, reuse=True) + loss = tf.reduce_mean(softmax_x_entropy_two(labels=tf.stop_gradient(p), logits=x_adv_pred)) + + return loss + + def _build_vat_loss(self, x, p, num_classes, scope=None, scope_classify=None, scope_encode=None): + return self.vat_loss( # compute the divergence between C(x) and C(G(x+r)) + x, p, num_classes, + class_discriminator_layout=self.classify_layout, + encode_layout=self.encode_layout, + scope=scope, scope_classify=scope_classify, scope_encode=scope_encode, + training_phase=self.is_training + ) + + def _build_model(self): + self.x_src = tf.placeholder(dtype=tf.float32, shape=(None, 2048)) + self.x_trg = tf.placeholder(dtype=tf.float32, shape=(None, 2048)) + + self.y_src = tf.placeholder(dtype=tf.float32, shape=(None, self.num_classes)) + self.y_trg = tf.placeholder(dtype=tf.float32, shape=(None, self.num_classes)) + + T = tb.utils.TensorDict(dict( + x_tmp=tf.placeholder(dtype=tf.float32, shape=(None, 2048)), + y_tmp=tf.placeholder(dtype=tf.float32, shape=(None, self.num_classes)) + )) + + self.is_training = tf.placeholder(tf.bool, shape=(), name='is_training') + + self.x_src_mid = self._build_source_middle(self.x_src) + self.x_src_prime = self._build_middle_source(self.x_src_mid) + self.x_trg_mid = self._build_target_middle(self.x_trg) + + self.x_fr_src = self._build_domain_discriminator(self.x_src_mid) + self.x_fr_trg = self._build_domain_discriminator(self.x_trg_mid, reuse=True) + + # use m units of D(G(x_s)) for classification on joint space + self.m_src_on_D_logit = tf.gather(self.x_fr_src, tf.range(0, self.num_classes, dtype=tf.int32), axis=1) + self.loss_m_src_on_D = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.y_src, + logits=self.m_src_on_D_logit)) + + # maximize log likelihood of target data and minimize that of source data on 11th class + self.m_plus_1_src_logit_on_D = tf.gather(self.x_fr_src, tf.range(self.num_classes, self.num_classes + 1, + dtype=tf.int32), axis=1) + self.m_plus_1_trg_logit_on_D = tf.gather(self.x_fr_trg, tf.range(self.num_classes, self.num_classes + 1, + dtype=tf.int32), axis=1) + + self.loss_m_plus_1_on_D = 0.5 * tf.reduce_mean(sigmoid_x_entropy( + labels=tf.ones_like(self.m_plus_1_trg_logit_on_D), logits=self.m_plus_1_trg_logit_on_D) + \ + sigmoid_x_entropy( + labels=tf.zeros_like(self.m_plus_1_src_logit_on_D), + logits=self.m_plus_1_src_logit_on_D)) + + self.loss_disc = self.m_on_D_trade_off*self.loss_m_src_on_D + self.m_plus_1_on_D_trade_off*self.loss_m_plus_1_on_D + + self.y_src_logit = self._build_class_src_discriminator(self.x_src_mid, self.num_classes) + self.y_trg_logit = self._build_class_trg_discriminator(self.x_trg_mid, self.num_classes) + + self.y_src_pred = tf.argmax(self.y_src_logit, 1, output_type=tf.int32) + self.y_trg_pred = tf.argmax(self.y_trg_logit, 1, output_type=tf.int32) + self.y_src_sparse = tf.argmax(self.y_src, 1, output_type=tf.int32) + self.y_trg_sparse = tf.argmax(self.y_trg, 1, output_type=tf.int32) + + ############################### + # classification loss + 
self.src_loss_class_detail = tf.nn.softmax_cross_entropy_with_logits_v2( + logits=self.y_src_logit, labels=self.y_src) # (batch_size,) + self.src_loss_class = tf.reduce_mean(self.src_loss_class_detail) # real number + + self.trg_loss_class_detail = tf.nn.softmax_cross_entropy_with_logits_v2( + logits=self.y_trg_logit, labels=self.y_trg) + self.trg_loss_class = tf.reduce_mean(self.trg_loss_class_detail) # just use for testing + + self.src_accuracy = tf.reduce_mean(tf.cast(tf.equal(self.y_src_sparse, self.y_src_pred), 'float32')) + self.trg_accuracy_batch = tf.cast(tf.equal(self.y_trg_sparse, self.y_trg_pred), 'float32') + self.trg_accuracy = tf.reduce_mean(self.trg_accuracy_batch) + + ############################# + # generator loss + self.loss_m_plus_1_on_G = 0.5 * tf.reduce_mean(sigmoid_x_entropy( + labels=tf.zeros_like(self.m_plus_1_trg_logit_on_D), logits=self.m_plus_1_trg_logit_on_D) + \ + sigmoid_x_entropy( + labels=tf.ones_like(self.m_plus_1_src_logit_on_D), + logits=self.m_plus_1_src_logit_on_D)) + + self.A_m = self.y_trg_logit + self.m_trg_on_D_logit = tf.gather(self.x_fr_trg, tf.range(0, self.num_classes, dtype=tf.int32), axis=1) + + self.loss_m_trg_on_G = tf.reduce_mean( + softmax_x_entropy_two(logits=self.m_trg_on_D_logit, labels=self.A_m)) + + self.loss_generator = self.m_plus_1_on_G_trade_off * self.loss_m_plus_1_on_G + \ + self.m_on_G_trade_off * self.loss_m_trg_on_G + + ############################# + # vat loss + self.src_loss_vat = self._build_vat_loss( + self.x_src, self.y_src_logit, self.num_classes, + scope_encode=self._get_scope('generator', 'src'), scope_classify='classifier' + ) + self.trg_loss_vat = self._build_vat_loss( + self.x_trg, self.y_trg_logit, self.num_classes, + scope_encode=self._get_scope('generator', 'trg'), scope_classify='classifier' + ) + + ############################# + # conditional entropy loss w.r.t. 
target distribution + self.trg_loss_cond_entropy = tf.reduce_mean(softmax_x_entropy_two(labels=self.y_trg_logit, + logits=self.y_trg_logit)) + + ############################# + # reconstruct loss + # self.src_reconstruct_loss = tf.reduce_mean(tf.pow(tf.norm(self.x_src - self.x_src_prime, axis=1, ord=2), 2)) / 1000.0 + ############################# + # construct primary loss + if self.adapt_domain_trade_off: + self.domain_trade_off_ph = tf.placeholder(dtype=tf.float32) + lst_primary_losses = [ + (self.src_class_trade_off, self.src_loss_class), + (self.domain_trade_off, self.loss_generator), + (self.src_vat_trade_off, self.src_loss_vat), + (self.trg_trade_off, self.trg_loss_vat), + (self.trg_trade_off, self.trg_loss_cond_entropy) + # (self.src_recons_trade_off, self.src_reconstruct_loss) + ] + self.primary_loss = tf.constant(0.0) + for trade_off, loss in lst_primary_losses: + if trade_off != 0: + self.primary_loss += trade_off * loss + + primary_variables = self._get_variables(self._get_primary_scopes()) + + # Evaluation (EMA) + ema = tf.train.ExponentialMovingAverage(decay=0.998) + var_list_for_ema = primary_variables[0] + primary_variables[1] + ema_op = ema.apply(var_list=var_list_for_ema) + self.ema_p = self._build_classifier(T.x_tmp, self.num_classes, ema) + + # Accuracies + self.batch_ema_acc = batch_ema_acc(T.y_tmp, self.ema_p) + self.fn_batch_ema_acc = tb.function(self.tf_session, [T.x_tmp, T.y_tmp], self.batch_ema_acc) + + self.train_main = \ + tf.train.AdamOptimizer(self.learning_rate, 0.5).minimize(self.primary_loss, var_list=primary_variables) + + self.primary_train_op = tf.group(self.train_main, ema_op) + # self.primary_train_op = tf.group(self.train_main) + + if self.save_grads: + self.grads_wrt_primary_loss = tf.train.AdamOptimizer(self.learning_rate, 0.5).compute_gradients( + self.primary_loss, var_list=primary_variables) + ############################# + # construct secondary loss + secondary_variables = self._get_variables(self._get_secondary_scopes()) + self.secondary_train_op = \ + tf.train.AdamOptimizer(self.learning_rate, 0.5).minimize(self.loss_disc, + var_list=secondary_variables) + ############################# + # construct one more target loss + if self.update_target_loss: + self.target_loss = self.trg_trade_off * (self.trg_loss_vat + self.trg_loss_cond_entropy) + + self.target_train_op = \ + tf.train.AdamOptimizer(self.learning_rate, 0.5).minimize(self.target_loss, + var_list=primary_variables) + + if self.save_grads: + self.grads_wrt_secondary_loss = tf.train.AdamOptimizer(self.learning_rate, 0.5).compute_gradients( + self.loss_disc, var_list=secondary_variables) + ############################ + # summaries + tf.summary.scalar('domain/loss_disc', self.loss_disc) + tf.summary.scalar('domain/loss_disc/loss_m_src_on_D', self.loss_m_src_on_D) + tf.summary.scalar('domain/loss_disc/loss_m_plus_1_on_D', self.loss_m_plus_1_on_D) + + tf.summary.scalar('primary_loss/src_loss_class', self.src_loss_class) + tf.summary.scalar('primary_loss/loss_generator', self.loss_generator) + tf.summary.scalar('primary_loss/loss_generator/loss_m_plus_1_on_G', self.loss_m_plus_1_on_G) + tf.summary.scalar('primary_loss/loss_generator/loss_m_trg_on_G', self.loss_m_trg_on_G) + + tf.summary.scalar('acc/src_acc', self.src_accuracy) + tf.summary.scalar('acc/trg_acc', self.trg_accuracy) + + tf.summary.scalar('hyperparameters/learning_rate', self.learning_rate) + tf.summary.scalar('hyperparameters/src_class_trade_off', self.src_class_trade_off) + tf.summary.scalar('hyperparameters/domain_trade_off', 
+ self.domain_trade_off_ph if self.adapt_domain_trade_off + else self.domain_trade_off) + + self.tf_merged_summaries = tf.summary.merge_all() + + if self.save_grads: + with tf.name_scope("visualize"): + for var in tf.trainable_variables(): + tf.summary.histogram(var.op.name + '/values', var) + for grad, var in self.grads_wrt_primary_loss: + if grad is not None: + tf.summary.histogram(var.op.name + '/grads_wrt_primary_loss', grad) + for grad, var in self.grads_wrt_secondary_loss: + if grad is not None: + tf.summary.histogram(var.op.name + '/grads_wrt_secondary_loss', grad) + + def _fit_loop(self): + print('Start training', 'LAMDA at', os.path.basename(__file__)) + print('============ LOG-ID: %s ============' % self.current_time) + + self.tf_session.run(tf.global_variables_initializer()) + + num_src_samples = self.data_loader.src_train[0][2].shape[0] + num_trg_samples = self.data_loader.trg_train[0][2].shape[0] + + with self.tf_graph.as_default(): + saver = tf.train.Saver(tf.global_variables(), max_to_keep=3) + + self.checkpoint_path = os.path.join(model_dir(), self.model_name, "saved-model", "{}".format(self.lamda_model_id)) + check_point = tf.train.get_checkpoint_state(self.checkpoint_path) + + if check_point and tf.train.checkpoint_exists(check_point.model_checkpoint_path): + print("Load model parameters from %s\n" % check_point.model_checkpoint_path) + saver.restore(self.tf_session, check_point.model_checkpoint_path) + + for it in range(self.num_iters): + idx_src_samples = np.random.permutation(num_src_samples)[:self.batch_size] + idx_trg_samples = np.random.permutation(num_trg_samples)[:self.batch_size] + + feed_data = dict() + feed_data[self.x_src] = self.data_loader.src_train[0][1][idx_src_samples, :] + feed_data[self.y_src] = self.data_loader.src_train[0][2][idx_src_samples] + feed_data[self.y_src] = feed_data[self.y_src] + + feed_data[self.x_trg] = self.data_loader.trg_train[0][1][idx_trg_samples, :] + feed_data[self.y_trg] = self.data_loader.trg_train[0][2][idx_trg_samples] + feed_data[self.y_trg] = feed_data[self.y_trg] + feed_data[self.is_training] = True + + _, loss_disc = \ + self.tf_session.run( + [self.secondary_train_op, self.loss_disc], + feed_dict=feed_data + ) + + _, src_loss_class, loss_generator, trg_loss_class, src_acc, trg_acc = \ + self.tf_session.run( + [self.primary_train_op, self.src_loss_class, self.loss_generator, + self.trg_loss_class, self.src_accuracy, self.trg_accuracy], + feed_dict=feed_data + ) + + if it == 0 or (it + 1) % self.summary_freq == 0: + print("iter %d/%d loss_disc %.3f; src_loss_class %.5f; loss_generator %.3f\n" + "src_acc %.2f" % (it + 1, self.num_iters, loss_disc, src_loss_class, loss_generator, src_acc * 100)) + + if (it + 1) % self.summary_freq == 0: + if not self.only_save_final_model: + self.save_trained_model(saver, it + 1) + elif it + 1 == self.num_iters: + self.save_trained_model(saver, it + 1) + + # Save acc values + self.save_value(step=it + 1) + + def save_trained_model(self, saver, step): + # Save model + checkpoint_path = os.path.join(model_dir(), self.model_name, "saved-model", + "{}".format(self.current_time)) + checkpoint_path = os.path.join(checkpoint_path, "lamda_" + self.current_time + ".ckpt") + + directory = os.path.dirname(checkpoint_path) + if not os.path.exists(directory): + os.makedirs(directory) + saver.save(self.tf_session, checkpoint_path, global_step=step) + + def save_value(self, step): + # Save ema accuracy + acc_trg_test_ema, summary_trg_test_ema = self.compute_value(self.fn_batch_ema_acc, 'test/trg_test_ema', 
+ x_full=self.data_loader.trg_test[0][1], + y=self.data_loader.trg_test[0][2], labeler=None) + print_list = ['trg_test_ema', round(acc_trg_test_ema * 100, 2)] + print(print_list) + + def compute_value(self, fn_batch_ema_acc, tag, x_full, y, labeler, full=True): + + with tb.nputils.FixedSeed(0): + shuffle = np.random.permutation(len(x_full)) + + xs = x_full[shuffle] + ys = y[shuffle] if y is not None else None + + if not full: + xs = xs[:1000] + ys = ys[:1000] if ys is not None else None + + n = len(xs) + bs = 200 + + acc_full = np.ones(n, dtype=float) + + for i in range(0, n, bs): + x = xs[i:i + bs] + y = ys[i:i + bs] if ys is not None else labeler(x) + acc_batch = fn_batch_ema_acc(x, y) + acc_full[i:i + bs] = acc_batch + + acc = np.mean(acc_full) + + summary = tf.Summary.Value(tag=tag, simple_value=acc) + summary = tf.Summary(value=[summary]) + return acc, summary diff --git a/model/run_lamda.py b/model/run_lamda.py new file mode 100644 index 0000000..c9fdae2 --- /dev/null +++ b/model/run_lamda.py @@ -0,0 +1,210 @@ +# Copyright (c) 2021, Tuan Nguyen. +# All rights reserved. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from model import LAMDA + +from layers import noise +from test_da_template_lamda import main_func, resolve_conflict_params + +from tensorflow.python.layers.core import dropout +from tensorbayes.layers import dense, conv2d, avg_pool, max_pool + +import warnings +import os +from generic_utils import tuid, model_dir +import signal +import sys +import time +import datetime +from pprint import pprint + +choice_default = 1 +warnings.simplefilter("ignore", category=DeprecationWarning) + +model_name = "LAMDA-results" +current_time = tuid() + + +# generator +def encode_layout(preprocess, training_phase=True, cnn_size='large'): + layout = [] + if cnn_size == 'small': + layout = [ + (dense, (256,), {}), + (dropout, (), dict(training=training_phase)), + (noise, (1,), dict(phase=training_phase)), + ] + elif cnn_size == 'large': + layout = [ + (preprocess, (), {}), + (conv2d, (96, 3, 1), {}), + (conv2d, (96, 3, 1), {}), + (conv2d, (96, 3, 1), {}), + (max_pool, (2, 2), {}), + (dropout, (), dict(training=training_phase)), + (noise, (1,), dict(phase=training_phase)), + (conv2d, (192, 3, 1), {}), + (conv2d, (192, 3, 1), {}), + (conv2d, (192, 3, 1), {}), + (max_pool, (2, 2), {}), + (dropout, (), dict(training=training_phase)), + (noise, (1,), dict(phase=training_phase)), + ] + return layout + + +def decode_layout(training_phase=True): + layout = [ + (dense, (2048,), {}), + (dropout, (), dict(training=training_phase)), + (noise, (1,), dict(phase=training_phase)), + ] + return layout + + +# classifier +def class_discriminator_layout(num_classes=None, global_pool=True, activation=None, cnn_size='large'): + layout = [] + if cnn_size == 'small': + layout = [ + (dense, (num_classes,), dict(activation=activation)) + ] + + elif cnn_size == 'large': + layout = [ + (conv2d, (192, 3, 1), {}), + (conv2d, (192, 3, 1), {}), + (conv2d, (192, 3, 1), {}), + (avg_pool, (), dict(global_pool=global_pool)), + (dense, (num_classes,), dict(activation=activation)) + ] + return layout + + +# discriminator +def domain_layout(c): + layout = [ + # (dense, (100,), {}), + (dense, (c,), dict(activation=None)) + ] + return layout + + +def create_obj_func(params): + if len(sys.argv) > 1: + my_choice = int(sys.argv[1]) + else: + my_choice = choice_default + if my_choice == 0: + default_params = { + } + else: + default_params = { + 'batch_size': 128, + 
'learning_rate': 1e-4, + 'num_iters': 80000, + 'src_class_trade_off': 1.0, + 'domain_trade_off': 10.0, + 'src_vat_trade_off': 1.0, + 'trg_trade_off': 1.0, + 'm_on_D_trade_off': 1.0, + 'm_plus_1_on_D_trade_off': 1.0, + 'm_plus_1_on_G_trade_off': 1.0, + 'm_on_G_trade_off': 0.1, + 'src_recons_trade_off': 0.0, + 'lamda_model_id': '', + 'classify_layout': class_discriminator_layout, + 'encode_layout': encode_layout, + 'decode_layout': decode_layout, + 'domain_layout': domain_layout, + 'freq_calc_metrics': 10, + 'init_calc_metrics': -1, + 'log_path': os.path.join(model_dir(), model_name, "logs", "{}".format(current_time)), + 'summary_freq': 400, + 'current_time': current_time, + 'inorm': True, + 'save_grads': False, + 'cast_data': False, + 'only_save_final_model': True, + 'cnn_size': 'large', + 'update_target_loss': True, + 'data_augmentation': False, + } + + default_params = resolve_conflict_params(params, default_params) + + print('Default parameters:') + pprint(default_params) + + learner = LAMDA( + **params, + **default_params, + ) + return learner + + +def main_test(run_exp=False): + params_gridsearch = { + 'learning_rate': [1e-3, 1e-2], + } + attribute_names = ( + 'learning_rate', 'same_network', 'src_class_trade_off', 'trg_trade_off', + 'src_vat_trade_off', 'domain_trade_off', 'adapt_domain_trade_off', 'num_iters', 'model_name') + + main_func( + create_obj_func, + choice_default=choice_default, + src_name_default='mnist32_60_10', + trg_name_default='mnistm32_60_10', + params_gridsearch=params_gridsearch, + attribute_names=attribute_names, + num_workers=2, + file_config=None, + run_exp=run_exp, + freq_predict_display=10, + summary_freq=100, + current_time=current_time, + log_path=os.path.join(model_dir(), model_name, "logs", "{}".format(current_time)) + ) + +class Logger(object): + def __init__(self): + self.terminal = sys.stdout + self.console_log_path = os.path.join(model_dir(), model_name, "console_output", "{}.txt".format(current_time)) + if not os.path.exists(os.path.dirname(self.console_log_path)): + os.makedirs(os.path.dirname(self.console_log_path)) + self.log = open(self.console_log_path, 'a') + signal.signal(signal.SIGINT, self.signal_handler) + + def signal_handler(self, sig, frame): + print('You pressed Ctrl+C.') + self.log.close() + + # Remove logfile + os.remove(self.console_log_path) + print('Removed console_output file') + sys.exit(0) + + def write(self, message): + self.terminal.write(message) + self.log.write(message) + + def flush(self): + # this flush method is needed for python 3 compatibility. + # this handles the flush command by doing nothing. + # you might want to specify some extra behavior here. + pass + +if __name__ == '__main__': + # pytest.main([__file__]) + sys.stdout = Logger() + start_time = time.time() + print('Running {} ...'.format(os.path.basename(__file__))) + main_test(run_exp=True) + training_time = time.time() - start_time + print('Total time: %s' % str(datetime.timedelta(seconds=training_time))) + print("============ LOG-ID: %s ============" % current_time) diff --git a/model/test_da_template_lamda.py b/model/test_da_template_lamda.py new file mode 100644 index 0000000..cca9498 --- /dev/null +++ b/model/test_da_template_lamda.py @@ -0,0 +1,177 @@ +# Copyright (c) 2021, Tuan Nguyen. +# All rights reserved. 
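+
+# Command-line contract (see parse_arguments below): argv[1] selects the
+# experiment choice, argv[2]/argv[3] name the source and target domains,
+# and the remaining arguments are parsed as space-separated key/value pairs.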
+
+from __future__ import division
+from __future__ import print_function
+from __future__ import absolute_import
+
+import os
+import sys
+
+import numpy as np
+import tensorflow as tf
+
+from generic_utils import random_seed
+from generic_utils import data_dir
+from dataLoader import DataLoader
+
+
+def test_real_dataset(create_obj_func, src_name=None, trg_name=None, show=False, block_figure_on_end=False):
+    print('Running {} ...'.format(os.path.basename(__file__)))
+
+    if src_name is None:
+        if len(sys.argv) > 2:
+            src_name = sys.argv[2]
+        else:
+            raise Exception('Source dataset not specified')
+    if trg_name is None:
+        if len(sys.argv) > 3:
+            trg_name = sys.argv[3]
+        else:
+            raise Exception('Target dataset not specified')
+
+    np.random.seed(random_seed())
+    tf.set_random_seed(random_seed())
+    tf.reset_default_graph()
+
+    print("========== Test on real data ==========")
+
+    users_params = dict()
+    users_params = parse_arguments(users_params)
+
+    # data_format = 'libsvm'
+    data_format = 'mat'
+
+    if 'format' in users_params:
+        data_format, users_params = extract_param('format', data_format, users_params)
+
+    if len(users_params['data_path']) == 0:
+        data_path = data_dir()
+    else:
+        data_path = users_params['data_path']
+
+    data_loader = DataLoader(src_domain=src_name,
+                             trg_domain=trg_name,
+                             data_path=data_path,
+                             data_format=data_format,
+                             dataset_name='office31_resnet50_feature',
+                             cast_data=users_params['cast_data'])
+
+    assert users_params['batch_size'] % data_loader.num_src_domain == 0
+
+    print('users_params:', users_params)
+
+    learner = create_obj_func(users_params)
+    learner.dim_src = data_loader.data_shape
+    learner.dim_trg = data_loader.data_shape
+
+    # trg_test[idx] holds [domain_name, x, y]
+    learner.x_trg_test = data_loader.trg_test[0][1]
+    learner.y_trg_test = data_loader.trg_test[0][2]
+    # learner.x_src_test = x_src_test
+    # learner.y_src_test = y_src_test
+
+    print("dim_src: (%d)" % (learner.dim_src[0]))
+    print("dim_trg: (%d)" % (learner.dim_trg[0]))
+
+    learner._init(data_loader)
+    learner._build_model()
+    learner._fit_loop()
+
+
+def main_func(
+        create_obj_func,
+        choice_default=0,
+        src_name_default='svmguide1',
+        trg_name_default='svmguide1',
+        params_gridsearch=None,
+        attribute_names=None,
+        num_workers=4,
+        file_config=None,
+        run_exp=False,
+        keep_vars=[],
+        **kwargs):
+
+    if not run_exp:
+        choice_lst = [0, 1, 2]
+        src_name = src_name_default
+        trg_name = trg_name_default
+    elif len(sys.argv) > 1:
+        choice_lst = [int(sys.argv[1])]
+        src_name = None
+        trg_name = None
+    else:
+        choice_lst = [choice_default]
+        src_name = src_name_default
+        trg_name = trg_name_default
+
+    for choice in choice_lst:
+        if choice == 0:
+            pass
+            # add another function here
+        elif choice == 1:
+            test_real_dataset(create_obj_func, src_name, trg_name, show=False, block_figure_on_end=run_exp)
+
+
+def parse_arguments(params, as_array=False):
+    # Key/value pairs start at argv[4]; argv[1:4] are positional.
+    for it in range(4, len(sys.argv), 2):
+        params[sys.argv[it]] = parse_argument(sys.argv[it + 1], as_array)
+    return params
+
+
+def parse_argument(string, as_array=False):
+    try:
+        result = int(string)
+    except ValueError:
+        try:
+            result = float(string)
+        except ValueError:
+            if str.lower(string) == 'true':
+                result = True
+            elif str.lower(string) == 'false':
+                result = False
+            elif string == "[]":
+                return []
+            elif ('|' in string) and ('[' in string) and (']' in string):
+                result = [float(item) for item in string[1:-1].split('|')]
+                return result
+            elif (',' in string) and ('(' in string) and (')' in string):
+                split = string[1:-1].split(',')
+                result = float(split[0]) ** np.arange(float(split[1]), float(split[2]), float(split[3]))
+                return result
+            else:
+                result = string
+
+    return [result] if as_array else result
+
+
+def resolve_conflict_params(primary_params, secondary_params):
+    for key in primary_params.keys():
+        if key in secondary_params.keys():
+            del secondary_params[key]
+    return secondary_params
+
+
+def extract_param(key, value, params_gridsearch, scalar=False):
+    if key in params_gridsearch.keys():
+        value = params_gridsearch[key]
+        del params_gridsearch[key]
+        if scalar and (value is not None):
+            value = value[0]
+    return value, params_gridsearch
+
+
+def dict2string(params):
+    result = ''
+    for key, value in params.items():
+        if type(value) is np.ndarray:
+            if value.size < 16:
+                result += key + ': ' + '|'.join('{0:.4f}'.format(x) for x in value.ravel()) + ', '
+        else:
+            result += key + ': ' + str(value) + ', '
+    return '{' + result[:-2] + '}'
+
+
+def u2t(x):
+    """Convert uint8 to [-1, 1] float
+    """
+    return x.astype('float32') / 255 * 2 - 1
diff --git a/tensorbayes.tar b/tensorbayes.tar
new file mode 100644
index 0000000000000000000000000000000000000000..e1a24cfede21e1c50d76c78c027460581eef00b9
Binary files /dev/null and b/tensorbayes.tar differ