From c146166d4bb1628dedbed7b566a1862474c8b6c2 Mon Sep 17 00:00:00 2001 From: Louise Deason Date: Tue, 8 Dec 2020 13:54:10 +0000 Subject: [PATCH] Initial release of "mmv". PiperOrigin-RevId: 346305536 --- README.md | 1 + mmv/README.md | 83 ++++++ mmv/config.py | 85 ++++++ mmv/eval_ucf101.py | 465 ++++++++++++++++++++++++++++++ mmv/imgs/mmv_fig.png | Bin 0 -> 54006 bytes mmv/models/mm_embeddings.py | 519 ++++++++++++++++++++++++++++++++++ mmv/models/normalization.py | 143 ++++++++++ mmv/models/resnet.py | 329 +++++++++++++++++++++ mmv/models/s3d.py | 503 ++++++++++++++++++++++++++++++++ mmv/models/s3d_test.py | 88 ++++++ mmv/models/tsm_resnet.py | 353 +++++++++++++++++++++++ mmv/models/tsm_resnet_test.py | 65 +++++ mmv/models/tsm_utils.py | 177 ++++++++++++ mmv/models/tsm_utils_test.py | 60 ++++ mmv/models/types.py | 36 +++ mmv/requirements.txt | 9 + mmv/utils/checkpoint.py | 29 ++ mmv/utils/ucf101_dataset.py | 70 +++++ 18 files changed, 3015 insertions(+) create mode 100644 mmv/README.md create mode 100644 mmv/config.py create mode 100644 mmv/eval_ucf101.py create mode 100644 mmv/imgs/mmv_fig.png create mode 100644 mmv/models/mm_embeddings.py create mode 100644 mmv/models/normalization.py create mode 100644 mmv/models/resnet.py create mode 100644 mmv/models/s3d.py create mode 100644 mmv/models/s3d_test.py create mode 100644 mmv/models/tsm_resnet.py create mode 100644 mmv/models/tsm_resnet_test.py create mode 100644 mmv/models/tsm_utils.py create mode 100644 mmv/models/tsm_utils_test.py create mode 100644 mmv/models/types.py create mode 100644 mmv/requirements.txt create mode 100644 mmv/utils/checkpoint.py create mode 100644 mmv/utils/ucf101_dataset.py diff --git a/README.md b/README.md index 43d7ce7..5183689 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,7 @@ https://deepmind.com/research/publications/ ## Projects +* [Self-Supervised MultiModal Versatile Networks](mmv), NeurIPS 2020 * [ODE-GAN: Training GANs by Solving Ordinary Differential Equations](ode_gan), 
NeurIPS 2020 * [Algorithms for Causal Reasoning in Probability Trees](causal_reasoning) * [Gated Linear Networks](gated_linear_networks), NeurIPS 2020 diff --git a/mmv/README.md b/mmv/README.md new file mode 100644 index 0000000..35449ac --- /dev/null +++ b/mmv/README.md @@ -0,0 +1,83 @@ +# Self-supervised Multimodal Versatile Networks + +This is the code for the models in MMV - https://arxiv.org/abs/2006.16228. + + + +We also make available the code for linear evaluation of a pre-trained model +in UCF101 and the JAX checkpoints for our best models. + +We use different parameters for video compression in UCF101 than the ones +used in `tensorflow_datasets`. We provide the code to download and +preprocess the dataset. The eval_ucf101.py script reproduces the results we +report in Table 2 of the paper, using the checkpoints provided below. + +Visual Backbone | Training Dataset | Results on Linear UCF101 +------- | -------- | -------- +S3D-G | AudioSet + HowTo | 89.6 +Resnet TSM-50 | AudioSet + HowTo | 91.5 +Resnet TSM-50 (x2) | AudioSet + HowTo | 91.8 + + +## Setup + +To set up a Python virtual environment with the required dependencies, run: + +```shell +python3 -m venv mmv_env +source mmv_env/bin/activate +pip install --upgrade pip setuptools wheel +pip install -r mmv/requirements.txt --use-feature=2020-resolver +``` + + +### Linear evaluation + +The linear evaluation on UCF101 can be run using: + +```shell +python -m mmv.eval_ucf101 \ + --checkpoint_path= \ + --dataset_folder= +``` + +## Checkpoints + +We provide three checkpoints containing the best pre-trained weights for each +of the visual backbones we use in the paper, i. e., S3D-G, Resnet-50 TSM, +and Resnet-50 TSM x 2. 
+ +- [S3D-G](https://storage.googleapis.com/deepmind-research-mmv/mmv_s3d.pkl) +- [Resnet-50 TSM](https://storage.googleapis.com/deepmind-research-mmv/mmv_tsm_resnet_x1.pkl) +- [Resnet-50 TSMx2](https://storage.googleapis.com/deepmind-research-mmv/mmv_tsm_resnet_x2.pkl) + +## References + +### Citing our work + +If you use that code for your research, please consider citing our paper: + +```bibtex +@inproceedings{alayrac2020self, + title={{S}elf-{S}upervised {M}ulti{M}odal {V}ersatile {N}etworks}, + author={Alayrac, Jean-Baptiste and Recasens, Adri{\`a} and Schneider, Rosalia and Arandjelovi{\'c}, Relja and Ramapuram, Jason and De Fauw, Jeffrey and Smaira, Lucas and Dieleman, Sander and Zisserman, Andrew}, + booktitle={NeurIPS}, + year={2020} +} +``` + +### Models in TF + +You may also be interested in using our TF-Hub release models available at: + +- [S3D-G](https://tfhub.dev/deepmind/mmv/s3d/1) +- [Resnet-50 TSM](https://tfhub.dev/deepmind/mmv/tsm-resnet50/1) +- [Resnet-50 TSMx2](https://tfhub.dev/deepmind/mmv/tsm-resnet50x2/1) + +## License + +While the code is licensed under the Apache 2.0 License, the checkpoints weights +are made available for non-commercial use only under the terms of the +Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) +license. You can find details at: +https://creativecommons.org/licenses/by-nc/4.0/legalcode. diff --git a/mmv/config.py b/mmv/config.py new file mode 100644 index 0000000..f877acb --- /dev/null +++ b/mmv/config.py @@ -0,0 +1,85 @@ +# Copyright 2020 DeepMind Technologies Limited. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Configuration parameters for MMV.""" + + +def get_model_config(ckpt_path): + """Returns the model configuration to be used with each checkpoint.""" + + config = { + 'audio_backbone': 'resnet50', + 'audio_model_kwargs': { + 'bn_config': { + 'create_offset': True, + 'create_scale': True, + 'decay_rate': 0.9, + 'eps': 1.0e-5 + } + }, + 'bn_config_proj': { + 'create_offset': True, + 'create_scale': True, + 'decay_rate': 0.9, + 'eps': 1.0e-5 + }, + 'config_audio_text': { + 'embedding_dim': 512, + 'toaud_bn_after_proj': False, + 'toaud_head_mode': 'linear', + 'totxt_bn_after_proj': False, + 'totxt_head_mode': 'linear' + }, + 'config_video_audio': { + 'embedding_dim': 512, + 'toaud_bn_after_proj': True, + 'toaud_head_mode': 'mlp@512', + 'tovid_bn_after_proj': False, + 'tovid_head_mode': 'linear' + }, + 'config_video_text': { + 'embedding_dim': 256, + 'totxt_bn_after_proj': True, + 'totxt_head_mode': 'linear', + 'tovid_bn_after_proj': False, + 'tovid_head_mode': 'linear' + }, + 'mm_embedding_graph': 'fac_relu', + 'name': 'text_audio_video', + 'sentence_dim': 2048, + 'use_xreplica_bn': True, + 'vision_model_kwargs': { + 'bn_config': { + 'create_offset': True, + 'create_scale': True, + 'decay_rate': 0.9, + 'eps': 1.0e-5 + }, + 'n_frames': 32, + 'width_mult': 1, + }, + } + + if 's3d' in ckpt_path: + config['visual_backbone'] = 's3d' + + if 'tsm_resnet_x1' in ckpt_path: + config['visual_backbone'] = 'resnet50tsm' + + if 'tsm_resnet_x2' in ckpt_path: + config['visual_backbone'] = 'resnet50tsm' + config['vision_model_kwargs']['width_mult'] = 2 + + return config 
diff --git a/mmv/eval_ucf101.py b/mmv/eval_ucf101.py new file mode 100644 index 0000000..bf28f5b --- /dev/null +++ b/mmv/eval_ucf101.py @@ -0,0 +1,465 @@ +# Copyright 2020 DeepMind Technologies Limited. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""UCF101 linear evaluation.""" + +import functools +from typing import Any, Dict, Optional + +from absl import app +from absl import flags +import haiku as hk +import jax +import jax.numpy as jnp +import numpy as np +import sklearn +from sklearn import preprocessing +import sklearn.linear_model +import sklearn.svm +import tensorflow as tf +import tensorflow_datasets as tfds + +from mmv import config +from mmv.models import mm_embeddings +from mmv.utils import checkpoint +from mmv.utils import ucf101_dataset + + +flags.DEFINE_string('checkpoint_path', '~/tmp/mmv_s3d.pkl', + 'The directory to load pre-trained weights from.') +flags.DEFINE_string('dataset_folder', '/tmp/ucf101', + 'The directory with the ucf101 dataset.') + +flags.DEFINE_integer('eval_batch_size', 1, + 'The batch size for evaluation.') +flags.DEFINE_integer('train_batch_size', 16, + 'The batch size for training.') +flags.DEFINE_integer('num_train_epochs', 10, + 'How many epochs to collect features during training.') +flags.DEFINE_integer('num_test_windows', 10, + 'How many windows to average on during test.') +flags.DEFINE_integer('min_resize', 224, + 'Min value to resize images to during preprocessing.') +flags.DEFINE_integer('crop_size', 224, + 
'Value to resize images to during preprocessing.') +flags.DEFINE_integer('num_frames', 32, + 'Number of video frames.') +flags.DEFINE_integer('stride', 2, + 'Stride for video frames.') +flags.DEFINE_integer('ucf101_split', 1, + 'Which split of ucf101 to use.') + + +FLAGS = flags.FLAGS + + +def get_sampling_offset(sequence: tf.Tensor, + num_steps: Optional[int], + is_training: bool, + stride: int = 1, + seed: Optional[int] = None) -> tf.Tensor: + """Calculates the initial offset for a sequence where all steps will fit. + + Args: + sequence: any tensor where the first dimension is timesteps. + num_steps: The number of timesteps we will output. If None, + deterministically start at the first frame. + is_training: A boolean indicates whether the graph is for training or not. + If False, the starting frame always the first frame. + stride: distance to sample between timesteps. + seed: a deterministic seed to use when sampling. + Returns: + The first index to begin sampling from. A best effort is made to provide a + starting index such that all requested steps fit within the sequence (i.e. + offset + 1 + (num_steps - 1) * stride < len(sequence)). If this is not + satisfied, the starting index is chosen randomly from the full sequence. + """ + if num_steps is None or not is_training: + return tf.constant(0) + sequence_length = tf.shape(sequence)[0] + max_offset = tf.cond( + tf.greater(sequence_length, (num_steps - 1) * stride), + lambda: sequence_length - (num_steps - 1) * stride, + lambda: sequence_length) + offset = tf.random.uniform( + (), + maxval=tf.cast(max_offset, tf.int32), + dtype=tf.int32, + seed=seed) + return offset + + +def sample_or_pad_sequence_indices(sequence: tf.Tensor, + num_steps: Optional[int], + is_training: bool, + repeat_sequence: bool = True, + stride: int = 1, + offset: Optional[int] = None) -> tf.Tensor: + """Returns indices to take for sampling or padding a sequence to fixed size. + + Samples num_steps from the sequence. 
If the sequence is shorter than + num_steps, the sequence loops. If the sequence is longer than num_steps and + is_training is True, then we seek to a random offset before sampling. If + offset is provided, we use that deterministic offset. + + This method is appropriate for sampling from a tensor where you want every + timestep between a start and end time. See sample_stacked_sequence_indices for + more flexibility. + + Args: + sequence: any tensor where the first dimension is timesteps. + num_steps: how many steps (e.g. frames) to take. If None, all steps from + start to end are considered and `is_training` has no effect. + is_training: A boolean indicates whether the graph is for training or not. + If False, the starting frame is deterministic. + repeat_sequence: A boolean indicates whether the sequence will repeat to + have enough steps for sampling. If False, a runtime error is thrown if + num_steps * stride is longer than sequence length. + stride: distance to sample between timesteps. + offset: a deterministic offset to use regardless of the is_training value. + + Returns: + Indices to gather from the sequence Tensor to get a fixed size sequence. + """ + sequence_length = tf.shape(sequence)[0] + sel_idx = tf.range(sequence_length) + + if num_steps: + if offset is None: + offset = get_sampling_offset(sequence, num_steps, is_training, stride) + + if repeat_sequence: + # Repeats sequence until num_steps are available in total. 
+ num_repeats = tf.cast( + tf.math.ceil( + tf.math.divide( + tf.cast(num_steps * stride + offset, tf.float32), + tf.cast(sequence_length, tf.float32) + )), tf.int32) + sel_idx = tf.tile(sel_idx, [num_repeats]) + steps = tf.range(offset, offset + num_steps * stride, stride) + else: + steps = tf.range(0, sequence_length, stride) + return tf.gather(sel_idx, steps) + + +def random_sample_sequence(sequence: tf.Tensor, + num_steps: int, + stride: int = 1) -> tf.Tensor: + """Randomly sample a segment of size num_steps from a given sequence.""" + + indices = sample_or_pad_sequence_indices( + sequence=sequence, + num_steps=num_steps, + is_training=True, # Random sample. + repeat_sequence=True, # Will repeat the sequence if request more. + stride=stride, + offset=None) + indices.set_shape((num_steps,)) + output = tf.gather(sequence, indices) + return output + + +def sample_linspace_sequence(sequence: tf.Tensor, + num_windows: int, + num_steps: int, + stride: int = 1) -> tf.Tensor: + """Samples num_windows segments from sequence with linearly spaced offsets. + + The samples are concatenated in a single Tensor in order to have the same + format structure per timestep (e.g. a single frame). If num_steps * stride is + bigger than the number of timesteps, the sequence is repeated. This function + can be used in evaluation in order to extract enough segments in order to span + the entire sequence. + + Args: + sequence: Any tensor where the first dimension is timesteps. + num_windows: Number of windows retrieved from the sequence. + num_steps: Number of steps (e.g. frames) to take. + stride: Distance to sample between timesteps. + + Returns: + A single Tensor with first dimension num_windows * num_steps. The Tensor + contains the concatenated list of num_windows tensors which offsets have + been linearly spaced from input. 
+ """ + sequence_length = tf.shape(sequence)[0] + max_offset = tf.maximum(0, sequence_length - num_steps * stride) + offsets = tf.linspace(0.0, tf.cast(max_offset, tf.float32), num_windows) + offsets = tf.cast(offsets, tf.int32) + + all_indices = [] + for i in range(num_windows): + all_indices.append( + sample_or_pad_sequence_indices( + sequence=sequence, + num_steps=num_steps, + is_training=False, + repeat_sequence=True, # Will repeat the sequence if request more. + stride=stride, + offset=offsets[i])) + + indices = tf.concat(all_indices, axis=0) + indices.set_shape((num_windows * num_steps,)) + output = tf.gather(sequence, indices) + + return output + + +def resize_smallest(frames: tf.Tensor, min_resize: int) -> tf.Tensor: + """Resizes frames so that min(height, width) is equal to min_resize. + + This function will not do anything if the min(height, width) is already equal + to min_resize. This allows to save compute time. + + Args: + frames: A Tensor of dimension [timesteps, input_h, input_w, channels]. + min_resize: Minimum size of the final image dimensions. + Returns: + A Tensor of shape [timesteps, output_h, output_w, channels] of type + frames.dtype where min(output_h, output_w) = min_resize. 
+ """ + shape = tf.shape(frames) + input_h = shape[1] + input_w = shape[2] + + output_h = tf.maximum(min_resize, (input_h * min_resize) // input_w) + output_w = tf.maximum(min_resize, (input_w * min_resize) // input_h) + + def resize_fn(): + frames_resized = tf.image.resize(frames, (output_h, output_w)) + return tf.cast(frames_resized, frames.dtype) + + should_resize = tf.math.logical_or(tf.not_equal(input_w, output_w), + tf.not_equal(input_h, output_h)) + frames = tf.cond(should_resize, resize_fn, lambda: frames) + + return frames + + +def process_samples(features_dict, num_frames=32, stride=1, is_training=True, + min_resize=224, crop_size=224, num_windows=1): + """Process video frames.""" + + video = features_dict['video'] + + if is_training: + assert num_windows == 1 + video = random_sample_sequence(video, num_frames, stride) + is_flipped = tf.random.uniform((), minval=0, maxval=2, dtype=tf.int32) + video = tf.cond(tf.equal(is_flipped, 1), + true_fn=lambda: tf.image.flip_left_right(video), + false_fn=lambda: video) + else: + video = sample_linspace_sequence(video, num_windows, num_frames, stride) + + # Resize smallest side. + video = resize_smallest(video, min_resize) + + if is_training: + # Random crop. + video = tf.image.random_crop(video, [num_frames, crop_size, crop_size, 3]) + else: + # Central crop. + video = tf.image.resize_with_crop_or_pad(video, crop_size, crop_size) + + video = tf.cast(video, tf.float32) + video /= 255.0 # Set between [0, 1]. 
+ + features_dict['video'] = video + return features_dict + + +def space_to_depth_batch(features_dict): + images = features_dict['video'] + _, l, h, w, c = images.shape + images = tf.reshape(images, [-1, l // 2, 2, h // 2, 2, w // 2, 2, c]) + images = tf.transpose(images, [0, 1, 3, 5, 2, 4, 6, 7]) + images = tf.reshape(images, [-1, l // 2, h // 2, w // 2, 8 * c]) + features_dict['video'] = images + return features_dict + + +def reshape_windows(features_dict, num_frames): + x = features_dict['video'] + x = tf.reshape(x, (-1, num_frames, x.shape[2], x.shape[3], x.shape[4])) + features_dict['video'] = x + return features_dict + + +def compute_accuracy_metrics(pred, gt, prefix=''): + order_pred = np.argsort(pred, axis=1) + assert len(gt.shape) == len(order_pred.shape) == 2 + top1_pred = order_pred[:, -1:] + top5_pred = order_pred[:, -5:] + top1_acc = np.mean(top1_pred == gt) + top5_acc = np.mean(np.max(top5_pred == gt, 1)) + return {prefix + 'top1': top1_acc, + prefix + 'top5': top5_acc} + + +def forward_fn(images: jnp.ndarray, + audio_spectrogram: jnp.ndarray, + word_ids: jnp.ndarray, + is_training: bool, + model_config: Dict[str, Any]): + """Forward pass of the model.""" + + # This should contain the pre-trained weights. We set it to zero because it + # will be loaded from the checkpoint. 
+ language_model_vocab_size = 65536 + word_embedding_dim = 300 + dummy_embedding_matrix = jnp.zeros(shape=(language_model_vocab_size, + word_embedding_dim)) + + module = mm_embeddings.AudioTextVideoEmbedding( + **model_config, + word_embedding_matrix=dummy_embedding_matrix) + return module(images=images, + audio_spectrogram=audio_spectrogram, + word_ids=word_ids, + is_training=is_training)['vid_repr'] + + +def main(argv): + del argv + + sklearn_reg = 0.001 + model_config = config.get_model_config(FLAGS.checkpoint_path) + + forward = hk.without_apply_rng(hk.transform_with_state(forward_fn)) + forward_apply = jax.jit(functools.partial(forward.apply, + is_training=False, + model_config=model_config)) + + # Get the UCF101 config. + dset_config = tfds.video.ucf101.Ucf101.BUILDER_CONFIGS[FLAGS.ucf101_split] + + builder = ucf101_dataset.ModUcf101( + data_dir=FLAGS.dataset_folder, + config=dset_config) + # Create the tfrecord files (no-op if already exists) + dl_config = tfds.download.DownloadConfig(verify_ssl=False) + builder.download_and_prepare(download_config=dl_config) + + # Generate the training dataset. + train_ds = builder.as_dataset(split='train', shuffle_files=False) + train_ds = train_ds.map(lambda x: process_samples( # pylint: disable=g-long-lambda + x, num_frames=FLAGS.num_frames, stride=FLAGS.stride, is_training=True, + min_resize=FLAGS.min_resize, crop_size=FLAGS.crop_size)) + train_ds = train_ds.batch(batch_size=FLAGS.train_batch_size) + if model_config['visual_backbone'] == 's3d': + train_ds = train_ds.map(space_to_depth_batch) + train_ds = train_ds.repeat(FLAGS.num_train_epochs) + + # Generate the test dataset. 
+ test_ds = builder.as_dataset(split='test', shuffle_files=False) + test_ds = test_ds.map(lambda x: process_samples( # pylint: disable=g-long-lambda + x, num_frames=FLAGS.num_frames, stride=FLAGS.stride, is_training=False, + min_resize=FLAGS.min_resize, crop_size=FLAGS.crop_size, + num_windows=FLAGS.num_test_windows)) + test_ds = test_ds.batch(batch_size=FLAGS.eval_batch_size) + test_ds = test_ds.map(lambda x: reshape_windows( # pylint: disable=g-long-lambda + x, num_frames=FLAGS.num_frames)) + + if model_config['visual_backbone'] == 's3d': + test_ds = test_ds.map(space_to_depth_batch) + test_ds = test_ds.repeat(1) + + pretrained_weights = checkpoint.load_checkpoint(FLAGS.checkpoint_path) + params = pretrained_weights['params'] + state = pretrained_weights['state'] + + # Collect training samples. + audio_frames = 96 + mel_filters = 40 + num_tokens = 16 + dummy_audio = jnp.zeros( + shape=(FLAGS.train_batch_size, audio_frames, mel_filters, 1)) + dummy_word_ids = jnp.zeros( + shape=(FLAGS.train_batch_size, num_tokens), dtype=jnp.int32) + + train_features = [] + train_labels = [] + print('Computing features on train') + training_examples = iter(tfds.as_numpy(train_ds)) + for train_ex in training_examples: + vid_representation, _ = forward_apply(params=params, + state=state, + images=train_ex['video'], + audio_spectrogram=dummy_audio, + word_ids=dummy_word_ids) + train_labels.append(train_ex['label']) + train_features.append(vid_representation) + if len(train_labels) % 50 == 0: + print(f'Processed {len(train_labels)} examples.') + + train_labels = np.concatenate(train_labels, axis=0) + train_features = np.concatenate(train_features, axis=0) + print(f'Finish collecting train features of shape {train_features.shape}') + + # Collect test samples. 
+ dummy_audio = jnp.zeros( + shape=(FLAGS.eval_batch_size, audio_frames, mel_filters, 1)) + dummy_word_ids = jnp.zeros( + shape=(FLAGS.eval_batch_size, num_tokens), dtype=jnp.int32) + + test_features = [] + test_labels = [] + print('Computing features on test') + test_examples = iter(tfds.as_numpy(test_ds)) + for test_ex in test_examples: + vid_representation_test, _ = forward_apply(params=params, + state=state, + images=test_ex['video'], + audio_spectrogram=dummy_audio, + word_ids=dummy_word_ids) + test_labels.append(test_ex['label']) + test_features.append(vid_representation_test) + if len(test_labels) % 50 == 0: + print(f'Processed {len(test_labels)} examples.') + + test_features = np.concatenate(test_features, axis=0) + test_labels = np.concatenate(test_labels, axis=0) + print(f'Finish collecting test features of shape {test_features.shape}') + + # Train classifier + print('Training linear classifier!') + classifier = sklearn.svm.LinearSVC(C=sklearn_reg) + scaler = preprocessing.StandardScaler().fit(train_features) + train_features = scaler.transform(train_features) + classifier.fit(train_features, train_labels.ravel()) + print('Training done !') + + # Evaluation. + test_features = scaler.transform(test_features) + print('Running inference on train') + pred_train = classifier.decision_function(train_features) + print('Running inference on test') + pred_test = classifier.decision_function(test_features) + if FLAGS.num_test_windows > 1: + pred_test = np.reshape( + pred_test, (test_labels.shape[0], -1, pred_test.shape[1])) + pred_test = pred_test.mean(axis=1) + + # Compute accuracies. 
+ metrics = compute_accuracy_metrics(pred_train, train_labels[:, None], + prefix='train_') + metrics.update( + compute_accuracy_metrics(pred_test, test_labels[:, None], prefix='test_')) + print(metrics) + +if __name__ == '__main__': + app.run(main) diff --git a/mmv/imgs/mmv_fig.png b/mmv/imgs/mmv_fig.png new file mode 100644 index 0000000000000000000000000000000000000000..6a5551f0efcc0436d5c4ea3f9150b8acea1422a3 GIT binary patch literal 54006 zcmZs?Wmp|cur-Va4em~mKyVMP!7aGEySux)y9W>MZowUbySqEQ!#UT!d%s`%nZ0Lv zx~r>KtyNV!R90F90Tu@q1Ox;@OjPJ62ne_n2nZ-OG!*bnGzBpW2nZ2~m=M2$i`Gfz zCkKuGxxx44`m(j~^qn=%rfG&b2WS{j0cc;oD`buUB+;+Af}&u#5Mac_#5a0r-46N@ z?&o%mZggc8>yHy=fff?;XJ#YIbD8!I5LfO@&R*Adrz_1;`?qfGG2{>dK1fJl0({WW zpu|M~Uf4?TQG{u(RAxfc{-0lEA%v>qKk@&MyZ?O@X&?9@{2CJNGVuS5m-lyw=|6`4 zAMXjV2h020{|FZL@ISu%kD17w>|p<|NdY(YeFv30+42At_V~}4{`Zq#ovz^jdj^P#n}Yx5y%|DPoig=KPq|L4g6^`tL51yPu0 z23<}F-TyHbQonT8e_8e4Cv$Ycw5l*UGNpS_!c5eNfH5(Yh(WNqwCKduaeudz;NYeo z%O08tC)T1Te&#&Vu(WY+FU16R5M{)QF|Q#R9L*+8AT89j5XbmYXwYp$4@IA*LGtn9 z5CYJSi3cn7Wd(mvOPXIl&)GOjGo9GEtz1A3KvQm@4ASFb%*B76HwuI!=>M#trzKN= z?PUHiBwrhfiOu)lMI|`qX=cAi)^^+xIZl2#G~|tK2U#b7!gAy=aB@S$NU_AKSo2r? 
zT0U%yei#C^0naIevv&+;Jsxye5rmp~^{Vr+mR^)<%0`nE;p z@E?yvq6CworNEdy87Li~Dz_@_{_L(_6+FzWM$gKk-VPT%eiF<-KdW_5iQk6_I0tis zmQyq?q2g)Zw_Ww$&4oO9(}mcQ>@-yn`#T8yazE{a2IIy72R&GZa8ao*&qb+9u8SY0HxanZba$@RRyrUsOEOX-(t{Yq`}a$!)7Oh-S*#=j~MzV%LD=O!b1&pwX$ z2oTA^OwRb=Vi71=TA#j#I{eOnQlGKWdXvpaU_w&5l!eB~)&RVS4-2}fjle^-RhiO( zYa`6*8}qV_^czw}l6r(gJlRt3h>IL4;B#oY&MdP*dulJ9_Z%LIjdTNX4?%<}34U;} z!P_@0C~_Cf)g&YVNFg#{x8tl_jn3R_B`a&@Y*#i&qO4f9YF03`r);pMKkV6C5_tY` z=U`I4z_Z;Nng{DeqeNWd`LXhu?Y<1(ro4BoP>HBpJs}ehxf2qIhGMHs0>rm!WA3c= zRxL89NwxHZH2-x39!U-6`UZNEXrvHuFs{^&Mh8l>49m3mq#cb2>c;{qZ&ecObTk3oA!A(nNA6Zi93Mt!$06o8uMh!dyYL zq?;V82?cB$N50#(N^3fg8}}GRD|8S~Aih76!K2SqLhnnROuQN##9E9tAV_hg(vlS+ zJ~qz_nqK13`_8f_zICB%E~{FjY?`TiSc)t@#!!5^G=xMs2EZ*i6cuaQVzIaWcHGN* z?qiN&)SIGClT~*YH3BbA7pg0=5skW-5q1HhGid74Z@t`w5-gimXkXhrYS9}JvP$*B z3t>KvxtoMeaXYfJrDk5u>wv@veVVQOkO5x$9IuuT#&<-109d|q6O@bDJttXLcQmt# ze+hENn(u6_c0rLI$3|O=Dr5r&*-8UB{>I&+54l$UHFDWCBc2VblYkl2sGZnqpU4jm zlo&}JtSZ^MU!PMcZ>+2zBt2_!bcn!5U7|=dt|Uv&^a)n(B=0vDry8#9?$ZP_wl&Jh zO%*OPGVfO`oD)mCb^r)rM190p$I>xM-O?}DjEvAWI&_(Yn#1n{(H;Z&;Gx@oJA=P9 zm`lO`_7K6 zpMnGyG!j`9t#CYAF@ZCZTZcdFOPFR%mtQj%aw3P^nV&1zjv8@H3IvK=3FTsleA*GM&Nx?6xu)gd>F@MA}zdGAqfP{qz@O^^I**wE3{G;fA(||}w+->+_mg5cbAV6)Sq9p~2ZSkux5K<1iZrww z+F``Qq0fR8J0cce)D00A0F0iX>4n(U?Ex7j?+E<60s8y$)w3)tM>O){T{C6ASV;h{ zej~X!Jj1qWbdXVd_6t&AqJ37b=lXZUUm%iZl{b`eI}Vum#zzSKj>9Q#Qy&MOg@mi& zu3_I?2gTbI8orOVo3*wAsQXUf45os@p9btesF;qbAX%l_yQ`k`8B(pA+09f+Oxn!; z9t|o6n81b(*cdH^SDPEpFdmI3?8Nq$=)Q!THXfFJm(237PP!V+=#h#K{VfwcfAwD+ z9D(DoqQUsx3j#w7fHvq9zVPKh!m2mu=cT84qadFr^|;HertfiYoEyc$v}j;C|eE;SDF|+YuKZ|#qwU|V!eWvl$eO- z?H1Mp9Q3o;#@n=^OTM0Zb|Gn{sHfAR;g&#-e^*`$qeksz8IQ!yXdBAPPsYNDab7KQ z+Az%&rK8Q$kxTK%dHP$7?A4vr3K!EA7*U4dP-&0K98nsG&*T@Dp5MLNH%EA)e`v%$ zy?h}8d7}R~*qG%{(23ISghe1+ta_|PhG;jYP-@w2fDKe~3vbaM3<+TixY25woBNwA zFJ|dw6KOvpA&W10GAd^I8#H>o)7_@H+a+CeBvrCnk$mqbra96X@ zx3@2!9@JE$Xm^qNE0agIZ{2J!)1s@J3pd!E&!&IgNXchZOMnQ^63}i*u$iJd(FGbOToJ6X09Ay(_1^; zC%c}6v&qa@b-#6Z4Bith9yPmGq!3Eb;z55VrBT7iTACs|6VnSkd6cyb7uX25_M|=( 
z)sMvx`35)84VHOw%@#3f1+BL6TpD0GUJ*oW=wsR~0NWKY?!0Z8j=8y=w%UF|R&CXI zaN}ZprBQ%A7Rthq8i{lu2*Fdj6uR6%wJ&_zTz2jTiR_>Av~3Pl2uKhjj1&HXcLviHm$2*LWjXxwy+q?rTdF_EcR_{_x^U zv5kFm-u|PaqX#q|zn?OaW8Lxj(NP=F)AhiwYQz`fix^yO`hiM)QxSOaGj^AKzw(*C zL_=S311Hbzl9{XBohB>(A)XcnLpAPlI3QkU^3aW6#R?t3Z43xj=7p^|mh3*ggC;Eb zt&nZKR3TB~$!}nzxi)X5h}AP|vt?c_Ev^6)x*BZ|ygLrWT$#JMUCTqG7H(M`SFph5V3~k`2dr%AwYY`Be<$fBc0<2|cSnx-My6v?6>HC)MM9H?eu;us zI#hr*3F9M`Xt%Lu<;+L2ulYnYV&GdjL(IPJ?cwdI*@_5vJTmL;ybyN-1q2boL8IH= zx*M>RrEPCbEkq?|g_6HrbY}NmsGv2*y9fhgWbt1nYaJGGKUOD77k5cf0);wIXtbS> z!Ou=rt3CKBG4Bc@w3w|b(YpU=z|6wDf_LXrQukYM@xFf`*kllY{=ZU9^TLZBGrt!whDXR z7dZ9WW#AA{4=gx7`{ATj#zse^1oDfO{Qdhgc-`U+>Ul}c@94|la8t4ugQL{n*b&`p zR0|)`_6r!H(rF*QoM>8)fA%`!cf3^c@H9%q3@ys_FcIf+=wZu{;jJ+;d3rWuq_h?3 zm0C+mp0ZU`TQ_IK%%JVsPeLAe*Z`IFJwVOrKYk#y?j{H)t6A$hD14H z2`CVubiTTu?$;Y(R$Rrq7-jKhR8H24Nn5_thWyP444ZcVF|u{Df>|s5+TY8W)Vmug zBsBE)&DN`1R)f<-uP#|+02Mf#un??l3!8Z?gd)16KA+B`qf#RI=M`88XdKzBUT}^- zZ~D3B=y+tWvAUEF|9!r=LqF2QiF7jUlxu2xPceZf(}xfR$Pfs>V@G+<6fBj~lu4c2 ztJ#Lj%P*Z2yZjrM_6KbMIU4K@Ws8@Bi+4ApdyT$opQ^KZ=Sw%_PZh)%GJhSx5+L$` zpjEwQ0B|C(s#oxanVXvsuz6I1lQKa^cIEPeqHO(jO#}uPmtGP_U5n|AK)GpYXxNE< zTBzQnL@j1Y>X?36@T4)m_^1HG--gdz}@OR7BPv}RiUc1k4pS#aPwoTy1 zB>0K^7=@yVT(EqAea4Hu{d48Nxpp;?9~A*hflW#e96HDW)6Oysh4X<5b&Nn8W0S@R ziJ^hlUSpz>gaVGQmZq}l*?hcBM}-U~VB*cpbRaq+2wL)F*#-R+Ej0B9L^E-DFTgOjcn#A9o0#gN0TVx)5n<(9lHMQZEHsN0+P1=wK5!+Fp@*1e*T224h-d{`I_ zn%eqTU*a)ubdTsdVt1eP0%BSLAPDiVub0|EznORxul{7jwzq@Yie66I$UoAQOMFe8 z8vG@sJPiauQEIULiU*L^r5^3Q98PhgOaf>4?;3`UJ3JGcw%gqRv5?-qBj|oY{G;OL zQ|(xmr{R6GzP+atlE8gno0;e63#=ukLvPhsWl;saCo^~5&cTSca77j2NByJfno1#0 zm#;zH&cI;{KWZY0Xd&}U^Vw(ass)OSh`fTGNeVh=SO5gj!QR&z#WL@H)b6HNxVZFL z&~xJ9jx=4YHZb~(!RSY0w@r|OFF>XW)8g5;8BIdG{MJ!nZ;AVmRg=)Z7>#g)w607? 
z(|D<>1t{WEumhfJaY3-?9QMEBl+fZrf2F9}tLl?ktzTIxpmaC{*$-~?+Sr}t{JM?< zZ6ybk9<&V9@j~J!69UBCXIg79rj(1W5Y?2JxC&|e6#VOZb&Ty1f*O5t+z6@$s{$Hu zHtRR2Z+Yc0c8oF+9OBJtV!)Z@TZSN5>(DSPiZIFtpD=v=u^s1?uVW&U*Rv$yfO~$3 zbkx(c`5^pW?V@`gl(Qw*!@@hj^X@hoVB<*f3M5u?K*6YC-~1H`e`uoAPvG*%C)&zA zb0woTT0UZ|&n7#5gKNh~;R<8nK3{G?BUBKX@SLSX8=-zFFfo{Q9r6KU(k}J}a5;?SKs5 zsR6QWKw%+~`mS3%k(*)LbB6ygU;WZ+Ogy0{@4aSe{qvHH^CWV0+=u0Yz^rpWC{kNr zUcv#eC5DN#lJ^RWgm+qd1tGnf-@;Queu-nir~MKqwCQ~0pOapZZz))o z?ChUc;9~S>;5acp3mhggTMsn%+uud(zU`0inajMHkpR;J)FRPpfVA43bc}Q0zMXe@ zM&}KGeQ#w=SB2#n2OM5}(TXe=Oz|E@8QRu$sZ)SR43Ya|r|s3P_YC{ zuFyoPFOSRIFEX1#ml;^OoBlL=z>AE9H)Tcy)eVYqCbtCmGuaIis}rJ&mvJsk)S z&u3k=CPDV!Q{Fe%Hko=B3vcG&KF|s#8k^F)?kS(>9Cl*YK{anE9dhKg5b>?n8uhKk z+%XwD28z7!*I#s~%_~xhrL+7uRr!*9I|_gZfLfxm8}~oUNmb6K4(OJWjyM$Hj2%rJ zr<*0rRSwKKAu-2GN3SDfg)OEEH!%gYWNoTJcIdyeZ5I5uFI6R2cK32B^$DSzT_0$9 z$xYoqw_)B}^#Lj!uWmv@UJH&%&O~e=5)i3&O4TOu^7fZ~2l;vobrFk@0D1KDpk`%V z*rG7!fw`)@(BxtKj*9p>J5-}q(NSFc;*3!OUS?FlPJ|VKI|x$Uw+m3u z@OjF|?8ilwfjLlX7X2VXL|x$5gLxT3rA(=@L)b1pR(q$xkTV>$8a^ z;FW@byFzhX_Pksydnuxyi9J7kWB`bCE`Mf`ygQuw0ft}SHT8ln+0P*t>5LaK7vEVNqeo`i<3%i zE4#FW#Pmxz$pDZ$J1LQL0I4KK@&bGRwwz}MIao7hvc6wfcYd8Rm0_rZvm#BF#Jh0aE1@*C5$=@MI>{LnL4)G>^GcW zm`0xW1yOu&OpxrpRDHF*Zy?jT8&zot5C8FNL+jt^cDa7z?V@$R3A=I<_CF6Fd5-p# z+aH0wS2Cm|pw|5C>>eqr)PpK4|ohTa*A>@ zzO$t|O70r?aZ+LF89pm`1os|Kbp7qw-xe(G`7_9NjDu>viNIp#s(xO?SOcoxQ*O?4 zClLa>ICN6DA4w`Zd==%Lrb25-SLFvfUhYN}!#3u|R%%bJR%OqFWiBgtC1H2t{PDS> zxT0BhL}?x*uh5qB*#0SRS$gI)!C@7^_5pkw!;aayuJC4>P8f-Yt%QJ-d5!xyn|)u; zQbtO(tcIHqzDF2@7X?V42TjD=Oo0@y$wr%*+hjQrD9`K_B7j^t!FZ@^M`>F>Fz$gd zN?b@~-nqV^D08iN8|+vZV9}UG4?62pFRQxx+u9H9sDu&eJCgt*EG8OUCMH_-stEQZ z+E8DXMl&gBkud?Qew@{?$hf^F9L|$sZQ@ij+WSOx-Ed&kWO!WC8BjdYdohejfWHzW z<d+KWG_BMsZE$KAHBk zVL-Ev{Nnm8Kk-&JjTIV5G4*g{v~GExSebv`9KTrIk|Da70Yc~GVZz8XkY&DnS-W+y zd7w^YxqbpzkiCTG0~A7avGET=f7_Bp;x(GakH|#>=Dk*pLYHV6HE**T#YD{V2@yWs z*CAe5v-j^FE~{tc^;Phe2xRZY-|@sFPt4;1!MZZ2uyoh*1^xj{uuamgziX&T9r)w4 zf=A)cTxXZRm~FH7FWoNH9+h@d;%!vacmVc5gE5~RIP1)At=Ch=bS3^@SRV)yP41R< 
zq`w-ZHj_9Bo#^6_>j2EJl7@8C!3OnqgSB$uw9X`Sq!hu6H?c+5eD4Z$V3q88#Ua3$^;KP#@rGdLlll6VcCS zV=41PCtU7sytdhcF57?#^fyFUV)tk@K5)QGrcs^Hzb_!*ssU(zB@}9WTYfCUkEEeI zR^Bfb*y2Uc63M1ph6YoImqW8mbEd5y1+OiC5Pf^=l{#6}p6Q`WW$?i=$rNhat~9A< zJesta(^%wMdZv6?)S3{a55&v{p>e8PG0io^rYVJHU!Ag^3t0eLpE#G~K>u!3hJh3c zfBFH=mvSm<+P>OW4eujHDC=>x^K=LDJ zY>{+W!JPTMCqm81sNfWLw+ z*5BxtKk{2^6)RCj&L`V4?5+EQ1i`UGshgRZ^I_MU?Qb<~B;v?tQXQo!bYlN*{eQ(9j%FB$y@SW_>k%{7fMV+y$ zCyDw)4A>|DhF*2ZFKrc>fXu6<5j(=gd0OM5k@*xjo|VbwHMRA~yehZ4bnas1%Vck) z)${U3Gi)#>G*K#WW%RRYQJ91HbY~)JD)6!V&_bbIS3%+$&2QIVBa2>j$0rEG%)=?n zv?qC}az9er567$?fq}P%x(Ae0=c>H_*$eR2N`K_w;<-in2gsQ|XiKK@XH#4l^|P)h zx0FxVUx0rwkc=c{5-vb>0$E>|Kf?W1fj`6v`%t)#$=;1$V1Ux?;7@q$=YI^_yIno9 z767L_@o6UD{?=*Er@E=h#6WwH=o%9YhQI6dcI(P9ThCHnN?^2@Egma7F0wO{2L}qj zjO_!wa_Er%T3Yu+8)gUNhEZ&4gjUSl;# zq!3&2U0HWnsP2RuO*Kad4~?X-ArqgJ3NXNsNiLLxDbHv$adOw{7fwfSz&`R<2H^8+ z(+-refOlwpHIiwu+Fp1liZ&S983?YXcqP3f9hHf5pn#|o*|@!`aM5|TJB7g@4)u~} zgf#v`=@@Vz-Y7uC17nlz{5;>EqTT+DhXN^J7Saee3)>#vj%>yiX;#m@&m(iG3? z?oqA6_zDqiT)EjiC-uW`EuJKy$NCS0GKY`*!54 zg#IkP43_Of1;YVKe8ZVv9dTT`1km__{B1X+oq8UXbKWdu&68aulBfn)XgP3ua??b` zs3deiIv>f$kAcsWp;q2`e}z+=^C*F9;RoN?vp~KKBvGM{SFyW32uTv>5QBvle7q|6 z(>EbG08o%M=o`tx>?p)F>C0#rV14^2Vnjb)9{S!k3?Il>04>|xTIGNZfF(f*G0B>B zD2tR~Cv^>}hzB(`ksSa@;dslhZdQ)17YmwY0RNi*8i}>08?-oW`yA6tKbnO6-*w!? z*84K2txf!qsgtPAC21g|S4o!@5?}L4JTIZi3q{z47|9(hd+wzBwQxlC*etxA9#lLB z+(vg3`lapNB;_^;8Ts7)^yW8ZxjFP~qmd|Bp(ut5`T+XZBd| zSm+D|z{5=#g}VBUtm(psh7K;PJ&rH!Af4D;kgjpVoL2La5a?iP6tc$_z5dI|gTe!& zpkkCw`WL!b>1h>?Vaq{7^z02#2I! zLDkOJN~!-TKP&bIj#|2Xsl;Xm1(M0(iM0IL!l_9>{S9=!+5NWzHqOepO8E5d!#*Z_ zf8y&P$&k)$fd`3#$;XQ#)m6^aR2mVzR~pb1rQKDY;FKwtN3<2#!IDE&c{D@|35h3* zp-(K$m9k&}Wyr#I-aCDDvD_^$r&Dbz&5Hb?!}~~NPk(Pu;eWdj8?$m@VXV)cci8o! 
zWeH)aA=1SVL~Er7M`&mT%81qq7V_23)lL)4hd1oR?jYlISV5aZC=!?_YQwj{syt-6 zwr{DF3zUZ&^$hhDN2flHD-KC&LfNcdpSOKF!s9$DK|Rl}f?zi{|F z;RO0Jix}Sy0ypajm_@lz33)>1vE_Vqn$q|D6PqG;c)a9khFVvUOH%rip<^fGg_xbY z5h9gbLSZne8vrFy53uyK;|sG#3=frhR7)+HS#pc1LtDEuqFb-{J{h@QT5v!JiNO z-}SadKd3XbuPih@fvcrF!dk^rlw#!E-}xmZaW>7hctE%}C0Oic+E^&@4mn`6)?7{j z$b_R+Y}ANahFezM63WY>vaEn_$6;G)DQJm}l^j=CLntLW#4i^&OEqcIxqzlv3 zu4Q3Ssd(PNa)ht!&nR*brj@%`v}Ud^Uo#sN1e4vj%BV|7n3{ql%Kp&=G|#$F*NIqG zy}TA6E^LnSsJlz_-Y`+`uiT~PD;fe7FCAHG{31K)!*WK`6*So&_qDAUx0Yv z*@j`IaZMaoqCe!TpZOfT0N%I&?k|m4D_zCP_;#-zyBIV@sd}XOPfCn9MijziPTx|t z1M#qD2%N!Ry4b5GLhKK{>a4c}Dp)2EUnnO00hRD!SWqCI0DBKy5RHEXo_)BlTR?*2 zKfnBsJAghrdMkt#G=acI!iWTR<%* zvzt>g0o)A8^8|GjXM2U|M{{FD^E`d6Y~I5e`{S5HMCkQEEX{F9%JufJ-E>r1#{8`q z`h$e7lhf;TfsAH+N2Tk0v0H~CuH_{KQhht1|BMFoVW6^aM=1Z{eq4Af5igg~-Vicb!3elXM743IA2?$TaddPv+3D>) zn#x2`sS#w=eyg$Rc>4#WN)}590q?kCs0RVLtq6NXE-=cOx5WWS+(Q?BfL zxZGwTNeubm@Y*DN%ETC7)~oe{!yrQkSU)#~+TiKX@={{{jnFo`7UZrxz~Al{cv#HQD~<^1msDuZsjiOLuL zgfAcw>Kzw7aJ@^7fT92#5;_rNT_U0?e@+_cn&_wP&i3_w*(PkdnU)~SSNE%`l?3{f z61#*kH6f^kR~&YBcTE*X33l}|He!ec71V60$}TY|{%%9;f&uf}iUUDRQ#r~hFQ=y6 z#mvnY+ndH>PH!}t5)w@T{twc;K-`_JhHq`@$u_$KyS_~@ zDu~D(fNR}`tnKwkW;Bt7e}5ed^Wk^z5KV*b+mUPvVCbd-IUj8jWPfVA$7T(aHOT>Q zEiaZ7gziHHCC34_#vO%xps}Gr37wkvvJbU8fc{^Ps3x}oj6j8XSyWV{5>B#ho&wYo zakMgYgr1+*-=nhMU-qefJKY>ib+1q@0F^Z&2m$iRrKQpYBBSkB0BHoyM9kUVj+8rl z*5B^fEEa2o=NbWT0gMasBAss;Ci{}EhRJ>F@RsY z*z6TNQ?>%S2Kk^N#C_I~c%P=C6+*_O*j58R5qcn5HR9e-vF~TtU!Db zu{4pU=@Cv#e8=@Trwaf;iN&d_R?44Djk7%)!sZ$lEz;uHvI3C%X|t4$k!pBeZ)Tdd z!oL01?S;DC{I}sd1j3j1^-!kiZpl10DJh8atOy-<@Fg#nf^E?R?p#n_5}%n3PpjRsB_79?=TU(jh_Ux`T}8 z^{|P>ZbOvfVGaN%z9HVjcqXs6SHTpy#rua(P4fUC`_ND4l`B2gFZGolL$Qb$f?3pO zc;zEpkzT7GoOL`~!4q()6mIeym)8R2#WvQKv@6fyrWPRkM_E`|SSTztcznB+q94`m z2MM?3=tj~ASk6|+hhg*v?V{8FB@Y)Kiar$H2`bWQ$ z0FeenWy=v&qC$Z^Q~H?rp|0$EPaqr+fg)@WFmJtFz|N|A2O}X@B{>^`K&tn6wJVn_ z0R=czjx~yo9@jjTkws=F83o1mj0`X2z3=7p3slHlnX>-L-|A1HWBzq{c_ir4eZNUe zb3c-P%vNbl;S}4}9gpNqCE_wN%H)rJeqgh6bClkO9niGRJ1A0rs04t68-oQg&UxNO 
ztBG8p^zH5S-1}R&W*gf)xFMc|zWz49e6FG0@qEQ4k8mu|)b7hp{M&4fN-~=Vzi#aj z7cu*Qcr&_t=1C#NA*?b#q9WgKDRHIM>GUTC=NlRZhMA{$TA~aZ=NPTjgx_WFc>E=} z)JisRSnYuR)| z!Z0b4Q?g(E`GOc`nX8V5IQ1m3?F{GbVq?OX`qYzP7$v|L4{?y$j|Xe`yySi>3h2Byxw0@oP~p^H&_?}{Apmf#*>a&CR##5;yxXc_oi;vHvfa7`~E@Q z5c}faovPrEjrK}INh#JW%w%pUE-tM+0>0Kh(cIATER!^lU#RC#-Vw zMQgJ))N1o`+N4lF(l7|l(8y@ZACX6PNWLI28$i8HW1KkxLYEED#MG$D)Mpq8hADSIbpl&~VDiaaBrcXphq8L-#IfB^1{2xgvDj z9Nj$I-rt^G063q0F|9B_=#7YoaLFqDLkqDL1)_;nTU)Cb>c6t0v5SsEXO-ghH=>gV zpjWbriiKPtURUwi8w0r32?5@^UN}HAE7PQj^PrBrk@Yd&hK#VuU=+UXeJ(|7H1C^A zKU7UeC~rp~9bMMRisOQ&t!fq7(nLT6CN~&7RI$Kd&)i!8Zl=ilp^SIfezrFO03F0y z9+&+GFc{sjSQOn`O%bQBy@YQ*>Q>EUe)*>P=2^7zi<|CA1jre@=Hg`H`EZw%#Tj{K zB2z~m8$q%2paXa{)q7|%YaczhNSa`n+8)(U1iEr|1U-HF`66(LVC!zjG%j2Ox0Sk} zcY}o98lNM2>T-Y%1Q-MAR^5zMfXpU9uUIe`Qu97s&i?Atv>#)kDkG?Mb>g8Zf&)WW zQTz1k_f?&E+J} zMJu4&C8W6GGnwvF+4%Z=k64H+wI&$O#HRj2kUQ60HgHc@Q4jEufGLyN9WJ4-#4&#$ zR1z{&=}|Xr0+YPGc%RzUR7ir)*0-`2pD9$(TGyB&ToJjtbvZP$$mOf`NAcwc1|;$s z2gmvnDCHHXr+p*oK)fwG9FONqP!OjBfCMqu!-jv;VNrS)b?iG3ZClWSO*2i0MkXer z>zj0Ov%^;3U)SF^s|N|fC8~VZd>#PhkdM+wy5t@*VJs{hID99zd)oEmo^C*{M#(I% zFYIpbdyIP^A2sx3jkNQCYOPVf-b2h_!OCiLB%^`vvogUMCMISXG^mp^>gvEKm--mv2yFcATQwNO5oJW;n%p+X+B=;(@bfoa(cm;Kr263orM~=$bB+usv(u*L%J@4eJBQuL2`_%kdDg5TQ5w zmdR83$USf*e&_RS{)zpf*w8}w{xJ9xjwNbdPDsiIOp9XjhGnu1`l1E+YtIN$7c25_ zcyNcM?-7rH60@0=6DhG{xZa;Qx78hpz|I$kWXJ})53q2*JCR$XAH=hd7=|7m2>4d_ zHkQh?pTm>7n5;kLn2{RseB=xi_qlnjF6}|(|1jb^wy_KxzW^dhQLh#QBVMYYu&{rb zS!j(%Jg1|BK!)eBAnf~gG77FfkqVUVWDdV)LYO4y8njJsA1M`}_>t=qp0I7} zzv;tuk5z`g5zTR=nS6lRusV49*^cl#3bGs(R9-83Rrtvdkcv3U8n%H;BU2~~it{F& z@RCryLIwci>zvg^!4j1HA%>O=WGx+w+{x3lnYlw{-F)>$(Yo583%?h8$ptzZnu&rLvIO*rjL%TGigFwS zB-i%>_dS3^mUR3sfbn%9?rOra`d!(6rv`-Dk})*IWtDKbkMJ)?zCjI+!vlfW3P$)> z0a}IP0gc&S!IhjJ~M+n`SI$d zrjRwW*The`=TmF%W#sTiK%L>apHvU44VLFxtjN3p>|LS>e!!Ot0%%+i5UUJ@1s#~O z)gK+B$CYu*0+??UYQ`^UVt*jh4SnGd+pqg5JgxYAGb5wO^;Q2I%ux{e;G=QcBsD)- z#Ln)Dzn7_%kxhMtxw;KfW>RoWw9VyGFL8e5JQX>-c{oLYs1n*m&1XQyHwhi2EjP51 
z2`p*zv@-3JaVtW_OxL;IAq{~;&6J*qvvrH?^ju9*(T}-rMJ&;4v|fP`3qP?9q_$C| z*+D6?T=Itdo|nw49N|YTW)yOx%Z|bJ@<> z)LLKmTd4$r4F!X)fQW|U_y$cYDO8y79T9zF6d~_53HR93&I-70F4%1@u0dGwnM(Il z)pp&$5Z7n8AEE2?;8iY@eTsDt;1fVKS%zZvfM_Zu;gq`O)27aHM9X0kb}GMHOZ^*t zdEijI<7+H)j=%OJ*`~w(#K~PmX^h2u-Y0PB^_-+5{UK`WIbyxhf-6K%6tS~xg5#Ka zk|tq&(6ca9Z$Vi}P4L=a?~7H0D)c=-!;V<(k)>`WDJIsbp*(vL*n}J~>i%4ihAwVA ze3627Am?+A{$SfV2xoxE^sSxXQRiM?nXF;BCejR7CEs-Je8vw83!7cf_I`6sRE)3u zl8LJijXKp*DzEHDWJ((2!LD<*eoS28e+JW}w)$0F zA$s7n0w446hNA$3g4kRVw*0$hw%&abWj1jOH$yx7Ow9dk6wi~%Zj=}zthW(I5q&k(~RCrf(I+z6E^){?)`>StA#wJ= zEz^I`q=v0lDzLXfp<&}Ptl;rSBW7ZY;voDb2iXHn+a80Ut{cDPoLPUM)g7(keddfW zjUeDw^OSj7|K`wbfP&!$Gl^cCmj||=>72EbwZZYWZ3?a{~^Ox-HPPq($AY}-*SW-A*6&MUUVC{mEo zkrS_UUYBs_g2OnvD<_O88IguOO=4D7R%ZuM;zXKaD)ebq7B6j+dShu(FEvMw`H7WY z1RJI&9pB_*WxBU9JARhVf^+ zbsLvRVdunZM+9yM#Ev;jtM`sbTYY1NfKC|uNfwl&?RlHHBx)nL5uM99JNf`x#NrRy zP*T#7jIhm|@*2V*?Uik271pfVan^F2K=#{SKLeVE(Ba*ng4f6>@0-uMUJt@2HX`O- zCUDeM235&;++>$Bm=r9TP*!995u8MNRw`GA>y4sD`|TeojweCz*naM3&Bfb~u7I#e zG)%)aQ_G$BzK^%!{r-BSq_*d>)#P}rbo;IV_PD~-5UT&M{K0OiofW_Ytu)mR-43L( zVM%2`E#g6oW|J@&eJjfyxE+ne8NOnUMQjeytGHj^27iWRlL`zBETw;p6(Mkq z!K&N4{J!f8QH$}6OEVdvzUiJ4itww;R=$5=G}Ys-s^jS>>NqvcW1&)W_*Uy9oSj0b zDT5=x&2BEa-dO*QN< zjGpss-ke zOLAwu@7TH-1lgvnQ6l#ZKn_@D4hpv9R;N&XDk+*J%44>fhTa`NMQ=4+EGg_`z+SOcHC;bMUDdI^Ua1kee{bj%{@02^-zCN9Z z!t5oB$qnQ?fnmRU8pgC=zZCz^UI2Da0*UZU)ZapExMFVj5vLg6lNaDx2a9J}nz>AD zTxIv|@}XO8W$r|#(*4(t-1loiS}1wlwueRAKbrBXF8c4s2&!=1OEL6hZWk_GG=8T{ z$0jq#y{bsYGW5@IbR*En$fxK%Uk(x~R~os!-cwAi@X4p<@A@x7Vq+a!M=-eosYHG_ zXnETinlbH%I0kXa)l=oaE&`)iG3Oo6m0x}(DSfvZcx)W+uEUFeo!k<+XZt5-S7*@n zbW}EMm|PiKt}J30N#QSvV*6{L7o5kRE8~KVK#5+0SO#a2w)KQDu_VS8^lbo(qNM=u z7ZOyRIJe575@*F9w)!`g&Qy<$?-D?=qxG-B1$2RW)I~Kpaq6zK!=1ec8gM{>27coJwD0X zZr2Xne`V62j_=8?sJ!m(^*tW+ryuxojTC|L-8=tkAO*`?4?-=s1dCSXH{V=M967`j zHK;SwWK#}z(V}uk&Om&SYfCoK*e{V9e#F|fdxDMUWO01-7PsV2_0o+`B~C4uUyd=^ zDP1LQ7;vsVVY5mjjU(S481=e7ilSJGdYUQ=8ex{tYr(5>_LF@M9NdiZLBzcFjKD{L z=T?MFUSSk29BT`>t^5cAqfEH!VAR{JWYyT{4u)N0=P%<_48M)h*LF`A 
z2888zGBo&Pe;O#9h8@Fkd%?T;>-Kw^#`$wU*M2qqA;PeiP=A#8Cwj1hnE68&y+l@21i*9oawCU~ADDa{bim)TL+WU23>1-Eoz(P1b>#Ci7{j(%GYeppNcwP<0$^b z@?rWd>)`Xo!8xB|JNM0~b@Kx#qLOyjELVD@p{7o)ktH@xpphrKqP2G};&hiVx75p$ zkEt<*DJGi%#uXb?hqVa8L52vEg_-%63+uaSb-$jR_8k|E5ncvwR%Ka7X;~Sko*+l7 z-L&5R1huI7Cz%Aq@^%NH4pBDS41aLGV)DHpot%xxfgf-g5`LeOz^|QEyjGO!Y)eq)7kk_Jp;x3(y5>d<*YgN`9>bmK4~wj%QI6-QLL((#9VNBwRD zZYK$KyPPwVD^6)!dADwLn?Bz{6%1FI1iO-aO*=idN}(0D^n&eeiN6nUazt8nbe8W1 zIj=yeWg(a##Pk30bdO<`eec7-vu(R3+dR1@+nQ{h{LK?4+qNg$_T-vu+wM8@yRQH9 zwilgq?X&l~vDW?BTynJyw;Zh-eYjofnw<+fw4-TJQAr<(i4uh5ru?n5#Si_YSQB^x z4&3iI(tfyP4q5n46H;fykd8XM8KR$!o3mUo(dAC%cI?eRFO>XW=l#C@nawGTN=<{p z3mchYPSju}iLDs`cAc-|DUBQ@;+TI5pbA+X6h^Lr%E_Vx7&mt%;c#^_d=~J|%Puq+ zQ*PC0Z|w(fJRooYN*Ax{w_=np02^)vUD0 zm4T}FI$#qF%aTG1#gJXVVqTc+k=i*?mB{%GwM5>W{+1v6*34nQ`B^}x@j5$dTu9gv zU2xSXK_Ze~4)u@s7iRM);%djJMW2s{S zI!(A%oBt3D^q!a#87;3_u988;WG=90D8_9?a1#YrzT2mKVR40+HJN4Lx`M@d7>l9m za0VriA~gH}L%lzFcDD`$&?Z_Y`7IB6}#m@1}i|FfeDy zntctGCQEIVYzl!r%y-k6c4D8-6&IbkCmIYxcQBQ^npu-Ec|H70%i72<20JsnWButSf-Ldn<8h8dM;YU8ELTozvE zHx{VFm@XI+LBL_fPW|++JM=-FgXd~G^-A;U0wWIrI3r2!y}flgPzB0pwbafhp@aZf z;GnFE?_z6bI0&&z=q@frq|7prWY|tMV7PnOM?6us{NIvdVSFeR>-~Rnz-q#Nqm`qo zRS&YKH-^g<@Ng27cszwXoAUzpu9_ysC)=rBYV6BFukMBXI6XS`Xt`yWeyC<%uSXcS z=g^)waAI?c=l@#(QezWorOUhMM3-%$6?G?KSccsa6df^c=f3rtMJZM@ z40~LMoiui+^;_*2_hYx)y;f6Bf=SD51(uFPZJ3HMGBPQ^4MKyP7r&tyD-@BfcEn)L z8k6K#$>K`|kB?f+z3H=Mex$Fw<4|`%UPid#wO*;opB(l5+*RgaBLF+2CG#RMMj*S9 zL%aL6SiG@VT&c8Za#m$lJgHJX!TEyMP_Fju{-oKHAU)JpJ{|Z;sOwz7Kvxx!of|!! 
zC(h3lUZ@tAwAkSSU$@J&hX6i>?sV?JVuaQyoiu+jl)?_1aY53^+i7Kg_t7jMXyxWP zdw3tTT#Jv)U!?oM>)~DTxX$XMkJWyh`kY`_zUt}HyFgiQs1M5MShlTiG?$9jG#`ae zGn(DPTh`W48M^jV@)=aW4RhXchz5-;Mqv2TDRQy|YIs<+1Okpu8+M}sqK9Th-=3!M zG66bcZen>_Q0JL}uTScMYsPxkiO88mLso20kO>jS^+f}D%au=%TgVFb?hBe1V)Nb6 ztVz{U_0uSsI*)gVtgV_=#wK4!*g~QoE~wy)ooh5 zg;n2q(FQF$v_l}$ao-H}%Xb>qznVcr*U+3-w1!mw@EmCuS|f$_-*u(l`4zH9j)eDW zbrYN#)Kbb<6!h*nS8Dbg*=?H|(Hv2^1-&z9n&`foon!QcC1Yf6O8p30{AV3dVvTcd zFvHQ})*+Bgg5(Z&6}eKe8rfJajg5T{j%t}mr4xvWDy2#7>p)6BUWUm_&QM@(WQjdS z9_k_n-iCC(0E2-g9iCjv@}5{B=ma)vZ{iQIga@ZHu&~U|4J#X$g8VrYgG^Z2nSp|N zlc>I^ozUYpopd!F(3LtFuw%7i!WRdoYDi&11fpNJ2OdfS=9dGO(N~0o^cPo7`=;r; zW4o+ey>^_u7CV*?U_ivwq?8a%vUXf534ZbAS~JFf#<@S)l~M@)l%qTg(v$E)V)G6) zWAP1Jq`8rp9dFRG^2eLQiEAnjo2d1f5^)LsvAlsUL@ie9_oP-M5Ws# zW3p3)_D2_ZI#%f!3bZJB7trxjBJekwJQLyV8srWu<`&ooCPQ_e_F1c^-iw=xCkN3` zbgvT_y#6e(v4e+#Is5+7nDUj9Ot`}7)YhSmnw6$@orIxO^0p!C5%oJ&B<^IlNuS0x z>_WyJ6+3*5sG@vfErp4h)`wlVH*xeIVW5pCI7P%3K79pRrZ>1nK+#e&`AAy$|X{JB8OGmjXsR>{36XP@4468ij2N;FE z5d^YopP)$eM-Rl%c23k&Z#kzJOG~`2FwC|qowirSbf+H2Y@J(ZDte3<)@{y)@})q_@I32twB-u2 zwSkYlvVJ?9F9lXamu`r6r?DK9LxIj~O;pZD+f|Vd%BOZ`01cLtD!7%v=VFg#%yWLs zR3>`RyH#cH|BjyHTXfgRgiOu&3^vQ=*j2Y0nouMz*5{hHgQO770ZlLLrU3A&fw1g( zYYIBmgxJW;*z>bG<)(J9cfLq;tfp3?KN;P^bg`Di#{~rrCzfifq0P6J2JBHcAHqww zunp-wgb|5Rmy%Rrf4f9JJ!x0*6s({@4u|DsvaN^q>!h4Fb69{jsXh#B*- zUB1knaOX0~ei@STeC&vPAPede_2lo65QVr~*#g&0=EQ-({nIxe_V&je-m_)m0i`BR z)3}$nnOwjD!ZE)udz=?@@2lrr(7)@8}Aq^YVbXiN_N>|LS;hreo1WbEE;kpK)v)^Fnr()oJ8;Uu6J5u!(t+XhgYj(NM6IU#(|21B zq27oDF8^W7K4WiCiX|{SI{TJCbgANyjLbxW*Xj1W;e4l(fc;;6r8t}2b=r;S1e~3# zS;4Gr5OOu1-D)I}I2VCXRrX&uSG>^P6mJP>x)fldXxq=b>mJu7LygN4HHG;?g}Zq3 z>nW3hVrcP66K<&Mk6b}wnH=(vY4P`KP>M#;YPEG(Df>h>Zj%b5p6FnInmk&U7H`c- zwo#^anYNx=2gbI+`a!C16sZ4=er>65TR&h=0cQV{5;JcNlf!&SjUTTePMj4KIMQU~Pl2OEuFpDI?NmLx_n;pvDL~UG9$=?gq@r@*G zQXpB0I!F^P!xC?>VJoji!ZE!z1oP9$G9=z?CJl95mFh2UXdD@SS*K0EA3Sjw0#2}l zmO2mF?!WA?I?bP_Ga?;bOC^hW(i?tm+&Uu*&>HheP4apbtU6!V2zK2o?br0~{6;FS 
zB&XC?UGrAH|1vt96T>JqogSFA36ag;GzLl=)r+wZlfiKb3#8U1Nilc=G)mSFtkw~* zKIaE3v+XENG1#nLgz+Wwd$1{-P6Oy?7INciW124WBLjFtjLvmAEm2z|@Qr#WHC-ou z?hKUr%-K1fad=ws?hwYr#Pe-0)xe(43xv*(smhtN2Wsh@P4b`#LWiyU;)&K?WPwU0w}(J$0+)uIdjL!<@70hEkfZnmTqw3TB`|y;qHe zly3{9h&w|84Vnuei;z^u5ZJ9f37=-l1*g>apGk#`{KEN-Tg}g){|e+a5vyE|mQq4I z)qk+M=BEGc%mA!CLBeya-G)nSR-Hb5pxK{^Nae#XU-A=mjU)UCSVud?l6!ien7E#| z-8x1yD4pNY5)mTu@Ah%{T{6FJG5FXHpXb zy=WiS=2|foZo>6e@@pVqL<+h z4lm8W^V^*F;tABG<+6M-+@;TqF^0L8TK2Xa<2D|J6*dAy$ZICto^fU{B`AujVbONq zRtVy3jwNrgmg)+{EaNjqcHUQh{lHbsFgc9`t-V^lAwn96JLXeKXc>ftcFqDbP_asF zciuDOQE5?kR!SXH+Nge1$TQ@!r32|DSY{)m$Q*HWs|mNs*eoojf=J%>)C6!6er-~4 zK-q0j56Jf*y7lq#>H1c{cH!V1A)CD$ZmZLWFRDtFP^{9rZudiB@A@DHy|zQXLJDIK zkbtC+#+`?iDsS)$OAb%bFgSiH4YDYg;BNaF()8p}<7#;0>y=p8fNyKWsB~muXZ|{t z&?wX}mv>xAH3c4IH5yOFdswr^R_kX1pw0;cLSw-_3}O>)Y&dp}Du?^do~+nWBpjB= zidR5|kNj+eGpJfyK zdCjDX3}Dyq#JO!Z)3A|ptai=f$(Pe_btMUjH3k8y=g+EX_VxL^!{yD8)JLFd$hu`0 z8`?Wl|KA{0USe`gO^Q<2GqK;@$cvSg%>Z9%%&_gq&9@Tlxk+Q!iUgwo*B;DB2ev&2l-32l2ZZDhs{`E)-K*tp?RPcH_I|nN`(JFqR(l9oe&h>^w!UZ{jl_x zo|}2X3;^n;oMQ2``QkJ7D8(H`M&5mWA?-n65g9iXLaZ(vvV^1XZm8csXQm5v8C#e& zNti2jEtr8qr2%OX2iP{SCi3sFH0FuEOB* z5&GU{tMZVzn#`Zw~&H5r6UWypqc@`9v4ySlQFV5F-*Z?zU6^4rc+oJ)>>{ zqL0vYKEKRK?$03n6V~EE8c|J&Z{M;J19L#dS(DuuPS^cmKlnNB;m98Nd7uC-$x=O_3muq&_zbZ60=JA%oR)dsAl`Kj18T$v*qe6`3q=q` zNV||_gh*A6NcO?X`6bZuuKSFi*Bn6!w*E(fFEBScEO-l0-&|3*t)QJVE0Y`{sgA z=Ua8wdelpfH7X4@uAa(*WY|VM&L*)9?`CUcb`_u9#@t)K7o)p%-2`b1f9(H;toY1L zVTpxR&#}5;$G;y{Tr@ogY;_Ff-ED`NE>kd-AGv^6Nk3`WvY!e`B@rq~hH}B;O9{-85>p(cZ*wlPmtz^!tvh3 zOl~3gQbpPiGIL&R)aCfl!vOK0zyLR-I2Yg~2k}#DFFX*$U$N;=4li}BG+zmEu5~C@ z5{l1q3UF?)zM21#=sCwX_JRU!13AC}>*t)+t`FbNoFA?ZO`f`8CbRef$!vH$6=Zhv zf9DrSx0bRBcSYC}9%K~eDz)K<)t=`a-}V3%YYojBT&+$cb&fwg9j*-e1ZaK`qZUnp z-`8o-^Xr|Aer-5z_I`YQK-y$fu-Pxzt^4f=(b6GJv3r+EHV|aYij)$0)1OHdvREZ_ z(3K(@hCy1{L+YLbm;DX)*0%(nmry|-*+%)v>GCY=RL%B-1s%$s8Ml|GUgAiyD5=4WF zz+uu%T#hbrKV8oh29S{Ck2u}_q;rgSR(4}}@zV$0~l__8W zuFw#+L$>#NGwd-bp8OQK;u;!j=2PaU0hXZ+2O_JOq){QMZJ!>c81t&7~y z&YwF*JI|jTKb9pJ!CaKUb%9i1 
zCdV2}<~);L-n~CUjXA+2dMPH9Aw_$SVAn~?m|3=6#NE8e+91kC@gb>)*;z^ig(#)# zQU;gb@P?CKD2W0WD9Lyr6dJ%OWWcXC`oFI~ZlkW$HN@aE(-fjKYzY%~P23L5xv3eD zR2sGPczz&6dc)POH{#?+A!uvU{8Ch&Ep4#=JrcFT%0kqR5z2~34Qlsj?>Af!r`#6W z>JyyEl9k~nkq@ZeVcAGgQx4C=vxY;AM{b;U+(_jS%Cd9)8+j|6iOc(r4x?sUMUT52nHd5L1$h;fM*~}LniG+)^(uE(r;%2;#)no{? z1gC(V9e22?#{Hefy+2M+^$_5i-P^FeU1-YG=N0aI@&3ODJ0&%0b%15%<&xrws4T+& z2`UP5|F>5-ZayS!3)={f?wj?CHl&~3{C254Lq$42iVp`ogpWf0)%U3LGo*>PSxZve z$^XK=x4co9Qgz35b)X4NnT(8%F-nzEXx3%V%I@`9FA4&Mg`=)Zgebq1|16l62TGY9 zMw|Fgt;7)+(Fj-yB_}R*Y7%)bat9SasYN_8b#kPN?`x=^G}U-00pYx2K`eVS(+#rS zU4%0}VBT`5zB;(ByPg!HKF~KVSQX6R)_v!?ev7q8+H7iREgCtI7ivfO2Y}U(DOdg{Q;a3VHwIgFsk!EUA8BiCgLHUm&%RN z_wnR;itl6|YI~qPuCWR=h#?Ld7#8%ne&r0V8G~Mw?8!v#py4T!LUs4)7*Hc>GqF6g z6D$3i2xj|syY?b;IFx$;S40i5$ra#=LdGMSVAd(*%8}#m$}~|dU?Qfq?sT%#x^wBM z{WmR@REWHQ?%Y}Ly6-paVGoQM`r6s;&VA-KY62U!#IHi|A$L;59;%YbkAtG_o_y&X zj;b3GSH5j6Jwh$B#@QjFgQ55^=6yP`6qbE?SU6ePTA_oL zKXeUcG)leIiFOuiLUw6`$O>HhRE{+{V}@c}ci_y3AXJ5BuG!G4?9?8Nc+~d}T2i)q zu2`fMx?sIk!nPDh0_6S(V5rgDhpbRA&L5%n+eN%1CU!r2EXp~(1bF+C3#y{L*W-WY zK^XPI#+Jf6EGx>W#;IuQDe$LI`5zu(A4jnGEQY0!E^HdcEIRCY}=bC(DfT-9onAxTj!xnmz zNgRnf9hiMVihv#Pwv+ZFWE@CE(j#&I75vvhAR~#Jc~u=-QDkO6UK_bRMT(dYvkXey zxn?wKB3L$%wlES8pe2sxZcr35>=xjLXDkAP_7oB^48`t9gbUSbf{Tl{c1fk4IDJsw zmx%gb;8#~exR8u&IpxTGU*R7kunltkC7+D(x+ZsoNhwGH#5lBg0_qm_3Ky2PJ#)@> zY%D(Ry-AfDlLiKRT1hW)5jZS%EPmtf?CKFbR@|rC5B1`*3Jy%%qz5sC5nhkN$A7Ka z8HX3<@<;5&Ityc>0mk39U!bo)#`}XFQvrYEFfp(CPSBJv^_ zLhXnBGkXr5x6kFRX5MB=Z}*Zt#kyjN(xKn-0$fC`w(^|FU$4-HxHc{$D2(S2@pxt; z0yii+<=IGK_T^n3@aVqonUlLG?|oW~Am)v+TGuyh z&5`L7gBB66XUZrOSI%i9cWYoR*C>qS;0?i` zxmYaPmN$P70B`xYs7whlt2Yer7tn^dA1B4~gt{GlO+2s<<7!;eW$Lwj1aZc1qPnVmp#8ZOPB znzP|v9oWatW{!8{lyHyH(pvqL;3Lmb6XnX-*a~`GWnYWPS2=)8H2V6e7u)WEu)EDZ z9yv?vH&WB)`JMG&xVw*cm)6)7e>eHdu#*{c6a*P6>M7DzT!@zkAgrU?% zJR*-F&XrvqZ}GJqvW=EH{VI&bMf+Iyb=|6rJ<5b>4N_EWkSI$EZrPZpYAKsdM5_mC zaZ)c6y&Xh*3P!(=%Tz=tfMdFkOD&9DTzyffC!IERwkgKPd&Xq7VJV=X*1w~gau#_wow+1i%%s!N6Qbl+olf3Y4@USmk` 
zaBN0U#`(cPJz2F(Z)ya18#Ax8gF468Y5pX6EgKdRQ>SYt5!-iCIezY=PGgKeGe2{q z_}h6;RqOyU#UH^CYCml-4sxLIcC{Q?G!XYHk@GIKcruzj^T%Of_&?g+u%|nZdaozu zI;Ybr%)^u9+O3(}yLbYAy! z4k*Wp^rMdwK?yQ|`bOZ5F=*cJL)f0Shd4N9oYwMMS?p~@1F*P5L#=OS21e(=%e!~{ zNuk-cG695(-A#ggtQ$anYQ9iP_Mc<@=BG{?6SKd+SydKJSXLbLUZQn$g# zY3@uhbBGBy=X_F25&duLRe0j=UfF@5(K@Loa877>yd_o=CvEbTYA~-0d8rHT^hLcn zgkzh9bplBb-+3uJ3|vuyS)TJOw6G=tZ}$v{4!%3oFt%>tPCjWc<@W#&zU>m$srIgL zM!hD?)Gc<*t_ei6Yq@J^Dkunc&EJ*75qw(tjlBFgQ_9cO2N$Z{j@n|AmND=F4`uG+ zPy?qd(Ig4~u@i86YNuJuG+p(!0tbEwUO#3Z8>+_SVf_?24i-5cc|5I7^SUBhsYZ{R zRNiR6==l3d7bm(hSB42xhGq>`9JTFGqGwi=9KL&VO$g%jc59_C2LNs$2K`P9;Gvoz zwuF0rI^tgTe|4`06<0Qawl%&Xa$GC&Hw%16DC`jYtma1DvU8YHv~@<3maU04Xr4#S zo~6x=rYL{JcdcgGAT;P;D|4kLvas)&VtaA^jN_7(blmKGNfUarU3ZzNc2X{T%9ogd zT#66yMatXD-;^~d<3$=q?!W_7IcWciIOVIN*~n!1Gh+MoNusJe+{h8OH!h+i<=_C! zlvT#5Yj(k4JR})y_-o8@V$5!ppH}s*;`+lOnGA zUQg%CyhrsN|06h{Jk)=LT=m#rC0*nJbg=yksH52k;plkmbXxo_(6sxDO;aPIn(G|} zQQlUL+6bnOK$`=UW1aCX%7UbKA||Q^3`e?_Th*LL0W1uY8S?|9XcRZVtY6gZie+CA>Gh&kgVX#J4<*l$u@4`1w}5`f;Kc2RFgjkTT<&^pVUp($$hs)s>kZ&_UhZbyLwom)$aEKK(9kvf=h6nM< zlo5ZVHzKx1tuq`eS8+YIjGfkF#e<~*5Um0Krf?d#EK(53vE7{+YI6GA{Ks}PFZ_#< z6e1a#_cD<`6$S0inCpma`7mI>Cd6=ol9s{A8=}nm)>vABsluj`Y>Wi&VK9+wJN$Mj zchCBRJ5R5M!J6#rdLTB9>r8Ilb5CJ(!j_VFbOF&x9s-Hr?px}lWB{Q%dKK|v(V8I# zserA2P=yV-z#6@c)8#Lvz8up&KHQk$F-y=RCVS4arVeZFsu4Gv#1Y|Q6kefk0!81Q z*Wc+uwXe=W2$kiM1WDb-Mxf{?vT&-q`zM3|&PqN_A`9cypCAe2T^3Ke9ld>afJ}LI zgS&{;l5AWqHh65t&(H))u8;Hp4u}Lr`xFx;aJwt(lfY>SUK;~OgbDt`{=An07K`)q zQ1pWz)Y#gYO&meMm9)S@sxP!kgKSsM&eRq-xZr~zh6Z2d=(6Raj73*3FDrlAz2jTE z89T3DX?7(gqz+Lwbaw#RGw0y2KZ}>WEbxx z$p&KNG|50c6S$PRk`pbvtvjL4PhX@sNLP6TURxYeK=Q|qg|!7F77h~YZ<>sV z0~{9mIZs^VYl8of4yJP)8hb>yq}xach$slQV}3eM$b{#&cY&c;ti+E=nWT^yHJB{x z_ryf;@sp?s2@DTOSaN{Z>Oc`~ZYiI`y{{kSav_xtoh7k)ce)A#>C26*IuB!Nv9=aj_ zXeJb}?!_>#K~5d9SuD`e3vA713&5k}K3&-asr0a9&F3YuHT@|m3N=Y!22DB+m*!3n zaxHH}CFGRaP}_!6g~%=a>(}9>s3&}sS?fkuyqY_`xnEtBHxG{qdgl7JLai}yUTtnu zwwtb|Q?mL`>oPvO6JL5>BgtpGOK2CW*(d3i4q~{Z-$kh;S4PE`OV^?E9Ug15t`X`} 
z(@dg4(r}!b0aWl@GnJFJ^!F6cLTqN-#z_M&heiM_!+%EYl%zjBVgO;10sVcyh+|xC zf;tMq`W6UB`M_t6B)6nt?aE~k2zb2GasL>R3c&{zhT#`WAN3g{{{Bx4_GgAtIwhJ2 zl-1U~{frnFzU*Qji{+>-kE^eUXECJEv00}Ix=U%Spc_3^NQ|`EiZ#jyQDi`Ze*D2o ze0WfhD7BB1+0jNVo^Gmb_|`tk=6u1Bobw3e*FYi*(@?MIKmr>TDW&AU2DD;t$A2B5 zz)a*rpe3HdRSzktQD^g)+hThvy@|ljC~jjoK{_feojHe%1a~S8&7BX5{g5^^bC3){ zrNKx_&|D+%dk2mI|1Wj-{3h~r%d3#vxv|;|6a%2puCo_p445>TAQVLM6xYoXiSph_ z9`UgqVA`y=U&m$u@?~4e%l%D7+9U(p5T~3ZCXQU%E;WW-jyLwsk{5y~(36s^1Dw<# zrW&lMg4^dwj=$Gw!oCn#PB z>i|G4!FA0{0q7z7OFuSRd84gzR@aNo+RqYsE$1E1AA(Ja`k{@C5m805p8;8(omMBX z%^Pzd-*Yoe*a~vdYX6P=J4<-Z{wUYTi6voz03`=FZLF(akZzN`YxAuLpLv*jFk54~ z_zEzi_5B!bke_6Vf^K-9-d`ss_h}@t$3Q6{3rQF=bRo+1W^q`kK4J_2RvMtex$5|1 z9_hTpIH((LR0y5*@pv1I+Wj@6s=sY0vb~A<+2%3CI^*^i5jAScZp;*ShW0%unGM84 zBTE$sv8q>|LJr8FV9`TT3+P63B8p^7`~;ItxE^QtZ9Ei#&HMF!cLO4e`c8nm&wH^W zAV(|oAL*A8?+NlCKmk`)8%v{DVr{F|Vg9LXy$eNEIf#lOE_{QXEMKnWKP()+ggn_O zaX2gvVJM;RH2q~QBP;lh)9Bl9RM3IO$rjru;n4O-wMMjVs=(vkV9zHgZIT>1qd((# za1zNQA(Xx7GX79l{;p+*-^p4jKOTy&y(#+YNycPq9eO_8|EVXnx=pXcB(bnGiSSZ| z@%SEBT~8#_{AX47xmpdOO-)TJO}4mLlK;dun68|}p2j9m&}P{!gF=qL_)w*TKDd7# z`BhI=ottAefT!qzu#Vk0zd>ks1XN@KjmT}Nmh!RKR=hM z8XmN=BiyKXw9?SP9v>gyetJU=hJ0)==|T)hR4kSst9Xg3>6DY ztQS$P=PvyI;Vu6d6K)#l>TMA}BCl+3_KR_yWt~0Y2rA|C+oRZX@+xb(Fh;ndD)B6~ z3KN0<|8E0CgUZOrcqG}i=O!HGB_H_tndwJ_0(+-;aCB5yQUdd|?usfEO@QqWe8>Km zogtQ0-yI8*uy7RJ-X-?2uV=B?lcI*(ORFekaKgJDUGBM)v5vCl2>bGHdY@H4n_`ZT zq%s>sfaD5<(kvGQkT^K8_JjV`C>0>cBLRU? 
z^xA$g`n2UAYZ}8>(lt`>C2+8%!pt!-vesXb;D0les@-}m{SQB!eIitpQzMNAo5)*~ z5l;K(MD~E37M){rnR9o61NpsU=KnKGODBI^~R7oGfIr|Y0FL2>7DK^?NTjF*&VXx?7&<3i2{+tw?WX| zz(Tp&Y^Bz3r1`AM4iN|mtLrJAt!1&aF)(I&dI&E8aWRvq z{aLE2ITb5|Uu}tjO&$G;6GZsOL zo)(B7{0itZ1|!y=<~zzl;q(#KhylF8DaUM7=`=Wjc%G}N7KEmP8Ts)J?~~$GP+d&F z-@N89aK<9?v-(UpSX3957M37a<%IeWJO&;P3KrD-z?Juc;T@lx(Oc!2{zpuLK9D2M z9G^cM1vG<~dktbrq>H8434B5S{U5UF46^J@cltx@QQBM%zH4H7AJ62EDbp1WZ-*Md zFs6?5SUUcNl8u?Cnfs~^-^+nB9@8tcSSrW98+&F{z*o!Rf1S}_`=bI&I$&#fv?V)bMpx4FIH;2UU1Hmo zqsOo#G8PX9=WH$27)%Hn2Lb}>eyFDHfyVIh+n8l-MBhjbz~%_lc$vN&B9S~nNnohf z_gqbN{3eAnyh@8gONN;rYLQZGqBHrNUosX8_=5}U4w8jDjsxh&$8hf;xCv&X|Mfp% zDT6MM*Mfx4UQ{@caC7%f=^y;nl{ysDi9+iC&PJn@lXc^|hq(-S1G(NpjdD~8wuV#FF*HEBgsZqG*1ik17j3OpNkkCI|>*Rf%+D2xRV zYGXruS_0Y^nPs|z2}AeW8lG4OW()0E47a*{>zwzbl2TIY6TsW${v96h}(wskOG7Y(l$2mPM#`t6*B%0IoArPR3? z#e;1r%kW!S)@)E^^k659bXw5;&8K>7Pa>7ftTrw` zmD%O%9K5zKr1mXZl zd!eLZiDVPzPMzlKwCG>|vgkI#4MzP<=&Lbz6#2Nv2I^6opXkjV*BBcL()%eMAdY{h=`(XO zJF$Cd1TBlvU@F^~zohi;Hzn$4mwGIDDE{(wAQN)`6M-2=sHpa%^8_*3AIB#q>dX-r zCw4)7SY{|gzZdfq0cojq&#3ncs)QtK;yV>eHU0jE(QB@q> zYsMe!pzlOjeJ54=rVvV@R0j6acX~FJE0TM#8qbro!kWrrM1lCgRFgAQoy~+u6+fg%UwPCQY-`GzRcU zUhDJZV6m>^`3Ho()PkIpVHj!HuljoS$1i<$QgHT_UyQ{dJ2j2RktEzCHk`cKaclZ% zdD6&GIQRoCFPKk~0liY-t@`f9 z4M$8b-a-xRkk7>3P;}k_r3)CK^;&y&iuE98CNS_Z(P5*#E?6Y>91u`Ix>Thzp#4RA z-Y%m}_`?VZehF1De!a z4LW!C@mKZ(DV6kjqp~YCtA7;xI<)S|F1VvpQq#g`-;k%A12v@t=};y?DM}${7!D~L5i=%PVWAsjK@x>0fZKf7 zy4eD{53#R`TK=eAyvac5MYL;4rbUFL-^~#2UwZ`Hf7NC;TowqUmh*3!5ewIh|Rw<~k1mHYtQoy-*@($*Ig&rnZ6 z;`zoD{GTC^rh0b`QUx1iLLSbRN!dFSqIyAGReE-@cz%3RtSm0% z0_;IyBAc`rd0qZ%82L9S1KPNFZi`90e`G>bU>ZVR;z)89cG8~g2}C@DV2YONNmZJk#VuLZg?)jbzEWQvh&(4u*aYLyY*`2CY~? 
zC=#D@wqd^)P~^nF-}+0ok6rLyoAUApi1P|E0@HTTfD`e{MUmiXFdVf-x!{_KaEXcm z-G6H-9f|SX6lFyz-{MtB)sam@c!wI7aHx^hoE!XR7zo75 z@(K#e%v`iTN7GAoBU`|L5CVA}}XrBrfGP&-CgydiVy_BOErI!bEa1t<6x0=h z7snr6r0P#y9|0Gn`Au1zIb#|`2*}Z-dv~LDbJ%;NeS8qQ zGyE7pzx%~^w7tM<4pmOiEp3=nj=cQNgpk+Cc+_!C(%gZIuVGGOg4gOa_LNvpihaE`#pG*WHi9eGs$6iil zc{yNzIRPv!9!j*&sIf_&NFr~R&!&5`HS=9Uf!vXlz*o~%I4PLO-C37;r4I6B?EJuE zH5~W0KTa%53xCJ`K{{l;)O2(Z%{gg2ZqFPd&S5=AGet~2Fgt<#zt;&rOfE!UZjexh zXN@-7N!&H(N{b*0A_^`N&778>AROj(S-XXHsVPrw9RR1YDBexa{rLs&O zt*tH7;p5eR(p85q@j}vd1%1!$kc9G-M9W_$X@DhqaH@XC-T+bx_wd1=hP&SU z{;v!_6=2x+V^e*g1XC~SQq}Ayk(0r}y`^|L&vo>E^MTujk;m z^xmn|9%S0=)Qt^^kdY?Dl%F0uDV;Bf<6$0lOMg4)`&U30GAi#u-7reWG|X_6<*AM7 zz|`*X05Tp~`%|$8)1J9biyc9fL&1tCIeAeW3%2+^_$XTWm)~8EJW7W7lUgnnAI)%n z(X99#iqx7~I$c7&q`IrH{`&6QXUG!}O-KCu=m6y4P_9bdh2k3Jbl$w!K*+pIcV~h$ zGp3#nYj1;$$723WmW(j=A4n8V0)}YRva6tm{FqC_ON)&FgwW3@aS*eRKy=e%5rKJb zgA`5^T#IZco{?6CuMlt8011p{MuWEN_tUG%#oFDe!_Cmjso&((b0{!h?VRMD4iMsR z?=gt^kIpH~VXxqEc!|ExUm)ebXtM^cQt6-qt7wR0No^#i5}^?_DRP2)3uKftGT3cS zP_8Ei1kzcO!({qR&C3(QVg;T0y?=GbUrSGh+Bx8zSz$JSNo&WB#197uerv{q-=`KOPBXGA zojVtV=(Sy5EavYo>=TS4^uohn3ArtFLnHI&x-ApKGKk!_JLye?B^Hh=q=_*@Frv`* z7ZMUA5Yw@1iGKdVwD2IfU%Fp3nlgN!Mn$yW_)m_V@l|#_{eL&%FeXC1XTo8ge3aAi z5`tD#!Btg05yptn=M3OJ|6fw?AuI-#%s9hLtZDl%+$$_+A<8%>Rux6f1QZ5 zb|X7wfnlx~MN1pM{r(MB9FCud>aS?}CLdWImu4XUhb|KUAw@3iKNL0MTsyP)C=|r^ zFXW;H@7Snf+i#k~X@`0@j}+^e`Hh$c4jK;XEy<_GT*q4c+v{`f#b4X2L&hqNZI$A^ zj$XxZ-a$y+#evbn94FWe-DCiX(Q*7~E@x@t?ZVb@I(+k>>L|W-<57M;$im8~OPmyt zI*KLmLri}_-D;^LA|Ulap!2WNR5>?py9mX|KnM;RjhJBiF9MheStmyRC4T#tEIt>S zQl+dxv2BE6E?|=f=6At_Du@cdmvyzR&!`%4d+4U1T&>(vXzf9yHt8zwoKpX?#ss>!B#hEZveR-P}hH(kNmQ(*5!7qnOQ$2@&_@-w62d zGZbxsEloD#fs`B&TapO1=3Alz-cF~^C^Rr&|LA?6HNZqtG9$5Rog9~DAgP#8QBq4P zw}a2+pjd}USWsqb3n4x%ycJzCxc*k81*30`gf-}r4Di{L3B$Sj-Q-4)hd3K$< zReg~aCUCP|bJBiU`MOG5`SeRntVA0uqi!+>C9@ZaA5Y0)_OmFQT|4ei??W=gl%5Ws z)Fu~#`)@N_eOm$jT)i}E77ndx{7GMrjtR?beeQFIHERauxQ-1$J# zCPaL&N5%-k+1jrpf_F`n3~|n1PB62W;KoUQWJqqxXLdKyq~OTE$=B+s);)8_d 
zSGKFH=?V8Q%3o|nGtk5~F3{^m%0m)?N$ti>{+haYfLhKrio|mCQ$|F_-KRz}3GEr$ zE*e6>onSyvIFzMsh(AOdM5^~8l{>tkvdU4ONPra_e?K-(iw_TTB+70xPj|M`j0|xL znmoEH;qB?|;6(tM&#N7Yu(PBkCp%?BD3cJt!h7N4&8!0Jz<+pc(`>cS_spOAPiGU~ zmg=m9v>Xj0hZEGCL zOc?Anbnd&#@xbYS>;3+~5xX!Owq~A^;P)IImI}zLCct*~p=xF*YuJDtQaV;>eDPlsDGI54IJ1)c?7GP@cC#j9mC+KZBdZL4_{i z9w3$;`XON~Nkkj2W84X&Kmo(9ij+~f8Ia!OFD#imYXg11=6s3fuTx0AM0dahMRE1a z82;`+8uYhet1CJySGl%^C=vtanf(@mS7qKz?%tCPR4_jCcLE#Hz^P<(Z86p-q;^_j z3x$Y;5Cg)q2ufqQmtqcI(f?>;P-l}Ec0L!+HZ?}!P;BcNK0>zN!}5Pf3kQd9g;|yA z%XnfsJ1qnQT;NilgNMAsA;xugP;cfy>r7i27bSjf#esQEY%~7 zW%PPQLP;o=yg=`tZimTgyLRvE*e}f6k;)Y9GCi`^+P133OVGVmWQ-cn5?c9iK_C@Q zNXxglo6j~!a{g~U+gek>nYm-wz57P;va}F9!Z_MG=d&mxceOwQ1Q?wQ~x$X6>)?or~b_HdiZKw?kNS2ixs!;~iOhNJ|Y7je5c? zErtJD7!n`X^%sWy>KTr1rw9x8?LbH}H2w}dTt0^)fXJov4@J;g1z?GNs%nUzyQp0I zGBZ$*6*z&C9t~Q`E~1qaZ7frE^8vg#dlj^=m9r3{byL23wZ@WI?4dUW$*Q*%vqC^v zh0W&eOy~dN_U9G{VJLqk*%Sy+s)o_QV~!g1Fy;67sWhJOePv+^P5O9 zu%VqmfL)DYF!LfvFbj*`zi>Osh4HCtb8PeI>ukxQRKkj7Wn ziHhLgJxIMK&_B*r9K}E@d$TCNw7LyMlZbk0MBMKM(V|N^;byX=_Rt%8Hx@oiuy$HE zU=Hi_)wCn(d_z351GsbBbQ5*;OT&~i7-N9-k*BHAj}j~2NM>>9M&!8reLE#7^JlAd z_Z^HqQ0LpF%kNP|yOOAJ8Us17la$>oZ$4un{UF~v1P;JrjJAuj1$)@09`RkhH==`S zSnEvT>x%q0Nxq(z_IumM6<+FhAReuCip&TvbYo3dhWj}}hEDpUK@20RtzUr)ZWBIc zl+)A22`m_uBQ$-U0Pm=5n7!nk+73GM3cD1*! 
z87D*-1fgCWf0~r_;AvJcKt+2);TC}s{Fw1)1Oz4IG=q*(8=;+axZ-kR1l=dBnV5y# z&9{DiiOZ>+4O`(C2rh~RP-Lqh$TAqkuMUm+)0MGg1#iH5`T{~4X8&N!pyZ%eA5_Uz zk0SIbCvs6bY||KTTbx_ED7n@W5@$+30%I>qAVF4I1+|%@0kMDNJ1il6@3SaBZ54g1 z23}e7IAKN@q8Dg;AiP2P+q!9k{e{S}52WT{1JWr)5vDx`UKqB@4&EXj$`?{}`xT4T zcL1>$h|w3-=W3Lr03sE+80HaG7(^G9I9^TWEM2oa`!|9s zKRCornaCt;06!Osp6=5PaMff4$9R|%%0lBdBnps%g2izmC=3Ic4RLe`c98*yEhCV# z!nYT}$#Bu?Sz;lPs^di?=SCdQtw^)QM6ru-nCF-qgb(G+ZN zbXzA8E8uWsnS5&CUW$glF53RsWkJnqHk(thm>+qr6H0J`1jj{w5}PW&=uig{n8zRP z-~;Zp3AWsQP}rK|3GMQyfuRx+P?3*`J-2Icf@Hqhy;Qw;h2D8bBA%=H!!>8>=i1`F_9q!IsOGs`6BzabF>p(TCq`D z-i}CsY-xZ=a8wpa5z@7>8=;?qQhp(mPi4YuQXlQD;STe;AH|YFJKyLaNIet#5j?#< zN*F)8+sAp|TK3?DNKomD%B0%*%zx`^zI0+C#)G<63E~Mz-kq8T z3yBxS(&HhBSO0{16)WE>prldc(0PT@%GOulvo<8VVakZX7>dqcnyLd~$MEl=*7d5T zW)y@ot@&Qd4;%Ty-!CeeOz@?K1^0PN2$~2&PQV>f>*2wX%H$;R2}GRYA5*=lxiQ+vH0 zxeS@T(;>gXe2W`|4A2@|U2ZcSUQ`a`NqogoB<8DL|0WxWR3-x+s|^(NN-otkrfnI& z5@$u{2CW>y(2ZxJ@IPQ#u_&`xOA~*KqbdZL42vRonFK2#iz=%iSIWr@8k8cM6#BkF z@6{gKZl{c-2I_|=4T*WYe$-Ddx+XTUomo`FvAoqvv&Acmo5QghAY@}0l>gvYqBe6? 
zdxIKwoD9Bs_OLGFB=CG8n0sb+Td7vu!O{JSsDH;$XXmps6lo3<1?73=x^;pmI7;?u zjpO=Tq+z^~Kj~=afOXmLi=`rnJrf)Zg~wK+GeZ3CVL>gZ>;NEnm5jzwKzcPy&=U?5 zocE82x~;`)du2Q5lP0;`C;#Ou&I6XLko~Mw@sLQM3^@&&5Xl%1>p*D^k<&&!-tIiR zWy#r(v=q*AdtXok9_%V6CCg8};p_jRe-- zq6oNQ3$IRRQ4@Jv*d3-HSve#=j|9$89#z0JrsEo#IgBM|f0n&x*V6gYpm0(4TPwjV zXDxKc1C@9A(Lax+{BBcz*P<|~O?bx)c7*B2K}sWNj{qc?*)4%RFb%zs7zK<_`L8n-wa@nT39iVpMMDHhmZm zPv-CM8#j}2JI!2Uf=OzFCj)s!2mg6>|Hc-9QQ}lb{8u?+WRUu!4*LfA64(#ObTDld z>pZgyHw!D%Z%ig|cliqpcw()qX4%INE)k6bZlxfD<)Hh#vqi9>fKA^>jS-918xzQ7bys<1mhgtJk%bnMO?@$>Zm7a>Az$|g4(@Ju2GZvu` z)akFa_s|k3PcXd(VY{HSCPF(q1tGq*qAJNB)(o1JD*o37bRsMY8mNyIQfcoy`-h^# za_h@%$Dzave827~x0dgsnvhRJ$REbTRN&9HQ^*l3r($2@8|s0xbG6U{?v^AdUT8;_ z+3@RXVV1cU`ny3CqG|am6R__$-Fu&k`fcUHO=87|y)FzoF_e@CE~b9Ak|7;cUVrGiG)*S$VDtvZ6$#VQ1(Ho?ZlvK>5i+F702&EY=q(L==_7?3@K*aThnviy(uF?%^NC=8a zMItSL>PVyDmM;Jih@(8-$f%8~ zmnF58+|9mmpd6WeIHqI1OW|MK>on6=%STC^@QlrDBgWH3=^nlZIdPXyQv+hIfxoH) zMDfDvFL^*daqPu;Sjq&1&7{rq{gE{Y`QHZZuDv(=V``eoYkT$_C&e;Gp)X@6t|5GO zUBQ&_VgX~MNj7WL19fBEMJ!E)Yi-E?)*bGRTL|2I(Am~p@YazN;Hn!Y%6A712s<^^ z%H!>&i6M;+o`|33sq~`TF$62vF0AbMM89MCI%z#&xYE>zTMJym?oaOD&5q(7-Dzg< z2|h!77TCX!Vz=qThw>Xb58$*xxccc|F6IN1eMG;`%Njv8IZ>WcoP55D>KL z$?IpZ4hu5evPs?fSW%Q@8nr^I@lJn)6Mv3R7G{CGh6dJHR~Dob@IPx*6w+Ls!6CaZ zC+a3^)bn*??nSXg9tn2D%Xt1?b<4MPY zHpE@XbdMg=zF;;KI{-m`<201{s4ti@m+j`MSKAcQO=RFp>(Qa3S_WU*zf1ebAH7{` zfzq?uK8J%EEGUaR@L= z%FH=R;{=YCA-PkS|)we47HM+cHD|hix zcD&}ua@nbEI#+DdG5Q>qxsCUrkB#mcgq$eeIt<}oeFdD%<7D+L9&cxnFGZhi#N1UH zVwMrZ?!oVui{c1HAgeHF+5xN@59pu|v@<)!dJ1~{cgW?Z(8kFJJKRc=Xw_d5W(aEc z{5t9;*8+spCIuh2)FUJlq+8;ZMUC6I44iqx7Q@`z{jrVgzYj^JE<8VZ3 z(b%MwR8mBkJbrF46mGEz*cm8V^U>d2{bxGE(bP~A&4E`4@2hd)34|rnO z@r#R*5PCtCKFAf%blw-j1L7}_LTC$}^N#;OMM#^v`Tf<(r{ndhg3d)}RJiqTw4x$! 
z#cizi`luaJdY~C}H+=u-AQm`3xPUfT-gfP^MaCljWXQEYx|YWwe<(@Ecz25U=Eoz7 zfrW*Yhmh0QJVl7dcI1%nOxm>=*mzi|=Pz3FOuJF6zl}{YH32!J)c^G}Ym_P5!=^QO5f$r^!X~3(2mbm%F2YJ*R9+PL{jBHA zd8Ubj@9j8*+PfCeMMsq+4xD}aOJ_(l_x?s_>mTHLs1b1@y!(Q~DnPU|Z^of~X{Ygo z{sCq9#=dHSv+hf0XMOu7DyQsv?p1k&5s&u@e2!Qww9O8wzH8NoTQ4v8#@o23ay-fd zK0KrjSaT(sF`dQ-{j<=?N5K(E>qJugb&WoyFhE$X|0|K*S{Txjmf!A4t`_uZtWu-( zQa(i2nyYlW3+pa!4R$#pSy`pbpz>Ke8A4!H{3f4Ll4Y; zv)IU0#T|>KoEs*#q`X}{=J63pW`DP}zYJ#l>XoM$uX6@~U(UbIq(nbjMZ|cjf^=W3 zuSnJ&`J9Hvzo=5hXdb1)+daeQPj!apeGzB({hGYlBHn&Jb4BDDPkSG?p34^6h6W^m ze_(eC7*f*aH_n*w&c89q@A<2?0UFP+ym}adpjU}}NVGZ{1&(|u=o{FrYCEP&_SI0{ zZ(K})d2RH$h{S4tdj|i_rWMeu;mg+s0QYy7AwQ?tKb%nX&}~*u4y_MGEkQcl*5wr9 z^r1uJ)5l(%9s(JQn-h!Ji&>Em=>2~K8AVQsUlxc(;9dw0X{?*hhz9ZI3o#$s8XO(! z$B(w>z*$(@EKIly{C0ZZ=eT8C1=mat1L`79N!tn9g}*!ZQnQ4vSCazK1BmhbOfUM2 z7e7lKe{yh}yY8X192BgMJ{G-)$LM^Gjs5Dx6S%-G=))7N+-c~4_VpgKV(+O3rWrOA|7oeZu&b-o+`#;f_=BBkC7G4G})_h7oi%gMcRegafu;(ySwUAE2_CC2(-7l=!$gwd92*_?=}V4?1zTZsAp zYwWj4>h&q|oUz6TNE6cxjW8Gq_a?mu)LGd|LOLa`K*~LvWl*htFm`fS7LyqPz*1+2 z+~6vY;+&sKEHJD^W6bokrQ0Ho(-Qbg@PX&kTz7xY<8`75Mijn(ZBr`rQLYW!4)NJl zf2X$SyGS7gDa;iQo`iE4AAIe(klxy}1m(ocY!h8-u^Iwio$We(l)B&XV|dt!soR&# zouN=X3{eXd5H-KSJIazz)I0E1LdReV_FDN#Mj$U5nwVuSODC}!IRXWh4J^8 zX#V4wG%WO=3&1{wM+r7J3vJF?5y1PsUm#pWo!-yoXRgD8 z2#n@_L6(n8+hIp4-kC1m?q}a}wZaBYiUnkQYsJh7&~kYWI(^`ueTQB|a;rn}BC&rR zg`&?#r~O8wP#9Am#Qk%$Ln!{^p$6@1!5ZgnJ^}Em&Q3^uue$+Y9*7&9iatO79FdNb zf_}UFsdBQt0Qve=T>q&oN|J|BpK$o>*rkL)LwmE276w_a2x_}VtXUXL^KZ=At|_tV za=YrR3E*|p$aD@b$;r?Ac#qb%eo!DCv z2%J<-wMSXRWQ2UYVTm>CQK`)7>o~vZA_TzO+6CX+Eds6C>coEPO4ed{eTGGba)b(jQpvktOvD!~a`5*1u3m-M z7w@A->X2ncC*43IV8aEcgflgOuRj-CTcw5Pls;7U2geo~pY@+}I;bfEsr+O59&=gI zL+kmH*9G5U*3K-anBaz*X-IBS%wyR?EKE>iB9nP?kHN|ZEq-V!`zV;?v~CSpM;sFU z7-~%7z$Jy@eG%I_{6e-Re<&2Wk!17^M2S10vXBC*XfPlU8BFfBppbC1bg)x}EC z)cJ`hx%D6GO zQT^si%o|A2zGSJ}yNjtdse&FP#&$KQZ9|CJFQ2b=51wK$6;zNNVPh|F6wr@1wY*>4 z=r*Z^>9vj)v34qw$kbT!I${`mWl;nGe|k1BR5D{KVz zmu4|6q6iZiyz3-s3EnD4b=R^5%-UaoonSR~W=+M=b&*5hBK3yY+y#sabI-pt=pod3 
zZk$h2P`T;QYd0p|@#TWfxK3}*=gQPYqzL+c=yjF++b$Szk3kpV;P}n#W%ue%1XKa+ z12Mrz^Wi2;DhXG9{0-0K!)0oy(~vCChXS<4zT#5RBq?I2v-Kz3k|0zuKEhsPN2t!< z!Bc{}kAt5DP-+D+TLi>xu*^J(dwxoDqpw z{~97c;_!9+BItt`tUR#%NI_*R&GQ>w@LdVWADgc*O6-5+x)r($(#2CfoBpjFk+f4w zTBbr@wKC*Ye}j<9brA>W=nFXc`{s$ZBaEcuK#WGB$PYlR7rdv5|9D^j zn%JYSv0f&d_JCjpH@|9XRp?#ZA=ZrBo}aYYl9aH#B6I&{9^`GdYjzUE<3I{-d`I=I zYkE8U^ekC=rWV|LAp@GlZM}%-FDbfXo_dG6xpgcXa^*B}?0gUp-~H(RpNgtZ;bT|E zHyu$O(Gk817{tUN5KumzDiA4D>?iCIx=+|tKc8T`K@?=)0x1F$gTiHF z1-9Qb=Fae(`Z_bx%3iXC!us5`+tzDaDm&VL7IIxRv|)dhfzDF&cTi_zy!z^~mHVY| zeEy1N=OFeE`?0;t?#N%dY-K)t!sLdhV17-({TC(Ayvr2V;FKBuO1Cq`_$f|Dred9h zdZBFG7Bd+~?b*SwKJ`_0>K{O(!6+ha24_)NdOUYHj=@N-Cht5tMa4L;35P`*sCu-C zAD2e;xbJbP8L0JJjzJU11NI#qEiG_{XsLyFY{qd=VZrobRJ}^r!Yfym2p(!RV_{N@L-D z_mwfe@kKzpCpevBdj#LH|%7aazng?i}+) zKkA-sxZ_8OP;61g^;x4wg{;BfS)rRF5&M2@6Hc4**GYePoK~E;I`OIw7huwKdda!H zTpp37Fr!WN)SSRMJ1!usEn@Lz_5ZV82&TX{rX{+!w#=v2-U&Bu`4F1txu%5qtG6`~ zXph$Q^UwM{pVDDZME_A8|K#3_opZfu_g5BDQ56@h3)rLOku$wz_K|0*u*;CR@u^%A zBjohEDlF~5dp9JzW!TO1Z+|i7(P9lQmE#%H5Q%E*hy@s_sE3u^GeViclE_FSIWQqKz_<8T+fB)d9!LT`?a~W`1X+*nQ1b5uY zvYLRKSg_dzZ)q)4g_@%wuFfc;wnmU#+UCCsw6&^%_GvApdAQh|%H269uJQ`q=6)y> z0e6J}Y#tMf7Onk(<`16kwQr3q><9nApT*mQ0wUNZj8wAdnYZ@loHjbk_4Noi$t+re zNDY1lOT=Ahg)`J;#he(Rf^RzA*S=A{Zg-&(>MMxA;ZPJ|B*4w(?B zUWT8~u*&_UnddQh+rzmJehTTSTKz<#p8Q8I2C5bL3Lcce2qtG7pHMaYMOHV`^XhOg z5q;0AF|pm#@S!WgM#Ss>;+cJwQZm-UD^^yQ&N9C22VnRU2x0EluTE8xevxe(;wk&`QeqREZi%CX1yf+ zgKifkAFg*>sZkYuwe`0Q!LJv)g?GJjSR&9YUd9{RTUpoiKqEb}D3e0ev$ao}TB3{DA6Sb8wBkn?^e@Y%#sPN#`-} zrvdoQ>gIjLFzyk|>{w1iHWZ6cDTUt7By5gRUTk>3(VzUy+nCVB0K942Y5$^J%5OMgGp5N(wK`;URIH(&K4`{!=@&lJ-f!<{B@e8%H- zu3qbyr+;Vu{~O_i8x>0mI>N$f>N}tw3`&Q^y30;1FY9{5R-G+=D=@IvT+Hm*yZXqL ze9Kfo0zrNgkPBWlIEL8HEY=5;b9ryb>~DHCc8N|Mj^_oW%YPz8Ztbty1i$w|!@vIS z{qkEt-4nH%b~@T(_ZsErnM#`P*L(aq2UR#bCs{9uz{&tN70@c*YHz&{Qv-}T+3`EL#Qc3<2m~<`JSyU>q{GN0jowQ>WHlgqU31x^%$-O??VJ+~R_t6Ns z*NS_yTJn{#v4jho3xjs7g(^>@`t_fy&inRuF}0;Eo4dbUutUblsZ90gZ-uGc-NdAa)UN~@FHCqXTu@L{1*L>)-) 
z+k;_-C-G^OLMR?_4<~R^CYMIaP}GVDTmi-9OF;%IvO)w%>+?9j(1)`P6{p|lM1$lg zcmijtzY(ap#ARJ0Jay~Bf57WskZJB25bO0UHLMPbsq!gTEpPW>LG_HH#dsKqv^-ImZZPjHWW?r?!33b@+YB^uw zQ^Csl&+yO43Ld3Q)Iw5L zFK}n!9M9Ie_{D3~xivkTArh!x1SLGSGQsF36%M0SEuuy0J#(BOuI|zr9)dy-UEp=I zBrhO`qdAY#v@gcadcCLlQL5?}R#w2d#S`sRYp9L8(;1-%l1InE-=48v^?5@0D$Y9Y zrW|{0=Ab+{XlfS$PqFm;%z)#?tAK_-Fq3k1O<;T1W?MMz-66OJpsT|12-0&i5cPi%WNs`VMNLDEZb^pjHyVg$p~tw%%Y~x) zi60=<2E<3=#wSUL9W5o9vv3*OWPugWgqX7Cf;&bkXva~Pl02WBe6auXw^i0{35FTf7AglETorKfub zblUqs^N8QI+-n94uW>7p@%UpWGD+|SApYJ?D)`J-L96|>Nk}71Y&2DB0so?k1d5V# zXn9?FfdS5URYPb{km5T`a^>Qdy={VqQ01!hw@}M>URi5X&u<~1p3^8$gsu2dxmemi z4uDZ7P*t6uLDv6H|45S52cYvq?>GpZ2WTJ+=L?>;mRy+|Q8A$y7!1GOASufK+}uMF zZMPLwO}1F=UMc$g%`;31_}(N9{y`tW;<<55I+HB9w=^?>S5nJWh%?Smb6bHxBlKIS zhQsFz&S>@u;Z>WYduhcf1TrA7)Jgpf^M%zCM*ZOW3M~ib`;`AJjO?BwsY6&9*S~Pg& zY;GgJs8#6-RYT9g2QE_DEZI6MfoYpk_CM;O*T?ptj8kx~!aQ~USfVm-$by4Gzk2A+ zU=lcIC?=uHihHX^Z}7^LMUK)qqjj(WMtkvZyzSRd|N0saw~09rpLV0EOJhGPd!$)X zCQ?%jT}&y6#%^F@Qrntn-iUj;kPfA&5p+RSGe{SZyO1BsQbD_+94-re6 zkIRF;xt}vCIC}&rbO3OW!i(vVTPk+tW>7Tpp!pXu)HJXYWYA1oLn>%{G=}JpH|!aq zU3h1H;;M!!^?j`4KG9Af`*d1Bn+ZBSUcgj9GG~!aSJ>aJWfIGd#)>XTqis8@JzoVH z0v9p)+^O^m-g``l9oa_6zOW)I+A^Qaob!?IG@_Tg*5uVvIh@UwVTLD~EPKNovB5v> zqfewNCI;VFLdELrJ!j-*!uhWuZz2TMIBBQ`%OkcKIil0sW~ZNu0>I5#TOY=V@mmX6 zf$>%yE8HQKayjuYKgxzNt+=3dnkawpkEl5X#!#$JM*ln9QV?k>Olkv!E}b|f;ghhL<`Q5+gk2JRhac`t)y?+V)8iZY=LNHCQNLj2L5P}(XA zqft^o1uGtIjyR08UNvg~I6tV)kLs5o@8*|yjpdTk6}2@Z1(;+F%dK;6MTJC@nM4wA zz{TQ<*eI2Q7j`!(cuFg(>H>Sx^2e6tLwIrcQhk&WK=GZ0f*_Ij)zcZ1Q1V*} zT`&hPB9dsF<1x}l5h>_bR??voQB9gHa7qg>x({udgs@7xd(%g5JdE_Vv6g_Mkvj)Z z$*A_VW)svp2XNU~{vLyDp`P@^BIr((T=v-P5)_&(u-~7j8k>y}8U^=z)pSL|@G%%F z{&v4pf}e_RrSLDssyA!BM9+W1xWAE^3x`Nq$nQm> z#rQSilGn`!Q*d#EwHdx)=N8niNa|XKd-WktS3GQL=M=<|=#G0qS?Nikk1BFvLS3e3 zsJ5J0h$f1O)~E^Ni%%&L1GCVfzDidTVJUUdJCE4}oxVw7{kCy!);z}J3M86eykARl z61w;7U!!n8lMUp@60fAkt8e0_QvK&|T#XpAHW4V#=S*lQkKMy82SeAQSEC;~!;b%7 zR@};b^*U$VEMe;52)FgLDypj@yZ%cf9RbFlAFvt!_~-NatI36$5IG*>k*^fj-}u6G 
zhG-b~&vWT@62od0m8Gw=i=P_hBF;Kl(O`W26^6~|$=6sg>zx=SzkFK$;(Qi89!n6> zU=bY?YM1V@YXe~%EfhWF1sex@Io;@}@bHj0y&<7T#U{*WNy?hx$Q+1kG$9NU454^7 zB*jE|+;*o0CZo;?m8$fU%g#hH3Mp8N(CRj;7&*$>nld!AsSw3VkPW?=)UB^>@0Q! z)ngpTL#Up6qCvJM=rok$?F_!p>L^1%84iQb4*s;>)@wZxGk}mxmquLR2G5{wSxp^m zenRA4m}QfD^X?ayvq?itM5OuRo@PL9dxWTT$VN5Q--)@H1S!OSX#vn5uT#tnIoGBm zT#SF#yQ;6FGj=~P^IPet!%mDg`jzA7{uz10k5BNz_>!`SZHe%gByuaGPVkOQocviY zjj*^YOu>*+#5tSVK8U#8{LOGxz@1<}(_ZX4x7zK%_asnn91=>I!j zPZVE8EO!7(pWuoo$Df4^v*E|rwtR&&=Gdx< zmbk(dJ{N~3ee2flH1za|fm6QnG25F&n|k6Q^Jq6J{ zEAB5O8Q+Ba=*T|*vtEX^C!gJ*T6ynw%&%&Le<*;z2|QBMX4K4xB|(7MT)8L0Th|_M zfO#x8%0YRJ(L9 zqt33Ud^_h!iA{L=!-bjXbrWc&sa`!S!wNWWbI7q-Ya+xLkLR<%O|G8NTZ}#ZYu%iA zQQT-js%lv%+;lG5tZQOyz}n-c&F(5NOk&URf?FkOJhC`b`tWg|lIQ*T;xKn!KVX7b zAknE`yQhcF&ycH%ovpm!;MQC(a5N_C>H#12)$4Bye=xJ>3ce=|3MiS`c8SU}Ul^h4 zg`k|YByB`R7{+%aMS|`A+qVJ@)W5EU@#ACK5~O>{xy}Sq2nRyPIfV%&cb@xLkP%3Q z2=j2ZV^h$25Qt(K#bij4LQ?elC6It4rHGcY*t~mN{axQF?gDzG$j>T|C6;)Bq>6)Z z$$#G8Scc-d?-``z2q0n6aEFfu`@wG$;rksU7#QS+R5~=vreen}V1liD;}WTf>pT$R zDU)J*IF92n5$skFH`~y~FD{c~FWwEvMD%CrPjr;y7kg`UG0kIOR^Xoj66vC zkupmd59qF*V*UiGXTWchETte{gd(LQzOifq-R^4H^QD;5)?Q?X!rMl$}OX(61e0u zu|M~U5^cGEYjHCjVu^bN0ywALTW(RKU_cvHP5V5IBmQ4UCt#kWm4j|>p?rM35@a(t zi3A=X*kr~8#ZGskzHdiJC!YyVT_n8LlojA zimby+USyfErJ006nnF~A$*x{9vX7W}F_vK>SwfTuW3nb?_>Zpthu-CUIoCP&b>Gi( zU)QzthoV!gn^PTOo&AU*zPF)1S# zj$Z6@Pq2+-j+R3(?g6yP?*82KwsN%F3hB9G`)J!ZBwNf+u$-SDWp7awn zCB)^DVYGeic`hH(Xf}>Cj7%~_+lH|fQ+HA^nc-cw?z8-DisFP>wv7Khi(-d^YB07; z`92rk@speAtsBkWM8YGJ;OM_Xy9J3uM$;c6GcMTXFd!X3$T){)Z}J&zGV2H+jJe}z zjPy*n25CDZ1WqFR^Ta#O6C+=;c-fx`*Blec%Ix;YG!3~^(qGxD`+4GAjsR-wIob&@ z*sd}YUn|#I{$li?t&6~hJ9iWdET&<8wPzOa;ALOPqUp`vx=CsyKT~KVq`pEhY%_T# zruYssWRb~Dt+MZXCzWrcO&~kPF6()0rf?pq;ac;p%bsw~hPw0yd^e3fw7A(Y6F4au z)_xIh&{{p5ofEiSCMf<#Ji~c*>y<2p@D-5=eCjj@0GlDsi?e%myn6%`K3?Z)gVcu_Zh(<-pO7kS%dQdfrvptl+e7hXH(pV}{;it1&}oLZQPUy!0Y z^*qwWl^CFl$_qG-1c5GlfZQ4bQkV)58lHRq#&t`uqnL4@Ls@?MfcjRVd9ymQzUN*i zCOc&=nSAMzRpSep0Rl`qg|c`J(bY&fkvh^;xUzD26YF&cSMyzULRb8{AGf;yZqgUl 
zw*A*#zi5M^K_SXytzJ>=_Qud+M%FYZGEOCa2W)rgvZ9(!J*T}^{_uI!_69<}!GzEv z+yj*egnN(L|yW8|TmO}THsM3oa*s*0SkYIfQ!$?qj zvsB-1az!L`_y(ydSi-GB+BA4gQaL+b&nklFkyM%K^k5{h%loP@s>Au@&85y%@^@+9w5gh8^-mL-Rh;^4+gNS`Zs9VL-m;0J$i0dAa^xr zBU)B+kk_i6$NA}M_#j8`7onk!w&6yVS?N#b>TxA-bj=D7;Q2jfdfDkMOscf2@!Aod zCL>^IQ7&ojGzypI3D-w5BCE~)f&Da$)Z|n(jx2Uq=li-nq4t0O}*|hJpn@^WW zNH&V*Y-on=Ud%B+dDyu^N2ij@z7gMf^C6)^gqJBaAD=|&Jd(KXiOxV#hkITP*Bvd= zxawykHXCxFOH1Uyc>DE6OUZ0K)t6G&836efD7{Ra=;IY-bGBtT(Z1iq)N>R0t#yin z(E59G11Uhk@KbI9!Y9=z!6t{D5nNI8a}9Axwmp=35>q; z)f2TQJ&+F@=fM#ZwrEJuE*a1eIu`ZnBNONL+|W(D*i$4~O9W&CL)Q=VwU}VL-vh#- z3wzw!*ACJv`Btnoi3T22$vs?A+xgXPkxSXo$+n#149 zg+A8(_EAwM{FKgeEOxiTzwm4}jr}a%fRe>_#q&IucekM^uwc*xj&fm%DErn#j5|x4 zcf2zYr4VfZ?YF&Rap%tnZ0!tYniYa8Hd)-`evWV^?Je31Q14&9Q#7GJE@Izo41#`P zcNAF3k;!IFt;8x^Rxe`B1(JFfB}e{i?8%m9#0~S16{s9AJ%7Y+YV9w86CY>o7oNOl zy!I|)`b$c}24O(lCY&g10Q$7$Gv2I!4`s!*jyUEl99BGtw+8E-LXLljyb@JaLF ze-U?$Ux0?Z5BQ_1`@Zc>1t)Ewk;lUqtAPnmA?5TRVxK5pp!iX#i3#=OyPU=XYh&a| zJm0)UD8#W=cD8^JJpAyudUS<)>h=V!?X}Vmv~2>vxAoWG6eaA7g;!?H5D5-gn{=qJ zs|WjJ$r_@sT#s^L2wiKTy5a>i?%Oeht{5F`#BV~rxlEDBvCdQ~KEcu?vpUqw{Ls;z0S~S^SuixP>jX9kJXQwjHczGhXqdR#hMded z8&&^)^g(XO9HCk@?XB5r%oABdt)Pz~mc*S%=7T?)GdIZMF;;mhj&$hYC0Myjyj2I^ z>0>#ML*~-*jQH0JA2)Me2R`m@kpsn_(hPdn&Cii|b!w7|>&+9P9^#>U#cTuH>YOw< zd}rGu_y=lO7HkH5E7_M*`xeW~p;6|uFOG49<*O#+BJ4l{;`wGt44PZm3txDb7R;hj z?N-73#$00B-7b~j8Sm)U;63f%in$|V Text and Video -> Audio. 
+ vid2txt_embd_module, vid2aud_embd_module = self._get_pair_embedding_heads( + embedding_dim_1=self._cfg_vid_txt["embedding_dim"], + embedding_dim_2=self._cfg_vid_aud["embedding_dim"], + mode1=self._cfg_vid_txt["totxt_head_mode"], + mode2=self._cfg_vid_aud["toaud_head_mode"], + use_bn_out1=self._cfg_vid_txt["totxt_bn_after_proj"], + use_bn_out2=self._cfg_vid_aud["toaud_bn_after_proj"], + name1="vis_embd", + name2="vid2audio_embd") + + video_embd = {} + if self._mm_embedding_graph in ["shared", "disjoint"]: + video_embd["toaud"] = vid2aud_embd_module(visual_representation, + is_training=is_training) + video_embd["totxt"] = vid2txt_embd_module(visual_representation, + is_training=is_training) + elif self._mm_embedding_graph.startswith("fac"): + # Activation function if specificed in the name, e.g. fac_relu. + activation_fn = None + if len(self._mm_embedding_graph.split("_")) == 2: + activation_fn = self._mm_embedding_graph.split("_")[1] + + video_embd["toaud"] = vid2aud_embd_module(visual_representation, + is_training=is_training) + fine_rep = video_embd["toaud"] + # Eventually activate the fine grained representation. + if activation_fn: + fine_rep, activation_module = self._activate_interaction( + inputs=fine_rep, activation_fn=activation_fn, + is_training=is_training) + + video_embd["totxt"] = vid2txt_embd_module(fine_rep, + is_training=is_training) + else: + raise ValueError( + f"{self._mm_embedding_graph} is not a valid MM embedding graph.") + + # Computes the audio representation. + audio_cnn = AudioModule(backbone=self._audio_backbone, + use_xreplica_bn=self._use_xreplica_bn, + model_kwargs=self._audio_model_kwargs) + if return_intermediate_audio: + return audio_cnn(audio_spectrogram, + is_training=is_training, + return_intermediate=True) + + audio_representation = audio_cnn(audio_spectrogram, is_training=is_training) + + # Projection heads: Audio -> Video and Audio -> Text. 
+ aud2vid_embd_module, aud2txt_embd_module = self._get_pair_embedding_heads( + embedding_dim_1=self._cfg_vid_aud["embedding_dim"], + embedding_dim_2=self._cfg_aud_txt["embedding_dim"], + mode1=self._cfg_vid_aud["tovid_head_mode"], + mode2=self._cfg_aud_txt["totxt_head_mode"], + use_bn_out1=self._cfg_vid_aud["tovid_bn_after_proj"], + use_bn_out2=self._cfg_aud_txt["totxt_bn_after_proj"], + name1="audio_embd", + name2="audio2txt_embd") + audio_embd = {} + + audio_embd["tovid"] = aud2vid_embd_module(audio_representation, + is_training=is_training) + + # Computes the projection to the text domain depending on the MM graph mode. + if (self._mm_embedding_graph.startswith("fac") and + (self._use_audio_text or (not is_training))): + # In case the audio text branch is not used during training, we do that + # only at eval time (is_training=False) in order to not pollute the BN + # stats in vid2txt_embd_module with audio features during training. + fine_rep_audio = audio_embd["tovid"] + if activation_fn: + fine_rep_audio, _ = self._activate_interaction( + inputs=fine_rep_audio, activation_fn=activation_fn, + is_training=is_training, activation_module=activation_module) + audio_embd["totxt"] = vid2txt_embd_module(fine_rep_audio, + is_training=is_training) + else: + audio_embd["totxt"] = aud2txt_embd_module(audio_representation, + is_training=is_training) + + # Computes the text representation. + txt_representation = TextModule( + sentence_dim=self._sentence_dim, + word_embedding_matrix=self._word_embedding_matrix)( + word_ids, is_training=is_training) + + # Projection heads: Text -> Video and Text -> Audio. 
+ txt2vid_embd_module, txt2aud_embd_module = self._get_pair_embedding_heads( + embedding_dim_1=self._cfg_vid_txt["embedding_dim"], + embedding_dim_2=self._cfg_aud_txt["embedding_dim"], + mode1=self._cfg_vid_txt["tovid_head_mode"], + mode2=self._cfg_aud_txt["toaud_head_mode"], + use_bn_out1=self._cfg_vid_txt["tovid_bn_after_proj"], + use_bn_out2=self._cfg_aud_txt["toaud_bn_after_proj"], + name1="txt_embd", + name2="txt2audio_embd") + txt_embd = {} + txt_embd["tovid"] = txt2vid_embd_module(txt_representation, + is_training=is_training) + txt_embd["toaud"] = txt2aud_embd_module(txt_representation, + is_training=is_training) + + return { + "vid_embd": video_embd, + "aud_embd": audio_embd, + "txt_embd": txt_embd, + "vid_repr": visual_representation, + "aud_repr": audio_representation, + } + + +class EmbeddingModule(hk.Module): + """Final Embedding module.""" + + def __init__(self, + embedding_dim: int, + mode: str = "linear", + use_bn_out: bool = False, + bn_config: Dict[str, Any] = None, + use_xreplica_bn: bool = True, + name="embedding_module"): + self._embedding_dim = embedding_dim + self._use_bn_out = use_bn_out + self._mode = mode + # Set default BN config. + bn_config = bn_config or _DEFAULT_CFG_BN + if use_xreplica_bn: + normalizer_name = "cross_replica_batch_norm" + else: + normalizer_name = "batch_norm" + self._batch_norm = normalization.get_normalize_fn( + normalizer_name=normalizer_name, + normalizer_kwargs=bn_config) + + super(EmbeddingModule, self).__init__(name=name) + + def __call__(self, input_feature, is_training): + if self._mode == "linear": + proj = hk.Linear(self._embedding_dim, name="final_projection") + embedding = proj(input_feature) + elif self._mode.startswith("mlp"): + if "@" not in self._mode: + raise ValueError( + ("Please specify the inner dimensions of the MLP with `@` symbol" + "e.g. 
mlp@512 or mlp@512@256 for a 2 layer MLP.")) + inner_dims = [int(dim) for dim in self._mode.split("@")[1:]] + embedding = input_feature + for inner_dim in inner_dims: + embedding = hk.Linear(inner_dim, with_bias=True, + name="final_projection_inner")(embedding) + if not self._mode.startswith("mlp_nobn"): + embedding = self._batch_norm(embedding, is_training=is_training) + embedding = jax.nn.relu(embedding) + + # Final projection. + embedding = hk.Linear(self._embedding_dim, name="final_projection", + with_bias=not self._use_bn_out)(embedding) + else: + raise NotImplementedError + + if self._use_bn_out: + embedding = self._batch_norm(embedding, is_training=is_training) + return embedding + + +class VisualModule(hk.Module): + """The visual module selects which CNN backbone to connect to the graph.""" + + def __init__(self, + use_xreplica_bn=True, + backbone="s3d", + model_kwargs=None, + name="visual_module"): + self._backbone = backbone + super(VisualModule, self).__init__(name=name) + if model_kwargs is None: + model_kwargs = {} + bn_config = model_kwargs.get("bn_config", _DEFAULT_CFG_BN) + if use_xreplica_bn: + normalizer_name = "cross_replica_batch_norm" + else: + normalizer_name = "batch_norm" + + normalize_fn = normalization.get_normalize_fn( + normalizer_name=normalizer_name, + normalizer_kwargs=bn_config) + if backbone == "s3d": + self._cnn = s3d.S3D(normalize_fn=normalize_fn) + elif backbone == "resnet50tsm": + width_mult = model_kwargs.get("width_mult", 1) + self._cnn = tsm_resnet.TSMResNetV2( + normalize_fn=normalize_fn, + depth=50, + num_frames=model_kwargs["n_frames"], + width_mult=width_mult) + else: + raise NotImplementedError + + def __call__(self, images, is_training): + """Connects graph to images.""" + features = self._cnn(images, is_training=is_training) + return features + + +class AudioModule(hk.Module): + """The audio module selects which CNN backbone to connect to the graph.""" + + def __init__(self, + backbone="resnet18", + 
use_xreplica_bn=True, + model_kwargs=None, + name="audio_module"): + super(AudioModule, self).__init__(name=name) + model_kwargs = model_kwargs or {} + bn_config = model_kwargs.get("bn_config", _DEFAULT_CFG_BN) + backbone_to_depth = { + "resnet18": 18, + "resnet34": 34, + "resnet50": 50, + "resnet101": 101 + } + assert backbone in backbone_to_depth, ( + f"backbone should be in {backbone_to_depth.keys()}") + + if use_xreplica_bn: + normalizer_name = "cross_replica_batch_norm" + else: + normalizer_name = "batch_norm" + + self._cnn = resnet.ResNetV2( + depth=backbone_to_depth[backbone], + normalize_fn=normalization.get_normalize_fn( + normalizer_name=normalizer_name, + normalizer_kwargs=bn_config), + num_classes=None) + + def __call__(self, + audio_spectrogram, + is_training, + return_intermediate=False): + """Connects graph to audio spectrogram.""" + final_endpoint = "output" + if return_intermediate: + final_endpoint = "last_conv" + + return self._cnn(audio_spectrogram, + is_training=is_training, + final_endpoint=final_endpoint) + + +class TextModule(hk.Module): + """Text module computes the sentences representation.""" + + def __init__(self, + word_embedding_matrix, + sentence_dim=1024, + name="text_module"): + """Initialize text module. + + Args: + word_embedding_matrix: 2d matrix [vocab_size, embed_size] to embed words. + sentence_dim: dimension of sentence representation. + name: module name. 
+ """ + super(TextModule, self).__init__(name=name) + self._word_embedding_module = hk.Embed( + embedding_matrix=word_embedding_matrix) + self._conv1d_module = hk.Conv1D(sentence_dim, 1, name="text_conv1") + + def __call__(self, word_ids, is_training): + """Connects graph to sentence representation.""" + word_embeddings = self._word_embedding_module(word_ids) + word_embeddings = jax.lax.stop_gradient(word_embeddings) + output = self._conv1d_module(word_embeddings) + output = jax.nn.relu(output) + output = jnp.amax(output, axis=1) + return output diff --git a/mmv/models/normalization.py b/mmv/models/normalization.py new file mode 100644 index 0000000..639833f --- /dev/null +++ b/mmv/models/normalization.py @@ -0,0 +1,143 @@ +# Copyright 2020 DeepMind Technologies Limited. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Normalize functions constructors.""" + +from typing import Any, Dict, Optional, Sequence, Union + +import haiku as hk +from jax import numpy as jnp + +from mmv.models import types + + +class _BatchNorm(hk.BatchNorm): + """A `hk.BatchNorm` with adapted default arguments.""" + + def __init__(self, + create_scale: bool = True, + create_offset: bool = True, + decay_rate: float = 0.9, + eps: float = 1e-5, + test_local_stats: bool = False, + **kwargs): + # Check args. + if kwargs.get('cross_replica_axis', None) is not None: + raise ValueError( + 'Attempting to use \'batch_norm\' normalizer, but specifying ' + '`cross_replica_axis`. 
If you want this behavior use ' + '`normalizer=\'cross_replica_batch_norm\'` directly.') + + self._test_local_stats = test_local_stats + super().__init__(create_scale=create_scale, + create_offset=create_offset, + decay_rate=decay_rate, + eps=eps, + **kwargs) + + def __call__(self, + x: types.TensorLike, + is_training: bool) -> jnp.ndarray: + return super().__call__(x, is_training, + test_local_stats=self._test_local_stats) + + +class _CrossReplicaBatchNorm(hk.BatchNorm): + """A `hk.BatchNorm` with adapted default arguments for cross replica.""" + + def __init__(self, + create_scale: bool = True, + create_offset: bool = True, + decay_rate: float = 0.9, + eps: float = 1e-5, + test_local_stats: bool = False, + **kwargs): + # Check args. + if 'cross_replica_axis' in kwargs and kwargs['cross_replica_axis'] is None: + raise ValueError( + 'Attempting to use \'cross_replica_batch_norm\' normalizer, but ' + 'specifying `cross_replica_axis` to be None. If you want this ' + 'behavior use `normalizer=\'batch_norm\'` directly.') + + self._test_local_stats = test_local_stats + kwargs['cross_replica_axis'] = kwargs.get('cross_replica_axis', 'i') + super().__init__(create_scale=create_scale, + create_offset=create_offset, + decay_rate=decay_rate, + eps=eps, + **kwargs) + + def __call__(self, + x: types.TensorLike, + is_training: bool) -> jnp.ndarray: + return super().__call__(x, is_training, + test_local_stats=self._test_local_stats) + + +class _LayerNorm(hk.LayerNorm): + """A `hk.LayerNorm` accepting (and discarding) an `is_training` argument.""" + + def __init__(self, + axis: Union[int, Sequence[int]] = (1, 2), + create_scale: bool = True, + create_offset: bool = True, + **kwargs): + super().__init__(axis=axis, + create_scale=create_scale, + create_offset=create_offset, + **kwargs) + + def __call__(self, + x: types.TensorLike, + is_training: bool) -> jnp.ndarray: + del is_training # Unused. 
+ return super().__call__(x) + + +_NORMALIZER_NAME_TO_CLASS = { + 'batch_norm': _BatchNorm, + 'cross_replica_batch_norm': _CrossReplicaBatchNorm, + 'layer_norm': _LayerNorm, +} + + +def get_normalize_fn( + normalizer_name: str = 'batch_norm', + normalizer_kwargs: Optional[Dict[str, Any]] = None, +) -> types.NormalizeFn: + """Handles NormalizeFn creation. + + These functions are expected to be used as part of Haiku model. On each + application of the returned normalization_fn, a new Haiku layer will be added + to the model. + + Args: + normalizer_name: The name of the normalizer to be constructed. + normalizer_kwargs: The kwargs passed to the normalizer constructor. + + Returns: + A `types.NormalizeFn` that when applied will create a new layer. + + Raises: + ValueError: If `normalizer_name` is unknown. + """ + # Check args. + if normalizer_name not in _NORMALIZER_NAME_TO_CLASS: + raise ValueError(f'Unrecognized `normalizer_name` {normalizer_name}.') + + normalizer_class = _NORMALIZER_NAME_TO_CLASS[normalizer_name] + normalizer_kwargs = normalizer_kwargs or dict() + + return lambda *a, **k: normalizer_class(**normalizer_kwargs)(*a, **k) # pylint: disable=unnecessary-lambda diff --git a/mmv/models/resnet.py b/mmv/models/resnet.py new file mode 100644 index 0000000..a395593 --- /dev/null +++ b/mmv/models/resnet.py @@ -0,0 +1,329 @@ +# Copyright 2020 DeepMind Technologies Limited. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Lint as: python3. 
+"""ResNet V2 modules. + + Equivalent to hk.Resnet except accepting a final_endpoint to return + intermediate activations. +""" + +from typing import Optional, Sequence, Text, Type, Union + +import haiku as hk +import jax +import jax.numpy as jnp + +from mmv.models import types + + +class BottleneckBlock(hk.Module): + """Implements a bottleneck residual block (ResNet50 and ResNet101).""" + + # pylint:disable=g-bare-generic + def __init__(self, + channels: int, + stride: Union[int, Sequence[int]], + use_projection: bool, + normalize_fn: Optional[types.NormalizeFn] = None, + name: Optional[Text] = None): + super(BottleneckBlock, self).__init__(name=name) + self._channels = channels + self._stride = stride + self._use_projection = use_projection + self._normalize_fn = normalize_fn + + if self._use_projection: + self._proj_conv = hk.Conv2D( + output_channels=channels, + kernel_shape=1, + stride=stride, + with_bias=False, + padding='SAME', + name='shortcut_conv') + + self._conv_0 = hk.Conv2D( + output_channels=channels // 4, + kernel_shape=1, + stride=1, + with_bias=False, + padding='SAME', + name='conv_0') + + self._conv_1 = hk.Conv2D( + output_channels=channels // 4, + kernel_shape=3, + stride=stride, + with_bias=False, + padding='SAME', + name='conv_1') + + self._conv_2 = hk.Conv2D( + output_channels=channels, + kernel_shape=1, + stride=1, + with_bias=False, + padding='SAME', + name='conv_2') + + def __call__(self, + inputs, + is_training): + net = inputs + shortcut = inputs + + for i, conv_i in enumerate([self._conv_0, self._conv_1, self._conv_2]): + if self._normalize_fn is not None: + net = self._normalize_fn(net, is_training=is_training) + net = jax.nn.relu(net) + if i == 0 and self._use_projection: + shortcut = self._proj_conv(net) + + # Now do the convs. 
+ net = conv_i(net) + + return net + shortcut + + +class BasicBlock(hk.Module): + """Implements a basic residual block (ResNet18 and ResNet34).""" + + # pylint:disable=g-bare-generic + def __init__(self, + channels: int, + stride: Union[int, Sequence[int]], + use_projection: bool, + normalize_fn: Optional[types.NormalizeFn] = None, + name: Optional[Text] = None): + super(BasicBlock, self).__init__(name=name) + self._channels = channels + self._stride = stride + self._use_projection = use_projection + self._normalize_fn = normalize_fn + + if self._use_projection: + self._proj_conv = hk.Conv2D( + output_channels=channels, + kernel_shape=1, + stride=stride, + with_bias=False, + padding='SAME', + name='shortcut_conv') + + self._conv_0 = hk.Conv2D( + output_channels=channels, + kernel_shape=1, + stride=1, + with_bias=False, + padding='SAME', + name='conv_0') + + self._conv_1 = hk.Conv2D( + output_channels=channels, + kernel_shape=3, + stride=stride, + with_bias=False, + padding='SAME', + name='conv_1') + + def __call__(self, + inputs, + is_training): + net = inputs + shortcut = inputs + + for i, conv_i in enumerate([self._conv_0, self._conv_1]): + if self._normalize_fn is not None: + net = self._normalize_fn(net, is_training=is_training) + net = jax.nn.relu(net) + if i == 0 and self._use_projection: + shortcut = self._proj_conv(net) + + # Now do the convs. 
+ net = conv_i(net) + + return net + shortcut + + +class ResNetUnit(hk.Module): + """Unit (group of blocks) for ResNet.""" + + # pylint:disable=g-bare-generic + def __init__(self, + channels: int, + num_blocks: int, + stride: Union[int, Sequence[int]], + block_module: Type[BottleneckBlock], + normalize_fn: Optional[types.NormalizeFn] = None, + name: Optional[Text] = None, + remat: bool = False): + super(ResNetUnit, self).__init__(name=name) + self._channels = channels + self._num_blocks = num_blocks + self._stride = stride + self._normalize_fn = normalize_fn + self._block_module = block_module + self._remat = remat + + def __call__(self, + inputs, + is_training): + + input_channels = inputs.shape[-1] + + self._blocks = [] + for id_block in range(self._num_blocks): + use_projection = id_block == 0 and self._channels != input_channels + self._blocks.append( + self._block_module( + channels=self._channels, + stride=self._stride if id_block == 0 else 1, + use_projection=use_projection, + normalize_fn=self._normalize_fn, + name='block_%d' % id_block)) + + net = inputs + for block in self._blocks: + if self._remat: + # Note: we can ignore cell-var-from-loop because the lambda is evaluated + # inside every iteration of the loop. This is needed to go around the + # way variables are passed to jax.remat. + net = hk.remat(lambda x: block(x, is_training=is_training))(net) # pylint: disable=cell-var-from-loop + else: + net = block(net, is_training=is_training) + return net + + +class ResNetV2(hk.Module): + """ResNetV2 model.""" + + # Endpoints of the model in order. + VALID_ENDPOINTS = ( + 'resnet_stem', + 'resnet_unit_0', + 'resnet_unit_1', + 'resnet_unit_2', + 'resnet_unit_3', + 'last_conv', + 'output', + ) + + # pylint:disable=g-bare-generic + def __init__(self, + depth=50, + num_classes: Optional[int] = 1000, + width_mult: int = 1, + normalize_fn: Optional[types.NormalizeFn] = None, + name: Optional[Text] = None, + remat: bool = False): + """Creates ResNetV2 Haiku module. 
+ + Args: + depth: depth of the desired ResNet (18, 34, 50, 101, 152 or 202). + num_classes: (int) Number of outputs in final layer. If None will not add + a classification head and will return the output embedding. + width_mult: multiplier for channel width. + normalize_fn: normalization function, see helpers/utils.py + name: Name of the module. + remat: Whether to rematerialize intermediate activations (saves memory). + """ + super(ResNetV2, self).__init__(name=name) + self._normalize_fn = normalize_fn + self._num_classes = num_classes + self._width_mult = width_mult + + self._strides = [1, 2, 2, 2] + num_blocks = { + 18: [2, 2, 2, 2], + 34: [3, 4, 6, 3], + 50: [3, 4, 6, 3], + 101: [3, 4, 23, 3], + 152: [3, 8, 36, 3], + 200: [3, 24, 36, 3], + } + if depth not in num_blocks: + raise ValueError( + f'`depth` should be in {list(num_blocks.keys())} ({depth} given).') + self._num_blocks = num_blocks[depth] + + if depth >= 50: + self._block_module = BottleneckBlock + self._channels = [256, 512, 1024, 2048] + else: + self._block_module = BasicBlock + self._channels = [64, 128, 256, 512] + + self._initial_conv = hk.Conv2D( + output_channels=64 * self._width_mult, + kernel_shape=7, + stride=2, + with_bias=False, + padding='SAME', + name='initial_conv') + + if remat: + self._initial_conv = hk.remat(self._initial_conv) + + self._block_groups = [] + for i in range(4): + self._block_groups.append( + ResNetUnit( + channels=self._channels[i] * self._width_mult, + num_blocks=self._num_blocks[i], + block_module=self._block_module, + stride=self._strides[i], + normalize_fn=self._normalize_fn, + name='block_group_%d' % i, + remat=remat)) + + if num_classes is not None: + self._logits_layer = hk.Linear( + output_size=num_classes, w_init=jnp.zeros, name='logits') + + def __call__(self, inputs, is_training, final_endpoint='output'): + self._final_endpoint = final_endpoint + net = self._initial_conv(inputs) + net = hk.max_pool( + net, window_shape=(1, 3, 3, 1), + strides=(1, 2, 2, 1), + 
padding='SAME') + end_point = 'resnet_stem' + if self._final_endpoint == end_point: + return net + + for i_group, block_group in enumerate(self._block_groups): + net = block_group(net, is_training=is_training) + end_point = f'resnet_unit_{i_group}' + if self._final_endpoint == end_point: + return net + + end_point = 'last_conv' + if self._final_endpoint == end_point: + return net + + if self._normalize_fn is not None: + net = self._normalize_fn(net, is_training=is_training) + net = jax.nn.relu(net) + + # The actual representation + net = jnp.mean(net, axis=[1, 2]) + + assert self._final_endpoint == 'output' + if self._num_classes is None: + # If num_classes was None, we just return the output + # of the last block, without fully connected layer. + return net + + return self._logits_layer(net) diff --git a/mmv/models/s3d.py b/mmv/models/s3d.py new file mode 100644 index 0000000..db32fbc --- /dev/null +++ b/mmv/models/s3d.py @@ -0,0 +1,503 @@ +# Copyright 2020 DeepMind Technologies Limited. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A Haiku S3D model.""" + +import collections +from typing import Optional, Sequence + +import haiku as hk +import jax +from jax import numpy as jnp + +from mmv.models import types + + +class _MaxPool(hk.MaxPool): + """A `hk.MaxPool` accepting (and discarding) an `is_training` argument.""" + + def __call__(self, + x: types.TensorLike, + is_training: bool = True) -> jnp.ndarray: + del is_training # Unused. 
+ return super().__call__(x) + + +def self_gating(inputs: types.TensorLike) -> jnp.ndarray: + """Feature gating as used in S3D-G. + + Transforms the input features by aggregating features from all spatial and + temporal locations, and applying gating conditioned on the aggregated + features. More details can be found at: https://arxiv.org/abs/1712.04851. + + Args: + inputs: A 5-D float array of shape `[B, T, H, W, C]`. + + Returns: + A tensor with the same shape as input_tensor. + + Raises: + ValueError: If `inputs` has the wrong shape. + """ + if inputs.ndim != 5: + raise ValueError( + f'Expected an input of shape `[B, T, H, W, C]` but got {inputs.shape}.') + + input_shape = inputs.shape + num_channels = input_shape[4] + spatiotemporal_average = jnp.mean(inputs, axis=(1, 2, 3)) + weights = hk.Linear(num_channels, name='self_gating')(spatiotemporal_average) + weights = jax.nn.sigmoid(weights) + return jnp.multiply(weights[:, None, None, None, :], inputs) + + +class SUnit3D(hk.Module): + """Base 3d Unit combining Conv3d + Batch Norm + non-linearity.""" + + def __init__( + self, + output_channels: int, + kernel_shape: Sequence[int] = (1, 1, 1), + stride: Sequence[int] = (1, 1, 1), + with_bias: bool = False, + separable: bool = False, + normalize_fn: Optional[types.NormalizeFn] = None, + activation_fn: Optional[types.ActivationFn] = jax.nn.relu, + self_gating_fn: Optional[types.GatingFn] = None, + name='SUnit3D'): + """Initializes the SUnit3D module. + + Args: + output_channels: Number of output channels. + kernel_shape: The shape of the kernel. A sequence of length 3. + stride: Stride for the kernel. A sequence of length 3. + with_bias: Whether to add a bias to the convolution. + separable: Whether to use separable. + normalize_fn: Function used for normalization. + activation_fn: Function used as non-linearity. + self_gating_fn: Function used for self-gating. + name: The name of the module. + + Raises: + ValueError: If `kernel_shape` or `stride` has the wrong shape. 
+ """ + super().__init__(name=name) + + # Check args. + if len(kernel_shape) != 3: + raise ValueError( + 'Given `kernel_shape` must have length 3 but has length ' + f'{len(kernel_shape)}.') + if len(stride) != 3: + raise ValueError( + f'Given `stride` must have length 3 but has length {len(stride)}.') + + self._normalize_fn = normalize_fn + self._activation_fn = activation_fn + self._self_gating_fn = self_gating_fn + + k0, k1, k2 = kernel_shape + if separable and k1 != 1: + spatial_kernel_shape = [1, k1, k2] + temporal_kernel_shape = [k0, 1, 1] + s0, s1, s2 = stride + spatial_stride = [1, s1, s2] + temporal_stride = [s0, 1, 1] + self._convolutions = [ + hk.Conv3D( + output_channels=output_channels, + kernel_shape=spatial_kernel_shape, + stride=spatial_stride, + padding='SAME', + with_bias=with_bias), + hk.Conv3D( + output_channels=output_channels, + kernel_shape=temporal_kernel_shape, + stride=temporal_stride, + padding='SAME', + with_bias=with_bias) + ] + + else: + self._convolutions = [ + hk.Conv3D( + output_channels=output_channels, + kernel_shape=kernel_shape, + stride=stride, + padding='SAME', + with_bias=with_bias)] + + def __call__( + self, + inputs: types.TensorLike, + is_training: bool) -> jnp.ndarray: + """Connects the module to inputs. + + Args: + inputs: A 5-D float array of shape `[B, T, H, W, C]`. + is_training: Whether to use training mode. + + Returns: + A 5-D float array of shape `[B, new_t, new_h, new_w, output_channels]`. + """ + x = inputs + for conv in self._convolutions: + x = conv(x) + if self._normalize_fn is not None: + x = self._normalize_fn(x, is_training=is_training) + if self._activation_fn is not None: + x = self._activation_fn(x) + if self._self_gating_fn: + x = self._self_gating_fn(x) + return x + + +class InceptionBlockV13D(hk.Module): + """A 3D Inception v1 block. + + This allows use of separable 3D convolutions and self-gating, as described in: + + Rethinking Spatiotemporal Feature Learning For Video Understanding. 
+ Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu and Kevin Murphy. + https://arxiv.org/abs/1712.04851. + """ + + def __init__(self, + output_channels: Sequence[int], + normalize_fn: Optional[types.NormalizeFn], + temporal_kernel_size: int = 3, + self_gating_fn: Optional[types.GatingFn] = None, + name: str = 'InceptionBlockV13D'): + """Initializes the InceptionBlockV13D module. + + Args: + output_channels: The size of the output channels of each block, ordered as + [Conv2d_0a_1x1, Conv2d_0a_1x1, Conv2d_0b_3x3, Conv2d_0a_1x1, + Conv2d_0b_3x3, Conv2d_0b_1x1] + normalize_fn: Function used for normalization. + temporal_kernel_size: The size of the temporal convolutional filters in + the conv3d_spatiotemporal blocks. + self_gating_fn: Function which optionally performs self-gating. If `None`, + no self-gating is applied. + name: The name of the module. + + Raises: + ValueError: If `output_channels` has the wrong shape. + """ + super().__init__(name=name) + + # Check args. + if len(output_channels) != 6: + raise ValueError( + 'Given `output_channels` must have length 6 but has length ' + f'{len(output_channels)}.') + + self._output_channels = output_channels + self._normalize_fn = normalize_fn + self._temporal_kernel_size = temporal_kernel_size + + if self_gating_fn is None: + self._self_gating_fn = lambda x: x + else: + self._self_gating_fn = self_gating_fn + + def __call__( + self, + inputs: types.TensorLike, + is_training: bool) -> jnp.ndarray: + """Connects the module to inputs. + + Args: + inputs: A 5-D float array of shape `[B, T, H, W, C]`. + is_training: Whether to use training mode. + + Returns: + A 5-D float array of shape + `[B, new_t, new_h, new_w, sum(output_channels)]`. 
+ """ + # Branch 0 + branch_0 = SUnit3D( + output_channels=self._output_channels[0], + kernel_shape=(1, 1, 1), + separable=False, + normalize_fn=self._normalize_fn, + self_gating_fn=self._self_gating_fn, + name='Branch_0_Conv2d_0a_1x1')( + inputs, is_training=is_training) + + # Branch 1 + branch_1 = SUnit3D( + output_channels=self._output_channels[1], + kernel_shape=(1, 1, 1), + separable=False, + normalize_fn=self._normalize_fn, + self_gating_fn=None, + name='Branch_1_Conv2d_0a_1x1')( + inputs, is_training=is_training) + branch_1 = SUnit3D( + output_channels=self._output_channels[2], + kernel_shape=(self._temporal_kernel_size, 3, 3), + separable=True, + normalize_fn=self._normalize_fn, + self_gating_fn=self._self_gating_fn, + name='Branch_1_Conv2d_0b_3x3')( + branch_1, is_training=is_training) + + # Branch 2 + branch_2 = SUnit3D( + output_channels=self._output_channels[3], + kernel_shape=(1, 1, 1), + separable=False, + normalize_fn=self._normalize_fn, + self_gating_fn=None, + name='Branch_2_Conv2d_0a_1x1')( + inputs, is_training=is_training) + branch_2 = SUnit3D( + output_channels=self._output_channels[4], + kernel_shape=(self._temporal_kernel_size, 3, 3), + separable=True, + normalize_fn=self._normalize_fn, + self_gating_fn=self._self_gating_fn, + name='Branch_2_Conv2d_0b_3x3')( + branch_2, is_training=is_training) + + # Branch 3 + branch_3 = hk.MaxPool( + window_shape=(1, 3, 3, 3, 1), + strides=(1, 1, 1, 1, 1), + padding='SAME', + name='Branch_3_MaxPool_0a_3x3')( + inputs) + branch_3 = SUnit3D( + output_channels=self._output_channels[5], + kernel_shape=(1, 1, 1), + separable=False, + normalize_fn=self._normalize_fn, + self_gating_fn=self._self_gating_fn, + name='Branch_3_Conv2d_0b_1x1')( + branch_3, is_training=is_training) + + return jnp.concatenate((branch_0, branch_1, branch_2, branch_3), axis=4) + + +_Layer = collections.namedtuple('_Layer', ('name', 'module', 'kwargs')) + + +class S3D(hk.Module): + """S3D architecture. 
+
+  Any intermediary representation can be obtained by choosing one of the valid
+  `final_endpoint`s. The final value returned by this model (when 'Embeddings'
+  is used as `final_endpoint`) is a single 1-D representation for each video in
+  the batch. Another layer can be externally added on top of that to obtain
+  logits.
+  """
+
+  # Endpoints of the model in order.
+  VALID_ENDPOINTS = (
+      'Conv2d_1a_7x7',
+      'MaxPool_2a_3x3',
+      'Conv2d_2b_1x1',
+      'Conv2d_2c_3x3',
+      'MaxPool_3a_3x3',
+      'Mixed_3b',
+      'Mixed_3c',
+      'MaxPool_4a_3x3',
+      'Mixed_4b',
+      'Mixed_4c',
+      'Mixed_4d',
+      'Mixed_4e',
+      'Mixed_4f',
+      'MaxPool_5a_2x2',
+      'Mixed_5b',
+      'Mixed_5c',
+      'Embeddings',
+  )
+
+  def __init__(self,
+               normalize_fn: Optional[types.NormalizeFn] = None,
+               first_temporal_kernel_size: int = 7,
+               temporal_conv_startat: Optional[str] = 'Conv2d_2c_3x3',
+               gating_startat: Optional[str] = 'Conv2d_2c_3x3',
+               name='S3D'):
+    """Initializes the S3D module.
+
+    Args:
+      normalize_fn: Function used for normalization.
+      first_temporal_kernel_size: Specifies the temporal kernel size for the
+        first conv3d filter. A larger value slows down the model but provides
+        little accuracy improvement. Must be set to one of 1, 3, 5 or 7.
+      temporal_conv_startat: Specifies the first conv block to use separable 3D
+        convs rather than 2D convs (implemented as [1, k, k] 3D conv). This is
+        used to construct the inverted pyramid models. 'Conv2d_2c_3x3' is the
+        first valid block to use separable 3D convs. If `None`, every block
+        keeps a temporal kernel size of 1, i.e. 2D convs are used throughout.
+      gating_startat: Specifies the first conv block to use self gating.
+        'Conv2d_2c_3x3' is the first valid block to use self gating. If `None`,
+        no self gating is applied in any block.
+      name: The name of the module.
+
+    Raises:
+      ValueError: If `temporal_conv_startat`, `gating_startat` or
+        `first_temporal_kernel_size` is not recognized.
+    """
+    super().__init__(name=name)
+    self._first_temporal_kernel_size = first_temporal_kernel_size
+    self._temporal_conv_startat = temporal_conv_startat
+    self._gating_startat = gating_startat
+    self._normalize_fn = normalize_fn
+
+    if (temporal_conv_startat not in self.VALID_ENDPOINTS
+        and temporal_conv_startat is not None):
+      raise ValueError(
+          f'Provided `temporal_conv_startat`: {temporal_conv_startat} not '
+          f'valid. It must be one of: {self.VALID_ENDPOINTS}, or `None`.')
+
+    if (gating_startat not in self.VALID_ENDPOINTS
+        and gating_startat is not None):
+      raise ValueError(
+          f'Provided `gating_startat`: {gating_startat} not valid. '
+          f'It must be one of: {self.VALID_ENDPOINTS}, or `None`.')
+
+    if first_temporal_kernel_size not in [1, 3, 5, 7]:
+      raise ValueError('`first_temporal_kernel_size` can only be 1, 3, 5 or 7.')
+
+  def __call__(self,
+               inputs: types.TensorLike,
+               is_training: bool,
+               final_endpoint: str = 'Embeddings') -> jnp.ndarray:
+    """Connects the model to inputs.
+
+    Args:
+      inputs: A 5-D float array of shape `[B, T, H, W, C]`.
+      is_training: Whether to use training mode.
+      final_endpoint: Up to which endpoint to run / return.
+
+    Returns:
+      Network output at location `final_endpoint`. A float array whose shape
+      depends on `final_endpoint`: a 5-D array of shape
+      `[B, new_t, new_h, new_w, channels]` for the convolutional and pooling
+      endpoints, or a 2-D array of shape `[B, channels]` when `final_endpoint`
+      is 'Embeddings' (features are then averaged over the spatio-temporal
+      dimensions; see the final `jnp.mean` below).
+
+    Raises:
+      ValueError: If `final_endpoint` is not recognized.
+    """
+    if final_endpoint not in self.VALID_ENDPOINTS:
+      raise ValueError(f'Provided final_endpoint: {final_endpoint} not valid.'
+                       f' It must be one of: {self.VALID_ENDPOINTS}')
+
+    x = inputs
+
+    # We define layers with tuples (name, module, kwargs)
+    # Not all kwargs are present, as we will need to fill in certain properties
+    # as we move down the network.
+ layers = [] + + # The first layer is conditional on the input data shape: the channel size + # is used to identify whether the `space_to_depth` transformation has been + # applied to the input. This is used to speed up computation on TPUs. + if x.shape[-1] == 3: + layers.append( + _Layer('Conv2d_1a_7x7', SUnit3D, + dict(output_channels=64, stride=(2, 2, 2), separable=False, + kernel_shape=(self._first_temporal_kernel_size, 7, 7), + normalize_fn=self._normalize_fn))) + else: + layers.append( + _Layer('Conv2d_1a_7x7', SUnit3D, + dict(output_channels=64, kernel_shape=(2, 4, 4), + stride=(1, 1, 1), separable=False, + normalize_fn=self._normalize_fn))) + + layers.extend([ + _Layer('MaxPool_2a_3x3', _MaxPool, + dict(window_shape=(1, 1, 3, 3, 1), strides=(1, 1, 2, 2, 1), + padding='SAME')), + _Layer('Conv2d_2b_1x1', SUnit3D, + dict(output_channels=64, kernel_shape=(1, 1, 1), + normalize_fn=self._normalize_fn)), + _Layer('Conv2d_2c_3x3', SUnit3D, + dict(output_channels=192, separable=True, + normalize_fn=self._normalize_fn)), + _Layer('MaxPool_3a_3x3', _MaxPool, + dict(window_shape=(1, 1, 3, 3, 1), strides=(1, 1, 2, 2, 1), + padding='SAME')), + _Layer('Mixed_3b', InceptionBlockV13D, + dict(output_channels=(64, 96, 128, 16, 32, 32), + normalize_fn=self._normalize_fn)), + _Layer('Mixed_3c', InceptionBlockV13D, + dict(output_channels=(128, 128, 192, 32, 96, 64), + normalize_fn=self._normalize_fn)), + _Layer('MaxPool_4a_3x3', _MaxPool, + dict(window_shape=(1, 3, 3, 3, 1), strides=(1, 2, 2, 2, 1), + padding='SAME')), + _Layer('Mixed_4b', InceptionBlockV13D, + dict(output_channels=(192, 96, 208, 16, 48, 64), + normalize_fn=self._normalize_fn)), + _Layer('Mixed_4c', InceptionBlockV13D, + dict(output_channels=(160, 112, 224, 24, 64, 64), + normalize_fn=self._normalize_fn)), + _Layer('Mixed_4d', InceptionBlockV13D, + dict(output_channels=(128, 128, 256, 24, 64, 64), + normalize_fn=self._normalize_fn)), + _Layer('Mixed_4e', InceptionBlockV13D, + dict(output_channels=(112, 144, 
288, 32, 64, 64),
+                    normalize_fn=self._normalize_fn)),
+        _Layer('Mixed_4f', InceptionBlockV13D,
+               dict(output_channels=(256, 160, 320, 32, 128, 128),
+                    normalize_fn=self._normalize_fn)),
+        _Layer('MaxPool_5a_2x2', _MaxPool,
+               dict(window_shape=(1, 2, 2, 2, 1), strides=(1, 2, 2, 2, 1),
+                    padding='SAME')),
+        _Layer('Mixed_5b', InceptionBlockV13D,
+               dict(output_channels=(256, 160, 320, 32, 128, 128),
+                    normalize_fn=self._normalize_fn)),
+        _Layer('Mixed_5c', InceptionBlockV13D,
+               dict(output_channels=(384, 192, 384, 48, 128, 128),
+                    normalize_fn=self._normalize_fn)),
+    ])
+
+    # These parameters may change throughout the computation.
+    self_gating_fn = None
+    temporal_kernel_size = 1
+
+    # Iterate over layers.
+    for layer in layers:
+      # Turn on self-gating / separable 3D convs once the start layer is hit.
+      if layer.name == self._gating_startat:
+        self_gating_fn = self_gating
+      if layer.name == self._temporal_conv_startat:
+        temporal_kernel_size = 3
+
+      kwargs = layer.kwargs
+
+      if layer.module is SUnit3D:
+        kwargs['self_gating_fn'] = self_gating_fn
+        if 'kernel_shape' not in kwargs:
+          kwargs['kernel_shape'] = (temporal_kernel_size, 3, 3)
+
+      elif layer.module is InceptionBlockV13D:
+        kwargs['self_gating_fn'] = self_gating_fn
+        kwargs['temporal_kernel_size'] = temporal_kernel_size
+
+      module = layer.module(name=layer.name, **kwargs)
+      x = module(x, is_training=is_training)
+      if final_endpoint == layer.name:
+        return x
+
+    assert final_endpoint == 'Embeddings'
+    return jnp.mean(x, axis=(1, 2, 3))
diff --git a/mmv/models/s3d_test.py b/mmv/models/s3d_test.py
new file mode 100644
index 0000000..74a0756
--- /dev/null
+++ b/mmv/models/s3d_test.py
@@ -0,0 +1,88 @@
+# Copyright 2020 DeepMind Technologies Limited.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for s3d.""" + +from absl.testing import absltest +from absl.testing import parameterized + +import haiku as hk +import jax +import numpy as np + +from mmv.models import normalization +from mmv.models import s3d + + +class _CallableS3D: + """Wrapper around S3D that take care of parameter book keeping.""" + + def __init__(self, *args, **kwargs): + self._model = hk.transform_with_state( + lambda *a, **k: # pylint: disable=g-long-lambda,unnecessary-lambda + s3d.S3D( + normalize_fn=normalization.get_normalize_fn(), + *args, **kwargs)(*a, **k)) + self._rng = jax.random.PRNGKey(42) + self._params, self._state = None, None + + def init(self, inputs, **kwargs): + self._params, self._state = self._model.init( + self._rng, inputs, is_training=True, **kwargs) + + def __call__(self, inputs, **kwargs): + if self._params is None: + self.init(inputs) + output, _ = self._model.apply( + self._params, self._state, self._rng, inputs, **kwargs) + return output + + +class S3DTest(parameterized.TestCase): + + # Testing all layers is quite slow, added in comments for completeness. 
+  @parameterized.parameters(
+      # dict(endpoint='Conv2d_1a_7x7', expected_size=(2, 8, 112, 112, 64)),
+      # dict(endpoint='MaxPool_2a_3x3', expected_size=(2, 8, 56, 56, 64)),
+      # dict(endpoint='Conv2d_2b_1x1', expected_size=(2, 8, 56, 56, 64)),
+      # dict(endpoint='Conv2d_2c_3x3', expected_size=(2, 8, 56, 56, 192)),
+      # dict(endpoint='MaxPool_3a_3x3', expected_size=(2, 8, 28, 28, 192)),
+      # dict(endpoint='Mixed_3b', expected_size=(2, 8, 28, 28, 256)),
+      # dict(endpoint='Mixed_3c', expected_size=(2, 8, 28, 28, 480)),
+      # dict(endpoint='MaxPool_4a_3x3', expected_size=(2, 4, 14, 14, 480)),
+      # dict(endpoint='Mixed_4b', expected_size=(2, 4, 14, 14, 512)),
+      # dict(endpoint='Mixed_4c', expected_size=(2, 4, 14, 14, 512)),
+      # dict(endpoint='Mixed_4d', expected_size=(2, 4, 14, 14, 512)),
+      # dict(endpoint='Mixed_4e', expected_size=(2, 4, 14, 14, 528)),
+      # dict(endpoint='Mixed_4f', expected_size=(2, 4, 14, 14, 832)),
+      # dict(endpoint='MaxPool_5a_2x2', expected_size=(2, 2, 7, 7, 832)),
+      # dict(endpoint='Mixed_5b', expected_size=(2, 2, 7, 7, 832)),
+      # dict(endpoint='Mixed_5c', expected_size=(2, 2, 7, 7, 1024)),
+      dict(endpoint='Embeddings', expected_size=(2, 1024)),
+  )
+  def test_endpoint_expected_output_dimensions(self, endpoint, expected_size):
+    # assertEqual pins the exact, ordered shape; assertSameElements compared
+    # the dimensions as an unordered set, accepting transposed shapes.
+    inputs = np.random.normal(size=(2, 16, 224, 224, 3))
+    model = _CallableS3D()
+    output = model(inputs, is_training=False, final_endpoint=endpoint)
+    self.assertEqual(output.shape, expected_size)
+
+  def test_space_to_depth(self):
+    inputs = np.random.normal(size=(2, 16//2, 224//2, 224//2, 3*2*2*2))
+    model = _CallableS3D()
+    output = model(inputs, is_training=False, final_endpoint='Conv2d_1a_7x7')
+    self.assertEqual(output.shape, (2, 8, 112, 112, 64))
+
+if __name__ == '__main__':
+  absltest.main()
diff --git a/mmv/models/tsm_resnet.py b/mmv/models/tsm_resnet.py
new file mode 100644
index 0000000..572541f
--- /dev/null
+++ b/mmv/models/tsm_resnet.py
@@ -0,0 +1,353 @@
+# Copyright 2020 DeepMind Technologies Limited.
+# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Temporal Shift Module w/ ResNet-50 and ResNet-101. + +Based on: + TSM: Temporal Shift Module for Efficient Video Understanding + Ji Lin, Chuang Gan, Song Han + https://arxiv.org/pdf/1811.08383.pdf. +""" + +from typing import Optional + +import haiku as hk +import jax +import jax.numpy as jnp + +from mmv.models import tsm_utils as tsmu +from mmv.models import types + + +class TSMResNetBlock(hk.Module): + """A ResNet subblock with Temporal Channel Shifting. + + Combines a typical ResNetV2 block implementation + (see https://arxiv.org/abs/1512.03385) with a pre-convolution Temporal + Shift Module (see https://arxiv.org/pdf/1811.08383.pdf) in the residual. + """ + + def __init__(self, + output_channels: int, + stride: int, + use_projection: bool, + tsm_mode: str, + normalize_fn: Optional[types.NormalizeFn] = None, + channel_shift_fraction: float = 0.125, + num_frames: int = 8, + name: str = 'TSMResNetBlock'): + """Initializes the TSMResNetBlock module. + + Args: + output_channels: Number of output channels. + stride: Stride used in convolutions. + use_projection: Whether to use a projection for the shortcut. + tsm_mode: Mode for TSM ('gpu' or 'tpu'). + normalize_fn: Function used for normalization. + channel_shift_fraction: The fraction of temporally shifted channels. If + `channel_shift_fraction` is 0, the block is the same as a normal ResNet + block. 
+ num_frames: Size of frame dimension in a single batch example + name: The name of the module. + """ + super().__init__(name=name) + self._output_channels = output_channels + self._bottleneck_channels = output_channels // 4 + self._stride = stride + self._use_projection = use_projection + self._normalize_fn = normalize_fn + self._tsm_mode = tsm_mode + self._channel_shift_fraction = channel_shift_fraction + self._num_frames = num_frames + + def __call__(self, + inputs: types.TensorLike, + is_training: bool = True) -> jnp.ndarray: + """Connects the ResNetBlock module into the graph. + + Args: + inputs: A 4-D float array of shape `[B, H, W, C]`. + is_training: Whether to use training mode. + + Returns: + A 4-D float array of shape + `[B * num_frames, new_h, new_w, output_channels]`. + """ + # ResNet V2 uses pre-activation, where the batch norm and relu are before + # convolutions, rather than after as in ResNet V1. + preact = inputs + if self._normalize_fn is not None: + preact = self._normalize_fn(preact, is_training=is_training) + preact = jax.nn.relu(preact) + + if self._use_projection: + shortcut = hk.Conv2D( + output_channels=self._output_channels, + kernel_shape=1, + stride=self._stride, + with_bias=False, + padding='SAME', + name='shortcut_conv')( + preact) + else: + shortcut = inputs + + # Eventually applies Temporal Shift Module. + if self._channel_shift_fraction != 0: + preact = tsmu.apply_temporal_shift( + preact, tsm_mode=self._tsm_mode, num_frames=self._num_frames, + channel_shift_fraction=self._channel_shift_fraction) + + # First convolution. + residual = hk.Conv2D( + self._bottleneck_channels, + kernel_shape=1, + stride=1, + with_bias=False, + padding='SAME', + name='conv_0')( + preact) + + # Second convolution. 
+ if self._normalize_fn is not None: + residual = self._normalize_fn(residual, is_training=is_training) + residual = jax.nn.relu(residual) + residual = hk.Conv2D( + output_channels=self._bottleneck_channels, + kernel_shape=3, + stride=self._stride, + with_bias=False, + padding='SAME', + name='conv_1')( + residual) + + # Third convolution. + if self._normalize_fn is not None: + residual = self._normalize_fn(residual, is_training=is_training) + residual = jax.nn.relu(residual) + residual = hk.Conv2D( + output_channels=self._output_channels, + kernel_shape=1, + stride=1, + with_bias=False, + padding='SAME', + name='conv_2')( + residual) + + # NOTE: we do not use block multiplier. + output = shortcut + residual + return output + + +class TSMResNetUnit(hk.Module): + """Block group for TSM ResNet.""" + + def __init__(self, + output_channels: int, + num_blocks: int, + stride: int, + tsm_mode: str, + num_frames: int, + normalize_fn: Optional[types.NormalizeFn] = None, + channel_shift_fraction: float = 0.125, + name: str = 'tsm_resnet_unit'): + """Creates a TSMResNet Unit. + + Args: + output_channels: Number of output channels. + num_blocks: Number of ResNet blocks in the unit. + stride: Stride of the unit. + tsm_mode: Which temporal shift module to use. + num_frames: Size of frame dimension in a single batch example. + normalize_fn: Function used for normalization. + channel_shift_fraction: The fraction of temporally shifted channels. If + `channel_shift_fraction` is 0, the block is the same as a normal ResNet + block. + name: The name of the module. + """ + super().__init__(name=name) + self._output_channels = output_channels + self._num_blocks = num_blocks + self._normalize_fn = normalize_fn + self._stride = stride + self._tsm_mode = tsm_mode + self._channel_shift_fraction = channel_shift_fraction + self._num_frames = num_frames + + def __call__(self, + inputs: types.TensorLike, + is_training: bool) -> jnp.ndarray: + """Connects the module to inputs. 
+ + Args: + inputs: A 4-D float array of shape `[B * num_frames, H, W, C]`. + is_training: Whether to use training mode. + + Returns: + A 4-D float array of shape + `[B * num_frames, H // stride, W // stride, output_channels]`. + """ + net = inputs + for idx_block in range(self._num_blocks): + net = TSMResNetBlock( + self._output_channels, + stride=self._stride if idx_block == 0 else 1, + use_projection=idx_block == 0, + normalize_fn=self._normalize_fn, + tsm_mode=self._tsm_mode, + channel_shift_fraction=self._channel_shift_fraction, + num_frames=self._num_frames, + name=f'block_{idx_block}')( + net, is_training=is_training) + return net + + +class TSMResNetV2(hk.Module): + """TSM based on ResNet V2 as described in https://arxiv.org/abs/1603.05027.""" + + # Endpoints of the model in order. + VALID_ENDPOINTS = ( + 'tsm_resnet_stem', + 'tsm_resnet_unit_0', + 'tsm_resnet_unit_1', + 'tsm_resnet_unit_2', + 'tsm_resnet_unit_3', + 'last_conv', + 'Embeddings', + ) + + def __init__(self, + normalize_fn: Optional[types.NormalizeFn] = None, + depth: int = 50, + num_frames: int = 16, + channel_shift_fraction: float = 0.125, + width_mult: int = 1, + name: str = 'TSMResNetV2'): + """Constructs a ResNet model. + + Args: + normalize_fn: Function used for normalization. + depth: Depth of the desired ResNet. + num_frames: Number of frames (used in TPU mode). + channel_shift_fraction: Fraction of channels that are temporally shifted, + if `channel_shift_fraction` is 0, a regular ResNet is returned. + width_mult: Whether or not to use a width multiplier. + name: The name of the module. + + Raises: + ValueError: If `channel_shift_fraction` or `depth` has invalid value. + """ + super().__init__(name=name) + + if not 0. 
<= channel_shift_fraction <= 1.0: + raise ValueError( + f'channel_shift_fraction ({channel_shift_fraction})' + ' has to be in [0, 1].') + + self._num_frames = num_frames + + self._channels = (256, 512, 1024, 2048) + self._strides = (1, 2, 2, 2) + + num_blocks = { + 50: (3, 4, 6, 3), + 101: (3, 4, 23, 3), + 152: (3, 8, 36, 3), + 200: (3, 24, 36, 3), + } + if depth not in num_blocks: + raise ValueError( + f'`depth` should be in {list(num_blocks.keys())} ({depth} given).') + self._num_blocks = num_blocks[depth] + + self._width_mult = width_mult + self._channel_shift_fraction = channel_shift_fraction + self._normalize_fn = normalize_fn + + def __call__( + self, + inputs: types.TensorLike, + is_training: bool = True, + final_endpoint: str = 'Embeddings') -> jnp.ndarray: + """Connects the TSM ResNetV2 module into the graph. + + Args: + inputs: A 4-D float array of shape `[B, H, W, C]`. + is_training: Whether to use training mode. + final_endpoint: Up to which endpoint to run / return. + + Returns: + Network output at location `final_endpoint`. A float array which shape + depends on `final_endpoint`. + + Raises: + ValueError: If `final_endpoint` is not recognized. + """ + + # Prepare inputs for TSM. + inputs, tsm_mode, num_frames = tsmu.prepare_inputs(inputs) + num_frames = num_frames or self._num_frames + + self._final_endpoint = final_endpoint + if self._final_endpoint not in self.VALID_ENDPOINTS: + raise ValueError(f'Unknown final endpoint {self._final_endpoint}') + + # Stem convolution. + end_point = 'tsm_resnet_stem' + net = hk.Conv2D( + output_channels=64 * self._width_mult, + kernel_shape=7, + stride=2, + with_bias=False, + name=end_point, + padding='SAME')( + inputs) + net = hk.MaxPool( + window_shape=(1, 3, 3, 1), + strides=(1, 2, 2, 1), + padding='SAME')( + net) + if self._final_endpoint == end_point: + return net + + # Residual block. 
+ for unit_id, (channels, num_blocks, stride) in enumerate( + zip(self._channels, self._num_blocks, self._strides)): + end_point = f'tsm_resnet_unit_{unit_id}' + net = TSMResNetUnit( + output_channels=channels * self._width_mult, + num_blocks=num_blocks, + stride=stride, + normalize_fn=self._normalize_fn, + channel_shift_fraction=self._channel_shift_fraction, + num_frames=num_frames, + tsm_mode=tsm_mode, + name=end_point)( + net, is_training=is_training) + if self._final_endpoint == end_point: + return net + + if self._normalize_fn is not None: + net = self._normalize_fn(net, is_training=is_training) + net = jax.nn.relu(net) + + end_point = 'last_conv' + if self._final_endpoint == end_point: + return net + net = jnp.mean(net, axis=(1, 2)) + # Prepare embedding outputs for TSM (temporal average of features). + net = tsmu.prepare_outputs(net, tsm_mode, num_frames) + assert self._final_endpoint == 'Embeddings' + return net diff --git a/mmv/models/tsm_resnet_test.py b/mmv/models/tsm_resnet_test.py new file mode 100644 index 0000000..6d47d08 --- /dev/null +++ b/mmv/models/tsm_resnet_test.py @@ -0,0 +1,65 @@ +# Copyright 2020 DeepMind Technologies Limited. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for TSM ResNet model.""" + +from absl.testing import absltest +from absl.testing import parameterized + +import haiku as hk +import jax +import jax.numpy as jnp + +from mmv.models import tsm_resnet + + +class TSMResNetTest(parameterized.TestCase): + + @parameterized.parameters( + ('tsm_resnet_stem', (2 * 32, 56, 56, 64)), + ('tsm_resnet_unit_0', (2 * 32, 56, 56, 256)), + ('tsm_resnet_unit_1', (2 * 32, 28, 28, 512)), + ('tsm_resnet_unit_2', (2 * 32, 14, 14, 1024)), + ('tsm_resnet_unit_3', (2 * 32, 7, 7, 2048)), + ('last_conv', (2 * 32, 7, 7, 2048)), + ('Embeddings', (2, 2048)), + ) + def test_output_dimension(self, final_endpoint, expected_shape): + input_shape = (2, 32, 224, 224, 3) + + def f(): + data = jnp.zeros(input_shape) + net = tsm_resnet.TSMResNetV2() + return net(data, final_endpoint=final_endpoint) + + init_fn, apply_fn = hk.transform(f) + out = apply_fn(init_fn(jax.random.PRNGKey(42)), None) + self.assertEqual(out.shape, expected_shape) + + def test_tpu_mode(self): + input_shape = (32 * 2, 224, 224, 3) + + def f(): + data = jnp.zeros(input_shape) + net = tsm_resnet.TSMResNetV2(num_frames=32) + return net(data, final_endpoint='Embeddings') + + init_fn, apply_fn = hk.transform(f) + out = apply_fn(init_fn(jax.random.PRNGKey(42)), None) + self.assertEqual(out.shape, (2, 2048)) + + +if __name__ == '__main__': + absltest.main() diff --git a/mmv/models/tsm_utils.py b/mmv/models/tsm_utils.py new file mode 100644 index 0000000..13531c3 --- /dev/null +++ b/mmv/models/tsm_utils.py @@ -0,0 +1,177 @@ +# Copyright 2020 DeepMind Technologies Limited. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utils functions for TSM.""" + +from typing import Tuple + +import jax +import jax.numpy as jnp + +from mmv.models import types + + +def prepare_inputs( + inputs: types.TensorLike) -> Tuple[jnp.ndarray, str, int]: + """Deduces input mode for TSM.""" + # Deduce if we run on TPU based on input shape. + if len(inputs.shape) == 5: + # Input is given in the standard [B, T, H, W, 3] format. + tsm_mode = 'gpu' + num_frames = inputs.shape[1] + inputs = jnp.reshape(inputs, [-1] + list(inputs.shape[2:])) + else: + # Input is given in the [T * B, H, W, 3] format. + tsm_mode = 'tpu' + num_frames = None + return inputs, tsm_mode, num_frames + + +def prepare_outputs(outputs: types.TensorLike, + tsm_mode: str, + num_frames: int) -> jnp.ndarray: + """Processes output of TSM by averaging representations over time axis.""" + n_channels = outputs.shape[-1] + if tsm_mode == 'tpu': + outputs = jnp.reshape(outputs, [num_frames, -1, n_channels]) + outputs = jnp.mean(outputs, axis=0) + elif tsm_mode == 'gpu': + outputs = jnp.reshape(outputs, [-1, num_frames, n_channels]) + outputs = jnp.mean(outputs, axis=1) + else: + raise ValueError( + f'`tsm_mode` should be \'tpu\' or \'gpu\' ({tsm_mode} given)') + return outputs + + +def apply_temporal_shift( + x: types.TensorLike, + tsm_mode: str, + num_frames: int, + channel_shift_fraction: float = 0.125) -> jnp.ndarray: + """Performs a temporal shift: https://arxiv.org/abs/1811.08383 with mode.""" + if tsm_mode == 'tpu': + outputs = temporal_shift_tpu(x, num_frames, channel_shift_fraction) + elif tsm_mode == 'gpu': + outputs = 
temporal_shift_gpu(x, num_frames, channel_shift_fraction)
+  else:
+    raise ValueError(
+        f'`tsm_mode` should be \'tpu\' or \'gpu\' ({tsm_mode} given)')
+  return outputs
+
+
+def temporal_shift_gpu(
+    x: types.TensorLike,
+    num_frames: int,
+    channel_shift_fraction: float = 0.125) -> jnp.ndarray:
+  """Performs a temporal shift: https://arxiv.org/abs/1811.08383."""
+  # B, T, H, W, C = batch_size, num_frames, im_height, im_width, channels
+  # Input is (B * T, H, W, C)
+  orig_shp = tuple(x.shape)
+  reshaped_x = jnp.reshape(x, (-1, num_frames) + orig_shp[1:])
+  n_channels = orig_shp[-1]
+  n_shift = int(n_channels * channel_shift_fraction)
+  # NOTE(review): assumes n_shift >= 1. If n_shift == 0 (fewer than
+  # 1 / channel_shift_fraction channels), the `n_shift:-n_shift` slice below
+  # becomes empty; callers only guard `channel_shift_fraction != 0`, not small
+  # channel counts — TODO confirm inputs always have enough channels.
+
+  new_shp = tuple(reshaped_x.shape)
+
+  # shifted_backward = reshaped_x[:, 1:, :, :, -n_shift:]
+  shifted_backward = jax.lax.slice(
+      reshaped_x, (0, 1, 0, 0, new_shp[4] - n_shift),
+      (new_shp[0], new_shp[1], new_shp[2], new_shp[3], new_shp[4]))
+  shifted_backward_padding = ((0, 0), (0, 1), (0, 0), (0, 0), (0, 0))
+  shifted_backward = jnp.pad(shifted_backward, shifted_backward_padding)
+
+  # shifted_forward = reshaped_x[:, :-1, :, :, :n_shift]
+  shifted_forward = jax.lax.slice(
+      reshaped_x, (0, 0, 0, 0, 0),
+      (new_shp[0], new_shp[1] - 1, new_shp[2], new_shp[3], n_shift))
+  shifted_forward_padding = ((0, 0), (1, 0), (0, 0), (0, 0), (0, 0))
+  shifted_forward = jnp.pad(shifted_forward, shifted_forward_padding)
+
+  no_shift = reshaped_x[:, :, :, :, n_shift:-n_shift]
+  shifted_x = jnp.concatenate([shifted_backward, no_shift, shifted_forward],
+                              axis=4)
+  return jnp.reshape(shifted_x, (-1,) + orig_shp[1:])
+
+
+def temporal_shift_tpu(
+    x: types.TensorLike,
+    num_frames: int,
+    channel_shift_fraction: float = 0.125) -> jnp.ndarray:
+  """Performs a temporal shift: https://arxiv.org/abs/1811.08383.
+
+  TPU optimized version of TSM. Reshape is avoided by having the images
+  reshaped in [T * B, :] so that frames corresponding to same time frame in
+  videos are contiguous in memory.
Thanks to cr/288510308 which allows to fuse + pad->slice into convolution, we reformulate the slice pad into a pad then + slice. Finally, to avoid concatenate that prevent some fusion from happening + we simply sum masked version of the features. + Args: + x: Input expected to be [T * B, H, W, C] (where the batch has been reshaped + from a time major version of the input). + num_frames: number of frames T per video. + channel_shift_fraction: fraction of the channel to shift forward and + backward. + + Returns: + The temporal shifted version of x. + """ + # B, T, H, W, C = batch_size, num_frames, im_height, im_width, channels + # Input is (T * B, H, W, C) + original_shape = list(x.shape) + + batch_size = int(original_shape[0] / num_frames) + n_channels = int(original_shape[-1]) + n_shift = int(n_channels * channel_shift_fraction) + + # Cast to bfloat16. + x = x.astype(jnp.bfloat16) + + # For the following, assume that x has 3 channels [x1, x2, x3] and n_shift=1. + # Shift backward, we first pad by zeros [x1, x2, x3, 0, 0]. + orig_shp = list(x.shape) + + shifted_backward_padding = ((0, batch_size, 0), (0, 0, 0), (0, 0, 0), + (0, n_channels - n_shift, 0)) + x_backward_padding = jax.lax.pad( + x, + padding_value=jnp.bfloat16(0.), + padding_config=shifted_backward_padding) + # The following shift gets to [x3^+1, 0, 0] (where +1 means from the future). + shifted_backward = jax.lax.slice(x_backward_padding, + (batch_size, 0, 0, n_channels - n_shift), + (orig_shp[0] + batch_size, orig_shp[1], + orig_shp[2], 2 * n_channels - n_shift)) + # Shift forward, we first pad by zeros [0, 0, x1, x2, x3]. + shifted_forward_padding = ((batch_size, 0, 0), (0, 0, 0), (0, 0, 0), + (n_channels - n_shift, 0, 0)) + x_forward_padding = jax.lax.pad( + x, + padding_value=jnp.bfloat16(0.), + padding_config=shifted_forward_padding) + # The following shift gets to [0, 0, x1^-1] (where -1 means from the past). 
+ shifted_forward = jax.lax.slice( + x_forward_padding, (0, 0, 0, 0), + (orig_shp[0], orig_shp[1], orig_shp[2], n_channels)) + # No shift is in the middle, this gets [0, x2, 0]. + mask_noshift = (jnp.reshape((jnp.arange(n_channels) >= n_shift) & + (jnp.arange(n_channels) < n_channels - n_shift), + (1, 1, 1, -1))).astype(jnp.bfloat16) + no_shift = mask_noshift * x + # By summing everything together, we end up with [x3^+1, x2, x1^-1]. + # Note: channels have been reordered but that doesn't matter for the model. + shifted_x = shifted_backward + shifted_forward + no_shift + + return shifted_x.astype(jnp.float32) diff --git a/mmv/models/tsm_utils_test.py b/mmv/models/tsm_utils_test.py new file mode 100644 index 0000000..b070610 --- /dev/null +++ b/mmv/models/tsm_utils_test.py @@ -0,0 +1,60 @@ +# Copyright 2020 DeepMind Technologies Limited. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for tsm_utils.""" + +from absl.testing import absltest +from absl.testing import parameterized + +import jax.numpy as jnp +import numpy as np + +from mmv.models import tsm_utils + + +class TsmUtilsTest(parameterized.TestCase): + + @parameterized.parameters( + ((2, 32, 224, 224, 3), 'gpu', (2 * 32, 224, 224, 3), 32), + ((32, 224, 224, 3), 'tpu', (32, 224, 224, 3), None), + ) + def test_prepare_inputs(self, input_shape, expected_mode, expected_shape, + expected_num_frames): + + data = jnp.zeros(input_shape) + out, mode, num_frames = tsm_utils.prepare_inputs(data) + self.assertEqual(out.shape, expected_shape) + self.assertEqual(mode, expected_mode) + self.assertEqual(num_frames, expected_num_frames) + + def test_prepare_outputs(self): + data = jnp.concatenate([jnp.zeros(4), jnp.ones(4)]).reshape(4, 2) + out_gpu = tsm_utils.prepare_outputs(data, 'gpu', 2) + out_tpu = tsm_utils.prepare_outputs(data, 'tpu', 2) + expected_gpu = np.concatenate([np.zeros(2), np.ones(2)]).reshape(2, 2) + expected_tpu = 0.5 * jnp.ones((2, 2)) + np.testing.assert_allclose(out_gpu, expected_gpu) + np.testing.assert_allclose(out_tpu, expected_tpu) + + def test_apply_tsm(self): + shape = (32, 224, 224, 16) + data = jnp.zeros(shape) + out_gpu = tsm_utils.apply_temporal_shift(data, 'gpu', 16) + out_tpu = tsm_utils.apply_temporal_shift(data, 'tpu', 16) + self.assertEqual(out_gpu.shape, shape) + self.assertEqual(out_tpu.shape, shape) + +if __name__ == '__main__': + absltest.main() diff --git a/mmv/models/types.py b/mmv/models/types.py new file mode 100644 index 0000000..bac7e52 --- /dev/null +++ b/mmv/models/types.py @@ -0,0 +1,36 @@ +# Copyright 2020 DeepMind Technologies Limited. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
"""Type Aliases."""

from typing import Callable, Tuple, Union

import jax.numpy as jnp
import numpy as np
import optax

# Any array-like value the models accept: host (numpy) or device (jax) arrays.
TensorLike = Union[np.ndarray, jnp.DeviceArray]

# Element-wise functions applied inside network blocks (e.g. relu, sigmoid).
ActivationFn = Callable[[TensorLike], TensorLike]
GatingFn = Callable[[TensorLike], TensorLike]

# A full forward pass: inputs in, features out.
NetworkFn = Callable[[TensorLike], TensorLike]

# Callable doesn't allow kwargs to be used, and we often want to
# pass in is_training=..., so ignore the arguments for the sake of pytype.
NormalizeFn = Callable[..., TensorLike]

# State of the optax transform chain used for training:
# trace -> scale_by_schedule -> scale.
OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState]
def load_checkpoint(checkpoint_path):
  """Restores training state from a dill-serialized checkpoint file.

  Args:
    checkpoint_path: Path of the checkpoint file to restore.

  Returns:
    The deserialized checkpoint contents, or None if no file exists at
    `checkpoint_path` (callers treat a missing checkpoint as "start fresh").
  """
  # Guard only the open(): a FileNotFoundError raised *during*
  # deserialization would indicate a real problem and should propagate,
  # not be silently converted into "no checkpoint".
  try:
    checkpoint_file = open(checkpoint_path, 'rb')
  except FileNotFoundError:
    logging.info('No checkpoint found at %s', checkpoint_path)
    return None
  # Log before deserializing; the original logged "Loading ..." only after
  # dill.load had already completed, which was misleading for long loads.
  logging.info('Loading checkpoint from %s', checkpoint_path)
  # NOTE(review): dill (like pickle) can execute arbitrary code on load;
  # only restore checkpoints from trusted locations.
  with checkpoint_file:
    return dill.load(checkpoint_file)
# TFDS builders below rely on eager-mode TF ops during generation.
tf.compat.v1.enable_eager_execution()

_CITATION = """\
@article{DBLP:journals/corr/abs-1212-0402,
  author = {Khurram Soomro and
  Amir Roshan Zamir and
  Mubarak Shah},
  title = {{UCF101:} {A} Dataset of 101 Human Actions Classes From Videos in
  The Wild},
  journal = {CoRR},
  volume = {abs/1212.0402},
  year = {2012},
  url = {http://arxiv.org/abs/1212.0402},
  archivePrefix = {arXiv},
  eprint = {1212.0402},
  timestamp = {Mon, 13 Aug 2018 16:47:45 +0200},
  biburl = {https://dblp.org/rec/bib/journals/corr/abs-1212-0402},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

# Label names file shipped with TFDS for the 101 UCF action classes.
_LABELS_FNAME = 'video/ucf101_labels.txt'


class ModUcf101(tfds.video.Ucf101):
  """Ucf101 action recognition dataset with better quality.
  """

  def _info(self):
    """Builds the DatasetInfo with higher-quality video decoding settings."""
    # Re-encode at JPEG quality 2, 25 fps, capped at 20 seconds per clip.
    ffmpeg_extra_args = ('-qscale:v', '2', '-r', '25', '-t', '00:00:20')
    video_feature = tfds.features.Video(
        (None, self.builder_config.height, self.builder_config.width, 3),
        ffmpeg_extra_args=ffmpeg_extra_args,
        encoding_format='jpeg')
    label_feature = tfds.features.ClassLabel(
        names_file=tfds.core.tfds_path(_LABELS_FNAME))
    return tfds.core.DatasetInfo(
        builder=self,
        description='A 101-label video classification dataset.',
        features=tfds.features.FeaturesDict({
            'video': video_feature,
            'label': label_feature,
        }),
        homepage='https://www.crcv.ucf.edu/data-sets/ucf101/',
        citation=_CITATION,
    )