diff --git a/README.md b/README.md index 627c192..81fd088 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,25 @@ -The Portiloop +# The Portiloop ![Prototype](https://github.com/nicolasvalenchon/Portiloop/blob/main/images/photo_portiloop.jpg) -Your training curves can be visualized in the Portiloop [wandb project](https://wandb.ai/portiloop). \ No newline at end of file +Your training curves can be visualized in the Portiloop [wandb project](https://wandb.ai/portiloop). + +## Quick start guide + +- clone the repo +- cd to the root of the repo where `setup.py` is +- pip install with the -e option: +```terminal +pip install -e . +``` +- download the datasets and the experiments zip files +- unzip the datasets file and paste its content under `Portiloop>Software>dataset` +- unzip the experiments file and paste its content under `Portiloop>Software>experiments` + +### Inference / Portiloop simulation: +The `simulate_Portiloop_1_input_classification.ipynb` notebook enables simulating the Portiloop system and performing inference. +This notebook can be executed with `jupyter notebook`. + +### Training: +We provide example bash scripts for `slurm` to train the model on HPC systems. +Adapt these scripts to your configuration. 
diff --git a/Software/plots/.gitignore b/Software/plots/.gitignore new file mode 100644 index 0000000..86d0cb2 --- /dev/null +++ b/Software/plots/.gitignore @@ -0,0 +1,4 @@ +# Ignore everything in this directory +* +# Except this file +!.gitignore \ No newline at end of file diff --git a/Software/python/ANN/portiloop_detector_training.py b/Software/python/ANN/portiloop_detector_training.py index 3aabc72..b79d61d 100644 --- a/Software/python/ANN/portiloop_detector_training.py +++ b/Software/python/ANN/portiloop_detector_training.py @@ -68,7 +68,7 @@ def __init__(self, filename, path, window_size, fe, seq_len, seq_stride, list_su if not (self.data[3][idx + self.window_size - 1] < 0 # that are not ending in an unlabeled zone or idx < self.past_signal_len)] # and far enough from the beginning to build a sequence up to here total_spindles = np.sum(self.data[3] > THRESHOLD) - logging.debug(f"nb total of spindles in this dataset : {total_spindles}") + logging.debug(f"total number of spindles in this dataset : {total_spindles}") def __len__(self): return len(self.indices) diff --git a/notebooks/simulate_Portiloop_1_input_classification.ipynb b/notebooks/simulate_Portiloop_1_input_classification.ipynb new file mode 100644 index 0000000..e3d9ac9 --- /dev/null +++ b/notebooks/simulate_Portiloop_1_input_classification.ipynb @@ -0,0 +1,3545 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "P4V_P6xtqEnl" + }, + "source": [ + "# Inference and simulation of the Portiloop system" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "import Software\n", + "path_software = Path(Software.__file__).parent.absolute()\n", + "path = path_software / 'dataset'\n", + "path_dataset = Path(path)\n", + "path_plots = path_software / 'plots'\n", + "path_experiments = path_software / 'experiments'\n", + "\n", + "print(f\"Path dataset: {path_dataset}\")\n", + "print(f\"Path plots: 
{path_plots}\")\n", + "print(f\"Path experiments: {path_experiments}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "HOufcSJRODBO" + }, + "outputs": [], + "source": [ + "# all imports\n", + "\n", + "import copy\n", + "import logging\n", + "import os\n", + "import time\n", + "from argparse import ArgumentParser\n", + "from pathlib import Path\n", + "from random import randint, seed\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import pandas as pd\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "from sklearn.model_selection import train_test_split\n", + "from torch.nn import functional as F\n", + "from torch.utils.data import Dataset, DataLoader\n", + "from torch.utils.data.sampler import Sampler\n", + "\n", + "from math import floor, sqrt\n", + "from scipy.ndimage import gaussian_filter1d, convolve1d" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "j3u-Z8D1N8yl" + }, + "outputs": [], + "source": [ + "logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "LEN_SEGMENT = 115 # seconds\n", + "\n", + "def out_dim(window_size, padding, dilation, kernel, stride):\n", + " return floor((window_size + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0bdAMG0GH4r7" + }, + "outputs": [], + "source": [ + "# all classes and functions:\n", + "\n", + "class SignalDataset(Dataset):\n", + " def __init__(self, filename, path, window_size, fe, seq_len, seq_stride, list_subject, len_segment):\n", + " self.fe = fe\n", + " self.window_size = window_size\n", + " self.path_file = Path(path) / filename\n", + "\n", + " self.data = pd.read_csv(self.path_file, header=None).to_numpy()\n", + " assert 
list_subject is not None\n", + " used_sequence = np.hstack([range(int(s[1]), int(s[2])) for s in list_subject])\n", + " split_data = np.array(np.split(self.data, int(len(self.data) / (len_segment + 30 * fe)))) # 115+30 = nb seconds per sequence in the dataset\n", + " split_data = split_data[used_sequence]\n", + " self.data = np.transpose(split_data.reshape((split_data.shape[0] * split_data.shape[1], 4)))\n", + "\n", + " assert self.window_size <= len(self.data[0]), \"Dataset smaller than window size.\"\n", + " self.full_signal = torch.tensor(self.data[0], dtype=torch.float)\n", + " self.full_envelope = torch.tensor(self.data[1], dtype=torch.float)\n", + " self.seq_len = seq_len # 1 means single sample / no sequence ?\n", + " self.idx_stride = seq_stride\n", + " self.past_signal_len = self.seq_len * self.idx_stride\n", + "\n", + " # list of indices that can be sampled:\n", + " self.indices = [idx for idx in range(len(self.data[0]) - self.window_size) # all possible idxs in the dataset\n", + " if not (self.data[3][idx + self.window_size - 1] < 0 # that are not ending in an unlabeled zone\n", + " or idx < self.past_signal_len)] # and far enough from the beginning to build a sequence up to here\n", + " total_spindles = np.sum(self.data[3] > THRESHOLD)\n", + " logging.debug(f\"total number of spindles in this dataset : {total_spindles}\")\n", + "\n", + " def __len__(self):\n", + " return len(self.indices)\n", + "\n", + " def __getitem__(self, idx):\n", + " assert 0 <= idx < len(self), f\"Index out of range ({idx}/{len(self)}).\"\n", + " idx = self.indices[idx]\n", + " assert self.data[3][idx + self.window_size - 1] >= 0, f\"Bad index: {idx}.\"\n", + "\n", + " signal_seq = self.full_signal[idx - (self.past_signal_len - self.idx_stride):idx + self.window_size].unfold(0, self.window_size,\n", + " self.idx_stride)\n", + " envelope_seq = self.full_envelope[idx - (self.past_signal_len - self.idx_stride):idx + self.window_size].unfold(0, self.window_size,\n", + " 
self.idx_stride)\n", + "\n", + " ratio_pf = torch.tensor(self.data[2][idx + self.window_size - 1], dtype=torch.float)\n", + " label = torch.tensor(self.data[3][idx + self.window_size - 1], dtype=torch.float)\n", + "\n", + " return signal_seq, envelope_seq, ratio_pf, label\n", + "\n", + " def is_spindle(self, idx):\n", + " assert 0 <= idx <= len(self), f\"Index out of range ({idx}/{len(self)}).\"\n", + " idx = self.indices[idx]\n", + " return True if (self.data[3][idx + self.window_size - 1] > THRESHOLD) else False\n", + "\n", + "\n", + "def get_class_idxs(dataset, distribution_mode):\n", + " \"\"\"\n", + " Directly outputs idx_true and idx_false arrays\n", + " \"\"\"\n", + " length_dataset = len(dataset)\n", + "\n", + " nb_true = 0\n", + " nb_false = 0\n", + "\n", + " idx_true = []\n", + " idx_false = []\n", + "\n", + " for i in range(length_dataset):\n", + " is_spindle = dataset.is_spindle(i)\n", + " if is_spindle or distribution_mode == 1:\n", + " nb_true += 1\n", + " idx_true.append(i)\n", + " else:\n", + " nb_false += 1\n", + " idx_false.append(i)\n", + "\n", + " assert len(dataset) == nb_true + nb_false, f\"Bad length dataset\"\n", + "\n", + " return np.array(idx_true), np.array(idx_false)\n", + "\n", + "\n", + "# Sampler avec liste et sans rand liste\n", + "\n", + "class RandomSampler(Sampler):\n", + " \"\"\"\n", + " Samples elements randomly and evenly between the two classes.\n", + " The sampling happens WITH replacement.\n", + " __iter__ stops after an arbitrary number of iterations = batch_size_list * nb_batch\n", + " Arguments:\n", + " idx_true: np.array\n", + " idx_false: np.array\n", + " batch_size (int)\n", + " nb_batch (int, optional): number of iteration before end of __iter__(), this defaults to len(data_source)\n", + " \"\"\"\n", + "\n", + " def __init__(self, idx_true, idx_false, batch_size, distribution_mode, nb_batch):\n", + " self.idx_true = idx_true\n", + " self.idx_false = idx_false\n", + " self.nb_true = self.idx_true.size\n", + " 
self.nb_false = self.idx_false.size\n", + " self.length = nb_batch * batch_size\n", + " self.distribution_mode = distribution_mode\n", + "\n", + " def __iter__(self):\n", + " global precision_validation_factor\n", + " global recall_validation_factor\n", + " cur_iter = 0\n", + " seed()\n", + " # epsilon = 1e-7 proba = float(0.5 + 0.5 * (precision_validation_factor - recall_validation_factor) / (precision_validation_factor +\n", + " # recall_validation_factor + epsilon))\n", + " proba = 0.5\n", + " if self.distribution_mode == 1:\n", + " proba = 1\n", + " logging.debug(f\"proba: {proba}\")\n", + "\n", + " while cur_iter < self.length:\n", + " cur_iter += 1\n", + " sample_class = np.random.choice([0, 1], p=[1 - proba, proba])\n", + " if sample_class: # sample true\n", + " idx_file = randint(0, self.nb_true - 1)\n", + " idx_res = self.idx_true[idx_file]\n", + " else: # sample false\n", + " idx_file = randint(0, self.nb_false - 1)\n", + " idx_res = self.idx_false[idx_file]\n", + "\n", + " yield idx_res\n", + "\n", + " def __len__(self):\n", + " return self.length\n", + "\n", + "\n", + "# Sampler validation\n", + "\n", + "class ValidationSampler(Sampler):\n", + " \"\"\"\n", + " __iter__ stops after an arbitrary number of iterations = batch_size_list * nb_batch\n", + " network_stride (int >= 1, default: 1): divides the size of the dataset (and of the batch) by striding further than 1\n", + " \"\"\"\n", + "\n", + " def __init__(self, data_source, seq_stride, nb_segment, len_segment, network_stride):\n", + " network_stride = int(network_stride)\n", + " assert network_stride >= 1\n", + " self.network_stride = network_stride\n", + " self.seq_stride = seq_stride\n", + " self.data = data_source\n", + " self.nb_segment = nb_segment\n", + " self.len_segment = len_segment\n", + "\n", + " def __iter__(self):\n", + " seed()\n", + " batches_per_segment = self.len_segment // self.seq_stride # len sequence = 115 s + add the 15 first s?\n", + " cursor_batch = 0\n", + " while 
cursor_batch < batches_per_segment:\n", + " for i in range(self.nb_segment):\n", + " for j in range(0, (self.seq_stride//self.network_stride)*self.network_stride, self.network_stride):\n", + " cur_idx = i * self.len_segment + j + cursor_batch * self.seq_stride\n", + " yield cur_idx\n", + " cursor_batch += 1\n", + "\n", + " def __len__(self):\n", + " assert False\n", + " # return len(self.data)\n", + " # return len(self.data_source)\n", + "\n", + "\n", + "class ConvPoolModule(nn.Module):\n", + " def __init__(self,\n", + " in_channels,\n", + " out_channel,\n", + " kernel_conv,\n", + " stride_conv,\n", + " conv_padding,\n", + " dilation_conv,\n", + " kernel_pool,\n", + " stride_pool,\n", + " pool_padding,\n", + " dilation_pool,\n", + " dropout_p):\n", + " super(ConvPoolModule, self).__init__()\n", + "\n", + " self.conv = nn.Conv1d(in_channels=in_channels,\n", + " out_channels=out_channel,\n", + " kernel_size=kernel_conv,\n", + " stride=stride_conv,\n", + " padding=conv_padding,\n", + " dilation=dilation_conv)\n", + " self.pool = nn.MaxPool1d(kernel_size=kernel_pool,\n", + " stride=stride_pool,\n", + " padding=pool_padding,\n", + " dilation=dilation_pool)\n", + " self.dropout = nn.Dropout(dropout_p)\n", + "\n", + " def forward(self, input_f):\n", + " x, max_value = input_f\n", + " x = F.relu(self.conv(x))\n", + " x = self.pool(x)\n", + " max_temp = torch.max(abs(x))\n", + " if max_temp > max_value:\n", + " logging.debug(f\"max_value = {max_temp}\")\n", + " max_value = max_temp\n", + " return self.dropout(x), max_value\n", + "\n", + "\n", + "class FcModule(nn.Module):\n", + " def __init__(self,\n", + " in_features,\n", + " out_features,\n", + " dropout_p):\n", + " super(FcModule, self).__init__()\n", + "\n", + " self.fc = nn.Linear(in_features=in_features, out_features=out_features)\n", + " self.dropout = nn.Dropout(dropout_p)\n", + "\n", + " def forward(self, x):\n", + " x = F.relu(self.fc(x))\n", + " return self.dropout(x)\n", + "\n", + "\n", + "class 
PortiloopNetwork(nn.Module):\n", + " def __init__(self, c_dict):\n", + " super(PortiloopNetwork, self).__init__()\n", + "\n", + " RNN = c_dict[\"RNN\"]\n", + " stride_pool = c_dict[\"stride_pool\"]\n", + " stride_conv = c_dict[\"stride_conv\"]\n", + " kernel_conv = c_dict[\"kernel_conv\"]\n", + " kernel_pool = c_dict[\"kernel_pool\"]\n", + " nb_channel = c_dict[\"nb_channel\"]\n", + " hidden_size = c_dict[\"hidden_size\"]\n", + " window_size_s = c_dict[\"window_size_s\"]\n", + " dropout_p = c_dict[\"dropout\"]\n", + " dilation_conv = c_dict[\"dilation_conv\"]\n", + " dilation_pool = c_dict[\"dilation_pool\"]\n", + " fe = c_dict[\"fe\"]\n", + " nb_conv_layers = c_dict[\"nb_conv_layers\"]\n", + " nb_rnn_layers = c_dict[\"nb_rnn_layers\"]\n", + " first_layer_dropout = c_dict[\"first_layer_dropout\"]\n", + " self.envelope_input = c_dict[\"envelope_input\"]\n", + " self.power_features_input = c_dict[\"power_features_input\"]\n", + " self.classification = c_dict[\"classification\"]\n", + "\n", + " conv_padding = 0 # int(kernel_conv // 2)\n", + " pool_padding = 0 # int(kernel_pool // 2)\n", + " window_size = int(window_size_s * fe)\n", + " nb_out = window_size\n", + "\n", + " for _ in range(nb_conv_layers):\n", + " nb_out = out_dim(nb_out, conv_padding, dilation_conv, kernel_conv, stride_conv)\n", + " nb_out = out_dim(nb_out, pool_padding, dilation_pool, kernel_pool, stride_pool)\n", + "\n", + " output_cnn_size = int(nb_channel * nb_out)\n", + "\n", + " self.RNN = RNN\n", + " self.first_layer_input1 = ConvPoolModule(in_channels=1,\n", + " out_channel=nb_channel,\n", + " kernel_conv=kernel_conv,\n", + " stride_conv=stride_conv,\n", + " conv_padding=conv_padding,\n", + " dilation_conv=dilation_conv,\n", + " kernel_pool=kernel_pool,\n", + " stride_pool=stride_pool,\n", + " pool_padding=pool_padding,\n", + " dilation_pool=dilation_pool,\n", + " dropout_p=dropout_p if first_layer_dropout else 0)\n", + " self.seq_input1 = 
nn.Sequential(*(ConvPoolModule(in_channels=nb_channel,\n", + " out_channel=nb_channel,\n", + " kernel_conv=kernel_conv,\n", + " stride_conv=stride_conv,\n", + " conv_padding=conv_padding,\n", + " dilation_conv=dilation_conv,\n", + " kernel_pool=kernel_pool,\n", + " stride_pool=stride_pool,\n", + " pool_padding=pool_padding,\n", + " dilation_pool=dilation_pool,\n", + " dropout_p=dropout_p) for _ in range(nb_conv_layers - 1)))\n", + " if RNN:\n", + " self.gru_input1 = nn.GRU(input_size=output_cnn_size,\n", + " hidden_size=hidden_size,\n", + " num_layers=nb_rnn_layers,\n", + " dropout=0,\n", + " batch_first=True)\n", + " # fc_size = hidden_size\n", + " else:\n", + " self.first_fc_input1 = FcModule(in_features=output_cnn_size, out_features=hidden_size, dropout_p=dropout_p)\n", + " self.seq_fc_input1 = nn.Sequential(\n", + " *(FcModule(in_features=hidden_size, out_features=hidden_size, dropout_p=dropout_p) for _ in range(nb_rnn_layers - 1)))\n", + " if self.envelope_input:\n", + " self.first_layer_input2 = ConvPoolModule(in_channels=1,\n", + " out_channel=nb_channel,\n", + " kernel_conv=kernel_conv,\n", + " stride_conv=stride_conv,\n", + " conv_padding=conv_padding,\n", + " dilation_conv=dilation_conv,\n", + " kernel_pool=kernel_pool,\n", + " stride_pool=stride_pool,\n", + " pool_padding=pool_padding,\n", + " dilation_pool=dilation_pool,\n", + " dropout_p=dropout_p if first_layer_dropout else 0)\n", + " self.seq_input2 = nn.Sequential(*(ConvPoolModule(in_channels=nb_channel,\n", + " out_channel=nb_channel,\n", + " kernel_conv=kernel_conv,\n", + " stride_conv=stride_conv,\n", + " conv_padding=conv_padding,\n", + " dilation_conv=dilation_conv,\n", + " kernel_pool=kernel_pool,\n", + " stride_pool=stride_pool,\n", + " pool_padding=pool_padding,\n", + " dilation_pool=dilation_pool,\n", + " dropout_p=dropout_p) for _ in range(nb_conv_layers - 1)))\n", + "\n", + " if RNN:\n", + " self.gru_input2 = nn.GRU(input_size=output_cnn_size,\n", + " hidden_size=hidden_size,\n", + " 
num_layers=nb_rnn_layers,\n", + " dropout=0,\n", + " batch_first=True)\n", + " else:\n", + " self.first_fc_input2 = FcModule(in_features=output_cnn_size, out_features=hidden_size, dropout_p=dropout_p)\n", + " self.seq_fc_input2 = nn.Sequential(\n", + " *(FcModule(in_features=hidden_size, out_features=hidden_size, dropout_p=dropout_p) for _ in range(nb_rnn_layers - 1)))\n", + " fc_features = 0\n", + " fc_features += hidden_size\n", + " if self.envelope_input:\n", + " fc_features += hidden_size\n", + " if self.power_features_input:\n", + " fc_features += 1\n", + " out_features = 1\n", + " self.fc = nn.Linear(in_features=fc_features, # enveloppe and signal + power features ratio\n", + " out_features=out_features) # probability of being a spindle\n", + "\n", + " def forward(self, x1, x2, x3, h1, h2, max_value=np.inf):\n", + " (batch_size, sequence_len, features) = x1.shape\n", + "\n", + " if ABLATION == 1:\n", + " x1 = copy.deepcopy(x2)\n", + " elif ABLATION == 2:\n", + " x2 = copy.deepcopy(x1)\n", + "\n", + " x1 = x1.view(-1, 1, features)\n", + " x1, max_value = self.first_layer_input1((x1, max_value))\n", + " x1, max_value = self.seq_input1((x1, max_value))\n", + "\n", + " x1 = torch.flatten(x1, start_dim=1, end_dim=-1)\n", + " hn1 = None\n", + " if self.RNN:\n", + " x1 = x1.view(batch_size, sequence_len, -1)\n", + " x1, hn1 = self.gru_input1(x1, h1)\n", + " max_temp = torch.max(abs(x1))\n", + " if max_temp > max_value:\n", + " logging.debug(f\"max_value = {max_temp}\")\n", + " max_value = max_temp\n", + " x1 = x1[:, -1, :]\n", + " else:\n", + " x1 = self.first_fc_input1(x1)\n", + " x1 = self.seq_fc_input1(x1)\n", + " x = x1\n", + " hn2 = None\n", + " if self.envelope_input:\n", + " x2 = x2.view(-1, 1, features)\n", + " x2, max_value = self.first_layer_input2((x2, max_value))\n", + " x2, max_value = self.seq_input2((x2, max_value))\n", + "\n", + " x2 = torch.flatten(x2, start_dim=1, end_dim=-1)\n", + " if self.RNN:\n", + " x2 = x2.view(batch_size, sequence_len, 
-1)\n", + " x2, hn2 = self.gru_input2(x2, h2)\n", + " max_temp = torch.max(abs(x2))\n", + " if max_temp > max_value:\n", + " logging.debug(f\"max_value = {max_temp}\")\n", + " max_value = max_temp\n", + " x2 = x2[:, -1, :]\n", + " else:\n", + " x2 = self.first_fc_input2(x2)\n", + " x2 = self.seq_fc_input2(x2)\n", + " x = torch.cat((x, x2), -1)\n", + "\n", + " if self.power_features_input:\n", + " x3 = x3.view(-1, 1)\n", + " x = torch.cat((x, x3), -1)\n", + "\n", + " x = self.fc(x) # output size: 1\n", + " max_temp = torch.max(abs(x))\n", + " if max_temp > max_value:\n", + " logging.debug(f\"max_value = {max_temp}\")\n", + " max_value = max_temp\n", + " x = torch.sigmoid(x)\n", + "\n", + " return x, hn1, hn2, max_value\n", + "\n", + "\n", + "class LoggerWandb:\n", + " def __init__(self, experiment_name, c_dict, project_name):\n", + " self.best_model = None\n", + " self.experiment_name = experiment_name\n", + " os.environ['WANDB_API_KEY'] = \"cd105554ccdfeee0bbe69c175ba0c14ed41f6e00\"\n", + " self.wandb_run = wandb.init(project=project_name, entity=\"portiloop\", id=experiment_name, resume=\"allow\",\n", + " config=c_dict, reinit=True)\n", + "\n", + " def log(self,\n", + " accuracy_train,\n", + " loss_train,\n", + " accuracy_validation,\n", + " loss_validation,\n", + " f1_validation,\n", + " precision_validation,\n", + " recall_validation,\n", + " best_epoch,\n", + " best_model,\n", + " loss_early_stopping,\n", + " best_epoch_early_stopping,\n", + " best_model_accuracy_validation,\n", + " best_model_f1_score_validation,\n", + " best_model_precision_validation,\n", + " best_model_recall_validation,\n", + " best_model_loss_validation,\n", + " best_model_on_loss_accuracy_validation,\n", + " best_model_on_loss_f1_score_validation,\n", + " best_model_on_loss_precision_validation,\n", + " best_model_on_loss_recall_validation,\n", + " best_model_on_loss_loss_validation,\n", + " updated_model=False,\n", + " ):\n", + " self.best_model = best_model\n", + " 
self.wandb_run.log({\n", + " \"accuracy_train\": accuracy_train,\n", + " \"loss_train\": loss_train,\n", + " \"accuracy_validation\": accuracy_validation,\n", + " \"loss_validation\": loss_validation,\n", + " \"f1_validation\": f1_validation,\n", + " \"precision_validation\": precision_validation,\n", + " \"recall_validation\": recall_validation,\n", + " \"loss_early_stopping\": loss_early_stopping,\n", + " })\n", + " self.wandb_run.summary[\"best_epoch\"] = best_epoch\n", + " self.wandb_run.summary[\"best_epoch_early_stopping\"] = best_epoch_early_stopping\n", + " self.wandb_run.summary[\"best_model_f1_score_validation\"] = best_model_f1_score_validation\n", + " self.wandb_run.summary[\"best_model_precision_validation\"] = best_model_precision_validation\n", + " self.wandb_run.summary[\"best_model_recall_validation\"] = best_model_recall_validation\n", + " self.wandb_run.summary[\"best_model_loss_validation\"] = best_model_loss_validation\n", + " self.wandb_run.summary[\"best_model_accuracy_validation\"] = best_model_accuracy_validation\n", + " self.wandb_run.summary[\"best_model_on_loss_f1_score_validation\"] = best_model_on_loss_f1_score_validation\n", + " self.wandb_run.summary[\"best_model_on_loss_precision_validation\"] = best_model_on_loss_precision_validation\n", + " self.wandb_run.summary[\"best_model_on_loss_recall_validation\"] = best_model_on_loss_recall_validation\n", + " self.wandb_run.summary[\"best_model_on_loss_loss_validation\"] = best_model_on_loss_loss_validation\n", + " self.wandb_run.summary[\"best_model_on_loss_accuracy_validation\"] = best_model_on_loss_accuracy_validation\n", + " if updated_model:\n", + " self.wandb_run.save(os.path.join(path_dataset, self.experiment_name), policy=\"live\", base_path=path_dataset)\n", + " self.wandb_run.save(os.path.join(path_dataset, self.experiment_name + \"_on_loss\"), policy=\"live\", base_path=path_dataset)\n", + "\n", + " def __del__(self):\n", + " self.wandb_run.finish()\n", + "\n", + " def 
restore(self):\n", + " self.wandb_run.restore(self.experiment_name, root=path_dataset)\n", + "\n", + "\n", + "def f1_loss(output, batch_labels):\n", + " # logging.debug(f\"output in loss : {output[:,1]}\")\n", + " # logging.debug(f\"batch_labels in loss : {batch_labels}\")\n", + " y_pred = output\n", + " tp = (batch_labels * y_pred).sum().to(torch.float32)\n", + " tn = ((1 - batch_labels) * (1 - y_pred)).sum().to(torch.float32).item()\n", + " fp = ((1 - batch_labels) * y_pred).sum().to(torch.float32)\n", + " fn = (batch_labels * (1 - y_pred)).sum().to(torch.float32)\n", + "\n", + " epsilon = 1e-7\n", + " F1_class1 = 2 * tp / (2 * tp + fp + fn + epsilon)\n", + " F1_class0 = 2 * tn / (2 * tn + fn + fp + epsilon)\n", + " New_F1 = (F1_class1 + F1_class0) / 2\n", + " return 1 - New_F1\n", + "\n", + "\n", + "def run_inference(dataloader, criterion, net, device, hidden_size, nb_rnn_layers, classification, batch_size_validation, max_value=np.inf):\n", + " net_copy = copy.deepcopy(net)\n", + " net_copy = net_copy.to(device)\n", + " net_copy = net_copy.eval()\n", + " loss = 0\n", + " n = 0\n", + " batch_labels_total = torch.tensor([], device=device)\n", + " output_total = torch.tensor([], device=device)\n", + " h1 = torch.zeros((nb_rnn_layers, batch_size_validation, hidden_size), device=device)\n", + " h2 = torch.zeros((nb_rnn_layers, batch_size_validation, hidden_size), device=device)\n", + " with torch.no_grad():\n", + " for batch_data in dataloader:\n", + " batch_samples_input1, batch_samples_input2, batch_samples_input3, batch_labels = batch_data\n", + " batch_samples_input1 = batch_samples_input1.to(device=device).float()\n", + " batch_samples_input2 = batch_samples_input2.to(device=device).float()\n", + " batch_samples_input3 = batch_samples_input3.to(device=device).float()\n", + " batch_labels = batch_labels.to(device=device).float()\n", + " if classification:\n", + " batch_labels = (batch_labels > THRESHOLD)\n", + " batch_labels = batch_labels.float()\n", + " output, 
h1, h2, max_value = net_copy(batch_samples_input1, batch_samples_input2, batch_samples_input3, h1, h2, max_value)\n", + " # logging.debug(f\"label = {batch_labels}\")\n", + " # logging.debug(f\"output = {output}\")\n", + " output = output.view(-1)\n", + " loss_py = criterion(output, batch_labels).mean()\n", + " loss += loss_py.item()\n", + " # logging.debug(f\"loss = {loss}\")\n", + " # if not classification:\n", + " # output = (output > THRESHOLD)\n", + " # batch_labels = (batch_labels > THRESHOLD)\n", + " # else:\n", + " # output = (output >= 0.5)\n", + " batch_labels_total = torch.cat([batch_labels_total, batch_labels])\n", + " output_total = torch.cat([output_total, output])\n", + " # logging.debug(f\"batch_label_total : {batch_labels_total}\")\n", + " # logging.debug(f\"output_total : {output_total}\")\n", + " n += 1\n", + "\n", + " loss /= n\n", + " acc = (output_total == batch_labels_total).float().mean()\n", + " output_total = output_total.float()\n", + " batch_labels_total = batch_labels_total.float()\n", + " tp = (batch_labels_total * output_total)\n", + " tn = ((1 - batch_labels_total) * (1 - output_total))\n", + " fp = ((1 - batch_labels_total) * output_total)\n", + " fn = (batch_labels_total * (1 - output_total))\n", + " return output_total, batch_labels_total, loss, acc, tp, tn, fp, fn\n", + "\n", + "\n", + "def get_metrics(tp, fp, fn):\n", + " tp_sum = tp.sum().to(torch.float32).item()\n", + " fp_sum = fp.sum().to(torch.float32).item()\n", + " fn_sum = fn.sum().to(torch.float32).item()\n", + " epsilon = 1e-7\n", + "\n", + " precision = tp_sum / (tp_sum + fp_sum + epsilon)\n", + " recall = tp_sum / (tp_sum + fn_sum + epsilon)\n", + "\n", + " f1 = 2 * (precision * recall) / (precision + recall + epsilon)\n", + "\n", + " return f1, precision, recall\n", + "\n", + "\n", + "# Regression balancing:\n", + "\n", + "\n", + "def get_lds_kernel(ks, sigma):\n", + " half_ks = (ks - 1) // 2\n", + " base_kernel = [0.] * half_ks + [1.] + [0.] 
* half_ks\n", + " kernel_window = gaussian_filter1d(base_kernel, sigma=sigma) / max(gaussian_filter1d(base_kernel, sigma=sigma))\n", + " return kernel_window\n", + "\n", + "\n", + "def generate_label_distribution_and_lds(dataset, kernel_size=5, kernel_std=2.0, nb_bins=100, reweight='inv_sqrt'):\n", + " \"\"\"\n", + " Returns:\n", + " distribution: the distribution of labels in the dataset\n", + " lds: the same distribution, smoothed with a gaussian kernel\n", + " \"\"\"\n", + "\n", + " weights = torch.tensor([0.3252, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0069, 0.0163,\n", + " 0.0000, 0.0366, 0.0000, 0.0179, 0.0000, 0.0076, 0.0444, 0.0176, 0.0025,\n", + " 0.0056, 0.0000, 0.0416, 0.0039, 0.0000, 0.0000, 0.0000, 0.0171, 0.0000,\n", + " 0.0000, 0.0042, 0.0114, 0.0209, 0.0023, 0.0036, 0.0106, 0.0241, 0.0034,\n", + " 0.0000, 0.0056, 0.0000, 0.0029, 0.0241, 0.0076, 0.0027, 0.0012, 0.0000,\n", + " 0.0166, 0.0028, 0.0000, 0.0000, 0.0000, 0.0197, 0.0000, 0.0000, 0.0021,\n", + " 0.0054, 0.0191, 0.0014, 0.0023, 0.0074, 0.0000, 0.0186, 0.0000, 0.0088,\n", + " 0.0000, 0.0032, 0.0135, 0.0069, 0.0029, 0.0016, 0.0164, 0.0068, 0.0022,\n", + " 0.0000, 0.0000, 0.0000, 0.0191, 0.0000, 0.0000, 0.0017, 0.0082, 0.0181,\n", + " 0.0019, 0.0038, 0.0064, 0.0000, 0.0133, 0.0000, 0.0069, 0.0000, 0.0025,\n", + " 0.0186, 0.0076, 0.0031, 0.0016, 0.0218, 0.0105, 0.0049, 0.0000, 0.0000,\n", + " 0.0246], dtype=torch.float64)\n", + "\n", + " lds = None\n", + " dist = None\n", + " bins = None\n", + " return weights, dist, lds, bins\n", + "\n", + " # TODO: remove before\n", + "\n", + " dataset_len = len(dataset)\n", + " logging.debug(f\"Length of the dataset passed to generate_label_distribution_and_lds: {dataset_len}\")\n", + " logging.debug(f\"kernel_size: {kernel_size}\")\n", + " logging.debug(f\"kernel_std: {kernel_std}\")\n", + " logging.debug(f\"Generating empirical distribution...\")\n", + "\n", + " tab = np.array([dataset[i][3].item() for i in range(dataset_len)])\n", + " tab = 
np.around(tab, decimals=5)\n", + " elts = np.unique(tab)\n", + " logging.debug(f\"all labels: {elts}\")\n", + " dist, bins = np.histogram(tab, bins=nb_bins, density=False, range=(0.0, 1.0))\n", + "\n", + " # dist, bins = np.histogram([dataset[i][3].item() for i in range(dataset_len)], bins=nb_bins, density=False, range=(0.0, 1.0))\n", + "\n", + " logging.debug(f\"dist: {dist}\")\n", + "\n", + " # kernel = get_lds_kernel(kernel_size, kernel_std)\n", + " # lds = convolve1d(dist, weights=kernel, mode='constant')\n", + "\n", + " lds = gaussian_filter1d(input=dist, sigma=kernel_std, axis=- 1, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\n", + "\n", + " weights = np.sqrt(lds) if reweight == 'inv_sqrt' else lds\n", + " # scaling = len(weights) / np.sum(weights) # not the same implementation as in the original repo\n", + " scaling = 1.0 / np.sum(weights)\n", + " weights = weights * scaling\n", + "\n", + " return weights, dist, lds, bins\n", + "\n", + "\n", + "class LabelDistributionSmoothing:\n", + " def __init__(self, c=1.0, dataset=None, weights=None, kernel_size=5, kernel_std=2.0, nb_bins=100, weighting_mode=\"inv_sqrt\"):\n", + " \"\"\"\n", + " If provided, lds_distribution must be a numpy.array representing a density over [0.0, 1.0] (e.g. 
class SurpriseReweighting:
    """
    Custom loss reweighting (Yann).

    Maintains a per-bin weight distribution over the label range [0, 1] and
    re-weights each sample's loss by the current weight of its label bin.
    Bin weights are updated online with an exponential moving average (rate
    ``alpha``) toward the mean absolute loss observed in each bin, rescaled so
    the total mass of the updated bins is conserved, and renormalized so the
    whole distribution always sums to 1.
    """

    def __init__(self, weights=None, nb_bins=100, alpha=1e-3):
        """
        weights: optional initial per-bin weights (torch tensor); when None,
            a uniform distribution over ``nb_bins`` bins is used.
        nb_bins: number of histogram bins over the label range [0, 1].
        alpha: EMA update rate for the bin weights.
        """
        if weights is None:
            self.weights = torch.ones(nb_bins) / nb_bins  # uniform start
        else:
            self.weights = weights
        self.weights = self.weights.detach()  # never carry gradients
        self.nb_bins = len(self.weights)
        self.bin_width = 1.0 / self.nb_bins
        self.alpha = alpha
        logging.debug(f"The SR distribution has {self.nb_bins} bins of width {self.bin_width}")
        logging.debug(f"Initial self.weights: {self.weights}")

    def update_and_get_weighted_loss(self, batch_labels, unweighted_loss):
        """
        Re-weight ``unweighted_loss`` element-wise by the current weights of
        the bins of ``batch_labels``, then update the bin weights from the
        observed absolute losses.

        Returns sqrt(weighted_loss * nb_bins), so that a uniform weight
        distribution leaves the loss scale unchanged.
        """
        device = batch_labels.device
        if self.weights.device != device:
            logging.debug(f"Moving SR weights to {device}")
            self.weights = self.weights.to(device)
        # labels equal to 1.0 are clamped into the last bin:
        last_bin = 1.0 - self.bin_width
        batch_idxs = torch.minimum(batch_labels, torch.ones_like(batch_labels) * last_bin) / self.bin_width  # FIXME : double check
        batch_idxs = batch_idxs.floor().long()
        self.weights = self.weights.detach()  # ensure no gradients
        # Fix: advanced indexing already returns a new tensor; an explicit
        # .clone() replaces the previous copy.deepcopy of a tensor slice.
        weights = self.weights[batch_idxs].clone()
        res = unweighted_loss * weights
        with torch.no_grad():
            abs_loss = torch.abs(unweighted_loss)

            # mean absolute loss per bin (empty bins divide by 1 to avoid NaN):
            num = torch.zeros(self.nb_bins, device=device)
            num = num.index_add(0, batch_idxs, abs_loss)
            bincount = torch.bincount(batch_idxs, minlength=self.nb_bins)
            div = bincount.float()
            idx_unchanged = bincount == 0
            idx_changed = bincount != 0
            div[idx_unchanged] = 1.0
            mean_loss_per_idx_normalized = num / div
            # rescale so the weight mass of the touched bins is conserved:
            sum_changed_weights = torch.sum(self.weights[idx_changed])
            sum_mean_loss = torch.sum(mean_loss_per_idx_normalized[idx_changed])
            mean_loss_per_idx_normalized[idx_changed] = mean_loss_per_idx_normalized[idx_changed] * sum_changed_weights / sum_mean_loss
            # EMA update of the touched bins, then force the sum back to 1:
            self.weights[idx_changed] = (1.0 - self.alpha) * self.weights[idx_changed] + self.alpha * mean_loss_per_idx_normalized[idx_changed]
            self.weights /= torch.sum(self.weights)  # force sum to 1
        return torch.sqrt(res * self.nb_bins)

    def __str__(self):
        # Bug fix: this previously printed "LDS nb_bins ..." (copy-pasted from
        # LabelDistributionSmoothing), which mislabeled SR output in the logs.
        return f"SR nb_bins: {self.nb_bins}\nweights: {self.weights}"
def generate_dataloader(window_size, fe, seq_len, seq_stride, distribution_mode, batch_size, nb_batch_per_epoch, classification, split_i, network_stride):
    """
    Build the train/validation (or test) datasets, samplers and DataLoaders.

    Depends on module globals: path_dataset, subject_list, subject_list_p1,
    subject_list_p2, PHASE, TEST_SET, LEN_SEGMENT, filename_classification_dataset,
    and on SignalDataset / get_class_idxs / RandomSampler / ValidationSampler.

    window_size / seq_stride are in samples; fe is the sampling frequency;
    split_i seeds the subject-level train/validation(/test) split.

    Returns (train_loader, validation_loader, batch_size_validation,
    test_loader, batch_size_test, test_subject).  When seq_len is not None
    only the train/validation loaders are built (test entries are None);
    when seq_len is None only the test loader is built.
    """
    # subject table: each row is (subject_id, first_segment, last_segment)
    # — presumably; confirm against the subject list files.  TODO confirm
    all_subject = pd.read_csv(Path(path_dataset) / subject_list, header=None, delim_whitespace=True).to_numpy()
    test_subject = None
    if PHASE == 'full':
        # 'full' phase: split phase-1 and phase-2 subject pools separately,
        # then recombine, so both phases are represented in every split.
        p1_subject = pd.read_csv(Path(path_dataset) / subject_list_p1, header=None, delim_whitespace=True).to_numpy()
        p2_subject = pd.read_csv(Path(path_dataset) / subject_list_p2, header=None, delim_whitespace=True).to_numpy()
        train_subject_p1, validation_subject_p1 = train_test_split(p1_subject, train_size=0.8, random_state=split_i)
        if TEST_SET:
            # half of the held-out 20% becomes the test set
            test_subject_p1, validation_subject_p1 = train_test_split(validation_subject_p1, train_size=0.5, random_state=split_i)
        train_subject_p2, validation_subject_p2 = train_test_split(p2_subject, train_size=0.8, random_state=split_i)
        if TEST_SET:
            test_subject_p2, validation_subject_p2 = train_test_split(validation_subject_p2, train_size=0.5, random_state=split_i)
        train_subject = np.array([s for s in all_subject if s[0] in train_subject_p1[:, 0] or s[0] in train_subject_p2[:, 0]]).squeeze()
        if TEST_SET:
            test_subject = np.array([s for s in all_subject if s[0] in test_subject_p1[:, 0] or s[0] in test_subject_p2[:, 0]]).squeeze()
        validation_subject = np.array(
            [s for s in all_subject if s[0] in validation_subject_p1[:, 0] or s[0] in validation_subject_p2[:, 0]]).squeeze()
    else:
        # single-pool split: 80% train, 20% validation (half of it test if enabled)
        train_subject, validation_subject = train_test_split(all_subject, train_size=0.8, random_state=split_i)
        if TEST_SET:
            test_subject, validation_subject = train_test_split(validation_subject, train_size=0.5, random_state=split_i)
    logging.debug(f"Subjects in training : {train_subject[:, 0]}")
    logging.debug(f"Subjects in validation : {validation_subject[:, 0]}")
    if TEST_SET:
        logging.debug(f"Subjects in test : {test_subject[:, 0]}")

    len_segment_s = LEN_SEGMENT * fe  # segment length in samples
    train_loader = None
    validation_loader = None
    test_loader = None
    batch_size_validation = None
    batch_size_test = None
    filename = filename_classification_dataset

    if seq_len is not None:
        # training mode: build train + validation loaders.
        # validation batch = one parallel position per segment per network-stride offset
        nb_segment_validation = len(np.hstack([range(int(s[1]), int(s[2])) for s in validation_subject]))
        batch_size_validation = len(list(range(0, (seq_stride // network_stride) * network_stride, network_stride))) * nb_segment_validation

        ds_train = SignalDataset(filename=filename,
                                 path=path_dataset,
                                 window_size=window_size,
                                 fe=fe,
                                 seq_len=seq_len,
                                 seq_stride=seq_stride,
                                 list_subject=train_subject,
                                 len_segment=len_segment_s)

        ds_validation = SignalDataset(filename=filename,
                                      path=path_dataset,
                                      window_size=window_size,
                                      fe=fe,
                                      seq_len=1,
                                      seq_stride=1,  # just to be sure, fixed value
                                      list_subject=validation_subject,
                                      len_segment=len_segment_s)
        idx_true, idx_false = get_class_idxs(ds_train, distribution_mode)
        samp_train = RandomSampler(idx_true=idx_true,
                                   idx_false=idx_false,
                                   batch_size=batch_size,
                                   nb_batch=nb_batch_per_epoch,
                                   distribution_mode=distribution_mode)

        samp_validation = ValidationSampler(ds_validation,
                                            seq_stride=seq_stride,
                                            len_segment=len_segment_s,
                                            nb_segment=nb_segment_validation,
                                            network_stride=network_stride)
        train_loader = DataLoader(ds_train,
                                  batch_size=batch_size,
                                  sampler=samp_train,
                                  shuffle=False,
                                  num_workers=0,
                                  pin_memory=True)

        validation_loader = DataLoader(ds_validation,
                                       batch_size=batch_size_validation,
                                       sampler=samp_validation,
                                       num_workers=0,
                                       pin_memory=True,
                                       shuffle=False)
    else:
        # test mode: build only the test loader (validation subjects are
        # reused as test subjects when no dedicated test split exists)
        if not TEST_SET:
            test_subject = validation_subject
        nb_segment_test = len(np.hstack([range(int(s[1]), int(s[2])) for s in test_subject]))
        batch_size_test = len(list(range(0, (seq_stride // network_stride) * network_stride, network_stride))) * nb_segment_test

        ds_test = SignalDataset(filename=filename,
                                path=path_dataset,
                                window_size=window_size,
                                fe=fe,
                                seq_len=1,
                                seq_stride=1,  # just to be sure, fixed value
                                list_subject=test_subject,
                                len_segment=len_segment_s)

        samp_test = ValidationSampler(ds_test,
                                      seq_stride=seq_stride,
                                      len_segment=len_segment_s,
                                      nb_segment=nb_segment_test,
                                      network_stride=network_stride)

        test_loader = DataLoader(ds_test,
                                 batch_size=batch_size_test,
                                 sampler=samp_test,
                                 num_workers=0,
                                 pin_memory=True,
                                 shuffle=False)

    return train_loader, validation_loader, batch_size_validation, test_loader, batch_size_test, test_subject
def run(config_dict, wandb_project, save_model, unique_name):
    """
    Train a PortiloopNetwork according to ``config_dict``, logging to wandb.

    Restores from a checkpoint named after the experiment when one exists
    under path_dataset; otherwise trains from scratch.  Tracks two "best"
    models (best by the MAXIMIZE_F1_SCORE criterion and best by validation
    loss) and optionally saves both to disk when ``save_model`` is True.
    Stops on smoothed-validation-loss early stopping or on ``max_duration``.

    Returns (best_model_loss_validation, best_model_f1_score_validation,
    best_epoch_early_stopping).

    Side effects: mutates the module globals precision_validation_factor and
    recall_validation_factor, writes checkpoint files, and creates/destroys a
    wandb run via LoggerWandb.
    """
    global precision_validation_factor
    global recall_validation_factor
    _t_start = time.time()
    logging.debug(f"config_dict: {config_dict}")
    # unique_name appends a nanosecond timestamp so wandb runs don't collide
    experiment_name = f"{config_dict['experiment_name']}_{time.time_ns()}" if unique_name else config_dict['experiment_name']
    nb_epoch_max = config_dict["nb_epoch_max"]
    nb_batch_per_epoch = config_dict["nb_batch_per_epoch"]
    nb_epoch_early_stopping_stop = config_dict["nb_epoch_early_stopping_stop"]
    early_stopping_smoothing_factor = config_dict["early_stopping_smoothing_factor"]
    batch_size = config_dict["batch_size"]
    seq_len = config_dict["seq_len"]
    window_size_s = config_dict["window_size_s"]
    fe = config_dict["fe"]
    seq_stride_s = config_dict["seq_stride_s"]
    lr_adam = config_dict["lr_adam"]
    hidden_size = config_dict["hidden_size"]
    device_val = config_dict["device_val"]
    device_train = config_dict["device_train"]
    max_duration = config_dict["max_duration"]
    nb_rnn_layers = config_dict["nb_rnn_layers"]
    adam_w = config_dict["adam_w"]
    distribution_mode = config_dict["distribution_mode"]
    classification = config_dict["classification"]
    reg_balancing = config_dict["reg_balancing"]
    split_idx = config_dict["split_idx"]
    validation_network_stride = config_dict["validation_network_stride"]

    assert reg_balancing in {'none', 'lds', 'sr'}, f"wrong key: {reg_balancing}"
    assert classification or distribution_mode == 1, "distribution_mode must be 1 (no class balancing) in regression mode"
    # balancer_type: 0 = none, 1 = LabelDistributionSmoothing, 2 = SurpriseReweighting
    balancer_type = 0
    lds = None
    sr = None
    if reg_balancing == 'lds':
        balancer_type = 1
    elif reg_balancing == 'sr':
        balancer_type = 2

    # convert seconds to samples
    window_size = int(window_size_s * fe)
    seq_stride = int(seq_stride_s * fe)

    if device_val.startswith("cuda") or device_train.startswith("cuda"):
        assert torch.cuda.is_available(), "CUDA unavailable"

    logger = LoggerWandb(experiment_name, config_dict, wandb_project)
    torch.seed()  # non-deterministic seeding of torch's RNG
    net = PortiloopNetwork(config_dict).to(device=device_train)
    # reduction='none' keeps per-sample losses so the balancers can reweight them
    criterion = nn.MSELoss(reduction='none') if not classification else nn.BCELoss(reduction='none')
    # criterion = nn.MSELoss() if not classification else nn.BCELoss()
    optimizer = optim.AdamW(net.parameters(), lr=lr_adam, weight_decay=adam_w)

    first_epoch = 0
    try:
        # resume from a previous run with the same experiment name, if any
        logger.restore()
        checkpoint = torch.load(path_dataset / experiment_name)
        logging.debug("Use checkpoint model")
        net.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        first_epoch = checkpoint['epoch'] + 1
        recall_validation_factor = checkpoint['recall_validation_factor']
        precision_validation_factor = checkpoint['precision_validation_factor']
    except (ValueError, FileNotFoundError):
        # net = PortiloopNetwork(config_dict).to(device=device_train)
        logging.debug("Create new model")
    net = net.train()
    # rough parameter count (len of each parameter's first dimension) used
    # only for the wandb-reported memory estimate below
    nb_weights = 0
    for i in net.parameters():
        nb_weights += len(i)
    has_envelope = 1
    if config_dict["envelope_input"]:
        has_envelope = 2
    config_dict["estimator_size_memory"] = nb_weights * window_size * seq_len * batch_size * has_envelope

    train_loader, validation_loader, batch_size_validation, _, _, _ = generate_dataloader(window_size, fe, seq_len, seq_stride, distribution_mode,
                                                                                          batch_size, nb_batch_per_epoch, classification, split_idx,
                                                                                          validation_network_stride)
    if balancer_type == 1:
        lds = LabelDistributionSmoothing(c=1.0, dataset=train_loader.dataset, weights=None, kernel_size=5, kernel_std=0.01, nb_bins=100,
                                         weighting_mode='inv_sqrt')
    elif balancer_type == 2:
        sr = SurpriseReweighting(weights=None, nb_bins=100, alpha=1e-3)

    # trackers for the best model under the F1/loss criterion ...
    best_model_accuracy = 0
    best_epoch = 0
    best_model = None
    best_loss_early_stopping = 1
    best_epoch_early_stopping = 0
    best_model_precision_validation = 0
    best_model_f1_score_validation = 0
    best_model_recall_validation = 0
    best_model_loss_validation = 1

    # ... and for the best model under the pure validation-loss criterion
    best_model_on_loss_accuracy = 0
    best_model_on_loss_precision_validation = 0
    best_model_on_loss_f1_score_validation = 0
    best_model_on_loss_recall_validation = 0
    best_model_on_loss_loss_validation = 1

    accuracy_train = None
    loss_train = None

    early_stopping_counter = 0
    loss_early_stopping = None
    # zero initial hidden states for the two GRU branches
    h1_zero = torch.zeros((nb_rnn_layers, batch_size, hidden_size), device=device_train)
    h2_zero = torch.zeros((nb_rnn_layers, batch_size, hidden_size), device=device_train)
    for epoch in range(first_epoch, first_epoch + nb_epoch_max):

        logging.debug(f"epoch: {epoch}")

        n = 0
        if epoch > -1:  # NOTE(review): always true — training is never skipped
            accuracy_train = 0
            loss_train = 0
            _t_start = time.time()
            for batch_data in train_loader:
                batch_samples_input1, batch_samples_input2, batch_samples_input3, batch_labels = batch_data
                batch_samples_input1 = batch_samples_input1.to(device=device_train).float()
                batch_samples_input2 = batch_samples_input2.to(device=device_train).float()
                batch_samples_input3 = batch_samples_input3.to(device=device_train).float()
                batch_labels = batch_labels.to(device=device_train).float()

                optimizer.zero_grad()
                if classification:
                    # binarize the soft labels for BCE
                    batch_labels = (batch_labels > THRESHOLD)
                    batch_labels = batch_labels.float()

                output, _, _, _ = net(batch_samples_input1, batch_samples_input2, batch_samples_input3, h1_zero, h2_zero)

                output = output.view(-1)

                loss = criterion(output, batch_labels)

                if balancer_type == 1:
                    batch_weights = lds.lds_weights_batch(batch_labels)
                    loss = loss * batch_weights
                    error = batch_weights.isinf().any().item() or batch_weights.isnan().any().item() or torch.isnan(
                        loss).any().item() or torch.isinf(loss).any().item()
                    if error:
                        logging.debug(f"batch_labels: {batch_labels}")
                        logging.debug(f"batch_weights: {batch_weights}")
                        logging.debug(f"loss: {loss}")
                        logging.debug(f"LDS: {lds}")
                        assert False, "loss is nan or inf"
                elif balancer_type == 2:
                    loss = sr.update_and_get_weighted_loss(batch_labels=batch_labels, unweighted_loss=loss)
                    error = torch.isnan(loss).any().item() or torch.isinf(loss).any().item()
                    if error:
                        logging.debug(f"batch_labels: {batch_labels}")
                        logging.debug(f"loss: {loss}")
                        logging.debug(f"SR: {sr}")
                        assert False, "loss is nan or inf"

                loss = loss.mean()

                loss_train += loss.item()
                loss.backward()
                optimizer.step()

                if not classification:
                    output = (output > THRESHOLD)
                    batch_labels = (batch_labels > THRESHOLD)
                else:
                    output = (output >= 0.5)
                accuracy_train += (output == batch_labels).float().mean()
                n += 1
            _t_stop = time.time()
            logging.debug(f"Training time for 1 epoch : {_t_stop - _t_start} s")
            accuracy_train /= n
            loss_train /= n

            _t_start = time.time()
        output_validation, labels_validation, loss_validation, accuracy_validation, tp, tn, fp, fn = run_inference(validation_loader, criterion, net,
                                                                                                                   device_val, hidden_size,
                                                                                                                   nb_rnn_layers, classification,
                                                                                                                   batch_size_validation)
        f1_validation, precision_validation, recall_validation = get_metrics(tp, fp, fn)

        _t_stop = time.time()
        logging.debug(f"Validation time for 1 epoch : {_t_stop - _t_start} s")

        recall_validation_factor = recall_validation
        precision_validation_factor = precision_validation
        updated_model = False
        # best-model checkpoint under the configured criterion (F1 or loss):
        if (not MAXIMIZE_F1_SCORE and loss_validation < best_model_loss_validation) or (
                MAXIMIZE_F1_SCORE and f1_validation > best_model_f1_score_validation):
            best_model = copy.deepcopy(net)
            best_epoch = epoch
            # torch.save(best_model.state_dict(), path_dataset / experiment_name, _use_new_zipfile_serialization=False)
            if save_model:
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': best_model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'recall_validation_factor': recall_validation_factor,
                    'precision_validation_factor': precision_validation_factor,
                }, path_dataset / experiment_name, _use_new_zipfile_serialization=False)
            updated_model = True
            best_model_f1_score_validation = f1_validation
            best_model_precision_validation = precision_validation
            best_model_recall_validation = recall_validation
            best_model_loss_validation = loss_validation
            best_model_accuracy = accuracy_validation
        # separate best-model checkpoint under pure validation loss:
        if loss_validation < best_model_on_loss_loss_validation:
            best_model = copy.deepcopy(net)
            best_epoch = epoch
            # torch.save(best_model.state_dict(), path_dataset / experiment_name, _use_new_zipfile_serialization=False)
            if save_model:
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': best_model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'recall_validation_factor': recall_validation_factor,
                    'precision_validation_factor': precision_validation_factor,
                }, path_dataset / (experiment_name + "_on_loss"), _use_new_zipfile_serialization=False)
            updated_model = True
            best_model_on_loss_f1_score_validation = f1_validation
            best_model_on_loss_precision_validation = precision_validation
            best_model_on_loss_recall_validation = recall_validation
            best_model_on_loss_loss_validation = loss_validation
            best_model_on_loss_accuracy = accuracy_validation

        # exponentially-smoothed validation loss used for early stopping:
        loss_early_stopping = loss_validation if loss_early_stopping is None and early_stopping_smoothing_factor == 1 else loss_validation if loss_early_stopping is None else loss_validation * early_stopping_smoothing_factor + loss_early_stopping * (
                1.0 - early_stopping_smoothing_factor)

        if loss_early_stopping < best_loss_early_stopping:
            best_loss_early_stopping = loss_early_stopping
            early_stopping_counter = 0
            best_epoch_early_stopping = epoch
        else:
            early_stopping_counter += 1

        logger.log(accuracy_train=accuracy_train,
                   loss_train=loss_train,
                   accuracy_validation=accuracy_validation,
                   loss_validation=loss_validation,
                   f1_validation=f1_validation,
                   precision_validation=precision_validation,
                   recall_validation=recall_validation,
                   best_epoch=best_epoch,
                   best_model=best_model,
                   loss_early_stopping=loss_early_stopping,
                   best_epoch_early_stopping=best_epoch_early_stopping,
                   best_model_accuracy_validation=best_model_accuracy,
                   best_model_f1_score_validation=best_model_f1_score_validation,
                   best_model_precision_validation=best_model_precision_validation,
                   best_model_recall_validation=best_model_recall_validation,
                   best_model_loss_validation=best_model_loss_validation,
                   best_model_on_loss_accuracy_validation=best_model_on_loss_accuracy,
                   best_model_on_loss_f1_score_validation=best_model_on_loss_f1_score_validation,
                   best_model_on_loss_precision_validation=best_model_on_loss_precision_validation,
                   best_model_on_loss_recall_validation=best_model_on_loss_recall_validation,
                   best_model_on_loss_loss_validation=best_model_on_loss_loss_validation,
                   updated_model=updated_model)

        if early_stopping_counter > nb_epoch_early_stopping_stop or time.time() - _t_start > max_duration:
            logging.debug("Early stopping.")
            break
    logging.debug("Delete logger")
    del logger
    logging.debug("Logger deleted")
    return best_model_loss_validation, best_model_f1_score_validation, best_epoch_early_stopping
class SignalDataset(Dataset):
    """
    Dataset of EEG samples (signal window sequence, envelope window sequence,
    power-feature ratio, label) built from the pre-processed CSV file.

    The CSV has 4 columns per sample: signal, envelope, power-feature ratio,
    label (negative label = unlabeled zone).  Each item is a sequence of
    ``seq_len`` windows of ``window_size`` samples strided by ``seq_stride``
    and ending at the sampled index; the label and ratio are those of the
    last sample of the last window.
    """

    def __init__(self, filename, path, window_size, fe, seq_len, seq_stride, list_subject, len_segment):
        self.fe = fe
        self.window_size = window_size
        self.path_file = Path(path) / filename

        self.data = pd.read_csv(self.path_file, header=None).to_numpy()
        assert list_subject is not None
        # keep only the segments belonging to the requested subjects
        used_sequence = np.hstack([range(int(s[1]), int(s[2])) for s in list_subject])
        split_data = np.array(np.split(self.data, int(len(self.data) / (len_segment + 30 * fe))))  # 115+30 = nb seconds per sequence in the dataset
        split_data = split_data[used_sequence]
        self.data = np.transpose(split_data.reshape((split_data.shape[0] * split_data.shape[1], 4)))

        assert self.window_size <= len(self.data[0]), "Dataset smaller than window size."
        self.full_signal = torch.tensor(self.data[0], dtype=torch.float)
        self.full_envelope = torch.tensor(self.data[1], dtype=torch.float)
        self.seq_len = seq_len  # 1 means single sample / no sequence
        self.idx_stride = seq_stride
        self.past_signal_len = self.seq_len * self.idx_stride

        # list of indices that can be sampled:
        self.indices = [idx for idx in range(len(self.data[0]) - self.window_size)  # all possible idxs in the dataset
                        if not (self.data[3][idx + self.window_size - 1] < 0  # that are not ending in an unlabeled zone
                                or idx < self.past_signal_len)]  # and far enough from the beginning to build a sequence up to here
        total_spindles = np.sum(self.data[3] > THRESHOLD)
        # wording aligned with Software/python/ANN/portiloop_detector_training.py
        logging.debug(f"total number of spindles in this dataset : {total_spindles}")

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, idx):
        assert 0 <= idx < len(self), f"Index out of range ({idx}/{len(self)})."
        idx = self.indices[idx]
        assert self.data[3][idx + self.window_size - 1] >= 0, f"Bad index: {idx}."

        # unfold yields seq_len windows of window_size samples ending at idx
        signal_seq = self.full_signal[idx - (self.past_signal_len - self.idx_stride):idx + self.window_size].unfold(0, self.window_size,
                                                                                                                   self.idx_stride)
        envelope_seq = self.full_envelope[idx - (self.past_signal_len - self.idx_stride):idx + self.window_size].unfold(0, self.window_size,
                                                                                                                       self.idx_stride)

        ratio_pf = torch.tensor(self.data[2][idx + self.window_size - 1], dtype=torch.float)
        label = torch.tensor(self.data[3][idx + self.window_size - 1], dtype=torch.float)

        return signal_seq, envelope_seq, ratio_pf, label

    def is_spindle(self, idx):
        """Return True when the sample at dataset index idx is a spindle."""
        # Bug fix: the bound check was `0 <= idx <= len(self)`, which allowed
        # idx == len(self) and then raised IndexError on self.indices[idx];
        # now consistent with __getitem__.
        assert 0 <= idx < len(self), f"Index out of range ({idx}/{len(self)})."
        idx = self.indices[idx]
        return bool(self.data[3][idx + self.window_size - 1] > THRESHOLD)
class RandomSampler(Sampler):
    """
    Draws dataset indices at random, WITH replacement, balancing the two
    classes evenly — except when distribution_mode == 1, where only "true"
    indices are drawn (no class balancing).

    __iter__ stops after batch_size * nb_batch draws.

    Arguments:
        idx_true: np.array of positive-class indices
        idx_false: np.array of negative-class indices
        batch_size (int)
        distribution_mode (int): 1 disables class balancing
        nb_batch (int): number of batches drawn per epoch
    """

    def __init__(self, idx_true, idx_false, batch_size, distribution_mode, nb_batch):
        self.idx_true = idx_true
        self.idx_false = idx_false
        self.nb_true = self.idx_true.size
        self.nb_false = self.idx_false.size
        self.length = nb_batch * batch_size
        self.distribution_mode = distribution_mode

    def __iter__(self):
        global precision_validation_factor
        global recall_validation_factor
        seed()
        # epsilon = 1e-7 proba = float(0.5 + 0.5 * (precision_validation_factor - recall_validation_factor) / (precision_validation_factor +
        # recall_validation_factor + epsilon))
        proba = 1 if self.distribution_mode == 1 else 0.5
        logging.debug(f"proba: {proba}")

        for _ in range(self.length):
            pick_true = np.random.choice([0, 1], p=[1 - proba, proba])
            if pick_true:  # sample from the positive class
                yield self.idx_true[randint(0, self.nb_true - 1)]
            else:  # sample from the negative class
                yield self.idx_false[randint(0, self.nb_false - 1)]

    def __len__(self):
        return self.length
class ValidationSampler(Sampler):
    """
    Deterministic sampler for validation/test: for each batch position it
    yields, for every segment, the network_stride-spaced offsets inside one
    seq_stride window, so all segments are evaluated in parallel.

    network_stride (int >= 1, default: 1): divides the size of the dataset (and of the batch) by striding further than 1
    """

    def __init__(self, data_source, seq_stride, nb_segment, len_segment, network_stride):
        network_stride = int(network_stride)
        assert network_stride >= 1
        self.network_stride = network_stride
        self.seq_stride = seq_stride
        self.data = data_source
        self.nb_segment = nb_segment
        self.len_segment = len_segment

    def __iter__(self):
        seed()
        # number of seq_stride-sized batch positions per segment
        # (len sequence = 115 s + add the 15 first s?)
        nb_batches = self.len_segment // self.seq_stride
        max_offset = (self.seq_stride // self.network_stride) * self.network_stride
        for batch_pos in range(nb_batches):
            for segment in range(self.nb_segment):
                base = segment * self.len_segment + batch_pos * self.seq_stride
                for offset in range(0, max_offset, self.network_stride):
                    yield base + offset

    def __len__(self):
        assert False
        # return len(self.data)
        # return len(self.data_source)
class FcModule(nn.Module):
    """A single fully-connected layer followed by ReLU and dropout."""

    def __init__(self, in_features, out_features, dropout_p):
        super(FcModule, self).__init__()
        self.fc = nn.Linear(in_features=in_features, out_features=out_features)
        self.dropout = nn.Dropout(dropout_p)

    def forward(self, x):
        # linear -> ReLU -> dropout, in one expression
        return self.dropout(F.relu(self.fc(x)))
class PortiloopNetwork(nn.Module):
    """
    Spindle-detection network.

    One CNN branch (optionally followed by a GRU) processes the raw signal
    windows; an optional second, identically-shaped branch processes the
    envelope; an optional scalar power-feature ratio is concatenated; a final
    linear layer + sigmoid outputs the probability of being a spindle.

    All hyperparameters come from the configuration dict ``c_dict``.
    """

    def __init__(self, c_dict):
        super(PortiloopNetwork, self).__init__()

        RNN = c_dict["RNN"]
        stride_pool = c_dict["stride_pool"]
        stride_conv = c_dict["stride_conv"]
        kernel_conv = c_dict["kernel_conv"]
        kernel_pool = c_dict["kernel_pool"]
        nb_channel = c_dict["nb_channel"]
        hidden_size = c_dict["hidden_size"]
        window_size_s = c_dict["window_size_s"]
        dropout_p = c_dict["dropout"]
        dilation_conv = c_dict["dilation_conv"]
        dilation_pool = c_dict["dilation_pool"]
        fe = c_dict["fe"]
        nb_conv_layers = c_dict["nb_conv_layers"]
        nb_rnn_layers = c_dict["nb_rnn_layers"]
        first_layer_dropout = c_dict["first_layer_dropout"]
        self.envelope_input = c_dict["envelope_input"]
        self.power_features_input = c_dict["power_features_input"]
        self.classification = c_dict["classification"]

        conv_padding = 0  # int(kernel_conv // 2)
        pool_padding = 0  # int(kernel_pool // 2)
        window_size = int(window_size_s * fe)  # window length in samples
        nb_out = window_size

        # propagate the window length through all conv/pool layers to get the
        # flattened CNN output size (out_dim is defined elsewhere in this
        # notebook — presumably the standard conv output-length formula;
        # TODO confirm against its definition)
        for _ in range(nb_conv_layers):
            nb_out = out_dim(nb_out, conv_padding, dilation_conv, kernel_conv, stride_conv)
            nb_out = out_dim(nb_out, pool_padding, dilation_pool, kernel_pool, stride_pool)

        output_cnn_size = int(nb_channel * nb_out)

        self.RNN = RNN
        # branch 1: raw signal CNN stack
        self.first_layer_input1 = ConvPoolModule(in_channels=1,
                                                 out_channel=nb_channel,
                                                 kernel_conv=kernel_conv,
                                                 stride_conv=stride_conv,
                                                 conv_padding=conv_padding,
                                                 dilation_conv=dilation_conv,
                                                 kernel_pool=kernel_pool,
                                                 stride_pool=stride_pool,
                                                 pool_padding=pool_padding,
                                                 dilation_pool=dilation_pool,
                                                 dropout_p=dropout_p if first_layer_dropout else 0)
        self.seq_input1 = nn.Sequential(*(ConvPoolModule(in_channels=nb_channel,
                                                         out_channel=nb_channel,
                                                         kernel_conv=kernel_conv,
                                                         stride_conv=stride_conv,
                                                         conv_padding=conv_padding,
                                                         dilation_conv=dilation_conv,
                                                         kernel_pool=kernel_pool,
                                                         stride_pool=stride_pool,
                                                         pool_padding=pool_padding,
                                                         dilation_pool=dilation_pool,
                                                         dropout_p=dropout_p) for _ in range(nb_conv_layers - 1)))
        if RNN:
            self.gru_input1 = nn.GRU(input_size=output_cnn_size,
                                     hidden_size=hidden_size,
                                     num_layers=nb_rnn_layers,
                                     dropout=0,
                                     batch_first=True)
            # fc_size = hidden_size
        else:
            # no RNN: replace it with a stack of fully-connected layers
            self.first_fc_input1 = FcModule(in_features=output_cnn_size, out_features=hidden_size, dropout_p=dropout_p)
            self.seq_fc_input1 = nn.Sequential(
                *(FcModule(in_features=hidden_size, out_features=hidden_size, dropout_p=dropout_p) for _ in range(nb_rnn_layers - 1)))
        if self.envelope_input:
            # branch 2: envelope CNN stack, same architecture as branch 1
            self.first_layer_input2 = ConvPoolModule(in_channels=1,
                                                     out_channel=nb_channel,
                                                     kernel_conv=kernel_conv,
                                                     stride_conv=stride_conv,
                                                     conv_padding=conv_padding,
                                                     dilation_conv=dilation_conv,
                                                     kernel_pool=kernel_pool,
                                                     stride_pool=stride_pool,
                                                     pool_padding=pool_padding,
                                                     dilation_pool=dilation_pool,
                                                     dropout_p=dropout_p if first_layer_dropout else 0)
            self.seq_input2 = nn.Sequential(*(ConvPoolModule(in_channels=nb_channel,
                                                             out_channel=nb_channel,
                                                             kernel_conv=kernel_conv,
                                                             stride_conv=stride_conv,
                                                             conv_padding=conv_padding,
                                                             dilation_conv=dilation_conv,
                                                             kernel_pool=kernel_pool,
                                                             stride_pool=stride_pool,
                                                             pool_padding=pool_padding,
                                                             dilation_pool=dilation_pool,
                                                             dropout_p=dropout_p) for _ in range(nb_conv_layers - 1)))

            if RNN:
                self.gru_input2 = nn.GRU(input_size=output_cnn_size,
                                         hidden_size=hidden_size,
                                         num_layers=nb_rnn_layers,
                                         dropout=0,
                                         batch_first=True)
            else:
                self.first_fc_input2 = FcModule(in_features=output_cnn_size, out_features=hidden_size, dropout_p=dropout_p)
                self.seq_fc_input2 = nn.Sequential(
                    *(FcModule(in_features=hidden_size, out_features=hidden_size, dropout_p=dropout_p) for _ in range(nb_rnn_layers - 1)))
        # final layer input size depends on which inputs are enabled:
        fc_features = 0
        fc_features += hidden_size
        if self.envelope_input:
            fc_features += hidden_size
        if self.power_features_input:
            fc_features += 1
        out_features = 1
        self.fc = nn.Linear(in_features=fc_features,  # enveloppe and signal + power features ratio
                            out_features=out_features)  # probability of being a spindle

    def forward(self, x1, x2, x3, h1, h2, max_value=np.inf):
        """
        x1: signal sequences, shape (batch, seq_len, window_size) per the
            unpacking below; x2: envelope sequences (same shape); x3: scalar
            power-feature ratio; h1/h2: initial GRU hidden states.

        Returns (output, hn1, hn2, max_value) where max_value tracks the
        largest absolute activation seen (used for quantization analysis —
        presumably; TODO confirm with the notebook's downstream cells).
        ABLATION is a module global selecting which input replaces the other.
        """
        (batch_size, sequence_len, features) = x1.shape

        if ABLATION == 1:
            x1 = copy.deepcopy(x2)
        elif ABLATION == 2:
            x2 = copy.deepcopy(x1)

        # fold the sequence dimension into the batch for the CNN:
        x1 = x1.view(-1, 1, features)
        x1, max_value = self.first_layer_input1((x1, max_value))
        x1, max_value = self.seq_input1((x1, max_value))

        x1 = torch.flatten(x1, start_dim=1, end_dim=-1)
        hn1 = None
        if self.RNN:
            # restore the sequence dimension and keep the last time step
            x1 = x1.view(batch_size, sequence_len, -1)
            x1, hn1 = self.gru_input1(x1, h1)
            max_temp = torch.max(abs(x1))
            if max_temp > max_value:
                logging.debug(f"max_value = {max_temp}")
                max_value = max_temp
            x1 = x1[:, -1, :]
        else:
            x1 = self.first_fc_input1(x1)
            x1 = self.seq_fc_input1(x1)
        x = x1
        hn2 = None
        if self.envelope_input:
            # envelope branch, identical processing to the signal branch
            x2 = x2.view(-1, 1, features)
            x2, max_value = self.first_layer_input2((x2, max_value))
            x2, max_value = self.seq_input2((x2, max_value))

            x2 = torch.flatten(x2, start_dim=1, end_dim=-1)
            if self.RNN:
                x2 = x2.view(batch_size, sequence_len, -1)
                x2, hn2 = self.gru_input2(x2, h2)
                max_temp = torch.max(abs(x2))
                if max_temp > max_value:
                    logging.debug(f"max_value = {max_temp}")
                    max_value = max_temp
                x2 = x2[:, -1, :]
            else:
                x2 = self.first_fc_input2(x2)
                x2 = self.seq_fc_input2(x2)
            x = torch.cat((x, x2), -1)

        if self.power_features_input:
            x3 = x3.view(-1, 1)
            x = torch.cat((x, x3), -1)

        x = self.fc(x)  # output size: 1
        max_temp = torch.max(abs(x))
        if max_temp > max_value:
            logging.debug(f"max_value = {max_temp}")
            max_value = max_temp
        x = torch.sigmoid(x)

        return x, hn1, hn2, max_value
reinit=True)\n", + "\n", + " def log(self,\n", + " accuracy_train,\n", + " loss_train,\n", + " accuracy_validation,\n", + " loss_validation,\n", + " f1_validation,\n", + " precision_validation,\n", + " recall_validation,\n", + " best_epoch,\n", + " best_model,\n", + " loss_early_stopping,\n", + " best_epoch_early_stopping,\n", + " best_model_accuracy_validation,\n", + " best_model_f1_score_validation,\n", + " best_model_precision_validation,\n", + " best_model_recall_validation,\n", + " best_model_loss_validation,\n", + " best_model_on_loss_accuracy_validation,\n", + " best_model_on_loss_f1_score_validation,\n", + " best_model_on_loss_precision_validation,\n", + " best_model_on_loss_recall_validation,\n", + " best_model_on_loss_loss_validation,\n", + " updated_model=False,\n", + " ):\n", + " self.best_model = best_model\n", + " self.wandb_run.log({\n", + " \"accuracy_train\": accuracy_train,\n", + " \"loss_train\": loss_train,\n", + " \"accuracy_validation\": accuracy_validation,\n", + " \"loss_validation\": loss_validation,\n", + " \"f1_validation\": f1_validation,\n", + " \"precision_validation\": precision_validation,\n", + " \"recall_validation\": recall_validation,\n", + " \"loss_early_stopping\": loss_early_stopping,\n", + " })\n", + " self.wandb_run.summary[\"best_epoch\"] = best_epoch\n", + " self.wandb_run.summary[\"best_epoch_early_stopping\"] = best_epoch_early_stopping\n", + " self.wandb_run.summary[\"best_model_f1_score_validation\"] = best_model_f1_score_validation\n", + " self.wandb_run.summary[\"best_model_precision_validation\"] = best_model_precision_validation\n", + " self.wandb_run.summary[\"best_model_recall_validation\"] = best_model_recall_validation\n", + " self.wandb_run.summary[\"best_model_loss_validation\"] = best_model_loss_validation\n", + " self.wandb_run.summary[\"best_model_accuracy_validation\"] = best_model_accuracy_validation\n", + " self.wandb_run.summary[\"best_model_on_loss_f1_score_validation\"] = 
best_model_on_loss_f1_score_validation\n", + " self.wandb_run.summary[\"best_model_on_loss_precision_validation\"] = best_model_on_loss_precision_validation\n", + " self.wandb_run.summary[\"best_model_on_loss_recall_validation\"] = best_model_on_loss_recall_validation\n", + " self.wandb_run.summary[\"best_model_on_loss_loss_validation\"] = best_model_on_loss_loss_validation\n", + " self.wandb_run.summary[\"best_model_on_loss_accuracy_validation\"] = best_model_on_loss_accuracy_validation\n", + " if updated_model:\n", + " self.wandb_run.save(os.path.join(path_dataset, self.experiment_name), policy=\"live\", base_path=path_dataset)\n", + " self.wandb_run.save(os.path.join(path_dataset, self.experiment_name + \"_on_loss\"), policy=\"live\", base_path=path_dataset)\n", + "\n", + " def __del__(self):\n", + " self.wandb_run.finish()\n", + "\n", + " def restore(self):\n", + " self.wandb_run.restore(self.experiment_name, root=path_dataset)\n", + "\n", + "\n", + "def f1_loss(output, batch_labels):\n", + " # logging.debug(f\"output in loss : {output[:,1]}\")\n", + " # logging.debug(f\"batch_labels in loss : {batch_labels}\")\n", + " y_pred = output\n", + " tp = (batch_labels * y_pred).sum().to(torch.float32)\n", + " tn = ((1 - batch_labels) * (1 - y_pred)).sum().to(torch.float32).item()\n", + " fp = ((1 - batch_labels) * y_pred).sum().to(torch.float32)\n", + " fn = (batch_labels * (1 - y_pred)).sum().to(torch.float32)\n", + "\n", + " epsilon = 1e-7\n", + " F1_class1 = 2 * tp / (2 * tp + fp + fn + epsilon)\n", + " F1_class0 = 2 * tn / (2 * tn + fn + fp + epsilon)\n", + " New_F1 = (F1_class1 + F1_class0) / 2\n", + " return 1 - New_F1\n", + "\n", + "\n", + "def run_inference(dataloader, criterion, net, device, hidden_size, nb_rnn_layers, classification, batch_size_validation, max_value=np.inf):\n", + " net_copy = copy.deepcopy(net)\n", + " net_copy = net_copy.to(device)\n", + " net_copy = net_copy.eval()\n", + " loss = 0\n", + " n = 0\n", + " batch_labels_total = 
torch.tensor([], device=device)\n", + " output_total = torch.tensor([], device=device)\n", + " h1 = torch.zeros((nb_rnn_layers, batch_size_validation, hidden_size), device=device)\n", + " h2 = torch.zeros((nb_rnn_layers, batch_size_validation, hidden_size), device=device)\n", + " with torch.no_grad():\n", + " for batch_data in dataloader:\n", + " batch_samples_input1, batch_samples_input2, batch_samples_input3, batch_labels = batch_data\n", + " batch_samples_input1 = batch_samples_input1.to(device=device).float()\n", + " batch_samples_input2 = batch_samples_input2.to(device=device).float()\n", + " batch_samples_input3 = batch_samples_input3.to(device=device).float()\n", + " batch_labels = batch_labels.to(device=device).float()\n", + " if classification:\n", + " batch_labels = (batch_labels > THRESHOLD)\n", + " batch_labels = batch_labels.float()\n", + " output, h1, h2, max_value = net_copy(batch_samples_input1, batch_samples_input2, batch_samples_input3, h1, h2, max_value)\n", + " # logging.debug(f\"label = {batch_labels}\")\n", + " # logging.debug(f\"output = {output}\")\n", + " output = output.view(-1)\n", + " loss_py = criterion(output, batch_labels).mean()\n", + " loss += loss_py.item()\n", + " # logging.debug(f\"loss = {loss}\")\n", + " # if not classification:\n", + " # output = (output > THRESHOLD)\n", + " # batch_labels = (batch_labels > THRESHOLD)\n", + " # else:\n", + " # output = (output >= 0.5)\n", + " batch_labels_total = torch.cat([batch_labels_total, batch_labels])\n", + " output_total = torch.cat([output_total, output])\n", + " # logging.debug(f\"batch_label_total : {batch_labels_total}\")\n", + " # logging.debug(f\"output_total : {output_total}\")\n", + " n += 1\n", + "\n", + " loss /= n\n", + " acc = (output_total == batch_labels_total).float().mean()\n", + " output_total = output_total.float()\n", + " batch_labels_total = batch_labels_total.float()\n", + " tp = (batch_labels_total * output_total)\n", + " tn = ((1 - batch_labels_total) * (1 - 
output_total))\n", + " fp = ((1 - batch_labels_total) * output_total)\n", + " fn = (batch_labels_total * (1 - output_total))\n", + " return output_total, batch_labels_total, loss, acc, tp, tn, fp, fn\n", + "\n", + "\n", + "def get_metrics(tp, fp, fn):\n", + " tp_sum = tp.sum().to(torch.float32).item()\n", + " fp_sum = fp.sum().to(torch.float32).item()\n", + " fn_sum = fn.sum().to(torch.float32).item()\n", + " epsilon = 1e-7\n", + "\n", + " precision = tp_sum / (tp_sum + fp_sum + epsilon)\n", + " recall = tp_sum / (tp_sum + fn_sum + epsilon)\n", + "\n", + " f1 = 2 * (precision * recall) / (precision + recall + epsilon)\n", + "\n", + " return f1, precision, recall\n", + "\n", + "\n", + "# Regression balancing:\n", + "\n", + "\n", + "def get_lds_kernel(ks, sigma):\n", + " half_ks = (ks - 1) // 2\n", + " base_kernel = [0.] * half_ks + [1.] + [0.] * half_ks\n", + " kernel_window = gaussian_filter1d(base_kernel, sigma=sigma) / max(gaussian_filter1d(base_kernel, sigma=sigma))\n", + " return kernel_window\n", + "\n", + "\n", + "def generate_label_distribution_and_lds(dataset, kernel_size=5, kernel_std=2.0, nb_bins=100, reweight='inv_sqrt'):\n", + " \"\"\"\n", + " Returns:\n", + " distribution: the distribution of labels in the dataset\n", + " lds: the same distribution, smoothed with a gaussian kernel\n", + " \"\"\"\n", + "\n", + " weights = torch.tensor([0.3252, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0069, 0.0163,\n", + " 0.0000, 0.0366, 0.0000, 0.0179, 0.0000, 0.0076, 0.0444, 0.0176, 0.0025,\n", + " 0.0056, 0.0000, 0.0416, 0.0039, 0.0000, 0.0000, 0.0000, 0.0171, 0.0000,\n", + " 0.0000, 0.0042, 0.0114, 0.0209, 0.0023, 0.0036, 0.0106, 0.0241, 0.0034,\n", + " 0.0000, 0.0056, 0.0000, 0.0029, 0.0241, 0.0076, 0.0027, 0.0012, 0.0000,\n", + " 0.0166, 0.0028, 0.0000, 0.0000, 0.0000, 0.0197, 0.0000, 0.0000, 0.0021,\n", + " 0.0054, 0.0191, 0.0014, 0.0023, 0.0074, 0.0000, 0.0186, 0.0000, 0.0088,\n", + " 0.0000, 0.0032, 0.0135, 0.0069, 0.0029, 0.0016, 0.0164, 0.0068, 
0.0022,\n", + " 0.0000, 0.0000, 0.0000, 0.0191, 0.0000, 0.0000, 0.0017, 0.0082, 0.0181,\n", + " 0.0019, 0.0038, 0.0064, 0.0000, 0.0133, 0.0000, 0.0069, 0.0000, 0.0025,\n", + " 0.0186, 0.0076, 0.0031, 0.0016, 0.0218, 0.0105, 0.0049, 0.0000, 0.0000,\n", + " 0.0246], dtype=torch.float64)\n", + "\n", + " lds = None\n", + " dist = None\n", + " bins = None\n", + " return weights, dist, lds, bins\n", + "\n", + " # TODO: remove before\n", + "\n", + " dataset_len = len(dataset)\n", + " logging.debug(f\"Length of the dataset passed to generate_label_distribution_and_lds: {dataset_len}\")\n", + " logging.debug(f\"kernel_size: {kernel_size}\")\n", + " logging.debug(f\"kernel_std: {kernel_std}\")\n", + " logging.debug(f\"Generating empirical distribution...\")\n", + "\n", + " tab = np.array([dataset[i][3].item() for i in range(dataset_len)])\n", + " tab = np.around(tab, decimals=5)\n", + " elts = np.unique(tab)\n", + " logging.debug(f\"all labels: {elts}\")\n", + " dist, bins = np.histogram(tab, bins=nb_bins, density=False, range=(0.0, 1.0))\n", + "\n", + " # dist, bins = np.histogram([dataset[i][3].item() for i in range(dataset_len)], bins=nb_bins, density=False, range=(0.0, 1.0))\n", + "\n", + " logging.debug(f\"dist: {dist}\")\n", + "\n", + " # kernel = get_lds_kernel(kernel_size, kernel_std)\n", + " # lds = convolve1d(dist, weights=kernel, mode='constant')\n", + "\n", + " lds = gaussian_filter1d(input=dist, sigma=kernel_std, axis=- 1, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\n", + "\n", + " weights = np.sqrt(lds) if reweight == 'inv_sqrt' else lds\n", + " # scaling = len(weights) / np.sum(weights) # not the same implementation as in the original repo\n", + " scaling = 1.0 / np.sum(weights)\n", + " weights = weights * scaling\n", + "\n", + " return weights, dist, lds, bins\n", + "\n", + "\n", + "class LabelDistributionSmoothing:\n", + " def __init__(self, c=1.0, dataset=None, weights=None, kernel_size=5, kernel_std=2.0, nb_bins=100, 
weighting_mode=\"inv_sqrt\"):\n", + " \"\"\"\n", + " If provided, lds_distribution must be a numpy.array representing a density over [0.0, 1.0] (e.g. first element of a numpy.histogram)\n", + " When lds_distribution is provided, it overrides everything else\n", + " c is the scaling constant for lds weights\n", + " weighting_mode can be 'inv' or 'inv_sqrt'\n", + " \"\"\"\n", + " assert dataset is not None or weights is not None, \"Either a dataset or weights must be provided\"\n", + " self.distribution = None\n", + " self.bins = None\n", + " self.lds_distribution = None\n", + " if weights is None:\n", + " self.weights, self.distribution, self.lds_distribution, self.bins = generate_label_distribution_and_lds(dataset, kernel_size, kernel_std, nb_bins, weighting_mode)\n", + " logging.debug(f\"self.distribution: {self.weights}\")\n", + " logging.debug(f\"self.lds_distribution: {self.weights}\")\n", + " else:\n", + " self.weights = weights\n", + " self.nb_bins = len(self.weights)\n", + " self.bin_width = 1.0 / self.nb_bins\n", + " self.c = c\n", + " logging.debug(f\"The LDS distribution has {self.nb_bins} bins of width {self.bin_width}\")\n", + " self.weights = torch.tensor(self.weights)\n", + "\n", + " logging.debug(f\"self.weights: {self.weights}\")\n", + "\n", + " def lds_weights_batch(self, batch_labels):\n", + " device = batch_labels.device\n", + " if self.weights.device != device:\n", + " self.weights = self.weights.to(device)\n", + " last_bin = 1.0 - self.bin_width\n", + " batch_idxs = torch.minimum(batch_labels, torch.ones_like(batch_labels) * last_bin) / self.bin_width # FIXME : double check\n", + " batch_idxs = batch_idxs.floor().long()\n", + " res = 1.0 / self.weights[batch_idxs]\n", + " return res\n", + "\n", + " def __str__(self):\n", + " return f\"LDS nb_bins: {self.nb_bins}\\nbins: {self.bins}\\ndistribution: {self.distribution}\\nlds_distribution: {self.lds_distribution}\\nweights: {self.weights} \"\n", + "\n", + "\n", + "class SurpriseReweighting:\n", + 
" \"\"\"\n", + " Custom reweighting Yann\n", + " \"\"\"\n", + "\n", + " def __init__(self, weights=None, nb_bins=100, alpha=1e-3):\n", + " if weights is None:\n", + " self.weights = [1.0, ] * nb_bins\n", + " self.weights = torch.tensor(self.weights)\n", + " self.weights = self.weights / torch.sum(self.weights)\n", + " else:\n", + " self.weights = weights\n", + " self.weights = self.weights.detach()\n", + " self.nb_bins = len(self.weights)\n", + " self.bin_width = 1.0 / self.nb_bins\n", + " self.alpha = alpha\n", + " logging.debug(f\"The SR distribution has {self.nb_bins} bins of width {self.bin_width}\")\n", + " logging.debug(f\"Initial self.weights: {self.weights}\")\n", + "\n", + " def update_and_get_weighted_loss(self, batch_labels, unweighted_loss):\n", + " device = batch_labels.device\n", + " if self.weights.device != device:\n", + " logging.debug(f\"Moving SR weights to {device}\")\n", + " self.weights = self.weights.to(device)\n", + " last_bin = 1.0 - self.bin_width\n", + " batch_idxs = torch.minimum(batch_labels, torch.ones_like(batch_labels) * last_bin) / self.bin_width # FIXME : double check\n", + " batch_idxs = batch_idxs.floor().long()\n", + " self.weights = self.weights.detach() # ensure no gradients\n", + " weights = copy.deepcopy(self.weights[batch_idxs])\n", + " res = unweighted_loss * weights\n", + " with torch.no_grad():\n", + " abs_loss = torch.abs(unweighted_loss)\n", + "\n", + " # compute the mean loss per idx\n", + "\n", + " num = torch.zeros(self.nb_bins, device=device)\n", + " num = num.index_add(0, batch_idxs, abs_loss)\n", + " bincount = torch.bincount(batch_idxs, minlength=self.nb_bins)\n", + " div = bincount.float()\n", + " idx_unchanged = bincount == 0\n", + " idx_changed = bincount != 0\n", + " div[idx_unchanged] = 1.0\n", + " mean_loss_per_idx_normalized = num / div\n", + " sum_changed_weights = torch.sum(self.weights[idx_changed])\n", + " sum_mean_loss = torch.sum(mean_loss_per_idx_normalized[idx_changed])\n", + " 
mean_loss_per_idx_normalized[idx_changed] = mean_loss_per_idx_normalized[idx_changed] * sum_changed_weights / sum_mean_loss\n", + " # logging.debug(f\"old self.weights: {self.weights}\")\n", + " self.weights[idx_changed] = (1.0 - self.alpha) * self.weights[idx_changed] + self.alpha * mean_loss_per_idx_normalized[idx_changed]\n", + " self.weights /= torch.sum(self.weights) # force sum to 1\n", + " # logging.debug(f\"unique_idx: {unique_idx}\")\n", + " # logging.debug(f\"new self.weights: {self.weights}\")\n", + " # logging.debug(f\"new torch.sum(self.weights): {torch.sum(self.weights)}\")\n", + " return torch.sqrt(res * self.nb_bins)\n", + "\n", + " def __str__(self):\n", + " return f\"LDS nb_bins: {self.nb_bins}\\nweights: {self.weights}\"\n", + "\n", + "\n", + "# run:\n", + "\n", + "def generate_dataloader(window_size, fe, seq_len, seq_stride, distribution_mode, batch_size, nb_batch_per_epoch, classification, split_i, network_stride):\n", + " all_subject = pd.read_csv(Path(path_dataset) / subject_list, header=None, delim_whitespace=True).to_numpy()\n", + " test_subject = None\n", + " if PHASE == 'full':\n", + " p1_subject = pd.read_csv(Path(path_dataset) / subject_list_p1, header=None, delim_whitespace=True).to_numpy()\n", + " p2_subject = pd.read_csv(Path(path_dataset) / subject_list_p2, header=None, delim_whitespace=True).to_numpy()\n", + " train_subject_p1, validation_subject_p1 = train_test_split(p1_subject, train_size=0.8, random_state=split_i)\n", + " if TEST_SET:\n", + " test_subject_p1, validation_subject_p1 = train_test_split(validation_subject_p1, train_size=0.5, random_state=split_i)\n", + " train_subject_p2, validation_subject_p2 = train_test_split(p2_subject, train_size=0.8, random_state=split_i)\n", + " if TEST_SET:\n", + " test_subject_p2, validation_subject_p2 = train_test_split(validation_subject_p2, train_size=0.5, random_state=split_i)\n", + " train_subject = np.array([s for s in all_subject if s[0] in train_subject_p1[:, 0] or s[0] in 
train_subject_p2[:, 0]]).squeeze()\n", + " if TEST_SET:\n", + " test_subject = np.array([s for s in all_subject if s[0] in test_subject_p1[:, 0] or s[0] in test_subject_p2[:, 0]]).squeeze()\n", + " validation_subject = np.array(\n", + " [s for s in all_subject if s[0] in validation_subject_p1[:, 0] or s[0] in validation_subject_p2[:, 0]]).squeeze()\n", + " else:\n", + " train_subject, validation_subject = train_test_split(all_subject, train_size=0.8, random_state=split_i)\n", + " if TEST_SET:\n", + " test_subject, validation_subject = train_test_split(validation_subject, train_size=0.5, random_state=split_i)\n", + " logging.debug(f\"Subjects in training : {train_subject[:, 0]}\")\n", + " logging.debug(f\"Subjects in validation : {validation_subject[:, 0]}\")\n", + " if TEST_SET:\n", + " logging.debug(f\"Subjects in test : {test_subject[:, 0]}\")\n", + "\n", + " len_segment_s = LEN_SEGMENT * fe\n", + " train_loader = None\n", + " validation_loader = None\n", + " test_loader = None\n", + " batch_size_validation = None\n", + " batch_size_test = None\n", + " filename = filename_classification_dataset\n", + "\n", + " if seq_len is not None:\n", + " nb_segment_validation = len(np.hstack([range(int(s[1]), int(s[2])) for s in validation_subject]))\n", + " batch_size_validation = len(list(range(0, (seq_stride // network_stride) * network_stride, network_stride))) * nb_segment_validation\n", + "\n", + " ds_train = SignalDataset(filename=filename,\n", + " path=path_dataset,\n", + " window_size=window_size,\n", + " fe=fe,\n", + " seq_len=seq_len,\n", + " seq_stride=seq_stride,\n", + " list_subject=train_subject,\n", + " len_segment=len_segment_s)\n", + "\n", + " ds_validation = SignalDataset(filename=filename,\n", + " path=path_dataset,\n", + " window_size=window_size,\n", + " fe=fe,\n", + " seq_len=1,\n", + " seq_stride=1, # just to be sure, fixed value\n", + " list_subject=validation_subject,\n", + " len_segment=len_segment_s)\n", + " idx_true, idx_false = 
get_class_idxs(ds_train, distribution_mode)\n", + " samp_train = RandomSampler(idx_true=idx_true,\n", + " idx_false=idx_false,\n", + " batch_size=batch_size,\n", + " nb_batch=nb_batch_per_epoch,\n", + " distribution_mode=distribution_mode)\n", + "\n", + " samp_validation = ValidationSampler(ds_validation,\n", + " seq_stride=seq_stride,\n", + " len_segment=len_segment_s,\n", + " nb_segment=nb_segment_validation,\n", + " network_stride=network_stride)\n", + " train_loader = DataLoader(ds_train,\n", + " batch_size=batch_size,\n", + " sampler=samp_train,\n", + " shuffle=False,\n", + " num_workers=0,\n", + " pin_memory=True)\n", + "\n", + " validation_loader = DataLoader(ds_validation,\n", + " batch_size=batch_size_validation,\n", + " sampler=samp_validation,\n", + " num_workers=0,\n", + " pin_memory=True,\n", + " shuffle=False)\n", + " else:\n", + " if not TEST_SET:\n", + " test_subject = validation_subject\n", + " nb_segment_test = len(np.hstack([range(int(s[1]), int(s[2])) for s in test_subject]))\n", + " batch_size_test = len(list(range(0, (seq_stride // network_stride) * network_stride, network_stride))) * nb_segment_test\n", + "\n", + " ds_test = SignalDataset(filename=filename,\n", + " path=path_dataset,\n", + " window_size=window_size,\n", + " fe=fe,\n", + " seq_len=1,\n", + " seq_stride=1, # just to be sure, fixed value\n", + " list_subject=test_subject,\n", + " len_segment=len_segment_s)\n", + "\n", + " samp_test = ValidationSampler(ds_test,\n", + " seq_stride=seq_stride,\n", + " len_segment=len_segment_s,\n", + " nb_segment=nb_segment_test,\n", + " network_stride=network_stride)\n", + "\n", + " test_loader = DataLoader(ds_test,\n", + " batch_size=batch_size_test,\n", + " sampler=samp_test,\n", + " num_workers=0,\n", + " pin_memory=True,\n", + " shuffle=False)\n", + "\n", + " return train_loader, validation_loader, batch_size_validation, test_loader, batch_size_test, test_subject\n", + "\n", + "\n", + "def run(config_dict, wandb_project, save_model, 
unique_name):\n", + " global precision_validation_factor\n", + " global recall_validation_factor\n", + " _t_start = time.time()\n", + " logging.debug(f\"config_dict: {config_dict}\")\n", + " experiment_name = f\"{config_dict['experiment_name']}_{time.time_ns()}\" if unique_name else config_dict['experiment_name']\n", + " nb_epoch_max = config_dict[\"nb_epoch_max\"]\n", + " nb_batch_per_epoch = config_dict[\"nb_batch_per_epoch\"]\n", + " nb_epoch_early_stopping_stop = config_dict[\"nb_epoch_early_stopping_stop\"]\n", + " early_stopping_smoothing_factor = config_dict[\"early_stopping_smoothing_factor\"]\n", + " batch_size = config_dict[\"batch_size\"]\n", + " seq_len = config_dict[\"seq_len\"]\n", + " window_size_s = config_dict[\"window_size_s\"]\n", + " fe = config_dict[\"fe\"]\n", + " seq_stride_s = config_dict[\"seq_stride_s\"]\n", + " lr_adam = config_dict[\"lr_adam\"]\n", + " hidden_size = config_dict[\"hidden_size\"]\n", + " device_val = config_dict[\"device_val\"]\n", + " device_train = config_dict[\"device_train\"]\n", + " max_duration = config_dict[\"max_duration\"]\n", + " nb_rnn_layers = config_dict[\"nb_rnn_layers\"]\n", + " adam_w = config_dict[\"adam_w\"]\n", + " distribution_mode = config_dict[\"distribution_mode\"]\n", + " classification = config_dict[\"classification\"]\n", + " reg_balancing = config_dict[\"reg_balancing\"]\n", + " split_idx = config_dict[\"split_idx\"]\n", + " validation_network_stride = config_dict[\"validation_network_stride\"]\n", + "\n", + " assert reg_balancing in {'none', 'lds', 'sr'}, f\"wrong key: {reg_balancing}\"\n", + " assert classification or distribution_mode == 1, \"distribution_mode must be 1 (no class balancing) in regression mode\"\n", + " balancer_type = 0\n", + " lds = None\n", + " sr = None\n", + " if reg_balancing == 'lds':\n", + " balancer_type = 1\n", + " elif reg_balancing == 'sr':\n", + " balancer_type = 2\n", + "\n", + " window_size = int(window_size_s * fe)\n", + " seq_stride = int(seq_stride_s * fe)\n", 
+ "\n", + " if device_val.startswith(\"cuda\") or device_train.startswith(\"cuda\"):\n", + " assert torch.cuda.is_available(), \"CUDA unavailable\"\n", + "\n", + " logger = LoggerWandb(experiment_name, config_dict, wandb_project)\n", + " torch.seed()\n", + " net = PortiloopNetwork(config_dict).to(device=device_train)\n", + " criterion = nn.MSELoss(reduction='none') if not classification else nn.BCELoss(reduction='none')\n", + " # criterion = nn.MSELoss() if not classification else nn.BCELoss()\n", + " optimizer = optim.AdamW(net.parameters(), lr=lr_adam, weight_decay=adam_w)\n", + "\n", + " first_epoch = 0\n", + " try:\n", + " logger.restore()\n", + " checkpoint = torch.load(path_dataset / experiment_name)\n", + " logging.debug(\"Use checkpoint model\")\n", + " net.load_state_dict(checkpoint['model_state_dict'])\n", + " optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n", + " first_epoch = checkpoint['epoch'] + 1\n", + " recall_validation_factor = checkpoint['recall_validation_factor']\n", + " precision_validation_factor = checkpoint['precision_validation_factor']\n", + " except (ValueError, FileNotFoundError):\n", + " # net = PortiloopNetwork(config_dict).to(device=device_train)\n", + " logging.debug(\"Create new model\")\n", + " net = net.train()\n", + " nb_weights = 0\n", + " for i in net.parameters():\n", + " nb_weights += len(i)\n", + " has_envelope = 1\n", + " if config_dict[\"envelope_input\"]:\n", + " has_envelope = 2\n", + " config_dict[\"estimator_size_memory\"] = nb_weights * window_size * seq_len * batch_size * has_envelope\n", + "\n", + " train_loader, validation_loader, batch_size_validation, _, _, _ = generate_dataloader(window_size, fe, seq_len, seq_stride, distribution_mode,\n", + " batch_size, nb_batch_per_epoch, classification, split_idx,\n", + " validation_network_stride)\n", + " if balancer_type == 1:\n", + " lds = LabelDistributionSmoothing(c=1.0, dataset=train_loader.dataset, weights=None, kernel_size=5, kernel_std=0.01, 
nb_bins=100,\n", + " weighting_mode='inv_sqrt')\n", + " elif balancer_type == 2:\n", + " sr = SurpriseReweighting(weights=None, nb_bins=100, alpha=1e-3)\n", + "\n", + " best_model_accuracy = 0\n", + " best_epoch = 0\n", + " best_model = None\n", + " best_loss_early_stopping = 1\n", + " best_epoch_early_stopping = 0\n", + " best_model_precision_validation = 0\n", + " best_model_f1_score_validation = 0\n", + " best_model_recall_validation = 0\n", + " best_model_loss_validation = 1\n", + "\n", + " best_model_on_loss_accuracy = 0\n", + " best_model_on_loss_precision_validation = 0\n", + " best_model_on_loss_f1_score_validation = 0\n", + " best_model_on_loss_recall_validation = 0\n", + " best_model_on_loss_loss_validation = 1\n", + "\n", + " accuracy_train = None\n", + " loss_train = None\n", + "\n", + " early_stopping_counter = 0\n", + " loss_early_stopping = None\n", + " h1_zero = torch.zeros((nb_rnn_layers, batch_size, hidden_size), device=device_train)\n", + " h2_zero = torch.zeros((nb_rnn_layers, batch_size, hidden_size), device=device_train)\n", + " for epoch in range(first_epoch, first_epoch + nb_epoch_max):\n", + "\n", + " logging.debug(f\"epoch: {epoch}\")\n", + "\n", + " n = 0\n", + " if epoch > -1:\n", + " accuracy_train = 0\n", + " loss_train = 0\n", + " _t_start = time.time()\n", + " for batch_data in train_loader:\n", + " batch_samples_input1, batch_samples_input2, batch_samples_input3, batch_labels = batch_data\n", + " batch_samples_input1 = batch_samples_input1.to(device=device_train).float()\n", + " batch_samples_input2 = batch_samples_input2.to(device=device_train).float()\n", + " batch_samples_input3 = batch_samples_input3.to(device=device_train).float()\n", + " batch_labels = batch_labels.to(device=device_train).float()\n", + "\n", + " optimizer.zero_grad()\n", + " if classification:\n", + " batch_labels = (batch_labels > THRESHOLD)\n", + " batch_labels = batch_labels.float()\n", + "\n", + " output, _, _, _ = net(batch_samples_input1, 
batch_samples_input2, batch_samples_input3, h1_zero, h2_zero)\n", + "\n", + " output = output.view(-1)\n", + "\n", + " loss = criterion(output, batch_labels)\n", + "\n", + " if balancer_type == 1:\n", + " batch_weights = lds.lds_weights_batch(batch_labels)\n", + " loss = loss * batch_weights\n", + " error = batch_weights.isinf().any().item() or batch_weights.isnan().any().item() or torch.isnan(\n", + " loss).any().item() or torch.isinf(loss).any().item()\n", + " if error:\n", + " logging.debug(f\"batch_labels: {batch_labels}\")\n", + " logging.debug(f\"batch_weights: {batch_weights}\")\n", + " logging.debug(f\"loss: {loss}\")\n", + " logging.debug(f\"LDS: {lds}\")\n", + " assert False, \"loss is nan or inf\"\n", + " elif balancer_type == 2:\n", + " loss = sr.update_and_get_weighted_loss(batch_labels=batch_labels, unweighted_loss=loss)\n", + " error = torch.isnan(loss).any().item() or torch.isinf(loss).any().item()\n", + " if error:\n", + " logging.debug(f\"batch_labels: {batch_labels}\")\n", + " logging.debug(f\"loss: {loss}\")\n", + " logging.debug(f\"SR: {sr}\")\n", + " assert False, \"loss is nan or inf\"\n", + "\n", + " loss = loss.mean()\n", + "\n", + " loss_train += loss.item()\n", + " loss.backward()\n", + " optimizer.step()\n", + "\n", + " if not classification:\n", + " output = (output > THRESHOLD)\n", + " batch_labels = (batch_labels > THRESHOLD)\n", + " else:\n", + " output = (output >= 0.5)\n", + " accuracy_train += (output == batch_labels).float().mean()\n", + " n += 1\n", + " _t_stop = time.time()\n", + " logging.debug(f\"Training time for 1 epoch : {_t_stop - _t_start} s\")\n", + " accuracy_train /= n\n", + " loss_train /= n\n", + "\n", + " _t_start = time.time()\n", + " output_validation, labels_validation, loss_validation, accuracy_validation, tp, tn, fp, fn = run_inference(validation_loader, criterion, net,\n", + " device_val, hidden_size,\n", + " nb_rnn_layers, classification,\n", + " batch_size_validation)\n", + " f1_validation, 
precision_validation, recall_validation = get_metrics(tp, fp, fn)\n", + "\n", + " _t_stop = time.time()\n", + " logging.debug(f\"Validation time for 1 epoch : {_t_stop - _t_start} s\")\n", + "\n", + " recall_validation_factor = recall_validation\n", + " precision_validation_factor = precision_validation\n", + " updated_model = False\n", + " if (not MAXIMIZE_F1_SCORE and loss_validation < best_model_loss_validation) or (\n", + " MAXIMIZE_F1_SCORE and f1_validation > best_model_f1_score_validation):\n", + " best_model = copy.deepcopy(net)\n", + " best_epoch = epoch\n", + " # torch.save(best_model.state_dict(), path_dataset / experiment_name, _use_new_zipfile_serialization=False)\n", + " if save_model:\n", + " torch.save({\n", + " 'epoch': epoch,\n", + " 'model_state_dict': best_model.state_dict(),\n", + " 'optimizer_state_dict': optimizer.state_dict(),\n", + " 'recall_validation_factor': recall_validation_factor,\n", + " 'precision_validation_factor': precision_validation_factor,\n", + " }, path_dataset / experiment_name, _use_new_zipfile_serialization=False)\n", + " updated_model = True\n", + " best_model_f1_score_validation = f1_validation\n", + " best_model_precision_validation = precision_validation\n", + " best_model_recall_validation = recall_validation\n", + " best_model_loss_validation = loss_validation\n", + " best_model_accuracy = accuracy_validation\n", + " if loss_validation < best_model_on_loss_loss_validation:\n", + " best_model = copy.deepcopy(net)\n", + " best_epoch = epoch\n", + " # torch.save(best_model.state_dict(), path_dataset / experiment_name, _use_new_zipfile_serialization=False)\n", + " if save_model:\n", + " torch.save({\n", + " 'epoch': epoch,\n", + " 'model_state_dict': best_model.state_dict(),\n", + " 'optimizer_state_dict': optimizer.state_dict(),\n", + " 'recall_validation_factor': recall_validation_factor,\n", + " 'precision_validation_factor': precision_validation_factor,\n", + " }, path_dataset / (experiment_name + \"_on_loss\"), 
_use_new_zipfile_serialization=False)\n", + " updated_model = True\n", + " best_model_on_loss_f1_score_validation = f1_validation\n", + " best_model_on_loss_precision_validation = precision_validation\n", + " best_model_on_loss_recall_validation = recall_validation\n", + " best_model_on_loss_loss_validation = loss_validation\n", + " best_model_on_loss_accuracy = accuracy_validation\n", + "\n", + " loss_early_stopping = loss_validation if loss_early_stopping is None and early_stopping_smoothing_factor == 1 else loss_validation if loss_early_stopping is None else loss_validation * early_stopping_smoothing_factor + loss_early_stopping * (\n", + " 1.0 - early_stopping_smoothing_factor)\n", + "\n", + " if loss_early_stopping < best_loss_early_stopping:\n", + " best_loss_early_stopping = loss_early_stopping\n", + " early_stopping_counter = 0\n", + " best_epoch_early_stopping = epoch\n", + " else:\n", + " early_stopping_counter += 1\n", + "\n", + " logger.log(accuracy_train=accuracy_train,\n", + " loss_train=loss_train,\n", + " accuracy_validation=accuracy_validation,\n", + " loss_validation=loss_validation,\n", + " f1_validation=f1_validation,\n", + " precision_validation=precision_validation,\n", + " recall_validation=recall_validation,\n", + " best_epoch=best_epoch,\n", + " best_model=best_model,\n", + " loss_early_stopping=loss_early_stopping,\n", + " best_epoch_early_stopping=best_epoch_early_stopping,\n", + " best_model_accuracy_validation=best_model_accuracy,\n", + " best_model_f1_score_validation=best_model_f1_score_validation,\n", + " best_model_precision_validation=best_model_precision_validation,\n", + " best_model_recall_validation=best_model_recall_validation,\n", + " best_model_loss_validation=best_model_loss_validation,\n", + " best_model_on_loss_accuracy_validation=best_model_on_loss_accuracy,\n", + " best_model_on_loss_f1_score_validation=best_model_on_loss_f1_score_validation,\n", + " 
best_model_on_loss_precision_validation=best_model_on_loss_precision_validation,\n", + " best_model_on_loss_recall_validation=best_model_on_loss_recall_validation,\n", + " best_model_on_loss_loss_validation=best_model_on_loss_loss_validation,\n", + " updated_model=updated_model)\n", + "\n", + " if early_stopping_counter > nb_epoch_early_stopping_stop or time.time() - _t_start > max_duration:\n", + " logging.debug(\"Early stopping.\")\n", + " break\n", + " logging.debug(\"Delete logger\")\n", + " del logger\n", + " logging.debug(\"Logger deleted\")\n", + " return best_model_loss_validation, best_model_f1_score_validation, best_epoch_early_stopping\n", + "\n", + "\n", + "def get_config_dict(index, split_i):\n", + " \"\"\"\n", + " index: index du job CC (not used appart for name)\n", + " split_i: index of the initial shuffle of subjects\n", + " \"\"\"\n", + " c_dict = {'experiment_name': f'pareto_search_15_35_v2_{index}', 'device_train': 'cpu', 'device_val': 'cpu', 'nb_epoch_max': 150, 'max_duration': 257400,\n", + " 'nb_epoch_early_stopping_stop': 100, 'early_stopping_smoothing_factor': 0.1, 'fe': 250, 'nb_batch_per_epoch': 1000,\n", + " 'first_layer_dropout': False,\n", + " 'power_features_input': False, 'dropout': 0.5, 'adam_w': 0.01, 'distribution_mode': 0, 'classification': True,\n", + " 'reg_balancing': 'none',\n", + " 'split_idx': split_i, 'validation_network_stride': 1, 'nb_conv_layers': 3, 'seq_len': 50, 'nb_channel': 31, 'hidden_size': 7,\n", + " 'seq_stride_s': 0.170,\n", + " 'nb_rnn_layers': 1, 'RNN': True, 'envelope_input': False, 'lr_adam': 0.0005, 'batch_size': 256, 'window_size_s': 0.218,\n", + " 'stride_pool': 1,\n", + " 'stride_conv': 1, 'kernel_conv': 7, 'kernel_pool': 7, 'dilation_conv': 1, 'dilation_pool': 1, 'nb_out': 18, 'time_in_past': 8.5,\n", + " 'estimator_size_memory': 188006400}\n", + " return c_dict" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "E0ZltgveVnrI" + }, + "outputs": [], + "source": [ + 
"def simulate(c_dict, split_idx):\n", + " \"\"\"\n", + " on test set\n", + "\n", + " c_dict: configuration dictionary\n", + " split_idx: index of the initial shuffle\n", + " \"\"\"\n", + " logging.debug(f\"config_dict: {c_dict}\")\n", + " experiment_name = c_dict['experiment_name']\n", + " window_size_s = c_dict[\"window_size_s\"]\n", + " fe = c_dict[\"fe\"]\n", + " seq_stride_s = c_dict[\"seq_stride_s\"]\n", + " hidden_size = c_dict[\"hidden_size\"]\n", + " device_val = c_dict[\"device_val\"]\n", + " device_train = c_dict[\"device_train\"]\n", + " nb_rnn_layers = c_dict[\"nb_rnn_layers\"]\n", + " classification = c_dict[\"classification\"]\n", + " window_size = int(window_size_s * fe)\n", + " seq_stride = int(seq_stride_s * fe)\n", + "\n", + " nb_parallel_runs = seq_stride // (FPGA_NN_EXEC_TIME + ERROR_FPGA_EXEC_TIME)\n", + " print(f\"seq_stride: {seq_stride}\")\n", + " print(f\"FPGA_NN_EXEC_TIME + ERROR_FPGA_EXEC_TIME: {FPGA_NN_EXEC_TIME + ERROR_FPGA_EXEC_TIME}\")\n", + " print(f\"nb_parallel_runs: {nb_parallel_runs}\")\n", + " stride_between_runs = seq_stride // nb_parallel_runs\n", + " logging.debug(f\"stride_between_runs: {stride_between_runs}\")\n", + "\n", + " if device_val.startswith(\"cuda\") or device_train.startswith(\"cuda\"):\n", + " assert torch.cuda.is_available(), \"CUDA unavailable\"\n", + "\n", + " torch.seed()\n", + " net = PortiloopNetwork(c_dict).to(device=device_val)\n", + " criterion = nn.MSELoss() if not classification else nn.BCELoss()\n", + "\n", + " _, _, _, test_loader, batch_size_test, test_subject = generate_dataloader(window_size=window_size, fe=fe, seq_len=None, seq_stride=seq_stride,\n", + " distribution_mode=None, batch_size=None, nb_batch_per_epoch=None,\n", + " classification=classification, split_i=split_idx,\n", + " network_stride=stride_between_runs)\n", + "\n", + " checkpoint = torch.load(path_experiments / experiment_name, map_location=torch.device(device_val))\n", + " logging.debug(\"Use trained model\")\n", + " 
net.load_state_dict(checkpoint['model_state_dict'])\n", + "\n", + " output_test, labels_test, loss_test, accuracy_test, tp, tn, fp, fn = run_inference(test_loader, criterion, net, device_val, hidden_size,\n", + " nb_rnn_layers, classification, batch_size_test, max_value=0)\n", + "\n", + " nb_segment_test = len(np.hstack([range(int(s[1]), int(s[2])) for s in test_subject]))\n", + " labels_test = np.transpose(np.split(labels_test.cpu().detach().numpy(), len(labels_test) / batch_size_test))\n", + " output_test = np.transpose(np.split(output_test.cpu().detach().numpy(), len(output_test) / batch_size_test))\n", + " logging.debug(f\"shape output test: {output_test.shape}\")\n", + " logging.debug(f\"nb_segment_test: {nb_segment_test}\")\n", + " output_segments = []\n", + " for s in range(nb_segment_test):\n", + " output_segments.append(zip(*(output_test[s * nb_parallel_runs + i] for i in range(nb_parallel_runs))))\n", + " output_segments[-1] = np.hstack(np.array([list(a) for a in output_segments[-1]]))\n", + " print(f\"output_segments.shape: {np.array(output_segments).shape}\")\n", + " output_portiloop = np.hstack(np.array(output_segments))\n", + " labels_segments = []\n", + " for s in range(nb_segment_test):\n", + " labels_segments.append(zip(*(labels_test[s * nb_parallel_runs + i] for i in range(nb_parallel_runs))))\n", + " labels_segments[-1] = np.hstack(np.array([list(a) for a in labels_segments[-1]]))\n", + " labels_portiloop = np.hstack(np.array(labels_segments))\n", + "\n", + " output = (output_portiloop>THRESHOLD)\n", + " output_portiloop = output_portiloop.astype(float)\n", + " output = output.astype(float)\n", + " labels_portiloop = labels_portiloop.astype(float)\n", + " tp = torch.Tensor(labels_portiloop * output)\n", + " tn = torch.Tensor((1 - labels_portiloop) * (1 - output))\n", + " fp = torch.Tensor((1 - labels_portiloop) * output)\n", + " fn = torch.Tensor((labels_portiloop * (1 - output)))\n", + " f1_test, precision_test, recall_test = get_metrics(tp, fp, 
fn)\n", + " logging.debug(f\"f1_test = {f1_test}\")\n", + " logging.debug(f\"precision_test = {precision_test}\")\n", + " logging.debug(f\"recall_test = {recall_test}\")\n", + "\n", + " state = tp + fp * 2 + tn * 3 + fn * 4\n", + "\n", + " # f1, precision, recall test: metrics on full test set\n", + " # state: tp / fr / tn / fn for each data sample of the concatenated signal (test set)\n", + " # labels_portiloop: ground truth for each sample\n", + " # output_portiloop: output of the NN for each sample\n", + " # test_loader: dataloader of the test set\n", + " # net: NN\n", + "\n", + " return f1_test, precision_test, recall_test, state, labels_portiloop, output_portiloop, test_loader, net\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "QX1UtDC7a-Se" + }, + "outputs": [], + "source": [ + "logging.getLogger().setLevel(logging.DEBUG)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "k0a0LgKyVQYU", + "outputId": "c82db852-40c1-4b41-8e52-165bb042295b" + }, + "outputs": [], + "source": [ + "ABLATION = 0\n", + "PHASE = 'full'\n", + "\n", + "FPGA_NN_EXEC_TIME = 5 # equivalent to 20 ms\n", + "ERROR_FPGA_EXEC_TIME = 0 # to be sure there is no overlap\n", + "\n", + "\n", + "threshold_list = {'p1': 0.2, 'p2': 0.35, 'full': 0.5} # full = p1 + p2\n", + "THRESHOLD = threshold_list[PHASE]\n", + "size_data = \"big\"\n", + "filename_dataset = f\"dataset_{PHASE}_{size_data}_250_matlab_standardized_envelope_pf.txt\"\n", + "filename_classification_dataset = f\"dataset_classification_{PHASE}_{size_data}_250_matlab_standardized_envelope_pf.txt\"\n", + "subject_list = f\"subject_sequence_{PHASE}_{size_data}.txt\"\n", + "subject_list_p1 = f\"subject_sequence_p1_{size_data}.txt\"\n", + "subject_list_p2 = f\"subject_sequence_p2_{size_data}.txt\"\n", + "TEST_SET = True\n", + "exp_index = 0\n", + "config_dict = dict()\n", + "exp_name = 
[f\"pareto_search_15_35_v4_{i}\" for i in [0,11,12,3,14,15,16,7,18,9]]\n", + "max_split = 10\n", + "res = []\n", + "for split_idx in range(max_split):\n", + " config_dict = get_config_dict(exp_index, split_idx)\n", + " config_dict[\"experiment_name\"] = exp_name[split_idx]\n", + " res.append(simulate(config_dict, split_idx))\n", + " break\n", + "\n", + "# res = np.array(res)\n", + "# std_f1_test, std_precision_test, std_recall_test = np.std(res, axis=0)\n", + "# mean_f1_test, mean_precision_test, mean_recall_test = np.mean(res, axis=0)\n", + "# print(config_dict[\"experiment_name\"])\n", + "# print(f\"Recall: {mean_recall_test} + {std_recall_test}\")\n", + "# print(f\"Precision: {mean_precision_test} + {std_precision_test}\")\n", + "# print(f\"f1: {mean_f1_test} + {std_f1_test}\")\n", + "# split_idx = 0\n", + "# config_dict = get_config_dict(exp_index, split_idx)\n", + "# config_dict[\"experiment_name\"] = exp_name[split_idx]\n", + "# _, _,_ , state, label_test, output_test, dataloader = simulate(config_dict, split_idx)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BrN_rGrOczUu" + }, + "outputs": [], + "source": [ + "logging.getLogger().setLevel(logging.INFO)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Wn-gd1XexQPi" + }, + "outputs": [], + "source": [ + "seq_stride = int(config_dict[\"seq_stride_s\"]*config_dict[\"fe\"])\n", + "network_stride = 5\n", + "nb_samp = 8\n", + "window_size = int(config_dict[\"window_size_s\"]*config_dict[\"fe\"])\n", + "max_time_stimulate_s = 0.25\n", + "constant_delay_s = 0.064\n", + "\n", + "print(f\"seq_stride: {seq_stride}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "kw9CugZXH-Z3" + }, + "outputs": [], + "source": [ + "def stimulation_analysis(THRESHOLD=0.5):\n", + " \"\"\"\n", + " This function extracts true results on the portiloop system\n", + " All delays and dynamics are taken into account\n", 
+ " \n", + " Args:\n", + " THRESHOLD: float: classification threshold between 0 and 1 (default 0.5)\n", + " \"\"\"\n", + " n = 0\n", + " total_stim = 0\n", + " corrected_stimulation_delay = np.array([])\n", + " spindle_list_stimulate_list = []\n", + " for result in res:\n", + " print(f\"Split idx: {n}\")\n", + " n+=1\n", + " _, _,_ , state, label_test, output_test, dataloader, _ = result\n", + " ds_test = dataloader.dataset\n", + " idx_list = np.array(ds_test.indices)+window_size -1\n", + " edge = (ds_test.data[3][idx_list][1:]-ds_test.data[3][idx_list][:-1])\n", + " beginning = np.where(edge == 1)[0]+window_size-1\n", + " end = np.where(edge == -1)[0]+window_size-1\n", + " length = end-beginning\n", + " print(f\"mean spindle length: {np.mean(length)/250}\")\n", + " fe = config_dict[\"fe\"]\n", + " len_segment = 115*fe\n", + " segment_state_size = len(list(range(0, (seq_stride//network_stride)*network_stride, network_stride)))*((len_segment) // seq_stride)\n", + " assert segment_state_size == 5472\n", + " cur_idx = int(config_dict[\"window_size_s\"]*config_dict[\"fe\"])\n", + " spindle_list_stimulate = []\n", + " wait_stim = 0\n", + " wait_in_spindle = 0\n", + " in_spindle = False\n", + " seq_idx = 0\n", + " sequence_counter = 0\n", + " for i in range(len(output_test)):\n", + " adder = 0\n", + " label = output_test[i]\n", + " if (i+1)%nb_samp ==0:\n", + " adder = 2\n", + " if label > THRESHOLD and wait_stim == 0 and not in_spindle:\n", + " spindle_list_stimulate.append(cur_idx)\n", + " wait_stim = 100\n", + " in_spindle = True\n", + " if in_spindle and label > THRESHOLD:\n", + " wait_in_spindle = 100#42\n", + " if label <= THRESHOLD and wait_in_spindle <= 0:\n", + " in_spindle = False\n", + " cur_idx += network_stride+adder\n", + " wait_stim -=network_stride+adder\n", + " wait_in_spindle -=network_stride+adder\n", + " wait_stim = 0 if wait_stim < 0 else wait_stim\n", + " wait_in_spindle = 0 if wait_in_spindle < 0 else wait_in_spindle\n", + " sequence_counter += 
1\n", + "            if sequence_counter >= segment_state_size:\n", + "                seq_idx += 1\n", + "                cur_idx = seq_idx*115*fe + int(config_dict[\"window_size_s\"] * fe)\n", + "                sequence_counter = 0\n", + "                wait_stim = 0\n", + "                wait_in_spindle = 0\n", + "\n", + "        spindle_list_stimulate = np.array(spindle_list_stimulate)\n", + "        spindle_list_stimulate_list.append(spindle_list_stimulate)\n", + "        total_stim += len(spindle_list_stimulate)\n", + "        spindle_list_stimulate_delay_best = []\n", + "\n", + "        j = 0\n", + "        failed_stimulation = 0\n", + "        for i in range(len(beginning)):\n", + "            b = beginning[i]\n", + "            e = end[i]\n", + "            best = np.inf\n", + "            for s in spindle_list_stimulate:\n", + "                delay = s - b\n", + "                if abs(delay) < abs(best):\n", + "                    best = delay\n", + "            spindle_list_stimulate_delay_best.append(best)\n", + "        spindle_list_stimulate_delay_best = np.array(spindle_list_stimulate_delay_best)\n", + "        corrected_stimulation_delay = np.append(corrected_stimulation_delay, spindle_list_stimulate_delay_best/fe + constant_delay_s)\n", + "\n", + "    for i in range(1,3):\n", + "        margin = i*max_time_stimulate_s\n", + "        accurate_stimulation = len(np.where((0<corrected_stimulation_delay) & (corrected_stimulation_delay<margin))[0])\n", + "        failed_stimulation = len(np.where((corrected_stimulation_delay>margin) | (0>corrected_stimulation_delay))[0])\n", + "        print(f\"For margin = {margin} s\")\n", + "        print(f\"accurate stimulation: {accurate_stimulation}\")\n", + "        print(f\"spindle not stimulated: {failed_stimulation}\")\n", + "        print(f\"total stimulation: {total_stim}\")\n", + "        print(f\"ratio: {100*accurate_stimulation/total_stim}\")\n", + "        print(f\"percentage stimulated spindles: {100*accurate_stimulation/(accurate_stimulation+failed_stimulation)}\")\n", + "        \n", + "    # corrected_stimulation_delay: actual delay between actual spindle and actual stimulation\n", + "    # total_stim: number of total stimulations\n", + "    # spindle_list_stimulate_list: each list of stimulations for each different tested NN\n", + "    return corrected_stimulation_delay, total_stim, spindle_list_stimulate_list" + ] + }, + { + "cell_type": "code", + "execution_count": 
null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "0XiIOi9M6OY-", + "outputId": "e4910989-a76b-4a94-a043-e97afc834744" + }, + "outputs": [], + "source": [ + "corrected_stimulation_delay, total_stim, spindle_list_stimulate = stimulation_analysis(THRESHOLD)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 467, + "referenced_widgets": [ + "cb4ad0731b8549f483d086e84d0c8450", + "1aeddf94f6084a19bba2a5311766a6fb", + "12099b37a72d43078194d5977407662a", + "307138cdaff54eec978913a3a6ccf9aa", + "626fade444f040e18bfbcd1143e7bf05", + "52d0163c6ec64bfca8ff55709794be53", + "c146aa43cd4b4941951f6794553f77a6", + "15722a6ed7144cada304389c713fa184", + "6c4cc61f3df1492889d3632fa06d3f53", + "4037152492fa44d786c1d98b9c0b966d", + "ff78d6effae3404cb8bc4c76286b41aa", + "62a91a2a431440779e5ddf94240fd8f7", + "fe98486cdd4d4e988a7caae86d4c44e3" + ] + }, + "id": "7NYg3sVoAXNS", + "outputId": "dbc3d640-4b4e-44c9-e59b-a3a7c552ef42" + }, + "outputs": [], + "source": [ + "# interactive plot\n", + "\n", + "from __future__ import print_function\n", + "from ipywidgets import interact, interactive, fixed, interact_manual, Layout\n", + "import ipywidgets as widgets\n", + "from matplotlib.collections import LineCollection\n", + "from copy import deepcopy\n", + "\n", + "plt.rcParams['figure.figsize'] = [4, 2]\n", + "plt.rcParams['figure.dpi'] = 200\n", + "plt.rcParams.update({'font.size': 10})\n", + "\n", + "\n", + "network_stride = 5\n", + "seq_stride = 42\n", + "nb_samp = 8\n", + "fe = config_dict[\"fe\"]\n", + "segment_state_size = len(list(range(0, (seq_stride//network_stride)*network_stride, network_stride)))*((115*fe) // seq_stride)\n", + "_, _,_ , state, label_test, output_test, dataloader, _ = res[0]\n", + "ds_test = dataloader.dataset\n", + "idx_list = np.array(ds_test.indices)+window_size -1\n", + "edge = 
(ds_test.data[3][idx_list][1:]-ds_test.data[3][idx_list][:-1])\n", + "beginning = np.where(edge == 1)[0]+window_size-1\n", + "end = np.where(edge == -1)[0]+window_size-1\n", + "length = end-beginning\n", + "\n", + "def generate_lines(width):\n", + "\n", + " # signal:\n", + "\n", + " seq_lines = []\n", + " seq_colors = []\n", + " seq_linewidths = []\n", + " seq_linestyles = []\n", + "\n", + " end_idx = int(width * fe / network_stride)\n", + " cur_idx = int(config_dict[\"window_size_s\"] * fe)\n", + " spindle_list = []\n", + " sequence_counter = 0\n", + " seq_idx = 0\n", + " for i, st in enumerate(state[:end_idx]):\n", + " color = 'w'\n", + " label = \"Not evaluated\"\n", + " adder = 0\n", + " if (i+1) % nb_samp == 0:\n", + " adder = 2\n", + " if st == 1:\n", + " color = 'g'\n", + " elif st == 2:\n", + " color = 'r'\n", + " elif st == 3:\n", + " color = 'b'\n", + " elif st == 4:\n", + " color = 'k'\n", + " # color = 'b'\n", + " # if st == 1 or st == 4:\n", + " # color = 'g'\n", + " xs = np.arange(cur_idx, cur_idx + network_stride+1+adder, 1) / 250\n", + " ys = ds_test.full_signal[ds_test.indices[cur_idx]:ds_test.indices[cur_idx] + network_stride+1+adder].detach().numpy()\n", + " yscore = np.ones((network_stride+1+adder, 1))*output_test[i] - 5.0\n", + " line_n = list(zip(xs, ys))\n", + " line_score = list(zip(xs, yscore))\n", + " seq_lines.append(line_n)\n", + " seq_colors.append(color)\n", + " seq_linewidths.append(0.5)\n", + " seq_linestyles.append('solid')\n", + " seq_lines.append(line_score)\n", + " seq_colors.append('m')\n", + " seq_linewidths.append(1.0)\n", + " seq_linestyles.append('solid')\n", + "\n", + " cur_idx += network_stride + adder\n", + " sequence_counter += 1\n", + " if sequence_counter >= segment_state_size:\n", + " seq_idx += 1\n", + " # print(f\"idx before: {cur_idx}\")\n", + " cur_idx = seq_idx*115*fe + int(config_dict[\"window_size_s\"] * fe)\n", + " # print(f\"idx after: {cur_idx}\")\n", + " sequence_counter = 0\n", + " # print(i%8)\n", + "\n", 
+ " # vertical lines:\n", + "\n", + " for b in beginning:\n", + " # if b <= end_idx:\n", + " b_s = b / 250.0\n", + " seq_lines.append([(b_s, -10.0), (b_s, 10.0)])\n", + " seq_colors.append('c')\n", + " seq_linewidths.append(0.5)\n", + " seq_linestyles.append('dotted')\n", + "\n", + " for b in spindle_list_stimulate[0]:\n", + " # if b <= end_idx:\n", + " b_s = b / 250.0\n", + " seq_lines.append([(b_s, -10.0), (b_s, 10.0)])\n", + " seq_colors.append('grey')\n", + " seq_linewidths.append(0.5)\n", + " seq_linestyles.append('dotted')\n", + " \n", + " # threshold:\n", + "\n", + " seq_lines.append([(0.0, -5.0 + THRESHOLD), (1000.0, -5.0 + THRESHOLD)])\n", + " seq_colors.append('grey')\n", + " seq_linewidths.append(0.5)\n", + " seq_linestyles.append('dashed')\n", + " \n", + " seq_lines = np.array(seq_lines)\n", + " line_segments = LineCollection(seq_lines, colors=seq_colors, linewidths=seq_linewidths, linestyles=seq_linestyles)\n", + "\n", + " return line_segments\n", + "\n", + "lines = generate_lines(width=1000.0)\n", + "\n", + "def y1axtoy2ax(y):\n", + " res = y - 5\n", + " return res\n", + "\n", + "def y1axtoy2ax(y):\n", + " res = y + 5\n", + " return res\n", + "\n", + "class StimulationsPlotter:\n", + " def __init__(self):\n", + " self.savfig = None\n", + "\n", + " def plot_spindles(self, start=0.0, width=10.0):\n", + " fig, ax = plt.subplots()\n", + " coplines = deepcopy(lines)\n", + " ax.add_collection(coplines)\n", + " ax.set_xlabel(\"Time (s)\")\n", + " ax.set_xlim(start, start+width)\n", + " ax.set_ylim(-5, 5)\n", + "\n", + " ax.set_ylabel(\"Signal (arb. 
unit)\")\n", + "\n", + " secy = ax.secondary_yaxis('right', functions=(y1axtoy2ax, y1axtoy2ax))\n", + " secy.set_ylabel('ANN output')\n", + " secy.set_yticks([0,1])\n", + "\n", + " ax.axes.yaxis.set_visible(False)\n", + " ax.set_title(f\"Threshold {THRESHOLD}\")\n", + " plt.tight_layout()\n", + " self.savfig = plt.gcf()\n", + " plt.show()\n", + "\n", + "sp = StimulationsPlotter()\n", + "\n", + "def interactive_plot(start, width):\n", + " sp.plot_spindles(start=start, width=width)\n", + "\n", + "def on_button_clicked(b):\n", + " pathfig = path_plots / 'stimulation_plot.pdf'\n", + " sp.savfig.savefig(pathfig, dpi=200)\n", + " print(f\"Figure saved at {pathfig}\")\n", + "\n", + "startSlider = widgets.FloatSlider(min=0.0, max=1000.0, layout=Layout(width='1000px'))\n", + "widthSlider = widgets.FloatSlider(min=1.0, max=50.0, layout=Layout(width='200px'))\n", + "saveButton = widgets.Button(description='Print', disabled=False, button_style='', tooltip='Print', icon='check')\n", + "saveButton.on_click(on_button_clicked)\n", + "ui = widgets.HBox([startSlider, widthSlider, saveButton])\n", + "out = widgets.interactive_output(interactive_plot, {'start': startSlider, 'width': widthSlider})\n", + "display(ui, out)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "79GWXjsEUPdS" + }, + "source": [ + "# Interpretation of the results with Captum" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "quI9ZHDIlZpt", + "outputId": "07eab47e-c760-41c5-f488-d112c9ec19c6" + }, + "outputs": [], + "source": [ + "!pip install captum -qqq" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JeXsjagKDno7" + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import torch\n", + "import torch.nn as nn\n", + "from captum.attr import IntegratedGradients, DeepLiftShap, DeepLift" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": { + "id": "jieb4u_IEMRz" + }, + "outputs": [], + "source": [ + "result = res[0]\n", + "net = result[7]\n", + "dataloader = result[6]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "TUrZ93N_HGYy" + }, + "outputs": [], + "source": [ + "i = 0\n", + "b = None\n", + "for batch in dataloader:\n", + " i += 1\n", + " b = batch\n", + "\n", + "print(f\"number of batches in dataloader: {i}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Z8uNxoewIL_K", + "outputId": "1ffa1780-3173-429a-9117-ef0dafafb0d1" + }, + "outputs": [], + "source": [ + "# rebuild a sequence:\n", + "\n", + "idx_batch = 0 # index of the batch in dataloader\n", + "i_min = 98 # index with batch; spindles: 97, 200, 281?\n", + "seq_len = 20 # number of time steps in the sequence for RNN inference\n", + "\n", + "i_max = i_min + seq_len\n", + "seq_1 = []\n", + "seq_2 = []\n", + "seq_3 = []\n", + "seq_4 = []\n", + "i = 0\n", + "\n", + "# baselines:\n", + "nb_baselines = 10\n", + "baselines = []\n", + "len_baseline_total = nb_baselines * seq_len\n", + "\n", + "for batch in dataloader:\n", + " if i >= i_min and i < i_max:\n", + " seq_1.append(batch[0][idx_batch].squeeze())\n", + " seq_2.append(batch[1][idx_batch].squeeze())\n", + " seq_3.append(batch[2][idx_batch].squeeze())\n", + " seq_4.append(batch[3][idx_batch].squeeze())\n", + " else:\n", + " baselines.append(batch[0][idx_batch].squeeze())\n", + " i += 1\n", + " if len(baselines) == len_baseline_total:\n", + " break\n", + "\n", + "assert len(seq_1) == seq_len\n", + "assert len(baselines) == len_baseline_total\n", + "\n", + "seq_1_tens = torch.stack(seq_1).unsqueeze(0)\n", + "seq_2_tens = torch.stack(seq_2).unsqueeze(0)\n", + "seq_3_tens = torch.stack(seq_3).unsqueeze(0)\n", + "seq_4_tens = torch.stack(seq_4).unsqueeze(0)\n", + "bl_tens = torch.stack(baselines)\n", + "bl_tens = bl_tens.unfold(0, seq_len, 
seq_len).moveaxis(2,1)\n", + "\n", + "print(f\"ground truth labels of the sequence (only the last one counts): {seq_4_tens}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WWxvs_49P1bL" + }, + "outputs": [], + "source": [ + "net = net.eval()\n", + "\n", + "device = \"cpu\"\n", + "hidden_size = 7\n", + "nb_rnn_layers = 1\n", + "classification = True\n", + "batch_size_validation = 1\n", + "max_value=np.inf\n", + "\n", + "net_copy = net\n", + "loss = 0\n", + "n = 0\n", + "\n", + "batch_samples_input1, batch_samples_input2, batch_samples_input3 = seq_1_tens, seq_2_tens, seq_3_tens\n", + "batch_labels = seq_4_tens\n", + "batch_samples_input1 = batch_samples_input1.to(device=device).float()\n", + "batch_samples_input2 = batch_samples_input2.to(device=device).float()\n", + "batch_samples_input3 = batch_samples_input3.to(device=device).float()\n", + "batch_labels = batch_labels.to(device=device).float()\n", + "\n", + "input = batch_samples_input1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "lRGLEIIDTJjp" + }, + "outputs": [], + "source": [ + "class PortiloopNetworkCaptum(nn.Module):\n", + " def __init__(self, net):\n", + " super(PortiloopNetworkCaptum, self).__init__()\n", + " self.net = net\n", + " \n", + " def forward(self, batch_samples_input1):\n", + " batch_size = batch_samples_input1.shape[0]\n", + " seq_len = batch_samples_input1.shape[1]\n", + " h1 = torch.zeros((nb_rnn_layers, batch_size, hidden_size), device=device)\n", + " x, hn1, hn2, max_value = self.net(batch_samples_input1, None, None, h1, None, np.inf)\n", + " return x" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "hcqkTgi6SC1b" + }, + "outputs": [], + "source": [ + "net_captum = PortiloopNetworkCaptum(net)\n", + "\n", + "with torch.no_grad():\n", + " output = net_captum(input)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + 
"base_uri": "https://localhost:8080/" + }, + "id": "vKHGpJm8Wnvb", + "outputId": "4bf3bdbe-c6b6-4e3c-9cae-e819341a0f1e" + }, + "outputs": [], + "source": [ + "torch.backends.cudnn.enabled=False\n", + "\n", + "bl = bl_tens[3].unsqueeze(0)\n", + "print(bl.shape)\n", + "print(input.shape)\n", + "\n", + "# bl = 0.0 # comment to not override baseline\n", + "\n", + "ig = IntegratedGradients(net_captum)\n", + "attributions, delta = ig.attribute(input, baselines=bl, return_convergence_delta=True)\n", + "\n", + "# attributions, delta = ig.attribute(input, baselines=0.0, return_convergence_delta=True)\n", + "\n", + "# ig = DeepLiftShap(net_captum)\n", + "# attributions, delta = ig.attribute(input, bl_tens, return_convergence_delta=True)\n", + "\n", + "# ig = DeepLift(net_captum)\n", + "# attributions, delta = ig.attribute(input, 0.0, return_convergence_delta=True)\n", + "\n", + "print('Convergence Delta:', delta)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 845 + }, + "id": "9-7Lftl1g7vS", + "outputId": "ebbd2b75-545e-4c92-af49-82dda0760291" + }, + "outputs": [], + "source": [ + "#FIXME: x axis is not correct here\n", + "\n", + "print(f\"output:{output}\")\n", + "\n", + "per_row = 5\n", + "\n", + "plt.rcParams['figure.figsize'] = [per_row, int(seq_len/per_row)]\n", + "plt.rcParams['figure.dpi'] = 200\n", + "plt.rcParams.update({'font.size': 5})\n", + "\n", + "fig, axs = plt.subplots(int(seq_len/per_row),per_row)\n", + "\n", + "attributions_unscaled = attributions.detach().numpy() # / attributions.sum() * np.prod(attributions.shape)\n", + "attr_max_amplitude = attributions.abs().max().detach().numpy()\n", + "attributions_scaled = attributions.detach().numpy() / attr_max_amplitude\n", + "attributions_scaled = np.abs(attributions_scaled)\n", + "# attributions_scaled = np.sign(attributions_scaled) * np.sqrt(np.abs(attributions_scaled))\n", + "input_scaled = 
input.detach().numpy()\n", + "\n", + "for i in range(int(seq_len/per_row)):\n", + " for j in range(per_row):\n", + " idx = i*per_row+j\n", + " ls = np.linspace(start = ((i_min + idx)*seq_stride)/fe, stop=((i_min + idx +1)*seq_stride)/fe, num=window_size)\n", + "\n", + " xs = ls\n", + " ys = input_scaled[0][idx]\n", + "\n", + " segs = np.zeros((ys.shape[0] - 1, 2, 2), float)\n", + " segs[:, 0, 1] = ys[:-1]\n", + " segs[:, 0, 0] = xs[:-1]\n", + " segs[:, 1, 1] = ys[1:]\n", + " segs[:, 1, 0] = xs[1:]\n", + "\n", + " norm = plt.Normalize(-1.0, 1.0)\n", + " lc = LineCollection(segs, cmap='seismic', norm=norm)\n", + "\n", + " axs[i,j].set_ylim((-5.0,5.0))\n", + " axs[i,j].plot(ls, input_scaled[0][idx], linewidth=0.2, c=\"lightgrey\")\n", + " # axs[i,j].plot(ls, attributions_scaled[0][idx], linewidth=0.5)\n", + " axs[i,j].axes.xaxis.set_visible(False)\n", + " axs[i,j].axes.yaxis.set_visible(False)\n", + "\n", + " id_title = idx - seq_len + 1\n", + " id_title = str(id_title) if id_title != 0 else \"Score: \" + f\"{output[0].item():.3f}\"\n", + " axs[i,j].set_title(str(id_title))\n", + "\n", + " lc.set_array(attributions_scaled[0][idx])\n", + " lc.set_linewidth(2)\n", + " line = axs[i,j].add_collection(lc)\n", + "\n", + " color = \"black\" if idx == seq_len - 1 else \"lightgrey\"\n", + " plt.setp(axs[i,j].spines.values(), color=color)\n", + " plt.setp([axs[i,j].get_xticklines(), axs[i,j].get_yticklines()], color=color)\n", + "\n", + "plt.tight_layout()\n", + "\n", + "pathfig = path_plots / 'grad_explainer.pdf'\n", + "plt.savefig(pathfig, dpi=200)\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Pl8Buz1ahbvt" + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "simulate Portiloop 1 input classification", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + 
"language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.0" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "12099b37a72d43078194d5977407662a": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatSliderModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatSliderModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "FloatSliderView", + "continuous_update": true, + "description": "", + "description_tooltip": null, + "disabled": false, + "layout": "IPY_MODEL_c146aa43cd4b4941951f6794553f77a6", + "max": 1000, + "min": 0, + "orientation": "horizontal", + "readout": true, + "readout_format": ".2f", + "step": 0.1, + "style": "IPY_MODEL_52d0163c6ec64bfca8ff55709794be53", + "value": 136.4 + } + }, + "15722a6ed7144cada304389c713fa184": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "SliderStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "SliderStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "", + "handle_color": null + } + }, + "1aeddf94f6084a19bba2a5311766a6fb": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + 
"_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "307138cdaff54eec978913a3a6ccf9aa": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatSliderModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatSliderModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "FloatSliderView", + "continuous_update": true, + "description": "", + "description_tooltip": null, + "disabled": false, + "layout": "IPY_MODEL_6c4cc61f3df1492889d3632fa06d3f53", + "max": 50, + "min": 1, + "orientation": "horizontal", + "readout": true, + "readout_format": ".2f", + "step": 0.1, + "style": "IPY_MODEL_15722a6ed7144cada304389c713fa184", + "value": 9.8 + } + }, + "4037152492fa44d786c1d98b9c0b966d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ButtonStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ButtonStyleModel", + "_view_count": null, + 
"_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "button_color": null, + "font_weight": "" + } + }, + "52d0163c6ec64bfca8ff55709794be53": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "SliderStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "SliderStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "", + "handle_color": null + } + }, + "626fade444f040e18bfbcd1143e7bf05": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ButtonModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ButtonModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ButtonView", + "button_style": "", + "description": "Print", + "disabled": false, + "icon": "check", + "layout": "IPY_MODEL_ff78d6effae3404cb8bc4c76286b41aa", + "style": "IPY_MODEL_4037152492fa44d786c1d98b9c0b966d", + "tooltip": "Print" + } + }, + "62a91a2a431440779e5ddf94240fd8f7": { + "model_module": "@jupyter-widgets/output", + "model_module_version": "1.0.0", + "model_name": "OutputModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/output", + "_model_module_version": "1.0.0", + "_model_name": "OutputModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/output", + "_view_module_version": "1.0.0", + "_view_name": "OutputView", + "layout": "IPY_MODEL_fe98486cdd4d4e988a7caae86d4c44e3", + "msg_id": "", + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAwwAAAF8CAYAAABiwjyTAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAewgAAHsIBbtB1PgAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAgAElEQVR4nOy9d5wkVbn//zmTc9iZ3ZmN7LKwLCxBYAUkKCJiQFC8BhTTNX2N4BUV/flVMFwT6teAIopeCVdEQEAEFJGcJC9hdXGBzbOTe3rydM+c3x9PP9Sp01XdVT3dXT0zz/v1mtf09FR3nao64clHaa0hCIIgCIIgCILgRVnUDRAEQRAEQRAEoXQRhUEQBEEQBEEQBF9EYRAEQRAEQRAEwRdRGARBEARBEARB8EUUBkEQBEEQBEEQfBGFQRAEQRAEQRAEX0RhEARBEARBEATBF1EYBEEQBEEQBEHwRRQGQRAEQRAEQRB8EYVBEARBEARBEARfRGEQBEEQBEEQBMEXURgEQRAEQRAEQfBFFAZBEARBEARBEHwRhUEQBEEQBEEQBF9EYRAEQRAEQRAEwRdRGARBEARBEARB8EUUBkEQBEEQBEEQfBGFQRAEQRAEQRAEX0RhEARBEARBEATBF1EYBEHIiFLqt0opnfr5QNTtKTXmyv1RSm0z2rk6T995gfGdF+TjOwVBEITSQxQGQZgHKKVWG4Jbvn4uiPq6hIWLIs5QSl2rlHpeKTWulOpVSj2qlDpfKbWqAOc8MYdxcnu+2yEIglBqVETdAEEQBEEwUUotA3AFgJOsf9UAaAdwJIDPKaU+rbX+bZGbJwiCsOAQhUEQ5gdxAD/LcsxRAF6eer0HwPVZjn94to0ShLAopZoA/BXAwcbbDwN4FkAzSIloAdAA4H+UUjNa68sL0JQgYwQA/lWAcwuCIJQUojAIwjxAaz0A4FOZjkmFGLHC8G+tdcbjBSEiLoKjLAwAeLvW+g7+p1KqHsAlAM5KvfUrpdQDWuuteW6HjBFBEIQUksMgCIIglARKqYPhKAIA8G5TWQAArfUogPcBeCD1VhWArxenhYIgCAsTURgEQRCEUuHjcNalv2mt/+p1kNZ6BsAXjLfeoZRqL3TjBEEQFiqiMAiCEBqlVJ1S6hNKqfuUUt1KqUml1E6l1FVKqeMCfD6tHKdSqlYp9SGl1G1KqR1KqanU/1/m8x2vUUr9Qin1rFJqINWGPUqpvyqlPqWUqg14LStTVXfuMa5lSinVr5TapJT6nVLq40qpzmLdH+u7GpRSZ6eua5dSakIpNaiUekYpdZFS6ugw3xfivK9OXfv21Dm7lFL3pq6rrgDnUwBON976n0zHa63vB8BhSOXWZwVBEIQ8IjkMgiCEQil1EIBrARxo/WsFgDMBnKmU+rrW+vwQ33kggGsAbAhw7EoAlwM40ePfS1M/pwD4klLqTK31vRm+66MAfgTAS7lYlPo5FMC7QKEyxwdoX97uj1LqTQB+BcBWVqpBib8bAHxSKfU7AB/RWo9l+84A56wA5Qh80PpXZ+rn+NQ53zrbc1nsD7pHzF0BPnMngP1Sr08C8Js8t0kQBEGAKAyCIIRjGYDbQUJ5DMC9APaCSl2eBKpiAwBfVUpt1lpfHeA72wD8BcAqABMA7gOwHVQF5xjzwJRi8ffU+QFAA3gcwGYA4wCWA3glgMZUW/+mlHqD1vpO+6RKqbeABGMmDuBBALsAJFPXsg6UgFsV4DqAPN4fpdQ7AfwvyHoOANOge7MVdG9OSJ0PAN4NYI1S6iSt9UTAtvpxOUhBYmIgwbwf9IxOBHAQgFsA/GmW5zIxFay9WuuuAJ953Ofz+aBWKXUagMNAiuMogG4A/wDwhNY6mefzCYIglCyiMAiCEIavgqzb3wXwddOirZRaBPIScO38byml/qC11lm+82OguehaAJ/QWvca31mGlMCcqo5zHRxl4VYAn9ZaP29+Waos53dA8fDVAP5
XKXWg1nrIOq9p4b8IwHleFnqlVAOAN4Bq/2cjL/dHKbUWwKVwlIWHAZxlVgJK3ZvPALgQFF76CgDfA3B2gHZ6opR6L9zKwkUAvqC1HjeOWQrgytR1fCLXc3lwgPF6e8DP7DBer89jWwAqQ+ynEO1RSv0/AD/WWifyfF5BEISSQ3IYBEEIQzWAb2utv2gL16nSru8GWWIBYF+Q0JWNCgC3AXinqSykvnPGEMg+C8eKfD2AN9nKQuozca31JwBclnprKUgpeYmUEsC5ETsBnO0XzqO1HtFaX6O1/mKAa8nX/fkqyIsAkEfhFLtsaOre/BDA54y3P6mUWhOgnWmkFJD/Nt76rdb606aykDpvF4A3AXgKwT0vQWgzXncH/Mxe43WdUqo6j+3JxDKQonaPUqqjSOcUBEGIDFEYBEEIQy8ylLDUWncDuNl4K4jCAACfSVW+8UQpVQlnn4lJAB/LdHyK/w8UsgS4S3UCQJPxuj+AFyQos74/SqkWAO803vqCh3fE5MegTc0AmtM/Gri1bl4HYGXq9TjcioiLlBLh+/8caTBej/se5cY+rsHzqHD0Avg5gDNASl0daIfpfQG8H8AjxrHHALgpaIK9IAjCXEUUBkEQwnBTgBj5J4zXqwN851Na639mOWYjgCWp13/XWvdk+1Kt9R44u/AerJRqNv7dB8qX4P+FqlyUgXzcn2NBngqA2nlTpi9LKU5msu+rs5zfD/Nzt2it+7McfzuA3Tmey4sa4/VUwM9MWn/PVnB/FMAKrfUntdY3aK1f1FqPa60nU68vB3A0gG8Yn3k5gHNneV5BEISSRnIYBEEIw9MBjjEFzSbfoxweC3DMK4zXK5RSFwX4DECVhABAgSrwDAGA1npKKXUDqGpRBYA7lFJXg/Io7tFaxwJ+v00+7s/hxuuHAybX3m9+XimlcvCamOd9MNvBWmutlPoHgHxVSzIVraChTnYIUlDPhCda65EAx2hQ0vpaUIgZAHxWKfUdSYQWBGG+IgqDIAhhyBQaw5hJoJUBju/NfshL1YAAKnN6aIDP2LRaf/8XKJF5f5CA+t7Uz4xS6llQhaO/AbhVa21bsv3Ix/1ZbLwOmvy7zXhdBaoSFQ/4Wa/z7vA9yk3Q44JgCutBPQX2cVkF/jzyVTgKQysoPOm+Ip5fEAShaEhIkiAIYchXrL9JEKtwc/ZDsuIykGit94JCnb4Jd5JtGYBDQBWArgfQpZT6olKqHNnJx/0x4/BHfY9yYx/XOMvzBt3PIWj7gmB6XoImEpv7U4yFUOxmTSrhfpvxVr7LugqCIJQMojAIgjAXMAXTn2itVQ4/d9lfmqqo9BXQ/g3HAPg8gBtAuQNMK4BvA7gutRtxoTGt5PUBP2MfNzzL8wbdyTlo+4KwxXi9T8DPrDJe/8v3qMJh7hXRHsH5BUEQioIoDIIgzAVMD4C96/Gs0VpPa63/obX+vtb6DJCF+wS46/C/GcB/5PvcHpghWqt8j3Kz2ng9hdwUhlzOuzL7IYExE987lVJBnvMRPp8vFqbClE9viyAIQkkhCoMgCHOBfxivjy20pT+1x8F9AN4CymNgTi/keVOYVZSOChgKdaz5+RzLxJrnPcb3qBSpZ3B0Dufx49+gXbaZEwN85lXG6zvy2JasKKXq4N5sbk8xzy8IglBMRGEQBGEucD8Arly0AsBpxThpSvA2y5oWY5OuB+CUC10M4NRMB6c2XPtP461cBec7jddvTO1MnYmTQM8iL6TutenR+UCm45VSrwCwLvXnNLKUny0A74ZTpUkDuKfI5xcEQSgaojAIglDypJJZf2S89XOl1PKgn7d341VKNSqlgpbuNMNusu7/MFtSJV2vNt66UCmVKYn5U6AkbQCYAfDLHE99G2jXa4ByGL7nd6BSqgbAD3I8TyZ+AboGAHidUuq1Pucvg7t919i7hIdFKVWX+t4gx+4P4DvGW7cF2RtEEARhriIKgyAIc4UfwNnReDmAR5VSb/cT8pRS7Uqpjyq
lHgclM5scCWCbUuoCpdRBPp8vV0q9E8Cnjbdvnd0lBObrcJKQ1wH4q1JqX6t9ZUqpcwD80Hj7Z1rrbbmcUGs9DeArxlsfUkr9KKUcmOftBFnzD0PwDdaCtuFpAP9rvHWVUupE6/z1AH4L4PjUW1NWu9NQSmnj5wKfw44C8KxS6uNKqSVeB6T6xHtA+1S0Gec/L9P5BUEQ5jqyD4MgCHMCrfWIUup00A7Da0DJz38A0KeUegjAXtAGbYsAHATaX4GVCa8wnaUAzgdwvlJqL4AnU9+RBIUeHQn3/g/3Avh9ni/LE63180qpD4OE53LQxnVblFL3AngeVAL1BJDixDwE4AuzPO9lSqk3AnhH6q1zALxPKXUnqOzpStCO0NUAXgRwI4DPzOacHnwKlMy8ASSU35naIG4zaKO7k+DeU+OjWuuteTr3egA/B3CRUmorSEEdAHk9OkHPwayGNA3gfVrrTXk6vyAIQkkiCoMgCHMGrfULSqmNoNCVt4EUhHYAb8rwsRjSd2AeBykGPAd2Anh9hu+4FsAHtdYzGY7JK1rrq5VSowAuBSkwFSBh/dUeh18F4MNa6wmP/4XlPaD78/7U361I3835XwDOAO2UnVe01nGl1CkArgApBwAlV9sJ1iMAztZaX5bvNoAUzXVwciS82ALqEw8U4PyCIAglhSgMgiDMKbTWAwDeoZQ6GMC7QNV01oCs0TMgBWErgMdB3oi/2YK01vofqbCTk0GhLYcDWJv6jnLQLsnPg6z2V2qtHy78laWjtf6zUmo/AB8EKUUbQArSOKgqz50ALtda/8P/W0KfMwHgA0qpywF8FMBxAJYAGATd1z8A+E3K45Ov09pt2KOUOhmklJwF8jgsBSkJO0AhUb/WWudzp+l7QRv5vQJUdeoAUH9oA3lUhkA5Hv8AJWf/JcdqVIIgCHMOJfOdIAiCIAiCIAh+SNKzIAiCIAiCIAi+iMIgCIIgCIIgCIIvojAIgiAIgiAIguCLKAyCIAiCIAiCIPgiCoMgCIIgCIIgCL6IwiAIgiAIgiAIgi+iMAiCIAiCIAiC4IsoDIIgCIIgCIIg+CIKgyAIgiAIgiAIvojCIAiCIAiCIAiCL6IwCIIgCIIgCILgiygMgiAIgiAIgiD4IgqDIAiCIAiCIAi+iMIgCIIgCIIgCIIvFcU+oVKqGsAhqT97AUwXuw2CIAiCIAjCnKAKwPrU67u01mNRNmahUnSFAaQsPBLBeQVBEARBEIS5y6kAbom6EQsRCUkSBEEQBEEQBMGXKDwMvfzi4YcfxtKlSyNogiAIgiAIglDqPPnkkzjttNP4z/4o27KQiUJheClnYenSpVixYkUETRBmQ39/P26++WaceuqpaGtri7o5giAUEBnvQilRKv2xVNqxEOjq6jL/lLzXiJCQJCE0ZWVlqK+vR1mZdB9BmO/IeBdKiVLpj6XSDkEoFkprXdwTKrUCwE4A2Llzp3gYBEEQBEEQBE8eeeQRHHXUUfzny7XWj0bZnoWKqMZCaGZmZjA5OYmZmZmomyIIQoGR8S6UEqXSH0ulHYJQLERhEELT3d2N73znO+ju7o66KYIgFBgZ70IpUSr9sVTaIQjFQhQGITQtLS1429vehpaWlqibIghCgZHxLpQSpdIfS6UdglAsoqiSJMxxamtrsWHDhqibIQhCEZDxLpQSpdIfS6UdglAsxMMghGZsbAxPPvkkxsZkd3ZBmO/IeBdKiVLpj6XSDkEoFqIwCKEZGhrCjTfeiKGhoaibIghCgZHxLpQSpdIfS6UdglAspKyqEBqtNbTWUEpBKRV1cwRBKCAy3oVSolT6Y6m0YyEgZVVLA8lhEEIjE6QgLBxkvAulRKn0x1JphyAUCwlJEkIzMDCAq666CgMDA1E3RRCEAiPjXSglSqU/lko7BKFYiMIgCIIgCIIgCIIvksMgCIIgCIIglCSSw1AaiIdBCI3WGjMzMyi2sikIQvGR8S6UEqXSH0ulHYJQLERhEEKzd+9
efOMb38DevXujboogCAVGxrtQSpRKfyyVdghCsRCFQQhNc3Mz3vzmN6O5uTnqpgiCUGBkvAulRKn0x1JphyAUC8lhEARBEARBEEoSyWEoDcTDIIRmfHwczz77LMbHx6NuiiAIBUbGu1BKlEp/LJV2CEKxEIVBCE0sFsO1116LWCwWdVMEQSgwMt6FUqJU+mOptEMQioWEJAmhmZmZQSKRQGVlJcrKROcUhPmMjHehlCiV/lgq7VgISEhSaVARdQOEuUdZWRmqq6ujboYgCEVAxrtQSpRKfyyVdghCsRC1WAjN4OAgrrvuOgwODkbdFEEQCoyMd6GUKJX+WCrtEIRiIR4GITQzMzMYHR3FzMxM1E0RBKHAyHgXSolS6Y+l0g5hbqOUemXq5SNa60AZ9EqpGgBHAYDW+p5CtS3tvJLDIAiCIAiCIJQi8zmHQSk1A2AGwKFa680BP7MWwL8BzGiti2b4l5AkQRAEQRAEQYgGVeTP5YQoDEJourq68M1vfhNdXV1RN0UQhAIj410oJUqlP5ZKO4QFCcvu01GcVBAC09TUhFNOOQVNTU1RN0UQhAIj410oJUqlP5ZKO4QFyT6p30PFPKkkPQuhqa+vN+MJBUGYx8h4F0qJ+vp6HLVoEVBXF307ZFwIIVFKrfL511Kl1EiWj1cDWAvgGwA0gGfz2bZsiMIghGZiYgI7duzAqlWrUFNTE3VzBEEoIDLehVLi1+c/hyOvfifWX/kr1GzcGFk7ZFwIOfKix3sKwG05fNfls2xLKCQkSQjN4OAgrrrqKqk/LQgLABnvQinRPxDDje96CwYjzh2QcSHkiLJ+/N7P9DMJ4EKt9W+K12wpqyrkwPT0NMbGxlBXV4fy8vKomyMIQgGR8S6UEl/9yG58/vfrUfe736H8tNMia4eMi+Ixn8qqKqXeb731P6Dwoq8A2J3hoxrABIAuAE9orbOFL+UdCUkSQlNeXo7GxsaomyEIQhGQ8S6UEgNTTWgcGQHGxiJth4wLIRe01peZfyul/if18oag+zBEhYQkCaGJxWL405/+hFgsFnVTBEEoMDLehVJieHoKfzr99Mj7o4wLIU+8GsBJ8M5tKCnEwyCEJplMore3F8lkMuqmCIJQYGS8C6XE+JRG95IOJCP2MMi4EPKB1vruqNsQFMlhEARBEARhTnDKKcC1d7ej6YJzgS99KermCEVgPuUwzGXEwyAIgiAIwpxgagpIllcDk5NRN0UQZo1S6quz+bzW+uv5aks2RGEQQrN3715cdtlleP/734/Ozs6omyMIQgGR8S6UEtXVe/HLs9+PD0xNIcreKONCyBMXgCog5UrRFAZJehZC09DQgOOPPx4NDQ1RN0UQhAIj410oJUZGGvCyR/+Jhog9DDIuhDwSZg8G+/iiIQqDEJqGhgYcd9xxMlEKwgJAxrtQSoyMNODgTS+iYWIi0nbIuBDygda6LNsPgAYARwD4PoAEgPsBdKb+VzREYRBCMzk5iW3btmFSYkgFYd4j410oJbSexK5VSzE5NRVpO2RcCMVCaz2mtX5Sa/0FACcD2Ajgr0qp6mK2QxQGITQDAwO47LLLMDAwEHVTBEEoMDLehVKipmYAfz39GAwUucKjjYwLIQq01vcBuBjAoQA+U8xzS1lVITTJZBLxeBxNTU2oqJC8eUGYz8h4F0qJNWuSuL7pDBy8vg4VV18dWTtkXBQPKavqRil1EoDbAWzSWh9erPNKLxdCU1FRgUWLFkXdDEEQioCMd6GUmJioQH2VRkXEOQwyLoQIYbfW2mKeVEKShNAMDQ3h1ltvxdDQUNRNEQShwMh4F0qJqqohPPLyNRiKOCRJxoUQIQdEcVJRGITQTE1NYdu2bZiKOOlMEITCI+NdKCWUmkLPkkZMTU9H2g4ZF0IUKKVaAHwFtHfD5qKeW3IYhGLw9NNAczOwalXULREEQRDmKjU1wBOHfQAHVm4F7rsv6uYIRWA+5zAopV4Z4LAyAK2g6kj/CaAj9f4
ntda/KFTbbCSHQSgKhx5KvyP2IguCIAhzmEQCQFUVMCmWfWFecBfC7fTMm7VdD+CSvLcmAxKSJISmu7sbP/zhD9Hd3R11UwRBKDAy3oVSYWYGaG/vxk0ndKK7pibStsi4EPJImJ2enwLwfwC8TRc5REg8DEJo6urqcMQRR6Curi7Q8YmE81prQBV1M3NBEGZD2PEuCIUimQTGxuqwpieBupGRSNsi40LIE68OcMwMgGEA27TWsQK3xxdRGITQNDY24sQTTwx8fMzo3mNjQH19/tskCEJhCDveBaFQJJPAyEgjNvRoNA4PR9qWhTIuRkaA++8HXve6qFsyP9Fa3x11G4IiIUlCaKamprB79+7A1SFMQ5BsiikIc4uw410QCkUyCVRWTmGguQpTMzORtmWhjIsvfxl4/esBibwSRGEQQtPf349LL70U/f39gY43FYaIjUKCIIQk7HgXhEKRSADt7f34+37l6I84FGihjAu+vC1bom2HED0SkiSEpr29HR/72McC73JpKgmjowVqlCAIBSHseBeEQpFMAr297Xjtriq0R2zyXijjgre72LUr2nYsBJRS5QDeDOBkAIcA4M41AOAZALcDuFFrnYyifaIwCKGprKxER0dH9gNTmB4GURgEYW4RdrwLQqFIJoFkshKtqgaV4+ORtmWhjIuxMSpUEoss1XZhoJQ6HcBFAJabb6d+awDHAvgogC6l1Ke01jcUuYkSkiSEJx6P4/bbb0c8Hg90vCgMgjB3CTveBaFQJJNAY2McT7UmEK+qirQtC2VcjI7SthdDQ1G3ZP6ilDoHtK/CcjhKwjYAD6V+tvGhAJYBuE4p9ZnitlIUBiEHJiYmsHnzZkxMTAQ63jQEicIgCHOLsONdEApFMgnU1ExgV80UJsrLI90JdKGMi+FhoLoaGByMuiXzE6XU0QB+AFIGhgGcB6BDa71Wa31s6mctaHfn8wAMpY69MPXZoiEhSUJolixZgrPPPjvw8aIwCMLcJex4F4RCkUgAvb1LcFpyJZb09lKAfUU0YsxCGRcjI6QwyNpdMD4LMt4PAThOa73Z6yCtdR9ISfgzgAcANKU++85iNVQ8DELBGR+nCaeqSiYdP+S+CIIgZCaZSvVU1alwpHle0rQUYA+DrFEF4wRQjsJ3/ZQFE631PwF8F+RleGWB2+ZCFAYhND09PbjooovQ09MT6PiJCaC2ln4izlMrSdavB9ati7oVguBN2PEuCPlg717guefc7yWTwOLFPbgJ29CzeHGkCsNCGRdjY0BNDf0WCkJr6vedIT7Dx7bkuS0ZWdAhSVu2AL29wPHHR92SuUVNTQ3WrVuHmpqaQMePj5OywK8Fh5kZ4PnngTJR3YUSJex4F4R8sHQpRRslEs57ySQwOVmDlZUtqJmcjFRhWCjjYmKCFAbxMBSMLgD7zOKzRWNBKwzr19PvCPOm5iRNTU045ZRTAh8vCoM/g4OOm31kBGhoiLY9QukzMAA0NRUvdDvseBcKx/Q0lbdsa4u6JcUhaVWbTySAeLwJL29dg6Z4PFKFYSGMC61pzRYPQ0G5HcCHALwKwD8CfubE1O87CtEgP8SuCVEYwpJIJNDT04OEafrJAE84EpKUjlnbOuJ9iIQ5QCJBwuInP1nMc4Yb70Lh+POfgVWrgKuuirolhcVck83XySRQUZFAvDyBhO1+KDILYVwkk+QFr6sThaGA/ADAOIAvKqWyBienjjkPwCiACwvcNheiMMC9E7GQnb6+Plx88cXo6+sLdDy7NEVhSMdUGOZ5OW/Bg64u4Oabgx//9NP0+5e/LEx7vAg73ucDzz0HHHkksGdP1C1xs3UrzaHPPBN1SwoLrxOLFrnnRcph6MP1ex5HX8Q5DAthXHDF2Lo6CUkqFFrrLQDelvrzIaXUZ5RSaduHK6VaU/s1PJB66x2pzxaNBRuSZAqufX3k4heC0dbWhg9+8INoC+gXn5wkhaG8XKwUNmZta3ODO2FhcPLJwObNJBQ1NmY
/ftcu+q1U5uPySdjxPh+4+GLg8ceBa64Bzjkn6tY47NxJc+nu3VG3pLCwIWVggObF5mb6O5kE+vvb8Ob9j0Fb/7ciVRgWwrhgOam21lEehPyilOKwol4A+4M8Dt9XSr0IoAdUQakDwBo4m7ptBfB5pdTnfb5Wa61fk++2LliFYWjIcbOJ5hyOqqoqrFy5MvDx7GHYf3/6LTiYu2eKp2vhsTlVRO/pp4Fjj81+PFu8jz+eFPHq6sK1jQk73ucDW7fSb1bQis3Pfga8+c3AihXu9/v6SHib77vumsYTc31OJoGpqSosa+9A1dRUpArDQhgXrCTU14vCUEBOBCkFjEr9rE39eLFf6sc2HenUewUJtF+wCsPoqGPtFstuOIaHh/HEE0/g8MMPR2MAsygLNt3dc69s9v33A5s2AZ/4RGG+3+x7ojAsLMzY7K6AtS76+oCWFuDee6nCmy1QFoKw430+sCXl6I8i2iQeBz71KeCyy4CHH3b/LxYjhcEMZZyPmMKpOUcmk0BDwzAe79qK4xoa0BjhgrIQxgV7GOrqRGEoIPegQAJ+vlmwCoMZGiMKQzjGxsbwyCOP4IADDgg0UU5MkMIw1xY6rYFTT6VJ8+MfL0wYyOgoVbtJJkVhiII77gCOPposaMXGnHf27g32mViMwjNiMaCnpzgKQ9jxPtdJJmmuOuIIYMeO4p//xRfp97Zt6f+LxaiS2lyaR3PBT2GYngbq68fwdNfzOLy+PlKFYSGMCzOHQfIPC4PW+sSo2xCUyJOen3qKFsAbbijueU03pwhq4ejo6MC5556Ljo6OQMdzDsNcS3res4dc/1NTJJwVAi6lKmXrik8sBrzmNcB//Ec052frdWcneQuCEIuRhwHIX1WtRIK8aH6EHe9zne5uWpc6Otw5RsWit5fmg87O9P8NDlKuy3xXGMx1wg5J6u7uwNtO/QTO2+8+PD19UPEbl2K+jYt77wUuuMD9HutjHJIkFSUXNpErDDfeSC7Y664r7nnNSUg8DIWFPQyzsVK89a3At7+d33Zlw6yQsnNnYc4xOkqT8VxTpuYD//43/X700WjOz0pCTU1wo8XQENDeTq/7+/PTjq99DXjZy9LDou6/36nKFP7TY7sAACAASURBVBatgVtvnZv5YZy3sGwZJd0Wm8FBmjO9DAjDw2RgmEvGhUQivKCZKSQJAKZrG3HZ/fthx/ji2TdQAAC88500F5j5MZOT9Lu+nsqr2vtiCLNHKbUq9VMe4jPl/LlCts0mcoVh505yq19zDXXIYsELmVTuCU9vby8uueQS9AY0i5oehlzv9V//CnzlK8W1cJhehUIlP7KHYb4rDDMzwMc+BtxyS9QtcYii4pAJW69bWoInsdbVAevWUX/Jl8LACpMdL3/88cChh4Yf7wBw553AG99IybtzDTYUrFwZjcLAZUS9zj0yQsLbXFmzkknq32FzwEyFwXydTALt7b34y18uQXt770sCbRTkMi6iYHo6WFUtLqCwfbvzHt9f3lB0Pq9REbINwAsADgjxmdXG54pG5ArD3r20BfzkZPDEv3zAE65sSBKeqqoqrFixAlVVVYGON3MYcplwpqboGU1PBw/dyAesMJSXF25TtdFR6oPzXWH45z+BSy4hpa9UYIE9qmQ+FgzDKAxbt9JYaG/PX0Lu9DT9Nr/P7IthxztAzxuIRuCeLX19pEQuXUoWfb4/xWJ4GKispP5hG0hGRykkaWxsboSHxGLU1l/8ItznuP8p5e6L09NAIlGFZctWIJGoirSIRi7jIgq++10yymbzYvI8aO57IQpD0cjVbFVUc1fkCkNvL7B8Ob0uZoIZD475LqgVgubmZpx66qlo5uLYWeAqSbl6GEyho5i7IXO88OLFhTvv+DhZDOd7UhlbrUppN2tWGEZHiy8UAs7CvGhRcIWBQ9ja2vLnYWBFwVQYzBj5tPH+y1/SrmYZpLUdO6idDzzge0jJ0tdH97e1lf4u9oaKw8M0V05Pu+cEriL
a2Egeu7lQcS7XHBBen2tq0j0MQ0PUH4eGmvPmYUgkgG9+M5wnOew6GBX/+Af9fuyxzMexAuoVksQKQ5QeHcEFKwpFjMspAYUhHieBDMjfAhiEiQmyHM8mTGahkkwmEYvFkAwY0MgKQ65CsakwFLP+OFekWbKkcILu2Bj1wfmuuA4MAGVlVBGqVGChWOto6trH47QQNzYGj/UvhMLAXjvTe2fej7Tx/vnP065mGeIcBgZoM0wzvGGuwAoDy4HF7hvDwzRXAm5lhfsIbzI6F/JDcvUwTU7SfFFT454Xk0mgsjKJsbEYysuTeRNgH3qIvJ/HHhtcEQu7DkbF6tX0O9saVp6KoDf7HN8LURhKjqWp30Ut2VMSCgMn8RWzIgVvJjbfLbuFoLe3Fz/+8Y9D5TBUVzuVFsLmqpj9opiL99AQCQ3t7YVTZsfGFkZI0q5dNN5KqbqL1sDGjZTcWkxjBROPk/BXXx+88EIhFAZ+JmbIgjnOurut8V6WWjYyKAxczWnPnuLmpjHDw1TpKBeGhqjtUSkM4+OOwmCemxUEbtdcMHTlOt6nphyvtO1h6OzsxU9+8mMsX96bNy/LI4/Q/FRWBvzgB8E+E3YdjIqtWym0K6h85eVh4KqxshdDQckaZKiUqlRKrQfw5dRbWwrbJDeRKwzDw+T6ra0tbrwrKwziYQjPokWL8N73vheLFi0KdLzpYQDCC8bmRFfM8AAWHNraCreBEwsHYRWG7m7g5JOD5/3099NO21demVs7Z8vAAFmphobI/W8TheVqzx4STLh8brHhhPeGhtw8DPnok8mkc26zDebr6mprvFdXkwSRmrCTSeD5593fOzhIoVbJZG4lif/9bypr+p3vhP8sQDHbxxyTm7LChoIoFYbaWnptzne8TrGHYS6sW7l6QSYngaqq9HlxehoYHqb+ODq6KOO8MTkZ/Pn39JDhYJ993Iqm1v79N+w6GBXDw3Qvs8lX3J/MfmVWSTL/FnJHKTVt/vDbAJ6x/+dx7ASAZwG8FqRgXFvMtkeqMGhNnbmxkZSGYnsYamsl6TkoWtPmZc88A1RXV2PfffdFNZdVyAJbi1hhsO93MkkRDn6w5bO8vPgehqamwnsYamvp/oSZjH/9a+Dvf6fSlUHYtIksTT/9aW7tnC0DA44AZlsd3/lOymMqdhLnyIhjOYtiL5axMVqI6+uDC1b8mXx5GMzrNr0cpqCaSFjjfWKCPpiSQH7/e1JeL7nE+UwsRqF8gLs8cVAeeYQEtVxyIIaGqP3j48C//hX+8+z54f5qh2i84x1UDrxQcF4T4H4+PG9yu0pNeJuYSDfomHN9mDyhqSkScr1yGGZmqD8qVZ3Rw3DQQcAnPxnsfD09pKAuX+7eRPGb36T3uQSzSdh1MCqGh+k+ZpOveA4yFTQz/xAQD0OeUNaP3/vZfq4B8KOitRoRKwwTE8AJJ5BAFmbRzAfj4zSI2ApmMzwMfPnLwd2T8509e6jSxSc+AYyMjODBBx/ESMA4CtvDYCsM115LOZR/+5v350dGyKXq1Ue0LlzyHwuUS5cWbqJkD4O9MAZpm/k7Gy+kiq/le4z97GfAq1/ttjB7Cf6cDwKkCxV/+AMJv8VOiB4d9W9Tsc5fVxd87ksmnV2I86XEsgK+aJG7DabQ0NNjjHe28nAZH1AyZW8vGRTuuos+Y+am5eJheP55mjNy8Trz+crKqLxrWGyFwTRSPPMMlQD/8IfDf29QxsedmHFTYeBnUqrhIfvvD5x0kvs9c64PY5hjhcH2MCSTQGMj9cfm5pGMStMLL9DaEgT2KnV2ur22Dz5Iv++9N/0zYdfBqIjHaX3J1MxEwvH82h6dFSuckqulpqTOUb5m/QDkLbjY43/mzwUAvgjg/QD211qfqbUuarmOyBWGu+8my3EYt3y+zl1T46x/NnffDXzrW8AVVxSvTaXMc8/R78pKmijvuuuuQBMlWYQyKwy8OZRfzDHXHvcSrI48koSdQlS54ZCR+vrC7cO
Qq4ehu5s+x88lGyxE5Tvc9s47SUj8+9/p76eeoj5y003u4+JxbwGMQwba24tbJQ2g5xtGYdA6v/H4nL/Cc182Dwsv5DU1dL/Gx2c/Zw4PU1Lky1/uFihMoWFw0Bjvo6N0E2prX7ppO3ZQ+M+BBwL33EOfGRlxPAy5KAxdXfRsclEi+XyvfCX1zbAhkPE4CeW1tZSkb/bXHTvovb6+wm1iNTbm7fnieZN3+i4lhWFoiObIxx5zW7LNuT6MXM0Kgz0vJpNAUxP1x0wKAwu/QedUnuttRZzny61bvT4TfB2MEq66lamZ5hixXw8N0ZwDuPvc5s3AuecC27bltbnzHq3118wf418/s/9n/Xxda/09rfUVWuvnfU9QQCJVGLhj1taGS/zLB2YOg9fEu2ULTVjFFmJKFTNeurOzE1/60pfQ2dmZ9XM8YVdVOW52W8jhe+xXUYUnc6/wsR076PsKsYcHn7e1lV57xd7Pllw9DD09dD+DKgBsqe3pya9HZutWEm7Yw/Dww6S8cSk/ZnjYEXRM4dws6ZmLYDkbWGGoqgoW6rZqFXDmmfk7PysMvItqtudiloJua6PXs/UyjIzQgt/W5p5/JyacilbV1cZ45xvV0PDSg+zupvjvAw8E7ruP/s25aY2NuT3Xnh56NrkouNyn/u//JQtz2LCm0VG6PKXI02D2je3bnWoyuYQ7BYFDkioqvD0MrOSWUpEE08NoCtf5Vhimp4FYjPrj2Finr0LA4yLofhX8zJub6XnzZ7jvevXhMOtglLBRKlPYpbn22CFJ/Bz4b+aPfwR++EPaVFWYFf8J4IMACmSWzB+RexgAt5WtmOeuqUkv28a8+CJZyAYH50b5umwMDQG/+U3ulniOOy8PvHk5wROM6WGw7ydbEf0EC85zsT0MWjvCZyGsHGZ+DVCYHBtOcAzrYRgaCqcwmG3PZwL3jh0UesL3nwUHu13xON3HsjK3wmAW2il2pSL2XAWZe7QmC+o11+Tv/JyP4DcubEwPA4f7zPZZ8jkbGtKFBrYquuZHfnj19S+97umhuXL5cnqeMzNOOF9LS26Vcnp66LPDw+FzW1jAP/pomq+84s8zwYockL6pHpdc3biRjEqFgI0IjY3uscLPoRQ9DKbB5sUXndfcxzduDLeOZvIwsCJbU+M/Z/J9m54Odl42DjU3k2FoYsJJeD78cCekc66htdOfMilsfgqDmUtiH9fXR95kMarODq31ZamfCAJjw1ESCgN7GIopmJvJPF4T7969FM/Ir+c6X/868KEPecdiBoEFzvFxoK+vD7/5zW/QF0BaYatpdbUTl+unMPjFK3Ost+1hGB93rP6FCBligZIVhtmUBP3nP6kCh12lKFeFgWv4B21TfT2FaAD5s+SPjlK/6Ox0Elv37iVBxxb+h4dJ0JqZcQtB/NwaGoqvMHDFoYaG7NbPQng/liwhy7xfqJ6N6ZFlhWG2IWZ83Y2N7nmQd2cHgMFBY7zz4K2vf6nBAwP0bJctI8GRr6Ox0bHYhqW/n0INZ2bCF6WIx0nIaWigNoWdG7hfAN4Kw5IldJ2ZCjXMBi7I0djoHZLE85G9bp1/frpnr1gMDFCse0WFW3moqACOOgp49NFwhQUyKQytrdQf29r6fJUmczzbYzsepz0XzHWdFQZWxmIx6geHHkrzm9f4D7MORsXUFI2hbBEcfB+rqtIVhupqbw9Dfz/100KF6wqlR6TbKNkKQzGTHllh8AsF4aoJQHGrNxWKbdvoWv/8Z+DEE8N/ngXT0VGgoqICixYtQkWAXbhMD4NfSBLPt34KA1v86urcn+XnUlNT2JCkfJRX/NKXqO2XXQa85z30Hlt/amvDhyTF4+kJepl49lmnHGO+hF9ecJcvd4Sn7m46j/0sOSSposKtMOzYQVaq1tbil68MozCYMsHMjLMVwWzYvJmEz6AKg7n7bXs7cPzxs0/WNjcDs62MXBllctIY76ZLYnQUWtPc0NpK1vxYjJ5jba0jgPk918c
fB265hUKHbGIx4LDD6PXwsDN3BMHMl1m0KNz8zQoKP5PmZrdSHovRd7a35zck6Sc/oWfwgQ84995WGCYm6H56JT339ZFR6PLL3Rb+YtHVRWNoxQr3nNTd7Si1uYYkmfcgmQTKyqg/VlZW+M6Z5jphj6vHHgP++7/JQ/SHPzhtq693z/UzMxRi+b73UdUuu32ucVGi8LXX12c2yPB9bG319jCUl9OPrTA0NORv7d20Cbj5Zlorlcp+/HxBKfW+2Xxea315vtqSjUh7umkxK/bGVZOTTg6D13l7eoB16+j1fFAYXnyRFKBnn83t8yyYjI4CLS0teMtb3hLoc14Kg71wDA4C69eTNdALdqnaHgZ+LosXZ9xDKiemp6nt9iKS63fddhslhppCBt+bXD0MBxxAnosgDA8DK1fS63wZxFhhWL2aJnqtHYXBvlcjI/S+LQQNDACnn07XUexKRawwBMmfMu9ZPO5YImcDh0XaivTMDJXLPeUUUqYYc76sq8u62XIguAKZl4eBFYaZGWO8841qagLGxjA6Sv2blUHuA1zpJ5OH4dWvpnt5zjmOEMzwPg4A9ZcwYeJcDhkIX66b74HpYTAVBq6g1NFBuwPng+lpugcAKQzcL5qa0j0MtNMxPTNz3eJ5JR/9MhdYaVyyxG2QmJhwlK8wCoMZO2+XVZ2YoP54ySX+Rha/PUUACi/S2p13wUqa6QXn8N3OTpqn2FDw2GPAK14BnHNOCy68MNg6GBVmZa1MERx8H1ta3OsQKwxA+ho1OEjfm6/15GtfA66/HnjDGygMbGYG+PSnge9+Nz/fX8L8FgE2bfNBAyiawlAyIUnF3kDNzGHwmnT6+x0B1lwwLrwwvXTcXKC3lyY+r2oPQeCFiybSaYyOjmI6QEKEGZJUUUGTj12+cXycXL/33+/9HZy0Zeeb8ALU0ZFbrfdMmJEXs1UYdu6kdh9+OLlvub+ZAmBrK5UlDAoLrWwJY/79b+A//iNd+YjHSQCrqcnfBM8ewf32o2c0MkLvtbSk165PJule2grDgw/S/WhqKq7CwGUEOX8qmzBjW5nDsmkTcMYZ7ufCeQK2h+G664A3vQm4+GL3d5geBoCs3EFDkqangd/9Lv06zZ3GzXlwcpLeUwoYGzPGu+Vh4HvR0uKEyrDFsaGBnrffva2tJaulPSdNTtK94bCrsP2Cc48A6vNhSrPyMzBzGLwUhn33zV/elKn0kUDseBjsHAZ+Jnb8PluPcxlD8fjsvSWDg84ml6Yl29xTwq8fjI4C553nXv+XLCGDnX2d09NAZSX1x5qa6Zw8DN3d9HzN58fyACsMIyPOc+/ooDmW561du2juGBgIvg76sWtX4aptAc61Z8vT4vvY3Jw+D/gpDKOj1EfzVXmPxwHPB+eeC/z854UL/Ssxwu7B4LWPQ8GJVGHgzpfLTrdefO971MmCnptzGOzzau3EZivlXjC+8AUqJelXXjGfZRfzycCAE8+bywZZPNmPjQE9PT34/ve/j54AsS1mlSQgfeJi619bGy1cXm3z2w2Z28TJlvnEVBjYWpmrwsAJcwcfTL/ZMj8+Tt9dW0sL4WOPBfs+FnZbW9PLAv/qV1S94tJL3Z8ZHqZz5WvDL4AWivJyUhgAUtp6eugc5r3i59TQ4Laa7txJY+n444uvMJjPN6yHIRfDxqc/DdxwgzuHyNw80mwThz88+qj7O2yFYfHi4Iv1X/8KnHUWLcAmnJRaU+Ps8wA48+ORRwKJhDHeuZHNzS6FoanJURi4f/tVNmPKy6nf2zli/J2sMIS93+aGfGH7VVCFYfly6uP5yLvjsDzA8c6wh8FLYQDShTfun7kIb6edRhWuZpNEHYtlVhhqavxzGK64gtbuP/7Ree/FF2k+8cphWLSI+mNzc0+gHAa7/8RizkblXCV4asrtYTAVBjYc8lrV1UVyQTIZfB30YutW8q6+9705fRwAGYj+93/9w9BMhSHTODI9DOY9tT0M5v94H5t8GaA4H5G
rJfL3FiLcuMRYE+DnYABvhbOz8/0ANgDYt5gNLQkPQ6bQoDCcdx6V+QqCmcNgW2OHh2kha2vzry7gJTxedRUtmn7lQaNiYoImi+XL6XUuFlKe7CcngdbWVpx55ploZQkhA2ZIEpCe3G4KB2xds2EPg5/CsHJl/hOv+Lu5vGFdXe67AfN6cuCB5NI2FYZ4PHxIEk/8HLJhPs9nniFBg/e2YLi2/LHH5m/Pit5eGiMrVtDfW7aQq76z0zskgC3OPHYuv5zu7cc+lm5NLTS2wuC1mN52m+MhGxoKXs3IC+7X5r4ZLBjaxQD8qvrYY6m9Pfhizd/5zDPu99nDYFdBYUFh61ZgdNQY76OjJN2mYhz4mTU3O4o1C4z19f6eY60d4dYWcrl/tLfT77DrwvCwc0/DFtMwK1EBNJ/bCkNjI82lQDBDxfg45S75GWq2b3eEpa4ud0iSn8Jge8YHBpzSmWHvF++d4bcPThBYkdp/f3doKffxTKFp3F9MuXtqirqZl8IwOUn9sby81VdhGB938oy8FAYO3dq71y2HmAoDt5efNSsMsRi1rasr+DroxRVX0Jz5+9/nLnSfeSYlu599tvf/Tc9BphKz09PAq15F67CtMPB8Yz8LLks9MZGfil3sCeR70d9PRoX5rjBorbcH+Nmstb5Ba/0OAGcCeAWAnwIoao2qklAY/Cz9YTAHQhDBixUG1prNz3PHbW31d+V5WWmvuIImTq5FHgXj4+l5CjzRcQx7LuE7fA8o96MGBxxwAGp4VQVZ0b0UKzMkCUgP/+BJOVP4gRk24aUw7LsvKQz59O6YAiUwO4G2r4+ErzVrqI2sMJjWzOpq+l8Q9zS3jRUGcyF+7jkStExBZmbGcR/39bljd2dDXx+dixfUZ58l4aOtzb04mcpXc7NzH598kiqocCJnMfdh4XvoVa4XoFCp172OPDaAY5UEcvMwsJfJVGzZw2CHbOzaBRxxBHD77e7vsBWGxYuDCxo7d9JvO4zGT2FwG1SM8c7uvpSWZSoMHLpnKgx1dd7z+ugoCcleIXK2wpCLhyFXhcEMkwUy5zAAwQp1fPvblJvAuwbbdHU5ymhvLwlvtbXpCddsOAHSrb1mYngYL4M5Z87G0GV6dVgBAZw+nik0jJ+/mWuSSPhXSQKoP1ZV1fjKDKZyZT9/zrcA0hUGM98iHieBlZ81P4t4nHchT18Hw7BlC3DIIfQ6l3yYHTsoXOe442iu8NoniO9PU5NTZMOL4WHarLa1NXgOw+hofioIMrbCMDBA58zHd88ntNZ/AHAZgFcD+D/FPHfkIUnV1eTem63CwAvX8uXBJnEzh8EW1LiDtrZ6hyssWuRtLXn+efpfVKXtAKo4cvDB7kWD7w1bfkxl59JLgY9/PPv3xmLAySfTYhaPj+LRRx/FaGom7u8H1q4FPvzh9M95hSSZ99OMEwW87+vq1dR2L4WhvJySf485Jr/lbzlJ11QYgnoYenrc19jbS8JPezv1dX42psLA9yeIsssLICtZfA95r4AlS9xjgIX3hgYS5sPEdGeit5eeW3099Xu2nnM/44XYzgfh/tjTQ8oeECyPIAhaUzLtV76S+TgzTMorbIY9NKZVka2SYT0Mo6M0Rg45xHlWiQSNpZoamgPLy502LVpEyuXevW4hYDYeBtMIYmIq452dzvhyl7U0xjtLYilJnMdrU5PjYeBzNTb6hyTxffUqDcz9g3eKDrsuzEZhsD0MHR2k1HJfZsGY2xZEOGclcdMm7//v3QssXUqv2ZpaU5NeYcr2MJhzhelVCaMw8HNYvXp2oYqcN2IrWNzmTNWquL3m5xIJsuJXVbk3NEwmgdpa6o/19aO+fcNUrmzrdzzubHzY0+POJSsvd3ZFZuWQFWGe/+Nxuv+JhHsdDMuLL5LX+bWvzc2I88QT9Pstb6Fr9CqAwdfe3k5rpN8cayb72x4GDpczFQat6R57ebmDtNseC4kE5fgtXeqcn3eYLnYxjDnCH0D5Cx8o5kk
jVxh4Ym5ooJ1Uc00A4kVq9+5gnde0oPHfDE/Szc1uQYYtpgMD6QMvmaQJYPly/8H/yCMkzPz0p8GuKRc45ODhh533eKJjS7C5MHzkI8AvfpHdtW7WJu/vj+OWW25BPDWSOeb62mvTP2cLOX4eBl4wvYTyJ55wFh5bYairo5CYe+7JnISoNbl+gy6mY2POXgdAesUSP0ZHScj40Iec98yyk21tjuudBSkOSQLCKQwstHDfHxigzy9d6lYYzFr7YZNAMxGLOULi2rWUOFlR4SgM3E4zJMmsoPTcc06fzJfCsGMHcNddwDe/mfk4U2HwEir5/rEANzTkKAxhLd7bttFi2NTkWPpNq6ZSjidzepqshUuXOhtHMfZYsivSZCIWI+Hk1lvdVmXOYaisdCsopgc2kTDGOw/EVI3jxsk+HL1hBA0NzvGDg3RdlZX+CgPP0V6eu/Fx4IQTnP5dTA+DKTzy73vuoec/NUU/vPt7eXmw+88Kg9+x3d30vKuqnGPYw+CnMHh5GFiwDRNSz/PhxMTsNuDie84War6PpsLgp9zy++a1ZgpJqquj/lhbG/ftG2aCuK0wmJbxwcH03CAei6ww8BxnKgy1tYDW7nUwLHv30lw5MpKerxSE556jtnKZdK8KiPwcGhrIi+F3v8x80iAhSRyVwQpD0EpkfX3kPf3sZ93e1rExGmf77ee8z2GgojB4wiv8AcU8ackoDFVVJGiHKS1pYk42QZJTbYXBHCRm5Q9TkLHdcSZ79tBiu3Klt/A9OEghDk8/TbHbhYKVGtO97Odh4Fj29naqCZ8J0+Xd0rIUX/3qV7E0JeWzZXlmJv35ZQtJGh6mdrGHwWtyYLe2rTCwsLNmDf2dqf74ffcB73oX8NWvZr5OxhQogeAhSaywcX1vgPojL+ZmoipfC4ckAW5rmh9cVYpLTfJkzULusmVupcBWGPKV9Dww4CwYBx5I43f//Z0Flhcn28MwNET9tK/Pee752riRrWyVlZnvpa0weFVSAZy5gBWGI44Ib9R44QVSepcvd+69HfrCnsyeHhpHq1fT+6aCyxVLuEb5kiX0DIK0Z2iIlNVEwv382RJrK6ymh2F83BjvpodhchJv7Po1Htq98qV4cX6+3AfsvVMY7rNese29vZQczlW9ZpP0zP0qaKEH+7nwuI3FHIGxsZHi44Mq31TZx1+Q501C1651nk1dHc0VZplLnu+AdA/DyEh6laogsLAeJoHeC/ZwmIK42WYuveyV4MvHmusChyR5eRgmJqg/1tcvzagwcKidl8LAJX8HBrwVBs5haGqi98vLnfk/Hqfv3r7dvQ6GpbeX7vsBB7hzm5gbbshcIWjnTtqrpLWVctO8Qo3N/RX42r3gjRpra939yqySdMAB6XuA8PcGFeo59Or++8mAx/CzNz1UsZhrQ3nBzarU78qMR+WZklEY+Hcm9/PMDA2irVv9E+WAcB4GXijNScX0MJiCjNk2e+CxdWbtWm+F4U9/IivSpz5FwnkulYpsvGL2eeCZ1iIecIsW0TWZ1R4A+jtbuVWzVKGtFPAun0cfne5dsUOS6uvdlvrBQVpU2HrrpzB4JcbzYtTYSBNvJrcuxw9nU4wYO4chW1k6Zvt2J1SIrbWmwtDe7ggFswlJeuop+k6u9gE4iz9brViQ5PvNC3q+9hUZHHQWjLe+lcbkscemlwk17yUnkcZi1D6+V/nyMOzcSQJ1IpHZa2aHJNnPlp8RzwVDQzR+Nm0KXy1ryxbHE8bPyBZSWCHlMcmVp2yFgecrgBQGrYMpgENDTk6AKVCy59BWGPzq4LsUBoAuiDs3nJAznitqakjgs+c7M+zTHvMsKFRU5FZu2/Qw1NV5GzL8sEOSeF4aGnIrDNz2IArDk0+S99zPwt7dTYozK9EAXXdTEz0rc77gseXlYWhqoj4RpmJcPE5jdsmS2SkMpocBcG/0WVdHldAAMqTYivzQEAmj5rrNHoaqqvSyqrxPmp8yCjjJ1l4KA68bbDy
xxyKv+TxvK+X2MPOYGRnJfR3n6L7Fi6l87JYt6XmYZ5xBEQl+7N7t9POq6kyRUQAAIABJREFUKndUAWML9n5zLMtiXKiAMXMYurqcvmVWVQKCh+s+9xyd4/jj3bmetsKQSND9MYtkCIRSqhLAF1J/5lgoPzciT3rmQeoXb2hy+eU0iF75SoorTSaBBx7AS7uNMkEUBtaovTwMw8PAPvvQ/82kPXPhsicqDjVYt44mfTsB6aGHyCp18MH0PbMV2rZtI6vHLbe43+fBZQoFk5PAxo00+EyLHre5oyNzOM/kJF0PL5S9vf248sor0Z9ayTZtIsvyQw/RxGdz1FFOHKQteNtu32wKA8d+A85iBJCAlUnpYctz0GpKo6N0f/08I37s3Okcx5MrXyPgLjuYa0iSvX8D9yVe8LlqEY8DU9DhHZXzkSBuhum84Q3Aj35EO6ja1YT4d12dY5XltrIQy/1itu3atcv5Tu7fXnA/4ypJthXa9Czw75aW9L1AgvDgg8DLXuaO47aFFF4ouc9s2EC/TSGT5yyGvTNBLcrsYTQFQxZCbYWVQxFqaoDpaWO82wpDf79rt7CWFrdxgb/XFhL5/nrlg5kKtr0/hMnevcBBB6VXfjLj+YOsKyb2c/HzMHDbsykMVK/fnbtj091NHoaWFuf76uqcfszzhTnf7b+/Ox+FreZHHhluf4jeXlpD2VuVCxzPXl/vdAV+vty/3vEOCoebmkofl6wgmuuCmfScSDhjk/Zzof5YVdWP8XHvOYO7qZ+Hoa7OyRHJ5GHg5296mFlhaGnpx+WXO+tgGPhet7dTHx4aco9jNn5l2ohvzx4nlHf9em9j2Pg4KVh8Hdk8DBUV1A6+335JzzwHmpsrBoHzdY44wp3HwGtmW5t7rAU11M1llFKrAvysVkodrpT6CIBHQVWSNIDfZ/72/DKnPAw33EAC+WtfS533e9+jCgG/+Y2z6FRUZO+8WmcOSRocdIeLsGDH71VUeIckNTR4hxIAFC6z335OpaJMwkwQ7rqLft98s/t9vg+mlWlggKxc1dXuUn3swly1Kpg1liedRKIM1dXVKEvFIWzbRgtYW1t6ab54nNyqbBWyF4Z4nL63upomJlt44GfFCgPgPCvTRb9unbdbl5mZIUva6tVuwXDTJipNZ/e7kRH6bg7/CKowdHc7Cz2HIJgKg5moykoJL4xAMIXB9EyYm3f19VE77brhpseM924Ik6S2fTsJNLYgYl5XVRXtVtvRkf6cuBJnZSWdf3raUdw4+ZCf42w3b+R4cCBzLPfQEAkB5eWOFdpU8vn+8LWw8OC30aMf8Tgp9W95i+Nd0TpdSOH/7dpFY2X1arqnphBnexj4OQdRGPbsccavnRBvhsR5eRimpozxbm+J29/v8jC0tLjDY/z6dSxG197amj5fm4popmIYd9xBhoCf/5zmU63pGU5Ouj0cwOwVhqEh9zgCaN7MBo91L08KQAIwb6pphmPU1Tljg7/DnO96e93jkZ9jW1twLypfV00NWbqDKAyPPUb5JeZcODVF44eFcMDp56ZXZO1a+m1XY+IKZObYN5Oe+RwA3a/ycuqPtbW0/ng9Ww5jzeRh4LXQr/S3Ob+ZgisrDFqXobzcWQfDYIY9H3oovX78cSdviZ9tJlmmt9fJ8zn4YDLW2YZKv93kbXiNralxxhHgrzCYSdJczjcIHH63//4UBcHP1VQY4nF3jlMxq+dFxIsBfp4HKQq/AO3JoAA8BOD/FbOhkXsYzOQyfs8LrSmp50MfogXiueeAm26i/91wA0181dXBqtnwYOAKJUB60jMvCl4KQ02Nt8KwfLlj9bMr9uzYQQtMmEXe5MkngV//2rGus7BvC0W8KJkKALuLlXJ7GLq6aDLYZ5/MCgN/J0+elZWtePvb3/5S/elt20jA2bgxvXSgLeTYFgMz3tkrnpmfC09mgNvjw4sRWxp/8APvOvZ/+xtNVHfc4e4fN94IXH11el6JGdbA7Q4ycZmTOAv
yHC4AuD0MvPAolbuHwUx8jcXoebLVx4y5Bdyba4VRGP7yFxIy77jDeS+ZpPvP12XCY5nbaSbMc9tYaOD22HsR5EpPDykMlZWZq6XFYs4Y91JWBgfJM8Z9ieeETBZvLx55hI4/7TS61mSSrtHLwzA4SPPamjWkNNghL1NTzvEAzTVKBQtB4UTP2lr3nOGnMJg5DCMjxng3NwkA0jwMra00TljY9cvNYaXAa2M189lkUtB276bnvHkzza1//KM7wR7IzcNQUUGKJOAoHl4KQ3l55rwpwBnrfpX1+vpobVuyxK0wcNUqwF2Gmec7+76wEPyyl5ERJGieDffroPkYX/gChZKY4S+mAYO7wuCgW5EAnAIHZqz95CT1laYm99g3+x//DdB1zcy41x+v9Z71Wq/xyh4GXm/ssbhqFY0rUw4w53/25gwOtuKVr3TaEQZTYVi9ms55883kpe3ooMIHZWWZvcH9/c44O+wwkmtsZdHW7zNVSfIyoNoKAz8H854demjwvX2Gh8kDvno1XRfPXdyu9nYaDywfpfaHnO+E3dl5EMC3AZystc4x6zc3IvcwmGXiAP+J/YUXKOTk4INpYly7lkJgNm50NiPjmO5sCoNpUfDLYTBd4rbC4GX16uoiZcCv3J79f78SoF4T/cwMJUx/+MOULASQy7KtzX0erWlwrVvnWNwAd4gAJz5yGzo6aCLPtDcD309eDCYmZjA1NYWZmZmXrAGrV1PZ1bvvdg9wW2Gwy9SaVhwvhcHe3A9wC6K8GG3cSH9/7nOUgGsufhMTdP+5hKcZ5vHCC/TddiiVec+43UEmrsFBstYddpgjCJnKh+lhMN/3C93wYmyMBKWKCnp+LAAODJDAZlpF+TfHg9shA0Ho6qLFy8wRsZVIE1uxM6+T+z/n2LACYe9FkCvNzVS+1C4ta2Nasb02ZKuqorHCoUpcVjWsh2HHDmrT2rWZK7Ns2ED34oEHnNrsthBnJiAC1AeWLk2vbvPss24hnOeFhgZ3Dg2QrjBw/3PXwXfG+0umSLOGKkstoP44Pp6uMNiK8MQEjREvhcHMjcnkYdi1i66Jww03baLvetWrnGebyXP9xBPpu6ubYbIAKQUcR233+UybkTF8rxcv9vYw8Njt6KA28zG1tXTehgZnbjbnO7sf8v9e9jK6Vt73IxumwtDfnz0mn+/Nv/7lvGcqDLW11G8GB92hiPy7vt6tsMbjdH3ZPAzcf5JJoKKC+mNjI0nSXuu9WTbdfPbchevqnL5n7gfF5+Bqi9yPGhqc84yNUXuVmkE8nhoXITEVBqVobb/sMuCii+j9e+6hZzIz493HEgn6DvZmH3449VV7PwfTw6CUv2xk53TahgPA28NQU0MySNDKXL299H2sDPMczc+Ic9pYPmIDxDznPwP8vB+00/PhADq01l/WWs9yq+PwRO5hsHMYuOMkEhTicOONZI28807q8MccQ//n0J8TTiBtfPt2mgCCKAzmBOFVVtUUYs0cBtPDYC9A7GrjDm8nK8ZitChUV9NE4OVhuPBCmgDs8mjPPUcDsqrKqb3c00NtNM/DFp2TTiIrJS9WpsBWW+sMUo4nXL6cJki/xcJWGAYGuvHtb38b3d3dL4VWrVpF29xPTLgt0dk8DKZgnklh4AoO5numi/6EE4AvfpHaMD3tnjg5/GXdOvpt3rOeHveibLbLFIbNBSMTg4MkLL3wgvOd5v1nl2si4QhxfH1AcA8D34vOTuc8tsLAwgd7cZTKTWHo66P22Qs94FaqGC/Fjq+TPXC7drmfaTYLWFAefJDGwOLFmZOBTSu2naSttZOoPDJC709POyFJYXIYXniB7hGHYwHUR0zPGf/+4x/p9/veR+/ZCerm4s3su69bMdq1i4wq55/v/tz0NI2Vtja3wpwph4HLWlZWOuM9zcMQi7kUhs5O+izPg37J/Lt2OXud5Kow9PY65TorK2l8joyQ0YKfaSYPwxFHkKHBxAyTZbiNQ0OkOPOcE0RhYIV
v2TK3x5LhMbVkiVs44nG1YoWjEHKYJODvYTggVWQxU3imCc9z7e303LIZRXiuMedLU2FQylF0zU39GDu52vTa2AqDn4ehro76o9bU8TNV1rPHqynorllDz8Uei/y8zSpwtoehuRno7OzGddelxkVIeP7le/Nf/0Ve8hdeAN74RvrNoZVenh8zBwigZ79xI/D3v7uPM8vLZlrDTAWL/wbc67eZgG7exzAV7jgEmQ1HfOv42dsbIi4EhUFrfVmAnytSOz1v0loH9Ofkn5LzMPDg3rwZ+MlPgLe/nQb2+edTFR4eIF/6Eg2Qj36U/t65kwZ/rh4GW2HwCkkyS+7Zi1hTE+UosBXFFkoBZzB0dqZbP7V2cjEuvdT9Py6ttu++TmJvTw/dCzuBkY975hnHXW4mAZr3h5Wc5cudmE2tgc9/nsJQzPsBOIt4eXkL3vrWt6KlpeWlhWP5copLPPlkspIytsLQ1kbP0azgw4ujvekP4L7ntiBquugrK2lH1euvp+NMjwEvuOvX029TCOvtpWdte3zMSi9AOA9Da6s7Xt1WGAASckwBIGwOA98LLuM7M0Pn5kpYgLMYm67rXBWGujr/vR1s7Odke1iUIoV58WInRyRbjG2Ytra1uXM7vOCShkC6wjA+TgJKSwu9ZsUjlxyG/n7HCpjNwwDQ2H3zm53zmQKpPZYAmhtNAwO/NhVmM0zHDInja/YKSTLLWsbjznh/qRH84M1dsFLXMTPjhF76hSRxn2xupv5hhjQMDro9BH73u6+P+v+iRWRAeu65dC9AkNw4s222hwFwngMLkNxnwygMLS3eoZLcRxcvdoQjVtQAp4LOzIx7rwU/D8OyZaTUBK2UxGsD99FsGwFye01POI8bM+xwYMBtRWfscWmuLWbhAbNKEuD0y+lpQGvqjx0dLa7vMDGrJJlzqhklMDXlhAwCzj3nvWKGhtwhk8PDjreuqQmIxVqwYUNqXGThjDOAs85y/h4aomszE62vuIKqKZ51Fo0BtsJ7yTNe9/ass6ivmH3dNC5lko38FIYgHoawCkNTE439o45ye23KypyphNea9vZwZZGFwlJyHgbuiOzyTCTIdX3WWcAFFzifPekkGuzr15Mgfttt9F1BkmSyhSTZHgZbYTC9Dsw99zgTpr0Dq5fCYAuoPT10za95TbqV4NlnSSBfvdqx7PT3O1VXeDBxm3hfAg4hMa3l5qTR1UVt4sV9zx5SSC6+mKpaMLaHAajFIYccgtra2pfa09lJC2l7O1n4GFvIKS+n//O9NBUGrzha81nV1ZH11Eym5XvOlJVR+IcZPsMeBra+mcJyXx+d134edkhSQwOdN1usJluuOSadP8PfZVY+Mdsfdh8GU2FIJGiCZUGrspL+PzREC3tPj9P3zKovXtx2W7q3hZOpzc/YZWdNKivdGyaZilFFBVnO9u517gWQHw8DK01hFQZbWbGtd9w3OIchjIfBVNa8FAZ+7q9/PeXS/PjHzmdtgdTPw2D2dU6WNPupGRpi7vQ9M+N4f7xCEVhwHR11xrurWkRtLX0BSzYATjmFrKVnnum+PlsR5vnLLFvKBPUwxGJkpOntpTKN/f3pCoOfh8FMDs1UuhZwqumwMsqYe4r4wZsbNjbSWLTH98AAPVMu8zkx4ZTyBGjte+wxb0WIr8lMLi4ro35tG6Sefz49IRZw5jkeC9lCS3jHct4JHXBXewOc8Cbbig44hhSGn3tLi7vwgKmwAm4PQ1kZ9cclS2pd32Fiehj8FAb2GkxO0rrEhTlMr5epMIyOOt665mZgfLwWjY2pcZEBrSk/4Xe/c2/+ZodzHngg5Tq95jXAK14BfPCDzrE33QR897vOsV4Kw+teR0VHbr3VfR/M0s2ZQpKCKAz8HMx12QwzvvNO2mvDbx1jb3dFBRmFzZAkrqIHUD/kjU7NIhFCtESuMNhJzzyg9+yhDnTNNTRYvvc9GhBeHHgg/V6yJFjoiOmC9PMwsIBnKgd+CsPkJC1yLJT
ZccKmFQmg0ne2FYuTlU49lTwDpsLxr3/RNZoJrgMDdB5OPgWc3+3tNPBYkDCvx/YwcEgSQFapRx5xNHpeCM19HOg843jqqacwPj6Ovj5ncxuAvD5PPulYoOwF2I4XN5UZW9HizwOOjPLMM+7r5e8zWbHCbWHbtYvavngxLaj2guW1mZmZjA0Et4Czd4rDSXgiNZVJgK7TXDRyDUni0LwXX3R2lAacEo2HHUYhe9w3KypojHgtsjt30hg74wz3+wMD6Z+xN7YzUcot6NkJ5KtWuQV283tm42HgBMFFi9yCsReZFAaz5CHghA+2tqYLINkwwxqckL70HYWVIiXd7HO2x83cdZXZd18ax/x9rNyYz8q0AJv3xXyfhSU7h6GqCtDaGe8vSSBKOdIzTyCp6/nhD51+6BeStGcPGSq8PF5mWsS++7r0ERf2/iY8pgC3YH3MMenCsjneTeHay8PAz8H0FgGOEpCpP7AS76cQs+KklBOmYY6VU06hOYuNSOZ1mRWttHb6cUeH+5oGB0mx+shH0tvHCgM/wkxlp/nx28q4rTBwXpWXUGuH2fHz4ufN38WCqq0wrFkDdHZSf6yqok7vZfwwLeamsOmlMNjlis1xZ5d95jmipQWorR3H7t2pcZHiBz8gxcCkt9fpf1zswYxisOnoIC/9q15Ffw8P06ajX/yic9/tBHyAvFEHHeRWGMJ6GPg+8CWx4QBwexhshYHvy7nnAldeSX3Na1yY4amsiAOO15znjb4++j/LLaOjyrvh8xCl1JFKqfOUUlcopW5O/VyReu/IKNtWsgoDb2bztrc5deX94FCTxYv9PQxdXenuNL+QJFOIZaPa9LS7lJipMNgeBD8PAwsoZWW0k6nJ1q20aJx+OrWfy6YCpDCsX+8kck5P04TG32fuqgmQEL12rZP45uVhmJ6mhXjlSsfDsHs3taOpidrA18AKBy+kIyMxXH/99YjFYi/FzTNHHkn3hkOC7MnYDv+ww3Vswd2cmLxCkrws3MuXuxc+Fk44hp8XA62diIqBAXc1CrZUM0EUBv6+5mbH+miH7pgKgynwhNm4zVwE1qyhfUl273ZbZltaaHHavJn6PnudADqn1yLLcc+2t2VwMN3ancnDALgVhtZWJ38EcHa0NctS5sPDwIJwW1vmqi9aO+F45jXwNfGibJdnzSXp2Rx7lZXORnvj49QfbY+BiVdIkn08K4zsWeC222FHgHsfDMAdqsRtMRUGJ4fBGe8uCwB3KrNzWXh5zqanqb+uXOmuqgPQvR0bc5SswUH/MqHm+OFxbefW1NRQeJY9bs352RRgMykMACUVM3yOTAYqHpN+Sf2mQsnrBxdnAKgUdHu7U8XNy8NgPl8gXVnmynWmIMnwrtjt7fSd3/gGGY0Amg9N70l/PxkgbIXEVn7Zg75nj1Pti/FTGPge8L4KvDu23X/++U9n/Rkbi6GhwTtXyW8fBrOtjY3OBmqmk6C93VkL+JmwcsH9qLUVaGmJYefO1LhI3a/PfQ44+2z3WmLmK/I4TSazl+U19ybi8/Ic7aUwAOSZMBP5zWvLFEJnhyRNTjoeH34GQRSGoSHgxBOpv/7xj+5zJBL045UDZOZS1dU5e5fwGBsbm/8Kg1LqEKXUgwAeBvAtAO8G8PrUz7tT7z2slHpAKXVIFG0sGYWhvJwmCB7QPT2OxSUbvCvq4sXeHgatSWD89Kfpb9PDwOETmTwMgGNdKStL9zDwhMCC94YNbsv33r1O/DNACtCuXe5JZds2en/tWhKubr+d3k8kKPZ1/XrHc8Fu8KVLaYIwF1veaXf1arc1w1YY+vooN4IXikWLaILfvt25DnsfARZWyss78eUvfxmdnZ2uBQ9wFlTej8HPwzA25sT3871evJjOaS5SQaskmSxb5hZ6e3oc4dCcpMbHaWHiBcK2cprXFUSg5Z0/m5rSFQbTqlJRQYKdKfD4eRgSCeADH3DnZNhWoy1bSKgylbeWFqem/3e+Q2EijGnZMeH
FzN5UkK2kXgqDl4cBcC/Uzz7rvm8HHECf5z1JAOpb5eXFURi45CMrBLYwx0K3vdEZexjCKAy2d4WVYtNQ74dtEfTyMLDQwcUHeMzy3hKAe08Z05tm900z5MC08HZ3O+PdJVHzYOcC+x54KcJ3303jbt26dIXBfIZA9pAkHj9mngHvrwH4V9+LxSjpGXDHwPuFJMViNF+az8tM4+BrMAs+8HkyeRjicacf8ngwv6OsjHJaOMyT74uZzGsrDLZnivPevPY44RwGpci7+OSTlAu2Zw/Nixde6L6WTZtoLu3tTQ+F5Tlp+XJaR6680ikRzCxf7p6zOZafn+PYmGON9/Iw0P+c/ug3zjdsoPXE3hHb9jBwG0wlkY06lZWZFYa9ezuxbFlqXMAdLmyGdXL7ysqcdWnPnuxx+Rxixs+yrc0Z535V6jZsIKWKv9scrn7zvnmcucaazwFID0lSip5tfb2znu/cSUbeVavcG7MB6f3UXovNkDaOoHAUhkhF1YKjlDoZpCgcBad8ahJAd+onabx/DEhxeE2x2xnpUxgfd1sozRKmVrW+jLznPSQov+td3h4GDs254Qb6bQqhXAPfnFS4egfgFnB5UNkKA08OLGAkEk6iMkAKBS8KAAnzU1Nuy8P27bQfAkDbwbMHYutW+r6DDnKsM2wdW7qUrEfmrpoPPEBt3GcfRwC0Q5K0dv7H7TrpJJoEdu1yhBC2InF4Dk8ciYRCRUUFlFIuqzZAk9LKlcEUhqkpsrTwxL1iBR1vl5IE3B4G7iOdne5zM5xUzgITe6sAug4zjhRw/sfn1TpdYWhsJBdxpo3FzEncT2HgGGN22fNCWVZGk68d+3nvvVRu7+tfd96zLWJr15LlaWjIGTP770/XXVVFtdO5bwLeyeUAWX2bm+n+8HiYnnbCtjgpHnCuyy9816x/blecOuss4FvfovHKcBUPP4Vhetq9k3ciAfziF8Bvf+vE6/Pza22l9vKYteFFm5XImhq6/ywM9PSQsMD/5yo87LKfrcLAHoYsoc9pFYS8PAz8XNmj1tNDHqedO90JhYCjMIyM0P2zw+XYw8AbN7GFd3LSGe+uAf2jHzmuEh84nMEMCbrmGspFOu44xyjEig7PbdkUhslJ+jEVhmSSPm/OCV45agAJdTxHm0KUl4eBKzHt2OFWcrlP833m+HNTWOS5089DyXunAM5cbCr3AM073EYOHTLviy2I2Vb83btpjhgZSU8QNg02N9wAfPKTpOD/6U/0Hdde624rQPP0+LhzLbbCcNBB9L/ubgqjMamocJdkZY+sKajyHOiXw1BZ6fTHI4/0Frzvu4+OtRX8qSkaHzU1zrgcHnaPRfbcJxLuvVpMhaGxEaiuVpiYSI0LuEvZeikM7e3O3GOWbPVDKToPywnj4844j8epzaYyBpBsMTHhGDnMsF0/zzKQvjmq+Ry8qiTxXKSUk+fJ1f8WLyYZws6Fs73S5nxvzocdHU558oYGmivC5I3NNZRS7QCuAVAN2r35UgBHA6jXWi/TWi8DUAdSJn4FYDp17DVKqYBScn6IVGGwQ0rMSdAOdclERwftT7B6tbeHgavk9PbSOe2dHU1X29QU/fBk4qUw2IvYzp1O3XYgPbRGa5qkGDbImVUzduxwFIaNG8lKMDbmaOmHHurs0suTBodq8eJgTtwrV9JxWrtDVXiR4wpKLCwPD9O5du92Igx40mEPAy/+Y2ODuPrqqzE4OOiZvLVunSPc2WUKzYXTFqZXrCBB1wwnsnMY+Dqnpsiyzm0yWbaMvocnatNbZQph/Jv/x/cxFqPvNz1c9fVk5fOqyMGYbuLmZjqW+6KZQH3YYU6pXXPRMPshw54FU5HwUxgApw9qTYmJvAmRiZ9revdux7pmh7YsXkwKmJ0/4rfJqWkBtfvIvvtSlTMzTAlIL7lr8v3v0zNl5f+cc4DPfpa8huyN4+e3aJE7wdiGFWHu+0q5E/e2b6fxY25mzHHmuYQkmc+ey5p6CaY2jY3
UH8xdV23rd1UVPRvO2entTd+cz1YYABob3DfNvUBYiee/qbSlM96xdKlzQWVlWS/CVhhGRkjR++xn6VoaGqhdrDBwf+N+6KcwcNtNhQGgZ2uOKfZce3kY+PuzeRgW///svXmcXFWZ//853dVrdfW+JelsBFnCGggQlrAqWwCFQUVBQRQYEFRQEMf5jgMq8x0XlN8ovkABZWBkUOHrMCOGERTZomxKZAsJIWtv6X1fz++Pp5++5566VXVrv9153q9Xv7q76ta9p+49y7OfBrqv/f2ONxtwexgmJx0F5I9/dI5hgTiWwmCPjakpygExOeYY+s1FPbjdk5PuHDZTYTDH9/btvG+AY6FmzAp6AOXKbd0KvPQSrUcvvuici+8ZG5T4mdnhdevWkVfkm990EncZnn/N/LhIJNqQBMTe6bmoyOmPg4PRe+hwm7xCkoaG6PnYm5mZ3dh8xjy/1deTUZLnk3AYaGrqQW/vzLiAO2/OXP+7u+n+NDc7wr/pXY5HJOIoGfX1zudtIwyzbBnJGryGmgpDLEMREO1hGB111iKvKknmOGGFwRy7XvmIdj9lzwRfj6+9YAHd55YWavPf/gYMDBTGvEfzgM8DqAIwDmCd1vpKrfWLWuvZWmRa6ymt9Uta66sArAMwMfOZz+eyoYFVGLhEZLJ4JfaYAmhra3RJQ3Mg2KEWdkgSDyrT0rx5MwltPLmwwsCT4rPPusOPVqxwqgQw773nKAwrV9JnN28mF/GqVXTOmhr6fjzps8LAk4C5T8SSJfSddu92W555wWEliq2ovHnb7t3UDtMVyoteQQEtwOPjGlNTU9BauyxUzDHHOJOMLRiZXgJbYVi+nJQo05Jse4O4j9jWUZPGRrq3PNGaCa6RiCMg8LNmwZEXA1shM9sYL2TG9DBwab6xMargYodOvf46Xc9USrwUBhZuzcnXVhj23dcRpPk583PlGHcTc+GYmKAEtZdfpmfPn2Mhwc7P4bGVyEJuehhxD7pEAAAgAElEQVTsErWxOOooJ5TEhuNhf/976iM/+hHwve/Rc+Fwje5ux0VuxsDb2AoDX5sVq82bSanhZ24q3OmGJLHw6cfDYMfIj415K8jm5n2dndHPMJbCYFaoAZyQAxbuWWCbnHTGOzZtih9HZWErDBwmaSqLZ5zhtNGvwsBtZ6GJv4PtYQC8E9V7e+ncJ53ktlB77cPQ0OB8nvPlAPfzefZZamdNjXsPBBYMYyX12wqDlwJeXU1FP77zHec10xKcyMOwezfNZVzOmLFDQvn7HXYYKT9c6pcNW3xONiiZNfS51j9AY/CXv3QqZZlUVlJf4PvJ399UGBKFJBUWOv2xoSFaMJ2YIMXLT9IzQM/PfOYlJcA3vgH85jfOa0VFpGjwOItEgLIyY1zM3A9+LqbC0NdHx5t7UCSjMLDhy8wdiTWnNjdTO9m6byoMdr8wGRnxNsrx/QDcHkjTeFFeTv26u5vm0bo6Z54zsT0MZu4Dl8EFSBbp76fvwuN6YGBe5zCsA3kWfqC1Xp/oYK31EwD+DRSetC7LbXORV4WBNxNizFAfO9TFL5EIncOsE93e7gyaRAqDLYiGwySwDw3R8atXu9vZ309hQAcf7FyPd1MeHCQr78svOyU9ARp4Bx5IygBAg2/nTmciXrGCkoffe4+OYTd0bS1NbjzpNzdTm8wcBoAmLVY+/vY357N8fwAn/ITvweLFtJj39DjJiKbCwItacTEwPV2Lj3/846itrY2yUPH95KoetsJgKmBeFYRqatwWI9sbxAJEvKRbFphaW6mPcTwk4A5J4nPw8fx9WZkyFQY/CY5eHoY9e0iYMBeHhQuda5hCq5fCsGULWVzsJEPbIsZ9gPsKfyev8HLz2T71FO37cdVVtMjw53t7qQ//5CfUL/l1M8zFK3+E4YV6asqpW56IHTu868drTQpgYyN5wf7nf6gfXnIJjUfezJDnDKXiexiGhykEz1xwd+1yFOYNGyg3iPuWWX0qGYWBy2i
a94mFBj8KA+8Qzf3Ky8MAOILE1BQJKhymxM+Y+3lpKc1NJ55I98Uue8khB2ZISEkJ0N4+M96rq+lLxcvUtrAVBhbuzApZ/f3OM9yzh5RGr7LWJnYMNwsWnZ3uSkb8vb08DFVVpByaoRN2kQbA6fvFxW5Fx5wTfvtbsqyvWOG24rNgGCvp2a8yfeONFC7ExFMY7NCT9nYaw9PT7twujjs35++lS8m7MDxMIWOAY4zo7aV7w0YIPpefvszw8+I+zZZyLw9DrJCkUMhZf7wEU3MdtJ+9+R5/b3NfG+arXwXOOsv5n58R91+qkFWL4WFqB9+Ppia6phlWy+snh6IC/kKS+Drm5n+8DtieS4b3tuFnY+b52YZMEy5rWlRE42901P0cAGdcsMJnexi6u8kjVV1N7bDHrV1NixUNwO1hOPRQ+n3IIU4ocn//vM5h4KoR/5XEZ/jYfeIelWHy/hRshcGshe5nQNk4Zbic18x8iLY275AkHhy21TscpsVscJAE0M2b3Vav//N/6P9rrnGuZ4Z13HIL/f2hD7nbeeyxTp7C1q2klLBSwRbyd98lhYFzC/l+tLU5i6rp+jM9DMuW0eLFXgz+/jxh8x4MzJIlzmTErkAWtkyF4cgj3VZOLw/DwoU0MU5MxPYweCkMSpEiZXpe7GfFfSSewsCxwK2t9OzXrHEEaNPDwNdvbCTLHn/ft96i6xjVIpP2MHBd8c5O+s7mPWtpcYSKRApDa2v07rz2Am1aPbnNPOma7nWmutoR/DmMYuFCd/5KXx/Fmt9xB/VPVjySVRhYcPGj/MfKYdi1i15fsYKeza9/TZU4wmGyhrLibYYx1tSQYOx1vu3b6TymoZyFrC99iYSrq6/mCkHuDZySURjsDa0A8lwsWxat9HkRDpOwxn09kcLQ2Ul9jgU6s3paaSnNGfX1ZIFsb6fvFQo5z5EtiLaHga2KURKED/woDPvt52w4x3sdsKU9lofBVhj4+dilTwHvZ9bX5yQjm0YAr5CklStp/jjoIHfMuGmhfuIJmke4oIXZznghSbEEv0SYnlp7LuT99fg7t7VRuyor3R4Gr3BJNjS9/TbNIaZHmxXy2lq6D2Zsfby5wMTO+2CFybw/ZkiS107P5lzqpTCYXoRYHgYzh4GNgfHge8Seg3AY+OPBV+PGou/PHtPVRe2xc49YKeIqh3ysnwiKww+nvnriiTS3mwqDlxEmFHLnSpjzdF2dk5NmY64pPOa85CSAnoWZT8UKg2mAKCuL9vyYxRcAd0iSef3LLqPStCwzUYhd3kXVbMK9L5mi4nysx4qQPfL+FEzrBruoJibodzoKg7kI8EAuKCChQmsSznnyN5N5bCHWFBS5U5shSc89R0LZySc71+MF65VXgF/9ihLIbMHtnHNIaNmwgfIVXnnFOUYpmrjffJMmiENmCmjx/ejooL+VgsslOzpK36WggAZZf79TYo0XAlPpMBOx+X2AhMZjjnEmZnNPgrffBrq7W3HLLbegtbU1KuQCoMVVa5rIbYWhpITa7bXQAST8mklxo6P0nDhUhSeZePsAlJXRM9i+nZ79Cy8439uMkzcT2My4/nfeIQu0GR5QWEjn9eNh4PMBdA/siX3ffZ2Fx+wXXgoD5xWYk7ytMBx0ELWvsdGZ1E87jQT+a6+Nbqe5Q/gbb1BuwGOPUb8yN7fj53D55c7i5ldh4EWHhVY/CkOsOuHsJTviCFLet21zdng//HDqy+3tbsNATY07hMDEK9yxqoo+/+ijtA/FggXUT7lf8Jg2czMSYVt++e/f/jY6dtwLO4zFK+kZcEKSWIDjPmV6GLgNnIvB+VwHHOAoTl4KQ0kJ0NzciltvvQWtLDl6aS0xsBWGri4aV6bH7bjjaK5ra6O5zBT4y8tJyLHHhV1W0tzPwVYYSkqinxlb/u0iGV65JfX1ND7uvtv9ekGBsz/JW2/RPL14seM95DLLlZU0PmkTPPc5vDaf9INpeDE35gPcmzOOjlL7mpqcYhA
MjzVzfioqcvrdMce4vw8b8QoK6HypeBhsw4vtYTAVhlghSQUFzvrT0EB9ygz5NRUGW+EcGXGs6NyW6Wm3ccgLlit6euh5FRQAGNqCO4v70Doz8HiTRrtPcR/gccrhzX6KunR3k2ywaZOz4SWfM5aiaYYomvN0vN28vRQG2z5gVjyzPQzs3eR75TWX25EdsUKSysupIAav+QsWAF1deRdVswn7/VYl8Rk+tj3uURkm70/BXLi5A7F2norCYFp9GLY61dQ4sbs7djgLZbwcBi+FgUOSOM/ADDcCnIHJlk9OWjM5+2zgggsoLvXll0nwNzcoWrrUsbSvXEm/eSEwK/iYm+jYVstDDyUBq6jICVMwXffm9Xjzu9pauvbu3U7VBzskaWKiCueeey6qqqowPh49cZm7GZuuRsCdh+ClMJj7RwDeVZbMhOlYi+2SJRTSxZMjt8mcyMzrm2E6r73m3ffi1bEGnFjVwkLnWXV1RSsMbP0/4oj4Sc9czWrRIpq8ecK172lJCT1nc/dVpai8nV1FA3AqHk1M0HmPPZZeb2oioScUchK2L7oIuOeeaEU8kZDAgjUrDH7GspeH4YEHKH67spLGy8QELZrnnEPvHzmzjc1LL7nHBccve4Uk2RWwAHpeO3fSvT3tNPfrAwPucpbJehhMhcGsyJVIYbDDWLx2egacDR3Zsr1sGV3TTHrmc7GXoaODlFEzdDNWDkNfXxXOOONcVJnBzD7hRZ/PyYKOqYyfeSb9/9hj9D/HzgPeHmMgWmHgzed6e91eOyC2h4EVhkQeBoBC31avjn49EiEhfGSE5q6lS0nA5t2Xp6ac8e/Vv1NVGMzQTr43PB5ZOe/udhSEBQvcQj4QuzTniy9SzlBDg5PbBjjllfl8pochWYXB9jAUFdEPGwwB6mZFRU75W4A9DM76wyWxzXFuehH42ZtlRrmt3Jbnn088prkfdnc7BUyqAJzb1oaqmU7IBgvba8XfsanJ/Uz8eBj4WdbV0bPo7KQ+GsvDADjzgbkDOL8ORHtk7ONshcH2MHDYIv/P/bery1n/ODTc3HHerqZlhhvaa5rJ4sVAe/u8Tnp+BpSPcLNSKmHwrlIqAuDLoLyHZxIcnlECqTB4bSnvF9vlCTgWRa5B7mX1ZkHNXuT5t+1hGBmhiaCvL3oDFna3b95Mk4fpfmcKC0mL/tWvKMRi7Vp3iMSyZSQsFhY6cbMsiHZ1OROJ6QK3FYbzziOPwDXXOIJjKOTERZoKA3sbFi50wp34Hppuc7JCluOII45AWVn5bPKgCQtXXvcacDw0XgrD8uX0vHgxs2OKbQ9DLCvLAQeQ1dIOgTAX7elpJ0HbjPvdutU7lCde0hjgDqMzQyS8FIYPfxi49FL363bJzk2b6H8OMTGT2+17fsgh/vct4efT2kqlFFfN2Cra20lYY5f62287k7ytMHjF/Zqk4mGwF9rBQeATnyBl4NxzaYx8/OMkWHKfWLaMLNQvvhitCLCBwMarAlttLd2PoiLHowdQvxgejl/mc/366OozQHyFobc3saBoW2NjKQxLl1KbN2+m9zlZkO+9vV8JC3u2l9HOYWCFYXi4HAcccATKWfpPQmFQis7DQqBXCE59PY3DJ58kI4lpLY4VCsj1+3lu4Z2SJyejrcVez8xMRvajMMSiqsoRqFta6Fnwppe2QG5XAeOKVH7DeUxMizw/X7OiD0Bzj5nvxhuqMbEUhgMOcHZ7X7TIySsyPXipKgz87E0PA7/G6z+vxbxH0saNzvGkMND6U15ePjuvm0KwHZIEOOc021pYGB1JEAue15980ilJXh4K4Yi2NpTPPIzmZlqrbQ8DC/ctLZQXwp5b2xPmBc9T9fVOv969O7GHoa2N5p/paef58tzTbtmkOeTQFuTtKkmmt8eci7gvmmud/ZyBaIXBHJfxQjT33Rd47z0Py9f84a6Z38sB/FEp5WGaIGbeexoAZyfeFevYbDBvFQYzhpBjL3kRtSc4M4fBFmILCuh
vrk1vJktxaVJz0QWo41dVkaVp+fLYRUXOO48mmXffddekB8hr0dpKk7e5PtfVOTWKARKY2CJvf6+rrwbuvZdKGJrU1tI9NpN6OUyB8yo41l9rtzWDFIYRvPnmm+jvH8H0dPSCZ7o+vZIIbQ+D+Xn2hPBiZFctYQ+DV/ytyeGHU4Jsa6sTogU4CoPWNMG98QZ99yOPpHOPjdFk67ULZyKFgeOiAadfc0y2iVLAww/TjqAmdjWX736XXjv+eBKWvTa5SQV+PnffTf3+9NPpb646xAoDu9iBaGu335Akr5j1WNgWWLNM5X33kQDxox+5PQCc4PynP0XHBcfa1Ml8TgzncFxwgTtGuqaG+jD3S7OcJUD378wzgS9/Ofo6XgqDWcEolZAkL2G2poYqRf3tbzQfFBS4v7v9rNjIsGuX22hA3sPoKjWlpSN46603McKTajISNdwKg1cII0D3cGKCLKOmhyCewmCvD6YRxSSewmB7GAoKkhtbVVVO+MfChY6n9o03aAytXu30Sbt/e+W4+MWM+befL4/vn/yECgQAtEaZQj4QW2EwWbjQURjM+cCs2JNMDoNXSFIshYHnfXsH8sJCWn9GRkaiykAD7vtqlgnltprP17x2onYXFtLawP1spKICb5aVYWSmcz37LFnU7T7Ffb6xkUKYWWEw98aJBX+/hQudPLLNm+lZxgqj4mdjr5G1tVSxr6+P5tPbbnP6D5BYYTA9DGZ4pFkcwlSOAfd94GfA5zE9f/E8DEcdBbS1zV+FQWv9HIA7QV6GQwD8SSn1mlLqx0qpbyqlvjHz92sA/gTgsJmP3jnz2ZyRd4XBnKx4wrDL/aVyPlNh4ConHFKSjIcBcD7Hkw0PEC4RaC66DNdc9ipryRQXk7X0xReBv/s793urV9Pkc+657tfr6+m7sTX50ENpsnz33eiJu6gI+OQno4Xf5maagO2Ftb7euec86Q0NkXBtehimpnrx8MMPo62tN+peAXS/lHKUM/t9npA42cwspcmTKFvtbCGJ+wgvOLEWqhNPpONeeonuFV+jooKEvbEx90Lb2UkTuenCt0nFw+AV/hILsx9yhaKvfpXa8swzTp9OV2Hge/yDHwCf+hSFvF1xBfCFL9DrXgoDx2ubIUl+FIauLmezw0TYC+3bb9N1V670LifKsHJo7w7PO6PbeBVU4MXXriDC95nHkL1zMJcA/u1vo69jL8SAIwwPDiYWUrhUZSIPAz/P1193BFbzu9seBjYyvP22u+oPexjskKSaml6sX/8wernzJ+Fh4PPE8zAAZJl/5BFK8jaVy1gKg9cz3HdfJ6zOJBmFoacnua/HuS9K0T3ff38ar88/T2186SXn+9oKQ7zCDYkw74sd1sTzzf33U3nQ8nJqWyyFIV7S9aJF9Jnpafd8YOZDpBOSZPYHnl/sZFtTYZicBJSi9ae3t3e2r5hx+aYhyiwT6tVWW8CNhVl5bXafk8pKPLz//uidsXByJIMZmw84CgOvt1yZ2A6d84JzCyMR+ru4mDznf/5z7D1w+NnYCmFhIY37rVupb3z1qyR7xAoVshW3WCFJPLf09jrP0gyZY9iLwAbUsjKScSYm4vchr5Duech1AL4DCjNSAA4GcDmAmwF8Zebvg2fe0wC+DeBznmfKInlVGA4+2C0schiMHZ+aDOZmOsyKFTRYbcGfsXMYOJ6S4XJk/DmeXDgUyCsUZNkyEkL3SVD0avVqWuRtL8QJJ9AGR5+3tuWor6c28uRz2mnA0UdTlQ6/Eze334wVtuGF1LZSFBcDIyNNuOmmm1BeTjOeLQxyUmN3d7SwArg9DPZiacdZ2h4K08NQURF70ly9mizn997rVuhMq60p9HLfYGuh1zNtbiav0bPPel+Tk+sB6j+RiHsPjESY/XDHDhJejz7aHQ7EE2wmFIa+PiqnasN7SNjJwaZw5WcfBvYw1NX5K91vC29bt1I/TyTAHXwwCTVdXe7n1tBAC+ePf+wW1MwdtplTTqHfHIr
BrFpFz4Xj122LJVtfe3qihVovC3JREbXLq7qYDW8oZ1ZJ8roXrMxw4i3gzm0aGnJf66CD6NiODiccDYid9Nze3oSPfOQmNPGXT1Nh8BLOeC4aHPTe/8TeMNGr7Pajj5IF2O6X8RQGM/QS8LehngmHMtbXU7hnKEReqrvucowL3NdiKQyphCTZG2Ca99SeExcvpr7EG2Jx3+UyqfG+78KFJKR3dLhL1i5fTmsbx7/7nY+4eMTgICkh5jjgUtS2JZr75fQ0Xa+0lNafpqam2TLKHR208fijj7rHXWkpPWN7w0mGhWk/ShvPhfy7qbAQN61fj6amplkDVE2Nu9oj4ChFnN/w1lv0TOIZQRgez6eeSn3rjDNI7ohXRZKfM49/c+wvXEjGuF27qJ+8+WZ0eK+tMNipS6ww2B4GM9eRXzNLq9prhml8iTfuliwBios9asHOIzRxE4DDAfwIwDsg5cD8eWfmvcO11l/W2qtAbnbJq8Jw333u/9mymU5IUlERxTRzyMDkJMUdcpy6Hw+DPYGze58nRh4Mra00iL0G7pIlNGmwxS9ZlCJhzvZeLFpE7WALeCRCk8DDD/tXGL7xDQrziNc2jsMcGKCkM34WFL5VgLKyMoyOUvfxmmyrq0mAm5qKrTB43WveUZonOzskyWxXPIGrsNA5t+nlieXK577H1/UKoVm2jBLZ1651by7HdHW5+8KCBd5JmLEw+yG3o7nZrTDY1qBUMOU9r0ROVhjsRckU6P3mMNhVb+LB5+dpcNeuaC+YF1wUAHDf6/p6qnh15ZXAv/yL87pXOMuKFbTI2ptN/eM/UswyH29WpwHcVZjsPAYvbyVAY3hoyF+YFgsf09MkdHvJ6vyMuPwi4FYYbIHyAx9wNrd7//ud12OVVZ2eLoBSZSgwt4BOAlNhsDfrZMy5yPSIct95+WXHowu498ZgYimlyXgYks1hqKmhz5v97qyzqK284ZlZoc0rRCOVsVxcTGsPKwz2XHjbbbRvw6mnAjfdRK/xmsF5DF730IYt3Fu20PxmrjtPP033sbExvifdhhUns0od4MzBsTwMTr+k9aegoGA2if+994DrrydlzVTEqIa/dw4D4A4XSwR/d/ZGFpSVoayvDwUFBa5cLbNcKOCMP6UoxPidd9ylsONx4IH0nD76Ufp/yRLgoYfoXsRSGBYvpt+8f4bZNxYtonlq925q69at0UbBWAqDXVbVTno28yrMfTWYeCXW48kuFDKcxG6Zcxit9d+01p/VWu8PKre6YOanVGu9/8x7f8tX+/KqMNjCsCmo2LXrk2HLFsf1yspHbW1sD4NZVtVLiOU6+Cyk8uTS3U2D08vKffXVNFHbIUXpwpOVOeGsXEkCrF+FYc0aEnq9KugwPKkPDFAsOU8KFJLUg0ceeQSdnTRLel2zpsYRWOIpDLbwwK59s1SsuYDzAm/vkOoFx7p7KQyDg+5nzX3DrqpksmaN8/e//zu5hU1Mlz1AC8zIiLtkbTzMmE7Ty+alMKRilTT5y19ICPPaWbm6mvr29LRboEjVw5CMwjA97XxHOyk3Fvvv74xB3vwQoMWVhaM//IF+T07Ss/dabL1e4yRixg5x6Ox0XvOrMCxYQO3wozCw8MHCUixh9pJL6Df3UTPB1VYY3vc+Elr+8hf3+ItVJam6ugfPPfcIerhTJpnDsGKFM5fHyn2xSyszkQi14eab3a8ns7GnWQYbcCqOmQqD1vTjtdNzPBob6f6a8eiHzUQY814ffO/tZNh0lX+eo73yQr7yFQo5fPJJKosMRId7mlWPYsFz11//Sr95PLInr6PDbaVOpt12WVeeg71yGMbGzIpetP70zEjpTU3OPh6As+6UlkYr+Hb/42v4GYt8/9iI0VNejkeOPBI9PT2z+UK1tdGbDe63n3O/1q6l+++1C3YsTOPGEUc480ysZ8cKNyus5vzb0kLK7NAQfZ8tW6JD09hI4bXBLRDbw2DmOnopDPaaYe4lksizNzk5r3d69kR
rPa61bp/5Gc93e4AA5DCYVFXRpLBnT2q7PDNmiUzW/FlhYKHL9jBwjKTXgsa1qHlAmCFJsazcq1YB//qv/hKbkuGLX6SqSied5Ly2fDm1xSv8J1V4cbMnk+JiYHJyGv39/RgaonImXhZDs0JNvJAkr/auWuVM8LbFj610vCFSPC65hLw011/vvGZ6GLxCknp63AuNyfvfTxaZY44Bbr2VfrNwBUTvMsvCq18vUyyFgfM8+vtjC6HJcthh7nKFJhxOBsT3MMRrQyTi7BGSjMIAOH3Oq+KNFyUlJAQXFbl3tt53XxrXp5ziWCnTyY8ComNzeRdxpbwVhsLCaMMHzz3ve1/i69mJoLGM+/fc45T1BZzNE1lBsgXKhgbHEsnE8jAUFk5jeLgf04kaEYPWVn/5N7/4BRlaTA+DUs7czHsKAN6VrmLBZbAZc2xVVpKS6pVs64emJjq3aZRYtIi+47Zt1EZWZs1NI4H0FQYej7ESyW3MDS0Zr2pwJjU1ZATh/Xy4z7CA3dGRfGlY0+gDOEImGwxjeRhYYVCK1p/pmXJazc00L/PY3LWL7mlBQbTgavc/LvvpxzDB35kVwunSUvSXlWF6ejpKYWAvx9QUhbByH/jKV4Cvf52q5KWCmZ8Ta15dupTG7qZN0REQS5Y4+2rssw8ZGm0PA1fzMqtVAbFDksyKSHwtPx4G/ntggMZgvHFQUzO/Q5LmCoFSGHjiaG9PT2Ew6+Vz4l9NjePytIWdRCFJy5eTy5M9DNy2zk7/pSwzRUUFVVcyvRoLFzqbBKUTqmJfR2vH4m4qDIODdbjsssugFJnTvQRHPwpDrPCEoSEnP8SunmAqMok8DNXVlAdiCiB2SBKfm2OZEwkiy5ZRmArDm4oND1OfMxXEm26iClVeYT9eeCkMkQgJTfy9MxGSlAhz/JiLjRnv7WcfhnffpTHiN4fDLon81lv+54GNGx1BgTnzTKo+duWV5GF4+eXMKQy8EHJYx8KF7pAZPqa8PDpU5qabaAzzDu6JrmduZhXLuF9c7B4PCxfSItze7l+gjKUwdHXVYdWqy1DHLskkXb9FRY6wF0/RvPBC4M47o183c63efJN+2968eNi5A6agahbJsAVVP7AF3vTU8cabra1uoc7Ol0h3LPN65vf51tbSs2DP05Yt7hK2XvB+Ln/7GwnMnJNnljNNZDyw4edhKwycqD86SvfTrOJr9suKClp/6mY6wLJlZLVnD1Rbm7NeJfIwXHwx/U6kOAFkJPq3f6OQZwCoC4dx2UMPoa6uLqaHwQ67amgA/uEf/D0vL0wvWyxjCufRbN5M+V3m/LN8udOmww+nOZaNql7VqnijVSC6SpIZqlRcTJ9hI14sD4NXSBLPyfEU9draqdhvCjkjUAoDxxN2dKTvYbAVhvp6mpjYhWu74uMpDAcfTJPYyIizi2FhIQnTfuPTswkrLX19mfMw8ITmpTDEqyjFmHXgvaokxcphAJz9MoBoq0Qk4pTeTaQwxPpeK1fSee2QpKkpWgAT9b1166jCDEBC4l//6ljgzEn8wAOpNKqfhF/ArTD09zul/LjduVIYzNrrplBmWkirq+ML3Wb/8eth4MWmr88pe+tXKORkYpNIBPiP/yCBp6qK9ktIJz8K8FYYqqocax1AJV5//evY/XvNGnrfj9zNIUnJGvc5bGLnTn+7SvO5ucY64CQ9AzPCmllvNQlCIUdhSKXC1zPPUJnSxYsdpcwuoRsPO0/B9DCYfc4OwfDDaafRJoJcYYxZujTa45hpDwMrDDxXJEIpEox5XuYNTRPR0kLhlw0NTlvNHcOT9WzzfbAVBt5wzPYqeymyJqtXk6Fn9WqnTXxOW2Gw+9/HPkbP3Y8ns6oKuPZaw1hnuK54neQcBhbKWVFNVUGwMSMavKozMjU1FEps9y1WjAoKKNemoIA8IKZXndca+1gGGQUAACAASURBVDmYOQx2AYZIhOYpXhP4XGbyt23843HG/SDeOKiuTqDZCjkhUAo
DCwehkLMbbiqYwirHM7LCACRWGOyOa4ZucMlQtqD7cWVmG1ZaEiUCJ4Mp8Jmu3ZISoKioDbfddhu6u8lUFcvDwMKZLdizh6GhwV3WkTEVBtsqwQt8T09qCkM47NRIt0OSAH/KKi+8JSXkeTr8cCfZ1G++ghemwmB7X1joyVRIUjzMc5tCWWMjLdpak3UyUQ4M4N7sKRGmtZeFZL+fjQdXF3n8ccfrlapBwvRQAc6eDvvt5yiRH/0o8KEP0XvpKnZ2SJJf6zcrDNu3eyd5e2GXVeV9GJqb2/Dss7ehjSXtFBQGM+k52b5bU0PK9/g4WURZ2ffbN5JVGJLxMJSXU2lsex5bvpzmP9MDzQI+k07SM+BUFfITnsmEw26ly889vOIKCr+8+WbntcJC+ix7GJIJSeL7MDREgjrPFU1NTshpPIVhfJzWn7YZV8kJJ9Drl1xCberujo6lZ4XBK3wqyZScWdoKC3HbjTeibefOWUNTcTE9z7Ex8t5wv8uUwgDQXjS33RZ//r3wQvptV0JcuZKe3cqVVIHvzDNpr47mZsewxQqDnT8YKyQJoDE2Pu70Qw7FNEMBY4Uk+VEYqqpEYQgCgVIYWDh5/fXoqhbJYNbL37PHSZyrrSXBbtcu96SRKOm5psapBcwL8Ukn0cTktcFXrmErkZ9SjX7h8/DujaZbcmAggtNOOw0TE3SQ10Cvq4ttzWXDzBtveO9rUFvrvG5bJdiCkYz12SRWSBK30dxFOx4FBaQscogEQP0oHYXBrOZi98NcehhMq6P57IqLKcRlbIyUhniCHy+QjY3+EgoBZ5Hv63ME+0woDADln7z0kpNn4LdNNrE8DAcdRDHDU1OOQNbamr6gwOENiUKSbGprHYV2fNxfCFaskKSBgQgWLjwNES5flWMPA7N8OcWqm15jP1RUuBUhr5CkVBWGWKxeTcKiOR9UVTnhHIAzlpPxaJhwzH8yCkNTk1PZy+8c2tQEbNhAu66bsEdgdDQ5JZAVnT17aC3mvtncTKFF7e3ucWMrsuXltP5EZhapgw4i4XztWlqfTYXB9jAkm28Rj0hFBU578klEQiHXHjCmkpJpDwMA/P3fUy5EPE47jUqt3367+/WSEqqy9NprtKZ/9KM0T5l5dqaRwuybpsLglVsIuBVkuzqZbfzjv1mBj+9hkByGIBAohaGujlz7mzalvqAD7l1OzYEcDlM50fZ27+ogAA12L1cfuyE5uY13azSrsuQLLvU6PJy5iYknXNuSX1oKDAyEccwxx2BsLIySEu9KOw0NjhXWFlbMjdtiJUybCoM5ybAwn0yVFJPCQnredpUkbmMyyZScbLdkCQkIJ57ofS/8YnsYzEXY9jBkU2Ewx575fXih9+Pl4Oe6Y4d/od+09qbrCbBZs4aSjF97jQThVMeJ7WrnQggHH0yLKCsLtbU096SrwHMOQ7IeBqVIeOLd6P0IlOxptRWGoaEwqquPQZgD3lMoq2oqDKkKyMuW0f2NV8nMC7PKGOD2MPC4N+v/Z2JsHXww/eYEWcBd/hZwLLix9pJJBHvS+/r8e1tZyOeN2NJZZxsbndDFZPo5exh6euhzbCln5cprfTZ3IA+Haf0JGwexQau2lryf3B62dPOc5WfDRL+EKytxzJ/+hDAQU2GwK0Hlkg98wPu5XHihc7/OPpt+23ufjI/TvTLHQkEB3Ut74zbA6dtmxIWtMNjGP/6b71H8pGfJYQgCgVIYCgudSTtTCkN7uxOyYy6asZKet283y7c5PPYYhTTwpHDqqfT76KNTb2cmqa+ne5dpD4NprQFosZ+cHMPmzZsxODgWc5DzYl5UFC0gmApDrByGnh5a1GyFwVzg/cYw27AFxbR2zu7e6aM2OXPYYWQRa22lmPSHH06tPYwfhSGd3WH9EmuzQU6G9rPhlNkP/Y7l4mI6Z0+Pu7pZJli5ksb26687VY1SobCQnhPfA649vv/+JNC88gq93t1NfSldBT7VHAaAwj22b6fv7qdPx/I
wlJSMoadnM8bssik+MUOSbKEhGbjCCysMfhVRnovYM9HXR3N+SYlTVKC3N7Uchlgccwwpy5dd5rzGcxd7Xu2Qj2Th0M2WFv/jpKGBwoi4bHI662xTk1NxKZmcILMindnupUudTdjieRiUovVntj8asMBqfi+zSlYmKwmOFRdj84oVGOvtxa5dTsELs1yoXYEoaFRWUpL4D37gvMZt9QqpZFlpbMw9Fx1wAE0LZuU1uzpZLA8D36N4z6W+XhSGIBAohQFwOmg6oR11dbSQj4/HVhjMCSlR0jOf88wznf+//306d66rJMWivp4WgEwrDHY1IqqI0I0HH3wQQ0PdMa/HXhrO+TBhhSFW7GttLX0XDhMwJy0WEqamUg9XYYXBfNYsUG3b5r/vFRSQANrSQotFutbw0lIn9jWRwpDNHIbGRpr4zXK0APWDiYnYyewm5rPxawkGHO9Spj0MhYVUrvedd9KbWwCn7CDg5A0tWkSCMdeD512+MxGS5KdKkhctLWQBfuMNfwIlz4Pj43S/Cgo4lLMbu3Y9iO6REZIKktS2OCRpaop+pyqQL15MQnh3N+UN+e1XPBdxdSAzp4N3pTcVhkx571pa3LfKy8OQjnLCY2XrVv8WbN7jJlkvjRdNTc49TcaCzoqOHf5ZUkLjqKsrOmTYncNA6083TxIGLLSblm5eb3hX6kwZW7oBPPiJT6C7owO7dzvXNj0MdlnyINLUFJ2PANDcZs/xpsJgzkU//jEp83ZIUrx9GPjzfjwMjY2SwxAEAqcw8GLup0Z5LNi6sGcPWUB48jAVBnOiqqlxSl/6TcorKAiOsgA4Alqm9n0Ih+k72gmTpaVAe3sjrr/+egwONsYUiNhK7SWo8EQSKySJP9PdTZOM7fpkA6efyhZeeHkYSkudSdOvUMbXT1cAZbgtXJErlsLANcazhVIU+27Hv7LAwzHQ8caJKUAkU0mMhSBWSlKtZuTFmjX0vY4/Pr3zmNVuOCSpsJAERK6UVFeXmZyiVJOeAUrM58/5mavMKkk8FgoLgY6ORjQ1XY/G6emUdtPknZ5T2efAZPFiOs8775AS5FdIZYWBw2eGhtzzP+/bk25OQSJ4/MQKt0yWlhZnDvJrPGlooDmdPQPpeBgWLnSKiiQzThsaqI/t2hU9N+yzT7RnzlYYamtp/Wn06NQcMmyGwbLCMDJCSkOmFIbG5mZcf/vtaCgMYefOaA8DhyQVFmY3hDTTmAqD3W42atlJz0B0KHeiHIZQyNmtnI+PRbbGpJAcgVMYvvtdqgBgxn4mC88jnZ3u3WJNAcecUKanqXTf5GRqVTyCwMqV9DtVIdpGKSdm3VxcS0uB4eFCVFZWYnCwMKZAVF9Pu1zfdVf0e+XlZG2M5R7mxa+rK9oipBRN+hUV7rjLZGALsamwKOUIY34VQc5fyVRirrnQ2NW6TIUhm+FIjJdCwv2ArYrx2mFaVpMJo2ELaFcXjdF4lUCS5SMfod+rVqV3Hq6nPzFB52Khq6XF2T+EEzhjhXf5JdUcBoD6MSfl+/UwTE6SMMB6gVJAKFSIqalKFE5MJJ2/ADgehnSTijnc4d136Z77dXRUV1M40wsv0P9dXW5FjnMBMu1hsDHzr4Dkd5W2qax0nm+y4VlbttDvdBWGVGL0uQ3bt0evV/vs44T5MXZuTUkJrT+FHglj69ZRLhnvrwA4CgMnIGfK2l8YiaCyvx+DeyYwNOT0z/JyUtbZw8Bhb3MFs5qkV0jS6Gi0h8EL28PgFY5YVkavn3ji3FKq9lYCpzAcfjhVAEhngLHVYudOUhpYYfByDwNOx+dyfbkQyDLN5z4HfPaz/jag8UtNDU0aprBBIUl9eOyxxzA62hfTw6AU8F//BZx8cvR7rCSMjnqHbJgxx14KXFUVtcveqdYvFRVkZZucdJ+bhWS/FnHewGfdutTaYWO6sm33ba4VBi94zLBVMdHC+5OfkPKfDFzFpbs7c/kLzDHHkPXy4x9P7zzsYRgcBJ5/3hG
uFyxwvC8sCKXrCUonh8FUVvy0g889POy+Tl1dH/r6HkPf2FhKHgZbYUhVSGYDwY4dyQu6558P/Od/krHBzlPifXsymcPgRUUF3QuOpEnXw2BadP3eD55bt22jPpFOyJ/pWU3Gi7hkCSkbr70W7Z193/tojjNDimwPw8QErT99HNtltenpp71DkjJdsahvagqPnXMORt7bhHuOvxfHLt09e73Nm+l7ZLLUea6IRGht8yoxX1JCa9PUVGKFwSuHwUth6OujYjRz0VC7t5FB+11w4MnirbdogfAK0zFjN3nStjfzmkssX+5OXMoENTXRu26XlgLT0xNoa2vDyMhESpOv+RmvyZStZe3ttEjYAvKf/0wu9VSFMVYYAPe5uQiMuZtmPJYtS66qUiLMZDmvDesGBkjgsGtr5wo7JCnRs//0p5O/RmMj7SqbzP4NyZCJECfOT7ATGhcsoIUvHHaU2XTDFsNhErb9uO1teP8Yv20wDSemXlBePoHJyTZMTE+m5B7gsIN0BfKGBrr87t3Jl7M+/XQqq7ttGykMpsEoVyFJ5v49QPpJz6Z13m+/ZsWCla50FNoVK5wE5mT0SN6ZGXBCgZlDDqH7ZH43W2EAaP2ZcF6ICysMmU5AnigqQtuCBShq34rLn/sXYMF7s9cDqD/53QMlSFRXkyFk1apo2am01BkniYwXfhQG9qCWlqZXYXCuoJR6KsOn1Frr0zJ8zpjMS4WhuJgmw02b6H+vMB3T+sYLBMc5imuMqK2l+2EqV+XlQHt7PS699Arcf39qZWXNCdtr8uYysbyo2ArcPvukF+pRUUHucMCtMDz5JHDvvckJeZlSFgB3SJLXhnUjI9Ruj1y/nMAWf642k8na4sw++9DuzJ2d2VEYMkFTEwlctgDS0kKC56JFjvU3VS8Yw/2TFdxkBMymJuCeeyh3ww8sAIyMuIWBoaF6KHUF6sf/IaWQJC6rmq7CoJSzg7Lf78QccADw3HO0JvT2ui3b1dWUFzE66sRVZ4t4u9gni+lh8OuRZ4Vh1674OwX7YeFCp+JUMhQVAZdfTgUC3v9+93tnngn85S9OWVogOiSpoaEeV1xxhe/rsVCaaQ9D/eLFuOInPwEuvZRemLmh5l4tdtGQuUBFBSmSr74KHHus+72SEifMyI+HgSuCAbEVhkyWg58DnAwgnU0leKTrmb9zukHFvFQYALJAbdlCLk5TS37+eeA3v/He8pwX5b2o88aloYGs7ubiyvdmaIhizY86KvnzmkpCrMm0sdFRGDIdgsO7idrnPuqo1L5PpjAtU3a8p7mxXL4WoLIyauOePSToZEOwWriQvuOuXW6hIUjwPbAroLS0kGDT1ETelaIid2W1VOD+2d+fmjB7+eX+j+V5kIshMbMbW3plOvqAQ5JKx/tx1upR1JeWA0htkt1/fypxnayHYfFiEoK2bqUcAtPDsGKFUwQh28mV9qaU6XgYlKJ7kYzRJhKhcdzRkb7CoBTw8sup5UHcc4/360VF0ePeq9xvMoTDZGTJdA4DCgroZO3tzk6JcHuKk9lULygoReOjuzu67awwsLcvHmYOg9beOQysMMzFMPAU2Y70hfwaAHmRAgKXw5ApDj6YhI7du92WymOPBb7+dfexvEiw5XYv6rxxOe88WpjNCbyiAmhqasfdd38bQHtKVmBT4I01eTc0+EuuTYVYCkO+MRcaW3gxK6zk02JVV+dsuJQNuDrazp3pCzTZgis52R4Gzh+qqCCv3A03ZCaHAaBrZVuYZQHArrHe2NiOUOjbaJ+aSjmHYWIC2GfkdfzmpSbsV/xeym3ke8xFHvxSVOQkotvhbhMTZEjKheBiKgxjY+l7s885J7kQRaUodK6zMzMFMo44In0vWiLsfRh6e9vx7W9/G+3t7b4+zx4GHq+ZMgi2t7fj21dfjfbxcVfSRGEhtZkVhrmWwwA4nnN7reG9gjo7k8thGBmh8Wffi/JyOl+Q1uFsorVeprVensoPgOMA/DeAUjh
KR07rzc5bhWHJEsph2GefxO5aXojZfSYeBuKjHwU2biQLHENlHsPYZ5812LUrnJJ1KRJxBJJY1pcFCyhPYcWKzE+4lZWZd09ngvJysk7H8zAks6trNmhoIA9AphOSmQMPpAW3vT1z5Wozja0w8PM4/HDgH/4B+OIXM3ctXkgHB7OvMPCYtHMYpqfDGB1dg7CtSfiEy6qmVOrJ4rOfBb7wBeCss5L/7IIFpIjaG4ZxHL5dyjgbNDVRwiiQvochVVpaSGkK6viysUOSqqrCWLNmjWun53hwaeJM5zCEw2GseecdhDs6orK+2XI+F3MYACcM2W47VUl0/o4H3wOAxlZbW3R/5ypJe4vCkApKqQal1HcBbAZwDQCehH8FII16oskzbxUGTqbyU3rTDkmSzutgC6cVFcDgYAXCmysxPBxO2tIHkLWcN6KKlS+wYAEJjVu2uMMHMgFXWeK/g0I4TN/ZKzyCBZx8KwwLFpDAla38gtJS8jJUVgZnF3Wb2loSPjgWnZ9HURHwzW8CZ5yRuWuxQpsLDwOf39YLtK5AX99aVIyOphySNDWFjCgMK1YA3/teaoL9woWUA6S1u//yfDA4mH2FoaKCwqKA9HMYUqW2lh7F4Yfn/tqpYO4PAgC1tRVYu3YtKnxae1hh6O8nATVToZQVFRVYu2sXKjo6orKD2avR2xusNcYvfGvttddMek5GYeCiDbZsdcABtDfOXMvzyAVKqRql1L8AeBfAFwCUgfIWHgNwhNb6w1rrN3LZpnmrMJx5JlmivvvdxMeyJVc8DImpqABKi4ex34OXovvDlyedfAi4qyHEmnQOOMCpFZ5phaGy0knADNJExcIKV5Qx7w1bfHp785sMzLXXs9mGBx4Arrsuvb1Ysgk/i7Y2embZTJI1c4ZypTCEw+4wl7KycSi1HeOTk2nlMGRCYUiHhQudzdtMz6jpvcu2wsDeKSB/CsMNN1BobrobGOYKcwfyggJgamoc27dvxzhrEAkwQ5Iy6a0eHx/H9mXLMD466q7jCmevn76+zK9fuYCVAru4iLkZmx+FgRUFVhzs8cVG2rl4j7KFUqpKKXUrgK0AbgIQBikKvwVwtNb6g1rrv+ajbfNWYSguJkvUgQcmPpY7MSsM4mGITVMTcHTdn/Hgh8/H0Cc+nLKwlCiUyZxAMm2h4fOFQsGqiBUKUb8dGCABy2xbOOy8l85mS+mybBm1IVMbBHpx5JHArbdmdzfrdDDL/mZb4cyHwrBrF5UfZaqru9DUdB+6lEpZYchUSFI6LFrk7HBsynf5Uhj8bH6VDdaupZwNs/pdkCkpof7DDq6uri7cd9996GIXXwLMkKRMKgxdXV2478AD0VVeHjUhkid+7oYkXXcd/d5/f/frHEIEJJ6P+L5rHVth4HszF+9RplFKVSil/hGkKHwVlNisADwF4Hit9dla65fy2caALsm5hTsxT+RBsjoHjUgEOLLvdVz9wzvRcMIJKZ/npZeo0kwszMofma7PzOE9QdyBMxx2rC6mwqAUVXDSOr8Kw0kn0f2zF5K9CRa0OjuzP1ew8WJkJPvP3Ux6NgXZiYkGvPPOZ9EwMJBWWdV8KwxLlzpGIS+FYWAg+waEmhp6lmNj+fMwzDW4u/CGgg0NDfjsZz+LBp8Dgq39oRAZIzJFQ0MDPltSgobOzijLZEUFhUCNj89NYfiii2jM2pb/ZBWGqSm6B7FCkvj8e7OHQSlVppT6MkhRuAVANUhReAbAyVrr92utX8hnG5l5W1Y1GVhhYEFNFIb4lK47A6Wn1iCUxo1KVBaRKzNlw+LH4aZBnKTKy70VBoDqYgOp7X2RKY4/HvjlL6M3XNqb4Lybzs7sJX8zoRAtzLmotmLmMJgyfVFRCIOD9QhNpJb0PBuSxCEkeVIYuMJSfb077JTngf7+5Mu1JgtXn+ntFYXBL3yPeH+QUCiE+iTcI5EIlQd/913yCmaKUCiE+ksuAd5+m7ZGNqiocAyQmdyrJ5d4Geq4ShL
/HQ/TO8oeBntNY2Uqn0awfKGUKgElMX8ZQAOcPRY2APgnrfXv8tW2WIiHATQwSkpIUAuHsxuTPB+4+SeNeGF5Hfq5EH0WqK0F/u//pY3UMg0rDEGcyMNhp76/PbledRUtmH7C7LLJSSft3WF7VVUcGpF9hQEATj2Vdr/Odm6VqTCYwkA43I/m5vXoB9LPYQiF8hZrtmoVjZ1Vq9yvs9CSi5r5ZvhTvqokzTXM/UGKi4H+/n6sX7/e9/rD46a7O7PGwP7+fqx//XX033FH1OAMhx1vVhDXmVQpK3P0fj8eBiB+hSqeP1Oo1jxnUUoVKaWuBSUzfwdAI0hZeBnAOq31cUFUFgBRGGYpL6dOLd6FxIyNjWHLli0Y4xCDLPHlL1Np10xTVQXcdRdtehQ0zD0ibIXh9ttJqBGrZH5RirwMvb25URg2bSJZO9tKWiwPQ2npGCort2Bsejr9sqp5lJDLyoA33gB++1v36+xhGBzMvheHr9XbS1WK5koeQT6xFYZk1x+W5Xt7M7u+x2tHJOJ4iueTwhAOO/p+onUoEgEOPZTGVSyF4fzzgQ99CLj44sy3NYgopa4ClUe9A8ACkKLwGoDztdZHaa0fz2f7EiG29BnKy0kYC2KYStBoaGjANddck+9mpMWVV+a7Bd6YC41XPLVYJINBSwvw5pu5Efh4Tsq2hyEUIm/r+Li7n60IT6Fn01lo6Lk/pWx3l4chAB3YdnCUlDgVXbJtMGIPQ28v8Ic/AOvWZfd68wFbYUh2/eFxk2kPUrx2VFeTcnrccfNLpigro1CroqLEtoNwGHjtNVIWuCy0Hb1RWQk8+mj22htAfgTadE0B6ATwLQC/BKCVUikFRGqtt2euefERhWGGSIQmFLH4CPmEk+UA8SQEmUWLgBdf9LfPS7qwwJGLMLBTTgF+9zu3XP9Pk/8E7HqVMhjTzWEIgMLgRV0d5aRk28PAAmtPT2Z2et4b4C6T4jYgs880l3vY8JidS9Wo/MD3z08hEjPUT6I3otAA6kEKw7fSPE/O5HgJSZqhpkYUBr90dHTg+9//Pjo6OvLdlHlHRQXwyivAypUiTASZlhaSn7OdJAvktpLIX2eqe5tyfUd5Ob5/yinoKClJSeB3VUlKReLLAbW1JJBmO8SMhdfubvotYzwxbDhhhSHZ9cdMas9keFC8dvB1ON9pvsD9V+vEx7KC0NeXm6INcwiV4Z+cIR6GGWpqyIUoCkNiysrKcOihh6JMVruMwyUA33hDJtgg8+UvAyefDHzgA9m/FldlamrK/rW4KpmpF5QVFeHQrVtRNjiY3j4M4+OBlZ5YwLP238o4hYU0xnkLAfEiJsasklRamvz6w5bu4eHMKgzx2sHXmW/FIZJRGPjY/n4KwZtPoVlpcEu+G5AOojDMUFtLE0ouLIZznUgkglNPPTXfzZiXmG7b+bbYzCcWLKBkvVzAAg8rDtmE+5wpyEZKSnDqiy+SSyUFD8NcCEmqqiJPyNKl2b9WZaVTclNsLomxFYZk15/yclLUpqYyqzDEawfLEUHb5yddeH3yozAUFdG97+0lj1ouCkQEHa31nFYYJCRphkiErGAHHZTvlgSfiYkJtLa2YmJiIt9NmXewFUYpESYE4rzzqBTo8cdn/1rsYTC9WxPFxWiNRDAxPZ1ySNLUFKBHgxuSdPPNwFe+kpv5v7LSKbkpHobE8DzICkOy6w9veglkNoIgXjt4z4/5tl8N75cwPe3v+NpaUhZEYZgfiMIww3nn0e9jj81vO+YCe/bswd1334098bZqFlKCrclm+Tph72bNGspryXa4DOAIZ2ZFpj3Fxbj7wguxJxxOOSQJAPRYcD0Ma9YAt+TI9mcqDGIUSAzfI97oLpX1Z8MG+r14cebaFa8dCxYAzzyTnX2E8gkXSfMbHllXR+F3udqzRsguEpI0w5lnktY831yI2aC+vh5XXnllUrttCv5gD8PetJGNEBx4/jMVhvrSUlx5112o37Mn5ZAkAJgeHUN
BQD0MuaSyUiqhJYO9oWAq68/++9OGzLxpZyZI1I4TTsjctYJCQQFw//2OxyYR9fWkLLS1Ac3N2W2bkH1EYTAQZcEfRUVFWJALc+deCMfY+nX5CkImYUXVzJ8pKivDgm3b6J80FAY9Gtyk51xSWQns3El/S55SYoqLaW1mD0Mq68/Pfw60tmbWo7O3roOf+IT/Y5uagG3bgI6OzCprQn6QoAchaQYGBvDUU09hgLdvFDIGrz+iMAj5wCskaaCwEE+dcgoGKipS3ukZCHZIUi6prHR2vhWFITFKkaIwPk6/U1l/Vq0Czj47s+2SdTAxixcD771HSdK52LMm6CilpjL8M5nL9ovCICTNyMgIXnvtNYyMjOS7KfMOtsJkckdSQfALyz6ctAkAI0VFeO3QQzFSVpaSiXZWYQhw0nMuEYUhecrLHYUhKOtPUNoRZJYtI88OABxwQF6bEhQyvQeD7MMgBJvGxkZ84QtfyHcz5iULFgCXXeYk4QtCLrnjDuBnP3MnhzZWV+ML3/8+/ZOOwjA2BhRLMfaqKtprBRCFwS8VFVRpp7w8OOtPUNoRZFatIm95VZWUrJ/hZ2l+XgE4G0AdcqwsAKIwCEKgKCgA7rsv360Q9lYOOwy4/XbrRa61CqSlMEBCkgBQntLQEDlbpLiBPyIRslTLZpZzi9WrgR//mMLKpOofoLX+VKqfVUp9CLTxW53x8o60G5UE8giFpOns7MSdd96Jzs7OfDdFEIQs0zk1hTuvuQadDQ1pKgxjojCAKqGNj4t3IRnCYdonKRIJzvoTlHYEmcJC4DOfAT796Xy3ZO6ilDpbKfUigF8BOBjkWWgDcB2A/XLZFvEwCElTUlKCFStWoEQWf0GY95RE8AbbzAAAIABJREFUIlixZQtKxsZEYcgAXDpZ8pT8Ew5T4mxlZXDWn6C0Q5ifKKU+APIoHMMvAegA8K8AfqS1Hs11m0RhEJKmsrISZ5xxRr6bIQhCDqisrcUZ69fTPykoDLN5zuOS9Aw49ej7+vLbjrkE78UQiQRn/QlKO4T5hVLqJABfB3A8vwSgC8C3AfxAaz2cr7ZJSJKQNJOTk9izZw8mJ3Na0UsQhDwwWVyMPXV1mAyF0vIwqHHxMABOJbTBwfy2Yy4xOmNLXbAgOOtPUNohzA+UUscppX4H4CmQsqAA9AH4JwDLtdbfyqeyAIjCIKRAZ2cnfvjDH0rspiDsBXSOj+OH111HOQwpBN6LwuCGFYZPfjK/7ZhLsJ66zz7BWX+C0g5hbqOUWq2UehzAMwBOASkKgyAvw3Kt9Te01oEwL0hIkpA0dXV1+NSnPoW6urrEBwuCMKepW7oUn7r3XtR1dTlbkSfBrMIwIVWSALof7e1AQ0O+WzJ3uOsu4NFHgbo6YHw8GOuPrINCOiilDgNwK4Bz+CUAQwB+AODbWuvufLUtFqIwCElTXFyMJVJUWRD2CorLy7Fk+/aZf1Lf6VlJDsMsjY35bsHcYtEi4Npr6e+grD9BaYcwt1BKHQRKZj6fXwIwAuBHAP5Vax1Yl5WEJAlJMzg4iGeeeQaDEoQrCPOewcFBPHPCCRisqEjp86wwFExISJKQPkFZf4LSDmHuoJT6OYC/gpQFBWAcwP8HYIXW+ktBVhYA8TAIKTA0NIQNGzZgv/32Q0WKQoQgCHODoaEhbDj5ZOy3aBFSGe1FRYDCNAqmJkVhENImKOtPUNohzCk+avzdCVIWdgA4XanUNm7WWt+fgXb5Qmmtc3UtuqBSLZjZnW7Hjh1oaWnJ6fUFQRCE3NHfDyyoGsIQKoAHHwQ+/vF8N0kQhDnEiy++iKOPPpr/PUpr/VI+25MqSqlpAJkUurXWOmeGf/EwCIIgCFmjtBQowwj9k0JZVkEQhHlEaq6EACAKg5A0e/bswaOPPorzzz8f9fX1+W6OIAhZJN3xXlQElGGmkD7vwCUIKRKU9Sco7RDmFKfkuwHpIAqDkDRFRUVobm5GEWczCoIwb0l3vCsFVJeMAGM
QD4OQNkFZf4LSDmHuoLV+Ot9tSAfJYRAEQRCyygmVr+HZgcOADRuAY47Jd3MEQZhDzJcchrmOlFUVkmZqagr9/f2YmprKd1MEQcgymRjvVSUzIUniYRDSJCjrT1DaIeydKKVWKaW+l8trisIgJE1HRwe+973voaOjI99NEQQhy2RivFcWSdKzkBmCsv4EpR3C3oNSaoFS6kal1GsAXgLwuVxeX3IYhKSpra3FxRdfjNra2nw3RRCELJOJ8V4dmtncKhzOUKuEvZWgrD9BaYcwv1FKlQG4AMAnAZwKx9CvkNkSrQkRhUFImpKSEuy77775boYgCDkgE+O9qXAP/VFXl4EWCXszQVl/gtIOYX6ilDoFpCRcAMzumcklWVsBPArgV7lsk4QkCUkzNDSEP/3pTxgaGsp3UwRByDKZGO9Lwl0YKQzLTs9C2gRl/QlKO4T5g1LqAKXUbUqpbQB+B1IYIiBFYReAOwCsBdCitb5Wa/37XLZPFAYhaQYGBvDkk09iYGAg300RBCHLZGK8N66sx9uL35/BVgl7K0FZf4LSDmFuo5SqU0pdq5T6M4DXAXwZwGKQktA3c5gG8CWt9fVa6+d0rsubclulrKogCIIgCIIQROZbWVWlVBGAc0EehDMBFMEJNxoH8BsADwD4HwAjIIXhY1rrh3PfWgfJYRAEQRAEQRCELKKUWgNSEj4CoIZfBikEz4GUhIe11j3GZ3LdzJhISJKQNF1dXfjpT3+Krq6ufDdFEIQsI+NdCBJB6Y9BaYcwp3gewFUAakGKwiYA/wfACq31Wq31XaayEDTEwyAkTUFBASorK1FQIPqmIMx3ZLwLQSIo/TEo7RDmJAMAPqe1/lm+G5IMksMgCIIgCIIgBJL5ksOglJqe+ZMF77+CwpB+rrVujfOZQOQwiGosJM309DRGRkYwPT2d+GBBEOY0Mt6FIBGU/hiUdghzipMB/BTAICgk6XAA3wawXSn1v0qpTyqlKmJ/PL+IwiAkTXt7O771rW+hvb09300RBCHLyHgXgkRQ+mNQ2iGkj1KqUSl1jlLqVqXU40qpPUopPfPz00xdR2v9R6315QCaAFwMYD2AaQCFoF2c7wPQppT6uVLqbKVUYaaunQkkh0FImurqanzkIx9BdXV1vpsiCEKWkfEuBImg9MegtEPICDnV+rTWowB+DuDnSqlmAJfM/BwKoBxURekjAAKVUS8eBiFpysrKcOCBB6KsrCzfTREEIcvIeBeCRFD6Y1DaIWSc7QCeyNXFtNZtWuvvaK0PB7AKwPcBdIBClurh5DvcrpS6Qym1NldtsxGFQUia4eFhvPLKKxgeHs53UwRByDIy3oUgEZT+GJR2CBnhVtBGas1a66Wg0qc5R2v9V631DQBaAJwD4GEAYyDlYSGAawH8QSnVqpS6Uyl1Wi7bJwqDkDR9fX147LHH0NfXl/hgQRDmNDLehSARlP4YlHYI6aO1/prW+r+11oFISNFaT2mtf6O1vghAM0iBeXbmbQXKgbgKlAORM6SsqiAIgiAIghBIcl1WVSm1DMDWmX9/prW+LJvX88tMuy4F5TusAKC11jlLjBYPgyAIgiAIgiAEGK31e1rrW7TW7wOwFsCPc3l9URiEpOnu7sZ//Md/oLu7O99NEQQhy8h4F4JEUPpjUNoh7J1orZ/TWv99Lq8pZVWFpFFKobCwEEqpfDdFEIQsI+NdCBJB6Y9BacdeSONMaHtMtNY7c9WYvQnJYRAEQRAEQRACiZXDkBCtdVpaXFBzGPKNhCQJSaO1xuTkJHKtbAqCkHtkvAtBIij9MSjtEIRcIQqDkDRtbW345je/iba2tnw3RRCELCPjXQgSQemPQWnHXsg6AIsT/AhZQHIYhKSprq7G+eefj+rq6nw3RRCELCPjXQgSQemPQWnHXkiH5CjkB1EYhKQpKyvDoYcemu9mCIKQA2S8C0EiKP0xKO0QhFwhIUlC0oyMjGDjxo0YGRnJd1MEQcgyMt6FIBGU/hi
UdghCrhCFQUia3t5ePPLII+jt7c13UwRByDIy3oUgEZT+GJR2CEKukLKqQtJMT09jcnISoVAIBQWicwrCfEbGuxAkgtIfg9KOvQGrrOpRWuuXsnk9KavqjeQwCElTUFCA4uLifDdDEIQcUFBQgPYftAMAFt+QegGSHbfvSPscghCU9Sco7RCEXCFqsZA0PT09+MUvfoGenp58N0UQhCzT09ODJ/ufxI4/7MDzLc/PCv4AKQHm//GY7J/EZP9ktpop7CUEZf3p6enBz775M7z+ndfz2g4hfZRSJyilLuMfABcab+9rvjfz/l6JeBiEpJmenkbf233YtW0Xar5Uk+/mCIKQRaanpzEZmoRWGhO7JtC9vhs7bt+ByJERTI9Oo/LYSl/nCVXKciOkz/T0NMbGxjA9PZ3/dkyMoffPvXj++89j8Q2LxXs2d/kMgEtjvHf8zI/JT7PamoASiBm8s7MTk5Nuy1N1dTXKysowODiIgYEB13slJSWora3F1NQUOjo6os7X1NSEgoICdHV1YXx83PVeZWUlwuEwRkZGopKVioqKUF9fDwBobW2NOm9DQwNCoRB6enowOjrqeq+iogKRSARjY2Po7u52vVdYWIjGxkYAQHt7e9REV1dXh+LiYvT392NoaMj1Xnl5OaqqqjAxMYE9e/a43lNKobm5GYD3PaypqUFpaannPSwtLUVNTU3Me9jc3AyllOc9rKqqwmmTp2F0cDTqPhUXF6Ourg5aa88NbRobG1FYWOh5DyORCCoqKjA6OhplPQqFQmhoaABAG+bYuTf19fUoKipCX18fhoeHXe+Fw2FUVlZifHwcXV1drvcKCgrQ1NQEAOjo6MDU1JTr/draWpSUlGBgYACDg4Ou9/geTk5OorOzM+q7LliwAACwZ88eTExMuN7j/j00NIT+/n7Xe3wPp6en0d7eHnVevofd3d0YGxtzvcf30Kt/m/fQq3/zPezt7Y2q/MH30Kt/m/fQq3/zPfTq32VlZaiurvbs34BzD2WOyN8cUV1djXWfXoe2kja0v9eO3nAv+qf7UdpXisKSQgxVDEXdi6qqKpSXl2N4eBh9fX1oe4DmguZLmtHV1SVzxAwyRxDJzhEHtR6EF099Ec2XNOPwGw8HkLk5gvvqYV88LO4cUVdXh1Mjp2JX4S70T/ejuL8YodbQXjlHpCpHmHOEiS1H2PdCyA+BUBgeeeSRqIXjwgsvxEEHHYSNGzfiiSeecL2333774WMf+xhGR0dx9913R53v5ptvRklJCR5//HFs2bLF9d5ZZ52Fo48+Gu+88w4effRR13stLS349Kc/DQCe573uuutQW1uL3//+99i4caPrvZNOOgknn3wyduzYgQcffND1Xk1NDT73uc8BAO6///6oxeryyy/H4sWL8cILL2DDhg2u91avXo1169Zhz549UW0qLi7GV77yFQDAL37xi6gF6aKLLsL++++PV199FU899ZTrvZUrV+LDH/4whoaGPL/rV7/6VYRCITz22GPYtm2b671zzz0XDZUN2DW+Cw/f/bDrvaVLl+Kyyy7D1NSU53mvv/56VFZW4ne/+x3eeOMN13unnnoq1q5di23btuGhhx5yvdfQ0IBrrrkGAHDfffdFTT5XXnklFixYgGeffRYvveTOh1qzZg3OOOMMtLe3495773W9V15ejhtvvBEA8NBDD0UJIRdffDH23XdfvPzyy3j66add7x1yyCG44IIL0N/f7/ldv/a1rwEAfv3rX2PnTvc+M+effz4OPfRQvP7663j88cdd761YsQKXXHIJJiYmPM/7pS99CeFwGOvXr8emTZtc751++uk49thj8e677+KXv/yl673m5mZcddVVAIB77rknSvC5+uqr0djYiD/+8Y949dVXXe8df/zxeP/734/W1lb87Gc/c70XiURwww03AAAefPDBqEXl0ksvxbJly/DnP/8Zzz33nOu9VatW4bzzzkNPT0/Udy0sLMQ//uM/ApA5IjBzxAU
zbx4GLH51MWpPrMWTtU9i293Rc8QRRxyBt956C4899pjzxt0yR5jIHEGkNEdcABQMFeBwkMIQa46oXF+JZ//3WWxa474PieaIlRMrE84Rwx8Yxh8G/wAcNvPG3TJHmFxacSl2f283Nv79RrRNuZ9NzDkC0XOErVBkmpmk5ssAQCm1FMDn4OwoPQZgC4CHAfxQaz3sfZb5TyCqJIn1cG5ZBrb9+zY8MfIEPvHJT6C0tNT1nngYHMR6SIiHgZirc8TOnTtxzz334IILLpj97kD0HGF6EeJ5GGSOcJA5gkg0RwzcO4D37ngPFddWoHuoG8/iWRz8+4Ox4pwVCT0Mbf/ahne+8w5KTi9B5bGVaL6ExoM9R7Q90Ib2B9pRfmA5IkdGEnoYNt25Cf81/F9R42JvnCPse9h0SROaL2nGyF0j2H7LdtR8rQaNVzW6PpuMh+G1117DmWeeyW9nrUqSUupcAA8AiBVnuQnAOq315mxcP+gEQmEQ5hZv/vOb2I7tOOHGExAOh/PdHEEQsgAnNNd/oR79J/fjwAMP9BzvfFz4oDAqj61EqDI0m+NQfVK1xHULabP1n7di2y3bsPRrSzFRMYFtk9tw3HXHofsuEqrj9TG7fy7/5+UJrxHrGBN7Hdzbq4DtuH0Hutd3o+eJntl7yPc+3fyOXJRVVUqtAvAcgDIAgwD+BcDvZ/6/CMAVM4duArBaaz3gdZ75jFRJskim6kcyx84nKiorsLJyJbrv6o6qmiIIwvxgsn8S47vGMfK/I2j6Y1NM4wAfBwCDrw6ie303xneNY7JvEt3rZY4QUmfH7VSZa/DVQRQvKsbgq4MoKyjDB27+AMLhsK/KW4tvWIzjdh6H2jNq4ybehypDKF5U7HmM11rP6yCPC7ste4N8wM9nx+07Zr87PydWFI7beRwAzIV54A6QcjAJ4HSt9W1a6xe01k9pra8EcNPMcfsB+GK+GplPRGGw8Fv6j7XpvXFBbLimAcPrhjHcP0yCgZRKFPYy5rMwwN+NBajxwnFs7d8aFT7B8HG1Z9SiYlUFABIaQlUkeJlzhClgCEIiWBmtWFWB43Yeh4pVFRjuH8bbb7+N0dFRhCpDsx6t51uex8YPboxZ9te2cttjmIXbxTcsdr0Xa63ndZDHBbfFbPt8kg+85jx+Pt3ruzH46iBqz6idfU6mXMDHpSor9D6Y3d20lVJHA1g78+89WusXPA77LoA3Z/7+vFKqKKuNCiCiMMxgWjL8lP8zO/58FprNiXjjBzfi+Zbn8ea/vYmHHnoIw+Fhl0UmX8LAfBbehGAyn/cUYEEHAI7beRxCp4Twe/X7mHXvTUErVBmaFRoO+fUhqD2j1jVHmAIGzycydoVY2Fb/UGUIw+Hh2eRzVgC8vFobP7gR3eu7o6z+3O9MJcBWNMzxba/1/BleB3lccFv4/IOvDs5+ZrJ/0nVtu88HWZHmttn3EnCeDwBUrKqYvQehytCsl8E8LpFsFese6aGsh85/yPj7Pq8DtNbTAO6f+bcawCnZblTQCESVJC/M2Df+P5t1jmctGZ+pcF0vFrwwAsDQ60MYfHUQGz+4EQMvDwSiHrPf2EE+Lla88axlYF+aKMZ3jaNpqAlfuvFLKC0tReGNhdHHWpNKrGvYlqBU79tk/yT6X+jPeh8RBID6KgsDz7fkvv56tmOlTeEeAA694VDsN7pfVIEDL+w22ffGFDAm+xzloffp3tm5YcftO9D7dK9rLs1ULLQwd+B+ziEtAPWnhVMLXf3RDIUJVYUwPTo9u2YVlBREWf2534UPolAiPtZct0KVIfQ+3YvnW55H5MiIa63nz5jroNf5Kz5TgVBlCEOvDyFUGXK9xwo5f8/wQeHAGh55XQ8fFJ715pjr+XE7j4tSdBbfsNildMUbt+Z4Dx8UxvTYtEvJ2nH7DoztM+b52QxywszvIQAvxznOLIN2PIAnYh04H8mrh2H33bsBOFola5ZsGeCBNWs9yNJgYgGANeBYrkRT+wW
cQTDrgutzW89Ml2auLAemNSDePYs6bsYyY7dz1jJQFUKoiv4urixGOBxGYWHh7Lk2fnCjK87UttbwNXgSMC085rM2vRnxdpQ1PR+mJce+94nulddxXtfye85E1/NjRYpniUr1nEL6mP22YlUFKlZV5GWRz3aow+IbFrvivQsLC13jPd1zz8aTV7mVB/5O3eu7Z+dS08rL41v6emwyNVcFgVhePLs/xvRqVdHrtsLK/a72jFrXsaZisfiGxbPjmy3nZt/1Wgft84cqQ64Y/sFXB1193lz7AHiunUHADDlkRcBczwFvhcAO0TIx129zvAOYvUdmPlTJ/iXZ/ZLAgTO/N2ut403ob3l8Zq8hH1WSlgJ4DwAeOOYBhLeFUb5fOabHqUTYwIYBRNZEUFBcgOFNwyjfrxwAMLxpGOFDnKS7oY1DWHjFQiy8cmHMa7FCkuiY3qd7UXFkBZbcsMT1Wt8f+1B1YhVUicLQxqHZdg5sGMCi6xfNHs+f6dvQ5zrO/g58Hv4efr6DX/j6ekzPttvrnvH/XsfpMeoL5ueq1lRFta+vrw8vvPACjj32WFRVVWH77dvR/0I/Ko+txJIblmD77dsx+PKg6zyxrs3wfer7Yx8iayIAMHv/vNrG54isiVBllooQdv94d9S9Dx8Sdt3n3Xfvxu4f7475ffn5mc/L65ypPL/tt2/Hru/tQtWJVag+qdr1ObOv8v0c2DAw2//se2i21eynsY7zeg6Z7H/x4L5pXp9/m/3L6ziv/pcv7OcHYLYvxWsn9zmve232R8bruZjzi9n/vfpSvPPHaqd5fvva9njPNF5zlz1XAt5j029fWnjFzOs+npfZLiD++pHO9/W7hpltBuDZX/g+xJqr8j2O4o2BWMf3bejztf5ki0RtTqYd22/fDgBRMgZA6w9fwz4uXrvMfpyNvur1DOx5KNl+ZY91lvV4PJv32pQjhs8exsX3XsynORfAX+JdR2u9M977JkqpUgBcI/h/tNbnJDh+EEAYwAat9bF+rzMfyIfCsBrAizm9qCAIgiAIgjDv0Vorv8cqpRoA8GY9/6m1vijB8e0AGgH8TWt9SOqtnHtI0rMgCIIgCIKwN2ImZ43HPMqBEyrKstCWQJOPpOeNAI6a+bsTwFScY4X80QzHE3QUgOgtWYW9EekXgo30CcEL6ReCTap9ohjAAQDqAGyAE0KUCcx60cU+jueEiky2YU6Qc4VBaz0GICvbeguZQymXR68tmZhAYf4i/UKwkT4heCH9QrBJs0+8m+HmMOaOzRU+jucEosEstCXQSEiSIAiCIAiCsNehtR4F0DXzb0u8Y5VSNXAUhmCVs8oBojAIgiAIgiAIeytvzPzeVykVL/LmAOPvN2MeNU8RhUEQBEEQBEHYW3l25ncYwJFxjjvJ+Pu57DUnmIjCIAiCIAiCIOyt/D/j7095HaCUKgDwyZl/ewH8PtuNChqiMAiCIAiCIAh7JVrrPwN4ZubfTyulvDZk+yKc3Z3v0FpP5KRxASIfZVUFQRAEQRAEISh8HhRmVAbgCaXUbSAvQhmAiwBcOXPcJgDfzUsL84woDIIgCIIgCMJei9b6VaXURwE8AKASwG0eh20CsE5rPeDx3rxHaa3z3QZBEARBEARByCtKqaUgb8M6UJnVcQCbAfwCwA+01sN5bF5eEYVBEARBEARBEISYSNKzIAiCIAiCIAgxEYVBEARBEARBEISYiMIgCIIgCIIgCEJMRGEQBEEQBEEQBCEmojAIgiAIgiAIghATURgEQRAEQRAEQYiJKAyCIAiCIAiCIMREFAZBEARBEARBEGIiCoMgCIIgCIIgCDERhWEeoJRqVEqdo5S6VSn1uFJqj1JKz/z81Oc5DlRKXauU+plS6hWl1E6l1KhSakgp9a5S6j+VUh9USimf5ztWKXWvUuptpdSgUmpMKdWqlFqvlLpCKVWc1pcW4pKJPhHn3OUzfYLP914Sn7tJKfWiUqp7pm+9pZT6rlJqaTptEvw
RpH6hlFqmlLpOKfUrpdQ7SqnhmTlnp1Lq/ymlLlJKhdJpk5CYIPWJOOc5yziHVkr9czrtEhIT1H6hlAorpT6rlHpSKbVrRrZon5Fb/k0pdXo6bRNiI5Px/KA9A+f4KoCLY7y3fObnIwCeVkr9nda6y+vAGYXiDgDXebzdPPNzOoDPK6XO1lpvT7vlgheZ6BOxuBXUH3yjlNoXwG8AvM96a/+Zn88opS7WWv93ZpooxCAQ/UIp9XXQnONlgFg08/NBADcopS6UeSKrBKJPxEIpFQbwo8w0R0iCwPULpdQpAO4DYBuYGmd+VgFY+/+3d+fhclR1Gse/r0kIgggywbDESdAIog4iEJEkCGGZAYdFRBxAHIgyI4ggKpsimz7DoAgq4wDKYth0dGTEhTiDkQmrIPsii4AQgiKbRtYQAj//OKft4qaru2/fuunKzft5nn6q6tapU+fenFTXr+oswKVDLaAtyW8YRp6H6O0/y2LgOuAUYCawA7ApsB3p5v+OnG5L4CeSyurOkTSDhaeB40kBwtScbyOftwGX+AniUtFrnViCpHcChwALSf++3RyzCnAJzWDhTGAbUp04CngGeC3wPUkbVVFO60o/68VapGDhWeAC0rVhOuma82Hg+pxuCjBH0muqKKd11NdrRYkvkm4QH6uiXNaTvtcLSduSHjpNBBYAJwLbAxuTrh3/AvwIeL6KcloLEeHPMv4h3ZTvCIzP25OAyJ9ZXeYxusP+UcBFhXx3bpFmDPCnvP8FYKNW5wGuLeTzgX7//Ubip4o6UVIHbsh5HA08mNcf7HDcFwrnPqzF/qnAi3n/3H7/7Ubypy71AvgScDiwSps8v1co2zH9/tuN1E9d6kRJPpuQHmYtBPYrlOu4fv/dRvqnTvUCWAN4Iqe9uVGmkrQr9PtvN1I/fsMwAkTEsRHx04jo+RViRCzusP8l4KTCj7ZokWwDYLW8/tOIuKXkPCcUfrT5IItqXaiiTrTwSdIX+D2kG76OJI0BDs6bdwEnD0wTEdcAZ+fNLSVNGXpRrZW61IuIOCIivhwRLZ8w5uvNx4FF+UcfqKKgtqS61ImBJI0ivY0cRfrOuK+y0llHNasX/w78DfAc8L52ZYqIRWX7bGgcMNhgFL/cV2yxv9iR+bdt8rm/5Birqdwp+Qt5c/9BXJRnAKvm9XMj4uWSdLMK67sOvoTWD0OoFx1F6id1W958U1X52vCqsE58itQm/Tf0GHRYffRaLyS9Dtgrb14QEfOGo3zWmQMGG4w9Cut3t9h/L+mVIcAb2+RT/PK/Z6iFsqXiNGBl4PyImDuI46YX1i9vk+4G0tMjgGmDK5r1Ua/1oltj8/KlYcjbhseQ64SkSaQmMQAHRMQLlZTM+qnXerEj8Oq8/uPGD/NIS5Mlrdnt6I02NA4YrC1J45SGSD2b1EEVUlvCCwemjYg/A9/NmztK2rBFfqOBz+bNYnqrKUl7AO8l9U/5zCAPf2thvVWQCfy1qVqjycEGgzyH9cEQ60U3+b+eZl24q+r8rXoV1onTgZWACyPisirKZv0zxHrx7sL67ZKmSLqU1OLhXuAR4FFJ35A0vpICW0seocaWIGkuaTSkVp4Ado2IBSX7Pw28hTRywZWSTgauIY2Esz7pNfM7SE+T/zlKhme1esivg7+WN4+MiMcHmcWEvHy2TZ1pmA9sCKwhaayfKtZXBfWiG4fR/I76/jDkbxWqqk5I2os0+s0C0veJLcMqqBfFh04zgLNY8t51DeBAYDdJ20fErT0V1tryGwYbjFOBDSLiqrIEuTPSFqRh054nvVb+OfBLUjv1DUlwP737AAANwklEQVT/4TeJiB+XZGP1cRIwnvTvd2YPx6+Sl890kfbZwrqH0ay3odaLtiRtRrqGADyMx+FfFgy5TkhaHfhq3vxsRHgo1WXfUOvF6oX1M0jNnj8P/C2pyeLbaPaBWxO4WNJrey2slXPAYK3MBP6OdHP/HtJTnnuBTwDf7uK139bA3qSLxEA
iTci0rzzbc61Jeg/wEdKwhvtHRHQ4pJVG5/huOrgV3yi8ujSV9VVF9aJd/uOBH5CeIgawT0Q81/4o66cK68RXSBNwXQd8q6LiWZ9UVC9WLqyvCHw0Iv4tIuZHxKKIuDMiZtKsL5OAA4ZSbmvNAYMtISIeiIg7IuL2iLgyIr5KCh5mkzogXS9pQqtjJX2S1DFpU+AK0sRvq5KeBLyV9IWwOnAEcJknZKonSWNJF2ABX4+I2zocUmZhXnYTHI4trHvynRqqsF6U5d+Y5K9xfTnSbdjrrao6IWkr0sOql0g3l2UjqtkyYBi+QwBui4jzS9J9juZDp3/q8VzWhgMG60pELCRdzJ8D3gB8eWCa3Mn5FNIFYg6wdUTMiYin8pOAuyLiMOBf8yHTaI6EYfVyFKnPyXzg2CHk0xiKt5vAsPgkqZsmTLb0VVUvliBpRdJMrZvkH30lIpa4zljtDLlO5JvLb+bNU1vN4WPLnKq/Q6DNbNO5P+QNefMdbsFQPXd6tq5FxBOSria9NdhF0piIeLGQZCbNIPTYPPlSq3zOkXQk8GZS06RDq27WYEN2RF7OAXYqGbWucYO/ch4FA+CxAU+EHwY2y2lW69Dx+Q15+bg7PNdWVfXiFfLoad8ndWoEOCs/XLD6q6JOvB9YjzTj+52FNEXFzq9vL6S5LiIe6Ln0NlyqulbMpzlS0vwO52zsfxWpJcMfBlVia8sBgw1WY4SDlYBxpCHNGorDYd7UIZ+bSAHD6qQ2q1XOJmlD13g6MzN/2hlHc3jcy4Hixf5OYLe8/hbg2lYZ5BvGxvwcHkKzvqqqF38l6VXA+cBO+UffAz42tGLaUlRFnWg0RxxDdx1jd6N5XZkJOGCon6quFb8Gds/rozrkU9y/uIsy2iC4SZIN1jqF9YHNRor/QTsFo2NKjrORpTiiVtlQvZD6vDSeNl09fMWxGvomzUkhfwLs7fbrZpZdUVhvNyEsNB86LQT+ODzFWX45YLCu5Y7Om+fNeRHx9IAkxac8W7TJZ0whnz/j/9i1ExHq9AHm5eTzCj/fakBWc0n/xgD7tJmRc9/C+g+r+j2sWhXWCwAknQLslzd/AeyeJ/GzZUQVdSIiZnWRx4zCaY8v7Ju19H5b61aF14oraLZs2ElSy7cMktYFNsqbV/uhQ/UcMBiS1pO0dYc0qwLfofma8bwWyX5SWD+xzVjIxwNr5fXZ7r8wckXEItL8HZCarB06MI2kzYGP5s3LI+L6pVQ86yNJx5EmcoQ0ueMu7rtiZkW5L+RX8uZE4OiBaXKT1tNo3tOesXRKt3xxH4YRQNJ0YHLhR+MK65Ml7VtM3+KJzNrALyTdClwM3EjqLLSYNBHKNNIN3Zo5/R3AiQPLERGXSrqMNA/DhsAtkr4O/Ir0inAyaUzm7fMhz+JRkoZFBXWiSieRhrlbD/iypMnAf5GGTp1BGg5vdN4+pCwTG7q61AtJB9EcOeV3wOHAuuUvoAC4Z8AgC1aButQJq5ea1YtTSd8hGwPHSlofOBd4jNQM6VM0Wy3MBi4axrIst+SHu8s+SbOAfbpNn18FFo/fCvj/Lg+/BJgZJdO752ngL+KVr49beRzYKyLmdHleG4Sh1okuz/Eg6YnPvIiY1CHtZNKF/M0lSZ4CPhQRPx1sOax7dakXkubSvk9LK+tGxIODLY+1V5c60UUeW9H8njo+Io7rJR/rTt3qhaS1SK0YNmmTbDawR4vm0lYBv2EwSJ1M/wHYltT5dAJpluaVSDdyD5BGt/luRLTtkBoRf5K0DbAzsBcwhfRmYjSwgDTiwc9Iwya678JyIiLuk/RO4EDSiBeTSc3b5pMu8l+PiHltsjAzs+VURDwi6d2k1g57kobZXQ14ktSKYVZEuP/bMPIbBjMzMzMzK+VOz2ZmZmZmVsoBg5mZmZmZlXLAYGZmZmZmpRwwmJmZmZlZKQcMZmZmZmZWygGDmZmZmZmVcsBgZmZmZmalHDCYmZmZmVkpBwxmZmZmZlbKAYO
ZmZmZmZVywGBmZmZmZqUcMJiZmZmZWSkHDGZmZmZmVsoBg5mZmZmZlXLAYGZmZmZmpRwwmJmZmZlZKQcMZmZmZmZWygGDmS13JO0rKfJnUr/Ls7RJWl/SIkkLJa1TYb7/mf+m51aVp5mZ9Z8DBjNbZkiaVLjR7/nT79+jBk4BxgBnR8TvKsz3S8Ai4MOSNqkwXzMz6yMHDGZmyxFJU4H3km7sT6wy74h4CDgXEPDFKvM2M7P+UYQftpnZskHSGGD9Nkluz8sbgJlliSLijirLtSyRNBvYAbgwIvYehvzXB+7Om5tGxI1Vn8PMzJYuBwxmNmIUmhtdHhFb9bMsdZRv5u8ivQHYISL+d5jOcyOwMXBuROw7HOcwM7Olx02SzMyWHzNJwcJjwJxhPM+Febm7pFWG8TxmZrYUOGAws+VOp1GSJM3N++bm7cmSzpD0W0nPS3pQ0tmSJg447u2Svp3TLZQ0X9Lpkl7fZbneJ+m/JT2Uj18g6QZJx0p6XQW/+gfz8kcRsbhDWXaVdLGkhyW9IOnp/HtdKemLkt7V5vCL8nIlYJcKym1mZn00ut8FMDOrM0nbAv8DFJ+UTwQ+AuwoacuIuFvSnsAsYIVCugnA/sAOkqZGxO9LzvE64AfA1gN2jQU2yZ+PS9olIq7t8feYCKybN0vzkDQK+C6w+4BdKwCvyXlMJ/WD2LRVHhExT9IfgDVzugt6KbOZmdWD3zCYmZVbG/g+sAA4CNgM2AL4GhDA64GzJE0BzgPuB/YD3gXMAM7P+UwkDWW6BEljSc2DtgZeysfsCbw7n+so4Ml8rtkD32oMwhaF9evbpDuAZrBwFbBvPnZjYDvgM8DPc1nb+VVebjnYgpqZWb34DYOZWbk3A/cC0yLi8cLPr5K0GDgUmAZcQrpB3i4iniukmytpRdIN+G6S1hiQD8AxpJvxBcC2LUYVukrShcAvgbWAE4AP9fC7TM3LRcCdbdI1mi1dB8xo0XRpDnCKpNU7nO9GYGdgHUnjI+LRwRbYzMzqwW8YzMzaO7jFTT7AaYX1ccB+A4KFhtPzcjSweXGHpNcAB+bNo8uGII2IeTTnNdhd0srdFr5gQl4+GRHt3g6smZfXtOvnEBF/7HC+xwrrb+yifGZmVlMOGMzMyi0A/q/Vjoh4AHg6b94WEXeV5HFrYX3gjfOWwKp5/QcdynJFXo4h9WkYrDXy8k8d0j2SlztJGtfDeRqKAcWapanMzKz2HDCYmZW7N9pPVrMgL3/TRRp4ZcdpeGWn4UcKIzct8QGKk831cgPeaELUKWA4Ny8nA/dJOkfSnpImtDuoheJ5enkjYmZmNeGAwcysXKsmRkUvd0oXES8XNkcN2N3VcKstrNTDMQvz8tXtEkXEOaR+EotJbz9mAt8B5ku6T9LJkrppYlQ8z4s9lNfMzGrCnZ7NzPqnGEBsTPc31g/3cK5GP4xOnZWJiKMkfYvUuXob0ohNKwFvAj4NHCTp4Ig4o002xfMsKE1lZma154DBzKx/niysPx4RvQQC3WoEDF1NAJc7Wp8AnCBpDDCFNILSx4AVgdMkXRcRN5dkUTzPQ70V2czM6sBNkszM+qd4sz1tmM91e16u2u3M0w0R8WJEXBMRhwB75R8L+ECbw9bLyxeA+wZVUjMzqxUHDGZm/TOHZv+HgyVpGM91ZWF9yhDy+UVhvd0oSo1z3BwR7sNgZrYMc8BgZtYnEbEA+EbenAp8VVLpdVnSeEn79Xi6X5Ge9kOaibrsHHtLatdc9e8L6w+U5DEW2DBvXjqYQpqZWf04YDAz669jSLMqA3wSuEnSgZKmSdpI0gxJn5B0MakvwP69nCQiXqA5p8Q2bZKeDzws6bQcPGwu6Z2Stpd0MnBeTvcMcGFJHu8hzRcB8MNeymtmZvXhTs9mZn0UES9I2g6YBbwfeAfNtw6tPDWE050J7AxMlTQxd2xuZTxwQP608mdgj4iYX7K/0c/h1xFxS8+lNTOzWnDAYGbWZxHxNLCbpOn
APsAWwNqkuQyeAu4nNSm6hKE18fkZaUjWCcCewIkt0rwd+EdgOmkY1fHAaqRZre8mvaU4PSIebXUCSSuSAh+A04ZQVjMzqwm1n8TUzMxGEkmHA18izU69wYCJ5arIf29Ss6YngUkR8UyV+ZuZ2dLnPgxmZsuX/wB+Rxr29INVZpw7bH8ub57kYMHMbGRwwGBmthyJiOeBY/Pm5yseynV3YANS5+xTK8zXzMz6yH0YzMyWP7NIfRNWANYCfl9RvqOA44HLcmBiZmYjgPswmJmZmZlZKTdJMjMzMzOzUg4YzMzMzMyslAMGMzMzMzMr5YDBzMzMzMxKOWAwMzMzM7NSDhjMzMzMzKyUAwYzMzMzMyvlgMHMzMzMzEo5YDAzMzMzs1IOGMzMzMzMrJQDBjMzMzMzK+WAwczMzMzMSjlgMDMzMzOzUg4YzMzMzMyslAMGMzMzMzMr5YDBzMzMzMxKOWAwMzMzM7NSDhjMzMzMzKyUAwYzMzMzMyvlgMHMzMzMzEr9BSWsV7VQspF/AAAAAElFTkSuQmCC\n", + "text/plain": "
" + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ] + } + }, + "6c4cc61f3df1492889d3632fa06d3f53": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": "200px" + } + }, + "c146aa43cd4b4941951f6794553f77a6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + 
"grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": "1000px" + } + }, + "cb4ad0731b8549f483d086e84d0c8450": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_12099b37a72d43078194d5977407662a", + "IPY_MODEL_307138cdaff54eec978913a3a6ccf9aa", + "IPY_MODEL_626fade444f040e18bfbcd1143e7bf05" + ], + "layout": "IPY_MODEL_1aeddf94f6084a19bba2a5311766a6fb" + } + }, + "fe98486cdd4d4e988a7caae86d4c44e3": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + 
"grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "ff78d6effae3404cb8bc4c76286b41aa": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 +}