From f7e931ba3dce4da50430a4e41348348f269c7010 Mon Sep 17 00:00:00 2001 From: katharinakorb <62990236+katharinakorb@users.noreply.github.com> Date: Sun, 5 Nov 2023 20:14:04 +0100 Subject: [PATCH] Add files via upload Additional files used for analysis --- thesis code/network analysis/freeParamCalc.py | 49 ++ .../minimal_architecture/README.txt | 18 + .../minimal_architecture/config.json | 368 +++++++++++ .../get_trained_models.py | 55 ++ .../pfinkel_performance_test64.py | 282 ++++++++ .../minimal_architecture/training_loop.sh | 11 + .../optimal_stimulus/README.txt | 8 + .../optimal_stimulus/optimal_stimulus.py | 219 +++++++ .../optimal_stimulus_20cnns.py | 293 +++++++++ .../orientation_tuning/README.txt | 14 + .../orientation_tuning/fit_statistics.py | 475 ++++++++++++++ .../orientation_tuning/fitkarotte.py | 373 +++++++++++ .../orientation_tuning/gabor_dict_32o_8p.npy | Bin 0 -> 247936 bytes .../orientation_tuning_curve.py | 244 +++++++ .../orientation_tuning/plot_fit_statistics.py | 272 ++++++++ .../psychometric_curves/README.txt | 4 + .../error_bar_performance_pfinkel.py | 223 +++++++ .../render_including_minDist/contours.py | 603 ++++++++++++++++++ .../render_including_minDist/render.py | 349 ++++++++++ .../weights_correlation/README.txt | 28 + .../all_cnns_mean_correlation.py | 274 ++++++++ .../weights_correlation/create_gabor_dict.py | 87 +++ .../weights_correlation/draw_input_fields.py | 156 +++++ .../weight visualization/plot_as_grid.py | 194 ++++++ .../weight visualization/plot_weights.py | 101 +++ thesis code/shallow net/README.txt | 12 + thesis code/shallow net/config.json | 53 ++ thesis code/shallow net/corner_loop_final.sh | 13 + .../functions/alicorn_data_loader.py | 107 ++++ .../shallow net/functions/analyse_network.py | 103 +++ .../shallow net/functions/create_logger.py | 40 ++ thesis code/shallow net/functions/make_cnn.py | 114 ++++ .../functions/plot_intermediate.py | 84 +++ thesis code/shallow net/functions/set_seed.py | 12 + thesis code/shallow net/functions/test.py | 58 ++ thesis code/shallow net/functions/train.py | 80 +++ 36 files changed, 5376 insertions(+) create mode 100644 thesis code/network analysis/freeParamCalc.py create mode 100644 thesis code/network analysis/minimal_architecture/README.txt create mode 100644 thesis code/network analysis/minimal_architecture/config.json create mode 100644 thesis code/network analysis/minimal_architecture/get_trained_models.py create mode 100644 thesis code/network analysis/minimal_architecture/pfinkel_performance_test64.py create mode 100644 thesis code/network analysis/minimal_architecture/training_loop.sh create mode 100644 thesis code/network analysis/optimal_stimulus/README.txt create mode 100644 thesis code/network analysis/optimal_stimulus/optimal_stimulus.py create mode 100644 thesis code/network analysis/optimal_stimulus/optimal_stimulus_20cnns.py create mode 100644 thesis code/network analysis/orientation_tuning/README.txt create mode 100644 thesis code/network analysis/orientation_tuning/fit_statistics.py create mode 100644 thesis code/network analysis/orientation_tuning/fitkarotte.py create mode 100644 thesis code/network analysis/orientation_tuning/gabor_dict_32o_8p.npy create mode 100644 thesis code/network analysis/orientation_tuning/orientation_tuning_curve.py create mode 100644 thesis code/network analysis/orientation_tuning/plot_fit_statistics.py create mode 100644 thesis code/network analysis/psychometric_curves/README.txt create mode 100644 thesis code/network 
analysis/psychometric_curves/error_bar_performance_pfinkel.py create mode 100644 thesis code/network analysis/render_including_minDist/contours.py create mode 100644 thesis code/network analysis/render_including_minDist/render.py create mode 100644 thesis code/network analysis/weights_correlation/README.txt create mode 100644 thesis code/network analysis/weights_correlation/all_cnns_mean_correlation.py create mode 100644 thesis code/network analysis/weights_correlation/create_gabor_dict.py create mode 100644 thesis code/network analysis/weights_correlation/draw_input_fields.py create mode 100644 thesis code/network analysis/weights_correlation/weight visualization/plot_as_grid.py create mode 100644 thesis code/network analysis/weights_correlation/weight visualization/plot_weights.py create mode 100644 thesis code/shallow net/README.txt create mode 100644 thesis code/shallow net/config.json create mode 100644 thesis code/shallow net/corner_loop_final.sh create mode 100644 thesis code/shallow net/functions/alicorn_data_loader.py create mode 100644 thesis code/shallow net/functions/analyse_network.py create mode 100644 thesis code/shallow net/functions/create_logger.py create mode 100644 thesis code/shallow net/functions/make_cnn.py create mode 100644 thesis code/shallow net/functions/plot_intermediate.py create mode 100644 thesis code/shallow net/functions/set_seed.py create mode 100644 thesis code/shallow net/functions/test.py create mode 100644 thesis code/shallow net/functions/train.py diff --git a/thesis code/network analysis/freeParamCalc.py b/thesis code/network analysis/freeParamCalc.py new file mode 100644 index 0000000..015b085 --- /dev/null +++ b/thesis code/network analysis/freeParamCalc.py @@ -0,0 +1,49 @@ +import torch + + +def calc_free_params(from_loaded_model: bool, model_name: str | None): + """ + * Calculates the number of free parameters of a CNN + * either from a trained model or by entering the respective parameters + on the command line + """ + + if from_loaded_model: + # path to NN + PATH = f"D:/Katha/Neuroscience/Semester 4/newCode/kk_contour_net_shallow-main/trained_models/{model_name}" + + # load and evaluate model + model = torch.load(PATH).to("cpu") + model.eval() + print(model) + + total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) + print(f"Total number of free parameters: {total_params}") + else: + print("\n##########################") + input_out_channel_size = input( + "Enter output channel size (comma separated, including output layer): " + ) + out_channel_size = [1] + [int(x) for x in input_out_channel_size.split(",")] + + input_kernel_sizes = input( + "Enter kernel sizes of respective layers (comma separated, including output layer): " + ) + kernel_sizes = [int(x) for x in input_kernel_sizes.split(",")] + + total_params = 0 + # each conv layer has in_ch * k * k * out_ch weights plus one bias per output channel + for i in range(1, len(out_channel_size)): + input_size = out_channel_size[i - 1] + out_size = out_channel_size[i] + kernel = kernel_sizes[i - 1] + bias = out_channel_size[i] + num_free_params = input_size * kernel * kernel * out_size + bias + total_params += num_free_params + print(f"Total number of free parameters: {total_params}") + + +if __name__ == "__main__": + # model name + nn = "ArghCNN_numConvLayers3_outChannels[8, 8, 8]_kernelSize[7, 15]_leaky relu_stride1_trainFirstConvLayerTrue_seed291857_Natural_1351Epoch_3107-2121.pt" + + calc_free_params(from_loaded_model=False, model_name=nn) diff --git a/thesis code/network analysis/minimal_architecture/README.txt b/thesis code/network
analysis/minimal_architecture/README.txt new file mode 100644 index 0000000..02dd410 --- /dev/null +++ b/thesis code/network analysis/minimal_architecture/README.txt @@ -0,0 +1,18 @@ +Folder minimal_architecture: + +1. config.json: +* json file with all configurations and cnn parameters + +2. training_loop.sh: +* bash script to train the 64 cnns + + +3. get_trained_models: +* searches for the saved trained models in a directory +* chooses model based on the largest saved epoch in the save-name + + +4. pfinkel_performance_test64: +* load all models extracted by 'get_trained_models' +* test them on all stimulus conditions +* sort their performances either after number of free parameters, or architecture \ No newline at end of file diff --git a/thesis code/network analysis/minimal_architecture/config.json b/thesis code/network analysis/minimal_architecture/config.json new file mode 100644 index 0000000..4c84655 --- /dev/null +++ b/thesis code/network analysis/minimal_architecture/config.json @@ -0,0 +1,368 @@ +{ + "data_path": "/home/kk/Documents/Semester4/code/RenderStimuli/Output/", + "save_logging_messages": true, // (true), false + "display_logging_messages": true, // (true), false + "batch_size_train": 500, + "batch_size_test": 250, + "max_epochs": 2000, + "save_model": true, + "conv_0_kernel_size": 11, + "mp_1_kernel_size": 3, + "mp_1_stride": 2, + "use_plot_intermediate": true, // true, (false) + "stimuli_per_pfinkel": 10000, + "num_pfinkel_start": 0, + "num_pfinkel_stop": 100, + "num_pfinkel_step": 10, + "precision_100_percent": 0, // (4) + "train_first_layer": true, // true, (false) + "save_ever_x_epochs": 100, // (10) + "activation_function": "leaky relu", // tanh, relu, (leaky relu), none + "leak_relu_negative_slope": 0.1, // (0.1) + "switch_leakyR_to_relu": false, + // LR Scheduler -> + "use_scheduler": true, // (true), false + "scheduler_verbose": true, + "scheduler_factor": 0.1, //(0.1) + "scheduler_patience": 10, // (10) + "scheduler_threshold": 1e-5, // (1e-4) + "minimum_learning_rate": 1e-8, + "learning_rate": 0.0001, + // <- LR Scheduler + "pooling_type": "max", // (max), average, none + "conv_0_enable_softmax": false, // true, (false) + "use_adam": true, // (true) => adam, false => SGD + "condition": "Natural", + "scale_data": 255.0, // (255.0) + "conv_out_channels_list": [ + [ + 8, + 8, + 8 + ], + [ + 8, + 8, + 6 + ], + [ + 8, + 8, + 4 + ], + [ + 8, + 8, + 2 + ], + [ + 8, + 6, + 8 + ], + [ + 8, + 6, + 6 + ], + [ + 8, + 6, + 4 + ], + [ + 8, + 6, + 2 + ], + [ + 8, + 4, + 8 + ], + [ + 8, + 4, + 6 + ], + [ + 8, + 4, + 4 + ], + [ + 8, + 4, + 2 + ], + [ + 8, + 2, + 8 + ], + [ + 8, + 2, + 6 + ], + [ + 8, + 2, + 4 + ], + [ + 8, + 2, + 2 + ], + [ + 6, + 8, + 8 + ], + [ + 6, + 8, + 6 + ], + [ + 6, + 8, + 4 + ], + [ + 6, + 8, + 2 + ], + [ + 6, + 6, + 8 + ], + [ + 6, + 6, + 6 + ], + [ + 6, + 6, + 4 + ], + [ + 6, + 6, + 2 + ], + [ + 6, + 4, + 8 + ], + [ + 6, + 4, + 6 + ], + [ + 6, + 4, + 4 + ], + [ + 6, + 4, + 2 + ], + [ + 6, + 2, + 8 + ], + [ + 6, + 2, + 6 + ], + [ + 6, + 2, + 4 + ], + [ + 6, + 2, + 2 + ], + [ + 4, + 8, + 8 + ], + [ + 4, + 8, + 6 + ], + [ + 4, + 8, + 4 + ], + [ + 4, + 8, + 2 + ], + [ + 4, + 6, + 8 + ], + [ + 4, + 6, + 6 + ], + [ + 4, + 6, + 4 + ], + [ + 4, + 6, + 2 + ], + [ + 4, + 4, + 8 + ], + [ + 4, + 4, + 6 + ], + [ + 4, + 4, + 4 + ], + [ + 4, + 4, + 2 + ], + [ + 4, + 2, + 8 + ], + [ + 4, + 2, + 6 + ], + [ + 4, + 2, + 4 + ], + [ + 4, + 2, + 2 + ], + [ + 2, + 8, + 8 + ], + [ + 2, + 8, + 6 + ], + [ + 2, + 8, + 4 + ], + [ + 2, + 8, + 2 + ], + [ + 2, + 6, + 8 + ], + [ 
+ 2, + 6, + 6 + ], + [ + 2, + 6, + 4 + ], + [ + 2, + 6, + 2 + ], + [ + 2, + 4, + 8 + ], + [ + 2, + 4, + 6 + ], + [ + 2, + 4, + 4 + ], + [ + 2, + 4, + 2 + ], + [ + 2, + 2, + 8 + ], + [ + 2, + 2, + 6 + ], + [ + 2, + 2, + 4 + ], + [ + 2, + 2, + 2 + ] + ], + "conv_kernel_sizes": [ + [ + 7, + 15 + ] + ], + "conv_stride_sizes": [ + 1 + ] +} \ No newline at end of file diff --git a/thesis code/network analysis/minimal_architecture/get_trained_models.py b/thesis code/network analysis/minimal_architecture/get_trained_models.py new file mode 100644 index 0000000..b150ff6 --- /dev/null +++ b/thesis code/network analysis/minimal_architecture/get_trained_models.py @@ -0,0 +1,55 @@ +import glob +import os +import re +import shutil + +""" +get performances from .pt files +""" + +directory = "./trained_models" +string = "Natural" +final_path = "./trained_corners" + + +# list of all files in the directory +files = glob.glob(directory + "/*.pt") + +# filter +filtered_files = [f for f in files if string in f] + +# group by seed +seed_files = {} +for f in filtered_files: + # get seed from filename + match = re.search(r"_seed(\d+)_", f) + if match: + seed = int(match.group(1)) + if seed not in seed_files: + seed_files[seed] = [] + seed_files[seed].append(f) + + +# get saved cnn largests epoch +newest_files = {} +for seed, files in seed_files.items(): + max_epoch = -1 + newest_file = None + for f in files: + # search for epoch + match = re.search(r"_(\d+)Epoch_", f) + if match: + epoch = int(match.group(1)) + if epoch > max_epoch: + max_epoch = epoch + newest_file = f + newest_files[seed] = newest_file + +print(len(newest_files)) + +# move files to new folder +os.makedirs(final_path, exist_ok=True) + +# Copy the files to the new folder +for seed, file in newest_files.items(): + shutil.copy(file, os.path.join(final_path, os.path.basename(file))) diff --git a/thesis code/network analysis/minimal_architecture/pfinkel_performance_test64.py b/thesis code/network analysis/minimal_architecture/pfinkel_performance_test64.py new file mode 100644 index 0000000..03927ec --- /dev/null +++ b/thesis code/network analysis/minimal_architecture/pfinkel_performance_test64.py @@ -0,0 +1,282 @@ +import torch +import numpy as np +import matplotlib.pyplot as plt +import matplotlib as mpl +import os +import datetime +import re + +# import glob +# from natsort import natsorted + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" + +from functions.alicorn_data_loader import alicorn_data_loader +from functions.create_logger import create_logger + + +def sort_and_plot( + extracted_params, + save: bool, + plot_for_each_condition: bool, + name: str, + sort_by="params", +): + figure_path: str = "performance_pfinkel_0210" + os.makedirs(figure_path, exist_ok=True) + + architecture_params = extracted_params.copy() + if sort_by == "params": + architecture_params.sort(key=lambda x: x[1]) + elif sort_by == "accuracy": + architecture_params.sort(key=lambda x: x[-1]) + + sorted_architectures, sorted_params, test_conditions, sorted_performances = zip( + *architecture_params + ) + final_labels = [ + f"{arch[1:-1]} - {params}" + for arch, params in zip(sorted_architectures, sorted_params) + ] + + plt.figure(figsize=(18, 9)) + + # performance for each condition + if plot_for_each_condition: + conditions = ["Coignless", "Natural", "Angular"] + labels = ["Classic", "Corner", "Bridge"] + shift_amounts = [-0.05, 0, 0.05] + save_name = name + "_each_condition" + for i, condition in enumerate(conditions): + # x_vals = 
range(len(sorted_performances)) + jittered_x = np.arange(len(sorted_performances)) + shift_amounts[i] + y_vals = [perf[condition] for perf in test_conditions] + plt.errorbar( + jittered_x, + y_vals, + fmt="D", + markerfacecolor="none", + markeredgewidth=1.5, + label=labels[i], + ) + else: + save_name = name + "_mean" + plt.plot(range(len(sorted_performances)), sorted_performances, marker="o") + + plt.ylabel("Accuracy (in \\%)", fontsize=17) + plt.xticks(range(len(sorted_performances)), final_labels, rotation=90, fontsize=15) + plt.yticks(fontsize=16) + plt.grid(True) + plt.tight_layout() + plt.legend(fontsize=15) + + if save: + plt.savefig( + os.path.join( + figure_path, + f"minimalCNN_64sorted_{sort_by}_{save_name}.pdf", + ), + dpi=300, + bbox_inches="tight", + ) + plt.show() + + +if __name__ == "__main__": + training_con: str = "classic" + model_path: str = "./trained_classic" + print(model_path) + data_path: str = "/home/kk/Documents/Semester4/code/RenderStimuli/Output/" + + # num stimuli per Pfinkel and batch size + stim_per_pfinkel: int = 10000 + batch_size: int = 1000 + + # stimulus condition: + performances_list: list = [] + condition: list[str] = ["Coignless", "Natural", "Angular"] + + # load test data: + num_pfinkel: list = np.arange(0, 100, 10).tolist() + image_scale: float = 255.0 + + # ------------------------------------------ + + # create logger: + logger = create_logger( + save_logging_messages=False, + display_logging_messages=True, + model_name=model_path, + ) + + device_str: str = "cuda:0" if torch.cuda.is_available() else "cpu" + logger.info(f"Using {device_str} device") + device: torch.device = torch.device(device_str) + torch.set_default_dtype(torch.float32) + + # current time: + current = datetime.datetime.now().strftime("%d%m-%H%M") + + # save data + cnn_data: list = [] + cnn_counter: int = 0 + + for filename in os.listdir(model_path): + if filename.endswith(".pt"): + model_filename = os.path.join(model_path, filename) + model = torch.load(model_filename, map_location=device) + model.eval() + print(f"CNN {cnn_counter+1} :{model_filename}") + + # number free parameters for current CNN + num_free_params = sum( + p.numel() for p in model.parameters() if p.requires_grad + ) + + # save + all_performances: dict = { + condition_name: {pfinkel: [] for pfinkel in num_pfinkel} + for condition_name in condition + } + + for selected_condition in condition: + # save performances: + logger.info(f"Condition: {selected_condition}") + performances: dict = {} + for pfinkel in num_pfinkel: + test_loss: float = 0.0 + correct: int = 0 + pattern_count: int = 0 + + data_test = alicorn_data_loader( + num_pfinkel=[pfinkel], + load_stimuli_per_pfinkel=stim_per_pfinkel, + condition=selected_condition, + logger=logger, + data_path=data_path, + ) + loader = torch.utils.data.DataLoader( + data_test, shuffle=False, batch_size=batch_size + ) + + # start testing network on new stimuli: + logger.info("") + logger.info( + f"-==- Start {selected_condition} " f"Pfinkel {pfinkel}° -==-" + ) + with torch.no_grad(): + for batch_num, data in enumerate(loader): + label = data[0].to(device) + image = data[1].type(dtype=torch.float32).to(device) + image /= image_scale + + # compute prediction error; + output = model(image) + + # Label Typecast: + label = label.to(device) + + # loss and optimization + loss = torch.nn.functional.cross_entropy( + output, label, reduction="sum" + ) + pattern_count += int(label.shape[0]) + test_loss += float(loss) + prediction = output.argmax(dim=1) + correct += 
prediction.eq(label).sum().item() + + total_number_of_pattern: int = int(len(loader)) * int( + label.shape[0] + ) + + # logging: + logger.info( + ( + f"{selected_condition},{pfinkel}° " + "Pfinkel: " + f"[{int(pattern_count)}/{total_number_of_pattern} ({100.0 * pattern_count / total_number_of_pattern:.2f}%)]," + f" Average loss: {test_loss / pattern_count:.3e}, " + "Accuracy: " + f"{100.0 * correct / pattern_count:.2f}% " + ) + ) + + performances[pfinkel] = { + "pfinkel": pfinkel, + "test_accuracy": 100 * correct / pattern_count, + "test_losses": float(loss) / pattern_count, + } + all_performances[selected_condition][pfinkel].append( + 100 * correct / pattern_count + ) + + performances_list.append(performances) + + # store num free params + performances + avg_performance_per_condition = { + cond: np.mean([np.mean(perfs) for perfs in pfinkel_dict.values()]) + for cond, pfinkel_dict in all_performances.items() + } + avg_performance_overall = np.mean( + list(avg_performance_per_condition.values()) + ) + + # extract CNN config: + match = re.search(r"_outChannels\[(\d+), (\d+), (\d+)\]_", filename) + if match: + out_channels = ( + [1] + [int(match.group(i)) for i in range(1, 3 + 1)] + [2] + ) + + # number of free parameters and performances + cnn_data.append( + ( + out_channels, + num_free_params, + avg_performance_per_condition, + avg_performance_overall, + ) + ) + + else: + print("No files found!") + break + + # save all 64 performances + torch.save( + cnn_data, + f"{model_path}.pt", + ) + + # plot + sort_and_plot( + cnn_data, + save=True, + plot_for_each_condition=True, + name=training_con, + sort_by="params", + ) + sort_and_plot( + cnn_data, + save=True, + plot_for_each_condition=False, + name=training_con, + sort_by="params", + ) + sort_and_plot( + cnn_data, + save=True, + plot_for_each_condition=True, + name=training_con, + sort_by="accuracy", + ) + sort_and_plot( + cnn_data, + save=True, + plot_for_each_condition=False, + name=training_con, + sort_by="accuracy", + ) + + logger.info("-==- DONE -==-") diff --git a/thesis code/network analysis/minimal_architecture/training_loop.sh b/thesis code/network analysis/minimal_architecture/training_loop.sh new file mode 100644 index 0000000..fc73dde --- /dev/null +++ b/thesis code/network analysis/minimal_architecture/training_loop.sh @@ -0,0 +1,11 @@ +Directory="/home/kk/Documents/Semester4/code/Run64Variations" +Priority="0" +echo $Directory +mkdir $Directory/argh_log_corner +for out_channels_idx in {0..63}; do + for kernel_size_idx in {0..0}; do + for stride_idx in {0..0}; do + echo "hostname; cd $Directory ; /home/kk/P3.10/bin/python3 cnn_training.py --idx-conv-out-channels-list $out_channels_idx --idx-conv-kernel-sizes $kernel_size_idx --idx-conv-stride-sizes $stride_idx -s \$JOB_ID" | qsub -o $Directory/argh_log_classic -j y -p $Priority -q gp4u,gp3u -N itsCorn + done + done +done diff --git a/thesis code/network analysis/optimal_stimulus/README.txt b/thesis code/network analysis/optimal_stimulus/README.txt new file mode 100644 index 0000000..b35910c --- /dev/null +++ b/thesis code/network analysis/optimal_stimulus/README.txt @@ -0,0 +1,8 @@ +Folder optimal_stimulus + +1. optimal_stimulus: +* for single trained model +* generates optimal stimulus for neuron in selected layer + +2. 
optimal_stimulus_20cnns: +* generates the stimulus for a neuron in the same layer of all 20 CNNs \ No newline at end of file diff --git a/thesis code/network analysis/optimal_stimulus/optimal_stimulus.py b/thesis code/network analysis/optimal_stimulus/optimal_stimulus.py new file mode 100644 index 0000000..7e858b5 --- /dev/null +++ b/thesis code/network analysis/optimal_stimulus/optimal_stimulus.py @@ -0,0 +1,219 @@ +# %% +import torch +import random +import re +import matplotlib.pyplot as plt +import matplotlib.patches as patch +import matplotlib as mpl + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" + +import os +import sys + +parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(parent_dir) +from functions.analyse_network import analyse_network +from functions.set_seed import set_seed + +# define parameters +num_iterations: int = 100000 +learning_rate: float = 0.1 +apply_input_mask: bool = True +mark_region_in_plot: bool = True +sheduler_patience: int = 500 +sheduler_factor: float = 0.9 +sheduler_eps = 1e-08 +target_image_active: float = 1e4 +random_seed = random.randint(0, 100) +save_final: bool = True +model_str: str = "CORNER_888" + +# set seed +set_seed(random_seed) +print(f"Random seed: {random_seed}") + +# path to NN +condition: str = "corner_888_poster" +pattern = r"seed\d+_Natural_\d+Epoch" +nn = "ArghCNN_numConvLayers3_outChannels[8, 8, 8]_kernelSize[7, 15]_leaky relu_stride1_trainFirstConvLayerTrue_seed291857_Natural_1351Epoch_3107-2121.pt" +PATH = f"./trained_models/{nn}" +device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + +# %% +# load and eval model +model = torch.load(PATH).to(device) +model.eval() +print("Full network:") +print(model) +print("") + + +# enter index to plot: +idx = int(input("Please select layer: ")) +print(f"Selected layer: {model[idx]}") +assert idx < len(model) +model = model[: idx + 1] + +# random input +input_img = torch.rand(1, 200, 200).to(device) +input_img = input_img.unsqueeze(0) +input_img.requires_grad_(True) # type: ignore +print(input_img.min(), input_img.max()) + +input_shape = input_img.shape +assert input_shape[-2] == input_shape[-1] +coordinate_list, layer_type_list, pixel_used = analyse_network( + model=model, input_shape=int(input_shape[-1]) +) + + +output_shape = model(input_img).shape + + +target_image = torch.zeros( + (*output_shape,), dtype=input_img.dtype, device=input_img.device +) + + +# image to parameter (to be optimized) +input_parameter = torch.nn.Parameter(input_img) + + +if len(target_image.shape) == 2: + print((f"Available max positions: f:{target_image.shape[1] - 1} ")) + + # select neuron and plot for all feature maps + neuron_f = int(input("Please select neuron_f: ")) + print(f"Selected neuron {neuron_f}") + target_image[0, neuron_f] = 1e4 +else: + print( + ( + f"Available max positions: f:{target_image.shape[1] - 1} " + f"x:{target_image.shape[2]} y:{target_image.shape[3]}" + ) + ) + + # select neuron and plot for all feature maps
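+ # the target here is 4-D (batch x feature map x x-pos x y-pos): the feature map + # is chosen interactively, while the spatial position is fixed to the map centre + # below, so the optimised stimulus lies inside that unit's receptive field + # (everything outside it is masked out via input_mask)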
+ neuron_f = int(input("Please select neuron_f: ")) + neuron_x = target_image.shape[2] // 2 + neuron_y = target_image.shape[3] // 2 + print(f"Selected neuron {neuron_f}, {neuron_x}, {neuron_y}") + target_image[0, neuron_f, neuron_x, neuron_y] = target_image_active + + # Input mask -> + active_input_x = coordinate_list[-1][:, neuron_x].clone() + active_input_y = coordinate_list[-1][:, neuron_y].clone() + + input_mask: torch.Tensor = torch.zeros_like(input_img) + + input_mask[ + :, + :, + active_input_x.type(torch.int64).unsqueeze(-1), + active_input_y.type(torch.int64).unsqueeze(0), + ] = 1 + + rect_x = [int(active_input_x.min()), int(active_input_x.max())] + rect_y = [int(active_input_y.min()), int(active_input_y.max())] + # <- Input mask + + if apply_input_mask: + with torch.no_grad(): + input_img *= input_mask + + +optimizer = torch.optim.Adam([{"params": input_parameter}], lr=learning_rate) + +scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + optimizer, + patience=sheduler_patience, + factor=sheduler_factor, + eps=sheduler_eps * 0.1, +) + + +counter: int = 0 +while (optimizer.param_groups[0]["lr"] > sheduler_eps) and (counter < num_iterations): + optimizer.zero_grad() + + output = model(input_parameter) + + loss = torch.nn.functional.mse_loss(output, target_image) + loss.backward() + + if counter % 1000 == 0: + print( + f"{counter} : loss={float(loss):.3e} lr={optimizer.param_groups[0]['lr']:.3e}" + ) + + optimizer.step() + + if apply_input_mask and len(target_image.shape) != 2: + with torch.no_grad(): + input_parameter.data[torch.where(input_mask == 0)] = 0.0 + + with torch.no_grad(): + max_data = torch.abs(input_parameter.data).max() + if max_data > 1.0: + input_parameter.data /= max_data + + if ( + torch.isfinite(input_parameter.data).sum().cpu() + != torch.tensor(input_parameter.data.size()).prod() + ): + print(f"Found NaN in step: {counter}, use a smaller initial lr") + exit() + + scheduler.step(float(loss)) + counter += 1 + +# save image +if save_final: + # get short model name: + matches = re.findall(pattern, nn) + model_short = "".join(["".join(match) for match in matches]) + save_name = ( + f"optimal_model{model_short}_layer{idx}_feature{neuron_f}_seed{random_seed}.pt" + ) + + # filepath: + folderpath = f"./other_{condition}_optimal" + os.makedirs(folderpath, exist_ok=True) + torch.save(input_img.squeeze().detach().cpu(), os.path.join(folderpath, save_name)) + +# plot image: +_, ax = plt.subplots() + +ax.imshow(input_img.squeeze().detach().cpu().numpy(), cmap="gray") + +plt.yticks(fontsize=15) +plt.xticks(fontsize=15) + + +if len(target_image.shape) != 2 and mark_region_in_plot: + edgecolor = "sienna" + kernel = patch.Rectangle( + (rect_y[0], rect_x[0]), + int(rect_y[1] - rect_y[0]), + int(rect_x[1] - rect_x[0]), + linewidth=1.2, + edgecolor=edgecolor, + facecolor="none", + ) + ax.add_patch(kernel) + +figure_path = f"./other_{condition}_optimal" +os.makedirs(figure_path, exist_ok=True) +plt.savefig( + os.path.join( + figure_path, + f"{save_name}_{model_str}.pdf", + ), + dpi=300, + bbox_inches="tight", +) + +plt.show(block=True) diff --git a/thesis code/network analysis/optimal_stimulus/optimal_stimulus_20cnns.py b/thesis code/network analysis/optimal_stimulus/optimal_stimulus_20cnns.py new file mode 100644 index 0000000..25e3aeb --- /dev/null +++ b/thesis code/network analysis/optimal_stimulus/optimal_stimulus_20cnns.py @@ -0,0 +1,293 @@ +# %% +import torch +import numpy as np +import random +import re +import matplotlib.pyplot as plt +import matplotlib.patches as patch 
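+# same gradient-based optimal-stimulus search as in optimal_stimulus.py, repeated +# here for one neuron in the same layer of each of the 20 trained CNNs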
+import matplotlib as mpl + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" +mpl.rcParams["font.size"] = 15 + +import os +import sys + +parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(parent_dir) +from functions.analyse_network import analyse_network +from functions.set_seed import set_seed + +# set seet +random_seed = random.randint(0, 100) +set_seed(random_seed) +print(f"Random seed: {random_seed}") + + +def get_file_list_all_cnns(dir: str) -> list: + all_results: list = [] + for filename in os.listdir(dir): + if filename.endswith(".pt"): + print(os.path.join(dir, filename)) + all_results.append(os.path.join(dir, filename)) + + return all_results + + +def show_single_optimal_stimulus(model_list, save: bool = False, cnn: str = "CORNER"): + first_run: bool = True + chosen_layer_idx: int + chosen_neuron_f_idx: int + chosen_neuron_x_idx: int + chosen_neuron_y_idx: int + mean_opt_stim_list: list = [] + fig, axs = plt.subplots(4, 5, figsize=(15, 15)) + for i, load_model in enumerate(model_list): + print(f"\nModel: {i} ") + num_iterations: int = 100000 + learning_rate: float = 0.1 + apply_input_mask: bool = True + mark_region_in_plot: bool = True + sheduler_patience: int = 500 + sheduler_factor: float = 0.9 + sheduler_eps = 1e-08 + target_image_active: float = 1e4 + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + # load model + model = torch.load(load_model).to(device) + model.eval() + + if first_run: + print("Full network:") + print(model) + print("") + + # enter index to plot: + idx = int(input("Please select layer: ")) + assert idx < len(model) + chosen_layer_idx = idx + + print(f"Selected layer: {model[chosen_layer_idx]}") + model = model[: chosen_layer_idx + 1] + + # prepare random input image + input_img = torch.rand(1, 200, 200).to(device) + input_img = input_img.unsqueeze(0) + input_img.requires_grad_(True) # type: ignore + + input_shape = input_img.shape + assert input_shape[-2] == input_shape[-1] + coordinate_list, layer_type_list, pixel_used = analyse_network( + model=model, input_shape=int(input_shape[-1]) + ) + + output_shape = model(input_img).shape + + target_image = torch.zeros( + (*output_shape,), dtype=input_img.dtype, device=input_img.device + ) + + # image to parameter (2B optimized) + input_parameter = torch.nn.Parameter(input_img) + + # back to first run: + if first_run: + if len(target_image.shape) == 2: + print((f"Available max positions: f:{target_image.shape[1] - 1} ")) + + # select neuron and plot for all feature maps (?) + neuron_f = int(input("Please select neuron_f: ")) + print(f"Selected neuron {neuron_f}") + chosen_neuron_f_idx = neuron_f + else: + print( + ( + f"Available max positions: f:{target_image.shape[1] - 1} " + f"x:{target_image.shape[2]} y:{target_image.shape[3]}" + ) + ) + + # select neuron and plot for all feature maps (?) 
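+ # the layer and neuron selected for the first model are stored in the + # chosen_* indices and reused for all remaining CNNs, so the 20 optimal + # stimuli all target the same unit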
+ neuron_f = int(input("Please select neuron_f: ")) + neuron_x = target_image.shape[2] // 2 + neuron_y = target_image.shape[3] // 2 + chosen_neuron_f_idx = neuron_f + chosen_neuron_x_idx = neuron_x + chosen_neuron_y_idx = neuron_y + print( + f"Selected neuron {chosen_neuron_f_idx}, {chosen_neuron_x_idx}, {chosen_neuron_y_idx}" + ) + + # keep settings for further runs: + first_run = False + + # keep input values for all cnns + if len(target_image.shape) == 2: + target_image[0, chosen_neuron_f_idx] = 1e4 + else: + target_image[ + 0, chosen_neuron_f_idx, chosen_neuron_x_idx, chosen_neuron_y_idx + ] = target_image_active + + # Input mask -> + active_input_x = coordinate_list[-1][:, neuron_x].clone() + active_input_y = coordinate_list[-1][:, neuron_y].clone() + + input_mask: torch.Tensor = torch.zeros_like(input_img) + + input_mask[ + :, + :, + active_input_x.type(torch.int64).unsqueeze(-1), + active_input_y.type(torch.int64).unsqueeze(0), + ] = 1 + + rect_x = [int(active_input_x.min()), int(active_input_x.max())] + rect_y = [int(active_input_y.min()), int(active_input_y.max())] + # <- Input mask + + if apply_input_mask: + with torch.no_grad(): + input_img *= input_mask + + # start optimization: + optimizer = torch.optim.Adam([{"params": input_parameter}], lr=learning_rate) + + scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + optimizer, + patience=sheduler_patience, + factor=sheduler_factor, + eps=sheduler_eps * 0.1, + ) + + counter: int = 0 + while (optimizer.param_groups[0]["lr"] > sheduler_eps) and ( + counter < num_iterations + ): + optimizer.zero_grad() + + output = model(input_parameter) + + loss = torch.nn.functional.mse_loss(output, target_image) + loss.backward() + + if counter % 1000 == 0: + print( + f"{counter} : loss={float(loss):.3e} lr={optimizer.param_groups[0]['lr']:.3e}" + ) + + optimizer.step() + + if apply_input_mask and len(target_image.shape) != 2: + with torch.no_grad(): + input_parameter.data[torch.where(input_mask == 0)] = 0.0 + + with torch.no_grad(): + max_data = torch.abs(input_parameter.data).max() + if max_data > 1.0: + input_parameter.data /= max_data + + if ( + torch.isfinite(input_parameter.data).sum().cpu() + != torch.tensor(input_parameter.data.size()).prod() + ): + print(f"Found NaN in step: {counter}, use a smaller initial lr") + exit() + + scheduler.step(float(loss)) + counter += 1 + mean_opt_stim_list.append(input_img.squeeze().detach().cpu().numpy()) + + # plot image: + ax = axs[i // 5, i % 5] + im = ax.imshow(input_img.squeeze().detach().cpu().numpy(), cmap="gray") + cbar = fig.colorbar(im, ax=ax, fraction=0.046, pad=0.04) + ax.set_title(f"Model {i+1}", fontsize=13) + cbar.ax.tick_params(labelsize=12) + + if len(target_image.shape) != 2 and mark_region_in_plot: + edgecolor = "sienna" + kernel = patch.Rectangle( + (rect_y[0], rect_x[0]), + int(rect_y[1] - rect_y[0]), + int(rect_x[1] - rect_x[0]), + linewidth=1.2, + edgecolor=edgecolor, + facecolor="none", + ) + ax.add_patch(kernel) + + plt.tight_layout() + # save image + if save: + save_name = f"single_optimal_stimulus_{cnn}_layer{chosen_layer_idx}_feature{chosen_neuron_f_idx}" + folderpath = "./all20_optimal_stimuli" + os.makedirs(folderpath, exist_ok=True) + torch.save( + input_img.squeeze().detach().cpu(), + os.path.join(folderpath, save_name) + ".pt", + ) + plt.savefig( + f"{os.path.join(folderpath, save_name)}.pdf", + dpi=300, + bbox_inches="tight", + ) + + plt.show(block=True) + + if len(target_image.shape) == 2: + return mean_opt_stim_list, chosen_neuron_f_idx, chosen_layer_idx + else: + 
return ( + mean_opt_stim_list, + (chosen_layer_idx, chosen_neuron_f_idx), + (chosen_neuron_x_idx, chosen_neuron_y_idx), + ) + + +def plot_mean_optimal_stimulus( + overall_optimal_stimuli, + chosen_layer_idx: int, + chosen_neuron_f_idx: int, + save: bool = False, + cnn: str = "CORNER", +): + fig, axs = plt.subplots(figsize=(15, 15)) + mean_optimal_stimulus = np.mean(overall_optimal_stimuli, axis=0) + im = axs.imshow(mean_optimal_stimulus, cmap="gray") + cbar = fig.colorbar(im, ax=axs, fraction=0.046, pad=0.04) + cbar.ax.tick_params(labelsize=15) + + plt.tight_layout() + # save image + if save: + save_name = f"overall_mean_optimal_stimulus_{cnn}_layer{chosen_layer_idx}_feature{chosen_neuron_f_idx}" + folderpath = "./mean_optimal_stimulus" + os.makedirs(folderpath, exist_ok=True) + torch.save(mean_optimal_stimulus, os.path.join(folderpath, save_name) + ".pt") + plt.savefig( + f"{os.path.join(folderpath, save_name)}.pdf", + dpi=300, + ) + + plt.show(block=True) + + +if __name__ == "__main__": + # path to NN + PATH_corner = "./classic_3288_fest" + all_cnns_corner = get_file_list_all_cnns(dir=PATH_corner) + opt_stim_list, feature_idx, layer_idx = show_single_optimal_stimulus( + all_cnns_corner, save=True, cnn="CLASSIC_3288_fest" + ) + + # average optimal stimulus: + # plot_mean_optimal_stimulus( + # opt_stim_list, + # save=True, + # cnn="CORNER_3288_fest", + # chosen_layer_idx=layer_idx, + # chosen_neuron_f_idx=feature_idx, + # ) diff --git a/thesis code/network analysis/orientation_tuning/README.txt b/thesis code/network analysis/orientation_tuning/README.txt new file mode 100644 index 0000000..ab1f47d --- /dev/null +++ b/thesis code/network analysis/orientation_tuning/README.txt @@ -0,0 +1,14 @@ +Folder orientation_tuning: + + +1. orientation_tuning_curve: +* generates the original tuning curve by convolving the Gabor patches with the weight matrices of C1 +* Gabor patches file: gabor_dict_32o_8p.npy + +2. fitkarotte: +* implements the fitting procedure with 1 to 3 von Mises functions +* plots the fitted tuning curves + +3.
fit_statistics: +* contains all statistical tests for the 20 trained CNNs of each stimulus condition +* calls the 'plot_fit_statistics' function to plot the data \ No newline at end of file diff --git a/thesis code/network analysis/orientation_tuning/fit_statistics.py b/thesis code/network analysis/orientation_tuning/fit_statistics.py new file mode 100644 index 0000000..e3d6bca --- /dev/null +++ b/thesis code/network analysis/orientation_tuning/fit_statistics.py @@ -0,0 +1,475 @@ +import numpy as np +import fitkarotte +from orientation_tuning_curve import load_data_from_cnn # noqa +import plot_fit_statistics +import warnings +from scipy.stats import ranksums + +# suppress warnings +warnings.filterwarnings("ignore") + + +def get_general_data_info(data, print_mises_all_cnn: bool): + num_cnns = len(data) + num_weights_per_cnn = [len(cnn_results) for cnn_results in data] + + num_fits_per_cnn = {1: [0] * num_cnns, 2: [0] * num_cnns, 3: [0] * num_cnns} + + for idx, cnn_results in enumerate(data): + for _, fit in cnn_results: + curve = fit["curve"] + num_fits_per_cnn[curve][idx] += 1 + + print("\n\nNumber of CNNs:", num_cnns) + print("Number of weights saved for each CNN:", num_weights_per_cnn) + print("Number of fits with 1 von Mises function per CNN:", num_fits_per_cnn[1]) + print("Number of fits with 2 von Mises functions per CNN:", num_fits_per_cnn[2]) + print("Number of fits with 3 von Mises functions per CNN:", num_fits_per_cnn[3]) + + # mean and stdev for each type of fit + mean_1_mises = np.mean(num_fits_per_cnn[1]) + std_1_mises = np.std(num_fits_per_cnn[1]) + mean_2_mises = np.mean(num_fits_per_cnn[2]) + std_2_mises = np.std(num_fits_per_cnn[2]) + mean_3_mises = np.mean(num_fits_per_cnn[3]) + std_3_mises = np.std(num_fits_per_cnn[3]) + + print( + f"Mean number of fits with 1 von Mises function: {mean_1_mises:.2f} (std: {std_1_mises:.2f})" + ) + print( + f"Mean number of fits with 2 von Mises functions: {mean_2_mises:.2f} (std: {std_2_mises:.2f})" + ) + print( + f"Mean number of fits with 3 von Mises functions: {mean_3_mises:.2f} (std: {std_3_mises:.2f})" + ) + + if print_mises_all_cnn: + print("--================================--") + for idx_cnn, (num_1_mises, num_2_mises, num_3_mises) in enumerate( + zip(num_fits_per_cnn[1], num_fits_per_cnn[2], num_fits_per_cnn[3]) + ): + print( + f"CNN {idx_cnn+1}:\t# 1 Mises: {num_1_mises},\t# 2 Mises: {num_2_mises},\t# 3 Mises: {num_3_mises}" + ) + + return ( + num_fits_per_cnn, + mean_1_mises, + mean_2_mises, + mean_3_mises, + std_1_mises, + std_2_mises, + std_3_mises, + ) + + +def ratio_amplitude_two_mises(data, mean_std: bool = False): + """ + * This function calculates the mean amplitude ratio of those weights + of the first layer that were fitted with 2 von Mises functions + * It first calculates the mean ratio for every single CNN + (of the overall 20 CNNs) + * It then computes the overall mean ratio for the weights + of all 20 CNNs that were fitted with 2 von Mises functions + """ + num_cnns = len(data) + mean_ratio_per_cnn = [0] * num_cnns + + for idx, cnn_results in enumerate(data): + ratio_list: list = [] + count_num_2mises: int = 0 + for _, fit in cnn_results: + curve = fit["curve"] + if curve == 2 and fit["fit_params"] is not None: + count_num_2mises += 1 + first_amp = fit["fit_params"][0] + sec_amp = fit["fit_params"][3] + + if sec_amp < first_amp: + ratio = sec_amp / first_amp + else: + ratio = first_amp / sec_amp + + if not (ratio > 1.0 or ratio < 0): + ratio_list.append(ratio) + else: + print(f"\nRATIO OUT OF RANGE FOR: CNN:{idx},
weight{_}\n") + + # print(f"CNN [{idx}]: num fits with 2 von mises = {count_num_2mises}") + mean_ratio_per_cnn[idx] = np.mean(ratio_list) + + # calc mean difference over all 20 CNNs: + if mean_std: + mean_all_cnns = np.mean(mean_ratio_per_cnn) + std_all_cnns = np.std(mean_ratio_per_cnn) + print("\n-=== Mean ratio between 2 amplitudes ===-") + print(f"Mean ratio of all {len(mean_ratio_per_cnn)} CNNs: {mean_all_cnns}") + print(f"Stdev of ratio of all {len(mean_ratio_per_cnn)} CNNs: {std_all_cnns}") + + return mean_all_cnns, std_all_cnns + + else: # get median and percentile + percentiles = np.percentile(mean_ratio_per_cnn, [10, 25, 50, 75, 90]) + + print("\n-=== Percentiles of ratio between 2 amplitudes ===-") + print(f"10th Percentile: {percentiles[0]}") + print(f"25th Percentile: {percentiles[1]}") + print(f"Median (50th Percentile): {percentiles[2]}") + print(f"75th Percentile: {percentiles[3]}") + print(f"90th Percentile: {percentiles[4]}") + + # return mean_all_cnns, std_all_cnns + return percentiles[2], (percentiles[1], percentiles[3]) + + +def ratio_amplitude_three_mises(data, mean_std: bool = False): + """ + * returns: mean21, std21, mean32, std32 + * This function calculates the mean amplitude ratio of those weights + of the first layer that were fitted with 3 von Mises functions + * It first calculates the mean ratio for every single CNN + (of the overall 20 CNNs) + * It then computes the overall mean ratio for the weights + of all 20 CNNs that were fitted with 3 von Mises functions + """ + num_cnns = len(data) + mean_ratio_per_cnn21 = [0] * num_cnns + mean_ratio_per_cnn32 = [0] * num_cnns + + for idx, cnn_results in enumerate(data): + ratio_list21: list = [] + ratio_list32: list = [] + count_num_3mises: int = 0 + for _, fit in cnn_results: + curve = fit["curve"] + if curve == 3 and fit["fit_params"] is not None: + count_num_3mises += 1 + first_amp = fit["fit_params"][0] + sec_amp = fit["fit_params"][3] + third_amp = fit["fit_params"][6] + + if sec_amp < first_amp: + ratio21 = sec_amp / first_amp + else: + ratio21 = first_amp / sec_amp + + if third_amp < sec_amp: + ratio32 = third_amp / sec_amp + else: + ratio32 = sec_amp / third_amp + + if not (ratio21 > 1.0 or ratio32 > 1.0 or ratio21 < 0 or ratio32 < 0): + ratio_list21.append(ratio21) + ratio_list32.append(ratio32) + else: + print(f"\nRATIO OUT OF RANGE FOR: CNN:{idx}, weight{_}\n") + + # print(f"CNN [{idx}]: num fits with 3 von Mises = + # {count_num_3mises}") + if len(ratio_list21) != 0: + mean_ratio_per_cnn21[idx] = np.mean(ratio_list21) + mean_ratio_per_cnn32[idx] = np.mean(ratio_list32) + else: + mean_ratio_per_cnn21[idx] = None # type: ignore + mean_ratio_per_cnn32[idx] = None # type: ignore + + mean_ratio_per_cnn21 = [x for x in mean_ratio_per_cnn21 if x is not None] + mean_ratio_per_cnn32 = [x for x in mean_ratio_per_cnn32 if x is not None] + + # calc mean difference over all 20 CNNs: + + if mean_std: + mean_all_cnns21 = np.mean(mean_ratio_per_cnn21) + std_all_21 = np.std(mean_ratio_per_cnn21) + mean_all_cnns32 = np.mean(mean_ratio_per_cnn32) + std_all_32 = np.std(mean_ratio_per_cnn32) + + print("\n-=== Mean ratio between 3 preferred orientations ===-") + print(f"Ratio 2/1 of all {len(mean_ratio_per_cnn21)} CNNs: {mean_all_cnns21}") + print( + f"Stdev of ratio 2/1 of all {len(mean_ratio_per_cnn21)} CNNs: {std_all_21}" + ) + print(f"Ratio 3/2 of all {len(mean_ratio_per_cnn32)} CNNs: {mean_all_cnns32}") + print( + f"Stdev of ratio 3/2 of all {len(mean_ratio_per_cnn32)} CNNs: {std_all_32}" + ) + + return mean_all_cnns21, std_all_21,
mean_all_cnns32, std_all_32 + + else: # get median and percentile: + percentiles_21 = np.percentile(mean_ratio_per_cnn21, [10, 25, 50, 75, 90]) + percentiles_32 = np.percentile(mean_ratio_per_cnn32, [10, 25, 50, 75, 90]) + + # get percentile 25 and 75 + percentile25_32 = percentiles_32[1] + percentile75_32 = percentiles_32[-2] + percentile25_21 = percentiles_21[1] + percentile75_21 = percentiles_21[-2] + + print("\n-=== Percentiles of ratio between 3 amplitudes ===-") + print(f"10th Percentile 3->2: {percentiles_32[0]}") + print(f"10th Percentile 2->1: {percentiles_21[0]}") + print(f"25th Percentile 3->2: {percentiles_32[1]}") + print(f"25th Percentile 2->1: {percentiles_21[1]}") + print(f"Median (50th Percentile 3->2): {percentiles_32[2]}") + print(f"Median (50th Percentile 2->1): {percentiles_21[2]}") + print(f"75th Percentile 3->2: {percentiles_32[3]}") + print(f"75th Percentile 2->1: {percentiles_21[3]}") + print(f"90th Percentile 3->2: {percentiles_32[4]}") + print(f"90th Percentile 2->1: {percentiles_21[4]}") + + return ( + percentiles_21[2], + (percentile25_21, percentile75_21), + percentiles_32[2], + (percentile25_32, percentile75_32), + ) + + +def willy_is_not_whitney_test(data_classic, data_corner): + """ + * Test does not assume a normal distribution + * Compares 2 independent groups + """ + from scipy.stats import mannwhitneyu + + # call test + statistic, p_value = mannwhitneyu(data_classic, data_corner) + + # results + print("\nMann-Whitney U Test Statistic:", statistic) + print("Mann-Whitney U Test p-value:", p_value) + + # check significance: + print("Null-hypothesis: distributions are the same.") + alpha = 0.05 + if p_value < alpha: + print("The distributions are significantly different.") + else: + print("The distributions are not significantly different.") + + return p_value + + +def ks(data_classic, data_corner): + from scipy.stats import ks_2samp + + ks_statistic, p_value = ks_2samp(data_classic, data_corner) + + print("\nKolmogorov-Smirnov Test - p-value:", p_value) + print("Kolmogorov-Smirnov Test - ks_statistic:", ks_statistic) + alpha = 0.05 + if p_value < alpha: + print("The distributions for von Mises functions are significantly different.") + + return p_value + + +def shapiro(fits_per_mises, num_mises: int, alpha: float = 0.05): + """ + Tests if data has a normal distribution + * 0-hyp: data is normally distributed + * low p-val: data not normally distributed + """ + from scipy.stats import shapiro + + statistic, p_value = shapiro(fits_per_mises) + print(f"\nShapiro-Wilk Test for {num_mises} von Mises function - p-val :", p_value) + print( + f"Shapiro-Wilk Test for {num_mises} von Mises function - statistic :", statistic + ) + + # set alpha + if p_value < alpha: + print("P-val < alpha. Reject 0-hypothesis. Data is not normally distributed") + else: + print("P-val > alpha. Keep 0-hypothesis. Data is normally distributed") + + return p_value + + +def agostino_pearson(fits_per_mises, num_mises: int, alpha: float = 0.05): + """ + Tests if data has a normal distribution + * 0-hyp: data is normally distributed + * low p-val: data not normally distributed + """ + from scipy import stats + + statistic, p_value = stats.normaltest(fits_per_mises) + print( + f"\nD'Agostino-Pearson Test for {num_mises} von Mises function - p-val :", + p_value, + ) + print( + f"D'Agostino-Pearson Test for {num_mises} von Mises function - statistic :", + statistic, + ) + + # set alpha + if p_value < alpha: + print("P-val < alpha. Reject 0-hypothesis.
Data is not normally distributed") + else: + print("P-val > alpha. Keep 0-hypothesis. Data is normally distributed") + + return p_value + + +if __name__ == "__main__": + num_thetas = 32 + dtheta = 2 * np.pi / num_thetas + theta = dtheta * np.arange(num_thetas) + threshold: float = 0.1 + + # to do statistics on corner + directory_corner: str = "D:/Katha/Neuroscience/Semester 4/newCode/kk_contour_net_shallow-main/corner_888" + all_results_corner = fitkarotte.analyze_cnns(dir=directory_corner) + + # analyze + print("-=== CORNER ===-") + # amplitude ratios + ratio_corner_21, std_corner_21 = ratio_amplitude_two_mises(data=all_results_corner) + ( + ratio_corner_321, + std_corner_321, + ratio_corner_332, + std_corner_332, + ) = ratio_amplitude_three_mises(data=all_results_corner) + + # general data + ( + corner_num_fits, + mean_corner_1, + mean_corner_2, + mean_corner_3, + std_corner_1, + std_corner_2, + std_corner_3, + ) = get_general_data_info(data=all_results_corner, print_mises_all_cnn=True) + # analyze_num_curve_fits(data=all_results_corner) + + # to do statistics: CLASSIC + directory_classic: str = "D:/Katha/Neuroscience/Semester 4/newCode/kk_contour_net_shallow-main/classic_888" + all_results_classic = fitkarotte.analyze_cnns(dir=directory_classic) + + # analyze + print("-=== CLASSIC ===-") + # amplitude ratio + ratio_classic_21, std_class_21 = ratio_amplitude_two_mises(data=all_results_classic) + ( + ratio_classic_321, + std_classic_321, + ratio_classic_332, + std_classic_332, + ) = ratio_amplitude_three_mises(data=all_results_classic) + + # general data + ( + classic_num_fits, + mean_classic_1, + mean_classic_2, + mean_classic_3, + std_classic_1, + std_classic_2, + std_classic_3, + ) = get_general_data_info(data=all_results_classic, print_mises_all_cnn=False) + # analyze_num_curve_fits(data=all_results_classic) + + print("################################") + print("-==== plotting hists: compare amplitude ratios ====-") + plot_fit_statistics.plot_mean_percentile_amplit_ratio( + ratio_classic_21=ratio_classic_21, + ratio_classic_321=ratio_classic_321, + ratio_classic_332=ratio_classic_332, + ratio_corner_21=ratio_corner_21, + ratio_corner_321=ratio_corner_321, + ratio_corner_332=ratio_corner_332, + percentile_classic21=std_class_21, + percentile_classic321=std_classic_321, + percentile_classic_332=std_classic_332, + percentile_corner_21=std_corner_21, + percentile_corner_321=std_corner_321, + percentile_corner_332=std_corner_332, + saveplot=True, + save_name="median_percentile_888", + ) + + # p-value < 0.05: statistically significant difference between your two samples + statistic21, pvalue21 = ranksums(ratio_classic_21, ratio_corner_21) + print( + f"Wilcoxon rank sum test 2 Mises for ratio 2->1: statistic={statistic21}, pvalue={pvalue21}" + ) + + statistic321, pvalue321 = ranksums(ratio_classic_321, ratio_corner_321) + print( + f"Wilcoxon rank sum test 3 Mises for ratio 2->1: statistic={statistic321}, pvalue={pvalue321}" + ) + + statistic332, pvalue332 = ranksums(ratio_classic_332, ratio_corner_332) + print( + f"Wilcoxon rank sum test 3 Mises for ratio 3->2: statistic={statistic332}, pvalue={pvalue332}" + ) + + print("-==== plotting hists: CORNER ====-") + # plot histogram + # plot_hist(corner_num_fits[1], num_mises=1) + # plot_hist(corner_num_fits[2], num_mises=2) + # plot_hist(corner_num_fits[3], num_mises=3) + + # test for normal distribution + print("-== Shapiro test ==- ") + # shapiro(corner_num_fits[1], num_mises=1) + # shapiro(corner_num_fits[2], num_mises=2) + # 
shapiro(corner_num_fits[3], num_mises=3) + + print("\n-== D'Agostino-Pearson test ==- ") + agostino_pearson(corner_num_fits[1], num_mises=1) + agostino_pearson(corner_num_fits[2], num_mises=2) + agostino_pearson(corner_num_fits[3], num_mises=3) + + print("-==== plotting hists: CLASSIC ====-") + # plot histogram + # plot_hist(classic_num_fits[1], num_mises=1) + # plot_hist(classic_num_fits[2], num_mises=2) + # plot_hist(classic_num_fits[3], num_mises=3) + + # test for normal distribution + print("-== Shapiro test ==- ") + # shapiro(classic_num_fits[1], num_mises=1) + # shapiro(classic_num_fits[2], num_mises=2) + # shapiro(classic_num_fits[3], num_mises=3) + + print("\n-== D'Agostino-Pearson test ==- ") + agostino_pearson(classic_num_fits[1], num_mises=1) + agostino_pearson(classic_num_fits[2], num_mises=2) + agostino_pearson(classic_num_fits[3], num_mises=3) + + # statistics for each von mises: + print("######################") + print(" -=== CLASSIC vs CORNER ===-") + # 1: + willy_is_not_whitney_test( + data_classic=classic_num_fits[1], data_corner=corner_num_fits[1] + ) + + # 2: + willy_is_not_whitney_test( + data_classic=classic_num_fits[2], data_corner=corner_num_fits[2] + ) + + # 3: + willy_is_not_whitney_test( + data_classic=classic_num_fits[3], data_corner=corner_num_fits[3] + ) + + # visualize as bar plots: + plot_fit_statistics.plot_means_std_corner_classic( + means_classic=[mean_classic_1, mean_classic_2, mean_classic_3], + means_corner=[mean_corner_1, mean_corner_2, mean_corner_3], + std_classic=[std_classic_1, std_classic_2, std_classic_3], + std_corner=[std_corner_1, std_corner_2, std_corner_3], + saveplot=False, + save_name="3288", + ) diff --git a/thesis code/network analysis/orientation_tuning/fitkarotte.py b/thesis code/network analysis/orientation_tuning/fitkarotte.py new file mode 100644 index 0000000..0e2f3c6 --- /dev/null +++ b/thesis code/network analysis/orientation_tuning/fitkarotte.py @@ -0,0 +1,373 @@ +# %% +import matplotlib.pyplot as plt +import numpy as np +import os +import scipy.optimize as sop +import orientation_tuning_curve # import load_data_from_cnn +import warnings +import matplotlib as mpl + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" +mpl.rcParams["font.size"] = 15 + +# suppress warnings +warnings.filterwarnings("ignore") + + +def mises(orientation, a, mean, variance): + k = 1 / variance**2 + return a * np.exp(k * np.cos(orientation - mean)) / np.exp(k) + + +def biemlich_mieses_karma(orientation, a1, mean1, variance1, a2, mean2, variance2): + m1 = mises(orientation, a1, mean1, variance1) + m2 = mises(orientation, a2, mean2, variance2) + return m1 + m2 + + +def triemlich_mieses_karma( + orientation, a1, mean1, variance1, a2, mean2, variance2, a3, mean3, variance3 +): + m1 = mises(orientation, a1, mean1, variance1) + m2 = mises(orientation, a2, mean2, variance2) + m3 = mises(orientation, a3, mean3, variance3) + return m1 + m2 + m3 + + +def plot_reshaped(tune, fits, theta, save_name: str | None, save_plot: bool = False): + """ + Plot shows the original tuning with the best fits + """ + + num_rows = tune.shape[0] // 4 + num_cols = tune.shape[0] // num_rows + # plt.figure(figsize=(12, 15)) + fig, axs = plt.subplots(num_rows, num_cols, figsize=(10, 7)) + + # plot the respective y-lims: + overall_min = np.min(tune) + overall_max = np.max(tune) + + for i_tune in range(tune.shape[0]): + ax = axs[i_tune // num_cols, i_tune % num_cols] + ax.plot(np.rad2deg(theta), tune[i_tune], label="Original") + + x_center = 
(np.rad2deg(theta).min() + np.rad2deg(theta).max()) / 2 + y_center = (tune[i_tune].min() + tune[i_tune].max()) / 2 + + fit = next((fit for key, fit in fits if key == i_tune)) + if fit["fitted_curve"] is not None: + ax.plot( + np.rad2deg(theta), + fit["fitted_curve"] * fit["scaling_factor"], + label="Fit", + ) + ax.text( + x_center, + y_center, + str(fit["curve"]), + ha="center", + va="center", + size="xx-large", + color="gray", + ) + + # update again if there's a fit + overall_min = min( + overall_min, (fit["fitted_curve"] * fit["scaling_factor"]).min() + ) + overall_max = max( + overall_max, (fit["fitted_curve"] * fit["scaling_factor"]).max() + ) + else: + # plt.plot(np.rad2deg(theta), fit[i_tune], "--") + ax.text( + x_center, + y_center, + "*", + ha="center", + va="center", + size="xx-large", + color="gray", + ) + # specified y lims: of no fit: min and max of tune + ax.set_ylim([overall_min, overall_max + 0.05]) + + # x-ticks from 0°-360° + ax.set_xticks(range(0, 361, 90)) + + # label them from 0° to 180° + ax.set_xticklabels(range(0, 181, 45), fontsize=15) + ax.set_xlabel("(in deg)", fontsize=16) + + plt.yticks(fontsize=15) + + plt.tight_layout() + if save_plot: + plt.savefig( + f"additional thesis plots/saved_plots/fitkarotte/{save_name}.pdf", + dpi=300, + bbox_inches="tight", + ) + + plt.show(block=True) + + +def plot_fit(tune, fits, theta, save_name: str | None, save_plot: bool = False): + """ + Plot shows the original tuning with the best fits + """ + + if tune.shape[0] >= 8: + num_rows = tune.shape[0] // 8 + num_cols = tune.shape[0] // num_rows + else: + num_rows = 2 + num_cols = tune.shape[0] // num_rows + # plt.figure(figsize=(12, 15)) + fig, axs = plt.subplots(num_rows, num_cols, figsize=(10, 7)) + + # plot the respective y-lims: + overall_min = np.min(tune) + overall_max = np.max(tune) + + for i_tune in range(tune.shape[0]): + if axs.ndim == 1: + ax = axs[i_tune] + else: + ax = axs[i_tune // num_cols, i_tune % num_cols] + ax.plot(np.rad2deg(theta), tune[i_tune], label="Original") + + x_center = (np.rad2deg(theta).min() + np.rad2deg(theta).max()) / 2 + y_center = (tune[i_tune].min() + tune[i_tune].max()) / 2 + + # fit = next((fit for key, fit in fits if key == i_tune), None) + fit = next((fit for key, fit in fits if key == i_tune)) + if fit["fitted_curve"] is not None: + ax.plot( + np.rad2deg(theta), + fit["fitted_curve"] * fit["scaling_factor"], + label="Fit", + ) + ax.text( + x_center, + y_center, + str(fit["curve"]), + ha="center", + va="center", + size="xx-large", + color="gray", + ) + + # update again if there's a fit + overall_min = min( + overall_min, (fit["fitted_curve"] * fit["scaling_factor"]).min() + ) + overall_max = max( + overall_max, (fit["fitted_curve"] * fit["scaling_factor"]).max() + ) + else: + ax.text( + x_center, + y_center, + "*", + ha="center", + va="center", + size="xx-large", + color="gray", + ) + # specified y lims: of no fit: min and max of tune + ax.set_ylim([overall_min, overall_max + 0.05]) + + # x-ticks from 0°-360° + ax.set_xticks(range(0, 361, 90)) + + # label them from 0° to 180° + ax.set_xticklabels(range(0, 181, 45), fontsize=15) + ax.set_xlabel("(in deg)", fontsize=16) + + plt.yticks(fontsize=15) + + plt.tight_layout() + if save_plot: + plt.savefig( + f"additional thesis plots/saved_plots/fitkarotte/{save_name}.pdf", dpi=300 + ) + + plt.show(block=True) + + +def fit_curves(tune, theta): + # save all fits: + save_fits: list = [] + scaling_factor: list = [] + for curve in range(1, 4): + fit_possible: int = 0 + fit_impossible: int = 0 + for 
+            to_tune = tune[i_tune].copy()
+            # normalize each tuning curve to a peak of 1 before fitting
+            scale_fact = np.max(to_tune)
+            to_tune /= scale_fact
+
+            # initial guesses: peak position, unit amplitude, medium width
+            p10 = theta[np.argmax(to_tune)]
+            a10 = 1
+            s10 = 0.5
+
+            if curve == 1:
+                function = mises
+                p0 = [a10, p10, s10]
+            elif curve == 2:
+                function = biemlich_mieses_karma  # type: ignore
+                p20 = p10 + np.pi
+                a20 = 1.0
+                s20 = 0.4
+                p0 = [a10, p10, s10, a20, p20, s20]
+            else:
+                function = triemlich_mieses_karma  # type: ignore
+                p20 = p10 + 2 * np.pi / 3
+                a20 = 0.7
+                s20 = 0.3
+                p30 = p10 + 4 * np.pi / 3
+                a30 = 0.4
+                s30 = 0.3
+                p0 = [a10, p10, s10, a20, p20, s20, a30, p30, s30]
+
+            try:
+                popt, _ = sop.curve_fit(function, theta, to_tune, p0=p0)
+                fitted_curve = function(theta, *popt)
+                quad_dist = np.sum((to_tune - fitted_curve) ** 2)
+
+                save_fits.append(
+                    {
+                        "weight_idx": i_tune,
+                        "curve": curve,
+                        "fit_params": popt,
+                        "fitted_curve": fitted_curve,
+                        "quad_dist": quad_dist,
+                        "scaling_factor": scale_fact,
+                    }
+                )
+
+                fit_possible += 1
+            except Exception:
+                # curve_fit did not converge: store the residual of the
+                # initial guess so that sort_fits can still rank this unit
+                fit_impossible += 1
+                fitted_curve = function(theta, *p0)
+                quad_dist = np.sum((to_tune - fitted_curve) ** 2)
+                save_fits.append(
+                    {
+                        "weight_idx": i_tune,
+                        "curve": curve,
+                        "fit_params": None,
+                        "fitted_curve": None,
+                        "quad_dist": quad_dist,
+                        "scaling_factor": scale_fact,
+                    }
+                )
+        print(
+            "\n################",
+            f" {curve} Mises\tPossible fits: {fit_possible}\tImpossible fits: {fit_impossible}",
+            "################\n",
+        )
+
+    return save_fits
+
+
+def sort_fits(fits, thresh1: float = 0.1, thresh2: float = 0.1):
+    """
+    Picks one fit per unit: prefer the 1-von-Mises fit if its residual is
+    below thresh1, otherwise the 2-von-Mises fit (residual below thresh2),
+    otherwise fall back to the 3-von-Mises fit.
+    """
+    filtered_fits: dict = {}
+
+    for fit in fits:
+        w_idx = fit["weight_idx"]
+        quad_dist = fit["quad_dist"]
+        curve = fit["curve"]
+
+        if curve == 1:
+            if quad_dist <= thresh1:
+                filtered_fits[w_idx] = fit
+
+        if w_idx not in filtered_fits:
+            if curve == 2:
+                if round(quad_dist, 2) <= thresh2:
+                    filtered_fits[w_idx] = fit
+            elif curve == 3:
+                filtered_fits[w_idx] = fit
+
+    sorted_filtered_fits = sorted(
+        filtered_fits.items(), key=lambda x: x[1]["weight_idx"]
+    )
+    return sorted_filtered_fits
+
+
+def analyze_cnns(dir: str, thresh1: float = 0.1, thresh2: float = 0.1):
+    # orientation sample points
+    num_thetas = 32
+    dtheta = 2 * np.pi / num_thetas
+    theta = dtheta * np.arange(num_thetas)
+
+    all_results: list = []
+    for filename in os.listdir(dir):
+        if filename.endswith(".pt"):
+            print(os.path.join(dir, filename))
+            # load the tuning curves of this CNN
+            tune = orientation_tuning_curve.load_data_from_cnn(
+                cnn_name=os.path.join(dir, filename),
+                plot_responses=False,
+                do_stats=True,
+            )
+
+            # fit all von Mises models
+            all_fits = fit_curves(tune=tune, theta=theta)
+
+            # select the best fit per unit
+            filtered = sort_fits(fits=all_fits, thresh1=thresh1, thresh2=thresh2)
+
+            all_results.append(filtered)
+    return all_results
+
+
+if __name__ == "__main__":
+    num_thetas = 32
+    dtheta = 2 * np.pi / num_thetas
+    theta = dtheta * np.arange(num_thetas)
+    use_saved_tuning: bool = False
+
+    if use_saved_tuning:
+        # load precomputed tuning curves from file
+        tune = np.load(
+            "D:/Katha/Neuroscience/Semester 4/newCode/tuning_CORNER_32o_4p.npy"
+        )
+    else:
+        # compute tuning curves from a trained CNN
+        nn = "ArghCNN_numConvLayers3_outChannels[2, 6, 8]_kernelSize[7, 15]_leaky relu_stride1_trainFirstConvLayerTrue_seed299624_Natural_921Epoch_1609-2307"
+        PATH = f"D:/Katha/Neuroscience/Semester 4/newCode/kk_contour_net_shallow-main/trained_64er_models/{nn}.pt"
+
+        tune = orientation_tuning_curve.load_data_from_cnn(
+            cnn_name=PATH, plot_responses=False, do_stats=True
+        )
+
+    all_fits = fit_curves(tune=tune, theta=theta)
+    filtered_fits = sort_fits(fits=all_fits)
+    save_name: str = "CLASSIC_888_trained_4r8c"
+    save_plot: bool = False
+
+    if tune.shape[0] == 8:
+        plot_reshaped(
+            tune=tune,
+            fits=filtered_fits,
+            theta=theta,
+            save_name=save_name,
+            save_plot=save_plot,
+        )
+    else:
+        plot_fit(
+            tune=tune,
+            fits=filtered_fits,
+            theta=theta,
+            save_name=save_name,
+            save_plot=save_plot,
+        )
diff --git a/thesis code/network analysis/orientation_tuning/gabor_dict_32o_8p.npy b/thesis code/network analysis/orientation_tuning/gabor_dict_32o_8p.npy
new file mode 100644
index 0000000000000000000000000000000000000000..be2c03c08b2211ee33cf1826a9f90be2fe85c803
GIT binary patch
literal 247936
[base85-encoded binary payload of gabor_dict_32o_8p.npy omitted]
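
For reference, a minimal, self-contained sketch of the single-von-Mises fitting step performed by fit_curves() in fitkarotte.py above. The synthetic tuning curve, noise level, and random seed are illustrative assumptions and not part of the patch; the model and the initial-guess heuristic mirror mises() and fit_curves().

import numpy as np
import scipy.optimize as sop

def mises(orientation, a, mean, variance):
    # von Mises bump, normalized so that the peak height equals a
    k = 1 / variance**2
    return a * np.exp(k * np.cos(orientation - mean)) / np.exp(k)

num_thetas = 32
theta = 2 * np.pi / num_thetas * np.arange(num_thetas)

# synthetic single-peaked tuning curve (hypothetical test input)
rng = np.random.default_rng(seed=0)
tune = mises(theta, a=1.0, mean=np.pi / 3, variance=0.5)
tune += 0.02 * rng.standard_normal(num_thetas)

# same initialization heuristic as fit_curves(): peak position, unit amplitude
p0 = [1.0, theta[np.argmax(tune)], 0.5]
popt, _ = sop.curve_fit(mises, theta, tune, p0=p0)
quad_dist = np.sum((tune - mises(theta, *popt)) ** 2)
print("fitted [a, mean, variance]:", popt, "residual:", quad_dist)

The squared residual printed at the end is the same quantity that sort_fits() compares against thresh1/thresh2 when deciding how many von Mises components a unit needs.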
zGbm3xty%5j_@nN-Vf9(1Q}X>-ub2+`?xtBm$05A)<%g)p`(vita@M!!y|u^o`98kC?T_`3{bm21eNX#+ z_Py=*+jww%I9?n-j%Pc*Hr^cnc0O!=IA5GU&S$pyZEU_d|C9&HM=LKTKa?lR7v-&$ zKgy%YC*{?f{CbmT$~Wbm^6zauaD8yScw0YQPh4MIZ(M&|k6fQzukHHnzTQ@t?{vm5 z)c@T)cgRSUk7a(m3>a=5G8pB)?`G;(Nl#1|i*!h<4+rMer=5X%dEDZ<9`ZeZI|%7` z!+QE$IQ{SEqMo1hU|sjmFRtsj`EtQ3Z?`O6*Y^o4+>Ltfy%%&HH12ENKgyWRwLN#( zCnuxbwD+bbqh7r6>v6!eUGINr`ib{%2IgmnJ=H1bzxwcXU|HoCs=uXPjmm!>y^7Xv zw*GD1hc{dA6g=nNy;A+D<)#?%Tyw)!Q;;7Yt8t5N!Y!{MT}J+TD$3Km)%E;nz90UE zru(jcC$PETZapvZM!!=3R@va98#TYnu8#rzMmMUyT&|l{&$aH|KkktHJpSOhzs6`6UjXLKgVBF8^KIZ4Yd=2WkO}h5ga<`$*bMx(nsvo(#`agmE z)cp?KcbiqpOw`ZgR@3(VmfaK5;jZU(T$-0(R{3y;KVJ2nb%$S!^3r$cB|yKys&{q> zrj;Mo_S39;p3`(WaUja&XH3rn!>>P4xd`XaxC809`_h-9UirE2#lZZp@yBWYLeF0g zY%aX>CSbX8XtfXD`K_J@xyP7{`mV=^w<-SOxUr}gnoq`S{e5n){o4894$Xi5<$X~v z9rxlsNT(BTyAT*hjjQ8y=-`Vq|Nb|w(t4K+)%oy4&erj4`o>MQ{E`Ju29`nF^a1)K zH#ive;|tTf0rQK!&q01OY^&=kc8meKhZj5#+0qUT{4e%1s$@6I{t z0${oDy1lgg-1UY4;|mkC-SFR^bv*sQr%l!Rr>uEBF#Oy_Fo`??%Q@=}=|K6g z>-GW$kw4@^9<_YKTkcRCJ5I+bi2R~n5_txOV_s4DNQ6K987-W5D{p}7J z0E}nMJVEniJpsL}FJOLTK|S~4*&Sn059<*aWqkt6v(vkwUXb+*#Ck@4lJyOY$HYy6 zLDoOYlk5k;Ao~N*A3Rv)KFI!odPVjVU_SoEeSl8(8`N{%4(`x$*^hwOpMc37r{{T; z{R{Q5p89jbW2oi}wzew6)C@r&`t0kL1AypjDA=wv@dKK552_FJHzd*VP~ zmi@R>Fv|W6Y-GO%rXz=HzsfqpFGjuQu*Y@%VZTSYm;GPsZ_z!W9Onbj%XtBGa()1V zoF|>gm-7W!e%|12VBWB6Uroz-)Pek7UdOYM^9uPmzkpHBGhid*;$4*J(yaeL>M1Gd@6j%=Z*ZrFQVyWpU$N39v?~mntMmf%FttaO2 z-YIFH_d5+3<@`s!7kvO&L_biRu!4>Y^as>)J-UAkY_6a6BoO)r>NTQ&C~kPsjld%M z3Ci<%ch`O6$o22ka?xjiS@auVxa8MUfoc3Lx}KpAbxPWcegrHx#)$Ir&XrdI!{+Oq zg8U%*6|fO~3z+t9lBQSvg~|o=vkugIZrD@6@_1b@(C1JO`W>){z6T7V|7pD|Ha`d$ zML$G7^hID4{SjD1pG3J6{SxR!-vnmSKY>y7(N3V~r$8_ID)NiyufV*n~jXPtYfmWzF?DE2Wh zi+v0X(;wD;I??YtB<;mM2Ikr4uMaF@51?MzrEhv5$d` z*dM?!`lj_X|I-Jx{V4Wv2lB-}Mn3ErU=;fXSeE}p#}W3A))V^}SeiMyew^4#T2JgJ zU?cW1F#kM3+s$GhqkhxG3vqo z12$q00%0Gbyb*g5=z8pU5Aw^uro0b?eF^kiKK8ukukrf39g>DU3T(taR^0CWVJL6J zJ_g1Y`d_N~V&4K`9|Mcn$G|A|aVK#3Y5xRfOZzUh+*^Iv*QlSxJ_ZJBzuC6Fugt6O zXMd9QFKkx(>#hH=kMSNa_CK(!T)#K+`)xcpKIPRqUggj_ejLwa- z<|mzB=gTc{tInU>r_N`w`3*MTdEGkyjmZP$Bb&VB-c^1Q<;mm=_A$mIiGKzdZn#n9 zG4EaFGn>2?liyiS4e*7Xu>{iHYRdTJi7>&x4EYi#|w zkLr3XO;A<3iu+fx{fzq?_q$;GU%9sKhspLwZ~JAk{j=RqZGUZSzvcexY(I8l zA8Y?@zlME`bP)SkzsL6XlIngRZ2#wZQ0#mtc3$xOfPLH{?RYz1%K7!Yfqjg28#|Aj zoln8et7PXF&$DFbTeR~o*!jovFxdH6?7Vb#el|Q$dA=@c=k2QX{N?>I&*w($WArN> zZs)h1=ibiu_Pn?A-+fW_fn@pt>|_02U-gGz`ULfhbZON${JvHHh^CMD^{al8OTl;&ea@MFM}5zKS@l2agZccbA2!!ieKDK-CG}G_5H+|pq|6=w)H2VPdv9>Svafdt~&7MeRUo_*Yy%EJe*7wjJ8D8y^ za&EO(qS-Ir>>1d{+8?ub!pYVC@n#RvJ_=$V!~ZiQjdp22N3*A$+1Id-QQw>W zO?$iytoFJ4`Oe!=PW!!~J#Y3sz6b5o{!iu)h*wqrfTO>_SO0-Ge**mr@#gAp$mV~* zc;kJU{)t!3UqSzc`7Mse$SN{q9DXo9S{4J0#JlC98 z{V_@85AD-m1NlVSiM(q2=HF?|-{X$0{vT)lAjm)JV?6+)`H!Nk7d-D|{U9If2}u7_ zeysYV^04Zk^5(A!=D#Y>SASONTm4(~cSZAm`99SjhV_qju^#}jKPVnj{b{g|J0%cI2n?}L4ebaQF-2WGL4Q4jl})))I2n9ZMv z{S);Q{f*{-EQeQrWb;n-PrBLFUkUpd^|4ba;AS-&Qp}*dOV*d1E{`GABcIX!!QV;qD>cc(;CiBPIyZ?Z`hyLY5>-|W$ zuHKiFpTD{Z`Q_bupHl38C5>HL^;F)!GwX>2d|A&^cj~A?D0iC;yBO%dc}4vLe%EQgLw&#YPlFWy{n7b~cYmdJt=sxg zwIBWU+x`R3#a+JB{mSn=`eChi%V=E(q2KIJagM~due%iW!cl`x1^Pwi_1Ai*Ev)^? 
zo!i}ybXuslo_}#nx0g{b@7+h`FD`V+7^K|>pFW}Wc34mMi?rp*!!?~QKL_Q_Wpl3r zriTym$SvPQN=Z zwvFoZP4~%x$1#8V_t#NBUa_%$f4Oy+_fens$8Flm+xqsrv-a3N-^cfR>rZ3- zYx`^c=l3}KK7KF1pX0&t;dpWUIG)MIm*dUx=X`K}+WE5iYvKU_~-UtDioe_W4TpIon8 zziz!n4##|#6RzwH%o|=X9P;2ESXJ#i_i?Q2`I`;(+;@l9|F_TE_aCYCyZvD>>ih2v z*W6@(q{~y+{{`jlkTJSWn>A)%q3NY3j6->xaC_aaZ?*ryec*?2C@;T1M%O_a_p0uH zZrbm3--$cFrt3Jqv&2w5pZ8sOH0r0NuAdBaFKk!W(b~Q5M|qs|YnAIf@r&oQ{^|E# zi+X;=UJok9FUA7P>Yq2j)OGf?oxqPS(DSM6HedIN=J=m=9sAF_zJq#Uy9I93dV6iI z>n(geL*LV^@;lu}%A1!>LcRRzsB08Im|OLhjl;O;-7D!UUR_1uK>$O+m8jNz6WW$@x6M_n`W0Y@m!PM*7e)Gy~9k@i-UH2 zT9N1Rx`XGVp1*qI=YaXYFKBrf`sXRA=XdB=_pu%Sp#J4BaM?#uFMfN+y+C(<<*5k^ zJ~J2f!*kPS0Q2aJ-$r>Dw3zzkT&(}s;D-KO$7#d+Zb3c2PtA`3UvOFJIPiE!V%;iE{V$Nm{=-boNA~(|!x6JfwH(?;T>-)vv~L z>BtKvpj`fcN9&jSpLz=Eu+_u5-^Ka2>v+cZ_jm&J-8a22MtL)PE0w>{f6@i|{D-st zjPiWNsj3f_3tqSh>Dc3i%e4N$v(=xNuDe?2EB@l_vr*oxJNO>0_x*qLyUTIwAA)o` zd$p7CT^5l@) zk&e%g(s@Xa*8Ai5uj|jz=Le>~C{G`sFhc9~*-iE0=HHcvrRmi?gL>}KM7)%MFumrO^!aNou?{qh3mX}zm&)qOAhVmJL>zspJ!P%rQGwaQWR_HW)q z+P!tbG_C*Osq=u%IX`^_Ebo7RGs>HH7gYJq3oh_5(!TeA0h+(e-?aZ_bp5}~eD6j& zekt7dA^27F?wVV#2FBMHP&srmUa03*zGS+hj3+S2 z_@Z8t@djf26=gn9j`;yLGG9P<^q1$LJj#3mgUm0`$$X<6^AB_{eOuor@_};53owfO zXjUh&M&hoRid`U8dwez+2tm-&8olqXrQK&)S2 zkoAmmtZ!hE^$yIVo;*RzWj_EmvOfUbM4|Ce^!+J8kl7NMmhF#U^!rcQ-MMDdz54U2Szy$ zfJx2=l;gZml=B1VCa?S?%5lB`^XZ4^ddhPCpghZY1oUz~0fU@ZsE6|li1SR-a=vM~ zoOi%R&OgO}-*K;&%lQcOa$W+XoS!Jic?v9Yz5?@@jh{n#knIyhc4Q z=QpsKU(Q6mBkZ^lPKEUFa*Q?^pZXWyps<19W?D{=U}RVF4W{=s&0jeF&IEKLSEuLOtkDz_7*V z4f37nSHMQ}EnpdPuda(I`WWg#KLe(j%!>9-S7}#ud!Em4#{TcPjx{DkR^y5F+L(?1dItJy?zk$%l6}K9F zG0LH@1C!XtiYJd6q~)6qyBL^6-&fpW+V4=_h&=!dVjlp@q_1?nm#q%f?+#*5pnmT0 zoqi|m4XrQs2Qa?=;iuqiS7yKcRl!;^g7LM(iuCC-yP08FP?FzMHr1G$8CVV7mN2+OHt?8|uNH117PL zH7)ir>cRd~6#E#M#6CuO5c?Qd#C}v1`xxc0kAYe2V_@miMdcaxDC$MAkAYt7W0b>w z1^Rx6)o~rVvyNjD`xx~av44R{>|@m9{jtBTrh}EkKGu3-A1hjWY`<(>-`Bq1`s2ku zM*Z+w?JxW9magC9m#g0g`xy0#eZN1fjt9r5*mz}*pN*$Ksg7?W{)kR_-rM*$HXrUe zn;)AmcXOS;xK*9cVDsz6AA;xOFYEj#lLz=u&@V6k6d>$leGlb{@@4W?O#b3Gb5$OL z$tUGC?pEcuF?ptZ$9t;0N0Wc}i}0Ri-?~1^*LA&k@h@qA2HSc%qpq)L>n+;)^R^zj zKAVl|dQIZrLc6e!f%*8lzMZXiu7A({K-(Ow{gC^k?U%*&&usf?Bla=sH|x~>misUF<8)fx zpJ5-Pp11uwELHb&?(f<5`|`)S|MNU(?0g72*7G8ZecXX|>^upp*7Jqu4eVpQCwy4X zBX8%E<9W5NonOVyGdthH_jcad`3L)0-(%+^>|?Z>?fmqmo~MrItDU#O&R^ai^L!3= zUb~;``OWh@*!iCAy!UqgyYH$#P|mCR0qkS#&$Cs3DAXrRzbHeiz7b9TNYqCz2ZQN{xm(p2%Sly#4AdtVs`{lfeUtiUKDz3o+4R%0 zv+1jYtN!XupKVOP4I5T{H<qS!cT%4Z&sF_Co4!x|U-;K|#eBQ-s(k?a7|%tqkAd0j3E0P|=fytO`m{f2 zkBEJY`o-*(Z1zh!yxKFK_Kn#)<&kRtM6r+29_^!G_ELPT+E2mksU-F>+V@LVdyDp0 zGJ7nTeMWl?_A%-Qv*%zR<2{Ypd-2t3|7Ed{Q7@W(Sj0X?y<+xbGJ7)3F#FQ%P1wiU z{s(4{(mpkNwK<^Lug>h*Z1!!qzuLRBfBkr~hwc8@NgC~<9GIlOrp>;FecXZa{j2?* z%pQ-l&!s=8Uxruv9riKm7wJC`--B}6|M*_5Pyc|}$68;;2Z-@Pc_a2QFz!|T4bl7$ z7;n_W_yaK?s7L<=>|=e-{2Pt=J21cc9Q`5YA4%pf3FbecKLzrm?Tb8V`{sXf=8y5_ zpP|3Ty;%J><*Dk=p?@ceJZpc<|3iNegn)|b|o^@j3f z{wb_Sr1P(;{|f6B^{{?{@!RU}qW>$IKdf9{{bSeSpCcN zH%Ie7NApKV`lrob-N^Zj{?MO|^Bd{(uj=o{`HuHAM^t~fH~%>O<=*_~^2cpzM!a} zFRA-vbLn-Dqh9*m+YbWSKKsw_W&hY-^HZN0s2?YM_5v`Ecy>l7o?qyTqBu4`q-gE4 z{~RBVU$*aM|JYtw;xW}z!?$<7sqGIK@&5m|&;E0KIR5Q?*!b~#**~_&`(vhA-qyG0 z+xBdJ`98j%-)lt_wf7pz5ITT2gir=(Qy21JlQ|C$MNTUaDF&noIlQIJHIv` z>_5Mk{bPHSAIcNui}KdWpUETp&+(!B@_X4ownzErdf@tC`|Ll*=l{+x$B*C3{yI$Zq?6q-MdW=pM>Xguk&Bl_Agtr>UoPTb05-anGx@zJdU_V z_lsohxBa*ANf#V38ujDM-PCWB`geO3&-o?m|Kz*3&r*MuKlz4vs8{+-dkDz(gY}=| z6FC0k51N2_e*1Zow0^&#PjyN&xCp~q5z6btlt!M2A>p#bbpOpXR`E~Y3 zdnxaF`Tw@xSpPXb9DmL)<%jY~`RDrbM_h7H2fo+ZFV=sK563^+{8D}>pWfs@*!tr7 zb@To(<56k1Onz1M?KE!3^YI?H$i!37|M2+gst?Cqf6{UCGcO#ell>#tM19bxYM8S1G=5M-_#*c 
zzB^9kH2upDN1E+N>7Uk@_JH~NXEsCo&5$cq9?EJP>A4wtp7D3BH{mW_&wkGH`kv;E zWwak`pZzZjPdo+hiPAqj7o_5k+9egFA^6B^e=}c`;pOpW!#UaO{U+xP({D0eL|2aM!f6g!E2kq%|CjVStUe*WNW&7+u$A{z3 z`Hdz&t$dpNqdm0G^^5haX<6TBzmfG0^m0A{-GxVtMmhEepp*R?7-WA%dDD5C+VRbc z>+Fwo`ST?QX}O&5K%e6#1zDmO*+Eq#vdTmOrVkLUQ?{Ng-Deaa{0KZ<^dcEkG< z2mb%sXa8+{vW-9Im-0jTr2IQuUwNh3`=ULz&;E0KIR5SYn*2~cDgQX%wclL7qF-xT z>|;f-kMSPp>JG0=&9jPfG(6EObfkM-PLVAl&#?!|rurVS47it@(VgMFZBv5$dC z>|@k}JppXSd^r~A#Xd%PBla;cZuDG#U=aIQ%dP!j{mZ_#2m1!~-0v5z&x?JG`ml$9 zN$g`yi@k(;S?psV+b`CCey>}-_OEoY_QXC$eJ}PgFo}KKAqaaBn8dyVI%~h}zm1Ro zppGBE*L_+0m&e!kU|*tLFZMAoiv5Z9+V-vg93PHpLG;&%0#jQ+uAgTK9k0 zXZoD&ugTZ_Fxmd)ZNCY>t>?SjwVub^pZvS^ybE?daX-tq*Zm*%rM55jF))8n_rtb* z(-)kb@7ea#hWqPscHW77+#%1m=Sly%|HD51-}c%6Nd3d~g|bmS-;13Gj_1R#>Ume} zd}_}VYrpNkjZZLrCY%02eIeWV&hrfRG5S>wtLI(YzMUr=AC5oIPq#zWXWY?M|7g!! zJAZk9%=WYOzp?S*_&b~5<{{I!Og~Dd&jiyys4o<2pZ(|fBpd&BeyNX`{^m{JqW*bExjoL|Zh(@`*>!BSWKc;WPK7c%V zu|KrF>F15uPe>QBk5Nv2z7hKv>9A$BCtx3=UN(EX`TIhjqkY;7-t12=_A%(&3k_St`q568cqUy~ooC*?nhyrF-={4;F7?Z1r=$3NQqQhq3(lz*--`j6N? z`_J*=_;Y?KKa@|(Ki3!6uQz|SU#a?+=?^PMRR67)^919B{Q>A@zW^rlKRVeD@xA_q z>fdY3A4mUOGJkcttNNGe&kN?iE#_~f|1ZdXhxW1mq1{Y>p|xMkpN{<#^@91Uz4@2v zkBs!+%6Wo%*q^n&?AJiH&;Db7@09HFj``EgKN`(njq?HJ^rt%dZ{<8edu{vHe~u6R zwK$*fd@_GJ{k!?C>aWK6hI0CY*?zSCb9^}d<@tJlKz}pNSM8six9AV-SD-h4HQQ(Z zIX)c!c7DwtUdGn@1NzIMA87lcFZ^%&>_5kc zY~znDp1F;`wD`_8e$?V|89&Q-SH`C@o|N&QjBjq^FD<^4@uQ4SW&A7SYlFq_GTs%x zuU0vX2u^gKDc50 zvBfttezuKwwfJwwr!xMP@wJTKWqdI2kC|pU>+|`xJ!`*xpT%!8KA8P$`)l#Xj2CA7 zH@}zP&+%x-$Kt^`ejHDZFXNdx{+tiaPdi^0|IT>zc781$obyk4pnOnXC_mohsg*B_ z52pN49$WdecyP)uxb)!@!wo;Tz_1TjF0Ae<@#m(BI6_Z zd#N_QlkuaBPqp}0#@8}_m+`^gey_schcW(=@tuqxbrzq>-y<=;mhrpz{WI1Z#Jop6&O4jNj$& z>!QUU^Y?4~y&2=P8UM}SUpb3kXM8;4cNrhd_~SOd+2W_$_-y;V8RN_OdoISuGycAf z54PVEGQOGd(~Qq<j#@8}_w~Y_B_~Z8XvKBwh_|9bUql`~w z{40MC&EGFGKA7>x*?!N<_-X!Lmhq#BzbCi&SN>j`@w&4>Z8Nb`c2Q&WIe(%os zX~t*s_utWeug&;%{vMj~_m1%b7Jtn6X2wr5KD+Su>Gpea{{A~!d_3dt3x5yI_+!R5 zTl_TRvl;*0*zesLzuv~j+wZ>_-{1a!fW@#%DAByNxfmcs9n@GG2`F!Hho+ z7T?VH={7#w;=dVR4t)gk%J^Ew?=n7^@yCpBZsVsdev0wmj4y}2gzsQ{JmYs6AI$h; z#y2xwjPcow|7QG@XZ*Uw$1|RY@q3IvW_&Z_*`mdtF<#6wzTD#186VI1d&c)O{y16u z9^|^u?_A!w0<(wbb$5_vd-)-ZAE&iDC z&7A*a@z#w0W_)?E_+!S$Q@$7<+{PbUd^6>f@!7@VzZqZ7_;tp|xAFHD-_Q7CZ}H8H zpJseEyh#Ijm0zb{{tAm$oNRcU$*g`wjVG)wT*vee67Xr7TaIi_+yKY zWc(%LI~hO9_|$ekv;B?x9pi(Y#UFEj*_8 ze-VsNW&EpWe685WD&H0#%=_cO_-2cr=K0O@+*y1%<6jwH%lKXD2h!PEyC-^}=F#%DABoAKp{$3p)ZA5VRh@%@ZHW_&aCS#R;#jQ?hQ zIpfzEAKzH~eH-6z{~y5kMaD<+|F0O|Sr|WR`a0uZ8DGozUB(A9{8QxKZTu_aYt4QO79Y&`W5zc-v;SxhGJcftsf>SRd@bX586VunADcZ&`;_t7 zjQ`FSpGy0d@wILIuEhtF|Cs-$1^XED!1!#&fB$cMIliCqwZ-Ch86V8}W5zeP@zeHy zT8#f@e0i|=b@~e!zl-rgKN^ca&KBQH{{#II7=NwD|I@Pgb;idtewY3Z#ve1jneo$% z&n}GrHh&7^*BKvQEdIWY54QMY#y1y>pJseEf--yF@q z{s}1#-}p=mGQN0{I10ZGyXVRd^6)O8Q)2NBjZ!s_*aXsW&AGVgR{jSWB=Ci zxAd^z+boi~e*Z{v5zTYRv^A2Yt0 z_Z5uKX8bqrHyFRp`wzz7Gd`H{$Bb`g{50dUdEdhL^2GRc{{N=M-}C;4@yF0#@g2$H zrx~Bk_;1FSC&sT^d_3=y7~jwT4ZjX#)tw&b66#;;8NSMtlY{Kv*uO#WT+0h7PCF@9q5;|AliCI2k> zWyxPleq8ViVI7g*w-Np%j6eB+$=^%9UGg`R&$i{SHGW+3?|Skn8~-u+iOGNL$5wu2 z^7kh4LmU4z`Ekj=OMYMS2a}(e_s2}Lob~xU+av!j-^cf}KkOg-Oa5biPy2oLz5ITT z2girw#qr~Ka(p@79DmLS=ZEvf`Qvo>c@?DT$xfq`W`Ju@_O@0OPXOrKT{K4cWCjT+` zCD9M^H3?r$UjYf3i3&ipPPIQPd zNilw9XZ+3NhmOWSP5un>XOo|s{NKs=#e?y!kYBkmJ`?golYg50*5uD7KX=RjO@49X zFDHL0`B};TO1>lV*ODKX{JZ4$C4VsaiOGN5^0OMh6ZvJu|ERxTCO>Y=ze|2!;}0f3 zu`~W-@+*`7mHe{N_?^g)3w|NUJNSzfjX#+D#N<|k{>r3|1SBW$RA98;+8+j_?46KH`Boyadv{$uhhlfRk# z(Bz*k#YTBza;q}$&X9EMe_TSKbZW)L-H$=znT2d#A z?`!8{clr-)#KQA`I(JRnf%STJ=E5F8XP5x}y$Cxkj zf5Sf3_Z$B)`IX7v%=JY6Y4TgcKE`w8=O+I*`Nh4hU-GAtpOx#K{IV^7E%yiG-zC2< 
z`Gd(%T#Wyi{H)}ECBLk*{fhj!+|L5}eT_eu{KVuxCciTIU!(1xjq%r#AD8^Q1&TCfwX7Ed5+*|%y; zw=;fm@*k65nf%S?5!HuYoje^Y-Ze|gKFN`6-3f2BT7{@VZL$HjbR7PJZInem8z)@;B4oC;v40t;wHFeqZtjlb@LWg_d90{14=ZCjT_%1N~3NpG|)T z`Ge{2Apdd8uT1`COr zeqZtjd*df==#L{mtMR{*UpA7z*7$MBze|2!@&`xbCvN?P$V+I2awH}d`J>rc`0ZapVx`l=~w z?!k{~J#i!f^TN|_2Bx8Z)c(0Y)c-l~gSVa8iTsZjnu>DXA2ZEz*5~tVud(*q_u2c| zANG&^b=Lp(d-#3)Ui*HI2girw6>R+4@wD;fcys)N%?Ibl+kA2UIG>!~VDsJ1zsW-@ zA0{u9AIcNui}FVKqda<(&qR4O`K3HNlW)py7J=>#^bb zwDrpM>vp?#Kg@evc6VJTY2L#FRX!elN7reXJzoC@Gn}?~T{ma{O4p?ueDYPO=XMxy z4$!ai=>W771}%9Q&<#0ZJ>-XRN2p$ro?2P4Y_X@V)A+#w_oLihJxA{Y^AgMG?<;(l zF7^LMZa!H5eQ+sqHtD&r`of*yq$6wY`7t_77lOdyU6YKW(_T zu2aA7Jl$9F!ApOt^>*3$NuYar*axWRyZ^K5lW!ic-So#vi+ zk3P50dmrMtxbF(uKK#|l&%W!&z_`~$FTV=;@ujo$ zec|{e>hEBhUp|lg{8Zn$ip&12Fj&1 z#dGD@JOJsY+Z(#y`afLqDAIAMW!^`5K4zJ>6xUC0Am1H)*6YALVaZ9zcU}H@AFvtu z-9fxGsoc7)=iHBSf9T#Shk5_= z$DzFW=Z_~M-|c_uU|>A6-)_iHPjo#+(_hW1@7w9XOEiDZN@IcH_Ne)O-GAMJdj8Ep z+Dc+4rt_d~ZF32a7=?4{|c=hbz*$h#*Yoo@K=@J^I3H-8*3fB)5`n*YnudcOF3 zrd^G6_~8;AkNm<<<5AB~yfY!+_xVuUk3F9`3i+W=eP3Q^-cWsR@Kz5YKR>+pWMJ&E z<#U>U)&(yE%YO!DZ*rd;xQhXO2RC68S?qiaY|#^FQgjPa>}<4k2U%~xEb9;D zLDnO%IsMup$Zur50>hTG*3|sTyC0pke_A0(sJ3qG+*{JU>Gv?HsmMS?||XB5q*)5 z{SX+_9({m8_Df(R`={c~FV%G~`)jA#(LO%9iV7TK{mGdC`Gs>gv*T8(t0=hpH z+0V7!W#8#IV82J&-MZu?ML7>pzsUIj#Cd^yoF72H=_QXMALk1&9GM3Iqntk|FMBO~ zji%*%LfWmr{L4Upz}4CxoM$KxEBt&j5a%7rgPecBDCeP;%lQaQa$ah_oS#U?2Ydb% zi1QWsZs*%x0^T-1$l5Lx0fn{pL*uLcc&c^bJMPKalT59|1NGpQr6ZUqL#~{QO!i7k#Ef(r(8& zIxg<4Yu`hD5&cKYMIS;r^dn&U^sIA{5B&*P9`8H@`HkpTNH?NyDGr(VAo8J)0mG+f zjz@kHeGQmJf77(+b0}{_zXK-G_mE#i|3lh$+|U>4DEgt|YF$PHljx5qPh;ljye82v zk)K81R22Oa|@1&JE*=1eI4cbwF^cdKZ<<}%;ugg`?wSNVm|=$vIq7?KI~&)5qksJi2Z@`B=#}TiG7Ux zEcUUM?>4t9Fp7N)^zka~7wqFsNhh(7fnMw(l*2v(mi{ZGzrfPTQ(+ksB(W0WVc?-a#8Mt&0e7#OcuUiApr$D024 z^x;6*#~lJ$>|-G8OXLT!kAdaLo8Cq~>`|a!;Mw{T&HeSWXkK@@KRmZm-ZySGa^AT)*U>|G0ZT|crbv|Jq zBR?Hi=iA%-H&aa>Og^H?OA!AGz6bU(FrQxK%bUD~6;1w39;3-;*tyDUF!_z!RC$Jd zjQ3}g_hj;)Y(0dV>iQ^LFALQ5!}SzxeK}iiUi?$qKUi*%yK1RFQ_Lp*M-EYE4b^qahFXv-8u=)ADgWUmH7b^FHx+F1%%)E^reDH7)_l`Hi|M1(PyOjtU-hQHy05D~o4x6` zrtf;wf2j|9(~n^vYx|}@7qO2!FuvPW{n}CA7W-K9O&<@YpGVWzVIQMBJ!1O2>G#cu zs_#3ok5Qg!514&Gdm;a)+7GlRoY@!A>myWOYQ!#rg98m46#_X+P_E#`_4EC{pkJ!g}FYIHW7yB5P%)TpT z?|HNTlGw*u|JP<8n!T9stM+4K_GGxb+Ly48Q9qdd8EB81eVXT(y=wMrG<&v)eT?U5 z?|QR;X%F-M7-`f)IqP>w8qWjK9`cjf-{Hm4Yim8T&%0OO(Qud4sRO|1R|Pyd4X8!*0FPwZo0Hva_Xqf^q+{1?gm8I8;z>R~<= zWqyH}Z`8;91B3ZXARj1)ydWR)159EcBcJ{k$Q$yV`DY6KHRiv8yrMkcTm3tbXFTuB z|3iNe>|?Db>p{!uKa%x=H2q8Qq3Umn=6`bLk1FP$isr8h=D&h{jCPCI$J!44UFQEP zcU6B_c)R+?upgj)WBxPP$9N9=2e6pGEt~(Xh<%KF`sdQ8)n7;d9sPOOztC=^zwb2j z|Cv9~$^M6O`U}%d)qmKSKQWkpF^heS_j&U_mY&rgN&jTLz4|Mo`7gcsGoAT2v-vyo zmDT@Ae`unA)cmD5AN0HAygK`vXs=pli0oo6uFW|lOuP4zbkcNJNa!-G}`R7Zo>aTaApP(N7 z`MFg8elUN3HvfMk_OZ5a_XE&}&@S%}yy#2Xj@>UbqEDebZ(Q#mqTNS$yPt49>wSg$ zyxw2HKF0G!^fw^yJDlBr6uS?B{)cvBk9uDM{Sft=mFs;|@Qh`=RFjdS3+n80FBHfywTd%C7aksr+T! 
z9(XQoyINh}|NZwdsMlQhr9S89p1zenx5mj6P_OAdUiIkk+8?e#ete?W>A<|ia;Ko& zE!=;9V3{#q*K?fTP3@St=$G2ReA-_3=yPA6em5}PQ~ysPt+ub8D`DXsbbQ?}-&DKB z&lq=_wzuy)+P|>e(BXJ4zdrE>%|HF>2Y`N+iyzSZy_S0g=+4deB0qlG`&D4xY3qK# zP%f`>H+u(_hy2M|s;4xs2mRhS=Z4{^m(Duk8LdC}cC}CAN~e_$$#=)~*Zn+P72ijB z{`)r10o|G7A4hrKbmPAR%QDAQzvW>Ek3~8yb?jZ5zxSVB>_ok{SDFD#J9gK0{pu@x zjPmBZlQh3{RGu=o^l~~*<)bdT-#5!&xhXmux;cznpK*Kjk5td{ADB$q(g;@>NXUD1Vek$|vQO@*7Q_TlqG5r~Gq0aD8yS zaQ$#SaeXCQZ(M&|kHyw!yIyVm<{r;>!+eL!no}|VX`?|y6-O<0CGy>~`&|!A$Dg47 zLDzk|n^Er1K2z60dUpE7C~xjwz8CVte)V@HM%g#i;TmII^faStIy53XoJ;&queD18XfPUO8-6xuFf2IDTrtc5B-{e=WpN!}7 z%{ToWn1+ma8THdj*H6>u_AjbOhTWEa9OdryzLSvOtiG}C6U|AD`ccchi)z2}AD2;m zE^YMW6x7eZd`kC$xZ(b~KlmTUe~9PZt;bG9d0BRMU3YO@(D8^1zJDLeL%$XE`_rMr zE=4*IUs=C5t+Ki5=V8<)AL99DwU_S%#vN~*k9u)zuNSobJ4fsJSFTz836%Rke)SLJ zhgF`wPH~g%&qjW8{c#!Ta>I{`an|4TT+An5rS0?n*q^b~9Mtn)ANo3;3x46(fbqnu z$00waAIB@6e)#ps&)-eZ{Wh((pYm504-*j@XTA6Z++ zt9cwT7I)5L>Nhpt}*WV-gKfS7cpYYq>V~}5FZE`>AhcTn40Nv5Qda~C@a`OV*EUXA>? z&3mJP&A)B@zIdswyAzkz_ENm`0+fd}S6971f4SD-cs^gh_o4dyp}hmr%?D?W0LDj# z^h17g{#z>V?uve@7lqZgxJAnk{#n;&`1Jk@Q7@jb>~UKEi8BsFxtp|_)-N+QQ+bX< zKGt=xVmOn7+T;#h= ze|ZgrnW}sVi<(XRk%u+9F`PTdCddSnJXnEOv`klxR zy-&Fc7(aeX$FuzW^<=F-ZqaGL^vdwJItBd|56uMTeqVfm{CM;VbAW!`Zff7Tx1X=_ zcgApR&Uk}@R2qpWHtk)#jvt3<; zzT;j?iA!~Dtz!#CqDTqTqO?kBQ&*Ex*WR8KO{I`%(PAk^Ny>JAT-+y(6`TX^~ z<~cL-et+gYbIy6rv@kY(s~y|v`v6^?m2wVt`?M?CnZ745Oge$jo2TzxMeX$80TGD( z0K3gi=K--_*iQQg7|+;yB6vS{*duH|Wyv+*vHzH-{RoV-Kf#-;_uzfvY5#)9erBZo z4LkNbFt7h}7x0$O1JEz-c`bO;`Jg+{j&y#Qzy6Ot-~*j6#>d-pf1E$q(`((^193iK z_jF#lADv(Dkl{t&$VVA}1#SogL|f$*Qe zO#YOS{Ody4$=?DE`Css%ef2kik^D3GME;tQ{5RXlp92l~ckua(Fb(ZY{vU{V06tQD z02+!H;1NH7X}^=Ffk%7+`ZnL)0}K>@V0RRcnEzhZ5Ah0oI_VKek>AMEnJhcnm~*29J0Rv`_uY zeh2X!c7N*my}+j<-V*;$@gKZ>@|Y#yGvx=26PvvY9{B?>9kELM7UdVP+au26d?r)= z0lT4m1n6%)l=n086|`gYj{uR+R1t=Mb$A1ae249n{{WE>fk%GC_NE7NK9eYag5BL? zdEccvr^$YQ|0ez(mhvz350sDbc*@VvKTy8L<0yXvpDCYXd;6WYfk(av43z(Y5B)xW z2yNttj04*Z1CRWXam(Wm0v+X-um{RFfsyi0@Nq#+KA*@>!6RPk#adV-PkF?i?=pd)<@-jY5B zrgf8hfH(j6nBOmxJ_c{rz4%Y?w&$2RY$tsTK9l|dhQ4Vgc<3XxH$A645c&!B^!Ww6 zZh`bMct`peyxHIzg|(?oJ)qknP79UjIb;7<}%$ z>^1PvzraNL7<~Er$MJh<7rW^q{mXcNtjuSAeS+st<$2}#`aZV5yr0qc&HDa<^gcho znfyMlzqkDUbdaov)AeywPr&=-mt_5-u4m{d>s!{_Z|3?_>0PW&M8s3;mwu z`!(UlQqKesA4RPWM+LeGEQ-Ap0-xA^Xwj{xrH@z3yM9`?>7z zvfrVPF;6PzVK+G+c3(L!nd(b;pIFY5p0A|mEqy5Gubjv5hn&x#=hbzR^J_^TW1gVr zJJ*-H7VgD>&cYy2(u$Cda@`k0@q@!OI<=J_ESHzL4LR{GsF%;RMMqO1@z=|A0Q`_tN~t+${Nu*Zd{#Bl%3pZ^ClP zcar8mB_B$fA0^F~oaRqS^C^3%Xnq_tU(O|e)_gi@er?)FzU?&sj<0JzuK9T!A^AG=G2YjZKIZq<{N9m1 zUPyG*{NJe_aHNmHd({g;^+Q%Y0euX6B7KbclIjmzU-U>)eUeTUy<%0rc-1pW^^JK) z^iCjsjQJ{h=x5PKMK75tqMx9T(a(@RX0)oeRDaolqQ@%wtiI^AqTfu>bE@xx>OK1R zZ=c6}PW7PKF8VN%J_a9DKPJ_aQT1gceav>%pMmso72Z$vX?#fZYSFJ*^=zmTeH&Eo z+80Fs7Cl_NzzvyrCuO_A27Tn^@dzS>JN$58$8w@7@JGIqEf%0?^Ol+u2SEK zrQY$3)IadOG0xHV=XtbVg8jkcw4Q?f1AC(V1Rnbfi2cUA)@QIE!TVCb(Rxm)??kQl zV1HvA_B#;!ALF2pfjA%Ft=5x_)|WErWAIVyPmazTkJI{8rt`>ctzTKKXW{&UH$T*C zi}5($K+{X=VfiktkCl3v*3T^Y3yjMzN`1|eKf$wEBL{M>a?|BG4=^uwh-2z|_U(#ODbnA8(XeKEW&^+r2W>W}cZc)rv6q$hvOE+o9vihj>$P5MevtZO zsps0$rM?@r-fOk~>$M(?_=oo}6c4KioYs#KFTp3RFT1;>-fXr09Dk5{bfrG6f8WV< zmHM^UdUhmzjCo7F+npx$Z*!E?!(9`tk5fLt&nxwFt*3jfulvKK-d^hO=`g9sBmdy{ zrhEjLJ4*e&)bq_SsqdG1e|kph|5onHSLFB=;@l{ss9a z_t*OwPx=_oL4FFp+~+vTU%@B6?@15I{ZF|MN|gV?9?N}^-X9@f2JiHK$zCe=O{USw zSMvO;ZsE9;Kd#*y{mda}vHyuxHMp+g$3OK?@P6>#;@@^Te+b5<->w+|w5bOB0XJk; z8y-J;udd+zfA`?uk=|`3e(Ly!6TqA2-dO;Q^FEx2{^`#bzYeq~4DAg*H2nJ%V46MZ z1a7yyo4=R6ZNXq}@9|xSh3I#5{rSN7>@hD}P$W z{oZZJ=fc1IGC$Y0+l}AH{VMBcZ+e<_m~AsQv(>H3!SuB^X)52xR! 
ze6RBT%J(eaw|wu)_t*Upb$^uoQua^K{Z!dsy5B1MPxoWlpJl(6{ag04*Zp1D@4Eku zo`-Ti%6Td0C+m4C=c}Bza{kJBEa%hfc`fHR{p+T-_|A6MD_Y?Drv-Kh&^PIGEqGTy zaULBzjk_J~^zgz-u;-h5bOYKiz8wTU%)HXG{q7_A9J@=FbwfKYs?L5W#P*!Wh86d* zABg?x&u4pWspIFr;2g@W}JUFA39_4X^E}jX@ zn}k-o3bj-#@oJn4fR{a>E$z*X89A zu=@vo;qx7KKEuD;yn86;=YG)K&9ImI<5;cERv!1jbben`{a*HO_TnMee*Fsihi3=P2Igz`VLuxuAIRgwKD)dN-q-Jvfaw(3KXJ%@ z{5@l{M|-jTf`#`iB)faGY9P?R^F8l3<8P7mTHk^FruEf1{-hIYi{D%<`^Rp3OwQ5y ze;JPP`LU+WF|XNUMH95mJ=MDbQ@inZfe)hxJ_)p6jbh$(8Z?FbJ$xqbpV;v5aj^T_ znzsg~Ub}N#%RkL<=ofn2+zPyTuU<#)_t;_m!MlAdKi|E%mdA%z59Ix4c3V7}?dGdd zz__wSPmHsB?QtBcc`Fd|JME?=XCIM}YmTO_p zmyewZwAJ5~?^yo|_9waT?!0f!L$x_?GSmLt3**fGUAVv7e#RV(^WWaImif;w<{vC01-={I3`Q>{4y>{8Z z`5gGor+kg^<^C9L^vg^CE${Ka>-Bf_eVY5xe6VAFw8Mpi`Fj|8KKQiT*rkm0er%`r zWu*59Z!S9YeW0V?2j1RsCBKKOuKS19gZtC^Fw%OVf1vdPCR$JMSYNhpsV(!-`okXS zdw{p}eb`Rl3%sZA$NVF^PiFpqv><8El?GNzSFTixlkLw$uP5TM< z{CfX(K!58{J|EbBXq#30-N;D$vkL8XL->1G+P~lv?PnnNH|&A-J0tCXw%>YuM_?HJ z%_ZO+ofqa;wde^nbe_P6YKun$J)Jl3krvf+k^Bev+;;q3jCWUO-c?`G1a?FI2fQbLgm(V&u^~WD z{tA2~{{=MU&%md39oYZF-+{N}|F|FdL)atvN1$!~XfN=d{3r9|Pl18_D|q-@Ap9?Q z_+v)$&)^;TYhcI+aeki2pTlm*zk@gLcZ+Dl{{!8LU(5xM_yEilFTf*y0DborU4e<> z3+&;Ai~0i%#UJp9M`*{3oAv-AUV+aPzksIB_k1oQ#W&ay@4$yX`>X_diihAG#YbSE zc*#7)PqfWRn|ZwuU%{K6(>Ji4;xFvw{uuEYyrp=}{eFFB8SIYYIWSXv2OlWjqirev z0|Vs)%u{{<%#<&HNB#ixluv+1egTX%mhk?|@3eUxc5CMG{s|-7-G_FhdJ#=D%*l z@1H0?gFR5b26U9a!5%4}118Gv*iQK#+kc&O7kEqgAljDlLtv(S5xhC$X338zp9G&M zzXUqUH(`&Ie**pD>RrHF%1?pjo+I0U&y>HSowt5Fi23$Cx`B_B?=s#o?so9Vhk?0i zpKF20mtjwoKLa!6)8Jjz$yYK`z75`Otlb-kd>nixeav>!$Ka8_10&`0Y$tsTvouGJ9(n>8NFOs#`WWbrAKIIF(j!3g)Tk4*f5CK88JB`(7934{X5eA4%VWPo$5*L;tdUQxo=+<^DK) zD(zwq+8_ED;|=L;pld7hLm#7^YRdB~??d_>^ILsi=wtND`|Ix^{SUh-zgK_1@v*;lULs{?Vb^Y~wl+LiA+Prf(wG3;r)?1!>H z{8ZU5W&gw)-|>Db`zz>v3-9Xw)BPBZll=*OjCt*=vVZflvY!*_W4uS%@3HLvQF0zk zIUkGUyqMqR{N!EbJUKmIM$a4cG3K*+9wXJS@LcF)V7frgFZ40@t0w0=mh*mvod4nv zY)A1A(8m}bZV~?heT?x&{fkq71APqMEfIfI{8M;G{8jN^Uj13|Z%O@K#s8^4G{1>| zbm}jS`p=~PwBldY-v;%+k@PX%H>!Vjq>u4l(8s{MulRF+p!)aX?~DI0@nA2B4<%ms z!!>?rJTV$yO1udpB>tqOuN;HtKp*q-c9(eNOZ?J!X1~x$zFYF&e1qh}B|o<9C11{( zKReB*qvqFH^X*XbZ_US{k1;>=F)(WWUh;WY^81qSkJ0>J^?)n-po!>(q906u(Gx{q z1k%SNn_P^oy$RqUt@@R`ehAG3=i7F~(=o$J}4_ zVQrKE}AbSoCkv!{z?C=;cWI znER1F#=L%u=oNXn%k& z^^`Q?|D?X6^_H+e>MxG=8|E$b8NWj6HBRd{c9qm~jMjIspSi!*f4tU%g4Tx|od>ql z`M~py)|1j|sW0I?f%kO2fTbQ)>QhGRRXCrpTdil|{8mvr|4-^&QR`pscBzMz`k3h; z^|Dew3p=Eq27Qe2iTnw$QhzJ;xIt2%3tF%9Tcv&%w4P_QzUQ>w7s#axYqdUG>b2%7t>0=rH|;O=UFc&TSL(l74@UgM z^JA$OYyCKCJ=tn~Ig&oc_)>pPJ)|C8>eEi|kF(aV^9reFTZ-Qphj`9N@g2_#XG=Xi zYJEJvB=z!2{rqmJr$?=?)1SLrj&YUxdrhgwLmy+_{O&8q0L_z9zem2pZ7cQtO1+=* z8_ZMg13cwF=$G`qAeQ@sa-Xoj+%K5@|t>3vMOpFuv#^XdJKr+gLj z<{5IoQ|^1>O>+NJ?t@%sxgRR`MajwiQMpe_yU6{Lc~<+%k533Kxtfo9ctlfku-=ZVy zp|Gp`z3Vim=3MaM+^#%6-Z1oC9{1(ew-`Tojq9i8%x72!<{i^#f{!O2FoErz`}AFi z_WT!zbNjpPF9Fk0J-NSsUB`* z94Ecr9d`T2;_m40A3AUm(5*c;f_FFe*#eADE`0)g`TNJEUF@ZQ8DHir^OxsUp0Dpy z-mkoGdH?cz%I_<`xBULH9$wd{tXEmTvYuYox3b>4{^ff({XUiNRlc8o&+>iC_jdaI zo$iOSKgxco>>u4vWq+0ZR`y@nk7a+B{aW^K+0T{zt^2*~|8gG6`6%b5oS$-@f}XE( z-pcta=dp4=^}JTj@29hG9*aQCF8oWE{Xz>f5Oy&I8pFD#7LR$XuTWnu?&Qy#u9eVP) zOFQ!f@M+QCSdXPakIML?ZeagmTAzI!`unS!UW$J9$IS5}KK>IwKh){F0er4;%q*Vg z<=wZSzpJt3RiL?|I@`njuRaHR7_cjk4^0l>b#aIMHUNAKJ;ZNadn=#kaBBBY!TSw) z9nhZs)t6N~zUBu&H~8^Q;KTPnE&=9AwdS!soHiO5w{Mm2Q1?)o@BCYqgEw6rk27@x zpXYLaoR01IIqYe|zdu|^=)aP`C+K^gF`M~!%+tWU=2-Rz;ibc-qiyERcn=saePi`P zLVM^z-B8JLC~RO@cl8K9j(^bR5Ttu;9?HJg&nvEy1S` zmfrz1Km6Vgd}w$A@AJH%?o;6Xrsaa;ju;QSeRKE-V7_2hAK1;1nfCx~gP|?Jn@ifB z0$wZyxWrzkfFD z;lAa~!Mj76aGdjFr||pI-}B@1;M#omG~2JcBB6imdmGPh23^VLIQH8)9Cq`=qG4=b 
zQU6w;`*F{n;M45(vR+$fy#PKx^UWLJ?WpUP0>c~f?_|1Dj(8LHG`iLj@OkBVGk|{l zQ4zd9YL`cV=F-_Cz^Bc1c|Nn?oF(Ahz}D}A&%1W`n6dRg)`K@apWFycE9Sh#{4X2$ zxoP)}{9bOvonv5kpRSz9_IUNP+<*7&s~Fo1=W*tvN4CN4H|)v#%k1{cHnz{5mcY9| zZRc~5PaQc5yx%lle)l~W@c#C1Oyl*j&8D;8vNaaSx*px|Ym6`V$Bx<<@Ah86`xpIT zw{K1x28=Wx?3f?zRDa|sw$t;$M|vM1-Vc1B_XXnp+1_?IpUX(UkNKysein$|54)%J zKs)TQk>?MzUd+?_0kNL2yEgw=4~(?l;4Q5`FwQ?`3HY$3?h8Oi--~(rerPB9o@}S@ z3*L>XwSqHQUj0v*Ly@QAmJ6o0{&`{Q&> z*a9B$8ki}5!|o}b1JjpV`F%6Rd$c3Pe_*D3pbBlu4}g9^%@4pk${&D%@(H$6egRCB zZ!k~!2jjuN4FI1hKSA43zQTO>SDyp#D4zj_J-5hrp?n8+L-`N$ln(*p$e)ISN4~`N zwoNZ(d#AIHLpxG_#n}JR&Oqc}u;>5g2|(m$;O(+=rUE17Z_J-Eg8glz{0@9N@n{}z z4*Gj*w$GT&`#iO{k)NLrn!x8c44&~Xw*P$MLX0z%UjmVDf=B)djFgXpPd{zz0L+xH zGEey{+VltZcwIc@x8N=1yFlc>ush0!fyj@+N6MFhp7Lk#p7Lp+BYg}EOK!dpcI4kc z+oA8d;E|sL{jMXf0V02g-HfPnD-iiT+fVCt1<;W`1`j=ewk3TGgg$1zRRdW!(#PN( z>0==D2JEIq=6wo10zNk%5rC2OG3=J~G1{S;{JXo6^f7qXuq%%bq>s5j=^-HW5$w=Q zz(o2Od?Gys%y;!+KVwNBqiz1Z{Ux9yeOyHdeGGJ@kHH7hZ$L}>7(DbH(2za`5B&#> zd%p50_;f`T?*rFo9PjVEWi!90C4CILC4G$k(3?O<`k2R&9tHYYZTWqmSJ_T~5smqs zi@T!@eG5#h&W+%qf7w2A=@a0~{c)Ng?P7Q9q<YreCT65-(D%dKkX&!5p;dh7+J5fe$dDK-erAty`8Rq*6(3RA7kF6 z-z$c0}{ zV_qlH$5mJ_^>@CR_&?}lwjV71F_At-JE;Hk>Q96E*QEZo_}}=u_~YWA{Q&XT#eat; z;?L9N;@^wE4@1QN#}W_rl=zTJyr?em!)iRS8egD~@qYC8o!QUYgCriM5}&S-c;z*I zMU7`AzGaPfUgKZVc$hRkI*pe`<7d)%>NLKlE)s7`{H@#{YkbZcudT*!r}4bRcdzk2 zY5Y&GNIsA>KQJX<(EPz`K9Ng)q4|a-ea!2i`AE|I#I=-srQ|QRndCDizi}HS-zoWz zJ4EuKN`BN>@}-hLm3+$IF8LMoF`nzrm;9^bV`(4B&tl2fF4Fu>^Es#a9rQ8AXU+fY z2a*q#{Lqh)e6f;0YCc)=%c%Kg*8H>Nqn`9J=1rQfh7TowP2WpCYtNJXHj+NBqJBp6 z->CU8^fAU;&6iWjpEaL$S4n>DG~ag1CI7DE<9kbf4t>nOudC$mC7%yFB)>2DzWq}2 z|9HRXfuaxWIMEBx$9T`!TJ%KG7hd%S^fBy?^f3_n7|(U2j~Raw{gO!^qaBOBQN0sK zivEE<=KiXWjOwMR`YEcOa;mSq>Mg7KE2$ofs?U<@HLvTgHS17GyHnJ0R^=y$v??D<2{_xL@S7rzg@r{Bw{^#QB(0$*L~ z2X?2_6a2eUU$9zl!1`hw)*JId9|Q3{z~lP>@x8#~`!TQe4x{xCd~eu;)<;Uc1p5QL zq5T5PrM{x|7DxLFyw!S)*ZPd_FZG%}Q@&>pw~BLD>JW`+KEc zg!6&6)q0ZIqV*-MHw8LhuxG7DnHo}`D)lP!j?}M8Ju5es`c^zk>s?y^N?H#q^)ajU zvY_=d_zS$RA^(Br!Jh!(U%)%9$2qOfC9T(at>2j^q@HKBzUQ^xSL%Q8cbKo#2Mzf{ z%nN-C^rfD7ztk7K)*J05Qh$WM#duHtm-}nI(lwU)WvOSHTcy5P>Yb(jX-|@RXsM3| ziU*jd)KBvUsi&6uYQ9qHt%xspZlHJrEcIDW@d$1Ae^S3qTF*6F-;G-DE%jfo_28iO zVXyUKqxIvY_2j7aW%Gd4oAVB-KS!-cyLwWeF89Y)>(_|a_&w$Vt#50++iCsV_Lh3M znJo2j`yZ{B50d(MqI?0rC!Z$ub>tJ6uhie;byAP_TA%l|q+XvJOZ~o5&+j4i{Zj9b zdr18s`3~myl>hL&dS4L6%Kbs*KB2wbFZlU#-+=rI<6IxPk0|#O<`22AuzG(H7R!A` zx!=eq$$ba(F~(cc$Beb)ekALCiN8wjPs)8t*83Gp`k0@q_b*QGW4zwaARlEr<)^?v z`RYQvC*`l)*885Q_diDOgR(zGy&6jPi25%m{_)fIbuP?IR z4r}W2IN$nD(_zooO`HyU+^YublyvEI=B@83*!TT0u!qsV4F%>Ce&#xX4-<+c&;=GVJ*X`Ts2Q6NmTa z{&im9d>|hF>NJdtn;%*a42Qk(2JFGL+6;69maso}$IoHA{dn^8%rD!Q^9^&`_6IS} z+*s#9@cGX5uL0wbbHdkYgLm6zeg!mB`o0d{oplJ?-Tn2RX8S(!?^oyD z2X}!z-&>Qvn|uBAMX;x%KIsGAZyK}(m@e$q0DQSWF71k4`H1XGOV_WgXIbB}-evvE_b~c>%J(YYukt0l=b@aBa$YLuN6%9^U*)`& z^H})9Fyy{q`p=<8d8(a9&~8cHsI<$ny6R&863|ABd02ze|yx|N04x za~+n<;&C;n^a6(4BrnMe9%})+IcrI0@bQg~>_6RsohE>{8;<3?&OCVHT-eRAW8McJ z)^wW-OmBVhCV2PF$fp^1{f*CYy0=wV*z=-Z{lVL#H_Sjg{eH3`N`+oD#4vXJne-Jm?jokl- zwj3YRv?*V}?ti&|6ZdP>_hZ=I(P0@d_4t{8N4%=j7}%3t&3Tc(avz@8PLh8=E5E-l z&zrvbkwFHoU)C?9zkBYm&w+XN|K8_ueJG28dZa4k^Uq`k5gze`vS_(eixRm?3T0cz$pLZY1>tN?}XFcnGoih}C z3XOO@++f4;!0dO|YK%{<|6ty4k$-0-EjsdL@Km>($M&&924H;b_}L&}KI_MB;O&lT z9FN? 
zpB=xE<7Yae``u{AcD?%o(}*S`(9b`A`FLP{_$odxZu;Euu;fp+fIUBLTiNA~0P$@@PIbZ5WwFYtNc8s7KmzG@P`j;k%}-)GTO*iD_OeBR=n z^Z9*Zt@eE0Qmx4YU{AOIbtLACr{uXnvwuB4zy89Ot6;aE^j!=-f6;vv&<}ZYHu$)) zqnx)fYledlM@2qw{=pZX$%2 z@uwN!)2#YyfN}JxyifCg=T3q>ye0JtyYdZL_XSJWg3qm%_eOu)-~!ge<^C9L*xmKZ zd-HhekAC^1H#!3`A9&1 zE&YD*SP!)G;F^4%9jzDmwC>dTKuhZhd!+RRV!grVZ?|$D6X<(@cc*R{2ekCPV2||u z(Dq~33}>Y83qB8ill^d_?+@P5eqcN857@C^fPwZ8_|W06BY~Or7x+Z`4Q=0PeqUg+ zQ+fZIzKf=^yh^0+4kCLK=>Qx$^W2juRY*qwv&GX9~aE#a|8bcK8<*h zzc>6F+iT5x3<&=Rd;GH6oj~|U@QM5-+G*SnKF{!{%%>w?2EyOMZrc1I>rDO_d?5b} z%;c|OPdn%EKFzbcv;R%x-@)5Mewqk`|409LcB7@hMDc;`6fb~?AK(MU6CmOX_(1W7 zk>U^d{B_I7?Ur??10%&N@R8yd&{I4EkN5_36z`a)_y>#>55WhDk8G!S3EolsMBC@b z=K>AISMcflh99$?;xG8h{V~O7@Mi748-bqUH|&n$IWSXvXFJ7vv?IlTV4!>eyr=vC zm?&QWpDBL;dder5r~HDE@(u8o@(;#~ZyyCdP=11Tqf4&wJle$VzctM3N- zZEdc`IM@GTK7WDqG5AdSn0eC2z(o3(dD6$gK>C>Nq>q7y^a%Jw`WR?Pudw~$5f7jZ zJp*(*`aKICdIxAoAA^S;0_KUW5_n7c7#K-Efe)mQf$4Dh|JMD;!+WC*{ROl~ygIFl z5c&*gNgsocq>q7y^f7qoV_^7b^7G)K|A6t9?GJ*FH`RF%?L_(*Xdhem9C+wS#``9+ zKZ8Ezev|vMKZ8Do-FK_^G-bnpx+nz80(OzJ_3xoAH435p!>z@ z{we#(_tO1!qU^V_|56{>kGY-fPup1bYtsE|u9y7`eaz3*{a)GsR4>B&da576ZuPuG zJwJ)`G5AdNCq}AAp?}u%=k+{#J)c3(t2so@uUjhTxt#C#zMS`R{^NP#4~l^CjO`RBlCgyr{b^D#p1u9kMX|YIq`4B-#PVv(8v6H=Bs}^L;R&z z{}~Pze_HXc>TiqxjjP2ULmy*(g8FNx{@as2#{6c9`1j)PL-GF_4`PWA8ZWHI52x`Y zYJ71TZ(?0_)zI$h&8>0|uT2mHca8$;V7v$*7Z^LbbDd(y`k=QaNiMGvSxh^iO7>W4)781G|M zUu4xAk@PWm=wn9JCqea!Jw)`2?0{3Mi=(dT)B==GxCv+8-gKfiZN(fiQH7#EI`dI0n>?4@2121xxNXgwjXmHGnK z6XUSHJigQ;NFQT-)OtmzFZBzj^$dJJ^pB&Z-eERL{UeY*#<)@+!G7R*wSJQJlX^<2 zuUOK@up8Q680WPfW3)cwwO*67euMpr{@A~aq>nKU`WR@m9+b5{g!2I2Y5mA)JqhQB z$7#I@=c@|+=)3`=)~BG4xu4dr44qflael!=A9MRlt$%4fEN_?kSgDuUHc~$;^|bh* z)YsroFy4JC^|w-wOQk+{wbbiM{my7TuhjS4<5KUd)c+2adLaBC&rALgSn7x74yh-) zF;ZVF^+x#Dg?JCGN5)oCpDguCQ|g!GuX#T5-$0}FPD}nB?QpQvLzC7=5f3mg;scMX zCiPUS_0?QQ>a9WRuZTD37qvd?DITHiwSH^*N%F$rf3+TLwLXk^i1AMA z$B360m$km^wBGEr{)~7FyVv@3xj#mH2A{N^9ksq~wBC*Q4!fgxUqxshkotJidO7j~ z*v(F>+gp04U9+r0YpB+_L@?^N4^5yw~+ci@)_8z*8j_WfUl+Z1A1S8 z{0QTaF99>@V~lfp-(dCr!G0_E5#@d&>3v1Hzwi&qeMY(8Kt6~0Lb?C=v)qRy%KwQN!?|1BOa^F+#f8rLo b4=VRV?gqIpD)&cjp4=yu`z1S-?wkG}L1=NJ literal 0 HcmV?d00001 diff --git a/thesis code/network analysis/orientation_tuning/orientation_tuning_curve.py b/thesis code/network analysis/orientation_tuning/orientation_tuning_curve.py new file mode 100644 index 0000000..7faf2e8 --- /dev/null +++ b/thesis code/network analysis/orientation_tuning/orientation_tuning_curve.py @@ -0,0 +1,244 @@ +import torch +import matplotlib.pyplot as plt +import numpy as np +import matplotlib as mpl + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" + +import os +import sys + +parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(parent_dir) +from functions.make_cnn import make_cnn # noqa + + +def plot_single_tuning_curve(mean_syn_input, mean_relu_response, theta): + plt.figure() + plt.plot(theta, mean_syn_input, label="Before ReLU") + plt.plot(theta, mean_relu_response, label="After ReLU") + plt.xlabel("orientation (degs)") + plt.ylabel("activity") + plt.legend() + plt.grid(True) + plt.gca().set_xticks(theta) + + plt.show() + + +def plot_single_phase_orientation(syn_input, relu_response, theta, phi, j): + plt.figure() + plt.subplot(1, 2, 1, aspect="equal") + plt.imshow( + syn_input.T, # type: ignore + cmap="viridis", + aspect="auto", + extent=[theta[0], theta[-1], phi[0], phi[-1]], + ) + plt.xlabel("orientation (degs)") + plt.ylabel("phase (degs)") + plt.colorbar(label="activity") + plt.title(f"Weight {j}", fontsize=16) + + 
plt.subplot(1, 2, 2, aspect="equal") + plt.imshow( + relu_response.T, # type: ignore + cmap="viridis", + aspect="auto", + extent=[theta[0], theta[-1], phi[0], phi[-1]], + ) + plt.xlabel("orientation (degs)") + plt.ylabel("phase (degs)") + plt.colorbar(label="activity") + plt.title(f"Weight {j}", fontsize=16) + + plt.show() + + +def plot_all_tuning_curves(response_array, theta, orientations=32, phases=4): + # plot tuning curves + plt.figure(figsize=(12, 15)) + for i in range(response_array.shape[0]): + # synaptic input + in_neuron = response_array[i].reshape(orientations, phases) + mean_syn_in = in_neuron.mean(axis=1) + + # after non linearity + out_relu = torch.nn.functional.leaky_relu(torch.tensor(response_array[i])) + out_relu = out_relu.numpy().reshape(orientations, phases) + mean_out_relu = out_relu.mean(axis=1) # type: ignore + + plt.subplot(8, 4, i + 1) + plt.plot(theta, mean_syn_in) + plt.plot(theta, mean_out_relu) + plt.xlabel("Theta (degs)") + plt.ylabel("Activity") + + plt.tight_layout() + plt.show() + + +def calculate_responses(weights, plot_single_responses): + # load Gabor filter + orientations = 32 + phases = 4 + filename: str = "gabor_dict_32o_4p.npy" + filepath: str = os.path.join( + "D:/Katha/Neuroscience/Semester 4/newCode/kk_contour_net_shallow-main/investigate", + filename, + ) + gabor_dict = np.load(filepath) + + # collect data + all_responses: list = [] + after_relu = np.zeros((weights.shape[0], orientations)) + for j in range(weights.shape[0]): + w0 = weights[j, 0].detach().cpu() # .numpy() + + response: list = [] + for i in range(gabor_dict.shape[0]): + gabor = gabor_dict[i, 0] + if w0.shape[0] != gabor.shape[0]: + # TODO: for later layers + # get number to pad + pad = (gabor.shape[0] - w0.shape[0]) // 2 + + # pad: + w_pad = torch.nn.functional.pad( + w0, (pad, pad, pad, pad), mode="constant", value=0 + ) + w_pad = w_pad.numpy() + + else: + w_pad = w0.numpy() + + dot = np.sum(gabor * w_pad) + response.append(dot) + + # axis for plotting: + theta = np.rad2deg(np.arange(orientations) * np.pi / orientations) + phi = np.rad2deg(np.arange(phases) * 2 * np.pi / phases) + + # to array + mean + syn_input = np.array(response) + syn_input = syn_input.reshape(orientations, phases) + mean_response_orient = syn_input.mean(axis=1) + + # leaky relu: + relu_response = torch.nn.functional.leaky_relu( + torch.tensor(response), negative_slope=0.1 + ) + relu_response = relu_response.numpy().reshape(orientations, phases) + mean_relu_orient = relu_response.mean(axis=1) # type: ignore + + # append to save: + after_relu[j] = mean_relu_orient + + # plot 2D: + if plot_single_responses: + plot_single_phase_orientation( + syn_input=syn_input, + relu_response=relu_response, + theta=theta, + phi=phi, + j=j, + ) + + # plot tuning curve + plot_single_tuning_curve( + mean_syn_input=mean_response_orient, + mean_relu_response=mean_relu_orient, + theta=theta, + ) + + # collect response for each weight + all_responses.append(response) + + # to array: + response_array = np.array(all_responses) + + return response_array, after_relu, theta + + +def plot_mean_resp_after_relu(mean_response, theta): + # plot tuning curves + plt.figure(figsize=(12, 15)) + for i in range(mean_response.shape[0]): + plt.subplot(8, 4, i + 1) + plt.plot(theta, mean_response[i]) + plt.xlabel("Theta (degs)") + plt.ylabel("Activity") + + plt.tight_layout() + plt.show() + + +def load_data_from_cnn( + cnn_name: str, + plot_responses: bool, + do_stats: bool, + plot_single_responses: bool = False, +): + # path to NN + + if do_stats: + 
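+        # when do_stats is set, cnn_name is expected to already be the full path to the model file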
+        PATH = cnn_name
+    else:
+        PATH = f"D:/Katha/Neuroscience/Semester 4/newCode/kk_contour_net_shallow-main/trained_models/{cnn_name}"
+
+    # load and evaluate model
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    model = torch.load(PATH).to(device)
+
+    # set the model to evaluation mode
+    model.eval()
+    print(model)
+
+    # load the NN's conv1 weights:
+    weights = model[0]._parameters["weight"].data
+
+    # call
+    response_array, mean_response_after_relu, theta = calculate_responses(
+        weights=weights, plot_single_responses=plot_single_responses
+    )
+
+    # plot
+    if plot_responses:
+        plot_all_tuning_curves(response_array=response_array, theta=theta)
+        plot_mean_resp_after_relu(mean_response=mean_response_after_relu, theta=theta)
+
+    return np.array(mean_response_after_relu)
+
+
+if __name__ == "__main__":
+    # path to NN
+    nn = "ArghCNN_numConvLayers3_outChannels[32, 8, 8]_kernelSize[7, 15]_leaky relu_stride1_trainFirstConvLayerTrue_seed291853_Natural_314Epoch_0908-1206.pt"
+    _ = load_data_from_cnn(cnn_name=nn, plot_responses=True, do_stats=False)
diff --git a/thesis code/network analysis/orientation_tuning/plot_fit_statistics.py b/thesis code/network analysis/orientation_tuning/plot_fit_statistics.py
new file mode 100644
index 0000000..0865697
--- /dev/null
+++ b/thesis code/network analysis/orientation_tuning/plot_fit_statistics.py
@@ -0,0 +1,272 @@
+import matplotlib.pyplot as plt
+import numpy as np
+import warnings
+import matplotlib as mpl
+
+mpl.rcParams["text.usetex"] = True
+mpl.rcParams["font.family"] = "serif"
+mpl.rcParams["font.size"] = 15
+
+# suppress warnings
+warnings.filterwarnings("ignore")
+
+
+def autolabel(rects, ax):
+    for rect in rects:
+        height = rect.get_height()
+        ax.annotate(
+            f"{height:.2f}",
+            xy=(rect.get_x() + rect.get_width() / 2, height),
+            xytext=(-10, 3),
+            textcoords="offset points",
+            ha="right",
+            va="bottom",
+            fontsize=17,
+        )
+
+
+def plot_mean_percentile_amplit_ratio(
+    ratio_classic_21: float,
+    ratio_corner_21: float,
+    ratio_classic_321: float,
+    ratio_corner_321: float,
+    ratio_classic_332: float,
+    ratio_corner_332: float,
+    percentile_classic21: tuple,
+    percentile_classic321: tuple,
+    percentile_classic_332: tuple,
+    percentile_corner_21: tuple,
+    percentile_corner_321: tuple,
+    percentile_corner_332: tuple,
+    save_name: str,
+    saveplot: bool,
+):
+    num_von_mises = [2, 3, 3]  # X-axis ticks
+
+    # bar setup
+    bar_width = 0.35
+    index = np.arange(len(num_von_mises))
+
+    # position error bars correctly:
+    lower_err_classic = [
+        ratio_classic_21 - percentile_classic21[0],
+        ratio_classic_332 - percentile_classic_332[0],
+        ratio_classic_321 - percentile_classic321[0],
+    ]
+    upper_err_classic = [
+        percentile_classic21[1] - ratio_classic_21,
+        percentile_classic_332[1] -
ratio_classic_332, + percentile_classic321[1] - ratio_classic_321, + ] + + lower_err_corner = [ + ratio_corner_21 - percentile_corner_21[0], + ratio_corner_332 - percentile_corner_332[0], + ratio_corner_321 - percentile_corner_321[0], + ] + upper_err_corner = [ + percentile_corner_21[1] - ratio_corner_21, + percentile_corner_332[1] - ratio_corner_332, + percentile_corner_321[1] - ratio_corner_321, + ] + + yerr_classic = [lower_err_classic, upper_err_classic] + yerr_corner = [lower_err_corner, upper_err_corner] + + # subplots + fig, ax = plt.subplots(figsize=(7, 7)) + bars_classic = ax.bar( + index - bar_width / 2, + [ratio_classic_21, ratio_classic_332, ratio_classic_321], + bar_width, + yerr=yerr_classic, + capsize=5, + label="Classic", + color="cornflowerblue", + ) + bars_corner = ax.bar( + index + bar_width / 2, + [ratio_corner_21, ratio_corner_332, ratio_corner_321], + bar_width, + yerr=yerr_corner, + capsize=5, + label="Corner", + color="coral", + ) + + autolabel(bars_classic, ax) + autolabel(bars_corner, ax) + + ax.set_ylabel("Median ratio of amplitudes", fontsize=18) + ax.set_xticks(index) + ax.set_xticklabels( + [ + "2 von Mises \n(min/max)", + "3 von Mises \n(mid/max)", + "3 von Mises\n(min/mid)", + ], + fontsize=17, + ) + ax.legend(fontsize=17) + ax.set_ylim(bottom=0.0) + + # plot + plt.yticks(fontsize=17) + plt.tight_layout() + if saveplot: + plt.savefig( + f"additional thesis plots/saved_plots/fitkarotte/median_quartiles_ampli_ratio_{save_name}_corn_class.pdf", + dpi=300, + bbox_inches="tight", + ) + plt.show(block=True) + + +def plot_means_std_corner_classic( + means_classic: list, + means_corner: list, + std_classic: list, + std_corner: list, + saveplot: bool, + save_name: str, +): + num_von_mises = [1, 2, 3] # X-axis ticks + + # bar setup + bar_width = 0.35 + index = np.arange(len(num_von_mises)) + + # subplots + fig, ax = plt.subplots(figsize=(7, 7)) + bars_classic = ax.bar( + index - bar_width / 2, + means_classic, + bar_width, + yerr=std_classic, + capsize=5, + label="Classic", + color="cornflowerblue", + ) + bars_corner = ax.bar( + index + bar_width / 2, + means_corner, + bar_width, + yerr=std_corner, + capsize=5, + label="Corner", + color="coral", + ) + + autolabel(bars_classic, ax) + autolabel(bars_corner, ax) + + ax.set_ylabel("Average number of fits", fontsize=17) + ax.set_xticks(index) + ax.set_xticklabels(["1 von Mises", "2 von Mises", "3 von Mises"], fontsize=17) + ax.legend(fontsize=16) + ax.set_ylim(bottom=0.0) + + # plot + plt.yticks(fontsize=17) + plt.tight_layout() + if saveplot: + plt.savefig( + f"additional thesis plots/saved_plots/fitkarotte/y_lim_mean_fits_{save_name}_corn_class.pdf", + dpi=300, + ) + plt.show(block=True) + + +def plot_mean_std_amplit_ratio( + ratio_classic_21: float, + std_class_21: float, + ratio_corner_21: float, + std_corn_21: float, + ratio_classic_321: float, + std_class_321: float, + ratio_corner_321: float, + std_corn_321: float, + ratio_classic_332: float, + std_class_332: float, + ratio_corner_332: float, + std_corn_332: float, + save_name: str, + saveplot: bool, +): + num_von_mises = [2, 3, 3] # X-axis ticks + + # bar setup + bar_width = 0.35 + index = np.arange(len(num_von_mises)) + + # subplots + fig, ax = plt.subplots(figsize=(12, 7)) + bars_classic = ax.bar( + index - bar_width / 2, + [ratio_classic_21, ratio_classic_332, ratio_classic_321], + bar_width, + yerr=[std_class_21, std_class_332, std_class_321], + capsize=5, + label="Classic", + color="cornflowerblue", + ) + bars_corner = ax.bar( + index + bar_width / 2, + 
+        [ratio_corner_21, ratio_corner_332, ratio_corner_321],
+        bar_width,
+        yerr=[std_corn_21, std_corn_332, std_corn_321],
+        capsize=5,
+        label="Corner",
+        color="coral",
+    )
+
+    autolabel(bars_classic, ax)
+    autolabel(bars_corner, ax)
+
+    ax.set_ylabel("Mean ratio of amplitudes", fontsize=17)
+    ax.set_xticks(index)
+    ax.set_xticklabels(
+        [
+            "2 von Mises \n(max/min)",
+            "3 von Mises \n(max/mid)",
+            "3 von Mises\n(mid/min)",
+        ],
+        fontsize=17,
+    )
+    ax.legend(fontsize=16)
+    ax.set_ylim(bottom=0.0)
+
+    # plot
+    plt.yticks(fontsize=17)
+    plt.tight_layout()
+    if saveplot:
+        plt.savefig(
+            f"additional thesis plots/saved_plots/fitkarotte/y_lim_mean_std_ampli_ratio_{save_name}_corn_class.pdf",
+            dpi=300,
+            bbox_inches="tight",
+        )
+    plt.show(block=True)
+
+
+def plot_hist(fits_per_mises, num_mises: int):
+    """
+    Plot a histogram to check whether the data is approximately normally distributed
+    """
+    # get correct x-ticks
+    x_ticks = np.arange(start=min(fits_per_mises), stop=max(fits_per_mises) + 1, step=1)
+
+    # plot
+    plt.hist(
+        fits_per_mises,
+        alpha=0.5,
+        label=f"{num_mises} von Mises function",
+        align="mid",
+    )
+    plt.xlabel(f"Number of weights fitted with {num_mises} von Mises functions")
+    plt.ylabel(f"Frequency of fits with {num_mises} von Mises functions (20 CNNs)")
+    plt.title(f"Histogram of Fits with {num_mises} von Mises Function")
+    plt.xticks(x_ticks)
+    plt.legend()
+    plt.tight_layout()
+    plt.show(block=True)
diff --git a/thesis code/network analysis/psychometric_curves/README.txt b/thesis code/network analysis/psychometric_curves/README.txt
new file mode 100644
index 0000000..1c47aec
--- /dev/null
+++ b/thesis code/network analysis/psychometric_curves/README.txt
@@ -0,0 +1,4 @@
+Folder psychometric_curves:
+
+1. error_bar_performance_pfinkel:
+* calculates the average performance of all 20 CNNs for each stimulus condition and, within each condition, for every path angle
\ No newline at end of file
diff --git a/thesis code/network analysis/psychometric_curves/error_bar_performance_pfinkel.py b/thesis code/network analysis/psychometric_curves/error_bar_performance_pfinkel.py
new file mode 100644
index 0000000..21825c6
--- /dev/null
+++ b/thesis code/network analysis/psychometric_curves/error_bar_performance_pfinkel.py
@@ -0,0 +1,223 @@
+import torch
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+import os
+import datetime
+
+mpl.rcParams["text.usetex"] = True
+mpl.rcParams["font.family"] = "serif"
+
+from functions.alicorn_data_loader import alicorn_data_loader
+from functions.create_logger import create_logger
+
+
+def performance_pfinkel_plot(
+    performances_list: list[dict],
+    all_performances: dict,
+    labels: list[str],
+    save_name: str,
+    logger,
+) -> None:
+    figure_path: str = "rerun_errorbar_performance_pfinkel"
+    os.makedirs(figure_path, exist_ok=True)
+
+    plt.figure(figsize=[10, 10])
+    with open(f"./{figure_path}/performances_{save_name}_{current}.txt", "w") as f:
+        for id, selected_condition in enumerate(condition):
+            f.write(
+                f"Condition:{selected_condition} Path angle (in °), Mean accuracy (\\%), Standard deviation (\\%)\n"
+            )
+
+            x_values = np.array(num_pfinkel)
+            y_values = np.array(
+                [
+                    np.mean(all_performances[selected_condition][pfinkel])
+                    for pfinkel in num_pfinkel
+                ]
+            )
+            yerr_values = np.array(
+                [
+                    np.std(all_performances[selected_condition][pfinkel])
+                    for pfinkel in num_pfinkel
+                ]
+            )
+
+            for x, y, yerr in zip(x_values, y_values, yerr_values):
+                f.write(f"{x}, {y/100.0:.3f}, {yerr/100.0:.3f}\n")
+                f.write(f"{x},
{y}, {yerr}\n") + + plt.errorbar( + x_values, + y_values / 100.0, + yerr=yerr_values / 100.0, + fmt="o", + capsize=5, + label=labels[id], + ) + plt.xticks(x_values) + plt.title("Average accuracy", fontsize=19) + plt.xlabel("Path angle (in °)", fontsize=18) + plt.ylabel("Accuracy (\\%)", fontsize=18) + plt.ylim(0.5, 1.0) + plt.legend(fontsize=15) + + # Increase tick label font size + plt.xticks(fontsize=17) + plt.yticks(fontsize=17) + plt.grid(True) + plt.tight_layout() + logger.info("") + logger.info("Saved in:") + + print( + os.path.join( + figure_path, + f"ylim_ErrorBarPerformancePfinkel_{save_name}_{current}.pdf", + ) + ) + plt.savefig( + os.path.join( + figure_path, + f"ylim_ErrorBarPerformancePfinkel_{save_name}_{current}.pdf", + ), + dpi=300, + bbox_inches="tight", + ) + plt.show() + + +if __name__ == "__main__": + model_path: str = "classic_3288_fest" + print(model_path) + data_path: str = "/home/kk/Documents/Semester4/code/RenderStimuli/Output/" + + # num stimuli per Pfinkel and batch size + stim_per_pfinkel: int = 10000 + batch_size: int = 1000 + # stimulus condition: + performances_list: list = [] + + condition: list[str] = ["Coignless", "Natural", "Angular"] + figure_label: list[str] = ["Classic", "Corner", "Bridge"] + # load test data: + num_pfinkel: list = np.arange(0, 100, 10).tolist() + image_scale: float = 255.0 + + # ------------------------------------------ + + # create logger: + logger = create_logger( + save_logging_messages=False, + display_logging_messages=True, + model_name=model_path, + ) + + device_str: str = "cuda:0" if torch.cuda.is_available() else "cpu" + logger.info(f"Using {device_str} device") + device: torch.device = torch.device(device_str) + torch.set_default_dtype(torch.float32) + + # current time: + current = datetime.datetime.now().strftime("%d%m-%H%M") + + all_performances: dict = { + condition_name: {pfinkel: [] for pfinkel in num_pfinkel} + for condition_name in condition + } + + for filename in os.listdir(model_path): + if filename.endswith(".pt"): + model_filename = os.path.join(model_path, filename) + model = torch.load(model_filename, map_location=device) + model.eval() + print(model_filename) + + for selected_condition in condition: + # save performances: + logger.info(f"Condition: {selected_condition}") + performances: dict = {} + for pfinkel in num_pfinkel: + test_loss: float = 0.0 + correct: int = 0 + pattern_count: int = 0 + + data_test = alicorn_data_loader( + num_pfinkel=[pfinkel], + load_stimuli_per_pfinkel=stim_per_pfinkel, + condition=selected_condition, + logger=logger, + data_path=data_path, + ) + loader = torch.utils.data.DataLoader( + data_test, shuffle=False, batch_size=batch_size + ) + + # start testing network on new stimuli: + logger.info("") + logger.info( + f"-==- Start {selected_condition} " f"Pfinkel {pfinkel}° -==-" + ) + with torch.no_grad(): + for batch_num, data in enumerate(loader): + label = data[0].to(device) + image = data[1].type(dtype=torch.float32).to(device) + image /= image_scale + + # compute prediction error; + output = model(image) + + # Label Typecast: + label = label.to(device) + + # loss and optimization + loss = torch.nn.functional.cross_entropy( + output, label, reduction="sum" + ) + pattern_count += int(label.shape[0]) + test_loss += float(loss) + prediction = output.argmax(dim=1) + correct += prediction.eq(label).sum().item() + + total_number_of_pattern: int = int(len(loader)) * int( + label.shape[0] + ) + + # logging: + logger.info( + ( + f"{selected_condition},{pfinkel}° " + "Pfinkel: " + 
f"[{int(pattern_count)}/{total_number_of_pattern} ({100.0 * pattern_count / total_number_of_pattern:.2f}%)]," + f" Average loss: {test_loss / pattern_count:.3e}, " + "Accuracy: " + f"{100.0 * correct / pattern_count:.2f}% " + ) + ) + + performances[pfinkel] = { + "pfinkel": pfinkel, + "test_accuracy": 100 * correct / pattern_count, + "test_losses": float(loss) / pattern_count, + } + all_performances[selected_condition][pfinkel].append( + 100 * correct / pattern_count + ) + + performances_list.append(performances) + else: + print("No files found!") + break + + performance_pfinkel_plot( + performances_list=performances_list, + all_performances=all_performances, + labels=figure_label, + save_name=model_path, + logger=logger, + ) + logger.info("-==- DONE -==-") diff --git a/thesis code/network analysis/render_including_minDist/contours.py b/thesis code/network analysis/render_including_minDist/contours.py new file mode 100644 index 0000000..7083c56 --- /dev/null +++ b/thesis code/network analysis/render_including_minDist/contours.py @@ -0,0 +1,603 @@ +# %% +# +# contours.py +# +# Tools for contour integration studies +# +# Version 1.0, 24.03.2023 +# + +# +# Coordinate system assumptions: +# +# for arrays: +# [..., HEIGHT, WIDTH], origin is on TOP LEFT +# HEIGHT indices *decrease* with increasing y-coordinates (reversed) +# WIDTH indices *increase* with increasing x-coordinates (normal) +# +# Orientations: +# 0 is horizontal, orientation *increase* counter-clockwise +# Corner elements, quantified by [dir_source, dir_change]: +# - consist of two legs +# - contour *enters* corner from *source direction* at one leg +# and goes from border to its center... +# - contour path changes by *direction change* and goes +# from center to the border +# + +import torch +import time +import matplotlib.pyplot as plt +import math +import scipy.io +import numpy as np +import os + +torch_device = "cuda" +default_dtype = torch.float32 +torch.set_default_dtype(default_dtype) +torch.device(torch_device) + + +# +# performs a coordinate transform (rotation with phi around origin) +# rotation is performed CLOCKWISE with increasing phi +# +# remark: rotating a mesh grid by phi and orienting an image element +# along the new x-axis is EQUIVALENT to rotating the image element +# by -phi (so this realizes a rotation COUNTER-CLOCKWISE with +# increasing phi) +# +def rotate_CW(x: torch.Tensor, y: torch.Tensor, phi: torch.float32): # type: ignore + xr = +x * torch.cos(phi) + y * torch.sin(phi) + yr = -x * torch.sin(phi) + y * torch.cos(phi) + + return xr, yr + + +# +# renders a Gabor with (or without) corner +# +def gaborner( + r_gab: int, # radius, size will be 2*r_gab+1 + dir_source: float, # contour enters in this dir + dir_change: float, # contour turns around by this dir + lambdah: float, # wavelength of Gabor + sigma: float, # half-width of Gabor + phase: float, # phase of Gabor + normalize: bool, # normalize patch to zero + torch_device: str, # GPU or CPU... 
+) -> torch.Tensor: + # incoming dir: change to outgoing dir + dir1 = dir_source + torch.pi + nook = dir_change - torch.pi + + # create coordinate grids + d_gab = 2 * r_gab + 1 + x = -r_gab + torch.arange(d_gab, device=torch_device) + yg, xg = torch.meshgrid(x, x, indexing="ij") + + # put into tensor for performing vectorized scalar products + xyg = torch.zeros([d_gab, d_gab, 1, 2], device=torch_device) + xyg[:, :, 0, 0] = xg + xyg[:, :, 0, 1] = yg + + # create Gaussian hull + gauss = torch.exp(-(xg**2 + yg**2) / 2 / sigma**2) + gabor_corner = gauss.clone() + + if (dir_change == 0) or (dir_change == torch.pi): + # handle special case of straight Gabor or change by 180 deg + + # vector orth to Gabor axis + ev1_orth = torch.tensor( + [math.cos(-dir1 + math.pi / 2), math.sin(-dir1 + math.pi / 2)], + device=torch_device, + ) + # project coords to orth vector to get distance + legs = torch.cos( + 2 + * torch.pi + * torch.matmul(xyg, ev1_orth.unsqueeze(1).unsqueeze(0).unsqueeze(0)) + / lambdah + + phase + ) + gabor_corner *= legs[:, :, 0, 0] + + else: + dir2 = dir1 + nook + + # compute separation line between corner's legs + ev1 = torch.tensor([math.cos(-dir1), math.sin(-dir1)], device=torch_device) + ev2 = torch.tensor([math.cos(-dir2), math.sin(-dir2)], device=torch_device) + v_towards_1 = (ev1 - ev2).unsqueeze(1).unsqueeze(0).unsqueeze(0) + + # which coords belong to which leg? + which_side = torch.matmul(xyg, v_towards_1)[:, :, 0, 0] + towards_1y, towards_1x = torch.where(which_side > 0) + towards_2y, towards_2x = torch.where(which_side <= 0) + + # compute orth distance to legs + side_sign = -1 + 2 * ((dir_change % 2 * torch.pi) > torch.pi) + ev12 = ev1 + ev2 + v1_orth = ev12 - ev1 * torch.matmul(ev12, ev1) + v2_orth = ev12 - ev2 * torch.matmul(ev12, ev2) + ev1_orth = side_sign * v1_orth / torch.sqrt((v1_orth**2).sum()) + ev2_orth = side_sign * v2_orth / torch.sqrt((v2_orth**2).sum()) + + leg1 = torch.cos( + 2 + * torch.pi + * torch.matmul(xyg, ev1_orth.unsqueeze(1).unsqueeze(0).unsqueeze(0)) + / lambdah + + phase + ) + leg2 = torch.cos( + 2 + * torch.pi + * torch.matmul(xyg, ev2_orth.unsqueeze(1).unsqueeze(0).unsqueeze(0)) + / lambdah + + phase + ) + gabor_corner[towards_1y, towards_1x] *= leg1[towards_1y, towards_1x, 0, 0] + gabor_corner[towards_2y, towards_2x] *= leg2[towards_2y, towards_2x, 0, 0] + + # depending on phase, Gabor might not be normalized... + if normalize: + s = gabor_corner.sum() + s0 = gauss.sum() + gabor_corner -= s / s0 * gauss + + return gabor_corner + + +# +# creates a filter bank of Gabor corners +# +# outputs: +# filters: [n_source, n_change, HEIGHT, WIDTH] +# dirs_source: [n_source] +# dirs_change: [n_change] +# +def gaborner_filterbank( + r_gab: int, # radius, size will be 2*r_gab+1 + n_source: int, # number of source orientations + n_change: int, # number of direction changes + lambdah: float, # wavelength of Gabor + sigma: float, # half-width of Gabor + phase: float, # phase of Gabor + normalize: bool, # normalize patch to zero + torch_device: str, # GPU or CPU... 
+) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + kernels = torch.zeros( + [n_source, n_change, 2 * r_gab + 1, 2 * r_gab + 1], + device=torch_device, + requires_grad=False, + ) + dirs_source = 2 * torch.pi * torch.arange(n_source, device=torch_device) / n_source + dirs_change = 2 * torch.pi * torch.arange(n_change, device=torch_device) / n_change + + for i_source in range(n_source): + for i_change in range(n_change): + gabor_corner = gaborner( + r_gab=r_gab, + dir_source=dirs_source[i_source], # type: ignore + dir_change=dirs_change[i_change], # type: ignore + lambdah=lambdah, + sigma=sigma, + phase=phase, + normalize=normalize, + torch_device=torch_device, + ) + kernels[i_source, i_change] = gabor_corner + + # check = torch.isnan(gabor_corner).sum() + # if check > 0: + # print(i_source, i_change, check) + # kernels[i_source, i_change] = 1 + + return kernels, dirs_source, dirs_change + + +def discretize_stimuli( + posori, + x_range: tuple, + y_range: tuple, + scale_factor: float, + r_gab_PIX: int, + n_source: int, + n_change: int, + torch_device: str, +) -> torch.Tensor: + # check correct input size + s = posori.shape + assert len(s) == 2, "posori should be NDARRAY with N x 1 entries" + assert s[1] == 1, "posori should be NDARRAY with N x 1 entries" + + # determine size of (extended) canvas + x_canvas_PIX = torch.tensor( + (x_range[1] - x_range[0]) * scale_factor, device=torch_device + ).ceil() + y_canvas_PIX = torch.tensor( + (y_range[1] - y_range[0]) * scale_factor, device=torch_device + ).ceil() + x_canvas_ext_PIX = int(x_canvas_PIX + 2 * r_gab_PIX) + y_canvas_ext_PIX = int(y_canvas_PIX + 2 * r_gab_PIX) + + # get number of contours + n_contours = s[0] + index_srcchg = [] + index_y = [] + index_x = [] + for i_contour in range(n_contours): + x_y_src_chg = torch.asarray(posori[i_contour, 0][1:, :].copy()) + x_y_src_chg[2] += torch.pi + + # if i_contour == 0: + # print(x_y_src_chg[2][:3]) + + # compute integer coordinates and find all visible elements + x = ((x_y_src_chg[0] - x_range[0]) * scale_factor + r_gab_PIX).type(torch.long) + y = y_canvas_ext_PIX - ( + (x_y_src_chg[1] - y_range[0]) * scale_factor + r_gab_PIX + ).type(torch.long) + i_visible = torch.where( + (x >= 0) * (y >= 0) * (x < x_canvas_ext_PIX) * (y < y_canvas_ext_PIX) + )[0] + + # compute integer (changes of) directions + i_source = ( + ((((x_y_src_chg[2]) / (2 * torch.pi)) + 1 / (2 * n_source)) % 1) * n_source + ).type(torch.long) + i_change = ( + (((x_y_src_chg[3] / (2 * torch.pi)) + 1 / (2 * n_change)) % 1) * n_change + ).type(torch.long) + + # stimulus = torch.zeros( + # (n_source, n_change, y_canvas_ext_PIX, x_canvas_ext_PIX), device=torch_device + # ) + # stimulus[i_source[i_visible], i_change[i_visible], y[i_visible], x[i_visible]] = 1 + + index_srcchg.append(i_source[i_visible] * n_change + i_change[i_visible]) + # index_change.append(i_change[i_visible]) + index_y.append(y[i_visible]) + index_x.append(x[i_visible]) + + return ( # type: ignore + index_srcchg, + index_x, + index_y, + x_canvas_ext_PIX, + y_canvas_ext_PIX, + ) + + +def render_stimulus( + kernels, index_srcchg, index_y, index_x, y_canvas, x_canvas, torch_device +): + s = kernels.shape + kx = s[-1] + ky = s[-2] + + stimulus = torch.zeros((y_canvas + ky - 1, x_canvas + kx - 1), device=torch_device) + n = index_srcchg.size()[0] + for i in torch.arange(n, device=torch_device): + x = index_x[i] + y = index_y[i] + stimulus[y : y + ky, x : x + kx] += kernels[index_srcchg[i]] + + return stimulus[ky - 1 : -(ky - 1), kx - 1 : -(kx - 1)] + + +if __name__ == 
"__main__": + VERBOSE = True + BENCH_CONVOLVE = False + BENCH_GPU = True + BENCH_CPU = True + BENCH_DAVID = True + + print("Testing contour rendering speed:") + print("================================") + + # load contours, multiplex coordinates to simulate a larger set of contours + n_multiplex = 1000 + mat = scipy.io.loadmat("z.mat") + posori = np.tile(mat["z"], (n_multiplex, 1)) + n_contours = posori.shape[0] + print(f"Processing {n_contours} contour stimuli") + + # how many contours to render simultaneously? + n_simultaneous = 5 + n_simultaneous_chunks, n_remaining = divmod(n_contours, n_simultaneous) + assert n_remaining == 0, "Check parameters for simultaneous contour rendering!" + + # repeat some times for speed testing + n_repeat = 10 + t_dis = torch.zeros((n_repeat + 2), device=torch_device) + t_con = torch.zeros((n_repeat + 2), device=torch_device) + t_rsg = torch.zeros((n_repeat + 2), device=torch_device) + t_rsc = torch.zeros((n_repeat + 2), device="cpu") + t_rsd = torch.zeros((n_repeat + 2), device="cpu") + + # cutout for stimuli, and gabor parameters + x_range = [140, 940] + y_range = [140, 940] + d_gab = 40 + lambdah = 12 + sigma = 8 + phase = 0.0 + normalize = True + + # scale to convert coordinates to pixel values + scale_factor = 0.25 + + # number of directions for dictionary + n_source = 32 + n_change = 32 + + # convert sizes to pixel units + lambdah_PIX = lambdah * scale_factor + sigma_PIX = sigma * scale_factor + r_gab_PIX = int(d_gab * scale_factor / 2) + d_gab_PIX = r_gab_PIX * 2 + 1 + + # make filterbank + kernels, dirs_source, dirs_change = gaborner_filterbank( + r_gab=r_gab_PIX, + n_source=n_source, + n_change=n_change, + lambdah=lambdah_PIX, + sigma=sigma_PIX, + phase=phase, + normalize=normalize, + torch_device=torch_device, + ) + kernels = kernels.reshape([1, n_source * n_change, d_gab_PIX, d_gab_PIX]) + kernels_flip = kernels.flip(dims=(-1, -2)) + + # define "network" and put to cuda + conv = torch.nn.Conv2d( + in_channels=n_source * n_change, + out_channels=1, + kernel_size=d_gab_PIX, + stride=1, + device=torch_device, + ) + conv.weight.data = kernels_flip + + print("Discretizing START!!!") + t_dis[0] = time.perf_counter() + for i_rep in range(n_repeat): + # discretize + ( + index_srcchg, + index_x, + index_y, + x_canvas, + y_canvas, + ) = discretize_stimuli( + posori=posori, + x_range=x_range, # type: ignore + y_range=y_range, # type: ignore + scale_factor=scale_factor, + r_gab_PIX=r_gab_PIX, + n_source=n_source, + n_change=n_change, + torch_device=torch_device, + ) + t_dis[i_rep + 1] = time.perf_counter() + t_dis[-1] = time.perf_counter() + print("Discretizing END!!!") + + if BENCH_CONVOLVE: + print("Allocating!") + stimuli = torch.zeros( + [n_simultaneous, n_source * n_change, y_canvas, x_canvas], + device=torch_device, + requires_grad=False, + ) + + print("Generation by CONVOLUTION start!") + t_con[0] = time.perf_counter() + for i_rep in torch.arange(n_repeat): + for i_simultaneous_chunks in torch.arange(n_simultaneous_chunks): + i_ofs = i_simultaneous_chunks * n_simultaneous + + for i_sim in torch.arange(n_simultaneous): + stimuli[ + i_sim, + index_srcchg[i_sim + i_ofs], + index_y[i_sim + i_ofs], + index_x[i_sim + i_ofs], + ] = 1 + + output = conv(stimuli) + + for i_sim in range(n_simultaneous): + stimuli[ + i_sim, + index_srcchg[i_sim + i_ofs], + index_y[i_sim + i_ofs], + index_x[i_sim + i_ofs], + ] = 0 + + t_con[i_rep + 1] = time.perf_counter() + t_con[-1] = time.perf_counter() + print("Generation by CONVOLUTION stop!") + + if BENCH_GPU: + 
print("Generation by GPU start!") + output_gpu = torch.zeros( + ( + n_contours, + y_canvas - d_gab_PIX + 1, + x_canvas - d_gab_PIX + 1, + ), + device=torch_device, + ) + t_rsg[0] = time.perf_counter() + for i_rep in torch.arange(n_repeat): + for i_con in torch.arange(n_contours): + output_gpu[i_con] = render_stimulus( + kernels=kernels[0], + index_srcchg=index_srcchg[i_con], + index_y=index_y[i_con], + index_x=index_x[i_con], + y_canvas=y_canvas, + x_canvas=x_canvas, + torch_device=torch_device, + ) + # output_gpu = torch.clip(output_gpu, -1, +1) + + t_rsg[i_rep + 1] = time.perf_counter() + t_rsg[-1] = time.perf_counter() + print("Generation by GPU stop!") + + if BENCH_CPU: + print("Generation by CPU start!") + output_cpu = torch.zeros( + ( + n_contours, + y_canvas - d_gab_PIX + 1, + x_canvas - d_gab_PIX + 1, + ), + device="cpu", + ) + kernels_cpu = kernels.detach().cpu() + t_rsc[0] = time.perf_counter() + for i_rep in range(n_repeat): + for i_con in range(n_contours): + output_cpu[i_con] = render_stimulus( + kernels=kernels_cpu[0], + index_srcchg=index_srcchg[i_con], + index_y=index_y[i_con], + index_x=index_x[i_con], + y_canvas=y_canvas, + x_canvas=x_canvas, + torch_device="cpu", + ) + # output_cpu = torch.clip(output_cpu, -1, +1) + + t_rsc[i_rep + 1] = time.perf_counter() + t_rsc[-1] = time.perf_counter() + print("Generation by CPU stop!") + + if BENCH_DAVID: + print("Generation by DAVID start!") + from CPPExtensions.PyTCopyCPU import TCopyCPU as render_stimulus_CPP + + copyier = render_stimulus_CPP() + + number_of_cpu_processes = os.cpu_count() + output_dav_tmp = torch.zeros( + ( + n_contours, + y_canvas + 2 * r_gab_PIX, + x_canvas + 2 * r_gab_PIX, + ), + device="cpu", + dtype=torch.float, + ) + gabor = kernels[0].detach().cpu() + + # Umsort! + n_elements_total = 0 + for i_con in range(n_contours): + n_elements_total += len(index_x[i_con]) + sparse_matrix = torch.zeros( + (n_elements_total, 4), device="cpu", dtype=torch.int64 + ) + i_elements_total = 0 + for i_con in range(n_contours): + n_add = len(index_x[i_con]) + sparse_matrix[i_elements_total : i_elements_total + n_add, 0] = i_con + sparse_matrix[ + i_elements_total : i_elements_total + n_add, 1 + ] = index_srcchg[i_con] + sparse_matrix[i_elements_total : i_elements_total + n_add, 2] = index_y[ + i_con + ] + sparse_matrix[i_elements_total : i_elements_total + n_add, 3] = index_x[ + i_con + ] + i_elements_total += n_add + assert i_elements_total == n_elements_total, "UNBEHAGEN macht sich breit!" 
+ + t_dav = torch.zeros((n_repeat + 2), device="cpu") + t_dav[0] = time.perf_counter() + for i_rep in range(n_repeat): + output_dav_tmp.fill_(0.0) + copyier.process( + sparse_matrix.data_ptr(), + int(sparse_matrix.shape[0]), + int(sparse_matrix.shape[1]), + gabor.data_ptr(), + int(gabor.shape[0]), + int(gabor.shape[1]), + int(gabor.shape[2]), + output_dav_tmp.data_ptr(), + int(output_dav_tmp.shape[0]), + int(output_dav_tmp.shape[1]), + int(output_dav_tmp.shape[2]), + int(number_of_cpu_processes), # type: ignore + ) + output_dav = output_dav_tmp[ + :, + d_gab_PIX - 1 : -(d_gab_PIX - 1), + d_gab_PIX - 1 : -(d_gab_PIX - 1), + ].clone() + t_dav[i_rep + 1] = time.perf_counter() + t_dav[-1] = time.perf_counter() + print("Generation by DAVID done!") + + if VERBOSE: # show last stimulus + if BENCH_CONVOLVE: + plt.subplot(2, 2, 1) + plt.imshow(output[-1, 0].detach().cpu(), cmap="gray", vmin=-1, vmax=+1) + plt.title("convolve") + if BENCH_GPU: + plt.subplot(2, 2, 2) + plt.imshow(output_gpu[-1].detach().cpu(), cmap="gray", vmin=-1, vmax=+1) + plt.title("gpu") + if BENCH_CPU: + plt.subplot(2, 2, 3) + plt.imshow(output_cpu[-1], cmap="gray", vmin=-1, vmax=+1) + plt.title("cpu") + if BENCH_DAVID: + plt.subplot(2, 2, 4) + plt.imshow(output_dav[-1], cmap="gray", vmin=-1, vmax=+1) + plt.title("david") + plt.show() + + dt_discretize = t_dis.diff() / n_contours + plt.plot(dt_discretize.detach().cpu()) + dt_convolve = t_con.diff() / n_contours + plt.plot(dt_convolve.detach().cpu()) + dt_gpu = t_rsg.diff() / n_contours + plt.plot(dt_gpu.detach().cpu()) + dt_cpu = t_rsc.diff() / n_contours + plt.plot(dt_cpu.detach().cpu()) + dt_david = t_dav.diff() / n_contours + plt.plot(dt_david.detach().cpu()) + + plt.legend(["discretize", "convolve", "gpu", "cpu", "david"]) + plt.show() + print( + f"Average discretize for 1k stims: {1000*dt_discretize[:-1].detach().cpu().mean()} secs." + ) + print( + f"Average convolve for 1k stims: {1000*dt_convolve[:-1].detach().cpu().mean()} secs." + ) + print(f"Average gpu for 1k stims: {1000*dt_gpu[:-1].detach().cpu().mean()} secs.") + print(f"Average cpu for 1k stims: {1000*dt_cpu[:-1].detach().cpu().mean()} secs.") + print( + f"Average david for 1k stims: {1000*dt_david[:-1].detach().cpu().mean()} secs." 
+ ) + + if BENCH_GPU and BENCH_CPU and BENCH_DAVID: + df1 = (torch.abs(output_gpu[-1].detach().cpu() - output_cpu[-1])).mean() + df2 = (torch.abs(output_gpu[-1].detach().cpu() - output_dav[-1])).mean() + df3 = (torch.abs(output_dav[-1].cpu() - output_cpu[-1])).mean() + print(f"Differences: CPU-GPU:{df1}, GPU-David:{df2}, David-CPU:{df3}") + + # %% diff --git a/thesis code/network analysis/render_including_minDist/render.py b/thesis code/network analysis/render_including_minDist/render.py new file mode 100644 index 0000000..67a0dc0 --- /dev/null +++ b/thesis code/network analysis/render_including_minDist/render.py @@ -0,0 +1,349 @@ +# %% + +import torch +import time +import scipy +import os +import matplotlib.pyplot as plt +import numpy as np +import contours +import glob + +USE_CEXT_FROM_DAVID = False +if USE_CEXT_FROM_DAVID: + # from CPPExtensions.PyTCopyCPU import TCopyCPU + from CPPExtensions.PyTCopyCPU import TCopyCPU as render_stimulus_CPP + + +import matplotlib as mpl + + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" + + +def plot_single_gabor_filter( + gabors, + dirs_source, + dirs_change, + source_idx: int = 0, + change_idx: int = 0, + save_plot: bool = False, +): + print( + f"dirs_source:{dirs_source[source_idx]:.2f}, dirs_change: {dirs_change[change_idx]:.2f}" + ) + print(f"Inflection angle in deg:{torch.rad2deg(dirs_change[change_idx])}") + plt.imshow( + gabors[source_idx, change_idx], + cmap="gray", + vmin=gabors.min(), + vmax=gabors.max(), + ) + cbar = plt.colorbar() + cbar.ax.tick_params(labelsize=14) + plt.xticks(fontsize=16) + plt.yticks(fontsize=16) + plt.tight_layout() + + if save_plot: + if change_idx != 0: + plt.savefig( + f"additional thesis plots/saved_plots/gabor_in{torch.rad2deg(dirs_source[source_idx])}inflect{torch.rad2deg(dirs_change[change_idx])}.pdf", + dpi=300, + ) + else: + plt.savefig( + f"additional thesis plots/saved_plots/gabor_mono_{dirs_source[source_idx]:.2f}_deg{torch.rad2deg(dirs_source[source_idx])}.pdf", + dpi=300, + ) + plt.show(block=True) + + +def render_gaborfield(posori, params, verbose=False): + scale_factor = params["scale_factor"] + n_source = params["n_source"] + n_change = params["n_change"] + + # convert sizes to pixel units + lambda_PIX = params["lambda_gabor"] * scale_factor + sigma_PIX = params["sigma_gabor"] * scale_factor + r_gab_PIX = int(params["d_gabor"] * scale_factor / 2) + d_gab_PIX = r_gab_PIX * 2 + 1 + + # make filterbank + gabors, dirs_source, dirs_change = contours.gaborner_filterbank( + r_gab=r_gab_PIX, + n_source=n_source, + n_change=n_change, + lambdah=lambda_PIX, + sigma=sigma_PIX, + phase=params["phase_gabor"], + normalize=params["normalize_gabor"], + torch_device="cpu", + ) + + gabors = gabors.reshape([n_source * n_change, d_gab_PIX, d_gab_PIX]) + + n_contours = posori.shape[0] + + # discretize ALL stimuli + if verbose: + print("Discretizing START!!!") + t_dis0 = time.perf_counter() + ( + index_srcchg, + index_x, + index_y, + x_canvas, + y_canvas, + ) = contours.discretize_stimuli( + posori=posori, + x_range=params["x_range"], + y_range=params["y_range"], + scale_factor=scale_factor, + r_gab_PIX=r_gab_PIX, + n_source=n_source, + n_change=n_change, + torch_device="cpu", + ) + + # find out minimal distance between neighboring gabors: + mean_mean_dist: list = [] + for i in range(len(index_x)): + xx_center = index_x[i] + r_gab_PIX + yy_center = index_y[i] + r_gab_PIX + + # calc mean distances within one image + x = xx_center[:, np.newaxis] - xx_center[np.newaxis, :] + y = yy_center[:, 
np.newaxis] - yy_center[np.newaxis, :]
+        print(x.shape, y.shape)
+        distances = np.sqrt(
+            (xx_center[:, np.newaxis] - xx_center[np.newaxis, :]) ** 2
+            + (yy_center[:, np.newaxis] - yy_center[np.newaxis, :]) ** 2
+        )
+        distances = distances.numpy()
+
+        # set diagonal elements to infinity to exclude self-distances
+        np.fill_diagonal(distances, np.inf)
+
+        # nearest neighbor of each contour element
+        nearest_neighbors = np.argmin(distances, axis=1)
+
+        # dist to nearest neighbors
+        nearest_distances = distances[np.arange(distances.shape[0]), nearest_neighbors]
+
+        # mean distance
+        mean_dist = np.mean(nearest_distances)
+        print(f"Mean distance between contour elements: {mean_dist}")
+        mean_mean_dist.append(mean_dist)
+
+    m = np.mean(mean_mean_dist)
+    print(f"Mean distance between contour elements over all images: {m}")
+
+    t_dis1 = time.perf_counter()
+    if verbose:
+        print(f"Discretizing END, took {t_dis1-t_dis0} seconds!!!")
+
+    if verbose:
+        print("Generation START!!!")
+    t0 = time.perf_counter()
+
+    if not USE_CEXT_FROM_DAVID:
+        if verbose:
+            print(" (using NUMPY...)")
+        output = torch.zeros(
+            (
+                n_contours,
+                y_canvas - d_gab_PIX + 1,
+                x_canvas - d_gab_PIX + 1,
+            ),
+            device="cpu",
+        )
+        kernels_cpu = gabors.detach().cpu()
+        for i_con in range(n_contours):
+            output[i_con] = contours.render_stimulus(
+                kernels=kernels_cpu,
+                index_srcchg=index_srcchg[i_con],
+                index_y=index_y[i_con],
+                index_x=index_x[i_con],
+                y_canvas=y_canvas,
+                x_canvas=x_canvas,
+                torch_device="cpu",
+            )
+        output = torch.clip(output, -1, +1)
+
+    else:
+        if verbose:
+            print(" (using C++...)")
+        copyier = render_stimulus_CPP()
+        number_of_cpu_processes = os.cpu_count()
+        output_dav_tmp = torch.zeros(
+            (
+                n_contours,
+                y_canvas + 2 * r_gab_PIX,
+                x_canvas + 2 * r_gab_PIX,
+            ),
+            device="cpu",
+            dtype=torch.float,
+        )
+
+        # repack the per-contour indices into one sparse coordinate matrix
+        n_elements_total = 0
+        for i_con in range(n_contours):
+            n_elements_total += len(index_x[i_con])
+        sparse_matrix = torch.zeros(
+            (n_elements_total, 4), device="cpu", dtype=torch.int64
+        )
+        i_elements_total = 0
+        for i_con in range(n_contours):
+            n_add = len(index_x[i_con])
+            sparse_matrix[i_elements_total : i_elements_total + n_add, 0] = i_con
+            sparse_matrix[
+                i_elements_total : i_elements_total + n_add, 1
+            ] = index_srcchg[i_con]
+            sparse_matrix[i_elements_total : i_elements_total + n_add, 2] = index_y[
+                i_con
+            ]
+            sparse_matrix[i_elements_total : i_elements_total + n_add, 3] = index_x[
+                i_con
+            ]
+            i_elements_total += n_add
+        assert i_elements_total == n_elements_total, "Element count mismatch while packing the sparse matrix!"
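+        # Note: this index repacking mirrors the benchmark code in contours.py;
+        # each sparse_matrix row holds [stimulus index, dictionary index, y, x]
+        # for one Gabor patch that the C++ copy kernel scatter-adds onto the
+        # output canvas.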
+ + # output_dav_tmp.fill_(0.0) + copyier.process( + sparse_matrix.data_ptr(), + int(sparse_matrix.shape[0]), + int(sparse_matrix.shape[1]), + gabors.data_ptr(), + int(gabors.shape[0]), + int(gabors.shape[1]), + int(gabors.shape[2]), + output_dav_tmp.data_ptr(), + int(output_dav_tmp.shape[0]), + int(output_dav_tmp.shape[1]), + int(output_dav_tmp.shape[2]), + int(number_of_cpu_processes), # type: ignore + ) + output = torch.clip( + output_dav_tmp[ + :, + d_gab_PIX - 1 : -(d_gab_PIX - 1), + d_gab_PIX - 1 : -(d_gab_PIX - 1), + ], + -1, + +1, + ) + + t1 = time.perf_counter() + if verbose: + print(f"Generating END, took {t1-t0} seconds.!!!") + + if verbose: + print("Showing first and last stimulus generated...") + plt.imshow(output[0], cmap="gray", vmin=-1, vmax=+1) + plt.show() + plt.imshow(output[-1], cmap="gray", vmin=-1, vmax=+1) + plt.show() + print(f"Processed {n_contours} stimuli in {t1-t_dis0} seconds!") + + return output + + +def render_gaborfield_frommatfiles( + files, params, varname, varname_dist, altpath=None, verbose=False +): + n_total = 0 + n_files = len(files) + print(f"Going through {n_files} contour files...") + + for i_file in range(n_files): + # get path, basename, suffix... + full = files[i_file] + path, file = os.path.split(full) + base, suffix = os.path.splitext(file) + + # load file + mat = scipy.io.loadmat(full) + if "dist" in full: + posori = mat[varname_dist] + else: + posori = mat[varname] + + n_contours = posori.shape[0] + n_total += n_contours + print(f" ...file {file} contains {n_contours} contours.") + + # process... + gaborfield = render_gaborfield(posori, params=params, verbose=verbose) + + # save + if altpath: + savepath = altpath + else: + savepath = path + savefull = savepath + os.sep + base + "_RENDERED.npz" + print(f" ...saving under {savefull}...") + gaborfield = (torch.clip(gaborfield, -1, 1) * 127 + 128).type(torch.uint8) + # np.savez_compressed(savefull, gaborfield=gaborfield) + + return n_total + + +if __name__ == "__main__": + TESTMODE = "files" # "files" or "posori" + + # cutout for stimuli, and gabor parameters + params = { + "x_range": [140, 940], + "y_range": [140, 940], + "scale_factor": 0.25, # scale to convert coordinates to pixel values + "d_gabor": 40, + "lambda_gabor": 16, + "sigma_gabor": 8, + "phase_gabor": 0.0, + "normalize_gabor": True, + # number of directions for dictionary + "n_source": 32, + "n_change": 32, + } + + if TESTMODE == "files": + # path = "/data_1/kk/StimulusGeneration/Alicorn/Natural/Corner000_n10000" + path = "D:/Katha/Neuroscience/Semester 4/newCode/RenderAlicorns/Coignless" + files = glob.glob(path + os.sep + "*.mat") + + t0 = time.perf_counter() + n_total = render_gaborfield_frommatfiles( + files=files, + params=params, + varname="Table_base_crn090", + varname_dist="Table_base_crn090_dist", + altpath="D:/Katha/Neuroscience/Semester 4/newCode/RenderAlicorns/Output/Coignless", + ) + t1 = time.perf_counter() + dt = t1 - t0 + print( + f"Rendered {n_total} contours in {dt} secs, yielding {n_total/dt} contours/sec." 
+ ) + + if TESTMODE == "posori": + print("Sample stimulus generation:") + print("===========================") + + # load contours, multiplex coordinates to simulate a larger set of contours + n_multiplex = 5 + mat = scipy.io.loadmat( + "D:/Katha/Neuroscience/Semester 4/newCode/RenderAlicorns/corner_angle_090_dist_b001_n100.mat" + ) + posori = np.tile(mat["Table_crn_crn090_dist"], (n_multiplex, 1)) + n_contours = posori.shape[0] + print(f"Processing {n_contours} contour stimuli") + + output = render_gaborfield(posori, params=params, verbose=True) + output8 = (torch.clip(output, -1, 1) * 127 + 128).type(torch.uint8) + np.savez_compressed("output8_compressed.npz", output8=output8) + + +# %% diff --git a/thesis code/network analysis/weights_correlation/README.txt b/thesis code/network analysis/weights_correlation/README.txt new file mode 100644 index 0000000..d399d53 --- /dev/null +++ b/thesis code/network analysis/weights_correlation/README.txt @@ -0,0 +1,28 @@ +Folder "weights_correlation": + +File: + +1. create_gabor_dict: +* contains the code to generate the Gabor dictionary used for the weights of convolutional layer 1 +* 32 Gabors: 8 orientations, 4 phases +* Gabors have a diameter of 11 pixels + +2. draw_input_fields: +* used to calculate how much of the input the kernel of each CNN layer has access to +* draws these sizes into a chosen image from the dataset + +3. all_cnns_mean_correlation: +* includes the code to plot the correlation matrices seen in the written thesis +* includes statistical test +* includes code to plot every single correlation matrix of the 20 CNNs + + + +In folder "weight visualization": + +1. plot_as_grid: +* visualizes the weights and bias (optional) + +2. plot_weights: +* loads model +* choose layer to visualize weights from (+ bias optionally) \ No newline at end of file diff --git a/thesis code/network analysis/weights_correlation/all_cnns_mean_correlation.py b/thesis code/network analysis/weights_correlation/all_cnns_mean_correlation.py new file mode 100644 index 0000000..906c081 --- /dev/null +++ b/thesis code/network analysis/weights_correlation/all_cnns_mean_correlation.py @@ -0,0 +1,274 @@ +import torch +import sys +import os +import matplotlib.pyplot as plt # noqa +import numpy as np +import matplotlib as mpl + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" +mpl.rcParams["font.size"] = 15 + +# import files from parent dir +parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(parent_dir) + +from functions.make_cnn import make_cnn # noqa + + +def show_20mean_correlations(model_list, save: bool = False, cnn: str = "CORNER"): + """ + Displays a correlation matrix for every single of the 20 CNNs + """ + + fig, axs = plt.subplots(4, 5, figsize=(15, 15)) + for i, load_model in enumerate(model_list): + # load model + model = torch.load(load_model).to("cpu") + model.eval() + + # load 2nd convs weights + weights = model[3].weight.cpu().detach().clone().numpy() + corr_matrices = [] + for j in range(weights.shape[0]): + w_j = weights[j] + w = w_j.reshape(w_j.shape[0], -1) + corr_matrix = np.corrcoef(w) + corr_matrices.append(corr_matrix) + + mean_corr_matrix = np.mean(corr_matrices, axis=0) + ax = axs[i // 5, i % 5] + im = ax.matshow(mean_corr_matrix, cmap="RdBu_r") + cbar = fig.colorbar( + im, ax=ax, fraction=0.046, pad=0.04, ticks=np.arange(-1.1, 1.1, 0.2) + ) + ax.set_title(f"Model {i+1}") + + # remove lower x-axis ticks + ax.tick_params(axis="x", which="both", bottom=False) + 
ax.tick_params(axis="both", which="major", labelsize=14) + cbar.ax.tick_params(labelsize=13) + + # fig.colorbar(im, ax=axs.ravel().tolist()) + plt.tight_layout() + if save: + plt.savefig( + f"additional thesis plots/saved_plots/weight plots/all20cnn_mean_corr_{cnn}.pdf", + dpi=300, + ) + plt.show() + + +def show_overall_mean_correlation(model_list, save: bool = False, cnn: str = "CORNER"): + """ + Displays the mean correlation across all 20 CNNs + """ + + fig, ax = plt.subplots(figsize=(7, 7)) + overall_corr_matrices = [] + for i, load_model in enumerate(model_list): + # load model + model = torch.load(load_model).to("cpu") + model.eval() + + # load 2nd convs weights + weights = model[3].weight.cpu().detach().clone().numpy() + corr_matrices = [] + for j in range(weights.shape[0]): + w_j = weights[j] + w = w_j.reshape(w_j.shape[0], -1) + corr_matrix = np.corrcoef(w) + corr_matrices.append(corr_matrix) + + mean_corr_matrix = np.mean(corr_matrices, axis=0) + overall_corr_matrices.append(mean_corr_matrix) + + overall_mean_corr_matrix = np.mean(overall_corr_matrices, axis=0) + im = ax.matshow(overall_mean_corr_matrix, cmap="RdBu_r") + cbar = fig.colorbar( + im, ax=ax, fraction=0.046, pad=0.04, ticks=np.arange(-1.1, 1.1, 0.1) + ) + + # remove lower x-axis ticks + ax.tick_params(axis="x", which="both", bottom=False) + ax.tick_params(axis="both", which="major", labelsize=17) + cbar.ax.tick_params(labelsize=15) + + plt.tight_layout() + if save: + plt.savefig( + f"additional thesis plots/saved_plots/weight plots/mean20cnn_mean_corr_{cnn}.pdf", + dpi=300, + ) + plt.show() + return overall_mean_corr_matrix + + +def get_file_list_all_cnns(dir: str) -> list: + all_results: list = [] + for filename in os.listdir(dir): + if filename.endswith(".pt"): + # print(os.path.join(dir, filename)) + all_results.append(os.path.join(dir, filename)) + + return all_results + + +def test_normality(correlation_data, condition: str, alpha: float = 0.05): + """ + Tests if data has normal distribution + * 0-hyp: data is normally distributed + * low p-val: data not normally distributed + """ + from scipy import stats + + statistic, p_value = stats.normaltest(correlation_data) + print( + f"\nD'Agostino-Pearson Test for {condition} - p-val :", + p_value, + ) + print( + f"D'Agostino-Pearson Test for {condition} - statistic :", + statistic, + ) + + # set alpha + if p_value < alpha: + print("P-val < alpha. Reject 0-hypothesis. Data is not normally distributed") + else: + print("P-val > alpha. Keep 0-hypothesis. Data is normally distributed") + + return p_value + + +def two_sample_ttest(corr_classic, corr_coner, alpha: float = 0.05): + """ + This is a test for the null hypothesis that 2 independent samples have identical average (expected) values. This test assumes that the populations have identical variances by default. + """ + + from scipy.stats import ttest_ind + + t_stat, p_value = ttest_ind(corr_classic, corr_coner) + print(f"t-statistic: {t_stat}") + + # check if the p-value less than significance level + if p_value < alpha: + print( + "There is a significant difference in the mean correlation values between the two groups." + ) + else: + print( + "There is no significant difference in the mean correlation values between the two groups." 
+ ) + + +def willy_is_not_whitney_test(data_classic, data_corner): + from scipy.stats import mannwhitneyu + + """ + * Test does not assume normal distribution + * Compares means between 2 indep groups + """ + + # call test + statistic, p_value = mannwhitneyu(data_classic, data_corner) + + # results + print("\nMann-Whitney U Test Statistic:", statistic) + print("Mann-Whitney U Test p-value:", p_value) + + # check significance: + alpha = 0.05 + if p_value < alpha: + print("The distributions are significantly different.") + else: + print("The distributions are not significantly different.") + + return p_value + + +def visualize_differences(corr_class, corr_corn, save: bool = False): + # calc mean, std, median + mean_class = np.mean(corr_class) + median_class = np.median(corr_class) + std_class = np.std(corr_class) + + mean_corn = np.mean(corr_corn) + median_corn = np.median(corr_corn) + std_corn = np.std(corr_corn) + + # plot + labels = ["Mean", "Median", "Standard Deviation"] + condition_class = [mean_class, median_class, std_class] + condition_corn = [mean_corn, median_corn, std_corn] + + x = np.arange(len(labels)) + width = 0.35 + + _, ax = plt.subplots(figsize=(7, 7)) + rect_class = ax.bar( + x - width / 2, condition_class, width, label="CLASSIC", color="cornflowerblue" + ) + rect_corn = ax.bar( + x + width / 2, condition_corn, width, label="CORNER", color="coral" + ) + + # show bar values + for i, rect in enumerate(rect_class + rect_corn): + height = rect.get_height() + ax.text( + rect.get_x() + rect.get_width() / 2.0, + height, + f"{height:.3f}", + ha="center", + va="bottom", + fontsize=15, + ) + + # ax.set_ylabel('Value') + ax.set_title("Summary Statistics by Condition") + ax.set_xticks(x) + ax.set_xticklabels(labels, fontsize=17) + ax.legend() + + plt.tight_layout() + if save: + plt.savefig( + "additional thesis plots/saved_plots/weight plots/summary_stats_correlation_CLASSvsCORN.pdf", + dpi=300, + ) + plt.show() + + +if __name__ == "__main__": + # CLASSIC: + directory_classic: str = "D:/Katha/Neuroscience/Semester 4/newCode/kk_contour_net_shallow-main/classic3288_fest" + all_results_classic = get_file_list_all_cnns(dir=directory_classic) + show_20mean_correlations(all_results_classic) + mean_corr_classic = show_overall_mean_correlation(all_results_classic) + + # CORNER: + directory_corner: str = "D:/Katha/Neuroscience/Semester 4/newCode/kk_contour_net_shallow-main/corner3288_fest" + all_results_corner = get_file_list_all_cnns(dir=directory_corner) + show_20mean_correlations(all_results_corner) + mean_corr_corner = show_overall_mean_correlation(all_results_corner) + + # flatten + corr_classic = mean_corr_classic.flatten() + corr_corner = mean_corr_corner.flatten() + + # test how data is distributed + p_class = test_normality(correlation_data=corr_classic, condition="CLASSIC") + p_corn = test_normality(correlation_data=corr_corner, condition="CORNER") + + # perform statistical test: + alpha: float = 0.05 + + if p_class < alpha and p_corn < alpha: + willy_is_not_whitney_test(data_classic=corr_classic, data_corner=corr_corner) + else: + # do ttest: + two_sample_ttest(corr_classic=corr_classic, corr_coner=corr_corner) + + # visualize the differences: + visualize_differences(corr_class=corr_classic, corr_corn=corr_corner, save=True) diff --git a/thesis code/network analysis/weights_correlation/create_gabor_dict.py b/thesis code/network analysis/weights_correlation/create_gabor_dict.py new file mode 100644 index 0000000..281dedb --- /dev/null +++ b/thesis code/network 
analysis/weights_correlation/create_gabor_dict.py @@ -0,0 +1,87 @@ +import numpy as np +import matplotlib.pyplot as plt # noqa + + +def change_base( + x: np.ndarray, y: np.ndarray, theta: float +) -> tuple[np.ndarray, np.ndarray]: + x_theta: np.ndarray = x.astype(dtype=np.float32) * np.cos(theta) + y.astype( + dtype=np.float32 + ) * np.sin(theta) + y_theta: np.ndarray = y.astype(dtype=np.float32) * np.cos(theta) - x.astype( + dtype=np.float32 + ) * np.sin(theta) + return x_theta, y_theta + + +def cos_gabor_function( + x: np.ndarray, y: np.ndarray, theta: float, f: float, sigma: float, phi: float +) -> np.ndarray: + r_a: np.ndarray = change_base(x, y, theta)[0] + r_b: np.ndarray = change_base(x, y, theta)[1] + r2 = r_a**2 + r_b**2 + gauss: np.ndarray = np.exp(-0.5 * r2 / sigma**2) + correction = np.exp(-2 * (np.pi * sigma * f) ** 2) * np.cos(phi) + envelope = np.cos(2 * np.pi * f * change_base(x, y, theta)[0] + phi) - correction + patch = gauss * envelope + + return patch + + +def weights(num_orients, num_phase, f, sigma, diameter, delta_x): + dx = delta_x + n = np.ceil(diameter / 2 / dx) + x, y = np.mgrid[ + -n : n + 1, + -n : n + 1, + ] + + t = np.arange(num_orients) * np.pi / num_orients + p = np.arange(num_phase) * 2 * np.pi / num_phase + + w = np.zeros((num_orients, num_phase, x.shape[0], x.shape[0])) + for i in range(num_orients): + theta = t[i] + for j in range(num_phase): + phase = p[j] + + w[i, j] = cos_gabor_function( + x=x * dx, y=y * dx, theta=theta, f=f, sigma=sigma, phi=phase + ).T + + return w + + +if __name__ == "__main__": + f = 0.25 # frequency = 1/lambda = 1/4 + sigma = 2.0 + diameter = 10 + num_orients = 8 + num_phase = 4 + we = weights( + num_orients=num_orients, + num_phase=num_phase, + f=f, + sigma=sigma, + diameter=diameter, + delta_x=1, + ) + + # comment in for plotting as matrix : + # fig = plt.figure(figsize=(5, 5)) + # for i in range(num_orients): + # for j in range(num_phase): + # plt.subplot(num_orients, num_phase, (i * num_phase) + j + 1) + # plt.imshow(we[i, j], cmap="gray", vmin=we.min(), vmax=we.max()) + # plt.axis("off") + # # plt.colorbar() + # plt.tight_layout() + # plt.show(block=True) + + weights_flatten = np.ascontiguousarray(we) + weights_flatten = np.reshape( + weights_flatten, (we.shape[0] * we.shape[1], 1, we.shape[-2], we.shape[-1]) + ) + + # comment in for saving + # np.save("gabor_dict_32o_8p.npy", weights_flatten) diff --git a/thesis code/network analysis/weights_correlation/draw_input_fields.py b/thesis code/network analysis/weights_correlation/draw_input_fields.py new file mode 100644 index 0000000..3d30bcb --- /dev/null +++ b/thesis code/network analysis/weights_correlation/draw_input_fields.py @@ -0,0 +1,156 @@ +# %% +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.patches as patch +import matplotlib as mpl +from cycler import cycler +from functions.analyse_network import analyse_network + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" + + +def draw_kernel( + image: np.ndarray, + coordinate_list: list, + layer_type_list: list, + ignore_output_conv_layer: bool, +) -> None: + """ + Call function after creating the model-to-be-trained. 
+ """ + assert image.shape[0] == 200 + assert image.shape[1] == 200 + + # list of colors to choose from: + prop_cycle = plt.rcParams["axes.prop_cycle"] + colors = prop_cycle.by_key()["color"] + edge_color_cycler = iter( + cycler(color=["sienna", "orange", "gold", "bisque"] + colors) + ) + + # position first kernel + start_x: int = 4 + start_y: int = 15 + + # general plot structure: + plt.ion() + _, ax = plt.subplots() + ax.imshow(image, cmap="gray") + ax.tick_params(axis="both", which="major", labelsize=15) + + if ignore_output_conv_layer: + number_of_layers: int = len(layer_type_list) - 1 + else: + number_of_layers = len(layer_type_list) + + for i in range(0, number_of_layers): + if layer_type_list[i] is not None: + kernels = int(coordinate_list[i].shape[0]) + edgecolor = next(edge_color_cycler)["color"] + # draw kernel + kernel = patch.Rectangle( + (start_x, start_y), + kernels, + kernels, + linewidth=1.2, + edgecolor=edgecolor, + facecolor="none", + label=layer_type_list[i], + ) + ax.add_patch(kernel) + + if coordinate_list[i].shape[1] > 1: + strides = int(coordinate_list[i][0, 1]) - int(coordinate_list[i][0, 0]) + + # draw stride + stride = patch.Rectangle( + (start_x + strides, start_y + strides), + kernels, + kernels, + linewidth=1.2, + edgecolor=edgecolor, + facecolor="none", + linestyle="dashed", + ) + ax.add_patch(stride) + + # add distance of next drawing + start_x += 14 + start_y += 10 + + # final plot + plt.tight_layout() + plt.legend(loc="upper right", fontsize=11) + plt.show(block=True) + + +# %% +if __name__ == "__main__": + import os + import sys + import json + from jsmin import jsmin + + parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + sys.path.append(parent_dir) + from functions.alicorn_data_loader import alicorn_data_loader + from functions.make_cnn_v2 import make_cnn + from functions.create_logger import create_logger + + ignore_output_conv_layer: bool = True + + # get current path: + cwd = os.path.dirname(os.path.realpath(__file__)).replace(os.sep, "/") + + network_config_filename = f"{cwd}/network_0.json" + config_filenname = f"{cwd}/config_v2.json" + with open(config_filenname, "r") as file_handle: + config = json.loads(jsmin(file_handle.read())) + + logger = create_logger( + save_logging_messages=False, display_logging_messages=False, model_name=None + ) + + # test image: + data_test = alicorn_data_loader( + num_pfinkel=[0], + load_stimuli_per_pfinkel=10, + condition=str(config["condition"]), + data_path=str(config["data_path"]), + logger=logger, + ) + + assert data_test.__len__() > 0 + input_shape = data_test.__getitem__(0)[1].shape + + model = make_cnn( + network_config_filename=network_config_filename, + logger=logger, + input_shape=input_shape, + ) + print(model) + + assert input_shape[-2] == input_shape[-1] + coordinate_list, layer_type_list, pixel_used = analyse_network( + model=model, input_shape=int(input_shape[-1]) + ) + + for i in range(0, len(coordinate_list)): + print( + ( + f"Layer: {i}, Positions: {coordinate_list[i].shape[1]}, " + f"Pixel per Positions: {coordinate_list[i].shape[0]}, " + f"Type: {layer_type_list[i]}, Number of pixel used: {pixel_used[i]}" + ) + ) + + image = data_test.__getitem__(6)[1].squeeze(0) + + # call function for plotting input fields into image: + draw_kernel( + image=image.numpy(), + coordinate_list=coordinate_list, + layer_type_list=layer_type_list, + ignore_output_conv_layer=ignore_output_conv_layer, + ) diff --git a/thesis code/network analysis/weights_correlation/weight 
visualization/plot_as_grid.py b/thesis code/network analysis/weights_correlation/weight visualization/plot_as_grid.py new file mode 100644 index 0000000..8a4f6f3 --- /dev/null +++ b/thesis code/network analysis/weights_correlation/weight visualization/plot_as_grid.py @@ -0,0 +1,194 @@ +# %% +import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import make_axes_locatable +import matplotlib as mpl + + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" + + +def plot_weights( + plot, + s, + grid_color, + linewidth, + idx, + smallDim, + swap_channels, + activations, + layer, + title, + colorbar, + vmin, + vmax, +): + plt.imshow(plot.T, cmap="RdBu_r", origin="lower", vmin=vmin, vmax=vmax) + + ax = plt.gca() + a = np.arange(0, plot.shape[1] + 1, s[3]) + b = np.arange(0, plot.shape[0] + 1, s[1]) + plt.hlines(a - 0.5, -0.5, plot.shape[0] - 0.5, colors=grid_color, lw=linewidth) + plt.vlines(b - 0.5, -0.5, plot.shape[1] - 0.5, colors=grid_color, lw=linewidth) + plt.ylim(-1, plot.shape[1]) + plt.xlim(-1, plot.shape[0]) + + ax.set_xticks(s[1] / 2 + np.arange(-0.5, plot.shape[0] - 1, s[1])) + ax.set_yticks(s[3] / 2 + np.arange(-0.5, plot.shape[1] - 1, s[3])) + + if ( + idx is not None + and (smallDim is False and swap_channels is False) + or (activations is True) + ): + ax.set_xticklabels(idx, fontsize=19) + ax.set_yticklabels(np.arange(s[2]), fontsize=19) + elif idx is not None and layer == "FC1": + ax.set_xticklabels(np.arange(s[0]), fontsize=19) + ax.set_yticklabels(idx, fontsize=19) + elif idx is not None and (smallDim is True or swap_channels is True): + ax.set_xticklabels(np.arange(s[0]), fontsize=19) + ax.set_yticklabels(idx, fontsize=19) + else: + ax.set_xticklabels(np.arange(s[0]), fontsize=19) + ax.set_yticklabels(np.arange(s[2]), fontsize=19) + ax.invert_yaxis() + + ax.xaxis.set_label_position("top") + ax.tick_params(axis="x", top=True, bottom=False, labeltop=True, labelbottom=False) + + if title is not None: + is_string = isinstance(title, str) + if is_string is True: + plt.title(title) + + if colorbar is True: + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="1.5%", pad=0.05) + cbar = plt.colorbar(ax.get_images()[0], cax=cax) + + # this was only for flattened conv1 weights cbar ticks!! 
+ # cbar.set_ticks([0.5, -0.5]) + # cbar.set_ticklabels([0.5, -0.5]) + + tick_font_size = 17 + cbar.ax.tick_params(labelsize=tick_font_size) + + +def plot_in_grid( + plot, + fig_size=(10, 10), + swap_channels=False, + title=None, + idx=None, + colorbar=False, + vmin=None, + vmax=None, + grid_color="k", + linewidth=0.75, + savetitle=None, + activations=False, + layer=None, + format="pdf", + bias=None, + plot_bias: bool = False, +): + smallDim = False + if plot.ndim < 4: + smallDim = True + plot = np.swapaxes(plot, 0, 1) + plot = plot[:, :, np.newaxis, np.newaxis] + if vmin is None and vmax is None: + # plot_abs = np.amax(np.abs(plot)) + vmin = -(np.amax(np.abs(plot))) + vmax = np.amax(np.abs(plot)) + + if swap_channels is True: + plot = np.swapaxes(plot, 0, 1) + + # print(plot.shape) + plot = np.ascontiguousarray(np.moveaxis(plot, 1, 2)) + + for j in range(plot.shape[2]): + for i in range(plot.shape[0]): + plot[(i - 1), :, (j - 1), :] = plot[(i - 1), :, (j - 1), :].T + + s = plot.shape + plot = plot.reshape((s[0] * s[1], s[2] * s[3])) + plt.figure(figsize=fig_size) + + if plot_bias and bias is not None: + if swap_channels: + # If axes are swapped, arrange the plots side by side + plt.subplot(1, 2, 1) + plot_weights( + plot=plot, + s=s, + grid_color=grid_color, + linewidth=linewidth, + idx=idx, + smallDim=smallDim, + swap_channels=swap_channels, + activations=activations, + layer=layer, + title=title, + colorbar=colorbar, + vmin=vmin, + vmax=vmax, + ) + + plt.subplot(1, 2, 2) + plt.plot(bias, np.arange(len(bias))) + plt.ylim(len(bias) - 1, 0) + plt.title("Bias", fontsize=14) + plt.tight_layout() + + else: + plt.subplot(2, 1, 1) + plot_weights( + plot=plot, + s=s, + grid_color=grid_color, + linewidth=linewidth, + idx=idx, + smallDim=smallDim, + swap_channels=swap_channels, + activations=activations, + layer=layer, + title=title, + colorbar=colorbar, + vmin=vmin, + vmax=vmax, + ) + + plt.subplot(2, 1, 2) + plt.plot(np.arange(len(bias)), bias) + plt.title("Bias", fontsize=14) + + else: + plot_weights( + plot=plot, + s=s, + grid_color=grid_color, + linewidth=linewidth, + idx=idx, + smallDim=smallDim, + swap_channels=swap_channels, + activations=activations, + layer=layer, + title=title, + colorbar=colorbar, + vmin=vmin, + vmax=vmax, + ) + + if savetitle is not None: + plt.savefig( + f"D:/Katha/Neuroscience/Semester 4/newCode/additional thesis plots/saved_plots/weight plots/{savetitle}.{format}", + dpi=300, + bbox_inches="tight", + ) + + plt.tight_layout() + plt.show(block=True) diff --git a/thesis code/network analysis/weights_correlation/weight visualization/plot_weights.py b/thesis code/network analysis/weights_correlation/weight visualization/plot_weights.py new file mode 100644 index 0000000..ebb0550 --- /dev/null +++ b/thesis code/network analysis/weights_correlation/weight visualization/plot_weights.py @@ -0,0 +1,101 @@ +import torch +import sys +import os +import matplotlib.pyplot as plt # noqa +import matplotlib as mpl + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" +mpl.rcParams["font.size"] = 14 + +# import files from parent dir +parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +sys.path.append(parent_dir) + +from plot_as_grid import plot_in_grid +from functions.make_cnn import make_cnn # noqa + + +# load on cpu +device = torch.device("cpu") + +# path to NN +nn = "ArghCNN_numConvLayers3_outChannels[8, 8, 8]_kernelSize[7, 15]_leaky relu_stride1_trainFirstConvLayerTrue_seed293051_Natural_249Epoch_1308-1145" +PATH = 
f"D:/Katha/Neuroscience/Semester 4/newCode/kk_contour_net_shallow-main/corner888/{nn}.pt" +SAVE_PATH = "20 cnns weights/corner 888/seed293051_Natural_249Epoch_1308-1145" + +# load and evaluate model +model = torch.load(PATH).to(device) +model.eval() +print("Full network:") +print(model) +print("") +# enter index to plot: +idx = int(input("Please select layer: ")) +print(f"Selected layer: {idx, model[idx]}") + +# bias +bias_input = input("Plot bias (y/n): ") +plot_bias: bool = False +if bias_input == "y": + plot_bias = True + bias = model[idx]._parameters["bias"].data + print(bias) +else: + bias = None + +# show last layer's weights. +if idx == len(model) - 1: + linear_weights = model[idx].weight.cpu().detach().clone().numpy() + + weights = linear_weights.reshape(2, 8, 74, 74) + plot_in_grid( + weights, + fig_size=(10, 7), + savetitle=f"{SAVE_PATH}_final_layer", + colorbar=True, + swap_channels=True, + bias=bias, + plot_bias=plot_bias, + ) + +# visualize weights: +elif idx > 0: + weights = model[idx].weight.cpu().detach().clone().numpy() + + if idx == 5: + swap_channels = False + layer = 3 + else: + swap_channels = True + layer = 2 + + # plot weights + plot_in_grid( + weights, + fig_size=(11, 7), + savetitle=f"{SAVE_PATH}_conv{layer}", + colorbar=True, + swap_channels=swap_channels, + bias=bias, + plot_bias=plot_bias, + ) +else: + first_weights = model[idx].weight.cpu().detach().clone().numpy() + + # reshape first layer weights: + reshape_input = input("Reshape weights to 4rows 8 cols (y/n): ") + if reshape_input == "y": + weights = first_weights.reshape( + 8, 4, first_weights.shape[-2], first_weights.shape[-1] + ) + else: + weights = first_weights + plot_in_grid( + weights, + fig_size=(17, 17), + savetitle=f"{SAVE_PATH}_conv1", + colorbar=True, + bias=bias, + plot_bias=plot_bias, + ) diff --git a/thesis code/shallow net/README.txt b/thesis code/shallow net/README.txt new file mode 100644 index 0000000..a3087ad --- /dev/null +++ b/thesis code/shallow net/README.txt @@ -0,0 +1,12 @@ +Folder shallow net: + +1. config.json: +* includes all cnn parameters and configurations +* example for architecture: 32-8-8 (c1-c2-c3) + +2. 
corner_loop_final.sh:
+* bash script to train the 20 CNNs of one CNN architecture
+
+Folder functions:
+* contains the files to build the CNN, set the seeds, create a logging file, and train and test the CNNs
+* based on ---> GitHub: https://github.com/davrot/kk_contour_net_shallow.git
\ No newline at end of file
diff --git a/thesis code/shallow net/config.json b/thesis code/shallow net/config.json
new file mode 100644
index 0000000..c5965a5
--- /dev/null
+++ b/thesis code/shallow net/config.json
@@ -0,0 +1,53 @@
+{
+    "data_path": "/home/kk/Documents/Semester4/code/RenderStimuli/Output/",
+    "save_logging_messages": true, // (true), false
+    "display_logging_messages": true, // (true), false
+    "batch_size_train": 500,
+    "batch_size_test": 250,
+    "max_epochs": 2000,
+    "save_model": true,
+    "conv_0_kernel_size": 11,
+    "mp_1_kernel_size": 3,
+    "mp_1_stride": 2,
+    "use_plot_intermediate": true, // true, (false)
+    "stimuli_per_pfinkel": 10000,
+    "num_pfinkel_start": 0,
+    "num_pfinkel_stop": 100,
+    "num_pfinkel_step": 10,
+    "precision_100_percent": 0, // (4)
+    "train_first_layer": false, // true, (false)
+    "save_ever_x_epochs": 10, // (10)
+    "activation_function": "leaky relu", // tanh, relu, (leaky relu), none
+    "leak_relu_negative_slope": 0.1, // (0.1)
+    "switch_leakyR_to_relu": false,
+    // LR Scheduler ->
+    "use_scheduler": true, // (true), false
+    "scheduler_verbose": true,
+    "scheduler_factor": 0.1, //(0.1)
+    "scheduler_patience": 10, // (10)
+    "scheduler_threshold": 1e-5, // (1e-4)
+    "minimum_learning_rate": 1e-8,
+    "learning_rate": 0.0001,
+    // <- LR Scheduler
+    "pooling_type": "max", // (max), average, none
+    "conv_0_enable_softmax": false, // true, (false)
+    "use_adam": true, // (true) => adam, false => SGD
+    "condition": "Natural",
+    "scale_data": 255.0, // (255.0)
+    "conv_out_channels_list": [
+        [
+            32,
+            8,
+            8
+        ]
+    ],
+    "conv_kernel_sizes": [
+        [
+            7,
+            15
+        ]
+    ],
+    "conv_stride_sizes": [
+        1
+    ]
+}
\ No newline at end of file
diff --git a/thesis code/shallow net/corner_loop_final.sh b/thesis code/shallow net/corner_loop_final.sh
new file mode 100644
index 0000000..682d73c
--- /dev/null
+++ b/thesis code/shallow net/corner_loop_final.sh
@@ -0,0 +1,13 @@
+Directory="/home/kk/Documents/Semester4/code/Corner_contour_net_shallow"
+Priority="0"
+echo $Directory
+mkdir $Directory/argh_log_3288_fix
+for i in {0..20}; do
+    for out_channels_idx in {0..0}; do
+        for kernel_size_idx in {0..0}; do
+            for stride_idx in {0..0}; do
+                echo "hostname; cd $Directory ; /home/kk/P3.10/bin/python3 cnn_training.py --idx-conv-out-channels-list $out_channels_idx --idx-conv-kernel-sizes $kernel_size_idx --idx-conv-stride-sizes $stride_idx -s \$JOB_ID" | qsub -o $Directory/argh_log_3288_fix -j y -p $Priority -q gp4u,gp3u -N Corner3288fix
+            done
+        done
+    done
+done
diff --git a/thesis code/shallow net/functions/alicorn_data_loader.py b/thesis code/shallow net/functions/alicorn_data_loader.py
new file mode 100644
index 0000000..71fb6db
--- /dev/null
+++ b/thesis code/shallow net/functions/alicorn_data_loader.py
@@ -0,0 +1,107 @@
+import torch
+import numpy as np
+import os
+
+
+@torch.no_grad()
+def alicorn_data_loader(
+    num_pfinkel: list[int] | None,
+    load_stimuli_per_pfinkel: int,
+    condition: str,
+    data_path: str,
+    logger=None,
+) -> torch.utils.data.TensorDataset:
+    """
+    - num_pfinkel: list of the angles that should be loaded (ranging from
+      0-90). If None, all pfinkels are loaded.
+    - load_stimuli_per_pfinkel: defines the number of stimuli per path angle,
+      but for label 0 and label 1 separately (e.g., load_stimuli_per_pfinkel =
+      1000: 1000 stimuli with label 1, 1000 stimuli with label 0)
+    """
+    filename: str | None = None
+    if condition == "Angular":
+        filename = "angular_angle"
+    elif condition == "Coignless":
+        filename = "base_angle"
+    elif condition == "Natural":
+        filename = "corner_angle"
+    else:
+        filename = None
+    assert filename is not None
+    filepaths: str = os.path.join(data_path, f"{condition}")
+
+    stimuli_per_pfinkel: int = 100000
+
+    # ----------------------------
+
+    # for angles and batches
+    if num_pfinkel is None:
+        angle: list[int] = np.arange(0, 100, 10).tolist()
+    else:
+        angle = num_pfinkel
+
+    assert isinstance(angle, list)
+
+    batch: list[int] = np.arange(1, 11, 1).tolist()
+
+    if load_stimuli_per_pfinkel <= (stimuli_per_pfinkel // len(batch)):
+        num_img_per_pfinkel: int = load_stimuli_per_pfinkel
+        num_batches: int = 1
+    else:
+        # handle case where more than 10,000 stimuli per pfinkel are needed
+        num_batches = load_stimuli_per_pfinkel // (stimuli_per_pfinkel // len(batch))
+        num_img_per_pfinkel = load_stimuli_per_pfinkel // num_batches
+
+    if logger is not None:
+        logger.info(f"{num_batches} batches")
+        logger.info(f"{num_img_per_pfinkel} stimuli per pfinkel.")
+
+    # initialize data and label tensors:
+    num_stimuli: int = len(angle) * num_batches * num_img_per_pfinkel * 2
+    data_tensor: torch.Tensor = torch.empty(
+        (num_stimuli, 200, 200), dtype=torch.uint8, device=torch.device("cpu")
+    )
+    label_tensor: torch.Tensor = torch.empty(
+        (num_stimuli), dtype=torch.int64, device=torch.device("cpu")
+    )
+
+    if logger is not None:
+        logger.info(f"data tensor shape: {data_tensor.shape}")
+        logger.info(f"label tensor shape: {label_tensor.shape}")
+
+    # append data
+    idx: int = 0
+    for i in range(len(angle)):
+        for j in range(num_batches):
+            # load contour
+            temp_filename: str = (
+                f"{filename}_{angle[i]:03}_b{batch[j]:03}_n10000_RENDERED.npz"
+            )
+            contour_filename: str = os.path.join(filepaths, temp_filename)
+            c_data = np.load(contour_filename)
+            data_tensor[idx : idx + num_img_per_pfinkel, ...] = torch.tensor(
+                c_data["gaborfield"][:num_img_per_pfinkel, ...],
+                dtype=torch.uint8,
+                device=torch.device("cpu"),
+            )
+            label_tensor[idx : idx + num_img_per_pfinkel] = int(1)
+            idx += num_img_per_pfinkel
+
+    # next append distractor stimuli
+    for i in range(len(angle)):
+        for j in range(num_batches):
+            # load distractor
+            temp_filename = (
+                f"{filename}_{angle[i]:03}_dist_b{batch[j]:03}_n10000_RENDERED.npz"
+            )
+            distractor_filename: str = os.path.join(filepaths, temp_filename)
+            nc_data = np.load(distractor_filename)
+            data_tensor[idx : idx + num_img_per_pfinkel, ...]
= torch.tensor( + nc_data["gaborfield"][:num_img_per_pfinkel, ...], + dtype=torch.uint8, + device=torch.device("cpu"), + ) + label_tensor[idx : idx + num_img_per_pfinkel] = int(0) + idx += num_img_per_pfinkel + + return torch.utils.data.TensorDataset(label_tensor, data_tensor.unsqueeze(1)) diff --git a/thesis code/shallow net/functions/analyse_network.py b/thesis code/shallow net/functions/analyse_network.py new file mode 100644 index 0000000..937affe --- /dev/null +++ b/thesis code/shallow net/functions/analyse_network.py @@ -0,0 +1,103 @@ +import torch + + +def unfold( + layer: torch.nn.Conv2d | torch.nn.MaxPool2d | torch.nn.AvgPool2d, size: int +) -> torch.Tensor: + if isinstance(layer.kernel_size, tuple): + assert layer.kernel_size[0] == layer.kernel_size[1] + kernel_size: int = int(layer.kernel_size[0]) + else: + kernel_size = int(layer.kernel_size) + + if isinstance(layer.dilation, tuple): + assert layer.dilation[0] == layer.dilation[1] + dilation: int = int(layer.dilation[0]) + else: + dilation = int(layer.dilation) # type: ignore + + if isinstance(layer.padding, tuple): + assert layer.padding[0] == layer.padding[1] + padding: int = int(layer.padding[0]) + else: + padding = int(layer.padding) + + if isinstance(layer.stride, tuple): + assert layer.stride[0] == layer.stride[1] + stride: int = int(layer.stride[0]) + else: + stride = int(layer.stride) + + out = ( + torch.nn.functional.unfold( + torch.arange(0, size, dtype=torch.float32) + .unsqueeze(0) + .unsqueeze(0) + .unsqueeze(-1), + kernel_size=(kernel_size, 1), + dilation=(dilation, 1), + padding=(padding, 0), + stride=(stride, 1), + ) + .squeeze(0) + .type(torch.int64) + ) + + return out + + +def analyse_network( + model: torch.nn.Sequential, input_shape: int +) -> tuple[list, list, list]: + combined_list: list = [] + coordinate_list: list = [] + layer_type_list: list = [] + pixel_used: list[int] = [] + + size: int = int(input_shape) + + for layer_id in range(0, len(model)): + if isinstance( + model[layer_id], (torch.nn.Conv2d, torch.nn.MaxPool2d, torch.nn.AvgPool2d) + ): + out = unfold(layer=model[layer_id], size=size) + coordinate_list.append(out) + layer_type_list.append( + str(type(model[layer_id])).split(".")[-1].split("'")[0] + ) + size = int(out.shape[-1]) + else: + coordinate_list.append(None) + layer_type_list.append(None) + + assert coordinate_list[0] is not None + combined_list.append(coordinate_list[0]) + + for i in range(1, len(coordinate_list)): + if coordinate_list[i] is None: + combined_list.append(combined_list[i - 1]) + else: + for pos in range(0, coordinate_list[i].shape[-1]): + idx_shape: int | None = None + + idx = torch.unique( + torch.flatten(combined_list[i - 1][:, coordinate_list[i][:, pos]]) + ) + if idx_shape is None: + idx_shape = idx.shape[0] + assert idx_shape == idx.shape[0] + + assert idx_shape is not None + + temp = torch.zeros((idx_shape, coordinate_list[i].shape[-1])) + for pos in range(0, coordinate_list[i].shape[-1]): + idx = torch.unique( + torch.flatten(combined_list[i - 1][:, coordinate_list[i][:, pos]]) + ) + temp[:, pos] = idx + combined_list.append(temp) + + for i in range(0, len(combined_list)): + pixel_used.append(int(torch.unique(torch.flatten(combined_list[i])).shape[0])) + + return combined_list, layer_type_list, pixel_used diff --git a/thesis code/shallow net/functions/create_logger.py b/thesis code/shallow net/functions/create_logger.py new file mode 100644 index 0000000..ec27e2f --- /dev/null +++ b/thesis code/shallow net/functions/create_logger.py @@ -0,0 +1,40 @@ +import 
logging +import datetime +import os + + +def create_logger(save_logging_messages: bool, display_logging_messages: bool, model_name: str | None): + now = datetime.datetime.now() + dt_string_filename = now.strftime("%Y_%m_%d_%H_%M_%S") + + logger = logging.getLogger("MyLittleLogger") + logger.setLevel(logging.DEBUG) + + if save_logging_messages: + if model_name: + filename = os.path.join( + "logs", f"log_{dt_string_filename}_{model_name}.txt" + ) + else: + filename = os.path.join("logs", f"log_{dt_string_filename}.txt") + + time_format = "%b %-d %Y %H:%M:%S" + logformat = "%(asctime)s %(message)s" + file_formatter = logging.Formatter(fmt=logformat, datefmt=time_format) + os.makedirs("logs", exist_ok=True) + file_handler = logging.FileHandler(filename) + file_handler.setLevel(logging.INFO) + file_handler.setFormatter(file_formatter) + logger.addHandler(file_handler) + + if display_logging_messages: + time_format = "%H:%M:%S" + logformat = "%(asctime)s %(message)s" + stream_formatter = logging.Formatter(fmt=logformat, datefmt=time_format) + + stream_handler = logging.StreamHandler() + stream_handler.setLevel(logging.INFO) + stream_handler.setFormatter(stream_formatter) + logger.addHandler(stream_handler) + + return logger diff --git a/thesis code/shallow net/functions/make_cnn.py b/thesis code/shallow net/functions/make_cnn.py new file mode 100644 index 0000000..b3ceceb --- /dev/null +++ b/thesis code/shallow net/functions/make_cnn.py @@ -0,0 +1,114 @@ +import torch +import numpy as np + + +def make_cnn( + conv_out_channels_list: list[int], + conv_kernel_size: list[int], + conv_stride_size: int, + conv_activation_function: str, + train_conv_0: bool, + logger, + conv_0_kernel_size: int, + mp_1_kernel_size: int, + mp_1_stride: int, + pooling_type: str, + conv_0_enable_softmax: bool, + l_relu_negative_slope: float, +) -> torch.nn.Sequential: + assert len(conv_out_channels_list) >= 1 + assert len(conv_out_channels_list) == len(conv_kernel_size) + 1 + + cnn = torch.nn.Sequential() + + # Fixed structure + cnn.append( + torch.nn.Conv2d( + in_channels=1, + out_channels=conv_out_channels_list[0] if train_conv_0 else 32, + kernel_size=conv_0_kernel_size, + stride=1, + bias=train_conv_0, + ) + ) + + if conv_0_enable_softmax: + cnn.append(torch.nn.Softmax(dim=1)) + + setting_understood: bool = False + if conv_activation_function.upper() == str("relu").upper(): + cnn.append(torch.nn.ReLU()) + setting_understood = True + elif conv_activation_function.upper() == str("leaky relu").upper(): + cnn.append(torch.nn.LeakyReLU(negative_slope=l_relu_negative_slope)) + setting_understood = True + elif conv_activation_function.upper() == str("tanh").upper(): + cnn.append(torch.nn.Tanh()) + setting_understood = True + elif conv_activation_function.upper() == str("none").upper(): + setting_understood = True + assert setting_understood + + setting_understood = False + if pooling_type.upper() == str("max").upper(): + cnn.append(torch.nn.MaxPool2d(kernel_size=mp_1_kernel_size, stride=mp_1_stride)) + setting_understood = True + elif pooling_type.upper() == str("average").upper(): + cnn.append(torch.nn.AvgPool2d(kernel_size=mp_1_kernel_size, stride=mp_1_stride)) + setting_understood = True + elif pooling_type.upper() == str("none").upper(): + setting_understood = True + assert setting_understood + + # Changing structure + for i in range(1, len(conv_out_channels_list)): + if i == 1 and not train_conv_0: + in_channels = 32 + else: + in_channels = conv_out_channels_list[i - 1] + cnn.append( + torch.nn.Conv2d( + 
in_channels=in_channels, + out_channels=conv_out_channels_list[i], + kernel_size=conv_kernel_size[i - 1], + stride=conv_stride_size, + bias=True, + ) + ) + setting_understood = False + if conv_activation_function.upper() == str("relu").upper(): + cnn.append(torch.nn.ReLU()) + setting_understood = True + elif conv_activation_function.upper() == str("leaky relu").upper(): + cnn.append(torch.nn.LeakyReLU(negative_slope=l_relu_negative_slope)) + setting_understood = True + elif conv_activation_function.upper() == str("tanh").upper(): + cnn.append(torch.nn.Tanh()) + setting_understood = True + elif conv_activation_function.upper() == str("none").upper(): + setting_understood = True + + assert setting_understood + + # Fixed structure + # define fully connected layer: + cnn.append(torch.nn.Flatten(start_dim=1)) + cnn.append(torch.nn.LazyLinear(2, bias=True)) + + # if conv1 not trained: + filename_load_weight_0: str | None = None + if train_conv_0 is False and cnn[0]._parameters["weight"].shape[0] == 32: + filename_load_weight_0 = "weights_radius10_norm.npy" + if train_conv_0 is False and cnn[0]._parameters["weight"].shape[0] == 16: + filename_load_weight_0 = "8orient_2phase_weights.npy" + + if filename_load_weight_0 is not None: + logger.info(f"Replace weights in CNN 0 with {filename_load_weight_0}") + cnn[0]._parameters["weight"] = torch.tensor( + np.load(filename_load_weight_0), + dtype=cnn[0]._parameters["weight"].dtype, + requires_grad=False, + device=cnn[0]._parameters["weight"].device, + ) + + return cnn diff --git a/thesis code/shallow net/functions/plot_intermediate.py b/thesis code/shallow net/functions/plot_intermediate.py new file mode 100644 index 0000000..3596398 --- /dev/null +++ b/thesis code/shallow net/functions/plot_intermediate.py @@ -0,0 +1,84 @@ +import numpy as np +import matplotlib.pyplot as plt +import matplotlib as mpl +import os +import re + +mpl.rcParams["text.usetex"] = True +mpl.rcParams["font.family"] = "serif" + + +def plot_intermediate( + train_accuracy: list[float], + test_accuracy: list[float], + train_losses: list[float], + test_losses: list[float], + save_name: str, + reduction_factor: int = 1, +) -> None: + assert len(train_accuracy) == len(test_accuracy) + assert len(train_accuracy) == len(train_losses) + assert len(train_accuracy) == len(test_losses) + + # legend: + pattern = ( + r"(outChannels\[\d+(?:, \d+)*\]_kernelSize\[\d+(?:, \d+)*\]_)([^_]+)(?=_stride)" + ) + matches = re.findall(pattern, save_name) + legend_label = "".join(["".join(match) for match in matches]) + + max_epochs: int = len(train_accuracy) + # set stepsize + x = np.arange(1, max_epochs + 1) + + stepsize = max_epochs // reduction_factor + + # accuracies + plt.figure(figsize=[12, 7]) + plt.subplot(2, 1, 1) + + plt.plot(x, np.array(train_accuracy), label="Train: " + str(legend_label)) + plt.plot(x, np.array(test_accuracy), label="Test: " + str(legend_label)) + plt.title("Training and Testing Accuracy", fontsize=18) + plt.xlabel("Epoch", fontsize=18) + plt.ylabel("Accuracy (\\%)", fontsize=18) + plt.legend(fontsize=14) + plt.xticks( + np.concatenate((np.array([1]), np.arange(stepsize, max_epochs + 1, stepsize))), + np.concatenate((np.array([1]), np.arange(stepsize, max_epochs + 1, stepsize))), + ) + + # Increase tick label font size + plt.xticks(fontsize=16) + plt.yticks(fontsize=16) + plt.grid(True) + + # losses + plt.subplot(2, 1, 2) + plt.plot(x, np.array(train_losses), label="Train: " + str(legend_label)) + plt.plot(x, np.array(test_losses), label="Test: " + str(legend_label)) + 
plt.title("Training and Testing Losses", fontsize=18) + plt.xlabel("Epoch", fontsize=18) + plt.ylabel("Loss", fontsize=18) + plt.legend(fontsize=14) + plt.xticks( + np.concatenate((np.array([1]), np.arange(stepsize, max_epochs + 1, stepsize))), + np.concatenate((np.array([1]), np.arange(stepsize, max_epochs + 1, stepsize))), + ) + + # Increase tick label font size + plt.xticks(fontsize=16) + plt.yticks(fontsize=16) + plt.grid(True) + + plt.tight_layout() + os.makedirs("performance_plots", exist_ok=True) + plt.savefig( + os.path.join( + "performance_plots", + f"performance_{save_name}.pdf", + ), + dpi=300, + bbox_inches="tight", + ) + plt.show() diff --git a/thesis code/shallow net/functions/set_seed.py b/thesis code/shallow net/functions/set_seed.py new file mode 100644 index 0000000..b52815c --- /dev/null +++ b/thesis code/shallow net/functions/set_seed.py @@ -0,0 +1,12 @@ +import torch +import numpy as np + + +def set_seed(seed: int, logger) -> None: + # set seed for all used modules + if logger: + logger.info(f"set seed to {seed}") + torch.manual_seed(seed=seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed=seed) + np.random.seed(seed=seed) diff --git a/thesis code/shallow net/functions/test.py b/thesis code/shallow net/functions/test.py new file mode 100644 index 0000000..344630d --- /dev/null +++ b/thesis code/shallow net/functions/test.py @@ -0,0 +1,58 @@ +import torch +import logging + + +@torch.no_grad() +def test( + model: torch.nn.modules.container.Sequential, + loader: torch.utils.data.dataloader.DataLoader, + device: torch.device, + tb, + epoch: int, + logger: logging.Logger, + test_accuracy: list[float], + test_losses: list[float], + scale_data: float, +) -> float: + test_loss: float = 0.0 + correct: int = 0 + pattern_count: float = 0.0 + + model.eval() + + for data in loader: + label = data[0].to(device) + image = data[1].type(dtype=torch.float32).to(device) + if scale_data > 0: + image /= scale_data + + output = model(image) + + # loss and optimization + loss = torch.nn.functional.cross_entropy(output, label, reduction="sum") + pattern_count += float(label.shape[0]) + test_loss += loss.item() + prediction = output.argmax(dim=1) + correct += prediction.eq(label).sum().item() + + logger.info( + ( + "Test set:" + f" Average loss: {test_loss / pattern_count:.3e}," + f" Accuracy: {correct}/{pattern_count}," + f"({100.0 * correct / pattern_count:.2f}%)" + ) + ) + logger.info("") + + acc = 100.0 * correct / pattern_count + test_losses.append(test_loss / pattern_count) + test_accuracy.append(acc) + + # add to tb: + tb.add_scalar("Test Loss", (test_loss / pattern_count), epoch) + tb.add_scalar("Test Performance", 100.0 * correct / pattern_count, epoch) + tb.add_scalar("Test Number Correct", correct, epoch) + tb.flush() + + return acc diff --git a/thesis code/shallow net/functions/train.py b/thesis code/shallow net/functions/train.py new file mode 100644 index 0000000..6f13d84 --- /dev/null +++ b/thesis code/shallow net/functions/train.py @@ -0,0 +1,80 @@ +import torch +import logging + + +def train( + model: torch.nn.modules.container.Sequential, + loader: torch.utils.data.dataloader.DataLoader, + optimizer: torch.optim.Adam | torch.optim.SGD, + epoch: int, + device: torch.device, + tb, + test_acc, + logger: logging.Logger, + train_accuracy: list[float], + train_losses: list[float], + train_loss: list[float], + scale_data: float, +) -> float: + num_train_pattern: int = 0 + running_loss: float = 0.0 + correct: int = 0 + pattern_count: float = 0.0 + + model.train() + 
+    for data in loader:
+        label = data[0].to(device)
+        image = data[1].type(dtype=torch.float32).to(device)
+        if scale_data > 0:
+            image /= scale_data
+
+        optimizer.zero_grad()
+        output = model(image)
+        loss = torch.nn.functional.cross_entropy(output, label, reduction="sum")
+        loss.backward()
+
+        optimizer.step()
+
+        # for loss and accuracy plotting:
+        num_train_pattern += int(label.shape[0])
+        pattern_count += float(label.shape[0])
+        running_loss += float(loss)
+        train_loss.append(float(loss))
+        prediction = output.argmax(dim=1)
+        correct += prediction.eq(label).sum().item()
+
+    # total patterns in the training set (robust to a smaller final batch):
+    total_number_of_pattern: int = len(loader.dataset)
+
+    # infos:
+    logger.info(
+        (
+            "Train Epoch:"
+            f" {epoch}"
+            f" [{int(pattern_count)}/{total_number_of_pattern}"
+            f" ({100.0 * pattern_count / total_number_of_pattern:.2f}%)],"
+            f" Loss: {float(running_loss) / float(num_train_pattern):.4e},"
+            f" Acc: {(100.0 * correct / num_train_pattern):.2f}%,"
+            f" Test Acc: {test_acc:.2f}%,"
+            f" LR: {optimizer.param_groups[0]['lr']:.2e}"
+        )
+    )
+
+    acc = 100.0 * correct / num_train_pattern
+    train_accuracy.append(acc)
+
+    epoch_loss = running_loss / pattern_count
+    train_losses.append(epoch_loss)
+
+    # add to tb (per-sample epoch loss, not the last batch's summed loss):
+    tb.add_scalar("Train Loss", epoch_loss, epoch)
+    tb.add_scalar("Train Performance", acc, epoch)
+    tb.add_scalar("Train Number Correct", correct, epoch)
+
+    # for parameters:
+    for name, param in model.named_parameters():
+        if "weight" in name or "bias" in name:
+            tb.add_histogram(f"{name}", param.data.clone(), epoch)
+
+    tb.flush()
+
+    return epoch_loss
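
The patch only adds these helper modules; the training script that wires them together is not part of this excerpt. The following is a minimal, hypothetical sketch of how the pieces could compose, assuming the functions/ package layout from this patch. It uses random stand-in data instead of the stimuli built in alicorn_data_loader.py, and every hyperparameter value below is invented for illustration, not taken from config.json.

import torch
from torch.utils.tensorboard import SummaryWriter

from functions.create_logger import create_logger
from functions.set_seed import set_seed
from functions.make_cnn import make_cnn
from functions.train import train
from functions.test import test

# invented settings; the real ones live in config.json
seed = 42
num_epochs = 2
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

logger = create_logger(
    save_logging_messages=False, display_logging_messages=True, model_name=None
)
set_seed(seed, logger)

model = make_cnn(
    conv_out_channels_list=[8, 8, 8],
    conv_kernel_size=[7, 15],
    conv_stride_size=1,
    conv_activation_function="leaky relu",
    train_conv_0=True,
    logger=logger,
    conv_0_kernel_size=11,
    mp_1_kernel_size=3,
    mp_1_stride=2,
    pooling_type="max",
    conv_0_enable_softmax=False,
    l_relu_negative_slope=0.1,
).to(device)

# dry run to materialize the LazyLinear layer before building the optimizer
model(torch.zeros(1, 1, 64, 64, device=device))
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
tb = SummaryWriter()

# random stand-in data; train/test expect batches of (label, image)
images = torch.rand(64, 1, 64, 64)
labels = torch.randint(0, 2, (64,))
dataset = torch.utils.data.TensorDataset(labels, images)
train_loader = torch.utils.data.DataLoader(dataset, batch_size=16)
test_loader = torch.utils.data.DataLoader(dataset, batch_size=16)

train_accuracy: list[float] = []
train_losses: list[float] = []
train_loss: list[float] = []
test_accuracy: list[float] = []
test_losses: list[float] = []

test_acc = 0.0
for epoch in range(1, num_epochs + 1):
    train(
        model, train_loader, optimizer, epoch, device, tb, test_acc, logger,
        train_accuracy, train_losses, train_loss, scale_data=0.0,  # 0.0 skips rescaling
    )
    test_acc = test(
        model, test_loader, device, tb, epoch, logger,
        test_accuracy, test_losses, scale_data=0.0,
    )
tb.close()

Note the ordering: test() returns the accuracy that the next epoch's train() call only uses for its progress log line, which is why test_acc starts at 0.0 before the first test pass has run.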