From 51827da846b2dfc4abea363d1c0a8e259bb7a64d Mon Sep 17 00:00:00 2001 From: Paul Simon Blomenkamp Date: Thu, 14 Jan 2021 14:06:08 +0100 Subject: [PATCH 1/9] Added source_detection directories --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 1be06ed5..0a5058ab 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ radionets/simulations/test_data examples/example_data/ +source/detection/yolov5 #evaluation results */results results/ From f24d039d872f6f795449c39ae339225e367a9632 Mon Sep 17 00:00:00 2001 From: Paul Simon Blomenkamp Date: Thu, 14 Jan 2021 14:08:26 +0100 Subject: [PATCH 2/9] Fixed typo --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 0a5058ab..1b730171 100644 --- a/.gitignore +++ b/.gitignore @@ -9,7 +9,7 @@ radionets/simulations/test_data examples/example_data/ -source/detection/yolov5 +source_detection/yolov5 #evaluation results */results results/ From 9603246573b7b6314c89420b5afb7cfff6a86fec Mon Sep 17 00:00:00 2001 From: Paul Simon Blomenkamp Date: Thu, 14 Jan 2021 14:20:32 +0100 Subject: [PATCH 3/9] Added missing files --- source_detection/Testing.ipynb | 227 ++++++++++++++++++++++++ source_detection/yolo_data.ipynb | 263 ++++++++++++++++++++++++++++ source_detection/yolo_model.yaml | 50 ++++++ source_detection/yolo_settings.yaml | 6 + 4 files changed, 546 insertions(+) create mode 100644 source_detection/Testing.ipynb create mode 100644 source_detection/yolo_data.ipynb create mode 100644 source_detection/yolo_model.yaml create mode 100644 source_detection/yolo_settings.yaml diff --git a/source_detection/Testing.ipynb b/source_detection/Testing.ipynb new file mode 100644 index 00000000..7e44f46f --- /dev/null +++ b/source_detection/Testing.ipynb @@ -0,0 +1,227 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "from tqdm import tqdm\n", + "from 
scipy.ndimage import gaussian_filter\n", + "from radionets.simulations.gaussians import create_grid, gauss_parameters, create_rot_mat, gaussian_component, add_gaussian\n", + "import matplotlib.pyplot as plt\n", + "from matplotlib.animation import FuncAnimation\n", + "from matplotlib.animation import PillowWriter" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def toy_gaussian_source(grid,comps, amp, x, y, sig_x, sig_y, rot, step, sides):\n", + " \"\"\"\n", + " Creates random Gaussian source parameters and returns an image\n", + " of a Gaussian source.\n", + "\n", + " Parameters\n", + " ----------\n", + " grid: nd array\n", + " array holding 2d grid and axis for one image\n", + "\n", + " Returns\n", + " -------\n", + " s: 2darray\n", + " Image containing a simulated Gaussian source.\n", + " \"\"\"\n", + " s = toy_create_gaussian_source(\n", + " grid, comps, amp, x, y, sig_x, sig_y, rot, step, sides, blur=True\n", + " )\n", + " return s" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def toy_create_gaussian_source(\n", + " grid, comps, amp, x, y, sig_x, sig_y, rot, step, sides, blur=True\n", + "):\n", + " \"\"\"\n", + " Combines Gaussian components on a 2d grid to create a Gaussian source\n", + "\n", + " takes grid\n", + " side: one-sided or two-sided\n", + " core dominated or lobe dominated\n", + " number of components\n", + " angle of the jet\n", + "\n", + " Parameters\n", + " ----------\n", + " grid: ndarray\n", + " 2dgrid + X and Y meshgrid\n", + " comps: int\n", + " number of components\n", + " amp: 1darray\n", + " amplitudes of components\n", + " x: 1darray\n", + " x positions of components\n", + " y: 1darray\n", + " y positions of components\n", + " sig_x: 1darray\n", + " standard deviations of components in x\n", + " sig_y: 1darray\n", + " standard deviations of components in y\n", + " rot: int\n", + " rotation of the jet in 
degree\n", + " sides: int\n", + " 0 one-sided, 1 two-sided jet\n", + " blur: bool\n", + " use Gaussian filter to blur image\n", + "\n", + " Returns\n", + " -------\n", + " source: 2darray\n", + " 2d grid containing Gaussian source\n", + "\n", + " Comments\n", + " --------\n", + " components should not have too big gaps between each other\n", + " \"\"\"\n", + " if sides == 1:\n", + " comps += comps - 1\n", + " amp = np.append(amp, amp[1:])\n", + " x = np.append(x, -x[1:])\n", + " y = np.append(y, -y[1:])\n", + " sig_x = np.append(sig_x, sig_x[1:])\n", + " sig_y = np.append(sig_y, sig_y[1:])\n", + "\n", + " for i in range(comps):\n", + " source = add_gaussian(\n", + " grid=grid,\n", + " amp=amp[i],\n", + " x=x[i]*step/4,\n", + " y=y[i]*step/4,\n", + " sig_x=sig_x[i],\n", + " sig_y=sig_y[i],\n", + " rot=rot,\n", + " )\n", + " if blur is True:\n", + " source = gaussian_filter(source, sigma=1.5)\n", + " return source\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "#%matplotlib notebook\n", + "from ipywidgets import interact, widgets" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "comps, amp, x, y, sig_x, sig_y, rot, sides = gauss_parameters()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "6322ddaaa5674267b3b857669539ebb9", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "interactive(children=(IntSlider(value=1, description='n', max=20), Output()), _dom_classes=('widget-interact',…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#sides=1\n", + "#comps=5\n", + "\n", + "\n", + "\n", + "\n", + "def f(n):\n", + " a = create_grid(63,n+1)\n", + " step = n\n", + " sim_source = toy_gaussian_source(a[0],comps, amp, x, y, sig_x, sig_y, rot, step, sides)\n", + " fig.canvas.draw_idle()\n", + " plt.imshow(sim_source)\n", + " plt.show()\n", + " plt.pause(0.1)\n", + " \n", + "fig = plt.figure(figsize=(6, 4))\n", + "\n", + "\n", + "interact(f, n=widgets.IntSlider(min=0,max=20,step=1,value=1)) \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/source_detection/yolo_data.ipynb b/source_detection/yolo_data.ipynb new file mode 100644 index 00000000..9787ed76 --- /dev/null +++ b/source_detection/yolo_data.ipynb @@ -0,0 +1,263 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "from tqdm import tqdm\n", + "from scipy.ndimage import gaussian_filter\n", + "from 
scipy import ndimage\n", + "from radionets.simulations.gaussians import create_grid, create_gaussian_source, add_gaussian, gaussian_component, create_rot_mat\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def gauss_paramters(comps, pixel):\n", + " \"\"\"\n", + " Generate a random set of Gaussian parameters.\n", + "\n", + " Parameters\n", + " ----------\n", + " None\n", + "\n", + " Returns\n", + " -------\n", + " comps: int\n", + " Number of components\n", + " amp: float\n", + " Amplitude of the core component\n", + " x: array\n", + " x positions of components\n", + " y: array\n", + " y positions of components\n", + " sig_x:\n", + " standard deviation in x\n", + " sig_y:\n", + " standard deviation in y\n", + " rot: int\n", + " rotation in degree\n", + " sides: int\n", + " 0 for one-sided and 1 for two-sided jets\n", + " \"\"\"\n", + " # random number of components between 4 and 9\n", + " # comps = np.random.randint(4, 7) # decrease for smaller images\n", + "\n", + " # start amplitude between 10 and 1e-3\n", + " amp_start = (np.random.randint(0, 100) * np.random.random()) / 10 ############ <-------100 -> 1000\n", + " # if start amp is 0, draw a new number\n", + " while amp_start == 0:\n", + " amp_start = (np.random.randint(0, 100) * np.random.random()) / 10\n", + " # logarithmic decrease to outer components\n", + " amp = np.array([(i+1)**2*amp_start / np.exp(i) for i in range(comps)]) ######!\n", + "\n", + " # linear distance bestween the components\n", + " x = np.arange(0, comps) * (pixel//(2*comps))###########<------War 5 statt 50\n", + " y = np.zeros(comps)\n", + "\n", + " # extension of components\n", + " # random start value between 1 - 0.375 and 1 - 0\n", + " # linear distance between components\n", + " # distances scaled by factor between 0.25 and 0.5\n", + " # randomnized for each sigma\n", + " off1 = (np.random.random() + 0.5) / 4\n", + " off2 = 
(np.random.random() + 0.5) / 4\n", + " fac1 = (np.random.random() + 1) / 4\n", + " fac2 = (np.random.random() + 1) / 4\n", + " sig_x =(np.arange(1, comps + 1) - off1) * fac1 *8\n", + " sig_y =(np.arange(1, comps + 1) - off2) * fac2 *8\n", + " \n", + "\n", + " return amp, x, y, sig_x, sig_y\n", + "\n", + "def gaussian_source(grid, rot, comps, sides):\n", + " \"\"\"\n", + " Creates random Gaussian source parameters and returns an image\n", + " of a Gaussian source.\n", + "\n", + " Parameters\n", + " ----------\n", + " grid: nd array\n", + " array holding 2d grid and axis for one image\n", + "\n", + " Returns\n", + " -------\n", + " s: 2darray\n", + " Image containing a simulated Gaussian source.\n", + " \"\"\"\n", + " pixel = grid.shape[1]\n", + " amp, x, y, sig_x, sig_y = gauss_paramters(comps, pixel)\n", + " rot = rot\n", + " s = create_gaussian_source(\n", + " grid, comps, amp, x, y, sig_x, sig_y, rot, sides, blur=True\n", + " )\n", + " return s\n", + "\n", + "\n", + "def create_mask(pixel, rot, comps, sides):\n", + " x = np.arange(0, comps) * (pixel//(2*comps))\n", + " y = np.zeros(comps)\n", + " if sides == 1:\n", + " comps += comps - 1\n", + " x = np.append(x, -x[1:])\n", + " y = np.append(y, -y[1:])\n", + " a = np.linspace(0, pixel - 1, num=pixel)\n", + " b = np.linspace(0, pixel - 1, num=pixel)\n", + " k, j = np.meshgrid(a, b)\n", + " mask = np.zeros(k.shape)\n", + " for i in range(comps):\n", + " cent = np.array([pixel // 2 + x[i], pixel // 2 + y[i]])\n", + " rot_mat = create_rot_mat(np.deg2rad(rot))\n", + " x_0, y_0 = ((cent - pixel // 2) @ rot_mat) + pixel // 2\n", + " x[i]= int(x_0)\n", + " y[i]= int(y_0)\n", + " if abs(x[i]) < pixel and abs(y[i]) < pixel:\n", + " mask[int(y_0),int(x_0)] = 1\n", + " return mask\n", + "def head(grid, bundle_size, pixel):\n", + " grid_copy = grid.copy()\n", + " grid_copy2 = grid.copy()\n", + " mask_bundle = grid_copy[:,0]\n", + " gauss_bundle = grid_copy2[:,0]\n", + " for i in range(bundle_size):\n", + " rot = 
np.random.randint(0, 360)\n", + " comps = np.random.randint(4, 7)\n", + " sides = np.random.randint(0, 2)\n", + " gaussian_image = gaussian_source(grid[i,:], rot, comps, sides)\n", + " gauss_bundle[i] += gaussian_image\n", + " mask= create_mask(pixel, rot, comps, sides)\n", + " mask_bundle[i]+=mask\n", + " pair = [gauss_bundle, mask_bundle]\n", + " return pair\n", + "def simulate_sources(\n", + " data_path,\n", + " num_bundles,\n", + " bundle_size,\n", + " img_size,\n", + " option\n", + "):\n", + " gaussians = np.zeros(num_bundles)\n", + " points = np.zeros(num_bundles)\n", + " for i in tqdm(range(num_bundles)):\n", + " grid = create_grid(img_size, bundle_size)\n", + " pairs = head(grid, bundle_size, img_size)\n", + " gaussian_bundle = pairs[0]\n", + " point_bundle = pairs [1]\n", + " return gaussian_bundle, point_bundle\n" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "def yolo_data(path, mode, pixel, bundle_size, num_bundles):\n", + " images, pos = simulate_sources(path,num_bundles,bundle_size,pixel, mode)\n", + " for i in tqdm(range(bundle_size)):\n", + " cent = np.array([pixel / 2, pixel/ 2])\n", + " plt.axis('off')\n", + " plt.imshow(images[i])\n", + " plt.savefig(path + \"images/\" + mode + \"/image\" + str(i) + \".jpg\",bbox_inches='tight', dpi = 200)\n", + " plt.cla()\n", + " occurences = np.where(pos[i] == pos[i].max())\n", + " comps = len(occurences[1])\n", + " with open(path + \"labels/\" + mode + \"/image\" + str(i) + \".txt\", \"w\") as file:\n", + " for j in range(comps):\n", + " x = occurences[0][j]\n", + " y = occurences[1][j] \n", + " x_0= x - cent[0]\n", + " y_0= y - cent[1]\n", + " x_rel = x/(pixel)\n", + " y_rel = y/(pixel)\n", + " width = 64/pixel\n", + " file.write(\"%d\\t\" %0 + \"%f\\t\" %y_rel + \"%f\\t\" %x_rel + \"%f\\t\" %width + \"%f\" %width+'\\n')\n", + " return occurences\n" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": { + "scrolled": true 
+ }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 1/1 [00:00<00:00, 1.53it/s]\n", + "100%|██████████| 4/4 [00:00<00:00, 4.62it/s]\n", + "100%|██████████| 1/1 [00:00<00:00, 4.51it/s]\n", + "100%|██████████| 2/2 [00:00<00:00, 4.40it/s]\n", + "100%|██████████| 1/1 [00:00<00:00, 12.97it/s]\n", + "100%|██████████| 1/1 [00:00<00:00, 6.18it/s]\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQYAAAD8CAYAAACVSwr3AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Il7ecAAAACXBIWXMAAAsTAAALEwEAmpwYAAAMbElEQVR4nO3bcYikd33H8ffHXFOpjbGYFeTuNJFeqldbMF1Si1BTTMslhbs/LHIHobUED62RglJIsaQS/7JSC8K19kpDVDDx9I+y4EmgNiEQPM2GaPQuRNbTNhelOTXNP8HE0G//mEk72e/uzZO72Znb+n7BwjzP/Hbmu8PwvmeeeS5VhSRNetmiB5B08TEMkhrDIKkxDJIawyCpMQySmqlhSHJHkieTfHuT+5Pkk0nWkjyS5JrZjylpnoYcMdwJ7DvH/TcAe8Y/h4F/uPCxJC3S1DBU1f3AT86x5ADwmRo5AbwqyWtnNaCk+dsxg8fYCTw+sX1mvO+H6xcmOczoqIJXvOIVv/XGN75xBk8vaTMPPfTQj6pq6aX+3izCMFhVHQWOAiwvL9fq6uo8n176uZPk38/n92bxrcQTwO6J7V3jfZK2qVmEYQX44/G3E28Fnq6q9jFC0vYx9aNEkruA64ArkpwB/hr4BYCq+hRwHLgRWAOeAf50q4aVNB9Tw1BVh6bcX8D7ZzaRpIXzykdJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBknNoDAk2ZfksSRrSW7d4P7XJbk3ycNJHkly4+xHlTQvU8OQ5BLgCHADsBc4lGTvumV/BRyrqrcAB4G/n/WgkuZnyBHDtcBaVZ2uqueAu4ED69YU8Mrx7cuBH8xuREnzNiQMO4HHJ7bPjPdN+ghwU5IzwHHgAxs9UJLDSVaTrJ49e/Y8xpU0D7M6+XgIuLOqdgE3Ap9N0h67qo5W1XJVLS8tLc3oqSXN2pAwPAHsntjeNd436WbgGEBVfRV4OXDFLAaUNH9DwvAgsCfJVUkuZXRycWXdmv8A3gGQ5E2MwuBnBWmbmhqGqnoeuAW4B3iU0bcPJ5PcnmT/eNmHgPck+SZwF/DuqqqtGlrS1toxZFFVHWd0UnFy320Tt08Bb5vtaJIWxSsfJTWGQVJjGCQ1hkFSYxgkNYZBUmMYJDWGQVJjGCQ1hkFSYxgkNYZBUmMYJDWGQVJjGCQ1hkFSYxgkNYZBUmMYJDWGQVJjGCQ1hkFSYxgkNYZBUmMYJDWGQVJjGCQ1hkFSYxgkNYZBUmMYJDWGQVJjGCQ1hkFSMygMSfYleSzJWpJbN1nzriSnkpxM8rnZjilpnnZMW5DkEuAI8PvAGeDBJCtVdWpizR7gL4G3VdVTSV6zVQNL2npDjhiuBdaq6nRVPQfcDRxYt+Y9wJGqegqgqp6c7ZiS5mlIGHYCj09snxnvm3Q1cHWSB5KcSLJv
owdKcjjJapLVs2fPnt/EkrbcrE4+7gD2ANcBh4B/SvKq9Yuq6mhVLVfV8tLS0oyeWtKsDQnDE8Duie1d432TzgArVfWzqvoe8B1GoZC0DQ0Jw4PAniRXJbkUOAisrFvzL4yOFkhyBaOPFqdnN6akeZoahqp6HrgFuAd4FDhWVSeT3J5k/3jZPcCPk5wC7gX+oqp+vFVDS9paqaqFPPHy8nKtrq4u5LmlnxdJHqqq5Zf6e175KKkxDJIawyCpMQySGsMgqTEMkhrDIKkxDJIawyCpMQySGsMgqTEMkhrDIKkxDJIawyCpMQySGsMgqTEMkhrDIKkxDJIawyCpMQySGsMgqTEMkhrDIKkxDJIawyCpMQySGsMgqTEMkhrDIKkxDJIawyCpMQySGsMgqRkUhiT7kjyWZC3JredY984klWR5diNKmrepYUhyCXAEuAHYCxxKsneDdZcBfw58bdZDSpqvIUcM1wJrVXW6qp4D7gYObLDuo8DHgJ/OcD5JCzAkDDuBxye2z4z3/a8k1wC7q+pL53qgJIeTrCZZPXv27EseVtJ8XPDJxyQvAz4BfGja2qo6WlXLVbW8tLR0oU8taYsMCcMTwO6J7V3jfS+4DHgzcF+S7wNvBVY8ASltX0PC8CCwJ8lVSS4FDgIrL9xZVU9X1RVVdWVVXQmcAPZX1eqWTCxpy00NQ1U9D9wC3AM8ChyrqpNJbk+yf6sHlDR/O4YsqqrjwPF1+27bZO11Fz6WpEXyykdJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQMCkOSfUkeS7KW5NYN7v9gklNJHknylSSvn/2okuZlahiSXAIcAW4A9gKHkuxdt+xhYLmqfhP4IvA3sx5U0vwMOWK4FlirqtNV9RxwN3BgckFV3VtVz4w3TwC7ZjumpHkaEoadwOMT22fG+zZzM/Dlje5IcjjJapLVs2fPDp9S0lzN9ORjkpuAZeDjG91fVUerarmqlpeWlmb51JJmaMeANU8Auye2d433vUiS64EPA2+vqmdnM56kRRhyxPAgsCfJVUkuBQ4CK5MLkrwF+Edgf1U9OfsxJc3T1DBU1fPALcA9wKPAsao6meT2JPvHyz4O/DLwhSTfSLKyycNJ2gaGfJSgqo4Dx9ftu23i9vUznkvSAnnlo6TGMEhqDIOkxjBIagyDpMYwSGoMg6TGMEhqDIOkxjBIagyDpMYwSGoMg6TGMEhqDIOkxjBIagyDpMYwSGoMg6TGMEhqDIOkxjBIagyDpMYwSGoMg6TGMEhqDIOkxjBIagyDpMYwSGoMg6TGMEhqDIOkxjBIagyDpGZQGJLsS/JYkrUkt25w/y8m+fz4/q8luXLmk0qam6lhSHIJcAS4AdgLHEqyd92ym4GnqupXgb8DPjbrQSXNz5AjhmuBtao6XVXPAXcDB9atOQB8enz7i8A7kmR2Y0qapx0D1uwEHp/YPgP89mZrqur5JE8DrwZ+NLkoyWHg8Hjz2STfPp+hF+QK1v09F7HtNCtsr3m306wAv3Y+vzQkDDNTVUeBowBJVqtqeZ7PfyG207zbaVbYXvNup1lhNO/5/N6QjxJPALsntneN9224JskO4HLgx+czkKTFGxKGB4E9Sa5KcilwEFhZt2YF+JPx7T8C/q2qanZjSpqnqR8lxucMbgHuAS4B7qiqk0luB1aragX4Z+CzSdaAnzCKxzRHL2DuRdhO826nWWF7zbudZoXznDf+wy5pPa98lNQYBknNlodhO11OPWDWDyY5leSRJF9J8vpFzDkxzznnnVj3ziSVZGFfsw2ZNcm7xq/vySSfm/eM62aZ9l54XZJ7kzw8fj/cuIg5x7PckeTJza4Lysgnx3/LI0mumfqgVbVlP4xOVn4X
eANwKfBNYO+6NX8GfGp8+yDw+a2c6QJn/T3gl8a337eoWYfOO153GXA/cAJYvlhnBfYADwO/Mt5+zcX82jI6qfe+8e29wPcXOO/vAtcA397k/huBLwMB3gp8bdpjbvURw3a6nHrqrFV1b1U9M948weiajkUZ8toCfJTR/1356TyHW2fIrO8BjlTVUwBV9eScZ5w0ZN4CXjm+fTnwgznO9+JBqu5n9G3gZg4An6mRE8Crkrz2XI+51WHY6HLqnZutqarngRcup563IbNOuplRhRdl6rzjQ8bdVfWleQ62gSGv7dXA1UkeSHIiyb65TdcNmfcjwE1JzgDHgQ/MZ7Tz8lLf2/O9JPr/iyQ3AcvA2xc9y2aSvAz4BPDuBY8y1A5GHyeuY3Qkdn+S36iq/1rkUOdwCLizqv42ye8wuo7nzVX134sebBa2+ohhO11OPWRWklwPfBjYX1XPzmm2jUyb9zLgzcB9Sb7P6LPlyoJOQA55bc8AK1X1s6r6HvAdRqFYhCHz3gwcA6iqrwIvZ/QfrC5Gg97bL7LFJ0V2AKeBq/i/kzi/vm7N+3nxycdjCzqBM2TWtzA6KbVnETO+1HnXrb+PxZ18HPLa7gM+Pb59BaND31dfxPN+GXj3+PabGJ1jyALfD1ey+cnHP+TFJx+/PvXx5jDwjYzq/13gw+N9tzP6FxdGpf0CsAZ8HXjDAl/cabP+K/CfwDfGPyuLmnXIvOvWLiwMA1/bMProcwr4FnDwYn5tGX0T8cA4Gt8A/mCBs94F/BD4GaMjr5uB9wLvnXhtj4z/lm8NeR94SbSkxisfJTWGQVJjGCQ1hkFSYxgkNYZBUmMYJDX/AwqkUdVj8DQ4AAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "for mode in [\"train\", \"valid\", \"test\"]:\n", + " bundle_size = 4\n", + " if mode == \"valid\":\n", + " bundle_size = bundle_size//2\n", + " if mode == \"test\":\n", + " bundle_size = bundle_size//4\n", + " yolo_data('./test_folder/data/', mode, 640, bundle_size, 1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/source_detection/yolo_model.yaml b/source_detection/yolo_model.yaml new file mode 100644 index 00000000..9656bfa6 --- /dev/null +++ b/source_detection/yolo_model.yaml @@ -0,0 +1,50 @@ +# parameters +# parameters +nc: 1 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, BottleneckCSP, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, BottleneckCSP, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, 
[512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, BottleneckCSP, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] + diff --git a/source_detection/yolo_settings.yaml b/source_detection/yolo_settings.yaml new file mode 100644 index 00000000..4e9a2ec5 --- /dev/null +++ b/source_detection/yolo_settings.yaml @@ -0,0 +1,6 @@ +train: //net/big-tank/POOL/users/pblomenkamp/radionets/yolo/data/multi/images/train + +val: //net/big-tank/POOL/users/pblomenkamp/radionets/yolo/data/multi/images/valid + +nc: 1 +names: ['gauss'] \ No newline at end of file From e318831570ddcb8deadc84182718fde35390f8bc Mon Sep 17 00:00:00 2001 From: Paul Simon Blomenkamp Date: Thu, 18 Mar 2021 12:57:46 +0100 Subject: [PATCH 4/9] Added SSD300 architecture and FPN improvements --- radionets/dl_framework/architecture.py | 1 + .../architectures/source_detection.py | 70 ++ radionets/dl_framework/data.py | 16 +- radionets/dl_framework/learner.py | 5 +- radionets/dl_framework/loss_functions.py | 7 + radionets/simulations/gaussians.py | 114 +++- source_detection/FPN/FPN.py | 599 ++++++++++++++++++ source_detection/FPN/FPNeval.py | 85 +++ source_detection/FPN/FPNloss.py | 86 +++ source_detection/FPN/FPNtrain.py | 230 +++++++ source_detection/Testing.ipynb | 227 ------- source_detection/evaluation.py | 76 +++ source_detection/loss.py | 85 +++ source_detection/model.py | 551 ++++++++++++++++ source_detection/source_data.py | 169 +++++ 
source_detection/source_utils.py | 18 + source_detection/train.py | 229 +++++++ source_detection/yolo_data.ipynb | 263 -------- source_detection/yolo_model.yaml | 50 -- source_detection/yolo_settings.yaml | 6 - 20 files changed, 2303 insertions(+), 584 deletions(-) create mode 100644 radionets/dl_framework/architectures/source_detection.py create mode 100644 source_detection/FPN/FPN.py create mode 100644 source_detection/FPN/FPNeval.py create mode 100644 source_detection/FPN/FPNloss.py create mode 100644 source_detection/FPN/FPNtrain.py delete mode 100644 source_detection/Testing.ipynb create mode 100644 source_detection/evaluation.py create mode 100644 source_detection/loss.py create mode 100644 source_detection/model.py create mode 100644 source_detection/source_data.py create mode 100644 source_detection/source_utils.py create mode 100644 source_detection/train.py delete mode 100644 source_detection/yolo_data.ipynb delete mode 100644 source_detection/yolo_model.yaml delete mode 100644 source_detection/yolo_settings.yaml diff --git a/radionets/dl_framework/architecture.py b/radionets/dl_framework/architecture.py index 0ce26c2e..89d8d8b6 100644 --- a/radionets/dl_framework/architecture.py +++ b/radionets/dl_framework/architecture.py @@ -3,3 +3,4 @@ from radionets.dl_framework.architectures.filter import * from radionets.dl_framework.architectures.filter_deep import * from radionets.dl_framework.architectures.res_exp import * +from radionets.dl_framework.architectures.source_detection import * diff --git a/radionets/dl_framework/architectures/source_detection.py b/radionets/dl_framework/architectures/source_detection.py new file mode 100644 index 00000000..1645f140 --- /dev/null +++ b/radionets/dl_framework/architectures/source_detection.py @@ -0,0 +1,70 @@ +from torch import nn +from radionets.dl_framework.model import Lambda, shape + + +class VGG(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Sequential( + nn.Conv2d(1,64, padding = 1, 
kernel_size = 3), nn.ReLU()) + self.conv2 = nn.Sequential( + nn.Conv2d(64,64, padding = 1, kernel_size = 3), nn.ReLU()) + self.maxpool = nn.MaxPool2d(kernel_size = 2, stride = 2, ceil_mode = False) + self.conv3 = nn.Sequential( + nn.Conv2d(64,128, padding = 1, kernel_size = 3), nn.ReLU()) + self.conv4 = nn.Sequential( + nn.Conv2d(128,128, padding = 1, kernel_size = 3), nn.ReLU()) + self.conv5 = nn.Sequential( + nn.Conv2d(128,256, padding = 1, kernel_size = 3), nn.ReLU()) + self.conv6 = nn.Sequential( + nn.Conv2d(256,256, padding = 1, kernel_size = 3), nn.ReLU()) + self.conv7 = nn.Sequential( + nn.Conv2d(256,256, padding = 1, kernel_size = 3), nn.ReLU()) + self.conv8 = nn.Sequential( + nn.Conv2d(256,512, padding = 1, kernel_size = 3), nn.ReLU()) + self.conv9 = nn.Sequential( + nn.Conv2d(512,512, padding = 1, kernel_size = 3), nn.ReLU()) + self.conv10 = nn.Sequential( + nn.Conv2d(512,512, padding = 1, kernel_size = 3), nn.ReLU()) + self.conv11 = nn.Sequential( + nn.Conv2d(512,512, padding = 1, kernel_size = 3), nn.ReLU()) + self.conv12 = nn.Sequential( + nn.Conv2d(512,512, padding = 1, kernel_size = 3), nn.ReLU()) + self.conv13 = nn.Sequential( + nn.Conv2d(512,512, padding = 1, kernel_size = 3), nn.ReLU(), nn.Dropout()) + self.fc1 = nn.Sequential(nn.Linear(512 * 9 * 9, 4096), nn.ReLU(), nn.Dropout()) + self.fc2 = nn.Sequential(nn.Linear(4096, 4096), nn.ReLU()) + self.fc3 = nn.Sequential(nn.Linear(4096, 4)) + self.softmax = nn.Softmax(dim = 1) + + def forward(self, x): + x = self.conv1(x) + x = self.conv2(x) + x = self.maxpool(x) + + x = self.conv3(x) + x = self.conv4(x) + x = self.maxpool(x) + + x = self.conv5(x) + x = self.conv6(x) + x = self.conv7(x) + x = self.maxpool(x) + #print(x.shape) + x = self.conv8(x) + x = self.conv9(x) + x = self.conv10(x) + x = self.maxpool(x) + + x = self.conv11(x) + x = self.conv12(x) + x = self.conv13(x) + x = self.maxpool(x) + + x = self.fc1(x.reshape(-1, 512 * 9 * 9)) + + x = self.fc2(x) + x = self.fc3(x) + x = self.softmax(x) + + 
return x diff --git a/radionets/dl_framework/data.py b/radionets/dl_framework/data.py index ecdb07e3..840f1e16 100644 --- a/radionets/dl_framework/data.py +++ b/radionets/dl_framework/data.py @@ -64,12 +64,8 @@ def __len__(self): return len(self.bundles) * self.num_img def __getitem__(self, i): - if self.source_list: - x = self.open_image("x", i) - y = self.open_image("z", i) - else: - x = self.open_image("x", i) - y = self.open_image("y", i) + x = self.open_image("x", i) + y = self.open_image("y", i) return x, y def open_bundle(self, bundle_path, var): @@ -113,11 +109,11 @@ def open_image(self, var, i): if len(i) == 1: data_amp, data_phase = data[:, 0], data[:, 1] - data_channel = torch.cat([data_amp, data_phase], dim=0) + data_channel = data else: data_amp, data_phase = data[:, 0].unsqueeze(1), data[:, 1].unsqueeze(1) - data_channel = torch.cat([data_amp, data_phase], dim=1) + data_channel = data else: if self.source_list: data_channel = data @@ -130,7 +126,7 @@ def open_image(self, var, i): if len(i) == 1: data_channel = data.reshape(data.shape[-1] ** 2) else: - data_channel = data.reshape(-1, data.shape[-1] ** 2) + data_channel = data.reshape(-1, data.shape[-1] ** 2) return data_channel.float() @@ -278,6 +274,6 @@ def load_data(data_path, mode, fourier=False, source_list=False): dataset containing x and y images """ bundle_paths = get_bundles(data_path) - data = [path for path in bundle_paths if re.findall("samp_" + mode, path.name)] + data = [path for path in bundle_paths if re.findall(mode, path.name)] ds = h5_dataset(data, tar_fourier=fourier, source_list=source_list) return ds diff --git a/radionets/dl_framework/learner.py b/radionets/dl_framework/learner.py index 9ff41d87..a2864cb7 100644 --- a/radionets/dl_framework/learner.py +++ b/radionets/dl_framework/learner.py @@ -23,6 +23,7 @@ def get_learner( dls = DataLoaders.from_dsets( data.train_ds, data.valid_ds, + bs = data.train_dl.batch_size, ) return Learner(dls, arch, loss_func, lr=lr, cbs=cb_funcs, 
opt_func=opt_func) @@ -65,8 +66,8 @@ def define_learner( cbfs.extend( [ SaveTempCallback(model_path=model_path), - AvgLossCallback, - DataAug, + AvgLossCallback#, + # DataAug, ] ) if train_conf["telegram_logger"] and not lr_find: diff --git a/radionets/dl_framework/loss_functions.py b/radionets/dl_framework/loss_functions.py index c49da3c2..690fe549 100644 --- a/radionets/dl_framework/loss_functions.py +++ b/radionets/dl_framework/loss_functions.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import torch from torch import nn from radionets.dl_framework.hook_fastai import hook_outputs @@ -508,3 +509,9 @@ def splitted_L1(x, y): loss_phase = l1(inp_phase, tar_phase) return loss_amp * 10 + loss_phase + + +def classifier_loss(x, y): + criterion = nn.CrossEntropyLoss() + loss = criterion(x,y.squeeze(1).long()) + return loss diff --git a/radionets/simulations/gaussians.py b/radionets/simulations/gaussians.py index 7c9a4d03..66ec66a5 100644 --- a/radionets/simulations/gaussians.py +++ b/radionets/simulations/gaussians.py @@ -350,40 +350,55 @@ def create_ext_gauss_bundle(grid): # pointlike gaussians -def create_gauss(img, N, sources, spherical, source_list): +def create_gauss(img, N, sources, source_list, img_size, diffuse = False, bboxes = False, mosaic = False): # img = [img] - mx = np.random.randint(1, 63, size=(N, sources)) - my = np.random.randint(1, 63, size=(N, sources)) - amp = ( - np.random.randint(0.001, 100, size=(N)) * 1 / 10 * np.random.randint(5, 10) - ) / 1e2 - - if spherical: - sx = np.random.randint(3, 8, size=(N, sources)) + mos = 1 + if mosaic: + mos = 10 + mx = np.random.randint(1, img_size*mos, size=(N, sources)) + my = np.random.randint(1, img_size*mos, size=(N, sources)) + + if diffuse: + amp = ( + np.random.randint(30, 40, size=(N))# * 1 / 10 * np.random.randint(3, 5) + ) #/ 1e2 + sx = np.random.randint((img_size**2)/200, (img_size**2)/100, size=(N, sources))*10 + sy = sx + else: + amp = ( + np.random.randint(50, 100, size=(N))# * 1 / 10 * 
np.random.randint(5, 10) + )# / 1e2 + sx = np.random.randint((img_size**2)/720, (img_size**2)/360, size=(N, sources)) sy = sx - else: - sx = np.random.randint(1, 15, size=(N, sources)) - sy = np.random.randint(1, 15, size=(N, sources)) - theta = np.random.randint(0, 360, size=(N, sources)) + # Doesnt work properly right now + #if spherical: + # sx = np.random.randint(3, 8, size=(N, sources)) + # sy = sx + #else: + # sx = np.random.randint(1, 15, size=(N, sources)) + # sy = np.random.randint(1, 15, size=(N, sources)) + # theta = np.random.randint(0, 360, size=(N, sources)) s = np.zeros((N, sources, 1)) # changed from 5 for i in range(N): for j in range(sources): - g = gauss(mx[i, j], my[i, j], sx[i, j], sy[i, j], amp[i]) + g = gauss(img_size*mos, mx[i, j], my[i, j], sx[i, j], sy[i, j], amp[i]) #DPG # s[i,j] = np.array([mx[i,j],my[i,j],sx[i,j],sy[i,j],amp[i]]) s[i, j] = np.array([mx[i, j]]) - if spherical: - img[i] += g - else: - # rotation around center of the source - padX = [g.shape[0] - mx[i, j], mx[i, j]] - padY = [g.shape[1] - my[i, j], my[i, j]] - imgP = np.pad(g, [padY, padX], "constant") - imgR = ndimage.rotate(imgP, theta[i, j], reshape=False) - imgC = imgR[padY[0] : -padY[1], padX[0] : -padX[1]] - img[i] += imgC + #if spherical: + img[i] += g + #else: + # # rotation around center of the source + # padX = [g.shape[0] - mx[i, j], mx[i, j]] + # padY = [g.shape[1] - my[i, j], my[i, j]] + # imgP = np.pad(g, [padY, padX], "constant") + # imgR = ndimage.rotate(imgP, theta[i, j], reshape=False) + # imgC = imgR[padY[0] : -padY[1], padX[0] : -padX[1]] + # img[i] += imgC if source_list: return img, s + elif bboxes: + return img/amp, [mx[0][0],my[0][0]], [sx[0][0],sy[0][0]] else: return img @@ -405,7 +420,54 @@ def gauss_pointsources(img, num_img, sources): return np.array(img) -def gauss(mx, my, sx, sy, amp=0.01): - x = np.arange(63)[None].astype(np.float) +def gauss(img_size, mx, my, sx, sy, amp=0.01): + x = np.arange(img_size)[None].astype(np.float) y = x.T return 
amp * np.exp(-((y - my) ** 2) / sy).dot(np.exp(-((x - mx) ** 2) / sx)) + +def create_diamond(img, num_img, sources, pixel, bboxes = False, mosaic = False): + mos = 1 + if mosaic: + mos = 10 + mx = np.random.randint(0, pixel*mos, size=(num_img, sources)) + my = np.random.randint(0, pixel*mos, size=(num_img, sources)) + amp = (np.random.randint(50, 100, size=(num_img)))# * np.random.random()) / 1e2 + sigma = np.random.randint(5, 10) + for i in range(num_img): + targets = sources + # targets = np.random.randint(2, sources + 1) + for j in range(targets): + g = diamond(mx[i, j], my[i, j], sigma, sigma, amp[i], pixel*mos) + img[i] += g + + if bboxes: + return np.array(img)/amp, [mx[0][0],my[0][0]], [sigma,sigma] + else: + return np.array(img) +def diamond(mx, my, sx, sy, amp, pixel): + x = np.arange(pixel)[None].astype(np.float) + y = x.T + return amp* np.exp(-(np.abs(y - my)) / sy).dot(np.exp(-(np.abs(x - mx)) / sx)) + +def create_square(img, num_img, sources, pixel, bboxes = False, mosaic = False): + mos = 1 + if mosaic: + mos = 10 + mx = np.random.randint(0, pixel*mos, size=(num_img, sources)) + my = np.random.randint(0, pixel*mos, size=(num_img, sources)) + amp = (np.random.randint(50, 100, size=(num_img)))# * np.random.random()) / 1e2 + for i in range(num_img): + targets = sources + # targets = np.random.randint(2, sources + 1) + for j in range(targets): + g = square(mx[i, j], my[i, j], amp[i], pixel*mos,mos) + img[i] += g + if bboxes: + return np.array(img)/amp, [mx[0][0],my[0][0]] + else: + return np.array(img) +def square(mx, my, amp, pixel, mos): + x = np.arange(pixel)[None].astype(np.float) + y = x.T + return amp* np.where(abs(y-my)<=pixel*0.02/mos, 1, 0).dot(np.where(abs(x-mx)<=pixel*0.02/mos, 1, 0)) + diff --git a/source_detection/FPN/FPN.py b/source_detection/FPN/FPN.py new file mode 100644 index 00000000..dcfa3e43 --- /dev/null +++ b/source_detection/FPN/FPN.py @@ -0,0 +1,599 @@ + +import numpy as np + +import torch + +# + +from torch import nn +from math 
import sqrt +from radionets.evaluation.utils import load_pretrained_model +import torch.nn.functional as F + +import torch +import torchvision +import os + + +# - + +class base_maps(nn.Module): + + def __init__(self): + super(base_maps, self).__init__() + self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1) + self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1) + self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2) + + self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1) + self.conv4 = nn.Conv2d(128, 128, kernel_size=3, padding=1) + self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2) + + self.conv5 = nn.Conv2d(128, 256, kernel_size=3, padding=1) + self.conv6 = nn.Conv2d(256, 256, kernel_size=3, padding=1) + self.conv7 = nn.Conv2d(256, 256, kernel_size=3, padding=1) + self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) + + self.conv8 = nn.Conv2d(256, 512, kernel_size=3, padding=1) + self.conv9 = nn.Conv2d(512, 512, kernel_size=3, padding=1) + self.conv10 = nn.Conv2d(512, 512, kernel_size=3, padding=1) + self.maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2) + + self.conv11 = nn.Conv2d(512, 512, kernel_size=3, padding=1) + self.conv12 = nn.Conv2d(512, 512, kernel_size=3, padding=1) + self.conv13 = nn.Conv2d(512, 512, kernel_size=3, padding=1) + self.maxpool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) + + self.conv14 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) + + self.conv15 = nn.Conv2d(1024, 1024, kernel_size=1) + + self.load_arch() + def load_arch(self): + arch = load_pretrained_model('VGG', '//net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/build/VGG_test/temp_20.model', 300) + state_dict = self.state_dict() + param_names = list(state_dict.keys()) + pretrained_state_dict = arch.state_dict() + pretrained_param_names = list(pretrained_state_dict.keys()) + for i, param in enumerate(param_names): + state_dict[param] = pretrained_state_dict[pretrained_param_names[i]] + conv_fc1_weight = 
pretrained_state_dict['fc1.0.weight'].view(4096, 512, 9, 9) + conv_fc1_bias = pretrained_state_dict['fc1.0.bias'] + state_dict['conv14.weight'] = decimate(conv_fc1_weight, m = [4, None, 3, 3]) + state_dict['conv14.bias'] = decimate(conv_fc1_bias, m = [4]) + + conv_fc2_weight = pretrained_state_dict['fc2.0.weight'].view(4096, 4096, 1, 1) + conv_fc2_bias = pretrained_state_dict['fc2.0.bias'] + state_dict['conv15.weight'] = decimate(conv_fc2_weight, m = [4, 4, None, None]) + state_dict['conv15.bias'] = decimate(conv_fc2_bias, m = [4]) + + self.load_state_dict(state_dict) + print("\n arch loaded \n") + def forward(self, image): + out = F.relu(self.conv1(image)) + out = F.relu(self.conv2(out)) # (N, 64, 300, 300) + out = self.maxpool1(out) + + out = F.relu(self.conv3(out)) + out = F.relu(self.conv4(out)) # (N, 128, 150, 150) + out = self.maxpool2(out) + + out = F.relu(self.conv5(out)) + out = F.relu(self.conv6(out)) + out = F.relu(self.conv7(out)) + fmap7 = out #update # (N, 256, 75, 75) + out = self.maxpool3(out) + + out = F.relu(self.conv8(out)) + out = F.relu(self.conv9(out)) + out = F.relu(self.conv10(out)) + fmap10 = out # (N, 512, 38, 38) + out = self.maxpool4(out) + + out = F.relu(self.conv11(out)) + out = F.relu(self.conv12(out)) + out = F.relu(self.conv13(out)) + out = self.maxpool5(out) # (N, 512, 19, 19) + + out = F.relu(self.conv14(out)) + + fmap15 = F.relu(self.conv15(out)) # (N, 1024, 19, 19) + + + base_fmaps = {'fmap7': fmap7,'fmap10': fmap10, 'fmap15':fmap15} + return base_fmaps + + +class adv_maps(nn.Module): + def __init__(self): + super(adv_maps, self).__init__() + + + self.conv16 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) + self.conv17 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) + + self.conv18 = nn.Conv2d(512, 128, kernel_size=1, padding=0) + self.conv19 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) + + self.conv20 = nn.Conv2d(256, 128, kernel_size=1, padding=0) + self.conv21 = nn.Conv2d(128, 256, kernel_size=3, 
padding=0) + + self.conv22 = nn.Conv2d(256, 128, kernel_size=1, padding=0) + self.conv23 = nn.Conv2d(128, 256, kernel_size=3, padding=0) + + self.init_conv2d() + + def init_conv2d(self): + + for c in self.children(): + if isinstance(c, nn.Conv2d): + nn.init.xavier_uniform_(c.weight) + nn.init.constant_(c.bias, 0.) + + def forward(self, fmap15): + + out = F.relu(self.conv16(fmap15)) # (N, 256, 19, 19) + out = F.relu(self.conv17(out)) + fmap17 = out # (N, 512, 10, 10) + + out = F.relu(self.conv18(out)) + out = F.relu(self.conv19(out)) + fmap19 = out # (N, 256, 5, 5) + + out = F.relu(self.conv20(out)) + out = F.relu(self.conv21(out)) + fmap21 = out # (N, 256, 3, 3) + + out = F.relu(self.conv22(out)) + fmap23 = F.relu(self.conv23(out)) + + + fmaps = {'fmap17':fmap17, 'fmap19':fmap19, 'fmap21':fmap21, 'fmap23':fmap23} + return fmaps + + +# + +class feature_pyramid(nn.Module): + def __init__(self): + super(feature_pyramid, self).__init__() + + #self.toplayer = nn.Conv2d() + #lateral layers + self.lateral7 = nn.Conv2d(256, 256, kernel_size = 1, stride = 1, padding = 0) + self.lateral10 = nn.Conv2d(512, 256, kernel_size = 1, stride = 1, padding = 0) + self.lateral15 = nn.Conv2d(1024, 256, kernel_size = 1, stride = 1, padding = 0) + self.lateral17 = nn.Conv2d(512, 256, kernel_size = 1, stride = 1, padding = 0) + self.lateral19 = nn.Conv2d(256, 256, kernel_size = 1, stride = 1, padding = 0) + self.lateral21 = nn.Conv2d(256, 256, kernel_size = 1, stride = 1, padding = 0) + self.lateral23 = nn.Conv2d(256, 256, kernel_size = 1, stride = 1, padding = 0) + #smooth layers to reduce aliasing effects + self.smooth7 = nn.Conv2d(256,256, kernel_size =3, stride = 1, padding = 1) + self.smooth10 = nn.Conv2d(256,256, kernel_size =3, stride = 1, padding = 1) + self.smooth15 = nn.Conv2d(256,256, kernel_size =3, stride = 1, padding = 1) + self.smooth17 = nn.Conv2d(256,256, kernel_size =3, stride = 1, padding = 1) + self.smooth19 = nn.Conv2d(256,256, kernel_size =3, stride = 1, padding = 1) 
+ self.smooth21 = nn.Conv2d(256,256, kernel_size =3, stride = 1, padding = 1) + + def upsample_add(self,lateral_map, upper_map): + _,_,H, W = lateral_map.size() + return F.upsample(upper_map, size=(H,W), mode = 'bilinear')+lateral_map + + def forward(self,fmap7, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23): + #maybe add batch norm + p23 = self.lateral23(fmap23) + p21 = self.upsample_add(self.lateral21(fmap21),p23) + p19 = self.upsample_add(self.lateral19(fmap19),p21) + p17 = self.upsample_add(self.lateral17(fmap17),p19) + p15 = self.upsample_add(self.lateral15(fmap15),p17) + p10 = self.upsample_add(self.lateral10(fmap10),p15) + p7 = self.upsample_add(self.lateral7(fmap7),p10) + + p21 = self.smooth21(p21) + p19 = self.smooth19(p19) + p17 = self.smooth17(p17) + p15 = self.smooth15(p15) + p10 = self.smooth10(p10) + p7 = self.smooth7(p7) + + return p7, p10, p15, p17, p19, p21, p23 + + + +# + +def create_prior_boxes(): + fmap_dims = { 'fmap7' : 75, #update + 'fmap10': 38, #was 38 with old 37 + 'fmap15': 19, #was 19 with old 18 + 'fmap17': 10, #was 10 with old 9 + 'fmap19': 5, + 'fmap21': 3, + 'fmap23': 1} + maps = list(fmap_dims.keys()) + + scales = { 'fmap7' : 0.02, + 'fmap10': 0.06, + 'fmap15': 0.11, + 'fmap17': 0.16, + 'fmap19': 0.2, + 'fmap21': 0.25, + 'fmap23': 0.3} + + + aspect_ratios = {'fmap7': [1.], + 'fmap10': [1.], + 'fmap15': [1.], + 'fmap17': [1.], + 'fmap19': [1.], + 'fmap21': [1.], + 'fmap23': [1.]} + priors = [] + for a, s in enumerate(maps): + for d in range(fmap_dims[s]): + for f in range(fmap_dims[s]): + x = (d + 0.5) / fmap_dims[s] + y = (f + 0.5) / fmap_dims[s] + + for ratio in aspect_ratios[s]: + priors.append([x, y, scales[s] * sqrt(ratio), scales[s] / sqrt(ratio)]) + + if ratio == 1.: + try: + additional_scale = sqrt(scales[s] * scales[maps[a+1]]) + + except IndexError: + additional_scale = 1. 
+ priors.append([x, y, additional_scale, additional_scale]) + priors = torch.FloatTensor(priors).to('cuda') + center_to_boundary(priors) + priors.clamp_(0, 1) + boundary_to_center(priors) + return priors + +class predconvs(nn.Module): + def __init__(self, nclasses): + + super(predconvs, self).__init__() + + self.nclasses = nclasses + + #n_boxes = {'fmap10': 4, + # 'fmap15': 6, + # 'fmap17': 6, + # 'fmap19': 6, + # 'fmap21': 4, + # 'fmap23': 4} + + n_boxes = { 'fmap7': 2, + 'fmap10': 2, + 'fmap15': 2, + 'fmap17': 2, + 'fmap19': 2, + 'fmap21': 2, + 'fmap23': 2} + self.loc_fmap7 = nn.Conv2d(256, n_boxes['fmap7'] * 4, kernel_size=3, padding=1) + self.loc_fmap10 = nn.Conv2d(256, n_boxes['fmap10'] * 4, kernel_size=3, padding=1) + self.loc_fmap15 = nn.Conv2d(256, n_boxes['fmap15'] * 4, kernel_size=3, padding=1) + self.loc_fmap17 = nn.Conv2d(256, n_boxes['fmap17'] * 4, kernel_size=3, padding=1) + self.loc_fmap19 = nn.Conv2d(256, n_boxes['fmap19'] * 4, kernel_size=3, padding=1) + self.loc_fmap21 = nn.Conv2d(256, n_boxes['fmap21'] * 4, kernel_size=3, padding=1) + self.loc_fmap23 = nn.Conv2d(256, n_boxes['fmap23'] * 4, kernel_size=3, padding=1) + + self.cl_fmap7 = nn.Conv2d(256, n_boxes['fmap7'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap10 = nn.Conv2d(256, n_boxes['fmap10'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap15 = nn.Conv2d(256, n_boxes['fmap15'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap17 = nn.Conv2d(256, n_boxes['fmap17'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap19 = nn.Conv2d(256, n_boxes['fmap19'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap21 = nn.Conv2d(256, n_boxes['fmap21'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap23 = nn.Conv2d(256, n_boxes['fmap23'] * nclasses, kernel_size=3, padding=1) + + self.init_conv2d() + + def init_conv2d(self): + for c in self.children(): + if isinstance(c, nn.Conv2d): + nn.init.xavier_uniform_(c.weight) + nn.init.constant_(c.bias, 0.) 
+ + def forward(self,fmap7, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23): + #stuff + batch_size = fmap10.size(0) + l_fmap10 = self.loc_fmap10(fmap10) + l_fmap10 = l_fmap10.permute(0,2,3,1).contiguous() + l_fmap10 = l_fmap10.view(batch_size,-1,4) + + l_fmap15 = self.loc_fmap15(fmap15) + l_fmap15 = l_fmap15.permute(0,2,3,1).contiguous() + l_fmap15 = l_fmap15.view(batch_size,-1,4) + + l_fmap17 = self.loc_fmap17(fmap17) + l_fmap17 = l_fmap17.permute(0,2,3,1).contiguous() + l_fmap17 = l_fmap17.view(batch_size,-1,4) + + l_fmap19 = self.loc_fmap19(fmap19) + l_fmap19 = l_fmap19.permute(0,2,3,1).contiguous() + l_fmap19 = l_fmap19.view(batch_size,-1,4) + + l_fmap21 = self.loc_fmap21(fmap21) + l_fmap21 = l_fmap21.permute(0,2,3,1).contiguous() + l_fmap21 = l_fmap21.view(batch_size,-1,4) + + l_fmap23 = self.loc_fmap23(fmap23) + l_fmap23 = l_fmap23.permute(0,2,3,1).contiguous() + l_fmap23 = l_fmap23.view(batch_size,-1,4) + + l_fmap7 = self.loc_fmap7(fmap7) + l_fmap7 = l_fmap7.permute(0,2,3,1).contiguous() + l_fmap7 = l_fmap7.view(batch_size,-1,4) + + c_fmap10 = self.cl_fmap10(fmap10) + c_fmap10 = c_fmap10.permute(0,2,3,1).contiguous() + c_fmap10 = c_fmap10.view(batch_size,-1,self.nclasses) + + c_fmap15 = self.cl_fmap15(fmap15) + c_fmap15 = c_fmap15.permute(0,2,3,1).contiguous() + c_fmap15 = c_fmap15.view(batch_size,-1,self.nclasses) + + c_fmap17 = self.cl_fmap17(fmap17) + c_fmap17 = c_fmap17.permute(0,2,3,1).contiguous() + c_fmap17 = c_fmap17.view(batch_size,-1,self.nclasses) + + c_fmap19 = self.cl_fmap19(fmap19) + c_fmap19 = c_fmap19.permute(0,2,3,1).contiguous() + c_fmap19 = c_fmap19.view(batch_size,-1,self.nclasses) + + c_fmap21 = self.cl_fmap21(fmap21) + c_fmap21 = c_fmap21.permute(0,2,3,1).contiguous() + c_fmap21 = c_fmap21.view(batch_size,-1,self.nclasses) + + c_fmap23 = self.cl_fmap23(fmap23) + c_fmap23 = c_fmap23.permute(0,2,3,1).contiguous() + c_fmap23 = c_fmap23.view(batch_size,-1,self.nclasses) + + c_fmap7 = self.cl_fmap7(fmap7) + c_fmap7 = 
c_fmap7.permute(0,2,3,1).contiguous() + c_fmap7 = c_fmap7.view(batch_size,-1,self.nclasses) + + locs = torch.cat([l_fmap7, l_fmap10, l_fmap15, l_fmap17, l_fmap19, l_fmap21, l_fmap23], dim = 1) + classes_scores = torch.cat([c_fmap7, c_fmap10, c_fmap15, c_fmap17, c_fmap19, c_fmap21, c_fmap23], dim = 1) + return locs, classes_scores + +class SSD300(nn.Module): + + def __init__(self, nclasses): + + super(SSD300, self).__init__() + + self.nclasses = nclasses + + self.base = base_maps() + self.adv = adv_maps() + self.pred_convs = predconvs(nclasses) + + self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1)) + self.rescale_factors7 = nn.Parameter(torch.FloatTensor(1, 256, 1, 1)) + nn.init.constant_(self.rescale_factors, 20) + nn.init.constant_(self.rescale_factors7, 20) + self.priors_cxcy = create_prior_boxes() + self.feature_pyramid = feature_pyramid() + + def forward(self, image): + + + + bmaps = self.base(image) + fmap10 = bmaps['fmap10'] #[256, 38, 38] + fmap15 = bmaps['fmap15'] + fmap7 = bmaps['fmap7'] + #fmap_13 = bmaps['fmap_13'] + norm7 = fmap7.pow(2).sum(dim=1, keepdim=True).sqrt() + norm = fmap10.pow(2).sum(dim=1, keepdim=True).sqrt() + fmap10 = fmap10 / norm + fmap10 = fmap10 * self.rescale_factors + + fmap7 = fmap7 / norm7 + fmap7 = fmap7 * self.rescale_factors7 + + amaps = self.adv(fmap15) + fmap17 = amaps['fmap17'] + fmap19 = amaps['fmap19'] + fmap21 = amaps['fmap21'] + fmap23 = amaps['fmap23'] + fmap7, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23 = self.feature_pyramid(fmap7, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23) + locs, classes_scores = self.pred_convs(fmap7, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23) + + return locs, classes_scores + + def object_detection(self, locs, class_scores, priors, min_score=0.01, max_overlap=0.45,top_k=200): + + batch_size = locs.size(0) + n_priors = priors.size(0) + + classes_scores = F.softmax(class_scores, dim = 2) + all_predicted_boxes = list() + all_predicted_labels = list() + 
all_predicted_scores = list() + assert n_priors == locs.size(1) == classes_scores.size(1) + for i in range(batch_size): + boundary_locs = center_to_boundary(offset_to_center(locs[i], priors)) + predicted_boxes = list() + predicted_labels = list() + predicted_scores = list() + max_scores, best_label = classes_scores[i].max(dim=1) + for c in range(0, self.nclasses-1): + c_scores = classes_scores[i][:,c] + score_above_min = c_scores > min_score + n_above_min = score_above_min.sum().item() + if n_above_min == 0: + continue + c_scores = c_scores[score_above_min] + c_boundary_locs = boundary_locs[score_above_min] + + c_scores, sort_ind = c_scores.sort(dim = 0, descending = True) + c_boundary_locs = c_boundary_locs[sort_ind] + + overlap = jaccard(c_boundary_locs, c_boundary_locs) + suppress = torch.zeros((n_above_min)).bool().to('cuda') + + for box in range(c_boundary_locs.size(0)): + if suppress[box] == 1: + continue + + suppress = suppress | (overlap[box] > max_overlap) + + suppress[box] = 0 + + predicted_boxes.append(c_boundary_locs[~suppress]) + predicted_labels.append(torch.LongTensor((~suppress).sum().item()*[c]).to('cuda')) + predicted_scores.append(c_scores[~suppress]) + if len(predicted_boxes) == 0: + predicted_boxes.append(torch.FloatTensor([[0.,0.,1.,1.]]).to('cuda')) + predicted_labels.append(torch.LongTensor([4]).to('cuda')) + predicted_scores.append(torch.FloatTensor([0.]).to('cuda')) + + predicted_boxes = torch.cat(predicted_boxes, dim = 0) + predicted_labels = torch.cat(predicted_labels, dim = 0) + predicted_scores = torch.cat(predicted_scores, dim = 0) + num_objects = predicted_scores.size(0) + + if num_objects > top_k: + predicted_scores, sort_ind = predicted_scores.sort(dim = 0, descending = True) + predicted_scores = predicted_scores[:top_k] + predicted_boxes = predicted_boxes[:top_k] + predicted_labels = predicted_labels[:top_k] + + all_predicted_boxes.append(predicted_boxes) + all_predicted_labels.append(predicted_labels) + 
all_predicted_scores.append(predicted_scores) + + return all_predicted_boxes, all_predicted_labels, all_predicted_scores + +def center_to_boundary(coord): + return torch.cat([coord[:,:2]-(coord[:,2:]/2), + coord[:,:2]+(coord[:,2:]/2)], 1) +def boundary_to_center(coord): + return torch.cat([(coord[:,2:]+coord[:,:2])/2, + coord[:,2:]-coord[:,:2]], 1) + + #"The 10 and 5 below are referred to as 'variances' in the original Caffe repo, completely empirical + # They are for some sort of numerical conditioning, for 'scaling the localization gradient' + # See https://github.com/weiliu89/caffe/issues/155" +def offset_to_center(coded_box_coord, prior_coord): + return torch.cat([coded_box_coord[:,:2] * prior_coord[:,2:]/10 + prior_coord[:,:2], + torch.exp(coded_box_coord[:,2:]/5)*prior_coord[:,2:]],1) + +def center_to_offset(box_coord, prior_coord): + return torch.cat([(box_coord[:, :2] - prior_coord[:,:2])/(prior_coord[:,2:]/10), + torch.log(box_coord[:,2:] / prior_coord[:,2:])* 5], 1) + +def jaccard(boxes1, boxes2): + + low_bound = torch.max(boxes1[:,:2].unsqueeze(1), boxes2[:,:2].unsqueeze(0)) + up_bound = torch.min(boxes1[:,2:].unsqueeze(1), boxes2[:,2:].unsqueeze(0)) + + intersect_dims = torch.clamp(up_bound - low_bound, min = 0) + intersect = intersect_dims[:,:,0]*intersect_dims[:, :, 1] + + area_boxes1 = (boxes1[:,2]-boxes1[:,0]) * (boxes1[:,3] - boxes1[:,1]) + area_boxes2 = (boxes2[:,2]-boxes2[:,0]) * (boxes2[:,3] - boxes2[:,1]) + + union = area_boxes1.unsqueeze(1) +area_boxes2.unsqueeze(0) - intersect + + return intersect/union +def decimate(tensor, m): + assert tensor.dim() == len(m) + for d in range(tensor.dim()): + if m[d] is not None: + tensor = tensor.index_select(dim=d, + index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long()) + + return tensor + +class old_predconvs(nn.Module): + def __init__(self, nclasses): + #some changes + super(predconvs, self).__init__() + + self.nclasses = nclasses + + n_boxes = {'fmap10': 4, + 'fmap_10': 6, + 'fmap_13': 6, + 
'fmap_15': 6, + 'fmap_17': 4, + 'fmap_19': 4} + + self.loc_fmap10 = nn.Conv2d(512, n_boxes['fmap_7'] * 4, kernel_size=3, padding=1) + self.loc_fmap_10 = nn.Conv2d(1024, n_boxes['fmap_10'] * 4, kernel_size=3, padding=1) + self.loc_fmap_13 = nn.Conv2d(512, n_boxes['fmap_13'] * 4, kernel_size=3, padding=1)#channels were different + self.loc_fmap_15 = nn.Conv2d(256, n_boxes['fmap_15'] * 4, kernel_size=3, padding=1)#same here + self.loc_fmap_17 = nn.Conv2d(256, n_boxes['fmap_17'] * 4, kernel_size=3, padding=1) + self.loc_fmap_19 = nn.Conv2d(256, n_boxes['fmap_19'] * 4, kernel_size=3, padding=1) + + self.cl_fmap_7 = nn.Conv2d(512, n_boxes['fmap_7'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap_10 = nn.Conv2d(1024, n_boxes['fmap_10'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap_13 = nn.Conv2d(512, n_boxes['fmap_13'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap_15 = nn.Conv2d(256, n_boxes['fmap_15'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap_17 = nn.Conv2d(256, n_boxes['fmap_17'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap_19 = nn.Conv2d(256, n_boxes['fmap_19'] * nclasses, kernel_size=3, padding=1) + + self.init_conv2d() + + def init_conv2d(self): + for c in self.children(): + if isinstance(c, nn.Conv2d): + nn.init.xavier_uniform_(c.weight) + nn.init.constant_(c.bias, 0.) 
+ + def forward(self, fmap7, fmap10, fmap13, fmap15, fmap17, fmap19): + + batch_size = fmap7.size(0) + l_fmap7 = self.loc_fmap_7(fmap7) + l_fmap7 = l_fmap7.permute(0,2,3,1).contiguous() + l_fmap7 = l_fmap7.view(batch_size,-1,4) + + l_fmap10 = self.loc_fmap_10(fmap10) + l_fmap10 = l_fmap10.permute(0,2,3,1).contiguous() + l_fmap10 = l_fmap10.view(batch_size,-1,4) + + l_fmap13 = self.loc_fmap_13(fmap13) + l_fmap13 = l_fmap13.permute(0,2,3,1).contiguous() + l_fmap13 = l_fmap13.view(batch_size,-1,4) + + l_fmap15 = self.loc_fmap_15(fmap15) + l_fmap15 = l_fmap15.permute(0,2,3,1).contiguous() + l_fmap15 = l_fmap15.view(batch_size,-1,4) + + l_fmap17 = self.loc_fmap_17(fmap17) + l_fmap17 = l_fmap17.permute(0,2,3,1).contiguous() + l_fmap17 = l_fmap17.view(batch_size,-1,4) + + l_fmap19 = self.loc_fmap_19(fmap19) + l_fmap19 = l_fmap19.permute(0,2,3,1).contiguous() + l_fmap19 = l_fmap19.view(batch_size,-1,4) + + c_fmap7 = self.cl_fmap_7(fmap7) + c_fmap7 = c_fmap7.permute(0,2,3,1).contiguous() + c_fmap7 = c_fmap7.view(batch_size,-1,self.nclasses) + + c_fmap10 = self.cl_fmap_10(fmap10) + c_fmap10 = c_fmap10.permute(0,2,3,1).contiguous() + c_fmap10 = c_fmap10.view(batch_size,-1,self.nclasses) + + c_fmap13 = self.cl_fmap_13(fmap13) + c_fmap13 = c_fmap13.permute(0,2,3,1).contiguous() + c_fmap13 = c_fmap13.view(batch_size,-1,self.nclasses) + + c_fmap15 = self.cl_fmap_15(fmap15) + c_fmap15 = c_fmap15.permute(0,2,3,1).contiguous() + c_fmap15 = c_fmap15.view(batch_size,-1,self.nclasses) + + c_fmap17 = self.cl_fmap_17(fmap17) + c_fmap17 = c_fmap17.permute(0,2,3,1).contiguous() + c_fmap17 = c_fmap17.view(batch_size,-1,self.nclasses) + + c_fmap19 = self.cl_fmap_19(fmap19) + c_fmap19 = c_fmap19.permute(0,2,3,1).contiguous() + c_fmap19 = c_fmap19.view(batch_size,-1,self.nclasses) + + locs = torch.cat([l_fmap7, l_fmap10, l_fmap13, l_fmap15, l_fmap17, l_fmap19], dim = 1) + classes_scores = torch.cat([c_fmap7, c_fmap10, c_fmap13, c_fmap15, c_fmap17, c_fmap19], dim = 1) + return locs, 
classes_scores diff --git a/source_detection/FPN/FPNeval.py b/source_detection/FPN/FPNeval.py new file mode 100644 index 00000000..13185ca3 --- /dev/null +++ b/source_detection/FPN/FPNeval.py @@ -0,0 +1,85 @@ +# + +import FPNtrain +import torch +from radionets.evaluation.utils import load_pretrained_model, eval_model +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.patches as patches +import h5py +import FPN +from FPN import center_to_boundary +from radionets.dl_framework.data import get_bundles +from tqdm import tqdm + +label_map = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') +color_map = ('r', 'g', 'w', 'y','brown') +def box_coord(coord, img_size): + x = coord[0].item()*img_size + y = coord[3].item()*img_size + xmax = coord[2] + ymin = coord[1] + w = xmax.item()*img_size - x + h = -(y - ymin.item()*img_size) + return x,y,w,h + +def detect_sources(checkpoint_path, data_path, img_size, n = 0): + data = get_bundles(data_path) + eval_dataset = FPNtrain.detect_dataset(data) + eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size = 32, + shuffle = False, + collate_fn = eval_dataset.collate_fn) + checkpoint = checkpoint_path + checkpoint = torch.load(checkpoint) + model = checkpoint['model'] + model = model.to('cuda') + model.eval() + #print(eval_dataset[31]) + with torch.no_grad(): + for i, (images, boxes, labels) in enumerate(tqdm(eval_loader)): + print(enumerate(eval_loader)) + images = images.to('cuda') + print(images.shape) + predicted_locs, predicted_scores = model(images) + predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, + min_score = 0.5, max_overlap = 0.45, top_k = 10) + fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,8)) + for j in range(len(eval_dataset[n][1][0])): + true_label = label_map[eval_dataset[n][2][0][j].item()] + color = color_map[eval_dataset[n][2][0][j].item()] + trux, truy, truw, truh = 
box_coord(eval_dataset[n][1][0][j],img_size) + trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor=color, facecolor='none') + ax1.text(trux,(truy+truh-7),true_label, color = 'k',fontsize=8,backgroundcolor = color) + ax1.add_patch(trurect) + + for k in range(len(predl[n])): + predicted_label = label_map[predl[n][k].item()] + color = color_map[predl[n][k].item()] + predx, predy, predw, predh = box_coord(predb[n][k],img_size) + predrect = patches.Rectangle((predx, predy), predw, predh, linewidth=1, edgecolor=color, + facecolor='none') + ax2.text(predx,(predy+predh-7),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) + ax2.add_patch(predrect) + + ax1.imshow(eval_dataset[n][0].squeeze(0)) + ax2.imshow(eval_dataset[n][0].squeeze(0)) + ax1.legend() + ax2.legend() + +def image_detection(checkpoint, image): + image = torch.FloatTensor(image).unsqueeze(0).unsqueeze(0) + checkpoint = torch.load(checkpoint) + model = checkpoint['model'] + model = model.to('cuda') + model.eval() + with torch.no_grad(): + image = image.to('cuda') + predicted_locs, predicted_scores = model(image) + predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, + min_score = 0.2, max_overlap = 0.1, top_k = 100) + return predb, predl + +def classifier_eval(arch, img_batch): + + pred = eval_model(img, arch) + _, l = torch.max(pred, dim = 1) + return l diff --git a/source_detection/FPN/FPNloss.py b/source_detection/FPN/FPNloss.py new file mode 100644 index 00000000..f76fd154 --- /dev/null +++ b/source_detection/FPN/FPNloss.py @@ -0,0 +1,86 @@ +# --- +# jupyter: +# jupytext: +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.6.0 +# kernelspec: +# display_name: Python 3 +# language: python +# name: python3 +# --- + +# + +#just a copy of the old loss +from torch import nn +import torch +import torch.nn.functional as F +from math import sqrt +from itertools import 
product as product +from source_detection.model import center_to_boundary, center_to_offset, boundary_to_center, jaccard,offset_to_center +import torchvision + +class detectionLoss(nn.Module): #0.6 #9 #20. + def __init__(self, priors_cxcy, threshold = 0.5, neg_pos_ratio = 3, alpha = 1.): + + super(detectionLoss, self).__init__() + self.priors_cxcy = priors_cxcy + self.priors_xy = center_to_boundary(priors_cxcy) + self.threshold = threshold + self.neg_pos_ratio = neg_pos_ratio + self.alpha = alpha + + self.smooth_l1 = nn.SmoothL1Loss() + self.cross_entropy = nn.CrossEntropyLoss(reduce = False) + + def forward(self, predicted_locs, predicted_scores, data_locs, data_labels): + + batch_size = predicted_locs.size(0) + n_classes = predicted_scores.size(2) + n_priors = self.priors_cxcy.size(0) + assert n_priors == predicted_locs.size(1) == predicted_scores.size(1) + + true_locs = torch.zeros((batch_size, n_priors, 4), dtype = torch.float).to('cuda') + true_classes = torch.zeros((batch_size, n_priors), dtype = torch.long).to('cuda') + + for image_i in range(batch_size): + n_objects = data_locs[image_i][0].size(0) + overlap = jaccard(data_locs[image_i][0], self.priors_xy) #overlap of the boxes in this image with the priors + overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0) + #overlap has shape [a,s,f,g,....] + #[h,d,g,h,....].... each entry is the overlap of one true box with all the priors. + #each row describes one object. Max gives the maximum overlap value and the index of the object. + + _, prior_for_each_object = overlap.max(dim = 1) + + object_for_each_prior[prior_for_each_object] = torch.LongTensor(range(n_objects)).to('cuda') + + overlap_for_each_prior[prior_for_each_object] = 1. 
+ label_for_each_prior = data_labels[image_i][0][0][object_for_each_prior]#very ugly shapes watch out + label_for_each_prior[overlap_for_each_prior < self.threshold] = 4 #nodiff + true_classes[image_i] = label_for_each_prior + + true_locs[image_i] = center_to_offset(boundary_to_center(data_locs[image_i][0][object_for_each_prior]), self.priors_cxcy) + positive_priors = true_classes != 4 #nodiff + loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors]) + n_positives = positive_priors.sum(dim = 1) + n_hard_negatives = self.neg_pos_ratio * n_positives + conf_loss_all = self.cross_entropy(predicted_scores.view(-1, n_classes), true_classes.view(-1)) + conf_loss_all = conf_loss_all.view(batch_size, n_priors) + + conf_loss_pos = conf_loss_all[positive_priors] + + conf_loss_neg = conf_loss_all.clone() + conf_loss_neg[positive_priors] = 0 + conf_loss_neg, _ = conf_loss_neg.sort(dim = 1, descending = True) + hardness_ranks = torch.LongTensor(range(n_priors)).unsqueeze(0).expand_as(conf_loss_neg).to('cuda') + hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1) + conf_loss_hard_neg = conf_loss_neg[hard_negatives] + + conf_loss = (conf_loss_hard_neg.sum()+conf_loss_pos.sum())/n_positives.sum().float() + return conf_loss + self.alpha * loc_loss +# - + + diff --git a/source_detection/FPN/FPNtrain.py b/source_detection/FPN/FPNtrain.py new file mode 100644 index 00000000..0791750c --- /dev/null +++ b/source_detection/FPN/FPNtrain.py @@ -0,0 +1,230 @@ +#just a copy of old train +import torch +import h5py +import numpy as np +from radionets.dl_framework.data import get_bundles +from FPNloss import detectionLoss +from FPN import SSD300 +from tqdm import tqdm +import matplotlib.pyplot as plt + +path = get_bundles('//net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/train/') +iterations = 120000 +n_classes = 5 #nodiff +checkpoint = None +#checkpoint = 
'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300.pth.tar' +batch_size = 32 +workers = 4 +lr = 1e-4 +decay_lr_at = [80000,100000] +decay_lr_to = 0.1 +momentum = 0.9 +weight_decay = 5e-4 +grad_clip = None + +class detect_dataset: + + def __init__(self, bundle_path): + + self.bundles = bundle_path + self.num_img = len(h5py.File(self.bundles[0]))//3 + + def __getitem__(self, i): + + x = self.open_image('x', i) + y = self.open_boxes('y', i) + z = self.open_labels('z', i) + return x,y,z + def __len__(self): + return len(self.bundles)*self.num_img + def open_image(self, var, i): + if isinstance(i, int): + i = torch.tensor([i]) + elif isinstance(i, np.ndarray): + i = torch.tensor(i) + indices, _ = torch.sort(i) + bundle = indices // self.num_img + image = indices - bundle * self.num_img + bundle_unique = torch.unique(bundle) + + bundle_paths = [ + h5py.File(self.bundles[bundle], "r") for bundle in bundle_unique + ] + bundle_paths_str = list(map(str, bundle_paths)) + data = torch.FloatTensor( ### + [ + #print(bund[var+str(int(img))].shape) + bund[var+str(int(img))][0] #VGG + for bund, bund_str in zip(bundle_paths, bundle_paths_str) + for img in image[ + bundle == bundle_unique[bundle_paths_str.index(bund_str)] + ] + ] + ) + return data + def open_boxes(self, var, i): + if isinstance(i, int): + i = torch.tensor([i]) + elif isinstance(i, np.ndarray): + i = torch.tensor(i) + indices, _ = torch.sort(i) + bundle = indices // self.num_img + image = indices - bundle * self.num_img + bundle_unique = torch.unique(bundle) + bundle_paths = [ + h5py.File(self.bundles[bundle], "r") for bundle in bundle_unique + ] + bundle_paths_str = list(map(str, bundle_paths)) + + data = [ + torch.FloatTensor(bund[var+str(int(img))][:]).to('cuda') + for bund, bund_str in zip(bundle_paths, bundle_paths_str) + for img in image[ + bundle == bundle_unique[bundle_paths_str.index(bund_str)] + ] + ] + return data + def open_labels(self, var, i): + if isinstance(i, 
int): + i = torch.tensor([i]) + elif isinstance(i, np.ndarray): + i = torch.tensor(i) + indices, _ = torch.sort(i) + bundle = indices // self.num_img + image = indices - bundle * self.num_img + bundle_unique = torch.unique(bundle) + bundle_paths = [ + h5py.File(self.bundles[bundle], "r") for bundle in bundle_unique + ] + bundle_paths_str = list(map(str, bundle_paths)) + + data = [ + torch.tensor(bund[var+str(int(img))][:]).long().squeeze(-1).to('cuda') + for bund, bund_str in zip(bundle_paths, bundle_paths_str) + for img in image[ + bundle == bundle_unique[bundle_paths_str.index(bund_str)] + ] + ] + return data + def collate_fn(self, batch): + + images = list() + bboxes = list() + labels = list() + + for b in batch: + images.append(b[0]) + bboxes.append(b[1]) + labels.append([b[2]]) + images = torch.stack(images, dim=0) + + return images, bboxes, labels +def main(): + + global start_epoch, epoch, checkpoint, decay_lr_at + + if checkpoint is None: + start_epoch = 0 + model = SSD300(nclasses = n_classes) + biases = list() + not_biases = list() + for param_name, param in model.named_parameters(): + if param.requires_grad: + if param_name.endswith('.bias'): + biases.append(param) + else: + not_biases.append(param) + optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}], + lr=lr, momentum=momentum, weight_decay=weight_decay) + + + + else: + checkpoint = torch.load(checkpoint) + start_epoch = checkpoint['epoch']+1 + print('loaded checkpoint') + model = checkpoint['model'] + optimizer = checkpoint['optimizer'] + model = model.to('cuda') + + loss_function = detectionLoss(priors_cxcy = model.priors_cxcy).to('cuda') + + + + train_dataset = detect_dataset(path) + train_loader = torch.utils.data.DataLoader(train_dataset, batch_size, + shuffle = True, + collate_fn = train_dataset.collate_fn) + + epochs = iterations//(len(train_dataset)//batch_size) + decay_lr_at = [it // (len(train_dataset)//batch_size) for it in decay_lr_at] + + for 
epoch in tqdm(range(start_epoch, epochs)): + + if epoch in decay_lr_at: + adjust_learning_rate(optimizer, decay_lr_to) + + train(train_loader, model, loss_function, optimizer, epoch) + + print("Epoch:", epoch) + + if epoch % 10 == 0: + save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300' + '_e' + str(epoch)+'.pth.tar') + save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300.pth.tar')# apparently not defined + + +def train(data_loader, model, loss_function, optimizer, epochs): + + model.train() + losses = np.zeros(939) + for i, (images, boxes, labels) in enumerate(data_loader): + images = images.to('cuda') + + predicted_locs, predicted_classes_scores= model(images) + loss = loss_function(predicted_locs, predicted_classes_scores, + boxes, labels) + + + + losses[i] = loss + print('i', i, 'Loss:',loss) + optimizer.zero_grad() + loss.backward() + + if grad_clip is not None: + clip_gradient(optimizer, grad_clip) + + optimizer.step() + + print('Average Loss', np.average(losses)) + f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/loss.txt', "a") + f.write(str(epochs) + '\t' + str(np.average(losses)) +'\n') + f.close() + del predicted_locs, predicted_classes_scores, images, boxes, labels + + +def save_checkpoint(epoch, model, optim, path): + state = {'epoch': epoch, + 'model': model, + 'optimizer': optim} + filename = path + torch.save(state, filename) + + +def adjust_learning_rate(optimizer, scale): + + for param_group in optimizer.param_groups: + param_group['lr'] = param_group['lr']*scale + print('Decaying learning rate') + + +def clip_gradient(optimizer, grad_clip): + """ + Clips gradients computed during backpropagation to avoid explosion of gradients. 
+ :param optimizer: optimizer with the gradients to be clipped + :param grad_clip: clip value + """ + for group in optimizer.param_groups: + for param in group['params']: + if param.grad is not None: + param.grad.data.clamp_(-grad_clip, grad_clip) diff --git a/source_detection/Testing.ipynb b/source_detection/Testing.ipynb deleted file mode 100644 index 7e44f46f..00000000 --- a/source_detection/Testing.ipynb +++ /dev/null @@ -1,227 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from tqdm import tqdm\n", - "from scipy.ndimage import gaussian_filter\n", - "from radionets.simulations.gaussians import create_grid, gauss_parameters, create_rot_mat, gaussian_component, add_gaussian\n", - "import matplotlib.pyplot as plt\n", - "from matplotlib.animation import FuncAnimation\n", - "from matplotlib.animation import PillowWriter" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "def toy_gaussian_source(grid,comps, amp, x, y, sig_x, sig_y, rot, step, sides):\n", - " \"\"\"\n", - " Creates random Gaussian source parameters and returns an image\n", - " of a Gaussian source.\n", - "\n", - " Parameters\n", - " ----------\n", - " grid: nd array\n", - " array holding 2d grid and axis for one image\n", - "\n", - " Returns\n", - " -------\n", - " s: 2darray\n", - " Image containing a simulated Gaussian source.\n", - " \"\"\"\n", - " s = toy_create_gaussian_source(\n", - " grid, comps, amp, x, y, sig_x, sig_y, rot, step, sides, blur=True\n", - " )\n", - " return s" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "def toy_create_gaussian_source(\n", - " grid, comps, amp, x, y, sig_x, sig_y, rot, step, sides, blur=True\n", - "):\n", - " \"\"\"\n", - " Combines Gaussian components on a 2d grid to create a Gaussian source\n", - "\n", - " takes grid\n", - " side: 
one-sided or two-sided\n", - " core dominated or lobe dominated\n", - " number of components\n", - " angle of the jet\n", - "\n", - " Parameters\n", - " ----------\n", - " grid: ndarray\n", - " 2dgrid + X and Y meshgrid\n", - " comps: int\n", - " number of components\n", - " amp: 1darray\n", - " amplitudes of components\n", - " x: 1darray\n", - " x positions of components\n", - " y: 1darray\n", - " y positions of components\n", - " sig_x: 1darray\n", - " standard deviations of components in x\n", - " sig_y: 1darray\n", - " standard deviations of components in y\n", - " rot: int\n", - " rotation of the jet in degree\n", - " sides: int\n", - " 0 one-sided, 1 two-sided jet\n", - " blur: bool\n", - " use Gaussian filter to blur image\n", - "\n", - " Returns\n", - " -------\n", - " source: 2darray\n", - " 2d grid containing Gaussian source\n", - "\n", - " Comments\n", - " --------\n", - " components should not have too big gaps between each other\n", - " \"\"\"\n", - " if sides == 1:\n", - " comps += comps - 1\n", - " amp = np.append(amp, amp[1:])\n", - " x = np.append(x, -x[1:])\n", - " y = np.append(y, -y[1:])\n", - " sig_x = np.append(sig_x, sig_x[1:])\n", - " sig_y = np.append(sig_y, sig_y[1:])\n", - "\n", - " for i in range(comps):\n", - " source = add_gaussian(\n", - " grid=grid,\n", - " amp=amp[i],\n", - " x=x[i]*step/4,\n", - " y=y[i]*step/4,\n", - " sig_x=sig_x[i],\n", - " sig_y=sig_y[i],\n", - " rot=rot,\n", - " )\n", - " if blur is True:\n", - " source = gaussian_filter(source, sigma=1.5)\n", - " return source\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "#%matplotlib notebook\n", - "from ipywidgets import interact, widgets" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "comps, amp, x, y, sig_x, sig_y, rot, sides = gauss_parameters()" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - 
{ - "data": { - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "6322ddaaa5674267b3b857669539ebb9", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "interactive(children=(IntSlider(value=1, description='n', max=20), Output()), _dom_classes=('widget-interact',…" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "#sides=1\n", - "#comps=5\n", - "\n", - "\n", - "\n", - "\n", - "def f(n):\n", - " a = create_grid(63,n+1)\n", - " step = n\n", - " sim_source = toy_gaussian_source(a[0],comps, amp, x, y, sig_x, sig_y, rot, step, sides)\n", - " fig.canvas.draw_idle()\n", - " plt.imshow(sim_source)\n", - " plt.show()\n", - " plt.pause(0.1)\n", - " \n", - "fig = plt.figure(figsize=(6, 4))\n", - "\n", - "\n", - "interact(f, n=widgets.IntSlider(min=0,max=20,step=1,value=1)) \n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/source_detection/evaluation.py b/source_detection/evaluation.py new file mode 100644 index 00000000..c1d89b0e --- /dev/null +++ b/source_detection/evaluation.py @@ -0,0 +1,76 @@ +# + +import train +import torch +from radionets.evaluation.utils import load_pretrained_model, eval_model +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.patches as patches +import h5py +import model +from 
model import center_to_boundary +from radionets.dl_framework.data import get_bundles +from tqdm import tqdm + +def box_coord(coord, img_size): + x = coord[0].item()*img_size + y = coord[3].item()*img_size + xmax = coord[2] + ymin = coord[1] + w = xmax.item()*img_size - x + h = -(y - ymin.item()*img_size) + return x,y,w,h + +def detect_sources(checkpoint_path, data_path, img_size, n = 0): + data = get_bundles(data_path) + eval_dataset = train.detect_dataset(data) + eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size = 32, + shuffle = False, + collate_fn = eval_dataset.collate_fn) + checkpoint = checkpoint_path + checkpoint = torch.load(checkpoint) + model = checkpoint['model'] + model = model.to('cuda') + model.eval() + #print(eval_dataset[31]) + with torch.no_grad(): + for i, (images, boxes, labels) in enumerate(tqdm(eval_loader)): + print(enumerate(eval_loader)) + images = images.to('cuda') + print(images.shape) + predicted_locs, predicted_scores = model(images) + predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, + min_score = 0.2, max_overlap = 0.45, top_k = 10) + fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,8)) + for j in range(len(eval_dataset[n][1][0])): + trux, truy, truw, truh = box_coord(eval_dataset[n][1][0][j],img_size) + trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor='g', facecolor='none', label = eval_dataset[n][2][0][j].item()) + ax1.add_patch(trurect) + print(preds[n]) + for k in range(len(predl[n])): + predx, predy, predw, predh = box_coord(predb[n][k],img_size) + predrect = patches.Rectangle((predx, predy), predw, predh, linewidth=1, edgecolor='r', facecolor='none', label = predl[n][k].item()) + ax2.add_patch(predrect) + + ax1.imshow(eval_dataset[n][0].squeeze(0)) + ax2.imshow(eval_dataset[n][0].squeeze(0)) + ax1.legend() + ax2.legend() + +def image_detection(checkpoint, image): + image = torch.FloatTensor(image).unsqueeze(0).unsqueeze(0) + 
checkpoint = torch.load(checkpoint) + model = checkpoint['model'] + model = model.to('cuda') + model.eval() + with torch.no_grad(): + image = image.to('cuda') + predicted_locs, predicted_scores = model(image) + predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, + min_score = 0.2, max_overlap = 0.1, top_k = 100) + return predb, predl + +def classifier_eval(arch, img_batch): + + pred = eval_model(img, arch) + _, l = torch.max(pred, dim = 1) + return l diff --git a/source_detection/loss.py b/source_detection/loss.py new file mode 100644 index 00000000..87350ef2 --- /dev/null +++ b/source_detection/loss.py @@ -0,0 +1,85 @@ +# --- +# jupyter: +# jupytext: +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.6.0 +# kernelspec: +# display_name: Python 3 +# language: python +# name: python3 +# --- + +# + +from torch import nn +import torch +import torch.nn.functional as F +from math import sqrt +from itertools import product as product +from source_detection.model import center_to_boundary, center_to_offset, boundary_to_center, jaccard,offset_to_center +import torchvision + +class detectionLoss(nn.Module): #0.6 #9 #20. 
+ def __init__(self, priors_cxcy, threshold = 0.5, neg_pos_ratio = 3, alpha = 1.): + + super(detectionLoss, self).__init__() + self.priors_cxcy = priors_cxcy + self.priors_xy = center_to_boundary(priors_cxcy) + self.threshold = threshold + self.neg_pos_ratio = neg_pos_ratio + self.alpha = alpha + + self.smooth_l1 = nn.SmoothL1Loss() + self.cross_entropy = nn.CrossEntropyLoss(reduce = False) + + def forward(self, predicted_locs, predicted_scores, data_locs, data_labels): + + batch_size = predicted_locs.size(0) + n_classes = predicted_scores.size(2) + n_priors = self.priors_cxcy.size(0) + assert n_priors == predicted_locs.size(1) == predicted_scores.size(1) + + true_locs = torch.zeros((batch_size, n_priors, 4), dtype = torch.float).to('cuda') + true_classes = torch.zeros((batch_size, n_priors), dtype = torch.long).to('cuda') + + for image_i in range(batch_size): + n_objects = data_locs[image_i][0].size(0) + overlap = jaccard(data_locs[image_i][0], self.priors_xy) #overlap of the boxes in this image with the priors + overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0) + #overlap has shape [a,s,f,g,....] + #[h,d,g,h,....].... each entry is the overlap of one true box with all the priors. + #each row describes one object. Max gives the maximum overlap value and the index of the object. + + _, prior_for_each_object = overlap.max(dim = 1) + + object_for_each_prior[prior_for_each_object] = torch.LongTensor(range(n_objects)).to('cuda') + + overlap_for_each_prior[prior_for_each_object] = 1. 
+ label_for_each_prior = data_labels[image_i][0][0][object_for_each_prior]#very ugly shapes watch out + label_for_each_prior[overlap_for_each_prior < self.threshold] = 4 #nodiff + true_classes[image_i] = label_for_each_prior + + true_locs[image_i] = center_to_offset(boundary_to_center(data_locs[image_i][0][object_for_each_prior]), self.priors_cxcy) + positive_priors = true_classes != 4 #nodiff + loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors]) + n_positives = positive_priors.sum(dim = 1) + n_hard_negatives = self.neg_pos_ratio * n_positives + conf_loss_all = self.cross_entropy(predicted_scores.view(-1, n_classes), true_classes.view(-1)) + conf_loss_all = conf_loss_all.view(batch_size, n_priors) + + conf_loss_pos = conf_loss_all[positive_priors] + + conf_loss_neg = conf_loss_all.clone() + conf_loss_neg[positive_priors] = 0 + conf_loss_neg, _ = conf_loss_neg.sort(dim = 1, descending = True) + hardness_ranks = torch.LongTensor(range(n_priors)).unsqueeze(0).expand_as(conf_loss_neg).to('cuda') + hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1) + conf_loss_hard_neg = conf_loss_neg[hard_negatives] + + conf_loss = (conf_loss_hard_neg.sum()+conf_loss_pos.sum())/n_positives.sum().float() + return conf_loss + self.alpha * loc_loss +# - + + diff --git a/source_detection/model.py b/source_detection/model.py new file mode 100644 index 00000000..6b09d974 --- /dev/null +++ b/source_detection/model.py @@ -0,0 +1,551 @@ + +import numpy as np + +import torch + +# + +from torch import nn +from math import sqrt +from radionets.evaluation.utils import load_pretrained_model +import torch.nn.functional as F + +import torch +import torchvision +import os + + +# - + +class base_maps(nn.Module): + + def __init__(self): + super(base_maps, self).__init__() + self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1) + self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1) + self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2) + + self.conv3 
= nn.Conv2d(64, 128, kernel_size=3, padding=1) + self.conv4 = nn.Conv2d(128, 128, kernel_size=3, padding=1) + self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2) + + self.conv5 = nn.Conv2d(128, 256, kernel_size=3, padding=1) + self.conv6 = nn.Conv2d(256, 256, kernel_size=3, padding=1) + self.conv7 = nn.Conv2d(256, 256, kernel_size=3, padding=1) + self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) + + self.conv8 = nn.Conv2d(256, 512, kernel_size=3, padding=1) + self.conv9 = nn.Conv2d(512, 512, kernel_size=3, padding=1) + self.conv10 = nn.Conv2d(512, 512, kernel_size=3, padding=1) + self.maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2) + + self.conv11 = nn.Conv2d(512, 512, kernel_size=3, padding=1) + self.conv12 = nn.Conv2d(512, 512, kernel_size=3, padding=1) + self.conv13 = nn.Conv2d(512, 512, kernel_size=3, padding=1) + self.maxpool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) + + self.conv14 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6) + + self.conv15 = nn.Conv2d(1024, 1024, kernel_size=1) + + self.load_arch() + def load_arch(self): + arch = load_pretrained_model('VGG', '//net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/build/VGG_test/temp_20.model', 300) + state_dict = self.state_dict() + param_names = list(state_dict.keys()) + pretrained_state_dict = arch.state_dict() + pretrained_param_names = list(pretrained_state_dict.keys()) + for i, param in enumerate(param_names): + state_dict[param] = pretrained_state_dict[pretrained_param_names[i]] + conv_fc1_weight = pretrained_state_dict['fc1.0.weight'].view(4096, 512, 9, 9) + conv_fc1_bias = pretrained_state_dict['fc1.0.bias'] + state_dict['conv14.weight'] = decimate(conv_fc1_weight, m = [4, None, 3, 3]) + state_dict['conv14.bias'] = decimate(conv_fc1_bias, m = [4]) + + conv_fc2_weight = pretrained_state_dict['fc2.0.weight'].view(4096, 4096, 1, 1) + conv_fc2_bias = pretrained_state_dict['fc2.0.bias'] + state_dict['conv15.weight'] = decimate(conv_fc2_weight, m = 
[4, 4, None, None]) + state_dict['conv15.bias'] = decimate(conv_fc2_bias, m = [4]) + + self.load_state_dict(state_dict) + print("\n arch loaded \n") + def forward(self, image): + out = F.relu(self.conv1(image)) + out = F.relu(self.conv2(out)) # (N, 64, 300, 300) + out = self.maxpool1(out) + + out = F.relu(self.conv3(out)) + out = F.relu(self.conv4(out)) # (N, 128, 150, 150) + out = self.maxpool2(out) + + out = F.relu(self.conv5(out)) + out = F.relu(self.conv6(out)) + out = F.relu(self.conv7(out)) + fmap7 = out #update # (N, 256, 75, 75) + out = self.maxpool3(out) + + out = F.relu(self.conv8(out)) + out = F.relu(self.conv9(out)) + out = F.relu(self.conv10(out)) + fmap10 = out # (N, 512, 38, 38) + out = self.maxpool4(out) + + out = F.relu(self.conv11(out)) + out = F.relu(self.conv12(out)) + out = F.relu(self.conv13(out)) + out = self.maxpool5(out) # (N, 512, 19, 19) + + out = F.relu(self.conv14(out)) + + fmap15 = F.relu(self.conv15(out)) # (N, 1024, 19, 19) + + + base_fmaps = {'fmap7': fmap7,'fmap10': fmap10, 'fmap15':fmap15} + return base_fmaps + + +class adv_maps(nn.Module): + def __init__(self): + super(adv_maps, self).__init__() + + + self.conv16 = nn.Conv2d(1024, 256, kernel_size=1, padding=0) + self.conv17 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1) + + self.conv18 = nn.Conv2d(512, 128, kernel_size=1, padding=0) + self.conv19 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1) + + self.conv20 = nn.Conv2d(256, 128, kernel_size=1, padding=0) + self.conv21 = nn.Conv2d(128, 256, kernel_size=3, padding=0) + + self.conv22 = nn.Conv2d(256, 128, kernel_size=1, padding=0) + self.conv23 = nn.Conv2d(128, 256, kernel_size=3, padding=0) + + self.init_conv2d() + + def init_conv2d(self): + + for c in self.children(): + if isinstance(c, nn.Conv2d): + nn.init.xavier_uniform_(c.weight) + nn.init.constant_(c.bias, 0.) 
+ + def forward(self, fmap15): + + out = F.relu(self.conv16(fmap15)) # (N, 256, 19, 19) + out = F.relu(self.conv17(out)) + fmap17 = out # (N, 512, 10, 10) + + out = F.relu(self.conv18(out)) + out = F.relu(self.conv19(out)) + fmap19 = out # (N, 256, 5, 5) + + out = F.relu(self.conv20(out)) + out = F.relu(self.conv21(out)) + fmap21 = out # (N, 256, 3, 3) + + out = F.relu(self.conv22(out)) + fmap23 = F.relu(self.conv23(out)) + + + fmaps = {'fmap17':fmap17, 'fmap19':fmap19, 'fmap21':fmap21, 'fmap23':fmap23} + return fmaps + + +# + +def create_prior_boxes(): + fmap_dims = { 'fmap7' : 75, #update + 'fmap10': 38, #was 38 with old 37 + 'fmap15': 19, #was 19 with old 18 + 'fmap17': 10, #was 10 with old 9 + 'fmap19': 5, + 'fmap21': 3, + 'fmap23': 1} + maps = list(fmap_dims.keys()) + + scales = { 'fmap7' : 0.02, + 'fmap10': 0.06, + 'fmap15': 0.11, + 'fmap17': 0.16, + 'fmap19': 0.2, + 'fmap21': 0.25, + 'fmap23': 0.3} + + + aspect_ratios = {'fmap7': [1.], + 'fmap10': [1.], + 'fmap15': [1.], + 'fmap17': [1.], + 'fmap19': [1.], + 'fmap21': [1.], + 'fmap23': [1.]} + priors = [] + for a, s in enumerate(maps): + for d in range(fmap_dims[s]): + for f in range(fmap_dims[s]): + x = (d + 0.5) / fmap_dims[s] + y = (f + 0.5) / fmap_dims[s] + + for ratio in aspect_ratios[s]: + priors.append([x, y, scales[s] * sqrt(ratio), scales[s] / sqrt(ratio)]) + + if ratio == 1.: + try: + additional_scale = sqrt(scales[s] * scales[maps[a+1]]) + + except IndexError: + additional_scale = 1. 
+ priors.append([x, y, additional_scale, additional_scale]) + priors = torch.FloatTensor(priors).to('cuda') + center_to_boundary(priors) + priors.clamp_(0, 1) + boundary_to_center(priors) + return priors + +class predconvs(nn.Module): + def __init__(self, nclasses): + + super(predconvs, self).__init__() + + self.nclasses = nclasses + + #n_boxes = {'fmap10': 4, + # 'fmap15': 6, + # 'fmap17': 6, + # 'fmap19': 6, + # 'fmap21': 4, + # 'fmap23': 4} + + n_boxes = { 'fmap7': 2, + 'fmap10': 2, + 'fmap15': 2, + 'fmap17': 2, + 'fmap19': 2, + 'fmap21': 2, + 'fmap23': 2} + self.loc_fmap7 = nn.Conv2d(256, n_boxes['fmap7'] * 4, kernel_size=3, padding=1) + self.loc_fmap10 = nn.Conv2d(512, n_boxes['fmap10'] * 4, kernel_size=3, padding=1) + self.loc_fmap15 = nn.Conv2d(1024, n_boxes['fmap15'] * 4, kernel_size=3, padding=1) + self.loc_fmap17 = nn.Conv2d(512, n_boxes['fmap17'] * 4, kernel_size=3, padding=1) + self.loc_fmap19 = nn.Conv2d(256, n_boxes['fmap19'] * 4, kernel_size=3, padding=1) + self.loc_fmap21 = nn.Conv2d(256, n_boxes['fmap21'] * 4, kernel_size=3, padding=1) + self.loc_fmap23 = nn.Conv2d(256, n_boxes['fmap23'] * 4, kernel_size=3, padding=1) + + self.cl_fmap7 = nn.Conv2d(256, n_boxes['fmap7'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap10 = nn.Conv2d(512, n_boxes['fmap10'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap15 = nn.Conv2d(1024, n_boxes['fmap15'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap17 = nn.Conv2d(512, n_boxes['fmap17'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap19 = nn.Conv2d(256, n_boxes['fmap19'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap21 = nn.Conv2d(256, n_boxes['fmap21'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap23 = nn.Conv2d(256, n_boxes['fmap23'] * nclasses, kernel_size=3, padding=1) + + self.init_conv2d() + + def init_conv2d(self): + for c in self.children(): + if isinstance(c, nn.Conv2d): + nn.init.xavier_uniform_(c.weight) + nn.init.constant_(c.bias, 0.) 
+ + def forward(self,fmap7, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23): + #stuff + batch_size = fmap10.size(0) + l_fmap10 = self.loc_fmap10(fmap10) + l_fmap10 = l_fmap10.permute(0,2,3,1).contiguous() + l_fmap10 = l_fmap10.view(batch_size,-1,4) + + l_fmap15 = self.loc_fmap15(fmap15) + l_fmap15 = l_fmap15.permute(0,2,3,1).contiguous() + l_fmap15 = l_fmap15.view(batch_size,-1,4) + + l_fmap17 = self.loc_fmap17(fmap17) + l_fmap17 = l_fmap17.permute(0,2,3,1).contiguous() + l_fmap17 = l_fmap17.view(batch_size,-1,4) + + l_fmap19 = self.loc_fmap19(fmap19) + l_fmap19 = l_fmap19.permute(0,2,3,1).contiguous() + l_fmap19 = l_fmap19.view(batch_size,-1,4) + + l_fmap21 = self.loc_fmap21(fmap21) + l_fmap21 = l_fmap21.permute(0,2,3,1).contiguous() + l_fmap21 = l_fmap21.view(batch_size,-1,4) + + l_fmap23 = self.loc_fmap23(fmap23) + l_fmap23 = l_fmap23.permute(0,2,3,1).contiguous() + l_fmap23 = l_fmap23.view(batch_size,-1,4) + + l_fmap7 = self.loc_fmap7(fmap7) + l_fmap7 = l_fmap7.permute(0,2,3,1).contiguous() + l_fmap7 = l_fmap7.view(batch_size,-1,4) + + c_fmap10 = self.cl_fmap10(fmap10) + c_fmap10 = c_fmap10.permute(0,2,3,1).contiguous() + c_fmap10 = c_fmap10.view(batch_size,-1,self.nclasses) + + c_fmap15 = self.cl_fmap15(fmap15) + c_fmap15 = c_fmap15.permute(0,2,3,1).contiguous() + c_fmap15 = c_fmap15.view(batch_size,-1,self.nclasses) + + c_fmap17 = self.cl_fmap17(fmap17) + c_fmap17 = c_fmap17.permute(0,2,3,1).contiguous() + c_fmap17 = c_fmap17.view(batch_size,-1,self.nclasses) + + c_fmap19 = self.cl_fmap19(fmap19) + c_fmap19 = c_fmap19.permute(0,2,3,1).contiguous() + c_fmap19 = c_fmap19.view(batch_size,-1,self.nclasses) + + c_fmap21 = self.cl_fmap21(fmap21) + c_fmap21 = c_fmap21.permute(0,2,3,1).contiguous() + c_fmap21 = c_fmap21.view(batch_size,-1,self.nclasses) + + c_fmap23 = self.cl_fmap23(fmap23) + c_fmap23 = c_fmap23.permute(0,2,3,1).contiguous() + c_fmap23 = c_fmap23.view(batch_size,-1,self.nclasses) + + c_fmap7 = self.cl_fmap7(fmap7) + c_fmap7 = 
c_fmap7.permute(0,2,3,1).contiguous() + c_fmap7 = c_fmap7.view(batch_size,-1,self.nclasses) + + locs = torch.cat([l_fmap7, l_fmap10, l_fmap15, l_fmap17, l_fmap19, l_fmap21, l_fmap23], dim = 1) + classes_scores = torch.cat([c_fmap7, c_fmap10, c_fmap15, c_fmap17, c_fmap19, c_fmap21, c_fmap23], dim = 1) + return locs, classes_scores + +class SSD300(nn.Module): + + def __init__(self, nclasses): + + super(SSD300, self).__init__() + + self.nclasses = nclasses + + self.base = base_maps() + self.adv = adv_maps() + self.pred_convs = predconvs(nclasses) + + self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1)) + self.rescale_factors7 = nn.Parameter(torch.FloatTensor(1, 256, 1, 1)) + nn.init.constant_(self.rescale_factors, 20) + nn.init.constant_(self.rescale_factors7, 20) + self.priors_cxcy = create_prior_boxes() + + + def forward(self, image): + + + + bmaps = self.base(image) + fmap10 = bmaps['fmap10'] #[256, 38, 38] + fmap15 = bmaps['fmap15'] + fmap7 = bmaps['fmap7'] + #fmap_13 = bmaps['fmap_13'] + norm7 = fmap7.pow(2).sum(dim=1, keepdim=True).sqrt() + norm = fmap10.pow(2).sum(dim=1, keepdim=True).sqrt() + fmap10 = fmap10 / norm + fmap10 = fmap10 * self.rescale_factors + + fmap7 = fmap7 / norm7 + fmap7 = fmap7 * self.rescale_factors7 + + amaps = self.adv(fmap15) + fmap17 = amaps['fmap17'] + fmap19 = amaps['fmap19'] + fmap21 = amaps['fmap21'] + fmap23 = amaps['fmap23'] + locs, classes_scores = self.pred_convs(fmap7, fmap10,fmap15,fmap17,fmap19,fmap21,fmap23) + + return locs, classes_scores + + def object_detection(self, locs, class_scores, priors, min_score=0.01, max_overlap=0.45,top_k=200): + + batch_size = locs.size(0) + n_priors = priors.size(0) + + classes_scores = F.softmax(class_scores, dim = 2) + all_predicted_boxes = list() + all_predicted_labels = list() + all_predicted_scores = list() + assert n_priors == locs.size(1) == classes_scores.size(1) + for i in range(batch_size): + boundary_locs = center_to_boundary(offset_to_center(locs[i], priors)) + 
predicted_boxes = list() + predicted_labels = list() + predicted_scores = list() + max_scores, best_label = classes_scores[i].max(dim=1) + for c in range(0, self.nclasses-1): + c_scores = classes_scores[i][:,c] + score_above_min = c_scores > min_score + n_above_min = score_above_min.sum().item() + if n_above_min == 0: + continue + c_scores = c_scores[score_above_min] + c_boundary_locs = boundary_locs[score_above_min] + + c_scores, sort_ind = c_scores.sort(dim = 0, descending = True) + c_boundary_locs = c_boundary_locs[sort_ind] + + overlap = jaccard(c_boundary_locs, c_boundary_locs) + suppress = torch.zeros((n_above_min)).bool().to('cuda') + + for box in range(c_boundary_locs.size(0)): + if suppress[box] == 1: + continue + + suppress = suppress | (overlap[box] > max_overlap) + + suppress[box] = 0 + + predicted_boxes.append(c_boundary_locs[~suppress]) + predicted_labels.append(torch.LongTensor((~suppress).sum().item()*[c]).to('cuda')) + predicted_scores.append(c_scores[~suppress]) + if len(predicted_boxes) == 0: + predicted_boxes.append(torch.FloatTensor([[0.,0.,1.,1.]]).to('cuda')) + predicted_labels.append(torch.LongTensor([4]).to('cuda')) + predicted_scores.append(torch.FloatTensor([0.]).to('cuda')) + + predicted_boxes = torch.cat(predicted_boxes, dim = 0) + predicted_labels = torch.cat(predicted_labels, dim = 0) + predicted_scores = torch.cat(predicted_scores, dim = 0) + num_objects = predicted_scores.size(0) + + if num_objects > top_k: + predicted_scores, sort_ind = predicted_scores.sort(dim = 0, descending = True) + predicted_scores = predicted_scores[:top_k] + predicted_boxes = predicted_boxes[:top_k] + predicted_labels = predicted_labels[:top_k] + + all_predicted_boxes.append(predicted_boxes) + all_predicted_labels.append(predicted_labels) + all_predicted_scores.append(predicted_scores) + + return all_predicted_boxes, all_predicted_labels, all_predicted_scores + +def center_to_boundary(coord): + return torch.cat([coord[:,:2]-(coord[:,2:]/2), + 
coord[:,:2]+(coord[:,2:]/2)], 1) +def boundary_to_center(coord): + return torch.cat([(coord[:,2:]+coord[:,:2])/2, + coord[:,2:]-coord[:,:2]], 1) + + #"The 10 and 5 below are referred to as 'variances' in the original Caffe repo, completely empirical + # They are for some sort of numerical conditioning, for 'scaling the localization gradient' + # See https://github.com/weiliu89/caffe/issues/155" +def offset_to_center(coded_box_coord, prior_coord): + return torch.cat([coded_box_coord[:,:2] * prior_coord[:,2:]/10 + prior_coord[:,:2], + torch.exp(coded_box_coord[:,2:]/5)*prior_coord[:,2:]],1) + +def center_to_offset(box_coord, prior_coord): + return torch.cat([(box_coord[:, :2] - prior_coord[:,:2])/(prior_coord[:,2:]/10), + torch.log(box_coord[:,2:] / prior_coord[:,2:])* 5], 1) + +def jaccard(boxes1, boxes2): + + low_bound = torch.max(boxes1[:,:2].unsqueeze(1), boxes2[:,:2].unsqueeze(0)) + up_bound = torch.min(boxes1[:,2:].unsqueeze(1), boxes2[:,2:].unsqueeze(0)) + + intersect_dims = torch.clamp(up_bound - low_bound, min = 0) + intersect = intersect_dims[:,:,0]*intersect_dims[:, :, 1] + + area_boxes1 = (boxes1[:,2]-boxes1[:,0]) * (boxes1[:,3] - boxes1[:,1]) + area_boxes2 = (boxes2[:,2]-boxes2[:,0]) * (boxes2[:,3] - boxes2[:,1]) + + union = area_boxes1.unsqueeze(1) +area_boxes2.unsqueeze(0) - intersect + + return intersect/union +def decimate(tensor, m): + assert tensor.dim() == len(m) + for d in range(tensor.dim()): + if m[d] is not None: + tensor = tensor.index_select(dim=d, + index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long()) + + return tensor + +class old_predconvs(nn.Module): + def __init__(self, nclasses): + #some changes + super(predconvs, self).__init__() + + self.nclasses = nclasses + + n_boxes = {'fmap10': 4, + 'fmap_10': 6, + 'fmap_13': 6, + 'fmap_15': 6, + 'fmap_17': 4, + 'fmap_19': 4} + + self.loc_fmap10 = nn.Conv2d(512, n_boxes['fmap_7'] * 4, kernel_size=3, padding=1) + self.loc_fmap_10 = nn.Conv2d(1024, n_boxes['fmap_10'] * 4, 
kernel_size=3, padding=1) + self.loc_fmap_13 = nn.Conv2d(512, n_boxes['fmap_13'] * 4, kernel_size=3, padding=1)#channels were different + self.loc_fmap_15 = nn.Conv2d(256, n_boxes['fmap_15'] * 4, kernel_size=3, padding=1)#same here + self.loc_fmap_17 = nn.Conv2d(256, n_boxes['fmap_17'] * 4, kernel_size=3, padding=1) + self.loc_fmap_19 = nn.Conv2d(256, n_boxes['fmap_19'] * 4, kernel_size=3, padding=1) + + self.cl_fmap_7 = nn.Conv2d(512, n_boxes['fmap_7'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap_10 = nn.Conv2d(1024, n_boxes['fmap_10'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap_13 = nn.Conv2d(512, n_boxes['fmap_13'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap_15 = nn.Conv2d(256, n_boxes['fmap_15'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap_17 = nn.Conv2d(256, n_boxes['fmap_17'] * nclasses, kernel_size=3, padding=1) + self.cl_fmap_19 = nn.Conv2d(256, n_boxes['fmap_19'] * nclasses, kernel_size=3, padding=1) + + self.init_conv2d() + + def init_conv2d(self): + for c in self.children(): + if isinstance(c, nn.Conv2d): + nn.init.xavier_uniform_(c.weight) + nn.init.constant_(c.bias, 0.) 
+ + def forward(self, fmap7, fmap10, fmap13, fmap15, fmap17, fmap19): + + batch_size = fmap7.size(0) + l_fmap7 = self.loc_fmap_7(fmap7) + l_fmap7 = l_fmap7.permute(0,2,3,1).contiguous() + l_fmap7 = l_fmap7.view(batch_size,-1,4) + + l_fmap10 = self.loc_fmap_10(fmap10) + l_fmap10 = l_fmap10.permute(0,2,3,1).contiguous() + l_fmap10 = l_fmap10.view(batch_size,-1,4) + + l_fmap13 = self.loc_fmap_13(fmap13) + l_fmap13 = l_fmap13.permute(0,2,3,1).contiguous() + l_fmap13 = l_fmap13.view(batch_size,-1,4) + + l_fmap15 = self.loc_fmap_15(fmap15) + l_fmap15 = l_fmap15.permute(0,2,3,1).contiguous() + l_fmap15 = l_fmap15.view(batch_size,-1,4) + + l_fmap17 = self.loc_fmap_17(fmap17) + l_fmap17 = l_fmap17.permute(0,2,3,1).contiguous() + l_fmap17 = l_fmap17.view(batch_size,-1,4) + + l_fmap19 = self.loc_fmap_19(fmap19) + l_fmap19 = l_fmap19.permute(0,2,3,1).contiguous() + l_fmap19 = l_fmap19.view(batch_size,-1,4) + + c_fmap7 = self.cl_fmap_7(fmap7) + c_fmap7 = c_fmap7.permute(0,2,3,1).contiguous() + c_fmap7 = c_fmap7.view(batch_size,-1,self.nclasses) + + c_fmap10 = self.cl_fmap_10(fmap10) + c_fmap10 = c_fmap10.permute(0,2,3,1).contiguous() + c_fmap10 = c_fmap10.view(batch_size,-1,self.nclasses) + + c_fmap13 = self.cl_fmap_13(fmap13) + c_fmap13 = c_fmap13.permute(0,2,3,1).contiguous() + c_fmap13 = c_fmap13.view(batch_size,-1,self.nclasses) + + c_fmap15 = self.cl_fmap_15(fmap15) + c_fmap15 = c_fmap15.permute(0,2,3,1).contiguous() + c_fmap15 = c_fmap15.view(batch_size,-1,self.nclasses) + + c_fmap17 = self.cl_fmap_17(fmap17) + c_fmap17 = c_fmap17.permute(0,2,3,1).contiguous() + c_fmap17 = c_fmap17.view(batch_size,-1,self.nclasses) + + c_fmap19 = self.cl_fmap_19(fmap19) + c_fmap19 = c_fmap19.permute(0,2,3,1).contiguous() + c_fmap19 = c_fmap19.view(batch_size,-1,self.nclasses) + + locs = torch.cat([l_fmap7, l_fmap10, l_fmap13, l_fmap15, l_fmap17, l_fmap19], dim = 1) + classes_scores = torch.cat([c_fmap7, c_fmap10, c_fmap13, c_fmap15, c_fmap17, c_fmap19], dim = 1) + return locs, 
classes_scores diff --git a/source_detection/source_data.py b/source_detection/source_data.py new file mode 100644 index 00000000..64a8319a --- /dev/null +++ b/source_detection/source_data.py @@ -0,0 +1,169 @@ +from radionets.simulations.gaussians import create_grid, create_gauss, create_diamond, create_square +from radionets.dl_framework.data import save_fft_pair, open_fft_pair +from scipy import ndimage + +import numpy as np +import matplotlib.pyplot as plt +from tqdm import tqdm +import torch +import h5py + +from torchvision import transforms +#om utils import * +from PIL import Image, ImageDraw, ImageFont + + +def detector_data(img_size, bundle_size, num_bundles,path): + for t in range(num_bundles): + with h5py.File(path+str(t)+'.h5', "w") as hf: + all_images = [] + all_bboxes = [] + all_labels = [] + for r in range(bundle_size): + grid = create_grid(img_size, 1) + num_objects = np.random.randint(1,4) + bboxes = np.zeros((num_objects,4)) + labels = np.zeros((num_objects,1)) + if num_objects == 0: + bboxes = np.zeros((1,4)) + labels = np.zeros((1,1)) + g = grid[:,0] + bboxes[0] = np.array([0, 0, 1, 1], dtype = float) + labels[0] = np.array([4]) + else: + for i in range(num_objects): + rand = np.random.randint(0,4) + if rand == 0: + g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True) + xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([0]) + bboxes[i] = bbox + labels[i] = label + elif rand == 1: + g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, True, True) + xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + xmax = 
(c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([1]) + bboxes[i] = bbox + labels[i] = label + elif rand == 2: + g,c,s = create_diamond(grid[:, 0], 1, 1, img_size,True) + xmin = (c[0]-2*s[0])/img_size + ymin = (c[1]-2*s[1])/img_size + xmax = (c[0]+2*s[0])/img_size + ymax = (c[1]+2*s[1])/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([2]) #nodiff + bboxes[i] = bbox + labels[i] = label + elif rand == 3: + g,c = create_square(grid[:, 0],1, 1, img_size, True) + xmin = (c[0]-(img_size/50+1))/img_size + ymin = (c[1]-(img_size/50+1))/img_size + xmax = (c[0]+(img_size/50+1))/img_size + ymax = (c[1]+(img_size/50+1))/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([3]) #nodiff + bboxes[i] = bbox + labels[i] = label + + hf.create_dataset('x'+str(r), data=g) + hf.create_dataset('y'+str(r), data=bboxes) + hf.create_dataset('z'+str(r), data=labels) + hf.close() + +def create_mosaic(img_size, bundle_size, num_bundles,path): + for t in range(num_bundles): + with h5py.File(path+str(t)+'.h5', "w") as hf: + all_images = [] + all_bboxes = [] + all_labels = [] + for r in range(bundle_size): + grid = create_grid(img_size*10, 1) + num_objects = np.random.randint(15,40) + bboxes = np.zeros((num_objects,4)) + labels = np.zeros((num_objects,1)) + if num_objects == 0: + bboxes = np.zeros((1,4)) + labels = np.zeros((1,1)) + g = grid[:,0] + bboxes[0] = np.array([0, 0, 1, 1], dtype = float) + labels[0] = np.array([4]) + else: + for i in range(num_objects): + rand = np.random.randint(0,4) + if rand == 0: + g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True, True) + xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymin = 
(c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([0]) + bboxes[i] = bbox + labels[i] = label + elif rand == 1: + g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, True, True, True) + xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([1]) + bboxes[i] = bbox + labels[i] = label + elif rand == 2: + g,c,s = create_diamond(grid[:, 0], 1, 1, img_size,True, True) + xmin = (c[0]-2*s[0])/img_size + ymin = (c[1]-2*s[1])/img_size + xmax = (c[0]+2*s[0])/img_size + ymax = (c[1]+2*s[1])/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([2]) #nodiff + bboxes[i] = bbox + labels[i] = label + elif rand == 3: + g,c = create_square(grid[:, 0],1, 1, img_size, True, True) + xmin = (c[0]-(img_size/50+1))/img_size + ymin = (c[1]-(img_size/50+1))/img_size + xmax = (c[0]+(img_size/50+1))/img_size + ymax = (c[1]+(img_size/50+1))/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([3]) #nodiff + bboxes[i] = bbox + labels[i] = label + + hf.create_dataset('x'+str(r), data=g) + hf.create_dataset('y'+str(r), data=bboxes) + hf.create_dataset('z'+str(r), data=labels) + hf.close() + + +def feature_data(num_gauss, num_diff, num_diamond, num_square, img_size, num_files, path): + for j in tqdm(range(num_files)): + with h5py.File(path+str(j)+'.h5', "w") as hf: + gauss_grid = create_grid(img_size, 
num_gauss) + diff_grid = create_grid(img_size, num_diff) + diamond_grid = create_grid(img_size, num_diamond) + square_grid = create_grid(img_size, num_square) + gaussians = create_gauss(gauss_grid[:, 0], num_gauss, 1, False,img_size, False) + y_gauss = np.array([0]*len(gaussians)) + diff = create_gauss(diff_grid[:, 0], num_diff, 1, False,img_size, True) + y_diff = np.array([1]*len(diff)) + diamonds = create_diamond(diamond_grid[:, 0], num_diamond, 1, img_size) + y_diamond = np.array([2]*len(diamonds)) + squares = create_square(square_grid[:, 0],num_square, 1, img_size) + y_square = np.array([3]*len(squares)) + arr = np.concatenate((gaussians, diff, diamonds, squares), axis=0) + keys = np.concatenate((y_gauss, y_diff, y_diamond, y_square), axis=0) + shuff = np.random.permutation(len(arr)) + hf.create_dataset('x', data=arr[shuff]) + hf.create_dataset('y', data=keys[shuff]) + hf.close() diff --git a/source_detection/source_utils.py b/source_detection/source_utils.py new file mode 100644 index 00000000..5ed45bf5 --- /dev/null +++ b/source_detection/source_utils.py @@ -0,0 +1,18 @@ +# + +import h5py +import numpy as np + +def open_detector_bundle(path): + bundle_x = [] + bundle_y = [] + bundle_z = [] + f = h5py.File(path, "r") + bundle_size = len(f)//3 + for i in range(bundle_size): + bundle_x_i = np.array(f["x"+str(i)]) + bundle_y_i = np.array(f["y"+str(i)]) + bundle_z_i = np.array(f["z"+str(i)]) + bundle_x.append(bundle_x_i) + bundle_y.append(bundle_y_i) + bundle_z.append(bundle_z_i) + return bundle_x, bundle_y, bundle_z diff --git a/source_detection/train.py b/source_detection/train.py new file mode 100644 index 00000000..721c6db3 --- /dev/null +++ b/source_detection/train.py @@ -0,0 +1,229 @@ +import torch +import h5py +import numpy as np +from radionets.dl_framework.data import get_bundles +from source_detection.loss import detectionLoss +from source_detection.model import SSD300 +from tqdm import tqdm +import matplotlib.pyplot as plt + +path = 
get_bundles('//net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/train/') +iterations = 180000 +n_classes = 5 #nodiff +#checkpoint = None +checkpoint = '/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300.pth.tar' +batch_size = 32 +workers = 4 +lr = 1e-4 +decay_lr_at = [80000,100000] +decay_lr_to = 0.1 +momentum = 0.9 +weight_decay = 5e-4 +grad_clip = None + +class detect_dataset: + + def __init__(self, bundle_path): + + self.bundles = bundle_path + self.num_img = len(h5py.File(self.bundles[0]))//3 + + def __getitem__(self, i): + + x = self.open_image('x', i) + y = self.open_boxes('y', i) + z = self.open_labels('z', i) + return x,y,z + def __len__(self): + return len(self.bundles)*self.num_img + def open_image(self, var, i): + if isinstance(i, int): + i = torch.tensor([i]) + elif isinstance(i, np.ndarray): + i = torch.tensor(i) + indices, _ = torch.sort(i) + bundle = indices // self.num_img + image = indices - bundle * self.num_img + bundle_unique = torch.unique(bundle) + + bundle_paths = [ + h5py.File(self.bundles[bundle], "r") for bundle in bundle_unique + ] + bundle_paths_str = list(map(str, bundle_paths)) + data = torch.FloatTensor( ### + [ + #print(bund[var+str(int(img))].shape) + bund[var+str(int(img))][0] #VGG + for bund, bund_str in zip(bundle_paths, bundle_paths_str) + for img in image[ + bundle == bundle_unique[bundle_paths_str.index(bund_str)] + ] + ] + ) + return data + def open_boxes(self, var, i): + if isinstance(i, int): + i = torch.tensor([i]) + elif isinstance(i, np.ndarray): + i = torch.tensor(i) + indices, _ = torch.sort(i) + bundle = indices // self.num_img + image = indices - bundle * self.num_img + bundle_unique = torch.unique(bundle) + bundle_paths = [ + h5py.File(self.bundles[bundle], "r") for bundle in bundle_unique + ] + bundle_paths_str = list(map(str, bundle_paths)) + + data = [ + torch.FloatTensor(bund[var+str(int(img))][:]).to('cuda') + for bund, bund_str in zip(bundle_paths, 
bundle_paths_str) + for img in image[ + bundle == bundle_unique[bundle_paths_str.index(bund_str)] + ] + ] + return data + def open_labels(self, var, i): + if isinstance(i, int): + i = torch.tensor([i]) + elif isinstance(i, np.ndarray): + i = torch.tensor(i) + indices, _ = torch.sort(i) + bundle = indices // self.num_img + image = indices - bundle * self.num_img + bundle_unique = torch.unique(bundle) + bundle_paths = [ + h5py.File(self.bundles[bundle], "r") for bundle in bundle_unique + ] + bundle_paths_str = list(map(str, bundle_paths)) + + data = [ + torch.tensor(bund[var+str(int(img))][:]).long().squeeze(-1).to('cuda') + for bund, bund_str in zip(bundle_paths, bundle_paths_str) + for img in image[ + bundle == bundle_unique[bundle_paths_str.index(bund_str)] + ] + ] + return data + def collate_fn(self, batch): + + images = list() + bboxes = list() + labels = list() + + for b in batch: + images.append(b[0]) + bboxes.append(b[1]) + labels.append([b[2]]) + images = torch.stack(images, dim=0) + + return images, bboxes, labels +def main(): + + global start_epoch, epoch, checkpoint, decay_lr_at + + if checkpoint is None: + start_epoch = 0 + model = SSD300(nclasses = n_classes) + biases = list() + not_biases = list() + for param_name, param in model.named_parameters(): + if param.requires_grad: + if param_name.endswith('.bias'): + biases.append(param) + else: + not_biases.append(param) + optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}], + lr=lr, momentum=momentum, weight_decay=weight_decay) + + + + else: + checkpoint = torch.load(checkpoint) + start_epoch = checkpoint['epoch']+1 + print('loaded checkpoint') + model = checkpoint['model'] + optimizer = checkpoint['optimizer'] + model = model.to('cuda') + + loss_function = detectionLoss(priors_cxcy = model.priors_cxcy).to('cuda') + + + + train_dataset = detect_dataset(path) + train_loader = torch.utils.data.DataLoader(train_dataset, batch_size, + shuffle = True, + collate_fn = 
train_dataset.collate_fn) + + epochs = iterations//(len(train_dataset)//batch_size) + decay_lr_at = [it // (len(train_dataset)//batch_size) for it in decay_lr_at] + + for epoch in tqdm(range(start_epoch, epochs)): + + if epoch in decay_lr_at: + adjust_learning_rate(optimizer, decay_lr_to) + + train(train_loader, model, loss_function, optimizer, epoch) + + print("Epoch:", epoch) + + if epoch % 10 == 0: + save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300' + '_e' + str(epoch)+'.pth.tar') + save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300.pth.tar')# apparently not defined + + +def train(data_loader, model, loss_function, optimizer, epochs): + + model.train() + losses = np.zeros(470) + for i, (images, boxes, labels) in enumerate(data_loader): + images = images.to('cuda') + + predicted_locs, predicted_classes_scores= model(images) + loss = loss_function(predicted_locs, predicted_classes_scores, + boxes, labels) + + + + losses[i] = loss + #print('i', i, 'Loss:',loss) + optimizer.zero_grad() + loss.backward() + + if grad_clip is not None: + clip_gradient(optimizer, grad_clip) + + optimizer.step() + + print('Average Loss', np.average(losses)) + f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/loss.txt', "a") + f.write(str(epochs) + '\t' + str(np.average(losses)) +'\n') + f.close() + del predicted_locs, predicted_classes_scores, images, boxes, labels + + +def save_checkpoint(epoch, model, optim, path): + state = {'epoch': epoch, + 'model': model, + 'optimizer': optim} + filename = path + torch.save(state, filename) + + +def adjust_learning_rate(optimizer, scale): + + for param_group in optimizer.param_groups: + param_group['lr'] = param_group['lr']*scale + print('Decaying learning rate') + + +def clip_gradient(optimizer, grad_clip): + """ + Clips gradients computed during 
backpropagation to avoid explosion of gradients. + :param optimizer: optimizer with the gradients to be clipped + :param grad_clip: clip value + """ + for group in optimizer.param_groups: + for param in group['params']: + if param.grad is not None: + param.grad.data.clamp_(-grad_clip, grad_clip) diff --git a/source_detection/yolo_data.ipynb b/source_detection/yolo_data.ipynb deleted file mode 100644 index 9787ed76..00000000 --- a/source_detection/yolo_data.ipynb +++ /dev/null @@ -1,263 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from tqdm import tqdm\n", - "from scipy.ndimage import gaussian_filter\n", - "from scipy import ndimage\n", - "from radionets.simulations.gaussians import create_grid, create_gaussian_source, add_gaussian, gaussian_component, create_rot_mat\n", - "import matplotlib.pyplot as plt" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "def gauss_paramters(comps, pixel):\n", - " \"\"\"\n", - " Generate a random set of Gaussian parameters.\n", - "\n", - " Parameters\n", - " ----------\n", - " None\n", - "\n", - " Returns\n", - " -------\n", - " comps: int\n", - " Number of components\n", - " amp: float\n", - " Amplitude of the core component\n", - " x: array\n", - " x positions of components\n", - " y: array\n", - " y positions of components\n", - " sig_x:\n", - " standard deviation in x\n", - " sig_y:\n", - " standard deviation in y\n", - " rot: int\n", - " rotation in degree\n", - " sides: int\n", - " 0 for one-sided and 1 for two-sided jets\n", - " \"\"\"\n", - " # random number of components between 4 and 9\n", - " # comps = np.random.randint(4, 7) # decrease for smaller images\n", - "\n", - " # start amplitude between 10 and 1e-3\n", - " amp_start = (np.random.randint(0, 100) * np.random.random()) / 10 ############ <-------100 -> 1000\n", - " # if start amp is 0, draw a new 
number\n", - " while amp_start == 0:\n", - " amp_start = (np.random.randint(0, 100) * np.random.random()) / 10\n", - " # logarithmic decrease to outer components\n", - " amp = np.array([(i+1)**2*amp_start / np.exp(i) for i in range(comps)]) ######!\n", - "\n", - " # linear distance bestween the components\n", - " x = np.arange(0, comps) * (pixel//(2*comps))###########<------War 5 statt 50\n", - " y = np.zeros(comps)\n", - "\n", - " # extension of components\n", - " # random start value between 1 - 0.375 and 1 - 0\n", - " # linear distance between components\n", - " # distances scaled by factor between 0.25 and 0.5\n", - " # randomnized for each sigma\n", - " off1 = (np.random.random() + 0.5) / 4\n", - " off2 = (np.random.random() + 0.5) / 4\n", - " fac1 = (np.random.random() + 1) / 4\n", - " fac2 = (np.random.random() + 1) / 4\n", - " sig_x =(np.arange(1, comps + 1) - off1) * fac1 *8\n", - " sig_y =(np.arange(1, comps + 1) - off2) * fac2 *8\n", - " \n", - "\n", - " return amp, x, y, sig_x, sig_y\n", - "\n", - "def gaussian_source(grid, rot, comps, sides):\n", - " \"\"\"\n", - " Creates random Gaussian source parameters and returns an image\n", - " of a Gaussian source.\n", - "\n", - " Parameters\n", - " ----------\n", - " grid: nd array\n", - " array holding 2d grid and axis for one image\n", - "\n", - " Returns\n", - " -------\n", - " s: 2darray\n", - " Image containing a simulated Gaussian source.\n", - " \"\"\"\n", - " pixel = grid.shape[1]\n", - " amp, x, y, sig_x, sig_y = gauss_paramters(comps, pixel)\n", - " rot = rot\n", - " s = create_gaussian_source(\n", - " grid, comps, amp, x, y, sig_x, sig_y, rot, sides, blur=True\n", - " )\n", - " return s\n", - "\n", - "\n", - "def create_mask(pixel, rot, comps, sides):\n", - " x = np.arange(0, comps) * (pixel//(2*comps))\n", - " y = np.zeros(comps)\n", - " if sides == 1:\n", - " comps += comps - 1\n", - " x = np.append(x, -x[1:])\n", - " y = np.append(y, -y[1:])\n", - " a = np.linspace(0, pixel - 1, num=pixel)\n", - 
" b = np.linspace(0, pixel - 1, num=pixel)\n", - " k, j = np.meshgrid(a, b)\n", - " mask = np.zeros(k.shape)\n", - " for i in range(comps):\n", - " cent = np.array([pixel // 2 + x[i], pixel // 2 + y[i]])\n", - " rot_mat = create_rot_mat(np.deg2rad(rot))\n", - " x_0, y_0 = ((cent - pixel // 2) @ rot_mat) + pixel // 2\n", - " x[i]= int(x_0)\n", - " y[i]= int(y_0)\n", - " if abs(x[i]) < pixel and abs(y[i]) < pixel:\n", - " mask[int(y_0),int(x_0)] = 1\n", - " return mask\n", - "def head(grid, bundle_size, pixel):\n", - " grid_copy = grid.copy()\n", - " grid_copy2 = grid.copy()\n", - " mask_bundle = grid_copy[:,0]\n", - " gauss_bundle = grid_copy2[:,0]\n", - " for i in range(bundle_size):\n", - " rot = np.random.randint(0, 360)\n", - " comps = np.random.randint(4, 7)\n", - " sides = np.random.randint(0, 2)\n", - " gaussian_image = gaussian_source(grid[i,:], rot, comps, sides)\n", - " gauss_bundle[i] += gaussian_image\n", - " mask= create_mask(pixel, rot, comps, sides)\n", - " mask_bundle[i]+=mask\n", - " pair = [gauss_bundle, mask_bundle]\n", - " return pair\n", - "def simulate_sources(\n", - " data_path,\n", - " num_bundles,\n", - " bundle_size,\n", - " img_size,\n", - " option\n", - "):\n", - " gaussians = np.zeros(num_bundles)\n", - " points = np.zeros(num_bundles)\n", - " for i in tqdm(range(num_bundles)):\n", - " grid = create_grid(img_size, bundle_size)\n", - " pairs = head(grid, bundle_size, img_size)\n", - " gaussian_bundle = pairs[0]\n", - " point_bundle = pairs [1]\n", - " return gaussian_bundle, point_bundle\n" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [], - "source": [ - "def yolo_data(path, mode, pixel, bundle_size, num_bundles):\n", - " images, pos = simulate_sources(path,num_bundles,bundle_size,pixel, mode)\n", - " for i in tqdm(range(bundle_size)):\n", - " cent = np.array([pixel / 2, pixel/ 2])\n", - " plt.axis('off')\n", - " plt.imshow(images[i])\n", - " plt.savefig(path + \"images/\" + mode + \"/image\" 
+ str(i) + \".jpg\",bbox_inches='tight', dpi = 200)\n", - " plt.cla()\n", - " occurences = np.where(pos[i] == pos[i].max())\n", - " comps = len(occurences[1])\n", - " with open(path + \"labels/\" + mode + \"/image\" + str(i) + \".txt\", \"w\") as file:\n", - " for j in range(comps):\n", - " x = occurences[0][j]\n", - " y = occurences[1][j] \n", - " x_0= x - cent[0]\n", - " y_0= y - cent[1]\n", - " x_rel = x/(pixel)\n", - " y_rel = y/(pixel)\n", - " width = 64/pixel\n", - " file.write(\"%d\\t\" %0 + \"%f\\t\" %y_rel + \"%f\\t\" %x_rel + \"%f\\t\" %width + \"%f\" %width+'\\n')\n", - " return occurences\n" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 1/1 [00:00<00:00, 1.53it/s]\n", - "100%|██████████| 4/4 [00:00<00:00, 4.62it/s]\n", - "100%|██████████| 1/1 [00:00<00:00, 4.51it/s]\n", - "100%|██████████| 2/2 [00:00<00:00, 4.40it/s]\n", - "100%|██████████| 1/1 [00:00<00:00, 12.97it/s]\n", - "100%|██████████| 1/1 [00:00<00:00, 6.18it/s]\n" - ] - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAQYAAAD8CAYAAACVSwr3AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Il7ecAAAACXBIWXMAAAsTAAALEwEAmpwYAAAMbElEQVR4nO3bcYikd33H8ffHXFOpjbGYFeTuNJFeqldbMF1Si1BTTMslhbs/LHIHobUED62RglJIsaQS/7JSC8K19kpDVDDx9I+y4EmgNiEQPM2GaPQuRNbTNhelOTXNP8HE0G//mEk72e/uzZO72Znb+n7BwjzP/Hbmu8PwvmeeeS5VhSRNetmiB5B08TEMkhrDIKkxDJIawyCpMQySmqlhSHJHkieTfHuT+5Pkk0nWkjyS5JrZjylpnoYcMdwJ7DvH/TcAe8Y/h4F/uPCxJC3S1DBU1f3AT86x5ADwmRo5AbwqyWtnNaCk+dsxg8fYCTw+sX1mvO+H6xcmOczoqIJXvOIVv/XGN75xBk8vaTMPPfTQj6pq6aX+3izCMFhVHQWOAiwvL9fq6uo8n176uZPk38/n92bxrcQTwO6J7V3jfZK2qVmEYQX44/G3E28Fnq6q9jFC0vYx9aNEkruA64ArkpwB/hr4BYCq+hRwHLgRWAOeAf50q4aVNB9Tw1BVh6bcX8D7ZzaRpIXzykdJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBknNoDAk2ZfksSRrSW7d4P7XJbk3ycNJHkly4+xHlTQvU8OQ5BLgCHADsBc4lGTvumV/BRyrqrcAB4G/n/WgkuZnyBHDtcBaVZ2uqueAu4ED69YU8Mrx7cuBH8xuREnzNiQMO4HHJ7bPjPdN+ghwU5IzwHHgAxs9UJLDSVaTrJ49e/Y8xpU0D7M6+XgIuLOqdgE3Ap9N0h67qo5W1XJVLS8tLc3oqSXN2pAwPAHsntjeNd436WbgGEBVfRV4OXDFLAaUNH9DwvAgsCfJVUkuZXRycWXdmv8A3gGQ5E2MwuBnBWmbmhqGqnoeuAW4B3iU0bcPJ5PcnmT/eNmHgPck+SZwF/DuqqqtGlrS1toxZFFVHWd0UnFy320Tt08Bb5vtaJIWxSsfJTWGQVJjGCQ1hkFSYxgkNYZBUmMYJDWGQVJjGCQ1hkFSYxgkNYZBUmMYJDWGQVJjGCQ1hkFSYxgkNYZBUmMYJDWGQVJjGCQ1hkFSYxgkNYZBUmMYJDWGQVJjGCQ1hkFSYxgkNYZBUmMYJDWGQVJjGCQ1hkFSMygMSfYleSzJWpJbN1nzriSnkpxM8rnZjilpnnZMW5DkEuAI8PvAGeDBJCtVdWpizR7gL4G3VdVTSV6zVQNL2npDjhiuBdaq6nRVPQfcDRxYt+Y9wJGqegqgqp6c7ZiS5mlIGHYCj09snxnvm3Q1cHWSB5KcSLJvowdKcjjJapLVs2fPnt/EkrbcrE4+7gD2ANcBh4B/SvKq9Yuq6mhVLVfV8tLS0oyeWtKsDQnDE8Duie1d432TzgArVfWzqvoe8B1GoZC0DQ0Jw4PAniRXJbkUOAisrFvzL4yOFkhyBaOPFqdnN6akeZoahqp6HrgFuAd4FDhWVSeT3J5k/3jZPcCPk5wC7gX+oqp+vFVDS9paqaqFPPHy8nKtrq4u5LmlnxdJHqqq5Zf6e175KKkxDJIawyCpMQySGsMgqTEMkhrDIKkxDJIawyCpMQySGsMgqTEMkhrDIKkxDJIawyCpMQySGsMgqTEMkhrDIKkxDJIawyCpMQySGsMgqTEMkhrDIKkxDJIawyCpMQySGsMgqTEMkhrDIKkxDJIawyCpMQySGsMgqRkUhiT7kjyWZC3JredY984klWR5diNKmre
pYUhyCXAEuAHYCxxKsneDdZcBfw58bdZDSpqvIUcM1wJrVXW6qp4D7gYObLDuo8DHgJ/OcD5JCzAkDDuBxye2z4z3/a8k1wC7q+pL53qgJIeTrCZZPXv27EseVtJ8XPDJxyQvAz4BfGja2qo6WlXLVbW8tLR0oU8taYsMCcMTwO6J7V3jfS+4DHgzcF+S7wNvBVY8ASltX0PC8CCwJ8lVSS4FDgIrL9xZVU9X1RVVdWVVXQmcAPZX1eqWTCxpy00NQ1U9D9wC3AM8ChyrqpNJbk+yf6sHlDR/O4YsqqrjwPF1+27bZO11Fz6WpEXyykdJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQYBkmNYZDUGAZJjWGQ1BgGSY1hkNQMCkOSfUkeS7KW5NYN7v9gklNJHknylSSvn/2okuZlahiSXAIcAW4A9gKHkuxdt+xhYLmqfhP4IvA3sx5U0vwMOWK4FlirqtNV9RxwN3BgckFV3VtVz4w3TwC7ZjumpHkaEoadwOMT22fG+zZzM/Dlje5IcjjJapLVs2fPDp9S0lzN9ORjkpuAZeDjG91fVUerarmqlpeWlmb51JJmaMeANU8Auye2d433vUiS64EPA2+vqmdnM56kRRhyxPAgsCfJVUkuBQ4CK5MLkrwF+Edgf1U9OfsxJc3T1DBU1fPALcA9wKPAsao6meT2JPvHyz4O/DLwhSTfSLKyycNJ2gaGfJSgqo4Dx9ftu23i9vUznkvSAnnlo6TGMEhqDIOkxjBIagyDpMYwSGoMg6TGMEhqDIOkxjBIagyDpMYwSGoMg6TGMEhqDIOkxjBIagyDpMYwSGoMg6TGMEhqDIOkxjBIagyDpMYwSGoMg6TGMEhqDIOkxjBIagyDpMYwSGoMg6TGMEhqDIOkxjBIagyDpGZQGJLsS/JYkrUkt25w/y8m+fz4/q8luXLmk0qam6lhSHIJcAS4AdgLHEqyd92ym4GnqupXgb8DPjbrQSXNz5AjhmuBtao6XVXPAXcDB9atOQB8enz7i8A7kmR2Y0qapx0D1uwEHp/YPgP89mZrqur5JE8DrwZ+NLkoyWHg8Hjz2STfPp+hF+QK1v09F7HtNCtsr3m306wAv3Y+vzQkDDNTVUeBowBJVqtqeZ7PfyG207zbaVbYXvNup1lhNO/5/N6QjxJPALsntneN9224JskO4HLgx+czkKTFGxKGB4E9Sa5KcilwEFhZt2YF+JPx7T8C/q2qanZjSpqnqR8lxucMbgHuAS4B7qiqk0luB1aragX4Z+CzSdaAnzCKxzRHL2DuRdhO826nWWF7zbudZoXznDf+wy5pPa98lNQYBknNlodhO11OPWDWDyY5leSRJF9J8vpFzDkxzznnnVj3ziSVZGFfsw2ZNcm7xq/vySSfm/eM62aZ9l54XZJ7kzw8fj/cuIg5x7PckeTJza4Lysgnx3/LI0mumfqgVbVlP4xOVn4XeANwKfBNYO+6NX8GfGp8+yDw+a2c6QJn/T3gl8a337eoWYfOO153GXA/cAJYvlhnBfYADwO/Mt5+zcX82jI6qfe+8e29wPcXOO/vAtcA397k/huBLwMB3gp8bdpjbvURw3a6nHrqrFV1b1U9M948weiajkUZ8toCfJTR/1356TyHW2fIrO8BjlTVUwBV9eScZ5w0ZN4CXjm+fTnwgznO9+JBqu5n9G3gZg4An6mRE8Crkrz2XI+51WHY6HLqnZutqarngRcup563IbNOuplRhRdl6rzjQ8bdVfWleQ62gSGv7dXA1UkeSHIiyb65TdcNmfcjwE1JzgDHgQ/MZ7Tz8lLf2/O9JPr/iyQ3AcvA2xc9y2aSvAz4BPDuBY8y1A5GHyeuY3Qkdn+S36iq/1rkUOdwCLizqv42ye8wuo7nzVX134sebBa
2+ohhO11OPWRWklwPfBjYX1XPzmm2jUyb9zLgzcB9Sb7P6LPlyoJOQA55bc8AK1X1s6r6HvAdRqFYhCHz3gwcA6iqrwIvZ/QfrC5Gg97bL7LFJ0V2AKeBq/i/kzi/vm7N+3nxycdjCzqBM2TWtzA6KbVnETO+1HnXrb+PxZ18HPLa7gM+Pb59BaND31dfxPN+GXj3+PabGJ1jyALfD1ey+cnHP+TFJx+/PvXx5jDwjYzq/13gw+N9tzP6FxdGpf0CsAZ8HXjDAl/cabP+K/CfwDfGPyuLmnXIvOvWLiwMA1/bMProcwr4FnDwYn5tGX0T8cA4Gt8A/mCBs94F/BD4GaMjr5uB9wLvnXhtj4z/lm8NeR94SbSkxisfJTWGQVJjGCQ1hkFSYxgkNYZBUmMYJDX/AwqkUdVj8DQ4AAAAAElFTkSuQmCC\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "for mode in [\"train\", \"valid\", \"test\"]:\n", - " bundle_size = 4\n", - " if mode == \"valid\":\n", - " bundle_size = bundle_size//2\n", - " if mode == \"test\":\n", - " bundle_size = bundle_size//4\n", - " yolo_data('./test_folder/data/', mode, 640, bundle_size, 1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/source_detection/yolo_model.yaml b/source_detection/yolo_model.yaml deleted file mode 100644 index 9656bfa6..00000000 --- a/source_detection/yolo_model.yaml +++ /dev/null @@ -1,50 +0,0 @@ -# parameters -# parameters -nc: 1 # number of classes -depth_multiple: 0.33 # model depth multiple -width_multiple: 0.50 # layer channel multiple - -# anchors -anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 - -# YOLOv5 backbone -backbone: - # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, BottleneckCSP, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, BottleneckCSP, [1024, False]], # 9 - ] - -# YOLOv5 head -head: - [[-1, 1, 
Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, BottleneckCSP, [512, False]], # 13 - - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) - - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) - - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) - - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) - ] - diff --git a/source_detection/yolo_settings.yaml b/source_detection/yolo_settings.yaml deleted file mode 100644 index 4e9a2ec5..00000000 --- a/source_detection/yolo_settings.yaml +++ /dev/null @@ -1,6 +0,0 @@ -train: //net/big-tank/POOL/users/pblomenkamp/radionets/yolo/data/multi/images/train - -val: //net/big-tank/POOL/users/pblomenkamp/radionets/yolo/data/multi/images/valid - -nc: 1 -names: ['gauss'] \ No newline at end of file From 0e00ad2e4c22ce1a78871ab306864e110245124a Mon Sep 17 00:00:00 2001 From: Paul Simon Blomenkamp Date: Tue, 30 Mar 2021 11:14:05 +0200 Subject: [PATCH 5/9] Added Mosaic Images and minor changes --- source_detection/FPN/FPNeval.py | 5 ++--- source_detection/evaluation.py | 6 +++--- source_detection/source_data.py | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/source_detection/FPN/FPNeval.py b/source_detection/FPN/FPNeval.py index 13185ca3..7764a2b0 100644 --- a/source_detection/FPN/FPNeval.py +++ b/source_detection/FPN/FPNeval.py @@ -62,11 +62,10 @@ def detect_sources(checkpoint_path, data_path, img_size, n = 0): ax1.imshow(eval_dataset[n][0].squeeze(0)) ax2.imshow(eval_dataset[n][0].squeeze(0)) - ax1.legend() - ax2.legend() -def image_detection(checkpoint, image): +def image_detection(checkpoint_path, image): image = 
torch.FloatTensor(image).unsqueeze(0).unsqueeze(0) + checkpoint = checkpoint_path checkpoint = torch.load(checkpoint) model = checkpoint['model'] model = model.to('cuda') diff --git a/source_detection/evaluation.py b/source_detection/evaluation.py index c1d89b0e..76993b83 100644 --- a/source_detection/evaluation.py +++ b/source_detection/evaluation.py @@ -1,13 +1,13 @@ # + -import train +import source_detection.train import torch from radionets.evaluation.utils import load_pretrained_model, eval_model import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches import h5py -import model -from model import center_to_boundary +import source_detection.model +from source_detection.model import center_to_boundary from radionets.dl_framework.data import get_bundles from tqdm import tqdm diff --git a/source_detection/source_data.py b/source_detection/source_data.py index 64a8319a..acafd46b 100644 --- a/source_detection/source_data.py +++ b/source_detection/source_data.py @@ -87,7 +87,7 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): all_labels = [] for r in range(bundle_size): grid = create_grid(img_size*10, 1) - num_objects = np.random.randint(15,40) + num_objects = np.random.randint(25,60) bboxes = np.zeros((num_objects,4)) labels = np.zeros((num_objects,1)) if num_objects == 0: From 64525b24082aeaec8f96931e37173deb362a39ff Mon Sep 17 00:00:00 2001 From: Paul Simon Blomenkamp Date: Tue, 11 May 2021 18:36:33 +0200 Subject: [PATCH 6/9] New Jets and Noise --- radionets/simulations/gaussians.py | 4 +- source_detection/FPN/FPN.py | 9 +- source_detection/FPN/FPNeval.py | 193 ++++++++++++++++- source_detection/FPN/FPNloss.py | 2 - source_detection/FPN/FPNtrain.py | 12 +- source_detection/evaluation.py | 180 ++++++++++++++-- source_detection/jets/jets.ipynb | 318 +++++++++++++++++++++++++++++ source_detection/model.py | 45 ++-- source_detection/source_data.py | 18 +- source_detection/train.py | 8 +- 10 files changed, 720 insertions(+), 
69 deletions(-) create mode 100644 source_detection/jets/jets.ipynb diff --git a/radionets/simulations/gaussians.py b/radionets/simulations/gaussians.py index b5b55ccb..174d1eac 100644 --- a/radionets/simulations/gaussians.py +++ b/radionets/simulations/gaussians.py @@ -353,8 +353,8 @@ def create_ext_gauss_bundle(grid): # pointlike gaussians -def create_gauss(img, N, sources, source_list, img_size, diffuse = False, bboxes = False, mosaic = False): - # img = [img] +def create_gauss(img, N, sources, source_list, img_size=63, diffuse = False, bboxes = False, mosaic = False): + mos = 1 if mosaic: mos = 10 diff --git a/source_detection/FPN/FPN.py b/source_detection/FPN/FPN.py index dcfa3e43..25a3110f 100644 --- a/source_detection/FPN/FPN.py +++ b/source_detection/FPN/FPN.py @@ -12,6 +12,7 @@ import torch import torchvision import os +import matplotlib.pyplot as plt # - @@ -97,8 +98,7 @@ def forward(self, image): out = F.relu(self.conv14(out)) fmap15 = F.relu(self.conv15(out)) # (N, 1024, 19, 19) - - + base_fmaps = {'fmap7': fmap7,'fmap10': fmap10, 'fmap15':fmap15} return base_fmaps @@ -151,7 +151,6 @@ def forward(self, fmap15): return fmaps -# + class feature_pyramid(nn.Module): def __init__(self): super(feature_pyramid, self).__init__() @@ -195,8 +194,8 @@ def forward(self,fmap7, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23): p7 = self.smooth7(p7) return p7, p10, p15, p17, p19, p21, p23 - - + + # + def create_prior_boxes(): diff --git a/source_detection/FPN/FPNeval.py b/source_detection/FPN/FPNeval.py index 7764a2b0..cdddb522 100644 --- a/source_detection/FPN/FPNeval.py +++ b/source_detection/FPN/FPNeval.py @@ -11,8 +11,10 @@ from radionets.dl_framework.data import get_bundles from tqdm import tqdm -label_map = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') -color_map = ('r', 'g', 'w', 'y','brown') +class_labels = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') +color_map = ('y', 'g', 'w', 'r','brown') 
+label_map = {k: v for v, k in enumerate(class_labels)} +rev_label_map = {v: k for k, v in label_map.items()} def box_coord(coord, img_size): x = coord[0].item()*img_size y = coord[3].item()*img_size @@ -41,10 +43,10 @@ def detect_sources(checkpoint_path, data_path, img_size, n = 0): print(images.shape) predicted_locs, predicted_scores = model(images) predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, - min_score = 0.5, max_overlap = 0.45, top_k = 10) + min_score = 0.5, max_overlap = 0.45, top_k = 100) fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,8)) for j in range(len(eval_dataset[n][1][0])): - true_label = label_map[eval_dataset[n][2][0][j].item()] + true_label = class_labels[eval_dataset[n][2][0][j].item()] color = color_map[eval_dataset[n][2][0][j].item()] trux, truy, truw, truh = box_coord(eval_dataset[n][1][0][j],img_size) trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor=color, facecolor='none') @@ -52,7 +54,7 @@ def detect_sources(checkpoint_path, data_path, img_size, n = 0): ax1.add_patch(trurect) for k in range(len(predl[n])): - predicted_label = label_map[predl[n][k].item()] + predicted_label = class_labels[predl[n][k].item()] color = color_map[predl[n][k].item()] predx, predy, predw, predh = box_coord(predb[n][k],img_size) predrect = patches.Rectangle((predx, predy), predw, predh, linewidth=1, edgecolor=color, @@ -61,9 +63,10 @@ def detect_sources(checkpoint_path, data_path, img_size, n = 0): ax2.add_patch(predrect) ax1.imshow(eval_dataset[n][0].squeeze(0)) - ax2.imshow(eval_dataset[n][0].squeeze(0)) - + img = ax2.imshow(eval_dataset[n][0].squeeze(0)) + fig.colorbar(img) def image_detection(checkpoint_path, image): + img_size = image.shape[0] image = torch.FloatTensor(image).unsqueeze(0).unsqueeze(0) checkpoint = checkpoint_path checkpoint = torch.load(checkpoint) @@ -74,7 +77,17 @@ def image_detection(checkpoint_path, image): image = image.to('cuda') predicted_locs, 
predicted_scores = model(image) predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, - min_score = 0.2, max_overlap = 0.1, top_k = 100) + min_score = 0.5, max_overlap = 0.2, top_k = 100) + #fig, ax1 = plt.subplots(1,1,figsize=(12,8)) + #for k in range(len(predl[0])): + # predicted_label = class_labels[predl[0][k].item()] + # color = color_map[predl[0][k].item()] + # predx, predy, predw, predh = box_coord(predb[0][k],img_size) + # predrect = patches.Rectangle((predx, predy), predw, predh, linewidth=1, edgecolor=color, + # facecolor='none') + # ax1.text(predx,(predy+predh-7),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) + # ax1.add_patch(predrect) + #ax1.imshow(image[0][0].cpu()) return predb, predl def classifier_eval(arch, img_batch): @@ -82,3 +95,167 @@ def classifier_eval(arch, img_batch): pred = eval_model(img, arch) _, l = torch.max(pred, dim = 1) return l + +def mAPeval(checkpoint_path, data_path, curve = False): + data = get_bundles(data_path) + eval_dataset = FPNtrain.detect_dataset(data) + eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size = 32, + shuffle = False, + collate_fn = eval_dataset.collate_fn) + checkpoint = checkpoint_path + checkpoint = torch.load(checkpoint) + model = checkpoint['model'] + model = model.to('cuda') + model.eval() + pred_boxes = list() + pred_labels = list() + pred_scores = list() + true_boxes = list() + true_labels = list() + + with torch.no_grad(): + for i, (images, boxes, labels) in enumerate(tqdm(eval_loader)): + images = images.to('cuda') + # boxes = boxes.to('cuda') + # scores = scores.to('cuda') + predicted_locs, predicted_scores = model(images) + predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, + min_score = 0.5, max_overlap = 0.45, top_k = 10) + boxes = [boxes[b][0] for b in range(len(boxes))] + labels = [labels[l][0][0] for l in range(len(labels))] + pred_boxes.extend(predb) + 
pred_labels.extend(predl) + pred_scores.extend(preds) + true_boxes.extend(boxes) + true_labels.extend(labels) + + if curve: + c = calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels, curve) + return c + else: + APs, mAP = calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels, curve) + print(APs) + print('\nMean Average Precision: %.3f' %mAP) + +def calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels, curve): + assert len(pred_boxes) == len(pred_labels) == len(pred_scores) == len(true_boxes) == len( + true_labels) + + n_classes = len(label_map) + + true_images = list() + for i in range(len(true_labels)): + true_images.extend([i] * len(true_labels[i])) + true_images = torch.LongTensor(true_images).to('cuda') + true_boxes = torch.cat(true_boxes, dim=0) + true_labels = torch.cat(true_labels, dim=0) + + pred_images = list() + for i in range(len(pred_labels)): + pred_images.extend([i] * pred_labels[i].size(0)) + pred_images = torch.LongTensor(pred_images).to('cuda') + pred_boxes = torch.cat(pred_boxes, dim=0) + pred_labels = torch.cat(pred_labels, dim=0) + pred_scores = torch.cat(pred_scores, dim=0) + + average_precisions = torch.zeros((n_classes - 1), dtype=torch.float) + + #BEWARE BELOW + for c in range(0, n_classes-1): + true_class_images = true_images[true_labels == c] + true_class_boxes = true_boxes[true_labels == c] + + # Keep track of which true objects with this class have already been 'detected' + # So far, none + true_class_boxes_detected = torch.zeros((true_class_boxes.size(0)), dtype=torch.uint8).to( + 'cuda') # (n_class_objects) + + # Extract only detections with this class + pred_class_images = pred_images[pred_labels == c] # (n_class_detections) + pred_class_boxes = pred_boxes[pred_labels == c] # (n_class_detections, 4) + pred_class_scores = pred_scores[pred_labels == c] # (n_class_detections) + n_class_detections = pred_class_boxes.size(0) + if n_class_detections == 0: + continue + + # 
Sort detections in decreasing order of confidence/scores + pred_class_scores, sort_ind = torch.sort(pred_class_scores, dim=0, descending=True) # (n_class_detections) + pred_class_images = pred_class_images[sort_ind] # (n_class_detections) + pred_class_boxes = pred_class_boxes[sort_ind] # (n_class_detections, 4) + + # In the order of decreasing scores, check if true or false positive + true_positives = torch.zeros((n_class_detections), dtype=torch.float).to('cuda') # (n_class_detections) + false_positives = torch.zeros((n_class_detections), dtype=torch.float).to('cuda') # (n_class_detections) + for d in range(n_class_detections): + this_detection_box = pred_class_boxes[d].unsqueeze(0) # (1, 4) + this_image = pred_class_images[d] # (), scalar + + # Find objects in the same image with this class, their difficulties, and whether they have been detected before + object_boxes = true_class_boxes[true_class_images == this_image] # (n_class_objects_in_img) + # If no such object in this image, then the detection is a false positive + if object_boxes.size(0) == 0: + false_positives[d] = 1 + continue + + # Find maximum overlap of this detection with objects in this image of this class + overlaps = FPN.jaccard(this_detection_box, object_boxes) # (1, n_class_objects_in_img) + max_overlap, ind = torch.max(overlaps.squeeze(0), dim=0) # (), () - scalars + + # 'ind' is the index of the object in these image-level tensors 'object_boxes', 'object_difficulties' + # In the original class-level tensors 'true_class_boxes', etc., 'ind' corresponds to object with index... 
+ original_ind = torch.LongTensor(range(true_class_boxes.size(0)))[true_class_images == this_image][ind] + # We need 'original_ind' to update 'true_class_boxes_detected' + + # If the maximum overlap is greater than the threshold of 0.5, it's a match + if max_overlap.item() > 0.5: + if true_class_boxes_detected[original_ind] == 0: + true_positives[d] = 1 + true_class_boxes_detected[original_ind] = 1 # this object has now been detected/accounted for + # Otherwise, it's a false positive (since this object is already accounted for) + else: + false_positives[d] = 1 + # Otherwise, the detection occurs in a different location than the actual object, and is a false positive + else: + false_positives[d] = 1 + + # Compute cumulative precision and recall at each detection in the order of decreasing scores + cumul_true_positives = torch.cumsum(true_positives, dim=0) # (n_class_detections) + cumul_false_positives = torch.cumsum(false_positives, dim=0) # (n_class_detections) + cumul_precision = cumul_true_positives / ( + cumul_true_positives + cumul_false_positives + 1e-10) # (n_class_detections) + cumul_recall = cumul_true_positives / len(true_class_images) # (n_class_detections) + if curve: + c_recall_thresholds = torch.arange(start=0, end=1.1, step=.01).tolist() # (11) + c_precisions = torch.zeros((len(c_recall_thresholds)), dtype=torch.float).to('cuda') # (11) + for i, t in enumerate(c_recall_thresholds): + c_recalls_above_t = cumul_recall >= t + if c_recalls_above_t.any(): + c_precisions[i] = cumul_precision[c_recalls_above_t].max() + else: + c_precisions[i] = 0. 
+ print(c) + return c_precisions + print(c) + # Find the mean of the maximum of the precisions corresponding to recalls above the threshold 't' + recall_thresholds = torch.arange(start=0, end=1.1, step=.1).tolist() # (11) + precisions = torch.zeros((len(recall_thresholds)), dtype=torch.float).to('cuda') # (11) + for i, t in enumerate(recall_thresholds): + recalls_above_t = cumul_recall >= t + if recalls_above_t.any(): + precisions[i] = cumul_precision[recalls_above_t].max() + else: + precisions[i] = 0. + average_precisions[c] = precisions.mean() # c is in [1, n_classes - 1] + print(precisions) + # Calculate Mean Average Precision (mAP) + + + mean_average_precision = average_precisions.mean().item() + + # Keep class-wise average precisions in a dictionary + average_precisions = {rev_label_map[c]: v for c, v in enumerate(average_precisions.tolist())} + + return average_precisions, mean_average_precision +# - + + diff --git a/source_detection/FPN/FPNloss.py b/source_detection/FPN/FPNloss.py index f76fd154..2f8a0c16 100644 --- a/source_detection/FPN/FPNloss.py +++ b/source_detection/FPN/FPNloss.py @@ -41,7 +41,6 @@ def forward(self, predicted_locs, predicted_scores, data_locs, data_labels): n_classes = predicted_scores.size(2) n_priors = self.priors_cxcy.size(0) assert n_priors == predicted_locs.size(1) == predicted_scores.size(1) - true_locs = torch.zeros((batch_size, n_priors, 4), dtype = torch.float).to('cuda') true_classes = torch.zeros((batch_size, n_priors), dtype = torch.long).to('cuda') @@ -78,7 +77,6 @@ def forward(self, predicted_locs, predicted_scores, data_locs, data_labels): hardness_ranks = torch.LongTensor(range(n_priors)).unsqueeze(0).expand_as(conf_loss_neg).to('cuda') hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1) conf_loss_hard_neg = conf_loss_neg[hard_negatives] - conf_loss = (conf_loss_hard_neg.sum()+conf_loss_pos.sum())/n_positives.sum().float() return conf_loss + self.alpha * loc_loss # - diff --git 
a/source_detection/FPN/FPNtrain.py b/source_detection/FPN/FPNtrain.py index 0791750c..7314f53b 100644 --- a/source_detection/FPN/FPNtrain.py +++ b/source_detection/FPN/FPNtrain.py @@ -9,14 +9,14 @@ import matplotlib.pyplot as plt path = get_bundles('//net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/train/') -iterations = 120000 +iterations = 240000 n_classes = 5 #nodiff -checkpoint = None -#checkpoint = '/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300.pth.tar' +#checkpoint = None +checkpoint = '/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300.pth.tar' batch_size = 32 workers = 4 lr = 1e-4 -decay_lr_at = [80000,100000] +decay_lr_at = [80000,120000] decay_lr_to = 0.1 momentum = 0.9 weight_decay = 5e-4 @@ -176,7 +176,7 @@ def main(): def train(data_loader, model, loss_function, optimizer, epochs): model.train() - losses = np.zeros(939) + losses = np.zeros(1877) for i, (images, boxes, labels) in enumerate(data_loader): images = images.to('cuda') @@ -187,7 +187,7 @@ def train(data_loader, model, loss_function, optimizer, epochs): losses[i] = loss - print('i', i, 'Loss:',loss) + #print('i', i, 'Loss:',loss) optimizer.zero_grad() loss.backward() diff --git a/source_detection/evaluation.py b/source_detection/evaluation.py index 76993b83..d2594de0 100644 --- a/source_detection/evaluation.py +++ b/source_detection/evaluation.py @@ -1,16 +1,20 @@ # + -import source_detection.train +import train import torch from radionets.evaluation.utils import load_pretrained_model, eval_model import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches import h5py -import source_detection.model +import model from source_detection.model import center_to_boundary from radionets.dl_framework.data import get_bundles from tqdm import tqdm +class_labels = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') +color_map = ('y', 'g', 'w', 
'r','brown') +label_map = {k: v for v, k in enumerate(class_labels)} +rev_label_map = {v: k for k, v in label_map.items()} def box_coord(coord, img_size): x = coord[0].item()*img_size y = coord[3].item()*img_size @@ -39,23 +43,28 @@ def detect_sources(checkpoint_path, data_path, img_size, n = 0): print(images.shape) predicted_locs, predicted_scores = model(images) predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, - min_score = 0.2, max_overlap = 0.45, top_k = 10) - fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,8)) + min_score = 0.5, max_overlap = 0.45, top_k = 10) + fig, (ax2) = plt.subplots(1,1,figsize=(12,8)) for j in range(len(eval_dataset[n][1][0])): + true_label = class_labels[eval_dataset[n][2][0][j].item()] + color = color_map[eval_dataset[n][2][0][j].item()] trux, truy, truw, truh = box_coord(eval_dataset[n][1][0][j],img_size) - trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor='g', facecolor='none', label = eval_dataset[n][2][0][j].item()) - ax1.add_patch(trurect) - print(preds[n]) + trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor=color, facecolor='none') + #ax1.text(trux,(truy+truh-7),true_label, color = 'k',fontsize=8,backgroundcolor = color) + #ax1.add_patch(trurect) + for k in range(len(predl[n])): + predicted_label = class_labels[predl[n][k].item()] + color = color_map[predl[n][k].item()] predx, predy, predw, predh = box_coord(predb[n][k],img_size) - predrect = patches.Rectangle((predx, predy), predw, predh, linewidth=1, edgecolor='r', facecolor='none', label = predl[n][k].item()) + predrect = patches.Rectangle((predx, predy), predw, predh, linewidth=1, edgecolor=color, + facecolor='none') + #ax2.text(predx,(predy+predh-7),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) ax2.add_patch(predrect) - - ax1.imshow(eval_dataset[n][0].squeeze(0)) - ax2.imshow(eval_dataset[n][0].squeeze(0)) - ax1.legend() - ax2.legend() + 
#ax1.imshow(eval_dataset[n][0].squeeze(0)) + img = ax2.imshow(eval_dataset[n][0].squeeze(0)) + fig.colorbar(img) def image_detection(checkpoint, image): image = torch.FloatTensor(image).unsqueeze(0).unsqueeze(0) checkpoint = torch.load(checkpoint) @@ -74,3 +83,148 @@ def classifier_eval(arch, img_batch): pred = eval_model(img, arch) _, l = torch.max(pred, dim = 1) return l + +def mAPeval(checkpoint_path, data_path): + data = get_bundles(data_path) + eval_dataset = train.detect_dataset(data) + eval_loader = torch.utils.data.DataLoader(eval_dataset, batch_size = 32, + shuffle = False, + collate_fn = eval_dataset.collate_fn) + checkpoint = checkpoint_path + checkpoint = torch.load(checkpoint) + model = checkpoint['model'] + model = model.to('cuda') + model.eval() + pred_boxes = list() + pred_labels = list() + pred_scores = list() + true_boxes = list() + true_labels = list() + + with torch.no_grad(): + for i, (images, boxes, labels) in enumerate(tqdm(eval_loader)): + images = images.to('cuda') + # boxes = boxes.to('cuda') + # scores = scores.to('cuda') + predicted_locs, predicted_scores = model(images) + predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, + min_score = 0.5, max_overlap = 0.45, top_k = 10) + boxes = [boxes[b][0] for b in range(len(boxes))] + labels = [labels[l][0][0] for l in range(len(labels))] + pred_boxes.extend(predb) + pred_labels.extend(predl) + pred_scores.extend(preds) + true_boxes.extend(boxes) + true_labels.extend(labels) + + APs, mAP = calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels) + print(APs) + print('\nMean Average Precision: %.3f' %mAP) + +def calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels): + assert len(pred_boxes) == len(pred_labels) == len(pred_scores) == len(true_boxes) == len( + true_labels) + + n_classes = len(label_map) + + true_images = list() + for i in range(len(true_labels)): + true_images.extend([i] * 
len(true_labels[i])) + true_images = torch.LongTensor(true_images).to('cuda') + true_boxes = torch.cat(true_boxes, dim=0) + true_labels = torch.cat(true_labels, dim=0) + + pred_images = list() + for i in range(len(pred_labels)): + pred_images.extend([i] * pred_labels[i].size(0)) + pred_images = torch.LongTensor(pred_images).to('cuda') + pred_boxes = torch.cat(pred_boxes, dim=0) + pred_labels = torch.cat(pred_labels, dim=0) + pred_scores = torch.cat(pred_scores, dim=0) + + average_precisions = torch.zeros((n_classes - 1), dtype=torch.float) + + #BEWARE BELOW + for c in range(0, n_classes-1): + print(c) + true_class_images = true_images[true_labels == c] + true_class_boxes = true_boxes[true_labels == c] + + # Keep track of which true objects with this class have already been 'detected' + # So far, none + true_class_boxes_detected = torch.zeros((true_class_boxes.size(0)), dtype=torch.uint8).to( + 'cuda') # (n_class_objects) + + # Extract only detections with this class + pred_class_images = pred_images[pred_labels == c] # (n_class_detections) + pred_class_boxes = pred_boxes[pred_labels == c] # (n_class_detections, 4) + pred_class_scores = pred_scores[pred_labels == c] # (n_class_detections) + n_class_detections = pred_class_boxes.size(0) + if n_class_detections == 0: + continue + + # Sort detections in decreasing order of confidence/scores + pred_class_scores, sort_ind = torch.sort(pred_class_scores, dim=0, descending=True) # (n_class_detections) + pred_class_images = pred_class_images[sort_ind] # (n_class_detections) + pred_class_boxes = pred_class_boxes[sort_ind] # (n_class_detections, 4) + + # In the order of decreasing scores, check if true or false positive + true_positives = torch.zeros((n_class_detections), dtype=torch.float).to('cuda') # (n_class_detections) + false_positives = torch.zeros((n_class_detections), dtype=torch.float).to('cuda') # (n_class_detections) + for d in range(n_class_detections): + this_detection_box = pred_class_boxes[d].unsqueeze(0) # 
(1, 4) + this_image = pred_class_images[d] # (), scalar + + # Find objects in the same image with this class, their difficulties, and whether they have been detected before + object_boxes = true_class_boxes[true_class_images == this_image] # (n_class_objects_in_img) + # If no such object in this image, then the detection is a false positive + if object_boxes.size(0) == 0: + false_positives[d] = 1 + continue + + # Find maximum overlap of this detection with objects in this image of this class + overlaps = model.jaccard(this_detection_box, object_boxes) # (1, n_class_objects_in_img) + max_overlap, ind = torch.max(overlaps.squeeze(0), dim=0) # (), () - scalars + + # 'ind' is the index of the object in these image-level tensors 'object_boxes', 'object_difficulties' + # In the original class-level tensors 'true_class_boxes', etc., 'ind' corresponds to object with index... + original_ind = torch.LongTensor(range(true_class_boxes.size(0)))[true_class_images == this_image][ind] + # We need 'original_ind' to update 'true_class_boxes_detected' + + # If the maximum overlap is greater than the threshold of 0.5, it's a match + if max_overlap.item() > 0.5: + if true_class_boxes_detected[original_ind] == 0: + true_positives[d] = 1 + true_class_boxes_detected[original_ind] = 1 # this object has now been detected/accounted for + # Otherwise, it's a false positive (since this object is already accounted for) + else: + false_positives[d] = 1 + # Otherwise, the detection occurs in a different location than the actual object, and is a false positive + else: + false_positives[d] = 1 + + # Compute cumulative precision and recall at each detection in the order of decreasing scores + cumul_true_positives = torch.cumsum(true_positives, dim=0) # (n_class_detections) + cumul_false_positives = torch.cumsum(false_positives, dim=0) # (n_class_detections) + cumul_precision = cumul_true_positives / ( + cumul_true_positives + cumul_false_positives + 1e-10) # (n_class_detections) + cumul_recall = 
cumul_true_positives / len(true_class_images) # (n_class_detections) + + # Find the mean of the maximum of the precisions corresponding to recalls above the threshold 't' + recall_thresholds = torch.arange(start=0, end=1.1, step=.1).tolist() # (11) + precisions = torch.zeros((len(recall_thresholds)), dtype=torch.float).to('cuda') # (11) + for i, t in enumerate(recall_thresholds): + recalls_above_t = cumul_recall >= t + if recalls_above_t.any(): + precisions[i] = cumul_precision[recalls_above_t].max() + else: + precisions[i] = 0. + average_precisions[c] = precisions.mean() # c is in [1, n_classes - 1] + + # Calculate Mean Average Precision (mAP) + mean_average_precision = average_precisions.mean().item() + + # Keep class-wise average precisions in a dictionary + average_precisions = {rev_label_map[c]: v for c, v in enumerate(average_precisions.tolist())} + + return average_precisions, mean_average_precision diff --git a/source_detection/jets/jets.ipynb b/source_detection/jets/jets.ipynb new file mode 100644 index 00000000..e9bc7c25 --- /dev/null +++ b/source_detection/jets/jets.ipynb @@ -0,0 +1,318 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from radionets.simulations.gaussians import create_grid, gaussian_source\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "from matplotlib.pyplot import figure" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(1, 3, 300, 300)\n" + ] + } + ], + "source": [ + "g = create_grid(300,1)\n", + "print(g.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def create_gauss(img, N, sources, source_list, img_size=63, diffuse = False, bboxes = False, mosaic = False):\n", + "\n", + " mos = 1\n", + " if mosaic:\n", + " mos = 10\n", + " mx = np.random.randint(1, 
img_size*mos, size=(N, sources))\n", + " my = np.random.randint(1, img_size*mos, size=(N, sources))\n", + "\n", + " if diffuse:\n", + " amp = (\n", + " np.random.randint(30, 40, size=(N))# * 1 / 10 * np.random.randint(3, 5)\n", + " ) #/ 1e2\n", + " sx = np.random.randint((img_size**2)/200, (img_size**2)/100, size=(N, sources))*10\n", + " sy = sx\n", + " else: \n", + " amp = (\n", + " np.random.randint(50, 100, size=(N))# * 1 / 10 * np.random.randint(5, 10)\n", + " )# / 1e2\n", + " sx = np.random.randint((img_size**2)/720, (img_size**2)/360, size=(N, sources))\n", + " sy = sx\n", + " # Doesnt work properly right now\n", + " #if spherical:\n", + " # sx = np.random.randint(3, 8, size=(N, sources))\n", + " # sy = sx\n", + " #else:\n", + " # sx = np.random.randint(1, 15, size=(N, sources))\n", + " # sy = np.random.randint(1, 15, size=(N, sources))\n", + " # theta = np.random.randint(0, 360, size=(N, sources))\n", + "\n", + " s = np.zeros((N, sources, 1)) # changed from 5\n", + " for i in range(N):\n", + " for j in range(sources):\n", + " g = gauss(img_size*mos, mx[i, j], my[i, j], sx[i, j], sy[i, j], amp[i]) #DPG\n", + " # s[i,j] = np.array([mx[i,j],my[i,j],sx[i,j],sy[i,j],amp[i]])\n", + " s[i, j] = np.array([mx[i, j]])\n", + " #if spherical:\n", + " img[i] += g\n", + " #else:\n", + " # # rotation around center of the source\n", + " # padX = [g.shape[0] - mx[i, j], mx[i, j]]\n", + " # padY = [g.shape[1] - my[i, j], my[i, j]]\n", + " # imgP = np.pad(g, [padY, padX], \"constant\")\n", + " # imgR = ndimage.rotate(imgP, theta[i, j], reshape=False)\n", + " # imgC = imgR[padY[0] : -padY[1], padX[0] : -padX[1]]\n", + " # img[i] += imgC\n", + " if source_list:\n", + " return img, s\n", + " elif bboxes:\n", + " return img/amp, [mx[0][0],my[0][0]], [sx[0][0],sy[0][0]] \n", + " else:\n", + " return img\n", + " \n", + "def gauss(img_size, mx, my, sx, sy, amp=0.01):\n", + " x = np.arange(img_size)[None].astype(np.float)\n", + " y = x.T\n", + " return amp * np.exp(-((y - my) ** 2) / 
sy).dot(np.exp(-((x - mx) ** 2) / sx))" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "a = create_gauss(g,1,1, False, img_size = 300)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQEAAAD8CAYAAAB3lxGOAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Il7ecAAAACXBIWXMAAAsTAAALEwEAmpwYAAATbUlEQVR4nO3dXYxcZ33H8e/Pu2t7/W7HYTG2SwK4qsKNidw0UiNELwpJbhxukKlUrArJXCQSSPTCwEVz26qAhEojGRFhKkoaCVB8QVuChURvCLGj4FdCDDjY1trGXmyv33a9u/9ePM9hjifjfZuZnSXP7yMdnTPPnJn5+yTnt895zjO7igjMrFxLel2AmfWWQ8CscA4Bs8I5BMwK5xAwK5xDwKxwXQsBSY9LekPSKUl7u/U5ZtYedWOegKQ+4FfA3wJngVeBT0bEiY5/mJm1pVs9gUeAUxHxm4gYB14Adnbps8ysDf1det/NwJna47PAX91rZ0metmh/clRb15cleelrWvqb1n1AXx8sWZJeKEFEWqYmYWoKJoDJvExw9+NqmQIir8nb1dLkUkTc39zYrRCYkaQ9wJ5efb7ZfFQnebWuTuoBYCmwHFgBrAZWAeuBDXl5F7ARuA/YuBrW3gdrNsLKNbB0MIXBxB0YuwU3rsK1y/CHC3B5HC4Bvwcuk7ZHgGt5uQHcyss4KSjukEKgCgnS47da/Zu6FQLngK21x1ty2x9FxD5gH7gnYH8a1OJxPRT687I0L4M0AmENKRDuA4bWwbveC+vvh1VDwDpgWXqjpZOw4gasH4Gr62HZIOh3wO3GyX07L+PAWF7qPY7JXNNkrb4qCFrpVgi8CmyT9CDp5N8F/F2XPsusZ6oAqHf5+0k9gkFgJalHsI7UG9i4Gu7fCu9+AAY2AUPAhvfBivthST9M3Ibr5+HSOdauSZcLMQXxO7g9nk7+m6Sf/rdJP/0H8jJO41KkCqjZ6EoIRMSEpGeA/yUdm+cj4ng3PsusF6qTrb5dhUG9R7CcRhCsJl0CbBiCgSFg81J4z1/Chg/Aio3QvzyFwLWzMHgSlh5h1SRsGIPbt+DqufReK/L7LqMRAH3M/eSvdG1MICJ+CPywW+9v1gvViVapDwhWA37V+EAVAtVlwaq1aQxg1RCwiRQAWx6FjX8Bg/fBwCCMjcLa98LSlekDbhxh7TW4fgVWDcOKqbsDoJ9GCDUv1eDg5Az/pp4NDJq9k9R7AtWAYfVTeinppB1clQYBWU26BNjwgcay6t2pJzA2CstWpx7BrSswcoq+NTcZXAWDq2H51fR+VQBUPY96b2SuHAJmHVYfLKzGCgaAgQHor0YMl69PlwArh1IArBxKPYGBlTA1ASvPw8qNsPJ+GHyLpcvz62l0/+uXAM0TfprDoBoobMUhYNaG+k/g5kG5t20vgf7qDO5fnpa+gbQeGEzrqYm8vQz6lud1mkOgJXfPTaiv2+EQMGvDFHdP1qm2o2l7gjTKP3En3QZkbDQtd26lddUDGM/t4zfgzvW8hsnJ9Prmz4taHfPlEDDroPpg3C
Tpvv4dUgjcvpkmAq0YBW5cSMu1s2kMYGqicYvw2tm0jJ6H68NwC8ZvpTsE1XtVk4CmmrbrddT1Yp6A2TtW8wlW/VSerK2rab7VhJ5bwK0baZR/7QgsGbkAg0fTXYDJ27Bq090hcOEoXP4ljExxeyS97taNxqzAce4OhPpU4eqEn23vwCFgNgdTNAbh6t3xehDcoXHyV5N7bgLXb8DoCFxdB+vPA/0n0ol/60oaBFR/CoTR8ykAzv8GLsLV36fXXacxPXiMxgzCKnRaBcBsgsAhYDYPU6TxveYuefUln2o6723SyXsNuAIsH4aly9JMwDUTwI0jsOEUDK5Pg4ATY+kSYGQKLsKl38HlYRi5BFeBURrfFaimDte/VHSPLw5NyyFgNk/VyTZJY85+fRzgFumEXU5j0lD/RP4uADA+BmuvwcDlm7DsZupiTKQX3r6SegCXh+H3Z9IXhq6QwqTqEYzRuCyo9wjqA5Kz4RAwm6Xg7bfkWg0EVtfs1dz+66TJQsvJlxLjEKcbYwQr16T5A3196S7A7Rtw81q6BBi5lAKgCoGr3H1ZUPUEpnj7nQOatu/FIWA2R9W4wBSNeQBVEFSXA7fzPs3z+qvr+LEJuDYMK4fTTMD+gfRV4qmp1EO4eSP1Iq6STv4rpCCoLglGaYTAGK17Ah4YNFsAzT2BSdKJWc3ou0kjBKpZe9WYwXXyl4FG0z5VmFSXEjdpjCdcpfWYQDVAONOYwHSXBg4BszmoLgla9Qaqy4HmXzxSndxTNC4XbpFO7kHSdwH6au9X9STGSCf89byM1h5Xz1eXA81zB+bSG3AImLWp+dt6zV8zrs8dqAKgGjCsvg1Y7wlUPYWqR1DtX+8BVOvmQcFWvYGZBggdAmZzdK/eQOVO7fFt7v6pXJ9DUH3duJ+7fwNQ837VT/36JUD99mDzhCFPFjJbYPVeQHVZMM7dtxCrsYDq14MtoxEA1aUATfvX7zRUgdA8W7C5F9Dq7sBMHAJm89DcG6jaWn1dd6r23B1S938sr5fQ6AlU79E89bg64atBwHpb/eSvz2CcC4eA2Ty1uiyof2+/3j2fJJ301Qlc/8Wgzb8PoB4aVXBUlwhVMLT66d/8jUJPFjJbANMFQf2krGYU1k/+5t83QG3/Vrcem7+gVP+V4vMNAHAImLWtVRBU7dAY9a9O2urXj9VvIbZ6z+qEn2m7nQAAh4BZR7QaI6hOyvo8geqkr/4mANzdC6g0T/udbt28/1w5BMw6pDkImkfpq5O//tO/HgBVWFRaBQEttuufPx8OAbMOqgcBvL1XUP/LQNX+rbQ6uZt/2nciAMAhYNZx9bGA5jCoB0B932bNE31a9RCme/1cOATMuqT+1eP6ids8MWg2pguFdjkEzLqofrK2CoR23q9THAJmC6RVIMzlNd3SVghIOk36huMkMBEROyRtAP4LeAA4DXwiIv7QXplm7ywLcXLPVqtblHP1NxGxPSJ25Md7gYMRsQ04mB+b2SLViRBothPYn7f3A0914TPMrEPaDYEAfiTpsKQ9uW0oIobz9nlgqM3PMLMuandg8LGIOCfpXcDLkn5ZfzIiQlLLy58cGntaPWdmC6etnkBEnMvri8APgEeAC5I2AeT1xXu8dl9E7KiNJZhZD8w7BCStlLS62gY+ChwDDgC78267gZfaLdLMuqedy4Eh4AeSqvf5z4j4H0mvAi9K+jTwFvCJ9ss0s25RRO/vWN5r3MDMOupwq8vvbtwiNLM/IQ4Bs8I5BMwK5xAwK5xDwKxwDgGzwjkEzArnEDArnEPArHAOAbPCOQTMCucQMCucQ8CscA4Bs8I5BMwK5xAwK5xDwKxwDgGzwjkEzArnEDArnEPArHAOAbPCOQTMCucQMCucQ8CscA4Bs8I5BMwK5xAwK9yMISDpeUkXJR2rtW2Q9LKkN/N6fW6XpK9JOiXpiKSHu1m8mbVvNj2BbwGPN7XtBQ5GxDbgYH4M8ASwLS97gOc6U6aZdcuMIRARPw
VGmpp3Avvz9n7gqVr7tyP5GbBO0qYO1WpmXTDfMYGhiBjO2+eBoby9GThT2+9sbnsbSXskHZJ0aJ41mFkH9Lf7BhERkmIer9sH7AOYz+vNrDPm2xO4UHXz8/pibj8HbK3ttyW3mdkiNd8QOADsztu7gZdq7Z/KdwkeBa7WLhvMbDGKiGkX4LvAMHCHdI3/aeA+0l2BN4EfAxvyvgK+DvwaOArsmOn98+vCixcvXV8OtTr/lE/CnvKYgNmCOBwRO5obPWPQrHAOAbPCOQTMCucQMCucQ8CscA4Bs8I5BMwK5xAwK5xDwKxwDgGzwjkEzArnEDArnEPArHAOAbPCOQTMCucQMCucQ8CscA4Bs8I5BMwK5xAwK5xDwKxwDgGzwjkEzArnEDArnEPArHAOAbPCOQTMCjdjCEh6XtJFScdqbc9KOifp9bw8WXvuC5JOSXpD0se6VbiZdcZsegLfAh5v0f7ViNielx8CSHoI2AV8ML/m3yX1dapYM+u8GUMgIn4KjMzy/XYCL0TEWET8FjgFPNJGfWbWZe2MCTwj6Ui+XFif2zYDZ2r7nM1tbyNpj6RDkg61UYOZtWm+IfAc8H5gOzAMfHmubxAR+yJiR6u/l25mC2deIRARFyJiMiKmgG/Q6PKfA7bWdt2S28xskZpXCEjaVHv4caC6c3AA2CVpmaQHgW3Az9sr0cy6qX+mHSR9F/gIsFHSWeCfgI9I2g4EcBr4DEBEHJf0InACmACejojJrlRuZh2hiOh1DUjqfRFm73yHW43BecagWeEcAmaFcwiYFc4hYFY4h4BZ4RwCZoVzCJgVziFgVjiHgFnhHAJmhXMImBXOIWBWOIeAWeEcAmaFcwiYFc4hYFY4h4BZ4RwCZoVzCJgVziFgVjiHgFnhHAJmhXMImBXOIWBWOIeAWeEcAmaFcwiYFW7GEJC0VdJPJJ2QdFzSZ3P7BkkvS3ozr9fndkn6mqRTko5Ierjb/wgzm7/Z9AQmgM9HxEPAo8DTkh4C9gIHI2IbcDA/BniC9CfJtwF7gOc6XrWZdcyMIRARwxHxWt4eBU4Cm4GdwP68237gqby9E/h2JD8D1kna1OnCzawz5jQmIOkB4EPAK8BQRAznp84DQ3l7M3Cm9rKzuc3MFqH+2e4oaRXwPeBzEXFN0h+fi4iQFHP5YEl7SJcLZtZDs+oJSBogBcB3IuL7uflC1c3P64u5/RywtfbyLbntLhGxLyJ2RMSO+RZvZu2bzd0BAd8ETkbEV2pPHQB25+3dwEu19k/luwSPAldrlw1mtsgoYvpevKTHgP8DjgJTufmLpHGBF4E/A94CPhERIzk0/g14HLgJ/ENEHJrhM+Z0KWFm83K4Vc97xhBYCA4BswXRMgQ8Y9CscA4Bs8I5BMwK5xAwK5xDwKxwDgGzwjkEzArnEDArnEPArHAOAbPCOQTMCucQMCucQ8CscA4Bs8I5BMwK5xAwK5xDwKxwDgGzwjkEzArnEDArnEPArHAOAbPCOQTMCucQMCucQ8CscA4Bs8I5BMwKN5u/SrxV0k8knZB0XNJnc/uzks5Jej0vT9Ze8wVJpyS9Ielj3fwHmFl7+mexzwTw+Yh4TdJq4LCkl/NzX42If63vLOkhYBfwQeA9wI8l/XlETHaycDPrjBl7AhExHBGv5e1R4CSweZqX7AReiIixiPgtcAp4pBPFmlnnzWlMQNIDwIeAV3LTM5KOSHpe0vrcthk4U3vZWaYPDTProVmHgKRVwPeAz0XENeA54P3AdmAY+PJcPljSHkmHJB2ay+vMrLNmFQKSBkgB8J2I+D5ARFyIiMmImAK+QaPLfw7YWnv5ltx2l4jYFxE7ImJHO/8AM2vPbO4OCPgmcDIivlJr31Tb7ePAsbx9ANglaZmkB4FtwM87V7KZddJs7g78NfD3wFFJr+e2LwKflLQdCOA08BmAiDgu6UXgBOnOwtO+M2C2eCkiel0DknpfhNk73+FWl9+eMWhWOIeAWeEcAmaFcw
iYFc4hYFY4h4BZ4RwCZoVzCJgVziFgVjiHgFnhHAJmhXMImBXOIWBWOIeAWeEcAmaFcwiYFc4hYFY4h4BZ4RwCZoVzCJgVziFgVjiHgFnhHAJmhXMImBXOIWBWOIeAWeEcAmaFcwiYFc4hYFY4h4BZ4RwCZoXr73UB2SXgRl4vFhtxPdNZbPXA4qtpsdXz3laNioiFLqQlSYciYkev66i4nukttnpg8dW02Oq5F18OmBXOIWBWuMUUAvt6XUAT1zO9xVYPLL6aFls9LS2aMQEz643F1BMwsx7oeQhIelzSG5JOSdrboxpOSzoq6XVJh3LbBkkvS3ozr9d3uYbnJV2UdKzW1rIGJV/Lx+yIpIcXqJ5nJZ3Lx+l1SU/WnvtCrucNSR/rQj1bJf1E0glJxyV9Nrf35BhNU0/PjtG8RUTPFqAP+DXwPmAp8AvgoR7UcRrY2NT2L8DevL0X+Ocu1/Bh4GHg2Ew1AE8C/w0IeBR4ZYHqeRb4xxb7PpT/2y0DHsz/Tfs6XM8m4OG8vRr4Vf7cnhyjaerp2TGa79LrnsAjwKmI+E1EjAMvADt7XFNlJ7A/b+8Hnurmh0XET4GRWdawE/h2JD8D1knatAD13MtO4IWIGIuI3wKnSP9tO1nPcES8lrdHgZPAZnp0jKap5166fozmq9chsBk4U3t8lukPZLcE8CNJhyXtyW1DETGct88DQz2o61419PK4PZO718/XLpEWtB5JDwAfAl5hERyjpnpgERyjueh1CCwWj0XEw8ATwNOSPlx/MlJ/rqe3URZDDcBzwPuB7cAw8OWFLkDSKuB7wOci4lr9uV4coxb19PwYzVWvQ+AcsLX2eEtuW1ARcS6vLwI/IHXTLlTdx7y+uNB1TVNDT45bRFyIiMmImAK+QaM7uyD1SBognXDfiYjv5+aeHaNW9fT6GM1Hr0PgVWCbpAclLQV2AQcWsgBJKyWtrraBjwLHch278267gZcWsq7sXjUcAD6VR8AfBa7WusRd03RN/XHScarq2SVpmaQHgW3Azzv82QK+CZyMiK/UnurJMbpXPb08RvPW65FJ0ijur0ijpV/qwee/jzRq+wvgeFUDcB9wEHgT+DGwoct1fJfUfbxDul789L1qII14fz0fs6PAjgWq5z/y5x0h/U+9qbb/l3I9bwBPdKGex0hd/SPA63l5slfHaJp6enaM5rt4xqBZ4Xp9OWBmPeYQMCucQ8CscA4Bs8I5BMwK5xAwK5xDwKxwDgGzwv0/wZNsuIAPDsoAAAAASUVORK5CYII=\n", + "text/plain": [ + "
def create_jet(img, bboxes=False):
    """Add a simulated, curved radio jet to ``img`` and return it.

    Draws 3-5 Gaussian components along a randomly rotated axis, optionally
    mirrors them about the image centre (two-sided jet), applies a random
    cumulative "curvature" offset to each component, and accumulates the
    components onto the input image in place.

    Parameters
    ----------
    img : ndarray
        Image stack; the spatial size is taken from ``img.shape[2]``, so the
        array is assumed to be at least 3-dimensional with square images in
        the last two axes (TODO confirm against callers). Mutated in place
        via ``img += g``.
    bboxes : bool, optional
        Currently unused; kept for interface compatibility.

    Returns
    -------
    ndarray
        The same array object as ``img`` with the jet components added.
    """
    img_size = img.shape[2]

    center = img_size // 2
    # BUG FIX: the original used np.random.randint(0, 2*np.pi), which
    # truncates the bound to 6 and draws an *integer* angle in {0,...,5}.
    # The rotation matrix below expects a continuous angle in radians.
    theta = np.random.uniform(0, 2 * np.pi)
    comps = np.random.randint(3, 6)

    coord = []
    x = np.zeros(comps)
    y = np.zeros(comps)
    amp = np.zeros(comps)
    sx = np.zeros(comps)
    sy = np.zeros(comps)
    # Standard 2d rotation matrix for the jet axis.
    rot_mat = np.array(
        [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]
    )
    base_amp = np.random.randint(50, 100)

    # NOTE(review): randint(1, 2) always returns 1, so the mirroring branch
    # below runs every time. Presumably randint(1, 3) was intended to choose
    # one- or two-sided jets at random — confirm before changing.
    sides = np.random.randint(1, 2)
    for i in range(comps):
        # Components spaced along the x-axis (pre-rotation) with small jitter.
        coord.append(
            np.array(
                [
                    2 * i * img_size * 0.05
                    + np.random.randint(-0.01 * img_size, 0.01 * img_size),
                    0,
                ]
            )
        )
        # BUG FIX: the original line was ``c[0][0] = 0`` which references a
        # name never defined in this function (it silently mutated a notebook
        # global ``c``). The intent is to pin the core component to the jet
        # origin, i.e. zero the first coordinate of the *first* component.
        coord[0][0] = 0
        x[i], y[i] = (coord[i] @ rot_mat) + center
        # Amplitude falls off with distance from the core.
        amp[i] = base_amp / (0.5 * i ** (1.09) + 1)
        # Component widths grow away from the core.
        sx[i] = np.random.randint((img_size ** 2) / 720, (img_size ** 2) / 360) * (
            0.5 * i + 1
        )
        sy[i] = np.random.randint((img_size ** 2) / 720, (img_size ** 2) / 360) * (
            0.5 * i + 1
        )
    if sides == 1:
        # Mirror all non-core components through the image centre to build
        # the counter-jet.
        print('ay caramba')
        comps += comps - 1
        amp = np.append(amp, amp[1:])
        x = np.append(x, -x[1:] + img_size)
        y = np.append(y, -y[1:] + img_size)
        sx = np.append(sx, sx[1:])
        sy = np.append(sy, sy[1:])
    print(x)
    x_curve = np.zeros(comps)
    y_curve = np.zeros(comps)
    for i in range(comps):
        if i != 0:
            # Cumulative random offsets bend the jet away from a straight line.
            x_curve[i] = np.random.randint(
                x_curve[i - 1], 0.05 * img_size + x_curve[i - 1]
            )
            y_curve[i] = np.random.randint(
                y_curve[i - 1], 0.05 * img_size + y_curve[i - 1]
            )
        x[i] += x_curve[i]
        y[i] += y_curve[i]
        # ``gauss`` is defined elsewhere in this notebook.
        g = gauss(img_size, x[i], y[i], sx[i], sy[i], amp[i])
        img += g
    return img
AghhBBCCCEkhcKREEIIIYQQQkgKhSMhhBBCCCGEkBQKR0IIIYQQQgghKRSOhBBCCCGEEEJSKBwJIYQQQgghhKRQOBJCCCGEEEIISaFwJIQQQgghhBCSQuFICCGEEEIIISSFwpEQQgghhBBCSAqFIyGEEEIIIYSQFApHQgghhBBCCCEpFI6EEEIIIYQQQlIoHAkhhBBCCCGEpFA4EkIIIYQQQghJoXAkhBBCCCGEEJJC4UgIIYQQQgghJIXCkRBCCCGEEEJICoUjIYQQQgghhJAUCkdCCCGEEEIIISkUjoQQQgghhBBCUigcCSGEEEIIIYSkUDgSQgghhBBCCEmhcCSEEEIIIYQQkkLhSAghhBBCCCEkhcKREEIIIYQQQkgKhSMhhBBCCCGEkBQKR0IIIYQQQgghKRSOhBBCCCGEEEJSKBwJIYQQQgghhKRQOBJCCCGEEEIISaFwJIQQQgghhBCSQuFICCGEEEIIISSFwpEQQgghhBBCSAqFIyGEEEIIIYSQFApHQgghhBBCCCEpFI6EEEIIIYQQQlIoHAkhhBBCCCGEpFA4EkIIIYQQQghJoXAkhBBCCCGEEJJC4UgIIYQQQgghJIXCkRBCCCGEEEJICoUjIYQQQgghhJAUCkdCCCGEEEIIISkUjoQQQgghhBBCUigcCSGEEEIIIYSkUDgSQgghhBBCCEmhcCSEEEIIIYQQkkLhSAghhBBCCCEkhcKREEIIIYQQQkgKhSMhhBBCCCGEkBQKR0IIIYQQQgghKRSOhBBCCCGEEEJSKBwJIYQQQgghhKRQOBJCCCGEEEIISaFwJIQQQgghhBCSQuFICCGEEEIIISSFwpEQQgghhBBCSAqFIyGEEEIIIYSQFApHQgghhBBCCCEpFI6EEEIIIYQQQlIoHAkhhBBCCCGEpFA4EkIIIYQQQghJoXAkhBBCCCGEEJJC4UgIIYQQQgghJIXCkRBCCCGEEEJICoUjIYQQQgghhJAUCkdCCCGEEEIIISkUjoQQQgghhBBCUigcCSGEEEIIIYSkUDgSQgghhBBCCEmhcCSEEEIIIYQQkkLhSAghhBBCCCEkhcKREEIIIYQQQkgKhSMhhBBCCCGEkBQKR0IIIYQQQgghKRSOhBBCCCGEEEJSKBwJIYQQQgghhKRQOBJCCCGEEEIISaFwJIQQQgghhBCSQuFICCGEEEIIISSFwpEQQgghhBBCSAqFIyGEEEIIIYSQFApHQgghhBBCCCEpFI6EEEIIIYQQQlIoHAkhhBBCCCGEpFA4EkIIIYQQQghJoXAkhBBCCCGEEJJC4UgIIYQQQgghJIXCkRBCCCGEEEJICoUjIYQQQgghhJAUCkdCCCGEEEIIISkUjoQQQgghhBBCUigcCSGEEEIIIYSkvCwpPAzD3xGR3xGRg4jsj8fjnx6G4R8Rkf+diPxjIvJ3ROTPH4/H31zWTUIIIYQQQgghj2INj+N/9Xg8/uPH4/FPnz//JRH568fj8U+JyF8/fyaEEEIIIYQQ8kHZIlT1z4nIXzlv/xUR+Sc3aIMQQgghhBBCyJ1YKhyPIvJ/HIbhbw7D8KvntD96PB7//nn7PxKRP+oVHIbhV4dh+PVhGH59YR8IIYQQQgghhGzIomccReS/cjwe/94wDP9pEfl3hmH4v9nM4/F4HIbh6BU8Ho+/JiK/JiIS2RBCCCGEEEIIeTyLPI7H4/Hvnf//QxH5N0Tkz4jIPxiG4Y+JiJz//8OlnSSEEEIIIYQQ8jhmC8dhGH45DMOv6LaI/NdF5G+JyF8Tkb9wNvsLIvJvLu0kIYQQQgghhJDHsSRU9Y+KyL8xDIPW868ej8f/wzAMf0NE/uowDH9RRP6uiPz55d0khBBCCCGEEPIohuPx8Y8X8hlHQgghhBBCCHk4f9O8ZvGGLV7HQQghhBBCCCHkE0HhSAghhBBCCCEkhcKREEIIIYQQQkgKhSMhhBBCCCGEkBQKR0IIIYQQQgghKRSOhBBCCCGEEEJSKBwJI
YQQQgghhKRQOBJCCCGEEEIISaFwJIQQQgghhBCSQuFICCGEEEIIISSFwpEQQgghhBBCSAqFIyGEEEIIIYSQFApHQgghhBBCCCEpFI6EEEIIIYQQQlIoHAkhhBBCCCGEpFA4EkIIIYQQQghJoXAkhBBCCCGEEJJC4UgIIYQQQgghJIXCkRBCCCGEEEJICoUjIYQQQgghhJAUCkdCCCGEEEIIISkUjoQQQgghhBBCUigcCSGEEEIIIYSkUDgSQgghhBBCCEmhcCSEEEIIIYQQkkLhSAghhBBCCCEkhcKREEIIIYQQQkgKhSMhhBBCCCGEkBQKR0IIIYQQQgghKRSOhBBCCCGEEEJSKBwJIYQQQgghhKRQOBJCCCGEEEIISaFwJIQQQgghhBCSQuFICCGEEEIIISSFwpEQQgghhBBCSAqFIyGEEEIIIYSQFApHQgghhBBCCCEpFI6EEEIIIYQQQlIoHAkhhBBCCCGEpFA4EkIIIYQQQghJoXAkhBBCCCGEEJJC4UgIIYQQQgghJIXCkRBCCCGEEEJICoUjIYQQQgghhJAUCkdCCCGEEEIIISkUjoQQQgghhBBCUigcCSGEEEIIIYSkUDgSQgghhBBCCEmhcCSEEEIIIYQQkkLhSAghhBBCCCEkhcKREEIIIYQQQkgKhSMhhBBCCCGEkBQKR0IIIYQQQgghKRSOhBBCCCGEEEJSKBwJIYQQQgghhKRQOBJCCCGEEEIISaFwJIQQQgghhBCSQuFICCGEEEIIISSFwpEQQgghhBBCSAqFIyGEEEIIIYSQFApHQgghhBBCCCEpFI6EEEIIIYQQQlIoHAkhhBBCCCGEpFA4EkIIIYQQQghJoXAkhBBCCCGEEJJC4UgIIYQQQgghJIXCkRBCCCGEEEJICoUjIYQQQgghhJAUCkdCCCGEEEIIISkUjoQQQgghhBBCUigcCSGEEEIIIYSkUDgSQgghhBBCCEmhcCSEEEIIIYQQkkLhSAghhBBCCCEkhcKREEIIIYQQQkgKhSMhhBBCCCGEkBQKR0IIIYQQQgghKRSOhBBCCCGEEEJSKBwJIYQQQgghhKRQOBJCCCGEEEIISaFwJIQQQgghhBCSQuFICCGEEEIIISSFwpEQQgghhBBCSAqFIyGEEEIIIYSQFApHQgghhBBCCCEpFI6EEEIIIYQQQlIoHAkhhBBCCCGEpFA4EkIIIYQQQghJoXAkhBBCCCGEEJJC4UgIIYQQQgghJIXCkRBCCCGEEEJICoUjIYQQQgghhJAUCkdCCCGEEEIIISkUjoQQQgghhBBCUigcCSGEEEIIIYSkUDgSQgghhBBCCEmhcCSEEEIIIYQQkkLhSAghhBBCCCEkhcKREEIIIYQQQkgKhSMhhBBCCCGEkBQKR0IIIYQQQgghKRSOhBBCCCGEEEJSKBwJIYQQQgghhKRQOBJCCCGEEEIISaFwJIQQQgghhBCSQuFICCGEEEIIISSFwpEQQgghhBBCSAqFIyGEEEIIIYSQFApHQgghhBBCCCEpFI6EEEIIIYQQQlIoHAkhhBBCCCGEpFA4EkIIIYQQQghJoXAkhBBCCCGEEJJC4UgIIYQQQgghJIXCkRBCCCGEEEJICoUjIYQQQgghhJAUCkdCCCGEEEIIISkUjoQQQgghhBBCUigcCSGEEEIIIYSkUDgSQgghhBBCCEmhcCSEEEIIIYQQkkLhSAghhBBCCCEkhcKREEIIIYQQQkgKhSMhhBBCCCGEkBQKR0IIIYQQQgghKRSOhBBCCCGEEEJSKBwJIYQQQgghhKRQOBJCCCGEEEIISaFwJIQQQgghhBCSQuFICCGEEEIIISSFwpEQQgghhBBCSAqFIyGEEEIIIYSQFApHQgghhBBCCCEpFI6EEEIIIYQQQlIoHAkhhBBCCCGEpFA4EkIIIYQQQghJaQrHYRj+8jAM/3AYhr9l0v6RYRj+nWEY/p/n/3/knD4Mw/C/Gobhbw/D8H8dhuG/tGXnCSGEEEIII
YRsT8Xj+C+LyD8BaX9JRP768Xj8UyLy18+fRUT+GyLyp85/vyoi/9I63SSEEEIIIYQQ8iiawvF4PP67IvL/heQ/JyJ/5bz9V0TknzTp/8rxxL8nIn94GIY/tlJfCSGEEEIIIYQ8gLnPOP7R4/H498/b/5GI/NHz9h8Xkf/Q2P3GOe0dwzD86jAMvz4Mw6/P7AMhhBBCCCGEkDvwsrSC4/F4HIbhOKPcr4nIr4mIzClPCCGEEEIIIeQ+zPU4/gMNQT3//4fn9L8nIn/S2P2JcxohpMGw8R8hhBBCCCFzmSsc/5qI/IXz9l8QkX/TpP8z59VV/8si8lsmpJWQ75pHCzsKSkIIIYQQMpdmqOowDP+aiPxZEflHh2H4DRH550Xkfyoif3UYhr8oIn9XRP782fzfEpH/poj8bRH5fRH572zQZ0Kejo8swFp9Zxw5IYQQQggZjsfHDwv5jCN5Zj6yKLwHvHgJIYQQQj4Nf/N4PP5pL2Px4jiEfEYoFuvosaKAJIQQQgj5vFA4ku8aCsT18I4lxSQhhBBCyOeAwpF8aigMHwvFJCGEEELI52DuqqqEPDVcMfR54fdCCCGEEPLxoMeRfGgoQj4m+L3RC0kIIYQQ8txQOJIPAQXi54ZCkhBCCCHkuWGoKnl6KBoJIYQQQgh5LPQ4kqeBApEo9EASQgghhDwXFI7kIVAkvmdr9/+0cf1bYs8XikhCCCGEkPtD4UjuzvcuGh8VH+61+xHF5CAUj4QQQggh94bCkWzK9yISP+rDwpV+P6O4pAeSEEIIIeS+UDiSVfmsQvGjCsM1iPb9WQQlRSQhhBBCyPZQOJJV+IyC8XsWixX0+DyLgCSEEEIIIdtB4Uhm8VmEIsXhcp7JI0nvIyGEEELINlA4kjIfWSxSIN4fe8wfKSIpIAkhhBBClkPhSEp8VNFIwfgcPDKslauwEkIIIYQsh8KRuHwkofjRxOGzHNtHiCn8ru4lJBnCSgghhBCyDApHcuFZBE2LZxeKH+U4Zv28l7h6RDgrPZCEEEIIIf1QOJIPIXSeWSx+hOPXyyM8dPcMZ+Xzj4QQQgghfVA4fqc8q9h5NoH4rMfpnrSOwdri655eSApIQgghhJAaFI7fIc8mhp5FLD7bcfkobOmdvJcXkuGrhBBCCCE5FI7fEc8mjJ5BMD7bMfnobCUi7yEgKR4JIYQQQmIoHD85zyKMHiUSn2H/n0EgV1hblOGxX0OUbR3GytBVQgghhBAfCsdPyjMIJpHHiKZH7vtHEYkeW78qY21RNsp2HkgKSEIIIYSQWygcPyGPFo3fg1j8yAKxirePawi1NcNZtw5hZfgqIYQQQsgJCsdPwqPE4r0F1L3283sQhnOIjstc4baWiNwyhJXeR0IIIYQQCscPz/cQlvm9iMUt93Nr0bOG528tgbZVCCu9j4QQQgj5nqFw/MA8QjR+FrH4WfajSqUfz7J4zRoCcqsQVnofCSGEEPK9QuH4wfiMYnGrffqo/X4Urf3pFUtLF9tZI4x1SwFJ8UgIIYSQ7wkKRxLyUYXXlv3+bGKxh6XetiUibqlQ2yJ8ld5HQgghhHxPUDh+ED7Dc35b7MNW/b3H8X7UM5VLBZR3bHrE09xw1qUeSHofCSGEEELmQ+H4xHx0sbh2/9fs50f0dq5FpY9LwkqViph6hIjcQkDS+0gIIYSQzw6F43fOFkLnmQWjyPP37xlYa5GbXk/kVu9jfIb2CCGEEEI+MhSOT8hHDJNcs89r9W2tPt1DGG79na/1jkSlKrh6PYNzBOszeR/Xei8lIYQQQsizQeH4RHy0V1Cs0d9nEokfIRR2LtX+VMXOHDGJfWi1tUREPlpAal8oHgkhhBDyWfiMUXbEYW1R9CxCbY2+jCv35dlEYw9L9qH3OPa00fv9zP0OnnFyhRBCCCHkGaDH8QnYcnD5mTx6awjER7Yfca/ZmzUWvBFZ11PYE9rZ64Fc4n3kwjmEEEIIIbdQOD6QZxcijxRqS9p+tEB8V
jf+Wqup9qygim1m9c8RkVsJyDUWB1qjH4QQQgghz8KzjnE/Pc8ewrZUuH1E0bg0zHStkNdHMncfqsetWvfa9fXUSQghhBBC3kOP4wPYYgC71jN6927/I7X5LJ7cNegJC0UiD1x18ZuqF7LqgZwTGvtI7yNXXiWEEELIR4TC8Y48o2D8KMLtI7Q1t71H0OpnVaiJrCckK2GsFcG79fOPUmyjAsNXCSGEEPJRoHD8wDwqLPLZPX339ig+24TAml6xnncvVrx9LUG6tI6e/lTruwfP0g9CCCGEkAgKx415NlEhcj8vXG87n6WNpe0tZW6bntDqWVm14olseSF766h4Rj9C+OrcfhBCCCGE3AsKxw/GPcMon1HIPVv9c9pYs+0Wc8WQsiQMtSKuWmKpJf4qYmtLAan18/UdhBBCCPnsUDhuyNpC4JlF4zPV/+xi8Z7PQc59F6NS9axVBGCr/JIw1jVDPefUtbZ4nNsPQgghhJCtoHDcgGcRjB9VzH20envrXrPdjOo7Ez0qoaheG5knslW+FYLaErFV72VUvqeurP4tvI8KhSQhhBBCHgWF48qsKRqfUTBuVfcj39v37B7KuVTarIaiiiwTc1n5NcvZsmuFrz565VULw1gJIYQQ8igoHFfko7yKwfJoIfbZ6pxT9yOpvlpDZLvnESvlMtHaWw7r+CjPPloYxkoIIYSQe0PhuBKPDlXcIgxz7TofUd+jBWdPvY+iFU6qqAhqeSPnhqRm3svMAzm3HJavCsisrqjuSv29VL3ChBBCCCFrQOG4Ao8UBh8hdPTedX2E8Nil7VSpvssQ6Q1LXRqS2uOFzLxtS0JYt37/45YeSGWOuCWEEEIIqUDhuJBHPtO4hWh8RpH3rGLxI4SvRm0u9a5VROQaYrBXcLbaydpqtdlbT1S3FOtfChfWIYQQQsiaUDgu4FHhqc8a8rlGPffqyyNCa+fWvTZH6Vs4p/VKj+g5yUi4VOx7Q1jXEreVsl49WV1L6l+TyjlHcUkIIYSQCArHmVA09tVzjzruJVwr9fTWd0+qnqi5r7+IvGo99mvV3aK6AutWHkitX4ptbA2fmySEEEJIBIXjDNYQA3PCFp8tjPQedWxd/l51zK13TZa8emPO841RmUwU9ojN3pBX7EelXLW8rUcadS1t497MeecnIYQQQj4fFI6dPEI03lMwbinUtmz7Gfa92o/eOtegGp4qchIuWXhqtmpqJTw1s+0JY20JU2urPPMKrNiG8mxCEmEILCGEEPJ9QOF4Z7byMm0tnB4p+p6139U6euraip4QxNYiNl7ZnhDSntVFIy/kGiGsrRVY1whfrdTV4h6rsW4NV3slhBBCPj4UjkWe1dP4aE/bXNH2KKG5RGQubXtOfWuSedgs0SI2to4ez6K1r3ogvZDXueGuat8bwrr26zuyulp8NC9kRuu8I4QQQshzQuFY4N6ica3Q1HuLr7kCdYtyjxC01Tp661uDnpVU54SpVsNOW3a9oalrh7BmAtIrl7XnsdYrMua+ZuWZya4FikpCCCHk8VA43oFnE41Lyj2DiLu3QF1atlrHlvSupLr0XYtROGlL2ETexepCOj0hrHMW0WnRG1a6NIzVa186+/ARWOqxJYQQQshyKBwTntHTuESgzBWbcwTVM5fJyt3jmchKPVsx992MWjZ6VnKuN7Aanho9K2ltIju1nfMOyCUL6HjlPbZ4/q91fn1UYckwV0IIIeRxUDgGLBWN9xaMa3v81vQs3kMorrkfW5TrqWNLdpIPsjNBURWT2bORiic25wrNe4SwYplWuUp5j7VCWVvMnbh4VsHJV4YQQggh20Ph+MlZuhjM0nYeKQDvIWQrZSvll5btGRgvedZvbshoqy6sp9LW0hDWnnDcFpVyc8JI1w5lXUplUuAZ4aquhBBCyHIoHB0+krfxHsLpEfbP1JdWmVa5HpvetluhpxE6eO49Pp5IqCyCk3kF53gX54awZu1ltpl9qxyy1AtZaeOeVBdde
hYe6fF/lu+MEEIImQOFI7DF4H5uO88oGO8t/raynWOflVkjhHVOmSWLofSWqayUWnneMROIUR1LRKbth+1LS2xiv59BREZteDyTSPmsz1z2Muf35Zm+R0IIId83FI5PyjOKxoiqaHwWT+Sax2INwbjGQjlzXs/QG5Y6J3y0tUpqb6hrJPoqwnDJKqwZrZVUq+Gma6+I+pHCMz/rarBr8JG+R0IIIZ8bCkfDM3gb11759J7evY9SZ2Qb2c/9vp5hoZzd+f/cAWdlIO95IS2thXBaXszK4jdzQmG9erDNNRbRwbJe+aiurM4elp5n9xIs9EzmfCQvMyGEkM8HhaPc75nGJd6pewvGar2Pqm9p270hp1H/lz73uIa3sUI24I5WWp2kLQo9WqGo1TBUTwD2CMTKCqtrPweZlbH0CEms03IvITX3Gdq1obDM8b4niklCCCFrQeG4kHsN/NegR0BV7NYWbtVjuZUXsqdPc0N8554vSxdIqXrEWl63Vugpfm6FINowz4oA7FmJtRKe2gozjfqa1YllpNBGNZw1qr/Sxj3pFcZr8WiB/YzMPbcIIYQQ5LsXjvcIT93C0xiVW1O4PUIs3qPNatl7LKrTU0eV1iC5Fb6K5Vvvfqy0mQmc1kI3UdnKuyB7w1y9etTO66vS8vRUPLdrTg70srWwqpz/W4qbjzTBZ9nieVeFYpIQQkgv37Vw3Pr5skoba4WnVm3nCq572nh2c/u9xIu45iI6a00yVKgIPZHcC4h2le8xsj/C5yxUtSoEW3mS5Gc2Xv+s3Vorq/aEAC8VlBWeQXRm5/73KnK29KBy0R1CCCG9fNfCcQlbz2DP8Wq1bD+CaFxTfFbFztrPabbsszJrUfEwRGGUrZBRtYlCPlshphjKOicUtZXX098Mrx2R+rGM2m+1U+FZBv5znoPt5VGhr8/K2iHKDGclhBBS4bsVjluHqM71NK7h5dpKMPbm36uOark1RWlkm9n3nnO9kxOV5+hE3g8Qs9DVCezQppVf6V8mPKp5PYvwiMQhqkjFU6l2rf72hrO2+mZZYyJiLeHQc97OFT33iBZ5NEtDlJe+F5RCkhBCCPLdCse5bCkae+238NY9QlCuJRYrzymuFTob2WXpS7zF1TKVwWLvOwqjsi3xhYLLCzPFMNaecNQsxFXE90JG9Xo2Iu/FduUZx7nhrFjWskRUVlkixuaKjNZ5/r0vahPR815QkXnHMZpoIoQQ8v3yXQrHZ52t7unXliGePXVsne/ZtMrc+1nLLL01MF475LnifegRP1nYavTcXyTcpsAG26usqtoKca320euDR/aMYyvUt2VfKZtxj1DRFpXQ6Dk8w75Z1rxel+xL73lSOffWaosQQsjn5bsTjnNF4xqLm6wRnrpGOCja9Ob3iMGldVfqu4dYjdIq/WnV4TF3gBoNDHdmu+oRy54RxLDUSnipLeN5R1tCMGvH2+8s3+tj6zyLVluN9rcazur1sXKezPFOrkVFgPTea3u9aB+dNULRe72SS4T4szxTSwgh5HF8V8LxEQuSzGUr0biV6Fvb89jrVbyHZ7PSj6hcq0xvHR6eiPGoPsvXM8iMRFolvNRbVMcLZc3CWOeGqq4dyhrZWVus1yvnlUda58ijX2+x9Bk75F5i5dERKT0TAku913PDWemFJISQ75PvSjjOZcvnGquDlCVhlFF+j2isCC8vr/f5xnsLxq3F4hrexypV70MmgNTWs/FCR9W+Je56BaQn/qJQVS/Pa0dMfkv4tUJZIxvPTqTtLaqIhbWFGLK2CKh4Vnt4tKC7Fz1hv5Xzo+opnBPOitEBhBBCPjffjXDcOkS1t/yS8NR7hZbeo1xmu+V+riUQ1xKRWbk5VFc3jUJYrR2uuloNM43CWSvp2EZUP/Z7zoqsc/Izm2pIs62zIvqX3Iu2CC2tUAmXrLLW8433CHddM6wzOqew/jkh0K3vp/e8oYgkhJDPzXchHLcUjfcOT13iZewRWtVyc/N6nlt8p
FicKxTnepjXOJ9wpVKkIng8r2MWGjo3zLSannkRI09ktD+tUNRqqGo1nNXri1IJB46+yyUL6MzlkeGn936+ca17+1LPb2sSoyXcKl7vzKP56AWJCCGEPA/fhXB8FFuEp2b5PaKyR4xVyvXUF9muFTp77/BXzyayy9I9qoPlysIquFKqTc8EjBdO2ivwthaQnj32N1utVTry1aY1eO8VkVgGyyFrirIqc87HjGp49ZosFYRrC9hoVV6k9znanlV9s5DTnhDWlhAlhBDysfn0wvFR3sYe0biGp3FLb+GS9Fabc+pbK/R1yTH28qO07FxaaxCa1RMN+DJhhOGkau+Fh3p51VDTLUNZs/1o5VXzpWGjzH3eb0lY5xahpy3WDKvdov+9/brHMdzB52qYudJzbs1ZBdhrp3XeMnyVEEI+J59eOD6CR4rGjKUCcE3RuKVg7PEobiEWo+8j+562fL4sCnVreRlboatenhUsrZDYe4ay2s9eOOtcb6TatBbEqYSqVsJUq4PwpRMSWy1g86yv3NgiCqCH6nOIkTc9s83OVbWb44VkCCshhHx/fGrhOGcwvtTTuJS1PI1Z3tbpa4ei9razVlutNqttZ2VbZZYQDewqr+3IBoVzRd7WoazZPrTCWVsiuSc/6lfLs1PZj557zxJPz1bCs9r/tbxUawvCre79vRED0flVCZmeE1bd++ykJROghBBCPhafVjg+IkxrS09jj6C8l2DsFYu9bVbbmNNOT1ut9jx7zyaz7cnPwDDNKM/irZ4aDSRRtEUrm3rhpl79PXW3QlkxLwtntfW2VmedE9KKNlE7kS2WW2PgfU8P0Rp9ntPfHsG7dCJnrd+YVmhqtvKx2ojk14UyV0RmoacVAcnQVUII+Rx8WuG4Fb2Dhap9T/hpr8csa2upsMvY2oNZrasnr0cwzp0QaKUvIarTW2l1Tnhmr8fQS7MeCCvuMIQUV02teCKx/1Hfvf2seCS38uL0hqxi+QpreLV7xFx2LlZY0wvfutaWhpHP6Wv2LGfrPIsWasquC82PJlYy76Il8iAyfJUQQj4/n1I4zh2QLxmozBVUIvO9iVneXM9hTx1beBnX8mLOaSNrp2Xr5UdpXtm5NkplwOat3lhdSCbrT9S2J4ZaaXPrFck9Lr2hgJX2orrRphJWmNWHbLl4jEckcHu5d7+VOaJwqZCsUIkIqHq+q+d+FubtTdpkkx9zw1erApUQQsjz8SmF4xxaA6I5g4WKF3FueGolb6mQbImtuWJxC6G4pueyWl+U5n3Pc8NWq+Ag1NIaBNo0L4wzWkG1Z4VTa58NdpeGs7bq13RbJmpHwf3A9tB+iY1ni/Sswrk1PR6me/R7rmexGvGxNLxVaT1DrOzANrtebL29Kxa3Fr8SeX+dZAKw4n2MxCchhJDnhcJRlodE9YQnzu3HXEFZqadHGPU8n1dtay3BmOX1ttHKy+rM0tYaeLZordKYef4yb1glFLUVzpqFnEZej0o4q9eXakgr9qsnrBXLirFZY7VVLINU7jVbDdCXeHKr90g85j30CMDq9bpkkjFbCCl6nrByPs+9tiRJi/K8EFeKR0II+T74VMJxqxCo3sF+pR9LBdha6ZltS3gtDYGdU2c1vepdXBrO6n3uGWxudc72eLQ8D1zkfbN44rO6UIxnGy2qg7Zz+tfqY3WBHbTtDfeNbDy7SpmlbPlMWusc3Iqe8NPe8PM1Q1jxuKB3EW28SYbI69jy8Geh4za9Msni5Uf1eFA8EkLIx+FTCcc5LPHyVMu27OZ4veaItiXeQM9Lt3Z9UZ1LhOIjwlm9/Mgmsl2DTDRmKzWuFTIaeTxsXXPCWTG91b+sj1H4qs2vhLbaz164r1c31tFaPTMrO5csxLnK3EH/0v73ev56RGEryiJqu/qbEIWK2/JH+G+FZZTmna8ozCJhmUUhaJonICsL7LQEZBb2Sggh5Hn47oXj2qzhbVSWeN+ytFZfPI9gtQ0Uhj0DsJ59uadorAr7S
n5kh/ROaHgDslYYZOZN8PKqIaNYzyDvBeiScFYvHct4+5iFtWZ1Vr0uveGttowtZ23R3tJzjmzpWYzO5ZYIWDJpV+1Ha0JnrlhcEkHgLZaEeOHiWZi42uP1gdctTuxg+Z4QVsXzys/1ItL7SAghz82nEY5zPDZLnlWphqjOneWutDvH41f1SKL93PpbYmyOt3ROnb11tOxaeZgf2Xh2c/A8R60w1R4vm5eeefqqXkQ76KyGs3rpWf1eH0XeD1KzhYCk0Z7Nr3oksc5qaOccEbiGZ9GydHC/lbexZ3JqyYRTpa0Mb/+jRWswTSdGrEfSm6SIQletbeRZ9CZookmO6F7giUBPZBJCCPk4fBrh2MvaIaq9onGOoFySZtMrXkEcSPUKvCwktdKnar882zliM7OZ+715+Z5Ny76XqmBE20xQRs9SqX32/KEOIFv1RgPTiicuE7sivuD12qmGmWbhvtFxboXrecLSa7sqAp/BcxPt6xpCtnIf9q7dpZNvXjtzf09QgOE25nthplZIesKxJSK9kNUsfBW9mBhlIPL+XM4iHhCvDCGEkOfgUwjHNQbb96iz2lbLC7nEu+oNiCr9iNK8vmZiaY7g7RWhUV+WCMYleZhvqX6XWL612ITSCoushm1mHojMdnTycaAZDUArYbFZ/zyxhoNS7CvWYfsclcv6IiYfy6JNlO6dI5VB9b3uYVlfKoIqO5cr5TOxiHVUJ6rmTj4tBY8FikQ8t7JzKRKCGBUQeQa1zih8NbouCSGEfF4+vHC8p8ATWcfbWK2/6uWz6T0CLRNXFe8keiIrAq/idfTs53oD5ngZ5qRjXiU/s21R8dhknsdK+GUlRBQ9GJ4tDnZR3C0JixW5HbRG+xWJaWxL5PZ7isJNs75X2vTajUR+1JdH0HqWtkKvBzWrvzKRZdN77l/Ve5XXjyqRx9F69zCU23oWJ2d7gs848VHZrngfvQmbKPIg8jy2JsAoRgkh5Ln48MJxDq0f+WigsoZorOb1hnPOFZHVwdKSOnrE4lpexR6huSQd87JykQ2yZDKkFaYqsvzZxiy9YhuJwLliMqsL66vsg5eHbbYEpTTazPKjejOBda8B9hoCaa12K5ENXlrlftVzv8v62MKbCDnCfxSRkTjcichBriut2nP1YOrwvP9oH21rudH57NngPqIYjK6nyJ4QQshj+e6E41zROJel9UUz3EtoDYCw7Z46MrGK9XrCL2u74pnE7S1FYyYmMb9qsxSsLwuP9MIvW6GXWXrr2SdMj8LpojA6rx9eX3Awivvp7YO19+pslcv6Vmkz8k7iOdN6LuzRRIP8uYITad0regVjZF8Vm16fWvuKkwGRN9peJ5rW8kTbflpvJO4nlvGuRbwGNd27fpHM+9gLxSMhhDwPH1o49g6Wlgxe5gqDKH+OpzGyrXokI8FVsZ8za5/1qeIlXNu72HP81/A2evlzPJBzQREjknvfBOwr3kWR94Nfr47IsxcdW88To2Wr/W+JLvwc7XvPi9mXtOnVjTyLQGyx5kC/RyxG94fsvuTd03pEZNYfjyg83PM4orcRvdXWA6nXhvVCYr7WYUWg1u95H61owwmSlucxmzjC49ASlxSPhBDyHHxo4bg2aw/qWx6pXvuqQMw+zxV8W5evlMnabaV59Vdso3oz+4oHtXU+9AoEb1AVCTH7GQdxa4akzrH10jNBGfU/q7NVL37OnmXMnlOc22YmJtd+rcbabDG4945H5Z7QEn+ZQIxEZNZO1NcIPFei5xbx2UYUgZFI3J/7dpTbUFXd3pv+HmC/rICMnnfEZx9tv9E7iWUE8rAeD4pHQgh5PBSOK9HyNEV5VQFREZ6RwGn1qSWYI8G0VDR67VdEWq9o3Mrj2PImVsXiGl6klucKBQsO0lCAZd6CHs9CK9Q1Cmv17DEvEmFReJ61Re9idHywbksWzrq0TWy7co5UB9VreS299rbwiLYmYir3IRSMmN6yyerE/vR+VzrB4F172jZ6G1Xoj
XISgJg+yenH3XoY5Zxv98HzHArkV8Sj4om7TDwSQgj5WHxI4ThncLKmh6dlW/VMVQXLHM9ij6cwqr/HyxgJtsgzkAm8OV7JHm/hEkGZ2XifK5MEWwy2ReJnp+asIIoCDutqhapGdUS2mIf5GD6aiS7cVltsz3r1Ws+hYd29daKtDT0UJ/2je1oqYiG7R0f31Ox+VxWInjgc4b9Xj9e+11cPvI5smhdeKnISfUezraLwxdgdTLkDlLMiULfVZjLbXv92Th0SbOMxWTNs1bMnhBByPz6kcFyblsetRWY3x3PlpVWET49ojNrsKdszeMN8b/8ze+9zNS2z8ezQNiqXfYeZgMzSsjoyvIGWN8iKQkO9PBzAoQcwCzON6q8Iz0qe198oX+T9sfREcvSdt45VJL6j4+X1We0xFPWjD5C97yKiMrES3TdsnicArU0kCndQTvtd8Uq2+qpEkwdeSKrIVQjqtoo3+yyjFZHWi6hiTQWi9nkv769t2z8rEnemHlu/FYXRtj0OrbBVhJ5JQgh5Tr4L4dgzAG/REghL8OpqebRatPobiUhbNhOcLbHaage9ktjPHtE4V1hGaUsFYyb+ozJzyQapnufOChoUPl4eDgrtINAbEEZ1tNJ78rx8kffHYk5oavW78sL7PFqDYOxTtVxU11Ze7F6q53bW30wsar53L6h4FndgH/0XKBt5Iqv7coT/2hf77KJXnxWEeA2qKLRCT+QqGm0oq60L982Grx5MvrXzbBDbvywtSqd4JISQ5+PDCcdnGRB5VEVE5u1qiRmvjKZVvIWebcv7V/UyZu1FtmjfIwwrx3GOZ7HHk9ASh5HgzMqsRUWI4LOBkVcsEnxePZG3LkrPwji9PMzH9ntt7D6j18/zNtrPKASidqt1Yhp+h2t6ILe+l/b0tSLUK/c7vJd4AnBn/qNn0ROZg2NfuYe2sOLO21ZxFoWh2nTPEzkaWxu2ejD59p6n74C0HkzcnygyIAphxXuqnXRCYRiJSg97PyKEEHI/Ppxw7KU1493jBerxNs4RjVndLdGB+Zm3MBOk1qYi3DyR2iqHA61oMGbz1xKUWHfUF68PXllxbKLP3rk29/xUWgMn75m57HmiaCDXCjON6sE6sM9RGCvmYbnIJrNrhYFmAk8k94p6fVK7rP/eoHwtz2MPVc9ghTl9rUyqZBNjVgBqXsuLqP912wpKrfvFKSNQLpoEi/CuE/tMo/dcIwpEKyL3ZltMOeuZxOsTz8W9vAc9mLZfVvzZBXnsNRFdH1je2qB4pNeREEKei08vHNdi7dn5an0VbyPmVwRuS/whmbhD8eXV5fUL228Jwsg+KxN5U8XJr5TNymdlPNvIroesbCX8MbJROzuzH3kdvUEg5tv2vHqwX+L0yxtAVkVWto/VuqL+RC9xx/KeR6Xa/zUXydna07j0lSHRvQSvu8z7VxWM+gOIHkfPK+kJ1MqEnId9tlE/W7FovYa4jV49LW8XusF27Ks30EavVxv2ip5Ie2xaz1J6RF7GClmZHi8lIYSQ5Xwo4Xgv8VaZfc+8SRVvY2TbI4YiEWdtooGNJ+hwttwTlyg0vTJz+7TGZy/NE4JZOa8s9l8c26gOLy8qvyYVz1vkRRSJRZ0XlhqFnlZWJLXteX1GjyWWQ1tPuGShsFmdWVhplFct4+Xj85ZZ2blsLRzn0Jqs8tJ6xKJuqyj0hKMVg6PJj+oZTFnFm0TzsIJP/0/mv7ciqg0z1W1MfzP5b6aOnbFV7+Kb2QddMEexYg0F6mDq9O4fWdjq0fmMbUQTLPQ8EkLI4/lQwrGXyg/4nDJz6rV4A7fIs4ifs1ntHoEW1ZENzHS7p52WuMR+LxWQ2Db202szKtcqi+k9eVmaR8X75tWNA7BoxdNWXstjh21V6/IGhPg52g9x0qN6shDUSrhrVXBX0rM253zPS1h6L1uT1gRMNKHlibgR/qzdLtlGsRjVgeK0NRGl4ATDZNK9cFUMSVVhNphtTbcex8Fsa1voc
fSewdVtDT/VfqAQ1HQ9Fl54qRWIFfFo6fEk0utICCH348MIx3t5G+fYLvU29no+KyLSK1vxGla8mlE7c8Vlqy2vbLQ/Xr2eTUtIV8VmVA7Tvc9eXVWqXo3WuZsJlcjj2BKRrXZaIamZd8ETlSL+wLElLquCMaqr5VWd86oRWy/2LbLPmHPffJSYjDz0rftE5l30BODOSbcex2zbq9u7b2XHEL2NKBrR46jexEGuoaqj3L6qw046ZNvKaD6PcvVCWhFm20LxODjp+j96RjG6rlFMYnoFikdCCLkPH0Y49rLV4GeOaKjORqO4sXW1PG4tz17mNey1x76iEM3sWu1Io5z3ucezuMSrWPmuo8Gvl+fZrInXXhYqiWGe1nYopGdexp6QVK3LK9srpDIx2wpN9dKq4apZXitcNerHFqw9IddDNrll8737jN2OVke1nkObvjNp1stovZVoY9uxdYjJt/cy3BcLCkbdboWnvpky+/PfUUS+GdsXY2vDU1VY7eQazjrIVXzabe2D7oudgLLiztYRiUgs0xKS2aSR7Q8hhJDH8GmF4xxaA5m16vTq97wuUfuZCMJ6s+25eF5GWzeKUU2LttFOP7cEZST+Kp5Fbx9awrAiFrOJBS8/StsCzwMZeQIxtK3qfax4JtGuReX4RPVl3sRsIBo9x9nyPGZ5Pd7MqC9zqRzDR4jIiniMJqg8z59Ntx5CfJZxNOmDxCLSplvhOIKN7ZO3H5aKcNRtDUNFoaf127BW73zWelRQ2nb1v07k4HO/NpR2kNt2vLBV9YJ64rHFGh5Deh0JIWR7PqVwrIq1Fi27igfKIxKJUf2ZiKx6Ar1B15r2vR7PSJhl4lJtvfRW3T1eSZuGdWf5Wbr3Ge3Xxms/e54w8xhK0S4SmN5rQLI2POGGRB7KzM5rxyubCWL8HHljszyv3aifW09erdFGL73eRu8eZD97YtEKSxSI1qs4Oum7wB49mGLysZ+4TyK34k0/q4DzhOOb3Iaqqp2KNPuMpPeMo3oYbdu2fezDAGn22Ufdtyxs1d5LIxGJ29gfrJsL5RBCyPPwIYTjmoOaqK7K4GqJZ6gqAisi0hNDFbG2hX1vnbj/c8WiV4/93CMU54jESCDi95cJSc9mTVoLvIjEQk8kF0QV4bQTf4DnCT6088TcEm9Ca6BZCRdthflGdWVhqpWw1cxe6TmH1ppYW4LXRmsiyN4HXsy2t8ANCkMrHF9MHS/GVtNfJRaO3ms68JUd3j1M8QRbJBw15DTbPsgpVFW37YI5byZN3/Wox1A9jbptxZ8+72jFnSdKR7muxqri8BjYWRF5hG3FitaWeIyg15EQQrblQwjHZ2COaOwRoziQygYfURtLBnwolDJBuNTLGAnIqE9eX6J6vDqztjIRWfVEVm28/Ep6Lzpwwvo8L1rUVwx9s4M6CfK8frTajOw8lngZWuGgmRcjGsBWF8XBurMFQCqrxc4hO7eq592SibNqndlEkL3+0eOHYlG9jCgcUUS+wrbINVQ18j4OcitKB/gvIjKcN0bYwen85R+nW+FkRR968A7y/lzCUFErAlXkHc1/G4Zqn5+0os/bDxV9mnY0adi+9tc771uCruVFpCAkhJDngMJR+gb3Xn4mRKK0SluZ0KqIk0zktTx+WO/S+qrbdt8i8Wr71/IwZt7CJWKxKhQjz6VliwG5SFtoVV/N4Q3qKs/ueZ7IXmGEfVqDqO3M+5l5BjHMztra/W4901gJ263QM9lk2crT2OPh9K5jvFfof08sWhH5auxejS16GaNtMeWsiNTtYTwJw/FFZDx/qeNoRGNwQKfzF3+cTkJyOpy2D/vzf7l9R6N6C/U1GZ4XUdNE3j+j6Ylt+/yifU7SpnvPU2q6vSYqzztqHSPY2z4gmZjM8igyCSFkO55eOPYOZrJByloDo5ZobLUZCclMREXlWx49r73M+4d131swZmKxIiwz+0gotkR+RSRGAtE7H+dMVPSAXkHFE39Rv7Nn9bDuL
MQ1qi/rV9TOFmT98vKysNNqXmbX6lMPc7yMS493b/lsYkfTrEjUvOhdi9YT+GpsPIH4Iu89joNJFzFhq6PIyxeRl9fT9m53FYm71+u2isXM43g8nwDT8Soej5PI4e20fTicROR0ENl/EzmcxaSGnk7yPjxVt1/kKjZVQOn+YuTAINcVWcXY2+Mupk1cMMuKVRW56C3VuqzIjESfLVe5BjLxSAghZBueWjiuKRqX0FtvRSxU6qyISK9MJMRa5SJR2GItAZn11RONkQCriMjMs1j1KmbexNb3nR3bOedxtpiMJyQxFNWWwwHisWEb1evZt/r1CHo8F5kYry4a1Gp/qcdkzrl1jwm3rC1PLGq6vd7s6qiDSbMiMlrMRv+sWFSbV1PPq6njZRTZvZyE4OsPV8G4e716HHe76/YwGtHouMinc4zncbqKRpGTWBzGU9r4dhKg03lnx/3pzx4fe86oV1SPi4hcVgDW6xHDbY9yfT7RHmf7HKQez6O8F4LoTcQ6vH56bCX+6HUkhJBteGrh+CiqM/WRsOipOxJAUbmKxw/b8TxvUTmbVvEEera9271eRhR2VQ/jXLEYfc/RudA6L6Lve8kAPXvthJKFomJ+9BxeK8zSe74v68cz4IXAVhbLsSLZW/QmCn2du7pqDz0exbUnMTK8+5/XFt4T7L3GisERbDRMUwUgehNVIGr5L8bmi0l/+XIVi6/qZXw9ichxuOaPu7OwHIzXMRGOIlfxeNhfheN0ENm/XUNW9+elUL/9dLY7nrZfv11F3Iu8D1VVUfft3Ba+j1FMunoRvbBVPfaedxyfkbT3ZJ1Isqu/es81azoulGPtlyySQwghZBu+G+FYHUitObteFTTWPhN5FdFoB1f6GUVNS7jZQVrUrtfm2oJxrlj0xF1ULhOKWT1RWUz38r3PWdkKmcdR5FbI6Gcsb/NHyENb9Ej2iJ9otdUWS0WM9ZJWqQpIb7snlLXVryi0LyM6x7xBfE/5KpXvK7r+vGvfPss4mM823a6IaoXjjSiUq+dN01/GkxgcdyehqNsvX67C8fWHU9p43r54HEE4qqAUETdU1XodD2dPooanHieRt6+nz8dJ5O0Xp3DV6Sjy7Ydz6OpB5PUnkbf9rTh7k2vYqh4DnMDwJjpwezL2Wu8gt+LS2njPO9r7u5g+ohjNqIjHnvoIIYQs59MIx7VnxufWGwmUpW1XvFtZWy3PphWNmQ3W4wnvJaLR8xJG4t4rF/XFK+cJxDVFY0UsrnXeevV4YhJXXI28jjYfRWRk69UvTl7U3x6WiJqest7COCLzwlDRrmfAO2dhoIpAnHtOrv39ede9pu/MNorI0dgM8B9XQNW0F/g8ykn86fOLL1+uwvD1VeTlh5MAHHcir2dBuXu5CkYNZVVRaZ9vHMyOHo/nSZPzF3+cjO3+ZK95wxuEsu5PHk8ts38T2Z2VnV0IB7cPkK73xAlsbDpOCqLoizySeB/0Fso5gI2tjx5FQgh5fj6NcMyY692JBAvm9YjElscMtytUPIjYJgozFIUVT6Td3jlp0XbFc5rZ4L5IUM6zy4S9d5wEtisisTIYbwn0XjwREokea48C0gtFxRBXzUdB2erXvT1Ya+HtW+Y9jI5dZRXVpd6TaP+zCY2s3NzvrFIum4ix15sVPfYZRxSJVgyiQPRCUl9GkS+/OAm2l9fbbfU4fvnhHJ66E/ny4/V5xhuP46vppDaKO2XY6Ze8F/lyjvmc9sbL+PXsfTyKvP58+jxNIi8/i+x/ONmNo8jb28lu/L2TV/Ac3XoRgrqtC+DY13RMcivsvJVVUfyZbl8E54vchqVaO/0etT9HkxY9G+nli7yvtyIwKUQJIWR9nlY4Lh1ktqjOvnt5rUFYlNYanC0RWliPZ2vTWqLQtomicASbKC3ajyzf9snbp4pX0qsnqstLx+1MZOJ2dm543/9aQgfraYmS1rOKVlC2np/0ntNT27n717r+s3q3une0hN/cENa1V5yt3JOwb
M/ERqUPLTxvvHfN471J/1svWhSeahe5+cHkv3w5ib3Xs1hUz+LrjyeB+PIq8oOmm5VU9XnHS4W6mo73Hg+8OSs2plPjQEVOHse3U9qXrycheTyKfP3JhKd+EXn7dgp13e1OXse3rycR+fom8vbzqVkNWRV5LyK1aRtuqq/10L/D+TjZd0tGE0VavwrJ0eR7YnAUcRfWWUJWF8UjIYSsy9MKx8/EXI/nWm01JsBL3i9vYBkJV1s2Enet9mx9mccxG4Bm6VH5SPh5HsctBONc0ZOt/jklaehxHBv5tn/Raq3i2C6h91htPemktMR61G+0ywa3vSGqFW9iSyRuKcor1xtOHOE7CQdn2xOVuLqqvlZDF7m5hJuOt6Gqr1+u6S+vt2GrlyVMX+RWlaLKxVk3RVelsUrLmWEYz9sv5/DUcbxdTGf3cn6tx/m5x2k6e0b3V7Fnj5tOFtrwVOyuN7mIom8y6SL+uYS7pGLRIxJ9ti0KP0IIeR4+hXB8hPeh0odswBt51HAbxQmKKVtP5J1DsYMeTFtfyxPpeQCwrahPmacR++HtR7afuC9YztpF9l6eVy7Kx/SqF2ftCQQciHneRRSIWLblkRTxReScZ/EiKiJojm3LJvOEtBbLybyQlVDVOYPkyr63Jnha9Xl2vUQTOphvr9udsUGxiOGp6lHU9FeT98WskvrlvMjNlx98L+OXH27DVsdfmAq/aIXQkP6SvnwRGfQ9Gi+nP8u0P/2JiBx+Pq+WIye331c5nQDfzttn7+OX82I548spVHX/duqbehxFRF6+iuxfReR3T4vmqPBSwXaAbQ0xtYJQPYuqbYfztg0xRT2sdWn4Koa5WtGo29755y2cE4lKjx5bQggh83lK4dgj9tYadPd6nrwyNi0SNdkgryW6vD5Y25ZIsnae4PMEXEsgRoLS2kZt4b557UT7gjat452JSwnSWh7Hitj38iObzN4jGihVQkut7RxBGdUd4Xk9M+Z4zrxyrfai76G1b5lAjI5dxTbrS+85E52r+Ll6zJZMwrXq9K5f715jw1M13Tr/7Iqpr3L2wr2I/PDj2auownE8bf9wFohffnFKH8ZT2uW5xR/l5FFUsfhqtlUk/vArV5H48sOtcPSY9iLHs4Dcn5Xf198R+QM/n9J+nk7xpgcR+enU/u4g8gd21+cah/HkaXx5PRV/eRX59lVkOoqMX0XGn98Lx6PZVsH3Krev15hM2kFOglCFox57ff+jXUhHxaN+JzYU1t7v8ZxET6TWa68R9TpacUhPJCGEPIanFI6kDQpFj0wkYV1Yzqvf82BG9pnIm4PnqVxLNFa9i3NEY0Uwzj0uFW+ZtpeJw4qNJyhxX7KBXLaPPR6u1vFcW6BHwg7LtcR49LxjtoBRD63zau5EWOXcnCMoo3uLt+2Fqg7m82jScaXU8RyCuns5icHL6zPG2/DU3c7kq+pUFarhqXb75YvI6x88FXr58SQYRUR2P54rcryNIrcex7efTjY2bdiLfPn9q/2rXE+Y1+vCOrudyPHl/LzjOZz18o7J11N1L/vrM8o2PNXeB+25qYJPj7s3ESgmzdr3cG8BSJFJCCHr8XTCccmsdpXewWW1Pk+IRPnYZhRKZgdFXllPFLU8cVF9I5TPZv9F3i/xPkC6V0erH57oi46vJyJ7xWJ2LL0BdmUAjnlevmcTYcu2PFQVj2ArHDWzyQTTHKrerR5vWpYX1V+hdTw9mywP8yttVUV2dvyi+1TruK1xb660ifcYe19peRy/yFUYfvnFWRC+XLdfvpxXR325hq1q/qt6Fq2X8Qe5LoTzxYjFH//QVSR++YNyehfHyylNBePu1T8Ih7erWDz8fFJ/+59PXsfjXuTb7522Dz+L/PztFLZ6dhGOX0RG1Z1fzyLx/Czk+PPJG3l4O+3/4bduQ0rV63iAv0muoauDSdPvxAp3ex9QryKKUm9BnOzVHPf4zSeEELIOTycc12TuD1LP7PwcTwq25Ym4locOhZhN8
8SlplcFpecBaM1AZ15G7EMkXLP2o37b+qVoK428Jd6aysTE3PMmWr00Ilp4omUXecKi5yez/lSvg6hMNa8illrtZlREXs9Kq17+HCrHqvcc9urybObQ6mNFOOpCpoOYZxnHkzB8eT2JRFwx9eX1+tqN3csp/YdfysnL+KOcBOMgIr+U64I3vxCR1z9w8ir+8Csir788CcNf/JFrWKoXqjq++MJRRaPISRjuz0uovv0k8uWXJxH57XdP9UwHkd1virz89ikOVA/SXuR1Oq+s+np6/nH/7ZQ97UXezr/qbz+K7DQiVq5iTReDfT1VJXu5dWzq84p6nK1YtJOF9n6ur/OwnsiD2dZ0fDWHmLptiCp6CfWz2nk2No8QQsg2fGrhOIfKQNIbPFUHpj0DVa+u3oFbpa+tPnrt9oqzlhDuIRON2L8e70om8NYQK2t6v6LyldVVPfsRbKydF8KKtlF/IiqiZI5grIr5qM2Mipj2bHrye2gdw9axWCIgq7SOb3b/wMmqHaTdLI4zngTgeH7Pov7Z1VFt2jietd54Dk21bkv903d3vPwo8vqLk5h7/cU1NFW9iy/nFXbU45iFqmpo6g3nMNdpLzKeld7ux9P26y9OAlO+XUNl5dTf8exZ1LBV3ddxOHdnd16RVXyh503m2TQUd7itNph32VWhkCOEkM/GhxaOc7w2PYOgysAzqh/b94RT5tWy3kEsG3nicLBl+zE6NliHN1jz6sO8qL/oBRzhc6Uv0f4O8B/bx3JeupeW5dm0arqXH9lEZMIwo7K6Ktpl4auR7RyWih6b3vv9RO33UlkVdW6oaoXeyZAewVg5h+cS9Tu6zm2opBuqen4vo3oRdfvLD3J5xvGHX5yE1MuX07Z6H8dfniv5QU6exkFOXsYfv5zE2y/+yMnL+PqLU3jq6x88VWq9j9bjuPvhKhg9r6MNUz3urx7Hr79zqn/ai/z0/zt5H/dfT97Llx9PoazyD047fl5xVUaR128ncfjtp5NoPOyvTb79IHI4v7bjbX+7KI4eV31k0777UV/ngcd8hG0Un/Z+7wlOLx/F5WDSBiefEELIY/nQwnENlgyGqoPPTETidtYfFFpRndguijbMw7pRLGaCL6snEraenddOSzCimK4KRhwgry0WKyKlet5h2Z5BVHR+Vt43mK3YOje8MvJKRDZVoTP3+4v6VEEXHbF4Yai7JF9t5tJzXq4pID27uUR9QUGi1+nlnYzj1cP2+sPJ66aro9rnGu27GHdGRO7s84ujnMWibv+Bk2AcX0R++EPn7Z3ID3/4JBJffhD58itnz+OPJ7H48uN1oZyLcNzJZYVV5bg/haCqeHz76bySzY+nEFX1Ru5eTyJTROSncwjs208i4++KjNPNOy5evl6rf/1y+n/4ctrn8U3k8IPI6/kVHRqaardF3ocB6wqpKOQwTb8fDSPFdPvZPjdZfc+jB8UkIYQ8ju9KOK7hZajUv9bMfG+70WfM84RsVDYTvVjGm222+Z4QjPqYkQ2Cq6Kx4mGcM7CeK1Kq52ZVDGJ7lVVVrZ1tJ1qpdS6t49IrdirnQ6utOVSeC62soLp0MqAqrLcQj71kkwfepNAg1x+pnckbx5MoHHdyCs0cT1rLhqlqGOdlVdXzyqnDxVV5/q8PSepn+7yihqgOL7fbKhrH3TWU1Xsdh+dxHPanctPZTkXkiwlTfTlLqXF3qlvkGrb68u3a7/2pmeFgjgcci3G8FX32u7BCcJTb0FM74ajbkcCbc0/oFYxRu5XJl6odIYSQnKcSjvcWXEhlgBR5+6r1eV4/3LY/2ALb1t56y9AGBwTorbTlRigjThmvjh2URU+B2mD/PI+C15eoX97+2s+9YjEbbNu0OelevmWtc75nNVVbpjcktXfwVbk25grEngkEbDPqW+se0LMKamUhnF7PSVU49ohGz2bOseklm2Dy7jH6eg2RszdtkMtrNlQY2rDVLz+ctl/P3reb8FR9ieEvz438QkR+OC+C8
4s/LPLjHzkviPOHbr2PP5w9ja+/vIaoasiq9TiOLycVh6i3UeQUfmo9jl9/PD/n+CLy7RenPBG5PEd5+Pm0/fV3RH787etBers++rg/P/c4Taf934+n8NXXsxtR9fEgt17Gm+MM34UKysFsazirXbDG3kutl9H+10VzEOudJIQQ8rw8lXBci+pgPLOrzri3BsaZ+PPqannicNYY+4A23gDd65NtOxOLUfueaPTqE8dObSKx3BKW2K+WYKx6qOaKSMyLbDLbOXgeREUHeFGZim1kH1ERy1UB2ftZ09YQ95ae0NNd0WYOrf2qnNc9Intt0YjtYT8u2+NJBIrI5T2Ll/cunrXZy5fr59cfTmm6reGpl3DWH+T6jkbdfpXTqzYuovAPnp431OcXv/xSZHi5Pu/oraqqgjF7xlFDT/c/X/NxsRz7Tsfx5SQWD2+ndr78yjXv629f369xbmK3P+3jdF4oZ/dyfb/jMJ7De6fbyTwVcygQcfVUvMdreRSG0Xc7yPuoh4OT7rGWoKy0RQghJOdTCsd7sGRgFQmkiEhQtspGIksk/pGviiEUep6t1y6KxpZYbvWr18vYKxCXeB4xL7KJiARgBNbrrZCKdYwdtl4bVbJzqXL8l36O0ry+VaksJNTy1i4R4r2e2uo1gvV4ba+F29/zhorEmxDM3VVMqpBU0ah2KhbVC6lCcxzFhKPK1es4ilzCTzU0dHy5puFKqZeFcOC5Rl1ZVQUjrqrqrqgqt4vlTOpd/HptW0NV9fN0OO3M63R9YHG8Hq9xvD12ekzHUWQ0J9wAfwLb1m6ta4YQQsjH5sMKx6UDmSU/fFnbS4SCtY8EVSQCI68fblt73bb5mXfP1p21ieGp3kw1tpN5JLN9ierEct7nzKZlh+mtvMgG6TlXWqIjqssr11r0ZquQSpsepUXfQzT5UCnr9cnS+h56QlbXWk01GrxXzuey4DaNjNDg0HNyAkdnh7G+i2DcXdvW10uo8FEH3suXa7q+u3HcncJSNVT15fUaqjr+IFeh+IvzDr+KyJfx/eqpX355ClXVMFT1KGqo6vgi8uMfvgrG11++XxzHWxhH5HZxnN3r6aWLVkzaVVenvcgPP5/KjC8ib2bxnNc/KDL99slr+iKnE+ocmnuczl2B7b2cvZLnpjQkVT2O6ImcYHsw9iLy7h5tbQ6wffM9S3yvUXutr1rOlme4KyGEbEdTOA7D8JdF5L8lIv/weDz+F89p/xMR+e+JyH98NvsfH4/Hf+uc9z8Skb8op/v/P3c8Hv/tSkd6hNyc8UurTKXOah+zusZg2wqyavstb100mLY/9nbwmaWjaEQ7HKBG4hfTcXCPM95eG16fvf3D9F4PTSsN2/Dy5wqTCi1hiG17C99U7LO2MqrHJTrG3vfuXTMi/veMti3hivRci0pVJPbc71qTUUtEohWH1jt1aceJpUVBiUwzRu7Yj3G47aP1JloPoq6aqnn2eUe7LS/wN8h5UZwfr+9l1Hc0app6H3Xl1J7FcbyFcURuF8exacf99fnGw8/nhXDe5OLZnPbXeseX60I5+2+XRXLkfKz0/ZT4Lstx508s2jR7L8DfAZz8O5g8+x+3rZD0bOi9JISQj0PF4/gvi8j/WkT+FUj/Xx6Px/+ZTRiG4T8vIv+UiPwXROQ/IyL/p2EY/nPH43Hp4ml3peJp8tJanyvteXkoemy/7GdvwFzZrgyos33DQbtXR0VE4sDE6y/aRHV7xwLbxbqjcpXj0xKTUV9aZTKyWXulJ1w1s8/azfrt7XNL0GBaJgqzcwEHppVJgKpAjsS3l4baa43XmUTXRvr5vKGi4ibNdNITkyJXMddid65r6thRW7cVtCp4ROT2BfcqkHZX0aghmvh3uZnYm6ldQdULQ7Vhp3Z7AJsBymv+Zcecn9iLCDxc8/emnNahwlHrsWGwl/rN/+R7HYf3nj89LL3h6J54JIQQ8n3QFI7H4/HfHYbhHyvW9+dE5F8/Ho9fReT/PQzD3
xaRPyMi/+f5Xdye6Idy7iDSy8MBbUuQemIx8/hZG8xDsRUJtCzdtokLJ3jjMm/wirZeP3HfPBEQ9QvTsH3vcyttTjrmRTZI9XxTKo6dqM6obG8fMnqOYfY99Wxn9bSE4xqej+qKtq0yVS+jTbfCUD+/ExAmTUQu4Z62nK3Llr3pH5woXiiqZSooDNsOilztp10oZ/ciMgznUFR8tlFXUNXwVH2W0Yasfvly62X8cl745vUXp8+6mqk+Z6heSfUsWhv7yg4UeyK34aj6zKKmq0g87s+ew3OdIiJvP1ztrYjVfo3fTvvzdtq/8Sy0x/F0bMbzsdNjacViawJFQ0ZHqYXFH8x/Qgghn5Mlzzj+s8Mw/DMi8usi8j88Ho+/KSJ/XET+PWPzG+e0dwzD8Ksi8qsL2neZ4+lplV+rH2viCcZsIK3pWT2eIM3qrYg4W84bnFTEatSHTEBjHdgf77NN8+zQFvMq+ZHdHLw6oncuRl6F1jsae7xjs8QOpLU8iHPPD3HSozSR98IoIxJNa7zSJDu/IsEXPSso4gtFFJdLnnOMjkXleHrC1fYXF8C5CCMbwqo257/Lg3yjXMWjpl28h7ur13AAz54VfzZvAJsbT6QRjpedg59au3KqFZVa55tTBvtzOWjXpVL1mGiWCvZhFBmcE89eO3i/rFwC1ctkkPp532NLCCHkvswVjv+SiPwLcrq//wsi8j8Xkf9uTwXH4/HXROTXRESGwftJe0488SOSC8UoDwfNngBCcdXqk7ddFXhYviUqsR5bBoVl1B+bh/meF8kTmV792ec5aVl6K8+z8eiZbNg6THVOn7w6sZ7WdxJNOmTb2fndahcFTfT8Xuphc8pM0/sQ1XflCmT9C8MSTTo+K3jZBpHoCcyoDylmp+2+VsNXbX+1X3Y/X85RmsNwDU+9edH9ed8GvUHYv0sj5rMXSnojzHax3dZ4K7A2OB7jZ0y9c+8oFGmEEELqzPo1PB6P/0C3h2H434jI//788e+JyJ80pn/inHYXtvbyVdrqFQeeiIrqQnFZ9cZ4gmyEPK/ezNOI6biSqtdGNuj3bFuCsSkKGp/npImTh/nR99lzfi4RdNEiOFrXll5QkfhYZB7HikDsEYtu/YFIeifO5r5cEYjCM1viMRJqnjAUeS/yIoFYEZTYBzwWkbD2xAruf49ojkJs1aOm73C8vJrj7F29LJKzk1sPoy6Qc/FAjhI+v3jZWRMX24OGn85F3/fopaOY1APvnGu9ixQd5fZeUylebaJHnFLIEkLI8zLr120Yhj92PB7//vnjf1tE/tZ5+6+JyL86DMP/Qk6L4/wpEfm/LO7lyvQKzLXtx4ZNK19tvPYq6ZUyLSGEE/j2f+Sp9GzQGZC1k+XPFZHYRo/XEfO8fM+mZV+1iQZt2p43ANO6MjE6Z+Dm7WN2DL3jjJ9xuzqZ4G47AqlXHPU+1zeOweC9U5hGIZw2reVNjEJVW15Lm5exc46NrWMyL7Xsed7x5nnN4X2f7SI4+PlUQbstf4c6xaJSEY2ZJzHKs+n6qo4OIm9j2I2u2p+LUT52/wkh5JmpvI7jXxORPysi/+gwDL8hIv+8iPzZYRj+cTn99vwdEfnvi4gcj8d/fxiGvyoi/4GcHvP/H6y9omrvOGAtL4ql6rGJvCUZkfCIBtQ99WX9Q69NVDbz9Gga1p15gaJj1Bt+GIm/zMsV9RvrwXTMq+RHdkvAurLnFavPOGK5pX2qHOuqp7HiZYw8jL0CqbSCaEMATsf3gmoOXtho9nxi5lmMFsyxddnn5LDdFheBoiJxurZ1PJ+EusBNJrxvvJ/DtZ/2fY7WCznAvp0KmP+V/h/3cvlJVK/fdLhuH41gs9s2TVEBiYvjTPur5/DwVWT/82l7/7PI/utVFF7Sv4q8/XRtT98BOe1Pr+OY5PRLO53+pulsMl3DVjU8eJouZu/uB9FXEdl7dlk99yTrwyD0aBJCyBIqq6r+007y/zax/xdF5F9c0ql7Eo0nK
uOMzKZSPvLCePmeDQ6UcQDtDbytLbZTKatp2L5Xh9cnCfI0vdejhLZeWdzPalqW3srzbCxLxJnSWkY/e7VGFs6KZSMq+5Yd69A7KPk5kU04qGB855lqhHmu9eL7m+f6ApsohNDzdEarm3oeVM+bGC2Ug2LMtn95RjDpV4TdN3ssrJcxsvHAfmjf0MOo37GuJurOfrkdPr8SQ8XYsJcbj56KOLV9++m0yun+6znNWUTHLnyze70NPZ32ZxF4ONX79tN7EXk4i8X9z6e/w1lUHn6+ltFyKhr3p7+Dasr96dge3s7/D6cm9ZnGCf6Oyf/LdyXvy0d44gzTjkE6IYSQ5+UBT/x/XFZwHqxW7xBs99aRja+8vKyvXpirJ4ZbDoCKaIz64wlw2w8UnVhHb3pE5TgtpRVaOkoezpoN2Oae6z2i0SsTbeMkQ5RuBZV+vthaIdkQjJ5YzJ59tKtXZmAIp6XVJva1En4aPSN4yTdi0bbRG6p66ZfZbqwf9M4mrNMRtBfxOPT179qwVbXGU6iePet9VJtpL7L7wTxr+MMpz75vcTqcT0pYLfXGU2m8hvindVvhin92Hw5yfb4RDqbuov4/TrcmVgimh6phEwlA/YxtEkII+bh8d8IxG7QvEWAZmfDyxjxeaGokYqIwzyhMdIR8CfKyAX4UTop1ZW3a8l59In37HPUjC5VspWF6Kw/zM7s1sfVnXsjqqzqWtI9tZscav7/oXLbnME4iXP4nwikSWWgv8l4cVkVJKyQVQzg9XOHoCLjWyqO6DxoSis8GjsbG9VyOV48k9iHDPsd4PF531QoX3M8x8Epe8gMvaxldDcrGUl62jRA7vF07cHi7ehS//e5JFB6N5/HlB5H9+RlI62XUd0Bq+vhy8h5qvm3z8Hb2Jp7DU7/9rsi33zn14evviLz93in97adruKr+f/s9kbffFfkmF0+jvJ3+Dm8n7+JBt7Wps+fxKFetiV7HA+TZxXLsZ93GfIE0kff3HLWJyEJeKToJIeTxfHfCsRcvlLEnPyrjiShJ0nU78+J425FAtPmZKEMxiXVEZaLyUd3YX08keNvZflaO1xLPo5cf2WT2c2k9nzj33Y1e2cwW60fbTNRH2/YzrtiL6fj8nvey+8grl3nabPq9yZ5r1LQsDNWGcqqN2qugtALRew5Uy930yzkeUyD+jiAYLwLyCJ+na7+8MF5XPJv91H6molLfTm+VzeEc9rl7PYnBb78r8vKjyPi7p7RpL+/e3aiCT+S0rXZqs//5KiZH8Fjqfxuquv/5VM/b753aP7yJfP2t0/bbT6ftn37z1NeffvP0+e0nkW+TyFe5CEbZi7ydBeLbt9PfYX/6bMNX93IrED2xiAIR/1tPoicePSj6+thqspHfAyFkKZ9GOG7t1anSM7DuzZ9ra1nD44oC0NuulM1sMqHo1YefMwGe9atim7W/lm21vjmLUVTK9V5PLdGY1R9tZ/24EfbgNcQ0FY2tF94vWiinQfX9hW77Tr8igYyi0fMsjmCr+d4qs5WQ1eg7vglRNCG6lwVzxtO21puF8Wbtu+mqfEZIE7mKyGk6h5+++h5BkXhBnL3xJOJ7Hw9nwal570JhD+KGptptfYbRCs3LAjn79y7AwzkUdTp7Fo+3wl0Xxjk6f5eumUPUCmHF+0cUptoqXxEyz7Dgzj24xxgmm1gkhJAKn0Y43ovKzb0qelrtRAPq0dn2vDco8Frex1bdVhy2+pZ5K20dkdcpasMTqC3PYuZpjNpreRFb+ZHd2mj9kfcxe/4xKje3D7Zdry9qG00AeOGptv4oPFXMtg1NzbyMmVj0vH1LqbyBI/J2eh5ATyyKGG+iWUTGpruvtACPpe3LEJ3YFghPtcJQzOfLc6CTCVsdrmVl9D2OFq9f9tm9wYpPXTRG4L8NXX35+Srs1Kuo23sThrr78Wz/9SoGX38hlzBW9UzqQjlah5KtqipyDU/d/3za/vpbJ5uff1Pk57OX8effOn3efxP5W
U6hqj+d/t7eRN6+nrLevp7/zuGqb28i+yn3NB7kKhat99Es2Jp6IgXyxcnPFtTBENfPzLNPclNQEkIyPpRwXDqGaw3+MX2O2OsBxRi2733OvDNemlcXij9PYGVC0xN+KC69OqL6bXomhL3tqP9emy0bTMe8Sr5nE1E9Xyo/5JEQ1DbmLKBTbdNrz+Z7xzwT/VXR6L27EEVj9H7DnoVyLn3beBbgndc0eV2Gpmeexd3Le1tPKHoeyHfxwC1MiOrOhKZaMbl7uXq/RE5CcgCv2M6I0DmoiBwGkdEuGmNF5CinC+JNRMZvpz8RuVkZVT+LnNTX6y9OAnH349VDefj5JBg13FXLj0a9K6qa7SI3bz+dX6lxuIamHn4W+fZ7p7DU417k9/8/Z7H4VeT3/+NriOpPcvr/TeTbWSh++yry9adTeOr+2/mRyPPjmge5RLSGAvIg10cmrZi0+TaUtRW+ar9C77nHSojrR+ZZBGIvXr8pJgkhyocSjluyNDTvke20bHBw7vUj65dXf8/+W0G4pJ6Mlpcrsumps+plnFN3y/aj/nD37qel8r29e3efSBpa6oWsNvsV2HnprddLVOrwQkUj0Yh1ZWG7kWi0dbqCseci1TDQ8z8vLNWmYz9bHseM6XDdt6Oql528d5d5LrAoJPVwfmbx7adbj6R6CkcjCLs9jj/fhrwefn4vKHVRnGl/OjhvchXB5/5fwlPPQn2CsNVpuvUc4p89FMfgs0D6u2OfpH/Ue9cSPqpojPjIv0GEkHX51MJxYwdBSsszGHkVsz5HXjLNQ89d1fuYhbCiVzDy/o2w7XmZoja9Pmfb2LZXP+5P5FWspGOel+/ZIGsIzS1+vOd4HbPjkU00VL3HWG6U99959KoN/R+Fp1o7DGf16u56NUfVQ+cQtu+IRc/LiELwEp5qBOM7sai/ADvxb0LRtojvRnqRGzVivX7DcBY2clqFVoXidDj16zhdm8CFdJDj8VTfNN12azormKPu41ezj+pp9PZr+iYivymXZxNv3uN4rmT/s8jL75zE4ZdfXoWkhqzO9jie2/j6O6ftr7912j78LPJ7/8lpBdX9N5HfO+2P9Ti+vYl8/f3TYjjffjr97c9eyG9fT9V+m66eRvUoHuTW+xht4x96HLM/7zlIfHWHZ9N6/tGW8YRtiy1E0GcTih64jxSShHyffBjheC8R2OPhm/tjEZXLxIonajxPXsuTaENKvXRbBgfzkbDCsl75VgiipSIasR8tsZL1f6lgjNpvlbknrcFSj3icey1Wz3s8JzyxGIagGiFYDU/1PJDZyqK2/TV45zk0B6T6Gg27yuhl9dTXqw0+4yijvBeL3syPSPyFX963cd5GNTCZsm/ndkFAjmIEJDQ5GVsPuzqrNnN4u+7/wYTPjra/ct4/dZNdTv5z2Kqutjq+nATfyw8ngfjDz+dVV3cnYfdyDlt9+/H6xeBzkiJXESpy8maql3H/83URHA1R1ecav/3eeVXV3z6JxElEfvfURXkTOZzF4v6ryLefb0NVD/uriDxMtyGq385/k1wXZMUwVXxdBz4LabfRkxk954iL8AikRfeercJYl4rHZ7inPxp7DCgiCfl++DDCcW16xn33Eq2WyGM5py84DsQ8r900PDCot+qFi+xaoq3VL+xbxX4Nnl00itRXObwXlePSOtezUNOKsKuEoFZeRzEHr+05705ED6JX/7tFdFQ0ivjCseX+VSaTPsl1ND46eTsxq5le+3KAE089jyLSFbo6Tddjps85qog+WjeW9kf/a7o+BLgTkXE6ewI19HQnp/ddJCGoFy/jQWTcX+vEVVX1NRwitx5HFYrqfdQ8VXO6Us1ZrU3T9Z2Ne/PuRpt+nN4/p2gFHnoH0dOHXsPoHlIVDa2v0hOXz8iz3NOfCYayEvL98N0KR8vSH4KqlzISRplXK6o7E2zWw+eVif5jfzyvoCfIPO+QF5JY8X5G+xu1kX2Oyt8zPHVNthJ/veWsI2lrqtdmt
rDNu7BVJzwVn5WMXknhtTcHb7XS6H2F2h9cIRYXtcHnGNETeeNl1Lb0F8C7yO0+eiG4xqP3bqUTrU9Plv3183i2sa/iGEVu3gHp0RKRx6NcFtsZR5G36Xo8ppfzsdC3Z+zktO+Hc1+/mD5+FZEfzt7Ht98Xef0Dp0p/+JWTl1FE5PUcqoqL4ljP403nNfTVvIpDxaHISThqqOrb7188i5fQ1L2InM0Pe5Gffvf8vsavp+1pf/Y4/t4pfPXbT6eFVw9yWUPn4nFU/Wk9kfvz56P5jF5GDGW1HkZccdVLF+e/SE1wRKGtS6iInUeKxC3usVtOENIDScj3wacVjvf2EnqiJfP0YbmeNry0IciPnAYtEdtqS/9HQhHLCNi10nEb28B+Z/1tiV2v/Bqicc1Bx1zR+GyexoyW5/ZyjqLIcwRXD63VTCt1l15bAWT1Z4Lxsn1uc/di0nfXsNSdWQhHXrViud71cYbJCkr0SIr5PEHeZOxVUahY1BDWl/flRrmGmx6ms8BriMds8aGDEaIoIqfDVVDvjmcB+SYiP5h+q2PwB7l6R3ci8vr7p+233xd5+XIKRdVnHa1gtGGqbuf1+cbzTr4LVT27Sr/KNY5UVd9ZDL59PQnHn3/vHKr6dhue+vPvn7enW7Go4al78UNVdc0dKwzxWUcMR7XPPaJgzEJUMYx1LmuIk0d7D+89TonaW/t3gCKSfBS2vgd8xvP/0wrHLSh7P+7QRkTvD1Grvbk/bF65NY9LK/R2qSB/Jra48SwdKKDX8SjPeRzXeoVGGNY6c6ezUNSecFi0dct6wtD7b0VjFgZgP988I2jyvXRFw1YNg3ojd+/DV0Xeexsj76PVsvY9ksN0rvdVRPZGaGt4qiqhQU4qCvfzsn2WYce9yKTex/PiODcr8ch1BsKqYfU0ipyE4+FnuSx7+lWuik63z+nTdA5J3V//T+fw1MP+Gp562J/20xN8+Hyi5yH0vIgY0uotfJOJQG9RnGzhG8z7jAMvkfuLxgx73RDyWXnEGMVr86Pf0ygcF7K2UIvqri4I47XhhYTa8ii2vNDVKC/rp1e+kteiFaK6xjOhXnu9LBFTlRtL9iOfla8ODlr9R8eT2mO6V59XdysEVh1b03RalbPCcYpFnyeyPG/jGqIxEoOR5zRbBEfbnhWeqp/FSYtE5aWzZtuuTpqJRJH3X+zli5R08Rt9D6S+VuKSptuOd1KT7PETEdmdM8a367sixxeRl7MndvdyTh/lJC7tarMvJv1yvMz7H19/+3os7H88BvrfuvZUhWk4KghHfUfjYX/yLL59PQlEDUndfzt5Gd++nrZ/mq5exq/ib6snUh2b++BPdasNUdWuHyHdeiF1N6MwVW8l1EiMZp8/Cs8kEFtkp+5c6H0k9+AZJ64jevr6jNfMpxCOH+mEUbwxWSTGImEW1aVpUfjmWl5EdGC0RGXWLyyfhaxW+obMfa5xKb3icY2FJirL2FfLYHrvtWbF5CjXAWLVW3kMbDJBeA8qorHlOcxEY/iOxuFqP0s04kWLNmL+Zxceehhb2C+9IDatRxFF4+XNFtNtvuVgvYrj1WYYRQ56rA5ys7bNRUQacXkjIvXZSNz2jp3I7SyJyFVFaX9VtenCN2eVdng7haFOh5MgfPt2SlMROe1Ffj6/3vHt7SQiD/tbUehtW31qn2u0YatHuYavWi8leirtqznsn+6m9U5qGv73xKKA3dIFeR7FRxKLLXDOZwl4SRAyh4845p9La2z0CD6FcNyC7+nEFKmNFdG2Ut+Scp59NWS12t490At8rZCFNUVjb/vevlhN0COUW4PClgfy0r7xQB6rhc7lUNzps3Be3ppURe+iPkSv2xCzHb13shrDPp1tj5A2E/QiTolotIIy6+JhutqMo8jxLMyPcGx1MR2tcxzPh+cg13BW/cVEIdw6ntYVZ2NCjZfxcA6R1ddsTNN1AZzD/upVPByuovGwP+dJ/v5F9BRaLyGGq3qizx5idJ4en
TRLKzz1M7HVLWPJeGTN477CJS4i11sGIVW+tzF5i0dOwlA4FqmctBWPofe52lZWZ+RJXPosYBQGGi2A421nC/dUPaC9x/AedGgUEZl/gbd+pLcSi9n+2TowPNXzMtrPtu7IxrMVufU0Rtso+i6vZxBfsKH3EsWj9VZdyhzbXsc54tN7Z6T76o7x1qPmvnLjUqm8d92jh8ymZeEPFvySMvuOkeZxuoavHqdb0XjQxUkdmwwMPcZ3Xo7jaa0bkVPYKr4XUz27uvDQO8+uUhCO+n7Jw/m1GrrQ6uFw+rw/i8VpOr2jcTqLw0vY6uG0EM7+26nan+UqGn+S62I3Njz1Z7l6Fn+W0/Wmoao2nFU9jjZqFj2RNkzVike70qoVoiK34tETli0PY4VHhbIu+S26x2C40kbvcV/DC0nvI4mgSKyTHautri0Kxycg++J7F6+JPH2VVUcjcVi5iKvPFVY8mi0y0VtNWwsrmLaoN8O7KcwRi1lbXh7uq/UyRkIQP3sey+i5x52xEbkKwZ2Kut01XbcvL4WHgbzaeKGuXhq+HxDFo0guILfwXHrPQmL6KUHyGwSGWnrC0tpZDsYG1b79b0mOk/UmKhOIRv0scv2s2zZc1Qtd9d5rqSG+mrbbX7fHs1C0r3LxxKTWo3jftRW1KhJFrkLxOF0XvTlOp/9vX0/b+hzjdLgNVf22vwo5FYhvcg1PxW1PCNpQ1WwRHRSFB3kvEj3P5QTbFvRqitn27jmPEoURzy4We1nyHOJSLyS9j+QZr4nPwFbPF1M4fmDmLiqzVXtV0TbX4/ooKuGXvd7HqI5KX3rKznnesUVVLHueRqzHm9RAsegJ0xv7w3XAPh1FRut9PLz34k1HkXEwnsjxKgr1v1dOJBCc5iB7ItKGSWb1rEZrNmmNdp2VUZsUfrnUe/hObE23nkj11GkZL2zVCkjvedLjdF5I9SwSrWfZCkQUkWpjFyqy9Xpcnsc8Xr2mujLqNN1u78+hqdPx5Fk87K9eRl01VQWgikF8fYbd9vKPsI0L31gx6XkQBdIwHNU+83hzHIJ0j2cVE3Mvn2f7XYuYO9j05op623zW75xsx0e5Lj46a4rI70Y4rnVyrjnTOLdPrT5EHsal7WL5rJ05otaLjtvqGEVEoih7ThHLrskcT+FansgMG54q4ocuqejTNHRQ2W0UhFhOB6gXQTndCoOLOBO5eBQjB9goIpN6i6bbdBSPiA1dtdj3B77LG6CPjgdTPZORYF2d7D2Nnl1UR694tJyFoPUsqjhUAamvmxC5vm5C5JR+E8ZqyijZOx9F3ovJiwdxeB8GrOISbUVuJwuseLzpC4TVqpcU9+04XcNRdXv/7XSOqgfxANtfzfbPchWJ9u0e1vto392I73Q8GluzXs+NJ9L+WfFohab1Mnoi0tqJk38E2yjvXvT+ntz7d2sOld8r3I/WsV8awkrv4+eGIvE5WLrgzocXjp/tRIyeF6yEfyJR2GpWRxS+Wq0z2rZ1Z8zxUC6hZzGXOfZzWEssRvZr/DCjkEax6HkaI/Go9VkPZGR7432cjKfRCK7p3DnVPN4zjtmzkhYrIPW5RwU9iLaMBZ+HvFmoxfE8TodrX71Xj9g0621N8R4mji5ypSpgrTqvCElH8Ivceg0vgtCIQQ1ZVcGl9hryKXJdYKa1cM6l68aDeEkzq9pmglLxVsDF88j2R59x1P3QZzc1XcNRp0lkP109i/Y1Gd/kKupQUOrziLac9TradM/7qHlHk3eEfAxJtWkC//VQoFj0aImNLSIoMrYWjPcUilnb1ePX4xmcG8JK7+Pn4rON0T8zVa/khxeOW7P1jf1eM5lL61nS7lpeyR4isTGnPIILwyyh+uO4pXex94fd8wxmzzhqGXzGMfM2thbTuXgfp/cDFOs51DS76uokV9E2jbdhq2LyrPdR5FZAivSJSC+cFRffse1rPTciBNoIQ129k/fopEdfgKKribaY4H/F1oi97BUbGJ56NH/qZVTxd
alrMsfUHHdvUuBghO44XNNF3otKfE2KJzoRFLBW/FoRedhfvaiH/dV7Z9/YYZ9VtGIRRaGGnu6dOjA81ROO9nlHu/rqET5PkKfoYc68hh6ezVaCsMJWv82PFIsRvSKyV0DO+R7pffz4UDR+TigcZ3Lv5wsr9HgYq/lrlYvsVUR4bHXMPKHitWd/7Coicilrzrb3CMqs7mp73sCj4m1E7XKQW+/3ZLYFbaf3uufSt0lEXk6hqCJXD9UwXstetuW8rbYqykDYqUA4wGeR96ISD8aNVzAIZ/Rm6C/7N956WEWu+zOZ47Cz7Ucn8EVNy3vw4sjEo/Uu4kkxyftlN41o1DBVK/TUc3jz7N/BfN5fn/3T71O9dRiuahfMqXoflcibiF5Jz/6y+6YtK2pFruGp+oylHhJP3L3BNgpHDS1VEWhtNIRV7VUI6uqpVmjacuiV1D5p/mTatmlqJyZN5L330Yahe/ed6F50D29jz+/a2kLx3oPs6Hh6/Y2OcdVDQe/j54ci8fuBwtHwjDOBZB0qXsfMxhNDa1D5Id1CMC75IfY8rvb49IaqZo4vHHSOZts+7yjgudPFTvRZxndhq/Y5yPHWVj2QA4g1FJCeZ9E+F2nFBYanapnKaz20/CjXA6ThqnYlWRHj2YT0uGKZd1JHIalWMUT59iOIPBHHa2g8dCoOj9NtWKp67nRlUvu8o/UmixTO/en9fQAPUc/CRrZ9Rz+/CwPNhKO3+M1XUwfa2LBV3Z6cdlC4ar/ws02PvmYrCD2PZFTm0awtGiv1PXKw3bNYRsVrWPEQLhGQFI/Py2cVjT33hGe4h90LCsdPwmcUvXNCTG+egZP4uERCMPI+YrmtmDPAwjJRH9ecsbdhwCJ+qKqtOwtV9cJWUViK3A5esI1huqZjGRGR4xDXg7Z24RwR44EEuxZ4XN89IwkbdoEfW3aU20V0RE6eRVzER4XlReweTGXeAcbG7JdhG89Eov2PsYnJnz67eBO2qe9ohDQbnmoXx7ELy6BgVLGWhVJWznv8ri+fixeNd4hQfKE4O5jP+2RbP1sRqZ5F6yG0YlLFZSs81fvarLjVvyP8zwS6/R4q96h7DsTW8gquLRbX+l2viD6l4omM6qt6COeEr1I8Ph8fTTBuOU7+nkQmheMHYI0FY3rsw8HSBljxIOKLRRSAVpz0LmwjUKYlIC33ep5RpE/8oe0azz22sCGlFgw39YSc5nv6RfN3UI8Xtqrs5Bo+qnVdRMR49fwN550NF8ZRO/08ms8qzobbti75kIarrw7j+xDXy4I+Z6F3VLvDSQiPo6n2cOqfvoD+ouWmU9rl2Gi56XzsUbmgeHynbo19ZXSHasCqDFQX53T7/sKLIDRp+k5DkZNA3H87bb+9XZ8DfHu7DWG9lDVN7k3X9Hihp6xHqFTug9G16AlH/K99x0VqNP/N2OCrN7TcN1OXJyIxPDUTqCLvPZTHxjb+F3l/zKP07LvYQjDcSyyuITaX0mrDHm+vv3j8WyKyR4j2DKIZuvpYnlkofiTnydzJ52eBwvGJ2OqinPsM4Ue6EFFcttLn2LV+rJbOiPaKO89+qfcxK5eJ697Fcaxt5nG0IW+2XU8HDXIbFmhXHrX9n0a5edejF+KqYaq435P5oIupuCGrVsiCiLS2Kh7tQjk3r+jQftk07fc5zPXm3ZPjqcOXkFXvGUVU8fhcJH5BEZ4aUCWBdo5ItSunisjlmUX9bF+1ofn2+cAbz+T03nNmxaInHCui0TvHM5ub/YN8T1DZZxytgLTCzQpK3be3YBvDWidTn922Yajo/cTPKPRQIHr77B2b6r3xs4jGNX9fl4wNKsevJeKy37Y53sM1y5PtoWB8DBUv/yOgcPyOyQY8OIhf+mo5rw70GHpj1sir6OWj2PCevxOJB4N4YfbekCo/0JWLvyoUl9jNuQlFZewxt3ael7iyHXkcW95KOfdjd/5w2MvldQq276OcBeDhVqiNYsJSzWI3N6GqKibHW310y
bfPOJ47H4W6attuPohH9VbqtnoZ1buoobrTJCJ7ub7EHhvFAxZRcQegp1HkVt2g2gHvoIrE4/HqZdRVVHX7svDNdA5PNc8y2ldWqIjBlUIFtj0xhLs5Z1IlOixeWuSdsyISxaLI++cdNd8+m/jW2PaemdTjtjft2+1WeCqKSPwcbXvHbGvWEmtRPVsKzbn0THhkA9XMi5jdLipewl7vIz2P9+EZBeNnFooZkWPkEVA4fgfoD/k93isetd+zME0WrurleeLRq8cTNJlHYauLMvuxq3g+MtuKx3FO+4g9hjrgHSAfQ1at3dHZ9sSiRb9Pex7oue1OCkxXT9/u5SQ0hlFuwleP547hu/eyBXBE5BLW6uVdbOAgZXXevJbD9G8cr55FOQsl+xqI43jaNxXJhzcReZV33sBR5HSARznd9e0XcraRV/iMYtPm2e2KaHwTf5XUs3jXZxZVRKooPBxO7zXE8NT9t6uX8Q2a8kI+MxFpd8kTfQJ5SCQ6ozpRcOm2FY6231bcWVFsBaCm2ecTbQjr3qlPBaXIbXiqt3pqz/H0xDkeC+/42DRx8ubSO9CMfqt6xeK9vJE92OPbEpTYh6is/Y4qwrMlIHt+d/nc47o8k1D8XgViD9Ex2lpQUjiSu1ERkPdoE8fE1lZZq59zw7Oy8tVBbWUQNvcGY8uhaLOhp3bQ3PJA2jK6rQMDrXPXKGv7duM1n8R9Z6KIhKuq3tQFwhKPG55PNtQVy9+Eu05X22jQtTuLR11J9bJAjvE2qudxN8r12UmR96u2WqGHblsrJKODGh0Aqx5QFU3XvojITYjpTajqETyRk/P/XFafZ/TEl24fg238L7CNu9d7bXllUUShuMI0K+48HY5eSRWWUT7WLfL+nYyesI36K/K+/5ifURGNj2Ct+/4cj+Q9BsqV0LdIjK0VUtoSexSPj+FZRCMF43J6Pfi9UDg+EY8QVnOJxBfaLPFyWiGBYgSfl0Ovo0CeyO2x9YSPZYsfotZFXPFoRLZr2czBeglFrsfT8zh6n73wVJt/hDQUmUdTFs/LSz3TVbgNo9y8YuMiGK0X8oxdUMeCL4J3F8wxWA+l9TIenDa0fxhiq55FETndud/OobR7ubmT716u/TmOpj09OPYAvsjtgbN5qtIjUCxqGqqhc4jp5R2NxpuoHkQbkup5IqfjKTx1/+3sfTRNWU+cet+seEKvmBWcUTilOJ8F0sXJj0Tj0aRFIjJ63hH3Z4JyuEqqt+/4jkgUo5ouUNYKzUh44/7hPreOZ8SSe9NaXsaorh4v4xYeyblE3kFFv5uWR9F+hz22tkxLPEZlPSge5/EM400Kxe3IogaWQOH4AYhmvKM1Lbbqg/X6RH1BcYdi2A6c0Lvklcf98vJtXWLaiwSkBHZb0vpRq4q6LUVk1q5Htqoq5tvzwBON1hbPGzyXVDzuHHs7gLBayJYbppOwGuVWxGko68Gki5zFmVztROQm9FSfuxORy3ONl31Rm/1tiKlof8xKrpdVXI+3IlJXUp0OIi8mnPSonkoUqcbLeBOWezx7J9XQXsj2YtTPrQsD3U0iV7eXyCk81YafqhA0IlKfZdy/3Yak7r/JJST17e26rc8z4vN5B/ibTFfQE4kCTcyuZ4Kn4pGc4H8kqLy2PO3tCToUjp5Y1GN0TGywLqzPO15Hs4374+13Jhozb+NcEfAIwVixm+OBzMrNYUra0+PtDTS9RwYiz6UXjtqyRXtLj/eRzz22eQahKPJ4sfjo4/CoczQ67r1jYArHT4QnHluCcmvB2UMkFm1eTz1KtI+eJ3JNKjeH6IKtiMY5NlWBWsGWqzxjioLuWNi2gwFMiyYGPO2j5Sbcnvy6LJe86D2MZ4ZRLiuAityGvIartJ7rtavBjnIWfOcd9eq4CW8d5bLQzvG8gxr2aVdbneQsMr2ZGpyRqR4UT2nptvmM72y0r+K4WVVV09T2KDfhqdqc/UPxY8WM59Wz6dHzjpGHEHdfnPxIJOEhw3Y8Q
YvCEfuO+4R51uMoYOP1xR5Db58Eynr7j3YeWwycnkE0VoVl1v5Wv8ctD6DI++/FE27o3Yu8kGt9xz3ice22PxOPFksijx1rPsP+K5VJk3vS6+GncDQ8k4jamkiIoWdR/0c2Ogi3IiwSfog3fs3a8byMAmlibDVP5P0FYdva6sKtXIRzvYpot5bQbLVbmSGvehzV5uhs27BVzPc8kvZcROGKjjRbn3ohZTp/nsx5ZbyRInLxSCoHsLscg901P/JQqigcRrlZ/VVE5Li7ej7HUWR3rm+art5H7dfl2cdzuvVIaplxPAmHcSdXr+YkInuR8UXeX+D6BZm+h790VmVMcl3uU249i/r+RQ1PVS+jhqS+fb2GpB72V5ub0FZ5H55qtw9yG6rqLQgzmXJWrHniSdMzb1kmnqI6Wt5H69nbm/LYb2/bs/e8lt62JxxRxIrcHis8Ft49xl5/Iv4xQvse5vxm94jGOYJxibcysl0D9BYq+j1UvYuRZxHFo2cj8v57b3kLKR77eRahdK8x9bPsby9Zv+99Dtvv6hBaUTjejR6PWaueKp7o89Kq9fbugx1YoAC0QhFFQPY8IzpEMpFo8ywoNLciO6ZR+3ME4ByBOEdEZliNIXJ7nqgOwcGB99kTqhXheBOKCv8nuYaxWhvbNzsJYcWkiFxedWH3U+S9YBS5hrxqvicuNfR1PDc+jFcxiULyaN7xqCGs6lkcjVi8CU89QAjs+QDqgjKX+s55w/7al8vNYYT/HlY1nXdUQ1MvzzMeriJRvYmH87YNTz3sTyGpKiLVfv9V5G1/+o7fxH+txDeJBaUVTlZoeSISxaOYz7iNwrH1ORORmXBE4YYeRVvWisUJ8jDd1m3rFGOPfTxCWYG01jGwaZiOeVvR6+XD+1irvjHJ8+q4t9cxusdHYhLni1D4RfmeTSYyxdhm4tHW2+J7C119FuG0tVB8lv28B88kKi0UjjNZSwhGQq6njbmCbol9VEerbisY8bMnDr2yUT6mY1uR3VpU9r2nbGs2vjJbHw3aqv2KyuHxs2LdlkHRL/L+e/TOAVsflseJB932vnvsl82z/VYbb7DrDYZErmGuyruVVMUHj/elL6Op8ywO7X6MxmYSuXl3o668akNeh9EI5+F8jMaTmLu0a7yW3T/4jltK68IVUEXkuvgNpFkb7ZuGtk6TL5Cs0LN5uI2CMPK4oVjCOkRuzw28Dr3zplXWiji7jUINxaK1wf3fB3XgPkX99vI98RtRHdR/BFq/EdXnGKO8Hg9kVsajJb5Ebr+rHnFn6+n5vr0Q117xSN7zLELqHp7FZ9nXZ+CR1wmFY4NIfGxFSzRV8AbSlbpVxB4dexy8Y73WPhr0e2LD8zrafK3H60cWoipOfmS3BnPEV1YuE4Bb5Lf60wI9jiJ+dGPmdcy21QtozxlcHEe3R9jGfAGbGy+jY3eAMt7+WQ/lJT8QkTYE1i7GYz2R+qzkOFzDUIdRZPd6dgy+yI33UQXX8fVaj3oWx53I8UUuq7Xuzs9UWhs51ykHubwz8mbRnWDEOU1yfc2GCkYVfbB6qnoe7YI4mv72dvIuqidSQ1v30zX6VV9SP4m/OM7kbO+NjQohfXehFY6TvBeOIr63DQVU5nVr2drtipfPE8N7qCMSjVgHinFPLGdp3rZ3DGwapnv5W9Dj2WsJwqqXsadcZNOyb5Hdz7G+Sd7fsyOhqd+XFZAt76SILx69fmbewkzQekRR9h+VZxJPW46NP8J+PnqiLDpGW5/nH144buU92gLs69y+ZyIus2/1p9qGFZpitr0fIvvePfQ4ocg8wmcUi+KUkSQd+46hrGuxpqexMqjqEYnR99dqM6tfQQ+fgueBiq+WkFwiIiORGAlKDE8V5zNuZ2Fo3j5b2xvxOZ3TJtPmKDerrqqQPBoRqYJP5CoaVQjePMt43j4er3kavjqMItPLWbQO13puFu9RQXu4fsYHHqxn1YpFu+CNPr84TSYk9XAbnnoTkmpDVb9dx
ZyGp+7Ff9m9FZG6fRD/NRWT2Y6e8fNElZjPYvLFsRFThzifMwHmiVRP2GWCz8u3nlVvoRwUqAJ5Xn+j/ZQgDdO9/C2oisaWB3DOvWBuXqtfVexEMILfhff7rbREJApAe+/18kRu+5UJyEg8evYtPlL46rOOa7cSi/fY37X7vrS+rYRnFJG3Fh9eON6bSIQ9CuyPirUqeDK1xGSrbusJjDyNXlvecV0irCWoT5l7k+q5+KKbQkXMeXaZYGyVr4rFnv3zbD0vMH4fKPLt+RKFrkpg4+XjhEbUD29Cw+YjmTiuzux7fRCRS/ipm3f+fxzOfR5PIm13FnvDKDfhq1pO83HfLyGwJj/bP/fcMomeaLyEzU6QDn9T9F9uhUskkI6QjwIwEl1WXIljEy2aI0E5MWkVgdjaxv0Wee+RrIrGqK/RPglsR3i/Hb088wA+E3m9Zdb2PvYQedxagm6EvMp3lZXp9RQqWdtb1PlovjfBKLLtPj/TeB1p/c6uyZrn/HcjHOeKkHvQEldKJtx0ENASX3Zw0hKcdiDeencjehaxTTvoP4JtJgKyQX02kMd8j+oNpeeCzi7Mijj07HoEJOb31pX1q0Vrlsv7Du25oraZ1zDaRs9ja1uSz600cfK8/MxDMUL6IHJ5vtG+a9G+x3E/XldN3e2ui+hY7+PhcF2FdfdyCm3V7WG4is7R2GjorJbD901m3LxO43grAHURHF0kR9P23+SyeqoNT728x3F/603UbetxtAvioJfRhrBiqCqusKreN7saqRWOKsR0uyL6xNSD2/g5Eq+27UjcajqGqnq2U2Lj2ffsQzRB1TOxtQVzPY29XsYl6a32M7sW0fHF7wK9ktHvaiX9aPIm8cUo5mG+d660xCP2o8IjQ1ifdSxq+UhC8ZnFYZVsH9YSlWt5Ir8b4XgPPOHTsvOofJGtOrJ6UAiiSLODebXVNBy4o1i09i2h4B0vFJQit33z0my6bS87jugZy6heWFl9FYHo2WXCD/N6RKLXn6U/nljehmhGYar2HPBEoX5GAWjDTfG8PDg2tg9eHuZjmpd+CPK8cjir74lXKyYv+zmJHL+d0ncvIm/TNax09yKXV15oeOnu9SokReRm0ZlxZ7yMZ3srIo/mWUobnjo4vzb4XKPdvojE8wqrGn6q4an6zOPh7Rqeun+7hqeqQNRtKxCtALQhrPa5xqO8F5Fa32Rso4VyvLBVT9RVRSSKP5uHIlHTNN177hHrjEJLMSQV66mIxKy/9rNNy9IxbyvmiMZqnpdeTWu1ifmZXZWWUESbHrGYCcWWgIx+LyMhuGWY6RYi8iOIQ2QrAbbmsfgMIrGXTEssBb+byvlP4VgEvWg9VEReVgaF3tw2MS2rd21hYUVmi4pdS5grkc0aPw6ti7Y647vU69gjLr26qqK2Ak4SiLwXdjhRgW3ZQYU3EaPbGLIqTrpIuy7se2UQ1cqz+fb5Tu+a1HRsF/fNeiIv5Ua5hrDKSYzJ63nb1GdDWC/PM04i8nIVkpo3yun5y26PoxGNx0nehatOk8h09kpaOy88VbePwTba2jLeIjJe3dH/LA/FVSayIsHYssvEnCcuPVEoYOfVaW28bWWJaHwmWp6+KG/u5FJWPkvP6srsRfxj792nqmGpUbo0bHvx6u6tb0n72Bel91z+iGJR5GN4F+8tGNfy8G/JXG97RmW/KRwDlgjFCih89Ae99WoOr19YDm28urE+O0jFAfaxkY+Ddlt3NmjHzziI7hnIbzkjY2ndHKL2Wp7Eik2PQMzEYUW4enYV0IunVDxw1oOiHjf0GFa2JfjcClGtDupaA9Be74TXR7ti7ChXMTdO1zQNYR1frt5EG8K6exU5nL2H9j2RGp467k4CDkNV7aqvIlePpMW+NuPG42jEoq6MqqGoaustlOOFp6qn8JvZVhsNVdV0DFu16fZP04/GXs7b1uPYEo7oCRTI98ShpmcisldURl7Iqjcx2s761EoTJy+yWZs1w1Or1/GSs
q2+rTVY1u9CL2Xvu8I8WwbTKt5HKdpquudR9MTgFmGrER9VCGZ8byLxXt/hknaW3hfvNf5VPoVw3FrkZaAQatHT16huFHtDUqb1Gevbye2PCAq4HZTRbVsX9guFpc1HcRl5kcTkaVrLs4Y2a9C6EKveu5ZdS9xVvQa99URpPWSC3nrC7CDLDqg1zxusHE1+JiptuKo0yuHrNrw0KeRl+VE7x8L2Tq5iWm/YOxGZ9ufrZDqHrYrI9CrXMNRJbkNYzwffPhN5s4Lr+UAP4yk89dJfZ0dvVlU1YtEuknPYi0xHs324XUn18PZ+9dRJbl+7oWGo6km0fzZsde/YeCuwWgGpaZNJw238j0IQxZ8nED07tfGu+UxcLvFWivh9yPpnP7fScD8ksWnhCZQWS8NT54SbzhWRXv2tvlbSo+M8Jzw1S8vEoqbp55anEsG8SDyKxHWs5X38DGztqVtj7L1WHz+q0O+9nqtEjpalfArh+GwsEbJzy9qBpmKFXlbGa7+3H96PjVcHCs4oPxO44uRH+2/puTlVL7Ieb11FVPYIvUgstupptRnV2YsV+iLvJwOiWezMWy1BmgrSAfKislldNg1fI4J50sj3Bp74HWbXA+7X3tSjgnI3Xb2M6gkcRS4hrNrgcXcViiog9X2Qx0lkOgtImeQmdFVfy4FYT6OIvFtZVUWjXSgHBaYKFi+8FP8fwG4yf5HoO4CtpgnYRenYFvZPTJ6YtKpY88qhrbddEZm2vkrdkth6Nh5rDE7WHOAsEY0RVdE4xxOJ6d7nSt8s+Nuon+39yOaNJs3a2rSqjQTlJCgrTp6XX+F7F49bC0aR5xGNH1UwtmhNkPSwpjf+0wpHT3gsJRNTnuDR7ahMpY9W/EVCwRMN6PHTgTqmaf1HsLP12vrQTk/CndkenboyASFggye4PU7W3hvUo40lGP920SuyPPsecYj5cwViq82KqJ2D9/1a75sdUHkL2QiktQZto5Omdq26vT73egR6BobYVzwWum3DVr3tF5HT84+TyG5/6028rKoKoaoiJ5G40wVwxmu+iJRWVsVFcUTei0UMVZ0OJy/jcbr1MuK2DUn1VlV9g20MTz3K+0VzsG61F2kvlCOmLjHlRd4LTnE+e0IP0z0R1xKlUVnbv2o7kT2mYbqXH9lELLnXVDyNvddpj7dw7meb1hKuS8YyeGzxe/RWVfXS8F6Oi+DZftrzpyUoxUkXYx/VrTYi8bm25mD5GbmHOFQeLRLXFof3PHYi29zjRPpFpbffvX37tMLx2bAC0MvLPHXeTT3CEwRYF9bhCWJrgyIQ++x5KK3ARMFp03HfbdoR0hDskxXFW9EjEKP0qhfRS4tss8EB2rbEZ9SPzF7Bm9LNAi2OnZ439pkxm289bljO1omTHNq2PUc9YeqdP95zmjZPOvMHJ93rD26j59UO5nClY/U+TnISkMN0eo+jhqXaVVU1TcNHRU55l9VUxQjI5BdrOnfGW1UVQ1X3b9e0w3T18FlhZ7dtmCmugqqCb3LSD5C+h3T0WqJYPBo7gbSWWEShmHkRs8+eUOwp3/u5lZalY15k47HlfTobFPZ4Als23kRV9XMkFrO+WHAfo+PphaliGfT+YX12shfFYOZZ9MQi1uG1b9NF2ucTikzkowvIe4sc5TOIxUcdOyTrx5qico530pvgz6BwFF849YDiZ412si/f825W8GYNvR8fHNTbdCv6ojYE6vXqwXRrW+mH1xYesyXf6dyBT3TR9XgK8XPPoC2y7W2/eiPzzikPb+LB5nn1Yv12QGHrtBML1lvnDcxQhEa2lTBUcWww36sX27eDIx2ITU7aQW6vBRwQvjvfp6tI3L2cxY96GI8nUaghrofpvJjO2V6mczrWiU1MRjhq+KkKSCMap/01HT2C6oWbGv890TeZ8nuwt6IR39N4lFtRaPO89tGmVzB65z56a6qCEeuMBGDUZlYn1pOlY15kM5e1BnzVaAKvbc/GiqWorapoHOG/12b1O
ER2KuAyO7RROyvg7O9uJA6zzxa0EbDDe33UL6/OjKzeZ+LRYmct797c/Vja/qOPXy/VSaAKleugpy8eH0Y49oqkufW1xJT9PFdwel9qq91Wf62g8wTVJLGXMROUOIAYnf9ZHZ6QjGYyPW+VN9uLIaprnxse2YUcfZ+ZTatMNkDL6l5DcEb9i+w9cdHjeYy8dJpmBxbeAKg1SGvlRbP9aNeb3xqwev223kYvzf7X63ln7HeQP8rJ+zh+M6L0/II/G8o6jiLDOW7ThqxqHnKzOM5ZJF62p5OIRLE4ya34m+TWs2hDT1vhqVm69VxqmhWV3kI5k7Mt8l5copgUuQpKMfkC+fazJ9pQRFa8gr2ew7XEIuZndpVyS+i5PivXoJcWXafedlQuEootj2dLoHrg92AfI8F8bN+eo7bf9vy0trjiajSp633uEY84GMZ6cX8q3kev/NY8k6BZSxwq9xaJax7LtY+FMlfAZftWOWej/VlrUu/DCMePjCcwe4SrzfNWNUVRmNWXicKdvL/5Z8LQW77bE5YDbB8dG69vg/gDoVHe//hls5MVKhdjdNFVB1HRwKyVXxWKaJu1N2dgGNl6RB47xfPM4eACPXI2bzBl8dw8GhtbzpYZHHuty+unZ6vYujDMdXDSMQ09jjb96PwXub1W8frX68Omv5jyuhrrMMo1bFWuIvIgUlocR0RuwlTt4jgi51BVufUOYlhptu3Zq2DD1VZtOetdRE+jfX4xSu95xjHyPGbexMwT6KVVhWGrvkr5nrzIxmOLAXprsFcVjeLYeeIvE3uZuLRlK5NckYisDm49j4P3zKJtK5q8xd9dTcs8jJLkZZ8F0gTq9EShzZ9DJEDX4JlEovIsYlFkXl+WHtOtBOKc9tYQlb3nbTTu6eVTC8dMhG1N5I3s9VJGg+JKuzig9LyMnhhFMee9mkPtI2HqicCsD3awK+azyO0PSeRRtaA3spfsGPcIqtagaw2vItrOEZ89g8Hem40n4kT8hY1GyMNzC4Wk/f7x/MCBGpaz9llYKmom71h5fVS8G/UAtt61IZCGwlEg3SszQr62d3k+0og8KyJFRKbzf+8djoqGvWodN2Gqcr2erYCbKxzFpB3hP3oOtQ5s0yu7N8dM0zRfHHsrHAXyxJSbnDQBW7SP0uaKwLXSvfzMrlKuRet32/sNrfzWe+UqnsXMNtq2thUvJrbR8qa2wOOOgszaRJPLXl0VgZjlRfexrK9zBsg9v1XRJP6ccs/EVkLpnt7Fe4vTexHpgx7WEJFzBOSHEo5LhWAm2jxBk9l7fZkjCkXe/0BNkBZ9PnZui/m8Axu7D7h/OLhAD2EmHhX8QcFjoGWigb7tn/XYiGx7c+gRUJWBFdpkg7VM3FUEYrX8moPECtGACGeWcbtnxh/zW3Vm9l5+ZBPtm9dmNlDVbZu2M2UwPNXaYwjrKNfXeAxyW69ec7oqa9TXCBQ31guMzyNasabpUaiqXQQnCk+1HkZvkR3bB/tfxSK+xzEKT/XEJYpFFJL2mNg075i10rAucdIj+yy9lee1l9lWym1Fdm1WRBpee5qHtpiH5SOx2BKOmN57L7LosfdWY8ffCv39j4ScJwhV/GnddlvHFt62mHrw2GI+9iNK1zzcN7VT5vx2PbMovJcgureHr7e9tY7Dlt91z71wiYcy2odW+3OO4YcSjmuzVIhuTUu4Rp7AiJZdVCemC9RjB0p4s0Zx6XmSPA+hFbQoQNFz411QPRdD64KM8ucIRUzbUixmea1+9gwie8FzMPM+4kQBes9EfC80evOyOrEOr4+e97E1oIu8md7kDPYL07R9HOChXTRzb/+PkGaPHQ5oqwJB7azIqjw/6Hkcj5Cu4s4+p4h1WvGXbaNX0Qpdex/z0u3xt+koLMWxaYnAOSIS06tlWuW8/MgOWeMe0ett7BGNnp3XnudpjAQlTvxmorEqGHv6hnirqHq/25gnxsYTcpl4a5F5Ij1bcWywfYHymWeyt7/PxL29Z
muMiZ9VMD5ivD/Xm43MnQiJJlaW8F0LxzlUvIp2Js/bjuq1Ai2yaVERf1oXejBb6eLk4X9vUOzVj/ZqJ1Cf1ycxdh69PxJzB0MVcdUaoM0ZBOLnymCxx9NQEcRIdIy87ygK5bT2nuCy5wuej5pvw1yx/Uh0ef3E/fXEbaVcJFxtPt7YUdipALXeRHs9H+XWy+hN1uycOqzXRNuKxHkGCiTPM1d5fjGzseIwK+uJVNsX9IJaEYnCUuRWLFqRjQLUyxfncyttzXTMq+R7NpltpdyatH57W95yb0LIlvO8iNXtGw8+1G23PbHYErvZgLc1yYP3Ck2z7dtzXiT2Go6Ovfc7j9tHU85uo0czm/yyfY34qOLx3uLQspaY2lIsfiShWGGud1DxJph72lx6r/40wrEi6JbUG4k+Twi1+uENerE++9kTVBhOUk2PBB/+F6ddW6dAHs4oilwHoToAxR8rm2/JBv8j2Gmf1iT7Aa6kZYINy2QDu6pd1N6c8pjnfY7KeXjCI/q+UKhE3jsvr5VesfHyvc/VspX2cRCqtlbU4QAz+++FsIrchq0OkC6Q5l2T2Q8ViinrocsEG4o/G5K6d9LfTFm10W0vVBVXT7XpIm3BKfJeeOq+okAW2MbPc4VjT14l37OJ7CLbSrkt6LnevPSWSNS86vYg/vVpt62dpkd1Yh3ZPlXAiShFJ5rseaiTS3iOD2CTCUhvjCGNbW3nCJ8xv9fzaOuxzBlwz+WRYlDZQjjN2a+tROKS/XvE99M637L9ye61kZboaafnXv5phONHxROkFe8j2vdsV/qULYgj4rvNrehFL6E3E2nbE7n90czKi7G37Vuq4j0jupAqg7GeAV4kFjM7zFvipfDqrngmKnlINuvbunFFM2aVdHs+ZaGxmi9ye/55dpWyaKMC2MuPJl5wUBSVt23rZ7vf1oOJE1Q44XSEPKRHOIpJx7DRA/xHAReFp1pvofdn+6J/Av89O1ve2onJE7mtE4+H9epkn21alo55lfy5Npltpdyz4nnxkEiwDfAfxZ1NmyMusX/ZJNNcvO/Levmi32d7LYix9cQd1mvvY5F4tJ+jc87zRmId3u9Kyzup5SRpu4dHC8WtvWtbC8aeNubs66O/H5FlExfZhEjWVrWNyvWifHfCMfMIzvVatspVhFPmzbR23gPvmify3iuJN3grSicn3XonxdQRiVvbJrYf1eENxDOvLpapsmRWpWdAtcQLUBWKmTDtFYrV9rO0NQeO3gQAehg9T7eC/bMCsepdxBlsrz6tE8tWQ2+9watebxgi6g1A7fVhn2PyJllankn7X5Jtb1+0HcUKI3tfsGGeViy+mTTr/XuTqzjztluhrd42ClDveUcUoigWM0FpbauC8Z5isccus6+WXUrPgLDlbYx+k1ueR89rKJBmbXZO2Z3413Pk4UcBGd0revDK4r3fDi51XGDHAmiLednvul4fA6ThNk5YeUJRwAbrwDzsR4tnEBUZW4tCZMnx2MK7uJUAfTSZFonoHdd6Tp6euj2+O+GIRGKlIvBaQqf1JXgDPiznCQbP1tbjvV7D2lhRN8n7Hy3vx0HBH1Jv5tF6RbQtFLET2IspZ/uK/Y0G8d4FWB3g9HrTWoMzLz8TgVFe1a5XIFZFaKUfLfD88IhCUJWWiMH81mAL01r1t2wj+8iD4Q1cMR0HlTr49AamGJI6FmxsO5kHBPtuaXkd7X9vQRxN1/8q7uxqp3aF1SwkFcNTj1BuMu143k3vfY9i6hX4j4JxDbG4xnUY3ct6JsKqZdcmu0dkEQIt0eid1553MNrG62SQ68ApstkV67HXvncdLhGO3rllz0f7m20ne+24RK+D0djhxPJBrvurNra/FcFpt4/w2bZv68N0zbP7afshcr9zucW9haBlTVH1aJHYuy+PPO5IVexZMq98pW6v3jne9u9eOM5hrmdyCZ5ozUJZK8JV8QSsl6efvb6I3Aq8rB1P8GLbUZ88AWnTl9Iz8GoN5DIPQ
ct7sIVHsSpKs/qyMhkV22h22BN6OLsdES1q02qj9fxsxT66jrDv2Eecmdf6J7nesNWDqQM3W7/d1sGpF7aK+2Pb7Hl+OBJRB5NmhaQVZ9kCNUf4m5I/bUeCerL6vfL2v91GETlXNFYnkrx8zyayi2wz+0rZj0x0TnsTPd6klCcIo7o9seh5IlseSNzuBb9H7x4jkOZNHnueQnuPimxseuu88zyHth0Ur94Ed0TVA7k2n0Uoimzr/avU/ZHFomXOhIY3MZLVXak3ut4yPr1w7BFQEVWhaGftrEDKtrN6PIGGg1HbJraPfbf9iryM3qqMdnCEbdm+ibz/QfJ+nCJ7pHVhrX1DqA6wPLtsIJjlL/UqYl6lnTl1Rfat9CoqgDxa3z2GpGp9kb03Q93yXmM92NeWx9HzkOi5j4NU9D5Yb8WbscW8Aba9fJHb0DoMoY08Nh7e4NATaCKxl9GKRvtOxz1sax1ZSKq3CM7R5GHfvOck0eNohakd9B6dtJaAtGlZOuZFNpFdZNsqUy1/b3q9jfjZE2G2vOftG6GM5/mPrr0oX8T3RNr6xdSR9b3ikYhWV9f/3hgh8ixaRjldW9pnvVZwIjsbE2TjBNs2frZkA95sgF31zlS5tzC5p9Ni7r6t7V2cM/lb5V7HM7vvtn5jkarncIknMuPDCcdMCFYF3hoew6iOllBFIWfLqHDz6kPvG3rqohftejOGesNFD4iKQuxTZUZS8zTNelJwG/dPsXbYljjbPSt2iiwbMLUGba38ZxKImTjMPJ+efWS3lJanD8Ufngteee+6zK7VioBslYkGtpFA88QjDkIFPmeDVm/A6g1ocTvaH49IRKGAtIJN096Mvbfaql1J1drsE3v0TmKblZVUtVzkQY0Eo3fNLb3WI5vMtlKup46tqV5PkV1LaEVizROI0TX0kuTPEZTe9ext2/3KjoEFzy1vUsNu288aenqU66SNvcfa82kv1zHLQfwoKDs55Y0rsm0UjwL5nudR80Ruj1t0Hdxb+Cn3FIDIWvv8CG9iT9/XPMZeuz33zZ7f0qzNqN2KmFy6D8qHE44fkR6hGtliujfA72lD/6NYRTHoDWjwBt26kL0fBu0vCthR3u+LJ4AlaHeOgJkrFj2bTHi1bHuF3r0EY88gNrKvkJ13IldveVZO7aoem2p/kCgEtrdMdC3hpAnO0ou8H4DZUDEcxGGoajax44XOekQiyQ5ANe0A+Va0abr980Qa2tjBsB0co2jEfnp9jf7j/kXnfWSPA3abFpVdSzRW74WPFo1zyTyJaFOpB/9bIiEabbdEo5h0MXnettd+a4K0cl/x7jHHIA9XRkWPIG4P5rPn/bO2FfDeZ9Myz6OlarcljxCLWwjjtQVjtc4161pSf9W+996aecq9dlv1V85579pqQeHYSSScPGHXc/Laeo/mM9aJN37PxtaX/WiMThkvVNXbZ5Hber38yCYTf1GeN+huiciIOYOtHqHVsq96GnpFYpa3tC4v37NBem5I1R9/xAtVzeyjcxXL9Hq0sW787ImxAfIzD4iIvyqj9SbaZx/38n7QKvDfG8SKxANWJPJsoGizQm5v0tCbqN7BaGGbLGx1gvSjk+aJSvQ42gGtZ4f76nl0pJGWpXv5lu9VLEbXUpQWefBwO7o+BrkN67bXXiQKrb0nGL0yIvFqq637B+Ldw/GdjbitYxXrZdQxjb0HWpGGk74i7+/D+JtvJ4jtPqFQ9bbR83gEGxxwRwPrigdyLluIwi1EX5WtxVfvb+mcOnrq2oJKu9kYxOKdr9n42atrLa/7pxOOnmBDYYOgiEIPWIvMzgrLaFucPiuRiFSR59lYwbdz8sTk25uzXeLfE49aTuT2BI1mKPGHzqZF9djjHy3Q4R3r6Pm4FtEAquJZ6BVavaIuK7OFRxLzKvmeTcs+orW6KoKDlmrZ7Mdm6Y9eyzPgDf4874L1fli7A9jgdZWFpKJI9DwbnmDM7k0KiiQ7KBV5v2qpFWtWGIq8f
8bRC1WNwlPtqqpYBwrEg9n2RK6Ycppe2cbjU73e5gjFrFxPHVVaba05eJ7jgfCuJZsXCTO85jJxaQXlDvK0PgxrRXvdxgmhSOTivrQ8jgpel61JESsKrUiz9569SffGWipWo77hq4RQYFoxaNNRPAqkCaQLlEc+i/evxVb7uZZArNa1htDsqW8LKuJPySY9ovq8unCiptKnFp9OOH5P4E1X07KBXusHx6vPE6423ctX0CbzzEZiGsvgBVW9YVQGT95FtERctcouEXpLBOOSPMzP7OaS1WUneRA7OdJTrlUW66kO8lsTVp4tXoM4uWJtvOsVJ2jw2sNzxdp6NlEfbR12Gz0aOujzhNsEdgL2OLD18gTKYx0eWZ89G0skFKP6o7TKuZbZVH/w7yUa1aZ3QOZdI711ZJEF6LnLyqLQjOrzPkdi05uw8fqVCVuvnQj0yIm8nzC2tjhBjP/Ry+hNAmf2g5NnsaLUpmX3IO/e2frNWPP3qYd7icV7CaGe/Xk20fgIsRi137qfRmMAr76srta5712TLb4b4YgDrIxIzOBJF4mgrIw3u6ZlcYDoDQq9ffHEGYqwSWoL6GCoqjhp2aDWPisVDULtDwX+OEaDaRSTlrneRlsvUhFLLW9Br7DL7Cp1ZWWWpHv5kY1ntzbW69Yi8koqc34Eo/OtMgDGwaaXFnlCokEneh9tOuaL+N4NHAy39iU6b60As/cafN5R89HLaG30P4akitx6GdFmktt2bH3YphWkKFztf5H392gvfYvrzHJPsThnJlrLVQdo1evPE1DetVSpp+VNtN7DKDzVehqtN9Fuoy2mZ/9F3l/juP8ZeL7hdWnrtJM5en3s5Ppbbq+hbBx0kNOA0jv3dGwhJt/zFGraMdgW+IwiGeuzxwHPtaXXyFqC8NFiRmSdfVlDJN6rjko9a4PnW6uf2bmL9XnXpKV17veeg08jHHt+bHpEYLW9Sp0oDCv1VuuOaAk2+0Oj6Z5A1L555a1A9MQppmG9tj6s0xOQtm4UkbYOkXgRErSr0PqhmCOWMD+zrw4etxKcvemtdiKbzHZNloS2enUgvbOY3qw52qHojdIiMRnZqN2L3A7CdNsOgL1JG7yPVAaq3nlj7xUorDwPYiToPLHoPcso4oenem156SpWPYFo/0tjG/c9Oj7i5EU2mW21bA9zheLaRJMtNs+7NqNJFjy3bVomGgfIiwRiZdvWUWlbJL4PVMg86lZcHU2f9Jy2nkJPLOJ9zi7E5U2m7+CzLVcZY6B4HJzPYtJEbvfRa1vEP4fWYm0xeG+R49G7T3N/k3vqWMsT2VNnBe9e2upLRcxVJ0KWlK3wNMLxe8PeYHtEc1RXVHdkE9XjiVxMi/qLYjO6aUeC3ZbxhCuWVbslVEVPj2DMvICZba9gnCP+qoI1q69V1mOLQSmeRxk6gdFTd/Xcin4QsHw08eOlRfl6XbQmcHCyCZfEx3awj9G+e2VE3osmT2R5ggwFnUCerW8Ce9ueJ069NmydXv+x7x6V/c+gaHwcGOHigR56zIu2PcFnRajdtnaeYJXANutP9ptkr2vvGtf6caL66Gx7tiraDo4tilH0DNp821/s39FJj+5HHtjG2qwhOJ5BIFrm7tOWgm8Nr2Rvfb20hJtH67dX643qaZVfUhb5sMLREzhKVYhldq067M3U80JG+YPEN2VbztpjnV4/PHGn+SK3N1hvAOnV5ZX1bv7Zj5c9jtbOej2w76NjE1G9QWQXRI/HbG2BmNmtKfiWCs5KvmdTzVtK6zzoCS+tkM3Ae/UO4ntIPS8fDhLR84CeRjtoPTTys7rs/+i42MkuC3rZIg+eyG1IqhV32QI2R7n1TqqXUW3sQji2Dk+0orCMBG20H962PQZ4fKrXrqU1yFhyLX0kkYjXAeIJKvwt8WzQ4+ddI+oh1M/oOcT0Ua4DKs/7aO1Fbtv1RKTtv92v6thG5P0Ejv2ME9fetaDpej1qCKuuvGrPQ3tdY1iqxROv2bjD2
qJ49LyMNg37UImesCwVFWsKwS0EzlzWFGZLROJSgdpbXw947rf6oudg1Ac72RKV9cpn5z+e99X9/7DCcQsiMZqJVJGaB65lG5Wz4hJv9J7Qw9lC6zmxwhXDWTUf2/T2o5WPF4w3m4HHxYakRsfbS1vyfGNLGCo9AhHzp6JdVm7r9CV5URqWm5Pfw9y68PxZa4Y1eg5zMPk4KEbRp+W9PBxQep4JHJBG+bgPXlqGd45lA1b7PJUNLfUWzfG29Q9DVbVOFIFYTyYavb5H27i/9nN0XLy8yAb5bGLRO7d6vAloG02KYL7339br2ViBF4nClyC9JTg9j2NlciejMoEzONt2AgfHG57HwopF208NX7V9xUnniniM+qk2Ns+mCaQr0TW0lWdtzba2Yi2xtIYncImTYKnIXJve385eQWjLZ4IwO/9bQjLiqYRj1VP4SB7ZRxRsGSgebR2Dk47lKvVG2wJ1R30R8X+gtIziCU+bXmGOJ2yJYMT8aBC5pvdwTpklA93W/rfS12RuG3NvnFiHd3xwAKRpSnaOe5My2Ux8Vi8uWIUTT9nETusZY+88QTHliTSR96/oQPvJ2W6JPE804nfQM5mT4XkaKywRgHN4RtF4T3CCxOJ58zzRiJ5AgTycBIrEoCdEM7tKJIAXBWAHlJ7g8+4fKNrwt13vBVjnALaDsUdsO63ny73+WLHo3V8t3n5HY4keegXI2uPFewugjJ59y/o9V/StIVZ766tSue+2zkfv2sbyXtmoXKW9Sr+fSjhujSeKlghBb8CWiTX7ORNdWpfI+5u7rQPt0DvpDTrVNsuzq6nZGUJpbHsrqmqabc/+aKB3wxOe9gcGBekcegROjzjEz2sKxDlllorN1r5Xjldmm9H73d7rx7R6r/Bu6Ho9YF32ekDvgucRifIyT4W18+qxn3EfW8fWO888T4faep6+yOM4QXrklbS2Xht2VVdPxNp2or7jtmUrT+Pa97hnJjvvsmsAy6LosvVl3sZIKFovI3oLR7kuSLUz6d7iOFE6eiWj6zgSwN7vOE6maJqdPLJRR/a6sd5H3cYweyvitKxFPZH2+Go7tqzut9prKKwto/89T6O9z+pneyxw3GX3YQ3WEk5r8GinS2X/lngF53oje4/LGt+TnqMV7HnbW1dU1jv/1V6cMvb6rfT70wrHNTyD6AGrnFCRnectzLwGaGPFl7VF7wGKR7Xx2og8FShQo35623ZwFZ2cKCIxX5w8258532vv4KwiiuaIRMzfSvTNEZ1Z+V5va2RXzevlXl6c6o8KhqKKxANj+8OA144Xoqrbx2A7sreDLpsWiVbsZ0Q0YLX/K2KtVzjafFvnEeqbzN9R/PMfBS/uR+a5nHPdYj4y53z+iGIxwhONEd5EikAaiq9INKK3EEWdFX+DXMNTM+HohbCOcitEB6jf63s0sWP3VckmcHDCxgowKxzt9YNMpp/aNoatojjFiWYcl3jiUPfXm7C24tH2EwfTrUF0xpai49FCr5c5gmoLobhEJN5rgrnnNzMThV5dUdloYmSJfcSHFo5VMfcIUNApa/UZxaFXdyZ+otlLW0+PYPTqtj8gOLiNRLCAjdLarx6iC6NXLHpltvIezilTrWuuYOzxOFZuRs868O31cnseAJvu1ZXlWRscPPVs24gAJQpFRRtx7FrnVDZ4RYFmvRY2H+vBfDzfrUDFfmBfI7GItK6JZ+AZ+5Qx5zdw6e8mnr/Ww2g/23TbpmeL9aD4zNpAcYoeT5Hb1+hgXzzwGtR2vIlra2/Fmf3vXV+2L/hqH+vVRHsUj3ZMICbvGNiLY4t1ZPUiawi36jm5pkh81jGvUtnXewrGR4j51v24ItTQ8YNlsdza9hkfWji28LxTLeHmiS/0OnoCyGsr846hsIxEpoI2OHOAN3xvhg/rReHmtWkHm5VtrN8OTK1AHExeNoDGH6iI6CLIiGZSkSViak2BOKe+tQVi5ZhVB+Aeczwtj6Yl+vTcjASYXie2Hgzxs
teLpqMnYky2MzuvTa//Edl5iILRbts06zXEZx+tiLT2nsC09RzAXoyNQPmsr9Lx2aaJk+flR3YtPppYVLzf3+gc89K989QKPjHbrf9WuEXpg2MThafqgOoFtlUAvkIbGMYqcntto/cxOy4it+eEN9FiJ1esN/DN2FoB9ya31wxui7EXUwdu62I5npcRJ46xXm8S2/NM2nwcH1WEttIrzJYKpaV195LdN9Zqr7K/c8Te2mWqdfTScy9v6RGv/9E57dm3PJdzPPGfWjjOpSUue8u3PovkItOzwRlEFJ42PxKckbgcnDS8cXsi0SvjfbZpuN94wS39LiIqAtFLaw0Cq4KrV/TNLRPZVdvB/MoguMcTGdn3cg/BGZ2H0aRGZeZUBaFIfH3YuvD6xjQrUvV6t9tYp7eSqxdmW6ElpjwRh4NagW30RHpiz4pF9DZGfUKxKGBX2a+WaKzeYyLbjI8qGitUzj0UiB72+hvN/8Hk4WSKJzAjMYnizwtpFXkvLrUvr0md2L7to90fi3ce6kBShaJ33eDEuJgy9hrK2lBRaK9D62ncQZraZJPdygDp0bb2Gccb2aRyz31uLYG4hRjs4VHez2cQiluMJee0Y8/xTLj1ij1vrI2TKF7dWX88nk44VgTUUlCMLG1T62vVg/lr7qv1YIrk+4eiM6pLjF3kHfX2B4+H/SxOP226PbFtH5DKDaAyGFtbLGL+UlE4p77M+zGnncoAuCoWWwPeewjAufT2rTLLXZ0J9yZj9PrCBaiqEzz6GcNVNU1Z4nHUtGx7KtigfXVQ6/XDO++zNi0oGu/NRxCMW/5+V+pGMdmawMFrMBON0ecB/nYFeysu1YspTlncJ6+/mK7XhvfaLXtf0Eki9ATaSaVRrq+8QYGHfRwcG3s/8QQi3m8iYRjhDZa9sr2elTWE4iO8Xc/EXBFXmXRdo0yr7FKicywSbdgXLB+d6zh2bonCar0RTycce8kEUA+ZiItm5TLbTDxFIs7zIojcfpnZzJs4/URb78flBcpE7do078Y/FD/b+rxnrOyMpII2ve9vzH4kKgKnRzzO9eYtEYhoN6edVl97j4lnk9lWyvWytRDN7gU4GLIMYBMNdO0gzuZZgejZR7aaHpX1+lDFO688L91k0o4mzdpGXkkUmpV8ET88Feu3/cz2QRIbTJcgP7LL+AiicStwklLg8+DYeOdz5lXEsFEr7vC/5r+abQ1P1bpezTbay9kW28N+ROIR918kvv7s+b2X22tEhaWm46qndlvrG+QUwoqT08ge+qh98ryZut/ePuDYxZsQs95LgTTEtpmxRHA8Uqw8E0uEtVd2TUFZzV8CCjoPPO8t0dg0O69btvZ6tnYS1OvxlMKx1xO3lnjsrXOJx9ATaWsJYJztw5u3FWbo/asIVyuEbZ04a5l5QT1RKmAvgc0csgFaRfCs4WWcW67iVWm1tbb49PKrNp5db37EvQfXOIER3Q+i6zuaQPHS7TVlQzitMPSuVbQVufXyC6Rl+9HCE4r2sx3EegLOE3zWZm+2M9GYte2JWOxzJAizSaFW+ly7jyQY5wzGozKeJw0/e4IyEpD2PwozFJFi0iKRidtWAOJrN7z0F2jDS8f/eGw8jnINSxV5/0yiHTgO8v46Ebm9T+jA9kXeXyPWo2nFH3oe7TWPbeAk9yDv72fZhLm2fYRyLe9KL2t7z3rqWMojJ1Atc45hr1jM2tjaA+wd58wh5dm0PIctW9sHzzazq/7WPKVwXJMl4q4Hb2CIAgr71BKLkafTpkfbaG9FpN0+OrZZfZFgtDdyz3OKAvLopFt7SyQme+md1emx2VIstupfs75esbhUKLa+1yWD5mcbcEcrmCqtiZXoOrFiyRORKDi9e88o+eI9GdE5gKIMB48o5DIvo1dHJBrxvEeBKBKf85XJEExricnPdA5vzRoDUM/O87Db9EiMep7K0dlGYYmiz/MiRl5GfE3HHOGINjhIxbFBZG8Foh0DoDfSPuPoiUUUk
rYOtUHxWNlXgfqOkBbtV0bl3JojWrJyLbx6e+4NW4rSjLXFdu9xn9v+XLI68bch8vpVvIHZuR2NI9D72RKaGZ9eOEZ4oi0TmRURl9WDAs2zj0SZmHwx6Sjcqva4D/pZ03QQOZiydtCpoJ1IO2QVbcTJU7YS/FVx0xpEriEQMW+OQMzyqvVltnOFdVVUtvKi+qs8YvDdIxDV1hNueg3hwNdLswMoTBfHPkpr9T8jEmEoADENRSK+gxG3My9jJDixrLcd9RXzvX1EeidRquU/E0u8jZhuf4ciLyKKPSv0PCGH72vUNG/7RW7DUzUU1Yaz2u3onY64LXK7b9FvJ56X3rViVyO2i+W8yVUg6vOMdrxjr8do0tqOPSLxigNeGxqr+xL97npjF5ygbk1CV669lqBY0wNWaW8pa0y6z2HucYzK9orDrP1HCGl7nkdUv6vque05syp1tXha4XgvT+FcPBEYCcPMxgq+1iyiJ1yj7S3ssc8oXAX2BcVsdFP3PIrRD4jaZ1QGXFXPQEv83FMsZrZRfa065wphL796TJcMqJcMpu/1A5qdu14/KrOU9jqMBk3eexptelYH9iOa/YyIzo2K9zESg2ibbWt5LBu14W1jPy0V0bjm+fU9iEakdwCD57LSOmett9Ar0/IeYtrOyfPSxHyuCkfP4xgJRxRpKPAiYafXgU4O75yyeu9Qe/v7byesvOOE4wOBz3Y8gf3DdJtm+2H7JSZN90FM2V56hMkaHq41Ju3mtKv03Md6655zfLwyvWJxiYhdAn4nLcGH2kDE9ypm57atr8fL2POb87TCcU0iUdYSepgf1ZOJvqiNilBEUWnT7I082p5rr0Ti0ZazPwBVsYhC0fvhz25eFXHeoiISPbseMfUoz2JPnXP7731utdsqm9lXyra412A8CvlUImHn2eCEjv63g2bPyy+QpvXgu1M9sZj1vUV2fh7BJhOCnhhsiT70NGJ/om3srydIW1TPya0nRz4rVW+D5023vzueYIzyR/gbYBvDT61XEr2Wo9le4nGMPLEit881okexIhyjQa3aqJ0NW9VFckbHxgrSEbbFpOF4wabbyAq8tlHEtgSn4l1fc72CXrke2562PDwB0EtF3PRS6csjhOKSflWpfCfRZEaviPREJZar2Ni6K78/n0Y4tkRgS6jNrTeytzdmFIDZZ29mztratMmx8/rcEoPeIBLLWBschArYegNXrNOmzxloV2kN1CpegyXisSoQM9tMsK0hOlt9adm2ymflIttWmZ46tizr0bpfVAYukYgbId8TjTgw9tLUfgK7qD9VInHWEmWeWNR0FIQTfK56FDPRiYNSTzRWr8/qpJTHZxaN3gShR+t3wIoPcdKswLIevtFsq50VdPgZxZ/dfgm2bUjqq9l+MdvR6qyRcLT9zYSj4k3I6P+Ds633EBWbbyZd5P257r13NprIwXuXtmsFN0ZKWDFoRapts/XZpi1hqbdxqYhssWubzL6n9P4u9hzvudd+Jb23jqzMHOx3kh177/hmK6xWyiO4z95vVSY0Iz6NcJwLiqwegdkrKnvr8oSlQFoFT3RGbXp98srYWUz0QNofgMi74aVHg+W1BvY9wqVXCPUIrbnibo5g7BG9WXutNqtlPJvMtlJuqW2FqG9RJEPE6OTba1vrbM08Rt4Brz3MsxM2nigVmTfRVpkIiQRkRQh65dcSjVG/s33xWCL8PrNobLHFoC46t+1nFJn6f3DSrfC0IhPLjGDrlbEex8F8RuGI/wW2ERVb6PmzeF5G9A5qmvXmeZ5DFOI4+YRpaof3I3tdehNh+B5IrMt6H8Wk6X70sEQszhGKa4qWHm9q67isNbZt9SPL8/qwhlicc8yrv7lZOxVPo3feRr/jrXKD5PVE+S2+e+HokQ2aorzWQAsFF37GeqL8qF5sww4w8YdiMumZkPQEoecVtX1BoosjSo/KrkGPYGkJoLmiLrO9R51ZfkX09dQd2UR1Z/bV/Fb9PVTqiCY7I
jwvvXc9ta4bT/TpvcJ68LHuUXwPvzfom0P0/XvnDYpFtPe2tYznKaxsYz+9/kX99ezWnqD4rMy5l7cGiFa8YBn8r9tjkoeiz9p773XEz9GiOd42CseWx9HuJ/5GK+oxFLk9561HUe8P9vw/GDscc1hRpn9WlI1yfU2H5qlg1Tpx23oc7QDWji9sf48mX7H7NzbS1hIJFY+iZzPHA7aUyn0JRcM9yL6L6HjM/S5aba5x/HvqiM5F9PpZ+4od2nt6wn4Wia+P6vnw1MKxd9a7xwvXQ1Zv5GHDNEskDjOx6A0uUdTZNFtHNAjC2YdR/HZwcY1o9sP+MHkeTltvZen/KCR17gyaUhUzvZ6HqpjDvB7PXsvzsUWbFfuKiIzsWj9yre+198fvUYN9e0+w6Plsz/dokiUSql66Vy57XySW8froUfmePU+hl+59tmkVkai2lbq9a6tyvS053z0+g7dxTc9Jq07vd8PzEOpn6x2z+dbjh6Ix8wjiaqv69ypXMfVqbLz0nhVWRW6fd7T7reC5qp/ts44q3nRbP+/lKuq0rr3Z9tpAb4cKRh0L2Gsdr/uDsd3LdexhhS+CYxMrMDUdy/WOBysiA20qIibrx1rXTcVzVLkfbfX7mB2Dnt+dpUIysp9jg2T38V3DpnLuZjbe+d/6LivOH4+nFo5zyERerxCNqNSzhojNhGTWDnoHcQAZiTqbb/MyERuJYr04PK+mmM+4b9GAGduo0itKKsJmLcGY2S4Rblt5Ryv5nk1kVxXw1bxq/S3WGsRH143iCUmc7EGvod2ObFAo2bYQtPGOma2zVUeU7p3bmXCbHJtebyKW8T57fZ3DknPmM4jGe9IzIKwKTxSXKDqtqBRj59l6dWE65nursGIbVvB6x8Bevzh57Y0RsG471rBpEthiuvUievvhiXvsd0QkDKJ7Knpfsmus6pFqnVsVsVgZf7bGjdGxirxZrbqrYrvnHlkZ+67hIVzD89jK66HyHUTnZHTuWls857EuzPfscbyBEw6V7/npheNaYi8jE1CRTSTAvHRPjGG9LS8k9k3xBo2eWLN5dmAmiZ39bPurZTxBaOuz9Lz83JaRRrmMpYNdr44lwmquZ7FVb0+baL+FUFxDJM757nrqyFhzxjX7AY2uieg+U/XkRx5FL92KT/Tw27YqVK+VlljE9JZnMaqn6tX0+tszEVOdQfagaLzF85br594Bpa3L20bRI2YbBZ/9s95GK5js5ygktfI+SM/LGe0D4l0r+JsfeekOTlk7FtBrcYJ0/cPjZsNS7SQ2tm3LTWZbwNbum/3Oo8kuadhEtsoQbGOZiqCZKyI9ogVxWveS6BhUF2Txxnot2xZbiMTqZFFWx5r0jCeySR6lJ9+7VlCQ4ufK8Xh64bg2kRCteAgrNq128ebpfYmZeIwELAq3TDwqXjkrDEXWfTccltc+2DJbXsQ94mVNgYj2PSIwy28JtLW9iWt4Z1vp2U228uPWe5PeGi/cuvUDlj1riNe+El1znjdT22jNqHv1eVSuq9Y5jGJujkDU/B7BmKVhOuZlUDTmrDEZjCIQf4dsuidEUSTuwB6FH66qGqVrHbiqqqbZ5xp38IeC1Ian7s47MZ7/D7BTR3PSTdPp8yS3oap7uT7jOJjtSxtaXm4F46UNyEcxqSJyZ/6jgNVtreNg7G0bl30xZUT8cYq186isPqpkg3Q8t+bkRW0toTLRisdgidBcg55jUhGIvb9fPWPN7HtqHceW4LbpnjfSE/ZTkp/lZW338N0Jx2fAmwXoOYk9DyKe2N5MQiQ6sVzWR6+M5nmi2N7gM7Fs60LmiMnWBVERi149rYFkZt8jzJa029q3irjrEbPe50o/WuVbZVrlqnVU6LnBts7XSMx54i+6PvB6iq657PrD9vAY9Xoctb+W1jnunWe9nsJWntePKC2jeo6TPlrXS8u749lmg9JWHeitxDz8j55JFKT2v8Bn78+rT+S9YEThaNFrfycih+n973O2rW0e5
Lacptn60Stnj79nE/XVTiRHYxp7P8v2WevpuTZbYmUtwZgdix4RGd2rvfxIXOM4DOnx2lbIrrulnseqUK9c+714ZVrH0/utx+8Gxwn4nUb2rTzbVmab8SGEY+QljGgJsarXsdWuzbdlo/SsjsjLWP2sbSn2h0A/i/MZy/WWxTrs/rbaUDxBaams9lj9kegZWC4Ria3yvWJtiUDtrRtt5noWlxzrln21bE89a7Qh0g7LtkT3htYESnQ92ftBtroqlltjRVWldf5lYlHzM6/g3M+ZTWSXUZlQqZb76GTnTHXwHIm1Vp2R5yHzSPQIN8+zOAbp0WI33mI6r076iGXPI7RhFNm9nvte9DhOh1PaOF09kFbQvcmt11G3X+X2HY46mLS/8TtTz87Y2HrsOGBn0tVj9SLXBXjsGMObhMaBLtp7VO/3mVdRJPZAVtK9zz0TJUp1X6xwtuhxrLxjsOJpXCrKkR7vY+V4VgVnq/2lZMcSj6E3MZKJukzY41gBJwmya6byfX0I4fhIvBn/7KBn4hG9At4XP1c8KnaQKOLfYKOy0WA0EpA2DfcD20Tb7PnFltDupdeb1SsQMa1XiK3pTazYL2k/KlMdRH8kj+PSGVZLdE63BBteM5FN9IoNkdv7DobOtgTqXKoTG9G5uoYYrNad2WJeZEPmMccLUUm3ItQKwui/br8Ye3y+cDR/KCJRIL6avChU9SWwfxlP4lDFom6P550axqt4RKbzyXqcRKbz6O6wv4rIYS8yTLfXiB4rHVDascuL3Nrae4ra25VUDybNjlMmk25DU1U82tVjtV7tW+YlySa1vYG4QD6yRCRGNtWJk6xfNpS3t2w08VsRlzZ9K3qu85bI98r1iNKMTKhFtMR25lDRz9GkayYkrV0r1HXO7z2F40IyYdnrKfWIhGpUd9ZmJDyxbCvda8OmRR5X7+bu1dW6mDKWzPivIZiyAXOvoGwJvJ6257RfqXNOOy3bzL5VrqeO3vqWULlRe553vGYiGy8f2/TKZm22+mupfo9riEavvooNtrk236u38V54A/AR8qLB+1xQgGpaJFLxf1Q+EqSDXIWiCkQVjeph1DSPm4liFZGjSRvl8uyjvWdYoYai2oan4gTVaMoKbON3YW2iV20peHxt/wQ+Y/91n9SuwhqCsVcs9p6fmb13zL1j4I3BsrKYjnlzycbFawnGHrvMfo6t91uA56atA78DtPGuL7XH33DrUZwcO69f9lyp/I59GOHYK8Iyz2BWn1eu5XWs9M0TlJknEuuMZv3sCdUa/A3y/saBnkhvth7tbZr1gkTHGz0l0XeTlV+D6IJYw6PW69lribSt2/fy1/Is9ojEud7Eyo9Xr0i416C+1S/vOvC8jSPki8QDndYKrrjv1rb3+ltyLVQ8il7aXI/i0msosiPr4AnCahndRpEWeRs9IYflX8w2hqpimudZfIEyuqCO1rt7ERlfRMbdaXu3k4t3UUNVyx7H84U77K8eyMPuVP44icj+NqRUr3ProcBIAetBxEVtdHsw21rXm9lfu+Ceeh01X0x9dmzkTUBr3ZkYapEJPE8oZiIxm7SoesHmEjkRLPp9YH7ruGHaXCFZuYYr48KKQOwV6ksdPEivp7HHK+iJu+h8j75bPBa9HsgPIxxFHisee2wir6NXpiIoM7EYeQkF0lEg2jbwxjtAuiWbvYqW/Lf7hmUF0ta+ofaKj7mDx94BaMt+bW9iqw9z+xGVW8PrmJVplauUn1vnGmTneeU1HZFdS0xGNp7dGvRcK1UPZGQ7t16vn2udv62yn4E1zptqHTiAF7mezzjQjwahKvIGZ9t+9rxvVhxaUVl53lG37WqrF/vxJBSHUeT1B7l4GHdnEanbInLjfUT0OcfpeBaL06n84e06mB3Hq8Acz+LReuvsM442hNTugxWW1iNpxxoahqpl7Ks5RG6vW/Rgar5Nj67daFI7I7snVr2JXl6WlrUf9UVp3WMqYi6zsWIc8+cI8l6i72uOS
PTq6v0OWvUh2XHxvIg7Jw3rmbtaaq+IPCZ5ER9KOD4zvaJ2bRHc00YkNtHb6ZWvil/P1uZFoRAtsRrRc0OLLrg1xNhSsdZb/xp9qPSj2pdq/ZltZp+VqZTtrWsrWoMcDD+JyqDdXBtrh+32MPecqIi7zHaJaCTPx9qTGJX6ULx4nkuBtMGUQyEqcisyUJjaOi4hqburMBzHq2gcR6k946j1TiLTeBVy6mUch1O6Gg6jyDAZO+cPQaFu99EeA5sncnucME3bP8ptX6x3xUZEeaBtlapozDyLrTTvc894LnNSeHVbUABimUg42Do9EWlt57BELHp2awjGuY6LynHJxr7Zd1TNm5xtaxPZR33I+O6FY9XrGA3ksgu6JaasgBOJPYgtT6RXj/bV8xxG7Yi8rwPTsR4vHz0arQvcO7aWNUJVlwqXe3j0HiUSK/VU+9NjF9m2yrTKVcrPrXNt7HXr0RowZLaVmcdskao1WHoOr+1lbOVV+hfZRXxmb+M9qHiHPKGi6WiDIs6KQhRNNgxVPw/yfhEdTfPe/Rh5Ii8L5YwiLz+cxOBOQ1RfjcfRCEl8FcdoXETTwQwuz4vgyE7kcLh6GYfxtFiOitDD20lkHvfXYxF5RuzqqZ73Ub2KOqbYmfSjU59t7+ik62frxfTGK3ZA3HpvnZKJukgoVkRi5qGM2vZsIvCeU/Fc4eeW57EnzHKtdzoqawnEnmNcPfYVr65XZ/Q7gd8JXnctD3uWHrVjydpo8eGE4z08dXPxZvar4hHr8YQglhXHBgWm9RziSYaDr0iUWqKBbnahzBnQrsUc79QaItErs9RbWbXpFaw95ZZ6E+cMwlsDgcoNr3fwvrVHCr2Bvf3wrpWKmMzqjOznUpksWMMTOUcwVvI9m8zW43sXjZVB81p12zas18u2G3kgxuRvAJud+fwit+15dhjaeiMiX24F4u7lKiKH8fqMI76OwwtV3b3INVR1J9fXcZzFoi1znESm88k5HUR2++uxsWLAE472lRp2lVRdHXWUq4C06Xqs7XGanM/4X4I8S8/9OhMieN60xGOPV9Jru5WOeGMycdLQW4jeKrttPU6RaIzE4lr3t2j/54hELJPdJzJ6fwszgSby/ljZ87tVR0Us9opI1B49+/vhhONnJhO5mcCsgLbRzTcSpShC0R7FsICdOPnWxtopa124yBKhM0eo9Q5g59rMEa/VctU+9dQX2bbKVMpWyvfWtybVc70aEaG2Iv41NTp2aOv1K+tbq5yl93zqyaswRzQu5XsXjS3uNaGLg/qoDz2DV7S3AgLFUTb41dVTve1xkJsVVkXA6wgdPh6N3TltkpNIHMazZ3EUOe7O949BLiuuDjCKxQGl9dx64simR3h16rZ9ThKvRQy3UxvPG+n1K8LzCkZ9RJvMs+jleedK1JeIaP/wN8IKRVsua0cFvJbXdnQ7qq/HC2fJvqOKWPTSWtd31u4a96LWJC1+R7YMTpBY+yNs2zK96ZGNtlf9/r4L4djyOvYOzloDs6iOitcRvYVemifo0EPoeSK98C7se+TltGm2bzYU1Tte0Q0nsrGstZpq1rZIXUzN8eTNEaP39Creyy6yzexb5SplK3UsqXcpOLniUZk9bNlHC+602tCyS5hzLvVcI2t7GT27lj1Zh+w8bHlrBsfG+z1SQeAJgSH4Uw+hfva8idYTOZo0L1RVvY36nsZXDU19vW6Po1xWWNXnHUVij6OI8TgOV0/m4bw4ztGcvJeVVcV4PCcR2b8PST3K1XsoJt3uv/72Ww/k8bxtV00djM1o6rYCEG2QaMLaE51IJCQ8kWeFY+ZZ9ERnJizRNutj5OlTsgk1HCeifeRt8ryStr0t3vPY44nNxphemcrET09eCww59dKVCewq3uGjKeN9/1F67zgi40MKxx5Pm7KleGzVtUQ8irwXhnMFJc5g4EydVw7trZ32Dfspxq7Xfb4V2c2tIsy8OtbyFK7lTfTS7iEUI9ueelv1V8pWylfr6
aF3QqTaj0xU2msQy8zpz5bXYPXcWFNMVuv3WCoa6Wlcn9ZgsTUobw0eR7CxAgHFwej8j8QiCk19DnKUsygcT+GoKhovYannENaXs4gcBrOqqhGRyOUVHJNcVlXVcpOZAbLCUfantg9v59Vdz+m6Dyrg9BjY12ngsbCiD9P1eB4g3dahnhAdQNvJbluH56HRzxmRWFoqFjPbrD0E+69fc+V3N/I6RkIQPU6a3xIxlXp66fEE9l7rFW9l1t4cvO/L8+hVn2Vs1Y32ahN9h0vH4h9SOH50IvHYwhOBXp2YlpXzZu9aQrEiILFukfjExYtpyWyPV59HdIFsKdDm2qzlXVwqBLf2Ls71SGZle+tZUn9PmR5RGU1oeXVFAwlbxmt7zWtwjrhfWxTOPfcp+tZj7iBs6f0f28dBvfUkemW8Qb9+tvk4gI3q9NAVVO0iN5eVVU2aisZLGpTz+q/o768ujiMip0VzRjl5F8/9uAmPPeept1DrRAE0Qn7kHfQE4yTX68zWY+tXGysibTlvn624zIgEo/3sCcFecRm1hekS2ER9tkS/Ay2REB0r71ha8aF5XltrX7dZWkswrikWK/cx/C2JvgtPZNuxc+v443gdnUHW3k7CiEnz2owmYiI+rHCc43WcW2fF64g2LRHVar/ijYxEXMvziHgnTeUmVJnBiFZt7BHMUf29g/nMfq4Y8+zWEoheuSWevaUicQ3bzD4rUylbraO3vi2othtds8ocD2MrbLVaD7Lku9nKU7/1+Vqtg/SDgkXTss+Y7ok//OzZep5F+2fTsnc6Dt62CrTxuvjN7uXqedTw1Ju0sb04jojchJ+O09XLqKGr0+HkjTzsr15NEZHdWVy+Tdd+qvdJBZ9dHMful/7GW2E4wvZRrgNZ/WwHt957HhUcLIuIOw5p3ac84VYVimpTFZSZOMW+WHrGOUe5DYX0ojMwTduNPF3eeDKbVERR2eqzR4+IbgnISplsgmMu0eqy3vHxvLcoKCthqCjysF6vDlyoJ6q3xYcVjnPoFSstWuIxax/FoMjtxeqJwkg8Rm2jWMw8hdheS7iK+H0V8W+Q3s2ndXOJRKrHGoO7tUM47+lN7LG7t21mn5VplauU76lnbt1rEP1wZTOZIvmPurfPeO1W+jLn2K3hdYxst75OW/YRFI3L6P1tbonKnvasgOgZfKKowHpUQN0I0bM4tK/Y0D/1KO5eTp5Gm6Zew9J7HAXGBHuRaTqFvu7fbldgPZ7fHTlpH0xZFMlWIE+QjiI8KuuJSCs6pbgt8t4bE+EJDk9IRkJwKNigGI0+C6Rn/bSDfYs3tvBEAYoHbzxmzxN7fD3vVNRHZO5rUSwVgdc7sTTX6ziX7De4ao9lWg6bVh1WqHpple/tuxKOS6iKzh5x2us1bYnHar2eKPTqrgharz1PXArYZCF1tp6ltC6CtT141cHqXO/i2gPupXXOtc/KtMq1ylbrmFvvFmQC0NL60WhdV63yXl/WovdcmHt9rTHR0QtF4/3IftsqA8NB6r/RKBiy/5X+jXL18l3ez2iUwU2oqhGNGE6KK6pe6j/b3fyGH4wgPJzqO5gL4hKuejBtTb5oamEHpOcq/X46n1Go6LjD84R43rVKHzMPo6aj+NX8yCbaVloCEu0tPYureN4lz2lgP2vaXDGSedTmePHmiEWvnDcp1Gonsp1DdE5agaZ2UV+i379I7NkJH0/0D2CHaV7/Ij60cOwVXiJtYWcPcqVsqw9YXya6ql6+SDx6Qi+6IXi24uQj0c0b86JwuDmD3V6WiIq1PXHVwe9H8ipG9r31V9ppla3W0VPXGu1UmSPe8F4S1TvH67c1vefB0smOLT3dlXrI9rQEA4oFLOv9oVjyvEj2r1XPKOdQz1Funm/U/7vdNXRUP9/YjVcvpIjceB+Vy4qq0yl/klNYrIiIvJ3HC6pEXuVmlVWtT9/7qNt2oRu7YqrnfdTto8m3YxRNH6CcTVM7b/Iax05iyovE16En4nQ7+
l41DW2GIA3ryCYqKkIoIptcnkzazvmMNlG+1mnriMZ93qRdFLbZQ6849MpUJnSye0fvWLTyexK909GbAEBBr+f60bHVfE3DCQIUitUVWSM+tHCcS49XsELmcYtsIpaKx2odrfaicpk30RPFNl/k/Y3G2mOZNZgrSqoCaK43MUrbYvC8hu2a9lmZStlK+Wo9S+tfSmWCBql4J7NJnaierL5eqsd9jXNtzfMyKzenLnJ/PK8P5vXUhZ63yJs0gK3rrTyLQfUmXkJRraA04vAmnBXSFXzecRQjHs//j+cR63EUOR5Nv0eR4Wi8jKO88ziqgBL4j6LPE+he+ii3E8v62bPJxKPnXfFAsYjt6GdPMHpi0NvOxGN0XKI+ekTjDDteQw+jF7o6Qn7LI+kJxqyva/5uVo5RdkxbQtKzadlnzPHKem15zh787rI2JkjDsrjP353HUWSe13EJVdHZI04z4dkrHkWuIq7HVpz0yMa7gWAbAjbZYLd1s4mOY+9Nau6Aca5nMSpbEZ8fQQCuLRjX8izOGcw/wvuWURF+SsVjP7e+LbiHJ5ui8ftm6zFBj6cCRZdIQ/gZYfhupVUVkU4H9BUbl7DUs2g8mLTDHto9n/Dj7lRWvZBazoo6FHyVfcd0O66wdaIAtOMJ7IN6xXoG9t5kgjcpYNNREEaex4pYdM+Bjv57Iav2mInceqRaohDzUXTY93cqnnCZK64q5SKbJWJx7mRS9l3Z78Oj5SzBCQBPxHl1ZyLQK9sjIFt8eOEoso147K3Ts0fx6IkrFFY9XsNWeiQIe9KRbGAVndxV24ilLyDP+iHSv69LPIpe+Y8sErMyS8q1ylbr6K1vrbbmkN1vqtddq4+Z5//ezDlvHnmu9dRH6rR+Z5cM7CoCr+Up08/RIHSEP5v2rg0jAt9tmwVw7LON+kyjFY3hexyhz5MVkud2hvNgQeu2K7Ha/7oPdpCJQsvaoPBCcXU0/9E7a/tsQ1VR/Fz2Cz5HZN9pVSxGAhHTPA+zTcP+ZAIIicYMUaiq/WzTVVTaNPyc5WG/vVBJBCcKIqrev4r4ax3bijdyDfCYeE4DL9/zGNt0KxBtnp048OrzJh1wwqbCpxCOc+jxCHplZWb5zLtYLbeGqKymY17mWWx5GitexqUX8FKxsVQsRunV8ku8npntHPuszJx2Ku1Vylfq6KlrSf1r4rVZmbzqmQGuhLluwRreZIpGsgaeCFSsoKnke/VEg9ibcsNV/I0mA9+reFNP0OnLgjjTqc7p8D5fF8MZR5HjeHq347s2dREdWDwH90OPn/eqrcgG7UbYHhx7LWMHyUdjK+J7ThAr4rw+alpLIEaCMqsvahf71sLurwVFXiQUNC3ySI7Btte27XPrt6fy/SAVoefZLfFEZu3MwfNC4rHwwksHJx29kRVvIkYOYjnPu9jzXX23wlGkLdrmeB0FynhtZPW2vJL3FI/YtsjtrIQnBL0bnCciFc9LuyZzB429IuujicWszJx2lrTVKluto6euuXXfm16hh/veKyR72uqtL+JRgrHS9pw6yechEpmR58grb/+LiBumiq/YiDyKXljruzZ3t0Lx4ITs6POSNlzVbWuKhU4kmgRsoiYioYhl7Gcdf+AAOyMSbr1exh3YiFzf4SmOvdee7YdNq+B5p9BzZPcVx2melwnT7fgtE6ItEalUF1uxVESiZ5d5JVtCMmtnDtH5Htm0xLgnFq0tCshW+3bCQe2jyQmPTyMc54arLhGPVW9hy0OZCURsJxJ3lZDTnhMV6/LyPeYM2te6WC2VfswZdG4h/O7hIVxbWM5pq9putY5KPXPrXbvdCpXroBqKqlT6GbW7pkjqOV5LJxm2EIzVesl96P2tbw0U1/I0tARl5DW03AjIUW7CVEXkJqQVy03TbTs2DBWFGgrM484JWzX7ouVbQtl7tMQTL166bUPHL5P5LKZ8K7Qu8vBFIlHTVJx5Hkfd1jp38v4YRe1gX6LjaL1CHlF46gQ2KBI1V
FXT7Dba7MAG++T1Efdn6f2y18vYyq+KR8+2BxT4InmIahTu6x1btMVxfLaqqr1evHJaf2XfP41w3JI5nseWfcum6pUUqXkTq32JxGjUFuZn3kitF+kZ3C4ZsM8dcK4h7NYSdFvbb1VurfLVeubWu1Z7a7Qx58cr+yGvtrukD3OP01pij17Gz8sWE4xLyPpT9ZqIgAfSFPTE4RqglxHbGUd5F+76ro5GG6Njg7uTeUY8j5g32R1FQGGbOEGAHsXIUxiJxiGwt4IS2xMo5/UzIrpnVT2EWIfnZazYVCLLNN37PuY8jlERfFVvY891mZVB7H55nkUcI/d4GDHU1Io/9BxqWbSx5bHftp7WZIzyqYTjXK/jEiJxNkc8Yl3VsNWs3kjoefVJoVzUFwGbKMRu7iC1Otir2mUXxyPE3D08hRSL27SzFZVwl4w5P9KVPqzFmt/7s51DZH2qE6H3ovceHN7jg4xpWuedeC2mhRc5ehnxHuMN5L2VWqN0PJbewDzKjwSjyPvQ0+i/2npeRi9UNQpZ9frj7Y9H5M3DiX7PIxiFqqLoRpE5QHpLnNrvYU6IqsdSr2NFoLc8vxUywdVylngisvKqlCyt11njCdaMTyUcReaJR2/mxatXgrqr4tFrp2XT8i5qn/BEqYrESjnMw74K2M2lFQaT0XuTmhPumZXzyjzKm9gq1yo7t1ylfLWOSj1z6pxb/7PQ6nPr+us5TnOuxTUGC2tM/sypb07d5DE8YpL4HhyPclnk5l3eJKVQ12YbG934skG+Dqw9T50tN4AtekywXtz2+uMJO5G2cBzkOkAe5NaL+GLqQFGKwjHzevZg998TaxOkH43tBNueiLT5+DoOtJHks03L9sGj9V1mtpEY98r2enwjm9b+eN5Gkdgb7KVjGh73SNBnNl4fj1KfqPp0wvEj0hu2mnkmM1rlUJT2il6vDWsrhX5uOWCbK7SeMZx0C+G3huC7t2h8RsFY6dNWg97WTGMP9xZPW0wUfMTJAfI58M7RrUTbmvR4IO/lAbYismovcisUcdumRWGpAulYj4CdZxsJxpanC88fzwsVCZBIaFvRWAmXjIQ7hk/aclhfFsaZER2fHo8iTkpktq30HltvvIziG0NRPRs8xl7Yqvc9aPueDYanemGwLT6lcJw7G7kkFKbqdYxsK+JQjE0mAjF8ActpXtQulvXKV22srd2PFtXvb23h8NHEZVamVa5Vdo3ylTp66uqpb07dW7S9pJ4lAnPtiIA1uYcXcO53Tm/j5yMaE2Qz8ZpvvU1efUdIx/K2HZGzeNzdbh+nk2DbnS/Q6XDania5LHqzG8920IZ9HYeWPU63IlXrv+mbiS+dptPnqmj0PF5zrhtPmNi0g5OH7Ub1iuQC0HocrTdxZ/Jb3kcMW9X6IqHpiazKPRk9i3Y8Zcd41lNoF7hBIYFhqva4aplRbq+P0eQPUB/2cY53DmmFpKJNJhLRtidkdQ7ePQXFN34nWM6bHIiug+j1Kl4ZzwZtW3xK4bglmSj1viAtIxILtKjulo0nAgXyPcHmhaHaPHHye0WkBdvL6BWY2I8KSzxnjxZ8S4TePTyKa9bTU9/cutdqcyta/ej9sZtzbHoHNmsw9/gv6cezfOfkliUTulF9FhR+2b0Xf+esMPS2bf1HuYq56XgWhy+n7YutCr7xuj3JSRxqqOol7axUrGC09VzSTWemc/rxCO0Bmh6FO+JxWQsVi4rnRbEeGiTyBOq2JxCtV/DF2EciEr2SUchrxeuIfY7wzjlNnyD9aNKtkFRBKGa7KiK9fGuD++H1d+7zui0BmXkSs7KVc2gO2f3DW70UxSROMtjvLAtJjURk5TUqODnQ4tMKx7lex49I9sPa+6MbidzM+6k2ErTl2UesPQD1+jGn7TmC7FlCUFtl1yhfraenrmp9c+teo71H03NtzWXLa9LjER7lj/a9k1taYu8R2IFdaDNdvYyY3nrOUYUi2nlCcAoEolcGV1Td+vr3BqvZA
DYKqWuJxsFJQ88glvU+RyGpUair10bLO5aReY6s8BgdG/TqRtueo+AY5GPdURimtcnoEXSREGyJzYo3s5qP52IkoL0QUkz3zvtKuKmWFSfNK+NdQ9amxacVjiLzxGMmgGy9ktQd/Vh5/amKsh4bz/Mohfy5NpFdZt+idxauhyVes3uJyyVttcpWylfqqNZTraunvjl1r9XeM5J5+5+ZpcefgvHjs8Ukb+V7RRv0kmQDXjtAtwLR+62ezH/1+I1yFWsXz+JZsGn+dJDL+xcnkcurOnBscRmwm/o8r+IlhPVo0qarx9N6H9WrGXm38HhJIb2Fehu9QTV6wqphi1bQifgeRw1DVbuK99F6G61ARLFoRSN66yreLxHfQ45ePr2GokVtNB+P4VFujyeO6dD7KHL7/UQi3rMVme91bAm/zIPb8u62zqVe7PeFx7Xy7kYrCPG77/EmRp/xHIlWxs341MJxCZWZyuwHr1c8Ctij3RyblvireA69WQlvdjAbuFZPRk8cV+kdQC4VTmuLxCXttcpWylfqqNZTraunvt5612hrbbzJl62p7vOWAnPN476W9+PR5wLZDv3txTA8m+eV0WfC0ENjfwutt8V+tn9itq1ovNRpBOJFoE0iw3n7+HL9PE5mf8ZruZvfSnNRaFiqrs6qafr84qXN6SoUjzb/KLdhrs5+WRFxhDQc8FavVytuEM9rEtVhy0TeQPQKqgDUNLtiqvfc4oupewf1ePV7Iatefz2sqBB5P8C334P3nezkGvprv0srCvdQr/0u7BixIiK1fc/75X3OiI5J9RhmIa2Z8PSIfh+935HWhJPWZ8Ukfq9e6GhLRGK7LZHZCnPN+PTCcYvZzCo94tGj6qFs1dWyqXg5MzLblncW69maRwjGJeW2LFuto6eunvruyb2EQnXfM7t7iUqLN0G0Vl1rQdFI1qbnd66nznDyUL16+nzi4broDb5uA0NLdaEc3M54t0iOOfnts4/W03hJa1eft72wvHogLZGHC8uhaLR5dtuKPJtv06IQV5sWiUYJymBfovtt5F320q3AFvPZ2tp3ZqqQjMIjrY2moSfYpnthkJH3q5eWKMzyWwK9VXeGZ2vPz8z7h95iL98eV6wL2/HS54S5Vvb/0wvHrVlTmHo/YlXxKMbOOyHWtpHEFst45TzuOVhdw7v2mT2L1bp66uupc07dS9uqsrUwrtS/tbh8FnG15rF+ln0iy8DvEQfN3u+I5zm0efofZ+Hx/9BIv3gH5TRI35ltbeM4iRzeRA4vJ/F3eDstkjNMIrIXOeyuonD/dto+7k42h7O4HEeRA6oqMR5H6zU8iBz2Vy/j/u0qKA/GE6nbh/3VE4reROu1Qs+jmDyLl9YCvV4oVDxQzNltT9R5i9pY7+MIaWqH+d4iO17dtu8oaHE/vPveUa5ecZtmvUn2OxK5/Y6sGETPojdWy7yPx6Au7/2B4qRXaYWRZl5FtG0J9Tlex+p4Dfcfv+OWN9HzBGYivbIgjucZrY4rvgvh2OP1slQPZiYeozqiPnn2kUewatdbF85YWBtJbFtlvPKWNQd2a4qZR3kE1xC31XqqdfXU11Pn3PrXaC/jGb2nIs/nsVyDrY41BePHoNf7t/R7RTGp/60wsrYCedYjg+XsgBoH1/YZx8P+LAj3pi0jCMfxJBaV4dzocbz1TorIu5BVDUG9iMaj3ISoTufPByMk9TlIFMfe/uFxio7XUrwQVW/844mySDSit9E+z2jDVncmbYQ0KyLFpGObkaBVvHMenwX0xKJu29duaJ/shAZ6m2yayGlSw35PVmgeoaw421awZAIIie730T3AG2Nn3uSoXNXDOPd31O5XJPgwz34fVREZlel5NUf0ucV3IRyfmR6P5VzvpveDXPFkZm1mP/KVfkZCdC5zBp1be/ey8kvbrtSxZj099c2pt7fupW21eFbBWKF39vAZoGgkFbyBULbghuf9ij575wp6dLJ+TfK+P9bbdhFi552YJpHdT
t6FjNpXcNj3OI7n9z2qYIxWT9U8+15GXRDnIgyNgBSRm1eCeMfFEwW4/5W0CpHHDW1a6WOQZgWjmM/ipFsvIoavYv2eaBTx27HlKmO6yFukgnGE9APY2GM6QhqGTOLxt58jR4Hn9bJ1ivns7VOLzLOI9USisSIYq32y+5zVgcfX5qMgx+9wkvdt4CQApksxvRrCGkHhWKAyE9oSS71CK7JHW2+QmHkMq95Ha4snbyU8tTVgizyta7G2GHqkyKyUr9TRU1dvnXPqnlP/krYyPrJQjKjs073E5T2PL8Xic1D53fTQ3zgt3ztZit6WKOTLpmveZMoOcjuosgLqAGVHY3Mwf9ZG83YiInuR8UXkMIoMx5PXT+TsSdyfxOJeTgO04/EUxqqiUUNXPd4tvDOdyh/2ZyG5P2+bNA1N3b+d88/93UPfdf/VQ3WAz9YGPViYXr1Go4GygsIgE4ORN9Cm2wVxdBs9jZ5HEuvNQlWrXkeR955GLGe9jweT5nkXPQ9iK3zVtm+vi8j7qO1H3jAUURkVYVcRiXM9jq37TpRv963qcfTSvDLoefeOJ4pVK0S9BXF6JuKU70o4zvXYidTFoyRtZJ6ASDx69pnga3kMq0LT2lbtsYxX1qvjXqzpLcvqWkPkrSkUq/X11tlT75I2lrYX8RnFYi+f5RhQLH5ucECDv8etAU82EVupww6srRCynrhMRKrAsEJrtz+Lw9drqOpxZ0JQz/mjeeZRppO4nIIL9+JBPAtGkatA1NDYy8qpZ9F4OBjRuL/to25bwWz3HY8fikOvm9V7jucV8UAhFok1FX1emk0fnHQrIq1QzF7Zge3bPmF/I+xrXhR7XO34To8TCselAhLHm/aVKejVFPN559h4RONMpCX45ojIlqicSyaWURyioLNpnuiMPLzW8+2J9p5XebT4roSjyPbicQlR33ra9Wx7RWlPH1piOWvrHqwtru4h+OhdXKe9/397Zxeq75vV9XXt30wdZFARDDYOKTEdTCdjSAhGGEGlJ1MnogdmEUwHIyR4kHlShx2UUVDChJKCZYJGEtKbFBGUpTKk4yANqegwKVGoEOj/99t3B/u5/nvttdda17rul+dl788HNvt+rvt6u9+e5/re33Vfd8RLEUvwAKIROpXIn1Foq/c3alOX9drSjtv7g7X70yDv5A7q2VZ7uGp/RYfIo6C0r+OwdNH4voh8J0/CUr1QVW82VSsC9bbZZfud6u2zI67TzIES8c8FLQ6bSe+i0Oa36VGoqpi8UTu2r9k5a8VXE18w9PDU+8H/Xs62a5f12M6257la9kaCrScKgxyNC9cKxmxdVTDOaAW97VZE97o8p9bbL1bk2+Nohb2uw/bDS+9EIasjXp1wFNkmHveoPxOCmXATU27kFO6RV+f3Tu6oTNbWpdhbRL0mkThb/5Z2trQXcelzD/YFsXj7eL+Da34brTOi6+mDoe4UvnHWaYdQnDLaZXynPos8unGi6u7p/R16fUDfTPl2LyK//TA5jn7NhsiDSHxzGp3d3T06hHdvHtN6vvf3g/qS6+9u7C5jnwynL79776nb+H66PL7XT7uN78yyTdNu432QJvL0ezhyLj2iwbXIc6dRL3dXUNRn6wDacFPPZdQO4ht5dBq9UNaep6nlqH/e9nQ8Ma7PMX09RC5j5j7qer1+vFX193M4cy09kZY5+qNjXglVrTiLUZ5qG2uwTp/I832h91E1hDRzIO9M3qxN/X2pv7+qv6uvUjhuoer+jZy4zIXLymbOn+cqzuQd9cf2KROSXlnLEeL9SHGzl8g7Zz2zdW6pf207e7TpgVh8WSAWbw/7G7TnTdvIPcza0IMlXV6LRZ23998uW4Ek8vS5xr7cxeIbVVbnuTsJuy4A2yJy995zQfj+s43dRTytv1M7QM/G+v4kO6f//dUdfQbV/vqN/mqOd/ePffPCUe1yFLKq90dP1/tL1Oe1jJw6LRqjcNEuBu26D6hlG8Kq82gxqF/PoUXnr
HAcbZvnVnnL2X/9ipjOW3kUpP1mh5y2s0/2q2dqtX30HGrrbNpts+ssVQdwVixmIjJr1+tHdg57NzqifWHzZSGkmYCMynvbVO1LxKsVjlt+wKri8RqIBGEkTjMB2cutEcNRXRnR3eU9mBEV1yYYq3XN1rm2/q1tbW3T49pE49ptOzIy4lZAML5u7G9OdEd9TX1RSKVNt2meILbLepCnRZYVn3dK5N2JPHldx3L/IBLv7v1xR3+vo8hTx1GLxv7aDSsmn82sKs/FXe+jXbb7xTqM0XHZ8r2cjSsiISbii0YbLpp9z1ohql2bZj6LSbszdXguadZuR7tFWpRYgdJUml6nnXWR588p2jBXUWl9WSSuOxKvUT67fSOqgtF+ropGry+j397RzSkrnvVn/f3jiUy7/6LPuv/62vX66Z0rXp8r1+irFY4i5xGPozayembFXeQMrimT3SnyfhQyJ9JjxrWdZe2P0yWE3Wt1Fre063FpoXiUuKnW+1IEJiLxdWJ/J6PfzSxd5OnvqRVpPe2Nk9bzdkemD/67KGrydACsZ0zVzqKecVQP9t471dsdHCtA5LcfnUWRh/893PTuzaPj2NffqZ1ghWOf/EZEOY6nWVvfU47j2986OY/3D/3qIar9/3sSh6p6jqMnNj1B6aWLKVPBE19W5N05aVZA6lBVu/xG5YnCU+2EODqPiO9a2n6PRIgNUbXvbuz16TTvmtJ4eXTeNyqPqLp7O3rCJ3HyNlWP1/7oOFdcx1mRaOvMBGlEdtPJ4t3ksuLMy2P3X+Qgen3JykbvgNT1MKtqgXOJR0naWevyZeXWOIqjdkZ97FT25y0N9PcUd0c4ijP1rq1/bTt7tu1xqfPo2gTOGvf+0lzbPoR1bPkN3aNN7YBUnMhIXFpR+cQVlKcDdy0mtejUwlG/3kOn6VBVkYdB2L2IyNuH5xrfvidPQlC7oOyv4Gh3Iov+IVYhqiKPYlFEnjznKOLMsCpPn2t85/y3Is+KwhmxGDH7Pe45hlaURS6SFpJWUHqC01vW4adWLFqBav97/R9hz219ruobHNalvDNlertvTB7vevFcSJ1u2+ifxazv7VsROyPSorRZsTgSipXvsZnvOm/feO3qPHrf2OvCis7oOERldbqX3zqZEa9eOF4TI/cxE54ztn9WZqvI1XVk9Zybo4TYJQXjGtG0ZbB+TQ6jyGVE462KnSh85dzc6v6D20EPsvtgyHsuyDsX7Z17LRCt8+IJIi0yrYupQwL1s2Z6EhI5Pe+o69ROpPTyJk3nFXkUhyLPX8Hxfsiqsx2RWPTEYeYg6j9L1Vnc47vCikO7zgo5EV903Dl5dd3irLOi0ebx2o6wYY79szeT6qLy3Jv/iyqn89j+eU6mzbuY/DqvmM9emVky0TcrGiuCcdTHkUvc8+h9I/J8f9h95q3Tx8QTlboO73jpGww2PTq+GQhH2e46iuzjPPb6Rq7ejPuYibjojoQtVynr1eHVc23MiI+9ReJMnbP1rql/a1t7tR2BUNyPl7pd8HLQLornqGjnZeTA3Kv/eoBkXUSd1z6f2EXVnVrWorQLwN6H/tkuizwKET32eH/5t+X90NQeqnp39zCZTQ9R9UTjk313r8SiEozv3j6dCEeHp3b30f4tMg5btY6kXrbr9b7W4nLtd5Idm0RO38hF7MfSW2/dSB3Camdh7cuecNRhgDPC0Y79vPHZEvy3E+LoGyteGxotJLwbMloYWSGSOVuzVF1DL19WduRAZn2pbIcnmrP3OGoBaL+HbJtRCKv+LvTq88p6+TOGwrG19hER+X4R+dCp7k8vy/L3Wmu/T0T+mYh8uYj8ooh8w7Is/7e11kTk74nI14vI/xORv7gsy08X+nJRtojHvdsaOYhr3ceKmzgqm5XvdWhmndAjmf3COkrUvWSxuKX9CAQjwHWz9vdzTbRMdG1m6ZGg1ELHcxb1ANlzExeTrsWSdRnfyFOHqEeYeoKy3T86hu9PkPNG3g9RbWpj7
9qDSHyyzUYwvj+T6v1jH3V4aiQEu3AciUD7p8W2mDR7XMTk0589ovPMun8R1gGMPt85f9bB1E6kXY4ErN4Gr7+ZMNGzm/bP1lWMXMZ+LltnybqTVozYcvp61c/yilrWeewxXzsmnBWMWXrmPlb7l+XzRPPIbNH72avPCjvPRRR5esyiYzFyIEdU8rwVkW9fluVjIvLVIvKp1trHROQ7ROTHl2X5qIj8+OmziMjXichHT3+fFJHvLrRxFWwZLO49uB3Vl/VV3+3Zu2wvP+O+6b9z4P2QVdvOQmyydmbqrjC7v2b6vbWtvduPOOc509l7GwDgKaPra+sNPns3Pyu3mM+9jHXErEjKlq3I0p+tUNNlrHh7JyeH8K3I/duHCW7evvf43sX3/7SbePp7+97jqzfeT7+PHUWv7++Cz55gtM6st7/EpEXHaM33byQiNF5YqRUWkYjwxKKuz1v2HEeR56LRilNx0m39VZHaTFkrisX57+X10uy2eOX1dvR1MzeYbP9tfVHdUXqUx9v/tv3qX1TfSMRG55rtS3TzQafr80Zj05/dsDLbETF0HJdl+aKIfPG0/Juttc+JyIdF5BMi8rWnbN8nIv9BRP7aKf37l2VZROS/tNZ+T2vtS0/1vGi8uwwRI/evUt/IBczKV8t2RnVoZhzJS7BWXB1V/zldxbXt7d0HD5xFgJfHyFXUTkf0W3Rnlm2dkUvS1LJ2CEWeDpC0+6eFnA7peyuPAyz9jNh7ZlkPru3vrHYuRR7DG+/Vf+1W3p826u5ezc6pNvxOLd+rL9D+DGPftt6Pt+qzFbJduL5Vn98zeZZTmg5tvTflPFGdCU2R+Lt/ZkylsQLKiiw7uI5cwUgs6nQdtuqJtzdOff1zZ3R92NDCfp7a9y9GIak9TU/Q1Ew5nd+23/unHSrPybKOaMWVi/DKesLOrqs4i5GIi9qdRW+bPgZ3znp7PK373s8zfa3Y/WzL2/1vzyGvbZ1/xNQzjq21LxeRrxSRnxCRDykx+L/kIZRV5EFU/rIq9iuntCfCsbX2SXlwJK+K6Mfrku2NfnhHVAXoFhHr1dc55/60nFNgzbS1ViSt3Z5rFYsdRCPA62GP31lv8GoHwNE1Hg2S+6CrD6ozR/LOLNvZJrVrafNYp7Gv6/tFi72+rtPuH9t5Z744dZv9sxZt2jW0jqJ2FkfOqrdOVN3WgfS+321fbb9tXm/sMXMORcLBc5qsYzRyHG1/rFPnicaoPxp706Tn7eeoLqvP3aby6fb0uaiPmT4Hu5DQ9dsbK1o86v+9nO6jPo/1cZ0Z01aE3hGCcc13lBaIIs/FXW/LO552v+t0W6c+h/R+ten6eNm6bXqVsnBsrX2JiPywiHzbsiy/8fAo4wPLsiyt6aj7McuyfFpEPn2q+6rGcFt/1LYKvahOSeqtir+sXzMCMutLVK/HXqJyrxPoSLG4pv41bWxtb+8+ZCAYAaCz541bXZcdVGtnMRKCtowdYIs8f+1GH6h1J6cP2vSg0HO2vDzaJbJOpRUNIk+/S7VYe6eWdR9HwtEu92civTDUe1Of3R+6T4tJE7M8+k3IzpFoTFI5p6yA9ARJVLd1Na3zGIUVZkLGOlTRcs+rBZv+b88jT6jY/tl+WpHR+6ZvhIhaZ8va7dHpEZm7aNePRGNVMGbHOltnIwt0erTN1eNp023Z/tmWXUy6dyNBY4V+Rkk4ttY+KA+i8QeWZfmRU/Kv9hDU1tqXisivndK/ICIfUcW/7JR2U+whHkXmHbqt7uNI/NmL1aur6hhGF/6MaD73IP6cTt+tikSRY47LJURiB7EIcCx7ij496NWiyS7rvLoPVqz0urRo0XftbT49wOrcyYMg1LOnijwVk71f76kyHS1idWiZXdbbp2db1AP8qN5OJNjeOcs6DNWGp/blt/J0Mp2+fVpQegJU1OfeF8+JjFxJTTT2iQSedfl0Wi+n89hlUZ+tyxilj5a1kIlcMbvNIv5MnL2cdozu5fkkTHZb7
Hlu3cf+Xx8TK0g7oxsYel1l7KkZOYJrxOJIjHrfXzPjWbuNVoh5+8kLIbV16X0YhZx6jqS9+RCFwvb/1THacJ+cZkn9HhH53LIs36VW/aiIfMtp+VtE5F+o9L/QHvhqEfn11/B84znpd/Iy9A/kqK4R1bp0nfrvkmzty+y26zbXtLWGvfazHVjtyaXOg6O2BwCuByuUepo4yx6eg6ZFjV2v27Aum+fK2XyRYzeavMZrL3IJF6cur70lWG/b9PaBFYD2f7SvbB59fBbnT0wZnddSGehXnKVIVHgiUufxXOFMJGb91U6mJ1jFrLdiyctXdVWzNCvEMlFs29J1ZX9RWeuQ2uPi5fe2wx43e4ysA13BK1PpbyZsvRtGnnPt7Tcvr3cez1JxHL9GRL5ZRH6mtfaZU9p3isjfEpEfaq39ZRH5JRH5htO6H5OHV3F8Xh5ex/GXVvTrKhi5dxVmnMfZNkfuY6+v4mJKsa7OzD6ZDUnYq941bBFvt9DeXu1XwGEEAJHab1WljP4964IjmtCjh5BqN0YPvrWD0tPv1WctdPrdey80T4cAvnPasenedog8Oo52vXYys4G+Rosx/byk5zgu8lSgeuGp2kHMZouNhKYVobqftt/R9kTb7J0n3rnmCcHonPREWVRvTx8JMisURuKm010j60BZV+mdWmfFeF/f8zezLCpN90c7+SLPrxnbN3tt2PM8cyYtM+5g1WWMXMpKe1Xsdoo8vfa9vJ5DGJXVDqIur+uxeey+0Ddt7PGrUJlV9T9JvA//lJN/EZFPFdu/CSria8TsD2e1zcoBrwq+mVCCbGA+u6/OLTC2iIotfb20SNzShyqXdBYB4Hqxv4H6Ny77fRz9Ftp6RB5DsvpgrNevBU1z0nt9dvZJkafirZcVeTpQE/EHebqvH1Tl+0ycH1Bt6dk5bahqRThax1WHiWbC0bqQNjzVhqouJr/NY4WldRz1hD+eS9xZe2M5qiNydSJn0HNsrPNn67CfrbszciE7WjR4gkF/tjcd7kx5TxTq818vizw9ZlaMROIxEof2GvYEkiVzz3r/veXIYcvKe21tOe/seRyFGntiMRrTe+Nz+33X02wd0dh+lC9ij2sSiswOqm2Yxh51z9a5RgjMtHEuvLCXGdbuC9322nb34OhjsmX/AAB01nxPWYfFq8tbr7+3shBKW48Vmvfms3XcotdWWJdvMfVZ0aUF3uL82fXvTLoWb3bZ5rXbYNuSII/eTxGe6xjl947Bkb9l3qC46vTuTRQumblvUR+jUE5J0kb1VdvO+lttcyQavZDOiEw0Rvvcrq/k0WTbXA1HtWWifoyc8lH46mj/Tb2O4zXTv6jO7Tz2tvdyH3WdMllvtW7bRsReX8JH/Ihcwlnco23NS3UXO9d2cwIAjsE6ItYd0fmsy6jTdSiXTvPqtjfDrPuoJ8rp+fV7HPXATr8DsvdHTLu6P9bdtG5Vx7o4HSt++7qq42gFr3UivYlvtBC14lOHueq8uq92G/T2ecfHouvTbu0a1joqFedq9HlUt3USbaiiHtvpfeZxJ0+d314uGnNql0vnjfLp/DZvZfw32p+eeLVpFZdxRkhX8nhjcc891nn1d9foGEf5vby2Dl1W98W7QROBcJykKuIyZgSebleKbc8IvdkLec9QkmsY+G8VP69FJIpcXiiKXMc5AwA+W34fPUFg0/RnPTjSefpAWT/j6K3XA0s9uG7yII4+IM9/S+0g7IPy9BlCEf839Y0pI6r+nu6Fqo6Eo25jRjj25b7+PbWsRWMUhmrTRZW5N3V7/20fNXp8pMc93nHWZd44ebZScds8x8qec9Y1eub4BJ1e7h/zRWOvSnoXiM0s9+vVisVMlHoCVee1YkZfr3ZfRTcKvO3xljPBaPuWpXmft4zzR+egN0a3xzH63vPqsPV47dtjYcnWRXnhRjhy4HyJMM5LoH8ct7DlWOy5z87hLF7D8UU0ArwOZq91GyaZ5fPq91yvyh34++Cvl/NEk
xdeatfbMlaseQJPp1lH0Vs3atOW9fJ7n+0+9Y6N3bfeb3LlN6f6u7Tl92utQzhb7zkZhYJqon7OuqtePc38zbQ/yjNy6LPyW82hbHsiMqfU++wJZZt3FKIarSNU9QD2DFsVWRe6Wm1/xn3UdVfrj9qynPNL8giRs4do2bNfLz0MtYNYBLgtMtfRc41asn62DV1+Uf97unaubFiqDe0SeTpJjnX99PLiLIvKY/vruYQ65LXX9cas02UjV6GjRZknCN+qPN011M7iIk/f1+i9usNOgqNDUm2botZXw1T1vtDHzh5n71yIfju6O1nF5ref17rse7ujnSh8tIrev14Yq5e32ocof9SP6PMWp1GnjwTj2uPTt3EUDqvz2hBVG07a072IC9vOvfj7wEtfM9ZDOG5gj7BVkfMISN1OtS3vwt8ShnQL7ClUbslRvCYQiwCvh2zgb4VeJPx6ug5X1P912Koe1OlBln4OUbtlIo/PM+oBZyXkrfdTb6MWtL1OvR39tRw6pHDUpsZz87RA1GGrWvR5bqT3vKMWnZFY9F4BImq97p/ttxX+XpiqmDS9jyNhOPO70uuo3KDw1kV9jlju43DVCp7Is+wxVtXt6XGDPre9kFWR58fXG3d4uyATd5FLNyMYK+JuZt/pbRZ5LiLtPomEnr5OrGAUefq9odNtHdF3hqc7qmNBhOMLYI2AXXu3ay+39RrZS7DgLK4DwQgAa+i/gSMRasVnX6fRwsOKzJ7fE6+ew9fLNpNfTH4rdK0QsE5kX5c9MxaJNbvshdHq0NEsXDVyNW0fMmweT0B6rvTMDfNZh7Es+CbrtDdB0ronfpwz5/ZaWROCWhF5W9qzrBnnWrfVCju93l73lfo0I8fXrp9xfDMQjhvZU0itdR7X9mPWgfTa09yKmNz7i/WWhKIIYhEAboupAbc8dx97ev/fxWOv653J/87kE3kqDrWj1uSpyOt1flCeftfqGUU7XQBpV+FOLa9xHHsbnuNoxWLmMo7cR5GnYa3exDsizx1HcT5H29C3VQveLgKt8O5pTZ6eAzqvXt/zNLOs239jlke/V975aYWo5xLdSS4URy7tCCvoz4UnkqrMOI162XMTZ5zGtc9aeg6siL/9mXj0nEevr/2Yek6iPs8r+W3dIxCOO7FX2KrI+QWkbrOzpe2Io4XlLYqu1+QodhCLAC+b7Pdw5Oh4QiDKE7l/vazOL+I/I9TzajHRB3HvVB4t4nT9tn/RIK8vazHTw1L1dr5Ry32A5j3jmGG3XYeq9vX6ecMuAK3TqAWiFoV6JlWd1xOOkYgViX+fotBkLfK8MGR7Xtm2KuJP57UiT4ce2zxanC6mHl3ee+41uiY8JzES4TpvtF8X899bd+kxg7cfRk6jFVtrw1P3djS16LOi7T5I1+W0a6i/J6ybqD97Aj3KH/V1xIaoarhmtg7Oj/jyWIK/veo6kv4DuRfn6vO1gWgEAMvM94KXN/uu89Z5g2Q9ALcCJ1r2hJAWFrouvW4xaW+dddYV1G5f9Ftqy2nH8K0874ededX+99q326jdwGj/ePu0csx0O1meaN2oHdvPqL6sXPZ5lM/uj2q/oz7a87RSR1TXNY4fNBWRMyNCjzY2ovq99OgmkX2W06ZXn9HM2qyA47gja92+iC3Oo8jTL4Q1fdrDhaxwbWLiqC9MhCIAwDrX0ZbJXBp9d127THownU2UY50zPTmO7Y91OLsY0+9utG6ZqDy9j93d1JP0dMdPO1WzjqNu0xOjVgTaV3uISdNOZPV9jXp59nfKusnaufNcPnusvedH79VydoPA20f2mVWd7rmf9iaFF6ba0eee/e30nEZP8Nqb0lbce+I0E5GZ67k3WXiqXp+FqEZl7LpK/qgfI/p+st9X0QQ50TOJvW0vrzdhjtemBPmrYe8eCMcD6AdpL6KTYoatItL2o3PrlvWRYus1CkURxCIAzGN/Nz1hGInHZpZtvZnosOu8dF2XyKPI0wPAjhfOqkNEtfDUA
/Y+sGtmWeSpQLVteEROViQWPWcwEpHWyawIx8iRzPDGPZ4A1IJa3wAQebxpYI9tdgNB163DTG0obF+v69Y3CHQ73nnl3RDJhJsVbXYf2+3Sx1FM2kjAH/Ub7gkjvW5U1pJdB9l1GbW1lwOpBaKuW59ni0m3zyR2PGE5G7Zqw+E9IWr7mHHr4/6r5ZoHz3v2bc0dxEtjfwj3xn5ZH8G17vNrPu8B4Hzs+V0wqstz2CLHxqtPD7Y9YWXzRg5Or1uXt/V5n63g6qJN53vrlPf+tNh7O6gzE422b2LWizzfD9k+F7M+6r/Na/swIjrO0fEa1WXb1f20dVXEnF3XP0d/Ub/ssrettt9ZXdV6PM75u782RHVU14wYujN/lfpn+7OVkXur89j9MNoXOI4HsrfzKOLfhVvDHg6k5hruUlxCTJ3rC/MahSIiEQAyst9A7bRU8nvlKs5bz29DRb1QLy8sUtPL6Hc6ijwVa942eULrA/LoXOr236hlL1RVpLbdetkTMXqWVy0WdbrOO3IXI5cxE1oeUfhxX3dv/ut9q12YSAjZEFN7zLvA1u6lPk5ZqOqdWe6z9YpatpMsRdvsbX9ftkJe57H72wr7yIm0/+05mwlikaf1RWlHjWNmwk4jsSROelZPtl5vZxaeGqVnrmHmOvY6tIOo++RFQ1jnvjKuQzgeTD8IRzx46/1IrWFvERlxjeJnltcsFEUQiwBwXdjBv8jTQVcnEpFWPEbPO1rhZt+zqGcprfS3i88uKPSzd73O3lc9u6vti4cVBZmg04JCzHZUBaOoZSs6xOQZYUV8FlYswbK+AXEvjyGlOvRU12WFWHR+eGJRi0jvXIxuUMz8xluB5olEu69nBGKUJxJ+njOc5bHbUGHWDcyuiS3P820pYwWkFWs23eadFY+6vJg0e86NBG0GwvFMHOE+ijz9ItqDI4XuLXNOwYRoBADYfnPUExoizwfKb8w662ZZwdl/z3X9XZyIPAoKb9kTpXYgKfL0d6ALzMpg2nOBRk6SFnwVV9GrT4tS23ZFRNjn/KwI9M4Fvd/15DT62UZvcqQ3Tj1WIOp8i1PW1ulNjmMFqLct+jzIGDmL/f/oOGXur82j0yt93DJ28a7Rc4xDsxsy1/wsnxWPNj36LE6ZWRCOZ+Ro91FkfwHZeS1C8hLiCKEIAC+VbADoiQGb34q4JvlAfFSP5wT1unVoqL77rh3CxZTX6Vqw6EG4XtZhjF1caPdKu4vNLIuz7OE5QjPOo90f9/LUibSC026nbUucZYs9hjaEV/dHO7XeftZ1aeHmOYK6LRuSei9Pj412lq3rrOvqz5La0EDvnMtuBnhCXC97xywT+/oY6mNcEZ52va0n6use45utN5As5xrPei5f5jp6Ii9yHTVeCKpX78hZrO5jhOMFOPJOyt4CsuOJiJcgJhGKT0EsAsA5WTsotOU812hx8unQLisorAuoB8XVsNXuPNnnIPX6Jfjf29TP2I3CVL2wuL6dYtZ5jqNezoRj5mR54sW2Xflt8QbNPd07PnoAHIWN6v/6+GnR6Z0vVvBrZ9E+46jzdEFrb3jYGxfeuaFdZc/16/XoNE/MReJRr68IvUwARsdzi2jcWyCKrB+nZv3w6oz2x1rx6AnEKGQ16nuUNwqVrTqRCMcXylECUmPvbNwKlxJHCEYAgDGRW1jJm2EH8z1ND/StQIxCKHXYqsjT31ybZkNVrbi4V+lWlHiOql72XK9MuHmu4EggjlxGTzR6gjH6ndFiKTvuen0/bjpNi7s3QR06nw1J1f22ocj2uGgRKeIfTy1sPddHJB+jjQSdFYQ6bfRf5OkxjtJ12155u972VaPTzxUGurdRc+1j3cozk6PyIxCOF+JcoutcF2pFeBy5ndcofBCKAADrBm9RGSsevJBVPfD17rTbQbyd3VSLR9sPL2xV960Lqy5G9LOP2lnU7fe8NjTVC1Ot/o5HQkP3M1r2BGUkFiOhuMZ50sdSu2+Zs5K5hV24vZWHw
W4kjnS4pg4X1vntNnnnphay0X8domzrzBgJx0j028mOet5KuicII3HpicbsuEdj3ua9UAAAO81JREFUU3sDJrrWLXsLxFmy41kRbF7+TPDNOJRemhe2Wj0XEY5XwLlO+HO4kBmvQawgFgEAnpP9zu0ZolapyxOPIrHoycJWRZ66TrqsfZZRC04tJnQd1kHT62zfMkbCUX+OxELFXcwczWqfM3HgjVus+6sdQe0E2psK3Ym8l+eDZy0UW1CnOHXr7e4iLHIfbXsi/us5PKL96on+yBW0x1LX54nCSAR6YroqGD2ysemlBeERROGqWV57LmV5M9fRthmFrWYgHK+Ec14ce/5IwwMIRgCA49C/W97vZfYbasWYFw5p/y8mLQpbFfHFRR94f0Ce9t2m6z7ZAZ113USeP0/pbasmExp9veda6c+RQxkJlFE/NF7ocE/30vq+tA5z5P6OZlht8lQoeudDk6fngy0r4gvLaKAfHduIyk0AL9zY5ovEoic4dXuR0+iNLbzj7OVbM96Nxq6zY9pLi9G9x2SZIzkLoao3xigMYk+8ixsxOeaaBWIHoQgAt4Y3+ItEnl4XCQ3r7ngDf89x0oN6GzJpnUKvrp6vh5++p/LeqeWeLmqdyFOB6gmL6tjAE2M63QpGm1YVi56IyISFJXrNiN2nmRAXeS7gel29/jfyONupnbxIi1e9bCc4as5ydFOiO806DLmvtzPn2udUI1cpc44j8R/Nhmv/6zxeenbe2OXKcffGu94Ngp7Xuu7Wsffy6etd3xQQJ48lSo/G6dc67opCUr3rrDq+RTheKZe4I5KFC7x2rl0wXuuXFgBAZ/S7tjUapn8P2jZGglEv2wFr1D/P0fLEjE3rYkY/D6d/X7TYGYUzRgNeTSbotghGmxYJh8pvpxZRVjx46/u6N04e3Yc+WNbHqNdjj6N37KyrafNHy94zs1F4oNf30YRHnoC0y9GxsqLQ5hk5lSNH21JxG+11612LXr17h77Pjru1gD0n1gnXbOnPKGy2g3C8Yi5lp2dfaK+FaxeKHQQjALxkKq6jTdefPbehLzdn2Xs+zQ60tROo3aPuUEWhr7afuh5vfeRueGGrmkg8WodoJAQj8eDVGYnF0W/pSCBaV0fvz0hwePn6QFsfF08sR8fMir/+vzuZelkL2l6nfcWKPYf0dlgy93gkHqM0L48VhotTh8jzfWDHIaNxSebaZTd9PNcwch1HTmJlfF0xUypjsOwauPRYc3QTwwPheOVEd1DPRXRSvxRBeemLdhaEIgC8ZLwBXzbI89bZwWMmHu2ydovsIN+6gz3s0b5GwzpP9tUQXYhUQ1Xtskj8G5w5jpH7OCso7Trb7szvlN6/+jjoAW0/flagizy6iX3mVC3i+n4Wp7wWcV2g9naic62LwjdBn+3NAhtO2c8VL0x1NKaadRz1cs9vn4Psy1ZIZnV6YjFzISOimyN226yQ965rK/xtXyKhaB1/fbMgulk1yyXHmHp/Wgdc7/eq09hBON4IlxaQllt1JW9NKHYQjADwEtgjkmbkJqxp13MyvDSb1w5Yo4Gvtywmb5bHErmNlsyt6mk2fSRMdH2RYKz8ZulnrvTniO4I92UbzugtL6qcdXOjdL3eOs5e2Grvu+c0e3XbUOTe34hImEXCPhP8kctYcTJtu9G5VaFyjvd8W0SbF/K6ps61ZSzXOpabEY8IxxvjUuGrGdfmSt6qOLRc6xcMAMAWZkWcV8YOyL1wRS9kLRIDNoxR5HnYas8ryWcrCOwEK72MLd/FhCc0er0etn3LKITUcxdHnyticfQ7rAWYyHPnqOfpdXn7xDq4noizzqIVE++cshrdHy+fdm+0A2b/W2z6aFwXucU6zROMR7iM9tzw+jjCmxBHX8fe9d7zea6jzmdDnHX/ItdRl43yzohdj6ozG11fdv2lxogIxxtk5svmkrwUAXcuEIoAAA/sLR5F4kFn5Fp1sdFDFG3dnhNlwxPts3I2PNK+EN6Gl
Imz3PP1PlqyAX1FgPS0GaFp6xnhPdd3Z9b3/WXdEH0sNNYNtHV3kWiPuw5htX3yhIY+5+x7IZs8DqybSvNuFkTb7hEd0xn30R5b6y5Heb32dJrXvwzvpoG3bzyH3/bFq6vnsTeDou+ETDzqOkXlmSUTjaOoAJ2+17jau6FRdR0RjjfOtYWwwjwIRgB4bZwresa2YweUkbMQPbe2yPOBrG3PK28HppFY9Rw3b5CXDfBs3kqoo807cpuicpX+ieSCX0xaz59NgpPR82k32HMu9TYu5rMW8/1P16n7ovscHW+Rp/2uDNgz0e8Jfi8k1S7btr1jGonUar8tdvtHTmAvUw07zW4MVdqr5K+SOYaXJnLDRyAcXwjn+hGGfUAsAgDMM+s6ipM3chpFnruSXgirXbaT5ozy2rDU7oaJ6r+XpuvPfkO8gelI8HmORiYWM/FQ+X3zBLJOj9Iy4a0d3Z5fCzorAHXe7h7q0OJsTKWPQXekPQGp89mJj/REOaLSLRW3akYsVsSlPb4VoTmDvS7sdkSuZCYePTfZ62ckPEfiUfclY+banHEbz8HoJowIwvFFYU9WhOT1gFAEAHjK6IZnxU2y+aKQVZFxmGoWthoJwWjw6/XHhrD2NPt6BpHnokLvp+rEOL1+TdVd0uv3chk1o0G9N/C/l4dBq82/yKPg6wLwncqnnRXvGUTbB8/59M4r+y7OO5Xe3UgrFvtyJE6j8z0SHJHg95ziqljMHOns+Hv9rG5PzzsSkJ541MdIVFp0bXvpti3vZlBnzXhudHOlIhoreZZgeU8Qji8YXMjLg2AEAIhZIx5HZWbrtGJy1H6lXU88eqJUi5ie7lHtg4etczTQzBwtWybqbyQM7Ho7WLd1RK5x32dRnjdq/Rt5ji1rw1mtU6nzeO/61G3bmwDejQYxecTk9fob5YuE3mLS9LInLG3ZqmDMbhhk4mvmeM+mRd8B1XxeZIM47Y6o3Ew5p6O4BwjHF86MvQ7bQSgCAOxLRTxGAi9y//pnkedOpB0kVlxI6xr2UESd3+azgsmKKB2qOhKUo8Gn/W2aCZHLhKLXbvQ7OOqjdXB7mUrYql7f3+eo11nHsS+LPJ0Eqee1TmR0HvVynvto89lja8+HCqPjlrmGnqC0eSMHM2uziucU23S9XsQPO/XcQG+/22vZc5ejfkQ3NLaIvJFDO+s0jtZF+Sr5MxCOrwhE5L4gEgEAtrM2OiYTj17IWRYKZweeYj5nItG+l88OdN856VqoRKF2mpmQv0qe2WerRqF2UZq33obdetuq96m3f7tAtP3ynknVx6+Z/7ofdubc3pf3xHcw+3mjReydSuuhqqLWiUnz+jJidOys+PNEZRSGOrpx4H1eQ+REeuLNXsM6r3V9PfEYha3a9r0bBjZUtlM5ZtUbKDMObpRvNkR1yzFEOL5SotAQqIFoBAC4PiLncZTfG2Bm67NQu8jpEMkHrJHwjcpl2+QxGqCucTzW/BaOwggzh/eNyW/DUD3H2GtfCwu7bOvqoakiT51FLb6s6Iyc0t6vSAxViI5b5hbq/TISmFHayDHz8J4NtK7vKFogu7HklY3Eo+5HFIbqXXde3+y+8G7+rCFz9DPnf6bOrSAcXznRyQ+PIBIBAI5l5DpGg+wonCxyEbL0yN3wnMi+3ub13DFReXSb3jaNIoO0UzmiGp62ViRuGZBWHFRPPOr3XloXuB8/6yj146En0Onp2fKdyq/f19gHzm/Enzyni0jtQIpati5rtC9GYcgjxylyECMBGYnEitscETnKFu+a88pEQlHnyxzKnhZdy1nEgtefTtVdzPJXrsPRsahco7MhrRaEIzxhrSX/EkAgAgBcjkrI6qyj2OsVGc+gmKVVBI0WCFF+mxY9y+ht44xo7FREh8jcAPTIUMVofTTA99K7uFzMOv3cqchYOIo8P/YiD6Gr+vzQrpa+AaFFbu+HF55qn9/reMe6clw8AWLFx0gk7nmTQCR3X7127DUW3ayxzzLqs
t51bduwN4tsnyNzZXZ/VIVldp1l12zFDR5dt9VtQjjCkMqP+S2DYAQAuG288LM1IYCjsLdKmd4fkXF4q07znrESeS6S1lJxH0bhcLPOisY6q6NwwOwYeuGhNl33axTyqEVXFCYr4rvJWrR6ffbEyeh5u4hMDFQEo03LXMdZt8pSiQKQJK2nZ+fBKGR8NhQ2uimh25Ogr1H/PLx9u1Y0Vure6jJqEI5QYvRlce3CEnEIAHD9bHEdR+JxNmxVJJ40R1R5G54avZfRTqgj8rS/1uHo66zztGVijmjdaGA6E3IXEYUf6nWecO/73woz7ei9VXl02KoOMRWJw1ZFre9ipb8P0s6YqkNVF5NHtyHyXGjY80KnedtvGYmCSvhqtUzU3gx6eyrXhXZrIwffpkVupl6XicdRHR4zN6Wy+mavu71cxi3HFeEIu1D58ThCXCIIAQBeFkeLR5Fx2KpNr6RVw1OjZ9yiVzNUn6caUXUi9nYaRXyRlu0vK6yiUMVq2GozaSJ+GKo3mYqotDvJQ1V7Hvs6EBuyqoWsbnsmFLnqFs+me5+r51zk/nWy6yK7eeCFrkaisDn59LHWbq91g+13waivM8zcfMnE/KUEYwfhCGdjVlwiCgEAYCtrwlZn6hq5lFWXU+Tp7170PFVlW0YDxHMPPqOwv2yCokzQ25sDo7BVTzjY8NSev//vfVmcPDrNhlNa0WLDJWdCUzNmBEclfa1YjMpE4Z+eOOtlo9DSTChG9Xoi0+a1z7ZmAlJk/tjNXofXdt16IBzhqkAsAgDAnq6jl9d7ntC6YhVH0ubV6RW3sa/zQldtns7sBDmdtY7HKO+WPmThqqP9rF3ASthqL6PriITrIo+is8+eakNS38iD+9jr6W3qUNVm1tvtmQlT9ZgJddzqYlXxbo7o815fi/bcr4jDvmzrsderN+nNyN0Usz7LE0UIaEbXS0X0VcTl2uO35hgjHAEAAODqqIpHkTicsxq26q332sjcidFzkqLye/VH60Vqg9SI2cHrqMwWQWEdqWywH4kIWyYKX7yXh0Gut++tuPRCaTVRNJR9Ps8Lke3r7+X583u6T+/M5xHecciEyFoBMkvlfPbyWFc4u3FgndzM1RSTT0zerJ9eX22fK2TXTEUsenXMikUcRwAAAIAJRmGrXtjo2gl1xEn3+uA9lynyvJ97RuTMOop7DDpHrq8V4NX6PIEg4j+HaMNHRR7DU62w8EJOtbiJHLLMfa6IqNE56pXpjJzEreIjY3Td6HQbluodfxtK7IWhjsRjxwtV7umj69u7CdXzb2GtYJzJV1k3C8IRAAAArpJo0GaZDVsVyQe6dn0W1po5ilF6ZSIcHZK6x+Rya13EIwVrNVR1FLYq8ijkrNCInOZs+T15nFX1Th7DVnsbdhKdO7NOzLKdTTQKV9XsJR69vHuGIkfXjajPnstqHeZIuFXcR+86s7Md63K6bOV6tX3ayswNmhmRWBGIW69nhCMAAADcPJl4FJl77tGuz8TlyJkUU8aW63jPZOn8W9ljULnGuaiGB1dCVaOw1Z7fOofe8bKOoRU1UbrIozup+6uxTpUnlmy/+zrLlmMxE4a6VUjY7azcFPGEYMU9tMd/NPFQ5DTqsqM+2vr2ZIv4H6VH9WwF4QgAAABXTfVufyXML8uficdRP0YT6HjOpc7jPTun+3EERwjFqI5IQFoR7u1f7SJGzpSY9doV9NxHW5cXpqrTe389EfMuKed9tgLaE1tr9/2sQNxyjKNzN3Ljexkbqiomf1U8euGsnnhsJo8Nl+15RzeE7HasYdbtnRWMR4hFDcIRAAAArp6t4nHGedT5MqdwJAArg80jXcaIc4S0jdqM9mVFeOkwxEWeikAtABaVr5exQnAxdXnP1Nnn6USl9f/ehDg9XW9HNJPqXmGQnSMmONL0faqJHNYoJNUTa3oCIX1M9TH3wlBtmne+2Tz6OyWa+VWX11RDySvX0ZrnE7N6jxSPCEcAAAC4CfZwHivPP
fZ8YvJmIrNSx4zTWBnczwjRjJmB5tp+VZ8ljZ5R9Fwqz0EUZ30U4mrDW7UQFZNH5Gmoal8XCRotRLzPIvlrWGad8xHnvBEQTRBl1/X1vT7PEbTrIofQTl5k10d12El7oj5mN4H2un7Whp0e7TJqEI4AAADw4pgVjxGj8FWbZ0aERvk6lUmBZqkOMre4Up4w0O1nz5J6IYxWINwHy1Xx6IkCKzQioafbHTliUV2RYLQO2l7sHeqcicFMQOr8tl8jIajr9p4j9UJQvXNAJD4Pet+qQncrRwrFNf2r3KxAOAIAAMDNEIWceozEo60nEjxrnMNRaKbXpsfeIauzA8otbkY04Y/3XFtPtxPIWAHpuY2e0LSirs+SGj0T6YmQkUPVxUiTp8JQi5CZUNVILI7Ogb2FYcYoRNW7rqxjp/eFd51E4aveTQa7bN3prF+6L9m1byf/0cyGGFeupz2eR61et7NRDghHAAAAuDlmwlZF5t3HkbOY1b9GlNr8Xrmj2TvkrfJc6WiiHE8UjsIWvbaisEXPFRuFuGZtatFq6xST5k2K452ne7qPWxjdEPH2pT2GXjiyyPN97k1oI/L83Incxei6qpw3nsup8YTyWkblR9f/0e1bEI4AAADwqpkVj+LkrwrIKK/OrznnpCkVooFs5opVQnWjZ9qsGIkEhbecPUun12XPtGn3UcR/J6Et603wotd7oaq6fs0ex3/mmGftecc4uiEShYVGn6O6IpGp89lzYqZuj6hcX3fEM4V7CcTqjaa1kQwIRwAAALhJZsNWReZCV3U5W7YyqY0XxqrzZ4O8qOxe7O1kVkJuvf1QfTbOc5b6ei3mbNiiF+ao+xSFyVpBm82a2sQ/XjqcVUx+2w8rGPcOUa6SXUuZO26Pg5fmfY5cZhtq3Nvs+azbmNXdP4s8Pbd0enVWVc/9PSJctTNznVbyjkTzCIQjAAAA3DQzk91kzz2O6orKzjiWPX8na8tjVlDsLRBnJ/DR/chCf6Pn1zz3MQtlteWjumybo7DUmfy6nDfJjvfc3N7O8izZjYrs2d6+PhP6lWNnj000sc7seaL7b7dv5saF5zx6da5lb4E4U3bmOwXhCAAAAK+KreJRnPKzjqUt1xkJiKOeeVw7+M3KzYbzZoP4SICIxAKkErI4+7yb53TqvkTnzihE1Qqa0XONewvN0Xlq0/YKUZ3d/z3Ny6PdR1u3J+A1o+05B7Ntrb1ms++oEQhHAAAAuHlmwlZF6qGrUZ2VEFZbtuIiHhWauhej/aaphOhG7lFfl01go+uIBJ1X3yjM0ZulU+SpYNHn2zuV7onCSohqtD+3CMRZd7q35T1nGZ3vniCuhKhmx8ge63cqj3e+WJdT54va7vm9+ta+LqV6rNZe43uJ2C1h0AhHAAAAeDHMhK2KjN3HSp1ZHRVBOyPGjqY6OF0TShvNSuuJMb3O24e2Ls+NzMIXozDZqD7d304mbD1nVGPFmEdU1qvLY3ailEjoR8fFLo9CUqNj1Pug19llvU1RyK/nUo9CmyOXO9puUfkte930OdrlHG1HBsIRAAAAXhRrxKPIWPRIUu9WB1PXEbFFWJ4r5C57Fkxk/QyrPd0b6HvioOfxnMpRmGNlApd38rzukbslKt3uB+1geUJFs/Y8iMpFDpstY1057xjdOeuj42A/j5ZFpen+2ba8WXV1fk84eRP7dLzvk2yfbeFcEQejcywC4QgAAAAvjtnQVZE5ARnVPRJOtg7NpZ5v9NjrFQ7R/vD2Y2WikiXIO5pIZfS8WyXM0QoIuy1eiKUXwqrTxVlv67f0dmbO7ZHD6bWlha9tzx4j7/UjlX0fhRjb/S5qnRdu7NXnPXfqHV9v22bE9GsC4QgAAACgqISvztQlxfoqruTe7DHwnel3Fhbq5RmFlFZCEbOwSW8bKmGOXl0Vx1OcPLb9zA2aES3R9nnro0lobB9nwnurx0ZM3ugY2TLZeWPbipzuisvtldPs9V3hsfVm0SgCYvZ7BuEIA
AAAL5a1A6Q93EevvlGdXt23RGWWWG/feiKg58lekxA9Q6fzZJ+j2TZtCGtfn4VJ2n5p1yo6n6qz6moX06N6fkfC0zqikcCyffH2nV6nQ4B7XRJ8jparkx1psmNrj0u0bT2PrdcSubIzHHW9j1zm2XYRjgAAAPDimX3usVN1DGdeqzEbMnjLZOK66rplrlAUJuqlRZ97mifutegbhTlGaXq77DZWbijYkFFL5byOhGcUiuk9gxm5jZlI6/mjEOCRgMxmu7WvQ8lCZrP3adpt87ZhZv9dG9F5teb9oS/t+wkAAADApbsTa5gNGdvSzjmfZbTtrvmrEu3/UR16vQ0prdTV06zItH3x+reY9Hvz36vXpkX99+r29utony/On2VU1ub11nv7QS9H+9crk5XN8mfnj20va8MrY88t7xh4/cn2+56sbSO7VmfrxHEEAACAV8VW91FkXbjpmol6boGorzPPV0Xu48zEOVGaV7/9PHrvYO+TdbX0+spkK966Xrddn7lb3j7PzkkvVNPWqfNF2yHiO3Ve+SgEuOI+Zk6k7kNUhxZEM330nFb9efY74JxU+jN7rVoQjgAAAPDqWPvsY2fNAHI0QclLY7SPjhSQUd5RuKqXlolIkVhI6vJ6G7x13nrbr9H5UxWa3rHwxJRd59UxChHV5byQUl1f9uxpJbS190ckF5FRqG1fL2adrbdzbWGrlbD6aF31RtW1iWUAAACAs3HpSWjOFeZ2abKBaWXbt+yfSohk1NYozNELj9R5ovx6nV3vhVpWw1SjdV59Ns2Ws+uiUFVdTq+LQli9dnuI6uKUi/qpy3lE+8vWE4USe+1l16sXMrzlby3Z+ZKdRxVwHAEAAOBVs6f7KLL+rvzMYHGmr0eL0mpfMkckchZ1/mWwvpKe5R3NsKrTRy6kzRNtuzfpyuzMqqP8vW82bxSGGc0y6oXfbplVNXMfo3c6ep8zp1K3qdsVebpPsuMzM2vqno5c9bod9ak6CU6l7whHAAAAANkuIDvneA7qmhzK2RDcbP+Mwlej9ZG4tOnVUNcZYZqFd2ZiUteXhY/qsr2/HtX83j4fPT9py0XPiHrbkYUDR/u2r8tEoq2n8pxqNexY96lybDre7LfnCmGd/a5Z8xw1oaoAAAAAij1FWTSb4UvnHDNMaqJww7XpNixzJt2rf0k+27qyNF0+C2n18mf12v7pvN6yF/LZw0y9ur2wXIsNVa2Ew0ZhsKMQY6/vth+2rN0XWUio7fM5QlezvmShzLZ8Bo4jAAAAgEEPqPZwDCouzkujsg8jZ88rG4UcVifOsW0cka7XVSdc0Xm8sMhqKLQ3c2r0nsZR//T+0/m0A5e5eJGTGglW61Z6Lq52Br0Jc9Y4jrOT4szOdGs54nr36qzMnNuxjnMGwhEAAAAgYa8QVo/XIihnRGQ1fHV25tVs3V4i0q4blY3yiMQD+uiZtWpY5UiIjkKtvfLWxZsJSe3rq+Grtt7Kc42VEOORcK08mzviEqGslePP5DgAAAAAOzJzZ34re024s6Yty7lFbCSARvm8Z/WiZw2zdZrRM5M9vVN9DlOCPCLPt8nLO1OHt590OVsm2mYx6yMBop1J77PXb1vvaD9W2vG2YeY512zCJp1P5/UYfWdsnRSreiOgcqNlBMIRAAAAoMjeIaxVLvmcZLXtqsBc6z5m4auRKPNEgl1n28kG4rNu4mz4qM5XySviC9/qJDxb3VW7Xuex5bwJdKL3P3rHzYpDW6/+LCvSbLpdZ7fvKPcwu44qE1FF55l3EyA67yIQjgAAAAAruJSIvFZGAsNjFOoXuY/RTKCRAznjusyIT13Wtu+5cjZPJ8pnhcJMnR6VEE4vn217RkBaJ7Dy/KMXkhpN2uPlX5MmkoevRnk8trj0szeJZtqqHv892gIAAAAAh+rMh6+F2cFvtu+iurwy0eQre6y3s05GM5va9ZUZVGfzra0z2x792ZtV1KZZ7Lr7ID36rNFt2rxeP7I+eoLT2z4x6
xa1LsuTHY/sL2J2dtWoTq+Mt912H2bgOAIAAADshOcQvVZm3YzMfYxcqug5OJ13FKI6Eo+zk8tUJ1aJnCuvPgny2vyVdzdatzZz0DIX0puUxtbZnUZvQppRSKrthy2r80tSRq/bGqK6Njw1Ov4jZuuuhtlGjv0IhCMAAADAQRDO+sCMiDxKQPZ8W/Nkk6ZUxGqWz9YX5d3SvjfxTDRJjBV2dsIZO1GODk/1JtHxJrIR8QWk3TZdv9evxclv61zzXKs42+HVHRGVHbGl7tmw6ioIRwAAAIAzgIicw3OeOtVnH7282WydozwisYDUebx8Om81X5R3Tb2V15VYAamF2mgWUm9WUysSM8FZLR/l14yeBc0mR6q40lkbts6ZEPaqCzh7MyE7VyoCtYNwBAAAADgzFSfhpbLGfRTxBaFXx1pB57lOFUESbUMmXDTVUNEs78yEO9H6teG09jh4ArsFdXllvJDUqHyvX4IydhvsDQIvhNb2z4aUZtdmFDbqMXONR+HLWT1627x8o233QDgCAAAAXBlrXYotdV9CrEbib486opDXKDzV5p3Nt0feSv4tDua9s37kRNowT+0c9no89y/af149Xh2RAxn1ZRQ2K8767CZG5EpG7qPuV0R0zCNG9UY3N2wZz13GcQQAAAB4ZcyIzK31HCUuo9BTTfbsY1ZHFPLq5Z8VkHvk3ZJ/FIJo66w87+iJQr3shY16os17ltGKPyv8JPgsTh1RXyQoZ7H7YXbSJE31xkf1WcdRmGv2POzIJY/CcivbgHAEAAAAgBJHupXVENbK5Dm2jr2fH9T5Z56vi/Zf5ihV89u8ozyZoBgdi1F/o/BVW5+dfbWv1+GVUR06hLXntfXoNmz7+jyyoZ+Vc3EUlrpXKOqoPq8fUVhqz+Otr8zyinAEAAAAgE2M3MBLtJk5kF65PUJe1+TvZaquaJR/dhKgkdPY13kOZLTe9s+bVMfmiSbQ0X0WleaFsNo2LdmkONEEQV5+XddImGcOfnRuzNQ3E6bsPXdq68BxBAAAAICzsdfMsTPhc6PZV6N6RgLPljsy5NTm12UqTudeIiKa2MYLQfXWi8nX+6Tri56tG008NJqEx+I9y5i5stVw05nJZDIq7c2Gx85MiGRFcyWMFuEIAAAAALuzh4jcI3x1VE9l5lZbriryernZ1znMlInyjvJlz/SNQlhnQ2jtTKlR+6P9KE5dUSiqqPwi+fN8NlRzNJNsdM5E4Z6jsGqvjdG6KCzV5o32T1a+2gcAAAAAgF3ZawKfSjujtjJnJSs7Kjf7/OKojFfOK5PlzfoRCUWvL7aMl5a15ZX12l+CvFGdupxXv27DtmfrttsU7WdvP9o/SyWPbcO2U2m/sj3euio4jgAAAABwOFufg5wJK8zCV3VdWQirOHWM+jAbnmrLVMvNhqlWXtEwCiEdha/abYgcz+iVGZVwUnHyeNvtOZy671Hd3rN+3nGuhKtuPUd7f6J0r/7RvpuZAMkD4QgAAAAAZ2OvENbKwHzUzowQtPXMTo5SCcOcKTfzrGNVaGZlq/ktM88tRu3bPuvjPyMi7TrvZobNXxVYW2ZGjerIzmEv1NTbn9Ww1cq1SKgqAAAAANwcUYimRyUcr1Lf1jDY2XDWrFwWtpjVPQrnjUJYq6GtNq8to/tdDX3VIay2jK3PC8mcCeXUn6thwXpd5ViuCVWNyo6OeRaamu0jDxxHAAAAALgIW8NXRWru40xbowlgRk7mHuGsXtlRuexVHFEY69oQ1ii0NAtftevvxXc0Ky6nrc8SuZCjdV47XkjpaGKZmTDQbH1l+zqzYbij/eCBcAQAAACAi7I1fHX2+cdKWyMhZ+vy6psRg7Z8VnYUetrzZ/my9aNXdYzSveNh39loiWZblSRdiyUdbjkrkrJwzihfb78TlRnN1romFDYSrtnNA9tWVC6DUFUAAAAAuBpmZnn0qLonva1qe5V6t4SyjspHZav9n51ltRKmWkm3aV64adaPrM6MK
FxzSzhvFoo6CkOt5MnCXSuhyVEeb9nunwo4jgAAAABwVew1A2vVIVnjQu4RyurVs2ZW19lZVns+u5+8ENdLuo9R+Ww/VBw2r2z1vYg6b5Q/K9sZneOR8xmFZlcmwentRi7tCIQjAAAAAFwlewlIkcuLyKi+akiqLT8SkZXnIGefcbT1VwWkFaLZ84+W0SysVZGUtRG1F7VRLVctG5WPzn+vziw8dzSzbPX6IlQVAAAAAK6areGrInMhrLPtjmbTnKlvNCtntUxFnK2dYTWr1yuTzXY6asuruxpK67UxCu2cndV0VM6WHZX3mJlN9d5ZFifvGnAcAQAAAODq2WsG1s7eDmS1/kp9WT2VWVkr73jc6j5GM7fOzMpq07xylso7JDMXUuS5yzYbFhrVq+uToM5R+VEYbGXWVyu8PSdytI88EI4AAAAAcDPsISBF5p+D1G1X2t9bRM6EMkbiUOfNBKKuJwtRtW3s8fyjxZatzLo6Wue1F4VsZqGmI2HptVOtOyqbhZ1GzIrwmf4AAAAAAFw1e4TeiawLYZ1tvxrGumZW1awve4TnZqGfW6nMoJqFlmahmHZdFtaZ5cvSdT3Rvh6tz0JYR6GvUb4srDUKZx2dfyI4jgAAAABwo+ztPoqcx4HM2lkzq2pWLprcRucbuY+VCXJsn6rplbosFQdNpObOrXlv4+idjWvc46jNXm+ljzNu5JqbCghHAAAAALhp9hKQIucRkbqdrI1su0YhrGvCV3u+mdd3zIS2VsJXK+sqz0KO6rX5LNXnLNewRrRV+ujNWFs5n6v9IVQVAAAAAF4Ee4ZRilxXGOts+Sz8cVT/qE9ZGOiW2Vdn143CT70Qzihs1VIJIV0Tfrq2bi+9MkvsbDhwBo4jAAAAALwYZp2/EecOYz0qhLXyXkedbxTiuiZ8NXMSM+esGuYb5ffwQlZ7Hyv5q+VG5TNXufoOSFt/NmNqtK4iHhGOAAAAAPAi2TOEVWTdTKy6H5W+zIjImRDWGQE5kycTkH29F9pafe1HVKctp9dn4am9zkiMegKqErYaCS9ve706onarrnFWX/W50hEIRwAAAAB40USvWlhL5TUM52Cv7bLbUxHc2fONWf2jumdFabY+K6PLZem6Dlu3LTcq65XP2o7aHT2bWhXlszdWEI4AAAAA8OI5MoRVpC4kz+U+zoqRqrOo88w4gdVQ1MqENlH4q7c+I3uFxuzMp52toauzs7zqvDPvhczCWSMQjgAAAADwqtg7hFVknQs5049R/Zn7GJWNZl+dmXnVy7NmffXVIDpPFKqaiVydNwoPtXXoerx1Xp6s7qiOrK+2TPTsaeaw2rZmQ1YRjgAAAADwKjkihFVknYDc4xUeo2cft766Y+RQztRRCTMdhbhGAlXnWxsG6rVlsds+yjMKW608dxntP53PimJPsEevXclAOAIAAADAq2XvEFaRdTOxrglhPUf4asV97PVVhNtMCGslDDUSazOTymT7MTsWM7OeZsy+X9GGnIrTj0oo7KxLjnAEAAAAAJDbC2E9InzVyyeSu49efVWHcI1Q3dtlrE6MY8t567O6ozb2fh7V5rH7MZvBNgPhCAAAAACguKUQ1or7OCMeo/yz7mNUX3Wm1Nk8oz5FebP8towt55X38szMxLomXLXnG4W0Zs9D8owjAAAAAMAKri2EdYuArLxOYovTNcq3VfR4eXq+iqOX5Y3ydyqCanQs9w559dobzZK6ZhbVSrsAAAAAAHCiOnnIDDMvXp/pw2y9o3LVZwX3zLc4+ap1ZXmz/P0vKlctn+3HrJ4o3aszakvXEa3XdXh9y8BxBAAAAAAYcA3PP+7x7OPRM6/auvfMV3kdha0vqlMz80xiJK5GdYzqsc8f2nVRCOsotDd7vcmsA45wBAAAAAAosreAXPP8Y6UP55x5tZJ3Np/OuyVE1dbplcmctsrzf6M6qv2YoeosV56frNZHqCoAAAAAwCSV0L4ZsjDHrA+VerfWUaEa6pqFhK6ts
5J/FGYaUTnOozDVrf2ohrZGYa1RPV64agaOIwAAAADASo6YgXXWfTxq5tWo3MitrOSfDWGN8mb5szKjcqOyM/XY+rJw2Zn3Ujbx94PdZ9nMqjM3KxCOAAAAAAAbuHT46jmeffT6s+X5wtnQVJs3y5+V0WW3hqhW6tnalyws99zhsYSqAgAAAADswN6zrx4VujoKzczKbml7xF6hqb3M2m2plNf1bK2rMqtrpV0vbzSTqvd5BI4jAAAAAMBOvFb3MSq3JdR0TWhqVq5SdlR+ti5d35oZWWdc2Gim2Ug83smceEQ4AgAAAADszBEC8lpmXh2V3XvG1iz/qFyl7Kj8bF3V+iqCshqmGr2yoyoiKxCqCgAAAABwEHvPvHoEW2Ze3Tt8dcvspxF7hJPauqr1benXbJiqly/LM3t8cBwBAAAAAA5kT/fxCOex1ytJ3edyH3WZUTlbdtaJrNbh9aNaX6XOWUcyClMduZAjB3IEwhEAAAAA4Azs9eqOTHRlbUuh/ZEwrQhIr19VMTgSXTNlK+VtHZpqWKqtd0uo69rnHUdhqra+6qysXvsAAAAAAHAwe8+8egSVkNgt27F1ttOs7F4hqdX6ZutdG56ala2Eqc72wwPHEQAAAADgjOwduipyjPM4qnet+6jLeuWzcFRbtlK+UsdsfVvrXTNhzuy7L0f7sfquyg7CEQAAAADgAryEZx8rdVWfn4zqOCoctdKfNfVGdW95HYdXPhOH3j4dPe84glBVAAAAAIAXwJpZV2fCNrfWdS116Lpm9tmaMNet/c3qqIaoVvpdyYPjCAAAAABwQS4ZujrT/p7uY1bPjAMZ1bWHa+jVm9UdtbF28p09J8qxeUdhrB4IRwAAAACAK2CvWVdF1gvISvszAlKSOvcQkV5d1frW1Duqf6aNkQDOQnC3hKh6+SrOKKGqAAAAAABXwqVnXZ1pf01obFbXpWdEnd32I8Jct8y46uWfqX8EjiMAAAAAwBVxK6Gruv6sjcrkNVvqy+qcdQxnZlvN6o/aWTNBjq0n2p97zbYagXAEAAAAALhCriF0VSb6UG1j9pnKmTor9dq6K/XbNqptrQ2hzZ6TjMRoJD490Vl5NYiFUFUAAAAAgCvl0qGrIvN9mA0R3bNOXe+a0NPZUM7ZtqrtjMJSZ2ZP9fLP7h8RHEcAAAAAgKvmiNBVkXXuY7UfR7iFM+9i9OrWrJkQZ9TeHmGuldBWLzRVl90SypoxPF9aax9prf371trPtdY+21r7q6f0v9la+0Jr7TOnv69XZf56a+3zrbWfb639mYn+AAAAAACAw97u49qJUtY4kEdNVHMup9Brb60LWnlv45pJcmacyDXbUHEc34rIty/L8tOttd8tIj/VWvu3p3V/d1mWv60zt9Y+JiLfKCJ/RET+gIj8u9baH16W5d1EvwAAAAAAwLDnc48iD8JhzbNra1zQ2bZmt3UPN1U2tDnTbmX/jbYnqiN61jTLv8t7HJdl+aKIfPG0/Juttc+JyIeTIp8QkR9cluW3ROQXWmufF5E/JiL/udAfAAAAAABI2DN0VWS94NJ9Edk/hNXWv7Ydzdo2q23Phrmufefj7CyrukxlplWPqXOjtfblIvKVIvITp6Rvba3999ba97bWfu8p7cMi8suq2K+IIzRba59srf1ka+0nZ/oAAAAAAADXMXGO5sgQVtvOmjBT3eaWMN09wlyrbYzqispHZWbyW8rCsbX2JSLywyLybcuy/IaIfLeI/CER+bg8OJJ/p1qXiMiyLJ9eluWrlmX5qplyAAAAAADwwBHPPW55SfyRgiprb6uI3ENIrm27Uv+onmq/Zmdk1ZRmVW2tfVAeROMPLMvyIyIiy7L8qlr/j0TkX54+fkFEPqKKf9kpDQAAAAAAdmbv0FWRde991MyGsNp2O1ueVdza9mw/1oa4bp1d1asjCmUdhbFmVGZVbSLyPSLyuWVZvkulf6nK9udF5GdPyz8qIt/YWvudrbWvEJGPish/LfQFA
AAAAACuiK0OpMg2V3QvB3SrM7uXMznTXqXOrPzWdz1aKo7j14jIN4vIz7TWPnNK+04R+abW2sdPbf+iiPwVEZFlWT7bWvshEfk5eZiR9VPMqAoAAAAAcCxHOI+dtbOvdrb2bcsEPrYPnT360pmdTbXSh8o2j5zd2RlWM9qyXP6x2tba5TsBAAAAAPCCOEJAdraISM1efdyrPyL77rc1/aq2n9U9qiMq++siPxXNQbPnPgYAAAAAgCvhSGdma/hqZ68+7tUfkf3CW0XWhbdW287q3av/mtLkOAAAAAAAcHsscpzzuHUCnc7aiXQsW0JII7ZOuNNZMxlNZb+MwlmjOtaE/iIcAQAAAABeMHsJs4g9nj/s7PUcYidy5fZ4XtOydQbXrE/nEpEZhKoCAAAAAMAu7BkyKnJMyKXIPrPFWrb2tdqfSjuj7VvTTxxHAAAAAIBXwpEzr3b2dCA7R7mm1xbeOhPSutWFnN2nCEcAAAAAgFfG0eGrnaNFpGav7cicur1eBSJS629VSO4pIiMIVQUAAAAAgMPZOzTUco73+x0xe+ve7W+dkTUCxxEAAAAA4BVzjvDVzhEOpGavWVAz9p5wZ9b9rezDPSbTsSAcAQAAAADgrAJS5HgR2Tk6tLWzx/OSs7PKzorIqM6KA0moKgAAAAAAvM9RM5lmHDHL6YhzbOfWbZrpY3Ufrt1mHEcAAAAAAHjGIudzHzszs4ruxSUm29niRO7tQla3E+EIAAAAAAAu55p9NeISQrJz5POSW7brSBGZQagqAAAAAAAMOXf4qse5w1ktR4W3rg3VnenL1n2H4wgAAAAAACUu7UCK7D+r6RqOciPXTBg0M6HOlgmJEI4AAAAAADDNuWdhHXGuWVoj9haTa7enKu55HQcAAAAAAJyNS0yiM+LSIrKz177p23PUtlTCWBGOAAAAAACwidn3D56TS4e27ulEzk6qs+dxQTgCAAAAAMCuXFsYq8cer8lYy17Pis46kVuOC7OqAgAAAADAIVzDTKwzrJ3ddAt7zNQ62+c1beI4AgAAAADAYRz5PsSjuER469aw0jXvhpwRjwhHAAAAAAA4K7cQyuqxRpytZWs4694T6hCqCgAAAAAAF2GPMM1Lcq7Q1i37aK8+4jgCAAAAAMBFucVwVs05Qlv3DGVd0y8cRwAAAAAAuDpu3Y0UOdaRPLcLieMIAAAAAABXy16vrrgkez9v2Dnnc5AIRwAAAAAAuAleYkjrXmJyi4isuI+EqgIAAAAAwM1y6yGtR4SyHrFPcBwBAAAAAODmuWU3cuvENRF7vvYE4QgAAAAAAC+SyHW7ZkF5xLsit87Iulc/AAAAAAAAboZbCm89KpR1FhxHAAAAAAB4lezhxJ2DI0JZZ8NYEY4AAAAAAAByG6Gte4vIqvtIqCoAAAAAAEDCtYa23ssxoaweOI4AAAAAAAAFrjW0tYvHI11BhCMAAAAAAMAKru0VIEfMyHpEXQAAAAAAAK+aawxp3QMcRwAAAAAAgB3R4vFaHMitjiHCEQAAAAAA4CCuJZx1axgroaoAAAAAAACvjNnZWHEcAQAAAAAAzsi1zM46Ix4RjgAAAAAAABfkWp6JzCBUFQAAAAAA4EpY5DpnZsVxBAAAAAAAuDKuzYVEOAIAAAAAAFwx1yAiCVUFAAAAAACAFBxHAAAAAACAG+FSM7LiOAIAAAAAANwo55pMB+EIAAAAAABw4xwtIAlVBQAAAAAAeCEcNZEOwhEAAAAAAOAFsqeIJFQVAAAAAADghbM1jBXHEQAAAAAA4BWwZUZWHEcAAAAAAIBXyIwLieMIAAAAAADwSqmKRxxHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJCCcAQAAAAAAIAUhCMAAAAAAACkIBwBAAAAAAAgBeEIAAAAAAAAKQhHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJCCcAQAAAAAAIAUhCMAAAAAAACkI
BwBAAAAAAAgBeEIAAAAAAAAKQhHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJCCcAQAAAAAAIAUhCMAAAAAAACkIBwBAAAAAAAgBeEIAAAAAAAAKQhHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJCCcAQAAAAAAIAUhCMAAAAAAACkIBwBAAAAAAAgBeEIAAAAAAAAKQhHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJCCcAQAAAAAAIAUhCMAAAAAAACkIBwBAAAAAAAgBeEIAAAAAAAAKQhHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJCCcAQAAAAAAIAUhCMAAAAAAACkIBwBAAAAAAAgBeEIAAAAAAAAKQhHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJCCcAQAAAAAAIAUhCMAAAAAAACkIBwBAAAAAAAgBeEIAAAAAAAAKQhHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJCCcAQAAAAAAIAUhCMAAAAAAACkIBwBAAAAAAAgBeEIAAAAAAAAKQhHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJCCcAQAAAAAAIAUhCMAAAAAAACkIBwBAAAAAAAgBeEIAAAAAAAAKQhHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJCCcAQAAAAAAIAUhCMAAAAAAACkIBwBAAAAAAAgBeEIAAAAAAAAKQhHAAAAAAAASEE4AgAAAAAAQArCEQAAAAAAAFIQjgAAAAAAAJDygUt34MT/FpFfEpHff1oGWAvnEGyFcwi2wjkEW+D8ga1wDsEW/mC0oi3Lcs6OpLTWfnJZlq+6dD/gduEcgq1wDsFWOIdgC5w/sBXOITgKQlUBAAAAAAAgBeEIAAAAAAAAKdcmHD996Q7AzcM5BFvhHIKtcA7BFjh/YCucQ3AIV/WMIwAAAAAAAFwf1+Y4AgAAAAAAwJWBcAQAAAAAAICUqxCOrbU/21r7+dba51tr33Hp/sBt0Fr7xdbaz7TWPtNa+8lT2u9rrf3b1tr/OP3/vZfuJ1wPrbXvba39WmvtZ1Wae860B/7+6Xvpv7fW/ujleg7XQnAO/c3W2hdO30Wfaa19vVr310/n0M+31v7MZXoN10Rr7SOttX/fWvu51tpnW2t/9ZTOdxGUSM4hvovgUC4uHFtrb0TkH4jI14nIx0Tkm1prH7tsr+CG+JPLsnxcva/oO0Tkx5dl+aiI/PjpM0DnH4vInzVp0TnzdSLy0dPfJ0Xku8/UR7hu/rE8P4dERP7u6bvo48uy/JiIyOm37BtF5I+cyvzD028evG7eisi3L8vyMRH5ahH51Olc4bsIqkTnkAjfRXAgFxeOIvLHROTzy7L8z2VZfltEflBEPnHhPsHt8gkR+b7T8veJyJ+7XFfg2liW5T+KyP8xydE58wkR+f7lgf8iIr+ntfalZ+koXC3BORTxCRH5wWVZfmtZll8Qkc/Lw28evGKWZfnisiw/fVr+TRH5nIh8WPgugiLJORTBdxHswjUIxw+LyC+rz78i+ckP0FlE5N+01n6qtfbJU9qHlmX54mn5f4nIhy7TNbghonOG7yaY4VtPYYTfq0LkOYcgpbX25SLylSLyE8J3EazAnEMifBfBgVyDcARYyx9fluWPykMYz6daa39Cr1we3jXD+2agDOcMrOS7ReQPicjHReSLIvJ3LtobuAlaa18iIj8sIt+2LMtv6HV8F0EF5xziuwgO5RqE4xdE5CPq85ed0gBSlmX5wun/r4nIP5eHsItf7SE8p/+/drkewo0QnTN8N0GJZVl+dVmWd8uy3IvIP5LHEDDOIXBprX1QHgb8P7Asy4+ckvkugjLeOcR3ERzNNQjH/yYiH22tfUVr7XfIw8O7P3rhP
sGV01r7Xa21392XReRPi8jPysO58y2nbN8iIv/iMj2EGyI6Z35URP7CaUbDrxaRX1dhZADvY543+/Py8F0k8nAOfWNr7Xe21r5CHiY3+a/n7h9cF621JiLfIyKfW5blu9QqvougRHQO8V0ER/OBS3dgWZa3rbVvFZF/LSJvROR7l2X57IW7BdfPh0Tknz98d8oHROSfLMvyr1pr/01Efqi19pdF5JdE5Bsu2Ee4Mlpr/1REvlZEfn9r7VdE5G+IyN8S/5z5MRH5enmYROD/ichfOnuH4eoIzqGvba19XB5CC39RRP6KiMiyLJ9trf2QiPycPMyC+KllWd5doNtwXXyNiHyziPxMa+0zp7TvFL6LoE50Dn0T30VwJO0hjB4AAAAAAADA5xpCVQEAAAAAAOCKQTgCAAAAAABACsIRAAAAAAAAUhCOAAAAAAAAkIJwBAAAAAAAgBSEIwAAAAAAAKQgHAEAAAAAACDl/wO4XQMeu1FKfQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "g = create_grid(300,1)\n", + "jet = create_jet(g)\n", + "figure(figsize=(24,16))\n", + "#plt.contour(jet[0][0])\n", + "plt.imshow(jet[0][0],cmap = 'gist_heat')" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "theta = 0#-np.pi/2\n", + "comps = 4\n", + "img_size = 300\n", + "c = []\n", + "x = np.zeros(comps)\n", + "y = np.zeros(comps)\n", + "rot_mat = np.array(\n", + " [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]\n", + " )\n", + "for i in range(comps):\n", + " c.append(np.array([i*5,0]))\n", + " x[i], y[i] = (c[i] @ rot_mat)+img_size//2" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[150. 155. 160. 165.]\n", + "[150. 150. 150. 150.]\n" + ] + } + ], + "source": [ + "print(x)\n", + "print(y)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "t = np.array" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/source_detection/model.py b/source_detection/model.py index 6b09d974..97eb6243 100644 --- a/source_detection/model.py +++ b/source_detection/model.py @@ -153,7 +153,7 @@ def forward(self, fmap15): # + def create_prior_boxes(): - fmap_dims = { 'fmap7' : 75, #update + fmap_dims = { #'fmap7' : 75, #update 
'fmap10': 38, #was 38 with old 37 'fmap15': 19, #was 19 with old 18 'fmap17': 10, #was 10 with old 9 @@ -162,7 +162,7 @@ def create_prior_boxes(): 'fmap23': 1} maps = list(fmap_dims.keys()) - scales = { 'fmap7' : 0.02, + scales = { #'fmap7' : 0.02, 'fmap10': 0.06, 'fmap15': 0.11, 'fmap17': 0.16, @@ -171,7 +171,7 @@ def create_prior_boxes(): 'fmap23': 0.3} - aspect_ratios = {'fmap7': [1.], + aspect_ratios = {#'fmap7': [1.], 'fmap10': [1.], 'fmap15': [1.], 'fmap17': [1.], @@ -215,14 +215,14 @@ def __init__(self, nclasses): # 'fmap21': 4, # 'fmap23': 4} - n_boxes = { 'fmap7': 2, + n_boxes = { #'fmap7': 2, 'fmap10': 2, 'fmap15': 2, 'fmap17': 2, 'fmap19': 2, 'fmap21': 2, 'fmap23': 2} - self.loc_fmap7 = nn.Conv2d(256, n_boxes['fmap7'] * 4, kernel_size=3, padding=1) + # self.loc_fmap7 = nn.Conv2d(256, n_boxes['fmap7'] * 4, kernel_size=3, padding=1) self.loc_fmap10 = nn.Conv2d(512, n_boxes['fmap10'] * 4, kernel_size=3, padding=1) self.loc_fmap15 = nn.Conv2d(1024, n_boxes['fmap15'] * 4, kernel_size=3, padding=1) self.loc_fmap17 = nn.Conv2d(512, n_boxes['fmap17'] * 4, kernel_size=3, padding=1) @@ -230,7 +230,7 @@ def __init__(self, nclasses): self.loc_fmap21 = nn.Conv2d(256, n_boxes['fmap21'] * 4, kernel_size=3, padding=1) self.loc_fmap23 = nn.Conv2d(256, n_boxes['fmap23'] * 4, kernel_size=3, padding=1) - self.cl_fmap7 = nn.Conv2d(256, n_boxes['fmap7'] * nclasses, kernel_size=3, padding=1) + #self.cl_fmap7 = nn.Conv2d(256, n_boxes['fmap7'] * nclasses, kernel_size=3, padding=1) self.cl_fmap10 = nn.Conv2d(512, n_boxes['fmap10'] * nclasses, kernel_size=3, padding=1) self.cl_fmap15 = nn.Conv2d(1024, n_boxes['fmap15'] * nclasses, kernel_size=3, padding=1) self.cl_fmap17 = nn.Conv2d(512, n_boxes['fmap17'] * nclasses, kernel_size=3, padding=1) @@ -246,7 +246,7 @@ def init_conv2d(self): nn.init.xavier_uniform_(c.weight) nn.init.constant_(c.bias, 0.) 
- def forward(self,fmap7, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23): + def forward(self, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23): #stuff batch_size = fmap10.size(0) l_fmap10 = self.loc_fmap10(fmap10) @@ -273,9 +273,9 @@ def forward(self,fmap7, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23): l_fmap23 = l_fmap23.permute(0,2,3,1).contiguous() l_fmap23 = l_fmap23.view(batch_size,-1,4) - l_fmap7 = self.loc_fmap7(fmap7) - l_fmap7 = l_fmap7.permute(0,2,3,1).contiguous() - l_fmap7 = l_fmap7.view(batch_size,-1,4) + # l_fmap7 = self.loc_fmap7(fmap7) + # l_fmap7 = l_fmap7.permute(0,2,3,1).contiguous() + # l_fmap7 = l_fmap7.view(batch_size,-1,4) c_fmap10 = self.cl_fmap10(fmap10) c_fmap10 = c_fmap10.permute(0,2,3,1).contiguous() @@ -301,12 +301,12 @@ def forward(self,fmap7, fmap10, fmap15, fmap17, fmap19, fmap21, fmap23): c_fmap23 = c_fmap23.permute(0,2,3,1).contiguous() c_fmap23 = c_fmap23.view(batch_size,-1,self.nclasses) - c_fmap7 = self.cl_fmap7(fmap7) - c_fmap7 = c_fmap7.permute(0,2,3,1).contiguous() - c_fmap7 = c_fmap7.view(batch_size,-1,self.nclasses) + #c_fmap7 = self.cl_fmap7(fmap7) + #c_fmap7 = c_fmap7.permute(0,2,3,1).contiguous() + #c_fmap7 = c_fmap7.view(batch_size,-1,self.nclasses) - locs = torch.cat([l_fmap7, l_fmap10, l_fmap15, l_fmap17, l_fmap19, l_fmap21, l_fmap23], dim = 1) - classes_scores = torch.cat([c_fmap7, c_fmap10, c_fmap15, c_fmap17, c_fmap19, c_fmap21, c_fmap23], dim = 1) + locs = torch.cat([ l_fmap10, l_fmap15, l_fmap17, l_fmap19, l_fmap21, l_fmap23], dim = 1) + classes_scores = torch.cat([ c_fmap10, c_fmap15, c_fmap17, c_fmap19, c_fmap21, c_fmap23], dim = 1) return locs, classes_scores class SSD300(nn.Module): @@ -322,9 +322,9 @@ def __init__(self, nclasses): self.pred_convs = predconvs(nclasses) self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1)) - self.rescale_factors7 = nn.Parameter(torch.FloatTensor(1, 256, 1, 1)) + # self.rescale_factors7 = nn.Parameter(torch.FloatTensor(1, 256, 1, 1)) 
nn.init.constant_(self.rescale_factors, 20) - nn.init.constant_(self.rescale_factors7, 20) + #nn.init.constant_(self.rescale_factors7, 20) self.priors_cxcy = create_prior_boxes() @@ -335,23 +335,22 @@ def forward(self, image): bmaps = self.base(image) fmap10 = bmaps['fmap10'] #[256, 38, 38] fmap15 = bmaps['fmap15'] - fmap7 = bmaps['fmap7'] + #fmap7 = bmaps['fmap7'] #fmap_13 = bmaps['fmap_13'] - norm7 = fmap7.pow(2).sum(dim=1, keepdim=True).sqrt() + #norm7 = fmap7.pow(2).sum(dim=1, keepdim=True).sqrt() norm = fmap10.pow(2).sum(dim=1, keepdim=True).sqrt() fmap10 = fmap10 / norm fmap10 = fmap10 * self.rescale_factors - fmap7 = fmap7 / norm7 - fmap7 = fmap7 * self.rescale_factors7 + #fmap7 = fmap7 / norm7 + #fmap7 = fmap7 * self.rescale_factors7 amaps = self.adv(fmap15) fmap17 = amaps['fmap17'] fmap19 = amaps['fmap19'] fmap21 = amaps['fmap21'] fmap23 = amaps['fmap23'] - locs, classes_scores = self.pred_convs(fmap7, fmap10,fmap15,fmap17,fmap19,fmap21,fmap23) - + locs, classes_scores = self.pred_convs(fmap10,fmap15,fmap17,fmap19,fmap21,fmap23) return locs, classes_scores def object_detection(self, locs, class_scores, priors, min_score=0.01, max_overlap=0.45,top_k=200): diff --git a/source_detection/source_data.py b/source_detection/source_data.py index acafd46b..ce8404f2 100644 --- a/source_detection/source_data.py +++ b/source_detection/source_data.py @@ -1,6 +1,7 @@ from radionets.simulations.gaussians import create_grid, create_gauss, create_diamond, create_square from radionets.dl_framework.data import save_fft_pair, open_fft_pair from scipy import ndimage +from tqdm import tqdm import numpy as np import matplotlib.pyplot as plt @@ -14,16 +15,16 @@ def detector_data(img_size, bundle_size, num_bundles,path): - for t in range(num_bundles): + for t in tqdm(range(num_bundles)): with h5py.File(path+str(t)+'.h5', "w") as hf: all_images = [] all_bboxes = [] all_labels = [] for r in range(bundle_size): grid = create_grid(img_size, 1) - num_objects = np.random.randint(1,4) + 
num_objects = np.random.randint(1,2) #noise bboxes = np.zeros((num_objects,4)) - labels = np.zeros((num_objects,1)) + labels = np.zeros((num_objects,1)) if num_objects == 0: bboxes = np.zeros((1,4)) labels = np.zeros((1,1)) @@ -33,6 +34,7 @@ def detector_data(img_size, bundle_size, num_bundles,path): else: for i in range(num_objects): rand = np.random.randint(0,4) + rand = 0 #noise if rand == 0: g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True) xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size @@ -87,9 +89,10 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): all_labels = [] for r in range(bundle_size): grid = create_grid(img_size*10, 1) - num_objects = np.random.randint(25,60) + num_objects = np.random.randint(150,200) bboxes = np.zeros((num_objects,4)) - labels = np.zeros((num_objects,1)) + labels = np.zeros((num_objects,1)) + diffuse_limiter = 0 if num_objects == 0: bboxes = np.zeros((1,4)) labels = np.zeros((1,1)) @@ -110,6 +113,10 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): bboxes[i] = bbox labels[i] = label elif rand == 1: + diffuse_limiter +=1 + if diffuse_limiter > 5: + continue + print(diffuse_limiter) g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, True, True, True) xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size @@ -139,7 +146,6 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): label = np.array([3]) #nodiff bboxes[i] = bbox labels[i] = label - hf.create_dataset('x'+str(r), data=g) hf.create_dataset('y'+str(r), data=bboxes) hf.create_dataset('z'+str(r), data=labels) diff --git a/source_detection/train.py b/source_detection/train.py index 721c6db3..9404e8f6 100644 --- a/source_detection/train.py +++ b/source_detection/train.py @@ -8,10 +8,10 @@ import matplotlib.pyplot as plt path = get_bundles('//net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/train/') -iterations = 180000 +iterations = 240000 
n_classes = 5 #nodiff -#checkpoint = None -checkpoint = '/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300.pth.tar' +checkpoint = None +#checkpoint = '/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300_e120.pth.tar' batch_size = 32 workers = 4 lr = 1e-4 @@ -175,7 +175,7 @@ def main(): def train(data_loader, model, loss_function, optimizer, epochs): model.train() - losses = np.zeros(470) + losses = np.zeros(1877) for i, (images, boxes, labels) in enumerate(data_loader): images = images.to('cuda') From e041cbe35870c5f49d1295079d1d5ff725b5fc02 Mon Sep 17 00:00:00 2001 From: Paul Simon Blomenkamp Date: Wed, 21 Jul 2021 11:11:24 +0200 Subject: [PATCH 7/9] Small updates to the FPN architecture, data_augmentation and code for SDC data --- radionets/simulations/gaussians.py | 62 ++-- source_detection/FPN/FPN.py | 15 +- source_detection/FPN/FPNeval.py | 74 +++-- source_detection/FPN/FPNloss.py | 58 +++- source_detection/FPN/FPNtrain.py | 35 ++- source_detection/FPN/SDC.py | 179 ++++++++++++ source_detection/FPN/data_augmentation.py | 108 +++++++ source_detection/FPN/mosaic_eval.py | 164 +++++++++++ source_detection/evaluation.py | 12 +- source_detection/source_data.py | 333 ++++++++++++++++++++-- source_detection/source_utils.py | 24 ++ 11 files changed, 968 insertions(+), 96 deletions(-) create mode 100644 source_detection/FPN/SDC.py create mode 100644 source_detection/FPN/data_augmentation.py create mode 100644 source_detection/FPN/mosaic_eval.py diff --git a/radionets/simulations/gaussians.py b/radionets/simulations/gaussians.py index 174d1eac..60fa2418 100644 --- a/radionets/simulations/gaussians.py +++ b/radionets/simulations/gaussians.py @@ -353,55 +353,42 @@ def create_ext_gauss_bundle(grid): # pointlike gaussians -def create_gauss(img, N, sources, source_list, img_size=63, diffuse = False, bboxes = False, mosaic = False): +def create_gauss(img, N, sources, source_list, img_size=63, 
diffuse = False, bboxes = False, mosaic_factor = 1, spherical = True): - mos = 1 - if mosaic: - mos = 10 - mx = np.random.randint(1, img_size*mos, size=(N, sources)) - my = np.random.randint(1, img_size*mos, size=(N, sources)) + mx = np.random.randint(1, img_size*mosaic_factor, size=(N, sources)) + my = np.random.randint(1, img_size*mosaic_factor, size=(N, sources)) if diffuse: amp = ( - np.random.randint(30, 40, size=(N))# * 1 / 10 * np.random.randint(3, 5) + np.random.randint(5, 10, size=(N))# * 1 / 10 * np.random.randint(3, 5)#1,5 ) #/ 1e2 - sx = np.random.randint((img_size**2)/200, (img_size**2)/100, size=(N, sources))*10 + sx = np.random.uniform((img_size**2)/200, (img_size**2)/100, size=(N, sources))*10 sy = sx else: amp = ( - np.random.randint(50, 100, size=(N))# * 1 / 10 * np.random.randint(5, 10) + np.random.randint(10, 100, size=(N))# * 1 / 10 * np.random.randint(5, 10) )# / 1e2 - sx = np.random.randint((img_size**2)/720, (img_size**2)/360, size=(N, sources)) - sy = sx - # Doesnt work properly right now - #if spherical: - # sx = np.random.randint(3, 8, size=(N, sources)) - # sy = sx - #else: - # sx = np.random.randint(1, 15, size=(N, sources)) - # sy = np.random.randint(1, 15, size=(N, sources)) - # theta = np.random.randint(0, 360, size=(N, sources)) + if spherical: + sx = np.random.uniform(1/2*(img_size**2)/720, 2*(img_size**2)/360, size=(N, sources)) + sy = sx + else: + sx = np.random.uniform(1/16*(img_size**2)/720, 1/2*(img_size**2)/360, size=(N, sources)) + sy = np.random.uniform(1/16*(img_size**2)/720, 1/2*(img_size**2)/360, size=(N, sources)) + s = np.zeros((N, sources, 1)) # changed from 5 for i in range(N): for j in range(sources): - g = gauss(img_size*mos, mx[i, j], my[i, j], sx[i, j], sy[i, j], amp[i]) #DPG - # s[i,j] = np.array([mx[i,j],my[i,j],sx[i,j],sy[i,j],amp[i]]) + g, theta = gauss(img_size*mosaic_factor, mx[i, j], my[i, j], sx[i, j], sy[i, j], amp[i], spherical) #DPG s[i, j] = np.array([mx[i, j]]) - #if spherical: img[i] += g - #else: - 
# # rotation around center of the source - # padX = [g.shape[0] - mx[i, j], mx[i, j]] - # padY = [g.shape[1] - my[i, j], my[i, j]] - # imgP = np.pad(g, [padY, padX], "constant") - # imgR = ndimage.rotate(imgP, theta[i, j], reshape=False) - # imgC = imgR[padY[0] : -padY[1], padX[0] : -padX[1]] - # img[i] += imgC if source_list: return img, s elif bboxes: - return img/amp, [mx[0][0],my[0][0]], [sx[0][0],sy[0][0]] + if spherical: + return img, [mx[0][0],my[0][0]], [sx[0][0],sy[0][0]] + else: + return img, [mx[0][0],my[0][0]], [sx[0][0],sy[0][0]], theta else: return img @@ -420,16 +407,23 @@ def gauss_pointsources(img, N, sources, source_list): g = gauss(mx[i, j], my[i, j], sigma, sigma, amp[i]) s[i, j] = np.array([mx[i, j], my[i, j], amp[i]]) img[i] += g - print(s.shape) if source_list: return img, s return np.array(img) -def gauss(img_size, mx, my, sx, sy, amp=0.01): +def gauss(img_size, mx, my, sx, sy, amp=0.01, spherical = True): x = np.arange(img_size)[None].astype(np.float) y = x.T - return amp * np.exp(-((y - my) ** 2) / sy).dot(np.exp(-((x - mx) ** 2) / sx)) + if spherical: + theta = 0 + return amp * np.exp(-((y - my) ** 2) / sy).dot(np.exp(-((x - mx) ** 2) / sx)), theta + else: + theta = np.random.uniform(0, 2*np.pi) + a = np.cos(theta)**2/(2*sx)+np.sin(theta)**2/(2*sy) + b = -np.sin(2*theta)/(4*sx)+np.sin(2*theta)/(4*sy) + c = np.sin(theta)**2/(2*sx)+np.cos(theta)**2/(2*sy) + return amp * np.exp(-(a*(x-mx)**2+2*b*(x-mx)*(y-my)+c*(y-my)**2)), theta def create_diamond(img, num_img, sources, pixel, bboxes = False, mosaic = False): mos = 1 diff --git a/source_detection/FPN/FPN.py b/source_detection/FPN/FPN.py index 25a3110f..1c82a27f 100644 --- a/source_detection/FPN/FPN.py +++ b/source_detection/FPN/FPN.py @@ -50,7 +50,8 @@ def __init__(self): self.load_arch() def load_arch(self): - arch = load_pretrained_model('VGG', '//net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/build/VGG_test/temp_20.model', 300) + arch = 
load_pretrained_model('VGG','/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/build/temp_20.model', 300) + #arch = load_pretrained_model('VGG', '//net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/build/VGG_test/temp_20.model', 300) state_dict = self.state_dict() param_names = list(state_dict.keys()) pretrained_state_dict = arch.state_dict() @@ -127,7 +128,7 @@ def init_conv2d(self): for c in self.children(): if isinstance(c, nn.Conv2d): nn.init.xavier_uniform_(c.weight) - nn.init.constant_(c.bias, 0.) + nn.init.constant_(c.bias, 0) def forward(self, fmap15): @@ -218,7 +219,7 @@ def create_prior_boxes(): aspect_ratios = {'fmap7': [1.], - 'fmap10': [1.], + 'fmap10': [1.,2.,0.5], 'fmap15': [1.], 'fmap17': [1.], 'fmap19': [1.], @@ -230,9 +231,8 @@ def create_prior_boxes(): for f in range(fmap_dims[s]): x = (d + 0.5) / fmap_dims[s] y = (f + 0.5) / fmap_dims[s] - for ratio in aspect_ratios[s]: - priors.append([x, y, scales[s] * sqrt(ratio), scales[s] / sqrt(ratio)]) + priors.append([y, x, scales[s] * sqrt(ratio), scales[s] / sqrt(ratio)]) if ratio == 1.: try: @@ -262,7 +262,7 @@ def __init__(self, nclasses): # 'fmap23': 4} n_boxes = { 'fmap7': 2, - 'fmap10': 2, + 'fmap10': 4, 'fmap15': 2, 'fmap17': 2, 'fmap19': 2, @@ -431,7 +431,6 @@ def object_detection(self, locs, class_scores, priors, min_score=0.01, max_overl overlap = jaccard(c_boundary_locs, c_boundary_locs) suppress = torch.zeros((n_above_min)).bool().to('cuda') - for box in range(c_boundary_locs.size(0)): if suppress[box] == 1: continue @@ -445,7 +444,7 @@ def object_detection(self, locs, class_scores, priors, min_score=0.01, max_overl predicted_scores.append(c_scores[~suppress]) if len(predicted_boxes) == 0: predicted_boxes.append(torch.FloatTensor([[0.,0.,1.,1.]]).to('cuda')) - predicted_labels.append(torch.LongTensor([4]).to('cuda')) + predicted_labels.append(torch.LongTensor([4]).to('cuda')) #nodiff predicted_scores.append(torch.FloatTensor([0.]).to('cuda')) predicted_boxes = 
torch.cat(predicted_boxes, dim = 0) diff --git a/source_detection/FPN/FPNeval.py b/source_detection/FPN/FPNeval.py index cdddb522..31b8e522 100644 --- a/source_detection/FPN/FPNeval.py +++ b/source_detection/FPN/FPNeval.py @@ -12,17 +12,28 @@ from tqdm import tqdm class_labels = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') -color_map = ('y', 'g', 'w', 'r','brown') +color_map = ('w', 'r', 'pink', 'r','brown') label_map = {k: v for v, k in enumerate(class_labels)} rev_label_map = {v: k for k, v in label_map.items()} def box_coord(coord, img_size): - x = coord[0].item()*img_size - y = coord[3].item()*img_size - xmax = coord[2] - ymin = coord[1] + x = coord[0].item()*img_size #0 + y = coord[3].item()*img_size#3 + xmax = coord[2]#2 + ymin = coord[1]#1 w = xmax.item()*img_size - x h = -(y - ymin.item()*img_size) return x,y,w,h +def box_coord_inv(coord): + newcoord = np.zeros(coord.shape) + xmin = coord[:,0] + ymax = coord[:,1] + xmax = xmin+coord[:,2] + ymin = ymax + coord[:,3] + newcoord[:,0] = xmin + newcoord[:,1] = ymin + newcoord[:,2] = xmax + newcoord[:,3] = ymax + return newcoord def detect_sources(checkpoint_path, data_path, img_size, n = 0): data = get_bundles(data_path) @@ -38,12 +49,10 @@ def detect_sources(checkpoint_path, data_path, img_size, n = 0): #print(eval_dataset[31]) with torch.no_grad(): for i, (images, boxes, labels) in enumerate(tqdm(eval_loader)): - print(enumerate(eval_loader)) images = images.to('cuda') - print(images.shape) predicted_locs, predicted_scores = model(images) predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, - min_score = 0.5, max_overlap = 0.45, top_k = 100) + min_score = 0.2, max_overlap = 0.2, top_k = 100) fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,8)) for j in range(len(eval_dataset[n][1][0])): true_label = class_labels[eval_dataset[n][2][0][j].item()] @@ -61,10 +70,12 @@ def detect_sources(checkpoint_path, data_path, img_size, n = 0): 
facecolor='none') ax2.text(predx,(predy+predh-7),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) ax2.add_patch(predrect) - - ax1.imshow(eval_dataset[n][0].squeeze(0)) - img = ax2.imshow(eval_dataset[n][0].squeeze(0)) - fig.colorbar(img) + + cbar_ax = fig.add_axes([1, 0.1, 0.05, 0.8]) + + img2 = ax1.imshow(eval_dataset[n][0].squeeze(0),cmap = 'gist_heat') + img = ax2.imshow(eval_dataset[n][0].squeeze(0),cmap = 'gist_heat') + cbar = fig.colorbar(img, cax=cbar_ax) def image_detection(checkpoint_path, image): img_size = image.shape[0] image = torch.FloatTensor(image).unsqueeze(0).unsqueeze(0) @@ -73,11 +84,13 @@ def image_detection(checkpoint_path, image): model = checkpoint['model'] model = model.to('cuda') model.eval() + priors = FPN.create_prior_boxes() with torch.no_grad(): image = image.to('cuda') predicted_locs, predicted_scores = model(image) - predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, - min_score = 0.5, max_overlap = 0.2, top_k = 100) + #print(predicted_locs.shape) + predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors, + min_score = 0.2, max_overlap = 0.1, top_k = 200) #fig, ax1 = plt.subplots(1,1,figsize=(12,8)) #for k in range(len(predl[0])): # predicted_label = class_labels[predl[0][k].item()] @@ -88,7 +101,7 @@ def image_detection(checkpoint_path, image): # ax1.text(predx,(predy+predh-7),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) # ax1.add_patch(predrect) #ax1.imshow(image[0][0].cpu()) - return predb, predl + return predb, predl,preds def classifier_eval(arch, img_batch): @@ -246,7 +259,7 @@ def calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels, else: precisions[i] = 0. 
average_precisions[c] = precisions.mean() # c is in [1, n_classes - 1] - print(precisions) + #print(precisions) # Calculate Mean Average Precision (mAP) @@ -256,6 +269,35 @@ def calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels, average_precisions = {rev_label_map[c]: v for c, v in enumerate(average_precisions.tolist())} return average_precisions, mean_average_precision + + # - +def open_bundle_pack(path): + bundle_x = [] + bundle_y = [] + bundle_z = [] + f = h5py.File(path, "r") + bundle_size = len(f)//3 + for i in range(bundle_size): + bundle_x_i = np.array(f["x"+str(i)]) + bundle_y_i = np.array(f["y"+str(i)]) + bundle_z_i = np.array(f["z"+str(i)]) + bundle_x.append(bundle_x_i) + bundle_y.append(bundle_y_i) + bundle_z.append(bundle_z_i) + return bundle_x, bundle_y, bundle_z +def annotate(img, bbox, labels): + #class_labels = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') + #color_map = ('w', 'g', 'r', 'y','brown') + img_size = img.shape[0] + fig, ax2 = plt.subplots(1,1,figsize=(50,40)) + for j in range(bbox.shape[0]): + true_label = labels[j] + color = color_map[labels[j].astype('int')] + trux, truy, truw, truh = box_coord(bbox[j],img_size) + trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor=color, facecolor='none') + #ax2.text(trux,(truy+truh-7),true_label, color = 'k',fontsize=8,backgroundcolor = color) + ax2.add_patch(trurect) + ax2.imshow(img, cmap = 'gist_heat') diff --git a/source_detection/FPN/FPNloss.py b/source_detection/FPN/FPNloss.py index 2f8a0c16..9a303626 100644 --- a/source_detection/FPN/FPNloss.py +++ b/source_detection/FPN/FPNloss.py @@ -36,7 +36,6 @@ def __init__(self, priors_cxcy, threshold = 0.5, neg_pos_ratio = 3, alpha = 1.): self.cross_entropy = nn.CrossEntropyLoss(reduce = False) def forward(self, predicted_locs, predicted_scores, data_locs, data_labels): - batch_size = predicted_locs.size(0) n_classes = predicted_scores.size(2) n_priors = 
self.priors_cxcy.size(0) @@ -58,11 +57,11 @@ def forward(self, predicted_locs, predicted_scores, data_locs, data_labels): overlap_for_each_prior[prior_for_each_object] = 1. label_for_each_prior = data_labels[image_i][0][0][object_for_each_prior]#very ugly shapes watch out - label_for_each_prior[overlap_for_each_prior < self.threshold] = 4 #nodiff + label_for_each_prior[overlap_for_each_prior < self.threshold] = 2 #nodiff true_classes[image_i] = label_for_each_prior true_locs[image_i] = center_to_offset(boundary_to_center(data_locs[image_i][0][object_for_each_prior]), self.priors_cxcy) - positive_priors = true_classes != 4 #nodiff + positive_priors = true_classes != 2 #nodiff loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors]) n_positives = positive_priors.sum(dim = 1) n_hard_negatives = self.neg_pos_ratio * n_positives @@ -79,6 +78,57 @@ def forward(self, predicted_locs, predicted_scores, data_locs, data_labels): conf_loss_hard_neg = conf_loss_neg[hard_negatives] conf_loss = (conf_loss_hard_neg.sum()+conf_loss_pos.sum())/n_positives.sum().float() return conf_loss + self.alpha * loc_loss -# - +# - +class FocalLoss(nn.Module): #0.6 #9 #20. 
+ def __init__(self, priors_cxcy, threshold = 0.5, neg_pos_ratio = 3, alpha = 50., gamma = 2): + + super(FocalLoss, self).__init__() + self.priors_cxcy = priors_cxcy + self.priors_xy = center_to_boundary(priors_cxcy) + self.threshold = threshold + self.neg_pos_ratio = neg_pos_ratio + self.alpha = alpha + self.gamma = gamma + self.smooth_l1 = nn.SmoothL1Loss() + self.cross_entropy = nn.CrossEntropyLoss(reduce = False) + + def forward(self, predicted_locs, predicted_scores, data_locs, data_labels): + batch_size = predicted_locs.size(0) + n_classes = predicted_scores.size(2) + n_priors = self.priors_cxcy.size(0) + assert n_priors == predicted_locs.size(1) == predicted_scores.size(1) + true_locs = torch.zeros((batch_size, n_priors, 4), dtype = torch.float).to('cuda') + true_classes = torch.zeros((batch_size, n_priors), dtype = torch.long).to('cuda') + + for image_i in range(batch_size): + n_objects = data_locs[image_i][0].size(0) + overlap = jaccard(data_locs[image_i][0], self.priors_xy) #overlap of the boxes in this image with the priors + overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0) + #overlap has shape [a,s,f,g,....] + #[h,d,g,h,....].... each entry is the overlap of one true box with all the priors. + #each row describes one object. Max gives the maximum overlap value and the index of the object. + + _, prior_for_each_object = overlap.max(dim = 1) + + object_for_each_prior[prior_for_each_object] = torch.LongTensor(range(n_objects)).to('cuda') + + overlap_for_each_prior[prior_for_each_object] = 1. 
+ label_for_each_prior = data_labels[image_i][0][0][object_for_each_prior]#very ugly shapes watch out + label_for_each_prior[overlap_for_each_prior < self.threshold] = 2 #nodiff + true_classes[image_i] = label_for_each_prior + + true_locs[image_i] = center_to_offset(boundary_to_center(data_locs[image_i][0][object_for_each_prior]), self.priors_cxcy) + positive_priors = true_classes != 2 #nodiff + loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors]) + n_positives = positive_priors.sum(dim = 1) + n_hard_negatives = self.neg_pos_ratio * n_positives + conf_loss_all = -self.cross_entropy(predicted_scores.view(-1, n_classes), true_classes.view(-1)) + pt = torch.exp(conf_loss_all) + print(positive_priors.shape[0]) + class_loss = (-((1-pt)**self.gamma) * conf_loss_all)/positive_priors.shape[0] + print(class_loss.sum()) + print(loc_loss*self.alpha) + return class_loss.sum() + self.alpha * loc_loss + diff --git a/source_detection/FPN/FPNtrain.py b/source_detection/FPN/FPNtrain.py index 7314f53b..9338e7be 100644 --- a/source_detection/FPN/FPNtrain.py +++ b/source_detection/FPN/FPNtrain.py @@ -3,21 +3,25 @@ import h5py import numpy as np from radionets.dl_framework.data import get_bundles -from FPNloss import detectionLoss +from FPNloss import detectionLoss, FocalLoss from FPN import SSD300 from tqdm import tqdm import matplotlib.pyplot as plt -path = get_bundles('//net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/train/') -iterations = 240000 -n_classes = 5 #nodiff -#checkpoint = None -checkpoint = '/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300.pth.tar' +path = get_bundles('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/july/mixed') +iterations = 120000 +n_classes = 3 #nodiff +checkpoint = None +#checkpoint = '/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/june/mixedcheckpoints/checkpoint_ssd300.pth.tar' batch_size = 32 workers = 4 -lr = 1e-4 +lr = 1e-9 
decay_lr_at = [80000,120000] decay_lr_to = 0.1 +#increase_lr_at = [10,30000] +#increase_lr_to = 10 +#increase_lr_at2 = [100,80000] +#increase_lr_to2 = 100 momentum = 0.9 weight_decay = 5e-4 grad_clip = None @@ -46,7 +50,7 @@ def open_image(self, var, i): bundle = indices // self.num_img image = indices - bundle * self.num_img bundle_unique = torch.unique(bundle) - + #print('bundle:',bundle) bundle_paths = [ h5py.File(self.bundles[bundle], "r") for bundle in bundle_unique ] @@ -147,7 +151,7 @@ def main(): optimizer = checkpoint['optimizer'] model = model.to('cuda') - loss_function = detectionLoss(priors_cxcy = model.priors_cxcy).to('cuda') + loss_function = FocalLoss(priors_cxcy = model.priors_cxcy).to('cuda')#detectionLoss(priors_cxcy = model.priors_cxcy).to('cuda') @@ -163,20 +167,23 @@ def main(): if epoch in decay_lr_at: adjust_learning_rate(optimizer, decay_lr_to) - + #if epoch in increase_lr_at: + # adjust_learning_rate(optimizer, increase_lr_to) + #if epoch in increase_lr_at2: + #adjust_learning_rate(optimizer, increase_lr_to2) train(train_loader, model, loss_function, optimizer, epoch) print("Epoch:", epoch) if epoch % 10 == 0: - save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300' + '_e' + str(epoch)+'.pth.tar') - save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300.pth.tar')# apparently not defined + save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/july/focal/checkpoint_ssd300' + '_e' + str(epoch)+'.pth.tar') + save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/july/focal/checkpoint_ssd300.pth.tar')# apparently not defined def train(data_loader, model, loss_function, optimizer, epochs): model.train() - losses = np.zeros(1877) + losses = np.zeros(940) for i, (images, boxes, labels) in 
enumerate(data_loader): images = images.to('cuda') @@ -197,7 +204,7 @@ def train(data_loader, model, loss_function, optimizer, epochs): optimizer.step() print('Average Loss', np.average(losses)) - f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/loss.txt', "a") + f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/focalloss.txt', "a") f.write(str(epochs) + '\t' + str(np.average(losses)) +'\n') f.close() del predicted_locs, predicted_classes_scores, images, boxes, labels diff --git a/source_detection/FPN/SDC.py b/source_detection/FPN/SDC.py new file mode 100644 index 00000000..46f4f464 --- /dev/null +++ b/source_detection/FPN/SDC.py @@ -0,0 +1,179 @@ +from astropy.io import fits +import matplotlib.pyplot as plt +import matplotlib.colors as colors +import numpy as np +import torch.nn.functional as F +import torch +import h5py +from tqdm import tqdm +import matplotlib.patches as patches +import numpy as np +from FPNeval import detect_sources, box_coord,image_detection, open_bundle_pack, annotate +import scipy +from scipy.spatial import distance +#from data_augmentation import gaussian_noise, psf_noise + + +class sdc_dataset(): + + def __init__(self, num_images, label_path, img_path, img_size): + + self.num_images = num_images + + self.label_path = label_path + + self.img_path = img_path + + self.img_size = img_size + + def create_image(self, x, y, image, img_size): + img = image[0][0][y:y+img_size,x:x+img_size].astype('float64') + img = img/img.max() + return img + + def get_coords(self): + classes = np.loadtxt(self.label_path, skiprows = 18, usecols = (11) ) + cx = np.loadtxt(self.label_path, skiprows = 18, usecols = (13) ) + cy = np.loadtxt(self.label_path, skiprows = 18, usecols = (14) ) + use = np.loadtxt(self.label_path, dtype = 'bool', skiprows = 18, usecols = (12) ) + flux = np.loadtxt(self.label_path, skiprows = 18, usecols = (5) ) + cx = cx[use] + cy = cy[use] + flux = flux[use] + oid = np.linspace(0,cx.shape[0], 
cx.shape[0], dtype = 'int32') + cy = np.vstack((oid,cy)) + cx = np.vstack((oid,cx)) + flux = np.vstack((oid,flux)) + + all_coords = cx, cy, flux + return all_coords + def create_labels(self,imgx,imgy, SNRfactor, mean, all_coords): + coords = [] + x, y, flux = all_coords + print(mean) + for i in range(x.shape[1]): + if flux[1][i] > 70*mean: + if imgx < x[1][i] < imgx+self.img_size: + #print(x[0][i]) + woy = np.where(x[0][i] == y[0]) + if imgy < y[1][woy[0].item()] < imgy+self.img_size: + c = np.array([x[1][i]-imgx,y[1][woy[0].item()]-imgy]) + c = np.round(c).astype('int') + coords.append(c) + return coords + def forward(self, SNRfactor, save_path): + all_images = [] + all_labels = [] + all_pos = [] + f = fits.open(self.img_path) + img = f[0].data + f.close() + all_coords = self.get_coords() + mean = img.mean() #NEEDS PROPER CALCULATION + with h5py.File(save_path+'.h5', "w") as hf: + for i in range(self.num_images): + x = np.random.randint(16350 ,19900) + y = np.random.randint(16700,19950) + pos = (x,y) + image = self.create_image(x,y,img, self.img_size) + labels = self.create_labels(x,y, SNRfactor, mean, all_coords) + all_images.append(image) + all_labels.append(labels) + all_pos.append((x,y)) + hf.create_dataset('x'+str(i), data=image) + hf.create_dataset('y'+str(i), data=labels) + hf.create_dataset('z'+str(i), data=pos) + hf.close() + + +class sdceval(): + + def __init__(self,checkpoint, sdcset_path): + + self.checkpoint = checkpoint + + self.sdcset_path = sdcset_path + + def boxtoxy(self,boxes, img_size = 50): + coords = np.array([boxes[:,2]-(boxes[:,2]-boxes[:,0])/2, boxes[:,3]-(boxes[:,3]-boxes[:,1])/2]).T + + return coords + def image_comp(self,image_number): + + f = h5py.File(self.sdcset_path, "r") + img = np.array(f["x"+str(image_number)]) + true_labels = np.array(f["y"+str(image_number)]) + img_pos = np.array(f["z"+str(image_number)]) + f.close() + + fig, (ax1, ax2) = plt.subplots(1,2,figsize=(12,8)) + ax1.imshow(img,cmap = 'hot') + for i in 
range(len(true_labels)): + ax1.scatter(true_labels[i][0], true_labels[i][1], color='b', s=20) + + dimg = torch.FloatTensor(img) + dimg = dimg.unsqueeze(0) + dimg = dimg.unsqueeze(0) + uimg = F.interpolate(dimg, size = (300,300) , mode = 'bilinear') + uimg = uimg[0][0].detach().numpy() + + b,l,s = image_detection(self.checkpoint,uimg) + + bbox = b[0] + labels = l[0].cpu().detach().numpy() + img_size = img.shape[0] + + for j in range(bbox.shape[0]): + true_label = labels[j] + trux, truy, truw, truh = box_coord(bbox[j],img_size) + trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor='w', facecolor='none') + #ax2.text(trux,(truy+truh-7),true_label, color = 'k',fontsize=8,backgroundcolor = color) + ax2.add_patch(trurect) + ax2.imshow(img, cmap = 'hot') + + def precisioneval(self): + + num_sources = [] + f = h5py.File(self.sdcset_path, "r") + for image_number in range(len(f.keys())//3): + + img = np.array(f["x"+str(image_number)]) + true_labels = np.array(f["y"+str(image_number)]) + img_pos = np.array(f["z"+str(image_number)]) + img_size = img.shape[0] + + dimg = torch.FloatTensor(img) + dimg = dimg.unsqueeze(0) + dimg = dimg.unsqueeze(0) + uimg = F.interpolate(dimg, size = (300,300) , mode = 'bilinear') + uimg = uimg[0][0].detach().numpy() + + b,l,s = image_detection(self.checkpoint,uimg) + coords = self.boxtoxy(b[0].cpu().detach().numpy()*img_size) + + coords = np.round(coords).astype('int') + + dist = scipy.spatial.distance.cdist(coords, np.array(true_labels)) + + dist = torch.tensor(dist) + + sort_dist, sort_ind = dist.sort(dim = 1, descending = False) + + sort_dist = sort_dist.detach().numpy() + sort_ind = sort_ind.detach().numpy() + keep_mask = [True]*sort_dist.shape[0] + for i in range(sort_dist.shape[0]): + for j in range(sort_dist.shape[0]): + if keep_mask[j] == False: + continue + if sort_ind[:,0][j] == sort_ind[:,0][i]: + if sort_dist[:,0][j] > sort_dist[:,0][i]: + keep_mask[j] = False + continue + if sort_dist[:,0][j] > img_size*0.05: 
+ keep_mask[j] = False + detected_sources = sort_dist[keep_mask].shape[0] + true_sources = true_labels.shape[0] + num_sources.append((true_sources, detected_sources)) + f.close() + return num_sources diff --git a/source_detection/FPN/data_augmentation.py b/source_detection/FPN/data_augmentation.py new file mode 100644 index 00000000..669460cb --- /dev/null +++ b/source_detection/FPN/data_augmentation.py @@ -0,0 +1,108 @@ +#Same as the other one +import matplotlib.pyplot as plt +import numpy as np +from radionets.evaluation.utils import load_pretrained_model, eval_model +import FPNtrain +from radionets.dl_framework.data import get_bundles +import torch +from tqdm import tqdm +from FPNeval import box_coord +import matplotlib.patches as patches +import random +import cv2 +from matplotlib.pyplot import figure +import h5py +from astropy.io import fits +from astropy.convolution import convolve_fft, Gaussian2DKernel + + +class_labels = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') +color_map = ('r', 'g', 'w', 'y','brown') + +class random_shear(object): + def __init__(self, shear_factor = 0.5): + + self.shear_factor = shear_factor + self.shear_factor = (-self.shear_factor, self.shear_factor) + shear_factor = random.uniform(*self.shear_factor) + self.horizontal_flip + def __call__(self, img, bboxes): + shear_factor = random.uniform(*self.shear_factor) + bboxes = bboxes * img.shape[1] + w,h = img.shape[1], img.shape[0] + + if shear_factor < 0: + img, bboxes = self.horizontal_flip(img, bboxes) + + M = np.array([[1, abs(shear_factor), 0],[0,1,0]]) + + nW = img.shape[1] + abs(shear_factor*img.shape[0]) + + bboxes[:,[0,2]] += ((bboxes[:,[1,3]]) * abs(shear_factor) ).astype(int) + + + img = cv2.warpAffine(img, M, (int(nW), img.shape[0])) + + if shear_factor < 0: + img, bboxes = self.horizontal_flip(img, bboxes) + + img = cv2.resize(img, (w,h)) + + scale_factor_x = nW / w + + bboxes[:,:4] /= [scale_factor_x, 1, scale_factor_x, 1] + + bboxes = bboxes 
/ img.shape[1] + return img, bboxes + def horizontal_flip(self, img, bboxes): + img_center = np.array(img.shape[:2])[::-1]/2 + img_center = np.hstack((img_center, img_center)) + img = img[::-1,:] + bboxes[:,[0,2]] += 2*(img_center[[0,2]] - bboxes[:,[0,2]]) + + box_w = abs(bboxes[:,0] - bboxes[:,2]) + + bboxes[:,0] -= box_w + bboxes[:,2] += box_w + + return img, bboxes + + +def gaussian_noise(image, strength = 0.05): + pixel = image.shape[1] + bundle_size = 1 + x = np.linspace(0, pixel - 1, num=pixel) + y = np.linspace(0, pixel - 1, num=pixel) + X, Y = np.meshgrid(x, y) + grid = np.array([np.random.normal(0,1,X.shape) *strength, X, Y]) + grid = np.repeat( + grid[None, :, :, :], + bundle_size, + axis=0, + ) + k = np.random.normal(0,1,X.shape) * image.max()/10 + return image+grid[0][0] + +def psf_noise(image, psf_path = '/net/big-tank/POOL/projects/ska/0836-BAND8_POSTAIPS.UVFITS'): + f = fits.open(psf_path) + u = f[0].data['UU--'] + v = f[0].data['VV--'] + U = np.append(u,-u) + V = np.append(v,-v) + u[(u<-0.002)&(u>0.002)] = 0 + v[(v<-0.002)&(v>0.002)] = 0 + + gaussian2dkernel = Gaussian2DKernel(x_size = 2999, y_size = 2999, x_stddev = 75, y_stddev = 75) + + gaussian2dkernel = gaussian2dkernel.array/gaussian2dkernel.array.max() + uv_hist, _, _ = np.histogram2d(U,V ,bins=3000) + uv_hist[uv_hist >0] = 1 + psf = np.abs(np.fft.fftshift(np.fft.fft2(uv_hist))) + psfnorm = psf/psf.max() + psfcut = psfnorm[0:2999,0:2999] + psfcut = psfcut / psfcut.sum() + psfcut = psfcut *gaussian2dkernel + psfcut[psfcut < 1e-12] = 1e-12 + noisy_image = convolve_fft(image, psfcut, normalize_kernel = False, boundary = 'wrap') + + return noisy_image diff --git a/source_detection/FPN/mosaic_eval.py b/source_detection/FPN/mosaic_eval.py new file mode 100644 index 00000000..5ee541a4 --- /dev/null +++ b/source_detection/FPN/mosaic_eval.py @@ -0,0 +1,164 @@ +from FPNeval import detect_sources, box_coord,image_detection,box_coord_inv, annotate +import numpy as np +import matplotlib.pyplot as plt 
+import matplotlib.patches as patches +import torch +from matplotlib.pyplot import figure +from FPN import jaccard +import torch.nn.functional as F + +label_map = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') +color_map = ('w', 'r', 'pink', 'r','brown') + + +def mosaic_detection(model_path,img, nclasses, img_size = 300): #classes without background + all_boxes = np.array([0,0,0,0]) + all_labels = np.array([]) + all_scores = np.array([]) + for tile_y in range(10): + for tile_x in range(10): + predboxes,predlabels,preds = image_detection(model_path,img[img_size*tile_y:img_size*tile_y+img_size,img_size*tile_x:img_size*tile_x+img_size]) + if predlabels[0].max().item() != 4: #and predlabels[0].max().item()!=4 : + for n in range(predboxes[0].shape[0]): + predx, predy, predw, predh = box_coord(predboxes[0][n],img_size) + boxes = np.array([predx+tile_x*img_size, predy+tile_y*img_size, predw, predh]) + all_boxes = np.vstack((all_boxes,boxes)) + all_labels = np.concatenate((all_labels,predlabels[0].cpu().detach().numpy()), axis = 0) + all_scores = np.concatenate((all_labels,preds[0].cpu().detach().numpy()), axis = 0) + testbox = predboxes[0][n] + all_boxes = np.delete(all_boxes, (0), axis=0) + + + fig, ax = plt.subplots(1,1, figsize=(50,40)) + for k in range(len(all_boxes)): + predicted_label = label_map[int(all_labels[k])] + color = color_map[int(all_labels[k])] + predrect = patches.Rectangle((all_boxes[k][0], all_boxes[k][1]), all_boxes[k][2], all_boxes[k][3], linewidth=1, edgecolor=color, + facecolor='none') + #ax.text(all_boxes[k][0],(all_boxes[k][1]+all_boxes[k][2]-20),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) + ax.add_patch(predrect) + + ax.imshow(img,cmap = 'gist_heat') + return (torch.tensor(all_boxes)/(img_size*10)), all_labels, torch.tensor(all_scores) + +def mosaic_clean(mos_boxes,labels,scores): + bboxes = torch.tensor(box_coord_inv(mos_boxes)) + num_classes = 4 + max_overlap = 0.1 + predicted_boxes = list() 
+ predicted_labels = list() + for c in range(num_classes): + label_mask = np.where(labels == c) + c_boxes = bboxes[label_mask] + c_labels = labels[label_mask] + if c_labels.shape[0] == 0: + continue + c_scores = scores[c_labels] + c_scores, sort_ind = c_scores.sort(dim = 0, descending = True) + c_boxes= c_boxes[sort_ind] + overlap = jaccard(c_boxes,c_boxes) + suppress = torch.zeros((c_boxes.size(0))).bool() + for box in range(c_boxes.size(0)): + if suppress[box] == 1: + continue + + suppress = suppress | (overlap[box] > max_overlap) + + suppress[box] = 0 + predicted_boxes.append(c_boxes[~suppress]) + predicted_labels.append(c_labels[~suppress]) + + #final_boxes = np.concatenate((predicted_boxes[0])) + + #final_labels = np.concatenate((predicted_labels[0])) + final_boxes = predicted_boxes[0]#np.concatenate((predicted_boxes[0],predicted_boxes[1],predicted_boxes[2],predicted_boxes[3])) + final_labels = predicted_labels[0]#np.concatenate((predicted_labels[0],predicted_labels[1],predicted_labels[2],predicted_labels[3])) + return final_boxes, final_labels + + +def upscale_mosaic_detection(model_path,img, nclasses, img_size = 150): #classes without background + all_boxes = np.array([0,0,0,0]) + all_labels = np.array([]) + all_scores = np.array([]) + for tile_y in range(img.shape[0]//img_size): + print('row:', tile_y) + for tile_x in range(img.shape[0]//img_size): + dimg = img[img_size*tile_y:img_size*tile_y+img_size,img_size*tile_x:img_size*tile_x+img_size] + #print(1, img_size*tile_y,img_size*tile_y+img_size,img_size*tile_x,img_size*tile_x+img_size) + + dimg = torch.FloatTensor(dimg) + #print('test',img_size*tile_y,img_size*tile_y+img_size) + dimg = dimg.unsqueeze(0) + #print(2, tile_x) + dimg = dimg.unsqueeze(0) + #print(3, dimg.shape) + uimg = F.interpolate(dimg, size = (300,300) , mode = 'bilinear') + #print(uimg.shape) + uimg = uimg[0][0] + uimg = uimg/uimg.max() + predboxes,predlabels,preds = image_detection(model_path,uimg) + if predlabels[0].max().item() != 
nclasses: + for n in range(predboxes[0].shape[0]): + predx, predy, predw, predh = box_coord(predboxes[0][n],img_size) + boxes = np.array([predx+tile_x*img_size, predy+tile_y*img_size, predw, predh]) + all_boxes = np.vstack((all_boxes,boxes)) + all_labels = np.concatenate((all_labels,predlabels[0].cpu().detach().numpy()), axis = 0) + all_scores = np.concatenate((all_labels,preds[0].cpu().detach().numpy()), axis = 0) + testbox = predboxes[0][n] + del dimg, uimg + all_boxes = np.delete(all_boxes, (0), axis=0) + + + fig, ax = plt.subplots(1,1, figsize=(50,40)) + for k in range(len(all_boxes)): + predicted_label = label_map[int(all_labels[k])] + color = color_map[int(all_labels[k])] + predrect = patches.Rectangle((all_boxes[k][0], all_boxes[k][1]), all_boxes[k][2], all_boxes[k][3], linewidth=1, edgecolor=color, + facecolor='none') + #ax.text(all_boxes[k][0],(all_boxes[k][1]+all_boxes[k][2]-20),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) + ax.add_patch(predrect) + + ax.imshow(img,cmap = 'gist_heat') + return (torch.tensor(all_boxes)/(img_size*10)), all_labels, torch.tensor(all_scores) + + +def shifted_mosaic_detection(model_path,img, nclasses, img_size = 300): #classes without background + all_boxes = np.array([0,0,0,0]) + all_labels = np.array([]) + all_scores = np.array([]) + for tile_y in range(10): + for tile_x in range(10): + predboxes,predlabels,preds = image_detection(model_path,img[img_size*tile_y:img_size*tile_y+img_size,img_size*tile_x:img_size*tile_x+img_size]) + if predlabels[0].max().item() != 4: #and predlabels[0].max().item()!=4 : + for n in range(predboxes[0].shape[0]): + predx, predy, predw, predh = box_coord(predboxes[0][n],img_size) + boxes = np.array([predx+tile_x*img_size, predy+tile_y*img_size, predw, predh]) + all_boxes = np.vstack((all_boxes,boxes)) + all_labels = np.concatenate((all_labels,predlabels[0].cpu().detach().numpy()), axis = 0) + all_scores = np.concatenate((all_labels,preds[0].cpu().detach().numpy()), axis = 0) + 
testbox = predboxes[0][n] + for tile_y in range(9): + for tile_x in range(9): + predboxes,predlabels,preds = image_detection(model_path,img[img_size//2+img_size*tile_y:img_size*tile_y+img_size+img_size//2,img_size//2+img_size*tile_x:img_size*tile_x+img_size+img_size//2]) + if predlabels[0].max().item() != 4: #and predlabels[0].max().item()!=4 : + for n in range(predboxes[0].shape[0]): + predx, predy, predw, predh = box_coord(predboxes[0][n],img_size) + boxes = np.array([predx+tile_x*img_size+img_size//2, predy+tile_y*img_size+img_size//2, predw, predh]) + all_boxes = np.vstack((all_boxes,boxes)) + all_labels = np.concatenate((all_labels,predlabels[0].cpu().detach().numpy()), axis = 0) + all_scores = np.concatenate((all_labels,preds[0].cpu().detach().numpy()), axis = 0) + testbox = predboxes[0][n] + all_boxes = np.delete(all_boxes, (0), axis=0) + + + fig, ax = plt.subplots(1,1, figsize=(50,40)) + for k in range(len(all_boxes)): + predicted_label = label_map[int(all_labels[k])] + color = color_map[int(all_labels[k])] + predrect = patches.Rectangle((all_boxes[k][0], all_boxes[k][1]), all_boxes[k][2], all_boxes[k][3], linewidth=1, edgecolor=color, + facecolor='none') + #ax.text(all_boxes[k][0],(all_boxes[k][1]+all_boxes[k][2]-20),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) + ax.add_patch(predrect) + + ax.imshow(img,cmap = 'gist_heat') + return (torch.tensor(all_boxes)/(img_size*10)), all_labels, torch.tensor(all_scores) diff --git a/source_detection/evaluation.py b/source_detection/evaluation.py index d2594de0..0af19e88 100644 --- a/source_detection/evaluation.py +++ b/source_detection/evaluation.py @@ -78,11 +78,12 @@ def image_detection(checkpoint, image): min_score = 0.2, max_overlap = 0.1, top_k = 100) return predb, predl -def classifier_eval(arch, img_batch): - - pred = eval_model(img, arch) +def classifier_eval(arch, img_batch, labels): + pred = eval_model(img_batch, arch) _, l = torch.max(pred, dim = 1) - return l + diff = labels -l + 
diff_sum = diff.sum() + return diff, diff_sum, pred def mAPeval(checkpoint_path, data_path): data = get_bundles(data_path) @@ -228,3 +229,6 @@ def calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels) average_precisions = {rev_label_map[c]: v for c, v in enumerate(average_precisions.tolist())} return average_precisions, mean_average_precision +# - + + diff --git a/source_detection/source_data.py b/source_detection/source_data.py index ce8404f2..e2aaedce 100644 --- a/source_detection/source_data.py +++ b/source_detection/source_data.py @@ -12,6 +12,8 @@ from torchvision import transforms #om utils import * from PIL import Image, ImageDraw, ImageFont +from data_augmentation import random_shear, gaussian_noise, psf_noise, rough_gaussian_noise +import oldgaussian def detector_data(img_size, bundle_size, num_bundles,path): @@ -22,7 +24,7 @@ def detector_data(img_size, bundle_size, num_bundles,path): all_labels = [] for r in range(bundle_size): grid = create_grid(img_size, 1) - num_objects = np.random.randint(1,2) #noise + num_objects = np.random.randint(1,5) #noise bboxes = np.zeros((num_objects,4)) labels = np.zeros((num_objects,1)) if num_objects == 0: @@ -33,9 +35,9 @@ def detector_data(img_size, bundle_size, num_bundles,path): labels[0] = np.array([4]) else: for i in range(num_objects): - rand = np.random.randint(0,4) - rand = 0 #noise + rand = 0#np.random.randint(0,1) if rand == 0: + #g,c,s,theta = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True, spherical = True) g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True) xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size @@ -45,6 +47,23 @@ def detector_data(img_size, bundle_size, num_bundles,path): label = np.array([0]) bboxes[i] = bbox labels[i] = label + + #xmin = (c[0]-(4*np.sqrt(s[0])/2))/img_size + #ymin = (c[1]-(4*np.sqrt(s[1])/2))/img_size + #xmax = (c[0]+(4*np.sqrt(s[0])/2))/img_size + #ymax = 
(c[1]+(4*np.sqrt(s[1])/2))/img_size + #w = xmax-xmin + #h = ymax-ymin + #wn = np.sqrt((w*np.cos(theta))**2+(h*np.sin(theta))**2) + #hn = np.sqrt((h*np.cos(theta))**2+(w*np.sin(theta))**2) + #xmin = c[0]/img_size-wn/2 + #xmax = c[0]/img_size+wn/2 + #ymin = c[1]/img_size-hn/2 + #ymax = c[1]/img_size+hn/2 + #bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + #label = np.array([0]) + #bboxes[i] = bbox + #labels[i] = label elif rand == 1: g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, True, True) xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size @@ -75,7 +94,7 @@ def detector_data(img_size, bundle_size, num_bundles,path): label = np.array([3]) #nodiff bboxes[i] = bbox labels[i] = label - + g = g/g.max() hf.create_dataset('x'+str(r), data=g) hf.create_dataset('y'+str(r), data=bboxes) hf.create_dataset('z'+str(r), data=labels) @@ -89,7 +108,7 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): all_labels = [] for r in range(bundle_size): grid = create_grid(img_size*10, 1) - num_objects = np.random.randint(150,200) + num_objects = 20#np.random.randint(150,200) bboxes = np.zeros((num_objects,4)) labels = np.zeros((num_objects,1)) diffuse_limiter = 0 @@ -101,23 +120,24 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): labels[0] = np.array([4]) else: for i in range(num_objects): - rand = np.random.randint(0,4) + rand = 0#np.random.randint(0,2) if rand == 0: - g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True, True) - xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size - ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size - xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size - ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True, 10, spherical = True) + print(c) + xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size*10)/(img_size*10) + ymin = 
(c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size*10)/(img_size*10) + xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size*10)/(img_size*10) + ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,img_size*10)/(img_size*10) bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) label = np.array([0]) bboxes[i] = bbox labels[i] = label elif rand == 1: diffuse_limiter +=1 - if diffuse_limiter > 5: + if diffuse_limiter > 0: continue print(diffuse_limiter) - g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, True, True, True) + g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, True, True, 10) xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size @@ -127,7 +147,7 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): bboxes[i] = bbox labels[i] = label elif rand == 2: - g,c,s = create_diamond(grid[:, 0], 1, 1, img_size,True, True) + g,c,s = create_diamond(grid[:, 0], 1, 1, img_size,True, 10) xmin = (c[0]-2*s[0])/img_size ymin = (c[1]-2*s[1])/img_size xmax = (c[0]+2*s[0])/img_size @@ -137,7 +157,7 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): bboxes[i] = bbox labels[i] = label elif rand == 3: - g,c = create_square(grid[:, 0],1, 1, img_size, True, True) + g,c = create_square(grid[:, 0],1, 1, img_size, True, 10) xmin = (c[0]-(img_size/50+1))/img_size ymin = (c[1]-(img_size/50+1))/img_size xmax = (c[0]+(img_size/50+1))/img_size @@ -146,12 +166,14 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): label = np.array([3]) #nodiff bboxes[i] = bbox labels[i] = label + g = g/g.max() hf.create_dataset('x'+str(r), data=g) hf.create_dataset('y'+str(r), data=bboxes) hf.create_dataset('z'+str(r), data=labels) hf.close() + def feature_data(num_gauss, num_diff, num_diamond, num_square, img_size, num_files, path): for j in tqdm(range(num_files)): with h5py.File(path+str(j)+'.h5', "w") as hf: @@ -159,7 
+181,7 @@ def feature_data(num_gauss, num_diff, num_diamond, num_square, img_size, num_fil diff_grid = create_grid(img_size, num_diff) diamond_grid = create_grid(img_size, num_diamond) square_grid = create_grid(img_size, num_square) - gaussians = create_gauss(gauss_grid[:, 0], num_gauss, 1, False,img_size, False) + gaussians = create_gauss(gauss_grid[:, 0], num_gauss, 1, False,img_size, False, spherical = True) y_gauss = np.array([0]*len(gaussians)) diff = create_gauss(diff_grid[:, 0], num_diff, 1, False,img_size, True) y_diff = np.array([1]*len(diff)) @@ -173,3 +195,282 @@ def feature_data(num_gauss, num_diff, num_diamond, num_square, img_size, num_fil hf.create_dataset('x', data=arr[shuff]) hf.create_dataset('y', data=keys[shuff]) hf.close() + + +def noisy_data(img_size, mosaic_scale, bundle_size, num_bundles,path): #TEMP CHANGES: NO NOISE! + for t in tqdm(range(num_bundles)): + with h5py.File(path+str(t)+'.h5', "w") as hf: + all_images = [] + all_bboxes = [] + all_labels = [] + grid = create_grid(img_size*mosaic_scale, 1) + num_objects = 180 + bboxes = np.zeros((num_objects,5)) + diffuse_limiter = 0 + bundle = 0 + if num_objects == 0: + bboxes = np.zeros((1,5)) + labels = np.zeros((1,1)) + g = grid[:,0] + bboxes[0] = np.array([0, 0, 1, 1], dtype = float) + labels[0] = np.array([4]) + else: + for i in range(num_objects): + #rand = np.random.randint(0,2) + rand = 0 + #if diffuse_limiter >= 3: + # rand = 0 + if rand == 0: + g,c,s,theta = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True,mosaic_scale, spherical = False) + xmin = (c[0]-(4*np.sqrt(s[0])/2))/(img_size*mosaic_scale) + ymin = (c[1]-(4*np.sqrt(s[1])/2))/(img_size*mosaic_scale) + xmax = (c[0]+(4*np.sqrt(s[0])/2))/(img_size*mosaic_scale) + ymax = (c[1]+(4*np.sqrt(s[1])/2))/(img_size*mosaic_scale) + w = xmax-xmin + h = ymax-ymin + wn = np.sqrt((w*np.cos(theta))**2+(h*np.sin(theta))**2) + hn = np.sqrt((h*np.cos(theta))**2+(w*np.sin(theta))**2) + xmin = c[0]/(img_size*mosaic_scale)-wn/2 + xmax = 
c[0]/(img_size*mosaic_scale)+wn/2 + ymin = c[1]/(img_size*mosaic_scale)-hn/2 + ymax = c[1]/(img_size*mosaic_scale)+hn/2 + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1),0], dtype = float) + bboxes[i] = bbox + elif rand == 1: + diffuse_limiter +=1 + g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, True, True, mosaic_scale) + xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,(img_size*mosaic_scale))/(img_size*mosaic_scale) + ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,(img_size*mosaic_scale))/(img_size*mosaic_scale) + xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,(img_size*mosaic_scale))/(img_size*mosaic_scale) + ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,(img_size*mosaic_scale))/(img_size*mosaic_scale) + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1),1], dtype = float) + bboxes[i] = bbox + image = g[0] + image = gaussian_noise(image) + image = psf_noise(image) + image = image/image.max() + while bundle < bundle_size: + x_crop = np.random.randint(0,((img_size*mosaic_scale))-img_size) + y_crop = np.random.randint(0,((img_size*mosaic_scale))-img_size) + + crop_image = image[y_crop:y_crop+img_size,x_crop:x_crop+img_size] + boxes = np.array(bboxes) + boxes[:,:4] = boxes[:,:4]*(img_size*mosaic_scale) + + box_crop = boxes[boxes[:,0] < x_crop+img_size-(boxes[:,2]-boxes[:,0])/2] + box_crop = box_crop[box_crop[:,2] > x_crop+(box_crop[:,2]-box_crop[:,0])/2] + box_crop = box_crop[box_crop[:,1] < y_crop+img_size-(box_crop[:,3]-box_crop[:,1])/2] + box_crop = box_crop[box_crop[:,3] > y_crop+(box_crop[:,3]-box_crop[:,1])/2] + + if len(box_crop) == 0: + continue + box_crop[:,0] = (box_crop[:,0]-x_crop)/img_size + box_crop[:,1] = (box_crop[:,1]-y_crop)/img_size + box_crop[:,2] = (box_crop[:,2]-x_crop)/img_size + box_crop[:,3] = (box_crop[:,3]-y_crop)/img_size + labels = np.zeros((box_crop.shape[0],1)) + for g in range(box_crop.shape[0]): + labels[g] = box_crop[g,4].astype(int) + final_image = np.expand_dims(crop_image, axis=0) + 
#final_image = rough_gaussian_noise(final_image) + hf.create_dataset('x'+str(bundle), data=final_image) + hf.create_dataset('y'+str(bundle), data=box_crop[:,:4]) + hf.create_dataset('z'+str(bundle), data=labels) + bundle += 1 + hf.close() + + +def noisy_feature_data(num_gauss, num_diff, num_diamond, num_square, img_size, num_files, path): + for j in tqdm(range(num_files)): + with h5py.File(path+str(j)+'.h5', "w") as hf: + gauss_grid = create_grid(img_size, num_gauss) + diff_grid = create_grid(img_size, num_diff) + diamond_grid = create_grid(img_size, num_diamond) + square_grid = create_grid(img_size, num_square) + gaussians = create_gauss(gauss_grid[:, 0], num_gauss, 1, False,img_size, False, spherical = False) + y_gauss = np.array([0]*len(gaussians)) + diff = create_gauss(diff_grid[:, 0], num_diff, 1, False,img_size, True) + y_diff = np.array([1]*len(diff)) + diamonds = create_diamond(diamond_grid[:, 0], num_diamond, 1, img_size) + y_diamond = np.array([2]*len(diamonds)) + squares = create_square(square_grid[:, 0],num_square, 1, img_size) + y_square = np.array([3]*len(squares)) + arr = np.concatenate((gaussians, diff, diamonds, squares), axis=0) + num_superimages = arr.shape[0]//100 + for k in range(num_superimages): + super_grid = create_grid(img_size*10, 1) + for i in range(10): + for o in range(10): + super_grid[0,0][o*img_size:img_size+o*img_size,i*img_size:img_size+i*img_size] = arr[100*k+10*i+o] + + #super_grid[0,0] = gaussian_noise(super_grid[0,0]) + #super_grid[0,0] = psf_noise(super_grid[0,0]) + for p in range(10): + for a in range(10): + arr[10*p+a+100*k] = super_grid[0,0][a*img_size:img_size+a*img_size,p*img_size:img_size+p*img_size] + keys = np.concatenate((y_gauss, y_diff, y_diamond, y_square), axis=0) + shuff = np.random.permutation(len(arr)) + + hf.create_dataset('x', data=arr[shuff]) + hf.create_dataset('y', data=keys[shuff]) + hf.close() + + +def NoisyTBCData(img_size, mosaic_scale, bundle_size, num_bundles,path): + for t in 
tqdm(range(num_bundles)): + with h5py.File(path+str(t)+'.h5', "w") as hf: + images = [] + truth = [] + grid = create_grid(img_size*mosaic_scale, 1) + num_objects = 90 + bboxes = np.zeros((num_objects,5)) + diffuse_limiter = 0 + bundle = 0 + if num_objects == 0: + bboxes = np.zeros((1,5)) + labels = np.zeros((1,1)) + g = grid[:,0] + bboxes[0] = np.array([0, 0, 1, 1], dtype = float) + labels[0] = np.array([4]) + else: + for i in range(num_objects): + rand = np.random.randint(0,2) + rand = 0 + if diffuse_limiter >= 1: + rand = 0 + if rand == 0: + g,c,s,theta = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True,mosaic_scale, spherical = False) + + + + elif rand == 1: + diffuse_limiter +=1 + g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, True, True, mosaic_scale) + xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,(img_size*mosaic_scale))/(img_size*mosaic_scale) + ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,(img_size*mosaic_scale))/(img_size*mosaic_scale) + xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,(img_size*mosaic_scale))/(img_size*mosaic_scale) + ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,(img_size*mosaic_scale))/(img_size*mosaic_scale) + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1),1], dtype = float) + bboxes[i] = bbox + image = g[0] + image = gaussian_noise(image) + image = psf_noise(image) + while bundle < bundle_size: + x_crop = np.random.randint(0,((img_size*mosaic_scale))-img_size) + y_crop = np.random.randint(0,((img_size*mosaic_scale))-img_size) + + crop_image = image[y_crop:y_crop+img_size,x_crop:x_crop+img_size] + boxes = np.array(bboxes) + boxes[:,:4] = boxes[:,:4]*(img_size*mosaic_scale) + + box_crop = boxes[boxes[:,0] < x_crop+img_size-(boxes[:,2]-boxes[:,0])/2] + box_crop = box_crop[box_crop[:,2] > x_crop+(box_crop[:,2]-box_crop[:,0])/2] + box_crop = box_crop[box_crop[:,1] < y_crop+img_size-(box_crop[:,3]-box_crop[:,1])/2] + box_crop = box_crop[box_crop[:,3] > y_crop+(box_crop[:,3]-box_crop[:,1])/2] + + if 
len(box_crop) == 0: + continue + box_crop[:,0] = (box_crop[:,0]-x_crop)/img_size + box_crop[:,1] = (box_crop[:,1]-y_crop)/img_size + box_crop[:,2] = (box_crop[:,2]-x_crop)/img_size + box_crop[:,3] = (box_crop[:,3]-y_crop)/img_size + labels = np.zeros((box_crop.shape[0],1)) + for g in range(box_crop.shape[0]): + labels[g] = box_crop[g,4].astype(int) + final_image = np.expand_dims(crop_image, axis=0) + hf.create_dataset('x'+str(bundle), data=final_image) + hf.create_dataset('y'+str(bundle), data=box_crop[:,:4]) + hf.create_dataset('z'+str(bundle), data=labels) + bundle += 1 + hf.close() + + +def old_detector_data(img_size, bundle_size, num_bundles,path): + for t in tqdm(range(num_bundles)): + with h5py.File(path+str(t)+'.h5', "w") as hf: + all_images = [] + all_bboxes = [] + all_labels = [] + for r in range(bundle_size): + grid = create_grid(img_size, 1) + num_objects = np.random.randint(1,5) #noise + bboxes = np.zeros((num_objects,4)) + labels = np.zeros((num_objects,1)) + if num_objects == 0: + bboxes = np.zeros((1,4)) + labels = np.zeros((1,1)) + g = grid[:,0] + bboxes[0] = np.array([0, 0, 1, 1], dtype = float) + labels[0] = np.array([4]) + else: + for i in range(num_objects): + rand = np.random.randint(0,2) + rand = 0 #noise + if rand == 0: + g,c,s = oldgaussian.create_gauss(grid[:, 0], 1, 1, False,img_size, False, True) + xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([0]) + bboxes[i] = bbox + labels[i] = label + elif rand == 1: + g,c,s = oldgaussian.create_gauss(grid[:, 0], 1, 1, False,img_size, True, True) + xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + xmax = 
(c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size + ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([1]) + bboxes[i] = bbox + labels[i] = label + elif rand == 2: + g,c,s = create_diamond(grid[:, 0], 1, 1, img_size,True) + xmin = (c[0]-2*s[0])/img_size + ymin = (c[1]-2*s[1])/img_size + xmax = (c[0]+2*s[0])/img_size + ymax = (c[1]+2*s[1])/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([2]) #nodiff + bboxes[i] = bbox + labels[i] = label + elif rand == 3: + g,c = create_square(grid[:, 0],1, 1, img_size, True) + xmin = (c[0]-(img_size/50+1))/img_size + ymin = (c[1]-(img_size/50+1))/img_size + xmax = (c[0]+(img_size/50+1))/img_size + ymax = (c[1]+(img_size/50+1))/img_size + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) + label = np.array([3]) #nodiff + bboxes[i] = bbox + labels[i] = label + + hf.create_dataset('x'+str(r), data=g) + hf.create_dataset('y'+str(r), data=bboxes) + hf.create_dataset('z'+str(r), data=labels) + hf.close() + + +def old_feature_data(num_gauss, num_diff, num_diamond, num_square, img_size, num_files, path): + for j in tqdm(range(num_files)): + with h5py.File(path+str(j)+'.h5', "w") as hf: + gauss_grid = create_grid(img_size, num_gauss) + diff_grid = create_grid(img_size, num_diff) + diamond_grid = create_grid(img_size, num_diamond) + square_grid = create_grid(img_size, num_square) + gaussians = oldgaussian.create_gauss(gauss_grid[:, 0], num_gauss, 1, False,img_size, False) + y_gauss = np.array([0]*len(gaussians)) + diff = oldgaussian.create_gauss(diff_grid[:, 0], num_diff, 1, False,img_size, True) + y_diff = np.array([1]*len(diff)) + diamonds = create_diamond(diamond_grid[:, 0], num_diamond, 1, img_size) + y_diamond = np.array([2]*len(diamonds)) + squares = create_square(square_grid[:, 
0],num_square, 1, img_size) + y_square = np.array([3]*len(squares)) + arr = np.concatenate((gaussians, diff, diamonds, squares), axis=0) + keys = np.concatenate((y_gauss, y_diff, y_diamond, y_square), axis=0) + shuff = np.random.permutation(len(arr)) + hf.create_dataset('x', data=arr[shuff]) + hf.create_dataset('y', data=keys[shuff]) + hf.close() diff --git a/source_detection/source_utils.py b/source_detection/source_utils.py index 5ed45bf5..b7d2cc6d 100644 --- a/source_detection/source_utils.py +++ b/source_detection/source_utils.py @@ -1,6 +1,11 @@ # + import h5py import numpy as np +import matplotlib.pyplot as plt +import matplotlib.patches as patches +from evaluation import box_coord + + def open_detector_bundle(path): bundle_x = [] @@ -16,3 +21,22 @@ def open_detector_bundle(path): bundle_y.append(bundle_y_i) bundle_z.append(bundle_z_i) return bundle_x, bundle_y, bundle_z + + +# - + +def annotate(img, bbox, labels): + class_labels = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') + color_map = ('w', 'g', 'r', 'y','brown') + img_size = img.shape[0] + fig, ax2 = plt.subplots(1,1) + for j in range(bbox.shape[0]): + true_label = labels[j][0] + color = color_map[labels[j][0]] + trux, truy, truw, truh = box_coord(bbox[j],img_size) + trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor=color, facecolor='none') + ax2.text(trux,(truy+truh-7),true_label, color = 'k',fontsize=8,backgroundcolor = color) + ax2.add_patch(trurect) + ax2.imshow(img,cmap = 'gist_heat') + + From 75279de76756e00dc7bc9f247a5273e0a3dd63b9 Mon Sep 17 00:00:00 2001 From: Paul Simon Blomenkamp Date: Tue, 9 Nov 2021 14:30:49 +0100 Subject: [PATCH 8/9] PyBDSF files and final cleanup --- radionets/simulations/gaussians.py | 3 +- source_detection/FPN/FPNeval.py | 163 ++++++++++++++------------ source_detection/FPN/FPNloss.py | 4 +- source_detection/FPN/FPNtrain.py | 127 +++++++++++++------- source_detection/FPN/SDC.py | 85 ++++++++++---- 
source_detection/FPN/mosaic_eval.py | 74 ++++++++++-- source_detection/PyBDSF/PyBDSF.py | 97 +++++++++++++++ source_detection/PyBDSF/SDC.py | 106 +++++++++++++++++ source_detection/data_augmentation.py | 131 +++++++++++++++++++++ source_detection/loss.py | 2 +- source_detection/source_data.py | 123 +++++++++++++++---- source_detection/train.py | 117 ++++++++++++------ 12 files changed, 828 insertions(+), 204 deletions(-) create mode 100644 source_detection/PyBDSF/PyBDSF.py create mode 100644 source_detection/PyBDSF/SDC.py create mode 100644 source_detection/data_augmentation.py diff --git a/radionets/simulations/gaussians.py b/radionets/simulations/gaussians.py index 60fa2418..49e556bb 100644 --- a/radionets/simulations/gaussians.py +++ b/radionets/simulations/gaussians.py @@ -366,8 +366,7 @@ def create_gauss(img, N, sources, source_list, img_size=63, diffuse = False, bbo sy = sx else: amp = ( - np.random.randint(10, 100, size=(N))# * 1 / 10 * np.random.randint(5, 10) - )# / 1e2 + np.random.randint(20, 150, size=(N)))# was 10, 100 if spherical: sx = np.random.uniform(1/2*(img_size**2)/720, 2*(img_size**2)/360, size=(N, sources)) sy = sx diff --git a/source_detection/FPN/FPNeval.py b/source_detection/FPN/FPNeval.py index 31b8e522..c432976a 100644 --- a/source_detection/FPN/FPNeval.py +++ b/source_detection/FPN/FPNeval.py @@ -10,9 +10,10 @@ from FPN import center_to_boundary from radionets.dl_framework.data import get_bundles from tqdm import tqdm +import matplotlib.patches as mpatches class_labels = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') -color_map = ('w', 'r', 'pink', 'r','brown') +color_map = ('w', 'r', '#84b819', 'cyan','brown') label_map = {k: v for v, k in enumerate(class_labels)} rev_label_map = {v: k for k, v in label_map.items()} def box_coord(coord, img_size): @@ -52,30 +53,54 @@ def detect_sources(checkpoint_path, data_path, img_size, n = 0): images = images.to('cuda') predicted_locs, predicted_scores = model(images) 
predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, - min_score = 0.2, max_overlap = 0.2, top_k = 100) + min_score = 0.3, max_overlap = 0.45, top_k = 100) + plt.rcParams.update({'font.size': 20}) fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,8)) + for j in range(len(eval_dataset[n][1][0])): true_label = class_labels[eval_dataset[n][2][0][j].item()] color = color_map[eval_dataset[n][2][0][j].item()] trux, truy, truw, truh = box_coord(eval_dataset[n][1][0][j],img_size) - trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor=color, facecolor='none') - ax1.text(trux,(truy+truh-7),true_label, color = 'k',fontsize=8,backgroundcolor = color) + trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=2, edgecolor=color, facecolor='none') + #ax1.text(trux,(truy+truh-7),true_label, color = 'k',fontsize=8,backgroundcolor = color) ax1.add_patch(trurect) for k in range(len(predl[n])): predicted_label = class_labels[predl[n][k].item()] color = color_map[predl[n][k].item()] predx, predy, predw, predh = box_coord(predb[n][k],img_size) - predrect = patches.Rectangle((predx, predy), predw, predh, linewidth=1, edgecolor=color, + predrect = patches.Rectangle((predx, predy), predw, predh, linewidth=2, edgecolor=color, facecolor='none') - ax2.text(predx,(predy+predh-7),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) + #ax2.text(predx,(predy+predh-7),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) ax2.add_patch(predrect) - cbar_ax = fig.add_axes([1, 0.1, 0.05, 0.8]) - + cbar_ax = fig.add_axes([0.982, 0.183, 0.03, 0.632]) + + ax1.set(xlabel='Pixels', ylabel='Pixels') + ax1.label_outer() + ax2.set(xlabel='Pixels', ylabel='Pixels') + ax2.label_outer() + + #ax1.set(xlabel='', ylabel='Pixels') + #ax1.label_outer() + #ax2.set(xlabel='', ylabel='Pixels') + #ax2.label_outer() + #ax1.set_xticks([], []) + #ax2.set_xticks([], []) + + print('max', eval_dataset[n][0].squeeze(0).max()) 
+ img2 = ax1.imshow(eval_dataset[n][0].squeeze(0),cmap = 'gist_heat') img = ax2.imshow(eval_dataset[n][0].squeeze(0),cmap = 'gist_heat') - cbar = fig.colorbar(img, cax=cbar_ax) + cbar = fig.colorbar(img, cax=cbar_ax,label = 'Normalized Intensity') + + green_patch = mpatches.Patch(color='cyan', label='Square') + white_patch = mpatches.Patch(facecolor='w',edgecolor='black', label='Pointlike Gaussian') + red_patch = mpatches.Patch(color='red', label='Diffuse Gaussian') + pink_patch = mpatches.Patch(color='#84b819', label='diamond') + #plt.figlegend([white_patch,red_patch, pink_patch, green_patch],['Pointlike Gaussian','Diffuse Gaussian', 'Diamond', 'Square'],loc =9,ncol=4, bbox_to_anchor=(0.1, .45, .9, 0.5)) + fig.tight_layout() + return fig def image_detection(checkpoint_path, image): img_size = image.shape[0] image = torch.FloatTensor(image).unsqueeze(0).unsqueeze(0) @@ -88,19 +113,10 @@ def image_detection(checkpoint_path, image): with torch.no_grad(): image = image.to('cuda') predicted_locs, predicted_scores = model(image) - #print(predicted_locs.shape) - predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors, - min_score = 0.2, max_overlap = 0.1, top_k = 200) - #fig, ax1 = plt.subplots(1,1,figsize=(12,8)) - #for k in range(len(predl[0])): - # predicted_label = class_labels[predl[0][k].item()] - # color = color_map[predl[0][k].item()] - # predx, predy, predw, predh = box_coord(predb[0][k],img_size) - # predrect = patches.Rectangle((predx, predy), predw, predh, linewidth=1, edgecolor=color, - # facecolor='none') - # ax1.text(predx,(predy+predh-7),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) - # ax1.add_patch(predrect) - #ax1.imshow(image[0][0].cpu()) + + predb, predl, preds = model.object_detection(predicted_locs, predicted_scores, model.priors_cxcy, + min_score = 0.3, max_overlap = 0.45, top_k = 100) + return predb, predl,preds def classifier_eval(arch, img_batch): @@ -133,7 +149,7 @@ def mAPeval(checkpoint_path, 
data_path, curve = False): # scores = scores.to('cuda') predicted_locs, predicted_scores = model(images) predb, predl, preds = model.object_detection(predicted_locs, predicted_scores,priors= model.priors_cxcy, - min_score = 0.5, max_overlap = 0.45, top_k = 10) + min_score = 0.3, max_overlap = 0.45, top_k = 100) boxes = [boxes[b][0] for b in range(len(boxes))] labels = [labels[l][0][0] for l in range(len(labels))] pred_boxes.extend(predb) @@ -172,71 +188,67 @@ def calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels, pred_scores = torch.cat(pred_scores, dim=0) average_precisions = torch.zeros((n_classes - 1), dtype=torch.float) - + curve_values = [] #BEWARE BELOW + print(n_classes-1) for c in range(0, n_classes-1): true_class_images = true_images[true_labels == c] true_class_boxes = true_boxes[true_labels == c] - # Keep track of which true objects with this class have already been 'detected' - # So far, none + true_class_boxes_detected = torch.zeros((true_class_boxes.size(0)), dtype=torch.uint8).to( - 'cuda') # (n_class_objects) + 'cuda') - # Extract only detections with this class - pred_class_images = pred_images[pred_labels == c] # (n_class_detections) - pred_class_boxes = pred_boxes[pred_labels == c] # (n_class_detections, 4) - pred_class_scores = pred_scores[pred_labels == c] # (n_class_detections) + + pred_class_images = pred_images[pred_labels == c] + pred_class_boxes = pred_boxes[pred_labels == c] + pred_class_scores = pred_scores[pred_labels == c] n_class_detections = pred_class_boxes.size(0) if n_class_detections == 0: continue - # Sort detections in decreasing order of confidence/scores - pred_class_scores, sort_ind = torch.sort(pred_class_scores, dim=0, descending=True) # (n_class_detections) - pred_class_images = pred_class_images[sort_ind] # (n_class_detections) - pred_class_boxes = pred_class_boxes[sort_ind] # (n_class_detections, 4) - - # In the order of decreasing scores, check if true or false positive - true_positives = 
torch.zeros((n_class_detections), dtype=torch.float).to('cuda') # (n_class_detections) - false_positives = torch.zeros((n_class_detections), dtype=torch.float).to('cuda') # (n_class_detections) + + pred_class_scores, sort_ind = torch.sort(pred_class_scores, dim=0, descending=True) + pred_class_images = pred_class_images[sort_ind] + pred_class_boxes = pred_class_boxes[sort_ind] + + true_positives = torch.zeros((n_class_detections), dtype=torch.float).to('cuda') + false_positives = torch.zeros((n_class_detections), dtype=torch.float).to('cuda') for d in range(n_class_detections): - this_detection_box = pred_class_boxes[d].unsqueeze(0) # (1, 4) - this_image = pred_class_images[d] # (), scalar + this_detection_box = pred_class_boxes[d].unsqueeze(0) + this_image = pred_class_images[d] - # Find objects in the same image with this class, their difficulties, and whether they have been detected before - object_boxes = true_class_boxes[true_class_images == this_image] # (n_class_objects_in_img) - # If no such object in this image, then the detection is a false positive + + object_boxes = true_class_boxes[true_class_images == this_image] + if object_boxes.size(0) == 0: false_positives[d] = 1 continue - # Find maximum overlap of this detection with objects in this image of this class - overlaps = FPN.jaccard(this_detection_box, object_boxes) # (1, n_class_objects_in_img) - max_overlap, ind = torch.max(overlaps.squeeze(0), dim=0) # (), () - scalars - - # 'ind' is the index of the object in these image-level tensors 'object_boxes', 'object_difficulties' - # In the original class-level tensors 'true_class_boxes', etc., 'ind' corresponds to object with index... 
+ + overlaps = FPN.jaccard(this_detection_box, object_boxes) + max_overlap, ind = torch.max(overlaps.squeeze(0), dim=0) + original_ind = torch.LongTensor(range(true_class_boxes.size(0)))[true_class_images == this_image][ind] - # We need 'original_ind' to update 'true_class_boxes_detected' - - # If the maximum overlap is greater than the threshold of 0.5, it's a match + + if max_overlap.item() > 0.5: if true_class_boxes_detected[original_ind] == 0: true_positives[d] = 1 - true_class_boxes_detected[original_ind] = 1 # this object has now been detected/accounted for - # Otherwise, it's a false positive (since this object is already accounted for) + true_class_boxes_detected[original_ind] = 1 + else: false_positives[d] = 1 - # Otherwise, the detection occurs in a different location than the actual object, and is a false positive + else: false_positives[d] = 1 - # Compute cumulative precision and recall at each detection in the order of decreasing scores - cumul_true_positives = torch.cumsum(true_positives, dim=0) # (n_class_detections) - cumul_false_positives = torch.cumsum(false_positives, dim=0) # (n_class_detections) + + cumul_true_positives = torch.cumsum(true_positives, dim=0) + cumul_false_positives = torch.cumsum(false_positives, dim=0) cumul_precision = cumul_true_positives / ( - cumul_true_positives + cumul_false_positives + 1e-10) # (n_class_detections) - cumul_recall = cumul_true_positives / len(true_class_images) # (n_class_detections) + cumul_true_positives + cumul_false_positives + 1e-10) + cumul_recall = cumul_true_positives / len(true_class_images) if curve: c_recall_thresholds = torch.arange(start=0, end=1.1, step=.01).tolist() # (11) c_precisions = torch.zeros((len(c_recall_thresholds)), dtype=torch.float).to('cuda') # (11) @@ -246,10 +258,9 @@ def calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels, c_precisions[i] = cumul_precision[c_recalls_above_t].max() else: c_precisions[i] = 0. 
- print(c) - return c_precisions - print(c) - # Find the mean of the maximum of the precisions corresponding to recalls above the threshold 't' + curve_values.append(c_precisions) + + recall_thresholds = torch.arange(start=0, end=1.1, step=.1).tolist() # (11) precisions = torch.zeros((len(recall_thresholds)), dtype=torch.float).to('cuda') # (11) for i, t in enumerate(recall_thresholds): @@ -259,16 +270,18 @@ def calculate_mAP(pred_boxes, pred_labels, pred_scores, true_boxes, true_labels, else: precisions[i] = 0. average_precisions[c] = precisions.mean() # c is in [1, n_classes - 1] - #print(precisions) - # Calculate Mean Average Precision (mAP) + + if curve == False: + mean_average_precision = average_precisions.mean().item() - mean_average_precision = average_precisions.mean().item() - # Keep class-wise average precisions in a dictionary - average_precisions = {rev_label_map[c]: v for c, v in enumerate(average_precisions.tolist())} + average_precisions = {rev_label_map[c]: v for c, v in enumerate(average_precisions.tolist())} + + return average_precisions, mean_average_precision + else: + return curve_values - return average_precisions, mean_average_precision # - @@ -297,7 +310,11 @@ def annotate(img, bbox, labels): true_label = labels[j] color = color_map[labels[j].astype('int')] trux, truy, truw, truh = box_coord(bbox[j],img_size) - trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor=color, facecolor='none') + trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=2, edgecolor=color, facecolor='none') #ax2.text(trux,(truy+truh-7),true_label, color = 'k',fontsize=8,backgroundcolor = color) ax2.add_patch(trurect) - ax2.imshow(img, cmap = 'gist_heat') + mi = ax2.imshow(img, cmap = 'gist_heat') + ax2.axis('off') + cbar_ax = fig.add_axes([0.2109, 0.11, 0.604, 0.013]) + plt.rcParams.update({'font.size': 50}) + plt.colorbar(mi,cax = cbar_ax,orientation='horizontal', label = 'Normalized Intensity') diff --git 
a/source_detection/FPN/FPNloss.py b/source_detection/FPN/FPNloss.py index 9a303626..0eb52468 100644 --- a/source_detection/FPN/FPNloss.py +++ b/source_detection/FPN/FPNloss.py @@ -57,11 +57,11 @@ def forward(self, predicted_locs, predicted_scores, data_locs, data_labels): overlap_for_each_prior[prior_for_each_object] = 1. label_for_each_prior = data_labels[image_i][0][0][object_for_each_prior]#very ugly shapes watch out - label_for_each_prior[overlap_for_each_prior < self.threshold] = 2 #nodiff + label_for_each_prior[overlap_for_each_prior < self.threshold] = 1 #nodiff true_classes[image_i] = label_for_each_prior true_locs[image_i] = center_to_offset(boundary_to_center(data_locs[image_i][0][object_for_each_prior]), self.priors_cxcy) - positive_priors = true_classes != 2 #nodiff + positive_priors = true_classes != 1 #nodiff loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors]) n_positives = positive_priors.sum(dim = 1) n_hard_negatives = self.neg_pos_ratio * n_positives diff --git a/source_detection/FPN/FPNtrain.py b/source_detection/FPN/FPNtrain.py index 9338e7be..bc947e47 100644 --- a/source_detection/FPN/FPNtrain.py +++ b/source_detection/FPN/FPNtrain.py @@ -8,14 +8,15 @@ from tqdm import tqdm import matplotlib.pyplot as plt -path = get_bundles('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/july/mixed') +path = get_bundles('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/noisetrain/') +valid_path = get_bundles('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/valid_noisetrain') iterations = 120000 -n_classes = 3 #nodiff +n_classes = 2 #nodiff checkpoint = None -#checkpoint = '/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/june/mixedcheckpoints/checkpoint_ssd300.pth.tar' +#checkpoint = '/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/july/mixedcheckpoints/checkpoint_ssd300.pth.tar' batch_size = 32 workers = 4 -lr = 1e-9 +lr = 1e-4 
decay_lr_at = [80000,120000] decay_lr_to = 0.1 #increase_lr_at = [10,30000] @@ -151,15 +152,19 @@ def main(): optimizer = checkpoint['optimizer'] model = model.to('cuda') - loss_function = FocalLoss(priors_cxcy = model.priors_cxcy).to('cuda')#detectionLoss(priors_cxcy = model.priors_cxcy).to('cuda') + loss_function = detectionLoss(priors_cxcy = model.priors_cxcy).to('cuda') #FocalLoss(priors_cxcy = model.priors_cxcy).to('cuda')# - train_dataset = detect_dataset(path) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size, shuffle = True, collate_fn = train_dataset.collate_fn) + valid_dataset = detect_dataset(valid_path) + valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size, + shuffle = True, + collate_fn = valid_dataset.collate_fn) + epochs = iterations//(len(train_dataset)//batch_size) decay_lr_at = [it // (len(train_dataset)//batch_size) for it in decay_lr_at] @@ -167,46 +172,57 @@ def main(): if epoch in decay_lr_at: adjust_learning_rate(optimizer, decay_lr_to) - #if epoch in increase_lr_at: - # adjust_learning_rate(optimizer, increase_lr_to) - #if epoch in increase_lr_at2: - #adjust_learning_rate(optimizer, increase_lr_to2) - train(train_loader, model, loss_function, optimizer, epoch) + train(valid_loader, model, loss_function, optimizer, epoch, valid = True) + train(train_loader, model, loss_function, optimizer, epoch, valid = False) print("Epoch:", epoch) + if epoch % 10 == 0: - save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/july/focal/checkpoint_ssd300' + '_e' + str(epoch)+'.pth.tar') - save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/july/focal/checkpoint_ssd300.pth.tar')# apparently not defined + save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/fpnnoise2/checkpoint_ssd300' + '_e' + str(epoch)+'.pth.tar') + save_checkpoint(epoch, model, 
optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/fpnnoise2/checkpoint_ssd300.pth.tar')# apparently not defined -def train(data_loader, model, loss_function, optimizer, epochs): - - model.train() - losses = np.zeros(940) - for i, (images, boxes, labels) in enumerate(data_loader): - images = images.to('cuda') - - predicted_locs, predicted_classes_scores= model(images) - loss = loss_function(predicted_locs, predicted_classes_scores, - boxes, labels) - - - - losses[i] = loss - #print('i', i, 'Loss:',loss) - optimizer.zero_grad() - loss.backward() - - if grad_clip is not None: - clip_gradient(optimizer, grad_clip) - - optimizer.step() - - print('Average Loss', np.average(losses)) - f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/focalloss.txt', "a") - f.write(str(epochs) + '\t' + str(np.average(losses)) +'\n') - f.close() +def train(data_loader, model, loss_function, optimizer, epochs, valid): + if valid == False: + model.train() + losses = np.zeros(1879) + for i, (images, boxes, labels) in enumerate(data_loader): + images = images.to('cuda') + + predicted_locs, predicted_classes_scores= model(images) + loss = loss_function(predicted_locs, predicted_classes_scores, + boxes, labels) + losses[i] = loss + optimizer.zero_grad() + loss.backward() + + if grad_clip is not None: + clip_gradient(optimizer, grad_clip) + optimizer.step() + else: + model.eval() + valid_losses = np.zeros(238) + with torch.no_grad(): + for i, (images, boxes, labels) in enumerate(data_loader): + images = images.to('cuda') + + predicted_locs, predicted_classes_scores= model(images) + loss = loss_function(predicted_locs, predicted_classes_scores, + boxes, labels) + valid_losses[i] = loss +# print('Average Loss', np.average(losses)) + if valid == False: + #print('train',loss) + f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/noiseloss2.txt', "a") + f.write(str(epochs) + '\t' + str(np.average(losses)) +'\n') + 
f.close() + print('Average train Loss',str(np.average(losses))) + else: + #print('valid',loss) + f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/valid_noiseloss2.txt', "a") + f.write(str(epochs) + '\t' + str(np.average(valid_losses)) +'\n') + f.close() del predicted_locs, predicted_classes_scores, images, boxes, labels @@ -223,6 +239,7 @@ def adjust_learning_rate(optimizer, scale): for param_group in optimizer.param_groups: param_group['lr'] = param_group['lr']*scale print('Decaying learning rate') + print('Reallty decaying learning rate') def clip_gradient(optimizer, grad_clip): @@ -235,3 +252,33 @@ def clip_gradient(optimizer, grad_clip): for param in group['params']: if param.grad is not None: param.grad.data.clamp_(-grad_clip, grad_clip) + + +def old_train(data_loader, model, loss_function, optimizer, epochs): + + model.train() + losses = np.zeros(939) + for i, (images, boxes, labels) in enumerate(data_loader): + images = images.to('cuda') + + predicted_locs, predicted_classes_scores= model(images) + loss = loss_function(predicted_locs, predicted_classes_scores, + boxes, labels) + + + + losses[i] = loss + #print('i', i, 'Loss:',loss) + optimizer.zero_grad() + loss.backward() + + if grad_clip is not None: + clip_gradient(optimizer, grad_clip) + + optimizer.step() + + print('Average Loss', np.average(losses)) + f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/fpnloss.txt', "a") + f.write(str(epochs) + '\t' + str(np.average(losses)) +'\n') + f.close() + del predicted_locs, predicted_classes_scores, images, boxes, labels diff --git a/source_detection/FPN/SDC.py b/source_detection/FPN/SDC.py index 46f4f464..8fb2df5b 100644 --- a/source_detection/FPN/SDC.py +++ b/source_detection/FPN/SDC.py @@ -5,6 +5,7 @@ import torch.nn.functional as F import torch import h5py +#import bdsf from tqdm import tqdm import matplotlib.patches as patches import numpy as np @@ -28,7 +29,7 @@ def __init__(self, 
num_images, label_path, img_path, img_size): def create_image(self, x, y, image, img_size): img = image[0][0][y:y+img_size,x:x+img_size].astype('float64') - img = img/img.max() + img = img#/img.max() return img def get_coords(self): @@ -47,21 +48,37 @@ def get_coords(self): all_coords = cx, cy, flux return all_coords - def create_labels(self,imgx,imgy, SNRfactor, mean, all_coords): + + def RMS(self, image, img_size): + img_save = image.reshape(1,1,img_size,img_size) + g = fits.open('/net/big-tank/POOL/users/pblomenkamp/sdc1/dataset.fits') + g[0].header + g[0].data = img_save + g[0].header['CRPIX1'] = img_size//2 + g[0].header['CRPIX2'] = img_size//2 + g[0].header['CRVAL3'] = 560000000 + g[0].header['BMAJ'] = 4.16666676756E-04 + g[0].header['BMIN']=4.16666676756E-04 + g[0].header['BPA'] = 0 + g.writeto('temp.fits',overwrite=True) + + img = bdsf.process_image('./temp.fits', rms_map = True,quiet=True) + return img.rms_arr[0][0] + def create_labels(self,imgx,imgy, RMSfactor, mean, all_coords, image): coords = [] x, y, flux = all_coords - print(mean) for i in range(x.shape[1]): - if flux[1][i] > 70*mean: - if imgx < x[1][i] < imgx+self.img_size: - #print(x[0][i]) - woy = np.where(x[0][i] == y[0]) - if imgy < y[1][woy[0].item()] < imgy+self.img_size: - c = np.array([x[1][i]-imgx,y[1][woy[0].item()]-imgy]) - c = np.round(c).astype('int') + if imgx < x[1][i] < imgx+self.img_size: + #print(x[0][i]) + woy = np.where(x[0][i] == y[0]) + if imgy < y[1][woy[0].item()] < imgy+self.img_size: + c = np.array([x[1][i]-imgx,y[1][woy[0].item()]-imgy]) + c = np.round(c).astype('int') + print(image[int(x[1][i]-imgx),int(y[1][woy[0].item()]-imgy)]) + if image[int(y[1][woy[0].item()]-imgy),int(x[1][i]-imgx)] > RMSfactor: coords.append(c) return coords - def forward(self, SNRfactor, save_path): + def forward(self, save_path): all_images = [] all_labels = [] all_pos = [] @@ -71,12 +88,13 @@ def forward(self, SNRfactor, save_path): all_coords = self.get_coords() mean = img.mean() #NEEDS 
PROPER CALCULATION with h5py.File(save_path+'.h5', "w") as hf: - for i in range(self.num_images): + for i in tqdm(range(self.num_images)): x = np.random.randint(16350 ,19900) y = np.random.randint(16700,19950) pos = (x,y) image = self.create_image(x,y,img, self.img_size) - labels = self.create_labels(x,y, SNRfactor, mean, all_coords) + RMSfactor = self.RMS(image, self.img_size) + labels = self.create_labels(x,y, RMSfactor, mean, all_coords, image) all_images.append(image) all_labels.append(labels) all_pos.append((x,y)) @@ -106,10 +124,10 @@ def image_comp(self,image_number): img_pos = np.array(f["z"+str(image_number)]) f.close() - fig, (ax1, ax2) = plt.subplots(1,2,figsize=(12,8)) - ax1.imshow(img,cmap = 'hot') + fig, axs = plt.subplots(1,2,figsize=(12,8)) + axs[0].imshow(img,cmap = 'gist_heat') for i in range(len(true_labels)): - ax1.scatter(true_labels[i][0], true_labels[i][1], color='b', s=20) + axs[0].scatter(true_labels[i][0], true_labels[i][1], color='b', s=20) dimg = torch.FloatTensor(img) dimg = dimg.unsqueeze(0) @@ -128,13 +146,25 @@ def image_comp(self,image_number): trux, truy, truw, truh = box_coord(bbox[j],img_size) trurect = patches.Rectangle((trux, truy), truw, truh, linewidth=1, edgecolor='w', facecolor='none') #ax2.text(trux,(truy+truh-7),true_label, color = 'k',fontsize=8,backgroundcolor = color) - ax2.add_patch(trurect) - ax2.imshow(img, cmap = 'hot') + axs[1].add_patch(trurect) + im = axs[1].imshow(img, cmap = 'gist_heat') + + for ax in axs.flat: + ax.set(xlabel='Pixels', ylabel='Pixels') + ax.label_outer() + fig.tight_layout() + cbar_ax = fig.add_axes([0.99, 0.155, 0.03, 0.69]) + fig.colorbar(im, cax=cbar_ax,label = 'Normalized Intensity') + plt.rcParams.update({'font.size': 20}) + plt.savefig('./sdc_example.pdf', bbox_inches = 'tight') def precisioneval(self): num_sources = [] f = h5py.File(self.sdcset_path, "r") + sum_true_sources = 0 + sum_detected_sources = 0 + false_pos = 0 for image_number in range(len(f.keys())//3): img = 
np.array(f["x"+str(image_number)]) @@ -150,7 +180,11 @@ def precisioneval(self): b,l,s = image_detection(self.checkpoint,uimg) coords = self.boxtoxy(b[0].cpu().detach().numpy()*img_size) - + if np.array(true_labels).shape[0] == 0: + false_pos += coords.shape[0] + num_sources.append((0, 0)) + # print('ay caramba',coords.shape[0]) + continue coords = np.round(coords).astype('int') dist = scipy.spatial.distance.cdist(coords, np.array(true_labels)) @@ -164,16 +198,21 @@ def precisioneval(self): keep_mask = [True]*sort_dist.shape[0] for i in range(sort_dist.shape[0]): for j in range(sort_dist.shape[0]): - if keep_mask[j] == False: + if keep_mask[j] == False: #If already thrown out: Continue continue - if sort_ind[:,0][j] == sort_ind[:,0][i]: - if sort_dist[:,0][j] > sort_dist[:,0][i]: + if sort_ind[:,0][j] == sort_ind[:,0][i]: # If i and j are closest to the same object + if sort_dist[:,0][j] > sort_dist[:,0][i]: #keep only the one that is closest keep_mask[j] = False + false_pos += 1 continue if sort_dist[:,0][j] > img_size*0.05: keep_mask[j] = False + false_pos += 1 detected_sources = sort_dist[keep_mask].shape[0] true_sources = true_labels.shape[0] + sum_detected_sources = sum_detected_sources + detected_sources + sum_true_sources = sum_true_sources + true_sources num_sources.append((true_sources, detected_sources)) + sum_sources = (sum_true_sources, sum_detected_sources) f.close() - return num_sources + return sum_sources, false_pos diff --git a/source_detection/FPN/mosaic_eval.py b/source_detection/FPN/mosaic_eval.py index 5ee541a4..e81fb3be 100644 --- a/source_detection/FPN/mosaic_eval.py +++ b/source_detection/FPN/mosaic_eval.py @@ -39,12 +39,13 @@ def mosaic_detection(model_path,img, nclasses, img_size = 300): #classes without ax.add_patch(predrect) ax.imshow(img,cmap = 'gist_heat') + ax.axis('off') return (torch.tensor(all_boxes)/(img_size*10)), all_labels, torch.tensor(all_scores) def mosaic_clean(mos_boxes,labels,scores): bboxes = 
torch.tensor(box_coord_inv(mos_boxes)) num_classes = 4 - max_overlap = 0.1 + max_overlap = 0 predicted_boxes = list() predicted_labels = list() for c in range(num_classes): @@ -59,8 +60,8 @@ def mosaic_clean(mos_boxes,labels,scores): overlap = jaccard(c_boxes,c_boxes) suppress = torch.zeros((c_boxes.size(0))).bool() for box in range(c_boxes.size(0)): - if suppress[box] == 1: - continue + #if suppress[box] == 1: + # continue suppress = suppress | (overlap[box] > max_overlap) @@ -70,9 +71,13 @@ def mosaic_clean(mos_boxes,labels,scores): #final_boxes = np.concatenate((predicted_boxes[0])) - #final_labels = np.concatenate((predicted_labels[0])) - final_boxes = predicted_boxes[0]#np.concatenate((predicted_boxes[0],predicted_boxes[1],predicted_boxes[2],predicted_boxes[3])) - final_labels = predicted_labels[0]#np.concatenate((predicted_labels[0],predicted_labels[1],predicted_labels[2],predicted_labels[3])) + #final_labels = np.concatenate((predicted_labels[0])) + if len(predicted_boxes) == 4: + final_boxes = np.concatenate((predicted_boxes[0],predicted_boxes[1],predicted_boxes[2],predicted_boxes[3])) + final_labels = np.concatenate((predicted_labels[0],predicted_labels[1],predicted_labels[2],predicted_labels[3])) + else: + final_boxes = np.concatenate((predicted_boxes[0],predicted_boxes[1],predicted_boxes[2])) + final_labels = np.concatenate((predicted_labels[0],predicted_labels[1],predicted_labels[2])) return final_boxes, final_labels @@ -155,10 +160,65 @@ def shifted_mosaic_detection(model_path,img, nclasses, img_size = 300): #classes for k in range(len(all_boxes)): predicted_label = label_map[int(all_labels[k])] color = color_map[int(all_labels[k])] - predrect = patches.Rectangle((all_boxes[k][0], all_boxes[k][1]), all_boxes[k][2], all_boxes[k][3], linewidth=1, edgecolor=color, + predrect = patches.Rectangle((all_boxes[k][0], all_boxes[k][1]), all_boxes[k][2], all_boxes[k][3], linewidth=3, edgecolor=color, facecolor='none') 
#ax.text(all_boxes[k][0],(all_boxes[k][1]+all_boxes[k][2]-20),predicted_label, color = 'k',fontsize=8,backgroundcolor = color) ax.add_patch(predrect) ax.imshow(img,cmap = 'gist_heat') return (torch.tensor(all_boxes)/(img_size*10)), all_labels, torch.tensor(all_scores) + + +def mosaicTPFP(pred_boxes, pred_labels, true_boxes, true_labels): + pred_boxes = torch.tensor(pred_boxes).unsqueeze(0) + pred_labels = torch.tensor(pred_labels).unsqueeze(0) + true_boxes = torch.tensor(true_boxes) + true_labels = torch.tensor(true_labels) + TP = 0 + FP = 0 + n_classes = 4 + + #BEWARE BELOW + for c in range(0, n_classes): + mask = (true_labels == c)[0].T[0] + pred_mask = (pred_labels == c)[0] + true_class_boxes = true_boxes[0][mask] + + # Keep track of which true objects with this class have already been 'detected' + # So far, none + true_class_boxes_detected = torch.zeros((true_class_boxes.size(0)), dtype=torch.uint8).to( + 'cuda') # (n_class_objects) + + # Extract only detections with this class + pred_class_boxes = pred_boxes[pred_mask] # (n_class_detections, 4) + + n_class_detections = pred_class_boxes.size(0) + if n_class_detections == 0: + continue + + true_positives = torch.zeros((n_class_detections), dtype=torch.float).to('cuda') # (n_class_detections) + false_positives = torch.zeros((n_class_detections), dtype=torch.float).to('cuda') # (n_class_detections) + for d in range(n_class_detections): + this_detection_box = pred_class_boxes[d].unsqueeze(0) # (1, 4) + object_boxes = true_class_boxes # (n_class_objects_in_img) + if object_boxes.size(0) == 0: + false_positives[d] = 1 + continue + + overlaps = jaccard(this_detection_box, object_boxes) # (1, n_class_objects_in_img) + max_overlap, ind = torch.max(overlaps.squeeze(0), dim=0) # (), () - scalars + original_ind = torch.LongTensor(range(true_class_boxes.size(0)))[ind] + if max_overlap.item() >= 0.3: + if true_class_boxes_detected[ original_ind] == 0: + true_positives[d] = 1 + true_class_boxes_detected[original_ind] = 1 # 
this object has now been detected/accounted for + # Otherwise, it's a false positive (since this object is already accounted for) + else: + false_positives[d] = 1 + else: + false_positives[d] = 1 + cumul_true_positives = torch.cumsum(true_positives, dim=0) # (n_class_detections) + cumul_false_positives = torch.cumsum(false_positives, dim=0) # (n_class_detections) + TP += cumul_true_positives.max().item() + FP += cumul_false_positives.max().item() + return TP, FP diff --git a/source_detection/PyBDSF/PyBDSF.py b/source_detection/PyBDSF/PyBDSF.py new file mode 100644 index 00000000..bcd7b542 --- /dev/null +++ b/source_detection/PyBDSF/PyBDSF.py @@ -0,0 +1,97 @@ +import bdsf +import matplotlib.pyplot as plt +import numpy as np +from astropy.io import fits +import h5py +import scipy +from scipy.spatial import distance +import torch + +def image_eval(image): #Evaluate an image with PyBDSF + image = np.flip(image, 0) + img_save = image.reshape(1,1,50,50) + g = fits.open('/net/big-tank/POOL/users/pblomenkamp/sdc1/dataset.fits') + g[0].header + g[0].data = img_save + g[0].header['CRPIX1'] = 25 + g[0].header['CRPIX2'] = 25 + g[0].header['CRVAL3'] = 560000000 + g[0].header['BMAJ'] = 4.16666676756E-04 + g[0].header['BMIN']=4.16666676756E-04 + g[0].header['BPA'] = 0 + g.writeto('temp.fits',overwrite=True) + + img = bdsf.process_image('./temp.fits', rms_map = True) + img.show_fit() + img.export_image(img_format = 'fits', img_type = 'gaus_model') + + + +def precisioneval(sdcset_path): + #Evaluates an entire SKA Data Challenge Dataset. PyBDSF detections are made on every image and are evaluated. If the predicted source position is further away than 5% of the image resolution, the detection is considered a false positive. + #Creates a temporary FITS file to since PyBDSF only accepts those. 
+ num_sources = [] + f = h5py.File(sdcset_path, "r") + g = fits.open('/net/big-tank/POOL/users/pblomenkamp/sdc1/dataset.fits') + sum_true_sources = 0 + sum_detected_sources = 0 + false_pos = 0 + for image_number in range(len(f.keys())//3): + img = np.array(f["x"+str(image_number)]) + true_labels = np.array(f["y"+str(image_number)]) + img_pos = np.array(f["z"+str(image_number)]) + img_size = img.shape[0] + img_save = img.reshape(1,1,50,50) + + g[0].header + g[0].data = img_save + g[0].header['CRPIX1'] = 25 + g[0].header['CRPIX2'] = 25 + g[0].header['BPA'] = 0 + g.writeto('TEMP.fits',overwrite=True) + + img = bdsf.process_image('TEMP.fits',quiet=True) + img.write_catalog(format='fits', catalog_type='gaul', clobber = True) + l = fits.open('TEMP.pybdsm.gaul.fits') + xcoords = l[1].data.field(12) + ycoords = l[1].data.field(14) + coords = np.vstack((xcoords,ycoords)).T + + coords = np.round(coords).astype('int') + if np.array(true_labels).shape[0] == 0: + false_pos += coords.shape[0] + num_sources.append((0, coords.shape[0])) + continue + dist = scipy.spatial.distance.cdist(coords, np.array(true_labels)) + + dist = torch.tensor(dist) + + sort_dist, sort_ind = dist.sort(dim = 1, descending = False) + + sort_dist = sort_dist.detach().numpy() + sort_ind = sort_ind.detach().numpy() + keep_mask = [True]*sort_dist.shape[0] + for i in range(sort_dist.shape[0]): + for j in range(sort_dist.shape[0]): + if keep_mask[j] == False: + continue + if sort_ind[:,0][j] == sort_ind[:,0][i]: + if sort_dist[:,0][j] > sort_dist[:,0][i]: + keep_mask[j] = False + false_pos += 1 + continue + if sort_dist[:,0][j] > img_size*0.05: #FP threshold + keep_mask[j] = False + false_pos += 1 + detected_sources = sort_dist[keep_mask].shape[0] + true_sources = true_labels.shape[0] + num_sources.append((true_sources, detected_sources)) + sum_detected_sources = sum_detected_sources + detected_sources + sum_true_sources = sum_true_sources + true_sources + sum_sources = (sum_true_sources, sum_detected_sources) 
+ f.close() + g.close() + print('True Sources',sum_sources[0], 'True Positives',sum_sources[1], 'False Positives', false_pos, 'False Negatives', + sum_sources[0]-sum_sources[1] ) + return num_sources, sum_sources, false_pos + #True number of sources, Number of True Positives, Number of false positives \ No newline at end of file diff --git a/source_detection/PyBDSF/SDC.py b/source_detection/PyBDSF/SDC.py new file mode 100644 index 00000000..206290fc --- /dev/null +++ b/source_detection/PyBDSF/SDC.py @@ -0,0 +1,106 @@ +from astropy.io import fits +import matplotlib.pyplot as plt +import matplotlib.colors as colors +import numpy as np +import torch.nn.functional as F +import torch +import h5py +import bdsf +from tqdm import tqdm +import matplotlib.patches as patches +import numpy as np +import scipy +from scipy.spatial import distance +#from data_augmentation import gaussian_noise, psf_noise + + +class sdc_dataset(): + + def __init__(self, num_images, label_path, img_path, img_size): + + self.num_images = num_images + + self.label_path = label_path + + self.img_path = img_path + + self.img_size = img_size + + def create_image(self, x, y, image, img_size): + img = image[0][0][y:y+img_size,x:x+img_size].astype('float64') + img = img#/img.max() + return img + + def get_coords(self): + classes = np.loadtxt(self.label_path, skiprows = 18, usecols = (11) ) + cx = np.loadtxt(self.label_path, skiprows = 18, usecols = (13) ) + cy = np.loadtxt(self.label_path, skiprows = 18, usecols = (14) ) + use = np.loadtxt(self.label_path, dtype = 'bool', skiprows = 18, usecols = (12) ) + flux = np.loadtxt(self.label_path, skiprows = 18, usecols = (5) ) + cx = cx[use] + cy = cy[use] + flux = flux[use] + oid = np.linspace(0,cx.shape[0], cx.shape[0], dtype = 'int32') + cy = np.vstack((oid,cy)) + cx = np.vstack((oid,cx)) + flux = np.vstack((oid,flux)) + + all_coords = cx, cy, flux + return all_coords + + def RMS(self, image, img_size): + img_save = image.reshape(1,1,img_size,img_size) + g = 
fits.open('/net/big-tank/POOL/users/pblomenkamp/sdc1/dataset.fits') + g[0].header + g[0].data = img_save + g[0].header['CRPIX1'] = img_size//2 + g[0].header['CRPIX2'] = img_size//2 + g[0].header['CRVAL3'] = 560000000 + g[0].header['BMAJ'] = 4.16666676756E-04 + g[0].header['BMIN']=4.16666676756E-04 + g[0].header['BPA'] = 0 + g.writeto('temp.fits',overwrite=True) + + img = bdsf.process_image('./temp.fits', rms_map = True,quiet=True) + return img.rms_arr[0][0] + def create_labels(self,imgx,imgy, RMSfactor, mean, all_coords, image): + coords = [] + x, y, flux = all_coords + for i in range(x.shape[1]): + if imgx < np.round(x[1][i]).astype('int') < imgx+self.img_size: + #print(x[0][i]) + woy = np.where(x[0][i] == y[0]) + if imgy < np.round(y[1][woy[0].item()]).astype('int') < imgy+self.img_size: + c = np.array([x[1][i]-imgx,y[1][woy[0].item()]-imgy]) + c = np.round(c).astype('int') + if image[int(y[1][woy[0].item()]-imgy),int(x[1][i]-imgx)] > RMSfactor: + coords.append(c) + return coords + def forward(self, save_path): + all_images = [] + all_labels = [] + all_pos = [] + f = fits.open(self.img_path) + img = f[0].data + f.close() + all_coords = self.get_coords() + mean = img.mean() #NEEDS PROPER CALCULATION + with h5py.File(save_path+'.h5', "w") as hf: + for i in tqdm(range(self.num_images)): + x = np.random.randint(16350 ,19900) + y = np.random.randint(16700,19950) + pos = (x,y) + image = self.create_image(x,y,img, self.img_size) + RMSfactor = 2*self.RMS(image, self.img_size) #SNR + #print(RMSfactor) + labels = self.create_labels(x,y, RMSfactor, mean, all_coords, image) + image = image/image.max() + all_images.append(image) + all_labels.append(labels) + all_pos.append((x,y)) + hf.create_dataset('x'+str(i), data=image) + hf.create_dataset('y'+str(i), data=labels) + hf.create_dataset('z'+str(i), data=pos) + hf.close() + + diff --git a/source_detection/data_augmentation.py b/source_detection/data_augmentation.py new file mode 100644 index 00000000..cae47a1f --- /dev/null 
+++ b/source_detection/data_augmentation.py @@ -0,0 +1,131 @@ +import matplotlib.pyplot as plt +import numpy as np +from radionets.evaluation.utils import load_pretrained_model, eval_model +import train +from radionets.dl_framework.data import get_bundles +import torch +from tqdm import tqdm +from evaluation import box_coord +import matplotlib.patches as patches +import random +#import cv2 +from matplotlib.pyplot import figure +import h5py +from astropy.io import fits +from astropy.convolution import convolve_fft, Gaussian2DKernel +import torch.nn.functional as F + + +class_labels = ('pointlike gaussian', 'diffuse gaussian', 'diamond', 'square', 'background') +color_map = ('r', 'g', 'w', 'y','brown') + +class random_shear(object): + def __init__(self, shear_factor = 0.5): + + self.shear_factor = shear_factor + self.shear_factor = (-self.shear_factor, self.shear_factor) + shear_factor = random.uniform(*self.shear_factor) + print(shear_factor) + self.horizontal_flip + def __call__(self, img, bboxes): + shear_factor = random.uniform(*self.shear_factor) + bboxes = bboxes * img.shape[1] + w,h = img.shape[1], img.shape[0] + + if shear_factor < 0: + img, bboxes = self.horizontal_flip(img, bboxes) + + M = np.array([[1, abs(shear_factor), 0],[0,1,0]]) + + nW = img.shape[1] + abs(shear_factor*img.shape[0]) + + bboxes[:,[0,2]] += ((bboxes[:,[1,3]]) * abs(shear_factor) ).astype(int) + + + img = cv2.warpAffine(img, M, (int(nW), img.shape[0])) + + if shear_factor < 0: + img, bboxes = self.horizontal_flip(img, bboxes) + + img = cv2.resize(img, (w,h)) + + scale_factor_x = nW / w + + bboxes[:,:4] /= [scale_factor_x, 1, scale_factor_x, 1] + + bboxes = bboxes / img.shape[1] + return img, bboxes + def horizontal_flip(self, img, bboxes): + img_center = np.array(img.shape[:2])[::-1]/2 + img_center = np.hstack((img_center, img_center)) + img = img[::-1,:] + bboxes[:,[0,2]] += 2*(img_center[[0,2]] - bboxes[:,[0,2]]) + + box_w = abs(bboxes[:,0] - bboxes[:,2]) + + bboxes[:,0] -= box_w + 
bboxes[:,2] += box_w + + return img, bboxes + + +def gaussian_noise(image, strength = 0.05): + pixel = image.shape[1] + bundle_size = 1 + x = np.linspace(0, pixel - 1, num=pixel) + y = np.linspace(0, pixel - 1, num=pixel) + X, Y = np.meshgrid(x, y) + grid = np.array([np.random.normal(0,1,X.shape) *strength, X, Y]) + grid = np.repeat( + grid[None, :, :, :], + bundle_size, + axis=0, + ) + k = np.random.normal(0,1,X.shape) * image.max()/10 + return image+grid[0][0] + +def psf_noise(image, psf_path = '/net/big-tank/POOL/projects/ska/0836-BAND8_POSTAIPS.UVFITS'): + f = fits.open(psf_path) + u = f[0].data['UU--'] + v = f[0].data['VV--'] + U = np.append(u,-u) + V = np.append(v,-v) + u[(u<-0.002)&(u>0.002)] = 0 + v[(v<-0.002)&(v>0.002)] = 0 + + gaussian2dkernel = Gaussian2DKernel(x_size = 2999, y_size = 2999, x_stddev = 75, y_stddev = 75) + + gaussian2dkernel = gaussian2dkernel.array/gaussian2dkernel.array.max() + uv_hist, _, _ = np.histogram2d(U,V ,bins=3000) + uv_hist[uv_hist >0] = 1 + psf = np.abs(np.fft.fftshift(np.fft.fft2(uv_hist))) + psfnorm = psf/psf.max() + psfcut = psfnorm[0:2999,0:2999] + psfcut = psfcut / psfcut.sum() + psfcut = psfcut *gaussian2dkernel + psfcut[psfcut < 1e-12] = 1e-12 + noisy_image = convolve_fft(image, psfcut, normalize_kernel = False, boundary = 'wrap') + + return noisy_image + + +def rough_gaussian_noise(image, strength = 0.05): + pixel = image.shape[1] + bundle_size = 1 + x = np.linspace(0, (pixel/2) - 1, num=pixel//12) + y = np.linspace(0, (pixel/2) - 1, num=pixel//12) + X, Y = np.meshgrid(x, y) + strength = np.random.uniform(0.02,0.05) + grid = np.array([np.random.normal(0,strength,X.shape) , X, Y]) + grid = np.repeat( + grid[None, :, :, :], + bundle_size, + axis=0, + ) + dimg = torch.FloatTensor(grid[0][0]) + dimg = dimg.unsqueeze(0) + dimg = dimg.unsqueeze(0) + uimg = F.interpolate(dimg, size = (pixel,pixel) , mode = 'bilinear') + uimg = uimg[0][0].detach().numpy() + k = np.random.normal(0,1,X.shape) * image.max()/10 + return 
image+uimg diff --git a/source_detection/loss.py b/source_detection/loss.py index 87350ef2..0e8beedc 100644 --- a/source_detection/loss.py +++ b/source_detection/loss.py @@ -5,7 +5,7 @@ # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.6.0 +# jupytext_version: 1.11.2 # kernelspec: # display_name: Python 3 # language: python diff --git a/source_detection/source_data.py b/source_detection/source_data.py index e2aaedce..6f9998ee 100644 --- a/source_detection/source_data.py +++ b/source_detection/source_data.py @@ -35,7 +35,7 @@ def detector_data(img_size, bundle_size, num_bundles,path): labels[0] = np.array([4]) else: for i in range(num_objects): - rand = 0#np.random.randint(0,1) + rand = np.random.randint(0,4) if rand == 0: #g,c,s,theta = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True, spherical = True) g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True) @@ -108,7 +108,7 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): all_labels = [] for r in range(bundle_size): grid = create_grid(img_size*10, 1) - num_objects = 20#np.random.randint(150,200) + num_objects = 200#np.random.randint(150,200) bboxes = np.zeros((num_objects,4)) labels = np.zeros((num_objects,1)) diffuse_limiter = 0 @@ -120,10 +120,12 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): labels[0] = np.array([4]) else: for i in range(num_objects): - rand = 0#np.random.randint(0,2) + rand = np.random.randint(0,4) + if rand == 1: + if diffuse_limiter >= 5: + rand = np.random.choice([0,2,3]) if rand == 0: g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True, 10, spherical = True) - print(c) xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size*10)/(img_size*10) ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size*10)/(img_size*10) xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size*10)/(img_size*10) @@ -134,34 +136,31 @@ def create_mosaic(img_size, bundle_size, num_bundles,path): labels[i] = label elif rand == 1: 
diffuse_limiter +=1 - if diffuse_limiter > 0: - continue - print(diffuse_limiter) g,c,s = create_gauss(grid[:, 0], 1, 1, False,img_size, True, True, 10) - xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size - ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size - xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size)/img_size - ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,img_size)/img_size + xmin = (c[0]-(3*np.sqrt(s[0])/2)).clip(0,img_size*10)/(img_size*10) + ymin = (c[1]-(3*np.sqrt(s[1])/2)).clip(0,img_size*10)/(img_size*10) + xmax = (c[0]+(3*np.sqrt(s[0])/2)).clip(0,img_size*10)/(img_size*10) + ymax = (c[1]+(3*np.sqrt(s[1])/2)).clip(0,img_size*10)/(img_size*10) bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) label = np.array([1]) bboxes[i] = bbox labels[i] = label elif rand == 2: g,c,s = create_diamond(grid[:, 0], 1, 1, img_size,True, 10) - xmin = (c[0]-2*s[0])/img_size - ymin = (c[1]-2*s[1])/img_size - xmax = (c[0]+2*s[0])/img_size - ymax = (c[1]+2*s[1])/img_size + xmin = (c[0]-2*s[0]).clip(0,img_size*10)/(img_size*10) + ymin = (c[1]-2*s[1]).clip(0,img_size*10)/(img_size*10) + xmax = (c[0]+2*s[0]).clip(0,img_size*10)/(img_size*10) + ymax = (c[1]+2*s[1]).clip(0,img_size*10)/(img_size*10) bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1)], dtype = float) label = np.array([2]) #nodiff bboxes[i] = bbox labels[i] = label elif rand == 3: g,c = create_square(grid[:, 0],1, 1, img_size, True, 10) - xmin = (c[0]-(img_size/50+1))/img_size - ymin = (c[1]-(img_size/50+1))/img_size - xmax = (c[0]+(img_size/50+1))/img_size - ymax = (c[1]+(img_size/50+1))/img_size + xmin = (c[0]-(img_size/50+1)).clip(0,img_size*10)/(img_size*10) + ymin = (c[1]-(img_size/50+1)).clip(0,img_size*10)/(img_size*10) + xmax = (c[0]+(img_size/50+1)).clip(0,img_size*10)/(img_size*10) + ymax = (c[1]+(img_size/50+1)).clip(0,img_size*10)/(img_size*10) bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), 
ymax.clip(0,1)], dtype = float) label = np.array([3]) #nodiff bboxes[i] = bbox @@ -204,7 +203,7 @@ def noisy_data(img_size, mosaic_scale, bundle_size, num_bundles,path): #TEMP CHA all_bboxes = [] all_labels = [] grid = create_grid(img_size*mosaic_scale, 1) - num_objects = 180 + num_objects = 220 bboxes = np.zeros((num_objects,5)) diffuse_limiter = 0 bundle = 0 @@ -246,8 +245,8 @@ def noisy_data(img_size, mosaic_scale, bundle_size, num_bundles,path): #TEMP CHA bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1),1], dtype = float) bboxes[i] = bbox image = g[0] - image = gaussian_noise(image) - image = psf_noise(image) + #image = gaussian_noise(image) + #image = psf_noise(image) image = image/image.max() while bundle < bundle_size: x_crop = np.random.randint(0,((img_size*mosaic_scale))-img_size) @@ -272,7 +271,7 @@ def noisy_data(img_size, mosaic_scale, bundle_size, num_bundles,path): #TEMP CHA for g in range(box_crop.shape[0]): labels[g] = box_crop[g,4].astype(int) final_image = np.expand_dims(crop_image, axis=0) - #final_image = rough_gaussian_noise(final_image) + #final_image = rough_gaussian_noise(final_image) #add rough noise hf.create_dataset('x'+str(bundle), data=final_image) hf.create_dataset('y'+str(bundle), data=box_crop[:,:4]) hf.create_dataset('z'+str(bundle), data=labels) @@ -474,3 +473,81 @@ def old_feature_data(num_gauss, num_diff, num_diamond, num_square, img_size, num hf.create_dataset('x', data=arr[shuff]) hf.create_dataset('y', data=keys[shuff]) hf.close() + + +def noisy_data_multiclass(img_size, mosaic_scale, bundle_size, num_bundles,path): #TEMP CHANGES: NO NOISE! 
+ for t in tqdm(range(num_bundles)): + with h5py.File(path+str(t)+'.h5', "w") as hf: + all_images = [] + all_bboxes = [] + all_labels = [] + grid = create_grid(img_size*mosaic_scale, 1) + num_objects = 220 + bboxes = np.zeros((num_objects,5)) + diffuse_limiter = 0 + bundle = 0 + if num_objects == 0: + bboxes = np.zeros((1,5)) + labels = np.zeros((1,1)) + g = grid[:,0] + bboxes[0] = np.array([0, 0, 1, 1], dtype = float) + labels[0] = np.array([4]) + else: + for i in range(num_objects): + #rand = np.random.randint(0,2) + rand = 0 + #if diffuse_limiter >= 3: + # rand = 0 + if rand == 0: + g,c,s,theta = create_gauss(grid[:, 0], 1, 1, False,img_size, False, True,mosaic_scale, spherical = False) + xmin = (c[0]-(4*np.sqrt(s[0])/2))/(img_size*mosaic_scale) + ymin = (c[1]-(4*np.sqrt(s[1])/2))/(img_size*mosaic_scale) + xmax = (c[0]+(4*np.sqrt(s[0])/2))/(img_size*mosaic_scale) + ymax = (c[1]+(4*np.sqrt(s[1])/2))/(img_size*mosaic_scale) + w = xmax-xmin + h = ymax-ymin + wn = np.sqrt((w*np.cos(theta))**2+(h*np.sin(theta))**2) + hn = np.sqrt((h*np.cos(theta))**2+(w*np.sin(theta))**2) + xmin = c[0]/(img_size*mosaic_scale)-wn/2 + xmax = c[0]/(img_size*mosaic_scale)+wn/2 + ymin = c[1]/(img_size*mosaic_scale)-hn/2 + ymax = c[1]/(img_size*mosaic_scale)+hn/2 + if np.abs(s[0]-s[1]) < s[0]/2: + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1),0], dtype = float) + bboxes[i] = bbox + else: + bbox = np.array([xmin.clip(0,1), ymin.clip(0,1), xmax.clip(0,1), ymax.clip(0,1),1], dtype = float) + bboxes[i] = bbox + image = g[0] + image = gaussian_noise(image) + image = psf_noise(image) + image = image/image.max() + while bundle < bundle_size: + x_crop = np.random.randint(0,((img_size*mosaic_scale))-img_size) + y_crop = np.random.randint(0,((img_size*mosaic_scale))-img_size) + + crop_image = image[y_crop:y_crop+img_size,x_crop:x_crop+img_size] + boxes = np.array(bboxes) + boxes[:,:4] = boxes[:,:4]*(img_size*mosaic_scale) + + box_crop = boxes[boxes[:,0] < 
x_crop+img_size-(boxes[:,2]-boxes[:,0])/2] + box_crop = box_crop[box_crop[:,2] > x_crop+(box_crop[:,2]-box_crop[:,0])/2] + box_crop = box_crop[box_crop[:,1] < y_crop+img_size-(box_crop[:,3]-box_crop[:,1])/2] + box_crop = box_crop[box_crop[:,3] > y_crop+(box_crop[:,3]-box_crop[:,1])/2] + + if len(box_crop) == 0: + continue + box_crop[:,0] = (box_crop[:,0]-x_crop)/img_size + box_crop[:,1] = (box_crop[:,1]-y_crop)/img_size + box_crop[:,2] = (box_crop[:,2]-x_crop)/img_size + box_crop[:,3] = (box_crop[:,3]-y_crop)/img_size + labels = np.zeros((box_crop.shape[0],1)) + for g in range(box_crop.shape[0]): + labels[g] = box_crop[g,4].astype(int) + final_image = np.expand_dims(crop_image, axis=0) + #final_image = rough_gaussian_noise(final_image) #add rough noise + hf.create_dataset('x'+str(bundle), data=final_image) + hf.create_dataset('y'+str(bundle), data=box_crop[:,:4]) + hf.create_dataset('z'+str(bundle), data=labels) + bundle += 1 + hf.close() diff --git a/source_detection/train.py b/source_detection/train.py index 9404e8f6..0f28706b 100644 --- a/source_detection/train.py +++ b/source_detection/train.py @@ -7,8 +7,9 @@ from tqdm import tqdm import matplotlib.pyplot as plt -path = get_bundles('//net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/train/') -iterations = 240000 +path = get_bundles('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/ssd') +valid_path = get_bundles('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/valid') +iterations = 120000 n_classes = 5 #nodiff checkpoint = None #checkpoint = '/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300_e120.pth.tar' @@ -155,6 +156,11 @@ def main(): shuffle = True, collate_fn = train_dataset.collate_fn) + valid_dataset = detect_dataset(valid_path) + valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size, + shuffle = True, + collate_fn = valid_dataset.collate_fn) + epochs = 
iterations//(len(train_dataset)//batch_size) decay_lr_at = [it // (len(train_dataset)//batch_size) for it in decay_lr_at] @@ -163,42 +169,57 @@ def main(): if epoch in decay_lr_at: adjust_learning_rate(optimizer, decay_lr_to) - train(train_loader, model, loss_function, optimizer, epoch) - + train(valid_loader, model, loss_function, optimizer, epoch, valid = True) + train(train_loader, model, loss_function, optimizer, epoch, valid = False) print("Epoch:", epoch) if epoch % 10 == 0: - save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300' + '_e' + str(epoch)+'.pth.tar') - save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/checkpoints/checkpoint_ssd300.pth.tar')# apparently not defined + save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/ssdcheckpoints/checkpoint_ssd300' + '_e' + str(epoch)+'.pth.tar') + save_checkpoint(epoch, model, optimizer,'/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/ssdcheckpoints/checkpoint_ssd300.pth.tar')# apparently not defined -def train(data_loader, model, loss_function, optimizer, epochs): - - model.train() - losses = np.zeros(1877) - for i, (images, boxes, labels) in enumerate(data_loader): - images = images.to('cuda') - - predicted_locs, predicted_classes_scores= model(images) - loss = loss_function(predicted_locs, predicted_classes_scores, - boxes, labels) - - - - losses[i] = loss - #print('i', i, 'Loss:',loss) - optimizer.zero_grad() - loss.backward() - - if grad_clip is not None: - clip_gradient(optimizer, grad_clip) - - optimizer.step() - - print('Average Loss', np.average(losses)) - f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/loss.txt', "a") - f.write(str(epochs) + '\t' + str(np.average(losses)) +'\n') - f.close() +def train(data_loader, model, loss_function, optimizer, epochs, 
valid): + if valid == False: + model.train() + losses = np.zeros(939) + for i, (images, boxes, labels) in enumerate(data_loader): + images = images.to('cuda') + + predicted_locs, predicted_classes_scores= model(images) + loss = loss_function(predicted_locs, predicted_classes_scores, + boxes, labels) + losses[i] = loss + optimizer.zero_grad() + loss.backward() + + if grad_clip is not None: + clip_gradient(optimizer, grad_clip) + optimizer.step() + else: + model.eval() + valid_losses = np.zeros(236) + with torch.no_grad(): + for i, (images, boxes, labels) in enumerate(data_loader): + images = images.to('cuda') + + predicted_locs, predicted_classes_scores= model(images) + loss = loss_function(predicted_locs, predicted_classes_scores, + boxes, labels) + valid_losses[i] = loss +# print('Average Loss', np.average(losses)) + if valid == False: + #print('train',loss) + f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/ssdloss.txt', "a") + f.write(str(epochs) + '\t' + str(np.average(losses)) +'\n') + f.close() + print('Average train Loss',str(np.average(losses))) + else: + #print('valid',loss) + f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/september/valid_ssdloss.txt', "a") + f.write(str(epochs) + '\t' + str(np.average(valid_losses)) +'\n') + f.close() + print('Average valid Loss',str(np.average(valid_losses))) + del predicted_locs, predicted_classes_scores, images, boxes, labels @@ -227,3 +248,33 @@ def clip_gradient(optimizer, grad_clip): for param in group['params']: if param.grad is not None: param.grad.data.clamp_(-grad_clip, grad_clip) + + +def old_train(data_loader, model, loss_function, optimizer, epochs): + + model.train() + losses = np.zeros(939) + for i, (images, boxes, labels) in enumerate(data_loader): + images = images.to('cuda') + + predicted_locs, predicted_classes_scores= model(images) + loss = loss_function(predicted_locs, predicted_classes_scores, + boxes, labels) + + + + losses[i] = loss + 
print('i', i, 'Loss:',loss) + optimizer.zero_grad() + loss.backward() + + if grad_clip is not None: + clip_gradient(optimizer, grad_clip) + + optimizer.step() + + #print('Average Loss', np.average(losses)) + #f = open('/net/big-tank/POOL/users/pblomenkamp/radionets/objectdetection/loss.txt', "a") + #f.write(str(epochs) + '\t' + str(np.average(losses)) +'\n') + #f.close() + del predicted_locs, predicted_classes_scores, images, boxes, labels From 09e3c1022e36155e796f69f3db06336adb2d2805 Mon Sep 17 00:00:00 2001 From: Paul Simon Blomenkamp Date: Tue, 9 Nov 2021 14:35:53 +0100 Subject: [PATCH 9/9] Final PyBDSF files --- source_detection/PyBDSF/PyBDSF.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source_detection/PyBDSF/PyBDSF.py b/source_detection/PyBDSF/PyBDSF.py index bcd7b542..3791dcb3 100644 --- a/source_detection/PyBDSF/PyBDSF.py +++ b/source_detection/PyBDSF/PyBDSF.py @@ -24,9 +24,9 @@ def image_eval(image): #Evaluate an image with PyBDSF img = bdsf.process_image('./temp.fits', rms_map = True) img.show_fit() img.export_image(img_format = 'fits', img_type = 'gaus_model') + - - + def precisioneval(sdcset_path): #Evaluates an entire SKA Data Challenge Dataset. PyBDSF detections are made on every image and are evaluated. If the predicted source position is further away than 5% of the image resolution, the detection is considered a false positive. #Creates a temporary FITS file to since PyBDSF only accepts those. @@ -94,4 +94,4 @@ def precisioneval(sdcset_path): print('True Sources',sum_sources[0], 'True Positives',sum_sources[1], 'False Positives', false_pos, 'False Negatives', sum_sources[0]-sum_sources[1] ) return num_sources, sum_sources, false_pos - #True number of sources, Number of True Positives, Number of false positives \ No newline at end of file + #True number of sources, Number of True Positives, Number of false positives