From c6c3f5bd8e267a93310fecc08a388dcc8f3a65a1 Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Thu, 18 Apr 2024 20:16:21 +0000 Subject: [PATCH 01/21] add: notebook demostrating artifacts --- life-science/monai_decathlon.ipynb | 392 +++++++++++++++++++++++++++++ 1 file changed, 392 insertions(+) create mode 100644 life-science/monai_decathlon.ipynb diff --git a/life-science/monai_decathlon.ipynb b/life-science/monai_decathlon.ipynb new file mode 100644 index 00000000..9d382e1b --- /dev/null +++ b/life-science/monai_decathlon.ipynb @@ -0,0 +1,392 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "33e2ac00-22be-4f7c-9445-5c3220d0f1bf", + "metadata": {}, + "source": [ + "# Fetching Brain Tumor Segemntation Dataset\n", + "\n", + "In this notebook, we will learn:\n", + "- how we can use [MONAI Core APIs](https://github.com/Project-MONAI/MONAI) to download the brain tumor segmentation data from the [Medical Segmentation Decathlon](http://medicaldecathlon.com) challenge.\n", + "- how we can upload the dataset to Weights & Biases and use it as a dataset artifact." + ] + }, + { + "cell_type": "markdown", + "id": "813a28eb-8d05-412c-b3d4-9e64eb2962dc", + "metadata": {}, + "source": [ + "## 🌴 Setup and Installation\n", + "\n", + "First, let us install the latest version of both MONAI and Weights and Biases." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6d8a4eaa-6c15-44f0-81f8-b0c2800b1017", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install -q -U \"monai[nibabel, tqdm]\"\n", + "!pip install -q -U wandb" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "34ccb87f-320b-4a11-b95c-eff70487c6e2", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/lib/python3/dist-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.25.2\n", + " warnings.warn(f\"A NumPy version >={np_minversion} and <{np_maxversion}\"\n" + ] + } + ], + "source": [ + "import os\n", + "import wandb\n", + "from monai.apps import DecathlonDataset" + ] + }, + { + "cell_type": "markdown", + "id": "752e1f77-a825-4eb7-afb7-5c2807b29ada", + "metadata": {}, + "source": [ + "## 🌳 Initialize a W&B Run\n", + "\n", + "We will start a new W&B run to start tracking our experiment." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a2315b79-8c0a-4cfd-aa6d-4fca55d78137", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mgeekyrakshit\u001b[0m (\u001b[33mlifesciences\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.16.6" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/ubuntu/wandb/run-20240418_193230-3kmw0fh1" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run silvery-universe-1 to Weights & Biases (docs)
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/lifesciences/brain-tumor-segmentation" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/3kmw0fh1" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "wandb.init(\n", + " project=\"brain-tumor-segmentation\",\n", + " entity=\"lifesciences\",\n", + " job_type=\"fetch_dataset\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "308bd1ff-0999-4b85-b9a7-2a9d5753e69e", + "metadata": {}, + "source": [ + "## 🍁 Fetching the Dataset using MONAI\n", + "\n", + "The [`monai.apps.DecathlonDataset`](https://docs.monai.io/en/stable/apps.html#monai.apps.DecathlonDataset) lets us automatically download the data of [Medical Segmentation Decathlon challenge](http://medicaldecathlon.com/) and generate items for training, validation, or testing. We will use this API in the later notebooks to load and transform our datasets automatically." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "42189439-2c3d-403b-915a-98f897d049e4", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Task01_BrainTumour.tar: 7.09GB [04:44, 26.7MB/s] " + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2024-04-18 19:37:16,210 - INFO - Downloaded: dataset/Task01_BrainTumour.tar\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2024-04-18 19:37:28,014 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", + "2024-04-18 19:37:28,016 - INFO - Writing into directory: dataset.\n", + "2024-04-18 19:37:52,073 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", + "2024-04-18 19:37:52,074 - INFO - File exists: dataset/Task01_BrainTumour.tar, skipped downloading.\n", + "2024-04-18 19:37:52,075 - INFO - Non-empty folder exists in dataset/Task01_BrainTumour, skipped extracting.\n", + "2024-04-18 19:38:03,877 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", + "2024-04-18 19:38:03,878 - INFO - File exists: dataset/Task01_BrainTumour.tar, skipped downloading.\n", + "2024-04-18 19:38:03,879 - INFO - Non-empty folder exists in dataset/Task01_BrainTumour, skipped extracting.\n" + ] + } + ], + "source": [ + "# Make the dataset directory\n", + "os.makedirs(\"./dataset/\", exist_ok=True)\n", + "\n", + "# Fetch the training split of the brain tumor segmentation dataset\n", + "train_dataset = DecathlonDataset(\n", + " root_dir=\"./dataset/\",\n", + " task=\"Task01_BrainTumour\",\n", + " section=\"training\",\n", + " download=True,\n", + " cache_rate=0.0,\n", + " num_workers=4,\n", + ")\n", + "\n", + "# Fetch the validation split of the brain tumor segmentation dataset\n", + "val_dataset = DecathlonDataset(\n", + " root_dir=\"./dataset/\",\n", + " task=\"Task01_BrainTumour\",\n", + " section=\"validation\",\n", + " download=True,\n", + " cache_rate=0.0,\n", + " num_workers=4,\n", + ")\n", + "\n", + "# Fetch the test split of the brain 
tumor segmentation dataset\n", + "test_dataset = DecathlonDataset(\n", + " root_dir=\"./dataset/\",\n", + " task=\"Task01_BrainTumour\",\n", + " section=\"test\",\n", + " download=True,\n", + " cache_rate=0.0,\n", + " num_workers=4,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "07461dbc-3056-4f06-bb1a-462246a35791", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train Set Size: 388\n", + "Validation Set Size: 96\n", + "Test Set Size: 266\n" + ] + } + ], + "source": [ + "print(\"Train Set Size:\", len(train_dataset))\n", + "print(\"Validation Set Size:\", len(val_dataset))\n", + "print(\"Test Set Size:\", len(test_dataset))" + ] + }, + { + "cell_type": "markdown", + "id": "93e0609f-3009-4bd0-baf9-e8e10084801c", + "metadata": {}, + "source": [ + "## 💿 Upload the Dataset to W&B as an Artifact\n", + "\n", + "[W&B Artifacts](https://docs.wandb.ai/guides/artifacts) can be used to track and version any serialized data as the inputs and outputs of your W&B Runs. For example, a model training run might take in a dataset as input and a trained model as output.\n", + "\n", + "Let us now see how we can upload this dataset as a W&B artifact." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "9f1f35e5-927e-4baf-a351-652e7e99fe76", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./dataset)... Done. 24.3s\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "artifact = wandb.Artifact(name=\"decathlon_brain_tumor\", type=\"dataset\")\n", + "artifact.add_dir(local_path=\"./dataset/\")\n", + "wandb.log_artifact(artifact)" + ] + }, + { + "cell_type": "markdown", + "id": "e1cbbe47-f83f-4db3-9c81-879121041881", + "metadata": {}, + "source": [ + "Now we end the experiment by calling `wandb.finish()`." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "25ea852b-04d7-4e94-97c3-45d972b21886", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "f4fc03bb2a9d48e38445d3997a021f37", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Label(value='14510.722 MB of 14510.722 MB uploaded (0.010 MB deduped)\\r'), FloatProgress(value=…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run silvery-universe-1 at: https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/3kmw0fh1
View project at: https://wandb.ai/lifesciences/brain-tumor-segmentation
Synced 5 W&B file(s), 0 media file(s), 1282 artifact file(s) and 1 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20240418_193230-3kmw0fh1/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "wandb.finish()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From bf7ec54a03c2b245056db454e413ddff57ac40e0 Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Fri, 19 Apr 2024 09:09:47 +0000 Subject: [PATCH 02/21] add: visualization notebook --- life-science/monai_decathlon.ipynb | 66 +- life-science/monai_decathlon_visualize.ipynb | 7442 ++++++++++++++++++ 2 files changed, 7477 insertions(+), 31 deletions(-) create mode 100644 life-science/monai_decathlon_visualize.ipynb diff --git a/life-science/monai_decathlon.ipynb b/life-science/monai_decathlon.ipynb index 9d382e1b..f488645b 100644 --- a/life-science/monai_decathlon.ipynb +++ b/life-science/monai_decathlon.ipynb @@ -24,10 +24,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "6d8a4eaa-6c15-44f0-81f8-b0c2800b1017", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mDEPRECATION: flatbuffers 1.12.1-git20200711.33e2d80-dfsg1-0.6 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of flatbuffers or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n", + "\u001b[0m\u001b[33mDEPRECATION: flatbuffers 1.12.1-git20200711.33e2d80-dfsg1-0.6 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of flatbuffers or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n", + "\u001b[0m" + ] + } + ], "source": [ "!pip install -q -U \"monai[nibabel, tqdm]\"\n", "!pip install -q -U wandb" @@ -35,7 +45,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "id": "34ccb87f-320b-4a11-b95c-eff70487c6e2", "metadata": {}, "outputs": [ @@ -66,7 +76,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "a2315b79-8c0a-4cfd-aa6d-4fca55d78137", "metadata": {}, "outputs": [ @@ -92,7 +102,7 @@ { "data": { "text/html": [ - "Run data is saved locally in /home/ubuntu/wandb/run-20240418_193230-3kmw0fh1" + "Run data is saved locally in /home/ubuntu/edu/life-science/wandb/run-20240418_221202-62jrsztu" ], "text/plain": [ "" @@ -104,7 +114,7 @@ { "data": { "text/html": [ - "Syncing run silvery-universe-1 to Weights & Biases (docs)
" + "Syncing run usual-brook-3 to Weights & Biases (docs)
" ], "text/plain": [ "" @@ -128,7 +138,7 @@ { "data": { "text/html": [ - " View run at https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/3kmw0fh1" + " View run at https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/62jrsztu" ], "text/plain": [ "" @@ -140,13 +150,13 @@ { "data": { "text/html": [ - "" + "" ], "text/plain": [ - "" + "" ] }, - "execution_count": 2, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -171,7 +181,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "42189439-2c3d-403b-915a-98f897d049e4", "metadata": {}, "outputs": [ @@ -179,14 +189,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "Task01_BrainTumour.tar: 7.09GB [04:44, 26.7MB/s] " + "Task01_BrainTumour.tar: 7.09GB [05:55, 21.4MB/s] " ] }, { "name": "stdout", "output_type": "stream", "text": [ - "2024-04-18 19:37:16,210 - INFO - Downloaded: dataset/Task01_BrainTumour.tar\n" + "2024-04-18 22:17:59,197 - INFO - Downloaded: dataset/Task01_BrainTumour.tar\n" ] }, { @@ -200,14 +210,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "2024-04-18 19:37:28,014 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", - "2024-04-18 19:37:28,016 - INFO - Writing into directory: dataset.\n", - "2024-04-18 19:37:52,073 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", - "2024-04-18 19:37:52,074 - INFO - File exists: dataset/Task01_BrainTumour.tar, skipped downloading.\n", - "2024-04-18 19:37:52,075 - INFO - Non-empty folder exists in dataset/Task01_BrainTumour, skipped extracting.\n", - "2024-04-18 19:38:03,877 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", - "2024-04-18 19:38:03,878 - INFO - File exists: dataset/Task01_BrainTumour.tar, skipped downloading.\n", - "2024-04-18 19:38:03,879 - INFO - Non-empty folder exists in dataset/Task01_BrainTumour, skipped extracting.\n" + "2024-04-18 22:18:10,984 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", + "2024-04-18 22:18:10,985 - INFO - Writing into directory: dataset.\n" ] } ], @@ -230,7 +234,7 @@ " root_dir=\"./dataset/\",\n", " task=\"Task01_BrainTumour\",\n", " section=\"validation\",\n", - " download=True,\n", + " download=False,\n", " cache_rate=0.0,\n", " num_workers=4,\n", ")\n", @@ -240,7 +244,7 @@ " root_dir=\"./dataset/\",\n", " task=\"Task01_BrainTumour\",\n", " section=\"test\",\n", - " download=True,\n", + " download=False,\n", " cache_rate=0.0,\n", " num_workers=4,\n", ")" @@ -248,7 +252,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "id": "07461dbc-3056-4f06-bb1a-462246a35791", "metadata": {}, "outputs": [ @@ -282,7 +286,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "9f1f35e5-927e-4baf-a351-652e7e99fe76", "metadata": {}, "outputs": [ @@ -299,7 +303,7 @@ "" ] }, - "execution_count": 5, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -320,19 +324,19 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "id": "25ea852b-04d7-4e94-97c3-45d972b21886", "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "f4fc03bb2a9d48e38445d3997a021f37", + "model_id": "388076d4600649e58671f5681e757f4d", "version_major": 2, "version_minor": 0 }, "text/plain": [ - "VBox(children=(Label(value='14510.722 MB of 14510.722 MB uploaded (0.010 MB deduped)\\r'), FloatProgress(value=…" + 
"VBox(children=(Label(value='14510.691 MB of 14510.691 MB uploaded\\r'), FloatProgress(value=1.0, max=1.0)))" ] }, "metadata": {}, @@ -341,7 +345,7 @@ { "data": { "text/html": [ - " View run silvery-universe-1 at: https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/3kmw0fh1
View project at: https://wandb.ai/lifesciences/brain-tumor-segmentation
Synced 5 W&B file(s), 0 media file(s), 1282 artifact file(s) and 1 other file(s)" + " View run usual-brook-3 at: https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/62jrsztu
View project at: https://wandb.ai/lifesciences/brain-tumor-segmentation
Synced 6 W&B file(s), 0 media file(s), 1278 artifact file(s) and 1 other file(s)" ], "text/plain": [ "" @@ -353,7 +357,7 @@ { "data": { "text/html": [ - "Find logs at: ./wandb/run-20240418_193230-3kmw0fh1/logs" + "Find logs at: ./wandb/run-20240418_221202-62jrsztu/logs" ], "text/plain": [ "" diff --git a/life-science/monai_decathlon_visualize.ipynb b/life-science/monai_decathlon_visualize.ipynb new file mode 100644 index 00000000..b6b1f4bf --- /dev/null +++ b/life-science/monai_decathlon_visualize.ipynb @@ -0,0 +1,7442 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "13953901-66c4-437c-b3a5-fadf8136d54c", + "metadata": {}, + "source": [ + "# Visualize Bain Tumor Segmentation Data\n", + "\n", + "In this notebook we will learn:\n", + "- MONAI transform API:\n", + " - MONAI Transforms for dictionary format data.\n", + " - Creating custom transforms using [`monai.transforms`](https://docs.monai.io/en/stable/transforms.html) API.\n", + "- how we can visualize the brain tumor segmentation dataset using W&B image overlays.\n", + "- how we can analyze our data using W&B Tables." + ] + }, + { + "cell_type": "markdown", + "id": "f4023f9a-1e58-468d-8ea3-56a694fa89ec", + "metadata": {}, + "source": [ + "## 🌴 Setup and Installation\n", + "\n", + "First, let us install the latest version of both MONAI and Weights and Biases." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "4def9c4c-89b9-4f02-9853-91624690dc4f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mDEPRECATION: flatbuffers 1.12.1-git20200711.33e2d80-dfsg1-0.6 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of flatbuffers or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n", + "\u001b[0m\u001b[33mDEPRECATION: flatbuffers 1.12.1-git20200711.33e2d80-dfsg1-0.6 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of flatbuffers or contact the author to suggest that they release a version with a conforming version number. 
Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n", + "\u001b[0m" + ] + } + ], + "source": [ + "!pip install -q -U \"monai[nibabel, tqdm]\"\n", + "!pip install -q -U wandb" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "47df07a7-7fd4-4b9e-9bc8-bbcfba400a22", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/lib/python3/dist-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.25.2\n", + " warnings.warn(f\"A NumPy version >={np_minversion} and <{np_maxversion}\"\n" + ] + } + ], + "source": [ + "import os\n", + "\n", + "import numpy as np\n", + "import torch\n", + "import wandb\n", + "from tqdm.auto import tqdm\n", + "\n", + "from monai.apps import DecathlonDataset\n", + "from monai.transforms import (\n", + " Compose,\n", + " LoadImaged,\n", + " MapTransform,\n", + " NormalizeIntensityd,\n", + " Orientationd,\n", + " RandFlipd,\n", + " RandScaleIntensityd,\n", + " RandShiftIntensityd,\n", + " RandSpatialCropd,\n", + " Spacingd,\n", + " EnsureTyped,\n", + " EnsureChannelFirstd,\n", + ")\n", + "\n", + "from collections import Counter" + ] + }, + { + "cell_type": "markdown", + "id": "48509346-08a2-41e3-bc98-1aea79fe42d3", + "metadata": {}, + "source": [ + "## 🌳 Initialize a W&B Run\n", + "\n", + "We will start a new W&B run to start tracking our experiment." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "69d72b50-1a24-4a32-97c8-6f859cc203df", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mgeekyrakshit\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mgeekyrakshit\u001b[0m (\u001b[33mlifesciences\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.16.6" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/ubuntu/edu/life-science/wandb/run-20240419_054517-42vd37y7" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run solar-glitter-8 to Weights & Biases (docs)
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/lifesciences/brain-tumor-segmentation" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/42vd37y7" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "wandb.login()\n", + "wandb.init(\n", + " project=\"brain-tumor-segmentation\",\n", + " entity=\"lifesciences\",\n", + " job_type=\"visualize_dataset\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "245625a8-be24-40f8-8cef-615917611a49", + "metadata": {}, + "source": [ + "## 💿 Loading and Transforming the Data\n", + "\n", + "We will now learn using the [`monai.transforms`](https://docs.monai.io/en/stable/transforms.html) API to create and apply transforms to our data.\n", + "\n", + "### Creating a Custom Transform\n", + "\n", + "First, we demonstrate the creation of a custom transform `ConvertToMultiChannelBasedOnBratsClassesd` using [`monai.transforms.MapTransform`](https://docs.monai.io/en/stable/transforms.html#maptransform) that converts labels to multi-channel tensors based on brats18 classes:\n", + "- label 1 is the necrotic and non-enhancing tumor core\n", + "- label 2 is the peritumoral edema\n", + "- label 4 is the GD-enhancing tumor.\n", + "\n", + "The target classes for the semantic segmentation task after applying this transform on the dataset will be\n", + "- Tumor core\n", + "- Whole tumor\n", + "- Enhancing tumor" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "1ce8d513-cd46-43c1-839e-0ae15f750a5e", + "metadata": {}, + "outputs": [], + "source": [ + "class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform):\n", + " \"\"\"\n", + " Convert labels to multi-channels based on brats classes:\n", + " label 1 is the peritumoral edema\n", + " label 2 is the GD-enhancing tumor\n", + " label 3 is the necrotic and non-enhancing tumor core\n", + " The possible classes are TC (Tumor core), WT (Whole tumor)\n", + " and ET (Enhancing tumor).\n", + "\n", + " Reference: https://github.com/Project-MONAI/tutorials/blob/main/3d_segmentation/brats_segmentation_3d.ipynb\n", + "\n", + " \"\"\"\n", + "\n", + " def __call__(self, data):\n", + " data_dict = dict(data)\n", + " for key in self.keys:\n", + " result = []\n", + " # merge label 2 and label 3 to construct Tumor Core\n", + " result.append(torch.logical_or(data_dict[key] == 2, data_dict[key] == 3))\n", + " # merge labels 1, 2 and 3 to construct Whole Tumor\n", + " result.append(\n", + " torch.logical_or(\n", + " torch.logical_or(data_dict[key] == 2, data_dict[key] == 3), data_dict[key] == 1\n", + " )\n", + " )\n", + " # label 2 is Enhancing Tumor\n", + " result.append(data_dict[key] == 2)\n", + " data_dict[key] = torch.stack(result, axis=0).float()\n", + " return data_dict" + ] + }, + { + "cell_type": "markdown", + "id": "d72a65c1-701c-4700-88a9-df30b51eb10e", + "metadata": {}, + "source": [ + "Next, we compose all the necessary transforms for visualizing the data using [`monai.transforms.Compose`](https://docs.monai.io/en/stable/transforms.html#monai.transforms.Compose).\n", + "\n", + "**Note:** During training, we will apply a differnt 
set of transforms to the data." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "a542d505-3713-43f1-adf1-c33ade5696b1", + "metadata": {}, + "outputs": [], + "source": [ + "transforms = Compose(\n", + " [\n", + " # Load 4 Nifti images and stack them together\n", + " LoadImaged(keys=[\"image\", \"label\"]),\n", + " # Ensure loaded images are in channels-first format\n", + " EnsureChannelFirstd(keys=\"image\"),\n", + " # Ensure the input data to be a PyTorch Tensor or numpy array\n", + " EnsureTyped(keys=[\"image\", \"label\"]),\n", + " # Convert labels to multi-channels based on brats18 classes\n", + " ConvertToMultiChannelBasedOnBratsClassesd(keys=\"label\"),\n", + " # Change the input image’s orientation into the specified based on axis codes\n", + " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", + " # Resample the input images to the specified pixel dimension\n", + " Spacingd(\n", + " keys=[\"image\", \"label\"],\n", + " pixdim=(1.0, 1.0, 1.0),\n", + " mode=(\"bilinear\", \"nearest\"),\n", + " ),\n", + " # Normalize input image intensity\n", + " NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "3925b068-31fd-4a66-873d-850e6cd7fd87", + "metadata": {}, + "source": [ + "For loading the dataset, we first fetch it from the W&B dataset artifact that we had created earlier. This enables us to use the dataset as an input artifact to our visualization run, and establish the necessary lineage for our experiment." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5b6bf7fc-dc39-4247-9001-cd8832045b84", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Downloading large artifact decathlon_brain_tumor:v0, 14510.68MB. 1275 files... \n", + "\u001b[34m\u001b[1mwandb\u001b[0m: 1275 of 1275 files downloaded. \n", + "Done. 0:0:20.3\n" + ] + } + ], + "source": [ + "artifact = wandb.use_artifact(\n", + " \"lifesciences/brain-tumor-segmentation/decathlon_brain_tumor:v0\", type=\"dataset\"\n", + ")\n", + "artifact_dir = artifact.download()" + ] + }, + { + "cell_type": "markdown", + "id": "54920325-570f-4c19-a299-66d4ff21ca15", + "metadata": {}, + "source": [ + "We now use the [`monai.apps.DecathlonDataset`](https://docs.monai.io/en/stable/apps.html#monai.apps.DecathlonDataset) to load our dataset and apply the transforms we defined on the data samples so that we can visualize it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "cc829ac0-ee0a-4924-889d-72fba089ec7b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2024-04-19 05:45:51,755 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", + "2024-04-19 05:45:51,756 - INFO - File exists: /home/ubuntu/edu/life-science/artifacts/decathlon_brain_tumor:v0/Task01_BrainTumour.tar, skipped downloading.\n", + "2024-04-19 05:45:51,757 - INFO - Non-empty folder exists in /home/ubuntu/edu/life-science/artifacts/decathlon_brain_tumor:v0/Task01_BrainTumour, skipped extracting.\n" + ] + } + ], + "source": [ + "train_dataset = DecathlonDataset(\n", + " root_dir=artifact_dir,\n", + " task=\"Task01_BrainTumour\",\n", + " transform=transforms,\n", + " section=\"training\",\n", + " download=True,\n", + " cache_rate=0.0,\n", + " num_workers=4,\n", + ")\n", + "val_dataset = DecathlonDataset(\n", + " root_dir=artifact_dir,\n", + " task=\"Task01_BrainTumour\",\n", + " transform=transforms,\n", + " section=\"validation\",\n", + " download=False,\n", + " cache_rate=0.0,\n", + " num_workers=4,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "fd917fe9-2fcd-482c-9e00-243d81e26dc7", + "metadata": {}, + "source": [ + "## 📸 Visualizing the Dataset\n", + "\n", + "Weights & Biases supports images, video, audio, and more. You can log rich media to explore your results and visually compare our runs, models, and datasets. Now, you will learn using the [segmentation mask overlay](https://docs.wandb.ai/guides/track/log/media#image-overlays-in-tables) system to visualize our data volumes. To log segmentation masks in [W&B tables](https://docs.wandb.ai/guides/tables), you must provide a [`wandb.Image`](https://docs.wandb.ai/ref/python/data-types/image) object containing the segmentation annotations for each row in the table.\n", + "\n", + "An example is provided in the pseudocode below:\n", + "\n", + "```python\n", + "table = wandb.Table(columns=[\"ID\", \"Image\"])\n", + "\n", + "for id, img, label in zip(ids, images, labels):\n", + " mask_img = wandb.Image(\n", + " img,\n", + " masks={\n", + " \"ground-truth\": {\"mask_data\": label, \"class_labels\": class_labels}\n", + " # ...\n", + " },\n", + " )\n", + "\n", + " table.add_data(id, img)\n", + "\n", + "wandb.log({\"Table\": table})\n", + "```\n", + "\n", + "However, in our case, since the volume of the target classes might overlap one another, we will log them as separate overlays on the same image, so that we do not miss the relevant information.\n", + "\n", + "An example is provided in the pseudocode below:\n", + "\n", + "```python\n", + "mask_img = wandb.Image(\n", + " img,\n", + " masks={\n", + " \"ground-truth/Tumor-Core\": {\n", + " \"mask_data\": label_tumor_core,\n", + " \"class_labels\": {0: \"background\", 1: \"Tumor Core\"}\n", + " },\n", + " \"ground-truth/Whole-Tumor\": {\n", + " \"mask_data\": label_tumor_core,\n", + " \"class_labels\": {0: \"background\", 1: \"Whole-Tumor\"}\n", + " },\n", + " \"ground-truth/Enhancing-Tumor\": {\n", + " \"mask_data\": label_tumor_core,\n", + " \"class_labels\": {0: \"background\", 1: \"Enhancing-Tumor\"}\n", + " },\n", + " },\n", + ")\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "e9acc72e-7091-40d7-a97b-023df48d9e12", + "metadata": {}, + "outputs": [], + "source": [ + "def get_target_area_percentage(segmentation_map):\n", + " segmentation_map_list = segmentation_map.flatten().tolist()\n", + " return 
segmentation_map_list.count(1.0) * 100 / len(segmentation_map_list)\n", + "\n", + "\n", + "def log_data_samples_into_tables(\n", + " sample_image: np.array,\n", + " sample_label: np.array,\n", + " split: str = None,\n", + " data_idx: int = None,\n", + " table: wandb.Table = None,\n", + "):\n", + " \"\"\"Utility function for logging a data sample into a W&B Table\"\"\"\n", + " num_channels, _, _, num_slices = sample_image.shape\n", + " with tqdm(total=num_slices, leave=False) as progress_bar:\n", + " for slice_idx in range(num_slices):\n", + " ground_truth_wandb_images, tumor_area_percentages = [], []\n", + " for channel_idx in range(num_channels):\n", + " masks = {\n", + " \"ground-truth/Tumor-Core\": {\n", + " \"mask_data\": sample_label[0, :, :, slice_idx],\n", + " \"class_labels\": {0: \"background\", 1: \"Tumor Core\"},\n", + " },\n", + " \"ground-truth/Whole-Tumor\": {\n", + " \"mask_data\": sample_label[1, :, :, slice_idx] * 2,\n", + " \"class_labels\": {0: \"background\", 2: \"Whole Tumor\"},\n", + " },\n", + " \"ground-truth/Enhancing-Tumor\": {\n", + " \"mask_data\": sample_label[2, :, :, slice_idx] * 3,\n", + " \"class_labels\": {0: \"background\", 3: \"Enhancing Tumor\"},\n", + " },\n", + " }\n", + "\n", + " ground_truth_wandb_images.append(\n", + " wandb.Image(\n", + " sample_image[channel_idx, :, :, slice_idx],\n", + " masks=masks,\n", + " )\n", + " )\n", + " tumor_area_percentages.append(\n", + " {\n", + " \"Tumor-Core-Area-Percentage\": get_target_area_percentage(\n", + " sample_label[0, :, :, slice_idx]\n", + " ),\n", + " \"Whole-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " sample_label[1, :, :, slice_idx]\n", + " ),\n", + " \"Enhancing-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " sample_label[2, :, :, slice_idx]\n", + " ),\n", + " }\n", + " )\n", + " table.add_data(\n", + " split,\n", + " data_idx,\n", + " slice_idx,\n", + " *tumor_area_percentages,\n", + " *ground_truth_wandb_images\n", + " )\n", + " progress_bar.update(1)\n", + " return table" + ] + }, + { + "cell_type": "markdown", + "id": "7919a2fc-7e44-4283-a862-93ff8cdcfa5f", + "metadata": {}, + "source": [ + "Next, we iterate over our respective datasets and populate the table on our W&B dashboard." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "386b6d4b-ebf6-4d9d-bc78-7ce489e0a926", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "06206f8fb8324919a68ab922a42085f5", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Generating Train Dataset Visualizations:: 0%| | 0/388 [00:00solar-glitter-8 at: https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/42vd37y7
View project at: https://wandb.ai/lifesciences/brain-tumor-segmentation
Synced 6 W&B file(s), 1 media file(s), 342496 artifact file(s) and 1 other file(s)" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Find logs at: ./wandb/run-20240419_054517-42vd37y7/logs" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "wandb.finish()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From c862caee3fbc8a76384dadf1069360e0b1c5f2f2 Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Thu, 25 Apr 2024 23:03:13 +0000 Subject: [PATCH 03/21] add: baseline training notebook --- ...cathlon.ipynb => 00_monai_decathlon.ipynb} | 33 +- .../01_monai_decathlon_visualize.ipynb | 648 ++ .../02_monai_train_baseline_model.ipynb | 510 ++ life-science/monai_decathlon_visualize.ipynb | 7442 ----------------- 4 files changed, 1168 insertions(+), 7465 deletions(-) rename life-science/{monai_decathlon.ipynb => 00_monai_decathlon.ipynb} (94%) create mode 100644 life-science/01_monai_decathlon_visualize.ipynb create mode 100644 life-science/02_monai_train_baseline_model.ipynb delete mode 100644 life-science/monai_decathlon_visualize.ipynb diff --git a/life-science/monai_decathlon.ipynb b/life-science/00_monai_decathlon.ipynb similarity index 94% rename from life-science/monai_decathlon.ipynb rename to life-science/00_monai_decathlon.ipynb index f488645b..748edbb3 100644 --- a/life-science/monai_decathlon.ipynb +++ b/life-science/00_monai_decathlon.ipynb @@ -39,29 +39,7 @@ } ], "source": [ - "!pip install -q -U \"monai[nibabel, tqdm]\"\n", - "!pip install -q -U wandb" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "34ccb87f-320b-4a11-b95c-eff70487c6e2", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/usr/lib/python3/dist-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.25.2\n", - " warnings.warn(f\"A NumPy version >={np_minversion} and <{np_maxversion}\"\n" - ] - } - ], - "source": [ - "import os\n", - "import wandb\n", - "from monai.apps import DecathlonDataset" + "!pip install -q -U monai wandb" ] }, { @@ -162,6 +140,8 @@ } ], "source": [ + "import wandb\n", + "\n", "wandb.init(\n", " project=\"brain-tumor-segmentation\",\n", " entity=\"lifesciences\",\n", @@ -216,9 +196,14 @@ } ], "source": [ + "import os\n", + "\n", "# Make the dataset directory\n", "os.makedirs(\"./dataset/\", exist_ok=True)\n", "\n", + "\n", + "from monai.apps import DecathlonDataset\n", + "\n", "# Fetch the training split of the brain tumor segmentation dataset\n", "train_dataset = DecathlonDataset(\n", " root_dir=\"./dataset/\",\n", @@ -281,6 +266,8 @@ "\n", "[W&B Artifacts](https://docs.wandb.ai/guides/artifacts) can be used to track and version any serialized data as the inputs and outputs of your W&B Runs. 
For example, a model training run might take in a dataset as input and a trained model as output.\n", "\n", + "![](https://docs.wandb.ai/assets/images/artifacts_landing_page2-b6bd49ea5db62eff00f582a95845fed9.png)\n", + "\n", "Let us now see how we can upload this dataset as a W&B artifact." ] }, diff --git a/life-science/01_monai_decathlon_visualize.ipynb b/life-science/01_monai_decathlon_visualize.ipynb new file mode 100644 index 00000000..8047728e --- /dev/null +++ b/life-science/01_monai_decathlon_visualize.ipynb @@ -0,0 +1,648 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "13953901-66c4-437c-b3a5-fadf8136d54c", + "metadata": {}, + "source": [ + "# Visualize Bain Tumor Segmentation Data\n", + "\n", + "In this notebook we will learn:\n", + "- MONAI transform API:\n", + " - MONAI Transforms for dictionary format data.\n", + " - Creating custom transforms using [`monai.transforms`](https://docs.monai.io/en/stable/transforms.html) API.\n", + "- how we can visualize the brain tumor segmentation dataset using W&B image overlays.\n", + "- how we can analyze our data using W&B Tables." + ] + }, + { + "cell_type": "markdown", + "id": "f4023f9a-1e58-468d-8ea3-56a694fa89ec", + "metadata": {}, + "source": [ + "## 🌴 Setup and Installation\n", + "\n", + "First, let us install the latest version of both MONAI and Weights and Biases." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "4def9c4c-89b9-4f02-9853-91624690dc4f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mDEPRECATION: flatbuffers 1.12.1-git20200711.33e2d80-dfsg1-0.6 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of flatbuffers or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n", + "\u001b[0m" + ] + } + ], + "source": [ + "!pip install -q -U monai wandb" + ] + }, + { + "cell_type": "markdown", + "id": "48509346-08a2-41e3-bc98-1aea79fe42d3", + "metadata": {}, + "source": [ + "## 🌳 Initialize a W&B Run\n", + "\n", + "We will start a new W&B run to start tracking our experiment." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "69d72b50-1a24-4a32-97c8-6f859cc203df", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mgeekyrakshit\u001b[0m (\u001b[33mlifesciences\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.16.6" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /home/ubuntu/edu/life-science/wandb/run-20240424_221832-rtpvhd7n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run lilac-dew-12 to Weights & Biases (docs)
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/lifesciences/brain-tumor-segmentation" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/rtpvhd7n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import wandb\n", + "\n", + "wandb.init(\n", + " project=\"brain-tumor-segmentation\",\n", + " entity=\"lifesciences\",\n", + " job_type=\"visualize_dataset\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "245625a8-be24-40f8-8cef-615917611a49", + "metadata": {}, + "source": [ + "## 💿 Loading and Transforming the Data\n", + "\n", + "We will now learn using the [`monai.transforms`](https://docs.monai.io/en/stable/transforms.html) API to create and apply transforms to our data.\n", + "\n", + "### Creating a Custom Transform\n", + "\n", + "First, we demonstrate the creation of a custom transform `ConvertToMultiChannelBasedOnBratsClassesd` using [`monai.transforms.MapTransform`](https://docs.monai.io/en/stable/transforms.html#maptransform) that converts labels to multi-channel tensors based on brats18 classes:\n", + "- label 1 is the necrotic and non-enhancing tumor core\n", + "- label 2 is the peritumoral edema\n", + "- label 3 is the GD-enhancing tumor.\n", + "\n", + "The target classes for the semantic segmentation task after applying this transform on the dataset will be\n", + "- Tumor core\n", + "- Whole tumor\n", + "- Enhancing tumor" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1ce8d513-cd46-43c1-839e-0ae15f750a5e", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from monai.transforms import MapTransform\n", + "\n", + "\n", + "class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform):\n", + " \"\"\"\n", + " Convert labels to multi-channels based on brats classes:\n", + " label 1 is the peritumoral edema\n", + " label 2 is the GD-enhancing tumor\n", + " label 3 is the necrotic and non-enhancing tumor core\n", + " The possible classes are TC (Tumor core), WT (Whole tumor), and ET (Enhancing tumor).\n", + "\n", + " Reference: https://github.com/Project-MONAI/tutorials/blob/main/3d_segmentation/brats_segmentation_3d.ipynb\n", + "\n", + " \"\"\"\n", + "\n", + " def __call__(self, data):\n", + " data_dict = dict(data)\n", + " for key in self.keys:\n", + " result = []\n", + " # merge label 2 and label 3 to construct Tumor Core\n", + " result.append(torch.logical_or(data_dict[key] == 2, data_dict[key] == 3))\n", + " # merge labels 1, 2 and 3 to construct Whole Tumor\n", + " result.append(\n", + " torch.logical_or(\n", + " torch.logical_or(data_dict[key] == 2, data_dict[key] == 3), data_dict[key] == 1\n", + " )\n", + " )\n", + " # label 2 is Enhancing Tumor\n", + " result.append(data_dict[key] == 2)\n", + " data_dict[key] = torch.stack(result, axis=0).float()\n", + " return data_dict" + ] + }, + { + "cell_type": "markdown", + "id": "d72a65c1-701c-4700-88a9-df30b51eb10e", + "metadata": {}, + "source": [ + "Next, we compose all the necessary transforms for visualizing the data using 
[`monai.transforms.Compose`](https://docs.monai.io/en/stable/transforms.html#monai.transforms.Compose).\n", + "\n", + "**Note:** During training, we will apply a differnt set of transforms to the data." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a542d505-3713-43f1-adf1-c33ade5696b1", + "metadata": {}, + "outputs": [], + "source": [ + "from monai.transforms import (\n", + " Compose,\n", + " LoadImaged,\n", + " NormalizeIntensityd,\n", + " Orientationd,\n", + " Spacingd,\n", + " EnsureTyped,\n", + " EnsureChannelFirstd,\n", + ")\n", + "\n", + "\n", + "transforms = Compose(\n", + " [\n", + " # Load 4 Nifti images and stack them together\n", + " LoadImaged(keys=[\"image\", \"label\"]),\n", + " # Ensure loaded images are in channels-first format\n", + " EnsureChannelFirstd(keys=\"image\"),\n", + " # Ensure the input data to be a PyTorch Tensor or numpy array\n", + " EnsureTyped(keys=[\"image\", \"label\"]),\n", + " # Convert labels to multi-channels based on brats18 classes\n", + " ConvertToMultiChannelBasedOnBratsClassesd(keys=\"label\"),\n", + " # Change the input image’s orientation into the specified based on axis codes\n", + " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", + " # Resample the input images to the specified pixel dimension\n", + " Spacingd(\n", + " keys=[\"image\", \"label\"],\n", + " pixdim=(1.0, 1.0, 1.0),\n", + " mode=(\"bilinear\", \"nearest\"),\n", + " ),\n", + " # Normalize input image intensity\n", + " NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "3925b068-31fd-4a66-873d-850e6cd7fd87", + "metadata": {}, + "source": [ + "For loading the dataset, we first fetch it from the W&B dataset artifact that we had created earlier. This enables us to use the dataset as an input artifact to our visualization run, and establish the necessary lineage for our experiment.\n", + "\n", + "![](./assets/artifact_usage.png)" + ] + }, + { + "cell_type": "code", + "id": "5b6bf7fc-dc39-4247-9001-cd8832045b84", + "metadata": {}, + "outputs": [], + "source": [ + "artifact = wandb.use_artifact(\n", + " \"lifesciences/brain-tumor-segmentation/decathlon_brain_tumor:v0\", type=\"dataset\"\n", + ")\n", + "artifact_dir = artifact.download()" + ] + }, + { + "cell_type": "markdown", + "id": "54920325-570f-4c19-a299-66d4ff21ca15", + "metadata": {}, + "source": [ + "We now use the [`monai.apps.DecathlonDataset`](https://docs.monai.io/en/stable/apps.html#monai.apps.DecathlonDataset) to load our dataset and apply the transforms we defined on the data samples so that we can visualize it." 
+ ] + }, + { + "cell_type": "code", + "id": "cc829ac0-ee0a-4924-889d-72fba089ec7b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2024-04-24 22:19:13,147 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", + "2024-04-24 22:19:13,149 - INFO - File exists: /home/ubuntu/edu/life-science/artifacts/decathlon_brain_tumor:v0/Task01_BrainTumour.tar, skipped downloading.\n", + "2024-04-24 22:19:13,150 - INFO - Non-empty folder exists in /home/ubuntu/edu/life-science/artifacts/decathlon_brain_tumor:v0/Task01_BrainTumour, skipped extracting.\n" + ] + } + ], + "source": [ + "from monai.apps import DecathlonDataset\n", + "\n", + "\n", + "# Create the dataset for the training split\n", + "# of the brain tumor segmentation dataset\n", + "train_dataset = DecathlonDataset(\n", + " root_dir=artifact_dir,\n", + " task=\"Task01_BrainTumour\",\n", + " transform=transforms,\n", + " section=\"training\",\n", + " download=True,\n", + " cache_rate=0.0,\n", + " num_workers=4,\n", + ")\n", + "\n", + "# Create the dataset for the validation split\n", + "# of the brain tumor segmentation dataset\n", + "val_dataset = DecathlonDataset(\n", + " root_dir=artifact_dir,\n", + " task=\"Task01_BrainTumour\",\n", + " transform=transforms,\n", + " section=\"validation\",\n", + " download=False,\n", + " cache_rate=0.0,\n", + " num_workers=4,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "fd917fe9-2fcd-482c-9e00-243d81e26dc7", + "metadata": {}, + "source": [ + "## 📸 Visualizing the Dataset\n", + "\n", + "Weights & Biases supports images, video, audio, and more. You can log rich media to explore your results and visually compare our runs, models, and datasets. Now, you will learn using the [segmentation mask overlay](https://docs.wandb.ai/guides/track/log/media#image-overlays-in-tables) system to visualize our data volumes. 
To log segmentation masks in [W&B tables](https://docs.wandb.ai/guides/tables), you must provide a [`wandb.Image`](https://docs.wandb.ai/ref/python/data-types/image) object containing the segmentation annotations for each row in the table.\n", + "\n", + "![](https://docs.wandb.ai/assets/images/viz-2-e3652d015abbf1d6d894e8edb1424eac.gif)\n", + "\n", + "An example is provided in the pseudocode below:\n", + "\n", + "```python\n", + "table = wandb.Table(columns=[\"ID\", \"Image\"])\n", + "\n", + "for id, img, label in zip(ids, images, labels):\n", + " mask_img = wandb.Image(\n", + " img,\n", + " masks={\n", + " \"ground-truth\": {\"mask_data\": label, \"class_labels\": class_labels}\n", + " # ...\n", + " },\n", + " )\n", + "\n", + " table.add_data(id, img)\n", + "\n", + "wandb.log({\"Table\": table})\n", + "```\n", + "\n", + "However, in our case, since the volume of the target classes might overlap one another, we will log them as separate overlays on the same image, so that we do not miss the relevant information.\n", + "\n", + "An example is provided in the pseudocode below:\n", + "\n", + "```python\n", + "mask_img = wandb.Image(\n", + " img,\n", + " masks={\n", + " \"ground-truth/Tumor-Core\": {\n", + " \"mask_data\": label_tumor_core,\n", + " \"class_labels\": {0: \"background\", 1: \"Tumor Core\"}\n", + " },\n", + " \"ground-truth/Whole-Tumor\": {\n", + " \"mask_data\": label_tumor_core,\n", + " \"class_labels\": {0: \"background\", 2: \"Whole-Tumor\"}\n", + " },\n", + " \"ground-truth/Enhancing-Tumor\": {\n", + " \"mask_data\": label_tumor_core,\n", + " \"class_labels\": {0: \"background\", 3: \"Enhancing-Tumor\"}\n", + " },\n", + " },\n", + ")\n", + "```" + ] + }, + { + "cell_type": "code", + "id": "e9acc72e-7091-40d7-a97b-023df48d9e12", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "from tqdm.auto import tqdm\n", + "\n", + "\n", + "def get_target_area_percentage(segmentation_map):\n", + " segmentation_map_list = segmentation_map.flatten().tolist()\n", + " return segmentation_map_list.count(1.0) * 100 / len(segmentation_map_list)\n", + "\n", + "\n", + "def log_data_samples_into_tables(\n", + " sample_image: np.array,\n", + " sample_label: np.array,\n", + " split: str = None,\n", + " data_idx: int = None,\n", + " table: wandb.Table = None,\n", + "):\n", + " \"\"\"Utility function for logging a data sample into a W&B Table\"\"\"\n", + " num_channels, _, _, num_slices = sample_image.shape\n", + " with tqdm(total=num_slices, leave=False) as progress_bar:\n", + " for slice_idx in range(num_slices):\n", + " ground_truth_wandb_images, tumor_area_percentages = [], []\n", + " for channel_idx in range(num_channels):\n", + " masks = {\n", + " \"ground-truth/Tumor-Core\": {\n", + " \"mask_data\": sample_label[0, :, :, slice_idx],\n", + " \"class_labels\": {0: \"background\", 1: \"Tumor Core\"},\n", + " },\n", + " \"ground-truth/Whole-Tumor\": {\n", + " \"mask_data\": sample_label[1, :, :, slice_idx] * 2,\n", + " \"class_labels\": {0: \"background\", 2: \"Whole Tumor\"},\n", + " },\n", + " \"ground-truth/Enhancing-Tumor\": {\n", + " \"mask_data\": sample_label[2, :, :, slice_idx] * 3,\n", + " \"class_labels\": {0: \"background\", 3: \"Enhancing Tumor\"},\n", + " },\n", + " }\n", + "\n", + " ground_truth_wandb_images.append(\n", + " wandb.Image(\n", + " sample_image[channel_idx, :, :, slice_idx],\n", + " masks=masks,\n", + " )\n", + " )\n", + " tumor_area_percentages.append(\n", + " {\n", + " \"Tumor-Core-Area-Percentage\": get_target_area_percentage(\n", + " 
sample_label[0, :, :, slice_idx]\n", + " ),\n", + " \"Whole-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " sample_label[1, :, :, slice_idx]\n", + " ),\n", + " \"Enhancing-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " sample_label[2, :, :, slice_idx]\n", + " ),\n", + " }\n", + " )\n", + " table.add_data(\n", + " split,\n", + " data_idx,\n", + " slice_idx,\n", + " *tumor_area_percentages,\n", + " *ground_truth_wandb_images\n", + " )\n", + " progress_bar.update(1)\n", + " return table" + ] + }, + { + "cell_type": "markdown", + "id": "7919a2fc-7e44-4283-a862-93ff8cdcfa5f", + "metadata": {}, + "source": [ + "Next, we iterate over our respective datasets and populate the table on our W&B dashboard." + ] + }, + { + "cell_type": "code", + "id": "85601b14-e693-4cca-b8cf-5e3ce863bb18", + "metadata": {}, + "outputs": [], + "source": [ + "# Define the schema of the table\n", + "table = wandb.Table(\n", + " columns=[\n", + " \"Split\",\n", + " \"Data Index\",\n", + " \"Slice Index\",\n", + " \"Tumor-Area-Pixel-Percentages-Channel-0\",\n", + " \"Tumor-Area-Pixel-Percentages-Channel-1\",\n", + " \"Tumor-Area-Pixel-Percentages-Channel-2\",\n", + " \"Tumor-Area-Pixel-Percentages-Channel-3\",\n", + " \"Image-Channel-0\",\n", + " \"Image-Channel-1\",\n", + " \"Image-Channel-2\",\n", + " \"Image-Channel-3\",\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "id": "386b6d4b-ebf6-4d9d-bc78-7ce489e0a926", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "d2f78eb2ab5f4cc48e7b6a89cad7eb1c", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/155 [00:00=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.25.2\n", - " warnings.warn(f\"A NumPy version >={np_minversion} and <{np_maxversion}\"\n" - ] - } - ], - "source": [ - "import os\n", - "\n", - "import numpy as np\n", - "import torch\n", - "import wandb\n", - "from tqdm.auto import tqdm\n", - "\n", - "from monai.apps import DecathlonDataset\n", - "from monai.transforms import (\n", - " Compose,\n", - " LoadImaged,\n", - " MapTransform,\n", - " NormalizeIntensityd,\n", - " Orientationd,\n", - " RandFlipd,\n", - " RandScaleIntensityd,\n", - " RandShiftIntensityd,\n", - " RandSpatialCropd,\n", - " Spacingd,\n", - " EnsureTyped,\n", - " EnsureChannelFirstd,\n", - ")\n", - "\n", - "from collections import Counter" - ] - }, - { - "cell_type": "markdown", - "id": "48509346-08a2-41e3-bc98-1aea79fe42d3", - "metadata": {}, - "source": [ - "## 🌳 Initialize a W&B Run\n", - "\n", - "We will start a new W&B run to start tracking our experiment." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "69d72b50-1a24-4a32-97c8-6f859cc203df", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mgeekyrakshit\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mgeekyrakshit\u001b[0m (\u001b[33mlifesciences\u001b[0m). 
Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" - ] - }, - { - "data": { - "text/html": [ - "Tracking run with wandb version 0.16.6" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "Run data is saved locally in /home/ubuntu/edu/life-science/wandb/run-20240419_054517-42vd37y7" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "Syncing run solar-glitter-8 to Weights & Biases (docs)
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - " View project at https://wandb.ai/lifesciences/brain-tumor-segmentation" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - " View run at https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/42vd37y7" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "wandb.login()\n", - "wandb.init(\n", - " project=\"brain-tumor-segmentation\",\n", - " entity=\"lifesciences\",\n", - " job_type=\"visualize_dataset\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "245625a8-be24-40f8-8cef-615917611a49", - "metadata": {}, - "source": [ - "## 💿 Loading and Transforming the Data\n", - "\n", - "We will now learn using the [`monai.transforms`](https://docs.monai.io/en/stable/transforms.html) API to create and apply transforms to our data.\n", - "\n", - "### Creating a Custom Transform\n", - "\n", - "First, we demonstrate the creation of a custom transform `ConvertToMultiChannelBasedOnBratsClassesd` using [`monai.transforms.MapTransform`](https://docs.monai.io/en/stable/transforms.html#maptransform) that converts labels to multi-channel tensors based on brats18 classes:\n", - "- label 1 is the necrotic and non-enhancing tumor core\n", - "- label 2 is the peritumoral edema\n", - "- label 4 is the GD-enhancing tumor.\n", - "\n", - "The target classes for the semantic segmentation task after applying this transform on the dataset will be\n", - "- Tumor core\n", - "- Whole tumor\n", - "- Enhancing tumor" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "1ce8d513-cd46-43c1-839e-0ae15f750a5e", - "metadata": {}, - "outputs": [], - "source": [ - "class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform):\n", - " \"\"\"\n", - " Convert labels to multi-channels based on brats classes:\n", - " label 1 is the peritumoral edema\n", - " label 2 is the GD-enhancing tumor\n", - " label 3 is the necrotic and non-enhancing tumor core\n", - " The possible classes are TC (Tumor core), WT (Whole tumor)\n", - " and ET (Enhancing tumor).\n", - "\n", - " Reference: https://github.com/Project-MONAI/tutorials/blob/main/3d_segmentation/brats_segmentation_3d.ipynb\n", - "\n", - " \"\"\"\n", - "\n", - " def __call__(self, data):\n", - " data_dict = dict(data)\n", - " for key in self.keys:\n", - " result = []\n", - " # merge label 2 and label 3 to construct Tumor Core\n", - " result.append(torch.logical_or(data_dict[key] == 2, data_dict[key] == 3))\n", - " # merge labels 1, 2 and 3 to construct Whole Tumor\n", - " result.append(\n", - " torch.logical_or(\n", - " torch.logical_or(data_dict[key] == 2, data_dict[key] == 3), data_dict[key] == 1\n", - " )\n", - " )\n", - " # label 2 is Enhancing Tumor\n", - " result.append(data_dict[key] == 2)\n", - " data_dict[key] = torch.stack(result, axis=0).float()\n", - " return data_dict" - ] - }, - { - "cell_type": "markdown", - "id": "d72a65c1-701c-4700-88a9-df30b51eb10e", - "metadata": {}, - "source": [ - "Next, we compose all the necessary transforms for visualizing the data using [`monai.transforms.Compose`](https://docs.monai.io/en/stable/transforms.html#monai.transforms.Compose).\n", - "\n", - "**Note:** During training, we will apply a differnt 
set of transforms to the data." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "a542d505-3713-43f1-adf1-c33ade5696b1", - "metadata": {}, - "outputs": [], - "source": [ - "transforms = Compose(\n", - " [\n", - " # Load 4 Nifti images and stack them together\n", - " LoadImaged(keys=[\"image\", \"label\"]),\n", - " # Ensure loaded images are in channels-first format\n", - " EnsureChannelFirstd(keys=\"image\"),\n", - " # Ensure the input data to be a PyTorch Tensor or numpy array\n", - " EnsureTyped(keys=[\"image\", \"label\"]),\n", - " # Convert labels to multi-channels based on brats18 classes\n", - " ConvertToMultiChannelBasedOnBratsClassesd(keys=\"label\"),\n", - " # Change the input image’s orientation into the specified based on axis codes\n", - " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", - " # Resample the input images to the specified pixel dimension\n", - " Spacingd(\n", - " keys=[\"image\", \"label\"],\n", - " pixdim=(1.0, 1.0, 1.0),\n", - " mode=(\"bilinear\", \"nearest\"),\n", - " ),\n", - " # Normalize input image intensity\n", - " NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n", - " ]\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "3925b068-31fd-4a66-873d-850e6cd7fd87", - "metadata": {}, - "source": [ - "For loading the dataset, we first fetch it from the W&B dataset artifact that we had created earlier. This enables us to use the dataset as an input artifact to our visualization run, and establish the necessary lineage for our experiment." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "5b6bf7fc-dc39-4247-9001-cd8832045b84", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mwandb\u001b[0m: Downloading large artifact decathlon_brain_tumor:v0, 14510.68MB. 1275 files... \n", - "\u001b[34m\u001b[1mwandb\u001b[0m: 1275 of 1275 files downloaded. \n", - "Done. 0:0:20.3\n" - ] - } - ], - "source": [ - "artifact = wandb.use_artifact(\n", - " \"lifesciences/brain-tumor-segmentation/decathlon_brain_tumor:v0\", type=\"dataset\"\n", - ")\n", - "artifact_dir = artifact.download()" - ] - }, - { - "cell_type": "markdown", - "id": "54920325-570f-4c19-a299-66d4ff21ca15", - "metadata": {}, - "source": [ - "We now use the [`monai.apps.DecathlonDataset`](https://docs.monai.io/en/stable/apps.html#monai.apps.DecathlonDataset) to load our dataset and apply the transforms we defined on the data samples so that we can visualize it." 
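To make the effect of the custom transform concrete, here is a minimal sanity check one could run after the cell above. It is not part of the patched notebook: the synthetic `label` tensor and its shape are assumptions chosen purely for illustration, and the snippet relies only on `torch` and the `ConvertToMultiChannelBasedOnBratsClassesd` class defined earlier.

```python
import torch

# Hypothetical check (not in the original notebook): apply the custom transform
# to a tiny synthetic BraTS-style label volume with integer values in {0, 1, 2, 3}.
label = torch.randint(0, 4, (8, 8, 8))

converted = ConvertToMultiChannelBasedOnBratsClassesd(keys="label")({"label": label})

# The single-channel label becomes a 3-channel binary volume:
# channel 0 = Tumor Core, channel 1 = Whole Tumor, channel 2 = Enhancing Tumor.
print(converted["label"].shape)  # torch.Size([3, 8, 8, 8])
```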
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "cc829ac0-ee0a-4924-889d-72fba089ec7b", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2024-04-19 05:45:51,755 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", - "2024-04-19 05:45:51,756 - INFO - File exists: /home/ubuntu/edu/life-science/artifacts/decathlon_brain_tumor:v0/Task01_BrainTumour.tar, skipped downloading.\n", - "2024-04-19 05:45:51,757 - INFO - Non-empty folder exists in /home/ubuntu/edu/life-science/artifacts/decathlon_brain_tumor:v0/Task01_BrainTumour, skipped extracting.\n" - ] - } - ], - "source": [ - "train_dataset = DecathlonDataset(\n", - " root_dir=artifact_dir,\n", - " task=\"Task01_BrainTumour\",\n", - " transform=transforms,\n", - " section=\"training\",\n", - " download=True,\n", - " cache_rate=0.0,\n", - " num_workers=4,\n", - ")\n", - "val_dataset = DecathlonDataset(\n", - " root_dir=artifact_dir,\n", - " task=\"Task01_BrainTumour\",\n", - " transform=transforms,\n", - " section=\"validation\",\n", - " download=False,\n", - " cache_rate=0.0,\n", - " num_workers=4,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "fd917fe9-2fcd-482c-9e00-243d81e26dc7", - "metadata": {}, - "source": [ - "## 📸 Visualizing the Dataset\n", - "\n", - "Weights & Biases supports images, video, audio, and more. You can log rich media to explore your results and visually compare our runs, models, and datasets. Now, you will learn using the [segmentation mask overlay](https://docs.wandb.ai/guides/track/log/media#image-overlays-in-tables) system to visualize our data volumes. To log segmentation masks in [W&B tables](https://docs.wandb.ai/guides/tables), you must provide a [`wandb.Image`](https://docs.wandb.ai/ref/python/data-types/image) object containing the segmentation annotations for each row in the table.\n", - "\n", - "An example is provided in the pseudocode below:\n", - "\n", - "```python\n", - "table = wandb.Table(columns=[\"ID\", \"Image\"])\n", - "\n", - "for id, img, label in zip(ids, images, labels):\n", - " mask_img = wandb.Image(\n", - " img,\n", - " masks={\n", - " \"ground-truth\": {\"mask_data\": label, \"class_labels\": class_labels}\n", - " # ...\n", - " },\n", - " )\n", - "\n", - " table.add_data(id, img)\n", - "\n", - "wandb.log({\"Table\": table})\n", - "```\n", - "\n", - "However, in our case, since the volume of the target classes might overlap one another, we will log them as separate overlays on the same image, so that we do not miss the relevant information.\n", - "\n", - "An example is provided in the pseudocode below:\n", - "\n", - "```python\n", - "mask_img = wandb.Image(\n", - " img,\n", - " masks={\n", - " \"ground-truth/Tumor-Core\": {\n", - " \"mask_data\": label_tumor_core,\n", - " \"class_labels\": {0: \"background\", 1: \"Tumor Core\"}\n", - " },\n", - " \"ground-truth/Whole-Tumor\": {\n", - " \"mask_data\": label_tumor_core,\n", - " \"class_labels\": {0: \"background\", 1: \"Whole-Tumor\"}\n", - " },\n", - " \"ground-truth/Enhancing-Tumor\": {\n", - " \"mask_data\": label_tumor_core,\n", - " \"class_labels\": {0: \"background\", 1: \"Enhancing-Tumor\"}\n", - " },\n", - " },\n", - ")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "e9acc72e-7091-40d7-a97b-023df48d9e12", - "metadata": {}, - "outputs": [], - "source": [ - "def get_target_area_percentage(segmentation_map):\n", - " segmentation_map_list = segmentation_map.flatten().tolist()\n", - " return 
segmentation_map_list.count(1.0) * 100 / len(segmentation_map_list)\n", - "\n", - "\n", - "def log_data_samples_into_tables(\n", - " sample_image: np.array,\n", - " sample_label: np.array,\n", - " split: str = None,\n", - " data_idx: int = None,\n", - " table: wandb.Table = None,\n", - "):\n", - " \"\"\"Utility function for logging a data sample into a W&B Table\"\"\"\n", - " num_channels, _, _, num_slices = sample_image.shape\n", - " with tqdm(total=num_slices, leave=False) as progress_bar:\n", - " for slice_idx in range(num_slices):\n", - " ground_truth_wandb_images, tumor_area_percentages = [], []\n", - " for channel_idx in range(num_channels):\n", - " masks = {\n", - " \"ground-truth/Tumor-Core\": {\n", - " \"mask_data\": sample_label[0, :, :, slice_idx],\n", - " \"class_labels\": {0: \"background\", 1: \"Tumor Core\"},\n", - " },\n", - " \"ground-truth/Whole-Tumor\": {\n", - " \"mask_data\": sample_label[1, :, :, slice_idx] * 2,\n", - " \"class_labels\": {0: \"background\", 2: \"Whole Tumor\"},\n", - " },\n", - " \"ground-truth/Enhancing-Tumor\": {\n", - " \"mask_data\": sample_label[2, :, :, slice_idx] * 3,\n", - " \"class_labels\": {0: \"background\", 3: \"Enhancing Tumor\"},\n", - " },\n", - " }\n", - "\n", - " ground_truth_wandb_images.append(\n", - " wandb.Image(\n", - " sample_image[channel_idx, :, :, slice_idx],\n", - " masks=masks,\n", - " )\n", - " )\n", - " tumor_area_percentages.append(\n", - " {\n", - " \"Tumor-Core-Area-Percentage\": get_target_area_percentage(\n", - " sample_label[0, :, :, slice_idx]\n", - " ),\n", - " \"Whole-Tumor-Area-Percentage\": get_target_area_percentage(\n", - " sample_label[1, :, :, slice_idx]\n", - " ),\n", - " \"Enhancing-Tumor-Area-Percentage\": get_target_area_percentage(\n", - " sample_label[2, :, :, slice_idx]\n", - " ),\n", - " }\n", - " )\n", - " table.add_data(\n", - " split,\n", - " data_idx,\n", - " slice_idx,\n", - " *tumor_area_percentages,\n", - " *ground_truth_wandb_images\n", - " )\n", - " progress_bar.update(1)\n", - " return table" - ] - }, - { - "cell_type": "markdown", - "id": "7919a2fc-7e44-4283-a862-93ff8cdcfa5f", - "metadata": {}, - "source": [ - "Next, we iterate over our respective datasets and populate the table on our W&B dashboard." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "386b6d4b-ebf6-4d9d-bc78-7ce489e0a926", - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "06206f8fb8324919a68ab922a42085f5", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Generating Train Dataset Visualizations:: 0%| | 0/388 [00:00solar-glitter-8 at: https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/42vd37y7
View project at: https://wandb.ai/lifesciences/brain-tumor-segmentation
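For reference, a hedged sketch of how the two splits might be iterated to populate such a table with the `log_data_samples_into_tables` utility defined above. The column names, the assumption of 4 image channels (one per MRI modality), and the logged key are illustrative choices, not taken verbatim from the notebook source.

```python
# Sketch only: the column order mirrors the values passed to `table.add_data`
# in the utility above (split, data index, slice index, one area-percentage dict
# per channel, one masked wandb.Image per channel).
table = wandb.Table(
    columns=[
        "Split", "Data-Index", "Slice-Index",
        *[f"Tumor-Area-Percentages-Channel-{idx}" for idx in range(4)],
        *[f"Image-Channel-{idx}" for idx in range(4)],
    ]
)

for data_idx, sample in enumerate(train_dataset):
    table = log_data_samples_into_tables(
        sample["image"], sample["label"], split="train", data_idx=data_idx, table=table
    )

for data_idx, sample in enumerate(val_dataset):
    table = log_data_samples_into_tables(
        sample["image"], sample["label"], split="val", data_idx=data_idx, table=table
    )

wandb.log({"Brain-Tumor-Segmentation-Data": table})  # assumed key name
```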
Synced 6 W&B file(s), 1 media file(s), 342496 artifact file(s) and 1 other file(s)" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "Find logs at: ./wandb/run-20240419_054517-42vd37y7/logs" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "wandb.finish()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From 32cd2e12757f65441709c750600c1c6df24d974c Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Thu, 25 Apr 2024 23:06:47 +0000 Subject: [PATCH 04/21] add: assets --- life-science/assets/artifact_usage.png | Bin 0 -> 108977 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 life-science/assets/artifact_usage.png diff --git a/life-science/assets/artifact_usage.png b/life-science/assets/artifact_usage.png new file mode 100644 index 0000000000000000000000000000000000000000..949348acfbdd7be0d02bed331d26585777890b3c GIT binary patch literal 108977 zcmd?RWmp`|);5YW_y9@pVUQ4l4Z+}8G4ep+x!QCymyPe7N z?7g4;z2D*edHx)(o@<)!s$Q#>R9CHauL)C9kjB9z#Y8|rz>$@aP(eUI#YI3siUXj* zuRJZ|U_wAZRI?HnSCSPM2P-*2&8=+B5DU(?)5FniumSrjPngSdy@6JOOe?YYWV&cdZx{+I_8S(u$dqpOO? zSI?UDuMYDqW5K13EqBqQN$j>6w*efV+4~T4tyad$J5b_Tjfa&w-?o`Y458RCH`}{$ zjmoZgtxyS;ZiG^X(P-<~S38zF6!;B*^C($gr{FB-%9@r3(La+-@$)<~hlC-sel#+o zq&s%V28&{dBcdHKy#Dnn2H^MvE4Oy);GhXrYaLB9HM?{dtRsv*_loq2^GCaM>k_hq zhDqUN1~W@h%zmQa_}3=O!t5GSqFG+i|1-|(40?O2RVp~hw`Fgu4oT@VCdeDG5{ zGZ!N;%+A){nGYsN{Z|V<`1zm95NhyWON5GEK57H~2( z=Tni8`d4@Oe}dGOE-nsy5Qw|GJBvFz3)IO1@`{(27sAR0VPj*4w_tYmuy-+nG21)S z{QZ!BKS#pM*~H1p!Nm${5B~F9BV(wmiy$@iA4326`dkd+WsgCXu_pf)}kC+fk&`7i>;`9L?%%*;T6ghW97{Fxikih-5HOHpOf*UA!4 zBt;~}WJSNA(k1{Uemz{x_vmfxrOjvfKn;%hy+IB9&YKJTi(8B7&h~{!s91L|I0(Rq zj~)&{nZ3)wQd$wDM-M}Q63<%n|Iz>$fDqI{EW_iEBJ=LCqn+yFaVEh2#3=OS-6HK6 zk1`>G^aQsKFBSpgv7-|#vdoG`pzyzpHQV8b&!Y^Y$biuhF+*O(Iv_ntBNib@d>{FL z;qet*RFVFHgpbPsK29gf|Aog;(Rb}e*tF^Yi?~IWV{aapL#zlkTvR6DWt2yC4)%xP z<3gdSdWS?WBV2sqwuyGA9#fA0$Jb!HTn%jMMb)O0%fgJSt*qvQ@+*R^YvWOL7V$@| zCW*`+cEr;-2X^w2eN5)yFZiLgJvpNtzpmzS(s3+s)dzWpuI9f9DbPH|4kAG)w!a!G z=Cf-E+uIpG3V->0Sl-qh_VM|EI5RGc1@`fU)*Xgfe|=pv)vuc1ox`i}I)1kIbsw8V0Ex zR=JkLpFRlrcrJ2y|4id99V<}QD}4KnA%)E>oD1?n7^#Z#Czqzi8>FwQ`CPMt;=##1 z;{7A(D^jfwnof*_vsNVR=CRb0;WCA4UyXH8lNurchXUKpkW0MK4xw?Zz0p#s)L;$X1A!^KV5ewj_%%m)0TXl zNWVO5?c8IEs|>BT=TG`t|7+^Yd_`7}tlpm@k?dYZXg&HS-Ma zX9tXvdhXx)bQP%mf1{|3czjp?mb%fHBy7<9n~`J#(s)bC;qdNs&nq7)+lWHO2Fw0# zGb^QCZOfve_x2a}fzzeasC-oa*+V#ENiDfLxA9K0TCGRj2J9u_E7s`nLnP9RMD7Jx zfQhwfb3H|0+}D(<&P0*(-RUp7brm08C}vKnx2^wcQUI@@P+wbGo~!b_9>;2t9?Ng0zRvoL<_#RL#*npZFS^!Ser|Y^B_!@UQ$28E%?Vw2vpw;ZSyABXdpf`SAXAdc;Ad3KCx=a) zy+wGr&O*sMVG(3~t35G&*Jr{%?(fjOU3H!1QGXU@DlhuANC|15mh09!v~feK4OFg= z5{P$_i0!)FZhCs0wno0h=Vy6}CHggq30;{wdUM1TZ@cg((v&0FZ$`&Zz%?YNQ)dUyUF6sdImGh*-#`x!?Jc>G~$tQ+SBu^DRmxnerMLHF0^dnq}B_K z*&~RH*4s!h?E|@H;t&P+{bo*7+&!UhRCL;CRB_vzp~w%B26#Q(-;~-eHqXNTB9)3eq6=( z{)*`8@Ux`KXHU-f3BZLV8JF$poOx~uNhX=8=wELd|GxJkok8NIW9gva!2|syMcnn* z2@GGv)KuHZI-o5&%zHCcPv>B*o)`Pi)5aNbRxlUdaoah?FSW7Ar+%-nyWzU}-61Jt zgwOjfmayDqt7v`f>!>GkxoETYeN~;ms2-rMShb-#m~$<0)$f+7&lS`tdOIsUtz_rA 
zd#-Eo6I=%%2;r?@?yLgFN;Hs?OC5)TRG7g6?;!R|essdFGp*|tfrxFJ2<4oy^LYdruTnJ5CQsP{nc}YWDu(g2?TNdw0`u@ zx0X~7cdW_j`_>p+Y#C!s!6~h5jm*0Ixj4{F{U+?+1F}xRL;iW+0TSQLd$IIsq|VN0 zbQk3MdjZpe2H0T4xSnm_d-VWa^^xNabUATQSuRGtkmo!mh{pma}Z!D0H~I|fRHG+{-0KxvS?W$iKolD{SOJ!t%oFAyK11+-k8 zK8yhT51{@L(F**KLok<-85)Umk{uIFR?^>m2@VKg0K75LC&3{h(IBTggy;tjZ_)P% z0H`C<1SA_}=@h^-inl=g3?iOL;PN9WFCX#jvQ$Uvl`dfK$b)5x%k?yujEw9juq(;} z!g~9z$qo~$$J0is-D7zQ_cGS|FPWlo&UP1?8r+Ef#I0MZMfC6Z%ZY%J21Z&zG$3?9 z%M3{&+TsG_s||ogt|*3YG|6jVz@7z&cb$(&vMIoF`0k!4#^tyhH|H>^DatBR51yw7 zT$Qfn?v|$+0i_gI#2Ycdd!?<0B%6*@hiwxOsk-lZPhc2C<>tPs@B%jA_eB{j{WH5u zUjI8f{rAsl06(&QaX%~t2@zW*QPUSm!+2koZtjfcgt1!AO=-R?geRf?kWe~mi-zTg zsPY5nJBd3{42|-D-gbGzS2PZ=2AE)p`8)`Wj$R%;1oR=6Un>MobGnjTkU6^$v^qfe z0wrA`K#WU7hXnA+me9h!>;HZuIY=OpnjZ?2$h-c}tST*A9#TnFDcn~uVrCWK7A|>AL}WQMy@Fl z3hPtV2CF+U&^TvVYj4rbd=1`kNJ$6qw37t3Rnldp-1mMsv zA65r^xbkG%Wc)o{ntP~-SPpIp@SmfpnO%E z-VeDyy*L0DZi=>{+&5W2^B!LU3$mFAZ-YEjAwt>rBN>kQRiDS-i8}Z%SbaYe!FIga z_&_@4M^5UF;t3cd6evo(ou>wP_vUAVyeGkpz}4w^OhGPwzVvlRGg$g&^kD=*X@V+x zm|~O-Z$J#1kS@#?!R|{cl{xqV4wm0v-&b!ApI0734t!g#T^Pp)G)k1>6`c+zV?luW zhzCO~_nDCe0O2+yrKaQ5Ccx)MBc}+2xc;il|;IZiOOe%8Xr(xRXjdNADS5`h~t#c+|TQ`uMk_QZGr26A-Mr<@W-w3xQV{M_Thg_ z`esBCbHz0jbD)s^3mH8QYrQ(#F}OHfE_d8h1O7Sz zr<=p1cTYPb{%}WpJ^`xuuhH<&_Fe$kCS;GU{&l-IGSu z+mgXI$HxU$wWG!T-eq`_JCyb4~n};rsF-$6`1@TI0E0J{}oetj`|Q?|8OdZuH2l zi1wbg7;P^;!PL)=t3>y@6hhDxXiY9=v~a&1p9Gs%jE3^%{|6vB$$#$XfB)bkjt)wj ztuK$GM^^eQ<$>2QSB0ChYPES?!-B=2F&)@0qw5{EUwHZdMZs3o=Y`(UN3|GTugEIL zv1{So`pwz*7L~i$U0`_|8!diei21@$U)sug*@Vg;wW00aoxu-G6r*pIHCM_?O1-F5 z%QH3X&J;)T^u(ML+^gmZy8oKn|A+&?ubiI|S5-N$EqV@Y0nJ9W$~6SIm`tPO{zce2INA+h}n}H`3_B;w1HdyEPY^ zC_w4#77pVjp`yw={&gaUXAgN&H-EvtJ69F^zF1zB-x(rP(IOu1?6N(LJq~O0Imzx^ zb)-(=(dG8eyW_15ZHdYt&e&5rP9>*0oQF!h*TA|xOQ}eW1e@u)zlMySU1`7ehTSQp zY{m0yNz0|E3n$_Vh|L)*;maIZGC%}pIT76!n^#qGC z#QoGsPrnbMN6yBkSiI}?S;N7=D{`;RpD0?j!Z2`vDK>htI09@`664g0`loSBokYN1 z{Z{N(6_LQ*s^0WHxbJpsQg_$vwp!#Q`l44}CF1tL=bJ5{t;Qb%mi6i-wpN5dwoIgo zh2DlN!CDH5lf-8v%#VsJFOTjq{sls&*QMJ|XQtzs#>45^b6rGkA_;63)Eo=UD#dt= zf^_-~;EhiFq~!)$fy?gF_%l~%>pv~HebG){}6R_y5zxZczo0qOI3{IZi6b*s?> z;#dCj8Z7ZEWr3E&BkCki3G2M?DMGr4Pof@wiFe>XvK@B$_)yTgx<`VwBVc- zHJTmu{=%n&&4#sEuG8;y+6H*n?!qJ=n53NS3<-Q zQ=^lC2eofwJ0M&aMt1e1=!?eY8q#Ou#p9zPQ5N0DKx2GM_Jdbo*x^}kEODUj5|!LqWD1hfJxYY z@(Cc{1X%?M62@PP$P7*9SLureu?b|4K}-ae4GY_oI>S9~yQiE^expW9Up!0RWz;|M zx>3ZO%rN+sDdD#`oFQhA)>#GoT>0Ja_$1~MU&#qTlLU?30bri&zsmm4W6Zy{96L#WA?g*CCy&u>G&>*N#lBP zTTqn#Vff_<@A-64Q4zqmpVO7_H6?hAkJ=AGt#T||Ks$L&Y>51-rdKVMEilTG#D zSa;(B!BXRSPp|9s^K22GqN|V;~StwGfTHx7L>VW5(&1J6&&Y`jeG5%2)a>f)Eaaq!FL zK65--r!$-+@{%2m_FlBPYpcmbG3be^P85AxpwJ*&?9MXtItkb=EOBqIjL>6z`YV%w zrG&%gT8aNos%{07@dvI#*^&R(h$LE29@2TtG{>E$IgyU#!bCn>{xpY{l4jNJJl=_` zH-;gW-D0Npjj+-6eGZV-)+l+VfcQyE2(UP}rC9qvzI~9g)6hLh?hf)>zTh~e--?r! zd-LjfDxbsj8J|{;UBfSFSfV>2jEe8?-QANgXq5VH9tm}82aK4khd%+lF&#XPb)FTd zRbO1$2xP>w-1EjyoN&NO^$$q{WtxEn`3A?$Tgn?u!mNqNa70<9g*BmFI{6Me{3+HK z$g;y|P~G9c+pbElKb<*+;-w3OUQK-(`i6Ydah6;5ECrOKP>sl@k{aP%X? 
zfm3`iA)9=T@Feq>W;hzk=SOIhOLFMQN4ulZjeq9|D*q=(p!w48uY!| z=^6+t$Vf@sd3u6Buk|X6gLFTS<=%`#r<1||B}V{AA2qA^m!-ds#E2R<&QCT1klfI* z3{{q$ed$9#Dt@_3ZGw6?+v8O#ytIV#GyCDC4>zt3e8;|PGWk(?`p|QN>fTE9N=XC5u8Gz-SF+GxMh7ta>5|<$fqcx(`39rN|~_%s1^_gd5Isvg~Ieyc$gAp(J{WYu-@XFxbpD z*7uWl`AZ<-_=UqB_tw}Nn_E*7iduH@s136OBC+=a*#G+GjpY0^!e1f|&i;HkKsuSn zk#Xf{{wr?P_Ed4cO@9Xycf7>D27@a;h1j{de9Jph7N1T7p1NS!qzJa1Q9_sR$Jw&U zYUTEqwp0cbT*FvjN3&(wg=*3x=**PzRC7}HZm;(mxr);VllV!=C1a~r9%ajz89rc- z%;KcOU+D<6cqorsI$DNj$A1!BX}-``Wu*_Gy=%LOS2i!Hl#eiXUhpW9<{&#*V;wqII(0(nq?fxM5Mpv9)6^xMeY9t=BYfl$|Lcx3UrnkyH4yKy-I ziJ(=g%xDvftxVwm-JJjsK^CKdT8cm}Jwv?-he5oG*pm&X^5dX?bmaqB)H1;*qv?Bh z9t0WBI*OYfk%>(NFLP&r(@T#OLlZ_Z^YwHc6t8KwZ|nfYxr?-+dI;JDXFrlHq1V9R zW`2DTQhm$gx*L}%k*xM1zEwqp97TkH%k{lH|7|TzZRVY#z2Guj-HlX|{zHalAfRTr|bi2I(D5z4Vi1Mk#dxuye-m5vmdho zq%y0NBoRUOxW2nucFa2usuJi*aK_mkNeTH`GAU;?_B2m1yGs^zD3Q-lJ`N<%`2^Y0 zssC%BfzE=fen9t>$a}}>cua!IRILqOX2$d&(0=P<@wsyeO#atn=iQ}^M9%E!3|aGE zDXeL$dn&jABlTBExTZKlH71a0V5eA%eNd)R_Syyg6xybj3{BiGaf&1%LUqtyE1Rp4 zvl{*37KlOyR=+;`R9V9sO7NmS^gzUv;I)#B$M&b+Y@JXYX&oHZ8hINBOx^0TpXc0&Iu5>F^Qe;+ICjv zCbBrCjB)6Ro5q?v|L+O;*P!yYf~lv5Rx?FvidZVISpd)!z_xoLFPqg8;1!yn%@UMj z_c7rr6Z$8B7D1XPyVT(U-GWZwuDnw=B4R}PFntAL(A`(qzEBA3L7*A^@CHfV3l2^2 z0nC5C`8<*VkPYN0h!?V3kuKnqP>E;W2FoBYG(H*QcP0G#fBos-|Ad(AeSm}iTKQ?+ zv;X}`e>V#f<32{?3>L+XR1o^Ze+v?)!y)2}Mv<-kwhG-pG+M}gYsQ!EygxU;gN1ua zDj8w>Uk~y3r2|AkYsT=8)%SmHegy;T|BPIM>z_mNuSD{U;A|e0u4{!Ma?65d!Wv!1(Li%I#GjhQt z#&j~ONB{eRe?Op)F&SvZ)s&Wn|6_B&b7Jck?;@f;{kBYjP7q|hZw;ZR$ocrkW|8|B z`AVYa&+qa7F`+yO8wA{ev@yJIOK^FX=m<<1a{@_sUplX~{BEb#tI32Yt8uQ+_t28+GJ*O= z&-UzwS?QF`h-~Q_$)`m8`0Jx45YtCnLwR<&=R~K&Xw&5mX1V9XeJsuKjR>|@Y3 zT?F)6jjw)4#>6mCc;f7C-LoS^gYj|93BB0;&Jr#a*j@g9bN}l#6YfrSMBcN4m^m)iN2k^J$zOvdZT|OSKEHSvA8eHwB6rO~AyQ5qs zi8;F6e>dS5?jft!84_DzG$?tHVpaRJ%JasGS$~uAxc}6o*ukgyE2qr|leaA?jY$G6 zJMY0VXhw?s`%GvRCPUMudph3dD8^odL@9Ur1lQC|1O}V0T`*~d8hSp)5<166N%-W4#?F<&rH|%{=uQs76P`cBPZAxxXXyGhd z`RNye_vU2B&;#6VxpbIK7Iw#QBL(QZymsq_i{k)M55VW~oQu8#1;8Q=0yUP?ud`(nUV-BB*zA7w z1(wiX7mrhWK_Eqm-Wd{34Rjy8ofbJ_TIza^ssK{|#iF#tsf0sXs;iuxGgnAZ=4^W+ z=H9o}cHLdH-hSV>{+)8n%g@MCx9*o@z);sw)Bk+WLJLL5Vxj!af-96DPrD}6oyjw4 zFMqySSPbNbEZ66o@+TJX8w*e$if;Heb6Yf7NK zTy~>+mtLF1Ttroc?C5v8wPy+Pcq|T0I)C-{`4Kv$w6zn6H7BI&9rj*U$mlqAhe;*( z+b%5$ak*?q?KIKvvY_WDakObdFcQqNn_cJGoH=(R!`326gvlHWRVEI4qG?9@%+UjN z>wkuYH+#7Df*fZo>LV0K0sP+IXO&LNUO0cB`{f+33tP-H7&Nv9{G<7C5@4 z&qt_N8`CQQ?;9q)A5p%K(HHF8wKs}!(8(oV=?UPG1MABwD4k}tG`r;t8JpSrmYk#| zCExXg{vuCfzS@t{SOI9CvB^6WPn! 
z7MHf$*ni(YiOGBr&1_zr3UU{eC(B&66!+rdkL~x&&`ryRe(2=0mltn)L#a*Nm@OGU z0>Yk`Z;lximAk|n-7m{6mN&EnZ?7-)CyN{A&63ZA)b~w#JB0oB2lg93tWmR|AHJFl zgI@T8gqK55DNNOqEf!xBbI}|$h-*g}kv6mbwU-O`WxxPu(R_N2*GY&5JKb73b(U;& z55gpCYxY*>*-kZ$)d<<0fS1=!`oNJFeAr>>^S3tGmFjqkf9c|)4UXA76KD0de_x<;R6phM9*ytq&LTp=p6Ir1AFswNT#*97;QLMY$cn@*Q3az1E!b05ueMu+rYHv-)Ie>0Gp{#tv2i zyq<8l$Ioj$K`c%4BVY+9d4A^`Om$s_VFLY-gZ@Mzmwc703@Gz!aaSaNJ&8@NGF0j7 z{<_}5OW3eKo>WuIZQik8HkJRBMf*{aN^sn>lY$jX%+nXRyCK+vCwD%PVs{L+kC+`i zq!lQeZh@1#Q~A;Fry@jORbu&iYBQaoYQ}3T3Iobln~jEp@jM;$eY*S^&`LcKIFzbw zIfEYkhk`RReSEE12Nn9=&v&P{GRbBPR3j1kPbq1svLBNzTus_Vgv3!CI``UVJf+&2 z@_U4OOBaI2Zf`Jd*;Zvbfkt3f>M-kgJU*#CYgtwNB)l@Mt?COlgG7kwxOuF}UcN@9 z__9ve`rh29`Q?_7ux#h|vT-(O$(fo1XOx+%p8X%!$IUKRMhtAB=N3?}?;Z`e+CviN z`*U@A>NwlgH_$t@@o z=+@qx$C`uo-H24(9%nevz!1D;LNDEX(myFPxtT^)YnH8<-{82jpAcB5B*P+_No6*5 z_>7%6cWtYKYi52nl*ptygk?p`)cY8OWR4#&k~=8es?CerpWf)lNndd6rq$DK6=V#GBmUf zwN3IM(!4d9D;4rhk=zI+=yP`G$P_K&t}Za0L`Q+O$c+pRSO&5e?~s!;K{m z?an~Y(eJ;i6iX>>1a`GCw3^KtlF@Wl>VmR~+|zLLL6X&ls#%7~uGP?!nv?a~Abz{6 z;HX!c6QM+L!2KP^kV0yh35M-QAo?sqW@SNT=zQEsyiumXiJ_yq{#^!(ETv?Uc#op{ z5AN;Yj(8cf#t(0T#F2>{4gtP1Rw2D5Up@i1YC2v-De;^#hH$ zFUP$JY!M6gCG@K4wV5)Bohnbmo%+nDM_b}edR><@ts=&3U zeP8N*>Q#{1#iV?58ks4c@>(r^B1ZO}uUL3_rVPB4gUur1SPa(*S>+eRQ<3v~O;<)q4wN5=rzuvatpb_h(IwW(P3c;cR-fK_K4A zXaqiJ%hc_u$qg;Ty>K(Xy4@Zw9Q`b&JH2IsNv<>*nHyCc@6Ii{3nk>CFzM_2Q6{ZJ zn;wCX9*WOLO$B@?F*Qc+sO#x=3Ud3p_EHz)jPnf+c>QnUqIg)nFq1fXN26>Ju4}T ziv|{Lx#)?tlqj!Ucw!LJD~p>y`l*QySM(&ZafcFHg2<_JM;QK zyM*VJU1%YDqqFu$S|1)tfGvw1`eBVUCdC z&0}1+G4F4(F~8uR5oqzS|GGkPOsv?~b=9nP|J;|%p$(i>DVtnRo#SLR)mY7_!A5@M zGJrd0I$9q+xQFXbqmipqIvYkLpxbpbT83?>KS*yR{iPmMB1m|!_+>iAQ;_@)F+ z1y;b<;vU%}*&2O3vOYaS+5Mq`>D8nBOZ!4$2nOraW-}daug{R8%k24HxzV5&Pk2B{ zjG-Yld-A<1OqS@BD6x(<3H;#K{HKffT3-^#})#tt%l z`QJAI7cA(D@HDx?kwVA@HlSWBb~16|Gk0%!bG?)Lgy4svi2zwuT@MuBR$Z0BkQ+3? 
zB`ts*KoUw3{^wBl*H1p6D^-*YAd<#X?P)&W-6?c(B_8^vYsSt~ytC1EWbilmJ>0q9 zQECNX!=H1GDGcin!?Z!|-f>a?wn*NC63dtYI18{-ME|(zzkf|n1(?L9lMEN>A20Hq z4gd#pz8L@gU5Mdhm0)Rqu2{0>$Z!ow7{`pbYSZP1LExy+m`F$e< z-Lm|Q*!tp7!)yTKkGI$YkBEOI^G5WaANtq1kdP4cX`9B*y~0#nk@=sHC_&;YB!Eyf zPsXqa|LOX~eXwl^Pl2oq1(64@#`)cmQNW`Mj}xp*$l@%?IIFsfQlGc)`0PJVhnqe+kr`V)GTL zr^{JiIByJCH0+=6@euZUKu;@O)(39YkT8BS@VTBxOJer+eyn5BKZ?jY7yIPzMZ`^u z!(#YVSNKu4vbGl~!`c8j20y;MPwF(g6+pj=NLU z(Mv#1E=zhVB7#~`vis~8vSaY~ku1q6-XB32w(t+WVAIeV4J1W_V5%FCPS;N3S(uFv z-v$y1GKWx7<#b`;IQ-fIoa1JDx%nsMD|{w>twJopcz4>Alkr}>E##p zf{r{0n}zQAquCW>PdfEphmmX`7FO*?_CI~#jD#;>^V;tz-hN$#6Bts0aLD(3$ZE@Z zthOw9{QGsF_0y{*HtSrDjaNK?Pj@fCU#xq)s;(kU@!{f4=zASm4M?G#j7GaA?BT>Qu?E$br zMmZ8bGJrz!1apc@;=gIf_&PgcGiXJP7kD8tX-BTpWqw%8U%$<7#GqIDPQL?QYh z%YXwuk`X?pM1|^&R8+)$$_W~kO{dlaR0r|1BUSHa5mXQ7opfiEon{)Wh*CLpC-lVN z2%o@&;9)W-c4jb7mT1SuB4Zw@bS1W82k3SZHlojr4#oJ+ln?QlFd6k>udV`;DwAF( z>wKcstZEF{{dZI7It=J$xAJ%OCju4eHELgVM+&^;u$(I`LaU;&vo@Jrco;!y*11$( zigFL_wgT9lOugECnVa-R7&|4?r=1Yw8#L;)D)lOxp|}3oS&pzS1y-qt7`@Hl^>`aE zmCFA>k}o!&)?qu}TV&u{*d#w$??Ylwt*|+h6X;!~QZV>l%QQf(^-PdB0m*pDdSaK0 z*>x3>2Vh}Ql8D*MJ`+e%C z$3UPSY5YLp9&oH`@x4YHtwKjeZS+f4>>k2xAfpfZ`opEe<@UCi4Y?E@WF1o2VGTh_J8CUZE;1t| z=g`T)Vz6CVqG`mpR$y|zuBnUI|+DKi~WoYx7lw5Xk*@G2-q34xSV8H8qarz>fW^f zB0v62RddJlFjM+6XapcXJhD}hmo+GOZ-~;2dw)D56`MXPNVaHv38n#g+G0r%7L|ER z3&AyXAMO1WPm{$gKtc+e`;ypZm8K?*`Xh*9dvXVF?^=Z@$9JddzF2fm&GR;Ldfhhe z_-^ziQi8hFs#KJF+|-~qc6KoLhrq3|0L$j_Zu$opA7je2Ci49ECAwaIAs1uPLybI@g1hA-IT0bLW>o2FjiGXCq?o#cDyuAkslbh*z=Hsf!&(eQ@F_@cVA2%Sr9MXE4Uk~ey>X8-n_X>dAPZM0_#%61qu>PhPQTMKu;m_X7<7rWy zws{ughgm+K_#@W)6C_g?nliA(AEcuA2s2{S53P+Safy*J4 zo$2T#p;UTLUTBuWAy4ad57Khxcj>=ctn@l%*_-jMM zAH%>A^26-?nyh^%ED_6IlnF4UISr0*7i6qjV)=x@@GFO%sU_WU>S!5;uPWuUPd1k1 zMjr3myPJB2lEom=#X)mpV#gUf|lfns*8 z?8E2da?6`}W(}SS5G6n~m%d(l9v%(pjub)hMhf51nCLbq?Q8@@Ca_wXdxfEQgfWnK zOmIR00kHQRiAFuUvf~T398hmG;1_0}>aW-shy(-%i5S{=a*1CZ6e=n|Jv}#V(|rTU zWD>?4iE?P~SDGtCV>$UfrhHT?`;pmL?Exz}`b;-J+qE9a=0+ECn_nWsk4Rew$sD~W z1?2OAklSD{kPX;1ADFG4Po|@-ae*Z=mgxHOL=I`jfS1>xi~)>f!5t+!P@>Kp=*Tg_ za1K(#M>}KlUW=eqpCXq2&Wqip1&J6P7Lk_~*8eF{_>-;Ch5?Qy)d?I~kE||2kG@K4 z{tdGez8&qnw{54qDkSj87+={a7^j@yw*zup5TGZIRqF#BF9@H#>&~DbSM_YMgLdY_ z(d9N+;@62c*MJZTX%jQtGVrDV+Ss-dO?_sC_C#y2ymZteW=r>9L^r zxEB;H?=mQY(nf>8t&Nfv4WLn3BR2V9aU@I1LMkIx+jF{j)Z;90UI9S44brJxcAX1p z*mP>ADS;-9NgvG@8mEK9s~0WRT0EQRNK;L4lc*J`FK0!r9bP`(qsTiODi*zziG}e% zS|Yozk*zvg1M1Cvqphs+t?oWU+?SH3XNe}?tgkO@5*a|KattUhOcYrRj1yu~Fdn9< zXInX0x;L(dNu^B8_CIypQfz*8#C(aR7wxrM&jfq%F+6>O#IFxs%}^&bN+xSmxD*~I zS$Q%DUs+q8*DVv|McAdL1VQRq1(V(yD1xlpt#P^_ zXR(Wf6a-LB-U!zP?~7L_eQv-SG5Gie%xnG}5^>&b5l%?Zr$ZU~Pky|uuuz4C#pP1s{Rv#lNE2d{qa zc~pf)J8uq=s9GXC02NEs`h9W3hGX8z3#&X54Hw2bZyUsTBp^3{S+l3|3DOsi89LJ789F^)5{yZ;+8S)0dEARGsTAqRDHhe_nNFXHVje^!47E|6-9ygArU)~ z0rkEBZ17iEVFe}7ZhC7k4U6)^^a~XMscr05z2NI5U5s3_`p5gz4Pxv!97_42iy3;J zTL&?>&3D(m3I5@q?EqdV0DsH0lJ@k@K64MDFck;dzqkz)U#FUo6CcbsUY_i21bSxo zF=@ge$CrIivj4&Rb?k<%NTW*aLqhRu)xJe{ugC?v)&4{=emtH5djmlDX+C;GP4!v)+ml+FG zN*a9VLHtdkC3Q2nkPHqD(cI_y^0-I&PUb-#giKo|(Gr(@t%5SE1;XRB5b_9vN@J=6 zk?@(v=1)H1B~3due9>dJQGtY+GcM&~jc@!BjwgBM2sjfYVRR+m5;847ob* zxaQHJmuNOnkV{6P?J;L<&04iY1LA!T%evu5-_~}Dv?6N&J5Z#C;?rqMzo2AjKK$4b zL?D$vUPx{-ni>5(`wc2s)6ove)95bjk9|zpOuJQ2O5dp?8n8E^v5s?~r(Ek2(of_q z4k5r-IlWe3SxAc{WT6XMU}quHuD72?KwI2e9L!MmiDal%-YcGK)}tJaE~^k@Zz?gK z&Mum(cai##7&Ar`PcNR3;&!fTGM!$>YC6(~KvjkPdXc~B%|oR7PL=lg%5RobF+#iq z@Un2H1rNDSxr@2tV%3cLv0N<~+&ID)S1yVTmLB&ZJMa0F9<%lpJefcE&eeH{RmK`P zj(&aVAIN2VAi9qOyZJIPzmu_Oet*E9K;`UdS#`rX;a7ZB7QeVbWrU_>2Y}2IqD*{t zOlrT(X7jG5H3u)z@#@-4mBp~HlnxQ(aC59Il$YXJj1uDCk4oaOD14(ZTBMLksI9+} 
zjc5LWaBMnJXma9v8s>;Mp%eiM8;=q3O;@$mSxn8uEpBq#WGq6a(u5wWXV&PMO`3ST z&OIA-Md;6%FR4nksF{lrvWZyZGe3qs3XLE)Dxwd3TG57TKgxEl-}3B_Y2Xo==#lM3Z>H3O zwaU3P*2HFW?Mg&sXsAiW$_ugmdC8elDAeX!b)xkrsL3-Ilv96UYSwYD?fx1mZj+&k zg7VdiQHbFHvpwpZ(-f+>jPshfFD*!z^roZNQoslvPSDoSfc8D zQ&yURG2zX^5}d*_dq`9oyYjDbrk9~mR0la4hkx2 zxUN(qdeM$|<3~Z3bG5o%sAH7Y;o(USQV6>wW=!ASO<}XHkn&kjSPx;y3(9{=;&%K- z;qh^kZtKx!e%-NdW&2%`zYo`NDXk!+FgQ=D8z~j?n2jE~c`}mYHL|eUP@;!(A9?xQ zK3s^V>|YJS?1}cckWFE^&#w@^20)M&eSP)!(K0v|gC30Rvd2vrTV+6RWXy?DjFIlf z-jHSQ1CIRRK*j@sMNSzXP<07(LO0O`DQ8MxM3M#9T74d-`$&bHKa!c@Q&mUVpRE~u zYFr2-M8K#SDT|viUmAwb^`SwiAYQh-T(=#8=V>~j)Tfa876kOa$&!al$eVDq4+WeD z&E6n4UYAdv6^cCY@t4w>>Jla_Mqf7v_>qslhPP^T)AgevZ*?Gt{F2-_*MIau62hK+ zuu!rCICETUb-^CWSG_G7Y0W{{t0Z5_S1i~)0z^AfP_0yoo(#4sb#frV9tnvmsGu@q zJOXvdRV+kXzLb~8^T`AfEIVAr5Lgrp(~8G*9rl%UWP#~dP*cur@yZFOTfTz8h z6Ye7dFNsf|XPQ(S^dr#%sC@+E@yYXfIe()sEn_0`n_?`COfTkmSD)I|qd@yG`uxE0 zW@dk2p*&MO{I!|^3i&+`3m_P8fHFQALKds~BS;t^4b#aPEUnoC)&=k9 zKy^h_a6Go>pO1zXf6rV~h%E?()8{3xx}8*AxK zOi;VoySVXo(JsQXM(ub-e#0WF(QgAf>V(`9+j7T`Va_){$S@VWJof?+0tiYNGntSd z$O{%)_P7y@wZNPv{oFF+d(Cw)FKgWA?LAlY@h z|4W&_XweB7`zo3von?#Y3XsJX6m|h+L_hMe;ccmN+4bdyHpJWrMCE^V>>MVi=XGt9 z09d@-eIwzTasm$@zSR*#KD;AMW6%zW{!6Ou$~fTE%gFm4^3kZZX6o=rf7x!N*Co+C zV$koDUT|)GHE+7QIe5)~y31u}uv>d{ZuA`+-C*GwK0P(#o5b?vZDzK5wdI)$CyFH2 zD~+*?<-ozdD_r$?wg zAe8HO3d2o@`SFOvpgTzwccO$6Am{dqz+BYclhe3#$3uj!BQ*D8B|Zvbv383P!0q+A zXvb6|*qn3GQ#rdm8Y)7S!Ehq3M+Qtbc~C07W|I)DmA84spS}R#Y|`|g`hzvzaQb9W z3NmRX$oTHN$gwD7GV6VJM0LsuwmXd#s zS_Om$2Lo??DKXtI4Ym$V-ISl*D;>IDcQfW`73FsX<1FtTYIGeLEtho>Mh+x%yXxNn z*^Jdi^*h(XF996SY_n(b8Qlivs`(q7=1`uSqP;VcNV(23@i6`vm~gySm*^k(miEyd zSd3ZGFUDEDN)%drESQUnRx6=}jwJxOuEGZ#dBC8Ws7`^NH#E$92oQ2hpXR-Fj*OWv zvs0#$Z_FGs8xpT1*BY|_wJNc7D=J5&RbKf%UtUHu3}@%rPmR;|55rd=v#4GZKC(vr z^YB?J!tD4`ESp)T>KCPA%HbIc74_@rqQjt@%gvQ<={u|@H4i)vQcfi@xlQKXvp&Kj zPbtL%;q?YesZjiBO(Eg=I1V|&#EnHS(%Y~#zDIvgDj4cgr$v4fsOcjrjY3~m%NdR8 zgQ;ci$WLB(bVW0jkt>w4;tk3?l8;CG<m(e{`bF@WLAHD72xLMlg_yBSIJfX-J~;jzs19%IsIW>uPxDmflZlOA|6z6E>- zBixe8ufQl|_NeWYFb^QC=gsTC-VD-5*{u&mCOB-XA=4@JTFbc-x(Yx5`6Nr@hq69k zMD8bQ_WFn6ho2L0Ph+_%=1(?m05_r}F8L-U0xGriDEqyM`3$xR5RcZkChJW*W30WeVwaTD)u3LS^H&Uh?igxXyqm&(}jryWV^8DN}Aa zOhEbe9P=RlN2EZ8uzpZ;F~GQTN?` zj+u^Ls=R|XIJE`i z6i{UjcBZW%+}y72K{KHIcMG?Zo-O%@l51|Bk4_;f0yL~Dxp|X`9tK+@2IU{dsT;H! 
zxW(QhIW4_a$26T>1-xRL{@`6WgeG{@s$cbg9ZUmcVL=9?1uu?9NxC^vK$+Xom|?PH z@vBJSbBS5@A`x|%7#amNz?XdGqE!z?dt=2MM!$<>Owhia^OoS0}zuamOG>yW0^Br!hWO{K8lk)C@1iDw{P;m0Efr72>b9CCQct;WUJ+Fw;GHUgk14GV{6G=1-> z6#RMxohEDr@OYF`-1b$wflia^e@X?cVB1B5qY%~)AlNHi-RN?>BzrSX)_kuT0a^7L z5H}i!3u=H0;(O817Zl($z#f@T%ZR!o`P0`%FB1?(_46NR?g5xn(S67b*P{d#o$XcL zB~28AQJ3H-L;ZrhHAL!>GFPk#mP;iO_D6}Gpi}=jCV@`wcD_3@Hpf7~?>?pYhA&JaGG}I~ zwZW=|4wELz$-+*1=z-Q;t&RTrX^FvME9yPx^Lg*J@d(o*KaiyaKxU3RlWr0q3Cd}4 zv~g3S;PZ-{@6OXiHeYCT#<}GJZ>2zfPK&WppZ;`*A^#F!@)Y~T84670>o&~amKD`m z0;U_;K?_G~=n1dm^@eOXd+9e+r_n4K%0=l^ilRO)Pj1snP9Kh@ff;J)en|`l3ZgjX z`()Az#T=b*gP>A@+K-sY@;7#FcM05z)ao^g0ykv+3G6W@3KZ&LV3NH({bpf3$+&rk z>IJ5T1q@jK1 zpw5lkb~P8@0h)plv-t4FGXuuTN&=6=Sceagy6zGDg>q-joHlWb#>1baVN-+gU-|Lo zksn{(bWcLWARc}wcqHBZW&GDmP5+u3M2^OS+6qneReMIR14)BZi_;+2+<=W)yk_mB z=%|M7xNBWwl3&el4}gfJw_dDYbLmj4X3Hro0>$qWO6ifUpqS9yDU`R)t~Vo$pZYTr zmVI){Ez^iVAs`Pw9VenthH0bSe1XRyJ#?HQ^l%R)(a>VDI%M;LS$5=z>l8I;0i!bn zPdO!qcBEu5(UCV|T_$l#fH6V21DWj#@jCF)lw#T_r0_K2a= zreEugc8JECfcN-V!f|=!uWeQP_epX|U8jSRDDlB;B)c#`8_d73S+FUAoJ51H(H5;j ze$-fjLYLytuWb)Cd#^b7tx{al=?rW^VVs%A5Il-CigQDY=FKl;hM$#NqXsLTG7%&m zS81wyO=8Xmu^?OUDg<{9rFuMHwH+aTz3;m7Oz@~Ht9K3L&-Oe*5b8za1;rYwi~5_y zxzvd~i~&^5K6+;Ru@O2jTg+`LRez)>5D9d)8YbX1hqN%Vq++6g7^k?Jgx-M!YnWXU z1fkJ_5Whv=X=h=NZ5CEu9Rdn?-+A7yVQ-BsD^4=fU`kut4yH6YCeOEBpL_=?1a%MS zR1H=CL*-ju2YX7 zZUF+6&u11tE%l5Al7ZClOFlPyRQkk1=pIKM*RL3lx8FioPThuPt#L%>cJ)U%G(!@ECny@I8ZCyVV2!Ih*O#n<|z>w0oyKG49S@yy}rOz@&h z1jIN9!?AQxv%#tNOEz2T!p}=dSwa+CZi$AoZ6Y2I$*dw2)*oR+iSr0CLcOkY$B?;hq$FvuA(b?s$s{jwj(Ni(2*9!yvif<8-uTF0QTyO=<&NWQN>n#E}AS4)FhW|Rlq*rL5Gx_Q1s6QIV3*%9R^~qe*6_#)1ZvSXij5OccKRe|r1su&BB(T%|-&L<9w-6eOg( zqy*_!S{RT4v+s{||DxVqfYjGh(yvr3_E00V}4aJW-x??FC25=X*^r|)|a|^0Zmcw$$cZMC;!k3`w zVZ638&*%DG%Lg(PYu2hPp7jwu1<>Bp=)fqec9Y&0<=C>}+h-$Eoqgvr%U^ABi+7Ey zd?BCpYnHXx>k$UNd|cTSFNG^w`=JjE);;8R4$b%-_JbMPmU}nau2-QQo;}e{Tc)PQ zsXtMukK9i~+!Juzj7T;JJ312llAz_gyVs~5by#OGz30E`jFn+jSWs&xIamIo?|a38 zj*v;q$zZ$@NvwT{aKiigdf|hulMT@WGHhpcBTV~h8erFEnv11$gIA?YVy-Zr@_bAB zw#5xE_U=UjY4P5m9i|?3_R@YG_F>X0E7GyMvw44})!*7Wd#8mze(77Xw6&f!R+Pit zMYb~H8rMFhiWQ0%Co_q&A^S3Ha`B1MHOvXDvtEWh#~754TWxJx@#CJAMEn5AW@PMYx>-Ce;> z`e2j#V=H+~o9Njv?)_F&zf=}-giB_0ypSNWS1eBS4kz&gL+Or)H|`)v9Sy9d=!*Bax zDBWA{-Bmy3UtK2T@Ldp$iTD0yD{TDXvriZGjrz_Vr^%b8_#~^j$~l3E56Mg_8IjBr z^j%FBWGg+BbCIu`CngPoS(Y)kZ;Qvl%H&4&3oP}tTR#sH46olljTMz@xG>SdYu0i) zn1t@I|60*Eh_y)&^NcTC7@|91SCK>fYD$dltY6Sq;QL-l|HvL~9>la>7)uahy>L@= z<@BtQK3393>!!I9DLPSgsa0&yt(5;VEq@Kw%SLqCL<8Mzp1j3UoC4gC z@np?M0y&Xb%T_-2z6=Pb>zrv^igAwI`iR}>mA9JVjGC(=RW6Sfjz5hyoLxP7tx==s zN3-;P-DaS9DPXBQ1wH$j^J|m%c75;HhvGWk&krZGp+$H&H}6sVcRyMNSqv@?k9M7` zCOT{&8J>$nL(+$7lGZb4Y>Hz)XE(fJB)S1a1{>1eE^$pW0F3uUX)2taA)MJ(@bY%p z$m9-xs{c_TbC1WwS_HZLMy9*z66lKLCl?i=Mdx=kT!d#P5UP_aNCoc^!}y`vP|EQf z8f=XQE3*EJDr)&!2=f=2OqW-Nvk;a%vz~q523$GGS{2b*wA7xf7;k-dDp`uoz}k7T**0V%lRC^lu}!78qaz^ zQxD6?srY=zi0@U~G4+RS#iyR++tU}m3CJqk!Wg+t5PKW{lkWlSZ2hYR}GE=GAwnIQa5#ES35&E{vml_i)2E zzc=J}eTryNL+<9mcfgy!+Ro}IX6*!JVr|YuslAHc#`+GX?A7=A1+>~1@xSnVs*TAH z9gcalx0ip#3MUNmqWM(+bU#$z3Ckpo%zCxb){5_oQ;d+2F&vsPdf2K`bI}uc6cF(# zvuM6FOM2r$!l#vnozdWM_0(B!)2KsE1#P_LPyw<9iBMdJTI)%Vpc%*7FFyV^+N*aO zq%}D-295 zodHMY)sjX=4x!?5G$_k1#br|JAZl5fwC+gX`Vgm#fn$D6uhSEqvO`*q`e`l5Xj9n) z`~)h?GGA^$Wxi(`C-~R|cWAA2S{g=J5r6T!8J-BOMgJCbG?eS>f)IhfaBH8Mq{~^# zo?!I0X!fw8pH07-hD&TgHwHaSPXEoJ+8X%miV4Je7 z6~of1v2XVs?wi*IZSwGiPV$XttwYKjr~6Y{SNTj2_-T?AM5g0wb;R}%*UZ*+nLCwP z3ZY#SIihw5M9QS-_EAvJX~3-^QFV8?J8nikyfhr0Uv4P zN#J5?4JMABbukB*iB=BDE-pQXkqIczi0A97pL%c~9-b0F_gaIOXDwa-rIc`@!-7om z)gf*|tFf<;E`Q<_$qY?dtn`a6m9^WAbvD6s#Jt)~O%GcR1~^=7@ta=oZ@%`O!+9#X 
zhGz6S`LC+3Q82nst|CZ0B}_^NHuM~7CceCo1we{Ig9{*YCfY9MQ)WT5a6JE~HwCGR znWX}S3Lz@xpBbQ;(j5JwW#gd}jqi!LAJ?B)B@x`y`jHaRvMg?sGA)CvMxuWD|(FlOl_MAiGmZgni*GMO~At_-UOo z{rG9CU2O@M$?#Vro%oIIqo8SsF9g*I6Ndmp7i6eJ@x5!09 z+^}r5h?#GZf-XDkv!y3wV^-vFhgTo9jB8u=go)@LzKZ}{)@omg9l$!$qBjXRSGc93 z1=N{?PJmqnUH&F7K6-&EVo#YNan^O0^%KttbK;rxjzAv!V@V~Su(0?DCppOq2oLQ!$xR*Ntq+fp)78{S*XyC$Zb zJE`f65TxWQHr^HxCI*3tLd@MrX`y1D;+-dku|+};hiQ%ZYc%@uZ}VeU2-yx~bktW9 zu7vWtS$XFaAWH$22dmx(D zg+$3g@sKv7x+-Zng$V@NbcB(F~&H2Tio*NOHGoc}|MI zGkbuEH9KhC!{bns)A|(cI%gX#?C|A}m_O4@X_|)V=~)7gC)^-Lny(aYX-EUvlZC;Q zv}-ZzDzL3}wkPEM_%@s-@uQ2&xRS~{+_w7(bZPSj!r!(AC48@x96YLUxpGSyBuam6 zU$#1Pg=5I~v=vS#L|-Hdi6Yj%;FEqycWc=_Wa5@NB`c%!c-e)39wG#F-N?|eTK!s$ zn~s=?=K@#qYTV}YI!`nh`r>9<=k8fW!DK%e988J%dos%hucQ)8^tMdZ3;Q;U3abN0 z_C0!esw;7c>~~aNU%&;a&D206D`Fs?hu5!t)0oy;yaAs#z*M;?sB}N~YZj>(8U^?CZd<3B~f7#eXpLqSEYVH%&XdcTuOwm04_!yQ&NN2BQ&Z{F^ zx4qE8#Ch2R-S8k`62`l-1kRIR*paJ4BQ*Z>r#ajR!;L*0ow~={Q!w~kUs6;0&=jq* z+D%;K5VMc}rm$Uk`<#ORgFSDv#=^G;{RD(eSH^?7ku?C&}yPmbgc;E$6z;H6hcTVeetWBM9 z>`ya=bR!u$0zvzY&y&R)FQ>8?Efvpig5c_C5k6Sl&;h{ui{4-!eIaZCWY}(+0K8nE zcjfcik9YPzrgY4HoGgrtsgrn7L@jr9#p24AO2WK?Yynup>tQJD6+Ba!Qrf32J zN5zYN&B^G#v^;sn{Pi#eKwhx2^?E$0fFl6+;A@F&Jl4&IX!iDDNUIGQ=RVZ{zDcO% zhwCs~l^s%t-4|qIkXD6kot$AoNQb9pNlVwD&?6@V%xL9Fn%&(c!^@@Q{QfIiaSw+X z*R3igb=@{Fmqt@+Fe-v>Y_7l@jc3xQ%RBWYw$7!Qg-WOZl%jThpfmT=IXX=cjXKh$ zeYptDO38~Tm>mr`27u6h8Ie}Xf!SW%5F2$N_4FDNhIWhd3CMmNt9I_*DBSI=@UXlt0>O0 zmw*)PPE1W2aL>@M*uu zn8)=#H|xf80*MV+s$4b;)NzbPYqfxQoU#5?>!v`A*}Bk4>f@}C^t5dbl>#fvemygG z4o%Tn-z#0Im_brfM~idU$0rx*kzpTzz}GDBJe43>8a;RJuD`Un=wm(vsmjDET+t!7 z)izyVrv1^Gh9@FE_q?`st39F(?VnW>uuC5#ZwX$E*snAIWdO@So9hmhES*of&U}`&o z)VnKs%pYQJZS{H8u(Fw&Q*E^Nc@sgvfu& zNcxg@iF}uT$s!D`;BhV`m1IyjK-rg^ub}8A(n7#WH?@3{A6}9z&sCItuT_=?PG3+d zapB_7^CT&F;hr~0IeOJUq7CM_GS>^}ZC^gHz+Q9xk4q8;lkSE8dLYYUn7Q@3eGYK?Wd}gXt=h z?^e32q)IWrzs2=YK?lMy9@-hb*3V`6Oo2(M7f=m|v%gqY&)+QDnFa_c%ff!XKj|&J zj;QgrgvcVAeq#c}lSQNl5WB|*IK_@|Zn*p|$iFW?DyR@!uJoL34J<<6Tp{utG9Ts^ zZET(>cF}R(Z@YHgA(x6@O}~!c(Xb=+ZMv+@C3xJ(3a+jiXpx6?oh7;jOO%=m@OEA- zd#IQFkzPmm;ia1)AXq1$`O3Y{0xjBE4i|xjU|1@GJ1^4B3ZFdz*!id0iZ>Xv1_#f| z3v?{>I8)tAhvp&@6oUaJyNth@u^@49qgEZK=52i(d?=?ImzR} zVue`VX&Bq>BoY8v$dH7+^P zG6%B(n3@}`V;%S!cIeIEU-Oo3AkNxS|_lUvbNAG6wM+TxC(86 zKsc1;h_A=4QZq$!5njR9We@E$b$37`v8G?#sGZj*`_-6HWMeG!J0yRiAl5=(I;rax z*@Ycw(i3(}#&wTqs{#kFKEQB|XrJ`AE0i+i(OjWh{^|$($0aL!2)H$k&_QDgk)_V_ z?L{^L^pc*!>_0o%rlyMTSlxMlvD z6RFg=2VlEX)~3Z2j4z2t#+HAy8l?CD4yH8j)?Y(Cp1hqa!qd+ zU_cMv5wOq$mX#vW5+2tbhxw5xFUKdJRE#=1SHK~E6^A=T=O5CFffxg|bjX(pAN_BMxzoU73)z(6uXbMgD>T{n@2zz+fV6D-L+B22vCzuC%Q+i8B#@6<$ z)WcSbtrXI$UyIVrw!&3v9}@%my8P~SQjW!q*d35_@~??2)8owsN-Mu;%;X>bm*Gv> zkv9;TETO3vM64tX(2ILjH%Q3=@&nuAyro&D0b06r9E^%}F*dCHoov*m?E*rupjt0^16Av14ZQ{g@~V%DeJDln*-A{Mh9u8#hJpTiEsq3YH=CNQuJTHhMf7`t{R zGEoE}SCie0&>jq$+lUmv*K^l#`LS+Lj!rliWz?{oL`t5(cxc6Jm9+@~*8QW`Hx}m` z(EVVJ2Fs57tM4vtZB(n~Fz2Mb|#0gc71V$rqLi&Un6&`i9 zd#8Rv&gbWz~VPv?fbo{QHsycZWN(wQm~=xMldRagYnEL z4J58sh&J)>M7M=A!Mhk;X}h1Z63+KcQx$ujQK&TN+iOE(t8h}qvn~Nu8gXomSv!q$ zb7VTCWtVjm@f#V(GMSwi`ig|pAbeWg6&b6Kyq(XkE-ic`gA)zA4uHf5s zxg6-K1w6to2IWo&mjlZt8tH`7iySNCwj9{olI(~amU)nO*Pi&v*`#2lzofun?W@8K z4*~6#fTPb{JKF9PJ`_O19=h1t~#AQ zSrJ)exY-e3cht4|ykfp=#I=LANfK64<+77a%4?%U<0d2xD_x-ux*sCf6D#=q1%T|F@y@fO(6X0zflReVWJ|&ORY;t?jI@o1lAo zoM67@fJ6wTDB8ioHZi>tWdq2`k_Eb90%?pPNXi?9EnRO3}(ezqGSyta) zVUt27C$QGyLi3OhwA+|@s9Cj4OT67rTRso**))>N`FuYaU4d3QR)btRTn?XBff_)% zLS|bPrNHs076^#th}J#TEThR4p~KS?=cCSBb%!09{RW6bTLYo87vMMvD{z3Y3nDsz zmxfY0$W9=LoJ{U<(`JdD{=>UciOKsf4nxJyET96S( z&3;~l)~v$diUbYEc}e+%%M?bm8X(V5@$F5P_-EzDc`(3)Xc1v-0`Y|W+{W_=8~~SM 
zBPLk>P>mKF;e0PiJp8X5?(?E~l<_)b-xBt?Z_>Z69;flpb1by{vBN9hY9VkKMzRsD zTx6eMOq|WbR?$B)Q~y-3$l#^d_iTtXRR@@nSmWQl`1jRw8;s*<()mwW%v}HFPxz;& zd1c~(k%q7Xxh?#=2CsA1L8;!&faPDK?w<|8BSbElA4D1a`BlFR5Kj-3qOZPE)@TA%MUw-iFLhY6{eRW>hiQvE#s(?pmy7~`EqDI>0+E*>N9b}Pj)Ws> zsOTjC;8afPV*9(j$eDr_3Ga0Jl*x>`kKYDM<$E9buuztsrvasn%tS*d^SmkVA!!wT z*js<==Z`DOMo^0N6*Hy!PZ9rWw78GRY9=rCk7v3h(8Rq((fXb{yNJ0Pm%xn3_J~_ z(pOxRwNa`ySsbYKBda|g%2G60P^zcHVvbtrf3)py5s4rZ!!GBpJhViubbO@JKX-|u zRyrdn^>VDsWJ0M{0YFK1k0JOCgeU1x)ka5M}ef%JV9`}A(-J@+sF{$MJ z{zJcIRAUm)i{aaN?#N~9OVN(J_eJ4v%R}0}%^9GUv#E3%x3)vTxC%^& zggx9|@OkPMz%}0f>KniMGP39FK`5bUeuAz$q1Qq3Fo#?)rI&jhnET!3!`Q)L4;xJ< z!mj53^Th8r>mpw!rPr6kgWAflvAv{XNbgei#LJ)*)XSIZNH(`f7He|XmmgAEr0U(A zBkIWeE$0t+ZmkNKKq}->W(6R9f&_00f#acRj-|D|t>4-6STF-1>Na>7z?An>iGN;E zzQee~X_Lux0gzZ0l<5=1QTY8|CL&gB))sxDzOSe$R2P{+7p#Wp_gqC~I9{w^*t?8Q zQiFO-8b{jX9z)l`e{AxfyYaAqQR#~q+(c<3$u&p<7Z0DmSyz5E^D`y*uFIyYDAyGL zAPq*+@dDNbkmU1QeLt^6h`@;Nd;Y!7p91?O#5ZfmarTJt+cC;wY;O?@&)ow+qykRp zPW$rab_nqNWGN-Z0?O)+1Rb$B$)KHM`(Ei>jW3*H?^|(yzVv@SL_GKMc$Ka~t!gDQ|ffuZ`TB$B2Z)&^2r@2hX(q7poq#veBpG$swx8FjR#>5gkb z&+>TY6Fe2*34o|uCWo6WRqun!rc2Po=6XYg(QJBm@}~v~c`cdx2>Qk zP9N_d_)Ece!Jkx6nfd4n(uAvOVzy$@OA~yGv#T z&10hwSueB$z{kJ=oRl-ha2}ORJuklle%pUM*Z=)G@vN7VAcJ7)d-lakr{(&gZv0Q* zftQ9pxa|oj*JH1wF zFxVguI3_!qj|A^;%*w866s=YRj|F_{>55u6k{Je?sBF(?^F5J47zM*DQfhcrXdV}E z2hydPbolG;fe1xd(QJKT8FbhefPTW3Anx?`Ci0=?stoWQ*tiC|FKmbxtV}~dRyUm8 zc3?fm()pWwgY^fwD0W!w+L@`pxO@b&PH{ojqqO3@EB=HGj42X9B`fjul&OQ`@7@AKzHgYkSY(xmXR2tGcj!AuqDNFgvyLO3Nj zdOR{NSsfpDE!x}dSv!d}bX}O>KN}Tv)(y#^AvsWOC^W zV0n+V>-+5UaOYTe7DD>_sb7y%7Aj&)Y&@Pf&(oT4mZ_dCkI7cGkN>CY-#3AmdEw&> z3i2}ALb%?eDkZ_h@Z)-rCGqc9{qbmG7zT!4-gfy;*x%2IyhnT0M3VHF>St?yKi4M% ziqu@-hcYOtlsNxM|GiT7wcoe@tibdxC}OCybie!am*4j&rM#Rfl%N~>LwY|8OG7|W z*hG0f{?A{2%PLb@#72Yaqxqjw|MxB}X%U;RGS=FVi@$IGS-~J3C2#IE8Lt1))<1=L z)RepyX%%?|D5^9kz#|>TP9ynWEhC@vQp3k1Er-H~)lpQ*bqO2y*=rRPx)EWxh>feg zWVynCqRRizYV+zb-|>}fh4!_Xl8}(erx+LvW%JCrNxHf&uPsV}EX=GNvEflXLmm{` zV-?kFnX+k}vLPWNk5{VW^Sh>j`vo!_exRe1WumKYPcoB*LY!LSda!9%I2M-dH4+7n ztJlaS-~=edhe1NiYGgx1b;YkFtKEpHzuYwC!QXcGQp5n`-m4~!fx^oJp*fB`Fd|D; z6vl*?*&y=JhUivOS2%kNj9*)>EEI+5V?W07yBnW;{~)Ycp%-jj|Ni)$o0-_S1{Y>W zRg52LoA_rrp)gKtuO11GCk=-irA`A32L{X<^^9a^6JjJMQhT(2_W%(+7EF>Gq&K{# z4OnVJde#M)uU_@Ef!8g0YueC>jCEFgvqSM6S8kX}*$^d7U&%>mLvLc})v3zBad#DB z?)@xw{;Bt5HvIZeoB!PlzRG&NN<>mmUd*?l?o1!zS);@2q zQMPcQZ1y~U<}(|O0QT5>tg&Aa8VjswUm`z~j?n!rAPnq}vWfYh^m8FV*ty-qT)ArR zmbz@;qf}iL#&UIwe!!go5u0?b%d>8;{hQP2ha_mj{biq_P-3aCh>mv?kr|Pw-m&}y z#$DdC5;J@7gsr@sjv9*l@#um?bTaNKj;$JQ($Lv5cYKk5^v;G&c$xx*L@{Kj`pN|u zu4|VV9re70O!nIbkk_6jwdN>@#aGr(V|GR$hTb%J??G6NZjD%ejSOezGw-nXpcc_v zySTl#moYIhQ8mIQc;|mLERUbL@_2qxtf)tSHT?dIi1|Aj0WRuBE4QPQN%2vL+H3RT zAQx}IQrp5F8BcwWK3g#m^;f|co4$0-2a)x;zqR>Wze?m@)mVPlNF=0)(!NsP6{4cN zKmA>-Rg&li)m4AWqWh#;D20zb==i?rRVk6QMt~5bM^Nuqyt`;kL|&B@E4Nxsa&3t& zfzKP}-S+%XpBq$9%%{TjPj9AcqI@K0k;Kv4Kg{7lX zYag#yPm~o|%3*bhBZi-Z#6aJoDGAxFm@Jp3p60doqH8jx&D(RnY&C2s1ZnFU!8qZVb`> z{krpLcW-9W^B#|su%zdx3&s{W^6Ge!!%JV>XFP6pe+CU&uN5vJ8SyxUX+wM0H3i4h z2pgp3i8hsMq89=}cbcjwWY#2zFd)!pkYTZ-SgHQ?P_%W9cZe{fU-?Ma$GK2}!{ z6mnmfbwAh;KCPj5uia~Nn6F~UsB_wUD=;DyXQf->vDsUXVi2S(48q8^3fyeT|5SRK zrathHK|yZM>N&T&!$z`&^ZfYIF=RYk$Z?EqiAC{#Tp-{6xY=_DMxFf?_`ua(tq<{I z$6Byrcgnc3)vpdm4Lqqur2&lsQPL~LmB%O0*VkXC;)R_6$b*eY#+{I_vP#CF}dZM|_ z^U2WOjL;io;R>#2$=jVr6!KIC235mb1q~m3B_szNAx9Rla34?Q^@`Dr^IHq^xAk(L zRqhx1EalDyb&EB!n55t(biZosjjNkYAGSM|&T&inE4K&td@I7zGHc(}d~0P%n`a`<>P4!s;MVf+eWfRR zGvV7s&624n3yvAy#eJ;#L5G&3kN~;kFBbxPUN!An#C;J?_2X6$R(1JoK!#$@r7Vm< z&6Ry{tX-eD8!_Ihfvn9|nntsYB9;3vUnqzXb1w}$2X3(!5`gK3BM zC0at^v^}FokrX}Y^sUo+(HX^F0VCTR3 
z$@EUElV@6Pi=W*d>&I5lm4-r{t*dC`+`HwMf=_OyPP*-^T97qUL5)VOSsIF{;^OnN zo;NMiZXU8BG;jazCBMez#0@mdni7?u)-UqqYhz&rwnUHKXE30PQJHx-8|e~1yCZQ< z^P`uAmIJ4ysIz+gM#of}lu%q1F9cyf?)%a_8PT%BYf{Ux`f|Rv`_x}!t zoRCEJPe;OV0_ABhoZTFkoySS~PdM2)1J<`n%C7t4KT4|Mul6qxLb341Gc9Zkct*;- zKWe4;>zjXHh)9F2%3XK1=!d_5`}yv~D6r`;UUub1VR|88g)MvdloQpc%>kWlYxkC<l`-b?ehB`B$Qot=-`Y*bmRh>ki3 P{-hr&i041hd-Z<+i}Ls8 literal 0 HcmV?d00001 From ebefae8db49aad94d5c3c3166673680648a8720f Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Thu, 25 Apr 2024 23:09:11 +0000 Subject: [PATCH 05/21] update: notebooks --- life-science/00_monai_decathlon.ipynb | 212 +----------------- .../01_monai_decathlon_visualize.ipynb | 192 ++-------------- 2 files changed, 25 insertions(+), 379 deletions(-) diff --git a/life-science/00_monai_decathlon.ipynb b/life-science/00_monai_decathlon.ipynb index 748edbb3..18b2cd0d 100644 --- a/life-science/00_monai_decathlon.ipynb +++ b/life-science/00_monai_decathlon.ipynb @@ -24,20 +24,10 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "6d8a4eaa-6c15-44f0-81f8-b0c2800b1017", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33mDEPRECATION: flatbuffers 1.12.1-git20200711.33e2d80-dfsg1-0.6 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of flatbuffers or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n", - "\u001b[0m\u001b[33mDEPRECATION: flatbuffers 1.12.1-git20200711.33e2d80-dfsg1-0.6 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of flatbuffers or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n", - "\u001b[0m" - ] - } - ], + "outputs": [], "source": [ "!pip install -q -U monai wandb" ] @@ -54,91 +44,10 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "a2315b79-8c0a-4cfd-aa6d-4fca55d78137", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mgeekyrakshit\u001b[0m (\u001b[33mlifesciences\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" - ] - }, - { - "data": { - "text/html": [ - "Tracking run with wandb version 0.16.6" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "Run data is saved locally in /home/ubuntu/edu/life-science/wandb/run-20240418_221202-62jrsztu" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "Syncing run usual-brook-3 to Weights & Biases (docs)
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - " View project at https://wandb.ai/lifesciences/brain-tumor-segmentation" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - " View run at https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/62jrsztu" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import wandb\n", "\n", @@ -161,40 +70,10 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "42189439-2c3d-403b-915a-98f897d049e4", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Task01_BrainTumour.tar: 7.09GB [05:55, 21.4MB/s] " - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2024-04-18 22:17:59,197 - INFO - Downloaded: dataset/Task01_BrainTumour.tar\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2024-04-18 22:18:10,984 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", - "2024-04-18 22:18:10,985 - INFO - Writing into directory: dataset.\n" - ] - } - ], + "outputs": [], "source": [ "import os\n", "\n", @@ -237,20 +116,10 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "07461dbc-3056-4f06-bb1a-462246a35791", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Train Set Size: 388\n", - "Validation Set Size: 96\n", - "Test Set Size: 266\n" - ] - } - ], + "outputs": [], "source": [ "print(\"Train Set Size:\", len(train_dataset))\n", "print(\"Validation Set Size:\", len(val_dataset))\n", @@ -273,28 +142,10 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "9f1f35e5-927e-4baf-a351-652e7e99fe76", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./dataset)... Done. 24.3s\n" - ] - }, - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "artifact = wandb.Artifact(name=\"decathlon_brain_tumor\", type=\"dataset\")\n", "artifact.add_dir(local_path=\"./dataset/\")\n", @@ -311,49 +162,10 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "id": "25ea852b-04d7-4e94-97c3-45d972b21886", "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "388076d4600649e58671f5681e757f4d", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "VBox(children=(Label(value='14510.691 MB of 14510.691 MB uploaded\\r'), FloatProgress(value=1.0, max=1.0)))" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - " View run usual-brook-3 at: https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/62jrsztu
View project at: https://wandb.ai/lifesciences/brain-tumor-segmentation
Synced 6 W&B file(s), 0 media file(s), 1278 artifact file(s) and 1 other file(s)" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "Find logs at: ./wandb/run-20240418_221202-62jrsztu/logs" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "wandb.finish()" ] diff --git a/life-science/01_monai_decathlon_visualize.ipynb b/life-science/01_monai_decathlon_visualize.ipynb index 8047728e..78f2c0cd 100644 --- a/life-science/01_monai_decathlon_visualize.ipynb +++ b/life-science/01_monai_decathlon_visualize.ipynb @@ -27,19 +27,10 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "4def9c4c-89b9-4f02-9853-91624690dc4f", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33mDEPRECATION: flatbuffers 1.12.1-git20200711.33e2d80-dfsg1-0.6 has a non-standard version number. pip 24.1 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of flatbuffers or contact the author to suggest that they release a version with a conforming version number. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n", - "\u001b[0m" - ] - } - ], + "outputs": [], "source": [ "!pip install -q -U monai wandb" ] @@ -56,91 +47,10 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "69d72b50-1a24-4a32-97c8-6f859cc203df", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mgeekyrakshit\u001b[0m (\u001b[33mlifesciences\u001b[0m). Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n" - ] - }, - { - "data": { - "text/html": [ - "Tracking run with wandb version 0.16.6" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "Run data is saved locally in /home/ubuntu/edu/life-science/wandb/run-20240424_221832-rtpvhd7n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "Syncing run lilac-dew-12 to Weights & Biases (docs)
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - " View project at https://wandb.ai/lifesciences/brain-tumor-segmentation" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - " View run at https://wandb.ai/lifesciences/brain-tumor-segmentation/runs/rtpvhd7n" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import wandb\n", "\n", @@ -175,7 +85,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "1ce8d513-cd46-43c1-839e-0ae15f750a5e", "metadata": {}, "outputs": [], @@ -226,7 +136,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "id": "a542d505-3713-43f1-adf1-c33ade5696b1", "metadata": {}, "outputs": [], @@ -278,6 +188,7 @@ }, { "cell_type": "code", + "execution_count": null, "id": "5b6bf7fc-dc39-4247-9001-cd8832045b84", "metadata": {}, "outputs": [], @@ -298,19 +209,10 @@ }, { "cell_type": "code", + "execution_count": null, "id": "cc829ac0-ee0a-4924-889d-72fba089ec7b", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2024-04-24 22:19:13,147 - INFO - Verified 'Task01_BrainTumour.tar', md5: 240a19d752f0d9e9101544901065d872.\n", - "2024-04-24 22:19:13,149 - INFO - File exists: /home/ubuntu/edu/life-science/artifacts/decathlon_brain_tumor:v0/Task01_BrainTumour.tar, skipped downloading.\n", - "2024-04-24 22:19:13,150 - INFO - Non-empty folder exists in /home/ubuntu/edu/life-science/artifacts/decathlon_brain_tumor:v0/Task01_BrainTumour, skipped extracting.\n" - ] - } - ], + "outputs": [], "source": [ "from monai.apps import DecathlonDataset\n", "\n", @@ -397,6 +299,7 @@ }, { "cell_type": "code", + "execution_count": null, "id": "e9acc72e-7091-40d7-a97b-023df48d9e12", "metadata": {}, "outputs": [], @@ -478,6 +381,7 @@ }, { "cell_type": "code", + "execution_count": null, "id": "85601b14-e693-4cca-b8cf-5e3ce863bb18", "metadata": {}, "outputs": [], @@ -502,80 +406,10 @@ }, { "cell_type": "code", + "execution_count": null, "id": "386b6d4b-ebf6-4d9d-bc78-7ce489e0a926", "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "d2f78eb2ab5f4cc48e7b6a89cad7eb1c", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - " 0%| | 0/155 [00:00 Date: Thu, 2 May 2024 10:24:02 +0000 Subject: [PATCH 06/21] update: baseline training notebook --- .../02_monai_train_baseline_model.ipynb | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/life-science/02_monai_train_baseline_model.ipynb b/life-science/02_monai_train_baseline_model.ipynb index 5298216f..196555f9 100644 --- a/life-science/02_monai_train_baseline_model.ipynb +++ b/life-science/02_monai_train_baseline_model.ipynb @@ -22,7 +22,7 @@ "wandb.init(\n", " project=\"brain-tumor-segmentation\",\n", " entity=\"lifesciences\",\n", - " job_type=\"train_baseline\"\n", + " job_type=\"train_baseline\",\n", ")\n", "\n", "config = wandb.config" @@ -73,7 +73,8 @@ " # merge labels 1, 2 and 3 to construct Whole Tumor\n", " result.append(\n", " torch.logical_or(\n", - " torch.logical_or(data_dict[key] == 2, data_dict[key] == 3), data_dict[key] == 1\n", + " 
torch.logical_or(data_dict[key] == 2, data_dict[key] == 3),\n", + " data_dict[key] == 1,\n", " )\n", " )\n", " # label 2 is Enhancing Tumor\n", @@ -205,7 +206,7 @@ "# apply train_transforms to the training dataset\n", "train_dataset.transform = train_transform\n", "\n", - "config.batch_size = 1\n", + "config.batch_size = 2\n", "\n", "# create the train_loader\n", "train_loader = DataLoader(\n", @@ -233,7 +234,7 @@ "source": [ "from monai.networks.nets import SegResNet\n", "\n", - "device = torch.device(\"cuda:0\")\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", "\n", "config.model_blocks_down = [1, 2, 2, 4]\n", "config.model_blocks_up = [1, 1, 1]\n", @@ -262,7 +263,7 @@ "source": [ "config.initial_learning_rate = 1e-4\n", "config.weight_decay = 1e-5\n", - "config.max_train_epochs = 50\n", + "config.max_train_epochs = 25\n", "\n", "# create optimizer\n", "optimizer = torch.optim.Adam(\n", @@ -338,6 +339,7 @@ "\n", "config.inference_roi_size = (240, 240, 160)\n", "\n", + "\n", "def inference(model, input):\n", " def _compute(input):\n", " return sliding_window_inference(\n", @@ -385,20 +387,21 @@ "from tqdm.auto import tqdm\n", "from monai.data import decollate_batch\n", "\n", - "\n", "config.validation_intervals = 1\n", "config.checkpoint_dir = \"./checkpoints\"\n", "\n", "# Create checkpoint directory\n", "os.makedirs(config.checkpoint_dir, exist_ok=True)\n", "\n", - "for epoch in tqdm(range(config.max_train_epochs), desc=\"Training:\"):\n", + "epoch_progress_bar = tqdm(range(config.max_train_epochs), desc=\"Training:\")\n", + "\n", + "for epoch in epoch_progress_bar:\n", " model.train()\n", " epoch_loss = 0\n", "\n", " total_batch_steps = len(train_dataset) // train_loader.batch_size\n", " batch_progress_bar = tqdm(train_loader, total=total_batch_steps, leave=False)\n", - " \n", + "\n", " # Training Step\n", " for batch_data in batch_progress_bar:\n", " inputs, labels = (\n", @@ -454,12 +457,12 @@ "\n", " checkpoint_path = os.path.join(config.checkpoint_dir, \"model.pth\")\n", " torch.save(model.state_dict(), checkpoint_path)\n", - " \n", + "\n", " # Log and versison model checkpoints using W&B artifacts.\n", " wandb.log_model(\n", " checkpoint_path,\n", " name=f\"{wandb.run.id}-checkpoint\",\n", - " aliases=[f\"epoch_{epoch}\"]\n", + " aliases=[f\"epoch_{epoch}\"],\n", " )\n", "\n", " # Log validation metrics to W&B dashboard.\n", From f7ff05166c29572692777eb42c03fdcc7172ef8b Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Thu, 2 May 2024 13:59:53 +0000 Subject: [PATCH 07/21] update: training baseline model --- .../02_monai_train_baseline_model.ipynb | 257 +++++++++++++++++- 1 file changed, 244 insertions(+), 13 deletions(-) diff --git a/life-science/02_monai_train_baseline_model.ipynb b/life-science/02_monai_train_baseline_model.ipynb index 196555f9..43fd1bc2 100644 --- a/life-science/02_monai_train_baseline_model.ipynb +++ b/life-science/02_monai_train_baseline_model.ipynb @@ -1,5 +1,27 @@ { "cells": [ + { + "cell_type": "markdown", + "id": "6d4fbce3", + "metadata": {}, + "source": [ + "# Train a Baseline Segmentation Model\n", + "\n", + "In this notebook we will learn:\n", + "- We will learn how to use specific MONAI APIs to write our training workflow, including a SoTA neural network architecture and loss function and metrics for our task.\n", + "- Use Weights & Biases for tracking our experiments and logging and verisioning our model checkpoints." 
+ ] + }, + { + "cell_type": "markdown", + "id": "0c6e5724", + "metadata": {}, + "source": [ + "## 🌴 Setup and Installation\n", + "\n", + "First, let us install the latest version of both MONAI and Weights and Biases." + ] + }, { "cell_type": "code", "execution_count": null, @@ -10,6 +32,16 @@ "!pip install -q -U monai wandb" ] }, + { + "cell_type": "markdown", + "id": "c4f9e1e8", + "metadata": {}, + "source": [ + "## 🌳 Initialize a W&B Run\n", + "\n", + "We will start a new W&B run to start tracking our experiment." + ] + }, { "cell_type": "code", "execution_count": null, @@ -23,11 +55,37 @@ " project=\"brain-tumor-segmentation\",\n", " entity=\"lifesciences\",\n", " job_type=\"train_baseline\",\n", - ")\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "27f85008", + "metadata": {}, + "source": [ + "# 🌼 Reproducibility and Configuration Management\n", "\n", + "`wandb.config` allows us to easily define and manage the configurations of our experiments. This includes hyperparameters, model settings, and any other experiment variables that we use in a particular run. By centralizing this information, we can ensure consistency across runs and make your experiments more organized and reproducible." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c26047ad", + "metadata": {}, + "outputs": [], + "source": [ "config = wandb.config" ] }, + { + "cell_type": "markdown", + "id": "6ef790e1", + "metadata": {}, + "source": [ + "Next, we set random seed for modules to enable deterministic training by setting a global seed using `monai.utils.set_determinism`. Setting a random seed (or multiple random seeds) and storing them as a configuration, we can make sure that a particular run is reproducible." + ] + }, { "cell_type": "code", "execution_count": null, @@ -41,6 +99,28 @@ "set_determinism(seed=config.seed)" ] }, + { + "cell_type": "markdown", + "id": "9333a301", + "metadata": {}, + "source": [ + "## 💿 Loading and Transforming the Data\n", + "\n", + "We will now learn using the [`monai.transforms`](https://docs.monai.io/en/stable/transforms.html) API to create and apply transforms to our data.\n", + "\n", + "### Creating a Custom Transform\n", + "\n", + "First, we demonstrate the creation of a custom transform `ConvertToMultiChannelBasedOnBratsClassesd` using [`monai.transforms.MapTransform`](https://docs.monai.io/en/stable/transforms.html#maptransform) that converts labels to multi-channel tensors based on brats18 classes:\n", + "- label 1 is the necrotic and non-enhancing tumor core\n", + "- label 2 is the peritumoral edema\n", + "- label 3 is the GD-enhancing tumor.\n", + "\n", + "The target classes for the semantic segmentation task after applying this transform on the dataset will be\n", + "- Tumor core\n", + "- Whole tumor\n", + "- Enhancing tumor" + ] + }, { "cell_type": "code", "execution_count": null, @@ -83,6 +163,16 @@ " return data_dict" ] }, + { + "cell_type": "markdown", + "id": "052b23d5", + "metadata": {}, + "source": [ + "Next, we compose all the necessary transforms for the training and validations sets of the data respectively using [`monai.transforms.Compose`](https://docs.monai.io/en/stable/transforms.html#monai.transforms.Compose).\n", + "\n", + "The difference between the set of transforms for is that for the training dataset we're applying several transforms that are augmenting the data that are not applied to the validation data." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -113,43 +203,72 @@ " [\n", " # load 4 Nifti images and stack them together\n", " LoadImaged(keys=[\"image\", \"label\"]),\n", + " # Ensure loaded images are in channels-first format\n", " EnsureChannelFirstd(keys=\"image\"),\n", + " # Ensure the input data to be a PyTorch Tensor or numpy array\n", " EnsureTyped(keys=[\"image\", \"label\"]),\n", + " # Convert labels to multi-channels based on brats18 classes\n", " ConvertToMultiChannelBasedOnBratsClassesd(keys=\"label\"),\n", + " # Change the input image’s orientation into the specified based on axis codes\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", + " # Resample the input images to the specified pixel dimension\n", " Spacingd(\n", " keys=[\"image\", \"label\"],\n", " pixdim=(1.0, 1.0, 1.0),\n", " mode=(\"bilinear\", \"nearest\"),\n", " ),\n", + " # Augmentation: Crop image with random size or specific size ROI\n", " RandSpatialCropd(\n", " keys=[\"image\", \"label\"], roi_size=config.roi_size, random_size=False\n", " ),\n", + " \n", + " # Augmentation: Randomly flip the image on the specified axes\n", " RandFlipd(keys=[\"image\", \"label\"], prob=0.5, spatial_axis=0),\n", " RandFlipd(keys=[\"image\", \"label\"], prob=0.5, spatial_axis=1),\n", " RandFlipd(keys=[\"image\", \"label\"], prob=0.5, spatial_axis=2),\n", + " \n", + " # Normalize input image intensity\n", " NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n", + " \n", + " # Augmentation: Randomly scale the image intensity\n", " RandScaleIntensityd(keys=\"image\", factors=0.1, prob=1.0),\n", " RandShiftIntensityd(keys=\"image\", offsets=0.1, prob=1.0),\n", " ]\n", ")\n", "val_transform = Compose(\n", " [\n", + " # load 4 Nifti images and stack them together\n", " LoadImaged(keys=[\"image\", \"label\"]),\n", + " # Ensure loaded images are in channels-first format\n", " EnsureChannelFirstd(keys=\"image\"),\n", + " # Ensure the input data to be a PyTorch Tensor or numpy array\n", " EnsureTyped(keys=[\"image\", \"label\"]),\n", + " # Convert labels to multi-channels based on brats18 classes\n", " ConvertToMultiChannelBasedOnBratsClassesd(keys=\"label\"),\n", + " # Change the input image’s orientation into the specified based on axis codes\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", + " # Resample the input images to the specified pixel dimension\n", " Spacingd(\n", " keys=[\"image\", \"label\"],\n", " pixdim=(1.0, 1.0, 1.0),\n", " mode=(\"bilinear\", \"nearest\"),\n", " ),\n", + " # Normalize input image intensity\n", " NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n", " ]\n", ")" ] }, + { + "cell_type": "markdown", + "id": "fbd8e71d", + "metadata": {}, + "source": [ + "For loading the dataset, we first fetch it from the W&B dataset artifact that we had created earlier. This enables us to use the dataset as an input artifact to our visualization run, and establish the necessary lineage for our experiment.\n", + "\n", + "![](./assets/artifact_usage.png)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -163,6 +282,14 @@ "artifact_dir = artifact.download()" ] }, + { + "cell_type": "markdown", + "id": "8b5cc684", + "metadata": {}, + "source": [ + "We now use the [`monai.apps.DecathlonDataset`](https://docs.monai.io/en/stable/apps.html#monai.apps.DecathlonDataset) to load our dataset and apply the transforms we defined on the data samples so that we can visualize it." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -174,15 +301,20 @@ "\n", "config.num_workers = 4\n", "\n", + "# Create the dataset for the training split\n", + "# of the brain tumor segmentation dataset\n", "train_dataset = DecathlonDataset(\n", " root_dir=artifact_dir,\n", " task=\"Task01_BrainTumour\",\n", - " transform=val_transform,\n", + " transform=train_transform,\n", " section=\"training\",\n", " download=False,\n", " cache_rate=0.0,\n", " num_workers=config.num_workers,\n", ")\n", + "\n", + "# Create the dataset for the validation split\n", + "# of the brain tumor segmentation dataset\n", "val_dataset = DecathlonDataset(\n", " root_dir=artifact_dir,\n", " task=\"Task01_BrainTumour\",\n", @@ -194,6 +326,14 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "472c90a0", + "metadata": {}, + "source": [ + "We now create DataLoaders for the train and validation datasets respectively using [`monai.data.DataLoader`](https://docs.monai.io/en/stable/data.html#dataloader) which provides an iterable over the given dataset." + ] + }, { "cell_type": "code", "execution_count": null, @@ -203,9 +343,6 @@ "source": [ "from monai.data import DataLoader\n", "\n", - "# apply train_transforms to the training dataset\n", - "train_dataset.transform = train_transform\n", - "\n", "config.batch_size = 2\n", "\n", "# create the train_loader\n", @@ -225,6 +362,16 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "b5fb6112", + "metadata": {}, + "source": [ + "## 🤖 Creating the Model, Loss, and Optimizer\n", + "\n", + "We will be training a **SegResNet** model based on the paper [3D MRI brain tumor segmentation using auto-encoder regularization](https://arxiv.org/pdf/1810.11654.pdf). The [SegResNet](https://docs.monai.io/en/stable/networks.html#segresnet) model that comes implemented as a PyTorch Module as part of the [`monai.networks.nets`](https://docs.monai.io/en/stable/networks.html#nets) API that provides out-of-the-box implementations of SoTA neural network models for different medical imaging tasks." + ] + }, { "cell_type": "code", "execution_count": null, @@ -254,6 +401,14 @@ ").to(device)" ] }, + { + "cell_type": "markdown", + "id": "e64fbd02", + "metadata": {}, + "source": [ + "We will be using [Adam Optimizer](https://pytorch.org/docs/stable/generated/torch.optim.Adam.html) and the [cosine annealing schedule](https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.CosineAnnealingLR.html) to schedule our learning rate. This approach is designed to help in finding global minima in the optimization landscape and to provide a form of reset mechanism during training, which can improve the performance of the model." + ] + }, { "cell_type": "code", "execution_count": null, @@ -278,6 +433,14 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "ec0d91b5", + "metadata": {}, + "source": [ + "Next, we would define the loss as multi-label DiceLoss as proposed by the paper [V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation](https://arxiv.org/abs/1606.04797) using the [`monai.losses`](https://docs.monai.io/en/stable/losses.html) API and the corresponding dice metrics using the [`monai.metrics`](https://docs.monai.io/en/stable/metrics.html) API." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -316,6 +479,30 @@ "post_trans = Compose([Activations(sigmoid=True), AsDiscrete(threshold=0.5)])" ] }, + { + "cell_type": "markdown", + "id": "dfae7eb0", + "metadata": {}, + "source": [ + "## 🚀 Automatic Mixed Precision\n", + "\n", + "Mixed precision training is a technique used in training neural networks that utilizes both 16-bit and 32-bit floating-point types for different parts of the computation, rather than using a single precision type throughout the entire process. This method is primarily aimed at accelerating the training process while also reducing the memory usage of the models.\n", + "\n", + "We will be using [`torch.amp`](https://pytorch.org/docs/stable/amp.html#module-torch.amp) provides convenience methods for mixed precision, where some operations use the `torch.float32` datatype and other operations use lower precision floating point datatype such as `torch.float16` or `torch.bfloat16`." + ] + }, + { + "cell_type": "markdown", + "id": "bba8633e", + "metadata": {}, + "source": [ + "### ⚖️ Gradient and Loss Scaling\n", + "\n", + "If the forward pass for a particular op has float16 inputs, the backward pass for that op will produce float16 gradients. Gradient values with small magnitudes may not be representable in float16. These values will **gradient underflow**, so the update for the corresponding parameters will be lost. \n", + "\n", + "In order to counteract the gradient underflow issues of FP16, especially in handling small gradient values, gradient and loss scaling is applied. This involves scaling up the loss before the gradient computation and scaling it back down afterwards. We will be using [`torch.cuda.amp.GradScaler`](https://pytorch.org/docs/stable/amp.html#gradient-scaling) to perform the scaling." + ] + }, { "cell_type": "code", "execution_count": null, @@ -328,6 +515,14 @@ "torch.backends.cudnn.benchmark = True" ] }, + { + "cell_type": "markdown", + "id": "28ba3120", + "metadata": {}, + "source": [ + "Next, we write a utility function to perform sliding window inference using [`from monai.inferers.sliding_window_inference`](https://docs.monai.io/en/stable/inferers.html#sliding-window-inference-function) and [AMP autocast](https://pytorch.org/docs/stable/amp.html#autocasting)." + ] + }, { "cell_type": "code", "execution_count": null, @@ -354,6 +549,26 @@ " return _compute(input)" ] }, + { + "cell_type": "markdown", + "id": "2faf2a8f", + "metadata": {}, + "source": [ + "## 🦾 Training the Model\n", + "\n", + "Let's finally get to training the model!" + ] + }, + { + "cell_type": "markdown", + "id": "0736f970", + "metadata": {}, + "source": [ + "### 🐝 Customize Log Axes on W&B\n", + "\n", + "We will use Use [`wandb.define_metric`](https://docs.wandb.ai/guides/track/log/customize-logging-axes) to set a custom x axis for our W&B charts. Custom x-axes are useful in contexts where you need to log to different time steps in the past during training, asynchronously. For example, for training our brain tumor segmentation model, we can log the training loss and metrics every training step but log the validation metrics every epoch." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -366,14 +581,15 @@ "wandb.define_metric(\"batch/batch_step\")\n", "wandb.define_metric(\"batch/*\", step_metric=\"batch/batch_step\")\n", "wandb.define_metric(\"validation/validation_step\")\n", - "wandb.define_metric(\"validation/*\", step_metric=\"validation/validation_step\")\n", - "\n", - "batch_step = 0\n", - "validation_step = 0\n", - "metric_values = []\n", - "metric_values_tumor_core = []\n", - "metric_values_whole_tumor = []\n", - "metric_values_enhanced_tumor = []" + "wandb.define_metric(\"validation/*\", step_metric=\"validation/validation_step\")" + ] + }, + { + "cell_type": "markdown", + "id": "3b93fc74", + "metadata": {}, + "source": [ + "Next, we write simple a PyTorch-based training loop to train the brain tumor segmentation model." ] }, { @@ -393,6 +609,13 @@ "# Create checkpoint directory\n", "os.makedirs(config.checkpoint_dir, exist_ok=True)\n", "\n", + "batch_step = 0\n", + "validation_step = 0\n", + "metric_values = []\n", + "metric_values_tumor_core = []\n", + "metric_values_whole_tumor = []\n", + "metric_values_enhanced_tumor = []\n", + "\n", "epoch_progress_bar = tqdm(range(config.max_train_epochs), desc=\"Training:\")\n", "\n", "for epoch in epoch_progress_bar:\n", @@ -478,6 +701,14 @@ " validation_step += 1" ] }, + { + "cell_type": "markdown", + "id": "ea1663c6", + "metadata": {}, + "source": [ + "Now we end the experiment by calling `wandb.finish()`." + ] + }, { "cell_type": "code", "execution_count": null, From e26fff4c12bc41a51b07069b29ca79e54df871fa Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Mon, 6 May 2024 19:48:12 +0000 Subject: [PATCH 08/21] update: traininig notebook --- .../02_monai_train_baseline_model.ipynb | 183 ++++++++++++------ 1 file changed, 120 insertions(+), 63 deletions(-) diff --git a/life-science/02_monai_train_baseline_model.ipynb b/life-science/02_monai_train_baseline_model.ipynb index 43fd1bc2..df8660cc 100644 --- a/life-science/02_monai_train_baseline_model.ipynb +++ b/life-science/02_monai_train_baseline_model.ipynb @@ -2,19 +2,19 @@ "cells": [ { "cell_type": "markdown", - "id": "6d4fbce3", + "id": "a93e6b75-b1d9-4703-8834-ab3fcd8f934b", "metadata": {}, "source": [ "# Train a Baseline Segmentation Model\n", - "\n", "In this notebook we will learn:\n", + "\n", "- We will learn how to use specific MONAI APIs to write our training workflow, including a SoTA neural network architecture and loss function and metrics for our task.\n", "- Use Weights & Biases for tracking our experiments and logging and verisioning our model checkpoints." 
] }, { "cell_type": "markdown", - "id": "0c6e5724", + "id": "95118e9e-e6d0-4bde-bd5b-af792ca8153a", "metadata": {}, "source": [ "## 🌴 Setup and Installation\n", @@ -25,7 +25,7 @@ { "cell_type": "code", "execution_count": null, - "id": "04a51c32-6279-428d-90e8-96be4e4953c4", + "id": "d3ab1bc3-d503-4e4c-afd1-ad77d72ac472", "metadata": {}, "outputs": [], "source": [ @@ -34,7 +34,7 @@ }, { "cell_type": "markdown", - "id": "c4f9e1e8", + "id": "6a4f71b2-ffb6-475c-a563-1cb631e33d84", "metadata": {}, "source": [ "## 🌳 Initialize a W&B Run\n", @@ -45,7 +45,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d78244d6-353b-4dd8-aa51-d6a162ffbd24", + "id": "b258cfa2-3795-4502-8878-aff469ba5077", "metadata": {}, "outputs": [], "source": [ @@ -60,10 +60,10 @@ }, { "cell_type": "markdown", - "id": "27f85008", + "id": "7ad41880-1613-489b-a18a-80986a74964a", "metadata": {}, "source": [ - "# 🌼 Reproducibility and Configuration Management\n", + "## 🌼 Reproducibility and Configuration Management\n", "\n", "`wandb.config` allows us to easily define and manage the configurations of our experiments. This includes hyperparameters, model settings, and any other experiment variables that we use in a particular run. By centralizing this information, we can ensure consistency across runs and make your experiments more organized and reproducible." ] @@ -71,7 +71,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c26047ad", + "id": "e4b1832f-43fb-41b7-bd4e-ee7d31b44b39", "metadata": {}, "outputs": [], "source": [ @@ -80,7 +80,7 @@ }, { "cell_type": "markdown", - "id": "6ef790e1", + "id": "ab7693aa-5686-463d-aa8f-be92bec82c31", "metadata": {}, "source": [ "Next, we set random seed for modules to enable deterministic training by setting a global seed using `monai.utils.set_determinism`. Setting a random seed (or multiple random seeds) and storing them as a configuration, we can make sure that a particular run is reproducible." @@ -89,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a3e692b9-5f34-4d32-922b-836c92aee0e5", + "id": "59460776-36a9-4e9e-8955-a93453aaa310", "metadata": {}, "outputs": [], "source": [ @@ -101,7 +101,7 @@ }, { "cell_type": "markdown", - "id": "9333a301", + "id": "0d2da2f5-b1a8-4215-927c-dc5884b0b41f", "metadata": {}, "source": [ "## 💿 Loading and Transforming the Data\n", @@ -124,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7534ed76-7b3e-4c66-8f5f-4358c8392472", + "id": "4cef0b54-8bdc-4bee-9b77-c16745bcec2a", "metadata": {}, "outputs": [], "source": [ @@ -165,18 +165,18 @@ }, { "cell_type": "markdown", - "id": "052b23d5", + "id": "1f38e5d1-e3de-4bda-9b89-67193a6baca2", "metadata": {}, "source": [ - "Next, we compose all the necessary transforms for the training and validations sets of the data respectively using [`monai.transforms.Compose`](https://docs.monai.io/en/stable/transforms.html#monai.transforms.Compose).\n", + "Next, we compose all the necessary transforms for visualizing the data using [`monai.transforms.Compose`](https://docs.monai.io/en/stable/transforms.html#monai.transforms.Compose).\n", "\n", - "The difference between the set of transforms for is that for the training dataset we're applying several transforms that are augmenting the data that are not applied to the validation data." + "**Note:** During training, we will apply a differnt set of transforms to the data." 
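For reference, the deterministic part of that pipeline looks roughly like the sketch below; the training pipeline adds random crops, flips, and intensity augmentations on top of these. The multi-channel label conversion is assumed to be importable from the `utils.py` module introduced later in this series.

```python
from monai.transforms import (
    Compose,
    LoadImaged,
    EnsureChannelFirstd,
    EnsureTyped,
    Orientationd,
    Spacingd,
    NormalizeIntensityd,
)

# Assumed helper: converts BraTS labels into TC / WT / ET channels.
from utils import ConvertToMultiChannelBasedOnBratsClassesd

val_transform = Compose(
    [
        LoadImaged(keys=["image", "label"]),
        EnsureChannelFirstd(keys="image"),
        EnsureTyped(keys=["image", "label"]),
        ConvertToMultiChannelBasedOnBratsClassesd(keys="label"),
        Orientationd(keys=["image", "label"], axcodes="RAS"),
        Spacingd(
            keys=["image", "label"],
            pixdim=(1.0, 1.0, 1.0),
            mode=("bilinear", "nearest"),
        ),
        NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
    ]
)
```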
] }, { "cell_type": "code", "execution_count": null, - "id": "89d511f9-492f-4677-bd1f-e97156144e47", + "id": "62a9e73a-aeb1-49b2-b06b-d09857b74966", "metadata": {}, "outputs": [], "source": [ @@ -261,7 +261,7 @@ }, { "cell_type": "markdown", - "id": "fbd8e71d", + "id": "14df1019-7cf6-4baa-baa1-fa2085da17bb", "metadata": {}, "source": [ "For loading the dataset, we first fetch it from the W&B dataset artifact that we had created earlier. This enables us to use the dataset as an input artifact to our visualization run, and establish the necessary lineage for our experiment.\n", @@ -272,7 +272,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3ce52f78-48d1-4b98-940c-c3cb31ecb5b9", + "id": "35b2d2c9-8e7f-479b-a822-deb0984d22ae", "metadata": {}, "outputs": [], "source": [ @@ -284,16 +284,16 @@ }, { "cell_type": "markdown", - "id": "8b5cc684", + "id": "8fd9de15-afa4-4ccf-9d4f-78b90ae3db8f", "metadata": {}, "source": [ - "We now use the [`monai.apps.DecathlonDataset`](https://docs.monai.io/en/stable/apps.html#monai.apps.DecathlonDataset) to load our dataset and apply the transforms we defined on the data samples so that we can visualize it." + "We now use the [`monai.apps.DecathlonDataset`](https://docs.monai.io/en/stable/apps.html#monai.apps.DecathlonDataset) to load our dataset and apply the transforms we defined on the data samples so that we use them for training and validation." ] }, { "cell_type": "code", "execution_count": null, - "id": "cf4ebc99-581b-4acc-853f-dab9c9772592", + "id": "16faedc0-f50a-4289-9fa4-6948549ea74f", "metadata": {}, "outputs": [], "source": [ @@ -328,7 +328,7 @@ }, { "cell_type": "markdown", - "id": "472c90a0", + "id": "e8eb84cf-eea0-431b-a66e-9afd2b7144ba", "metadata": {}, "source": [ "We now create DataLoaders for the train and validation datasets respectively using [`monai.data.DataLoader`](https://docs.monai.io/en/stable/data.html#dataloader) which provides an iterable over the given dataset." @@ -337,7 +337,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7f2192be-3276-446e-b8c6-65324123a17b", + "id": "4ad33748-060f-4652-87ed-f8b56de02824", "metadata": {}, "outputs": [], "source": [ @@ -364,7 +364,7 @@ }, { "cell_type": "markdown", - "id": "b5fb6112", + "id": "ba68b8b8-9527-482d-8f4f-da8c71dcbd87", "metadata": {}, "source": [ "## 🤖 Creating the Model, Loss, and Optimizer\n", @@ -375,7 +375,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1aa76d0b-7e9b-4e2b-964e-ca9aada74eb2", + "id": "1d2a5e5a-84ee-4fbe-9b06-1fcf9639d443", "metadata": {}, "outputs": [], "source": [ @@ -403,7 +403,7 @@ }, { "cell_type": "markdown", - "id": "e64fbd02", + "id": "20863f94-f7d0-4b82-8a55-41a1952d1cae", "metadata": {}, "source": [ "We will be using [Adam Optimizer](https://pytorch.org/docs/stable/generated/torch.optim.Adam.html) and the [cosine annealing schedule](https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.CosineAnnealingLR.html) to schedule our learning rate. This approach is designed to help in finding global minima in the optimization landscape and to provide a form of reset mechanism during training, which can improve the performance of the model." 
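A minimal sketch of this optimizer and scheduler setup, assuming `model` is the network defined above and using placeholder values for what the notebook reads from `wandb.config`:

```python
import torch

# Adam with weight decay, plus cosine annealing of the learning rate
# over the whole run (T_max equals the number of training epochs).
max_train_epochs = 50  # placeholder for config.max_train_epochs
optimizer = torch.optim.Adam(
    model.parameters(),
    lr=1e-4,            # placeholder for config.initial_learning_rate
    weight_decay=1e-5,  # placeholder for config.weight_decay
)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    optimizer, T_max=max_train_epochs
)

# Per epoch: log lr_scheduler.get_last_lr()[0], then call lr_scheduler.step().
```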
@@ -412,7 +412,7 @@
   {
     "cell_type": "code",
     "execution_count": null,
-    "id": "7e9aba9d-055c-45a8-93c8-5a27c8aab31b",
+    "id": "a9a9b1a6-d127-4fe9-8831-a33b71dbe438",
     "metadata": {},
     "outputs": [],
     "source": [
@@ -435,7 +435,7 @@
   },
   {
     "cell_type": "markdown",
-    "id": "ec0d91b5",
+    "id": "541dc8c6-009e-4115-81c5-d35386a92b6d",
     "metadata": {},
     "source": [
     "Next, we would define the loss as multi-label DiceLoss as proposed by the paper [V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation](https://arxiv.org/abs/1606.04797) using the [`monai.losses`](https://docs.monai.io/en/stable/losses.html) API and the corresponding dice metrics using the [`monai.metrics`](https://docs.monai.io/en/stable/metrics.html) API."
@@ -444,7 +444,7 @@
   {
     "cell_type": "code",
     "execution_count": null,
-    "id": "8cd11cb2-61a3-435f-9b89-986e459db757",
+    "id": "b2276a62-24a9-4352-8058-eb807e16819e",
     "metadata": {},
     "outputs": [],
     "source": [
@@ -468,7 +468,7 @@
   {
     "cell_type": "code",
     "execution_count": null,
-    "id": "ff49cb10-7718-4147-9901-85ee59fbe8ca",
+    "id": "187d9b11-3df9-4ae8-96c0-e7c16468878d",
     "metadata": {},
     "outputs": [],
     "source": [
@@ -481,24 +481,18 @@
   },
   {
     "cell_type": "markdown",
-    "id": "dfae7eb0",
+    "id": "70aa2a71-9ad1-43f8-97e8-00ea458b7c47",
     "metadata": {},
     "source": [
     "## 🚀 Automatic Mixed Precision\n",
     "\n",
     "Mixed precision training is a technique used in training neural networks that utilizes both 16-bit and 32-bit floating-point types for different parts of the computation, rather than using a single precision type throughout the entire process. This method is primarily aimed at accelerating the training process while also reducing the memory usage of the models.\n",
     "\n",
-    "We will be using [`torch.amp`](https://pytorch.org/docs/stable/amp.html#module-torch.amp) provides convenience methods for mixed precision, where some operations use the `torch.float32` datatype and other operations use lower precision floating point datatype such as `torch.float16` or `torch.bfloat16`."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "bba8633e",
-   "metadata": {},
-   "source": [
+    "We will be using [`torch.amp`](https://pytorch.org/docs/stable/amp.html#module-torch.amp), which provides convenience methods for mixed precision, where some operations use the `torch.float32` datatype and other operations use a lower-precision floating-point datatype such as `torch.float16` or `torch.bfloat16`.\n",
+    "\n",
     "### ⚖️ Gradient and Loss Scaling\n",
     "\n",
-    "If the forward pass for a particular op has float16 inputs, the backward pass for that op will produce float16 gradients. Gradient values with small magnitudes may not be representable in float16. These values will **gradient underflow**, so the update for the corresponding parameters will be lost. \n",
+    "If the forward pass for a particular op has float16 inputs, the backward pass for that op will produce float16 gradients. Gradient values with small magnitudes may not be representable in float16. These values will underflow (a problem known as **gradient underflow**), so the update for the corresponding parameters will be lost.\n",
     "\n",
     "In order to counteract the gradient underflow issues of FP16, especially in handling small gradient values, gradient and loss scaling is applied. This involves scaling up the loss before the gradient computation and scaling it back down afterwards. We will be using [`torch.cuda.amp.GradScaler`](https://pytorch.org/docs/stable/amp.html#gradient-scaling) to perform the scaling."
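The scaler wraps the usual forward and backward pass. A minimal sketch of a single mixed-precision training step, assuming `model`, `optimizer`, `loss_function`, and a batch of `inputs` and `labels` are defined as in the notebook:

```python
import torch

scaler = torch.cuda.amp.GradScaler()

optimizer.zero_grad()
with torch.cuda.amp.autocast():
    outputs = model(inputs)
    loss = loss_function(outputs, labels)

# Scale the loss so that small FP16 gradients do not underflow, then let
# the scaler unscale them before the optimizer update.
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
```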
]
@@ -506,7 +500,7 @@
   {
     "cell_type": "code",
     "execution_count": null,
-    "id": "eed4d531-1db1-4c29-9dfa-8e570665f6aa",
+    "id": "9fd9b23e-d40d-462d-860a-b0303621e250",
     "metadata": {},
     "outputs": [],
     "source": [
@@ -517,16 +511,16 @@
   },
   {
     "cell_type": "markdown",
-    "id": "28ba3120",
+    "id": "7ff081e5-de2a-42e6-ba16-bc9048b9aaee",
    "metadata": {},
    "source": [
-    "Next, we write a utility function to perform sliding window inference using [`from monai.inferers.sliding_window_inference`](https://docs.monai.io/en/stable/inferers.html#sliding-window-inference-function) and [AMP autocast](https://pytorch.org/docs/stable/amp.html#autocasting)."
+    "Next, we write a utility function to perform sliding window inference using [`monai.inferers.sliding_window_inference`](https://docs.monai.io/en/stable/inferers.html#sliding-window-inference-function) and AMP autocast. This function will be used during the validation step in our training and validation loop."
     ]
   },
   {
     "cell_type": "code",
     "execution_count": null,
-    "id": "b5bb4253-3fcd-491d-a378-3a05b999cc65",
+    "id": "c6f51bb2-1c5b-4a16-b078-d6179c2bd083",
     "metadata": {},
     "outputs": [],
     "source": [
@@ -551,19 +545,12 @@
   },
   {
     "cell_type": "markdown",
-    "id": "2faf2a8f",
+    "id": "55423f02-8686-4a56-9778-09070b7edcc5",
     "metadata": {},
     "source": [
     "## 🦾 Training the Model\n",
+    "Let's finally get to training the model!\n",
     "\n",
-    "Let's finally get to training the model!"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "0736f970",
-   "metadata": {},
-   "source": [
     "### 🐝 Customize Log Axes on W&B\n",
     "\n",
     "We will use Use [`wandb.define_metric`](https://docs.wandb.ai/guides/track/log/customize-logging-axes) to set a custom x axis for our W&B charts. Custom x-axes are useful in contexts where you need to log to different time steps in the past during training, asynchronously. For example, for training our brain tumor segmentation model, we can log the training loss and metrics every training step but log the validation metrics every epoch."
     ]
   },
   {
     "cell_type": "code",
     "execution_count": null,
-    "id": "4de81064-3ef7-4cef-a5ed-6e1fd2733c49",
+    "id": "02d4563f-1522-416d-ad3d-fb9b936bb368",
     "metadata": {},
     "outputs": [],
     "source": [
@@ -586,16 +573,87 @@
   },
   {
     "cell_type": "markdown",
-    "id": "3b93fc74",
+    "id": "094a1fd5-0113-4e22-be81-7a30f754a10b",
     "metadata": {},
     "source": [
-    "Next, we write simple a PyTorch-based training loop to train the brain tumor segmentation model."
+    "### 🏋️ Training and Validation Loop\n",
+    "\n",
+    "Next, we write the training and validation loop for the brain tumor segmentation model. The training loop consists of three logical steps:\n",
+    "\n",
+    "1. **The training step**: In this step, we actually train the model by looping over the `train_loader`. Note that we use autocast to speed up the forward pass and loss calculation, and during backpropagation we use the gradient scaler to counteract gradient underflow. At the end of each batch step, we log the batch step under `batch/batch_step` and the training loss under `batch/train_loss`. This ensures that the training loss is logged under its own section against the batch step on the x-axis in the W&B workspace.
Here's how the training step is written:\n", + " \n", + " ```python\n", + " for batch_data in train_loader:\n", + " inputs, labels = (\n", + " batch_data[\"image\"].to(device),\n", + " batch_data[\"label\"].to(device),\n", + " )\n", + " optimizer.zero_grad()\n", + " with torch.cuda.amp.autocast():\n", + " outputs = model(inputs)\n", + " loss = loss_function(outputs, labels)\n", + " scaler.scale(loss).backward()\n", + " scaler.step(optimizer)\n", + " scaler.update()\n", + " epoch_loss += loss.item()\n", + " batch_progress_bar.set_description(f\"train_loss: {loss.item():.4f}:\")\n", + " ## Log batch-wise training loss to W&B\n", + " wandb.log({\"batch/batch_step\": batch_step, \"batch/train_loss\": loss.item()})\n", + " batch_step += 1\n", + " \n", + " ```\n", + "\n", + "2. **The epoch-wise logging step:** In this step, we log the learning rate and mean training loss for the epoch under the section `epoch/*`. We also update the learning rate using our learning rate scheduler after logging.\n", + "\n", + " ```python\n", + " wandb.log(\n", + " {\n", + " \"epoch/epoch_step\": epoch,\n", + " \"epoch/mean_train_loss\": total_epoch_loss / total_batch_steps,\n", + " \"epoch/learning_rate\": lr_scheduler.get_last_lr()[0],\n", + " }\n", + " )\n", + " lr_scheduler.step()\n", + " \n", + " ```\n", + "\n", + "3. **The validation step:** This step is executed at the interval of a certain number of epochs. In this step, we use the aforementioned `inference` function to predict the segmentation masks for the images from the validation dataloader and use `dice_metric` to calculate the dice coefficients for each of our target classes and log the dice coefficient values under the `validation/*` section. We also save our model checkpoint to W&B using `wandb.log_model` .\n", + "\n", + " ```python\n", + " for val_data in val_loader:\n", + " val_inputs, val_labels = (\n", + " val_data[\"image\"].to(device),\n", + " val_data[\"label\"].to(device),\n", + " )\n", + " val_outputs = inference(model, val_inputs)\n", + " val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]\n", + " dice_metric(y_pred=val_outputs, y=val_labels)\n", + " dice_metric_batch(y_pred=val_outputs, y=val_labels)\n", + "\n", + " wandb.log(\n", + " {\n", + " \"validation/validation_step\": validation_step,\n", + " \"validation/mean_dice\": metric_values[-1],\n", + " \"validation/mean_dice_tumor_core\": metric_values_tumor_core[-1],\n", + " \"validation/mean_dice_whole_tumor\": metric_values_whole_tumor[-1],\n", + " \"validation/mean_dice_enhanced_tumor\": metric_values_enhanced_tumor[-1],\n", + " }\n", + " )\n", + "\n", + " checkpoint_path = os.path.join(config.checkpoint_dir, \"model.pth\")\n", + " torch.save(model.state_dict(), checkpoint_path)\n", + " wandb.log_model(\n", + " checkpoint_path,\n", + " name=f\"{wandb.run.id}-checkpoint\",\n", + " aliases=[f\"epoch_{epoch}\"],\n", + " )\n", + " ```" ] }, { "cell_type": "code", "execution_count": null, - "id": "07b71668-13c2-4414-9f76-c5d78b99d32f", + "id": "7df12631-8e09-48d3-b0b0-655775ec22e4", "metadata": {}, "outputs": [], "source": [ @@ -644,7 +702,6 @@ " wandb.log({\"batch/batch_step\": batch_step, \"batch/train_loss\": loss.item()})\n", " batch_step += 1\n", "\n", - " lr_scheduler.step()\n", " epoch_loss /= total_batch_steps\n", " ## Log batch-wise training loss and learning rate to W&B\n", " wandb.log(\n", @@ -654,6 +711,7 @@ " \"epoch/learning_rate\": lr_scheduler.get_last_lr()[0],\n", " }\n", " )\n", + " lr_scheduler.step()\n", " epoch_progress_bar.set_description(f\"Training: 
train_loss: {epoch_loss:.4f}:\")\n", "\n", " # Validation and model checkpointing step\n", @@ -678,10 +736,9 @@ " dice_metric.reset()\n", " dice_metric_batch.reset()\n", "\n", + " # Log and versison model checkpoints using W&B artifacts.\n", " checkpoint_path = os.path.join(config.checkpoint_dir, \"model.pth\")\n", " torch.save(model.state_dict(), checkpoint_path)\n", - "\n", - " # Log and versison model checkpoints using W&B artifacts.\n", " wandb.log_model(\n", " checkpoint_path,\n", " name=f\"{wandb.run.id}-checkpoint\",\n", @@ -703,7 +760,7 @@ }, { "cell_type": "markdown", - "id": "ea1663c6", + "id": "2b161a87-6558-4fa4-8f1b-578f754a95bc", "metadata": {}, "source": [ "Now we end the experiment by calling `wandb.finish()`." @@ -712,7 +769,7 @@ { "cell_type": "code", "execution_count": null, - "id": "92aeee85-8a37-4a85-8808-e12fcb5c7b3b", + "id": "3600d23f-76d7-4702-b566-b17b9f412b4b", "metadata": {}, "outputs": [], "source": [ From 5cbf20a3788dc91e13d43b96963d2e9905c809e3 Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Tue, 7 May 2024 14:18:58 +0000 Subject: [PATCH 09/21] add: evaluation notebook --- life-science/03_monai_evaluate_model.ipynb | 418 +++++++++++++++++++++ 1 file changed, 418 insertions(+) create mode 100644 life-science/03_monai_evaluate_model.ipynb diff --git a/life-science/03_monai_evaluate_model.ipynb b/life-science/03_monai_evaluate_model.ipynb new file mode 100644 index 00000000..fc7541a0 --- /dev/null +++ b/life-science/03_monai_evaluate_model.ipynb @@ -0,0 +1,418 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Bain Tumor Segmentation Data\n", + "\n", + "In this notebook we will learn:\n", + "- how we can evaluate a pre-trained model checkpoint for brain tumor segmentation using MONAI and Weights & Biases.\n", + "- how we can visually compare the ground-truth labels with the predicted labels." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 🌴 Setup and Installation\n", + "\n", + "First, let us install the latest version of both MONAI and Weights and Biases." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install -q -U monai wandb" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 🌳 Initialize a W&B Run\n", + "\n", + "We will start a new W&B run to start tracking our experiment." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import wandb\n", + "\n", + "wandb.init(\n", + " project=\"brain-tumor-segmentation\",\n", + " entity=\"lifesciences\",\n", + " job_type=\"evaluate\"\n", + ")\n", + "\n", + "config = wandb.config" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from monai.utils import set_determinism\n", + "\n", + "config.seed = 0\n", + "set_determinism(seed=config.seed)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 💿 Loading and Transforming the Data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from utils import ConvertToMultiChannelBasedOnBratsClassesd\n", + "from monai.transforms import (\n", + " Compose,\n", + " LoadImaged,\n", + " NormalizeIntensityd,\n", + " Orientationd,\n", + " Spacingd,\n", + " EnsureTyped,\n", + " EnsureChannelFirstd,\n", + ")\n", + "\n", + "\n", + "transforms = Compose(\n", + " [\n", + " # load 4 Nifti images and stack them together\n", + " LoadImaged(keys=[\"image\", \"label\"]),\n", + " # Ensure loaded images are in channels-first format\n", + " EnsureChannelFirstd(keys=\"image\"),\n", + " # Ensure the input data to be a PyTorch Tensor or numpy array\n", + " EnsureTyped(keys=[\"image\", \"label\"]),\n", + " # Convert labels to multi-channels based on brats18 classes\n", + " ConvertToMultiChannelBasedOnBratsClassesd(keys=\"label\"),\n", + " # Change the input image’s orientation into the specified based on axis codes\n", + " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", + " # Resample the input images to the specified pixel dimension\n", + " Spacingd(\n", + " keys=[\"image\", \"label\"],\n", + " pixdim=(1.0, 1.0, 1.0),\n", + " mode=(\"bilinear\", \"nearest\"),\n", + " ),\n", + " # Normalize input image intensity\n", + " NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from monai.apps import DecathlonDataset\n", + "\n", + "\n", + "artifact = wandb.use_artifact(\n", + " \"lifesciences/brain-tumor-segmentation/decathlon_brain_tumor:latest\", type=\"dataset\"\n", + ")\n", + "artifact_dir = artifact.download()\n", + "\n", + "# Create the dataset for the test split\n", + "# of the brain tumor segmentation dataset\n", + "val_dataset = DecathlonDataset(\n", + " root_dir=artifact_dir,\n", + " task=\"Task01_BrainTumour\",\n", + " transform=transforms,\n", + " section=\"validation\",\n", + " download=False,\n", + " cache_rate=0.0,\n", + " num_workers=4,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from monai.networks.nets import SegResNet\n", + "\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", + "\n", + "config.model_blocks_down = [1, 2, 2, 4]\n", + "config.model_blocks_up = [1, 1, 1]\n", + "config.model_init_filters = 16\n", + "config.model_in_channels = 4\n", + "config.model_out_channels = 3\n", + "config.model_dropout_prob = 0.2\n", + "\n", + "# create model\n", + "model = SegResNet(\n", + " blocks_down=config.model_blocks_down,\n", + " blocks_up=config.model_blocks_up,\n", + " init_filters=config.model_init_filters,\n", + " in_channels=config.model_in_channels,\n", + " 
out_channels=config.model_out_channels,\n", + " dropout_prob=config.model_dropout_prob,\n", + ").to(device)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "model_artifact = wandb.use_artifact(\n", + " \"lifesciences/brain-tumor-segmentation/8vmqcqao-checkpoint:latest\",\n", + " type=\"model\",\n", + ")\n", + "model_artifact_dir = model_artifact.download()\n", + "model.load_state_dict(torch.load(os.path.join(model_artifact_dir, \"model.pth\")))\n", + "model.eval()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from monai.inferers import sliding_window_inference\n", + "\n", + "config.inference_roi_size = (240, 240, 160)\n", + "\n", + "\n", + "def inference(model, input):\n", + " def _compute(input):\n", + " return sliding_window_inference(\n", + " inputs=input,\n", + " roi_size=config.inference_roi_size,\n", + " sw_batch_size=1,\n", + " predictor=model,\n", + " overlap=0.5,\n", + " )\n", + "\n", + " with torch.cuda.amp.autocast():\n", + " return _compute(input)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from monai.metrics import DiceMetric\n", + "from monai.transforms import Activations, AsDiscrete\n", + "\n", + "dice_metric = DiceMetric(include_background=True, reduction=\"mean\")\n", + "dice_metric_batch = DiceMetric(include_background=True, reduction=\"mean_batch\")\n", + "postprocessing_transforms = Compose(\n", + " [Activations(sigmoid=True), AsDiscrete(threshold=0.5)]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from tqdm.auto import tqdm\n", + "\n", + "\n", + "def get_target_area_percentage(segmentation_map):\n", + " segmentation_map_list = segmentation_map.flatten().tolist()\n", + " return segmentation_map_list.count(1.0) * 100 / len(segmentation_map_list)\n", + "\n", + "\n", + "def log_predictions_into_tables(\n", + " sample_image,\n", + " sample_label,\n", + " predicted_label,\n", + " split: str = None,\n", + " data_idx: int = None,\n", + " table: wandb.Table = None,\n", + "):\n", + " sample_image = sample_image.cpu().numpy()\n", + " sample_label = sample_label.cpu().numpy()\n", + " predicted_label = predicted_label.cpu().numpy()\n", + " _, _, _, num_slices = sample_image.shape\n", + " with tqdm(total=num_slices, leave=False) as progress_bar:\n", + " for slice_idx in range(num_slices):\n", + " wandb_images = [\n", + " wandb.Image(\n", + " sample_image[0, :, :, slice_idx],\n", + " masks={\n", + " \"ground-truth/Tumor-Core\": {\n", + " \"mask_data\": sample_label[0, :, :, slice_idx],\n", + " \"class_labels\": {0: \"background\", 1: \"Tumor Core\"},\n", + " },\n", + " \"prediction/Tumor-Core\": {\n", + " \"mask_data\": predicted_label[0, :, :, slice_idx] * 2,\n", + " \"class_labels\": {0: \"background\", 2: \"Tumor Core\"},\n", + " },\n", + " },\n", + " ),\n", + " wandb.Image(\n", + " sample_image[0, :, :, slice_idx],\n", + " masks={\n", + " \"ground-truth/Whole-Tumor\": {\n", + " \"mask_data\": sample_label[1, :, :, slice_idx],\n", + " \"class_labels\": {0: \"background\", 1: \"Whole Tumor\"},\n", + " },\n", + " \"prediction/Whole-Tumor\": {\n", + " \"mask_data\": predicted_label[1, :, :, slice_idx] * 2,\n", + " \"class_labels\": {0: \"background\", 2: \"Whole Tumor\"},\n", + " },\n", + " },\n", + " ),\n", + " wandb.Image(\n", + " sample_image[0, :, :, slice_idx],\n", + " 
masks={\n", + " \"ground-truth/Enhancing-Tumor\": {\n", + " \"mask_data\": sample_label[2, :, :, slice_idx],\n", + " \"class_labels\": {0: \"background\", 1: \"Enhancing Tumor\"},\n", + " },\n", + " \"prediction/Enhancing-Tumor\": {\n", + " \"mask_data\": predicted_label[2, :, :, slice_idx] * 2,\n", + " \"class_labels\": {0: \"background\", 2: \"Enhancing Tumor\"},\n", + " },\n", + " },\n", + " ),\n", + " ]\n", + " tumor_area_percentage = {\n", + " \"Ground-Truth\": {\n", + " \"Tumor-Core-Area-Percentage\": get_target_area_percentage(\n", + " sample_label[0, :, :, slice_idx]\n", + " ),\n", + " \"Whole-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " sample_label[1, :, :, slice_idx]\n", + " ),\n", + " \"Enhancing-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " sample_label[2, :, :, slice_idx]\n", + " ),\n", + " },\n", + " \"Prediction\": {\n", + " \"Tumor-Core-Area-Percentage\": get_target_area_percentage(\n", + " predicted_label[0, :, :, slice_idx]\n", + " ),\n", + " \"Whole-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " predicted_label[1, :, :, slice_idx]\n", + " ),\n", + " \"Enhancing-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " predicted_label[2, :, :, slice_idx]\n", + " ),\n", + " },\n", + " }\n", + " table.add_data(\n", + " split, data_idx, slice_idx, tumor_area_percentage, *wandb_images\n", + " )\n", + " progress_bar.update(1)\n", + " return table" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create the prediction table\n", + "prediction_table = wandb.Table(\n", + " columns=[\n", + " \"Split\",\n", + " \"Data Index\",\n", + " \"Slice Index\",\n", + " \"Tumor-Area-Pixel-Percentage\",\n", + " \"Prediction/Tumor-Core\",\n", + " \"Prediction/Whole-Tumor\",\n", + " \"Prediction/Enhancing-Tumor\",\n", + " ]\n", + ")\n", + "\n", + "config.max_prediction_images_visualized = 1\n", + "\n", + "# Perform inference and visualization\n", + "with torch.no_grad():\n", + " config.max_prediction_images_visualized\n", + " max_samples = (\n", + " min(config.max_prediction_images_visualized, len(val_dataset))\n", + " if config.max_prediction_images_visualized > 0\n", + " else len(val_dataset)\n", + " )\n", + " progress_bar = tqdm(\n", + " enumerate(val_dataset[:max_samples]),\n", + " total=max_samples,\n", + " desc=\"Generating Predictions:\",\n", + " )\n", + " for data_idx, sample in progress_bar:\n", + " test_input, test_labels = (\n", + " torch.unsqueeze(sample[\"image\"], 0).to(device),\n", + " torch.unsqueeze(sample[\"label\"], 0).to(device),\n", + " )\n", + " test_output = inference(model, test_input)\n", + " test_output = postprocessing_transforms(test_output[0])\n", + " prediction_table = log_predictions_into_tables(\n", + " sample_image=sample[\"image\"],\n", + " sample_label=sample[\"label\"],\n", + " predicted_label=test_output,\n", + " data_idx=data_idx,\n", + " split=\"validation\",\n", + " table=prediction_table,\n", + " )\n", + "\n", + " wandb.log({\"Evaluation/Tumor-Segmentation-Prediction\": prediction_table})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# End the experiment\n", + "wandb.finish()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + 
"nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From f549ed1b72b6b99b8612e151503c2d5535658e4b Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Tue, 7 May 2024 15:15:13 +0000 Subject: [PATCH 10/21] add: utils --- life-science/utils.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 life-science/utils.py diff --git a/life-science/utils.py b/life-science/utils.py new file mode 100644 index 00000000..4d57d9a2 --- /dev/null +++ b/life-science/utils.py @@ -0,0 +1,32 @@ +import torch +from monai.transforms import MapTransform + + +class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform): + """ + Convert labels to multi-channels based on brats classes: + label 1 is the peritumoral edema + label 2 is the GD-enhancing tumor + label 3 is the necrotic and non-enhancing tumor core + The possible classes are TC (Tumor core), WT (Whole tumor), and ET (Enhancing tumor). + + Reference: https://github.com/Project-MONAI/tutorials/blob/main/3d_segmentation/brats_segmentation_3d.ipynb + + """ + + def __call__(self, data): + data_dict = dict(data) + for key in self.keys: + result = [] + # merge label 2 and label 3 to construct Tumor Core + result.append(torch.logical_or(data_dict[key] == 2, data_dict[key] == 3)) + # merge labels 1, 2 and 3 to construct Whole Tumor + result.append( + torch.logical_or( + torch.logical_or(data_dict[key] == 2, data_dict[key] == 3), data_dict[key] == 1 + ) + ) + # label 2 is Enhancing Tumor + result.append(data_dict[key] == 2) + data_dict[key] = torch.stack(result, axis=0).float() + return data_dict \ No newline at end of file From b44427df2bdaf8f6d06f1ab350578112d019a28f Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Tue, 7 May 2024 17:08:54 +0000 Subject: [PATCH 11/21] update: evaludation notebook --- life-science/03_monai_evaluate_model.ipynb | 86 +++++++++++++++++----- 1 file changed, 66 insertions(+), 20 deletions(-) diff --git a/life-science/03_monai_evaluate_model.ipynb b/life-science/03_monai_evaluate_model.ipynb index fc7541a0..e65c5932 100644 --- a/life-science/03_monai_evaluate_model.ipynb +++ b/life-science/03_monai_evaluate_model.ipynb @@ -223,7 +223,9 @@ "from monai.metrics import DiceMetric\n", "from monai.transforms import Activations, AsDiscrete\n", "\n", - "dice_metric = DiceMetric(include_background=True, reduction=\"mean\")\n", + "tumor_core_dice_metric = DiceMetric(include_background=True, reduction=\"mean\")\n", + "enhancing_tumor_dice_metric = DiceMetric(include_background=True, reduction=\"mean\")\n", + "whole_tumor_dice_metric = DiceMetric(include_background=True, reduction=\"mean\")\n", "dice_metric_batch = DiceMetric(include_background=True, reduction=\"mean_batch\")\n", "postprocessing_transforms = Compose(\n", " [Activations(sigmoid=True), AsDiscrete(threshold=0.5)]\n", @@ -236,6 +238,7 @@ "metadata": {}, "outputs": [], "source": [ + "import numpy as np\n", "from tqdm.auto import tqdm\n", "\n", "\n", @@ -244,6 +247,32 @@ " return segmentation_map_list.count(1.0) * 100 / len(segmentation_map_list)\n", "\n", "\n", + "def get_class_wise_dice_scores(sample_label, predicted_label, slice_idx):\n", + " sample_label = torch.from_numpy(sample_label).to(device)\n", + " predicted_label = torch.from_numpy(predicted_label).to(device)\n", + " tumor_core_dice_metric(\n", + " y_pred=torch.unsqueeze(predicted_label[1, :, :, slice_idx], dim=0),\n", + " 
y=torch.unsqueeze(sample_label[0, :, :, slice_idx], dim=0),\n", + " )\n", + " whole_tumor_dice_metric(\n", + " y_pred=torch.unsqueeze(predicted_label[1, :, :, slice_idx], dim=0),\n", + " y=torch.unsqueeze(sample_label[1, :, :, slice_idx], dim=0),\n", + " )\n", + " enhancing_tumor_dice_metric(\n", + " y_pred=torch.unsqueeze(predicted_label[2, :, :, slice_idx], dim=0),\n", + " y=torch.unsqueeze(sample_label[2, :, :, slice_idx], dim=0),\n", + " )\n", + " dice_scores = {\n", + " \"Tumor-Core\": tumor_core_dice_metric.aggregate().item(),\n", + " \"Enhancing-Tumor\": enhancing_tumor_dice_metric.aggregate().item(),\n", + " \"Whole-Tumor\": whole_tumor_dice_metric.aggregate().item(),\n", + " }\n", + " tumor_core_dice_metric.reset()\n", + " whole_tumor_dice_metric.reset()\n", + " enhancing_tumor_dice_metric.reset()\n", + " return dice_scores\n", + "\n", + "\n", "def log_predictions_into_tables(\n", " sample_image,\n", " sample_label,\n", @@ -258,6 +287,7 @@ " _, _, _, num_slices = sample_image.shape\n", " with tqdm(total=num_slices, leave=False) as progress_bar:\n", " for slice_idx in range(num_slices):\n", + " tumor_core_dice_metric\n", " wandb_images = [\n", " wandb.Image(\n", " sample_image[0, :, :, slice_idx],\n", @@ -301,30 +331,38 @@ " ]\n", " tumor_area_percentage = {\n", " \"Ground-Truth\": {\n", - " \"Tumor-Core-Area-Percentage\": get_target_area_percentage(\n", + " \"Tumor-Core\": get_target_area_percentage(\n", " sample_label[0, :, :, slice_idx]\n", " ),\n", - " \"Whole-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " \"Whole-Tumor\": get_target_area_percentage(\n", " sample_label[1, :, :, slice_idx]\n", " ),\n", - " \"Enhancing-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " \"Enhancing-Tumor\": get_target_area_percentage(\n", " sample_label[2, :, :, slice_idx]\n", " ),\n", " },\n", " \"Prediction\": {\n", - " \"Tumor-Core-Area-Percentage\": get_target_area_percentage(\n", + " \"Tumor-Core\": get_target_area_percentage(\n", " predicted_label[0, :, :, slice_idx]\n", " ),\n", - " \"Whole-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " \"Whole-Tumor\": get_target_area_percentage(\n", " predicted_label[1, :, :, slice_idx]\n", " ),\n", - " \"Enhancing-Tumor-Area-Percentage\": get_target_area_percentage(\n", + " \"Enhancing-Tumor\": get_target_area_percentage(\n", " predicted_label[2, :, :, slice_idx]\n", " ),\n", " },\n", " }\n", + " dice_scores = get_class_wise_dice_scores(\n", + " sample_label, predicted_label, slice_idx\n", + " )\n", " table.add_data(\n", - " split, data_idx, slice_idx, tumor_area_percentage, *wandb_images\n", + " split,\n", + " data_idx,\n", + " slice_idx,\n", + " dice_scores,\n", + " tumor_area_percentage,\n", + " *wandb_images\n", " )\n", " progress_bar.update(1)\n", " return table" @@ -342,6 +380,7 @@ " \"Split\",\n", " \"Data Index\",\n", " \"Slice Index\",\n", + " \"Dice-Score\",\n", " \"Tumor-Area-Pixel-Percentage\",\n", " \"Prediction/Tumor-Core\",\n", " \"Prediction/Whole-Tumor\",\n", @@ -349,7 +388,13 @@ " ]\n", ")\n", "\n", - "config.max_prediction_images_visualized = 1\n", + "config.max_prediction_images_visualized = 5\n", + "\n", + "(\n", + " total_tumor_core_dice_score,\n", + " total_whole_tumor_dice_score,\n", + " total_enhancing_tumor_dice_score,\n", + ") = (0.0, 0.0, 0.0)\n", "\n", "# Perform inference and visualization\n", "with torch.no_grad():\n", @@ -371,24 +416,25 @@ " )\n", " test_output = inference(model, test_input)\n", " test_output = postprocessing_transforms(test_output[0])\n", + " 
dice_metric_batch(y_pred=torch.unsqueeze(test_output, dim=0), y=test_labels)\n", + " metric_batch = dice_metric_batch.aggregate()\n", " prediction_table = log_predictions_into_tables(\n", - " sample_image=sample[\"image\"],\n", - " sample_label=sample[\"label\"],\n", + " sample_image=torch.squeeze(test_input),\n", + " sample_label=torch.squeeze(test_labels),\n", " predicted_label=test_output,\n", " data_idx=data_idx,\n", " split=\"validation\",\n", " table=prediction_table,\n", " )\n", + " total_tumor_core_dice_score += metric_batch[0].item()\n", + " total_whole_tumor_dice_score += metric_batch[1].item()\n", + " total_enhancing_tumor_dice_score += metric_batch[2].item()\n", + "\n", + " wandb.log({\"Tumor-Segmentation-Evaludation\": prediction_table})\n", + " wandb.summary[\"Tumor-Score-Dice-Score\"] = total_tumor_core_dice_score / max_samples\n", + " wandb.summary[\"Whole-Tumor-Dice-Score\"] = total_whole_tumor_dice_score / max_samples\n", + " wandb.summary[\"Enhancing-Tumor-Dice-Score\"] = total_enhancing_tumor_dice_score / max_samples\n", "\n", - " wandb.log({\"Evaluation/Tumor-Segmentation-Prediction\": prediction_table})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ "# End the experiment\n", "wandb.finish()" ] From 2981f521c063340fd669bdece3d1d81c4ed656e9 Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Tue, 7 May 2024 20:51:38 +0000 Subject: [PATCH 12/21] update: evaluation notebook --- life-science/03_monai_evaluate_model.ipynb | 163 +++++++++++---------- life-science/utils.py | 20 ++- 2 files changed, 100 insertions(+), 83 deletions(-) diff --git a/life-science/03_monai_evaluate_model.ipynb b/life-science/03_monai_evaluate_model.ipynb index e65c5932..7c5f29be 100644 --- a/life-science/03_monai_evaluate_model.ipynb +++ b/life-science/03_monai_evaluate_model.ipynb @@ -35,7 +35,7 @@ "source": [ "## 🌳 Initialize a W&B Run\n", "\n", - "We will start a new W&B run to start tracking our experiment." + "We will start a new W&B run to start tracking our experiment. Note that we set the job type for this run as `evaluate`." ] }, { @@ -44,7 +44,9 @@ "metadata": {}, "outputs": [], "source": [ + "\n", "import wandb\n", + "from monai.utils import set_determinism\n", "\n", "wandb.init(\n", " project=\"brain-tumor-segmentation\",\n", @@ -52,17 +54,9 @@ " job_type=\"evaluate\"\n", ")\n", "\n", - "config = wandb.config" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from monai.utils import set_determinism\n", + "config = wandb.config\n", "\n", + "# Ensure deterministic behavior and reproducibility\n", "config.seed = 0\n", "set_determinism(seed=config.seed)" ] @@ -71,7 +65,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 💿 Loading and Transforming the Data" + "## 💿 Loading and Transforming the Data\n", + "\n", + "We will use the validation transforms from the previous lessons to load and transform the validation dataset using the Decathlon dataset artifact on W&B." 
] }, { @@ -81,6 +77,7 @@ "outputs": [], "source": [ "from utils import ConvertToMultiChannelBasedOnBratsClassesd\n", + "from monai.apps import DecathlonDataset\n", "from monai.transforms import (\n", " Compose,\n", " LoadImaged,\n", @@ -113,23 +110,17 @@ " # Normalize input image intensity\n", " NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n", " ]\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from monai.apps import DecathlonDataset\n", + ")\n", "\n", "\n", + "# Fetch the brain tumor segmentation dataset artifact from W&B\n", "artifact = wandb.use_artifact(\n", - " \"lifesciences/brain-tumor-segmentation/decathlon_brain_tumor:latest\", type=\"dataset\"\n", + " \"lifesciences/brain-tumor-segmentation/decathlon_brain_tumor:latest\",\n", + " type=\"dataset\",\n", ")\n", "artifact_dir = artifact.download()\n", "\n", + "\n", "# Create the dataset for the test split\n", "# of the brain tumor segmentation dataset\n", "val_dataset = DecathlonDataset(\n", @@ -143,12 +134,22 @@ ")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 🤖 Loading the Model Checkpoint\n", + "\n", + "We are going to fetch the model checkpoints from the training run and load them." + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "import os\n", "import torch\n", "from monai.networks.nets import SegResNet\n", "\n", @@ -169,49 +170,29 @@ " in_channels=config.model_in_channels,\n", " out_channels=config.model_out_channels,\n", " dropout_prob=config.model_dropout_prob,\n", - ").to(device)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", + ").to(device)\n", "\n", + "\n", + "# Fetch the latest model checkpoint artifact from the training run\n", "model_artifact = wandb.use_artifact(\n", " \"lifesciences/brain-tumor-segmentation/8vmqcqao-checkpoint:latest\",\n", " type=\"model\",\n", ")\n", "model_artifact_dir = model_artifact.download()\n", + "\n", + "\n", + "# Load the model checkpoint\n", "model.load_state_dict(torch.load(os.path.join(model_artifact_dir, \"model.pth\")))\n", "model.eval()" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "from monai.inferers import sliding_window_inference\n", + "## 📈 Evaluating the Model\n", "\n", - "config.inference_roi_size = (240, 240, 160)\n", - "\n", - "\n", - "def inference(model, input):\n", - " def _compute(input):\n", - " return sliding_window_inference(\n", - " inputs=input,\n", - " roi_size=config.inference_roi_size,\n", - " sw_batch_size=1,\n", - " predictor=model,\n", - " overlap=0.5,\n", - " )\n", - "\n", - " with torch.cuda.amp.autocast():\n", - " return _compute(input)" + "First we define some instances of `monai.metrics.DiceMetric` for all the metrics that we will be evaluating the model against on the validation split of our dataset." 
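These metric objects are stateful: results are accumulated call by call, aggregated once, and then reset before the next evaluation pass. A minimal sketch of that cycle, where `preds_and_labels` is a hypothetical iterable of post-processed prediction and label tensor pairs:

```python
from monai.metrics import DiceMetric

dice_metric = DiceMetric(include_background=True, reduction="mean")

# Accumulate over the evaluation set, aggregate once, then reset.
for pred, label in preds_and_labels:  # hypothetical (y_pred, y) pairs
    dice_metric(y_pred=pred, y=label)
mean_dice = dice_metric.aggregate().item()
dice_metric.reset()
```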
] }, { @@ -223,22 +204,33 @@ "from monai.metrics import DiceMetric\n", "from monai.transforms import Activations, AsDiscrete\n", "\n", + "# Dice score for each class\n", "tumor_core_dice_metric = DiceMetric(include_background=True, reduction=\"mean\")\n", "enhancing_tumor_dice_metric = DiceMetric(include_background=True, reduction=\"mean\")\n", "whole_tumor_dice_metric = DiceMetric(include_background=True, reduction=\"mean\")\n", + "\n", + "# Mean dice score across all classes\n", "dice_metric_batch = DiceMetric(include_background=True, reduction=\"mean_batch\")\n", + "\n", + "# transforms to postprocess the outputs of the model for evaluation and visualization\n", "postprocessing_transforms = Compose(\n", " [Activations(sigmoid=True), AsDiscrete(threshold=0.5)]\n", ")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we write some utility functions for evaluating each data-point from the validation dataset by logging dice score for each target class and the ground-truth and predicted segmentation labels (for granular visual comparison and analysis) to a W&B Table." + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "from tqdm.auto import tqdm\n", "\n", "\n", @@ -368,14 +360,20 @@ " return table" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we create the prediction table." + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# create the prediction table\n", - "prediction_table = wandb.Table(\n", + "evaluation_table = wandb.Table(\n", " columns=[\n", " \"Split\",\n", " \"Data Index\",\n", @@ -386,54 +384,57 @@ " \"Prediction/Whole-Tumor\",\n", " \"Prediction/Enhancing-Tumor\",\n", " ]\n", - ")\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we loop over the validation dataset and log the evaluation table and the mean dice scores for each class across the entore validation set to W&B." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from utils import inference\n", "\n", - "config.max_prediction_images_visualized = 5\n", + "total_tumor_core_dice_score = 0.0\n", + "total_whole_tumor_dice_score = 0.0\n", + "total_enhancing_tumor_dice_score = 0.0\n", "\n", - "(\n", - " total_tumor_core_dice_score,\n", - " total_whole_tumor_dice_score,\n", - " total_enhancing_tumor_dice_score,\n", - ") = (0.0, 0.0, 0.0)\n", + "config.inference_roi_size = (240, 240, 160)\n", "\n", "# Perform inference and visualization\n", "with torch.no_grad():\n", - " config.max_prediction_images_visualized\n", - " max_samples = (\n", - " min(config.max_prediction_images_visualized, len(val_dataset))\n", - " if config.max_prediction_images_visualized > 0\n", - " else len(val_dataset)\n", - " )\n", - " progress_bar = tqdm(\n", - " enumerate(val_dataset[:max_samples]),\n", - " total=max_samples,\n", - " desc=\"Generating Predictions:\",\n", - " )\n", - " for data_idx, sample in progress_bar:\n", + " for data_idx, sample in tqdm(enumerate(val_dataset), total=len(val_dataset), desc=\"Evaluating:\"):\n", " test_input, test_labels = (\n", " torch.unsqueeze(sample[\"image\"], 0).to(device),\n", " torch.unsqueeze(sample[\"label\"], 0).to(device),\n", " )\n", - " test_output = inference(model, test_input)\n", + " test_output = inference(model, test_input, config.inference_roi_size)\n", " test_output = postprocessing_transforms(test_output[0])\n", " dice_metric_batch(y_pred=torch.unsqueeze(test_output, dim=0), y=test_labels)\n", " metric_batch = dice_metric_batch.aggregate()\n", - " prediction_table = log_predictions_into_tables(\n", + " evaluation_table = log_predictions_into_tables(\n", " sample_image=torch.squeeze(test_input),\n", " sample_label=torch.squeeze(test_labels),\n", " predicted_label=test_output,\n", " data_idx=data_idx,\n", " split=\"validation\",\n", - " table=prediction_table,\n", + " table=evaluation_table,\n", " )\n", " total_tumor_core_dice_score += metric_batch[0].item()\n", " total_whole_tumor_dice_score += metric_batch[1].item()\n", " total_enhancing_tumor_dice_score += metric_batch[2].item()\n", "\n", - " wandb.log({\"Tumor-Segmentation-Evaludation\": prediction_table})\n", - " wandb.summary[\"Tumor-Score-Dice-Score\"] = total_tumor_core_dice_score / max_samples\n", - " wandb.summary[\"Whole-Tumor-Dice-Score\"] = total_whole_tumor_dice_score / max_samples\n", - " wandb.summary[\"Enhancing-Tumor-Dice-Score\"] = total_enhancing_tumor_dice_score / max_samples\n", + " wandb.log({\"Tumor-Segmentation-Evaludation\": evaluation_table})\n", + " wandb.summary[\"Tumor-Score-Dice-Score\"] = total_tumor_core_dice_score / len(val_dataset)\n", + " wandb.summary[\"Whole-Tumor-Dice-Score\"] = total_whole_tumor_dice_score / len(val_dataset)\n", + " wandb.summary[\"Enhancing-Tumor-Dice-Score\"] = total_enhancing_tumor_dice_score / len(val_dataset)\n", "\n", "# End the experiment\n", "wandb.finish()" diff --git a/life-science/utils.py b/life-science/utils.py index 4d57d9a2..b6521ccf 100644 --- a/life-science/utils.py +++ b/life-science/utils.py @@ -1,4 +1,5 @@ import torch +from monai.inferers import sliding_window_inference from monai.transforms import MapTransform @@ -23,10 +24,25 @@ def __call__(self, data): # merge labels 1, 2 and 3 to construct Whole Tumor result.append( torch.logical_or( - torch.logical_or(data_dict[key] == 2, data_dict[key] == 3), data_dict[key] == 1 + torch.logical_or(data_dict[key] == 2, data_dict[key] == 3), + 
data_dict[key] == 1, ) ) # label 2 is Enhancing Tumor result.append(data_dict[key] == 2) data_dict[key] = torch.stack(result, axis=0).float() - return data_dict \ No newline at end of file + return data_dict + + +def inference(model, input, roi_size): + def _compute(input): + return sliding_window_inference( + inputs=input, + roi_size=roi_size, + sw_batch_size=1, + predictor=model, + overlap=0.5, + ) + + with torch.cuda.amp.autocast(): + return _compute(input) From 8ec1e77feea4ec4310779d78db27fdd50359b9bc Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Tue, 7 May 2024 21:20:42 +0000 Subject: [PATCH 13/21] add: training script + sweep config --- life-science/config.yaml | 30 ++++ life-science/train.py | 302 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 332 insertions(+) create mode 100644 life-science/config.yaml create mode 100644 life-science/train.py diff --git a/life-science/config.yaml b/life-science/config.yaml new file mode 100644 index 00000000..b4a4b13b --- /dev/null +++ b/life-science/config.yaml @@ -0,0 +1,30 @@ +method: bayes +metric: + goal: maximize + name: validation/mean_dice +parameters: + model_dropout_prob: + distribution: uniform + max: 0.5 + min: 0.1 + model_init_filters: + values: + - 8 + - 16 + - 32 + initial_learning_rate: + distribution: uniform + max: 0.001 + min: 1e-06 + dice_loss_smoothen_denominator: + distribution: uniform + max: 0.0001 + min: 1e-06 + dice_loss_smoothen_numerator: + distribution: uniform + max: 0.0001 + min: 1e-06 + weight_decay: + distribution: uniform + max: 0.001 + min: 1e-06 \ No newline at end of file diff --git a/life-science/train.py b/life-science/train.py new file mode 100644 index 00000000..e4655bf1 --- /dev/null +++ b/life-science/train.py @@ -0,0 +1,302 @@ +import os + +import torch +import wandb +from tqdm.auto import tqdm + +from monai.apps import DecathlonDataset +from monai.data import DataLoader, decollate_batch +from monai.losses import DiceLoss +from monai.metrics import DiceMetric +from monai.networks.nets import SegResNet +from monai.transforms import ( + Activations, + AsDiscrete, + Compose, + LoadImaged, + NormalizeIntensityd, + Orientationd, + RandFlipd, + RandScaleIntensityd, + RandShiftIntensityd, + RandSpatialCropd, + Spacingd, + EnsureTyped, + EnsureChannelFirstd, +) +from monai.utils import set_determinism + +from utils import ConvertToMultiChannelBasedOnBratsClassesd, inference + + +def main(): + wandb.init( + project="brain-tumor-segmentation", + entity="lifesciences", + job_type="train_baseline", + ) + config = wandb.config + config.seed = 0 + config.roi_size = [224, 224, 144] + config.num_workers = 4 + config.batch_size = 2 + config.model_blocks_down = [1, 2, 2, 4] + config.model_blocks_up = [1, 1, 1] + config.model_in_channels = 4 + config.model_out_channels = 3 + config.max_train_epochs = 3 + config.dice_loss_squared_prediction = True + config.dice_loss_target_onehot = False + config.dice_loss_apply_sigmoid = True + config.inference_roi_size = (240, 240, 160) + config.validation_intervals = 1 + config.checkpoint_dir = "./checkpoints" + + # Create checkpoint directory + os.makedirs(config.checkpoint_dir, exist_ok=True) + + set_determinism(seed=config.seed) + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + + train_transform = Compose( + [ + # load 4 Nifti images and stack them together + LoadImaged(keys=["image", "label"]), + # Ensure loaded images are in channels-first format + EnsureChannelFirstd(keys="image"), + # Ensure the 
input data to be a PyTorch Tensor or numpy array + EnsureTyped(keys=["image", "label"]), + # Convert labels to multi-channels based on brats18 classes + ConvertToMultiChannelBasedOnBratsClassesd(keys="label"), + # Change the input image’s orientation into the specified based on axis codes + Orientationd(keys=["image", "label"], axcodes="RAS"), + # Resample the input images to the specified pixel dimension + Spacingd( + keys=["image", "label"], + pixdim=(1.0, 1.0, 1.0), + mode=("bilinear", "nearest"), + ), + # Augmentation: Crop image with random size or specific size ROI + RandSpatialCropd( + keys=["image", "label"], roi_size=config.roi_size, random_size=False + ), + # Augmentation: Randomly flip the image on the specified axes + RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=0), + RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=1), + RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=2), + # Normalize input image intensity + NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True), + # Augmentation: Randomly scale the image intensity + RandScaleIntensityd(keys="image", factors=0.1, prob=1.0), + RandShiftIntensityd(keys="image", offsets=0.1, prob=1.0), + ] + ) + val_transform = Compose( + [ + # load 4 Nifti images and stack them together + LoadImaged(keys=["image", "label"]), + # Ensure loaded images are in channels-first format + EnsureChannelFirstd(keys="image"), + # Ensure the input data to be a PyTorch Tensor or numpy array + EnsureTyped(keys=["image", "label"]), + # Convert labels to multi-channels based on brats18 classes + ConvertToMultiChannelBasedOnBratsClassesd(keys="label"), + # Change the input image’s orientation into the specified based on axis codes + Orientationd(keys=["image", "label"], axcodes="RAS"), + # Resample the input images to the specified pixel dimension + Spacingd( + keys=["image", "label"], + pixdim=(1.0, 1.0, 1.0), + mode=("bilinear", "nearest"), + ), + # Normalize input image intensity + NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True), + ] + ) + + # Create the dataset for the training split + # of the brain tumor segmentation dataset + train_dataset = DecathlonDataset( + root_dir="./artifacts/decathlon_brain_tumor:v0", + task="Task01_BrainTumour", + transform=train_transform, + section="training", + download=False, + cache_rate=0.0, + num_workers=config.num_workers, + ) + + # Create the dataset for the validation split + # of the brain tumor segmentation dataset + val_dataset = DecathlonDataset( + root_dir="./artifacts/decathlon_brain_tumor:v0", + task="Task01_BrainTumour", + transform=val_transform, + section="validation", + download=False, + cache_rate=0.0, + num_workers=config.num_workers, + ) + + # create the train_loader + train_loader = DataLoader( + train_dataset, + batch_size=config.batch_size, + shuffle=True, + num_workers=config.num_workers, + ) + + # create the val_loader + val_loader = DataLoader( + val_dataset, + batch_size=config.batch_size, + shuffle=False, + num_workers=config.num_workers, + ) + + # create model + model = SegResNet( + blocks_down=config.model_blocks_down, + blocks_up=config.model_blocks_up, + init_filters=config.model_init_filters, + in_channels=config.model_in_channels, + out_channels=config.model_out_channels, + dropout_prob=config.model_dropout_prob, + ).to(device) + + # create optimizer + optimizer = torch.optim.Adam( + model.parameters(), + config.initial_learning_rate, + weight_decay=config.weight_decay, + ) + + # create learning rate scheduler + lr_scheduler = 
torch.optim.lr_scheduler.CosineAnnealingLR( + optimizer, T_max=config.max_train_epochs + ) + + loss_function = DiceLoss( + smooth_nr=config.dice_loss_smoothen_numerator, + smooth_dr=config.dice_loss_smoothen_denominator, + squared_pred=config.dice_loss_squared_prediction, + to_onehot_y=config.dice_loss_target_onehot, + sigmoid=config.dice_loss_apply_sigmoid, + ) + + dice_metric = DiceMetric(include_background=True, reduction="mean") + dice_metric_batch = DiceMetric(include_background=True, reduction="mean_batch") + postprocessing_transforms = Compose( + [Activations(sigmoid=True), AsDiscrete(threshold=0.5)] + ) + + scaler = torch.cuda.amp.GradScaler() + torch.backends.cudnn.benchmark = True + + wandb.define_metric("epoch/epoch_step") + wandb.define_metric("epoch/*", step_metric="epoch/epoch_step") + wandb.define_metric("batch/batch_step") + wandb.define_metric("batch/*", step_metric="batch/batch_step") + wandb.define_metric("validation/validation_step") + wandb.define_metric("validation/*", step_metric="validation/validation_step") + + batch_step = 0 + validation_step = 0 + metric_values = [] + metric_values_tumor_core = [] + metric_values_whole_tumor = [] + metric_values_enhanced_tumor = [] + + epoch_progress_bar = tqdm(range(config.max_train_epochs), desc="Training:") + + for epoch in epoch_progress_bar: + model.train() + epoch_loss = 0 + + total_batch_steps = len(train_dataset) // train_loader.batch_size + batch_progress_bar = tqdm(train_loader, total=total_batch_steps, leave=False) + + # Training Step + for batch_data in batch_progress_bar: + inputs, labels = ( + batch_data["image"].to(device), + batch_data["label"].to(device), + ) + optimizer.zero_grad() + with torch.cuda.amp.autocast(): + outputs = model(inputs) + loss = loss_function(outputs, labels) + scaler.scale(loss).backward() + scaler.step(optimizer) + scaler.update() + epoch_loss += loss.item() + batch_progress_bar.set_description(f"train_loss: {loss.item():.4f}:") + ## Log batch-wise training loss to W&B + wandb.log({"batch/batch_step": batch_step, "batch/train_loss": loss.item()}) + batch_step += 1 + + epoch_loss /= total_batch_steps + ## Log batch-wise training loss and learning rate to W&B + wandb.log( + { + "epoch/epoch_step": epoch, + "epoch/mean_train_loss": epoch_loss, + "epoch/learning_rate": lr_scheduler.get_last_lr()[0], + } + ) + lr_scheduler.step() + epoch_progress_bar.set_description(f"Training: train_loss: {epoch_loss:.4f}:") + + # Validation and model checkpointing step + if (epoch + 1) % config.validation_intervals == 0: + model.eval() + with torch.no_grad(): + for val_data in val_loader: + val_inputs, val_labels = ( + val_data["image"].to(device), + val_data["label"].to(device), + ) + val_outputs = inference(model, val_inputs) + val_outputs = [ + postprocessing_transforms(i) + for i in decollate_batch(val_outputs) + ] + dice_metric(y_pred=val_outputs, y=val_labels) + dice_metric_batch(y_pred=val_outputs, y=val_labels) + + metric_values.append(dice_metric.aggregate().item()) + metric_batch = dice_metric_batch.aggregate() + metric_values_tumor_core.append(metric_batch[0].item()) + metric_values_whole_tumor.append(metric_batch[1].item()) + metric_values_enhanced_tumor.append(metric_batch[2].item()) + dice_metric.reset() + dice_metric_batch.reset() + + # Log and versison model checkpoints using W&B artifacts. 
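+            # Note: wandb.log_model saves the checkpoint file as a versioned W&B model
+            # artifact attached to this run, and the `epoch_{epoch}` alias lets a later
+            # run pull the weights from a specific epoch for evaluation or resuming.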
+ checkpoint_path = os.path.join(config.checkpoint_dir, "model.pth") + torch.save(model.state_dict(), checkpoint_path) + wandb.log_model( + checkpoint_path, + name=f"{wandb.run.id}-checkpoint", + aliases=[f"epoch_{epoch}"], + ) + + # Log validation metrics to W&B dashboard. + wandb.log( + { + "validation/validation_step": validation_step, + "validation/mean_dice": metric_values[-1], + "validation/mean_dice_tumor_core": metric_values_tumor_core[-1], + "validation/mean_dice_whole_tumor": metric_values_whole_tumor[ + -1 + ], + "validation/mean_dice_enhanced_tumor": metric_values_enhanced_tumor[ + -1 + ], + } + ) + validation_step += 1 + + +if __name__ == "__main__": + main() From e6e28bbca57c33adeec416db0492de27b163d659 Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Tue, 7 May 2024 21:22:18 +0000 Subject: [PATCH 14/21] update: disable model checkpointing for sweep --- life-science/train.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/life-science/train.py b/life-science/train.py index e4655bf1..b30ee2f7 100644 --- a/life-science/train.py +++ b/life-science/train.py @@ -1,5 +1,3 @@ -import os - import torch import wandb from tqdm.auto import tqdm @@ -50,10 +48,6 @@ def main(): config.dice_loss_apply_sigmoid = True config.inference_roi_size = (240, 240, 160) config.validation_intervals = 1 - config.checkpoint_dir = "./checkpoints" - - # Create checkpoint directory - os.makedirs(config.checkpoint_dir, exist_ok=True) set_determinism(seed=config.seed) device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") @@ -272,15 +266,6 @@ def main(): dice_metric.reset() dice_metric_batch.reset() - # Log and versison model checkpoints using W&B artifacts. - checkpoint_path = os.path.join(config.checkpoint_dir, "model.pth") - torch.save(model.state_dict(), checkpoint_path) - wandb.log_model( - checkpoint_path, - name=f"{wandb.run.id}-checkpoint", - aliases=[f"epoch_{epoch}"], - ) - # Log validation metrics to W&B dashboard. 
wandb.log( { From 2289142e88b8157732bb6562dc04bcc2ec56d7b1 Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Wed, 8 May 2024 11:20:14 +0000 Subject: [PATCH 15/21] update: sweep config --- life-science/config.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/life-science/config.yaml b/life-science/config.yaml index b4a4b13b..ae1f094e 100644 --- a/life-science/config.yaml +++ b/life-science/config.yaml @@ -1,3 +1,5 @@ +program: train.py +name: segmentation_sweep method: bayes metric: goal: maximize @@ -8,10 +10,7 @@ parameters: max: 0.5 min: 0.1 model_init_filters: - values: - - 8 - - 16 - - 32 + values: [8, 16, 32] initial_learning_rate: distribution: uniform max: 0.001 From e39c830e877b51c31be2c6c1842a01f13a29ab88 Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Wed, 8 May 2024 11:25:34 +0000 Subject: [PATCH 16/21] update: training script --- life-science/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/life-science/train.py b/life-science/train.py index b30ee2f7..d1cac765 100644 --- a/life-science/train.py +++ b/life-science/train.py @@ -42,7 +42,7 @@ def main(): config.model_blocks_up = [1, 1, 1] config.model_in_channels = 4 config.model_out_channels = 3 - config.max_train_epochs = 3 + config.max_train_epochs = 5 config.dice_loss_squared_prediction = True config.dice_loss_target_onehot = False config.dice_loss_apply_sigmoid = True @@ -250,7 +250,7 @@ def main(): val_data["image"].to(device), val_data["label"].to(device), ) - val_outputs = inference(model, val_inputs) + val_outputs = inference(model, val_inputs, config.roi_size) val_outputs = [ postprocessing_transforms(i) for i in decollate_batch(val_outputs) From 936f9725ba7e287d988c0642a834aee788b946cc Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Thu, 9 May 2024 18:22:57 +0000 Subject: [PATCH 17/21] add: notebook to train improved model --- .../04_monai_train_improved_model.ipynb | 567 ++++++++++++++++++ life-science/utils.py | 14 +- 2 files changed, 580 insertions(+), 1 deletion(-) create mode 100644 life-science/04_monai_train_improved_model.ipynb diff --git a/life-science/04_monai_train_improved_model.ipynb b/life-science/04_monai_train_improved_model.ipynb new file mode 100644 index 00000000..364ce438 --- /dev/null +++ b/life-science/04_monai_train_improved_model.ipynb @@ -0,0 +1,567 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a93e6b75-b1d9-4703-8834-ab3fcd8f934b", + "metadata": {}, + "source": [ + "# Train a Baseline Segmentation Model\n", + "In this notebook we will learn:\n", + "\n", + "- We will learn how to use specific MONAI APIs to write our training workflow, including a SoTA neural network architecture and loss function and metrics for our task.\n", + "- Use Weights & Biases for tracking our experiments and logging and verisioning our model checkpoints." + ] + }, + { + "cell_type": "markdown", + "id": "95118e9e-e6d0-4bde-bd5b-af792ca8153a", + "metadata": {}, + "source": [ + "## 🌴 Setup and Installation\n", + "\n", + "First, let us install the latest version of both MONAI and Weights and Biases." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d3ab1bc3-d503-4e4c-afd1-ad77d72ac472", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install -q -U monai wandb" + ] + }, + { + "cell_type": "markdown", + "id": "281b7942", + "metadata": {}, + "source": [ + "## 🦄 Getting the Best Configs\n", + "\n", + "For training a model that is an improvement over the baseline model we would need to get the configs of the best performing run from the sweep." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d004c6bf", + "metadata": {}, + "outputs": [], + "source": [ + "import wandb\n", + "\n", + "\n", + "def get_best_config_from_sweep(entity: str, project: str, sweep_id: str, metric: str):\n", + " api = wandb.Api()\n", + " sweep = api.sweep(f\"{entity}/{project}/{sweep_id}\")\n", + " runs = sorted(\n", + " sweep.runs, key=lambda run: run.summary.get(metric, 0), reverse=True\n", + " )\n", + " best_run = runs[0]\n", + " return best_run.config\n", + "\n", + "\n", + "config = get_best_config_from_sweep(\n", + " entity=\"lifesciences\",\n", + " project=\"brain-tumor-segmentation\",\n", + " sweep_id=\"580gsolt\",\n", + " metric=\"validation/mean_dice\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "6a4f71b2-ffb6-475c-a563-1cb631e33d84", + "metadata": {}, + "source": [ + "Next, we will start a new W&B run to start tracking our experiment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b258cfa2-3795-4502-8878-aff469ba5077", + "metadata": {}, + "outputs": [], + "source": [ + "from monai.utils import set_determinism\n", + "\n", + "wandb.init(\n", + " project=\"brain-tumor-segmentation\",\n", + " entity=\"lifesciences\",\n", + " job_type=\"train_improved\",\n", + " config=config,\n", + ")\n", + "\n", + "config = wandb.config\n", + "config.initial_learning_rate = 1e-4\n", + "\n", + "set_determinism(seed=config.seed)" + ] + }, + { + "cell_type": "markdown", + "id": "0d2da2f5-b1a8-4215-927c-dc5884b0b41f", + "metadata": {}, + "source": [ + "## 💿 Loading and Transforming the Data\n", + "\n", + "We will now learn using the [`monai.transforms`](https://docs.monai.io/en/stable/transforms.html) API to create and apply transforms to our data." + ] + }, + { + "cell_type": "markdown", + "id": "1f38e5d1-e3de-4bda-9b89-67193a6baca2", + "metadata": {}, + "source": [ + "Next, we compose all the necessary transforms for visualizing the data using [`monai.transforms.Compose`](https://docs.monai.io/en/stable/transforms.html#monai.transforms.Compose).\n", + "\n", + "**Note:** During training, we will apply a differnt set of transforms to the data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62a9e73a-aeb1-49b2-b06b-d09857b74966", + "metadata": {}, + "outputs": [], + "source": [ + "from monai.transforms import (\n", + " Activations,\n", + " AsDiscrete,\n", + " Compose,\n", + " LoadImaged,\n", + " NormalizeIntensityd,\n", + " Orientationd,\n", + " RandFlipd,\n", + " RandScaleIntensityd,\n", + " RandShiftIntensityd,\n", + " RandSpatialCropd,\n", + " Spacingd,\n", + " EnsureTyped,\n", + " EnsureChannelFirstd,\n", + ")\n", + "from utils import ConvertToMultiChannelBasedOnBratsClassesd\n", + "\n", + "\n", + "config.roi_size = [224, 224, 144]\n", + "\n", + "train_transform = Compose(\n", + " [\n", + " # load 4 Nifti images and stack them together\n", + " LoadImaged(keys=[\"image\", \"label\"]),\n", + " # Ensure loaded images are in channels-first format\n", + " EnsureChannelFirstd(keys=\"image\"),\n", + " # Ensure the input data to be a PyTorch Tensor or numpy array\n", + " EnsureTyped(keys=[\"image\", \"label\"]),\n", + " # Convert labels to multi-channels based on brats18 classes\n", + " ConvertToMultiChannelBasedOnBratsClassesd(keys=\"label\"),\n", + " # Change the input image’s orientation into the specified based on axis codes\n", + " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", + " # Resample the input images to the specified pixel dimension\n", + " Spacingd(\n", + " keys=[\"image\", \"label\"],\n", + " pixdim=(1.0, 1.0, 1.0),\n", + " mode=(\"bilinear\", \"nearest\"),\n", + " ),\n", + " # Augmentation: Crop image with random size or specific size ROI\n", + " RandSpatialCropd(\n", + " keys=[\"image\", \"label\"], roi_size=config.roi_size, random_size=False\n", + " ),\n", + " \n", + " # Augmentation: Randomly flip the image on the specified axes\n", + " RandFlipd(keys=[\"image\", \"label\"], prob=0.5, spatial_axis=0),\n", + " RandFlipd(keys=[\"image\", \"label\"], prob=0.5, spatial_axis=1),\n", + " RandFlipd(keys=[\"image\", \"label\"], prob=0.5, spatial_axis=2),\n", + " \n", + " # Normalize input image intensity\n", + " NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n", + " \n", + " # Augmentation: Randomly scale the image intensity\n", + " RandScaleIntensityd(keys=\"image\", factors=0.1, prob=1.0),\n", + " RandShiftIntensityd(keys=\"image\", offsets=0.1, prob=1.0),\n", + " ]\n", + ")\n", + "val_transform = Compose(\n", + " [\n", + " # load 4 Nifti images and stack them together\n", + " LoadImaged(keys=[\"image\", \"label\"]),\n", + " # Ensure loaded images are in channels-first format\n", + " EnsureChannelFirstd(keys=\"image\"),\n", + " # Ensure the input data to be a PyTorch Tensor or numpy array\n", + " EnsureTyped(keys=[\"image\", \"label\"]),\n", + " # Convert labels to multi-channels based on brats18 classes\n", + " ConvertToMultiChannelBasedOnBratsClassesd(keys=\"label\"),\n", + " # Change the input image’s orientation into the specified based on axis codes\n", + " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", + " # Resample the input images to the specified pixel dimension\n", + " Spacingd(\n", + " keys=[\"image\", \"label\"],\n", + " pixdim=(1.0, 1.0, 1.0),\n", + " mode=(\"bilinear\", \"nearest\"),\n", + " ),\n", + " # Normalize input image intensity\n", + " NormalizeIntensityd(keys=\"image\", nonzero=True, channel_wise=True),\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "14df1019-7cf6-4baa-baa1-fa2085da17bb", + "metadata": {}, + "source": [ + "For loading the dataset, we first fetch it from the W&B dataset artifact 
that we had created earlier. This enables us to use the dataset as an input artifact to our visualization run, and establish the necessary lineage for our experiment.\n", + "\n", + "![](./assets/artifact_usage.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35b2d2c9-8e7f-479b-a822-deb0984d22ae", + "metadata": {}, + "outputs": [], + "source": [ + "artifact = wandb.use_artifact(\n", + " \"lifesciences/brain-tumor-segmentation/decathlon_brain_tumor:v0\", type=\"dataset\"\n", + ")\n", + "artifact_dir = artifact.download()" + ] + }, + { + "cell_type": "markdown", + "id": "8fd9de15-afa4-4ccf-9d4f-78b90ae3db8f", + "metadata": {}, + "source": [ + "We now use the [`monai.apps.DecathlonDataset`](https://docs.monai.io/en/stable/apps.html#monai.apps.DecathlonDataset) to load our dataset and apply the transforms we defined on the data samples so that we use them for training and validation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16faedc0-f50a-4289-9fa4-6948549ea74f", + "metadata": {}, + "outputs": [], + "source": [ + "from monai.apps import DecathlonDataset\n", + "\n", + "\n", + "# Create the dataset for the training split\n", + "# of the brain tumor segmentation dataset\n", + "train_dataset = DecathlonDataset(\n", + " root_dir=artifact_dir,\n", + " task=\"Task01_BrainTumour\",\n", + " transform=train_transform,\n", + " section=\"training\",\n", + " download=False,\n", + " cache_rate=0.0,\n", + " num_workers=config.num_workers,\n", + ")\n", + "\n", + "# Create the dataset for the validation split\n", + "# of the brain tumor segmentation dataset\n", + "val_dataset = DecathlonDataset(\n", + " root_dir=artifact_dir,\n", + " task=\"Task01_BrainTumour\",\n", + " transform=val_transform,\n", + " section=\"validation\",\n", + " download=False,\n", + " cache_rate=0.0,\n", + " num_workers=config.num_workers,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e8eb84cf-eea0-431b-a66e-9afd2b7144ba", + "metadata": {}, + "source": [ + "We now create DataLoaders for the train and validation datasets respectively using [`monai.data.DataLoader`](https://docs.monai.io/en/stable/data.html#dataloader) which provides an iterable over the given dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4ad33748-060f-4652-87ed-f8b56de02824", + "metadata": {}, + "outputs": [], + "source": [ + "from monai.data import DataLoader\n", + "\n", + "\n", + "# create the train_loader\n", + "train_loader = DataLoader(\n", + " train_dataset,\n", + " batch_size=config.batch_size,\n", + " shuffle=True,\n", + " num_workers=config.num_workers,\n", + ")\n", + "\n", + "# create the val_loader\n", + "val_loader = DataLoader(\n", + " val_dataset,\n", + " batch_size=config.batch_size,\n", + " shuffle=False,\n", + " num_workers=config.num_workers,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "ba68b8b8-9527-482d-8f4f-da8c71dcbd87", + "metadata": {}, + "source": [ + "## 🤖 Creating the Model, Loss, and Optimizer\n", + "\n", + "We will be training a **SegResNet** model based on the paper [3D MRI brain tumor segmentation using auto-encoder regularization](https://arxiv.org/pdf/1810.11654.pdf). The [SegResNet](https://docs.monai.io/en/stable/networks.html#segresnet) model that comes implemented as a PyTorch Module as part of the [`monai.networks.nets`](https://docs.monai.io/en/stable/networks.html#nets) API that provides out-of-the-box implementations of SoTA neural network models for different medical imaging tasks." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d2a5e5a-84ee-4fbe-9b06-1fcf9639d443", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from monai.networks.nets import SegResNet\n", + "\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", + "\n", + "# create model\n", + "model = SegResNet(\n", + " blocks_down=config.model_blocks_down,\n", + " blocks_up=config.model_blocks_up,\n", + " init_filters=config.model_init_filters,\n", + " in_channels=config.model_in_channels,\n", + " out_channels=config.model_out_channels,\n", + " dropout_prob=config.model_dropout_prob,\n", + ").to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "20863f94-f7d0-4b82-8a55-41a1952d1cae", + "metadata": {}, + "source": [ + "We will be using [Adam Optimizer](https://pytorch.org/docs/stable/generated/torch.optim.Adam.html) and the [cosine annealing schedule](https://pytorch.org/docs/stable/generated/torch.optim.lr_scheduler.CosineAnnealingLR.html) to schedule our learning rate. This approach is designed to help in finding global minima in the optimization landscape and to provide a form of reset mechanism during training, which can improve the performance of the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a9a9b1a6-d127-4fe9-8831-a33b71dbe438", + "metadata": {}, + "outputs": [], + "source": [ + "# create optimizer\n", + "optimizer = torch.optim.Adam(\n", + " model.parameters(),\n", + " config.initial_learning_rate,\n", + " weight_decay=config.weight_decay,\n", + ")\n", + "\n", + "# create learning rate scheduler\n", + "lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n", + " optimizer, T_max=config.max_train_epochs\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "541dc8c6-009e-4115-81c5-d35386a92b6d", + "metadata": {}, + "source": [ + "Next, we would define the loss as multi-label DiceLoss as proposed by the paper [V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation](https://arxiv.org/abs/1606.04797) using the [`monai.losses`](https://docs.monai.io/en/stable/losses.html) API and the corresponding dice metrics using the [`monai.metrics`](https://docs.monai.io/en/stable/metrics.html) API." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2276a62-24a9-4352-8058-eb807e16819e", + "metadata": {}, + "outputs": [], + "source": [ + "from monai.losses import DiceLoss\n", + "\n", + "loss_function = DiceLoss(\n", + " smooth_nr=config.dice_loss_smoothen_numerator,\n", + " smooth_dr=config.dice_loss_smoothen_denominator,\n", + " squared_pred=config.dice_loss_squared_prediction,\n", + " to_onehot_y=config.dice_loss_target_onehot,\n", + " sigmoid=config.dice_loss_apply_sigmoid,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "187d9b11-3df9-4ae8-96c0-e7c16468878d", + "metadata": {}, + "outputs": [], + "source": [ + "from monai.metrics import DiceMetric\n", + "\n", + "dice_metric = DiceMetric(include_background=True, reduction=\"mean\")\n", + "dice_metric_batch = DiceMetric(include_background=True, reduction=\"mean_batch\")\n", + "post_trans = Compose([Activations(sigmoid=True), AsDiscrete(threshold=0.5)])" + ] + }, + { + "cell_type": "markdown", + "id": "55423f02-8686-4a56-9778-09070b7edcc5", + "metadata": {}, + "source": [ + "## 🦾 Training the Model\n", + "\n", + "Finally, we proceed to writing the training and validation loop for the brain tumor segmentation model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7df12631-8e09-48d3-b0b0-655775ec22e4", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from tqdm.auto import tqdm\n", + "from monai.data import decollate_batch\n", + "from utils import inference\n", + "\n", + "\n", + "# Define custom x-axes\n", + "wandb.define_metric(\"epoch/epoch_step\")\n", + "wandb.define_metric(\"epoch/*\", step_metric=\"epoch/epoch_step\")\n", + "wandb.define_metric(\"batch/batch_step\")\n", + "wandb.define_metric(\"batch/*\", step_metric=\"batch/batch_step\")\n", + "wandb.define_metric(\"validation/validation_step\")\n", + "wandb.define_metric(\"validation/*\", step_metric=\"validation/validation_step\")\n", + "\n", + "# use automatic mixed-precision to accelerate training\n", + "scaler = torch.cuda.amp.GradScaler()\n", + "torch.backends.cudnn.benchmark = True\n", + "\n", + "# Create checkpoint directory\n", + "os.makedirs(config.checkpoint_dir, exist_ok=True)\n", + "\n", + "batch_step = 0\n", + "validation_step = 0\n", + "metric_values = []\n", + "metric_values_tumor_core = []\n", + "metric_values_whole_tumor = []\n", + "metric_values_enhanced_tumor = []\n", + "\n", + "epoch_progress_bar = tqdm(range(config.max_train_epochs), desc=\"Training:\")\n", + "\n", + "for epoch in epoch_progress_bar:\n", + " model.train()\n", + " epoch_loss = 0\n", + "\n", + " total_batch_steps = len(train_dataset) // train_loader.batch_size\n", + " batch_progress_bar = tqdm(train_loader, total=total_batch_steps, leave=False)\n", + "\n", + " # Training Step\n", + " for batch_data in batch_progress_bar:\n", + " inputs, labels = (\n", + " batch_data[\"image\"].to(device),\n", + " batch_data[\"label\"].to(device),\n", + " )\n", + " optimizer.zero_grad()\n", + " with torch.cuda.amp.autocast():\n", + " outputs = model(inputs)\n", + " loss = loss_function(outputs, labels)\n", + " scaler.scale(loss).backward()\n", + " scaler.step(optimizer)\n", + " scaler.update()\n", + " epoch_loss += loss.item()\n", + " batch_progress_bar.set_description(f\"train_loss: {loss.item():.4f}:\")\n", + " ## Log batch-wise training loss to W&B\n", + " wandb.log({\"batch/batch_step\": batch_step, \"batch/train_loss\": loss.item()})\n", + " batch_step += 1\n", + "\n", + " epoch_loss /= total_batch_steps\n", + " ## Log batch-wise training loss and learning rate to W&B\n", + " wandb.log(\n", + " {\n", + " \"epoch/epoch_step\": epoch,\n", + " \"epoch/mean_train_loss\": epoch_loss,\n", + " \"epoch/learning_rate\": lr_scheduler.get_last_lr()[0],\n", + " }\n", + " )\n", + " lr_scheduler.step()\n", + " epoch_progress_bar.set_description(f\"Training: train_loss: {epoch_loss:.4f}:\")\n", + "\n", + " # Validation and model checkpointing step\n", + " if (epoch + 1) % config.validation_intervals == 0:\n", + " model.eval()\n", + " with torch.no_grad():\n", + " for val_data in val_loader:\n", + " val_inputs, val_labels = (\n", + " val_data[\"image\"].to(device),\n", + " val_data[\"label\"].to(device),\n", + " )\n", + " val_outputs = inference(model, val_inputs)\n", + " val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]\n", + " dice_metric(y_pred=val_outputs, y=val_labels)\n", + " dice_metric_batch(y_pred=val_outputs, y=val_labels)\n", + "\n", + " metric_values.append(dice_metric.aggregate().item())\n", + " metric_batch = dice_metric_batch.aggregate()\n", + " metric_values_tumor_core.append(metric_batch[0].item())\n", + " metric_values_whole_tumor.append(metric_batch[1].item())\n", + " 
metric_values_enhanced_tumor.append(metric_batch[2].item())\n", + " dice_metric.reset()\n", + " dice_metric_batch.reset()\n", + "\n", + " # Log and versison model checkpoints using W&B artifacts.\n", + " checkpoint_path = os.path.join(config.checkpoint_dir, \"model.pth\")\n", + " torch.save(model.state_dict(), checkpoint_path)\n", + " wandb.log_model(\n", + " checkpoint_path,\n", + " name=f\"{wandb.run.id}-checkpoint\",\n", + " aliases=[f\"epoch_{epoch}\"],\n", + " )\n", + "\n", + " # Log validation metrics to W&B dashboard.\n", + " wandb.log(\n", + " {\n", + " \"validation/validation_step\": validation_step,\n", + " \"validation/mean_dice\": metric_values[-1],\n", + " \"validation/mean_dice_tumor_core\": metric_values_tumor_core[-1],\n", + " \"validation/mean_dice_whole_tumor\": metric_values_whole_tumor[-1],\n", + " \"validation/mean_dice_enhanced_tumor\": metric_values_enhanced_tumor[-1],\n", + " }\n", + " )\n", + " validation_step += 1\n", + "\n", + "\n", + "# Finish the experiment\n", + "wandb.finish()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/life-science/utils.py b/life-science/utils.py index b6521ccf..5eb5ade0 100644 --- a/life-science/utils.py +++ b/life-science/utils.py @@ -1,7 +1,9 @@ -import torch from monai.inferers import sliding_window_inference from monai.transforms import MapTransform +import torch +import wandb + class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform): """ @@ -46,3 +48,13 @@ def _compute(input): with torch.cuda.amp.autocast(): return _compute(input) + + +def get_best_config_from_sweep( + entity: str, project: str, sweep_id: str, metric: str = "validation/mean_dice" +): + api = wandb.Api() + sweep = api.sweep(f"{entity}/{project}/{sweep_id}") + runs = sorted(sweep.runs, key=lambda run: run.summary.get(metric, 0), reverse=True) + best_run = runs[0] + return best_run.config From 6e922c1f77b83f54c2281dba9bf894870f6017ad Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Thu, 9 May 2024 18:27:34 +0000 Subject: [PATCH 18/21] update: train script --- life-science/train.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/life-science/train.py b/life-science/train.py index d1cac765..f59ba5ae 100644 --- a/life-science/train.py +++ b/life-science/train.py @@ -34,6 +34,8 @@ def main(): job_type="train_baseline", ) config = wandb.config + + # Manually setting the values of the configs unaffected by the sweep config.seed = 0 config.roi_size = [224, 224, 144] config.num_workers = 4 @@ -48,6 +50,15 @@ def main(): config.dice_loss_apply_sigmoid = True config.inference_roi_size = (240, 240, 160) config.validation_intervals = 1 + + # We are not setting the values of the following configs as their values + # will be determined the sweep + # - config.model_dropout_prob = 0.2 + # - config.model_init_filters = 16 + # - config.initial_learning_rate = 1e-4 + # - config.dice_loss_smoothen_denominator = 1e-5 + # - config.dice_loss_smoothen_numerator = 0 + # - config.weight_decay = 1e-5 set_determinism(seed=config.seed) device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") From 
c8fc4ed20d01ae74a5dd9b47102a2ee45a4d979d Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Thu, 9 May 2024 19:03:36 +0000 Subject: [PATCH 19/21] update: training improved model notebook --- life-science/04_monai_train_improved_model.ipynb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/life-science/04_monai_train_improved_model.ipynb b/life-science/04_monai_train_improved_model.ipynb index 364ce438..c0d478ac 100644 --- a/life-science/04_monai_train_improved_model.ipynb +++ b/life-science/04_monai_train_improved_model.ipynb @@ -67,7 +67,8 @@ " project=\"brain-tumor-segmentation\",\n", " sweep_id=\"580gsolt\",\n", " metric=\"validation/mean_dice\",\n", - ")" + ")\n", + "config[\"initial_learning_rate\"] = 1e-4" ] }, { @@ -93,9 +94,7 @@ " job_type=\"train_improved\",\n", " config=config,\n", ")\n", - "\n", "config = wandb.config\n", - "config.initial_learning_rate = 1e-4\n", "\n", "set_determinism(seed=config.seed)" ] @@ -445,7 +444,8 @@ "torch.backends.cudnn.benchmark = True\n", "\n", "# Create checkpoint directory\n", - "os.makedirs(config.checkpoint_dir, exist_ok=True)\n", + "checkpoint_dir = \"./checkpoints\"\n", + "os.makedirs(checkpoint_dir, exist_ok=True)\n", "\n", "batch_step = 0\n", "validation_step = 0\n", @@ -517,7 +517,7 @@ " dice_metric_batch.reset()\n", "\n", " # Log and versison model checkpoints using W&B artifacts.\n", - " checkpoint_path = os.path.join(config.checkpoint_dir, \"model.pth\")\n", + " checkpoint_path = os.path.join(checkpoint_dir, \"model.pth\")\n", " torch.save(model.state_dict(), checkpoint_path)\n", " wandb.log_model(\n", " checkpoint_path,\n", From 79b9b390044c95c9741ad3577206d31d0e9cc304 Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Thu, 9 May 2024 19:26:19 +0000 Subject: [PATCH 20/21] update: training improved model notebook --- life-science/04_monai_train_improved_model.ipynb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/life-science/04_monai_train_improved_model.ipynb b/life-science/04_monai_train_improved_model.ipynb index c0d478ac..e0da229a 100644 --- a/life-science/04_monai_train_improved_model.ipynb +++ b/life-science/04_monai_train_improved_model.ipynb @@ -68,7 +68,8 @@ " sweep_id=\"580gsolt\",\n", " metric=\"validation/mean_dice\",\n", ")\n", - "config[\"initial_learning_rate\"] = 1e-4" + "config[\"initial_learning_rate\"] = 1e-4\n", + "config[\"max_train_epochs\"] = 25" ] }, { From 6957a287a38cf21a337f5a9392db868fc36fa66e Mon Sep 17 00:00:00 2001 From: soumik12345 <19soumik.rakshit96@gmail.com> Date: Thu, 9 May 2024 22:52:05 +0000 Subject: [PATCH 21/21] update: training script --- life-science/train.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/life-science/train.py b/life-science/train.py index f59ba5ae..ef2f7438 100644 --- a/life-science/train.py +++ b/life-science/train.py @@ -28,11 +28,7 @@ def main(): - wandb.init( - project="brain-tumor-segmentation", - entity="lifesciences", - job_type="train_baseline", - ) + wandb.init(project="brain-tumor-segmentation", entity="lifesciences") config = wandb.config # Manually setting the values of the configs unaffected by the sweep